text
stringlengths 5
1.04M
|
|---|
#include <iostream>
using namespace std;
// Simple rectangle with integer dimensions.
class Rectangle {
// Dimensions; must be assigned via set_values() before area() is called,
// otherwise area() reads uninitialized members.
int width, height;
public:
// Assigns width and height (defined out-of-line below).
void set_values(int,int);
// Returns width * height.
int area() {return width*height;}
};
// Store the supplied dimensions on this rectangle.
void Rectangle::set_values(int x, int y) {
  height = y;
  width = x;
}
int main () {
Rectangle rect,rectb;
rect.set_values(3,4);
rectb.set_values(5,6);
cout << "rect area: " << rect.area() << endl;
cout << "rectb area: " << rectb.area() << endl;
return 0;
}
|
//write include statements
#include "dna.h"
#include<iostream>
//write using statements
using std::cout; using std::cin;
/*
Write code that prompts user to enter 1 for Get GC Content,
or 2 for Get DNA Complement. The program will prompt user for a
DNA string and call either get gc content or get dna complement
function and display the result. Program runs as long as
user enters a y or Y.
*/
// Menu loop: the user picks 1 (GC content) or 2 (DNA complement), enters a
// DNA string, and sees the result of the corresponding dna.h function.
// Repeats while the user answers Y/y at the continue prompt.
int main()
{
    string choice; int option;
    do
    {
        string dna;
        cout << "Choose a number to perform an action.";
        // "complement" fixed (was misspelled "compliment" in the prompt).
        cout << "\n1.) The GC content.\n2.) The DNA complement.\nOption: ";
        cin >> option;
        if (!cin)
        {
            // Non-numeric input would leave cin in a failed state and loop on
            // garbage; reset the stream and force the invalid-option branch.
            cin.clear();
            cin.ignore(10000, '\n');
            option = 0;
        }
        cout << "\nEnter the DNA sequence: ";
        cin >> dna;
        if (option == 1)
        {
            cout << "\n" << get_gc_content(dna);
        }
        else if (option == 2)
        {
            // Function name is spelled "compliment" in dna.h; keep the call.
            cout << "\n" << get_dna_compliment(dna);
        }
        else
        {
            // Previously an invalid option was silently ignored.
            cout << "\nInvalid option.";
        }
        cout << "\nContinue? (Y/N): ";
        cin >> choice;
    } while (choice == "Y" || choice == "y");
    return 0;
}
|
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/css/cssom/cross_thread_style_value.h"
#include <memory>
#include <utility>
#include "base/synchronization/waitable_event.h"
#include "base/task/single_thread_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/core/css/cssom/cross_thread_color_value.h"
#include "third_party/blink/renderer/core/css/cssom/cross_thread_keyword_value.h"
#include "third_party/blink/renderer/core/css/cssom/cross_thread_unit_value.h"
#include "third_party/blink/renderer/core/css/cssom/cross_thread_unparsed_value.h"
#include "third_party/blink/renderer/core/css/cssom/cross_thread_unsupported_value.h"
#include "third_party/blink/renderer/core/css/cssom/css_keyword_value.h"
#include "third_party/blink/renderer/core/css/cssom/css_style_value.h"
#include "third_party/blink/renderer/core/css/cssom/css_unit_value.h"
#include "third_party/blink/renderer/core/css/cssom/css_unparsed_value.h"
#include "third_party/blink/renderer/core/css/cssom/css_unsupported_color.h"
#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/scheduler/public/thread.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
namespace blink {
// Fixture providing helpers for verifying that CrossThread*Value objects can
// be moved to a worker thread and inspected there. Each Check* helper runs on
// the worker thread, asserts the value arrived intact, then signals the
// event the main thread is blocked on.
class CrossThreadStyleValueTest : public testing::Test {
public:
// Runs on the worker thread; signals |waitable_event| so the main thread
// knows the posted task completed.
void ShutDown(base::WaitableEvent* waitable_event) {
DCHECK(!IsMainThread());
waitable_event->Signal();
}
// Posts a ShutDown task to |thread_| and blocks until it has run, ensuring
// the worker thread is quiescent before the test ends.
void ShutDownThread() {
base::WaitableEvent waitable_event;
PostCrossThreadTask(
*thread_->GetTaskRunner(), FROM_HERE,
CrossThreadBindOnce(&CrossThreadStyleValueTest::ShutDown,
CrossThreadUnretained(this),
CrossThreadUnretained(&waitable_event)));
waitable_event.Wait();
}
// Verifies an unsupported value survived the cross-thread move.
void CheckUnsupportedValue(
base::WaitableEvent* waitable_event,
std::unique_ptr<CrossThreadUnsupportedValue> value) {
DCHECK(!IsMainThread());
EXPECT_EQ(value->value_, "Unsupported");
waitable_event->Signal();
}
// Verifies a keyword value survived the cross-thread move.
void CheckKeywordValue(base::WaitableEvent* waitable_event,
std::unique_ptr<CrossThreadKeywordValue> value) {
DCHECK(!IsMainThread());
EXPECT_EQ(value->keyword_value_, "Keyword");
waitable_event->Signal();
}
// Verifies an unparsed value survived the cross-thread move.
void CheckUnparsedValue(base::WaitableEvent* waitable_event,
std::unique_ptr<CrossThreadUnparsedValue> value) {
DCHECK(!IsMainThread());
EXPECT_EQ(value->value_, "Unparsed");
waitable_event->Signal();
}
// Verifies a unit value (magnitude and unit) survived the cross-thread move.
void CheckUnitValue(base::WaitableEvent* waitable_event,
std::unique_ptr<CrossThreadUnitValue> value) {
DCHECK(!IsMainThread());
EXPECT_EQ(value->value_, 1);
EXPECT_EQ(value->unit_, CSSPrimitiveValue::UnitType::kDegrees);
waitable_event->Signal();
}
// Verifies a color value survived the cross-thread move.
void CheckColorValue(base::WaitableEvent* waitable_event,
std::unique_ptr<CrossThreadColorValue> value) {
DCHECK(!IsMainThread());
EXPECT_EQ(value->value_, Color(0, 255, 0));
waitable_event->Signal();
}
protected:
// Worker thread standing in for a worklet thread; created per-test.
std::unique_ptr<blink::Thread> thread_;
};
// Ensure that a CrossThreadUnsupportedValue can be safely passed cross
// threads.
TEST_F(CrossThreadStyleValueTest, PassUnsupportedValueCrossThread) {
std::unique_ptr<CrossThreadUnsupportedValue> value =
std::make_unique<CrossThreadUnsupportedValue>("Unsupported");
DCHECK(value);
// Use a Thread to emulate worklet thread.
thread_ = blink::Thread::CreateThread(
ThreadCreationParams(ThreadType::kTestThread).SetSupportsGC(true));
base::WaitableEvent waitable_event;
// Move |value| to the worker; CheckUnsupportedValue asserts it arrived
// intact and signals |waitable_event|.
PostCrossThreadTask(
*thread_->GetTaskRunner(), FROM_HERE,
CrossThreadBindOnce(&CrossThreadStyleValueTest::CheckUnsupportedValue,
CrossThreadUnretained(this),
CrossThreadUnretained(&waitable_event),
std::move(value)));
// Block until the worker-thread check has run, then tear the thread down.
waitable_event.Wait();
ShutDownThread();
}
// Converting back to a CSSStyleValue yields an unknown-typed value that
// preserves the original CSS text.
TEST_F(CrossThreadStyleValueTest, CrossThreadUnsupportedValueToCSSStyleValue) {
  auto cross_thread_value =
      std::make_unique<CrossThreadUnsupportedValue>("Unsupported");
  DCHECK(cross_thread_value);
  const CSSStyleValue* const converted = cross_thread_value->ToCSSStyleValue();
  EXPECT_EQ(converted->GetType(),
            CSSStyleValue::StyleValueType::kUnknownType);
  EXPECT_EQ(converted->CSSText(), "Unsupported");
}
// Ensure that a CrossThreadUnparsedValue can be safely passed cross threads.
TEST_F(CrossThreadStyleValueTest, PassUnparsedValueCrossThread) {
std::unique_ptr<CrossThreadUnparsedValue> value =
std::make_unique<CrossThreadUnparsedValue>("Unparsed");
DCHECK(value);
// Use a Thread to emulate worklet thread.
thread_ = blink::Thread::CreateThread(
ThreadCreationParams(ThreadType::kTestThread).SetSupportsGC(true));
base::WaitableEvent waitable_event;
// Move |value| to the worker; CheckUnparsedValue asserts it arrived intact.
PostCrossThreadTask(
*thread_->GetTaskRunner(), FROM_HERE,
CrossThreadBindOnce(&CrossThreadStyleValueTest::CheckUnparsedValue,
CrossThreadUnretained(this),
CrossThreadUnretained(&waitable_event),
std::move(value)));
waitable_event.Wait();
ShutDownThread();
}
// Round-trip: a CrossThreadUnparsedValue converts to a CSSUnparsedValue
// carrying the same string.
TEST_F(CrossThreadStyleValueTest, CrossThreadUnparsedValueToCSSStyleValue) {
  auto cross_thread_value =
      std::make_unique<CrossThreadUnparsedValue>("Unparsed");
  DCHECK(cross_thread_value);
  CSSStyleValue* converted = cross_thread_value->ToCSSStyleValue();
  EXPECT_EQ(converted->GetType(),
            CSSStyleValue::StyleValueType::kUnparsedType);
  EXPECT_EQ(static_cast<CSSUnparsedValue*>(converted)->ToString(),
            "Unparsed");
}
// Ensure that a CrossThreadKeywordValue can be safely passed cross threads.
TEST_F(CrossThreadStyleValueTest, PassKeywordValueCrossThread) {
std::unique_ptr<CrossThreadKeywordValue> value =
std::make_unique<CrossThreadKeywordValue>("Keyword");
DCHECK(value);
// Use a Thread to emulate worklet thread.
thread_ = blink::Thread::CreateThread(
ThreadCreationParams(ThreadType::kTestThread).SetSupportsGC(true));
base::WaitableEvent waitable_event;
// Move |value| to the worker; CheckKeywordValue asserts it arrived intact.
PostCrossThreadTask(
*thread_->GetTaskRunner(), FROM_HERE,
CrossThreadBindOnce(&CrossThreadStyleValueTest::CheckKeywordValue,
CrossThreadUnretained(this),
CrossThreadUnretained(&waitable_event),
std::move(value)));
waitable_event.Wait();
ShutDownThread();
}
// Round-trip: a CrossThreadKeywordValue converts to a CSSKeywordValue
// carrying the same keyword.
TEST_F(CrossThreadStyleValueTest, CrossThreadKeywordValueToCSSStyleValue) {
  auto cross_thread_value =
      std::make_unique<CrossThreadKeywordValue>("Keyword");
  DCHECK(cross_thread_value);
  CSSStyleValue* converted = cross_thread_value->ToCSSStyleValue();
  EXPECT_EQ(converted->GetType(),
            CSSStyleValue::StyleValueType::kKeywordType);
  EXPECT_EQ(static_cast<CSSKeywordValue*>(converted)->value(), "Keyword");
}
// Ensure that a CrossThreadUnitValue can be safely passed cross threads.
TEST_F(CrossThreadStyleValueTest, PassUnitValueCrossThread) {
std::unique_ptr<CrossThreadUnitValue> value =
std::make_unique<CrossThreadUnitValue>(
1, CSSPrimitiveValue::UnitType::kDegrees);
DCHECK(value);
// Use a Thread to emulate worklet thread.
thread_ = blink::Thread::CreateThread(
ThreadCreationParams(ThreadType::kTestThread).SetSupportsGC(true));
base::WaitableEvent waitable_event;
// Move |value| to the worker; CheckUnitValue asserts value and unit arrived.
PostCrossThreadTask(
*thread_->GetTaskRunner(), FROM_HERE,
CrossThreadBindOnce(&CrossThreadStyleValueTest::CheckUnitValue,
CrossThreadUnretained(this),
CrossThreadUnretained(&waitable_event),
std::move(value)));
waitable_event.Wait();
ShutDownThread();
}
// Round-trip: a CrossThreadUnitValue converts to a CSSUnitValue preserving
// both the magnitude and the unit string.
TEST_F(CrossThreadStyleValueTest, CrossThreadUnitValueToCSSStyleValue) {
  auto cross_thread_value = std::make_unique<CrossThreadUnitValue>(
      1, CSSPrimitiveValue::UnitType::kDegrees);
  DCHECK(cross_thread_value);
  CSSStyleValue* converted = cross_thread_value->ToCSSStyleValue();
  EXPECT_EQ(converted->GetType(), CSSStyleValue::StyleValueType::kUnitType);
  EXPECT_EQ(static_cast<CSSUnitValue*>(converted)->value(), 1);
  EXPECT_EQ(static_cast<CSSUnitValue*>(converted)->unit(), "deg");
}
// Ensure that a CrossThreadColorValue can be safely passed cross threads.
TEST_F(CrossThreadStyleValueTest, PassColorValueCrossThread) {
std::unique_ptr<CrossThreadColorValue> value =
std::make_unique<CrossThreadColorValue>(Color(0, 255, 0));
DCHECK(value);
// Use a Thread to emulate worklet thread.
thread_ = blink::Thread::CreateThread(
ThreadCreationParams(ThreadType::kTestThread).SetSupportsGC(true));
base::WaitableEvent waitable_event;
// Move |value| to the worker; CheckColorValue asserts the color arrived.
PostCrossThreadTask(
*thread_->GetTaskRunner(), FROM_HERE,
CrossThreadBindOnce(&CrossThreadStyleValueTest::CheckColorValue,
CrossThreadUnretained(this),
CrossThreadUnretained(&waitable_event),
std::move(value)));
waitable_event.Wait();
ShutDownThread();
}
// Round-trip: a CrossThreadColorValue converts to a CSSUnsupportedColor
// carrying the same color.
TEST_F(CrossThreadStyleValueTest, CrossThreadColorValueToCSSStyleValue) {
  auto cross_thread_value =
      std::make_unique<CrossThreadColorValue>(Color(0, 255, 0));
  DCHECK(cross_thread_value);
  CSSStyleValue* converted = cross_thread_value->ToCSSStyleValue();
  EXPECT_EQ(converted->GetType(),
            CSSStyleValue::StyleValueType::kUnsupportedColorType);
  EXPECT_EQ(static_cast<CSSUnsupportedColor*>(converted)->Value(),
            Color(0, 255, 0));
}
// DataEquivalent treats two null pointers as equal, and null vs non-null as
// never equal, regardless of the concrete value type.
TEST_F(CrossThreadStyleValueTest, ComparingNullValues) {
  // Two null values are equal to each other.
  std::unique_ptr<CrossThreadStyleValue> null_value1;
  std::unique_ptr<CrossThreadStyleValue> null_value2;
  EXPECT_TRUE(DataEquivalent(null_value1, null_value2));
  // If one argument is null and the other isn't they are never equal.
  // Use std::make_unique rather than bare new, matching the rest of the file.
  std::unique_ptr<CrossThreadStyleValue> keyword_value =
      std::make_unique<CrossThreadKeywordValue>("keyword");
  std::unique_ptr<CrossThreadStyleValue> unit_value =
      std::make_unique<CrossThreadUnitValue>(
          1, CSSPrimitiveValue::UnitType::kDegrees);
  std::unique_ptr<CrossThreadStyleValue> unsupported_value =
      std::make_unique<CrossThreadUnsupportedValue>("unsupported");
  EXPECT_FALSE(DataEquivalent(null_value1, keyword_value));
  EXPECT_FALSE(DataEquivalent(null_value1, unit_value));
  EXPECT_FALSE(DataEquivalent(null_value1, unsupported_value));
  EXPECT_FALSE(DataEquivalent(keyword_value, null_value1));
  EXPECT_FALSE(DataEquivalent(unit_value, null_value1));
  EXPECT_FALSE(DataEquivalent(unsupported_value, null_value1));
}
// DataEquivalent on mismatching concrete types is false in both argument
// orders.
TEST_F(CrossThreadStyleValueTest, ComparingDifferentTypes) {
  // Mismatching types are never equal.
  // Use std::make_unique rather than bare new, matching the rest of the file.
  std::unique_ptr<CrossThreadStyleValue> keyword_value =
      std::make_unique<CrossThreadKeywordValue>("keyword");
  std::unique_ptr<CrossThreadStyleValue> unit_value =
      std::make_unique<CrossThreadUnitValue>(
          1, CSSPrimitiveValue::UnitType::kDegrees);
  std::unique_ptr<CrossThreadStyleValue> unsupported_value =
      std::make_unique<CrossThreadUnsupportedValue>("unsupported");
  EXPECT_FALSE(DataEquivalent(keyword_value, unit_value));
  EXPECT_FALSE(DataEquivalent(keyword_value, unsupported_value));
  EXPECT_FALSE(DataEquivalent(unit_value, unsupported_value));
  EXPECT_FALSE(DataEquivalent(unit_value, keyword_value));
  EXPECT_FALSE(DataEquivalent(unsupported_value, keyword_value));
  EXPECT_FALSE(DataEquivalent(unsupported_value, unit_value));
}
TEST_F(CrossThreadStyleValueTest, ComparingCrossThreadKeywordValue) {
  // CrossThreadKeywordValues are compared on their keyword; if it is equal then
  // so are they.
  // Use std::make_unique rather than bare new, matching the rest of the file.
  std::unique_ptr<CrossThreadStyleValue> keyword_value_1 =
      std::make_unique<CrossThreadKeywordValue>("keyword");
  std::unique_ptr<CrossThreadStyleValue> keyword_value_2 =
      std::make_unique<CrossThreadKeywordValue>("keyword");
  std::unique_ptr<CrossThreadStyleValue> keyword_value_3 =
      std::make_unique<CrossThreadKeywordValue>("different");
  EXPECT_TRUE(DataEquivalent(keyword_value_1, keyword_value_2));
  EXPECT_FALSE(DataEquivalent(keyword_value_1, keyword_value_3));
}
TEST_F(CrossThreadStyleValueTest, ComparingCrossThreadUnitValue) {
  // CrossThreadUnitValues are compared based on their value and unit type; both
  // have to match. There are a lot of unit types; we just test a single sample.
  // Use std::make_unique rather than bare new, matching the rest of the file.
  std::unique_ptr<CrossThreadStyleValue> unit_value_1 =
      std::make_unique<CrossThreadUnitValue>(
          1, CSSPrimitiveValue::UnitType::kDegrees);
  // Same value, same unit.
  std::unique_ptr<CrossThreadStyleValue> unit_value_2 =
      std::make_unique<CrossThreadUnitValue>(
          1, CSSPrimitiveValue::UnitType::kDegrees);
  EXPECT_TRUE(DataEquivalent(unit_value_1, unit_value_2));
  // Same value, different unit.
  std::unique_ptr<CrossThreadStyleValue> unit_value_3 =
      std::make_unique<CrossThreadUnitValue>(
          1, CSSPrimitiveValue::UnitType::kPoints);
  EXPECT_FALSE(DataEquivalent(unit_value_1, unit_value_3));
  // Different value, same unit.
  std::unique_ptr<CrossThreadStyleValue> unit_value_4 =
      std::make_unique<CrossThreadUnitValue>(
          2, CSSPrimitiveValue::UnitType::kDegrees);
  EXPECT_FALSE(DataEquivalent(unit_value_1, unit_value_4));
}
TEST_F(CrossThreadStyleValueTest, ComparingCrossThreadColorValue) {
  // CrossThreadColorValues are compared on their color channel values; all
  // channels must match.
  // Use std::make_unique rather than bare new, matching the rest of the file.
  std::unique_ptr<CrossThreadStyleValue> color_value_1 =
      std::make_unique<CrossThreadColorValue>(Color(0, 0, 0));
  std::unique_ptr<CrossThreadStyleValue> color_value_2 =
      std::make_unique<CrossThreadColorValue>(Color(0, 0, 0));
  std::unique_ptr<CrossThreadStyleValue> color_value_3 =
      std::make_unique<CrossThreadColorValue>(Color(0, 255, 0));
  EXPECT_TRUE(DataEquivalent(color_value_1, color_value_2));
  EXPECT_FALSE(DataEquivalent(color_value_1, color_value_3));
}
TEST_F(CrossThreadStyleValueTest, ComparingCrossThreadUnsupportedValue) {
  // CrossThreadUnsupportedValues are compared on their value; if it is equal
  // then so are they.
  // Use std::make_unique rather than bare new, matching the rest of the file.
  std::unique_ptr<CrossThreadStyleValue> unsupported_value_1 =
      std::make_unique<CrossThreadUnsupportedValue>("value");
  std::unique_ptr<CrossThreadStyleValue> unsupported_value_2 =
      std::make_unique<CrossThreadUnsupportedValue>("value");
  std::unique_ptr<CrossThreadStyleValue> unsupported_value_3 =
      std::make_unique<CrossThreadUnsupportedValue>("different");
  EXPECT_TRUE(DataEquivalent(unsupported_value_1, unsupported_value_2));
  EXPECT_FALSE(DataEquivalent(unsupported_value_1, unsupported_value_3));
}
} // namespace blink
|
/*
* Copyright (c) 2003-2021 Rony Shapiro <ronys@pwsafe.org>.
* All rights reserved. Use of the code is allowed under the
* Artistic License 2.0 terms, as specified in the LICENSE file
* distributed with this code, or available from
* http://www.opensource.org/licenses/artistic-license-2.0.php
*/
/**
* \file Windows-specific implementation of lib.h
*/
#include "../lib.h"
#include "../debug.h"
#include <windows.h>
void *pws_os::LoadLibrary(const TCHAR *lib, loadLibraryTypes type)
{
ASSERT(lib != NULL);
// Qualify full path name. (Lockheed Martin) Secure Coding 11-14-2007
TCHAR szFilePath[MAX_PATH+1];
memset(szFilePath, 0, MAX_PATH+1);
if (type == loadLibraryTypes::SYS) {
if (!GetSystemDirectory(szFilePath, MAX_PATH)) {
pws_os::Trace(_T("GetSystemDirectory failed when loading dynamic library\n"));
return NULL;
}
}
else if (type == loadLibraryTypes::APP || type == loadLibraryTypes::RESOURCE) {
if (!GetModuleFileName(NULL, szFilePath, MAX_PATH)) {
pws_os::Trace(_T("GetModuleFileName failed when loading dynamic library\n"));
return NULL;
}
else
//set last slash to \0 for truncating app name
*_tcsrchr(szFilePath, _T('\\')) = _T('\0');
}
//Add slash after directory path
if (type != loadLibraryTypes::CUSTOM) {
size_t nLen = _tcslen(szFilePath);
if (nLen > 0) {
if (szFilePath[nLen - 1] != '\\')
_tcscat_s(szFilePath, MAX_PATH, _T("\\"));
}
}
_tcscat_s(szFilePath, MAX_PATH, lib);
pws_os::Trace(_T("Loading Library: %s\n"), szFilePath);
HMODULE hMod = NULL;
switch ((loadLibraryTypes)type) {
// We load resource files (e.g language translation resource files) as
// data files to avoid any problem with 32/64 bit DLLs. This allows
// the use of 32-bit language DLLs in 64-bit builds and vice versa.
case loadLibraryTypes::RESOURCE:
hMod = ::LoadLibraryEx(szFilePath, NULL, LOAD_LIBRARY_AS_DATAFILE);
break;
// All other DLLs are loaded for execution
default:
hMod = ::LoadLibrary(szFilePath);
break;
}
return hMod;
// End of change. (Lockheed Martin) Secure Coding 11-14-2007
}
// Release a module handle obtained from pws_os::LoadLibrary.
// Returns true only when a non-null handle was freed successfully.
bool pws_os::FreeLibrary(void *handle)
{
  if (handle == NULL)
    return false;
  return ::FreeLibrary(HMODULE(handle)) == TRUE;
}
// Look up an exported symbol in a loaded module.
// Returns the function address, or NULL if the symbol is not found.
void *pws_os::GetFunction(void *handle, const char *name)
{
ASSERT(handle != NULL && name != NULL);
return ::GetProcAddress(HMODULE(handle), name);
}
|
#pragma once
#include <eosio/asset.hpp>
#include <eosio/eosio.hpp>
#include <string>
namespace eosiosystem {
class system_contract;
}
namespace eosio {
using std::string;
// Standard-style EOSIO token contract: create/issue/retire/transfer of
// fungible assets, plus RAM-table open/close for balance rows.
CONTRACT Token : public contract {
using contract::contract;
public:
// Argument layout of the transfer action, for off-chain (de)serialization.
struct transfer_args {
name from;
name to;
asset quantity;
string memo;
};
// Create a new token identified by maximum_supply's symbol, owned by issuer.
[[eosio::action]] void create(name issuer, asset maximum_supply);
// Mint quantity to account `to` (issuer only, up to max supply).
[[eosio::action]] void issue(name to, asset quantity, string memo);
// Burn quantity from the issuer's balance, reducing supply.
[[eosio::action]] void retire(asset quantity, string memo);
// Move quantity between accounts.
[[eosio::action]] void transfer(name from, name to, asset quantity, string memo);
// Create a zero balance row for owner, RAM paid by ram_payer.
[[eosio::action]] void open(name owner, symbol_code symbol, name ram_payer);
// Delete owner's (zero) balance row, releasing RAM.
[[eosio::action]] void close(name owner, symbol_code symbol);
// Read the current circulating supply for sym; asserts if the stats row
// does not exist.
static asset get_supply(name token_contract_account, symbol_code sym) {
stats statstable(token_contract_account, sym.raw());
const auto& st = statstable.get(sym.raw());
return st.supply;
}
// Read owner's balance for sym; asserts if no balance row exists.
static asset get_balance(name token_contract_account, name owner, symbol_code sym) {
accounts accountstable(token_contract_account, owner.value);
const auto& ac = accountstable.get(sym.raw());
return ac.balance;
}
private:
// One row per (owner scope, symbol): the owner's balance.
TABLE account {
asset balance;
uint64_t primary_key() const { return balance.symbol.code().raw(); }
};
// One row per symbol: supply bookkeeping and the issuing account.
TABLE currency_stats {
asset supply;
asset max_supply;
name issuer;
uint64_t primary_key() const { return supply.symbol.code().raw(); }
};
typedef eosio::multi_index<"accounts"_n, account> accounts;
typedef eosio::multi_index<"stat"_n, currency_stats> stats;
// Internal balance mutation helpers used by transfer/issue/retire.
void sub_balance(name owner, asset value);
void add_balance(name owner, asset value, name ram_payer);
};
} /// namespace eosio
|
#include "openmp_impl/v_wrapper.hpp"
using namespace schro_omp;
// Exercises v_wrapper's expression templates: builds a wrapper holding two
// vectors of 2x2 matrices, applies a compound arithmetic expression, and
// checks both the numeric result and that the nested structure is preserved.
// Returns true on success; prints a diagnostic and returns false otherwise.
bool test_v_wrapper()
{
int n = 3;
std::vector<arma::mat> u(n, arma::ones(2,2));
std::vector<arma::mat> v(n, 2*arma::ones(2,2));
v_wrapper<double> U;
U.values.push_back(std::move(u));
U.values.push_back(std::move(v));
// W = 0.5*U, so U += 2*(3*U - W) = U + 5*U = 6*U elementwise:
// entries that started at 1 become 6, entries that started at 2 become 12.
v_wrapper<double> W = 0.5*U;
U += 2.0 * (3.0*U - W);
bool success = true;
if ((U[0][0][0] != 6.0) or (U[1][0][0] != 12.0)) {
std::cout << "v_wrapper failed template expression computations\n";
success = false;
}
// The wrapper must still hold 2 vectors of n matrices each.
if ((U.size() != 2) or (U[0].size() != n) or (U[1].size() != n)) {
std::cout << "v_wrapper failed to preserve structure of original vectors\n";
success = false;
}
return success;
}
|
/***************************************************************************
*
* $Id: StSigmaMinus1385.hh,v 1.1 2010/01/28 21:54:20 jwebb Exp $
*
* Author: Thomas Ullrich, May 99 (based on Geant4 code, see below)
***************************************************************************
*
* The design of the StParticleDefinition class and all concrete
* classes derived from it is largely based on the design of the
* G4ParticleDefinition class from Geant4 (RD44).
* Although the code is in large parts different (modified or rewritten)
* and adapted to the STAR framework the basic idea stays the same.
*
***************************************************************************
*
* $Log: StSigmaMinus1385.hh,v $
* Revision 1.1 2010/01/28 21:54:20 jwebb
* Added the Sigma(1385) baryons.
*
* Revision 1.1 1999/05/14 18:50:13 ullrich
* Initial Revision
*
**************************************************************************/
#ifndef StSigmaMinus1385_hh
#define StSigmaMinus1385_hh
#include "StBaryon.hh"
// Singleton particle definition for the Sigma(1385)- baryon, following the
// Geant4-style StParticleDefinition pattern: one static instance, accessed
// through instance()/SigmaMinus1385(); the constructor is private.
class StSigmaMinus1385 : public StBaryon {
public:
// Access the unique instance.
static StSigmaMinus1385* instance() {return &mSigmaMinus1385;}
// Alias for instance(), matching the naming used by sibling particle classes.
static StSigmaMinus1385* SigmaMinus1385() {return &mSigmaMinus1385;}
private:
// The single static instance (defined in the corresponding .cc file).
static StSigmaMinus1385 mSigmaMinus1385;
// Private: all quantum numbers are fixed at static-initialization time.
StSigmaMinus1385(const string & aName,
double mass,
double width,
double charge,
int iSpin,
int iParity,
int iConjugation,
int iIsospin,
int iIsospinZ,
int gParity,
const string & pType,
int lepton,
int baryon,
int encoding,
bool stable,
double lifetime);
};
#endif
|
// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
// PARTICULAR PURPOSE.
//
// Copyright (C) 2008 Microsoft Corporation. All rights reserved.
#include <windows.h>
#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='*' publicKeyToken='6595b64144ccf1df' language='*'\"")
#define ID_BUTTON 100
#define ID_CHECKBOX 200
#define ID_LABEL 300
HINSTANCE g_hInst;
HWND g_hwndApp; // Owner window
HWND g_hwndLabel; // static text window
// Forward declarations
LRESULT CALLBACK WndProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam);
void InitDefaultLF(LOGFONT *lf);
// Application entry point: registers the window class, creates the main
// window, and pumps messages until WM_QUIT.
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE, PSTR pszCmdLine, int iCmdShow)
{
    g_hInst = hInstance; // Save our hInstance for later
    // Zero-initialize so msg.wParam is well-defined even when CreateWindow
    // fails and the message loop never runs (the original returned an
    // uninitialized value in that case).
    MSG msg = {};
    WCHAR const szWindowName[] = L"ChooseFont Sample";
    WCHAR const szWindowClass[] = L"ChooseFontSampleWClass";
    WNDCLASS wc = {};
    wc.style = CS_HREDRAW | CS_VREDRAW;
    wc.lpfnWndProc = WndProc;
    wc.hInstance = hInstance;
    wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    wc.hCursor = LoadCursor(NULL, IDC_ARROW);
    wc.hbrBackground = (HBRUSH)GetStockObject(WHITE_BRUSH);
    wc.lpszClassName = szWindowClass;
    RegisterClass(&wc);
    g_hwndApp = CreateWindow(szWindowClass, szWindowName,
                    WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT,
                    490, 120, NULL, NULL, hInstance, NULL);
    if (g_hwndApp)
    {
        ShowWindow(g_hwndApp, iCmdShow);
        UpdateWindow(g_hwndApp);
        // Standard message pump; GetMessage returns 0 on WM_QUIT.
        while(GetMessage(&msg, NULL, 0, 0))
        {
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }
    }
    return (int)msg.wParam;
}
// Main window procedure: builds the child controls on WM_CREATE, shows the
// ChooseFont dialog on button press, applies the chosen font to the label,
// and releases the last created font on WM_DESTROY.
LRESULT CALLBACK WndProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
// Persists across messages: holds the last chosen/default LOGFONT, and is
// handed to ChooseFont as both input (initial selection) and output.
static LOGFONT lf = {};
switch(uMsg)
{
case WM_CREATE:
{
// Create "Choose Font" button
CreateWindow(L"button",
L"Choose Font",
BS_PUSHBUTTON | WS_CHILD | WS_VISIBLE,
20, 20,
100, 20,
hwnd, (HMENU)ID_BUTTON,
g_hInst, NULL);
// Create "Show all fonts?" checkbox
CreateWindow(L"button",
L"Show all fonts?",
BS_AUTOCHECKBOX | WS_CHILD | WS_VISIBLE,
20, 45,
120, 20,
hwnd, (HMENU)ID_CHECKBOX,
g_hInst, NULL);
// Create the static label with our sample text
g_hwndLabel = CreateWindow(L"static",
L"Some words.",
SS_CENTER | WS_CHILD | WS_VISIBLE,
150, 10,
300, 40,
hwnd, (HMENU)ID_LABEL,
g_hInst, NULL);
InitDefaultLF(&lf);
break;
}
case WM_COMMAND:
{
if (LOWORD(wParam) == ID_BUTTON)
{
CHOOSEFONT cf = { sizeof(cf) };
cf.hwndOwner = hwnd;
cf.lpLogFont = &lf;
if (BST_CHECKED == IsDlgButtonChecked(hwnd, ID_CHECKBOX))
{
// show all fonts (ignore auto-activation)
cf.Flags |= CF_INACTIVEFONTS;
}
// ChooseFont fills |lf| in place when the user clicks OK.
if (ChooseFont(&cf) == TRUE)
{
HFONT hfont = CreateFontIndirect(&lf);
if (hfont)
{
// delete the old font if being used for the control if there is one
HFONT hfontOld = (HFONT)SendMessage(g_hwndLabel, WM_GETFONT, 0, 0);
if (hfontOld)
{
DeleteObject(hfontOld);
}
SendMessage(g_hwndLabel, WM_SETFONT, (WPARAM)hfont, MAKELPARAM(TRUE, 0));
}
}
}
break;
}
case WM_DESTROY:
{
// cleanup font resources created above
HFONT hfontOld = (HFONT)SendMessage(g_hwndLabel, WM_GETFONT, 0, 0);
if (hfontOld)
{
DeleteObject(hfontOld);
}
PostQuitMessage(0);
return 0;
}
}
return DefWindowProc(hwnd, uMsg, wParam, lParam);
}
// Fill *plf with a default 10-point, normal-weight LOGFONT sized for the
// current display's vertical DPI.
void InitDefaultLF(LOGFONT *plf)
{
    HDC hdc = GetDC(NULL);
    ZeroMemory(plf, sizeof(*plf));
    plf->lfCharSet = (BYTE) GetTextCharset(hdc);
    plf->lfOutPrecision = OUT_DEFAULT_PRECIS;
    plf->lfClipPrecision = CLIP_DEFAULT_PRECIS;
    plf->lfQuality = DEFAULT_QUALITY;
    plf->lfPitchAndFamily = DEFAULT_PITCH;
    plf->lfWeight = FW_NORMAL;
    // Point size -> device units: -MulDiv(points, LOGPIXELSY, 72), since
    // there are 72 points per logical inch. The original divided by 2,
    // producing a font ~36x taller than the intended 10 points.
    plf->lfHeight = -MulDiv(10, GetDeviceCaps(hdc, LOGPIXELSY), 72);
    ReleaseDC(NULL, hdc);
}
|
#include "targetselectornode.h"
#include <QMetaEnum>
// Register the node pointer type with Qt's meta-type system (needed for
// QVariant storage and queued signal/slot delivery); the dummy int forces
// the registration to run at static-initialization time.
static const int _ =
qRegisterMetaType<QSharedPointer<Command::TargetSelectorNode> >();
// Maps each selector variable to its single-character command form
// (e.g. Variable::P -> 'p', printed as "@p").
const QMap<Command::TargetSelectorNode::Variable, char>
Command::TargetSelectorNode::variableMap =
{ { Variable::A, 'a' },
{ Variable::E, 'e' },
{ Variable::P, 'p' },
{ Variable::R, 'r' },
{ Variable::S, 's' } };
// Construct a selector node recorded at source position |pos|.
Command::TargetSelectorNode::TargetSelectorNode(int pos)
: Command::ParseNode(pos) {
}
// Debug representation, e.g. "TargetSelectorNode(@p)" with an optional
// "{...}" suffix describing the argument map when one is set.
QString Command::TargetSelectorNode::toString() const {
auto ret = QString("TargetSelectorNode(@%1)").arg(variableMap[m_variable]);
if (m_args)
ret += '{' + m_args->toString() + '}';
return ret;
}
// Visitor dispatch: visit this node before (preorder) or after (postorder)
// descending into the argument subtree, if any.
void Command::TargetSelectorNode::accept(Command::NodeVisitor *visitor,
Command::NodeVisitor::Order order) {
if (order == NodeVisitor::Order::Preorder)
visitor->visit(this);
if (m_args) {
m_args->accept(visitor, order);
}
if (order == NodeVisitor::Order::Postorder)
visitor->visit(this);
}
// Returns the selector's argument map (may be null when no "{...}" was parsed).
QSharedPointer<Command::MultiMapNode> Command::TargetSelectorNode::args() const
{
return m_args;
}
// Attach (or replace) the selector's argument map.
void Command::TargetSelectorNode::setArgs(QSharedPointer<MultiMapNode> args) {
m_args = args;
}
// Returns which selector variable (@a/@e/@p/@r/@s) this node denotes.
Command::TargetSelectorNode::Variable Command::TargetSelectorNode::variable()
const {
return m_variable;
}
// Set which selector variable this node denotes.
void Command::TargetSelectorNode::setVariable(const Variable &variable) {
m_variable = variable;
}
|
#ifndef MODEL_HPP
#define MODEL_HPP
#include <vescLib/datatypes.h>
class ModelListener;
// UI-facing model holding the latest values read from the VESC motor
// controller; tick() (defined elsewhere) refreshes the cached state and the
// getters expose it to the presenter/listener.
class Model
{
public:
Model();
// Register the listener to be notified of model changes.
void bind(ModelListener *listener)
{
modelListener = listener;
}
// Periodic update hook; implementation not visible in this header.
void tick();
// Last known battery level. Units not stated here — presumably percent
// or volts; confirm against the .cpp implementation.
float GetCurrentBatteryLevel() const
{
return currentBatteryLevel;
}
// Last known speed (integer units; see implementation for scaling).
int GetCurrentSpeed() const
{
return currentSpeed;
}
// Last known light state (encoding defined by the implementation).
int GetCurrentLightState() const
{
return currentLightState;
}
protected:
ModelListener *modelListener;
// Raw telemetry and configuration mirrored from the VESC.
mc_values controllerValues_;
mc_configuration controllerMotorConfig_;
// Conversion factor applied when deriving speed from motor data.
float speedFactor_;
// Cached presentation values; refreshed by tick().
float currentBatteryLevel;
float motorCurrent;
int currentSpeed;
int currentLightState;
float currentDistance;
// NOTE(review): members are not initialized in this header; verify the
// constructor zeroes them before first use.
bool initialized_;
};
#endif // MODEL_HPP
|
////////////////////////////////////////////////////////////////////////////
// File: SiftMatch.cpp
// Author: Changchang Wu
// Description : implementation of SiftMatchGPU
//
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to ccwu@cs.unc.edu
//
////////////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include <iostream>
#include <iomanip>
#include <vector>
#include <sstream>
#include <algorithm>
using namespace std;
#include <string.h>
#include <cuda_runtime.h>
#include <cutil_inline.h>
#include "GlobalUtil.h"
#include "SiftMatch.h"
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "CUDATimer.h"
// Construct a matcher for up to |max_sift| features per image.
// Non-positive |max_sift| falls back to 4096; otherwise the value is rounded
// up to a multiple of 32. GPU resources are deferred to InitSiftMatch().
SiftMatchGPU::SiftMatchGPU(int max_sift)
{
_num_sift[0] = _num_sift[1] = 0;
_id_sift[0] = _id_sift[1] = 0;
_have_loc[0] = _have_loc[1] = 0;
_max_sift = max_sift <= 0 ? 4096 : ((max_sift + 31) / 32 * 32);
_initialized = 0;
d_rowMatchDistances = NULL;
_timer = new CUDATimer();
}
// Release the device-side distance buffer and the timer.
SiftMatchGPU::~SiftMatchGPU()
{
	if (d_rowMatchDistances != NULL) {
		cutilSafeCall(cudaFree(d_rowMatchDistances));
	}
	delete _timer; // delete on a null pointer is a no-op
}
void SiftMatchGPU::InitSiftMatch()
{
//if (!CheckCudaDevice(GlobalUtil::_DeviceIndex)) {
// std::cout << "ERROR checking cuda device" << std::endl;
// return;
//}
if (_initialized) return;
_initialized = 1;
cutilSafeCall(cudaMalloc(&d_rowMatchDistances, sizeof(float) * 4096));
}
//void* SiftMatchGPU::operator new (size_t size){
// void * p = malloc(size);
// if (p == 0)
// {
// const std::bad_alloc ba;
// throw ba;
// }
// return p;
//}
//int SiftMatchGPU::CheckCudaDevice(int device)
//{
// return ProgramCU::CheckCudaDevice(device);
//}
// Bind device-resident descriptors for image slot |index| (clamped to 0/1).
// Skips rebinding when |id| matches the slot's current feature set; |num| is
// capped at the matcher's capacity.
void SiftMatchGPU::SetDescriptors(int index, int num, unsigned char* d_descriptors, int id)
{
	if (!_initialized) return;
	index = std::min(1, std::max(0, index));
	_have_loc[index] = 0;
	// The same feature set is already bound to this slot.
	if (id != -1 && id == _id_sift[index]) return;
	_id_sift[index] = id;
	num = std::min(num, _max_sift);
	_num_sift[index] = num;
	_texDes[index].setImageData(8 * num, 1, 4, d_descriptors);
}
// Upload host-side byte descriptors for image slot |index| (clamped to 0/1).
// Skips the upload when |id| matches the slot's current feature set; |num|
// is capped at the matcher's capacity.
void SiftMatchGPU::SetDescriptorsFromCPU(int index, int num, const unsigned char* descriptors, int id)
{
if (_initialized == 0) return;
if (index > 1) index = 1;
if (index < 0) index = 0;
_have_loc[index] = 0;
//the same feature is already set
if (id != -1 && id == _id_sift[index]) return;
_id_sift[index] = id;
if (num > _max_sift) num = _max_sift;
_num_sift[index] = num;
_texDes[index].InitTexture(8 * num, 1, 4);
_texDes[index].CopyFromHost((void*)descriptors);
}
// Upload host-side float descriptors: quantizes each of the 128 components
// to a byte via round(512 * x), then defers to the byte overload.
// NOTE(review): values >= ~0.498 quantize above 255 and wrap when stored in
// the byte buffer — presumably inputs are L2-normalized SIFT descriptors
// whose components stay below that; confirm with callers.
void SiftMatchGPU::SetDescriptorsFromCPU(int index, int num, const float* descriptors, int id)
{
if (_initialized == 0) return;
if (index > 1) index = 1;
if (index < 0) index = 0;
if (num > _max_sift) num = _max_sift;
// sift_buffer holds floats; reuse it as raw bytes for the quantized copy.
sift_buffer.resize(num * 128 / 4);
unsigned char * pub = (unsigned char*)&sift_buffer[0];
for (int i = 0; i < 128 * num; ++i)
{
pub[i] = int(512 * descriptors[i] + 0.5);
}
SetDescriptorsFromCPU(index, num, pub, id);
}
// Upload (x, y) feature locations for image slot |index|. |gap| is the
// number of extra floats between consecutive (x, y) pairs in |locations|;
// when non-zero the pairs are first compacted into sift_buffer.
// (Name misspells "Feature" but is public API, so it is kept as-is.)
void SiftMatchGPU::SetFeautreLocation(int index, const float* locations, int gap)
{
if (_num_sift[index] <= 0) return;
_texLoc[index].InitTexture(_num_sift[index], 1, 2);
if (gap == 0)
{
// Tightly packed input can be copied directly.
_texLoc[index].CopyFromHost(locations);
}
else
{
// Strided input: gather each (x, y) pair, skipping |gap| floats between.
sift_buffer.resize(_num_sift[index] * 2);
float* pbuf = (float*)(&sift_buffer[0]);
for (int i = 0; i < _num_sift[index]; ++i)
{
pbuf[i * 2] = *locations++;
pbuf[i * 2 + 1] = *locations++;
locations += gap;
}
_texLoc[index].CopyFromHost(pbuf);
}
_have_loc[index] = 1;
}
// Compute matches between the two bound descriptor sets and write them into
// |imagePairMatch|. With no descriptors bound (or uninitialized), the match
// count is zeroed on the device and nothing else runs. The cross-check
// texture is only produced when |mutual_best_match| is requested.
void SiftMatchGPU::GetSiftMatch(int max_match, ImagePairMatch& imagePairMatch, uint2 keyPointOffset, float distmax, float ratiomax, int mutual_best_match)
{
if (_initialized == 0 || _num_sift[0] <= 0 || _num_sift[1] <= 0) {
cudaMemset(imagePairMatch.d_numMatches, 0, sizeof(int));
return;
}
if (GlobalUtil::_EnableDetailedTimings) {
_timer->startEvent("MultiplyDescriptor");
}
// Dot-product table between all descriptor pairs; _texCRT additionally
// holds data for the reverse-direction check when mutual matching is on.
ProgramCU::MultiplyDescriptor(_texDes, _texDes + 1, &_texDot, (mutual_best_match ? &_texCRT : NULL));
if (GlobalUtil::_EnableDetailedTimings) {
_timer->endEvent();
}
GetBestMatch(max_match, imagePairMatch, distmax, ratiomax, keyPointOffset);//, mutual_best_match);
}
// Second stage of matching: per-row best candidates (image 0 -> image 1)
// with distance/ratio tests, then the column pass filters them against the
// reverse direction and writes final indices/distances/count into
// |imagePairMatch|.
void SiftMatchGPU::GetBestMatch(int max_match, ImagePairMatch& imagePairMatch, float distmax, float ratiomax, uint2 keyPointOffset)//, int mbm)
{
_texMatch[0].InitTexture(_num_sift[0], 1);
if (GlobalUtil::_EnableDetailedTimings) {
_timer->startEvent("GetRowMatch");
}
// Best match per row; per-row distances are kept in d_rowMatchDistances
// for the column pass below.
ProgramCU::GetRowMatch(&_texDot, _texMatch, d_rowMatchDistances, distmax, ratiomax);
if (GlobalUtil::_EnableDetailedTimings) {
_timer->endEvent();
}
//_texMatch[1].InitTexture(_num_sift[1], 1);
if (GlobalUtil::_EnableDetailedTimings) {
_timer->startEvent("GetColMatch");
}
ProgramCU::GetColMatch(&_texCRT, distmax, ratiomax, &_texMatch[0], d_rowMatchDistances, imagePairMatch.d_keyPointIndices, imagePairMatch.d_distances, imagePairMatch.d_numMatches, keyPointOffset);
if (GlobalUtil::_EnableDetailedTimings) {
_timer->endEvent();
}
}
// Print accumulated GPU timing events; requires detailed timings to have
// been enabled before matching, otherwise reports an error.
void SiftMatchGPU::EvaluateTimings()
{
	if (GlobalUtil::_EnableDetailedTimings) {
		_timer->evaluate(true);
	}
	else {
		std::cout << "Error timings not enabled" << std::endl;
	}
}
// Factory function so clients can allocate a matcher without constructing
// the class directly. The caller owns the returned object.
SiftMatchGPU* CreateNewSiftMatchGPU(int max_sift)
{
return new SiftMatchGPU(max_sift);
}
|
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2013 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "base58.h"
#include "core.h"
#include "init.h"
#include "keystore.h"
#include "main.h"
#include "net.h"
#include "rpcserver.h"
#include "uint256.h"
#ifdef ENABLE_WALLET
#include "wallet.h"
#endif
#include <stdint.h>
#include <boost/assign/list_of.hpp>
#include "json/json_spirit_utils.h"
#include "json/json_spirit_value.h"
using namespace std;
using namespace boost;
using namespace boost::assign;
using namespace json_spirit;
// Serialize a scriptPubKey into a JSON object: always emits "asm" (and
// optionally "hex"); when destinations can be extracted, also emits
// "reqSigs", "type" and the "addresses" array.
void ScriptPubKeyToJSON(const CScript& scriptPubKey, Object& out, bool fIncludeHex)
{
    txnouttype type;
    vector<CTxDestination> addresses;
    int nRequired;
    out.push_back(Pair("asm", scriptPubKey.ToString()));
    if (fIncludeHex)
        out.push_back(Pair("hex", HexStr(scriptPubKey.begin(), scriptPubKey.end())));
    // Non-standard / unparsable scripts: report the type only, no addresses.
    if (!ExtractDestinations(scriptPubKey, type, addresses, nRequired))
    {
        out.push_back(Pair("type", GetTxnOutputType(type)));
        return;
    }
    out.push_back(Pair("reqSigs", nRequired));
    out.push_back(Pair("type", GetTxnOutputType(type)));
    Array a;
    BOOST_FOREACH(const CTxDestination& addr, addresses)
        a.push_back(CBitcoinAddress(addr).ToString());
    out.push_back(Pair("addresses", a));
}
// Serialize a transaction into a JSON object (txid, version, locktime, vin,
// vout). If hashBlock is non-zero, block context (blockhash, confirmations,
// time, blocktime) is appended; confirmations is 0 when the block is known
// but not on the active chain.
void TxToJSON(const CTransaction& tx, const uint256 hashBlock, Object& entry)
{
    entry.push_back(Pair("txid", tx.GetHash().GetHex()));
    entry.push_back(Pair("version", tx.nVersion));
    entry.push_back(Pair("locktime", (boost::int64_t)tx.nLockTime));
    Array vin;
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
    {
        Object in;
        // Coinbase inputs have no previous output; expose the raw scriptSig.
        if (tx.IsCoinBase())
            in.push_back(Pair("coinbase", HexStr(txin.scriptSig.begin(), txin.scriptSig.end())));
        else
        {
            in.push_back(Pair("txid", txin.prevout.hash.GetHex()));
            in.push_back(Pair("vout", (boost::int64_t)txin.prevout.n));
            Object o;
            o.push_back(Pair("asm", txin.scriptSig.ToString()));
            o.push_back(Pair("hex", HexStr(txin.scriptSig.begin(), txin.scriptSig.end())));
            in.push_back(Pair("scriptSig", o));
        }
        in.push_back(Pair("sequence", (boost::int64_t)txin.nSequence));
        vin.push_back(in);
    }
    entry.push_back(Pair("vin", vin));
    Array vout;
    for (unsigned int i = 0; i < tx.vout.size(); i++)
    {
        const CTxOut& txout = tx.vout[i];
        Object out;
        out.push_back(Pair("value", ValueFromAmount(txout.nValue)));
        out.push_back(Pair("n", (boost::int64_t)i));
        Object o;
        ScriptPubKeyToJSON(txout.scriptPubKey, o, true);
        out.push_back(Pair("scriptPubKey", o));
        vout.push_back(out);
    }
    entry.push_back(Pair("vout", vout));
    // Block context is only emitted when the caller supplied the containing
    // block's hash.
    if (hashBlock != 0)
    {
        entry.push_back(Pair("blockhash", hashBlock.GetHex()));
        map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
        if (mi != mapBlockIndex.end() && (*mi).second)
        {
            CBlockIndex* pindex = (*mi).second;
            if (chainActive.Contains(pindex))
            {
                entry.push_back(Pair("confirmations", 1 + chainActive.Height() - pindex->nHeight));
                entry.push_back(Pair("time", (boost::int64_t)pindex->nTime));
                entry.push_back(Pair("blocktime", (boost::int64_t)pindex->nTime));
            }
            else
                entry.push_back(Pair("confirmations", 0));
        }
    }
}
// RPC: return a transaction's raw serialized hex (verbose=0) or a decoded
// JSON object including block context when the transaction is confirmed
// (verbose!=0).
Value getrawtransaction(const Array& params, bool fHelp)
{
    if (fHelp || params.size() < 1 || params.size() > 2)
        throw runtime_error(
            "getrawtransaction \"txid\" ( verbose )\n"
            "\nReturn the raw transaction data.\n"
            "\nIf verbose=0, returns a string that is serialized, hex-encoded data for 'txid'.\n"
            "If verbose is non-zero, returns an Object with information about 'txid'.\n"
            "\nArguments:\n"
            "1. \"txid\" (string, required) The transaction id\n"
            "2. verbose (numeric, optional, default=0) If 0, return a string, other return a json object\n"
            "\nResult (if verbose is not set or set to 0):\n"
            "\"data\" (string) The serialized, hex-encoded data for 'txid'\n"
            "\nResult (if verbose > 0):\n"
            "{\n"
            " \"hex\" : \"data\", (string) The serialized, hex-encoded data for 'txid'\n"
            " \"txid\" : \"id\", (string) The transaction id (same as provided)\n"
            " \"version\" : n, (numeric) The version\n"
            " \"locktime\" : ttt, (numeric) The lock time\n"
            " \"vin\" : [ (array of json objects)\n"
            " {\n"
            " \"txid\": \"id\", (string) The transaction id\n"
            " \"vout\": n, (numeric) \n"
            " \"scriptSig\": { (json object) The script\n"
            " \"asm\": \"asm\", (string) asm\n"
            " \"hex\": \"hex\" (string) hex\n"
            " },\n"
            " \"sequence\": n (numeric) The script sequence number\n"
            " }\n"
            " ,...\n"
            " ],\n"
            " \"vout\" : [ (array of json objects)\n"
            " {\n"
            " \"value\" : x.xxx, (numeric) The value in btc\n"
            " \"n\" : n, (numeric) index\n"
            " \"scriptPubKey\" : { (json object)\n"
            " \"asm\" : \"asm\", (string) the asm\n"
            " \"hex\" : \"hex\", (string) the hex\n"
            " \"reqSigs\" : n, (numeric) The required sigs\n"
            " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
            " \"addresses\" : [ (json array of string)\n"
            " \"bitcoinaddress\" (string) bitcoin address\n"
            " ,...\n"
            " ]\n"
            " }\n"
            " }\n"
            " ,...\n"
            " ],\n"
            " \"blockhash\" : \"hash\", (string) the block hash\n"
            " \"confirmations\" : n, (numeric) The confirmations\n"
            " \"time\" : ttt, (numeric) The transaction time in seconds since epoch (Jan 1 1970 GMT)\n"
            " \"blocktime\" : ttt (numeric) The block time in seconds since epoch (Jan 1 1970 GMT)\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("getrawtransaction", "\"mytxid\"")
            + HelpExampleCli("getrawtransaction", "\"mytxid\" 1")
            + HelpExampleRpc("getrawtransaction", "\"mytxid\", 1")
        );
    // Parse and validate arguments.
    uint256 hash = ParseHashV(params[0], "parameter 1");
    bool fVerbose = false;
    if (params.size() > 1)
        fVerbose = (params[1].get_int() != 0);
    // Look up the transaction (mempool or chain, including the txindex).
    CTransaction tx;
    uint256 hashBlock = 0;
    if (!GetTransaction(hash, tx, hashBlock, true))
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available about transaction");
    // Serialize to hex; this is the whole answer in non-verbose mode.
    CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
    ssTx << tx;
    string strHex = HexStr(ssTx.begin(), ssTx.end());
    if (!fVerbose)
        return strHex;
    // Verbose: decoded JSON with the raw hex included as "hex".
    Object result;
    result.push_back(Pair("hex", strHex));
    TxToJSON(tx, hashBlock, result);
    return result;
}
#ifdef ENABLE_WALLET
// RPC (wallet): list the wallet's unspent outputs with confirmation count in
// [minconf, maxconf], optionally filtered to a set of addresses. For each
// UTXO emits txid, vout, address/account (when extractable), scriptPubKey,
// redeemScript (for P2SH outputs with a known redeem script), amount and
// confirmations.
Value listunspent(const Array& params, bool fHelp)
{
    if (fHelp || params.size() > 3)
        throw runtime_error(
            "listunspent ( minconf maxconf [\"address\",...] )\n"
            "\nReturns array of unspent transaction outputs\n"
            "with between minconf and maxconf (inclusive) confirmations.\n"
            "Optionally filter to only include txouts paid to specified addresses.\n"
            "Results are an array of Objects, each of which has:\n"
            "{txid, vout, scriptPubKey, amount, confirmations}\n"
            "\nArguments:\n"
            // Fixed typo in the help text: "confirmationsi" -> "confirmations".
            "1. minconf (numeric, optional, default=1) The minimum confirmations to filter\n"
            "2. maxconf (numeric, optional, default=9999999) The maximum confirmations to filter\n"
            "3. \"addresses\" (string) A json array of bitcoin addresses to filter\n"
            " [\n"
            " \"address\" (string) bitcoin address\n"
            " ,...\n"
            " ]\n"
            "\nResult\n"
            "[ (array of json object)\n"
            " {\n"
            " \"txid\" : \"txid\", (string) the transaction id \n"
            " \"vout\" : n, (numeric) the vout value\n"
            " \"address\" : \"address\", (string) the bitcoin address\n"
            " \"account\" : \"account\", (string) The associated account, or \"\" for the default account\n"
            " \"scriptPubKey\" : \"key\", (string) the script key\n"
            " \"amount\" : x.xxx, (numeric) the transaction amount in btc\n"
            " \"confirmations\" : n (numeric) The number of confirmations\n"
            " }\n"
            " ,...\n"
            "]\n"
            "\nExamples\n"
            + HelpExampleCli("listunspent", "")
            + HelpExampleCli("listunspent", "6 9999999 \"[\\\"1PGFqEzfmQch1gKD3ra4k18PNj3tTUUSqg\\\",\\\"1LtvqCaApEdUGFkpKMM4MstjcaL4dKg8SP\\\"]\"")
            + HelpExampleRpc("listunspent", "6, 9999999 \"[\\\"1PGFqEzfmQch1gKD3ra4k18PNj3tTUUSqg\\\",\\\"1LtvqCaApEdUGFkpKMM4MstjcaL4dKg8SP\\\"]\"")
        );
    RPCTypeCheck(params, list_of(int_type)(int_type)(array_type));
    // Optional depth bounds.
    int nMinDepth = 1;
    if (params.size() > 0)
        nMinDepth = params[0].get_int();
    int nMaxDepth = 9999999;
    if (params.size() > 1)
        nMaxDepth = params[1].get_int();
    // Optional address filter; reject invalid or duplicated addresses.
    set<CBitcoinAddress> setAddress;
    if (params.size() > 2)
    {
        Array inputs = params[2].get_array();
        BOOST_FOREACH(Value& input, inputs)
        {
            CBitcoinAddress address(input.get_str());
            if (!address.IsValid())
                throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, string("Invalid Bitcoin address: ")+input.get_str());
            if (setAddress.count(address))
                throw JSONRPCError(RPC_INVALID_PARAMETER, string("Invalid parameter, duplicated address: ")+input.get_str());
            setAddress.insert(address);
        }
    }
    Array results;
    vector<COutput> vecOutputs;
    assert(pwalletMain != NULL);
    pwalletMain->AvailableCoins(vecOutputs, false);
    BOOST_FOREACH(const COutput& out, vecOutputs)
    {
        // Apply the confirmation-depth window.
        if (out.nDepth < nMinDepth || out.nDepth > nMaxDepth)
            continue;
        // Apply the address filter, if one was given; outputs whose
        // destination cannot be extracted are skipped in that case.
        if (setAddress.size())
        {
            CTxDestination address;
            if (!ExtractDestination(out.tx->vout[out.i].scriptPubKey, address))
                continue;
            if (!setAddress.count(address))
                continue;
        }
        int64_t nValue = out.tx->vout[out.i].nValue;
        const CScript& pk = out.tx->vout[out.i].scriptPubKey;
        Object entry;
        entry.push_back(Pair("txid", out.tx->GetHash().GetHex()));
        entry.push_back(Pair("vout", out.i));
        // Address and account book entry are best-effort.
        CTxDestination address;
        if (ExtractDestination(out.tx->vout[out.i].scriptPubKey, address))
        {
            entry.push_back(Pair("address", CBitcoinAddress(address).ToString()));
            if (pwalletMain->mapAddressBook.count(address))
                entry.push_back(Pair("account", pwalletMain->mapAddressBook[address].name));
        }
        entry.push_back(Pair("scriptPubKey", HexStr(pk.begin(), pk.end())));
        // For P2SH outputs, include the redeem script when the wallet has it.
        if (pk.IsPayToScriptHash())
        {
            CTxDestination address;
            if (ExtractDestination(pk, address))
            {
                const CScriptID& hash = boost::get<const CScriptID&>(address);
                CScript redeemScript;
                if (pwalletMain->GetCScript(hash, redeemScript))
                    entry.push_back(Pair("redeemScript", HexStr(redeemScript.begin(), redeemScript.end())));
            }
        }
        entry.push_back(Pair("amount",ValueFromAmount(nValue)));
        entry.push_back(Pair("confirmations",out.nDepth));
        results.push_back(entry);
    }
    return results;
}
#endif
// RPC: build an unsigned raw transaction from a list of {txid, vout} inputs
// and an {address: amount} output map. Returns the serialized hex; nothing
// is signed, stored or broadcast.
Value createrawtransaction(const Array& params, bool fHelp)
{
    if (fHelp || params.size() != 2)
        throw runtime_error(
            "createrawtransaction [{\"txid\":\"id\",\"vout\":n},...] {\"address\":amount,...}\n"
            "\nCreate a transaction spending the given inputs and sending to the given addresses.\n"
            "Returns hex-encoded raw transaction.\n"
            "Note that the transaction's inputs are not signed, and\n"
            "it is not stored in the wallet or transmitted to the network.\n"
            "\nArguments:\n"
            "1. \"transactions\" (string, required) A json array of json objects\n"
            " [\n"
            " {\n"
            " \"txid\":\"id\", (string, required) The transaction id\n"
            " \"vout\":n (numeric, required) The output number\n"
            " }\n"
            " ,...\n"
            " ]\n"
            "2. \"addresses\" (string, required) a json object with addresses as keys and amounts as values\n"
            " {\n"
            " \"address\": x.xxx (numeric, required) The key is the bitcoin address, the value is the btc amount\n"
            " ,...\n"
            " }\n"
            "\nResult:\n"
            "\"transaction\" (string) hex string of the transaction\n"
            "\nExamples\n"
            + HelpExampleCli("createrawtransaction", "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]\" \"{\\\"address\\\":0.01}\"")
            + HelpExampleRpc("createrawtransaction", "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]\", \"{\\\"address\\\":0.01}\"")
        );
    RPCTypeCheck(params, list_of(array_type)(obj_type));
    Array inputs = params[0].get_array();
    Object sendTo = params[1].get_obj();
    CTransaction rawTx;
    // Build the inputs: each entry must carry a valid txid and a
    // non-negative vout index.
    BOOST_FOREACH(const Value& input, inputs)
    {
        const Object& o = input.get_obj();
        uint256 txid = ParseHashO(o, "txid");
        const Value& vout_v = find_value(o, "vout");
        if (vout_v.type() != int_type)
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, missing vout key");
        int nOutput = vout_v.get_int();
        if (nOutput < 0)
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, vout must be positive");
        CTxIn in(COutPoint(txid, nOutput));
        rawTx.vin.push_back(in);
    }
    // Build the outputs: addresses must be valid and unique.
    set<CBitcoinAddress> setAddress;
    BOOST_FOREACH(const Pair& s, sendTo)
    {
        CBitcoinAddress address(s.name_);
        if (!address.IsValid())
            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, string("Invalid Bitcoin address: ")+s.name_);
        if (setAddress.count(address))
            throw JSONRPCError(RPC_INVALID_PARAMETER, string("Invalid parameter, duplicated address: ")+s.name_);
        setAddress.insert(address);
        CScript scriptPubKey;
        scriptPubKey.SetDestination(address.Get());
        int64_t nAmount = AmountFromValue(s.value_);
        CTxOut out(nAmount, scriptPubKey);
        rawTx.vout.push_back(out);
    }
    // Serialize and return as hex.
    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
    ss << rawTx;
    return HexStr(ss.begin(), ss.end());
}
// RPC: decode a serialized, hex-encoded transaction into a JSON description.
// NOTE(review): the help text below advertises blockhash/confirmations/
// time/blocktime, but TxToJSON is called with hashBlock=0 here, so those
// fields are never emitted by this call.
Value decoderawtransaction(const Array& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw runtime_error(
            "decoderawtransaction \"hexstring\"\n"
            "\nReturn a JSON object representing the serialized, hex-encoded transaction.\n"
            "\nArguments:\n"
            "1. \"txid\" (string, required) The transaction hex string\n"
            "\nResult:\n"
            "{\n"
            " \"hex\" : \"data\", (string) The serialized, hex-encoded data for 'txid'\n"
            " \"txid\" : \"id\", (string) The transaction id (same as provided)\n"
            " \"version\" : n, (numeric) The version\n"
            " \"locktime\" : ttt, (numeric) The lock time\n"
            " \"vin\" : [ (array of json objects)\n"
            " {\n"
            " \"txid\": \"id\", (string) The transaction id\n"
            " \"vout\": n, (numeric) The output number\n"
            " \"scriptSig\": { (json object) The script\n"
            " \"asm\": \"asm\", (string) asm\n"
            " \"hex\": \"hex\" (string) hex\n"
            " },\n"
            " \"sequence\": n (numeric) The script sequence number\n"
            " }\n"
            " ,...\n"
            " ],\n"
            " \"vout\" : [ (array of json objects)\n"
            " {\n"
            " \"value\" : x.xxx, (numeric) The value in btc\n"
            " \"n\" : n, (numeric) index\n"
            " \"scriptPubKey\" : { (json object)\n"
            " \"asm\" : \"asm\", (string) the asm\n"
            " \"hex\" : \"hex\", (string) the hex\n"
            " \"reqSigs\" : n, (numeric) The required sigs\n"
            " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
            " \"addresses\" : [ (json array of string)\n"
            " \"12tvKAXCxZjSmdNbao16dKXC8tRWfcF5oc\" (string) bitcoin address\n"
            " ,...\n"
            " ]\n"
            " }\n"
            " }\n"
            " ,...\n"
            " ],\n"
            " \"blockhash\" : \"hash\", (string) the block hash\n"
            " \"confirmations\" : n, (numeric) The confirmations\n"
            " \"time\" : ttt, (numeric) The transaction time in seconds since epoch (Jan 1 1970 GMT)\n"
            " \"blocktime\" : ttt (numeric) The block time in seconds since epoch (Jan 1 1970 GMT)\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("decoderawtransaction", "\"hexstring\"")
            + HelpExampleRpc("decoderawtransaction", "\"hexstring\"")
        );
    // Parse the hex argument and deserialize it into a transaction.
    vector<unsigned char> txData(ParseHexV(params[0], "argument"));
    CDataStream ssData(txData, SER_NETWORK, PROTOCOL_VERSION);
    CTransaction tx;
    try {
        ssData >> tx;
    }
    catch (std::exception &e) {
        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
    }
    // hashBlock=0: no block context is attached to the decoded output.
    Object result;
    TxToJSON(tx, 0, result);
    return result;
}
// RPC: decode a hex-encoded script into JSON (asm/type/reqSigs/addresses)
// and report the pay-to-script-hash address the script would have.
Value decodescript(const Array& params, bool fHelp)
{
    if (fHelp || params.size() != 1)
        throw runtime_error(
            "decodescript \"hex\"\n"
            "\nDecode a hex-encoded script.\n"
            "\nArguments:\n"
            "1. \"hex\" (string) the hex encoded script\n"
            "\nResult:\n"
            "{\n"
            " \"asm\":\"asm\", (string) Script public key\n"
            " \"hex\":\"hex\", (string) hex encoded public key\n"
            " \"type\":\"type\", (string) The output type\n"
            " \"reqSigs\": n, (numeric) The required signatures\n"
            " \"addresses\": [ (json array of string)\n"
            " \"address\" (string) bitcoin address\n"
            " ,...\n"
            " ],\n"
            " \"p2sh\",\"address\" (string) script address\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("decodescript", "\"hexstring\"")
            + HelpExampleRpc("decodescript", "\"hexstring\"")
        );
    RPCTypeCheck(params, list_of(str_type));
    Object r;
    CScript script;
    // An empty string decodes to the (valid) empty script; only parse
    // non-empty input.
    if (!params[0].get_str().empty())
    {
        vector<unsigned char> scriptData(ParseHexV(params[0], "argument"));
        script = CScript(scriptData.begin(), scriptData.end());
    }
    ScriptPubKeyToJSON(script, r, false);
    r.push_back(Pair("p2sh", CBitcoinAddress(script.GetID()).ToString()));
    return r;
}
// RPC: sign a raw transaction's inputs. Optionally takes previous-output
// metadata (for outputs not yet in the chain/mempool), an explicit list of
// private keys (otherwise the wallet's keys are used), and a sighash type.
// Multiple serialized transaction variants may be concatenated in the hex
// argument; their signatures are merged. Returns {hex, complete}.
Value signrawtransaction(const Array& params, bool fHelp)
{
    if (fHelp || params.size() < 1 || params.size() > 4)
        throw runtime_error(
            "signrawtransaction \"hexstring\" ( [{\"txid\":\"id\",\"vout\":n,\"scriptPubKey\":\"hex\",\"redeemScript\":\"hex\"},...] [\"privatekey1\",...] sighashtype )\n"
            "\nSign inputs for raw transaction (serialized, hex-encoded).\n"
            "The second optional argument (may be null) is an array of previous transaction outputs that\n"
            "this transaction depends on but may not yet be in the block chain.\n"
            "The third optional argument (may be null) is an array of base58-encoded private\n"
            "keys that, if given, will be the only keys used to sign the transaction.\n"
#ifdef ENABLE_WALLET
            + HelpRequiringPassphrase() + "\n"
#endif
            "\nArguments:\n"
            "1. \"hexstring\" (string, required) The transaction hex string\n"
            "2. \"prevtxs\" (string, optional) An json array of previous dependent transaction outputs\n"
            " [ (json array of json objects)\n"
            " {\n"
            " \"txid\":\"id\", (string, required) The transaction id\n"
            " \"vout\":n, (numeric, required) The output number\n"
            " \"scriptPubKey\": \"hex\", (string, required) script key\n"
            " \"redeemScript\": \"hex\" (string, required) redeem script\n"
            " }\n"
            " ,...\n"
            " ]\n"
            "3. \"privatekeys\" (string, optional) A json array of base58-encoded private keys for signing\n"
            " [ (json array of strings)\n"
            " \"privatekey\" (string) private key in base58-encoding\n"
            " ,...\n"
            " ]\n"
            "4. \"sighashtype\" (string, optional, default=ALL) The signature has type. Must be one of\n"
            " \"ALL\"\n"
            " \"NONE\"\n"
            " \"SINGLE\"\n"
            " \"ALL|ANYONECANPAY\"\n"
            " \"NONE|ANYONECANPAY\"\n"
            " \"SINGLE|ANYONECANPAY\"\n"
            "\nResult:\n"
            "{\n"
            " \"hex\": \"value\", (string) The raw transaction with signature(s) (hex-encoded string)\n"
            " \"complete\": n (numeric) if transaction has a complete set of signature (0 if not)\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("signrawtransaction", "\"myhex\"")
            + HelpExampleRpc("signrawtransaction", "\"myhex\"")
        );
    RPCTypeCheck(params, list_of(str_type)(array_type)(array_type)(str_type), true);
    // Decode one or more concatenated transaction variants from the hex blob.
    vector<unsigned char> txData(ParseHexV(params[0], "argument 1"));
    CDataStream ssData(txData, SER_NETWORK, PROTOCOL_VERSION);
    vector<CTransaction> txVariants;
    while (!ssData.empty())
    {
        try {
            CTransaction tx;
            ssData >> tx;
            txVariants.push_back(tx);
        }
        catch (std::exception &e) {
            throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
        }
    }
    if (txVariants.empty())
        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Missing transaction");
    // mergedTx will end up with all the signatures; it
    // starts as a clone of the rawtx:
    CTransaction mergedTx(txVariants[0]);
    bool fComplete = true;
    // Fetch previous transactions (inputs):
    CCoinsView viewDummy;
    CCoinsViewCache view(viewDummy);
    {
        LOCK(mempool.cs);
        CCoinsViewCache &viewChain = *pcoinsTip;
        CCoinsViewMemPool viewMempool(viewChain, mempool);
        view.SetBackend(viewMempool); // temporarily switch cache backend to db+mempool view
        // Pre-warm the local cache with each input's coins while the
        // mempool-backed view is attached.
        BOOST_FOREACH(const CTxIn& txin, mergedTx.vin) {
            const uint256& prevHash = txin.prevout.hash;
            CCoins coins;
            view.GetCoins(prevHash, coins); // this is certainly allowed to fail
        }
        view.SetBackend(viewDummy); // switch back to avoid locking mempool for too long
    }
    // Optional third argument: explicit private keys to sign with. When
    // given, only these keys are used (not the wallet's).
    bool fGivenKeys = false;
    CBasicKeyStore tempKeystore;
    if (params.size() > 2 && params[2].type() != null_type)
    {
        fGivenKeys = true;
        Array keys = params[2].get_array();
        BOOST_FOREACH(Value k, keys)
        {
            CBitcoinSecret vchSecret;
            bool fGood = vchSecret.SetString(k.get_str());
            if (!fGood)
                throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key");
            CKey key = vchSecret.GetKey();
            tempKeystore.AddKey(key);
        }
    }
#ifdef ENABLE_WALLET
    else
        EnsureWalletIsUnlocked();
#endif
    // Add previous txouts given in the RPC call:
    if (params.size() > 1 && params[1].type() != null_type)
    {
        Array prevTxs = params[1].get_array();
        BOOST_FOREACH(Value& p, prevTxs)
        {
            if (p.type() != obj_type)
                throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "expected object with {\"txid'\",\"vout\",\"scriptPubKey\"}");
            Object prevOut = p.get_obj();
            RPCTypeCheck(prevOut, map_list_of("txid", str_type)("vout", int_type)("scriptPubKey", str_type));
            uint256 txid = ParseHashO(prevOut, "txid");
            int nOut = find_value(prevOut, "vout").get_int();
            if (nOut < 0)
                throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "vout must be positive");
            vector<unsigned char> pkData(ParseHexO(prevOut, "scriptPubKey"));
            CScript scriptPubKey(pkData.begin(), pkData.end());
            CCoins coins;
            // If the coins are already known, the supplied scriptPubKey must
            // agree with the one on record.
            if (view.GetCoins(txid, coins)) {
                if (coins.IsAvailable(nOut) && coins.vout[nOut].scriptPubKey != scriptPubKey) {
                    string err("Previous output scriptPubKey mismatch:\n");
                    err = err + coins.vout[nOut].scriptPubKey.ToString() + "\nvs:\n"+
                        scriptPubKey.ToString();
                    throw JSONRPCError(RPC_DESERIALIZATION_ERROR, err);
                }
                // what todo if txid is known, but the actual output isn't?
            }
            if ((unsigned int)nOut >= coins.vout.size())
                coins.vout.resize(nOut+1);
            coins.vout[nOut].scriptPubKey = scriptPubKey;
            coins.vout[nOut].nValue = 0; // we don't know the actual output value
            view.SetCoins(txid, coins);
            // if redeemScript given and not using the local wallet (private keys
            // given), add redeemScript to the tempKeystore so it can be signed:
            if (fGivenKeys && scriptPubKey.IsPayToScriptHash())
            {
                RPCTypeCheck(prevOut, map_list_of("txid", str_type)("vout", int_type)("scriptPubKey", str_type)("redeemScript",str_type));
                Value v = find_value(prevOut, "redeemScript");
                if (!(v == Value::null))
                {
                    vector<unsigned char> rsData(ParseHexV(v, "redeemScript"));
                    CScript redeemScript(rsData.begin(), rsData.end());
                    tempKeystore.AddCScript(redeemScript);
                }
            }
        }
    }
    // Choose the keystore: explicit keys if given, otherwise the wallet
    // (when compiled in and available).
#ifdef ENABLE_WALLET
    const CKeyStore& keystore = ((fGivenKeys || !pwalletMain) ? tempKeystore : *pwalletMain);
#else
    const CKeyStore& keystore = tempKeystore;
#endif
    // Optional fourth argument: sighash type (default SIGHASH_ALL).
    int nHashType = SIGHASH_ALL;
    if (params.size() > 3 && params[3].type() != null_type)
    {
        static map<string, int> mapSigHashValues =
            boost::assign::map_list_of
            (string("ALL"), int(SIGHASH_ALL))
            (string("ALL|ANYONECANPAY"), int(SIGHASH_ALL|SIGHASH_ANYONECANPAY))
            (string("NONE"), int(SIGHASH_NONE))
            (string("NONE|ANYONECANPAY"), int(SIGHASH_NONE|SIGHASH_ANYONECANPAY))
            (string("SINGLE"), int(SIGHASH_SINGLE))
            (string("SINGLE|ANYONECANPAY"), int(SIGHASH_SINGLE|SIGHASH_ANYONECANPAY))
            ;
        string strHashType = params[3].get_str();
        if (mapSigHashValues.count(strHashType))
            nHashType = mapSigHashValues[strHashType];
        else
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid sighash param");
    }
    bool fHashSingle = ((nHashType & ~SIGHASH_ANYONECANPAY) == SIGHASH_SINGLE);
    // Sign what we can:
    for (unsigned int i = 0; i < mergedTx.vin.size(); i++)
    {
        CTxIn& txin = mergedTx.vin[i];
        CCoins coins;
        // Inputs whose previous output is unknown or spent cannot be signed.
        if (!view.GetCoins(txin.prevout.hash, coins) || !coins.IsAvailable(txin.prevout.n))
        {
            fComplete = false;
            continue;
        }
        const CScript& prevPubKey = coins.vout[txin.prevout.n].scriptPubKey;
        txin.scriptSig.clear();
        // Only sign SIGHASH_SINGLE if there's a corresponding output:
        if (!fHashSingle || (i < mergedTx.vout.size()))
            SignSignature(keystore, prevPubKey, mergedTx, i, nHashType);
        // ... and merge in other signatures:
        BOOST_FOREACH(const CTransaction& txv, txVariants)
        {
            txin.scriptSig = CombineSignatures(prevPubKey, mergedTx, i, txin.scriptSig, txv.vin[i].scriptSig);
        }
        // An input whose final script does not verify leaves the
        // transaction incomplete.
        if (!VerifyScript(txin.scriptSig, prevPubKey, mergedTx, i, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC, 0))
            fComplete = false;
    }
    Object result;
    CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
    ssTx << mergedTx;
    result.push_back(Pair("hex", HexStr(ssTx.begin(), ssTx.end())));
    result.push_back(Pair("complete", fComplete));
    return result;
}
// RPC: submit a serialized, hex-encoded transaction to the local node's
// mempool and relay it to the network. Returns the transaction hash in hex.
Value sendrawtransaction(const Array& params, bool fHelp)
{
    if (fHelp || params.size() < 1 || params.size() > 2)
        throw runtime_error(
            "sendrawtransaction \"hexstring\" ( allowhighfees )\n"
            "\nSubmits raw transaction (serialized, hex-encoded) to local node and network.\n"
            "\nAlso see createrawtransaction and signrawtransaction calls.\n"
            "\nArguments:\n"
            "1. \"hexstring\" (string, required) The hex string of the raw transaction)\n"
            "2. allowhighfees (boolean, optional, default=false) Allow high fees\n"
            "\nResult:\n"
            "\"hex\" (string) The transaction hash in hex\n"
            "\nExamples:\n"
            "\nCreate a transaction\n"
            + HelpExampleCli("createrawtransaction", "\"[{\\\"txid\\\" : \\\"mytxid\\\",\\\"vout\\\":0}]\" \"{\\\"myaddress\\\":0.01}\"") +
            "Sign the transaction, and get back the hex\n"
            + HelpExampleCli("signrawtransaction", "\"myhex\"") +
            "\nSend the transaction (signed hex)\n"
            + HelpExampleCli("sendrawtransaction", "\"signedhex\"") +
            "\nAs a json rpc call\n"
            + HelpExampleRpc("sendrawtransaction", "\"signedhex\"")
        );
    // parse hex string from parameter
    vector<unsigned char> txData(ParseHexV(params[0], "parameter"));
    CDataStream ssData(txData, SER_NETWORK, PROTOCOL_VERSION);
    CTransaction tx;
    // Optional flag to bypass the high-fee sanity check during acceptance.
    bool fOverrideFees = false;
    if (params.size() > 1)
        fOverrideFees = params[1].get_bool();
    // deserialize binary data stream
    try {
        ssData >> tx;
    }
    catch (std::exception &e) {
        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
    }
    uint256 hashTx = tx.GetHash();
    bool fHave = false;
    CCoinsViewCache &view = *pcoinsTip;
    CCoins existingCoins;
    {
        // Check whether the transaction's outputs are already known; if
        // not, try to accept it into the local mempool.
        fHave = view.GetCoins(hashTx, existingCoins);
        if (!fHave) {
            // push to local node
            CValidationState state;
            if (!AcceptToMemoryPool(mempool, state, tx, false, NULL, !fOverrideFees))
                throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX rejected"); // TODO: report validation state
        }
    }
    if (fHave) {
        // nHeight below 1000000000 means the coins are in a mined block.
        if (existingCoins.nHeight < 1000000000)
            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "transaction already in block chain");
        // Not in block, but already in the memory pool; will drop
        // through to re-relay it.
    } else {
        SyncWithWallets(hashTx, tx, NULL);
    }
    RelayTransaction(tx, hashTx);
    return hashTx.GetHex();
}
|
/* Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// gRPC server implementation of
// tensorflow_serving/apis/prediction_service.proto.
//
// It brings up a standard server to serve a single TensorFlow model using
// command line flags, or multiple models via config file.
//
// ModelServer prioritizes easy invocation over flexibility,
// and thus serves a statically configured set of models. New versions of these
// models will be loaded and managed over time using the
// AvailabilityPreservingPolicy at:
// tensorflow_serving/core/availability_preserving_policy.h.
// by AspiredVersionsManager at:
// tensorflow_serving/core/aspired_versions_manager.h
//
// ModelServer has inter-request batching support built-in, by using the
// BatchingSession at:
// tensorflow_serving/batching/batching_session.h
//
// To serve a single model, run with:
// $path_to_binary/tensorflow_model_server \
// --model_base_path=[/tmp/my_model | gs://gcs_address]
// IMPORTANT: Be sure the base path excludes the version directory. For
// example for a model at /tmp/my_model/123, where 123 is the version, the base
// path is /tmp/my_model.
//
// To specify model name (default "default"): --model_name=my_name
// To specify port (default 8500): --port=my_port
// To enable batching (default disabled): --enable_batching
// To override the default batching parameters: --batching_parameters_file
#include <unistd.h>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "grpc/grpc.h"
#include "grpcpp/security/server_credentials.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "tensorflow/cc/saved_model/tag_constants.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow_serving/apis/prediction_service.grpc.pb.h"
#include "tensorflow_serving/apis/prediction_service.pb.h"
#include "tensorflow_serving/config/model_server_config.pb.h"
#include "tensorflow_serving/core/availability_preserving_policy.h"
#include "tensorflow_serving/model_servers/grpc_status_util.h"
#include "tensorflow_serving/model_servers/http_server.h"
#include "tensorflow_serving/model_servers/model_platform_types.h"
#include "tensorflow_serving/model_servers/model_service_impl.h"
#include "tensorflow_serving/model_servers/platform_config_util.h"
#include "tensorflow_serving/model_servers/server_core.h"
#include "tensorflow_serving/servables/tensorflow/classification_service.h"
#include "tensorflow_serving/servables/tensorflow/get_model_metadata_impl.h"
#include "tensorflow_serving/servables/tensorflow/multi_inference_helper.h"
#include "tensorflow_serving/servables/tensorflow/predict_impl.h"
#include "tensorflow_serving/servables/tensorflow/regression_service.h"
#include "tensorflow_serving/servables/tensorflow/session_bundle_config.pb.h"
namespace grpc {
class ServerCompletionQueue;
} // namespace grpc
using tensorflow::string;
using tensorflow::serving::AspiredVersionPolicy;
using tensorflow::serving::AspiredVersionsManager;
using tensorflow::serving::AvailabilityPreservingPolicy;
using tensorflow::serving::BatchingParameters;
using tensorflow::serving::EventBus;
using tensorflow::serving::FileSystemStoragePathSourceConfig;
using tensorflow::serving::GetModelMetadataImpl;
using tensorflow::serving::ModelServerConfig;
using tensorflow::serving::ServableState;
using tensorflow::serving::ServerCore;
using tensorflow::serving::SessionBundleConfig;
using tensorflow::serving::TensorflowClassificationServiceImpl;
using tensorflow::serving::TensorflowPredictor;
using tensorflow::serving::TensorflowRegressionServiceImpl;
using tensorflow::serving::UniquePtrWithDeps;
using grpc::InsecureServerCredentials;
using grpc::Server;
using grpc::ServerBuilder;
using grpc::ServerContext;
using tensorflow::serving::ClassificationRequest;
using tensorflow::serving::ClassificationResponse;
using tensorflow::serving::GetModelMetadataRequest;
using tensorflow::serving::GetModelMetadataResponse;
using tensorflow::serving::MultiInferenceRequest;
using tensorflow::serving::MultiInferenceResponse;
using tensorflow::serving::PredictRequest;
using tensorflow::serving::PredictResponse;
using tensorflow::serving::RegressionRequest;
using tensorflow::serving::RegressionResponse;
using tensorflow::serving::PredictionService;
namespace {
// Read `file` into memory and parse it as a text-format protobuf into
// `message`. Returns OK on success, or an error status if the file cannot
// be read or does not parse.
tensorflow::Status ParseProtoTextFile(const string& file,
                                      google::protobuf::Message* message) {
  std::unique_ptr<tensorflow::ReadOnlyMemoryRegion> file_data;
  TF_RETURN_IF_ERROR(
      tensorflow::Env::Default()->NewReadOnlyMemoryRegionFromFile(file,
                                                                  &file_data));
  const string contents(static_cast<const char*>(file_data->data()),
                        file_data->length());
  if (!tensorflow::protobuf::TextFormat::ParseFromString(contents, message)) {
    return tensorflow::errors::InvalidArgument("Invalid protobuf file: '", file,
                                               "'");
  }
  return tensorflow::Status::OK();
}
// Hook for loading a custom (non-standard) model config. Intentionally
// unimplemented: reaching this aborts the server via LOG(FATAL).
tensorflow::Status LoadCustomModelConfig(
    const ::google::protobuf::Any& any,
    EventBus<ServableState>* servable_event_bus,
    UniquePtrWithDeps<AspiredVersionsManager>* manager) {
  LOG(FATAL) // Crash ok
      << "ModelServer does not yet support custom model config.";
}
// Build a ModelServerConfig that serves exactly one TensorFlow model with
// the given name, loaded from the given base path.
ModelServerConfig BuildSingleModelConfig(const string& model_name,
                                         const string& model_base_path) {
  ModelServerConfig config;
  LOG(INFO) << "Building single TensorFlow model file config: "
            << " model_name: " << model_name
            << " model_base_path: " << model_base_path;
  tensorflow::serving::ModelConfig* const model_config =
      config.mutable_model_config_list()->add_config();
  model_config->set_name(model_name);
  model_config->set_base_path(model_base_path);
  model_config->set_model_platform(
      tensorflow::serving::kTensorFlowModelPlatform);
  return config;
}
// Read a text-format protobuf of type ProtoType from `file`.
// CHECK-fails (crashes) if the file cannot be read or parsed.
template <typename ProtoType>
ProtoType ReadProtoFromFile(const string& file) {
  ProtoType parsed_proto;
  TF_CHECK_OK(ParseProtoTextFile(file, &parsed_proto));
  return parsed_proto;
}
// Converts an absolute gRPC request deadline into a timeout in milliseconds
// relative to now, suitable for RunOptions::set_timeout_in_ms. Both times are
// normalized to the monotonic clock before subtracting.
int DeadlineToTimeoutMillis(const gpr_timespec deadline) {
  return gpr_time_to_millis(
      gpr_time_sub(gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                   gpr_now(GPR_CLOCK_MONOTONIC)));
}
// gRPC implementation of the TensorFlow Serving PredictionService.
// Each RPC delegates to the corresponding serving implementation, converts
// the request deadline into a tensorflow RunOptions timeout (where the
// underlying implementation takes RunOptions), and maps the resulting
// tensorflow::Status onto a grpc::Status. Failures are logged at VLOG(1).
class PredictionServiceImpl final : public PredictionService::Service {
 public:
  // 'core' is not owned and must outlive this service.
  explicit PredictionServiceImpl(ServerCore* core, bool use_saved_model)
      : core_(core),
        predictor_(new TensorflowPredictor(use_saved_model)),
        use_saved_model_(use_saved_model) {}
  // Runs inference for the tensors named in the request's signature.
  grpc::Status Predict(ServerContext* context, const PredictRequest* request,
                       PredictResponse* response) override {
    tensorflow::RunOptions run_options = tensorflow::RunOptions();
    // By default, this is infinite which is the same default as RunOptions.
    run_options.set_timeout_in_ms(
        DeadlineToTimeoutMillis(context->raw_deadline()));
    const grpc::Status status = tensorflow::serving::ToGRPCStatus(
        predictor_->Predict(run_options, core_, *request, response));
    if (!status.ok()) {
      VLOG(1) << "Predict failed: " << status.error_message();
    }
    return status;
  }
  // Returns metadata (e.g. signatures) for a loaded model. Only available in
  // SavedModel mode; rejected with InvalidArgument otherwise.
  grpc::Status GetModelMetadata(ServerContext* context,
                                const GetModelMetadataRequest* request,
                                GetModelMetadataResponse* response) override {
    if (!use_saved_model_) {
      return tensorflow::serving::ToGRPCStatus(
          tensorflow::errors::InvalidArgument(
              "GetModelMetadata API is only available when use_saved_model is "
              "set to true"));
    }
    const grpc::Status status = tensorflow::serving::ToGRPCStatus(
        GetModelMetadataImpl::GetModelMetadata(core_, *request, response));
    if (!status.ok()) {
      VLOG(1) << "GetModelMetadata failed: " << status.error_message();
    }
    return status;
  }
  // Runs a classification request against the model's classify signature.
  grpc::Status Classify(ServerContext* context,
                        const ClassificationRequest* request,
                        ClassificationResponse* response) override {
    tensorflow::RunOptions run_options = tensorflow::RunOptions();
    // By default, this is infinite which is the same default as RunOptions.
    run_options.set_timeout_in_ms(
        DeadlineToTimeoutMillis(context->raw_deadline()));
    const grpc::Status status = tensorflow::serving::ToGRPCStatus(
        TensorflowClassificationServiceImpl::Classify(run_options, core_,
                                                      *request, response));
    if (!status.ok()) {
      VLOG(1) << "Classify request failed: " << status.error_message();
    }
    return status;
  }
  // Runs a regression request against the model's regress signature.
  grpc::Status Regress(ServerContext* context, const RegressionRequest* request,
                       RegressionResponse* response) override {
    tensorflow::RunOptions run_options = tensorflow::RunOptions();
    // By default, this is infinite which is the same default as RunOptions.
    run_options.set_timeout_in_ms(
        DeadlineToTimeoutMillis(context->raw_deadline()));
    const grpc::Status status = tensorflow::serving::ToGRPCStatus(
        TensorflowRegressionServiceImpl::Regress(run_options, core_, *request,
                                                 response));
    if (!status.ok()) {
      VLOG(1) << "Regress request failed: " << status.error_message();
    }
    return status;
  }
  // Runs several inference tasks (classify/regress) over one model in a
  // single request.
  grpc::Status MultiInference(ServerContext* context,
                              const MultiInferenceRequest* request,
                              MultiInferenceResponse* response) override {
    tensorflow::RunOptions run_options = tensorflow::RunOptions();
    // By default, this is infinite which is the same default as RunOptions.
    run_options.set_timeout_in_ms(
        DeadlineToTimeoutMillis(context->raw_deadline()));
    const grpc::Status status =
        tensorflow::serving::ToGRPCStatus(RunMultiInferenceWithServerCore(
            run_options, core_, *request, response));
    if (!status.ok()) {
      VLOG(1) << "MultiInference request failed: " << status.error_message();
    }
    return status;
  }
 private:
  ServerCore* core_;  // Not owned.
  std::unique_ptr<TensorflowPredictor> predictor_;
  bool use_saved_model_;
};
// gRPC Channel Arguments to be passed from command line to gRPC ServerBuilder.
struct GrpcChannelArgument {
  string key;    // e.g. "grpc.max_connection_age_ms"
  string value;  // kept as text; re-parsed as int32 later when possible
};
// Parses a comma separated list of gRPC channel arguments ("key=value" pairs)
// into a list of GrpcChannelArgument. Malformed entries without a '=' are
// skipped with a warning. Splitting happens on the FIRST '=' only, so values
// may themselves contain '=' characters.
std::vector<GrpcChannelArgument> parseGrpcChannelArgs(
    const string& channel_arguments_str) {
  const std::vector<string> channel_arguments =
      tensorflow::str_util::Split(channel_arguments_str, ",");
  std::vector<GrpcChannelArgument> result;
  for (const string& channel_argument : channel_arguments) {
    // The previous implementation split on '=' and indexed element [1]
    // unconditionally, which is undefined behavior for an argument with no
    // '=' and silently truncated values containing a second '='.
    const size_t eq_pos = channel_argument.find('=');
    if (eq_pos == string::npos) {
      LOG(WARNING) << "Ignoring malformed gRPC channel argument: '"
                   << channel_argument << "' (expected key=value).";
      continue;
    }
    result.push_back({channel_argument.substr(0, eq_pos),
                      channel_argument.substr(eq_pos + 1)});
  }
  return result;
}
// Options for the optional HTTP/REST API server (see RunServer).
struct HttpServerOptions {
  tensorflow::int32 port;           // 0 disables the HTTP server entirely.
  tensorflow::int32 num_threads;    // worker threads for request processing
  tensorflow::int32 timeout_in_ms;  // per-request timeout
};
void RunServer(int port, std::unique_ptr<ServerCore> core, bool use_saved_model,
const string& grpc_channel_arguments,
const HttpServerOptions& http_options) {
// "0.0.0.0" is the way to listen on localhost in gRPC.
const string server_address = "0.0.0.0:" + std::to_string(port);
tensorflow::serving::ModelServiceImpl model_service(core.get());
PredictionServiceImpl prediction_service(core.get(), use_saved_model);
ServerBuilder builder;
std::shared_ptr<grpc::ServerCredentials> creds = InsecureServerCredentials();
builder.AddListeningPort(server_address, creds);
builder.RegisterService(&model_service);
builder.RegisterService(&prediction_service);
builder.SetMaxMessageSize(tensorflow::kint32max);
const std::vector<GrpcChannelArgument> channel_arguments =
parseGrpcChannelArgs(grpc_channel_arguments);
for (GrpcChannelArgument channel_argument : channel_arguments) {
// gRPC accept arguments of two types, int and string. We will attempt to
// parse each arg as int and pass it on as such if successful. Otherwise we
// will pass it as a string. gRPC will log arguments that were not accepted.
tensorflow::int32 value;
if (tensorflow::strings::safe_strto32(channel_argument.value, &value)) {
builder.AddChannelArgument(channel_argument.key, value);
} else {
builder.AddChannelArgument(channel_argument.key, channel_argument.value);
}
}
std::unique_ptr<Server> server(builder.BuildAndStart());
LOG(INFO) << "Running ModelServer at " << server_address << " ...";
if (http_options.port != 0) {
if (http_options.port != port) {
const string server_address =
"localhost:" + std::to_string(http_options.port);
auto http_server =
CreateAndStartHttpServer(http_options.port, http_options.num_threads,
http_options.timeout_in_ms, core.get());
if (http_server != nullptr) {
LOG(INFO) << "Exporting HTTP/REST API at:" << server_address << " ...";
http_server->WaitForTermination();
} else {
LOG(ERROR) << "Failed to start HTTP Server at " << server_address;
}
} else {
LOG(ERROR) << "--rest_api_port cannot be same as --port. "
<< "Please use a different port for HTTP/REST API. "
<< "Skipped exporting HTTP/REST API.";
}
}
server->Wait();
}
// Parses an ascii PlatformConfigMap protobuf from 'file'.
// CHECK-fails (crashes) if the file cannot be read or parsed, matching the
// startup-time error handling used elsewhere in this binary.
tensorflow::serving::PlatformConfigMap ParsePlatformConfigMap(
    const string& file) {
  tensorflow::serving::PlatformConfigMap platform_config_map;
  TF_CHECK_OK(ParseProtoTextFile(file, &platform_config_map));
  return platform_config_map;
}
} // namespace
// Entry point for the TensorFlow ModelServer binary.
//
// Parses command-line flags, assembles ServerCore::Options (model config,
// platform config, version policy), creates the ServerCore that loads and
// manages models, and then runs the gRPC (and optional HTTP/REST) servers
// until shutdown.
int main(int argc, char** argv) {
  // ---- Flag variables with their defaults. ----
  tensorflow::int32 port = 8500;
  HttpServerOptions http_options;
  http_options.port = 0;
  http_options.num_threads = 4 * tensorflow::port::NumSchedulableCPUs();
  http_options.timeout_in_ms = 30000;  // 30 seconds.
  bool enable_batching = false;
  float per_process_gpu_memory_fraction = 0;
  tensorflow::string batching_parameters_file;
  tensorflow::string model_name = "default";
  tensorflow::int32 file_system_poll_wait_seconds = 1;
  bool flush_filesystem_caches = true;
  tensorflow::string model_base_path;
  const bool use_saved_model = true;
  tensorflow::string saved_model_tags = tensorflow::kSavedModelTagServe;
  // Tensorflow session parallelism of zero means that both inter and intra op
  // thread pools will be auto configured.
  tensorflow::int64 tensorflow_session_parallelism = 0;
  string platform_config_file = "";
  string model_config_file;
  string grpc_channel_arguments = "";
  bool enable_model_warmup = true;
  std::vector<tensorflow::Flag> flag_list = {
      tensorflow::Flag("port", &port, "Port to listen on for gRPC API"),
      tensorflow::Flag("rest_api_port", &http_options.port,
                       "Port to listen on for HTTP/REST API. If set to zero "
                       "HTTP/REST API will not be exported. This port must be "
                       "different than the one specified in --port."),
      tensorflow::Flag("rest_api_num_threads", &http_options.num_threads,
                       "Number of threads for HTTP/REST API processing. If not "
                       "set, will be auto set based on number of CPUs."),
      tensorflow::Flag("rest_api_timeout_in_ms", &http_options.timeout_in_ms,
                       "Timeout for HTTP/REST API calls."),
      tensorflow::Flag("enable_batching", &enable_batching, "enable batching"),
      tensorflow::Flag("batching_parameters_file", &batching_parameters_file,
                       "If non-empty, read an ascii BatchingParameters "
                       "protobuf from the supplied file name and use the "
                       "contained values instead of the defaults."),
      tensorflow::Flag("model_config_file", &model_config_file,
                       "If non-empty, read an ascii ModelServerConfig "
                       "protobuf from the supplied file name, and serve the "
                       "models in that file. This config file can be used to "
                       "specify multiple models to serve and other advanced "
                       "parameters including non-default version policy. (If "
                       "used, --model_name, --model_base_path are ignored.)"),
      tensorflow::Flag("model_name", &model_name,
                       "name of model (ignored "
                       "if --model_config_file flag is set"),
      tensorflow::Flag("model_base_path", &model_base_path,
                       "path to export (ignored if --model_config_file flag "
                       "is set, otherwise required)"),
      tensorflow::Flag("file_system_poll_wait_seconds",
                       &file_system_poll_wait_seconds,
                       "interval in seconds between each poll of the file "
                       "system for new model version"),
      tensorflow::Flag("flush_filesystem_caches", &flush_filesystem_caches,
                       "If true (the default), filesystem caches will be "
                       "flushed after the initial load of all servables, and "
                       "after each subsequent individual servable reload (if "
                       "the number of load threads is 1). This reduces memory "
                       "consumption of the model server, at the potential cost "
                       "of cache misses if model files are accessed after "
                       "servables are loaded."),
      tensorflow::Flag("tensorflow_session_parallelism",
                       &tensorflow_session_parallelism,
                       "Number of threads to use for running a "
                       "Tensorflow session. Auto-configured by default."
                       "Note that this option is ignored if "
                       "--platform_config_file is non-empty."),
      tensorflow::Flag("platform_config_file", &platform_config_file,
                       "If non-empty, read an ascii PlatformConfigMap protobuf "
                       "from the supplied file name, and use that platform "
                       "config instead of the Tensorflow platform. (If used, "
                       "--enable_batching is ignored.)"),
      tensorflow::Flag(
          "per_process_gpu_memory_fraction", &per_process_gpu_memory_fraction,
          "Fraction that each process occupies of the GPU memory space "
          "the value is between 0.0 and 1.0 (with 0.0 as the default) "
          "If 1.0, the server will allocate all the memory when the server "
          "starts, If 0.0, Tensorflow will automatically select a value."),
      tensorflow::Flag("saved_model_tags", &saved_model_tags,
                       "Comma-separated set of tags corresponding to the meta "
                       "graph def to load from SavedModel."),
      tensorflow::Flag("grpc_channel_arguments", &grpc_channel_arguments,
                       "A comma separated list of arguments to be passed to "
                       "the grpc server. (e.g. "
                       "grpc.max_connection_age_ms=2000)"),
      tensorflow::Flag("enable_model_warmup", &enable_model_warmup,
                       "Enables model warmup, which triggers lazy "
                       "initializations (such as TF optimizations) at load "
                       "time, to reduce first request latency.")};
  // ---- Parse and validate flags. Either --model_base_path or
  // --model_config_file must be supplied. ----
  string usage = tensorflow::Flags::Usage(argv[0], flag_list);
  const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
  if (!parse_result || (model_base_path.empty() && model_config_file.empty())) {
    std::cout << usage;
    return -1;
  }
  tensorflow::port::InitMain(argv[0], &argc, &argv);
  if (argc != 1) {
    // NOTE(review): leftover arguments are reported but not treated as fatal.
    std::cout << "unknown argument: " << argv[1] << "\n" << usage;
  }
  // For ServerCore Options, we leave servable_state_monitor_creator unspecified
  // so the default servable_state_monitor_creator will be used.
  ServerCore::Options options;
  // model server config
  if (model_config_file.empty()) {
    options.model_server_config =
        BuildSingleModelConfig(model_name, model_base_path);
  } else {
    options.model_server_config =
        ReadProtoFromFile<ModelServerConfig>(model_config_file);
  }
  // Platform config: either the standard TensorFlow platform assembled from
  // flags, or an explicit PlatformConfigMap file which supersedes the
  // session/batching flags below.
  if (platform_config_file.empty()) {
    SessionBundleConfig session_bundle_config;
    // Batching config
    if (enable_batching) {
      BatchingParameters* batching_parameters =
          session_bundle_config.mutable_batching_parameters();
      if (batching_parameters_file.empty()) {
        batching_parameters->mutable_thread_pool_name()->set_value(
            "model_server_batch_threads");
      } else {
        *batching_parameters =
            ReadProtoFromFile<BatchingParameters>(batching_parameters_file);
      }
    } else if (!batching_parameters_file.empty()) {
      LOG(FATAL)  // Crash ok: contradictory flags are a configuration error.
          << "You supplied --batching_parameters_file without "
             "--enable_batching";
    }
    session_bundle_config.mutable_session_config()
        ->mutable_gpu_options()
        ->set_per_process_gpu_memory_fraction(per_process_gpu_memory_fraction);
    session_bundle_config.mutable_session_config()
        ->set_intra_op_parallelism_threads(tensorflow_session_parallelism);
    session_bundle_config.mutable_session_config()
        ->set_inter_op_parallelism_threads(tensorflow_session_parallelism);
    const std::vector<string> tags =
        tensorflow::str_util::Split(saved_model_tags, ",");
    for (const string& tag : tags) {
      *session_bundle_config.add_saved_model_tags() = tag;
    }
    session_bundle_config.set_enable_model_warmup(enable_model_warmup);
    options.platform_config_map = CreateTensorFlowPlatformConfigMap(
        session_bundle_config, use_saved_model);
  } else {
    options.platform_config_map = ParsePlatformConfigMap(platform_config_file);
  }
  options.custom_model_config_loader = &LoadCustomModelConfig;
  options.aspired_version_policy =
      std::unique_ptr<AspiredVersionPolicy>(new AvailabilityPreservingPolicy);
  options.file_system_poll_wait_seconds = file_system_poll_wait_seconds;
  options.flush_filesystem_caches = flush_filesystem_caches;
  // Create the ServerCore (loads initial models) and serve until shutdown.
  std::unique_ptr<ServerCore> core;
  TF_CHECK_OK(ServerCore::Create(std::move(options), &core));
  RunServer(port, std::move(core), use_saved_model, grpc_channel_arguments,
            http_options);
  return 0;
}
|
#include <iostream>
using namespace std;
/*
Program Description: Two numbers is given and you need to Check if they Have Opposite Signs.
Input: Both Numbers
Output: 'Yes' if they Have Opposite Signs 'No' otherwise
Solution Description: In this question I will use signed integers in order to get the sign along with the number and then in the function will compare the signs of the 2 numbers.
*/
// Returns 1 when 'a' and 'b' have strictly opposite signs (one positive and
// one negative), 0 otherwise. Zero is treated as having no sign, so any pair
// involving 0 yields 0.
//
// The original if/else returning literal 1/0 is collapsed into the boolean
// expression itself; behavior is unchanged for every input.
int check_sign(signed int a, signed int b)
{
    return ((a > 0 && b < 0) || (a < 0 && b > 0)) ? 1 : 0;
}
// Reads two integers from standard input and prints "YES" when their signs
// are strictly opposite (per check_sign), otherwise "NO".
int main()
{
    signed int first, second;
    std::cin >> first >> second;
    std::cout << (check_sign(first, second) == 1 ? "YES" : "NO");
    return 0;
}
|
//===--- SILFunctionType.cpp - Giving SIL types to AST functions ----------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines the native Swift ownership transfer conventions
// and works in concert with the importer to give the correct
// conventions to imported functions and types.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "libsil"
#include "swift/AST/AnyFunctionRef.h"
#include "swift/AST/CanTypeVisitor.h"
#include "swift/AST/Decl.h"
#include "swift/AST/DiagnosticsSIL.h"
#include "swift/AST/ForeignInfo.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/AST/GenericSignatureBuilder.h"
#include "swift/AST/Module.h"
#include "swift/AST/ModuleLoader.h"
#include "swift/AST/ProtocolConformance.h"
#include "swift/SIL/SILModule.h"
#include "swift/SIL/SILType.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace swift;
using namespace swift::Lowering;
/// Applies this function type's substitutions to \p interfaceType.
/// Pattern substitutions are applied before invocation substitutions; either
/// may be absent, in which case that step is skipped.
SILType SILFunctionType::substInterfaceType(SILModule &M,
                                            SILType interfaceType,
                                            TypeExpansionContext context) const {
  SILType substituted = interfaceType;
  if (auto patternSubs = getPatternSubstitutions())
    substituted = substituted.subst(M, patternSubs, context);
  if (auto invocationSubs = getInvocationSubstitutions())
    substituted = substituted.subst(M, invocationSubs, context);
  return substituted;
}
/// Returns an equivalent function type whose component types (parameters,
/// yields, results, error result) have the pattern/invocation substitutions
/// applied directly, and which itself carries no substitutions.
CanSILFunctionType SILFunctionType::getUnsubstitutedType(SILModule &M) const {
  auto mutableThis = const_cast<SILFunctionType*>(this);
  // If we have no substitutions, there's nothing to do.
  if (!hasPatternSubstitutions() && !hasInvocationSubstitutions())
    return CanSILFunctionType(mutableThis);
  // Otherwise, substitute the component types.
  SmallVector<SILParameterInfo, 4> params;
  SmallVector<SILYieldInfo, 4> yields;
  SmallVector<SILResultInfo, 4> results;
  Optional<SILResultInfo> errorResult;
  auto subs = getCombinedSubstitutions();
  // Substitutes a single component type; types with no type parameters are
  // returned unchanged to avoid needless work.
  auto substComponentType = [&](CanType type) {
    if (!type->hasTypeParameter()) return type;
    return SILType::getPrimitiveObjectType(type)
             .subst(M, subs).getASTType();
  };
  for (auto param : getParameters()) {
    params.push_back(param.map(substComponentType));
  }
  for (auto yield : getYields()) {
    yields.push_back(yield.map(substComponentType));
  }
  for (auto result : getResults()) {
    results.push_back(result.map(substComponentType));
  }
  if (auto error = getOptionalErrorResult()) {
    errorResult = error->map(substComponentType);
  }
  // Keep the invocation generic signature only for polymorphic functions.
  auto signature = isPolymorphic() ? getInvocationGenericSignature()
                                   : CanGenericSignature();
  // Note: both substitution maps are passed empty here — that is the point of
  // this function.
  return SILFunctionType::get(signature,
                              getExtInfo(),
                              getCoroutineKind(),
                              getCalleeConvention(),
                              params, yields, results, errorResult,
                              SubstitutionMap(),
                              SubstitutionMap(),
                              mutableThis->getASTContext(),
                              getWitnessMethodConformanceOrInvalid());
}
/// Returns this parameter's type with the owning function type's
/// substitutions applied. Falls back to the raw interface type when no
/// function type is supplied.
CanType SILParameterInfo::getArgumentType(SILModule &M,
                                          const SILFunctionType *t,
                                          TypeExpansionContext context) const {
  // TODO: We should always require a function type.
  if (!t)
    return getInterfaceType();
  auto addressType = SILType::getPrimitiveAddressType(getInterfaceType());
  return t->substInterfaceType(M, addressType, context).getASTType();
}
/// Returns this result's type with the owning function type's substitutions
/// applied. Falls back to the raw interface type when no function type is
/// supplied.
CanType SILResultInfo::getReturnValueType(SILModule &M,
                                          const SILFunctionType *t,
                                          TypeExpansionContext context) const {
  // TODO: We should always require a function type.
  if (!t)
    return getInterfaceType();
  auto addressType = SILType::getPrimitiveAddressType(getInterfaceType());
  return t->substInterfaceType(M, addressType, context).getASTType();
}
/// Returns the type of this function's direct formal results, viewed as a
/// single value: the empty tuple for zero direct results, the result type
/// itself for one, and a tuple of all direct result types otherwise.
/// The multi-result tuple is memoized in the formal-results cache.
SILType
SILFunctionType::getDirectFormalResultsType(SILModule &M,
                                            TypeExpansionContext context) {
  CanType type;
  if (getNumDirectFormalResults() == 0) {
    type = getASTContext().TheEmptyTupleType;
  } else if (getNumDirectFormalResults() == 1) {
    type = getSingleDirectFormalResult().getReturnValueType(M, this, context);
  } else {
    // Multiple direct results: build (and cache) the tuple of their types.
    auto &cache = getMutableFormalResultsCache();
    if (cache) {
      type = cache;
    } else {
      SmallVector<TupleTypeElt, 4> elts;
      for (auto result : getResults())
        if (!result.isFormalIndirect())
          elts.push_back(result.getReturnValueType(M, this, context));
      type = CanType(TupleType::get(elts, getASTContext()));
      cache = type;
    }
  }
  return SILType::getPrimitiveObjectType(type);
}
/// Returns the interface type of all results (direct and indirect) viewed as
/// a single value: the empty tuple for zero results, the result type itself
/// for one, and a memoized tuple of all result types otherwise.
SILType SILFunctionType::getAllResultsInterfaceType() {
  CanType resultType;
  switch (getNumResults()) {
  case 0:
    resultType = getASTContext().TheEmptyTupleType;
    break;
  case 1:
    resultType = getResults()[0].getInterfaceType();
    break;
  default: {
    // Multiple results: build the tuple once and memoize it.
    auto &cache = getMutableAllResultsCache();
    if (!cache) {
      SmallVector<TupleTypeElt, 4> elements;
      for (auto result : getResults())
        elements.push_back(result.getInterfaceType());
      cache = CanType(TupleType::get(elements, getASTContext()));
    }
    resultType = cache;
    break;
  }
  }
  return SILType::getPrimitiveObjectType(resultType);
}
/// Returns getAllResultsInterfaceType() with this function type's pattern and
/// invocation substitutions applied.
SILType SILFunctionType::getAllResultsSubstType(SILModule &M,
                                                TypeExpansionContext context) {
  return substInterfaceType(M, getAllResultsInterfaceType(), context);
}
/// Returns the direct result type of a C-language function — what a C caller
/// sees as the return type. C functions have at most one result.
SILType SILFunctionType::getFormalCSemanticResult(SILModule &M) {
  assert(getLanguage() == SILFunctionLanguage::C);
  assert(getNumResults() <= 1);
  return getDirectFormalResultsType(M, TypeExpansionContext::minimal());
}
/// Returns the instance type of this method's 'self' parameter, unwrapping
/// the metatype when 'self' is a static method's metatype.
CanType
SILFunctionType::getSelfInstanceType(SILModule &M,
                                     TypeExpansionContext context) const {
  auto self = getSelfParameter().getArgumentType(M, this, context);
  // If this is a static method, get the instance type.
  auto metatype = dyn_cast<AnyMetatypeType>(self);
  return metatype ? metatype.getInstanceType() : self;
}
/// For a witness-method function, returns the class bounding the Self generic
/// parameter, or nullptr when Self has no superclass bound (or when Self is
/// not the distinguished generic parameter).
ClassDecl *
SILFunctionType::getWitnessMethodClass(SILModule &M,
                                       TypeExpansionContext context) const {
  // TODO: When witnesses use substituted types, we'd get this from the
  // substitution map.
  auto selfTy = getSelfInstanceType(M, context);
  auto genericSig = getSubstGenericSignature();
  if (auto paramTy = dyn_cast<GenericTypeParamType>(selfTy)) {
    // Witness methods use the distinguished parameter at depth 0, index 0 as
    // Self.
    assert(paramTy->getDepth() == 0 && paramTy->getIndex() == 0);
    auto superclass = genericSig->getSuperclassBound(paramTy);
    if (superclass)
      return superclass->getClassOrBoundGenericClass();
  }
  return nullptr;
}
/// Returns the set of parameter indices marked differentiable on this
/// @differentiable function type.
IndexSubset *
SILFunctionType::getDifferentiabilityParameterIndices() {
  assert(isDifferentiable() && "Must be a differentiable function");
  SmallVector<unsigned, 8> indices;
  unsigned position = 0;
  for (const auto &param : getParameters()) {
    if (param.getDifferentiability() !=
        SILParameterDifferentiability::NotDifferentiable)
      indices.push_back(position);
    ++position;
  }
  return IndexSubset::get(getASTContext(), getNumParameters(), indices);
}
/// Returns the set of semantic result indices marked differentiable on this
/// @differentiable function type. Semantic results are the formal results
/// followed by the `inout` parameters, so `inout` entries are offset by
/// getNumResults().
IndexSubset *SILFunctionType::getDifferentiabilityResultIndices() {
  assert(isDifferentiable() && "Must be a differentiable function");
  SmallVector<unsigned, 8> resultIndices;
  // Check formal results.
  for (auto resultAndIndex : enumerate(getResults()))
    if (resultAndIndex.value().getDifferentiability() !=
        SILResultDifferentiability::NotDifferentiable)
      resultIndices.push_back(resultAndIndex.index());
  // Check `inout` parameters.
  for (auto inoutParamAndIndex : enumerate(getIndirectMutatingParameters()))
    if (inoutParamAndIndex.value().getDifferentiability() !=
        SILParameterDifferentiability::NotDifferentiable)
      resultIndices.push_back(getNumResults() + inoutParamAndIndex.index());
  auto numSemanticResults =
      getNumResults() + getNumIndirectMutatingParameters();
  return IndexSubset::get(getASTContext(), numSemanticResults, resultIndices);
}
/// Returns a copy of this function type marked with differentiability `kind`,
/// where exactly the parameters in `parameterIndices` and the results in
/// `resultIndices` are differentiable; all others are marked
/// NotDifferentiable. Everything else about the type is preserved.
CanSILFunctionType
SILFunctionType::getWithDifferentiability(DifferentiabilityKind kind,
                                          IndexSubset *parameterIndices,
                                          IndexSubset *resultIndices) {
  assert(kind != DifferentiabilityKind::NonDifferentiable &&
         "Differentiability kind must be normal or linear");
  SmallVector<SILParameterInfo, 8> newParameters;
  for (auto paramAndIndex : enumerate(getParameters())) {
    auto &param = paramAndIndex.value();
    unsigned index = paramAndIndex.index();
    // Indices past the subset's capacity are treated as not contained.
    newParameters.push_back(param.getWithDifferentiability(
        index < parameterIndices->getCapacity() &&
                parameterIndices->contains(index)
            ? SILParameterDifferentiability::DifferentiableOrNotApplicable
            : SILParameterDifferentiability::NotDifferentiable));
  }
  SmallVector<SILResultInfo, 8> newResults;
  for (auto resultAndIndex : enumerate(getResults())) {
    auto &result = resultAndIndex.value();
    unsigned index = resultAndIndex.index();
    newResults.push_back(result.getWithDifferentiability(
        index < resultIndices->getCapacity() && resultIndices->contains(index)
            ? SILResultDifferentiability::DifferentiableOrNotApplicable
            : SILResultDifferentiability::NotDifferentiable));
  }
  auto newExtInfo = getExtInfo().withDifferentiabilityKind(kind);
  return get(getInvocationGenericSignature(), newExtInfo, getCoroutineKind(),
             getCalleeConvention(), newParameters, getYields(), newResults,
             getOptionalErrorResult(), getPatternSubstitutions(),
             getInvocationSubstitutions(), getASTContext(),
             getWitnessMethodConformanceOrInvalid());
}
/// Returns a non-differentiable copy of this function type: the
/// differentiability kind is cleared and every parameter/result is reset to
/// DifferentiableOrNotApplicable. Returns `this` unchanged when the type is
/// already non-differentiable.
CanSILFunctionType SILFunctionType::getWithoutDifferentiability() {
  if (!isDifferentiable())
    return CanSILFunctionType(this);
  auto nondiffExtInfo = getExtInfo().withDifferentiabilityKind(
      DifferentiabilityKind::NonDifferentiable);
  SmallVector<SILParameterInfo, 8> newParams;
  for (auto &param : getParameters())
    newParams.push_back(param.getWithDifferentiability(
        SILParameterDifferentiability::DifferentiableOrNotApplicable));
  SmallVector<SILResultInfo, 8> newResults;
  for (auto &result : getResults())
    newResults.push_back(result.getWithDifferentiability(
        SILResultDifferentiability::DifferentiableOrNotApplicable));
  // NOTE(review): unlike getWithDifferentiability, no witness-method
  // conformance argument is forwarded here — presumably relying on the
  // default; confirm this is intentional.
  return SILFunctionType::get(
      getInvocationGenericSignature(), nondiffExtInfo, getCoroutineKind(),
      getCalleeConvention(), newParams, getYields(), newResults,
      getOptionalErrorResult(), getPatternSubstitutions(),
      getInvocationSubstitutions(), getASTContext());
}
/// Collects into `diffParams` the parameters of `originalFnTy` whose indices
/// are members of `parameterIndices` (indices at or past the subset's
/// capacity never participate).
static void
getDifferentiabilityParameters(SILFunctionType *originalFnTy,
                               IndexSubset *parameterIndices,
                               SmallVectorImpl<SILParameterInfo> &diffParams) {
  unsigned index = 0;
  for (const auto &param : originalFnTy->getParameters()) {
    if (index < parameterIndices->getCapacity() &&
        parameterIndices->contains(index))
      diffParams.push_back(param);
    ++index;
  }
}
/// Collects the semantic results of the given function type in
/// `originalResults`. The semantic results are formal results followed by
/// `inout` parameters, in type order. Also emits, via `inoutParameterIndices`,
/// the subset of parameter indices that are `inout` (sized by
/// `parameterIndices`'s capacity; `parameterIndices` is otherwise unused).
static void
getSemanticResults(SILFunctionType *functionType, IndexSubset *parameterIndices,
                   IndexSubset *&inoutParameterIndices,
                   SmallVectorImpl<SILResultInfo> &originalResults) {
  auto &C = functionType->getASTContext();
  SmallVector<unsigned, 4> inoutParamIndices;
  // Collect original formal results.
  originalResults.append(functionType->getResults().begin(),
                         functionType->getResults().end());
  // Collect original `inout` parameters; each becomes an indirect semantic
  // result appended after the formal results.
  for (auto i : range(functionType->getNumParameters())) {
    auto param = functionType->getParameters()[i];
    if (!param.isIndirectInOut())
      continue;
    inoutParamIndices.push_back(i);
    originalResults.push_back(
        SILResultInfo(param.getInterfaceType(), ResultConvention::Indirect));
  }
  inoutParameterIndices =
      IndexSubset::get(C, parameterIndices->getCapacity(), inoutParamIndices);
}
/// Returns the differential type for the given original function type,
/// parameter indices, and result index.
///
/// The differential maps tangent vectors of the selected parameters to
/// tangent vectors of the selected results. Tangent types that still contain
/// archetypes or type parameters are abstracted behind fresh generic
/// parameters with concrete replacements recorded in a pattern substitution
/// map, so the returned type is fully interface-typed.
static CanSILFunctionType getAutoDiffDifferentialType(
    SILFunctionType *originalFnTy, IndexSubset *parameterIndices,
    IndexSubset *resultIndices, LookupConformanceFn lookupConformance,
    TypeConverter &TC) {
  // Given the tangent type and the corresponding original parameter's
  // convention, returns the tangent parameter's convention.
  auto getTangentParameterConvention =
      [&](CanType tanType,
          ParameterConvention origParamConv) -> ParameterConvention {
    tanType =
        tanType->getCanonicalType(originalFnTy->getSubstGenericSignature());
    AbstractionPattern pattern(originalFnTy->getSubstGenericSignature(),
                               tanType);
    auto &tl =
        TC.getTypeLowering(pattern, tanType, TypeExpansionContext::minimal());
    // When the tangent type is address only, we must ensure that the tangent
    // parameter's convention is indirect.
    if (tl.isAddressOnly() && !isIndirectFormalParameter(origParamConv)) {
      switch (origParamConv) {
      case ParameterConvention::Direct_Guaranteed:
        return ParameterConvention::Indirect_In_Guaranteed;
      case ParameterConvention::Direct_Owned:
      case ParameterConvention::Direct_Unowned:
        return ParameterConvention::Indirect_In;
      default:
        llvm_unreachable("unhandled parameter convention");
      }
    }
    return origParamConv;
  };
  // Given the tangent type and the corresponding original result's convention,
  // returns the tangent result's convention.
  auto getTangentResultConvention =
      [&](CanType tanType,
          ResultConvention origResConv) -> ResultConvention {
    tanType =
        tanType->getCanonicalType(originalFnTy->getSubstGenericSignature());
    AbstractionPattern pattern(originalFnTy->getSubstGenericSignature(),
                               tanType);
    auto &tl =
        TC.getTypeLowering(pattern, tanType, TypeExpansionContext::minimal());
    // When the tangent type is address only, we must ensure that the tangent
    // result's convention is indirect.
    if (tl.isAddressOnly() && !isIndirectFormalResult(origResConv)) {
      switch (origResConv) {
      case ResultConvention::Owned:
        return ResultConvention::Indirect;
      default:
        llvm_unreachable("unhandled result convention");
      }
    }
    return origResConv;
  };
  auto &ctx = originalFnTy->getASTContext();
  SmallVector<GenericTypeParamType *, 4> substGenericParams;
  SmallVector<Requirement, 4> substRequirements;
  SmallVector<Type, 4> substReplacements;
  SmallVector<ProtocolConformanceRef, 4> substConformances;
  IndexSubset *inoutParamIndices;
  SmallVector<SILResultInfo, 2> originalResults;
  getSemanticResults(originalFnTy, parameterIndices, inoutParamIndices,
                     originalResults);
  SmallVector<SILParameterInfo, 4> diffParams;
  getDifferentiabilityParameters(originalFnTy, parameterIndices, diffParams);
  // Differential parameters: one tangent per selected original parameter.
  SmallVector<SILParameterInfo, 8> differentialParams;
  for (auto &param : diffParams) {
    auto paramTan =
        param.getInterfaceType()->getAutoDiffTangentSpace(lookupConformance);
    assert(paramTan && "Parameter type does not have a tangent space?");
    auto paramTanType = paramTan->getCanonicalType();
    auto paramConv = getTangentParameterConvention(paramTanType,
                                                   param.getConvention());
    if (!paramTanType->hasArchetype() && !paramTanType->hasTypeParameter()) {
      differentialParams.push_back(
          {paramTan->getCanonicalType(), paramConv});
    } else {
      // Abstract the dependent tangent type behind a fresh generic parameter.
      auto gpIndex = substGenericParams.size();
      auto gpType = CanGenericTypeParamType::get(0, gpIndex, ctx);
      substGenericParams.push_back(gpType);
      substReplacements.push_back(paramTanType);
      differentialParams.push_back({gpType, paramConv});
    }
  }
  // Differential results: one tangent per selected semantic result — unless
  // there are `inout` differentiability parameters, in which case the
  // differential has no formal results (tangents flow through the `inout`
  // parameters themselves).
  SmallVector<SILResultInfo, 1> differentialResults;
  if (inoutParamIndices->isEmpty()) {
    for (auto resultIndex : resultIndices->getIndices()) {
      auto &result = originalResults[resultIndex];
      auto resultTan =
          result.getInterfaceType()->getAutoDiffTangentSpace(lookupConformance);
      assert(resultTan && "Result type does not have a tangent space?");
      auto resultTanType = resultTan->getCanonicalType();
      auto resultConv =
          getTangentResultConvention(resultTanType, result.getConvention());
      if (!resultTanType->hasArchetype() &&
          !resultTanType->hasTypeParameter()) {
        differentialResults.push_back(
            {resultTan->getCanonicalType(), resultConv});
      } else {
        auto gpIndex = substGenericParams.size();
        auto gpType = CanGenericTypeParamType::get(0, gpIndex, ctx);
        substGenericParams.push_back(gpType);
        substReplacements.push_back(resultTanType);
        differentialResults.push_back({gpType, resultConv});
      }
    }
  }
  // Build the pattern substitution map for any generic parameters introduced
  // above.
  SubstitutionMap substitutions;
  if (!substGenericParams.empty()) {
    auto genericSig =
        GenericSignature::get(substGenericParams, substRequirements)
            .getCanonicalSignature();
    substitutions =
        SubstitutionMap::get(genericSig, llvm::makeArrayRef(substReplacements),
                             llvm::makeArrayRef(substConformances));
  }
  return SILFunctionType::get(
      GenericSignature(), SILFunctionType::ExtInfo(), SILCoroutineKind::None,
      ParameterConvention::Direct_Guaranteed, differentialParams, {},
      differentialResults, None, substitutions,
      /*invocationSubstitutions*/ SubstitutionMap(), ctx);
}
/// Returns the pullback type for the given original function type, parameter
/// indices, and result index.
///
/// Pullback parameters are tangents of the original semantic results (formal
/// results and `inout` parameters); pullback results are tangents of the
/// non-`inout` differentiability parameters. Tangent types that contain
/// archetypes or type parameters are abstracted into fresh generic parameters
/// of a substituted generic signature so that structurally similar pullback
/// types remain canonically equal.
static CanSILFunctionType getAutoDiffPullbackType(
    SILFunctionType *originalFnTy, IndexSubset *parameterIndices,
    IndexSubset *resultIndices, LookupConformanceFn lookupConformance,
    TypeConverter &TC) {
  auto &ctx = originalFnTy->getASTContext();
  // Accumulated components of the pullback's substituted generic signature.
  SmallVector<GenericTypeParamType *, 4> substGenericParams;
  SmallVector<Requirement, 4> substRequirements;
  SmallVector<Type, 4> substReplacements;
  SmallVector<ProtocolConformanceRef, 4> substConformances;
  // Semantic results = formal results + `inout` parameters.
  IndexSubset *inoutParamIndices;
  SmallVector<SILResultInfo, 2> originalResults;
  getSemanticResults(originalFnTy, parameterIndices, inoutParamIndices,
                     originalResults);
  // Given a tangent type and the original result's convention, returns the
  // convention for the corresponding pullback parameter.
  auto getTangentParameterConventionForOriginalResult =
      [&](CanType tanType,
          ResultConvention origResConv) -> ParameterConvention {
    tanType =
        tanType->getCanonicalType(originalFnTy->getSubstGenericSignature());
    AbstractionPattern pattern(originalFnTy->getSubstGenericSignature(),
                               tanType);
    auto &tl =
        TC.getTypeLowering(pattern, tanType, TypeExpansionContext::minimal());
    ParameterConvention conv;
    switch (origResConv) {
    case ResultConvention::Owned:
    case ResultConvention::Autoreleased:
      if (tl.isAddressOnly()) {
        conv = ParameterConvention::Indirect_In_Guaranteed;
      } else {
        conv = tl.isTrivial() ? ParameterConvention::Direct_Unowned
                              : ParameterConvention::Direct_Guaranteed;
      }
      break;
    case ResultConvention::Unowned:
    case ResultConvention::UnownedInnerPointer:
      conv = ParameterConvention::Direct_Unowned;
      break;
    case ResultConvention::Indirect:
      conv = ParameterConvention::Indirect_In_Guaranteed;
      break;
    }
    return conv;
  };
  // Given a tangent type and the original parameter's convention, returns the
  // convention for the corresponding pullback result.
  auto getTangentResultConventionForOriginalParameter =
      [&](CanType tanType,
          ParameterConvention origParamConv) -> ResultConvention {
    tanType =
        tanType->getCanonicalType(originalFnTy->getSubstGenericSignature());
    AbstractionPattern pattern(originalFnTy->getSubstGenericSignature(),
                               tanType);
    auto &tl =
        TC.getTypeLowering(pattern, tanType, TypeExpansionContext::minimal());
    ResultConvention conv;
    switch (origParamConv) {
    case ParameterConvention::Direct_Owned:
    case ParameterConvention::Direct_Guaranteed:
    case ParameterConvention::Direct_Unowned:
      if (tl.isAddressOnly()) {
        conv = ResultConvention::Indirect;
      } else {
        conv = tl.isTrivial() ? ResultConvention::Unowned
                              : ResultConvention::Owned;
      }
      break;
    case ParameterConvention::Indirect_In:
    case ParameterConvention::Indirect_Inout:
    case ParameterConvention::Indirect_In_Constant:
    case ParameterConvention::Indirect_In_Guaranteed:
    case ParameterConvention::Indirect_InoutAliasable:
      conv = ResultConvention::Indirect;
      break;
    }
    return conv;
  };
  // Collect pullback parameters.
  SmallVector<SILParameterInfo, 1> pullbackParams;
  for (auto resultIndex : resultIndices->getIndices()) {
    // Handle formal original result.
    if (resultIndex < originalFnTy->getNumResults()) {
      auto &origRes = originalResults[resultIndex];
      auto resultTan = origRes.getInterfaceType()->getAutoDiffTangentSpace(
          lookupConformance);
      assert(resultTan && "Result type does not have a tangent space?");
      auto resultTanType = resultTan->getCanonicalType();
      auto paramTanConvention = getTangentParameterConventionForOriginalResult(
          resultTanType, origRes.getConvention());
      if (!resultTanType->hasArchetype() &&
          !resultTanType->hasTypeParameter()) {
        // NOTE(review): removed a redundant inner redeclaration of
        // `resultTanType` that shadowed the identical outer local.
        pullbackParams.push_back({resultTanType, paramTanConvention});
      } else {
        // Dependent tangent type: abstract it into a fresh substituted
        // generic parameter.
        auto gpIndex = substGenericParams.size();
        auto gpType = CanGenericTypeParamType::get(0, gpIndex, ctx);
        substGenericParams.push_back(gpType);
        substReplacements.push_back(resultTanType);
        pullbackParams.push_back({gpType, paramTanConvention});
      }
      continue;
    }
    // Handle original `inout` parameter.
    auto inoutParamIndex = resultIndex - originalFnTy->getNumResults();
    auto inoutParamIt = std::next(
        originalFnTy->getIndirectMutatingParameters().begin(), inoutParamIndex);
    auto paramIndex =
        std::distance(originalFnTy->getParameters().begin(), &*inoutParamIt);
    auto inoutParam = originalFnTy->getParameters()[paramIndex];
    auto paramTan = inoutParam.getInterfaceType()->getAutoDiffTangentSpace(
        lookupConformance);
    assert(paramTan && "Parameter type does not have a tangent space?");
    // The pullback parameter convention depends on whether the original
    // `inout` parameter is a differentiability parameter.
    // - If yes, the pullback parameter convention is `@inout`.
    // - If no, the pullback parameter convention is `@in_guaranteed`.
    bool isWrtInoutParameter = parameterIndices->contains(paramIndex);
    auto paramTanConvention = isWrtInoutParameter
                                  ? inoutParam.getConvention()
                                  : ParameterConvention::Indirect_In_Guaranteed;
    auto paramTanType = paramTan->getCanonicalType();
    if (!paramTanType->hasArchetype() && !paramTanType->hasTypeParameter()) {
      pullbackParams.push_back(
          SILParameterInfo(paramTanType, paramTanConvention));
    } else {
      auto gpIndex = substGenericParams.size();
      auto gpType = CanGenericTypeParamType::get(0, gpIndex, ctx);
      substGenericParams.push_back(gpType);
      substReplacements.push_back(paramTanType);
      pullbackParams.push_back({gpType, paramTanConvention});
    }
  }
  // Collect pullback results: tangents of the non-`inout` differentiability
  // parameters.
  SmallVector<SILParameterInfo, 4> diffParams;
  getDifferentiabilityParameters(originalFnTy, parameterIndices, diffParams);
  SmallVector<SILResultInfo, 8> pullbackResults;
  for (auto &param : diffParams) {
    // Skip `inout` parameters, which semantically behave as original results
    // and always appear as pullback parameters.
    if (param.isIndirectInOut())
      continue;
    auto paramTan =
        param.getInterfaceType()->getAutoDiffTangentSpace(lookupConformance);
    assert(paramTan && "Parameter type does not have a tangent space?");
    auto paramTanType = paramTan->getCanonicalType();
    auto resultTanConvention = getTangentResultConventionForOriginalParameter(
        paramTanType, param.getConvention());
    if (!paramTanType->hasArchetype() && !paramTanType->hasTypeParameter()) {
      pullbackResults.push_back({paramTanType, resultTanConvention});
    } else {
      auto gpIndex = substGenericParams.size();
      auto gpType = CanGenericTypeParamType::get(0, gpIndex, ctx);
      substGenericParams.push_back(gpType);
      substReplacements.push_back(paramTanType);
      pullbackResults.push_back({gpType, resultTanConvention});
    }
  }
  // Build the substitution map for the substituted generic signature, if any
  // dependent tangent types were abstracted above.
  SubstitutionMap substitutions;
  if (!substGenericParams.empty()) {
    auto genericSig =
        GenericSignature::get(substGenericParams, substRequirements)
            .getCanonicalSignature();
    substitutions =
        SubstitutionMap::get(genericSig, llvm::makeArrayRef(substReplacements),
                             llvm::makeArrayRef(substConformances));
  }
  return SILFunctionType::get(
      GenericSignature(), SILFunctionType::ExtInfo(), SILCoroutineKind::None,
      ParameterConvention::Direct_Guaranteed, pullbackParams, {},
      pullbackResults, None, substitutions,
      /*invocationSubstitutions*/ SubstitutionMap(), ctx);
}
/// Constrains the `original` function type according to differentiability
/// requirements:
/// - All differentiability parameters are constrained to conform to
///   `Differentiable`.
/// - The invocation generic signature is replaced by the
///   `constrainedInvocationGenSig` argument.
static SILFunctionType *getConstrainedAutoDiffOriginalFunctionType(
    SILFunctionType *original, IndexSubset *parameterIndices,
    LookupConformanceFn lookupConformance,
    CanGenericSignature constrainedInvocationGenSig) {
  auto originalInvocationGenSig = original->getInvocationGenericSignature();
  if (!originalInvocationGenSig) {
    // Parenthesized so the whole disjunction is asserted; without the
    // parentheses `&&` binds tighter than `||` and the message string
    // attaches only to the right-hand operand.
    assert((!constrainedInvocationGenSig ||
            constrainedInvocationGenSig->areAllParamsConcrete()) &&
           "derivative function cannot have invocation generic signature "
           "when original function doesn't");
    return original;
  }
  assert(!original->getPatternSubstitutions() &&
         "cannot constrain substituted function type");
  if (!constrainedInvocationGenSig)
    constrainedInvocationGenSig = originalInvocationGenSig;
  if (!constrainedInvocationGenSig)
    return original;
  // Add `Differentiable` requirements on the differentiability parameters to
  // the invocation generic signature.
  constrainedInvocationGenSig =
      autodiff::getConstrainedDerivativeGenericSignature(
          original, parameterIndices, constrainedInvocationGenSig,
          lookupConformance)
          .getCanonicalSignature();
  // Re-canonicalize parameter and result interface types against the
  // constrained signature.
  SmallVector<SILParameterInfo, 4> newParameters;
  newParameters.reserve(original->getNumParameters());
  for (auto &param : original->getParameters()) {
    newParameters.push_back(
        param.getWithInterfaceType(param.getInterfaceType()->getCanonicalType(
            constrainedInvocationGenSig)));
  }
  SmallVector<SILResultInfo, 4> newResults;
  newResults.reserve(original->getNumResults());
  for (auto &result : original->getResults()) {
    newResults.push_back(
        result.getWithInterfaceType(result.getInterfaceType()->getCanonicalType(
            constrainedInvocationGenSig)));
  }
  return SILFunctionType::get(
      constrainedInvocationGenSig->areAllParamsConcrete()
          ? GenericSignature()
          : constrainedInvocationGenSig,
      original->getExtInfo(), original->getCoroutineKind(),
      original->getCalleeConvention(), newParameters, original->getYields(),
      newResults, original->getOptionalErrorResult(),
      /*patternSubstitutions*/ SubstitutionMap(),
      /*invocationSubstitutions*/ SubstitutionMap(), original->getASTContext(),
      original->getWitnessMethodConformanceOrInvalid());
}
/// Returns the JVP or VJP derivative function type for this function type,
/// for the given parameter/result indices. Results are cached in the
/// ASTContext (`SILAutoDiffDerivativeFunctions`).
CanSILFunctionType SILFunctionType::getAutoDiffDerivativeFunctionType(
    IndexSubset *parameterIndices, IndexSubset *resultIndices,
    AutoDiffDerivativeFunctionKind kind, TypeConverter &TC,
    LookupConformanceFn lookupConformance,
    CanGenericSignature derivativeFnInvocationGenSig,
    bool isReabstractionThunk) {
  assert(parameterIndices);
  assert(resultIndices);
  auto &ctx = getASTContext();
  // Look up result in cache.
  SILAutoDiffDerivativeFunctionKey key{this,
                                       parameterIndices,
                                       resultIndices,
                                       kind,
                                       derivativeFnInvocationGenSig,
                                       isReabstractionThunk};
  auto insertion =
      ctx.SILAutoDiffDerivativeFunctions.try_emplace(key, CanSILFunctionType());
  auto &cachedResult = insertion.first->getSecond();
  if (!insertion.second)
    return cachedResult;
  // Constrain the original type (adds `Differentiable` requirements and the
  // requested invocation generic signature).
  SILFunctionType *constrainedOriginalFnTy =
      getConstrainedAutoDiffOriginalFunctionType(this, parameterIndices,
                                                 lookupConformance,
                                                 derivativeFnInvocationGenSig);
  // Compute closure type: differential for JVP, pullback for VJP.
  CanSILFunctionType closureType;
  switch (kind) {
  case AutoDiffDerivativeFunctionKind::JVP:
    closureType =
        getAutoDiffDifferentialType(constrainedOriginalFnTy, parameterIndices,
                                    resultIndices, lookupConformance, TC);
    break;
  case AutoDiffDerivativeFunctionKind::VJP:
    closureType =
        getAutoDiffPullbackType(constrainedOriginalFnTy, parameterIndices,
                                resultIndices, lookupConformance, TC);
    break;
  }
  // Compute the derivative function parameters: same as the constrained
  // original's parameters.
  SmallVector<SILParameterInfo, 4> newParameters;
  newParameters.reserve(constrainedOriginalFnTy->getNumParameters());
  for (auto &param : constrainedOriginalFnTy->getParameters()) {
    newParameters.push_back(param);
  }
  // Reabstraction thunks have a function-typed parameter (the function to
  // reabstract) as their last parameter. Reabstraction thunk JVPs/VJPs have a
  // `@differentiable` function-typed last parameter instead.
  if (isReabstractionThunk) {
    assert(!parameterIndices->contains(getNumParameters() - 1) &&
           "Function-typed parameter should not be wrt");
    auto fnParam = newParameters.back();
    auto fnParamType = dyn_cast<SILFunctionType>(fnParam.getInterfaceType());
    assert(fnParamType);
    auto diffFnType = fnParamType->getWithDifferentiability(
        DifferentiabilityKind::Normal, parameterIndices, resultIndices);
    newParameters.back() = fnParam.getWithInterfaceType(diffFnType);
  }
  // Compute the derivative function results: the original results plus the
  // differential/pullback closure as a final owned result.
  SmallVector<SILResultInfo, 4> newResults;
  newResults.reserve(getNumResults() + 1);
  for (auto &result : constrainedOriginalFnTy->getResults()) {
    newResults.push_back(result);
  }
  newResults.push_back({closureType, ResultConvention::Owned});
  // Compute the derivative function ExtInfo.
  // If original function is `@convention(c)`, the derivative function should
  // have `@convention(thin)`. IRGen does not support `@convention(c)` functions
  // with multiple results.
  auto extInfo = constrainedOriginalFnTy->getExtInfo();
  if (getRepresentation() == SILFunctionTypeRepresentation::CFunctionPointer)
    extInfo = extInfo.withRepresentation(SILFunctionTypeRepresentation::Thin);
  // Put everything together to get the derivative function type. Then, store in
  // cache and return.
  cachedResult = SILFunctionType::get(
      constrainedOriginalFnTy->getSubstGenericSignature(), extInfo,
      constrainedOriginalFnTy->getCoroutineKind(),
      constrainedOriginalFnTy->getCalleeConvention(), newParameters,
      constrainedOriginalFnTy->getYields(), newResults,
      constrainedOriginalFnTy->getOptionalErrorResult(),
      /*patternSubstitutions*/ SubstitutionMap(),
      /*invocationSubstitutions*/ SubstitutionMap(),
      constrainedOriginalFnTy->getASTContext(),
      constrainedOriginalFnTy->getWitnessMethodConformanceOrInvalid());
  return cachedResult;
}
/// Returns the transpose function type for this function type with respect to
/// the given parameter indices: transposed parameters become results, and
/// original results become parameters, with conventions derived from each
/// type's lowering.
CanSILFunctionType SILFunctionType::getAutoDiffTransposeFunctionType(
    IndexSubset *parameterIndices, Lowering::TypeConverter &TC,
    LookupConformanceFn lookupConformance,
    CanGenericSignature transposeFnGenSig) {
  // Get the "constrained" transpose function generic signature.
  if (!transposeFnGenSig)
    transposeFnGenSig = getSubstGenericSignature();
  transposeFnGenSig = autodiff::getConstrainedDerivativeGenericSignature(
                          this, parameterIndices, transposeFnGenSig,
                          lookupConformance, /*isLinear*/ true)
                          .getCanonicalSignature();
  // Given a type, returns its formal SIL parameter info.
  // Maps an original result's convention to a convention for the transpose
  // parameter that replaces it.
  auto getParameterInfoForOriginalResult =
      [&](const SILResultInfo &result) -> SILParameterInfo {
    AbstractionPattern pattern(transposeFnGenSig, result.getInterfaceType());
    auto &tl = TC.getTypeLowering(pattern, result.getInterfaceType(),
                                  TypeExpansionContext::minimal());
    ParameterConvention newConv;
    switch (result.getConvention()) {
    case ResultConvention::Owned:
    case ResultConvention::Autoreleased:
      newConv = tl.isTrivial() ? ParameterConvention::Direct_Unowned
                               : ParameterConvention::Direct_Guaranteed;
      break;
    case ResultConvention::Unowned:
    case ResultConvention::UnownedInnerPointer:
      newConv = ParameterConvention::Direct_Unowned;
      break;
    case ResultConvention::Indirect:
      newConv = ParameterConvention::Indirect_In_Guaranteed;
      break;
    }
    return {result.getInterfaceType(), newConv};
  };
  // Given a type, returns its formal SIL result info.
  // Maps an original parameter's convention to a convention for the transpose
  // result that replaces it.
  auto getResultInfoForOriginalParameter =
      [&](const SILParameterInfo &param) -> SILResultInfo {
    AbstractionPattern pattern(transposeFnGenSig, param.getInterfaceType());
    auto &tl = TC.getTypeLowering(pattern, param.getInterfaceType(),
                                  TypeExpansionContext::minimal());
    ResultConvention newConv;
    switch (param.getConvention()) {
    case ParameterConvention::Direct_Owned:
    case ParameterConvention::Direct_Guaranteed:
    case ParameterConvention::Direct_Unowned:
      newConv =
          tl.isTrivial() ? ResultConvention::Unowned : ResultConvention::Owned;
      break;
    case ParameterConvention::Indirect_In:
    case ParameterConvention::Indirect_Inout:
    case ParameterConvention::Indirect_In_Constant:
    case ParameterConvention::Indirect_In_Guaranteed:
    case ParameterConvention::Indirect_InoutAliasable:
      newConv = ResultConvention::Indirect;
      break;
    }
    return {param.getInterfaceType(), newConv};
  };
  // Transposed parameters become results; the remaining parameters are
  // carried over unchanged.
  SmallVector<SILParameterInfo, 4> newParameters;
  SmallVector<SILResultInfo, 4> newResults;
  for (auto pair : llvm::enumerate(getParameters())) {
    auto index = pair.index();
    auto param = pair.value();
    if (parameterIndices->contains(index))
      newResults.push_back(getResultInfoForOriginalParameter(param));
    else
      newParameters.push_back(param);
  }
  // Original results become parameters, appended after the untransposed ones.
  for (auto &res : getResults())
    newParameters.push_back(getParameterInfoForOriginalResult(res));
  return SILFunctionType::get(
      getInvocationGenericSignature(), getExtInfo(), getCoroutineKind(),
      getCalleeConvention(), newParameters, getYields(), newResults,
      getOptionalErrorResult(), getPatternSubstitutions(),
      /*invocationSubstitutions*/ {}, getASTContext());
}
/// Looks up a known bridging type `moduleName.typeName`, caching the result
/// in `cacheSlot`. Returns a null CanType when the module is not loaded or
/// the lookup does not produce exactly one type declaration.
static CanType getKnownType(Optional<CanType> &cacheSlot, ASTContext &C,
                            StringRef moduleName, StringRef typeName) {
  if (!cacheSlot) {
    cacheSlot = ([&] {
      ModuleDecl *mod = C.getLoadedModule(C.getIdentifier(moduleName));
      if (!mod)
        return CanType();
      // Do a general qualified lookup instead of a direct lookupValue because
      // some of the types we want are reexported through overlays and
      // lookupValue would only give us types actually declared in the overlays
      // themselves.
      SmallVector<ValueDecl *, 2> decls;
      mod->lookupQualified(mod, DeclNameRef(C.getIdentifier(typeName)),
                           NL_QualifiedDefault | NL_KnownNonCascadingDependency,
                           decls);
      // Require an unambiguous single result that is actually a type.
      if (decls.size() != 1)
        return CanType();
      const auto *typeDecl = dyn_cast<TypeDecl>(decls.front());
      if (!typeDecl)
        return CanType();
      return typeDecl->getDeclaredInterfaceType()->getCanonicalType();
    })();
  }
  CanType t = *cacheSlot;
  // It is possible that we won't find a bridging type (e.g. String) when we're
  // parsing the stdlib itself; only log successful mappings. The outer null
  // check makes a null-printing branch inside the debug output unnecessary.
  if (t) {
    LLVM_DEBUG(llvm::dbgs() << "Bridging type " << moduleName << '.' << typeName
                            << " mapped to ";
               t->print(llvm::dbgs());
               llvm::dbgs() << '\n');
  }
  return t;
}
// X-macro: for each (module, type) pair listed in BridgedTypes.def, define a
// TypeConverter accessor that resolves the bridged type via getKnownType,
// caching it in the corresponding `<Type>Ty` slot.
#define BRIDGING_KNOWN_TYPE(BridgedModule,BridgedType) \
  CanType TypeConverter::get##BridgedType##Type() { \
    return getKnownType(BridgedType##Ty, Context, \
                        #BridgedModule, #BridgedType); \
  }
#include "swift/SIL/BridgedTypes.def"
/// Adjust a function type to carry the given ExtInfo, returning the input
/// unchanged when nothing would differ.
CanAnyFunctionType
Lowering::adjustFunctionType(CanAnyFunctionType t,
                             AnyFunctionType::ExtInfo extInfo) {
  return t->getExtInfo() == extInfo
             ? t
             : CanAnyFunctionType(t->withExtInfo(extInfo));
}
/// Adjust a SIL function type's ExtInfo, callee convention, and witness-method
/// conformance, rebuilding the type only when something actually changes.
CanSILFunctionType
Lowering::adjustFunctionType(CanSILFunctionType type,
                             SILFunctionType::ExtInfo extInfo,
                             ParameterConvention callee,
                             ProtocolConformanceRef witnessMethodConformance) {
  // Fast path: every adjusted property already matches.
  const bool unchanged =
      type->getExtInfo() == extInfo &&
      type->getCalleeConvention() == callee &&
      type->getWitnessMethodConformanceOrInvalid() == witnessMethodConformance;
  if (unchanged)
    return type;
  // Rebuild, carrying over everything that is not being adjusted.
  return SILFunctionType::get(
      type->getInvocationGenericSignature(), extInfo, type->getCoroutineKind(),
      callee, type->getParameters(), type->getYields(), type->getResults(),
      type->getOptionalErrorResult(), type->getPatternSubstitutions(),
      type->getInvocationSubstitutions(), type->getASTContext(),
      witnessMethodConformance);
}
/// Returns an otherwise-identical function type whose representation is
/// replaced by `repr` (delegates to getWithExtInfo).
CanSILFunctionType
SILFunctionType::getWithRepresentation(Representation repr) {
  return getWithExtInfo(getExtInfo().withRepresentation(repr));
}
/// Returns an otherwise-identical function type carrying `newExt`, adjusting
/// the callee convention for the presence or absence of a context.
CanSILFunctionType SILFunctionType::getWithExtInfo(ExtInfo newExt) {
  auto oldExt = getExtInfo();
  if (newExt == oldExt)
    return CanSILFunctionType(this);
  // Pick the callee convention that matches the new context-ness:
  // - no context: unowned;
  // - context kept: preserve the existing convention;
  // - context introduced: use the default thick convention.
  ParameterConvention newCallee;
  if (!newExt.hasContext()) {
    newCallee = ParameterConvention::Direct_Unowned;
  } else if (oldExt.hasContext()) {
    newCallee = getCalleeConvention();
  } else {
    newCallee = Lowering::DefaultThickCalleeConvention;
  }
  return get(getInvocationGenericSignature(), newExt, getCoroutineKind(),
             newCallee, getParameters(), getYields(), getResults(),
             getOptionalErrorResult(), getPatternSubstitutions(),
             getInvocationSubstitutions(), getASTContext(),
             getWitnessMethodConformanceOrInvalid());
}
namespace {
// Identifies the concrete Conventions subclass; exposed via
// Conventions::getKind().
enum class ConventionsKind : uint8_t {
  Default = 0,
  DefaultBlock = 1,
  ObjCMethod = 2,
  CFunctionType = 3,
  CFunction = 4,
  ObjCSelectorFamily = 5,
  Deallocator = 6,
  Capture = 7,
  CXXMethod = 8,
};
/// Abstract policy object that supplies parameter/result conventions when
/// lowering a formal function type. Concrete subclasses (discriminated by
/// ConventionsKind) implement the pure virtual hooks; the getIndirect/
/// getDirect helpers handle explicit value ownership themselves and delegate
/// to the hooks only for Default ownership.
class Conventions {
  ConventionsKind kind;
protected:
  virtual ~Conventions() = default;
public:
  Conventions(ConventionsKind k) : kind(k) {}
  // Discriminator for the concrete subclass.
  ConventionsKind getKind() const { return kind; }
  // Convention for an indirectly-passed parameter at `index`.
  virtual ParameterConvention
  getIndirectParameter(unsigned index,
                       const AbstractionPattern &type,
                       const TypeLowering &substTL) const = 0;
  // Convention for a directly-passed parameter at `index`.
  virtual ParameterConvention
  getDirectParameter(unsigned index,
                     const AbstractionPattern &type,
                     const TypeLowering &substTL) const = 0;
  // Convention for the callee value.
  virtual ParameterConvention getCallee() const = 0;
  // Convention for a result with the given lowering.
  virtual ResultConvention getResult(const TypeLowering &resultTL) const = 0;
  // Conventions for the `self` parameter, indirect and direct forms.
  virtual ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const = 0;
  virtual ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const = 0;
  // Helpers that branch based on a value ownership.
  // Explicit ownership (inout/shared/owned) fixes the convention; only
  // Default ownership consults the subclass hooks.
  ParameterConvention getIndirect(ValueOwnership ownership, bool forSelf,
                                  unsigned index,
                                  const AbstractionPattern &type,
                                  const TypeLowering &substTL) const {
    switch (ownership) {
    case ValueOwnership::Default:
      if (forSelf)
        return getIndirectSelfParameter(type);
      return getIndirectParameter(index, type, substTL);
    case ValueOwnership::InOut:
      return ParameterConvention::Indirect_Inout;
    case ValueOwnership::Shared:
      return ParameterConvention::Indirect_In_Guaranteed;
    case ValueOwnership::Owned:
      return ParameterConvention::Indirect_In;
    }
    llvm_unreachable("unhandled ownership");
  }
  ParameterConvention getDirect(ValueOwnership ownership, bool forSelf,
                                unsigned index, const AbstractionPattern &type,
                                const TypeLowering &substTL) const {
    switch (ownership) {
    case ValueOwnership::Default:
      if (forSelf)
        return getDirectSelfParameter(type);
      return getDirectParameter(index, type, substTL);
    case ValueOwnership::InOut:
      return ParameterConvention::Indirect_Inout;
    case ValueOwnership::Shared:
      return ParameterConvention::Direct_Guaranteed;
    case ValueOwnership::Owned:
      return ParameterConvention::Direct_Owned;
    }
    llvm_unreachable("unhandled ownership");
  }
};
/// A structure for building the substituted generic signature of a lowered type.
///
/// Where the abstraction pattern for a lowered type involves substitutable types, we extract those positions
/// out into generic arguments. This signature only needs to consider the general calling convention,
/// so it can reduce away protocol and base class constraints aside from
/// `AnyObject`. We want similar-shaped generic function types to remain
/// canonically equivalent, like `(T, U) -> ()`, `(T, T) -> ()`,
/// `(U, T) -> ()` or `(T, T.A) -> ()` when given substitutions that produce
/// the same function types, so we also introduce a new generic argument for
/// each position where we see a dependent type, and canonicalize the order in
/// which we see independent generic arguments.
class SubstFunctionTypeCollector {
public:
  TypeConverter &TC;
  TypeExpansionContext Expansion;
  CanGenericSignature GenericSig;
  // When false, getSubstitutedInterfaceType is a no-op pass-through.
  bool Enabled;
  // Accumulated components of the substituted generic signature being built.
  SmallVector<GenericTypeParamType *, 4> substGenericParams;
  SmallVector<Requirement, 4> substRequirements;
  SmallVector<Type, 4> substReplacements;
  SmallVector<ProtocolConformanceRef, 4> substConformances;
  SubstFunctionTypeCollector(TypeConverter &TC, TypeExpansionContext context,
                             CanGenericSignature genericSig, bool enabled)
      : TC(TC), Expansion(context), GenericSig(genericSig), Enabled(enabled) {
  }
  SubstFunctionTypeCollector(const SubstFunctionTypeCollector &) = delete;
  // Add a substitution for a fresh type variable, with the given replacement
  // type and layout constraint. Returns the new generic parameter; its
  // replacement and requirements are recorded in the member vectors.
  CanType addSubstitution(LayoutConstraint layout,
                          CanType substType,
                          ArchetypeType *upperBound,
                          ArrayRef<ProtocolConformanceRef> substTypeConformances) {
    auto paramIndex = substGenericParams.size();
    auto param = CanGenericTypeParamType::get(0, paramIndex, TC.Context);
    // Expand the bound type according to the expansion context.
    if (Expansion.shouldLookThroughOpaqueTypeArchetypes()
        && substType->hasOpaqueArchetype()) {
      substType = substOpaqueTypesWithUnderlyingTypes(substType, Expansion);
    }
    substGenericParams.push_back(param);
    substReplacements.push_back(substType);
    LayoutConstraint upperBoundLayout;
    Type upperBoundSuperclass;
    ArrayRef<ProtocolDecl*> upperBoundConformances;
    // If the parameter is in a position with upper bound constraints, such
    // as a generic nominal type with type constraints on its arguments, then
    // preserve the constraints from that upper bound.
    if (upperBound) {
      upperBoundSuperclass = upperBound->getSuperclass();
      upperBoundConformances = upperBound->getConformsTo();
      upperBoundLayout = upperBound->getLayoutConstraint();
    }
    if (upperBoundSuperclass) {
      upperBoundSuperclass = upperBoundSuperclass->mapTypeOutOfContext();
      substRequirements.push_back(
          Requirement(RequirementKind::Superclass, param, upperBoundSuperclass));
    }
    // Preserve the layout constraint, if any, on the archetype in the
    // generic signature, generalizing away some constraints that
    // shouldn't affect ABI substitutability.
    if (layout) {
      switch (layout->getKind()) {
      // Keep these layout constraints as is.
      case LayoutConstraintKind::RefCountedObject:
      case LayoutConstraintKind::TrivialOfAtMostSize:
        break;
      case LayoutConstraintKind::UnknownLayout:
      case LayoutConstraintKind::Trivial:
        // These constraints don't really constrain the ABI, so we can
        // eliminate them.
        layout = LayoutConstraint();
        break;
      // Replace these specific constraints with one of the more general
      // constraints above.
      case LayoutConstraintKind::NativeClass:
      case LayoutConstraintKind::Class:
      case LayoutConstraintKind::NativeRefCountedObject:
        // These can all be generalized to RefCountedObject.
        layout = LayoutConstraint::getLayoutConstraint(
            LayoutConstraintKind::RefCountedObject);
        break;
      case LayoutConstraintKind::TrivialOfExactSize:
        // Generalize to TrivialOfAtMostSize.
        layout = LayoutConstraint::getLayoutConstraint(
            LayoutConstraintKind::TrivialOfAtMostSize,
            layout->getTrivialSizeInBits(),
            layout->getAlignmentInBits(),
            TC.Context);
        break;
      }
      if (layout) {
        // Pick the more specific of the upper bound layout and the layout
        // we chose above.
        if (upperBoundLayout) {
          layout = layout.merge(upperBoundLayout);
        }
        substRequirements.push_back(
            Requirement(RequirementKind::Layout, param, layout));
      }
    }
    // Record a conformance requirement (and its concrete conformance) for
    // each protocol the upper bound requires.
    for (unsigned i : indices(upperBoundConformances)) {
      auto proto = upperBoundConformances[i];
      auto conformance = substTypeConformances[i];
      substRequirements.push_back(Requirement(RequirementKind::Conformance, param,
                                              proto->getDeclaredInterfaceType()));
      substConformances.push_back(conformance);
    }
    return param;
  }
  /// Given the destructured original abstraction pattern and substituted type for a destructured
  /// parameter or result, introduce substituted generic parameters and requirements as needed for
  /// the lowered type, and return the substituted type in terms of the substituted generic signature.
  CanType getSubstitutedInterfaceType(AbstractionPattern origType,
                                      CanType substType) {
    if (!Enabled)
      return substType;
    // Replace every dependent type we see with a fresh type variable in
    // the substituted signature, substituted by the corresponding concrete
    // type.
    // The entire original context could be a generic parameter.
    if (origType.isTypeParameter() ||
        origType.isOpaqueFunctionOrOpaqueDerivativeFunction()) {
      return addSubstitution(origType.getLayoutConstraint(), substType,
                             nullptr, {});
    }
    auto origContextType = origType.getType();
    // If the substituted type is a subclass of the abstraction pattern
    // type, build substitutions for any type parameters in it. This only
    // comes up when lowering override types for vtable entries.
    auto areDifferentClasses = [](Type a, Type b) -> bool {
      if (auto dynA = a->getAs<DynamicSelfType>()) {
        a = dynA->getSelfType();
      }
      if (auto dynB = b->getAs<DynamicSelfType>()) {
        b = dynB->getSelfType();
      }
      if (auto aClass = a->getClassOrBoundGenericClass()) {
        if (auto bClass = b->getClassOrBoundGenericClass()) {
          return aClass != bClass;
        }
      }
      return false;
    };
    bool substituteBindingsInSubstType = false;
    if (areDifferentClasses(substType, origContextType)) {
      substituteBindingsInSubstType = true;
    }
    if (auto substMeta = dyn_cast<MetatypeType>(substType)) {
      if (auto origMeta = dyn_cast<MetatypeType>(origContextType)) {
        if (areDifferentClasses(substMeta->getInstanceType(),
                                origMeta->getInstanceType())) {
          substituteBindingsInSubstType = true;
        }
      }
    }
    CanGenericSignature origSig = origType.getGenericSignature();
    if (substituteBindingsInSubstType) {
      origContextType = substType;
      origSig = TC.getCurGenericSignature();
      assert((!substType->hasTypeParameter() || origSig) &&
             "lowering mismatched interface types in a context without "
             "a generic signature");
    }
    if (!origContextType->hasTypeParameter()
        && !origContextType->hasArchetype()) {
      // If the abstraction pattern doesn't have substitutable positions, nor
      // should the concrete type.
      assert(!substType->hasTypeParameter()
             && !substType->hasArchetype());
      return substType;
    }
    // Extract structural substitutions.
    if (origContextType->hasTypeParameter()) {
      origContextType = origSig->getGenericEnvironment()
          ->mapTypeIntoContext(origContextType)
          ->getCanonicalType(origSig);
    }
    auto result = origContextType
        ->substituteBindingsTo(substType,
            [&](ArchetypeType *archetype,
                CanType binding,
                ArchetypeType *upperBound,
                ArrayRef<ProtocolConformanceRef> bindingConformances) -> CanType {
              // TODO: ArchetypeType::getLayoutConstraint sometimes misses out on
              // implied layout constraints. For now AnyObject is the only one we
              // care about.
              return addSubstitution(archetype->requiresClass()
                                       ? LayoutConstraint::getLayoutConstraint(LayoutConstraintKind::Class)
                                       : LayoutConstraint(),
                                     binding,
                                     upperBound,
                                     bindingConformances);
            });
    assert(result && "substType was not bindable to abstraction pattern type?");
    return result;
  }
};
/// A visitor for breaking down formal result types into a SILResultInfo
/// and possibly some number of indirect-out SILParameterInfos,
/// matching the abstraction patterns of the original type.
class DestructureResults {
  TypeConverter &TC;
  const Conventions &Convs;
  // Output vector that destructure() appends lowered results to.
  SmallVectorImpl<SILResultInfo> &Results;
  TypeExpansionContext context;
  SubstFunctionTypeCollector &Subst;
public:
  DestructureResults(TypeExpansionContext context, TypeConverter &TC,
                     const Conventions &conventions,
                     SmallVectorImpl<SILResultInfo> &results,
                     SubstFunctionTypeCollector &subst)
      : TC(TC), Convs(conventions), Results(results), context(context),
        Subst(subst) {}
  // Destructure one formal result type, appending one SILResultInfo per
  // non-tuple leaf to Results.
  void destructure(AbstractionPattern origType, CanType substType) {
    // Recur into tuples.
    if (origType.isTuple()) {
      auto substTupleType = cast<TupleType>(substType);
      for (auto eltIndex : indices(substTupleType.getElementTypes())) {
        AbstractionPattern origEltType =
          origType.getTupleElementType(eltIndex);
        CanType substEltType = substTupleType.getElementType(eltIndex);
        destructure(origEltType, substEltType);
      }
      return;
    }
    // Abstract dependent positions into the substituted generic signature.
    auto substInterfaceType = Subst.getSubstitutedInterfaceType(origType,
                                                                substType);
    // Two lowerings: the minimal-expansion one decides the convention; the
    // context one supplies the actual lowered type.
    auto &substResultTLForConvention = TC.getTypeLowering(
        origType, substInterfaceType, TypeExpansionContext::minimal());
    auto &substResultTL = TC.getTypeLowering(origType, substInterfaceType,
                                             context);
    // Determine the result convention.
    ResultConvention convention;
    if (isFormallyReturnedIndirectly(origType, substType,
                                     substResultTLForConvention)) {
      convention = ResultConvention::Indirect;
    } else {
      convention = Convs.getResult(substResultTLForConvention);
      // Reduce conventions for trivial types to an unowned convention.
      if (substResultTL.isTrivial()) {
        switch (convention) {
        case ResultConvention::Indirect:
        case ResultConvention::Unowned:
        case ResultConvention::UnownedInnerPointer:
          // Leave these as-is.
          break;
        case ResultConvention::Autoreleased:
        case ResultConvention::Owned:
          // These aren't distinguishable from unowned for trivial types.
          convention = ResultConvention::Unowned;
          break;
        }
      }
    }
    SILResultInfo result(substResultTL.getLoweredType().getASTType(),
                         convention);
    Results.push_back(result);
  }
  /// Query whether the original type is returned indirectly for the purpose
  /// of reabstraction given complete lowering information about its
  /// substitution.
  bool isFormallyReturnedIndirectly(AbstractionPattern origType,
                                    CanType substType,
                                    const TypeLowering &substTL) {
    // If the substituted type is returned indirectly, so must the
    // unsubstituted type.
    if ((origType.isTypeParameter()
         && !origType.isConcreteType()
         && !origType.requiresClass())
        || substTL.isAddressOnly()) {
      return true;
    // Functions are always returned directly.
    } else if (origType.isOpaqueFunctionOrOpaqueDerivativeFunction()) {
      return false;
    // If the substitution didn't change the type, then a negative
    // response to the above is determinative as well.
    } else if (origType.getType() == substType &&
               !origType.getType()->hasTypeParameter()) {
      return false;
    // Otherwise, query specifically for the original type.
    } else {
      return SILType::isFormallyReturnedIndirectly(
          origType.getType(), TC, origType.getGenericSignature());
    }
  }
};
/// Returns true when the imported Clang type implies an indirect passing
/// convention that the lowered Swift type alone would not: i.e. a const
/// pointer whose Swift counterpart is not itself a pointer-like type.
static bool isClangTypeMoreIndirectThanSubstType(TypeConverter &TC,
                                                 const clang::Type *clangTy,
                                                 CanType substTy) {
  // A const pointer argument might have been imported as
  // UnsafePointer, COpaquePointer, or a CF foreign class.
  // (An ObjC class type wouldn't be const-qualified.)
  if (clangTy->isPointerType()
      && clangTy->getPointeeType().isConstQualified()) {
    // Peek through optionals.
    if (auto substObjTy = substTy.getOptionalObjectType())
      substTy = substObjTy;
    // Void pointers aren't usefully indirectable.
    if (clangTy->isVoidPointerType())
      return false;
    // Pointer-to-pointer: recurse on the pointee types.
    if (auto eltTy = substTy->getAnyPointerElementType())
      return isClangTypeMoreIndirectThanSubstType(TC,
                    clangTy->getPointeeType().getTypePtr(), CanType(eltTy));
    if (substTy->getAnyNominal() ==
          TC.Context.getOpaquePointerDecl())
      // TODO: We could conceivably have an indirect opaque ** imported
      // as COpaquePointer. That shouldn't ever happen today, though,
      // since we only ever indirect the 'self' parameter of functions
      // imported as methods.
      return false;
    if (clangTy->getPointeeType()->getAs<clang::RecordType>()) {
      // CF type as foreign class
      if (substTy->getClassOrBoundGenericClass() &&
          substTy->getClassOrBoundGenericClass()->getForeignClassKind() ==
            ClassDecl::ForeignKind::CFType) {
        return false;
      }
    }
    // swift_newtypes are always passed directly
    if (auto typedefTy = clangTy->getAs<clang::TypedefType>()) {
      if (typedefTy->getDecl()->getAttr<clang::SwiftNewtypeAttr>())
        return false;
    }
    return true;
  }
  return false;
}
/// Decide whether a parameter of the given original/substituted type pair is
/// formally passed indirectly.
static bool isFormallyPassedIndirectly(TypeConverter &TC,
                                       AbstractionPattern origType,
                                       CanType substType,
                                       const TypeLowering &substTL) {
  // If the C type of the argument is a const pointer, but the Swift type
  // isn't, treat it as indirect.
  if (origType.isClangType() &&
      isClangTypeMoreIndirectThanSubstType(TC, origType.getClangType(),
                                           substType))
    return true;

  // A fully opaque abstraction pattern, or an address-only substituted
  // type, forces indirect passing.
  bool origIsOpaque = origType.isTypeParameter() &&
                      !origType.isConcreteType() &&
                      !origType.requiresClass();
  if (origIsOpaque || substTL.isAddressOnly())
    return true;

  // If the substitution didn't change the type, then a negative
  // response to the above is determinative as well.
  if (origType.getType() == substType &&
      !origType.getType()->hasTypeParameter())
    return false;

  // Otherwise, query specifically for the original type.
  return SILType::isFormallyPassedIndirectly(
      origType.getType(), TC, origType.getGenericSignature());
}
/// A visitor for turning formal input types into SILParameterInfos, matching
/// the abstraction patterns of the original type.
///
/// If the original abstraction pattern is fully opaque, we must pass the
/// function's parameters and results indirectly, as if the original type were
/// the most general function signature (expressed entirely in generic
/// parameters) which can be substituted to equal the given signature.
///
/// See the comment in AbstractionPattern.h for details.
class DestructureInputs {
  /// Expansion context under which parameter types are lowered.
  TypeExpansionContext expansion;
  TypeConverter &TC;
  /// Convention callbacks that choose direct vs. indirect ownership.
  const Conventions &Convs;
  /// Foreign error/self information for imported declarations.
  const ForeignInfo &Foreign;
  /// Callback that emits the foreign 'self' parameter.  Installed in
  /// visitTopLevelParams only for instance imports, and invoked from
  /// maybeAddForeignSelfParameter.
  Optional<llvm::function_ref<void()>> HandleForeignSelf;
  /// Output list of lowered parameters.
  SmallVectorImpl<SILParameterInfo> &Inputs;
  SubstFunctionTypeCollector &Subst;
  /// Index of the next parameter in the original parameter sequence; used
  /// to interleave the foreign error/self parameters at the right indices.
  unsigned NextOrigParamIndex = 0;
public:
  DestructureInputs(TypeExpansionContext expansion, TypeConverter &TC,
                    const Conventions &conventions, const ForeignInfo &foreign,
                    SmallVectorImpl<SILParameterInfo> &inputs,
                    SubstFunctionTypeCollector &subst)
      : expansion(expansion), TC(TC), Convs(conventions), Foreign(foreign),
        Inputs(inputs), Subst(subst) {}

  /// Entry point: lower all formal parameters of the function type into
  /// Inputs.
  void destructure(AbstractionPattern origType,
                   CanAnyFunctionType::CanParamArrayRef params,
                   AnyFunctionType::ExtInfo extInfo) {
    visitTopLevelParams(origType, params, extInfo);
  }

private:
  /// Query whether the original type is address-only given complete
  /// lowering information about its substitution.
  bool isFormallyPassedIndirectly(AbstractionPattern origType,
                                  CanType substType,
                                  const TypeLowering &substTL) {
    return ::isFormallyPassedIndirectly(TC, origType, substType, substTL);
  }

  /// This is a special entry point that allows destructure inputs to handle
  /// self correctly.
  void visitTopLevelParams(AbstractionPattern origType,
                           CanAnyFunctionType::CanParamArrayRef params,
                           AnyFunctionType::ExtInfo extInfo) {
    unsigned numEltTypes = params.size();

    // 'self' (if present) is always the last formal parameter.
    bool hasSelf = (extInfo.hasSelfParam() || Foreign.Self.isImportAsMember());
    unsigned numNonSelfParams = (hasSelf ? numEltTypes - 1 : numEltTypes);

    auto silRepresentation = extInfo.getSILRepresentation();

    // We have to declare this out here so that the lambda scope lasts for
    // the duration of the loop below.
    auto handleForeignSelf = [&] {
      // This is a "self", but it's not a Swift self, we handle it differently.
      auto selfParam = params[numNonSelfParams];
      visit(selfParam.getValueOwnership(),
            /*forSelf=*/false,
            origType.getFunctionParamType(numNonSelfParams),
            selfParam.getParameterType(), silRepresentation);
    };

    // If we have a foreign-self, install handleSelf as the handler.
    if (Foreign.Self.isInstance()) {
      assert(hasSelf && numEltTypes > 0);
      // This is safe because function_ref just stores a pointer to the
      // existing lambda object.
      HandleForeignSelf = handleForeignSelf;
    }

    // Add any leading foreign parameters.
    maybeAddForeignParameters();

    // Process all the non-self parameters.
    for (unsigned i = 0; i != numNonSelfParams; ++i) {
      auto ty = params[i].getParameterType();
      auto eltPattern = origType.getFunctionParamType(i);
      auto flags = params[i].getParameterFlags();

      visit(flags.getValueOwnership(), /*forSelf=*/false, eltPattern, ty,
            silRepresentation, flags.isNoDerivative());
    }

    // Process the self parameter.  Note that we implicitly drop self
    // if this is a static foreign-self import.
    if (hasSelf && !Foreign.Self.isImportAsMember()) {
      auto selfParam = params[numNonSelfParams];
      auto ty = selfParam.getParameterType();
      auto eltPattern = origType.getFunctionParamType(numNonSelfParams);
      auto flags = selfParam.getParameterFlags();

      visit(flags.getValueOwnership(), /*forSelf=*/true,
            eltPattern, ty, silRepresentation);
    }

    // Clear the foreign-self handler for safety.
    HandleForeignSelf.reset();
  }

  /// Lower a single formal parameter (possibly flattening a tuple) into one
  /// or more SILParameterInfos appended to Inputs.
  void visit(ValueOwnership ownership, bool forSelf,
             AbstractionPattern origType, CanType substType,
             SILFunctionTypeRepresentation rep,
             bool isNonDifferentiable = false) {
    assert(!isa<InOutType>(substType));

    // Tuples get handled specially, in some cases:
    CanTupleType substTupleTy = dyn_cast<TupleType>(substType);
    if (substTupleTy && !origType.isTypeParameter()) {
      assert(origType.getNumTupleElements() == substTupleTy->getNumElements());
      switch (ownership) {
      case ValueOwnership::Default:
      case ValueOwnership::Owned:
      case ValueOwnership::Shared:
        // Expand the tuple.
        for (auto i : indices(substTupleTy.getElementTypes())) {
          auto &elt = substTupleTy->getElement(i);
          auto ownership = elt.getParameterFlags().getValueOwnership();
          assert(ownership == ValueOwnership::Default);
          assert(!elt.isVararg());
          visit(ownership, forSelf,
                origType.getTupleElementType(i),
                CanType(elt.getRawType()), rep);
        }
        return;
      case ValueOwnership::InOut:
        // handled below
        break;
      }
    }

    unsigned origParamIndex = NextOrigParamIndex++;

    auto substInterfaceType =
      Subst.getSubstitutedInterfaceType(origType, substType);

    // The minimal-expansion lowering decides the convention; the
    // context-expansion lowering supplies the lowered type itself.
    auto &substTLConv = TC.getTypeLowering(origType, substInterfaceType,
                                       TypeExpansionContext::minimal());
    auto &substTL = TC.getTypeLowering(origType, substInterfaceType, expansion);

    ParameterConvention convention;
    if (ownership == ValueOwnership::InOut) {
      convention = ParameterConvention::Indirect_Inout;
    } else if (isFormallyPassedIndirectly(origType, substType, substTLConv)) {
      convention = Convs.getIndirect(ownership, forSelf, origParamIndex,
                                     origType, substTLConv);
      assert(isIndirectFormalParameter(convention));
    } else if (substTL.isTrivial()) {
      convention = ParameterConvention::Direct_Unowned;
    } else {
      convention = Convs.getDirect(ownership, forSelf, origParamIndex, origType,
                                   substTLConv);
      assert(!isIndirectFormalParameter(convention));
    }

    SILParameterInfo param(substTL.getLoweredType().getASTType(), convention);
    if (isNonDifferentiable)
      param = param.getWithDifferentiability(
          SILParameterDifferentiability::NotDifferentiable);
    Inputs.push_back(param);

    maybeAddForeignParameters();
  }

  /// Given that we've just reached an argument index for the
  /// first time, add any foreign parameters.
  void maybeAddForeignParameters() {
    while (maybeAddForeignErrorParameter() ||
           maybeAddForeignSelfParameter()) {
      // Continue to see, just in case there are more parameters to add.
    }
  }

  /// Append the foreign error parameter if NextOrigParamIndex has reached
  /// its position.  Returns true if a parameter was added.
  bool maybeAddForeignErrorParameter() {
    if (!Foreign.Error ||
        NextOrigParamIndex != Foreign.Error->getErrorParameterIndex())
      return false;

    auto foreignErrorTy = TC.getLoweredRValueType(
        expansion, Foreign.Error->getErrorParameterType());

    // Assume the error parameter doesn't have interesting lowering.
    Inputs.push_back(SILParameterInfo(foreignErrorTy,
                                      ParameterConvention::Direct_Unowned));
    ++NextOrigParamIndex;
    return true;
  }

  /// Append the foreign 'self' parameter (via HandleForeignSelf) if
  /// NextOrigParamIndex has reached its position.  Returns true if a
  /// parameter was added.
  bool maybeAddForeignSelfParameter() {
    if (!Foreign.Self.isInstance() ||
        NextOrigParamIndex != Foreign.Self.getSelfIndex())
      return false;

    (*HandleForeignSelf)();
    return true;
  }
};
} // end anonymous namespace
/// Is the given declaration reference "pseudo-generic", i.e. a member of an
/// ObjC lightweight-generics class whose type parameters are erased at
/// runtime?
static bool isPseudogeneric(SILDeclRef c) {
  // FIXME: should this be integrated in with the Sema check that prevents
  // illegal use of type arguments in pseudo-generic method bodies?

  // The implicitly-generated native initializer thunks for imported
  // initializers are never pseudo-generic, because they may need
  // to use their type arguments to bridge their value arguments.
  bool isNativeImportedInit =
      !c.isForeign &&
      (c.kind == SILDeclRef::Kind::Allocator ||
       c.kind == SILDeclRef::Kind::Initializer) &&
      c.getDecl()->hasClangNode();
  if (isNativeImportedInit)
    return false;

  // Otherwise, we have to look at the entity's context.
  DeclContext *dc = nullptr;
  if (c.hasDecl())
    dc = c.getDecl()->getDeclContext();
  else if (auto closure = c.getAbstractClosureExpr())
    dc = closure->getParent();
  else
    return false;

  dc = dc->getInnermostTypeContext();
  if (!dc)
    return false;

  auto classDecl = dc->getSelfClassDecl();
  return classDecl && classDecl->usesObjCGenericsModel();
}
/// Update the result type given the foreign error convention that we will be
/// using.
///
/// Returns the (possibly adjusted) abstraction pattern and substituted
/// formal result type to lower in place of the originals.
static std::pair<AbstractionPattern, CanType> updateResultTypeForForeignError(
    ForeignErrorConvention convention, CanGenericSignature genericSig,
    AbstractionPattern origResultType, CanType substFormalResultType) {
  switch (convention.getKind()) {
  // These conventions replace the result type.
  case ForeignErrorConvention::ZeroResult:
  case ForeignErrorConvention::NonZeroResult:
    // The formal result must have been Void; the foreign result slot
    // (e.g. a status code) becomes the lowered result instead.
    assert(substFormalResultType->isVoid());
    substFormalResultType = convention.getResultType();
    origResultType = AbstractionPattern(genericSig, substFormalResultType);
    return {origResultType, substFormalResultType};

  // These conventions wrap the result type in a level of optionality.
  case ForeignErrorConvention::NilResult:
    assert(!substFormalResultType->getOptionalObjectType());
    substFormalResultType =
        OptionalType::get(substFormalResultType)->getCanonicalType();
    origResultType =
        AbstractionPattern::getOptional(origResultType);
    return {origResultType, substFormalResultType};

  // These conventions don't require changes to the formal error type.
  case ForeignErrorConvention::ZeroPreservedResult:
  case ForeignErrorConvention::NonNilError:
    return {origResultType, substFormalResultType};
  }
  llvm_unreachable("unhandled kind");
}
/// Lower any/all capture context parameters.
///
/// *NOTE* Currently default arg generators can not capture anything.
/// If we ever add that ability, it will be a different capture list
/// from the function to which the argument is attached.
///
/// Appends one SILParameterInfo per lowered capture (dynamic-self metadata,
/// opaque values, and captured declarations) to \p inputs.
static void
lowerCaptureContextParameters(TypeConverter &TC, SILDeclRef function,
                              CanGenericSignature genericSig,
                              TypeExpansionContext expansion,
                              SmallVectorImpl<SILParameterInfo> &inputs) {

  // NB: The generic signature may be elided from the lowered function type
  // if the function is in a fully-specialized context, but we still need to
  // canonicalize references to the generic parameters that may appear in
  // non-canonical types in that context. We need the original generic
  // signature from the AST for that.
  auto origGenericSig = function.getAnyFunctionRef()->getGenericSignature();
  auto loweredCaptures = TC.getLoweredLocalCaptures(function);

  for (auto capture : loweredCaptures.getCaptures()) {
    // A captured dynamic Self is passed as a thick metatype.
    if (capture.isDynamicSelfMetadata()) {
      ParameterConvention convention = ParameterConvention::Direct_Unowned;
      auto dynamicSelfInterfaceType =
          loweredCaptures.getDynamicSelfType()->mapTypeOutOfContext();

      auto selfMetatype = MetatypeType::get(dynamicSelfInterfaceType,
                                            MetatypeRepresentation::Thick);

      auto canSelfMetatype = selfMetatype->getCanonicalType(origGenericSig);
      SILParameterInfo param(canSelfMetatype, convention);
      inputs.push_back(param);

      continue;
    }

    // An opaque value is captured by value: owned directly, or in-memory
    // if it is address-only.
    if (capture.isOpaqueValue()) {
      OpaqueValueExpr *opaqueValue = capture.getOpaqueValue();
      auto canType = opaqueValue->getType()->mapTypeOutOfContext()
          ->getCanonicalType(origGenericSig);
      auto &loweredTL =
          TC.getTypeLowering(AbstractionPattern(genericSig, canType),
                             canType, expansion);
      auto loweredTy = loweredTL.getLoweredType();

      ParameterConvention convention;
      if (loweredTL.isAddressOnly()) {
        convention = ParameterConvention::Indirect_In;
      } else {
        convention = ParameterConvention::Direct_Owned;
      }
      SILParameterInfo param(loweredTy.getASTType(), convention);
      inputs.push_back(param);

      continue;
    }

    auto *VD = capture.getDecl();
    auto type = VD->getInterfaceType();
    auto canType = type->getCanonicalType(origGenericSig);

    auto &loweredTL =
        TC.getTypeLowering(AbstractionPattern(genericSig, canType), canType,
                           expansion);
    auto loweredTy = loweredTL.getLoweredType();
    switch (TC.getDeclCaptureKind(capture, expansion)) {
    case CaptureKind::Constant: {
      // Constants are captured by value.
      ParameterConvention convention;
      assert (!loweredTL.isAddressOnly());
      if (loweredTL.isTrivial()) {
        convention = ParameterConvention::Direct_Unowned;
      } else {
        convention = ParameterConvention::Direct_Guaranteed;
      }
      SILParameterInfo param(loweredTy.getASTType(), convention);
      inputs.push_back(param);
      break;
    }
    case CaptureKind::Box: {
      // The type in the box is lowered in the minimal context.
      auto minimalLoweredTy =
          TC.getTypeLowering(AbstractionPattern(genericSig, canType), canType,
                             TypeExpansionContext::minimal())
              .getLoweredType();
      // Lvalues are captured as a box that owns the captured value.
      auto boxTy = TC.getInterfaceBoxTypeForCapture(
          VD, minimalLoweredTy.getASTType(),
          /*mutable*/ true);
      auto convention = ParameterConvention::Direct_Guaranteed;
      auto param = SILParameterInfo(boxTy, convention);
      inputs.push_back(param);
      break;
    }
    case CaptureKind::StorageAddress: {
      // Non-escaping lvalues are captured as the address of the value.
      SILType ty = loweredTy.getAddressType();
      auto param =
          SILParameterInfo(ty.getASTType(),
                           ParameterConvention::Indirect_InoutAliasable);
      inputs.push_back(param);
      break;
    }
    case CaptureKind::Immutable: {
      // 'let' constants that are address-only are captured as the address of
      // the value and will be consumed by the closure.
      SILType ty = loweredTy.getAddressType();
      auto param =
          SILParameterInfo(ty.getASTType(),
                           ParameterConvention::Indirect_In_Guaranteed);
      inputs.push_back(param);
      break;
    }
    }
  }
}
/// If the given constant refers to a coroutine accessor, return that
/// accessor; otherwise return nullptr.
static AccessorDecl *getAsCoroutineAccessor(Optional<SILDeclRef> constant) {
  if (!constant || !constant->hasDecl())
    return nullptr;  // fixed stray ';;' (empty statement)

  auto accessor = dyn_cast<AccessorDecl>(constant->getDecl());
  if (!accessor || !accessor->isCoroutine())
    return nullptr;

  return accessor;
}
/// Destructure the value yielded by a 'read' accessor into one SILYieldInfo
/// per leaf component, flattening tuples recursively.
static void destructureYieldsForReadAccessor(TypeConverter &TC,
                                          TypeExpansionContext expansion,
                                          AbstractionPattern origType,
                                          CanType valueType,
                                          SmallVectorImpl<SILYieldInfo> &yields,
                                          SubstFunctionTypeCollector &subst) {
  // A tuple pattern contributes one yield per element.
  if (origType.isTuple()) {
    auto valueTupleType = cast<TupleType>(valueType);
    for (auto i : indices(valueTupleType.getElementTypes())) {
      destructureYieldsForReadAccessor(TC, expansion,
                                       origType.getTupleElementType(i),
                                       valueTupleType.getElementType(i),
                                       yields, subst);
    }
    return;
  }

  // Lower the leaf value twice: minimally to pick the convention, and under
  // the requested expansion to get the yielded type.
  auto valueInterfaceType =
      subst.getSubstitutedInterfaceType(origType, valueType);
  auto &conventionTL =
      TC.getTypeLowering(origType, valueInterfaceType,
                         TypeExpansionContext::minimal());
  auto &loweredTL =
      TC.getTypeLowering(origType, valueInterfaceType, expansion);

  // 'read' yields borrow the value: in-memory if indirect, +0 otherwise.
  ParameterConvention convention;
  if (isFormallyPassedIndirectly(TC, origType, valueInterfaceType,
                                 conventionTL)) {
    convention = ParameterConvention::Indirect_In_Guaranteed;
  } else if (conventionTL.isTrivial()) {
    convention = ParameterConvention::Direct_Unowned;
  } else {
    convention = ParameterConvention::Direct_Guaranteed;
  }

  yields.push_back(SILYieldInfo(loweredTL.getLoweredType().getASTType(),
                                convention));
}
/// If the constant being lowered is a coroutine accessor, compute its yields
/// and set the coroutine kind; otherwise leave \p yields empty and
/// \p coroutineKind as None.
static void destructureYieldsForCoroutine(TypeConverter &TC,
                                          TypeExpansionContext expansion,
                                          Optional<SILDeclRef> origConstant,
                                          Optional<SILDeclRef> constant,
                                          Optional<SubstitutionMap> reqtSubs,
                                          SmallVectorImpl<SILYieldInfo> &yields,
                                          SILCoroutineKind &coroutineKind,
                                          SubstFunctionTypeCollector &subst) {
  assert(coroutineKind == SILCoroutineKind::None);
  assert(yields.empty());

  auto accessor = getAsCoroutineAccessor(constant);
  if (!accessor)
    return;

  auto origAccessor = cast<AccessorDecl>(origConstant->getDecl());

  // Coroutine accessors are implicitly yield-once coroutines, despite
  // their function type.
  coroutineKind = SILCoroutineKind::YieldOnce;

  // Coroutine accessors are always native, so fetch the native
  // abstraction pattern.
  auto origStorage = origAccessor->getStorage();
  auto origType = TC.getAbstractionPattern(origStorage, /*nonobjc*/ true)
                    .getReferenceStorageReferentType();

  auto storage = accessor->getStorage();
  auto valueType = storage->getValueInterfaceType();

  // Apply requirement substitutions (e.g. for witness thunks) before
  // canonicalizing against the accessor's generic signature.
  if (reqtSubs) {
    valueType = valueType.subst(*reqtSubs);
  }

  auto canValueType = valueType->getCanonicalType(
    accessor->getGenericSignature());

  // 'modify' yields an inout of the target type.
  if (accessor->getAccessorKind() == AccessorKind::Modify) {
    auto valueInterfaceType = subst.getSubstitutedInterfaceType(origType,
                                                                canValueType);
    auto loweredValueTy =
        TC.getLoweredType(origType, valueInterfaceType, expansion);
    yields.push_back(SILYieldInfo(loweredValueTy.getASTType(),
                                  ParameterConvention::Indirect_Inout));
    return;
  }

  // 'read' yields a borrowed value of the target type, destructuring
  // tuples as necessary.
  assert(accessor->getAccessorKind() == AccessorKind::Read);
  destructureYieldsForReadAccessor(TC, expansion, origType, canValueType,
                                   yields, subst);
}
/// Create the appropriate SIL function type for the given formal type
/// and conventions.
///
/// The lowering of function types is generally sensitive to the
/// declared abstraction pattern.  We want to be able to take
/// advantage of declared type information in order to, say, pass
/// arguments separately and directly; but we also want to be able to
/// call functions from generic code without completely embarrassing
/// performance.  Therefore, different abstraction patterns induce
/// different argument-passing conventions, and we must introduce
/// implicit reabstracting conversions where necessary to map one
/// convention to another.
///
/// However, we actually can't reabstract arbitrary thin function
/// values while still leaving them thin, at least without costly
/// page-mapping tricks. Therefore, the representation must remain
/// consistent across all abstraction patterns.
///
/// We could reabstract block functions in theory, but (1) we don't
/// really need to and (2) doing so would be problematic because
/// stuffing something in an Optional currently forces it to be
/// reabstracted to the most general type, which means that we'd
/// expect the wrong abstraction conventions on bridged block function
/// types.
///
/// Therefore, we only honor abstraction patterns on thick or
/// polymorphic functions.
///
/// FIXME: we shouldn't just drop the original abstraction pattern
/// when we can't reabstract.  Instead, we should introduce
/// dynamic-indirect argument-passing conventions and map opaque
/// archetypes to that, then respect those conventions in IRGen by
/// using runtime call construction.
///
/// \param conventions - conventions as expressed for the original type
static CanSILFunctionType getSILFunctionType(
    TypeConverter &TC, TypeExpansionContext expansionContext, AbstractionPattern origType,
    CanAnyFunctionType substFnInterfaceType, AnyFunctionType::ExtInfo extInfo,
    const Conventions &conventions, const ForeignInfo &foreignInfo,
    Optional<SILDeclRef> origConstant, Optional<SILDeclRef> constant,
    Optional<SubstitutionMap> reqtSubs,
    ProtocolConformanceRef witnessMethodConformance) {
  // Find the generic parameters.
  CanGenericSignature genericSig =
    substFnInterfaceType.getOptGenericSignature();

  // Push the generic context for the duration of this lowering (RAII).
  Optional<TypeConverter::GenericContextRAII> contextRAII;
  if (genericSig) contextRAII.emplace(TC, genericSig);

  // Per above, only fully honor opaqueness in the abstraction pattern
  // for thick or polymorphic functions.  We don't need to worry about
  // non-opaque patterns because the type-checker forbids non-thick
  // function types from having generic parameters or results.
  if (origType.isTypeParameter() &&
      substFnInterfaceType->getExtInfo().getSILRepresentation()
        != SILFunctionType::Representation::Thick &&
      isa<FunctionType>(substFnInterfaceType)) {
    origType = AbstractionPattern(genericSig,
                                  substFnInterfaceType);
  }

  // Map 'throws' to the appropriate error convention.
  Optional<SILResultInfo> errorResult;
  assert((!foreignInfo.Error || substFnInterfaceType->getExtInfo().throws()) &&
         "foreignError was set but function type does not throw?");
  if (substFnInterfaceType->getExtInfo().throws() && !foreignInfo.Error) {
    assert(!origType.isForeign() &&
           "using native Swift error convention for foreign type!");
    SILType exnType = SILType::getExceptionType(TC.Context);
    assert(exnType.isObject());
    errorResult = SILResultInfo(exnType.getASTType(),
                                ResultConvention::Owned);
  }

  // Lower the result type.
  AbstractionPattern origResultType = origType.getFunctionResultType();
  CanType substFormalResultType = substFnInterfaceType.getResult();

  // If we have a foreign error convention, restore the original result type.
  if (auto convention = foreignInfo.Error) {
    std::tie(origResultType, substFormalResultType) =
      updateResultTypeForForeignError(*convention, genericSig, origResultType,
                                      substFormalResultType);
  }

  // Decide whether to collect a substituted generic signature for the
  // lowered function type.
  bool shouldBuildSubstFunctionType = [&]{
    if (!TC.Context.LangOpts.EnableSubstSILFunctionTypesForFunctionValues)
      return false;

    // We always use substituted function types for coroutines that are
    // being lowered in the context of another coroutine, which is to say,
    // for class override thunks.  This is required to make the yields
    // match in abstraction to the base method's yields, which is necessary
    // to make the extracted continuation-function signatures match.
    if (constant != origConstant && getAsCoroutineAccessor(constant))
      return true;

    // We don't currently use substituted function types for generic function
    // type lowering, though we should for generic methods on classes and
    // protocols.
    if (genericSig)
      return false;

    // We only currently use substituted function types for function values,
    // which will have standard thin or thick representation. (Per the previous
    // comment, it would be useful to do so for generic methods on classes and
    // protocols too.)
    auto rep = extInfo.getSILRepresentation();
    return (rep == SILFunctionTypeRepresentation::Thick ||
            rep == SILFunctionTypeRepresentation::Thin);
  }();

  SubstFunctionTypeCollector subst(TC, expansionContext, genericSig,
                                   shouldBuildSubstFunctionType);

  // Destructure the input tuple type.
  SmallVector<SILParameterInfo, 8> inputs;
  {
    DestructureInputs destructurer(expansionContext, TC, conventions,
                                   foreignInfo, inputs, subst);
    destructurer.destructure(origType,
                             substFnInterfaceType.getParams(),
                             extInfo);
  }

  // Destructure the coroutine yields.
  SILCoroutineKind coroutineKind = SILCoroutineKind::None;
  SmallVector<SILYieldInfo, 8> yields;
  destructureYieldsForCoroutine(TC, expansionContext, origConstant, constant,
                                reqtSubs, yields, coroutineKind, subst);

  // Destructure the result tuple type.
  SmallVector<SILResultInfo, 8> results;
  {
    DestructureResults destructurer(expansionContext, TC, conventions,
                                    results, subst);
    destructurer.destructure(origResultType, substFormalResultType);
  }

  // Lower the capture context parameters, if any.
  if (constant && constant->getAnyFunctionRef()) {
    // Serialized (inlinable) functions must use the minimal expansion.
    auto expansion = TypeExpansionContext::maximal(
        expansionContext.getContext(), expansionContext.isWholeModuleContext());
    if (constant->isSerialized())
      expansion = TypeExpansionContext::minimal();
    lowerCaptureContextParameters(TC, *constant, genericSig, expansion, inputs);
  }

  auto calleeConvention = ParameterConvention::Direct_Unowned;
  if (extInfo.hasContext())
    calleeConvention = conventions.getCallee();

  bool pseudogeneric = genericSig && constant
    ? isPseudogeneric(*constant)
    : false;

  // NOTE: SILFunctionType::ExtInfo doesn't track everything that
  // AnyFunctionType::ExtInfo tracks. For example: 'throws' or 'auto-closure'
  auto silExtInfo = SILFunctionType::ExtInfo()
    .withRepresentation(extInfo.getSILRepresentation())
    .withIsPseudogeneric(pseudogeneric)
    .withNoEscape(extInfo.isNoEscape())
    .withDifferentiabilityKind(extInfo.getDifferentiabilityKind());

  // Build the substituted generic signature we extracted.
  SubstitutionMap substitutions;
  if (subst.Enabled) {
    if (!subst.substGenericParams.empty()) {
      auto subSig = GenericSignature::get(subst.substGenericParams,
                                          subst.substRequirements)
                       .getCanonicalSignature();
      substitutions = SubstitutionMap::get(subSig,
                                    llvm::makeArrayRef(subst.substReplacements),
                                    llvm::makeArrayRef(subst.substConformances));
    }
  }

  return SILFunctionType::get(genericSig, silExtInfo, coroutineKind,
                              calleeConvention, inputs, yields,
                              results, errorResult,
                              substitutions, SubstitutionMap(),
                              TC.Context, witnessMethodConformance);
}
//===----------------------------------------------------------------------===//
// Deallocator SILFunctionTypes
//===----------------------------------------------------------------------===//
namespace {
// The convention for general deallocators.
//
// Deallocators take only 'self', which is consumed (Direct_Owned); every
// other parameter entry point is unreachable for this kind of function.
struct DeallocatorConventions : Conventions {
  DeallocatorConventions() : Conventions(ConventionsKind::Deallocator) {}

  ParameterConvention getIndirectParameter(unsigned index,
                             const AbstractionPattern &type,
                             const TypeLowering &substTL) const override {
    llvm_unreachable("Deallocators do not have indirect parameters");
  }

  ParameterConvention getDirectParameter(unsigned index,
                             const AbstractionPattern &type,
                             const TypeLowering &substTL) const override {
    llvm_unreachable("Deallocators do not have non-self direct parameters");
  }

  ParameterConvention getCallee() const override {
    llvm_unreachable("Deallocators do not have callees");
  }

  ResultConvention getResult(const TypeLowering &tl) const override {
    // TODO: Put an unreachable here?
    return ResultConvention::Owned;
  }

  ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const override {
    // The deallocating entry point consumes the object being destroyed.
    // TODO: Investigate whether or not it is
    return ParameterConvention::Direct_Owned;
  }

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("Deallocators do not have indirect self parameters");
  }

  static bool classof(const Conventions *C) {
    return C->getKind() == ConventionsKind::Deallocator;
  }
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Default Convention FunctionTypes
//===----------------------------------------------------------------------===//
namespace {
/// The ownership applied to normal (non-self) parameters by the default
/// Swift conventions below: consumed (+1) or borrowed (+0).
enum class NormalParameterConvention { Owned, Guaranteed };
/// The default Swift conventions.
class DefaultConventions : public Conventions {
  NormalParameterConvention normalParameterConvention;

public:
  DefaultConventions(NormalParameterConvention normalParameterConvention)
      : Conventions(ConventionsKind::Default),
        normalParameterConvention(normalParameterConvention) {}

  /// Are normal parameters passed at +0 (guaranteed) rather than +1 (owned)?
  bool isNormalParameterConventionGuaranteed() const {
    return normalParameterConvention == NormalParameterConvention::Guaranteed;
  }

  ParameterConvention getIndirectParameter(unsigned index,
                            const AbstractionPattern &type,
                            const TypeLowering &substTL) const override {
    return isNormalParameterConventionGuaranteed()
               ? ParameterConvention::Indirect_In_Guaranteed
               : ParameterConvention::Indirect_In;
  }

  ParameterConvention getDirectParameter(unsigned index,
                            const AbstractionPattern &type,
                            const TypeLowering &substTL) const override {
    return isNormalParameterConventionGuaranteed()
               ? ParameterConvention::Direct_Guaranteed
               : ParameterConvention::Direct_Owned;
  }

  ParameterConvention getCallee() const override {
    return DefaultThickCalleeConvention;
  }

  ResultConvention getResult(const TypeLowering &tl) const override {
    return ResultConvention::Owned;
  }

  /// 'self' is borrowed by default.
  ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const override {
    return ParameterConvention::Direct_Guaranteed;
  }

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    return ParameterConvention::Indirect_In_Guaranteed;
  }

  static bool classof(const Conventions *C) {
    return C->getKind() == ConventionsKind::Default;
  }
};
/// The default conventions for Swift initializing constructors.
///
/// Initializing constructors take all parameters (including) self at +1. This
/// is because:
///
/// 1. We are likely to be initializing fields of self implying that the
///    parameters are likely to be forwarded into memory without further
///    copies.
/// 2. Initializers must take 'self' at +1, since they will return it back
///    at +1, and may chain onto Objective-C initializers that replace the
///    instance.
struct DefaultInitializerConventions : DefaultConventions {
  DefaultInitializerConventions()
      : DefaultConventions(NormalParameterConvention::Owned) {}

  /// Initializers must take 'self' at +1, since they will return it back at +1,
  /// and may chain onto Objective-C initializers that replace the instance.
  ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const override {
    return ParameterConvention::Direct_Owned;
  }

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    return ParameterConvention::Indirect_In;
  }
};
/// The convention used for allocating inits. Allocating inits take their normal
/// parameters at +1 and do not have a self parameter.
struct DefaultAllocatorConventions : DefaultConventions {
  DefaultAllocatorConventions()
      : DefaultConventions(NormalParameterConvention::Owned) {}

  ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("Allocating inits do not have self parameters");
  }

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("Allocating inits do not have self parameters");
  }
};
/// The default conventions for Swift setter acccessors.
///
/// These take self at +0, but all other parameters at +1. This is because we
/// assume that setter parameters are likely to be values to be forwarded into
/// memory. Thus by passing in the +1 value, we avoid a potential copy in that
/// case.
///
/// (The +0 'self' behavior is inherited from DefaultConventions; only the
/// normal-parameter convention is configured here.)
struct DefaultSetterConventions : DefaultConventions {
  DefaultSetterConventions()
      : DefaultConventions(NormalParameterConvention::Owned) {}
};
/// The default conventions for ObjC blocks.
///
/// Blocks follow the Objective-C convention: all direct parameters and the
/// block context are unowned (+0), and results are autoreleased.  Indirect
/// parameters and 'self' do not occur for blocks.
struct DefaultBlockConventions : Conventions {
  DefaultBlockConventions() : Conventions(ConventionsKind::DefaultBlock) {}

  ParameterConvention getIndirectParameter(unsigned index,
                            const AbstractionPattern &type,
                            const TypeLowering &substTL) const override {
    llvm_unreachable("indirect block parameters unsupported");
  }

  ParameterConvention getDirectParameter(unsigned index,
                            const AbstractionPattern &type,
                            const TypeLowering &substTL) const override {
    return ParameterConvention::Direct_Unowned;
  }

  ParameterConvention getCallee() const override {
    return ParameterConvention::Direct_Unowned;
  }

  ResultConvention getResult(const TypeLowering &substTL) const override {
    return ResultConvention::Autoreleased;
  }

  ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("objc blocks do not have a self parameter");
  }

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("objc blocks do not have a self parameter");
  }

  static bool classof(const Conventions *C) {
    return C->getKind() == ConventionsKind::DefaultBlock;
  }
};
} // end anonymous namespace
/// Forward declaration: lowers function types with block or
/// C-function-pointer representation (used by getNativeSILFunctionType
/// below); the definition appears later in this file.
static CanSILFunctionType
getSILFunctionTypeForAbstractCFunction(TypeConverter &TC,
                                       AbstractionPattern origType,
                                       CanAnyFunctionType substType,
                                       AnyFunctionType::ExtInfo extInfo,
                                       Optional<SILDeclRef> constant);
/// Lower a native Swift function type to its SIL function type, selecting
/// parameter/result conventions based on what kind of declaration (if any)
/// is being lowered.
///
/// \p origConstant and \p constant must either both be present or both be
/// absent (asserted below); with no constant, plain-function conventions
/// apply.
static CanSILFunctionType getNativeSILFunctionType(
    TypeConverter &TC, TypeExpansionContext context, AbstractionPattern origType,
    CanAnyFunctionType substInterfaceType, AnyFunctionType::ExtInfo extInfo,
    Optional<SILDeclRef> origConstant, Optional<SILDeclRef> constant,
    Optional<SubstitutionMap> reqtSubs,
    ProtocolConformanceRef witnessMethodConformance) {
  assert(bool(origConstant) == bool(constant));
  switch (extInfo.getSILRepresentation()) {
  case SILFunctionType::Representation::Block:
  case SILFunctionType::Representation::CFunctionPointer:
    // C-compatible representations are lowered with C conventions even when
    // the declaration is native.
    return getSILFunctionTypeForAbstractCFunction(TC, origType,
                                                  substInterfaceType,
                                                  extInfo, constant);

  case SILFunctionType::Representation::Thin:
  case SILFunctionType::Representation::ObjCMethod:
  case SILFunctionType::Representation::Thick:
  case SILFunctionType::Representation::Method:
  case SILFunctionType::Representation::Closure:
  case SILFunctionType::Representation::WitnessMethod: {
    // Without a constant, lower as an ordinary function (Kind::Func path).
    switch (constant ? constant->kind : SILDeclRef::Kind::Func) {
    case SILDeclRef::Kind::Initializer:
    case SILDeclRef::Kind::EnumElement:
      // Initializing entry points use the initializer conventions.
      return getSILFunctionType(TC, context, origType, substInterfaceType,
                                extInfo, DefaultInitializerConventions(),
                                ForeignInfo(), origConstant, constant, reqtSubs,
                                witnessMethodConformance);
    case SILDeclRef::Kind::Allocator:
      return getSILFunctionType(TC, context, origType, substInterfaceType,
                                extInfo, DefaultAllocatorConventions(),
                                ForeignInfo(), origConstant, constant, reqtSubs,
                                witnessMethodConformance);
    case SILDeclRef::Kind::Func:
      // If we have a setter, use the special setter convention. This ensures
      // that we take normal parameters at +1.
      if (constant && constant->isSetter()) {
        return getSILFunctionType(TC, context, origType, substInterfaceType,
                                  extInfo, DefaultSetterConventions(),
                                  ForeignInfo(), origConstant, constant,
                                  reqtSubs, witnessMethodConformance);
      }
      // Non-setter functions fall through to the default (+0) conventions.
      LLVM_FALLTHROUGH;
    case SILDeclRef::Kind::Destroyer:
    case SILDeclRef::Kind::GlobalAccessor:
    case SILDeclRef::Kind::DefaultArgGenerator:
    case SILDeclRef::Kind::StoredPropertyInitializer:
    case SILDeclRef::Kind::PropertyWrapperBackingInitializer:
    case SILDeclRef::Kind::IVarInitializer:
    case SILDeclRef::Kind::IVarDestroyer: {
      // Default: normal parameters are passed guaranteed (+0).
      auto conv = DefaultConventions(NormalParameterConvention::Guaranteed);
      return getSILFunctionType(TC, context, origType, substInterfaceType,
                                extInfo, conv, ForeignInfo(), origConstant,
                                constant, reqtSubs, witnessMethodConformance);
    }
    case SILDeclRef::Kind::Deallocator:
      return getSILFunctionType(TC, context, origType, substInterfaceType,
                                extInfo, DeallocatorConventions(),
                                ForeignInfo(), origConstant, constant, reqtSubs,
                                witnessMethodConformance);
    }
  }
  }

  llvm_unreachable("Unhandled SILDeclRefKind in switch.");
}
/// Public entry point: lower a native function type to SIL, deriving the
/// ExtInfo from the abstraction pattern when it carries a function type,
/// and otherwise from the substituted type.
CanSILFunctionType swift::getNativeSILFunctionType(
    TypeConverter &TC, TypeExpansionContext context,
    AbstractionPattern origType, CanAnyFunctionType substType,
    Optional<SILDeclRef> origConstant, Optional<SILDeclRef> substConstant,
    Optional<SubstitutionMap> reqtSubs,
    ProtocolConformanceRef witnessMethodConformance) {
  // Prefer attributes from the original (abstraction-pattern) type when it
  // is itself a function type; fall back to the substituted type otherwise.
  auto origFnType = origType.getAs<AnyFunctionType>();
  AnyFunctionType::ExtInfo extInfo =
      origFnType ? origFnType->getExtInfo() : substType->getExtInfo();
  return ::getNativeSILFunctionType(TC, context, origType, substType, extInfo,
                                    origConstant, substConstant, reqtSubs,
                                    witnessMethodConformance);
}
//===----------------------------------------------------------------------===//
// Foreign SILFunctionTypes
//===----------------------------------------------------------------------===//
/// Is this Clang type a C pointer that Swift imported as a non-trivial
/// type? That combination identifies a foreign class (CF) type.
static bool isCFTypedef(const TypeLowering &tl, clang::QualType type) {
  if (tl.isTrivial())
    return false;
  return type->isPointerType();
}
/// Deduce the convention for a formal C parameter type that's passed
/// indirectly, given nothing but the type itself.
///
/// Generally, whether the parameter is +1 is handled before this.
static ParameterConvention
getIndirectCParameterConvention(clang::QualType type) {
  // Non-trivial C++ types would be Indirect_Inout (at least in Itanium).
  // A trivial const * parameter in C should be considered @in.
  return ParameterConvention::Indirect_In;
}
/// Deduce the convention for a C parameter declaration whose type is
/// passed indirectly.
///
/// Generally, whether the parameter is +1 is handled before this.
static ParameterConvention
getIndirectCParameterConvention(const clang::ParmVarDecl *param) {
  // Delegate to the type-based rule.
  return getIndirectCParameterConvention(param->getType());
}
/// Deduce the convention for a formal C parameter type that's passed
/// directly, given nothing but the type itself.
///
/// Generally, whether the parameter is +1 is handled before this.
static ParameterConvention
getDirectCParameterConvention(clang::QualType type) {
  // Plain direct C parameters carry no ownership.
  return ParameterConvention::Direct_Unowned;
}
/// Deduce the convention for a C parameter declaration whose type is
/// passed directly, honoring Clang ownership attributes.
static ParameterConvention
getDirectCParameterConvention(const clang::ParmVarDecl *param) {
  // ns_consumed / cf_consumed mean the callee receives the value at +1.
  bool isConsumed = param->hasAttr<clang::NSConsumedAttr>() ||
                    param->hasAttr<clang::CFConsumedAttr>();
  if (isConsumed)
    return ParameterConvention::Direct_Owned;
  return getDirectCParameterConvention(param->getType());
}
// The convention used for the 'self' parameter of ObjC methods and
// selector-family entry points (outside of the 'init' family).
// FIXME: that should be Direct_Guaranteed
const auto ObjCSelfConvention = ParameterConvention::Direct_Unowned;
namespace {
/// Conventions for calling an imported Objective-C method, driven by the
/// Clang declaration and its ownership attributes.
class ObjCMethodConventions : public Conventions {
  const clang::ObjCMethodDecl *Method;

public:
  const clang::ObjCMethodDecl *getMethod() const { return Method; }

  ObjCMethodConventions(const clang::ObjCMethodDecl *method)
    : Conventions(ConventionsKind::ObjCMethod), Method(method) {}

  ParameterConvention getIndirectParameter(unsigned index,
                           const AbstractionPattern &type,
                           const TypeLowering &substTL) const override {
    // Consult the corresponding Clang parameter declaration.
    return getIndirectCParameterConvention(Method->param_begin()[index]);
  }

  ParameterConvention getDirectParameter(unsigned index,
                           const AbstractionPattern &type,
                           const TypeLowering &substTL) const override {
    return getDirectCParameterConvention(Method->param_begin()[index]);
  }

  ParameterConvention getCallee() const override {
    // Always thin.
    return ParameterConvention::Direct_Unowned;
  }

  /// Given that a method returns a CF type, infer its method
  /// family. Unfortunately, Clang's getMethodFamily() never
  /// considers a method to be in a special family if its result
  /// doesn't satisfy isObjCRetainable().
  clang::ObjCMethodFamily getMethodFamilyForCFResult() const {
    // Trust an explicit attribute.
    if (auto attr = Method->getAttr<clang::ObjCMethodFamilyAttr>()) {
      switch (attr->getFamily()) {
      case clang::ObjCMethodFamilyAttr::OMF_None:
        return clang::OMF_None;
      case clang::ObjCMethodFamilyAttr::OMF_alloc:
        return clang::OMF_alloc;
      case clang::ObjCMethodFamilyAttr::OMF_copy:
        return clang::OMF_copy;
      case clang::ObjCMethodFamilyAttr::OMF_init:
        return clang::OMF_init;
      case clang::ObjCMethodFamilyAttr::OMF_mutableCopy:
        return clang::OMF_mutableCopy;
      case clang::ObjCMethodFamilyAttr::OMF_new:
        return clang::OMF_new;
      }
      llvm_unreachable("bad attribute value");
    }

    // Otherwise, derive the family from the selector.
    return Method->getSelector().getMethodFamily();
  }

  /// Does this method's family imply a +1 CF result by convention?
  bool isImplicitPlusOneCFResult() const {
    switch (getMethodFamilyForCFResult()) {
    case clang::OMF_None:
    case clang::OMF_dealloc:
    case clang::OMF_finalize:
    case clang::OMF_retain:
    case clang::OMF_release:
    case clang::OMF_autorelease:
    case clang::OMF_retainCount:
    case clang::OMF_self:
    case clang::OMF_initialize:
    case clang::OMF_performSelector:
      return false;

    case clang::OMF_alloc:
    case clang::OMF_new:
    case clang::OMF_mutableCopy:
    case clang::OMF_copy:
      return true;

    case clang::OMF_init:
      // Only instance 'init' methods return at +1.
      return Method->isInstanceMethod();
    }
    llvm_unreachable("bad method family");
  }

  ResultConvention getResult(const TypeLowering &tl) const override {
    // If we imported the result as something trivial, we need to
    // use one of the unowned conventions.
    if (tl.isTrivial()) {
      if (Method->hasAttr<clang::ObjCReturnsInnerPointerAttr>())
        return ResultConvention::UnownedInnerPointer;

      // Unmanaged<T> results are treated as inner pointers as well.
      auto type = tl.getLoweredType();
      if (type.unwrapOptionalType().getStructOrBoundGenericStruct()
            == type.getASTContext().getUnmanagedDecl())
        return ResultConvention::UnownedInnerPointer;
      return ResultConvention::Unowned;
    }

    // Otherwise, the return type had better be a retainable object pointer.
    auto resultType = Method->getReturnType();
    assert(resultType->isObjCRetainableType() || isCFTypedef(tl, resultType));

    // If it's retainable for the purposes of ObjC ARC, we can trust
    // the presence of ns_returns_retained, because Clang will add
    // that implicitly based on the method family.
    if (resultType->isObjCRetainableType()) {
      if (Method->hasAttr<clang::NSReturnsRetainedAttr>())
        return ResultConvention::Owned;
      return ResultConvention::Autoreleased;
    }

    // Otherwise, it's a CF return type, which unfortunately means
    // we can't just trust getMethodFamily(). We should really just
    // change that, but that's an annoying change to make to Clang
    // right now.
    assert(isCFTypedef(tl, resultType));

    // Trust the explicit attributes.
    if (Method->hasAttr<clang::CFReturnsRetainedAttr>())
      return ResultConvention::Owned;
    if (Method->hasAttr<clang::CFReturnsNotRetainedAttr>())
      return ResultConvention::Autoreleased;

    // Otherwise, infer based on the method family.
    if (isImplicitPlusOneCFResult())
      return ResultConvention::Owned;
    return ResultConvention::Autoreleased;
  }

  ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const override {
    // ns_consumes_self (e.g. on init-family methods) consumes the receiver.
    if (Method->hasAttr<clang::NSConsumesSelfAttr>())
      return ParameterConvention::Direct_Owned;

    // The caller is supposed to take responsibility for ensuring
    // that 'self' survives a method call.
    return ObjCSelfConvention;
  }

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("objc methods do not support indirect self parameters");
  }

  static bool classof(const Conventions *C) {
    return C->getKind() == ConventionsKind::ObjCMethod;
  }
};
/// Conventions based on a C function type.
class CFunctionTypeConventions : public Conventions {
  const clang::FunctionType *FnType;

  /// Fetch the i-th formal parameter type; only valid for prototyped
  /// function types.
  clang::QualType getParamType(unsigned i) const {
    return FnType->castAs<clang::FunctionProtoType>()->getParamType(i);
  }

protected:
  /// Protected constructor for subclasses to override the kind passed to the
  /// super class.
  CFunctionTypeConventions(ConventionsKind kind,
                           const clang::FunctionType *type)
      : Conventions(kind), FnType(type) {}

public:
  CFunctionTypeConventions(const clang::FunctionType *type)
      : Conventions(ConventionsKind::CFunctionType), FnType(type) {}

  ParameterConvention
  getIndirectParameter(unsigned index, const AbstractionPattern &type,
                       const TypeLowering &substTL) const override {
    return getIndirectCParameterConvention(getParamType(index));
  }

  ParameterConvention
  getDirectParameter(unsigned index, const AbstractionPattern &type,
                     const TypeLowering &substTL) const override {
    // Consumed parameters (per the prototype) arrive at +1.
    if (cast<clang::FunctionProtoType>(FnType)->isParamConsumed(index))
      return ParameterConvention::Direct_Owned;
    return getDirectCParameterConvention(getParamType(index));
  }

  ParameterConvention getCallee() const override {
    // FIXME: blocks should be Direct_Guaranteed.
    return ParameterConvention::Direct_Unowned;
  }

  ResultConvention getResult(const TypeLowering &tl) const override {
    // Trivial results carry no ownership.
    if (tl.isTrivial())
      return ResultConvention::Unowned;
    // A produces-result annotation hands us the value at +1.
    if (FnType->getExtInfo().getProducesResult())
      return ResultConvention::Owned;
    return ResultConvention::Autoreleased;
  }

  ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("c function types do not have a self parameter");
  }

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("c function types do not have a self parameter");
  }

  static bool classof(const Conventions *C) {
    return C->getKind() == ConventionsKind::CFunctionType;
  }
};
/// Conventions based on C function declarations, which can carry ownership
/// attributes that the bare function type does not.
class CFunctionConventions : public CFunctionTypeConventions {
  using super = CFunctionTypeConventions;
  const clang::FunctionDecl *TheDecl;

public:
  CFunctionConventions(const clang::FunctionDecl *decl)
      : CFunctionTypeConventions(
            ConventionsKind::CFunction,
            decl->getType()->castAs<clang::FunctionType>()),
        TheDecl(decl) {}

  ParameterConvention
  getDirectParameter(unsigned index, const AbstractionPattern &type,
                     const TypeLowering &substTL) const override {
    // cf_consumed only shows up on the declaration, so check it here
    // before deferring to the type-based conventions.
    if (auto param = TheDecl->getParamDecl(index))
      if (param->hasAttr<clang::CFConsumedAttr>())
        return ParameterConvention::Direct_Owned;
    return super::getDirectParameter(index, type, substTL);
  }

  ResultConvention getResult(const TypeLowering &tl) const override {
    if (isCFTypedef(tl, TheDecl->getReturnType())) {
      // The CF attributes aren't represented in the type, so we need
      // to check them here.
      if (TheDecl->hasAttr<clang::CFReturnsRetainedAttr>())
        return ResultConvention::Owned;
      if (TheDecl->hasAttr<clang::CFReturnsNotRetainedAttr>())
        // Probably not actually autoreleased.
        return ResultConvention::Autoreleased;
      // The CF Create/Copy rule only applies to functions that return
      // a CF-runtime type; it does not apply to methods, and it does
      // not apply to functions returning ObjC types.
      if (clang::ento::coreFoundation::followsCreateRule(TheDecl))
        return ResultConvention::Owned;
      return ResultConvention::Autoreleased;
    }

    // Otherwise, fall back on the ARC annotations, which are part
    // of the type.
    return super::getResult(tl);
  }

  static bool classof(const Conventions *C) {
    return C->getKind() == ConventionsKind::CFunction;
  }
};
/// Conventions based on C++ method declarations.
class CXXMethodConventions : public CFunctionTypeConventions {
  using super = CFunctionTypeConventions;
  const clang::CXXMethodDecl *TheDecl;

public:
  CXXMethodConventions(const clang::CXXMethodDecl *decl)
      : CFunctionTypeConventions(
            ConventionsKind::CXXMethod,
            decl->getType()->castAs<clang::FunctionType>()),
        TheDecl(decl) {}

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    // A const method borrows `this`; a non-const method may mutate it.
    return TheDecl->isConst() ? ParameterConvention::Indirect_In_Guaranteed
                              : ParameterConvention::Indirect_Inout;
  }

  static bool classof(const Conventions *C) {
    return C->getKind() == ConventionsKind::CXXMethod;
  }
};
} // end anonymous namespace
/// Given that we have an imported Clang declaration, deduce the
/// ownership conventions for calling it and build the SILFunctionType.
static CanSILFunctionType
getSILFunctionTypeForClangDecl(TypeConverter &TC, const clang::Decl *clangDecl,
                               CanAnyFunctionType origType,
                               CanAnyFunctionType substInterfaceType,
                               AnyFunctionType::ExtInfo extInfo,
                               const ForeignInfo &foreignInfo,
                               Optional<SILDeclRef> constant) {
  // Objective-C methods: conventions come from the method declaration and
  // its ownership attributes.
  if (auto method = dyn_cast<clang::ObjCMethodDecl>(clangDecl)) {
    auto origPattern =
      AbstractionPattern::getObjCMethod(origType, method, foreignInfo.Error);
    return getSILFunctionType(TC, TypeExpansionContext::minimal(), origPattern,
                              substInterfaceType, extInfo,
                              ObjCMethodConventions(method), foreignInfo,
                              constant, constant, None,
                              ProtocolConformanceRef());
  }

  // C++ member functions, with a distinct pattern for overloaded operators.
  if (auto method = dyn_cast<clang::CXXMethodDecl>(clangDecl)) {
    AbstractionPattern origPattern = method->isOverloadedOperator() ?
        AbstractionPattern::getCXXOperatorMethod(origType, method):
        AbstractionPattern::getCXXMethod(origType, method);
    auto conventions = CXXMethodConventions(method);
    return getSILFunctionType(TC, TypeExpansionContext::minimal(), origPattern,
                              substInterfaceType, extInfo, conventions,
                              foreignInfo, constant, constant, None,
                              ProtocolConformanceRef());
  }

  // Plain C functions, possibly imported as a member of a Swift type.
  if (auto func = dyn_cast<clang::FunctionDecl>(clangDecl)) {
    auto clangType = func->getType().getTypePtr();
    AbstractionPattern origPattern =
      foreignInfo.Self.isImportAsMember()
        ? AbstractionPattern::getCFunctionAsMethod(origType, clangType,
                                                   foreignInfo.Self)
        : AbstractionPattern(origType, clangType);
    return getSILFunctionType(TC, TypeExpansionContext::minimal(), origPattern,
                              substInterfaceType, extInfo,
                              CFunctionConventions(func), foreignInfo, constant,
                              constant, None, ProtocolConformanceRef());
  }

  llvm_unreachable("call to unknown kind of C function");
}
/// Lower a function type with a C-compatible representation (block or C
/// function pointer) when there is no Clang declaration, only (possibly)
/// a Clang type carried by the abstraction pattern.
static CanSILFunctionType
getSILFunctionTypeForAbstractCFunction(TypeConverter &TC,
                                       AbstractionPattern origType,
                                       CanAnyFunctionType substType,
                                       AnyFunctionType::ExtInfo extInfo,
                                       Optional<SILDeclRef> constant) {
  // If the pattern carries a Clang type, use its prototype for conventions.
  if (origType.isClangType()) {
    auto clangType = origType.getClangType();
    const clang::FunctionType *fnType;
    if (auto blockPtr = clangType->getAs<clang::BlockPointerType>()) {
      fnType = blockPtr->getPointeeType()->castAs<clang::FunctionType>();
    } else if (auto ptr = clangType->getAs<clang::PointerType>()) {
      // getAs (not castAs): a pointer to a non-function yields null here.
      fnType = ptr->getPointeeType()->getAs<clang::FunctionType>();
    } else if (auto ref = clangType->getAs<clang::ReferenceType>()) {
      fnType = ref->getPointeeType()->getAs<clang::FunctionType>();
    } else if (auto fn = clangType->getAs<clang::FunctionType>()) {
      fnType = fn;
    } else {
      llvm_unreachable("unexpected type imported as a function type");
    }
    // fnType can be null for a pointer/reference to a non-function; in that
    // case fall through to the default block conventions below.
    if (fnType) {
      return getSILFunctionType(
          TC, TypeExpansionContext::minimal(), origType, substType, extInfo,
          CFunctionTypeConventions(fnType), ForeignInfo(), constant, constant,
          None, ProtocolConformanceRef());
    }
  }

  // TODO: Ought to support captures in block funcs.
  return getSILFunctionType(TC, TypeExpansionContext::minimal(), origType,
                            substType, extInfo, DefaultBlockConventions(),
                            ForeignInfo(), constant, constant, None,
                            ProtocolConformanceRef());
}
/// Try to find a clang method declaration for the given function,
/// walking the override chain of a FuncDecl if necessary.
static const clang::Decl *findClangMethod(ValueDecl *method) {
  if (auto *fn = dyn_cast<FuncDecl>(method)) {
    if (auto *clangDecl = fn->getClangDecl())
      return clangDecl;
    // Not directly imported; see if an overridden ancestor was.
    if (auto *overridden = fn->getOverriddenDecl())
      return findClangMethod(overridden);
    return nullptr;
  }

  if (auto *ctor = dyn_cast<ConstructorDecl>(method))
    return ctor->getClangDecl();

  return nullptr;
}
//===----------------------------------------------------------------------===//
// Selector Family SILFunctionTypes
//===----------------------------------------------------------------------===//
/// Derive the ObjC selector family from an identifier.
///
/// Note that this will never derive the Init family, which is too dangerous
/// to leave to chance. Swift functions starting with "init" are always
/// emitted as if they are part of the "none" family.
static ObjCSelectorFamily getObjCSelectorFamily(ObjCSelector name) {
  auto family = name.getSelectorFamily();
  // Demote Init to None; see the note above.
  return family == ObjCSelectorFamily::Init ? ObjCSelectorFamily::None
                                            : family;
}
/// Get the ObjC selector family a foreign SILDeclRef belongs to.
static ObjCSelectorFamily getObjCSelectorFamily(SILDeclRef c) {
  assert(c.isForeign);
  switch (c.kind) {
  case SILDeclRef::Kind::Func: {
    if (!c.hasDecl())
      return ObjCSelectorFamily::None;

    auto *FD = cast<FuncDecl>(c.getDecl());
    if (auto accessor = dyn_cast<AccessorDecl>(FD)) {
      switch (accessor->getAccessorKind()) {
      case AccessorKind::Get:
      case AccessorKind::Set:
        // Getters/setters fall through to selector-based derivation below.
        break;
// Every other accessor kind (generated from AccessorKinds.def) is
// unexpected on a foreign FuncDecl.
#define OBJC_ACCESSOR(ID, KEYWORD)
#define ACCESSOR(ID) \
      case AccessorKind::ID:
#include "swift/AST/AccessorKinds.def"
        llvm_unreachable("Unexpected AccessorKind of foreign FuncDecl");
      }
    }

    return getObjCSelectorFamily(FD->getObjCSelector());
  }
  case SILDeclRef::Kind::Initializer:
  case SILDeclRef::Kind::IVarInitializer:
    return ObjCSelectorFamily::Init;

  /// Currently IRGen wraps alloc/init methods into Swift constructors
  /// with Swift conventions.
  case SILDeclRef::Kind::Allocator:
  /// These constants don't correspond to method families we care about yet.
  case SILDeclRef::Kind::Destroyer:
  case SILDeclRef::Kind::Deallocator:
  case SILDeclRef::Kind::IVarDestroyer:
    return ObjCSelectorFamily::None;

  case SILDeclRef::Kind::EnumElement:
  case SILDeclRef::Kind::GlobalAccessor:
  case SILDeclRef::Kind::DefaultArgGenerator:
  case SILDeclRef::Kind::StoredPropertyInitializer:
  case SILDeclRef::Kind::PropertyWrapperBackingInitializer:
    llvm_unreachable("Unexpected Kind of foreign SILDeclRef");
  }

  llvm_unreachable("Unhandled SILDeclRefKind in switch.");
}
namespace {
/// Conventions for an ObjC entry point known only by its selector family,
/// with no Clang declaration to consult for attributes.
class ObjCSelectorFamilyConventions : public Conventions {
  ObjCSelectorFamily Family;

public:
  ObjCSelectorFamilyConventions(ObjCSelectorFamily family)
    : Conventions(ConventionsKind::ObjCSelectorFamily), Family(family) {}

  ParameterConvention getIndirectParameter(unsigned index,
                                           const AbstractionPattern &type,
                                           const TypeLowering &substTL) const override {
    return ParameterConvention::Indirect_In;
  }

  ParameterConvention getDirectParameter(unsigned index,
                                         const AbstractionPattern &type,
                                         const TypeLowering &substTL) const override {
    return ParameterConvention::Direct_Unowned;
  }

  ParameterConvention getCallee() const override {
    // Always thin.
    return ParameterConvention::Direct_Unowned;
  }

  ResultConvention getResult(const TypeLowering &tl) const override {
    switch (Family) {
    // The create/copy-style families return their result at +1.
    case ObjCSelectorFamily::Alloc:
    case ObjCSelectorFamily::Copy:
    case ObjCSelectorFamily::Init:
    case ObjCSelectorFamily::MutableCopy:
    case ObjCSelectorFamily::New:
      return ResultConvention::Owned;

    case ObjCSelectorFamily::None:
      // Defaults below.
      break;
    }

    // Get the underlying AST type, potentially stripping off one level of
    // optionality while we do it.
    CanType type = tl.getLoweredType().unwrapOptionalType().getASTType();
    if (type->hasRetainablePointerRepresentation()
        || (type->getSwiftNewtypeUnderlyingType() && !tl.isTrivial()))
      return ResultConvention::Autoreleased;
    return ResultConvention::Unowned;
  }

  ParameterConvention
  getDirectSelfParameter(const AbstractionPattern &type) const override {
    // 'init' consumes self; everything else uses the common self convention.
    if (Family == ObjCSelectorFamily::Init)
      return ParameterConvention::Direct_Owned;
    return ObjCSelfConvention;
  }

  ParameterConvention
  getIndirectSelfParameter(const AbstractionPattern &type) const override {
    llvm_unreachable("selector family objc function types do not support "
                     "indirect self parameters");
  }

  static bool classof(const Conventions *C) {
    return C->getKind() == ConventionsKind::ObjCSelectorFamily;
  }
};
} // end anonymous namespace
/// Lower a foreign entry point using only its ObjC selector family to
/// infer ownership conventions.
static CanSILFunctionType
getSILFunctionTypeForObjCSelectorFamily(TypeConverter &TC, ObjCSelectorFamily family,
                                        CanAnyFunctionType origType,
                                        CanAnyFunctionType substInterfaceType,
                                        AnyFunctionType::ExtInfo extInfo,
                                        const ForeignInfo &foreignInfo,
                                        Optional<SILDeclRef> constant) {
  ObjCSelectorFamilyConventions conventions(family);
  AbstractionPattern origPattern(origType);
  return getSILFunctionType(TC, TypeExpansionContext::minimal(), origPattern,
                            substInterfaceType, extInfo, conventions,
                            foreignInfo, constant, constant,
                            /*requirement subs*/ None,
                            ProtocolConformanceRef());
}
/// Recognize accessors synthesized by the Clang importer: accessors that
/// are type members but were imported from a plain C function.
static bool isImporterGeneratedAccessor(const clang::Decl *clangDecl,
                                        SILDeclRef constant) {
  // Must be an accessor.
  if (!isa<AccessorDecl>(constant.getDecl()))
    return false;

  // Must be a type member (two parameter lists: self plus the formals),
  // and must have been imported from a C function.
  return constant.getParameterListCount() == 2 &&
         isa<clang::FunctionDecl>(clangDecl);
}
/// Compute the (uncached) SIL function type for a SILDeclRef, dispatching
/// on whether it is a native Swift entry point or a foreign one.
static CanSILFunctionType getUncachedSILFunctionTypeForConstant(
    TypeConverter &TC, TypeExpansionContext context, SILDeclRef constant,
    CanAnyFunctionType origLoweredInterfaceType) {
  // Thick/Block representations are lowered elsewhere and never reach here.
  assert(origLoweredInterfaceType->getExtInfo().getSILRepresentation()
           != SILFunctionTypeRepresentation::Thick
         && origLoweredInterfaceType->getExtInfo().getSILRepresentation()
             != SILFunctionTypeRepresentation::Block);

  auto extInfo = origLoweredInterfaceType->getExtInfo();

  if (!constant.isForeign) {
    // Native path. Witness methods additionally carry an abstract
    // conformance to the protocol they witness.
    ProtocolConformanceRef witnessMethodConformance;

    if (extInfo.getSILRepresentation() ==
        SILFunctionTypeRepresentation::WitnessMethod) {
      auto proto = constant.getDecl()->getDeclContext()->getSelfProtocolDecl();
      witnessMethodConformance = ProtocolConformanceRef(proto);
    }

    return ::getNativeSILFunctionType(
        TC, context, AbstractionPattern(origLoweredInterfaceType),
        origLoweredInterfaceType, extInfo, constant, constant, None,
        witnessMethodConformance);
  }

  ForeignInfo foreignInfo;

  // If we have a clang decl associated with the Swift decl, derive its
  // ownership conventions.
  if (constant.hasDecl()) {
    auto decl = constant.getDecl();
    if (auto funcDecl = dyn_cast<AbstractFunctionDecl>(decl)) {
      foreignInfo.Error = funcDecl->getForeignErrorConvention();
      foreignInfo.Self = funcDecl->getImportAsMemberStatus();
    }

    if (auto clangDecl = findClangMethod(decl)) {
      // The importer generates accessors that are not actually
      // import-as-member but do involve the same gymnastics with the
      // formal type. That's all that SILFunctionType cares about, so
      // pretend that it's import-as-member.
      if (!foreignInfo.Self.isImportAsMember() &&
          isImporterGeneratedAccessor(clangDecl, constant)) {
        // For a setter, self is the second parameter; for a getter, the
        // first.
        unsigned selfIndex = cast<AccessorDecl>(decl)->isSetter() ? 1 : 0;
        foreignInfo.Self.setSelfIndex(selfIndex);
      }

      return getSILFunctionTypeForClangDecl(TC, clangDecl,
                                            origLoweredInterfaceType,
                                            origLoweredInterfaceType,
                                            extInfo, foreignInfo, constant);
    }
  }

  // If the decl belongs to an ObjC method family, use that family's
  // ownership conventions.
  return getSILFunctionTypeForObjCSelectorFamily(
      TC, getObjCSelectorFamily(constant),
      origLoweredInterfaceType, origLoweredInterfaceType,
      extInfo, foreignInfo, constant);
}
/// Public wrapper: bridge and uncurry the constant's formal interface type,
/// then compute its uncached SIL function type.
CanSILFunctionType TypeConverter::getUncachedSILFunctionTypeForConstant(
    TypeExpansionContext context, SILDeclRef constant,
    CanAnyFunctionType origInterfaceType) {
  auto bridged = getLoweredFormalTypes(constant, origInterfaceType);
  return ::getUncachedSILFunctionTypeForConstant(*this, context, constant,
                                                 bridged.Uncurried);
}
/// Is this declaration a member of a class or a class-bound existential
/// context?
static bool isClassOrProtocolMethod(ValueDecl *vd) {
  auto *dc = vd->getDeclContext();
  if (!dc)
    return false;
  Type selfTy = dc->getDeclaredInterfaceType();
  if (!selfTy)
    return false;
  return selfTy->getClassOrBoundGenericClass() != nullptr ||
         selfTy->isClassExistentialType();
}
/// Determine the SIL function representation used to reference the given
/// SILDeclRef.
SILFunctionTypeRepresentation
TypeConverter::getDeclRefRepresentation(SILDeclRef c) {
  // If this is a foreign thunk, it always has the foreign calling convention.
  if (c.isForeign) {
    // Decl-less refs and import-as-member decls are C function pointers.
    if (!c.hasDecl() ||
        c.getDecl()->isImportAsMember())
      return SILFunctionTypeRepresentation::CFunctionPointer;

    // Class/protocol members and ivar init/destroy use ObjC dispatch.
    if (isClassOrProtocolMethod(c.getDecl()) ||
        c.kind == SILDeclRef::Kind::IVarInitializer ||
        c.kind == SILDeclRef::Kind::IVarDestroyer)
      return SILFunctionTypeRepresentation::ObjCMethod;

    return SILFunctionTypeRepresentation::CFunctionPointer;
  }

  // Anonymous functions currently always have Freestanding CC.
  if (!c.hasDecl())
    return SILFunctionTypeRepresentation::Thin;

  // FIXME: Assert that there is a native entry point
  // available. There's no great way to do this.

  // Protocol witnesses are called using the witness calling convention.
  if (auto proto = dyn_cast<ProtocolDecl>(c.getDecl()->getDeclContext())) {
    // Use the regular method convention for foreign-to-native thunks.
    if (c.isForeignToNativeThunk())
      return SILFunctionTypeRepresentation::Method;
    assert(!c.isNativeToForeignThunk() && "shouldn't be possible");
    return getProtocolWitnessRepresentation(proto);
  }

  switch (c.kind) {
    case SILDeclRef::Kind::GlobalAccessor:
    case SILDeclRef::Kind::DefaultArgGenerator:
    case SILDeclRef::Kind::StoredPropertyInitializer:
    case SILDeclRef::Kind::PropertyWrapperBackingInitializer:
      return SILFunctionTypeRepresentation::Thin;

    case SILDeclRef::Kind::Func:
      // Methods on types use Method; free functions are Thin.
      if (c.getDecl()->getDeclContext()->isTypeContext())
        return SILFunctionTypeRepresentation::Method;
      return SILFunctionTypeRepresentation::Thin;

    case SILDeclRef::Kind::Destroyer:
    case SILDeclRef::Kind::Deallocator:
    case SILDeclRef::Kind::Allocator:
    case SILDeclRef::Kind::Initializer:
    case SILDeclRef::Kind::EnumElement:
    case SILDeclRef::Kind::IVarInitializer:
    case SILDeclRef::Kind::IVarDestroyer:
      return SILFunctionTypeRepresentation::Method;
  }

  llvm_unreachable("Unhandled SILDeclRefKind in switch.");
}
// Provide the ability to turn off the type converter cache to ease debugging.
// When set, getConstantInfo() allocates a fresh SILConstantInfo on every
// query instead of consulting or populating ConstantTypes.
static llvm::cl::opt<bool>
DisableConstantInfoCache("sil-disable-typelowering-constantinfo-cache",
                         llvm::cl::init(false));
/// Compute (and cache) the full lowering information for a SILDeclRef:
/// its formal interface type, bridged/uncurried lowered type, and SIL
/// function type.
const SILConstantInfo &
TypeConverter::getConstantInfo(TypeExpansionContext expansion,
                               SILDeclRef constant) {
  // Consult the cache first, unless disabled for debugging.
  if (!DisableConstantInfoCache) {
    auto found = ConstantTypes.find(std::make_pair(expansion, constant));
    if (found != ConstantTypes.end())
      return *found->second;
  }

  // First, get a function type for the constant. This creates the
  // right type for a getter or setter.
  auto formalInterfaceType = makeConstantInterfaceType(constant);

  // The formal type is just that with the right representation.
  auto rep = getDeclRefRepresentation(constant);
  formalInterfaceType = adjustFunctionType(formalInterfaceType, rep);

  // The lowered type is the formal type, but uncurried and with
  // parameters automatically turned into their bridged equivalents.
  auto bridgedTypes = getLoweredFormalTypes(constant, formalInterfaceType);

  CanAnyFunctionType loweredInterfaceType = bridgedTypes.Uncurried;

  // The SIL type encodes conventions according to the original type.
  CanSILFunctionType silFnType =
    ::getUncachedSILFunctionTypeForConstant(*this, expansion, constant,
                                            loweredInterfaceType);

  // If the constant refers to a derivative function, get the SIL type of the
  // original function and use it to compute the derivative SIL type.
  //
  // This is necessary because the "lowered AST derivative function type" (bc)
  // may differ from the "derivative type of the lowered original function type"
  // (ad):
  //
  // ┌────────────────────┐     lowering      ┌────────────────────┐
  // │ AST orig. fn type  │  ───────(a)─────► │ SIL orig. fn type  │
  // └────────────────────┘                   └────────────────────┘
  //          │                                         │
  //   (b, Sema)                   getAutoDiffDerivativeFunctionType (d, here)
  //          │                                         │
  //          ▼                                         ▼
  // ┌────────────────────┐     lowering      ┌────────────────────┐
  // │ AST deriv. fn type │  ───────(c)─────► │ SIL deriv. fn type │
  // └────────────────────┘                   └────────────────────┘
  //
  // (ad) does not always commute with (bc):
  // - (bc) is the result of computing the AST derivative type (Sema), then
  //   lowering it via SILGen. This is the default lowering behavior, but may
  //   break SIL typing invariants because expected lowered derivative types are
  //   computed from lowered original function types.
  // - (ad) is the result of lowering the original function type, then computing
  //   its derivative type. This is the expected lowered derivative type,
  //   preserving SIL typing invariants.
  //
  // Always use (ad) to compute lowered derivative function types.
  if (auto *derivativeId = constant.derivativeFunctionIdentifier) {
    // Get lowered original function type.
    auto origFnConstantInfo = getConstantInfo(
        TypeExpansionContext::minimal(), constant.asAutoDiffOriginalFunction());
    // Use it to compute lowered derivative function type.
    auto *loweredParamIndices = autodiff::getLoweredParameterIndices(
        derivativeId->getParameterIndices(), formalInterfaceType);
    // Results = formal results plus indirect-mutating (inout) parameters.
    auto numResults =
        origFnConstantInfo.SILFnType->getNumResults() +
        origFnConstantInfo.SILFnType->getNumIndirectMutatingParameters();
    auto *loweredResultIndices = IndexSubset::getDefault(
        M.getASTContext(), numResults, /*includeAll*/ true);
    silFnType = origFnConstantInfo.SILFnType->getAutoDiffDerivativeFunctionType(
        loweredParamIndices, loweredResultIndices, derivativeId->getKind(),
        *this, LookUpConformanceInModule(&M));
  }

  LLVM_DEBUG(llvm::dbgs() << "lowering type for constant ";
             constant.print(llvm::dbgs());
             llvm::dbgs() << "\n  formal type: ";
             formalInterfaceType.print(llvm::dbgs());
             llvm::dbgs() << "\n  lowered AST type: ";
             loweredInterfaceType.print(llvm::dbgs());
             llvm::dbgs() << "\n  SIL type: ";
             silFnType.print(llvm::dbgs());
             llvm::dbgs() << "\n  Expansion context: "
                          << expansion.shouldLookThroughOpaqueTypeArchetypes();
             llvm::dbgs() << "\n");

  // Allocate the result from the ASTContext's allocator.
  auto resultBuf = Context.Allocate(sizeof(SILConstantInfo),
                                    alignof(SILConstantInfo));
  auto result = ::new (resultBuf) SILConstantInfo{formalInterfaceType,
                                                  bridgedTypes.Pattern,
                                                  loweredInterfaceType,
                                                  silFnType};
  if (DisableConstantInfoCache)
    return *result;

  // Cache it; the insert must succeed since we missed the cache above.
  auto inserted =
      ConstantTypes.insert({std::make_pair(expansion, constant), result});
  assert(inserted.second);
  (void)inserted;
  return *result;
}
/// Returns the SILParameterInfo for the given declaration's `self` parameter.
/// `constant` must refer to a method.
SILParameterInfo
TypeConverter::getConstantSelfParameter(TypeExpansionContext context,
                                        SILDeclRef constant) {
  auto fnTy = getConstantFunctionType(context, constant);
  auto params = fnTy->getParameters();

  // In most cases "self" is lowered as the final parameter. The exception
  // is a C function imported as a member, where self's position comes from
  // the import.
  if (constant.isForeign && constant.hasDecl()) {
    if (auto *fn = dyn_cast<AbstractFunctionDecl>(constant.getDecl())) {
      // A static member imported from C has no self parameter at all.
      if (fn->isImportAsStaticMember())
        return SILParameterInfo();
      if (fn->isImportAsInstanceMember())
        return params[fn->getSelfIndex()];
    }
  }

  return params.back();
}
// This check duplicates TypeConverter::checkForABIDifferences(), but on AST
// types. We only want to introduce a new vtable thunk when the AST type
// changes; a pure abstraction change (e.g. an @in parameter becoming
// @guaranteed) should not force a new entry.
static bool checkASTTypeForABIDifferences(CanType type1,
                                          CanType type2) {
  const bool abiCompatible =
      type1->matches(type2, TypeMatchFlags::AllowABICompatible);
  return !abiCompatible;
}
// FIXME: This makes me very upset. Can we do without this?
/// Transfer the optionality structure of `derived` onto `base`, recursing
/// in parallel through optionals, tuples, and function types.
///
/// Used when building the vtable-thunk abstraction pattern: the derived
/// override may differ from the base in optionality (covariant result /
/// contravariant parameters), and the caller wants the base's lowered type
/// with the derived method's optionality applied.
static CanType copyOptionalityFromDerivedToBase(TypeConverter &tc,
                                                CanType derived,
                                                CanType base) {
  // Unwrap optionals, but remember that we did.
  bool derivedWasOptional = false;
  if (auto object = derived.getOptionalObjectType()) {
    derivedWasOptional = true;
    derived = object;
  }
  if (auto object = base.getOptionalObjectType()) {
    base = object;
  }

  // T? +> S = (T +> S)?
  // T? +> S? = (T +> S)?
  if (derivedWasOptional) {
    // Recurse on the unwrapped payloads, then re-wrap in Optional.
    base = copyOptionalityFromDerivedToBase(tc, derived, base);

    auto optDecl = tc.Context.getOptionalDecl();
    return CanType(BoundGenericEnumType::get(optDecl, Type(), base));
  }

  // (T1, T2, ...) +> (S1, S2, ...) = (T1 +> S1, T2 +> S2, ...)
  if (auto derivedTuple = dyn_cast<TupleType>(derived)) {
    if (auto baseTuple = dyn_cast<TupleType>(base)) {
      assert(derivedTuple->getNumElements() == baseTuple->getNumElements());
      SmallVector<TupleTypeElt, 4> elements;
      // Element labels/flags come from the base; only the types recurse.
      for (unsigned i = 0, e = derivedTuple->getNumElements(); i < e; i++) {
        elements.push_back(
          baseTuple->getElement(i).getWithType(
            copyOptionalityFromDerivedToBase(
              tc,
              derivedTuple.getElementType(i),
              baseTuple.getElementType(i))));
      }
      return CanType(TupleType::get(elements, tc.Context));
    }
  }

  // (T1 -> T2) +> (S1 -> S2) = (T1 +> S1) -> (T2 +> S2)
  if (auto derivedFunc = dyn_cast<AnyFunctionType>(derived)) {
    if (auto baseFunc = dyn_cast<AnyFunctionType>(base)) {
      SmallVector<FunctionType::Param, 8> params;

      auto derivedParams = derivedFunc.getParams();
      auto baseParams = baseFunc.getParams();
      assert(derivedParams.size() == baseParams.size());
      for (unsigned i = 0, e = derivedParams.size(); i < e; ++i) {
        assert(derivedParams[i].getParameterFlags() ==
               baseParams[i].getParameterFlags());

        // Recurse on each parameter's plain type; flags come from the base.
        params.emplace_back(
          copyOptionalityFromDerivedToBase(
            tc,
            derivedParams[i].getPlainType(),
            baseParams[i].getPlainType()),
          Identifier(),
          baseParams[i].getParameterFlags());
      }

      auto result = copyOptionalityFromDerivedToBase(tc,
                                                     derivedFunc.getResult(),
                                                     baseFunc.getResult());
      // Signature and ext-info are taken from the base function type.
      return CanAnyFunctionType::get(baseFunc.getOptGenericSignature(),
                                     llvm::makeArrayRef(params), result,
                                     baseFunc->getExtInfo());
    }
  }

  // Structurally dissimilar (or leaf) types: the base type wins unchanged.
  return base;
}
/// Returns the ConstantInfo corresponding to the VTable thunk for overriding.
/// Will be the same as getConstantInfo if the declaration does not override.
///
/// \param derived the overriding method.
/// \param base the method that introduced the vtable entry (must not itself
///        be an override). Results are cached in ConstantOverrideTypes.
const SILConstantInfo &
TypeConverter::getConstantOverrideInfo(TypeExpansionContext context,
                                       SILDeclRef derived, SILDeclRef base) {
  // Foreign overrides currently don't need reabstraction.
  if (derived.isForeign)
    return getConstantInfo(context, derived);

  // Check the cache first.
  auto found = ConstantOverrideTypes.find({derived, base});
  if (found != ConstantOverrideTypes.end())
    return *found->second;

  assert(base.requiresNewVTableEntry() && "base must not be an override");

  // Figure out the generic signature for the class method call. This is the
  // signature of the derived class, with requirements transplanted from
  // the base method. The derived method is allowed to have fewer
  // requirements, in which case the thunk will translate the calling
  // convention appropriately before calling the derived method.
  bool hasGenericRequirementDifference = false;

  auto derivedSig = derived.getDecl()->getAsGenericContext()
                                     ->getGenericSignature();
  auto genericSig = Context.getOverrideGenericSignature(base.getDecl(),
                                                        derived.getDecl());
  if (genericSig) {
    hasGenericRequirementDifference =
      !genericSig->requirementsNotSatisfiedBy(derivedSig).empty();
  }

  auto baseInfo = getConstantInfo(context, base);
  auto derivedInfo = getConstantInfo(context, derived);

  // The derived formal type is curried: its single parameter is 'self'.
  auto params = derivedInfo.FormalType.getParams();
  assert(params.size() == 1);
  auto selfInterfaceTy = params[0].getPlainType()->getMetatypeInstanceType();

  // Map the base method's type into the derived class's context.
  auto overrideInterfaceTy =
      cast<AnyFunctionType>(
          selfInterfaceTy->adjustSuperclassMemberDeclType(
              base.getDecl(), derived.getDecl(), baseInfo.FormalType)
              ->getCanonicalType());

  // Build the formal AST function type for the class method call.
  auto basePattern = AbstractionPattern(baseInfo.LoweredType);
  if (!hasGenericRequirementDifference &&
      !checkASTTypeForABIDifferences(derivedInfo.FormalType,
                                     overrideInterfaceTy)) {
    // The derived method is ABI-compatible with the base method. Let's
    // just use the derived method's formal type.
    basePattern = AbstractionPattern(
      copyOptionalityFromDerivedToBase(
        *this,
        derivedInfo.LoweredType,
        baseInfo.LoweredType));
    overrideInterfaceTy = derivedInfo.FormalType;
  }

  // Re-wrap in a generic function type if the override signature has any
  // non-concrete generic parameters.
  if (genericSig && !genericSig->areAllParamsConcrete()) {
    overrideInterfaceTy =
      cast<AnyFunctionType>(
        GenericFunctionType::get(genericSig,
                                 overrideInterfaceTy->getParams(),
                                 overrideInterfaceTy->getResult(),
                                 overrideInterfaceTy->getExtInfo())
          ->getCanonicalType());
  }

  // Build the lowered AST function type for the class method call.
  auto bridgedTypes = getLoweredFormalTypes(derived, overrideInterfaceTy);

  // Build the SILFunctionType for the class method call.
  CanSILFunctionType fnTy = getNativeSILFunctionType(
      *this, context, basePattern, bridgedTypes.Uncurried, base, derived,
      /*reqt subs*/ None, ProtocolConformanceRef());

  // Build the SILConstantInfo and cache it.
  auto resultBuf = Context.Allocate(sizeof(SILConstantInfo),
                                    alignof(SILConstantInfo));
  auto result = ::new (resultBuf) SILConstantInfo{
    overrideInterfaceTy,
    basePattern,
    bridgedTypes.Uncurried,
    fnTy};

  auto inserted = ConstantOverrideTypes.insert({{derived, base}, result});
  assert(inserted.second);
  (void)inserted;
  return *result;
}
namespace {

/// Given a lowered SIL type, apply a substitution to it to produce another
/// lowered SIL type which uses the same abstraction conventions.
class SILTypeSubstituter :
    public CanTypeVisitor<SILTypeSubstituter, CanType> {
  TypeConverter &TC;
  // Maps a substitutable type to its replacement.
  TypeSubstitutionFn Subst;
  // Resolves conformances for substituted dependent types.
  LookupConformanceFn Conformances;
  // The signature for the original type.
  //
  // Replacement types are lowered with respect to the current
  // context signature.
  CanGenericSignature Sig;

  // Controls e.g. whether opaque archetypes are looked through.
  TypeExpansionContext typeExpansionContext;

  bool shouldSubstituteOpaqueArchetypes;

public:
  SILTypeSubstituter(TypeConverter &TC,
                     TypeExpansionContext context,
                     TypeSubstitutionFn Subst,
                     LookupConformanceFn Conformances,
                     CanGenericSignature Sig,
                     bool shouldSubstituteOpaqueArchetypes)
    : TC(TC),
      Subst(Subst),
      Conformances(Conformances),
      Sig(Sig),
      typeExpansionContext(context),
      shouldSubstituteOpaqueArchetypes(shouldSubstituteOpaqueArchetypes)
  {}

  // SIL type lowering only does special things to tuples and functions.

  // When a function appears inside of another type, we only perform
  // substitutions if it is not polymorphic.
  CanSILFunctionType visitSILFunctionType(CanSILFunctionType origType) {
    return substSILFunctionType(origType, false);
  }

  /// Substitute into a substitution map, then expand opaque archetypes in
  /// the result when the expansion context requires it.
  SubstitutionMap substSubstitutions(SubstitutionMap subs) {
    // Substitute the substitutions.
    SubstOptions options = None;
    if (shouldSubstituteOpaqueArchetypes)
      options |= SubstFlags::SubstituteOpaqueArchetypes;

    // Expand substituted type according to the expansion context.
    auto newSubs = subs.subst(Subst, Conformances, options);

    // If we need to look through opaque types in this context, re-substitute
    // according to the expansion context.
    newSubs = substOpaqueTypes(newSubs);

    return newSubs;
  }

  /// Replace opaque archetypes in \p subs with their underlying types, if
  /// the expansion context permits looking through them; otherwise return
  /// \p subs unchanged.
  SubstitutionMap substOpaqueTypes(SubstitutionMap subs) {
    if (!typeExpansionContext.shouldLookThroughOpaqueTypeArchetypes())
      return subs;

    return subs.subst([&](SubstitutableType *s) -> Type {
        return substOpaqueTypesWithUnderlyingTypes(s->getCanonicalType(),
                                                   typeExpansionContext);
      }, [&](CanType dependentType,
             Type conformingReplacementType,
             ProtocolDecl *conformedProtocol) -> ProtocolConformanceRef {
        return substOpaqueTypesWithUnderlyingTypes(
               ProtocolConformanceRef(conformedProtocol),
               conformingReplacementType->getCanonicalType(),
               typeExpansionContext);
      }, SubstFlags::SubstituteOpaqueArchetypes);
  }

  // Substitute a function type.
  CanSILFunctionType substSILFunctionType(CanSILFunctionType origType,
                                          bool isGenericApplication) {
    assert((!isGenericApplication || origType->isPolymorphic()) &&
           "generic application without invocation signature or with "
           "existing arguments");
    assert((!isGenericApplication || !shouldSubstituteOpaqueArchetypes) &&
           "generic application while substituting opaque archetypes");

    // The general substitution rule is that we should only substitute
    // into the free components of the type, i.e. the components that
    // aren't inside a generic signature. That rule would say:
    //
    // - If there are invocation substitutions, just substitute those;
    //   the other components are necessarily inside the invocation
    //   generic signature.
    //
    // - Otherwise, if there's an invocation generic signature,
    //   substitute nothing. If we are applying generic arguments,
    //   add the appropriate invocation substitutions.
    //
    // - Otherwise, if there are pattern substitutions, just substitute
    //   those; the other components are inside the pattern generic
    //   signature.
    //
    // - Otherwise, substitute the basic components.
    //
    // There are two caveats here. The first is that we haven't yet
    // written all the code that would be necessary in order to handle
    // invocation substitutions everywhere, and so we never build those.
    // Instead, we substitute into the pattern substitutions if present,
    // or the components if not, and build a type with no invocation
    // signature. As a special case, when substituting a coroutine type,
    // we build pattern substitutions instead of substituting the
    // component types in order to preserve the original yield structure,
    // which factors into the continuation function ABI.
    //
    // The second is that this function is also used when substituting
    // opaque archetypes. In this case, we may need to substitute
    // into component types even within generic signatures. This is
    // safe because the substitutions used in this case don't change
    // generics, they just narrowly look through certain opaque archetypes.
    // If substitutions are present, we still don't substitute into
    // the basic components, in order to maintain the information about
    // what was abstracted there.
    auto patternSubs = origType->getPatternSubstitutions();

    // If we have an invocation signature, we generally shouldn't
    // substitute into the pattern substitutions and component types.
    if (auto sig = origType->getInvocationGenericSignature()) {
      // Substitute the invocation substitutions if present.
      if (auto invocationSubs = origType->getInvocationSubstitutions()) {
        assert(!isGenericApplication);
        invocationSubs = substSubstitutions(invocationSubs);
        auto substType =
          origType->withInvocationSubstitutions(invocationSubs);

        // Also do opaque-type substitutions on the pattern substitutions
        // if requested and applicable.
        if (patternSubs) {
          patternSubs = substOpaqueTypes(patternSubs);
          substType = substType->withPatternSubstitutions(patternSubs);
        }

        return substType;
      }

      // Otherwise, we shouldn't substitute any components except
      // when substituting opaque archetypes.

      // If we're doing a generic application, and there are pattern
      // substitutions, substitute into the pattern substitutions; or if
      // it's a coroutine, build pattern substitutions; or else, fall
      // through to substitute the component types as discussed above.
      if (isGenericApplication) {
        if (patternSubs || origType->isCoroutine()) {
          CanSILFunctionType substType = origType;
          if (typeExpansionContext.shouldLookThroughOpaqueTypeArchetypes()) {
            substType =
              origType->substituteOpaqueArchetypes(TC, typeExpansionContext);
          }

          SubstitutionMap subs;
          if (patternSubs) {
            subs = substSubstitutions(patternSubs);
          } else {
            // Coroutine with no pattern subs: build them from the
            // invocation signature so the yield structure is preserved.
            subs = SubstitutionMap::get(sig, Subst, Conformances);
          }
          auto witnessConformance = substWitnessConformance(origType);
          substType = substType->withPatternSpecialization(nullptr, subs,
                                                           witnessConformance);
          if (typeExpansionContext.shouldLookThroughOpaqueTypeArchetypes()) {
            substType =
              substType->substituteOpaqueArchetypes(TC, typeExpansionContext);
          }
          return substType;
        }
        // else fall down to component substitution

      // If we're substituting opaque archetypes, and there are pattern
      // substitutions present, just substitute those and preserve the
      // basic structure in the component types. Otherwise, fall through
      // to substitute the component types.
      } else if (shouldSubstituteOpaqueArchetypes) {
        if (patternSubs) {
          patternSubs = substOpaqueTypes(patternSubs);
          auto witnessConformance = substWitnessConformance(origType);
          return origType->withPatternSpecialization(sig, patternSubs,
                                                     witnessConformance);
        }
        // else fall down to component substitution

      // Otherwise, don't try to substitute bound components.
      } else {
        auto substType = origType;
        if (patternSubs) {
          patternSubs = substOpaqueTypes(patternSubs);
          auto witnessConformance = substWitnessConformance(origType);
          substType = substType->withPatternSpecialization(sig, patternSubs,
                                                           witnessConformance);
        }
        return substType;
      }

    // Otherwise, if there are pattern substitutions, just substitute
    // into those and don't touch the component types.
    } else if (patternSubs) {
      patternSubs = substSubstitutions(patternSubs);
      auto witnessConformance = substWitnessConformance(origType);
      return origType->withPatternSpecialization(nullptr, patternSubs,
                                                 witnessConformance);
    }

    // Otherwise, we need to substitute component types.

    SmallVector<SILResultInfo, 8> substResults;
    substResults.reserve(origType->getNumResults());
    for (auto origResult : origType->getResults()) {
      substResults.push_back(substInterface(origResult));
    }

    auto substErrorResult = origType->getOptionalErrorResult();
    assert(!substErrorResult ||
           (!substErrorResult->getInterfaceType()->hasTypeParameter() &&
            !substErrorResult->getInterfaceType()->hasArchetype()));

    SmallVector<SILParameterInfo, 8> substParams;
    substParams.reserve(origType->getParameters().size());
    for (auto &origParam : origType->getParameters()) {
      substParams.push_back(substInterface(origParam));
    }

    SmallVector<SILYieldInfo, 8> substYields;
    substYields.reserve(origType->getYields().size());
    for (auto &origYield : origType->getYields()) {
      substYields.push_back(substInterface(origYield));
    }

    auto witnessMethodConformance = substWitnessConformance(origType);

    // The substituted type is no longer generic, so it'd never be
    // pseudogeneric.
    auto extInfo = origType->getExtInfo();
    if (!shouldSubstituteOpaqueArchetypes)
      extInfo = extInfo.withIsPseudogeneric(false);

    auto genericSig = shouldSubstituteOpaqueArchetypes
                          ? origType->getInvocationGenericSignature()
                          : nullptr;

    return SILFunctionType::get(genericSig, extInfo,
                                origType->getCoroutineKind(),
                                origType->getCalleeConvention(), substParams,
                                substYields, substResults, substErrorResult,
                                SubstitutionMap(), SubstitutionMap(),
                                TC.Context, witnessMethodConformance);
  }

  /// Substitute the witness-method conformance carried by \p origType,
  /// or return it unchanged if invalid.
  ProtocolConformanceRef substWitnessConformance(CanSILFunctionType origType) {
    auto conformance = origType->getWitnessMethodConformanceOrInvalid();
    if (!conformance) return conformance;

    assert(origType->getExtInfo().hasSelfParam());
    auto selfType = origType->getSelfParameter().getInterfaceType();

    // The Self type can be nested in a few layers of metatypes (etc.).
    while (auto metatypeType = dyn_cast<MetatypeType>(selfType)) {
      auto next = metatypeType.getInstanceType();
      if (next == selfType)
        break;
      selfType = next;
    }

    auto substConformance =
        conformance.subst(selfType, Subst, Conformances);

    // Substitute the underlying conformance of opaque type archetypes if we
    // should look through opaque archetypes.
    if (typeExpansionContext.shouldLookThroughOpaqueTypeArchetypes()) {
      SubstOptions substOptions(None);
      auto substType = selfType.subst(Subst, Conformances, substOptions)
                           ->getCanonicalType();
      if (substType->hasOpaqueArchetype()) {
        substConformance = substOpaqueTypesWithUnderlyingTypes(
            substConformance, substType, typeExpansionContext);
      }
    }

    return substConformance;
  }

  /// Substitute a lowered SIL type, preserving its value/address category.
  SILType subst(SILType type) {
    return SILType::getPrimitiveType(visit(type.getASTType()),
                                     type.getCategory());
  }

  SILResultInfo substInterface(SILResultInfo orig) {
    return SILResultInfo(visit(orig.getInterfaceType()), orig.getConvention());
  }

  SILYieldInfo substInterface(SILYieldInfo orig) {
    return SILYieldInfo(visit(orig.getInterfaceType()), orig.getConvention());
  }

  SILParameterInfo substInterface(SILParameterInfo orig) {
    return SILParameterInfo(visit(orig.getInterfaceType()),
                            orig.getConvention(), orig.getDifferentiability());
  }

  /// Tuples need to have their component types substituted by these
  /// same rules.
  CanType visitTupleType(CanTupleType origType) {
    // Fast-path the empty tuple.
    if (origType->getNumElements() == 0) return origType;

    SmallVector<TupleTypeElt, 8> substElts;
    substElts.reserve(origType->getNumElements());
    for (auto &origElt : origType->getElements()) {
      auto substEltType = visit(CanType(origElt.getType()));
      substElts.push_back(origElt.getWithType(substEltType));
    }
    return CanType(TupleType::get(substElts, TC.Context));
  }
  // Block storage types need to substitute their capture type by these same
  // rules.
  CanType visitSILBlockStorageType(CanSILBlockStorageType origType) {
    auto substCaptureType = visit(origType->getCaptureType());
    return SILBlockStorageType::get(substCaptureType);
  }

  /// Optionals need to have their object types substituted by these rules.
  CanType visitBoundGenericEnumType(CanBoundGenericEnumType origType) {
    // Only use a special rule if it's Optional.
    if (!origType->getDecl()->isOptionalDecl()) {
      return visitType(origType);
    }

    CanType origObjectType = origType.getGenericArgs()[0];
    CanType substObjectType = visit(origObjectType);
    return CanType(BoundGenericType::get(origType->getDecl(), Type(),
                                         substObjectType));
  }

  /// Any other type would be a valid type in the AST. Just apply the
  /// substitution on the AST level and then lower that.
  CanType visitType(CanType origType) {
    assert(!isa<AnyFunctionType>(origType));
    assert(!isa<LValueType>(origType) && !isa<InOutType>(origType));

    SubstOptions substOptions(None);
    if (shouldSubstituteOpaqueArchetypes)
      substOptions = SubstFlags::SubstituteOpaqueArchetypes |
                     SubstFlags::AllowLoweredTypes;
    auto substType =
        origType.subst(Subst, Conformances, substOptions)->getCanonicalType();

    // If the substitution didn't change anything, we know that the
    // original type was a lowered type, so we're good.
    if (origType == substType) {
      return origType;
    }

    // Otherwise, re-lower the substituted type under the original's
    // abstraction pattern.
    AbstractionPattern abstraction(Sig, origType);
    return TC.getLoweredRValueType(typeExpansionContext, abstraction,
                                   substType);
  }
};

} // end anonymous namespace
/// Apply an arbitrary substitution function to this lowered SIL type,
/// preserving its abstraction conventions.
SILType SILType::subst(TypeConverter &tc, TypeSubstitutionFn subs,
                       LookupConformanceFn conformances,
                       CanGenericSignature genericSig,
                       bool shouldSubstituteOpaqueArchetypes) const {
  // Fast path: nothing a substitution could affect.
  bool mayContainOpaque =
      shouldSubstituteOpaqueArchetypes && getASTType()->hasOpaqueArchetype();
  if (!hasArchetype() && !hasTypeParameter() && !mayContainOpaque)
    return *this;

  SILTypeSubstituter substituter(tc, TypeExpansionContext::minimal(), subs,
                                 conformances, genericSig,
                                 shouldSubstituteOpaqueArchetypes);
  return substituter.subst(*this);
}
SILType SILType::subst(SILModule &M, TypeSubstitutionFn subs,
                       LookupConformanceFn conformances,
                       CanGenericSignature genericSig,
                       bool shouldSubstituteOpaqueArchetypes) const {
  // Delegate to the TypeConverter-based overload.
  auto &tc = M.Types;
  return subst(tc, subs, conformances, genericSig,
               shouldSubstituteOpaqueArchetypes);
}
SILType SILType::subst(TypeConverter &tc, SubstitutionMap subs) const {
  // Unpack the substitution map into query callbacks and forward.
  auto canSig = subs.getGenericSignature().getCanonicalSignature();
  return subst(tc, QuerySubstitutionMap{subs},
               LookUpConformanceInSubstitutionMap(subs), canSig);
}
/// Convenience overload: substitute via the module's type converter.
SILType SILType::subst(SILModule &M, SubstitutionMap subs) const {
  auto &types = M.Types;
  return subst(types, subs);
}
/// Substitute this SIL type under the given expansion context (which may
/// look through opaque archetypes).
SILType SILType::subst(SILModule &M, SubstitutionMap subs,
                       TypeExpansionContext context) const {
  // Fast path: nothing a substitution could affect.
  if (!hasArchetype() && !hasTypeParameter() &&
      !getASTType()->hasOpaqueArchetype())
    return *this;

  // Pass the TypeSubstitutionFn and LookupConformanceFn as arguments so that
  // the llvm::function_ref value's scope spans the STST.subst call since
  // SILTypeSubstituter captures these functions.
  auto result = [&](TypeSubstitutionFn subsFn,
                    LookupConformanceFn conformancesFn) -> SILType {
    SILTypeSubstituter STST(M.Types, context, subsFn, conformancesFn,
                            subs.getGenericSignature().getCanonicalSignature(),
                            false);
    return STST.subst(*this);
  }(QuerySubstitutionMap{subs}, LookUpConformanceInSubstitutionMap(subs));
  return result;
}
/// Apply a substitution to this polymorphic SILFunctionType so that
/// it has the form of the normal SILFunctionType for the substituted
/// type, except using the original conventions.
CanSILFunctionType
SILFunctionType::substGenericArgs(SILModule &silModule, SubstitutionMap subs,
                                  TypeExpansionContext context) {
  // Nothing to do for non-generic types or for an empty substitution map.
  if (!isPolymorphic() || subs.empty())
    return CanSILFunctionType(this);

  return substGenericArgs(silModule, QuerySubstitutionMap{subs},
                          LookUpConformanceInSubstitutionMap(subs), context);
}
CanSILFunctionType
SILFunctionType::substGenericArgs(SILModule &silModule,
                                  TypeSubstitutionFn subs,
                                  LookupConformanceFn conformances,
                                  TypeExpansionContext context) {
  // A non-polymorphic type has no generic arguments to substitute.
  auto self = CanSILFunctionType(this);
  if (!isPolymorphic())
    return self;

  SILTypeSubstituter visitor(silModule.Types, context, subs, conformances,
                             getSubstGenericSignature(),
                             /*shouldSubstituteOpaqueTypes*/ false);
  return visitor.substSILFunctionType(self, /*isGenericApplication*/ true);
}
CanSILFunctionType
SILFunctionType::substituteOpaqueArchetypes(TypeConverter &TC,
                                            TypeExpansionContext context) {
  // Only rewrite when the type actually mentions an opaque archetype and the
  // expansion context permits looking through it.
  if (!hasOpaqueArchetype() ||
      !context.shouldLookThroughOpaqueTypeArchetypes())
    return CanSILFunctionType(this);

  ReplaceOpaqueTypesWithUnderlyingTypes replacer(
      context.getContext(), context.getResilienceExpansion(),
      context.isWholeModuleContext());

  SILTypeSubstituter visitor(TC, context, replacer, replacer,
                             getSubstGenericSignature(),
                             /*shouldSubstituteOpaqueTypes*/ true);
  return visitor.substSILFunctionType(CanSILFunctionType(this),
                                      /*isGenericApplication*/ false);
}
/// Fast path for bridging types in a function type without uncurrying.
///
/// \param pattern abstraction pattern guiding bridging decisions.
/// \param t the formal function type to bridge.
/// \param extInfo the ext-info to apply to the resulting type.
CanAnyFunctionType
TypeConverter::getBridgedFunctionType(AbstractionPattern pattern,
                                      CanAnyFunctionType t,
                                      AnyFunctionType::ExtInfo extInfo,
                                      Bridgeability bridging) {
  // Pull out the generic signature.
  CanGenericSignature genericSig = t.getOptGenericSignature();

  switch (auto rep = t->getExtInfo().getSILRepresentation()) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::Closure:
  case SILFunctionTypeRepresentation::WitnessMethod: {
    // No bridging needed for native functions.
    if (t->getExtInfo() == extInfo)
      return t;
    return CanAnyFunctionType::get(genericSig, t.getParams(), t.getResult(),
                                   extInfo);
  }

  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::Block:
  case SILFunctionTypeRepresentation::ObjCMethod: {
    // Foreign representation: bridge each parameter and the result.
    SmallVector<AnyFunctionType::Param, 8> params;
    getBridgedParams(rep, pattern, t->getParams(), params, bridging);

    // A foreign-error convention may strip optionality from the result.
    bool suppressOptional = pattern.hasForeignErrorStrippingResultOptionality();
    auto result = getBridgedResultType(rep,
                                       pattern.getFunctionResultType(),
                                       t.getResult(),
                                       bridging,
                                       suppressOptional);

    return CanAnyFunctionType::get(genericSig, llvm::makeArrayRef(params),
                                   result, extInfo);
  }
  }
  llvm_unreachable("bad calling convention");
}
/// Return the AST function declaration behind a SILDeclRef whose kind can
/// have a foreign (bridged) counterpart, or null for kinds that never do.
static AbstractFunctionDecl *getBridgedFunction(SILDeclRef declRef) {
  switch (declRef.kind) {
  case SILDeclRef::Kind::Func:
  case SILDeclRef::Kind::Allocator:
  case SILDeclRef::Kind::Initializer: {
    // These kinds may bridge, but they still might not carry a decl
    // (e.g. closures).
    if (!declRef.hasDecl())
      return nullptr;
    return cast<AbstractFunctionDecl>(declRef.getDecl());
  }

  case SILDeclRef::Kind::EnumElement:
  case SILDeclRef::Kind::Destroyer:
  case SILDeclRef::Kind::Deallocator:
  case SILDeclRef::Kind::GlobalAccessor:
  case SILDeclRef::Kind::DefaultArgGenerator:
  case SILDeclRef::Kind::StoredPropertyInitializer:
  case SILDeclRef::Kind::PropertyWrapperBackingInitializer:
  case SILDeclRef::Kind::IVarInitializer:
  case SILDeclRef::Kind::IVarDestroyer:
    return nullptr;
  }
  llvm_unreachable("bad SILDeclRef kind");
}
/// Build the abstraction pattern used for bridging a constant's type.
///
/// For a foreign constant backed by a Clang declaration, the pattern wraps
/// the Clang type so bridging can consult the imported signature; otherwise
/// it is just the formal Swift function type.
static AbstractionPattern
getAbstractionPatternForConstant(ASTContext &ctx, SILDeclRef constant,
                                 CanAnyFunctionType fnType,
                                 unsigned numParameterLists) {
  // Only foreign constants with a Clang decl get a special pattern.
  if (!constant.isForeign)
    return AbstractionPattern(fnType);

  auto bridgedFn = getBridgedFunction(constant);
  if (!bridgedFn)
    return AbstractionPattern(fnType);
  const clang::Decl *clangDecl = bridgedFn->getClangDecl();
  if (!clangDecl)
    return AbstractionPattern(fnType);

  // Don't implicitly turn non-optional results to optional if
  // we're going to apply a foreign error convention that checks
  // for nil results.
  if (auto method = dyn_cast<clang::ObjCMethodDecl>(clangDecl)) {
    assert(numParameterLists == 2 && "getting curried ObjC method type?");
    auto foreignError = bridgedFn->getForeignErrorConvention();
    return AbstractionPattern::getCurriedObjCMethod(fnType, method,
                                                    foreignError);
  } else if (auto value = dyn_cast<clang::ValueDecl>(clangDecl)) {
    if (numParameterLists == 1) {
      // C function imported as a function.
      return AbstractionPattern(fnType, value->getType().getTypePtr());
    } else {
      assert(numParameterLists == 2);
      if (auto method = dyn_cast<clang::CXXMethodDecl>(clangDecl)) {
        // C++ method.
        return method->isOverloadedOperator()
                   ? AbstractionPattern::getCurriedCXXOperatorMethod(fnType,
                                                                     bridgedFn)
                   : AbstractionPattern::getCurriedCXXMethod(fnType, bridgedFn);
      } else {
        // C function imported as a method.
        return AbstractionPattern::getCurriedCFunctionAsMethod(fnType,
                                                               bridgedFn);
      }
    }
  }

  // Fallback: no special Clang-driven pattern applies.
  return AbstractionPattern(fnType);
}
/// Compute the bridged, uncurried formal type for a constant, together with
/// the abstraction pattern to use when lowering it.
///
/// For curried (method) constants this flattens `(Self) -> (Args) -> Result`
/// into `(Args..., Self) -> Result` after bridging parameters/results per
/// the constant's foreign representation.
TypeConverter::LoweredFormalTypes
TypeConverter::getLoweredFormalTypes(SILDeclRef constant,
                                     CanAnyFunctionType fnType) {
  // We always use full bridging when importing a constant because we can
  // directly bridge its arguments and results when calling it.
  auto bridging = Bridgeability::Full;

  unsigned numParameterLists = constant.getParameterListCount();

  auto extInfo = fnType->getExtInfo();

  // Form an abstraction pattern for bridging purposes.
  AbstractionPattern bridgingFnPattern =
    getAbstractionPatternForConstant(Context, constant, fnType,
                                     numParameterLists);

  // Fast path: no uncurrying required.
  if (numParameterLists == 1) {
    auto bridgedFnType =
      getBridgedFunctionType(bridgingFnPattern, fnType, extInfo, bridging);
    bridgingFnPattern.rewriteType(bridgingFnPattern.getGenericSignature(),
                                  bridgedFnType);
    return { bridgingFnPattern, bridgedFnType };
  }

  SILFunctionTypeRepresentation rep = extInfo.getSILRepresentation();
  assert(rep != SILFunctionType::Representation::Block
         && "objc blocks cannot be curried");

  // The dependent generic signature.
  CanGenericSignature genericSig = fnType.getOptGenericSignature();

  // The 'self' parameter.
  assert(fnType.getParams().size() == 1);
  AnyFunctionType::Param selfParam = fnType.getParams()[0];

  // The formal method parameters.
  // If we actually partially-apply this, assume we'll need a thick function.
  fnType = cast<FunctionType>(fnType.getResult());
  auto innerExtInfo =
    fnType->getExtInfo().withRepresentation(FunctionTypeRepresentation::Swift);
  auto methodParams = fnType->getParams();

  auto resultType = fnType.getResult();
  bool suppressOptionalResult =
    bridgingFnPattern.hasForeignErrorStrippingResultOptionality();

  // Bridge input and result types.
  SmallVector<AnyFunctionType::Param, 8> bridgedParams;
  CanType bridgedResultType;

  switch (rep) {
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::Closure:
  case SILFunctionTypeRepresentation::WitnessMethod:
    // Native functions don't need bridging.
    bridgedParams.append(methodParams.begin(), methodParams.end());
    bridgedResultType = resultType;
    break;

  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::CFunctionPointer: {
    if (rep == SILFunctionTypeRepresentation::ObjCMethod) {
      // The "self" parameter should not get bridged unless it's a metatype.
      if (selfParam.getPlainType()->is<AnyMetatypeType>()) {
        auto selfPattern = bridgingFnPattern.getFunctionParamType(0);
        selfParam = getBridgedParam(rep, selfPattern, selfParam, bridging);
      }
    }

    // The inner (partially-applied) function's pattern.
    auto partialFnPattern = bridgingFnPattern.getFunctionResultType();
    for (unsigned i : indices(methodParams)) {
      // C++ operators that are implemented as non-static member functions get
      // imported into Swift as static methods that have an additional
      // parameter for the left-hand-side operand instead of the receiver
      // object. These are inout parameters and don't get bridged.
      // TODO: Undo this if we stop using inout.
      if (auto method = dyn_cast_or_null<clang::CXXMethodDecl>(
              constant.getDecl()->getClangDecl())) {
        if (i==0 && method->isOverloadedOperator()) {
          bridgedParams.push_back(methodParams[0]);
          continue;
        }
      }
      auto paramPattern = partialFnPattern.getFunctionParamType(i);
      auto bridgedParam =
          getBridgedParam(rep, paramPattern, methodParams[i], bridging);
      bridgedParams.push_back(bridgedParam);
    }

    bridgedResultType =
      getBridgedResultType(rep,
                           partialFnPattern.getFunctionResultType(),
                           resultType, bridging, suppressOptionalResult);
    break;
  }

  case SILFunctionTypeRepresentation::Block:
    llvm_unreachable("Cannot uncurry native representation");
  }

  // Build the curried function type.
  auto inner =
    CanFunctionType::get(llvm::makeArrayRef(bridgedParams),
                         bridgedResultType, innerExtInfo);

  auto curried =
    CanAnyFunctionType::get(genericSig, {selfParam}, inner, extInfo);

  // Replace the type in the abstraction pattern with the curried type.
  bridgingFnPattern.rewriteType(genericSig, curried);

  // Build the uncurried function type.
  if (innerExtInfo.throws())
    extInfo = extInfo.withThrows(true);

  // Uncurried form appends self after the method parameters.
  bridgedParams.push_back(selfParam);

  auto uncurried =
    CanAnyFunctionType::get(genericSig,
                            llvm::makeArrayRef(bridgedParams),
                            bridgedResultType,
                            extInfo);

  return { bridgingFnPattern, uncurried };
}
// TODO: We should compare generic signatures. Class and witness methods
// allow variance in "self"-fulfilled parameters; other functions must
// match exactly.
// TODO: More sophisticated param and return ABI compatibility rules could
// diverge.
//
// Returns true if the two lowered types may be substituted for one another
// at the ABI level when used as a single parameter or return value.
// \p inFunction, when non-null, supplies the context used to substitute
// opaque archetypes with their underlying types.
// NOTE(review): the SILFunctionType branch near the bottom dereferences
// \p inFunction unconditionally even though the opaque-type branch treats it
// as nullable — confirm callers always pass a function when function types
// can occur here.
static bool areABICompatibleParamsOrReturns(SILType a, SILType b,
                                            SILFunction *inFunction) {
  // Address parameters are all ABI-compatible, though the referenced
  // values may not be. Assume whoever's doing this knows what they're
  // doing.
  if (a.isAddress() && b.isAddress())
    return true;
  // Addresses aren't compatible with values.
  // TODO: An exception for pointerish types?
  if (a.isAddress() || b.isAddress())
    return false;
  // Tuples are ABI compatible if their elements are.
  // TODO: Should destructure recursively.
  SmallVector<CanType, 1> aElements, bElements;
  if (auto tup = a.getAs<TupleType>()) {
    auto types = tup.getElementTypes();
    aElements.append(types.begin(), types.end());
  } else {
    aElements.push_back(a.getASTType());
  }
  if (auto tup = b.getAs<TupleType>()) {
    auto types = tup.getElementTypes();
    bElements.append(types.begin(), types.end());
  } else {
    bElements.push_back(b.getASTType());
  }
  if (aElements.size() != bElements.size())
    return false;
  // Compare the destructured elements pairwise; every pair must be
  // ABI-compatible for the whole to be.
  for (unsigned i : indices(aElements)) {
    auto aa = SILType::getPrimitiveObjectType(aElements[i]);
    auto bb = SILType::getPrimitiveObjectType(bElements[i]);
    // Equivalent types are always ABI-compatible.
    if (aa == bb)
      continue;
    // Opaque types are compatible with their substitution.
    if (inFunction) {
      auto opaqueTypesSubsituted = aa;
      auto *dc = inFunction->getDeclContext();
      auto *currentModule = inFunction->getModule().getSwiftModule();
      if (!dc || !dc->isChildContextOf(currentModule))
        dc = currentModule;
      ReplaceOpaqueTypesWithUnderlyingTypes replacer(
          dc, inFunction->getResilienceExpansion(),
          inFunction->getModule().isWholeModule());
      if (aa.getASTType()->hasOpaqueArchetype())
        opaqueTypesSubsituted = aa.subst(inFunction->getModule(), replacer,
                                         replacer, CanGenericSignature(), true);
      auto opaqueTypesSubsituted2 = bb;
      if (bb.getASTType()->hasOpaqueArchetype())
        opaqueTypesSubsituted2 =
            bb.subst(inFunction->getModule(), replacer, replacer,
                     CanGenericSignature(), true);
      if (opaqueTypesSubsituted == opaqueTypesSubsituted2)
        continue;
    }
    // FIXME: If one or both types are dependent, we can't accurately assess
    // whether they're ABI-compatible without a generic context. We can
    // do a better job here when dependent types are related to their
    // generic signatures.
    if (aa.hasTypeParameter() || bb.hasTypeParameter())
      continue;
    // Bridgeable object types are interchangeable.
    if (aa.isBridgeableObjectType() && bb.isBridgeableObjectType())
      continue;
    // Optional and IUO are interchangeable if their elements are.
    auto aObject = aa.getOptionalObjectType();
    auto bObject = bb.getOptionalObjectType();
    if (aObject && bObject &&
        areABICompatibleParamsOrReturns(aObject, bObject, inFunction))
      continue;
    // Optional objects are ABI-interchangeable with non-optionals;
    // None is represented by a null pointer.
    if (aObject && aObject.isBridgeableObjectType() &&
        bb.isBridgeableObjectType())
      continue;
    if (bObject && bObject.isBridgeableObjectType() &&
        aa.isBridgeableObjectType())
      continue;
    // Optional thick metatypes are ABI-interchangeable with non-optionals
    // too.
    if (aObject)
      if (auto aObjMeta = aObject.getAs<MetatypeType>())
        if (auto bMeta = bb.getAs<MetatypeType>())
          if (aObjMeta->getRepresentation() == bMeta->getRepresentation() &&
              bMeta->getRepresentation() != MetatypeRepresentation::Thin)
            continue;
    if (bObject)
      if (auto aMeta = aa.getAs<MetatypeType>())
        if (auto bObjMeta = bObject.getAs<MetatypeType>())
          if (aMeta->getRepresentation() == bObjMeta->getRepresentation() &&
              aMeta->getRepresentation() != MetatypeRepresentation::Thin)
            continue;
    // Function types are interchangeable if they're also ABI-compatible.
    if (auto aFunc = aa.getAs<SILFunctionType>()) {
      if (auto bFunc = bb.getAs<SILFunctionType>()) {
        // *NOTE* We swallow the specific error here for now. We will still get
        // that the function types are incompatible though, just not more
        // specific information.
        return aFunc->isABICompatibleWith(bFunc, *inFunction).isCompatible();
      }
    }
    // Metatypes are interchangeable with metatypes with the same
    // representation.
    if (auto aMeta = aa.getAs<MetatypeType>()) {
      if (auto bMeta = bb.getAs<MetatypeType>()) {
        if (aMeta->getRepresentation() == bMeta->getRepresentation())
          continue;
      }
    }
    // Other types must match exactly.
    return false;
  }
  return true;
}
namespace {
// File-local shorthand for the nested result type returned by the ABI
// compatibility checks below.
using ABICompatibilityCheckResult =
    SILFunctionType::ABICompatibilityCheckResult;
} // end anonymous namespace
/// Checks whether a function of this type may be used where a function of
/// type \p other is expected, at the ABI level.
///
/// Representation, result/parameter counts, and every per-value convention
/// must match exactly; individual storage types are compared with
/// areABICompatibleParamsOrReturns(). \p context supplies the module and
/// type-expansion context used to compute SIL storage types. Returns the
/// first incompatibility found, or None if fully compatible.
ABICompatibilityCheckResult
SILFunctionType::isABICompatibleWith(CanSILFunctionType other,
                                     SILFunction &context) const {
  // The calling convention and function representation can't be changed.
  if (getRepresentation() != other->getRepresentation())
    return ABICompatibilityCheckResult::DifferentFunctionRepresentations;
  // Check the results.
  if (getNumResults() != other->getNumResults())
    return ABICompatibilityCheckResult::DifferentNumberOfResults;
  for (unsigned i : indices(getResults())) {
    auto result1 = getResults()[i];
    auto result2 = other->getResults()[i];
    if (result1.getConvention() != result2.getConvention())
      return ABICompatibilityCheckResult::DifferentReturnValueConventions;
    if (!areABICompatibleParamsOrReturns(
            result1.getSILStorageType(context.getModule(), this,
                                      context.getTypeExpansionContext()),
            result2.getSILStorageType(context.getModule(), other,
                                      context.getTypeExpansionContext()),
            &context)) {
      return ABICompatibilityCheckResult::ABIIncompatibleReturnValues;
    }
  }
  // Our error result conventions are designed to be ABI compatible
  // with functions lacking error results. Just make sure that the
  // actual conventions match up.
  if (hasErrorResult() && other->hasErrorResult()) {
    auto error1 = getErrorResult();
    auto error2 = other->getErrorResult();
    if (error1.getConvention() != error2.getConvention())
      return ABICompatibilityCheckResult::DifferentErrorResultConventions;
    if (!areABICompatibleParamsOrReturns(
            error1.getSILStorageType(context.getModule(), this,
                                     context.getTypeExpansionContext()),
            error2.getSILStorageType(context.getModule(), other,
                                     context.getTypeExpansionContext()),
            &context))
      return ABICompatibilityCheckResult::ABIIncompatibleErrorResults;
  }
  // Check the parameters.
  // TODO: Could allow known-empty types to be inserted or removed, but SIL
  // doesn't know what empty types are yet.
  if (getParameters().size() != other->getParameters().size())
    return ABICompatibilityCheckResult::DifferentNumberOfParameters;
  for (unsigned i : indices(getParameters())) {
    auto param1 = getParameters()[i];
    auto param2 = other->getParameters()[i];
    if (param1.getConvention() != param2.getConvention())
      return {ABICompatibilityCheckResult::DifferingParameterConvention, i};
    if (!areABICompatibleParamsOrReturns(
            param1.getSILStorageType(context.getModule(), this,
                                     context.getTypeExpansionContext()),
            param2.getSILStorageType(context.getModule(), other,
                                     context.getTypeExpansionContext()),
            &context))
      return {ABICompatibilityCheckResult::ABIIncompatibleParameterType, i};
  }
  // This needs to be checked last because the result implies everything else
  // has already been checked and this is the only difference.
  if (isNoEscape() != other->isNoEscape() &&
      (getRepresentation() == SILFunctionType::Representation::Thick))
    return ABICompatibilityCheckResult::ABIEscapeToNoEscapeConversion;
  return ABICompatibilityCheckResult::None;
}
/// Returns a human-readable description of this ABI-compatibility check
/// result, suitable for inclusion in diagnostics.
StringRef SILFunctionType::ABICompatibilityCheckResult::getMessage() const {
  switch (kind) {
  case innerty::None:
    return "None";
  case innerty::DifferentFunctionRepresentations:
    return "Different function representations";
  case innerty::DifferentNumberOfResults:
    return "Different number of results";
  case innerty::DifferentReturnValueConventions:
    return "Different return value conventions";
  case innerty::ABIIncompatibleReturnValues:
    return "ABI incompatible return values";
  case innerty::DifferentErrorResultConventions:
    return "Different error result conventions";
  case innerty::ABIIncompatibleErrorResults:
    return "ABI incompatible error results";
  case innerty::DifferentNumberOfParameters:
    return "Different number of parameters";
  // These two have to do with specific parameters, so keep the error message
  // non-plural.
  case innerty::DifferingParameterConvention:
    return "Differing parameter convention";
  // Dropped the trailing period for consistency with every other message in
  // this switch.
  case innerty::ABIIncompatibleParameterType:
    return "ABI incompatible parameter type";
  case innerty::ABIEscapeToNoEscapeConversion:
    return "Escape to no escape conversion";
  }
  llvm_unreachable("Covered switch isn't completely covered?!");
}
/// Returns the decl context in which \p f's types should be expanded: the
/// function's own decl context when it belongs to the defining module,
/// otherwise the module itself.
static DeclContext *getDeclContextForExpansion(const SILFunction &f) {
  auto *currentModule = f.getModule().getSwiftModule();
  auto *dc = f.getDeclContext();
  // A missing context, or one from a different module, falls back to the
  // current module. (The original code first defaulted a null dc to the
  // module and then re-tested for null — the second test was dead.)
  if (!dc || !dc->isChildContextOf(currentModule))
    dc = currentModule;
  return dc;
}
// Builds a type-expansion context from a SIL function: its resilience
// expansion, its expansion decl context, and whether the module is built in
// whole-module mode.
TypeExpansionContext::TypeExpansionContext(const SILFunction &f)
    : expansion(f.getResilienceExpansion()),
      inContext(getDeclContextForExpansion(f)),
      isContextWholeModule(f.getModule().isWholeModule()) {}
/// Returns this function's lowered type, re-lowered for the given type
/// expansion context.
CanSILFunctionType SILFunction::getLoweredFunctionTypeInContext(
    TypeExpansionContext context) const {
  auto &types = getModule().Types;
  auto loweredTy = types.getLoweredType(getLoweredFunctionType(), context);
  return cast<SILFunctionType>(loweredTy.getASTType());
}
|
// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <endian.h>
#include <zircon/compiler.h>
#include <array>
#include <cstdint>
#include "gtest/gtest.h"
#include "src/connectivity/bluetooth/core/bt-host/common/test_helpers.h"
#include "src/connectivity/bluetooth/core/bt-host/hci/acl_data_packet.h"
#include "src/connectivity/bluetooth/core/bt-host/hci/control_packets.h"
using bt::ContainersEqual;
using bt::StaticByteBuffer;
namespace bt {
namespace hci {
namespace test {
namespace {
// Arbitrary opcode / event-code values exercised by the tests below.
constexpr OpCode kTestOpCode = 0x07FF;
constexpr EventCode kTestEventCode = 0xFF;
// Minimal one-byte packed payload used to exercise the typed payload
// accessors on command/event/ACL packets.
struct TestPayload {
  uint8_t foo;
} __PACKED;
// Verifies that a freshly allocated CommandPacket reports the opcode and
// payload size it was created with, and that the serialized form is the
// opcode (low byte first), the parameter-total-size byte, then the payload.
TEST(HCI_PacketTest, CommandPacket) {
  constexpr size_t kPayloadSize = sizeof(TestPayload);
  auto packet = CommandPacket::New(kTestOpCode, kPayloadSize);
  EXPECT_EQ(kTestOpCode, packet->opcode());
  EXPECT_EQ(kPayloadSize, packet->view().payload_size());
  packet->mutable_view()->mutable_payload<TestPayload>()->foo = 127;
  // clang-format off
  auto kExpected = CreateStaticByteBuffer(
      0xFF, 0x07,  // opcode (0x07FF)
      0x01,        // parameter_total_size
      0x7F         // foo
      );
  // clang-format on
  EXPECT_TRUE(ContainersEqual(kExpected, packet->view().data()));
}
// Verifies that an EventPacket initialized from raw bytes exposes the event
// code, payload size, and typed payload contents written into its buffer.
TEST(HCI_PacketTest, EventPacket) {
  constexpr size_t kPayloadSize = sizeof(TestPayload);
  auto packet = EventPacket::New(kPayloadSize);
  // clang-format off
  auto bytes = CreateStaticByteBuffer(
      0xFF,  // event code
      0x01,  // parameter_total_size
      0x7F   // foo
      );
  packet->mutable_view()->mutable_data().Write(bytes);
  packet->InitializeFromBuffer();
  // clang-format on
  EXPECT_EQ(kTestEventCode, packet->event_code());
  EXPECT_EQ(kPayloadSize, packet->view().payload_size());
  EXPECT_EQ(127, packet->view().payload<TestPayload>().foo);
}
// Verifies return_params<T>(): it must yield nullptr when the event code is
// not CommandComplete or when the payload is too small for T, and yield the
// typed return parameters otherwise.
TEST(HCI_PacketTest, EventPacketReturnParams) {
  // clang-format off
  auto correct_size_bad_event_code = CreateStaticByteBuffer(
      // Event header
      0xFF, 0x04,  // (event_code is not CommandComplete)
      // CommandCompleteEventParams
      0x01, 0xFF, 0x07,
      // Return parameters
      0x7F);
  auto cmd_complete_small_payload = CreateStaticByteBuffer(
      // Event header
      0x0E, 0x03,
      // CommandCompleteEventParams
      0x01, 0xFF, 0x07);
  auto valid = CreateStaticByteBuffer(
      // Event header
      0x0E, 0x04,
      // CommandCompleteEventParams
      0x01, 0xFF, 0x07,
      // Return parameters
      0x7F);
  // clang-format on
  // Allocate a large enough packet which we'll reuse for the 3 payloads.
  auto packet = EventPacket::New(valid.size());
  // If the event code or the payload size don't match, then return_params()
  // should return nullptr.
  packet->mutable_view()->mutable_data().Write(correct_size_bad_event_code);
  packet->InitializeFromBuffer();
  EXPECT_EQ(nullptr, packet->return_params<TestPayload>());
  packet->mutable_view()->mutable_data().Write(cmd_complete_small_payload);
  packet->InitializeFromBuffer();
  EXPECT_EQ(nullptr, packet->return_params<TestPayload>());
  // Reset packet size to the original so that |valid| can fit.
  // (InitializeFromBuffer above shrank the view to the smaller payload.)
  packet->mutable_view()->Resize(valid.size());
  // Valid case
  packet->mutable_view()->mutable_data().Write(valid);
  packet->InitializeFromBuffer();
  ASSERT_NE(nullptr, packet->return_params<TestPayload>());
  EXPECT_EQ(127, packet->return_params<TestPayload>()->foo);
}
// Exercises EventPacket::ToStatus() on a Disconnection Complete event whose
// status byte carries a protocol error (hardware failure).
TEST(HCI_PacketTest, EventPacketStatus) {
  // clang-format off
  auto event_bytes = CreateStaticByteBuffer(
      // Event header
      0x05, 0x04,  // (event_code is DisconnectionComplete)
      // Disconnection Complete event parameters
      0x03,        // status: hardware failure
      0x01, 0x00,  // handle: 0x0001
      0x16         // reason: terminated by local host
      );
  // clang-format on
  auto event_packet = EventPacket::New(event_bytes.size());
  event_packet->mutable_view()->mutable_data().Write(event_bytes);
  event_packet->InitializeFromBuffer();
  const Status decoded = event_packet->ToStatus();
  EXPECT_TRUE(decoded.is_protocol_error());
  EXPECT_EQ(StatusCode::kHardwareFailure, decoded.protocol_error());
}
// Exercises EventPacket::ToStatus() on a CommandComplete event whose return
// parameter carries a protocol error status (hardware failure).
TEST(HCI_PacketTest, CommandCompleteEventStatus) {
  // clang-format off
  auto evt = CreateStaticByteBuffer(
      // Event header
      0x0E, 0x04,  // (event code is CommandComplete)
      // CommandCompleteEventParams
      0x01, 0xFF, 0x07,
      // Return parameters (status: hardware failure)
      0x03);
  // clang-format on
  auto packet = EventPacket::New(evt.size());
  packet->mutable_view()->mutable_data().Write(evt);
  packet->InitializeFromBuffer();
  Status status = packet->ToStatus();
  EXPECT_TRUE(status.is_protocol_error());
  EXPECT_EQ(StatusCode::kHardwareFailure, status.protocol_error());
}
// Verifies that ToStatus() reports a host-side kPacketMalformed error (not a
// protocol error) when the event payload is shorter than its type requires.
TEST(HCI_PacketTest, EventPacketMalformed) {
  // clang-format off
  auto evt = CreateStaticByteBuffer(
      // Event header
      0x05, 0x03,  // (event_code is DisconnectionComplete)
      // Disconnection Complete event parameters
      0x03,        // status: hardware failure
      0x01, 0x00   // handle: 0x0001
      // event is one byte too short
      );
  // clang-format on
  auto packet = EventPacket::New(evt.size());
  packet->mutable_view()->mutable_data().Write(evt);
  packet->InitializeFromBuffer();
  Status status = packet->ToStatus();
  EXPECT_FALSE(status.is_protocol_error());
  EXPECT_EQ(HostError::kPacketMalformed, status.error());
}
// Verifies le_event_params<T>(): nullptr when the event is not an LE meta
// event (0x3E) or the subevent payload is too small for T, and the typed
// parameters otherwise.
TEST(HCI_PacketTest, LEEventParams) {
  // clang-format off
  auto correct_size_bad_event_code = CreateStaticByteBuffer(
      // Event header
      0xFF, 0x02,  // (event_code is not LEMetaEventCode)
      // Subevent code
      0xFF,
      // Subevent payload
      0x7F);
  auto payload_too_small = CreateStaticByteBuffer(
      0x3E, 0x01,
      // Subevent code
      0xFF);
  auto valid = CreateStaticByteBuffer(
      // Event header
      0x3E, 0x02,
      // Subevent code
      0xFF,
      // Subevent payload
      0x7F);
  // clang-format on
  auto packet = EventPacket::New(valid.size());
  // If the event code or the payload size don't match, then le_event_params()
  // should return nullptr.
  packet->mutable_view()->mutable_data().Write(correct_size_bad_event_code);
  packet->InitializeFromBuffer();
  EXPECT_EQ(nullptr, packet->le_event_params<TestPayload>());
  packet->mutable_view()->mutable_data().Write(payload_too_small);
  packet->InitializeFromBuffer();
  EXPECT_EQ(nullptr, packet->le_event_params<TestPayload>());
  // Valid case: restore the view to full size before writing |valid|.
  packet->mutable_view()->Resize(valid.size());
  packet->mutable_view()->mutable_data().Write(valid);
  packet->InitializeFromBuffer();
  EXPECT_NE(nullptr, packet->le_event_params<TestPayload>());
  EXPECT_EQ(127, packet->le_event_params<TestPayload>()->foo);
}
// Verifies that ACL data packets built from fields serialize the 12-bit
// connection handle plus the packet-boundary and broadcast flag bits into the
// first two bytes, followed by the little-endian data length and payload.
TEST(HCI_PacketTest, ACLDataPacketFromFields) {
  constexpr size_t kLargeDataLength = 10;
  constexpr size_t kSmallDataLength = 1;
  auto packet = ACLDataPacket::New(
      0x007F, ACLPacketBoundaryFlag::kContinuingFragment,
      ACLBroadcastFlag::kActiveSlaveBroadcast, kSmallDataLength);
  packet->mutable_view()->mutable_payload_data().Fill(0);
  // First 12-bits: 0x07F
  // Upper 4-bits: 0b0101
  EXPECT_TRUE(
      ContainersEqual(packet->view().data(),
                      std::array<uint8_t, 5>{{0x7F, 0x50, 0x01, 0x00, 0x00}}));
  packet = ACLDataPacket::New(0x0FFF, ACLPacketBoundaryFlag::kCompletePDU,
                              ACLBroadcastFlag::kActiveSlaveBroadcast,
                              kSmallDataLength);
  packet->mutable_view()->mutable_payload_data().Fill(0);
  // First 12-bits: 0xFFF
  // Upper 4-bits: 0b0111
  EXPECT_TRUE(
      ContainersEqual(packet->view().data(),
                      std::array<uint8_t, 5>{{0xFF, 0x7F, 0x01, 0x00, 0x00}}));
  packet =
      ACLDataPacket::New(0x0FFF, ACLPacketBoundaryFlag::kFirstNonFlushable,
                         ACLBroadcastFlag::kPointToPoint, kLargeDataLength);
  packet->mutable_view()->mutable_payload_data().Fill(0);
  // First 12-bits: 0xFFF
  // Upper 4-bits: 0b0000
  // NOTE(review): only 13 of the 14 array elements are listed below; the
  // final element value-initializes to 0x00, which matches the zero-filled
  // payload, so the comparison is still over 14 bytes.
  EXPECT_TRUE(ContainersEqual(
      packet->view().data(),
      std::array<uint8_t, 14>{{0xFF, 0x0F, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00,
                               0x00, 0x00, 0x00, 0x00, 0x00}}));
}
// The inverse of ACLDataPacketFromFields: parses raw ACL bytes and checks the
// decoded connection handle, boundary flag, broadcast flag, and payload size.
TEST(HCI_PacketTest, ACLDataPacketFromBuffer) {
  constexpr size_t kLargeDataLength = 256;
  constexpr size_t kSmallDataLength = 1;
  // The same test cases as ACLDataPacketFromFields test above but in the
  // opposite direction.
  // First 12-bits: 0x07F
  // Upper 4-bits: 0b0101
  auto bytes = CreateStaticByteBuffer(0x7F, 0x50, 0x01, 0x00, 0x00);
  auto packet = ACLDataPacket::New(kSmallDataLength);
  packet->mutable_view()->mutable_data().Write(bytes);
  packet->InitializeFromBuffer();
  EXPECT_EQ(0x007F, packet->connection_handle());
  EXPECT_EQ(ACLPacketBoundaryFlag::kContinuingFragment,
            packet->packet_boundary_flag());
  EXPECT_EQ(ACLBroadcastFlag::kActiveSlaveBroadcast, packet->broadcast_flag());
  EXPECT_EQ(kSmallDataLength, packet->view().payload_size());
  // First 12-bits: 0xFFF
  // Upper 4-bits: 0b0111
  bytes = CreateStaticByteBuffer(0xFF, 0x7F, 0x01, 0x00, 0x00);
  packet->mutable_view()->mutable_data().Write(bytes);
  packet->InitializeFromBuffer();
  EXPECT_EQ(0x0FFF, packet->connection_handle());
  EXPECT_EQ(ACLPacketBoundaryFlag::kCompletePDU,
            packet->packet_boundary_flag());
  EXPECT_EQ(ACLBroadcastFlag::kActiveSlaveBroadcast, packet->broadcast_flag());
  EXPECT_EQ(kSmallDataLength, packet->view().payload_size());
  // Header only: length field 0x0100 (=256) with no payload bytes written.
  packet = ACLDataPacket::New(kLargeDataLength);
  packet->mutable_view()->mutable_data().Write(
      CreateStaticByteBuffer(0xFF, 0x0F, 0x00, 0x01));
  packet->InitializeFromBuffer();
  EXPECT_EQ(0x0FFF, packet->connection_handle());
  EXPECT_EQ(ACLPacketBoundaryFlag::kFirstNonFlushable,
            packet->packet_boundary_flag());
  EXPECT_EQ(ACLBroadcastFlag::kPointToPoint, packet->broadcast_flag());
  EXPECT_EQ(kLargeDataLength, packet->view().payload_size());
}
} // namespace
} // namespace test
} // namespace hci
} // namespace bt
|
#include "shader/ShaderCore.h"
namespace ze
{
// Registers the shadercore module with the engine's default module system
// via the ZE_DEFINE_MODULE macro.
ZE_DEFINE_MODULE(ze::module::DefaultModule, shadercore);
}
|
#include "extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h"
#include "envoy/registry/registry.h"
#include "common/common/assert.h"
#include "extensions/filters/network/dubbo_proxy/message_impl.h"
#include "extensions/filters/network/dubbo_proxy/serializer_impl.h"
namespace Envoy {
namespace Extensions {
namespace NetworkFilters {
namespace DubboProxy {
namespace {
// Dubbo wire-protocol header constants.
constexpr uint16_t MagicNumber = 0xdabb;       // first two bytes of every frame
// Bit masks applied to the flag byte at FlagOffset.
constexpr uint8_t MessageTypeMask = 0x80;      // set => request, clear => response
constexpr uint8_t EventMask = 0x20;            // set => event (e.g. heartbeat)
constexpr uint8_t TwoWayMask = 0x40;           // set => two-way request
constexpr uint8_t SerializationTypeMask = 0x1f; // low 5 bits: serialization id
// Byte offsets of the fixed header fields.
constexpr uint64_t FlagOffset = 2;
constexpr uint64_t StatusOffset = 3;
constexpr uint64_t RequestIDOffset = 4;
constexpr uint64_t BodySizeOffset = 12;
} // namespace
// Consistent with the SerializationType
// Hessian2 is currently the only serialization this codec accepts.
bool isValidSerializationType(SerializationType type) {
  return type == SerializationType::Hessian2;
}
// Consistent with the ResponseStatus
// Accepts exactly the status codes defined by the Dubbo protocol; anything
// else is rejected.
bool isValidResponseStatus(ResponseStatus status) {
  switch (status) {
  case ResponseStatus::Ok:
  case ResponseStatus::ClientTimeout:
  case ResponseStatus::ServerTimeout:
  case ResponseStatus::BadRequest:
  case ResponseStatus::BadResponse:
  case ResponseStatus::ServiceNotFound:
  case ResponseStatus::ServiceError:
  case ResponseStatus::ClientError:
  case ResponseStatus::ServerThreadpoolExhaustedError:
    return true;
  default:
    return false;
  }
}
// Extracts request-specific metadata (two-way flag and serialization type)
// from the fixed-size Dubbo header at the front of |data|.
// Throws EnvoyException when the serialization type is unsupported.
void parseRequestInfoFromBuffer(Buffer::Instance& data, MessageMetadataSharedPtr metadata) {
  ASSERT(data.length() >= DubboProtocolImpl::MessageSize);
  uint8_t flag = data.peekInt<uint8_t>(FlagOffset);
  // The comparison already yields a bool; no `? true : false` needed.
  bool is_two_way = (flag & TwoWayMask) == TwoWayMask;
  SerializationType type = static_cast<SerializationType>(flag & SerializationTypeMask);
  if (!isValidSerializationType(type)) {
    throw EnvoyException(
        fmt::format("invalid dubbo message serialization type {}",
                    static_cast<std::underlying_type<SerializationType>::type>(type)));
  }
  // A one-way request expects no response; heartbeats keep their own type.
  if (!is_two_way && metadata->message_type() != MessageType::HeartbeatRequest) {
    metadata->setMessageType(MessageType::Oneway);
  }
  metadata->setSerializationType(type);
}
// Reads the response status byte out of the Dubbo header in |buffer| and
// records it on |metadata|; throws EnvoyException on an unknown status.
void parseResponseInfoFromBuffer(Buffer::Instance& buffer, MessageMetadataSharedPtr metadata) {
  ASSERT(buffer.length() >= DubboProtocolImpl::MessageSize);
  const auto raw_status = buffer.peekInt<uint8_t>(StatusOffset);
  const auto status = static_cast<ResponseStatus>(raw_status);
  if (!isValidResponseStatus(status)) {
    throw EnvoyException(
        fmt::format("invalid dubbo message response status {}",
                    static_cast<std::underlying_type<ResponseStatus>::type>(status)));
  }
  metadata->setResponseStatus(status);
}
// Decodes the fixed-size Dubbo header at the front of |buffer|.
//
// Returns {nullptr, false} when fewer than MessageSize bytes are available.
// On success returns {context, true} with header/body sizes and the heartbeat
// flag recorded on the context, and the request id, message type, and
// request/response details recorded on |metadata|. Throws EnvoyException on a
// null metadata pointer, bad magic number, invalid body size, or invalid
// serialization type / response status.
std::pair<ContextSharedPtr, bool>
DubboProtocolImpl::decodeHeader(Buffer::Instance& buffer, MessageMetadataSharedPtr metadata) {
  if (!metadata) {
    throw EnvoyException("invalid metadata parameter");
  }
  if (buffer.length() < DubboProtocolImpl::MessageSize) {
    return std::pair<ContextSharedPtr, bool>(nullptr, false);
  }
  uint16_t magic_number = buffer.peekBEInt<uint16_t>();
  if (magic_number != MagicNumber) {
    throw EnvoyException(fmt::format("invalid dubbo message magic number {}", magic_number));
  }
  uint8_t flag = buffer.peekInt<uint8_t>(FlagOffset);
  MessageType type =
      (flag & MessageTypeMask) == MessageTypeMask ? MessageType::Request : MessageType::Response;
  // The comparison already yields a bool; dropped the redundant `? true : false`.
  bool is_event = (flag & EventMask) == EventMask;
  int64_t request_id = buffer.peekBEInt<int64_t>(RequestIDOffset);
  int32_t body_size = buffer.peekBEInt<int32_t>(BodySizeOffset);
  // The body size of the heartbeat message is zero.
  if (body_size > MaxBodySize || body_size < 0) {
    throw EnvoyException(fmt::format("invalid dubbo message size {}", body_size));
  }
  metadata->setRequestId(request_id);
  if (type == MessageType::Request) {
    if (is_event) {
      type = MessageType::HeartbeatRequest;
    }
    metadata->setMessageType(type);
    parseRequestInfoFromBuffer(buffer, metadata);
  } else {
    if (is_event) {
      type = MessageType::HeartbeatResponse;
    }
    metadata->setMessageType(type);
    parseResponseInfoFromBuffer(buffer, metadata);
  }
  auto context = std::make_shared<ContextImpl>();
  context->set_header_size(DubboProtocolImpl::MessageSize);
  context->set_body_size(body_size);
  context->set_heartbeat(is_event);
  return std::pair<ContextSharedPtr, bool>(context, true);
}
// Deserializes the message body described by |context| out of |buffer|.
// Returns false when the full body has not yet been buffered or the
// serializer reports an incomplete payload; true on success. A response whose
// payload carries an exception is re-tagged MessageType::Exception.
bool DubboProtocolImpl::decodeData(Buffer::Instance& buffer, ContextSharedPtr context,
                                   MessageMetadataSharedPtr metadata) {
  ASSERT(serializer_);
  if ((buffer.length()) < static_cast<uint64_t>(context->body_size())) {
    return false;
  }
  switch (metadata->message_type()) {
  case MessageType::Oneway:
  case MessageType::Request: {
    auto ret = serializer_->deserializeRpcInvocation(buffer, context);
    if (!ret.second) {
      return false;
    }
    metadata->setInvocationInfo(ret.first);
    break;
  }
  case MessageType::Response: {
    auto ret = serializer_->deserializeRpcResult(buffer, context);
    if (!ret.second) {
      return false;
    }
    if (ret.first->hasException()) {
      metadata->setMessageType(MessageType::Exception);
    }
    break;
  }
  default:
    // Heartbeats and exceptions never reach body decoding.
    NOT_REACHED_GCOVR_EXCL_LINE;
  }
  return true;
}
// Encodes a Dubbo frame for |metadata| into |buffer|. Only heartbeat and
// regular responses are currently supported; request/oneway/exception
// encoding is not implemented.
bool DubboProtocolImpl::encode(Buffer::Instance& buffer, const MessageMetadata& metadata,
                               const std::string& content, RpcResponseType type) {
  ASSERT(serializer_);
  switch (metadata.message_type()) {
  case MessageType::HeartbeatResponse: {
    ASSERT(metadata.hasResponseStatus());
    ASSERT(content.empty());
    buffer.writeBEInt<uint16_t>(MagicNumber);
    // Flag byte: serialization-type bits (0x1f) with the event bit (0x20)
    // set; the masks are disjoint, so XOR here acts as a bitwise OR.
    uint8_t flag = static_cast<uint8_t>(metadata.serialization_type());
    flag = flag ^ EventMask;
    buffer.writeByte(flag);
    buffer.writeByte(static_cast<uint8_t>(metadata.response_status()));
    buffer.writeBEInt<uint64_t>(metadata.request_id());
    // Heartbeat bodies are empty, so the body size is zero.
    buffer.writeBEInt<uint32_t>(0);
    return true;
  }
  case MessageType::Response: {
    ASSERT(metadata.hasResponseStatus());
    ASSERT(!content.empty());
    // Serialize the body first so its size can be written into the header.
    Buffer::OwnedImpl body_buffer;
    size_t serialized_body_size = serializer_->serializeRpcResult(body_buffer, content, type);
    buffer.writeBEInt<uint16_t>(MagicNumber);
    buffer.writeByte(static_cast<uint8_t>(metadata.serialization_type()));
    buffer.writeByte(static_cast<uint8_t>(metadata.response_status()));
    buffer.writeBEInt<uint64_t>(metadata.request_id());
    buffer.writeBEInt<uint32_t>(serialized_body_size);
    buffer.move(body_buffer, serialized_body_size);
    return true;
  }
  case MessageType::Request:
  case MessageType::Oneway:
  case MessageType::Exception:
    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
  default:
    NOT_REACHED_GCOVR_EXCL_LINE;
  }
}
// Factory that creates DubboProtocolImpl instances for the Dubbo protocol
// type.
class DubboProtocolConfigFactory : public ProtocolFactoryBase<DubboProtocolImpl> {
public:
  DubboProtocolConfigFactory() : ProtocolFactoryBase(ProtocolType::Dubbo) {}
};
/**
 * Static registration for the Dubbo protocol. @see RegisterFactory.
 */
REGISTER_FACTORY(DubboProtocolConfigFactory, NamedProtocolConfigFactory);
} // namespace DubboProxy
} // namespace NetworkFilters
} // namespace Extensions
} // namespace Envoy
|
//
// TM & (c) 2017 Lucasfilm Entertainment Company Ltd. and Lucasfilm Ltd.
// All rights reserved. See LICENSE.txt for license.
//
#include <MaterialXFormat/XmlIo.h>
#include <MaterialXFormat/PugiXML/pugixml.hpp>
#include <MaterialXFormat/Util.h>
#include <MaterialXCore/Types.h>
#include <cstring>
#include <fstream>
#include <sstream>
using namespace pugi;
namespace MaterialX
{
// Canonical file extension for MaterialX documents.
const string MTLX_EXTENSION = "mtlx";
namespace {
// XInclude tag name, namespace declaration, and namespace URL used when
// reading and writing <xi:include> references.
const string XINCLUDE_TAG = "xi:include";
const string XINCLUDE_NAMESPACE = "xmlns:xi";
const string XINCLUDE_URL = "http://www.w3.org/2001/XInclude";
// Recursively populates |elem| from |xmlNode|: copies XML attributes
// (except "name", which becomes the element name) and creates a child
// element per XML child. Duplicate child names are skipped unless
// readOptions->generateUniqueNames is set, in which case a unique name is
// generated. XML comments become comment elements when
// readOptions->readComments is set.
void elementFromXml(const xml_node& xmlNode, ElementPtr elem, const XmlReadOptions* readOptions)
{
    // Store attributes in element.
    for (const xml_attribute& xmlAttr : xmlNode.attributes())
    {
        if (xmlAttr.name() != Element::NAME_ATTRIBUTE)
        {
            elem->setAttribute(xmlAttr.name(), xmlAttr.value());
        }
    }
    // Create child elements and recurse.
    for (const xml_node& xmlChild : xmlNode.children())
    {
        string category = xmlChild.name();
        string name;
        for (const xml_attribute& xmlAttr : xmlChild.attributes())
        {
            if (xmlAttr.name() == Element::NAME_ATTRIBUTE)
            {
                name = xmlAttr.value();
                break;
            }
        }
        // Check for duplicate elements.
        ConstElementPtr previous = elem->getChild(name);
        if (previous)
        {
            if (!readOptions || !readOptions->generateUniqueNames)
            {
                // Silently skip the duplicate child.
                continue;
            }
            else
            {
                name = elem->createValidChildName(name);
            }
        }
        // Create the new element.
        ElementPtr child = elem->addChildOfCategory(category, name);
        elementFromXml(xmlChild, child, readOptions);
        // Handle the interpretation of XML comments: comment nodes have an
        // empty name, so an empty category identifies them.
        if (readOptions && readOptions->readComments && category.empty())
        {
            child = elem->changeChildCategory(child, CommentElement::CATEGORY);
            child->setDocString(xmlChild.value());
        }
    }
}
// Recursively serializes |elem| into |xmlNode|: writes the element name and
// attributes, then its children. Children originating from a different
// source file are emitted as a single <xi:include> per source URI when
// writeXIncludeEnable is set; comment elements become XML comment nodes.
// Children rejected by writeOptions->elementPredicate are skipped.
void elementToXml(ConstElementPtr elem, xml_node& xmlNode, const XmlWriteOptions* writeOptions)
{
    bool writeXIncludeEnable = writeOptions ? writeOptions->writeXIncludeEnable : true;
    ElementPredicate elementPredicate = writeOptions ? writeOptions->elementPredicate : nullptr;
    // Store attributes in XML.
    if (!elem->getName().empty())
    {
        xmlNode.append_attribute(Element::NAME_ATTRIBUTE.c_str()) = elem->getName().c_str();
    }
    for (const string& attrName : elem->getAttributeNames())
    {
        xml_attribute xmlAttr = xmlNode.append_attribute(attrName.c_str());
        xmlAttr.set_value(elem->getAttribute(attrName).c_str());
    }
    // Create child nodes and recurse.
    StringSet writtenSourceFiles;
    for (auto child : elem->getChildren())
    {
        if (elementPredicate && !elementPredicate(child))
        {
            continue;
        }
        // Write XInclude references if requested.
        if (writeXIncludeEnable && child->hasSourceUri())
        {
            string sourceUri = child->getSourceUri();
            if (sourceUri != elem->getDocument()->getSourceUri())
            {
                // Emit at most one include directive per source file.
                if (!writtenSourceFiles.count(sourceUri))
                {
                    if (!xmlNode.attribute(XINCLUDE_NAMESPACE.c_str()))
                    {
                        xmlNode.append_attribute(XINCLUDE_NAMESPACE.c_str()) = XINCLUDE_URL.c_str();
                    }
                    xml_node includeNode = xmlNode.append_child(XINCLUDE_TAG.c_str());
                    xml_attribute includeAttr = includeNode.append_attribute("href");
                    FilePath includePath(sourceUri);
                    // Write relative include paths in Posix format, and absolute
                    // include paths in native format.
                    FilePath::Format includeFormat = includePath.isAbsolute() ?
                        FilePath::FormatNative : FilePath::FormatPosix;
                    includeAttr.set_value(includePath.asString(includeFormat).c_str());
                    writtenSourceFiles.insert(sourceUri);
                }
                continue;
            }
        }
        // Write XML comments.
        if (child->getCategory() == CommentElement::CATEGORY)
        {
            xml_node xmlChild = xmlNode.append_child(node_comment);
            xmlChild.set_value(child->getAttribute(Element::DOC_ATTRIBUTE).c_str());
            continue;
        }
        xml_node xmlChild = xmlNode.append_child(child->getCategory().c_str());
        elementToXml(child, xmlChild, writeOptions);
    }
}
// Resolves every <xi:include> child of |xmlNode|: each referenced file is
// read into a temporary library document (via readOptions->
// readXIncludeFunction, defaulting to readFromXmlFile) and imported into
// |doc|; the include directive is then removed from the XML tree. Throws
// ExceptionParseError when an include cycle is detected via
// readOptions->parentXIncludes.
void processXIncludes(DocumentPtr doc, xml_node& xmlNode, const FileSearchPath& searchPath, const XmlReadOptions* readOptions)
{
    // Search path for includes. Set empty and then evaluated once in the iteration through xml includes.
    FileSearchPath includeSearchPath;
    XmlReadFunction readXIncludeFunction = readOptions ? readOptions->readXIncludeFunction : readFromXmlFile;
    xml_node xmlChild = xmlNode.first_child();
    while (xmlChild)
    {
        if (xmlChild.name() == XINCLUDE_TAG)
        {
            // Read XInclude references if requested.
            if (readXIncludeFunction)
            {
                string filename = xmlChild.attribute("href").value();
                // Check for XInclude cycles.
                if (readOptions)
                {
                    const StringVec& parents = readOptions->parentXIncludes;
                    if (std::find(parents.begin(), parents.end(), filename) != parents.end())
                    {
                        throw ExceptionParseError("XInclude cycle detected.");
                    }
                }
                // Read the included file into a library document.
                DocumentPtr library = createDocument();
                XmlReadOptions xiReadOptions = readOptions ? *readOptions : XmlReadOptions();
                xiReadOptions.parentXIncludes.push_back(filename);
                // Prepend the directory of the parent to accommodate
                // includes relative to the parent file location.
                // (Computed lazily on the first include encountered.)
                if (includeSearchPath.isEmpty())
                {
                    string parentUri = doc->getSourceUri();
                    if (!parentUri.empty())
                    {
                        FilePath filePath = searchPath.find(parentUri);
                        if (!filePath.isEmpty())
                        {
                            // Remove the file name from the path as we want the path to the containing folder.
                            includeSearchPath = searchPath;
                            includeSearchPath.prepend(filePath.getParentPath());
                        }
                    }
                    // Set default search path if no parent path found
                    if (includeSearchPath.isEmpty())
                    {
                        includeSearchPath = searchPath;
                    }
                }
                readXIncludeFunction(library, filename, includeSearchPath, &xiReadOptions);
                // Import the library document.
                doc->importLibrary(library);
            }
            // Remove include directive. Advance before removal so the
            // iteration stays valid.
            xml_node includeNode = xmlChild;
            xmlChild = xmlChild.next_sibling();
            xmlNode.remove_child(includeNode);
        }
        else
        {
            xmlChild = xmlChild.next_sibling();
        }
    }
}
// Populates |doc| from a parsed XML document: resolves XInclude references
// using |searchPath|, converts the root element tree, then upgrades the
// document to the current MaterialX version.
void documentFromXml(DocumentPtr doc,
                     const xml_document& xmlDoc,
                     const FileSearchPath& searchPath = FileSearchPath(),
                     const XmlReadOptions* readOptions = nullptr)
{
    if (xml_node xmlRoot = xmlDoc.child(Document::CATEGORY.c_str()))
    {
        processXIncludes(doc, xmlRoot, searchPath, readOptions);
        elementFromXml(xmlRoot, doc, readOptions);
    }
    doc->upgradeVersion();
}
// No-op when |result| indicates success; otherwise throws
// ExceptionFileMissing for file-level failures (missing file, I/O error,
// out of memory) and ExceptionParseError, with location details, for
// malformed XML.
void validateParseResult(xml_parse_result& result, const FilePath& filename = FilePath())
{
    if (result)
    {
        return;
    }
    const bool fileFailure = result.status == xml_parse_status::status_file_not_found ||
                             result.status == xml_parse_status::status_io_error ||
                             result.status == xml_parse_status::status_out_of_memory;
    if (fileFailure)
    {
        throw ExceptionFileMissing("Failed to open file for reading: " + filename.asString());
    }
    string message = "XML parse error";
    if (!filename.isEmpty())
    {
        message += " in " + filename.asString();
    }
    message += " (" + string(result.description()) +
               " at character " + std::to_string(result.offset) + ")";
    throw ExceptionParseError(message);
}
// Maps XmlReadOptions onto pugixml parse flags; comment nodes are only
// parsed when explicitly requested.
unsigned int getParseOptions(const XmlReadOptions* readOptions)
{
    const bool keepComments = readOptions && readOptions->readComments;
    return keepComments ? (parse_default | parse_comments) : parse_default;
}
// Merges the document's looks into a single look when the export options
// request it, targeting exportOptions->lookGroupToMerge.
void mergeLooks(DocumentPtr doc, const XmlExportOptions* exportOptions)
{
    if (exportOptions && exportOptions->mergeLooks)
    {
        doc->mergeLooks(exportOptions->lookGroupToMerge);
    }
}
} // anonymous namespace
//
// XmlReadOptions methods
//
// Defaults: resolve XIncludes with readFromXmlFile, and skip XML comments.
XmlReadOptions::XmlReadOptions() :
    readXIncludeFunction(readFromXmlFile),
    readComments(false)
{
}
//
// XmlWriteOptions methods
//
// Default: emit <xi:include> references for content from other source files.
XmlWriteOptions::XmlWriteOptions() :
    writeXIncludeEnable(true)
{
}
//
// XmlExportOptions methods
//
// Default: inherit write options and leave looks unmerged.
XmlExportOptions::XmlExportOptions() :
    XmlWriteOptions(),
    mergeLooks(false)
{
}
//
// Reading
//
// Reads a document from a null-terminated XML string in memory. No search
// path is available, so XIncludes resolve against the current directory /
// environment only. Throws on parse failure via validateParseResult.
void readFromXmlBuffer(DocumentPtr doc, const char* buffer, const XmlReadOptions* readOptions)
{
    xml_document xmlDoc;
    xml_parse_result result = xmlDoc.load_string(buffer, getParseOptions(readOptions));
    validateParseResult(result);
    documentFromXml(doc, xmlDoc, EMPTY_STRING, readOptions);
}
// Reads a document from an input stream. Throws on parse failure via
// validateParseResult.
void readFromXmlStream(DocumentPtr doc, std::istream& stream, const XmlReadOptions* readOptions)
{
    xml_document xmlDoc;
    xml_parse_result result = xmlDoc.load(stream, getParseOptions(readOptions));
    validateParseResult(result);
    documentFromXml(doc, xmlDoc, EMPTY_STRING, readOptions);
}
// Reads a MaterialX document from a file on disk. The filename is first
// resolved against the given search path extended with the
// environment-provided search locations.
void readFromXmlFile(DocumentPtr doc, FilePath filename, FileSearchPath searchPath, const XmlReadOptions* readOptions)
{
xml_document xmlDoc;
// Resolve the filename against the combined search path.
searchPath.append(getEnvironmentPath());
filename = searchPath.find(filename);
xml_parse_result result = xmlDoc.load_file(filename.asString().c_str(), getParseOptions(readOptions));
validateParseResult(result, filename);
// This must be done before parsing the XML as the source URI
// is used for searching for include files.
if (readOptions && !readOptions->parentXIncludes.empty())
{
// When this file is loaded as part of an XInclude chain, attribute the
// document to the top-level including file rather than this file.
doc->setSourceUri(readOptions->parentXIncludes[0]);
}
else
{
doc->setSourceUri(filename);
}
documentFromXml(doc, xmlDoc, searchPath, readOptions);
}
// Populates a document from an in-memory XML string by delegating to the
// stream reader.
void readFromXmlString(DocumentPtr doc, const string& str, const XmlReadOptions* readOptions)
{
    std::istringstream xmlStream(str);
    readFromXmlStream(doc, xmlStream, readOptions);
}
//
// Writing
//
// Serializes a document as XML to the given output stream, rooted at a
// "materialx" element.
void writeToXmlStream(DocumentPtr doc, std::ostream& stream, const XmlWriteOptions* writeOptions)
{
    xml_document outputDoc;
    xml_node rootNode = outputDoc.append_child("materialx");
    elementToXml(doc, rootNode, writeOptions);
    outputDoc.save(stream, " ");
}
// Serializes a document as XML to the named file.
void writeToXmlFile(DocumentPtr doc, const FilePath& filename, const XmlWriteOptions* writeOptions)
{
    std::ofstream outputFile(filename.asString());
    writeToXmlStream(doc, outputFile, writeOptions);
}
// Serializes a document as XML and returns the result as a string.
string writeToXmlString(DocumentPtr doc, const XmlWriteOptions* writeOptions)
{
    std::ostringstream buffer;
    writeToXmlStream(doc, buffer, writeOptions);
    return buffer.str();
}
// Exports a document to an output stream: optionally merges looks and
// flattens filename references, then writes the XML.
void exportToXmlStream(DocumentPtr doc, std::ostream& stream, const XmlExportOptions* exportOptions)
{
    mergeLooks(doc, exportOptions);
    const bool flatten = exportOptions && exportOptions->flattenFilenames;
    if (flatten)
    {
        FileSearchPath resolvedTexturePath = getResolvedTexturePath(exportOptions->userTexturePath, exportOptions->userDefinitionPath);
        flattenFilenames(doc, resolvedTexturePath, exportOptions->stringResolver);
    }
    writeToXmlStream(doc, stream, exportOptions);
}
// Exports a document to the named file: optionally merges looks and
// flattens filename references, then writes the XML.
void exportToXmlFile(DocumentPtr doc, const FilePath& filename, const XmlExportOptions* exportOptions)
{
    mergeLooks(doc, exportOptions);
    const bool flatten = exportOptions && exportOptions->flattenFilenames;
    if (flatten)
    {
        FileSearchPath resolvedTexturePath = getResolvedTexturePath(exportOptions->userTexturePath, exportOptions->userDefinitionPath);
        flattenFilenames(doc, resolvedTexturePath, exportOptions->stringResolver);
    }
    writeToXmlFile(doc, filename, exportOptions);
}
// Exports a document to a string: optionally merges looks and flattens
// filename references, then returns the serialized XML.
string exportToXmlString(DocumentPtr doc, const XmlExportOptions* exportOptions)
{
    mergeLooks(doc, exportOptions);
    const bool flatten = exportOptions && exportOptions->flattenFilenames;
    if (flatten)
    {
        FileSearchPath resolvedTexturePath = getResolvedTexturePath(exportOptions->userTexturePath, exportOptions->userDefinitionPath);
        flattenFilenames(doc, resolvedTexturePath, exportOptions->stringResolver);
    }
    return writeToXmlString(doc, exportOptions);
}
// Inserts an XInclude reference to the given file as the document's
// first child. Empty filenames are ignored.
void prependXInclude(DocumentPtr doc, const FilePath& filename)
{
    if (filename.isEmpty())
    {
        return;
    }
    ElementPtr xincludeElem = doc->addNode("xinclude");
    xincludeElem->setSourceUri(filename.asString());
    doc->setChildIndex(xincludeElem->getName(), 0);
}
} // namespace MaterialX
|
#pragma once
#include "pr/pr_problem.hxx"
#include "intrinsics.hxx"
using namespace gunrock::util;
namespace gunrock {
namespace pr {
// Device-side functor callbacks for a PageRank iteration (filter,
// advance, and reduce stages).
struct pr_functor_t {
// Filter stage: recompute the rank of vertex `idx` from the reduced
// neighbor contributions, then keep the vertex active while its rank is
// still changing by more than 0.1% of its previous value.
static __device__ __forceinline__ bool cond_filter(int idx, pr_problem_t::data_slice_t *data, int iteration) {
float old_value = data->d_current_ranks[idx];
// Damped update: 0.15 + 0.85 * (reduced rank sum / degree); vertices
// with degree 0 keep only the bare damping term.
float new_value = (data->d_degrees[idx] > 0) ? (0.15f + 0.85f * data->d_reduced_ranks[idx] / data->d_degrees[idx]) : 0.15f;
// Guard against NaN/Inf from degenerate inputs.
if (!isfinite(new_value)) new_value = 0;
data->d_current_ranks[idx] = new_value;
return (fabs(new_value-old_value) > (0.001f*old_value));
}
// Advance stage condition: visit every edge unconditionally.
// NOTE(review): `rank` appears unused — presumably part of a fixed
// functor signature required by the enactor; confirm against its
// interface.
static __device__ __forceinline__ bool cond_advance(int src, int dst, int edge_id, int rank, int output_idx, pr_problem_t::data_slice_t *data, int iteration) {
return true;
}
// Advance stage apply: no per-edge state is written; always succeeds.
static __device__ __forceinline__ bool apply_advance(int src, int dst, int edge_id, int rank, int output_idx, pr_problem_t::data_slice_t *data, int iteration) {
return true;
}
// Reduce stage: the value vertex `idx` contributes to the rank sum;
// non-finite ranks contribute zero.
static __device__ __forceinline__ float get_value_to_reduce(int idx, pr_problem_t::data_slice_t *data, int iteration) {
return (isfinite(data->d_current_ranks[idx]) ? data->d_current_ranks[idx]:0);
}
};
}// end of pr
}// end of gunrock
|
// Version and identity metadata for the "Hello World" Far Manager plugin.
#include <farversion.hpp>
// Plugin-local build number, folded into PLUGIN_VERSION below.
#define PLUGIN_BUILD 9
#define PLUGIN_DESC L"Hello World Plugin for Far Manager"
#define PLUGIN_NAME L"HelloWorld"
#define PLUGIN_FILENAME L"HelloWorld.dll"
// Reuse the Far Manager company name as the plugin author.
#define PLUGIN_AUTHOR FARCOMPANYNAME
// Version tracks the host Far Manager release, with the plugin's own
// build number substituted in.
#define PLUGIN_VERSION MAKEFARVERSION(FARMANAGERVERSION_MAJOR,FARMANAGERVERSION_MINOR,FARMANAGERVERSION_REVISION,PLUGIN_BUILD,VS_RELEASE)
|
/*
-----------------------------------------------------------------------------
This source file is part of OGRE
(Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org/
Copyright (c) 2000-2014 Torus Knot Software Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
#include "OgreShaderPrecompiledHeaders.h"
namespace Ogre {
namespace RTShader {
String GLSLESProgramWriter::TargetLanguage = "glsles";
//-----------------------------------------------------------------------
// Queries the active render system for the native shading-language
// version (falling back to 100 when no render system is available) and
// primes the type-name lookup tables and the function cache.
GLSLESProgramWriter::GLSLESProgramWriter()
{
mIsGLSLES = true;
auto* rs = Root::getSingleton().getRenderSystem();
mGLSLVersion = rs ? rs->getNativeShadingLanguageVersion() : 100;
initializeStringMaps();
mFunctionCacheMap.clear();
}
//-----------------------------------------------------------------------
// Nothing to release explicitly; members clean up via their own
// destructors.
GLSLESProgramWriter::~GLSLESProgramWriter()
{
}
// Parses a textual GLSL signature of the form
// "<returnType> <name>(<qualifier> <type> <name>, ...)" into a newly
// allocated FunctionInvocation whose operands mirror the parameter list.
// The caller owns the returned pointer.
// NOTE(review): assumes a well-formed signature — leftTokens2[1] and the
// three-way parameter split below are accessed without size checks.
FunctionInvocation * GLSLESProgramWriter::createInvocationFromString(const String & input)
{
String functionName, returnType;
FunctionInvocation *invoc = NULL;
// Get the function name and return type
StringVector leftTokens = StringUtil::split(input, "(");
StringVector leftTokens2 = StringUtil::split(leftTokens[0], " ");
StringUtil::trim(leftTokens2[0]);
StringUtil::trim(leftTokens2[1]);
returnType = leftTokens2[0];
functionName = leftTokens2[1];
invoc = OGRE_NEW FunctionInvocation(functionName, 0, returnType);
// Split out the parameters
StringVector parameters;
String::size_type lparen_pos = input.find('(', 0);
if(lparen_pos != String::npos)
{
StringVector tokens = StringUtil::split(input, "(");
parameters = StringUtil::split(tokens[1], ",");
}
else
{
parameters = StringUtil::split(input, ",");
}
StringVector::iterator itParam;
int i = 0;
for(itParam = parameters.begin(); itParam != parameters.end(); ++itParam, i++)
{
// Strip the closing paren so the last parameter tokenises cleanly.
*itParam = StringUtil::replaceAll(*itParam, ")", "");
StringVector paramTokens = StringUtil::split(*itParam, " ");
// There should be three parts for each token
// 1. The operand type(in, out, inout)
// 2. The type
// 3. The name
if(paramTokens.size() == 3)
{
StringUtil::trim(paramTokens[0]);
StringUtil::trim(paramTokens[1]);
StringUtil::trim(paramTokens[2]);
Operand::OpSemantic semantic = Operand::OPS_IN;
GpuConstantType gpuType = GCT_UNKNOWN;
if(paramTokens[0] == "in")
{
semantic = Operand::OPS_IN;
}
else if(paramTokens[0] == "out")
{
semantic = Operand::OPS_OUT;
}
else if(paramTokens[0] == "inout")
{
semantic = Operand::OPS_INOUT;
}
// Find the internal type based on the string that we're given.
GpuConstTypeToStringMapIterator typeMapIterator;
for(typeMapIterator = mGpuConstTypeMap.begin(); typeMapIterator != mGpuConstTypeMap.end(); ++typeMapIterator)
{
if((*typeMapIterator).second == paramTokens[1])
{
gpuType = (*typeMapIterator).first;
break;
}
}
// We need a valid type otherwise glsl compilation will not work
if (gpuType == GCT_UNKNOWN)
{
OGRE_EXCEPT( Exception::ERR_INTERNAL_ERROR,
"Can not convert '"+paramTokens[1]+"' to GpuConstantType",
"GLSLESProgramWriter::createInvocationFromString" );
}
// 1D samplers are remapped to 2D samplers here.
if(gpuType == GCT_SAMPLER1D)
gpuType = GCT_SAMPLER2D;
ParameterPtr p = ParameterPtr(OGRE_NEW Parameter(gpuType, paramTokens[2],
Parameter::SPS_UNKNOWN, i,
Parameter::SPC_UNKNOWN));
invoc->pushOperand(p, semantic);
}
}
return invoc;
}
//-----------------------------------------------------------------------
// Recursively collects into depVector every cached function invoked by
// the body of `invoc`: the body is split at '(' and the token directly
// preceding each paren is matched against cached function names. Logs an
// error if `invoc` itself is not in the cache.
// NOTE(review): the same dependency can be appended multiple times, and
// a call cycle among cached functions would recurse without bound —
// assumed not to occur in the shader libraries.
void GLSLESProgramWriter::discoverFunctionDependencies(const FunctionInvocation &invoc, FunctionVector &depVector)
{
// Uses recursion to find any functions that the supplied function invocation depends on
FunctionMap::const_iterator itCache = mFunctionCacheMap.begin();
String body;
// Find the function in the cache and retrieve the body
for (; itCache != mFunctionCacheMap.end(); ++itCache)
{
if(!(invoc == (*itCache).first))
continue;
body = (*itCache).second;
break;
}
if(!body.empty())
{
// Trim whitespace
StringUtil::trim(body);
StringVector tokens = StringUtil::split(body, "(");
for (StringVector::const_iterator it = tokens.begin(); it != tokens.end(); ++it)
{
StringVector moreTokens = StringUtil::split(*it, " \n");
if (!moreTokens.empty())
{
FunctionMap::const_iterator itFuncCache = mFunctionCacheMap.begin();
for (; itFuncCache != mFunctionCacheMap.end(); ++itFuncCache)
{
const FunctionInvocation& fi = itFuncCache->first;
// The token immediately before the '(' is the callee name.
if(fi.getFunctionName() == moreTokens.back())
{
// Add the function declaration
depVector.push_back(FunctionInvocation((*itFuncCache).first));
discoverFunctionDependencies(itFuncCache->first, depVector);
}
}
}
}
}
else
{
LogManager::getSingleton().logError("Cached function not found " + invoc.getFunctionName());
}
}
//-----------------------------------------------------------------------
// Emits the complete GLSL ES source for `program`: the #version line
// (with the " es" suffix required from ES 3.0 on), any required
// extension directives, default precision qualifiers, ES3 texture
// compatibility defines, the program title, dependency functions, and
// finally the main source body.
void GLSLESProgramWriter::writeSourceCode(
std::ostream& os,
Program* program)
{
// Write the current version (this forces the driver to fulfill the glsl es standard)
os << "#version "<< mGLSLVersion;
// Starting with ES 3.0 the version must contain the string "es" after the version number with a space separating them
if(mGLSLVersion > 100)
os << " es";
os << std::endl;
// Request the external-image extension only if an external OES sampler
// parameter is actually present.
for(const auto& p : program->getParameters())
{
if(p->getType() != GCT_SAMPLER_EXTERNAL_OES)
continue;
if(mGLSLVersion > 100)
os << "#extension GL_OES_EGL_image_external_essl3 : require\n";
else
os << "#extension GL_OES_EGL_image_external : require\n";
break;
}
// Default precision declaration is required in fragment and vertex shaders.
os << "precision highp float;" << std::endl;
os << "precision highp int;" << std::endl;
if(mGLSLVersion > 100)
{
// sampler3D has no default precision
os << "precision highp sampler3D;" << std::endl;
// Redefine texture functions to maintain reusability
os << "#define texture2D texture" << std::endl;
os << "#define texture3D texture" << std::endl;
os << "#define textureCube texture" << std::endl;
os << "#define texture2DLod textureLod" << std::endl;
}
// Generate source code header.
writeProgramTitle(os, program);
os<< std::endl;
// Embed dependencies.
writeProgramDependencies(os, program);
os << std::endl;
writeMainSourceCode(os, program);
}
//-----------------------------------------------------------------------
// Here's the gist of how and what we're doing here.
// First, identify which fixed function libraries we need. We already have a list of functions that we need to find.
// Then for each library file we perform the following steps:
// 1. Go through the source line by line to find function signatures.
// 2. Once we have found one, compare it to the list of functions that we are searching for
// 3. If a match is found then continue reading through the file until we reach the end of the function.
// 4. When we reach the last closing brace, write the function and its signature to the output stream
// 5. Go back to step 1 until we have found all the functions
//
// Writes the program's library dependencies: caches each dependency
// library, emits forward declarations for every (deduplicated) function
// invocation, the libraries' #defines, and finally the bodies of only
// the functions that are actually used.
// NOTE(review): only the first entry of the program's function list is
// scanned for invocations — presumably programs here contain a single
// function; confirm if multi-function programs are possible.
void GLSLESProgramWriter::writeProgramDependencies(
std::ostream& os,
Program* program)
{
// Parse each dependency library into the function/define caches.
for(unsigned int i = 0; i < program->getDependencyCount(); ++i)
{
const String& curDependency = program->getDependency(i);
cacheDependencyFunctions(curDependency);
}
os << "//-----------------------------------------------------------------------------" << std::endl;
os << "// PROGRAM DEPENDENCIES" << std::endl;
os << "//-----------------------------------------------------------------------------" << std::endl;
FunctionVector forwardDecl; // Holds all function declarations
const ShaderFunctionList& functionList = program->getFunctions();
Function* curFunction = *(functionList.begin());
const FunctionAtomInstanceList& atomInstances = curFunction->getAtomInstances();
FunctionAtomInstanceConstIterator itAtom = atomInstances.begin();
FunctionAtomInstanceConstIterator itAtomEnd = atomInstances.end();
// Now iterate over all function atoms
for ( ; itAtom != itAtomEnd; ++itAtom)
{
// Skip non function invocation atoms.
if (!dynamic_cast<const FunctionInvocation*>(*itAtom))
continue;
FunctionInvocation pFuncInvoc = *(static_cast<FunctionInvocation *>(*itAtom));
forwardDecl.push_back(pFuncInvoc);
// Now look into that function for other non-builtin functions and add them to the declaration list
// Look for non-builtin functions
// Do so by assuming that these functions do not have several variations.
// Also, because GLSL is C based, functions must be defined before they are used
// so we can make the assumption that we already have this function cached.
//
// If we find a function, look it up in the map and write it out
discoverFunctionDependencies(pFuncInvoc, forwardDecl);
}
// Now remove duplicate declarations, first we have to sort the vector.
std::sort(forwardDecl.begin(), forwardDecl.end(), FunctionInvocation::FunctionInvocationLessThan());
forwardDecl.erase(std::unique(forwardDecl.begin(), forwardDecl.end(), FunctionInvocation::FunctionInvocationCompare()), forwardDecl.end());
// Write forward declarations as we did not sort by dependency
for (auto& decl : forwardDecl)
{
writeFunctionDeclaration(os, decl, false);
os << ";\n";
}
// Emit every cached #define that belongs to one of the dependencies.
for(unsigned int i = 0; i < program->getDependencyCount(); ++i)
{
const String& curDependency = program->getDependency(i);
// Write out #defines
StringMap::const_iterator itDefines = mDefinesMap.begin();
StringMap::const_iterator itDefinesEnd = mDefinesMap.end();
for (; itDefines != itDefinesEnd; ++itDefines)
{
if((*itDefines).second == curDependency)
{
os << (*itDefines).first;
os << "\n";
}
}
}
// Parse the source shader and write out only the needed functions
for (FunctionVector::const_iterator it = forwardDecl.begin(); it != forwardDecl.end(); ++it)
{
FunctionMap::const_iterator itCache = mFunctionCacheMap.begin();
FunctionInvocation invoc = FunctionInvocation("", 0);
String body;
// Find the function in the cache
for (; itCache != mFunctionCacheMap.end(); ++itCache)
{
if(!((*it) == (*itCache).first))
continue;
invoc = (*itCache).first;
body = (*itCache).second;
break;
}
// Only emit a body when the cache lookup succeeded.
if(invoc.getFunctionName().length())
{
writeFunctionDeclaration(os, invoc);
os << std::endl << "{" << std::endl << body << std::endl << "}" << std::endl;
}
}
}
// Returns true when the given token names one of the built-in scalar,
// vector, matrix, or sampler types recognised by the library parser.
// NOTE(review): "int2/int3/int4" look like HLSL/CG spellings rather than
// GLSL's ivec2/ivec3/ivec4 — kept as-is to preserve behavior.
bool GLSLESProgramWriter::isBasicType(String &type)
{
    static const char* const knownTypes[] =
    {
        "void", "bool", "float",
        "vec2", "vec3", "vec4",
        "sampler2D", "samplerCube",
        "mat2", "mat3", "mat4",
        "int", "int2", "int3", "int4"
    };
    for (const char* candidate : knownTypes)
    {
        if (type == candidate)
            return true;
    }
    return false;
}
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
// Parses the shader library <libName>.glsl and caches every function it
// defines (signature -> body) in mFunctionCacheMap, and every #define in
// mDefinesMap. Each library is parsed at most once; repeat calls return
// immediately.
void GLSLESProgramWriter::cacheDependencyFunctions(const String & libName)
{
    if(mCachedFunctionLibraries.find(libName) != mCachedFunctionLibraries.end())
    {
        // This library has already been parsed and cached.
        return;
    }
    mCachedFunctionLibraries[libName] = "";

    String libFileName = libName + ".glsl";
    DataStreamPtr stream = ResourceGroupManager::getSingleton().openResource(libFileName);

    String line;
    while(!stream->eof())
    {
        // Grab a line
        line = stream->getLine();
        // Ignore empty lines
        if(line.length() > 0)
        {
            // Strip whitespace
            StringUtil::trim(line);
            // If we find a multiline comment, run through till we get to the end.
            // Guard the second-character access: a one-character line
            // previously threw std::out_of_range here via at(1).
            if(line.length() > 1 && line[0] == '/' && line[1] == '*')
            {
                bool endFound = false;
                // Also stop at end-of-stream so an unterminated comment
                // cannot loop forever.
                while(!endFound && !stream->eof())
                {
                    // Get the next line
                    line = stream->getLine();
                    // Skip empties
                    if(line.length() > 0)
                    {
                        // Look for the ending sequence.
                        if(line.find("*/", 0) != String::npos)
                        {
                            endFound = true;
                        }
                    }
                }
            }
            // Process any line that is not a "//" line comment.
            // The original condition (at(0) != '/' && at(1) != '/') also
            // skipped lines whose *second* character happened to be '/'.
            else if(line.length() > 1 && !(line[0] == '/' && line[1] == '/'))
            {
                // Break up the line.
                StringVector tokens = StringUtil::tokenise(line, " (\n\r");
                // Cache #defines so they can be emitted with this library.
                if(tokens[0] == "#define")
                {
                    // Add the line in
                    mDefinesMap[line] = libName;
                    // Move on to the next line in the shader
                    continue;
                }
                // Try to identify a function definition: it starts with a
                // basic return type and is not an assignment.
                if(isBasicType(tokens[0]) && ((tokens.size() < 3) || (tokens[2] != "=")) )
                {
                    String functionSig = "";
                    String functionBody = "";
                    FunctionInvocation *functionInvoc = NULL;
                    // Return type
                    functionSig = tokens[0];
                    functionSig += " ";
                    // Function name
                    functionSig += tokens[1];
                    functionSig += "(";
                    bool foundEndOfSignature = false;
                    // Accumulate the parameter list; it may span multiple
                    // lines. Stop at end-of-stream to avoid spinning on a
                    // truncated file.
                    while(!foundEndOfSignature && !stream->eof())
                    {
                        // Trim whitespace from both sides of the line
                        StringUtil::trim(line);
                        // First we want to get everything right of the paren
                        StringVector paramTokens;
                        if(line.find('(', 0) != String::npos)
                        {
                            StringVector lineTokens = StringUtil::split(line, "(");
                            if(lineTokens.size() == 2)
                            {
                                paramTokens = StringUtil::split(lineTokens[1], ",");
                            }
                        }
                        else
                        {
                            paramTokens = StringUtil::split(line, ",");
                        }
                        for(StringVector::const_iterator itParam = paramTokens.begin(); itParam != paramTokens.end(); ++itParam)
                        {
                            functionSig += *itParam;
                            // The parameter containing the closing paren is
                            // the last one; it gets no trailing comma.
                            if(itParam->find(')', 0) == String::npos)
                                functionSig += ",";
                        }
                        if(line.find(')', 0) != String::npos)
                        {
                            foundEndOfSignature = true;
                        }
                        line = stream->getLine();
                    }
                    functionInvoc = createInvocationFromString(functionSig);
                    // Now that we have the signature, read through the file
                    // until we find the end of the function, tracking brace
                    // nesting depth. Stop at end-of-stream for safety.
                    bool foundEndOfBody = false;
                    size_t braceCount = 0;
                    while(!foundEndOfBody && !stream->eof())
                    {
                        functionBody += line;
                        if(line.find('{', 0) != String::npos)
                        {
                            braceCount++;
                        }
                        if(line.find('}', 0) != String::npos)
                            braceCount--;
                        if(braceCount == 0)
                        {
                            foundEndOfBody = true;
                            // Remove first and last braces; only the body
                            // text between them is cached.
                            size_t pos = functionBody.find('{');
                            functionBody.erase(pos, 1);
                            pos = functionBody.rfind('}');
                            functionBody.erase(pos, 1);
                            mFunctionCacheMap.emplace(*functionInvoc, functionBody);
                        }
                        functionBody += "\n";
                        line = stream->getLine();
                    }
                    // The invocation was copied into the cache by value;
                    // the original code leaked this allocation.
                    OGRE_DELETE functionInvoc;
                }
            }
        }
    }

    stream->close();
}
}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file rewrite_simplify.cc
* \brief Rewrite-rule based simplification.
*/
// Acknowledgement: Most rewrite-rules are from Halide.
#include "rewrite_simplify.h"
#include <tvm/arith/analyzer.h>
#include <tvm/tir/builtin.h>
#include <tvm/tir/op.h>
#include <algorithm>
#include "../target/datatype/registry.h"
#include "const_fold.h"
#include "pattern_match.h"
namespace tvm {
namespace arith {
using namespace tir;
// Macro: if SrcExpr matches the current expression `ret`, return the
// rewritten ResExpr.
#define TVM_TRY_REWRITE(SrcExpr, ResExpr) \
if ((SrcExpr).Match(ret)) { \
return (ResExpr).Eval(); \
}
// Macro: as above, but recursively re-simplify the rewritten result.
#define TVM_TRY_RECURSIVE_REWRITE(SrcExpr, ResExpr) \
if ((SrcExpr).Match(ret)) { \
return RecursiveRewrite((ResExpr).Eval()); \
}
// Macro: rewrite only if CondExpr also holds after the match.
#define TVM_TRY_REWRITE_IF(SrcExpr, ResExpr, CondExpr) \
if ((SrcExpr).Match(ret) && (CondExpr)) { \
return (ResExpr).Eval(); \
}
// Macro: rewrite + recursive re-simplification, only if CondExpr holds
// after the match.
#define TVM_TRY_RECURSIVE_REWRITE_IF(SrcExpr, ResExpr, CondExpr) \
if ((SrcExpr).Match(ret) && (CondExpr)) { \
return RecursiveRewrite((ResExpr).Eval()); \
}
// NOTE for developers:
//
// We mainly focus on index expression simplification.
// Besides the RewriteSimplifier, some cases can be better
// handled by CanonicalSimplifier.
//
// try to prove x equals val
// Tries to prove how the simplified value of x compares to the constant
// val. Returns the strongest provable relation (kEQ/kGT/kLT/kGE/kLE/kNE)
// using constant folding, const-int-bound analysis and, for val == 0,
// modular-set analysis; returns kUnknown when nothing can be proven.
RewriteSimplifier::Impl::CompareResult RewriteSimplifier::Impl::TryCompare(const PrimExpr& x,
int64_t val) {
// Simplify first so a constant expression folds to an IntImm.
PrimExpr diff = this->VisitExpr(x);
if (const auto* ptr = diff.as<IntImmNode>()) {
if (ptr->value == val) {
return kEQ;
} else if (ptr->value > val) {
return kGT;
} else if (ptr->value < val) {
return kLT;
}
}
// Strict bounds are tested before the non-strict ones so the strongest
// provable relation is returned.
ConstIntBound dbound = analyzer_->const_int_bound(diff);
if (dbound->min_value > val) {
return kGT;
}
if (dbound->max_value < val) {
return kLT;
}
if (dbound->min_value >= val) {
return kGE;
}
if (dbound->max_value <= val) {
return kLE;
}
if (val == 0) {
// A nonzero base in the modular set proves diff can never be zero.
ModularSet dmod = analyzer_->modular_set(diff);
if (dmod->base != 0) {
return kNE;
}
}
return kUnknown;
}
// Records the binding var -> info in the substitution map. Unless
// can_override is set, re-binding a variable to a semantically different
// expression is treated as a programming error (ICHECK failure).
void RewriteSimplifier::Impl::Update(const Var& var, const PrimExpr& info, bool can_override) {
  auto existing = var_map_.find(var);
  if (!can_override && existing != var_map_.end()) {
    ICHECK(ExprDeepEqual()(existing->second, info)) << "Trying to update var \'" << var << "\'"
                                                    << " with a different value: "
                                                    << "original=" << existing->second
                                                    << ", new=" << info;
  }
  var_map_[var] = info;
}
// Simplifies an Add node. Rules are applied in order: constant folding,
// vector (ramp/broadcast) rules, then — for index types — cancellation,
// coefficient folding, div/mod identities, and canonicalization rules
// that trigger a recursive re-simplification. Falls through to the
// mutated expression when no rule fires.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const AddNode* op) {
PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
op = ret.as<AddNode>();
PrimExpr const_res = TryConstFold<Add>(op->a, op->b);
if (const_res.defined()) return const_res;
// Pattern var to match any expression
PVar<PrimExpr> x, y, z, b1, b2, s1, s2;
// Pattern var match IntImm
PVar<IntImm> c1, c2, c3;
// Pattern var match FloatImm
PVar<FloatImm> c4;
// Pattern var for lanes in broadcast and ramp
PVar<int> lanes;
// Vector rules
if (op->dtype.lanes() != 1) {
TVM_TRY_REWRITE(ramp(b1, s1, lanes) + ramp(b2, s2, lanes), ramp(b1 + b2, s1 + s2, lanes));
TVM_TRY_REWRITE(ramp(b1, s1, lanes) + broadcast(x, lanes), ramp(b1 + x, s1, lanes));
TVM_TRY_REWRITE(broadcast(x, lanes) + ramp(b1, s1, lanes), ramp(x + b1, s1, lanes));
TVM_TRY_REWRITE(broadcast(x, lanes) + broadcast(y, lanes), broadcast(x + y, lanes));
TVM_TRY_REWRITE_IF(x + broadcast(c4, lanes), x, c4.Eval()->value == 0.0f);
}
if (IsIndexType(op->dtype)) {
// Index rules
// cancellation rules
TVM_TRY_REWRITE((x - y) + y, x);
TVM_TRY_REWRITE(x + (y - x), y);
TVM_TRY_REWRITE((x - y) + (y - z), x - z);
TVM_TRY_REWRITE((x - y) + (z - x), z - y);
TVM_TRY_REWRITE(min(x, y - z) + z, min(x + z, y));
TVM_TRY_REWRITE(min(x - z, y) + z, min(x, y + z));
TVM_TRY_REWRITE(max(x, y - z) + z, max(x + z, y));
TVM_TRY_REWRITE(max(x - z, y) + z, max(x, y + z));
TVM_TRY_REWRITE_IF(min(x, y + z * c1) + z * c2, min(x + z * c2, y),
c1.Eval()->value == -c2.Eval()->value);
TVM_TRY_REWRITE_IF(max(x, y + z * c1) + z * c2, max(x + z * c2, y),
c1.Eval()->value == -c2.Eval()->value);
TVM_TRY_REWRITE_IF(min(y + z * c1, x) + z * c2, min(x + z * c2, y),
c1.Eval()->value == -c2.Eval()->value);
TVM_TRY_REWRITE_IF(max(y + z * c1, x) + z * c2, max(x + z * c2, y),
c1.Eval()->value == -c2.Eval()->value);
TVM_TRY_REWRITE(max(x, y) + min(x, y), x + y);
TVM_TRY_REWRITE(min(x, y) + max(x, y), x + y);
TVM_TRY_REWRITE(max(x, y) + min(y, x), x + y);
TVM_TRY_REWRITE(min(x, y) + max(y, x), x + y);
TVM_TRY_REWRITE_IF(min(x, y + c1) + c2, min(x + c2, y), c1.Eval()->value == -c2.Eval()->value);
TVM_TRY_REWRITE_IF(min(x + c1, y) + c2, min(x, y + c2), c1.Eval()->value == -c2.Eval()->value);
TVM_TRY_REWRITE_IF(max(x, y + c1) + c2, max(x + c2, y), c1.Eval()->value == -c2.Eval()->value);
TVM_TRY_REWRITE_IF(max(x + c1, y) + c2, max(x, y + c2), c1.Eval()->value == -c2.Eval()->value);
// constant folding
// NOTE: canonicalization might better at this.
TVM_TRY_REWRITE((x + c1) + c2, x + (c1 + c2));
// mul co-efficient folding
TVM_TRY_REWRITE(x + x, x * 2);
TVM_TRY_REWRITE(x * y + x, x * (y + 1));
TVM_TRY_REWRITE(y * x + x, x * (y + 1));
TVM_TRY_REWRITE(x + y * x, x * (1 + y));
TVM_TRY_REWRITE(x + x * y, x * (1 + y));
TVM_TRY_REWRITE(x * y + x * z, x * (y + z));
TVM_TRY_REWRITE(y * x + x * z, x * (y + z));
TVM_TRY_REWRITE(x * y + z * x, x * (y + z));
TVM_TRY_REWRITE(y * x + z * x, x * (y + z));
// DivMod rules
// trunc div
TVM_TRY_REWRITE(truncdiv(x, c1) * c1 + truncmod(x, c1), x);
// floor div
TVM_TRY_REWRITE(floordiv(x, c1) * c1 + floormod(x, c1), x);
// canonicalization rule
// will try rewrite again after canonicalization.
TVM_TRY_RECURSIVE_REWRITE(x + (c1 - y), (x - y) + c1);
TVM_TRY_RECURSIVE_REWRITE((c1 - y) + x, (x - y) + c1);
TVM_TRY_RECURSIVE_REWRITE(x + c1 + y, (x + y) + c1);
TVM_TRY_RECURSIVE_REWRITE(x + (c1 + y), (x + y) + c1);
TVM_TRY_RECURSIVE_REWRITE(x + max(y, z), max(y, z) + x);
TVM_TRY_RECURSIVE_REWRITE(x + min(y, z), min(y, z) + x);
// DivMod rules
// trunc div
TVM_TRY_RECURSIVE_REWRITE(truncmod(y, c1) + x * c1, x * c1 + truncmod(y, c1));
// floor div
TVM_TRY_RECURSIVE_REWRITE(floormod(y, c1) + x * c1, x * c1 + floormod(y, c1));
}
// condition rules.
TVM_TRY_REWRITE(select(x, b1, b2) + select(x, s1, s2), select(x, b1 + s1, b2 + s2));
// default value
return ret;
}
// Pushes a simplified constraint onto the literal-constraint stack and
// returns a recovery callback that pops it again. Constraints must be
// released in LIFO order (enforced by the ICHECK in the callback).
std::function<void()> RewriteSimplifier::Impl::EnterConstraint(const PrimExpr& constraint) {
size_t old_literal_size = literal_constraints_.size();
// we will compare the already simplified result with the constraint,
// so simplify the constraint as well
literal_constraints_.push_back(operator()(constraint));
size_t new_literal_size = literal_constraints_.size();
auto frecover = [old_literal_size, new_literal_size, this]() {
// No constraint entered after this one may still be outstanding.
ICHECK_EQ(literal_constraints_.size(), new_literal_size);
literal_constraints_.resize(old_literal_size);
};
return frecover;
}
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const SubNode* op) {
PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
op = ret.as<SubNode>();
PrimExpr const_res = TryConstFold<Sub>(op->a, op->b);
if (const_res.defined()) return const_res;
// Pattern var to match any expression
PVar<PrimExpr> x, y, z, b1, b2, s1, s2;
// Pattern var match IntImm
PVar<IntImm> c1, c2, c3;
// Pattern var for lanes in broadcast and ramp
PVar<int> lanes;
// Vector rules
if (op->dtype.lanes() != 1) {
TVM_TRY_REWRITE(ramp(b1, s1, lanes) - ramp(b2, s2, lanes), ramp(b1 - b2, s1 - s2, lanes));
TVM_TRY_REWRITE(ramp(b1, s1, lanes) - broadcast(x, lanes), ramp(b1 - x, s1, lanes));
TVM_TRY_REWRITE(broadcast(x, lanes) - ramp(b1, s1, lanes), ramp(x - b1, 0 - s1, lanes));
TVM_TRY_REWRITE(broadcast(x, lanes) - broadcast(y, lanes), broadcast(x - y, lanes));
}
if (IsIndexType(op->dtype)) {
// Index rules
// cancelation rules
TVM_TRY_REWRITE((x + y) - y, x);
TVM_TRY_REWRITE((x + y) - x, y);
TVM_TRY_REWRITE(x - (y + x), 0 - y);
TVM_TRY_REWRITE(x - (x + y), 0 - y);
TVM_TRY_REWRITE(min(x, y) - x, min(0, y - x));
TVM_TRY_REWRITE(min(x, y) - y, min(x - y, 0));
TVM_TRY_REWRITE(max(x, y) - x, max(0, y - x));
TVM_TRY_REWRITE(max(x, y) - y, max(x - y, 0));
TVM_TRY_REWRITE(x - max(x, y), min(0, x - y));
TVM_TRY_REWRITE(y - max(x, y), min(y - x, 0));
TVM_TRY_REWRITE(x - min(x, y), max(0, x - y));
TVM_TRY_REWRITE(y - min(x, y), max(y - x, 0));
// mul co-efficient folding
TVM_TRY_REWRITE(x - x, ZeroWithTypeLike(x));
TVM_TRY_REWRITE(x * y - x, x * (y - 1));
TVM_TRY_REWRITE(y * x - x, x * (y - 1));
TVM_TRY_REWRITE(x - y * x, x * (1 - y));
TVM_TRY_REWRITE(x - x * y, x * (1 - y));
TVM_TRY_REWRITE(x * y - x * z, x * (y - z));
TVM_TRY_REWRITE(y * x - x * z, x * (y - z));
TVM_TRY_REWRITE(x * y - z * x, x * (y - z));
TVM_TRY_REWRITE(y * x - z * x, x * (y - z));
// constant cancelation
TVM_TRY_REWRITE((x + c1) - c2, x + (c1 - c2));
TVM_TRY_REWRITE((c1 - x) - (c2 - y), (y - x) + (c1 - c2));
// cancelization rule involving 4 operands
TVM_TRY_REWRITE((x + y) - (x + z), y - z);
TVM_TRY_REWRITE((x + y) - (z + x), y - z);
TVM_TRY_REWRITE((y + x) - (z + x), y - z);
TVM_TRY_REWRITE((y + x) - (x + z), y - z);
TVM_TRY_REWRITE(min(x + y, z) - x, min(y, z - x));
TVM_TRY_REWRITE(min(y + x, z) - x, min(y, z - x));
TVM_TRY_REWRITE(min(z, x + y) - x, min(z - x, y));
TVM_TRY_REWRITE(min(z, y + x) - x, min(z - x, y));
TVM_TRY_REWRITE(max(x + y, z) - x, max(y, z - x));
TVM_TRY_REWRITE(max(y + x, z) - x, max(y, z - x));
TVM_TRY_REWRITE(max(z, x + y) - x, max(z - x, y));
TVM_TRY_REWRITE(max(z, y + x) - x, max(z - x, y));
TVM_TRY_REWRITE(x - min(x + y, z), max(0 - y, x - z));
TVM_TRY_REWRITE(x - min(y + x, z), max(0 - y, x - z));
TVM_TRY_REWRITE(x - min(z, x + y), max(x - z, 0 - y));
TVM_TRY_REWRITE(x - min(z, y + x), max(x - z, 0 - y));
TVM_TRY_REWRITE(min(x, y) - min(y, x), ZeroWithTypeLike(x));
TVM_TRY_REWRITE(max(x, y) - max(y, x), ZeroWithTypeLike(x));
TVM_TRY_REWRITE_IF(min(b1, b2) - min(s1, s2), b1 - s1,
CanProveEqual(((b1 - s1) - (b2 - s2)).Eval(), 0));
TVM_TRY_REWRITE_IF(min(b1, b2) - min(s1, s2), b1 - s2,
CanProveEqual(((b1 - s2) - (b2 - s1)).Eval(), 0));
TVM_TRY_REWRITE_IF(max(b1, b2) - max(s1, s2), b1 - s1,
CanProveEqual(((b1 - s1) - (b2 - s2)).Eval(), 0));
TVM_TRY_REWRITE_IF(max(b1, b2) - max(s1, s2), b1 - s2,
CanProveEqual(((b1 - s2) - (b2 - s1)).Eval(), 0));
// DivMod rules
// trucdiv
// NOTE: c*(x/c) + x % c == x is true all division mode.
TVM_TRY_REWRITE_IF(x - truncdiv(x, c1) * c1, truncmod(x, c1), c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(truncdiv(x, c1) * c1 - x, 0 - truncmod(x, c1), c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(x - (truncdiv(x + y, c1)) * c1, truncmod(x + y, c1) - y,
c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF((truncdiv(x + y, c1)) * c1 - x, y - truncmod(x + y, c1),
c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(x - truncdiv(x - y, c1) * c1, truncmod(x - y, c1) + y,
c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(truncdiv(x - y, c1) * c1 - x, 0 - truncmod(x - y, c1) - y,
c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(
x * c2 - truncdiv(x, c1) * c3, truncmod(x, c1) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
truncdiv(x, c1) * c3 - x * c2, 0 - truncmod(x, c1) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
x * c2 - truncdiv(x + y, c1) * c3, (truncmod(x + y, c1) - y) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
truncdiv(x + y, c1) * c3 - x * c2, (y - truncmod(x + y, c1)) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
x * c2 - truncdiv(x - y, c1) * c3, (truncmod(x - y, c1) + y) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
truncdiv(x - y, c1) * c3 - x * c2, (0 - truncmod(x - y, c1) - y) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
// Proof in the case of floordiv, need positive condition.
// let x = a * c3 + r
// (x + c1) / c3 - x / c3 => (r + c1) / c3
// NOTE: the use of floormod(c2, c3) was intentional to simplify the const.
TVM_TRY_REWRITE_IF(truncdiv(x + c1, c3) - truncdiv(x + c2, c3),
truncdiv(truncmod(x + floormod(c2, c3), c3) + (c1 - c2), c3),
CanProveGreaterEqual(x.Eval(), -c2.Eval()->value) &&
c1.Eval()->value >= c2.Eval()->value && c3.Eval()->value > 0);
TVM_TRY_REWRITE_IF(
truncdiv(x + c1, c3) - truncdiv(x, c3), truncdiv(truncmod(x, c3) + c1, c3),
CanProveGreaterEqual(x.Eval(), 0) && c1.Eval()->value >= 0 && c3.Eval()->value > 0);
// floordiv
TVM_TRY_REWRITE_IF(x - floordiv(x, c1) * c1, floormod(x, c1), c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(floordiv(x, c1) * c1 - x, 0 - floormod(x, c1), c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(x - floordiv(x + y, c1) * c1, floormod(x + y, c1) - y,
c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(floordiv(x + y, c1) * c1 - x, y - floormod(x + y, c1),
c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(x - floordiv(x - y, c1) * c1, floormod(x - y, c1) + y,
c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(floordiv(x - y, c1) * c1 - x, 0 - floormod(x - y, c1) - y,
c1.Eval()->value != 0);
TVM_TRY_REWRITE_IF(
x * c2 - floordiv(x, c1) * c3, floormod(x, c1) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
floordiv(x, c1) * c3 - x * c2, 0 - floormod(x, c1) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
x * c2 - floordiv(x + y, c1) * c3, (floormod(x + y, c1) - y) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
floordiv(x + y, c1) * c3 - x * c2, (y - floormod(x + y, c1)) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
x * c2 - floordiv(x - y, c1) * c3, (floormod(x - y, c1) + y) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(
floordiv(x - y, c1) * c3 - x * c2, (0 - floormod(x - y, c1) - y) * c2,
c1.Eval()->value != 0 && c3.Eval()->value == c1.Eval()->value * c2.Eval()->value);
TVM_TRY_REWRITE_IF(floordiv(x + c1, c3) - floordiv(x + c2, c3),
floordiv(floormod(x + floormod(c2, c3), c3) + (c1 - c2), c3),
c3.Eval()->value > 0);
TVM_TRY_REWRITE_IF(floordiv(x + c1, c3) - floordiv(x, c3), floordiv(floormod(x, c3) + c1, c3),
c3.Eval()->value > 0);
// canonicalization rule
// will try rewrite again after canonicalization.
TVM_TRY_REWRITE(x - c1, x + (0 - c1));
TVM_TRY_RECURSIVE_REWRITE((x + c1) - y, (x - y) + c1);
TVM_TRY_RECURSIVE_REWRITE(x - (y - z), (x + z) - y);
TVM_TRY_RECURSIVE_REWRITE(x - y * c1, x + y * (0 - c1));
}
// condition rules.
TVM_TRY_REWRITE(select(x, b1, b2) - select(x, s1, s2), select(x, b1 - s1, b2 - s2));
TVM_TRY_REWRITE(select(x, y, z) - z, select(x, y - z, ZeroWithTypeLike(z)));
TVM_TRY_REWRITE(select(x, y, z) - y, select(x, ZeroWithTypeLike(y), z - y));
return ret;
}
// Simplify a multiplication node.
// First recursively simplifies operands, then tries constant folding,
// then applies vector (broadcast/ramp) rules and, for index types,
// algebraic identities plus canonicalization (constants pushed to the
// right-hand side so later rules can match them uniformly).
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const MulNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<MulNode>();
  // If both operands folded to constants, return the folded result directly.
  PrimExpr const_res = TryConstFold<Mul>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y, z, b1, b2, s1, s2;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2;
  // Pattern var match FloatImm
  PVar<FloatImm> c3;
  // Pattern var for lanes in broadcast and ramp
  PVar<int> lanes;
  // Vector rules
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(broadcast(x, lanes) * broadcast(y, lanes), broadcast(x * y, lanes));
    TVM_TRY_REWRITE(ramp(b1, s1, lanes) * broadcast(x, lanes), ramp(b1 * x, s1 * x, lanes));
    TVM_TRY_REWRITE(broadcast(x, lanes) * ramp(b1, s1, lanes), ramp(b1 * x, s1 * x, lanes));
    // Float zero annihilates the product: 0.0 * x => broadcast(0.0).
    TVM_TRY_REWRITE_IF(broadcast(c3, lanes) * x, broadcast(c3, lanes), c3.Eval()->value == 0.0f);
  }
  if (IsIndexType(op->dtype)) {
    // constant simplification rule
    TVM_TRY_REWRITE((x + c1) * c2, x * c2 + c1 * c2);
    TVM_TRY_REWRITE((x * c1) * c2, x * (c1 * c2));
    // min(x, y) * max(x, y) == x * y holds for either ordering of x and y.
    TVM_TRY_REWRITE(min(x, y) * max(x, y), x * y);
    TVM_TRY_REWRITE(max(x, y) * min(x, y), x * y);
    // canonicalization: move constant multiplicands to the right.
    TVM_TRY_RECURSIVE_REWRITE(x * (c1 * y), (x * y) * c1);
    TVM_TRY_RECURSIVE_REWRITE(c1 * x, x * c1);
    // Keep the constant factor non-negative by flipping the subtraction.
    TVM_TRY_RECURSIVE_REWRITE_IF((x - y) * c1, (y - x) * (0 - c1), c1.Eval()->value < 0);
  }
  return ret;
}
// Simplify a division node (DivNode lowers to truncated division for
// integer index types). Float divisions by a constant are turned into
// multiplications; integer rules largely require proving operands are
// non-negative because truncated division is not distributive for
// negative values.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const DivNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<DivNode>();
  PrimExpr const_res = TryConstFold<Div>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y, z, b1;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2, c3;
  // Pattern var for lanes in broadcast and ramp
  PVar<int> lanes;
  // x / 2.0 = x * 0.5 — replace float division by a constant with the
  // (cheaper) multiplication by its reciprocal.
  if (const FloatImmNode* ptr = op->b.as<FloatImmNode>()) {
    ICHECK(op->dtype.is_float() ||
           datatype::Registry::Global()->GetTypeRegistered(op->dtype.code()));
    return op->a * make_const(op->b.dtype(), 1.0 / ptr->value);
  }
  // Vector rules
  if (op->dtype.lanes() != 1) {
    // NOTE: use div as the pattern also works for float.
    TVM_TRY_REWRITE(div(broadcast(x, lanes), broadcast(y, lanes)), broadcast(div(x, y), lanes));
    // ramp / bcast
    if ((div(ramp(b1, c1, lanes), broadcast(c2, lanes))).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      // Stride divisible by divisor: divide base and stride lane-wise.
      if (c1val % c2val == 0) {
        return ramp(div(b1, c2), div(c1, c2), lanes).Eval();
      }
      // If all possible indices in ramp are the same.
      if (CanProveGreaterEqual(b1.Eval(), 0)) {
        ModularSet bmod = analyzer_->modular_set(b1.Eval());
        int64_t ramp_min = bmod->base / c2val;
        int64_t ramp_max = (bmod->base + (lanes.Eval() - 1) * c1val) / c2val;
        // Every lane lands in the same quotient bucket, so the ramp
        // collapses to a broadcast.
        if (bmod->coeff % c2val == 0 && ramp_min == ramp_max) {
          return broadcast(div(b1, c2), lanes).Eval();
        }
      }
    }
  }
  if (IsIndexType(op->dtype)) {
    // Be aware of the division rules:
    // We adopt the default C division which uses truncation instead of floordiv.
    // This means most rules need to check non-negativeness of the operands.
    // TryConstFold doesn't work for negative cases because it is also used by legacy
    // parts of tvm which still assume euclidean div. In this simplifier we assume that the division
    // is truncated, so perform const folding again.
    // NOTE: trunc div required
    if (truncdiv(c1, c2).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      return make_const(op->dtype, truncdiv(c1val, c2val));
    }
    // while it is always true for trunc div
    // restrict to common case(positive div)
    TVM_TRY_REWRITE_IF(truncdiv(truncdiv(x, c1), c2), truncdiv(x, c1 * c2),
                       c1.Eval()->value > 0 && c2.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(truncdiv(truncdiv(x, c1) + c2, c3), truncdiv(x + c1 * c2, c1 * c3),
                       c1.Eval()->value > 0 && c2.Eval()->value >= 0 && c3.Eval()->value > 0 &&
                           CanProveGreaterEqual(x.Eval(), 0));
    // (x * c1) / c2: simplify when one constant divides the other.
    if (truncdiv(x * c1, c2).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      if (c1val > 0 && c2val > 0) {
        if (c1val % c2val == 0) return (x * truncdiv(c1, c2)).Eval();
        if (c2val % c1val == 0) return truncdiv(x, truncdiv(c2, c1)).Eval();
      }
    }
    TVM_TRY_REWRITE(truncdiv(x, x), OneWithTypeLike(x));
    TVM_TRY_REWRITE(truncdiv(x * c1, x), c1);
    TVM_TRY_REWRITE(truncdiv(c1 * x, x), c1);
    // Rules involving 2-operands.
    TVM_TRY_REWRITE_IF(truncdiv(x * c1 + y, c2), x * truncdiv(c1, c2) + truncdiv(y, c2),
                       c1.Eval()->value >= 0 && c2.Eval()->value > 0 &&
                           c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(min(x * c1, y), c2), min(x * truncdiv(c1, c2), truncdiv(y, c2)),
                       c1.Eval()->value >= 0 && c2.Eval()->value > 0 &&
                           c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(max(x * c1, y), c2), max(x * truncdiv(c1, c2), truncdiv(y, c2)),
                       c1.Eval()->value >= 0 && c2.Eval()->value > 0 &&
                           c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(y + x * c1, c2), truncdiv(y, c2) + x * truncdiv(c1, c2),
                       c1.Eval()->value >= 0 && c2.Eval()->value > 0 &&
                           c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(min(y, x * c1), c2), min(truncdiv(y, c2), x * truncdiv(c1, c2)),
                       c1.Eval()->value >= 0 && c2.Eval()->value > 0 &&
                           c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(max(y, x * c1), c2), max(truncdiv(y, c2), x * truncdiv(c1, c2)),
                       c1.Eval()->value >= 0 && c2.Eval()->value > 0 &&
                           c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    // Rules involving 3-operands.
    TVM_TRY_REWRITE_IF(
        truncdiv(x * c1 + y + z, c2), x * truncdiv(c1, c2) + truncdiv(y + z, c2),
        c1.Eval()->value >= 0 && c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0 &&
            CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual((y + z).Eval(), 0));
    TVM_TRY_REWRITE_IF(
        truncdiv(x * c1 - y + z, c2), x * truncdiv(c1, c2) + truncdiv(z - y, c2),
        c1.Eval()->value >= 0 && c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0 &&
            CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual((z - y).Eval(), 0));
    TVM_TRY_REWRITE_IF(
        truncdiv(x * c1 + y - z, c2), x * truncdiv(c1, c2) + truncdiv(y - z, c2),
        c1.Eval()->value >= 0 && c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0 &&
            CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual((y - z).Eval(), 0));
    TVM_TRY_REWRITE_IF(
        truncdiv(y + x * c1 + z, c2), x * truncdiv(c1, c2) + truncdiv(y + z, c2),
        c1.Eval()->value > 0 && c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0 &&
            CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual((y + z).Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(x + c1, c2), truncdiv(x, c2) + truncdiv(c1, c2),
                       c1.Eval()->value > 0 && c2.Eval()->value > 0 &&
                           c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0));
    // (x + y) / x == y / x + 1 when everything is provably non-negative.
    TVM_TRY_REWRITE_IF(truncdiv(x + y, x), truncdiv(y, x) + 1,
                       CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(y + x, x), truncdiv(y, x) + 1,
                       CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(
        truncdiv((x + y) + z, x), truncdiv(y + z, x) + 1,
        CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual((y + z).Eval(), 0));
    TVM_TRY_REWRITE_IF(
        truncdiv((y + x) + z, x), truncdiv(y + z, x) + 1,
        CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual((y + z).Eval(), 0));
    TVM_TRY_REWRITE_IF(
        truncdiv(y + (z + x), x), truncdiv(y + z, x) + 1,
        CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual((y + z).Eval(), 0));
    TVM_TRY_REWRITE_IF(
        truncdiv(y + (x + z), x), truncdiv(y + z, x) + 1,
        CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual((y + z).Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(x * y, y), x,
                       CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(y * x, y), x,
                       CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(x * z + y, z), x + truncdiv(y, z),
                       CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0) &&
                           CanProveGreaterEqual(z.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(z * x + y, z), x + truncdiv(y, z),
                       CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0) &&
                           CanProveGreaterEqual(z.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(y + x * z, z), truncdiv(y, z) + x,
                       CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0) &&
                           CanProveGreaterEqual(z.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncdiv(y + z * x, z), truncdiv(y, z) + x,
                       CanProveGreaterEqual(x.Eval(), 0) && CanProveGreaterEqual(y.Eval(), 0) &&
                           CanProveGreaterEqual(z.Eval(), 0));
  }
  return ret;
}
// Simplify a truncated-modulo node. Applies broadcast/ramp vector rules
// and, for index types, drops terms provably divisible by the modulus
// (with non-negativity side conditions required by truncated semantics),
// then falls back to modular-set analysis.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const ModNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<ModNode>();
  PrimExpr const_res = TryConstFold<Mod>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y, z, b1;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2;
  // Pattern var for lanes in broadcast and ramp
  PVar<int> lanes;
  // Vector rules
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(truncmod(broadcast(x, lanes), broadcast(y, lanes)),
                    broadcast(truncmod(x, y), lanes));
    // ramp % bcast
    if (truncmod(ramp(b1, c1, lanes), broadcast(c2, lanes)).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      // Stride divisible by modulus: every lane has the same remainder.
      if (c1val % c2val == 0) {
        return broadcast(truncmod(b1, c2), lanes).Eval();
      }
      // If all possible indices in ramp are the same.
      if (CanProveGreaterEqual(b1.Eval(), 0)) {
        ModularSet bmod = analyzer_->modular_set(b1.Eval());
        int64_t ramp_min = bmod->base / c2val;
        int64_t ramp_max = (bmod->base + (lanes.Eval() - 1) * c1val) / c2val;
        if (bmod->coeff % c2val == 0) {
          if (ramp_min == ramp_max) {
            // All lanes fall into one quotient bucket: the ramp base can
            // be reduced to its constant remainder.
            return ramp(truncmod(bmod->base, c2), c1, lanes).Eval();
          } else {
            // Otherwise still reduce the base, keeping the outer mod.
            return truncmod(ramp(truncmod(bmod->base, c2), c1, lanes), broadcast(c2, lanes)).Eval();
          }
        }
      }
    }
  }
  if (IsIndexType(op->dtype)) {
    // Be aware of the division rules:
    // We adopt the default C division which uses truncation instead of floordiv.
    // This means most rules need to check non-negativeness of the operands.
    TVM_TRY_REWRITE_IF(truncmod(x * c1, c2), ZeroWithTypeLike(x),
                       c2.Eval()->value != 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(truncmod(x * c1 + y, c2), truncmod(y, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual((x * c1).Eval(), 0) &&
                           CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncmod(x + c1, c2), truncmod(x, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value >= 0 &&
                           c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0));
    TVM_TRY_REWRITE_IF(truncmod(x + y * c1, c2), truncmod(x, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0 &&
                           CanProveGreaterEqual(x.Eval(), 0) &&
                           CanProveGreaterEqual((y * c1).Eval(), 0));
    // canonicalization: x % c == x % (-c) for truncated division
    // NOTE: trunc div required
    TVM_TRY_RECURSIVE_REWRITE_IF(
        truncmod(x, c1), truncmod(x, PConst<PrimExpr>(make_const(op->dtype, -c1.Eval()->value))),
        c1.Eval()->value < 0);
    // try modular analysis
    if (truncmod(x, c1).Match(ret)) {
      ModularSet mod = analyzer_->modular_set(x.Eval());
      int64_t c1val = c1.Eval()->value;
      // x == coeff * k + base with coeff divisible by c1 => x % c1 == base % c1.
      if (mod->coeff % c1val == 0 && c1val > 0 && CanProveGreaterEqual(x.Eval(), 0)) {
        return truncmod(mod->base, c1).Eval();
      }
    }
  }
  return ret;
}
// Simplify a floor-division node. Floor division distributes over
// addition of multiples of the divisor without sign restrictions on the
// numerator, so these rules carry fewer non-negativity side conditions
// than their truncdiv counterparts.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const FloorDivNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<FloorDivNode>();
  PrimExpr const_res = TryConstFold<FloorDiv>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y, z, b1;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2, c3;
  // Pattern var for lanes in broadcast and ramp
  PVar<int> lanes;
  // Vector rules
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(floordiv(broadcast(x, lanes), broadcast(y, lanes)),
                    broadcast(floordiv(x, y), lanes));
    // ramp // bcast
    if (floordiv(ramp(b1, c1, lanes), broadcast(c2, lanes)).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      // Stride divisible by divisor: divide base and stride lane-wise.
      if (c1val % c2val == 0) {
        return ramp(floordiv(b1, c2), floordiv(c1, c2), lanes).Eval();
      }
      // If all possible indices in ramp are the same.
      ModularSet bmod = analyzer_->modular_set(b1.Eval());
      int64_t ramp_min = floordiv(bmod->base, c2val);
      int64_t ramp_max = floordiv(bmod->base + (lanes.Eval() - 1) * c1val, c2val);
      if (ramp_min == ramp_max) {
        // If the base's modular coefficient is divisible by c2, the
        // quotient is fully determined by the divided base.
        if (bmod->coeff % c2val == 0) {
          return broadcast(floordiv(b1, c2), lanes).Eval();
        }
        // If all indices can be guaranteed to settle inside a coeff range
        if (c2val % bmod->coeff == 0 && bmod->base + (lanes.Eval() - 1) * c1val < bmod->coeff) {
          return broadcast(floordiv(b1, c2), lanes).Eval();
        }
      }
    }
  }
  if (IsIndexType(op->dtype)) {
    // Be aware of the division rules: this is floor division.
    TVM_TRY_REWRITE_IF(floordiv(floordiv(x, c1), c2), floordiv(x, c1 * c2),
                       c1.Eval()->value > 0 && c2.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(floordiv(floordiv(x, c1) + c2, c3), floordiv(x + c1 * c2, c1 * c3),
                       c1.Eval()->value > 0 && c3.Eval()->value > 0);
    // (x * c1) // c2: simplify when one constant divides the other.
    if (floordiv(x * c1, c2).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      if (c1val > 0 && c2val > 0) {
        if (c1val % c2val == 0) return (x * floordiv(c1, c2)).Eval();
        if (c2val % c1val == 0) return floordiv(x, floordiv(c2, c1)).Eval();
      }
    }
    TVM_TRY_REWRITE(floordiv(x, x), OneWithTypeLike(x));
    TVM_TRY_REWRITE(floordiv(x * c1, x), c1);
    TVM_TRY_REWRITE(floordiv(c1 * x, x), c1);
    // Rules involving 2-operands.
    TVM_TRY_REWRITE_IF(floordiv(x * c1 + y, c2), x * floordiv(c1, c2) + floordiv(y, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(min(x * c1, y), c2), min(x * floordiv(c1, c2), floordiv(y, c2)),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(max(x * c1, y), c2), max(x * floordiv(c1, c2), floordiv(y, c2)),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(y + x * c1, c2), floordiv(y, c2) + x * floordiv(c1, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(min(y, x * c1), c2), min(floordiv(y, c2), x * floordiv(c1, c2)),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(max(y, x * c1), c2), max(floordiv(y, c2), x * floordiv(c1, c2)),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    // Rules involving 3-operands.
    TVM_TRY_REWRITE_IF(floordiv(x * c1 + y + z, c2), x * floordiv(c1, c2) + floordiv(y + z, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(x * c1 - y + z, c2), x * floordiv(c1, c2) + floordiv(z - y, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(x * c1 + y - z, c2), x * floordiv(c1, c2) + floordiv(y - z, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(y + x * c1 + z, c2), x * floordiv(c1, c2) + floordiv(y + z, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(x + c1, c2), floordiv(x, c2) + floordiv(c1, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floordiv(x * c1, x * c2), floordiv(c1, c2), c2.Eval()->value > 0);
    // (x + y) // x == y // x + 1 when x >= 0 (floor semantics).
    TVM_TRY_REWRITE_IF(floordiv(x + y, x), floordiv(y, x) + 1, CanProveGreaterEqual(x.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(y + x, x), floordiv(y, x) + 1, CanProveGreaterEqual(x.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv((x + y) + z, x), floordiv(y + z, x) + 1,
                       CanProveGreaterEqual(x.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv((y + x) + z, x), floordiv(y + z, x) + 1,
                       CanProveGreaterEqual(x.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(y + (z + x), x), floordiv(y + z, x) + 1,
                       CanProveGreaterEqual(x.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(y + (x + z), x), floordiv(y + z, x) + 1,
                       CanProveGreaterEqual(x.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(x * y, y), x, CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(y * x, y), x, CanProveGreaterEqual(y.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(x * z + y, z), x + floordiv(y, z),
                       CanProveGreaterEqual(z.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(z * x + y, z), x + floordiv(y, z),
                       CanProveGreaterEqual(z.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(y + x * z, z), floordiv(y, z) + x,
                       CanProveGreaterEqual(z.Eval(), 0));
    TVM_TRY_REWRITE_IF(floordiv(y + z * x, z), floordiv(y, z) + x,
                       CanProveGreaterEqual(z.Eval(), 0));
  }
  return ret;
}
// Simplify a floor-modulo node. Multiples of the modulus vanish without
// sign side conditions (floor semantics), and modular-set analysis can
// reduce the operand to its constant residue.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const FloorModNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<FloorModNode>();
  PrimExpr const_res = TryConstFold<FloorMod>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y, z, b1;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2;
  // Pattern var for lanes in broadcast and ramp
  PVar<int> lanes;
  // Vector rules
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(floormod(broadcast(x, lanes), broadcast(y, lanes)),
                    broadcast(floormod(x, y), lanes));
    // floormod(ramp, bcast)
    if (floormod(ramp(b1, c1, lanes), broadcast(c2, lanes)).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      // Stride divisible by modulus: every lane has the same remainder.
      if (c1val % c2val == 0) {
        return broadcast(floormod(b1, c2), lanes).Eval();
      }
      // If all possible indices in ramp are the same.
      ModularSet bmod = analyzer_->modular_set(b1.Eval());
      int64_t ramp_min = floordiv(bmod->base, c2val);
      int64_t ramp_max = floordiv(bmod->base + (lanes.Eval() - 1) * c1val, c2val);
      if (bmod->coeff % c2val == 0) {
        if (ramp_min == ramp_max) {
          // All lanes share a quotient bucket; reduce the base to its residue.
          return ramp(floormod(bmod->base, c2), c1, lanes).Eval();
        } else {
          // Reduce the base but keep the outer mod for lanes that wrap.
          return floormod(ramp(floormod(bmod->base, c2), c1, lanes), broadcast(c2, lanes)).Eval();
        }
      } else if (c2val % bmod->coeff == 0 && ramp_min == ramp_max) {
        return ramp(floormod(b1, c2), c1, lanes).Eval();
      }
    }
  }
  if (IsIndexType(op->dtype)) {
    // Be aware of the division rules: we use floordiv/floormod here
    TVM_TRY_REWRITE_IF(floormod(x * c1, c2), ZeroWithTypeLike(x),
                       c2.Eval()->value != 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floormod(x * c1 + y, c2), floormod(y, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floormod(x + c1, c2), floormod(x, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floormod(x + y * c1, c2), floormod(x, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value % c2.Eval()->value == 0);
    TVM_TRY_REWRITE_IF(floormod(x * c1, x * c2), x * floormod(c1, c2), c2.Eval()->value != 0);
    TVM_TRY_REWRITE(floormod(x * y, y), ZeroWithTypeLike(x));
    TVM_TRY_REWRITE(floormod(y * x, y), ZeroWithTypeLike(y));
    // try modular analysis
    if (floormod(x, c1).Match(ret)) {
      ModularSet mod = analyzer_->modular_set(x.Eval());
      int64_t c1val = c1.Eval()->value;
      // x == coeff * k + base with coeff divisible by c1 => x mod c1 == base mod c1.
      if (mod->coeff % c1val == 0 && c1val > 0) {
        return floormod(mod->base, c1).Eval();
      }
    }
  }
  return ret;
}
// Simplify a min node: vector rules, constant-int-bound elimination,
// min/max absorption and distribution over +/-, div/mul scaling rules
// (which flip min <-> max when the scale is negative), and
// canonicalization that floats constants outward.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const MinNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<MinNode>();
  PrimExpr const_res = TryConstFold<Min>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y, z, s1, s2;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2;
  PVar<int> lanes;
  // vector rule
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(min(broadcast(x, lanes), broadcast(y, lanes)), broadcast(min(x, y), lanes));
    TVM_TRY_REWRITE(min(min(x, broadcast(y, lanes)), broadcast(z, lanes)),
                    min(x, broadcast(min(y, z), lanes)));
  }
  if (IsIndexType(op->dtype)) {
    TVM_TRY_REWRITE(min(x, x), x);
    // constant int bound: if one side's range is entirely below the
    // other's, the min is decided without any pattern match.
    ConstIntBound a_bound = analyzer_->const_int_bound(op->a);
    ConstIntBound b_bound = analyzer_->const_int_bound(op->b);
    if (a_bound->max_value <= b_bound->min_value) {
      return op->a;
    }
    if (b_bound->max_value <= a_bound->min_value) {
      return op->b;
    }
    // constant comparison
    if (min(x + c1, x + c2).Match(ret)) {
      if (c1.Eval()->value < c2.Eval()->value) {
        return (x + c1).Eval();
      } else {
        return (x + c2).Eval();
      }
    }
    if (min(x + c1, x).Match(ret) || min(x, x + c1).Match(ret)) {
      if (c1.Eval()->value < 0) {
        return (x + c1).Eval();
      } else {
        return x.Eval();
      }
    }
    if (min(c1 - x, c2 - x).Match(ret)) {
      if (c1.Eval()->value < c2.Eval()->value) {
        return (c1 - x).Eval();
      } else {
        return (c2 - x).Eval();
      }
    }
    // DivMod rules
    // Divide up rounding: trunc div
    // NOTE: truncdiv(x, y) >= floordiv(x, y)
    TVM_TRY_REWRITE_IF(min(truncdiv(x + c1, c2) * c2, x), x,
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    TVM_TRY_REWRITE_IF(min(truncdiv(x + c1, c2) * c2, max(x, c2)), max(x, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value &&
                           CanProveGreaterEqual(x.Eval(), 0));
    TVM_TRY_REWRITE_IF(min(x, truncdiv(x + c1, c2) * c2), x,
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    TVM_TRY_REWRITE_IF(min(max(x, c2), truncdiv(x + c1, c2) * c2), max(x, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value &&
                           CanProveGreaterEqual(x.Eval(), 0));
    // Divide up rounding: floor div
    TVM_TRY_REWRITE_IF(min(floordiv(x + c1, c2) * c2, x), x,
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    TVM_TRY_REWRITE_IF(min(floordiv(x + c1, c2) * c2, max(x, c2)), max(x, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    TVM_TRY_REWRITE_IF(min(x, floordiv(x + c1, c2) * c2), x,
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    TVM_TRY_REWRITE_IF(min(max(x, c2), floordiv(x + c1, c2) * c2), max(x, c2),
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    // floordiv(x, c2) * c2 <= x for positive c2.
    TVM_TRY_REWRITE_IF(min(x, floordiv(x, c2) * c2), floordiv(x, c2) * c2, c2.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(min(floordiv(x, c2) * c2, x), floordiv(x, c2) * c2, c2.Eval()->value > 0);
    // min/max absorption rules.
    TVM_TRY_REWRITE(min(max(x, y), min(x, y)), min(x, y));
    TVM_TRY_REWRITE(min(max(x, y), min(y, x)), min(x, y));
    TVM_TRY_REWRITE(min(min(x, y), max(x, y)), min(x, y));
    TVM_TRY_REWRITE(min(min(x, y), max(y, x)), min(x, y));
    TVM_TRY_REWRITE(min(max(x, y), x), x);
    TVM_TRY_REWRITE(min(max(x, y), y), y);
    TVM_TRY_REWRITE(min(min(x, y), x), min(x, y));
    TVM_TRY_REWRITE(min(min(x, y), y), min(x, y));
    TVM_TRY_REWRITE(min(x, max(x, y)), x);
    TVM_TRY_REWRITE(min(y, max(x, y)), y);
    TVM_TRY_REWRITE(min(x, min(x, y)), min(x, y));
    TVM_TRY_REWRITE(min(y, min(x, y)), min(x, y));
    // Deduplicate an operand repeated inside a left-nested min chain.
    TVM_TRY_REWRITE(min(min(min(x, y), z), y), min(min(x, y), z));
    TVM_TRY_REWRITE(min(min(min(min(x, y), z), s1), y), min(min(min(x, y), z), s1));
    TVM_TRY_REWRITE(min(min(min(min(min(x, y), z), s1), s2), y),
                    min(min(min(min(x, y), z), s1), s2));
    // min/max distribution: factor out the shared operand x.
    TVM_TRY_REWRITE(min(max(x, y), max(x, z)), max(min(y, z), x));
    TVM_TRY_REWRITE(min(max(x, y), max(z, x)), max(min(y, z), x));
    TVM_TRY_REWRITE(min(max(y, x), max(x, z)), max(min(y, z), x));
    TVM_TRY_REWRITE(min(max(y, x), max(z, x)), max(min(y, z), x));
    TVM_TRY_REWRITE(min(min(x, y), min(x, z)), min(min(y, z), x));
    TVM_TRY_REWRITE(min(min(x, y), min(z, x)), min(min(y, z), x));
    TVM_TRY_REWRITE(min(min(y, x), min(x, z)), min(min(y, z), x));
    TVM_TRY_REWRITE(min(min(y, x), min(z, x)), min(min(y, z), x));
    // add distribution
    TVM_TRY_REWRITE(min(y + x, z + x), min(y, z) + x);
    TVM_TRY_REWRITE(min(y + x, x + z), min(y, z) + x);
    TVM_TRY_REWRITE(min(x + y, x + z), min(y, z) + x);
    TVM_TRY_REWRITE(min(x + y, z + x), min(y, z) + x);
    // sub distribution
    TVM_TRY_REWRITE(min(y - x, z - x), min(y, z) - x);
    TVM_TRY_REWRITE(min(x - y, x - z), x - max(y, z));
    // constant folding rule.
    TVM_TRY_REWRITE(min(min(x, c1), c2), min(x, min(c1, c2)));
    // scaling rule: dividing/multiplying by a negative constant flips
    // min into max.
    if (min(truncdiv(x, c1), truncdiv(y, c1)).Match(ret)) {
      if (c1.Eval()->value > 0) {
        return truncdiv(min(x, y), c1).Eval();
      } else {
        return truncdiv(max(x, y), c1).Eval();
      }
    }
    if (min(floordiv(x, c1), floordiv(y, c1)).Match(ret)) {
      if (c1.Eval()->value > 0) {
        return floordiv(min(x, y), c1).Eval();
      } else {
        return floordiv(max(x, y), c1).Eval();
      }
    }
    if (min(x * c1, y * c1).Match(ret)) {
      if (c1.Eval()->value > 0) {
        return (min(x, y) * c1).Eval();
      } else {
        return (max(x, y) * c1).Eval();
      }
    }
    if (min(x * c1, c2).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      // x * 0 == 0, so the min is whichever constant is smaller.
      if (c1val == 0) {
        return c2val < 0 ? c2.Eval() : c1.Eval();
      }
      if (c2val % c1val == 0) {
        if (c1val > 0) {
          return (min(x, c2val / c1val) * c1val).Eval();
        } else {
          return (max(x, c2val / c1val) * c1val).Eval();
        }
      }
    }
    // canonicalization
    TVM_TRY_RECURSIVE_REWRITE(min(min(x, c1), y), min(min(x, y), c1));
    TVM_TRY_RECURSIVE_REWRITE_IF(min(c1 - x, c2), c1 - max(x, c1 - c2), c2.Eval()->value != 0);
  }
  // condition rules.
  TVM_TRY_REWRITE(min(select(x, y, z), select(x, s1, s2)), select(x, min(y, s1), min(z, s2)));
  return ret;
}
// Simplify a max node — the mirror image of the MinNode rules: vector
// rules, constant-int-bound elimination, min/max absorption and
// distribution over +/-, div/mul scaling (flipping max <-> min for
// negative scales), and constant canonicalization.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const MaxNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<MaxNode>();
  PrimExpr const_res = TryConstFold<Max>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y, z, s1, s2;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2;
  PVar<int> lanes;
  // vector rule
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(max(broadcast(x, lanes), broadcast(y, lanes)), broadcast(max(x, y), lanes));
    TVM_TRY_REWRITE(max(max(x, broadcast(y, lanes)), broadcast(z, lanes)),
                    max(x, broadcast(max(y, z), lanes)));
  }
  if (IsIndexType(op->dtype)) {
    TVM_TRY_REWRITE(max(x, x), x);
    // constant int bound: if one side's range is entirely above the
    // other's, the max is decided without any pattern match.
    ConstIntBound a_bound = analyzer_->const_int_bound(op->a);
    ConstIntBound b_bound = analyzer_->const_int_bound(op->b);
    if (a_bound->min_value >= b_bound->max_value) {
      return op->a;
    }
    if (b_bound->min_value >= a_bound->max_value) {
      return op->b;
    }
    // constant comparison
    if (max(x + c1, x + c2).Match(ret)) {
      if (c1.Eval()->value > c2.Eval()->value) {
        return (x + c1).Eval();
      } else {
        return (x + c2).Eval();
      }
    }
    if (max(x + c1, x).Match(ret) || max(x, x + c1).Match(ret)) {
      if (c1.Eval()->value > 0) {
        return (x + c1).Eval();
      } else {
        return x.Eval();
      }
    }
    if (max(c1 - x, c2 - x).Match(ret)) {
      if (c1.Eval()->value > c2.Eval()->value) {
        return (c1 - x).Eval();
      } else {
        return (c2 - x).Eval();
      }
    }
    // DivMod rules
    // Divide up rounding: trunc div
    // NOTE: truncdiv(x, y) >= floordiv(x, y)
    TVM_TRY_REWRITE_IF(max(truncdiv(x + c1, c2) * c2, x), truncdiv(x + c1, c2) * c2,
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    TVM_TRY_REWRITE_IF(max(x, truncdiv(x + c1, c2) * c2), truncdiv(x + c1, c2) * c2,
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    // Divide up rounding: floor div
    TVM_TRY_REWRITE_IF(max(floordiv(x + c1, c2) * c2, x), floordiv(x + c1, c2) * c2,
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    TVM_TRY_REWRITE_IF(max(x, floordiv(x + c1, c2) * c2), floordiv(x + c1, c2) * c2,
                       c2.Eval()->value > 0 && c1.Eval()->value + 1 == c2.Eval()->value);
    // floordiv(x, c2) * c2 <= x for positive c2.
    TVM_TRY_REWRITE_IF(max(floordiv(x, c2) * c2, x), x, c2.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(max(x, floordiv(x, c2) * c2), x, c2.Eval()->value > 0);
    // min/max absorption rules.
    TVM_TRY_REWRITE(max(min(x, y), max(x, y)), max(x, y));
    TVM_TRY_REWRITE(max(min(x, y), max(y, x)), max(x, y));
    TVM_TRY_REWRITE(max(max(x, y), min(x, y)), max(x, y));
    TVM_TRY_REWRITE(max(max(x, y), min(y, x)), max(x, y));
    TVM_TRY_REWRITE(max(min(x, y), x), x);
    TVM_TRY_REWRITE(max(min(x, y), y), y);
    TVM_TRY_REWRITE(max(max(x, y), x), max(x, y));
    TVM_TRY_REWRITE(max(max(x, y), y), max(x, y));
    TVM_TRY_REWRITE(max(x, min(x, y)), x);
    TVM_TRY_REWRITE(max(y, min(x, y)), y);
    TVM_TRY_REWRITE(max(x, max(x, y)), max(x, y));
    TVM_TRY_REWRITE(max(y, max(x, y)), max(x, y));
    // Deduplicate an operand repeated inside a left-nested max chain.
    TVM_TRY_REWRITE(max(max(max(x, y), z), y), max(max(x, y), z));
    TVM_TRY_REWRITE(max(max(max(max(x, y), z), s1), y), max(max(max(x, y), z), s1));
    TVM_TRY_REWRITE(max(max(max(max(max(x, y), z), s1), s2), y),
                    max(max(max(max(x, y), z), s1), s2));
    // max/max cancellation
    TVM_TRY_REWRITE(max(max(x, y), max(x, z)), max(max(y, z), x));
    TVM_TRY_REWRITE(max(max(x, y), max(z, x)), max(max(y, z), x));
    TVM_TRY_REWRITE(max(max(y, x), max(x, z)), max(max(y, z), x));
    TVM_TRY_REWRITE(max(max(y, x), max(z, x)), max(max(y, z), x));
    // max/min distribution
    TVM_TRY_REWRITE(max(min(x, y), min(x, z)), min(max(y, z), x));
    TVM_TRY_REWRITE(max(min(x, y), min(z, x)), min(max(y, z), x));
    TVM_TRY_REWRITE(max(min(y, x), min(x, z)), min(max(y, z), x));
    TVM_TRY_REWRITE(max(min(y, x), min(z, x)), min(max(y, z), x));
    // add distribution
    TVM_TRY_REWRITE(max(y + x, z + x), max(y, z) + x);
    TVM_TRY_REWRITE(max(y + x, x + z), max(y, z) + x);
    TVM_TRY_REWRITE(max(x + y, x + z), max(y, z) + x);
    TVM_TRY_REWRITE(max(x + y, z + x), max(y, z) + x);
    // sub distribution
    TVM_TRY_REWRITE(max(y - x, z - x), max(y, z) - x);
    TVM_TRY_REWRITE(max(x - y, x - z), x - min(y, z));
    // constant folding rule.
    TVM_TRY_REWRITE(max(max(x, c1), c2), max(x, max(c1, c2)));
    // scaling rule: dividing/multiplying by a negative constant flips
    // max into min.
    if (max(truncdiv(x, c1), truncdiv(y, c1)).Match(ret)) {
      if (c1.Eval()->value > 0) {
        return truncdiv(max(x, y), c1).Eval();
      } else {
        return truncdiv(min(x, y), c1).Eval();
      }
    }
    if (max(floordiv(x, c1), floordiv(y, c1)).Match(ret)) {
      if (c1.Eval()->value > 0) {
        return floordiv(max(x, y), c1).Eval();
      } else {
        return floordiv(min(x, y), c1).Eval();
      }
    }
    if (max(x * c1, y * c1).Match(ret)) {
      if (c1.Eval()->value > 0) {
        return (max(x, y) * c1).Eval();
      } else {
        return (min(x, y) * c1).Eval();
      }
    }
    if (max(x * c1, c2).Match(ret)) {
      int64_t c1val = c1.Eval()->value;
      int64_t c2val = c2.Eval()->value;
      // x * 0 == 0, so the max is whichever constant is larger.
      if (c1val == 0) {
        return c2val > 0 ? c2.Eval() : c1.Eval();
      }
      if (c2val % c1val == 0) {
        if (c1val > 0) {
          return (max(x, c2val / c1val) * c1val).Eval();
        } else {
          return (min(x, c2val / c1val) * c1val).Eval();
        }
      }
    }
    // canonicalization
    TVM_TRY_RECURSIVE_REWRITE(max(max(x, c1), y), max(max(x, y), c1));
    TVM_TRY_RECURSIVE_REWRITE_IF(max(c1 - x, c2), c1 - min(x, c1 - c2), c2.Eval()->value != 0);
  }
  // condition rules.
  TVM_TRY_REWRITE(max(select(x, y, z), select(x, s1, s2)), select(x, max(y, s1), max(z, s2)));
  return ret;
}
// Simplify an equality comparison (a == b).
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const EQNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<EQNode>();
  // Full constant folding first.
  PrimExpr const_res = TryConstFold<EQ>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y;
  // Pattern var match IntImm
  PVar<IntImm> c1;
  PVar<int> lanes;
  // vector rule
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(broadcast(x, lanes) == broadcast(y, lanes), broadcast(x == y, lanes));
  }
  if (IsIndexType(op->a.dtype())) {
    // Ask the analyzer to prove or refute (a - b) == 0 directly.
    CompareResult result = TryCompare(op->a - op->b, 0);
    if (result == kEQ) {
      return make_const(op->dtype, true);
    } else if (result == kNE || result == kGT || result == kLT) {
      return make_const(op->dtype, false);
    }
    // Canonicalize constant offsets onto one side.
    TVM_TRY_REWRITE(x - c1 == 0, x == c1);
    TVM_TRY_REWRITE(c1 - x == 0, x == c1);
    TVM_TRY_REWRITE(x + c1 == 0, x == 0 - c1);
    TVM_TRY_REWRITE(x * y == 0, x == 0 || y == 0);
  }
  return ret;
}
// a != b is simplified as !(a == b), reusing the EQ and Not rule sets.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const NENode* op) {
  PrimExpr negated_eq = Not(op->a == op->b);
  return this->VisitExpr(negated_eq);
}
// a <= b is rewritten to !(b < a) so only the LT rules are needed.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const LENode* op) {
  PrimExpr flipped = op->b < op->a;
  return this->VisitExpr(Not(flipped));
}
// a > b is the mirror of b < a; delegate to the LT rules.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const GTNode* op) {
  PrimExpr flipped = op->b < op->a;
  return this->VisitExpr(flipped);
}
// a >= b is rewritten to !(a < b); delegate to the LT and Not rules.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const GENode* op) {
  PrimExpr less_than = op->a < op->b;
  return this->VisitExpr(Not(less_than));
}
// Simplify a less-than comparison (a < b). All other inequality operators
// (<=, >, >=) are canonicalized into LT above, so this holds the full rule set.
// NOTE: rules are tried in order; reordering them changes which form wins.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const LTNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<LTNode>();
  PrimExpr const_res = TryConstFold<LT>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y, z, s1, s2;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2;
  PVar<int> lanes;
  // vector rule
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(broadcast(x, lanes) < broadcast(y, lanes), broadcast(x < y, lanes));
    TVM_TRY_REWRITE(ramp(x, s1, lanes) < ramp(y, s1, lanes), broadcast(x < y, lanes));
  }
  if (IsIndexType(op->a.dtype())) {
    // Ask the analyzer to decide the sign of (a - b) outright.
    CompareResult result = TryCompare(op->a - op->b, 0);
    if (result == kLT) {
      return make_const(op->dtype, true);
    }
    if (result == kEQ || result == kGT || result == kGE) {
      return make_const(op->dtype, false);
    }
    // clang-format off
    // Cancel common terms appearing on both sides.
    TVM_TRY_REWRITE(x + y < x + z, y < z);
    TVM_TRY_REWRITE(x + y < z + x, y < z);
    TVM_TRY_REWRITE(y + x < x + z, y < z);
    TVM_TRY_REWRITE(y + x < z + x, y < z);
    TVM_TRY_REWRITE(y - x < z - x, y < z);
    TVM_TRY_REWRITE(x - y < x - z, z < y);
    TVM_TRY_REWRITE(x < x + z, 0 < z);
    TVM_TRY_REWRITE(x < z + x, 0 < z);
    TVM_TRY_REWRITE(x < x - z, z < 0);
    TVM_TRY_REWRITE(c1 < x + c2, c1 - c2 < x);
    TVM_TRY_REWRITE(c1 < c2 - x, x < c2 - c1);
    TVM_TRY_REWRITE_IF(x * c1 < y * c1, x < y, c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(x * c1 < y * c1, y < x, c1.Eval()->value < 0);
    // constant cancelation: only need to make use of one mod
    // trunc div
    TVM_TRY_REWRITE_IF(x * c2 < c1,
                       x < truncdiv(c1 - 1, c2) + 1, c1.Eval()->value > 0 && c2.Eval()->value > 0);
    // NOTE: trunc div required
    TVM_TRY_REWRITE_IF(x * c2 < c1, x < truncdiv(c1, c2),
                       c1.Eval()->value <= 0 && c2.Eval()->value > 0);
    // NOTE: trunc div required (euclidean is ok too, floored is not)
    TVM_TRY_REWRITE_IF(x * c2 < c1, truncdiv(c1 - 1, c2) - 1 < x, c1.Eval()->value > 0 &&
                       c2.Eval()->value < 0);
    // NOTE: trunc div required (floored is ok too, euclidean is not)
    TVM_TRY_REWRITE_IF(x * c2 < c1, truncdiv(c1, c2) < x,
                       c1.Eval()->value <= 0 && c2.Eval()->value < 0);
    // NOTE: trunc div required
    TVM_TRY_REWRITE_IF(c1 < x * c2, truncdiv(c1 + 1, c2) - 1 < x,
                       c1.Eval()->value < 0 && c2.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(c1 < x * c2, truncdiv(c1, c2) < x,
                       c1.Eval()->value >= 0 && c2.Eval()->value > 0);
    // NOTE: trunc div required (floored is ok too, euclidean is not)
    TVM_TRY_REWRITE_IF(c1 < x * c2, x < truncdiv(c1 + 1, c2) + 1,
                       c1.Eval()->value < 0 && c2.Eval()->value < 0);
    // NOTE: trunc div required (euclidean is ok too, floored is not)
    TVM_TRY_REWRITE_IF(c1 < x * c2, x < truncdiv(c1, c2),
                       c1.Eval()->value >= 0 && c2.Eval()->value < 0);
    // DivMod rules
    // truncdiv
    TVM_TRY_REWRITE_IF(truncdiv(x, c1) < c2,
                       x < c1 * c2, c1.Eval()->value > 0 && c2.Eval()->value > 0);
    // NOTE: trunc div required
    TVM_TRY_REWRITE_IF(truncdiv(x, c1) < c2,
                       x < c1 * (c2 - 1) + 1, c1.Eval()->value > 0 && c2.Eval()->value <= 0);
    TVM_TRY_REWRITE_IF(c1 < truncdiv(x, c2), (c1 + 1) * c2 - 1 < x,
                       c1.Eval()->value >= 0 && c2.Eval()->value > 0);
    // NOTE: trunc div required
    TVM_TRY_REWRITE_IF(c1 < truncdiv(x, c2), c1 * c2 < x,
                       c1.Eval()->value < 0 && c2.Eval()->value > 0);
    // invariance for any div mod: x - (x / c1) * c1 == x % c1
    TVM_TRY_REWRITE_IF(truncdiv(x, c1) * c1 < x, 0 < truncmod(x, c1), c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(truncdiv(x, c1) * c1 < x + y,
                       0 < truncmod(x, c1) + y, c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(truncdiv(x, c1) * c1 < x - y,
                       y < truncmod(x, c1), c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(truncdiv(x + c2, c1) * c1 < x,
                       c2 < truncmod(x + c2, c1), c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(truncdiv(x + c2, c1) * c1 < x + y,
                       c2 < truncmod(x + c2, c1) + y, c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(truncdiv(x + c2, c1) * c1 < x - y,
                       y < truncmod(x + c2, c1) + (0 - c2), c1.Eval()->value > 0);
    // floordiv
    TVM_TRY_REWRITE_IF(floordiv(x, c1) < c2, x < c1 * c2, c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(c1 < floordiv(x, c2), (c1 + 1) * c2 - 1 < x, c2.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(floordiv(x, c1) * c1 < x, 0 < floormod(x, c1), c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(floordiv(x, c1) * c1 < x + y,
                       0 < floormod(x, c1) + y, c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(floordiv(x, c1) * c1 < x - y,
                       y < floormod(x, c1), c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(floordiv(x + c2, c1) * c1 < x,
                       c2 < floormod(x + c2, c1), c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(floordiv(x + c2, c1) * c1 < x + y,
                       c2 < floormod(x + c2, c1) + y, c1.Eval()->value > 0);
    TVM_TRY_REWRITE_IF(floordiv(x + c2, c1) * c1 < x - y,
                       y < floormod(x + c2, c1) + (0 - c2), c1.Eval()->value > 0);
    // canonicalization rule: push min/max and constant offsets outward,
    // recursing so the produced sub-expressions simplify further.
    TVM_TRY_RECURSIVE_REWRITE(min(x, y) < z, x < z || y < z);
    TVM_TRY_RECURSIVE_REWRITE(max(x, y) < z, x < z && y < z);
    TVM_TRY_RECURSIVE_REWRITE(z < min(x, y), z < x && z < y);
    TVM_TRY_RECURSIVE_REWRITE(z < max(x, y), z < x || z < y);
    TVM_TRY_RECURSIVE_REWRITE(x < c1 - y, x + y < c1);
    TVM_TRY_RECURSIVE_REWRITE(x < c1 + y, x - y < c1);
    TVM_TRY_RECURSIVE_REWRITE(c1 - y < x, c1 < x + y);
    TVM_TRY_RECURSIVE_REWRITE(c1 + y < x, c1 < x - y);
    TVM_TRY_RECURSIVE_REWRITE(x + c1 < c2, x < c2 - c1);
    TVM_TRY_RECURSIVE_REWRITE(x - c1 < c2, x < c2 + c1);
    TVM_TRY_REWRITE(x - c1 < 0, x < c1);
    // clang-format on
  }
  return ret;
}
// Simplify a boolean negation by pushing ! through comparisons and
// logical connectives (De Morgan).
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const NotNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<NotNode>();
  PrimExpr const_res = TryConstFold<Not>(op->a);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y;
  PVar<int> lanes;
  // vector rule
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(!broadcast(x, lanes), broadcast(!x, lanes));
  }
  // Negations of comparisons fold into the opposite comparison.
  TVM_TRY_REWRITE(!(!x), x);
  TVM_TRY_REWRITE(!(x <= y), y < x);
  TVM_TRY_REWRITE(!(x >= y), x < y);
  TVM_TRY_REWRITE(!(x < y), y <= x);
  TVM_TRY_REWRITE(!(x > y), x <= y);
  TVM_TRY_REWRITE(!(x == y), x != y);
  TVM_TRY_REWRITE(!(x != y), x == y);
  // De Morgan: recurse so the pushed-down negations simplify further.
  TVM_TRY_RECURSIVE_REWRITE(!(x || y), (!x) && (!y));
  TVM_TRY_RECURSIVE_REWRITE(!(x && y), (!x) || (!y));
  return ret;
}
// Simplify a logical conjunction (a && b), detecting contradictions.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const AndNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<AndNode>();
  PrimExpr const_res = TryConstFold<And>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2;
  PVar<int> lanes;
  // vector rule
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(broadcast(x, lanes) && broadcast(y, lanes), broadcast(x && y, lanes));
  }
  auto cfalse = PConst<PrimExpr>(make_const(op->dtype, false));
  // Direct contradictions.
  TVM_TRY_REWRITE(x == y && x != y, cfalse);
  TVM_TRY_REWRITE(x != y && x == y, cfalse);
  TVM_TRY_REWRITE(x && !x, cfalse);
  TVM_TRY_REWRITE(x <= y && y < x, cfalse);
  TVM_TRY_REWRITE(y < x && x <= y, cfalse);
  // Empty integer intervals: the bounds leave no value for x.
  TVM_TRY_REWRITE_IF(x < c1 && c2 < x, cfalse, c2.Eval()->value + 1 >= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(c2 < x && x < c1, cfalse, c2.Eval()->value + 1 >= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(x < c1 && c2 <= x, cfalse, c2.Eval()->value >= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(c2 <= x && x < c1, cfalse, c2.Eval()->value >= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(x <= c1 && c2 < x, cfalse, c2.Eval()->value >= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(c2 < x && x <= c1, cfalse, c2.Eval()->value >= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(x <= c1 && c2 <= x, cfalse, c2.Eval()->value > c1.Eval()->value);
  TVM_TRY_REWRITE_IF(c2 <= x && x <= c1, cfalse, c2.Eval()->value > c1.Eval()->value);
  // x == c1 already decides x != c2; fold to a constant comparison.
  TVM_TRY_REWRITE(x == c1 && x != c2, x == c1 && c1 != c2);
  TVM_TRY_REWRITE(x != c2 && x == c1, x == c1 && c1 != c2);
  return ret;
}
// Simplify a logical disjunction (a || b), detecting tautologies.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const OrNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<OrNode>();
  PrimExpr const_res = TryConstFold<Or>(op->a, op->b);
  if (const_res.defined()) return const_res;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y;
  // Pattern var match IntImm
  PVar<IntImm> c1, c2;
  PVar<int> lanes;
  // vector rule
  if (op->dtype.lanes() != 1) {
    TVM_TRY_REWRITE(broadcast(x, lanes) || broadcast(y, lanes), broadcast(x || y, lanes));
  }
  auto ctrue = PConst<PrimExpr>(make_const(op->dtype, true));
  // Direct tautologies.
  TVM_TRY_REWRITE(x == y || x != y, ctrue);
  TVM_TRY_REWRITE(x != y || x == y, ctrue);
  TVM_TRY_REWRITE(x || !x, ctrue);
  TVM_TRY_REWRITE(x <= y || y < x, ctrue);
  TVM_TRY_REWRITE(y < x || x <= y, ctrue);
  // Interval covers: the two bounds jointly cover every integer.
  TVM_TRY_REWRITE_IF(x < c1 || c2 < x, ctrue, c2.Eval()->value < c1.Eval()->value);
  TVM_TRY_REWRITE_IF(c2 < x || x < c1, ctrue, c2.Eval()->value < c1.Eval()->value);
  TVM_TRY_REWRITE_IF(x <= c1 || c2 < x, ctrue, c2.Eval()->value <= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(c2 < x || x <= c1, ctrue, c2.Eval()->value <= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(x < c1 || c2 <= x, ctrue, c2.Eval()->value <= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(c2 <= x || x < c1, ctrue, c2.Eval()->value <= c1.Eval()->value);
  TVM_TRY_REWRITE_IF(x <= c1 || c2 <= x, ctrue, c2.Eval()->value <= c1.Eval()->value + 1);
  TVM_TRY_REWRITE_IF(c2 <= x || x <= c1, ctrue, c2.Eval()->value <= c1.Eval()->value + 1);
  // x != c1 already decides x == c2; fold to a constant comparison.
  TVM_TRY_REWRITE(x != c1 || x == c2, x != c1 || c1 == c2);
  TVM_TRY_REWRITE(x == c2 || x != c1, x != c1 || c1 == c2);
  return ret;
}
// Simplify select(cond, t, f): when both arms are identical, the condition
// is irrelevant.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const SelectNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<SelectNode>();
  // The mutated expression may no longer be a Select node.
  if (op == nullptr) return ret;
  // Pattern var to match any expression
  PVar<PrimExpr> x, y;
  TVM_TRY_REWRITE(select(x, y, y), y);
  return ret;
}
// Simplify intrinsic calls: fold constant likely()/shifts, and discharge
// likely() hints that are implied by an active constraint.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const CallNode* op) {
  // add condition context to if_then_else
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<CallNode>();
  if (op == nullptr) return ret;
  if (op->op.same_as(tir::builtin::likely()) && is_const_int(op->args[0])) {
    // likely(const) carries no information; strip the hint.
    return op->args[0];
  } else if (op->op.same_as(tir::builtin::shift_right())) {
    if (op->args[0].as<IntImmNode>() && op->args[1].as<IntImmNode>()) {
      // the operator overload will eagerly constant fold.
      return op->args[0] >> op->args[1];
    }
  } else if (op->op.same_as(tir::builtin::shift_left())) {
    if (op->args[0].as<IntImmNode>() && op->args[1].as<IntImmNode>()) {
      // the operator overload will eagerly constant fold.
      return op->args[0] << op->args[1];
    }
  }
  ExprDeepEqual expr_equal;
  if (op->op.same_as(tir::builtin::likely())) {
    for (const auto& constraint : literal_constraints_) {
      // Cases such as for (i, 0, bound) {if (likely(iter_var < bound)) { .. } }
      if (expr_equal(constraint, op->args[0])) {
        return make_const(op->dtype, true);
      }
    }
  }
  return ret;
}
// Substitute a variable with its bound value when one was registered via
// Update(); otherwise return the variable unchanged.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const VarNode* op) {
  Var var = GetRef<Var>(op);
  auto entry = var_map_.find(var);
  if (entry == var_map_.end()) {
    return GetRef<PrimExpr>(op);
  }
  return entry->second;
}
// Simplify a cast by rebuilding it through cast() on the simplified operand.
// NOTE(review): `op` is not null-checked after as<CastNode>(), unlike the
// Select/Call visitors — presumably the mutator always yields a CastNode
// here; confirm before relying on it.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const CastNode* op) {
  PrimExpr ret = IRMutatorWithAnalyzer::VisitExpr_(op);
  op = ret.as<CastNode>();
  return cast(op->dtype, op->value);
}
// A let-binding may be inlined only when its value is trivial to duplicate —
// a numeric constant or a plain variable reference. Anything larger is kept
// as a binding to avoid deep expression explosion.
bool RewriteSimplifier::Impl::CanInlineLet(const LetNode* op) {
  return is_const_number(op->value) || op->value.as<VarNode>() != nullptr;
}
// Simplify a Let expression: trivial bindings are inlined into the body;
// otherwise the binding is kept and both halves are simplified.
PrimExpr RewriteSimplifier::Impl::VisitExpr_(const LetNode* op) {
  PrimExpr value = this->VisitExpr(op->value);
  if (CanInlineLet(op)) {
    // it is fine to discard the let binding
    // because the value will always be inlined in the simplifier.
    analyzer_->Bind(op->var, value);
    return this->VisitExpr(op->body);
  }
  PrimExpr body = this->VisitExpr(op->body);
  if (value.same_as(op->value) && body.same_as(op->body)) {
    // Nothing changed; reuse the original node.
    return GetRef<PrimExpr>(op);
  } else {
    return Let(op->var, value, body);
  }
}
// Iterate the simplifier until it reaches a fixed point, capped at two passes.
PrimExpr RewriteSimplifier::operator()(const PrimExpr& expr) {
  // Run simplification in post order
  const int kMaxIter = 2;
  PrimExpr current = expr;
  for (int iter = 0; iter < kMaxIter; ++iter) {
    PrimExpr simplified = impl_->operator()(current);
    if (simplified.same_as(current)) break;
    current = simplified;
  }
  return current;
}
// Forward a variable binding to the implementation.
void RewriteSimplifier::Update(const Var& var, const PrimExpr& info, bool allow_override) {
  impl_->Update(var, info, allow_override);
}
// Enter a constraint scope; invoking the returned functor exits it.
std::function<void()> RewriteSimplifier::EnterConstraint(const PrimExpr& constraint) {
  return impl_->EnterConstraint(constraint);
}
// impl_ is a raw owning pointer; it shares the parent analyzer for sub-queries
// and is released in the destructor below.
RewriteSimplifier::RewriteSimplifier(Analyzer* parent) : impl_(new Impl(parent)) {}
RewriteSimplifier::~RewriteSimplifier() { delete impl_; }
} // namespace arith
} // namespace tvm
|
/*
* ofxSimpleGuiComboBox.cpp
* AlgorhythmicSorting
*
* Created by Administrator on 7/2/10.
* Copyright 2010 __MyCompanyName__. All rights reserved.
*
*/
#include "ofxSimpleGuiComboBox.h"
#include "ofxSimpleGuiPage.h"
#define kMaxChoiceStringLen 150
#define kMaxNameStringLen 100
// Builds a combo box with `numChoices` entries. `choice_out` is held by
// reference: the selected index is written straight into the caller's int.
// When `choiceTitles` is null, entries are titled "0", "1", ...
ofxSimpleGuiComboBox::ofxSimpleGuiComboBox(string name, int &choice_out, int numChoices, ofxSimpleGuiPage *owner, string* choiceTitles ) :
ofxSimpleGuiControl(name),
m_selectedChoice(choice_out),
m_page(owner)
{
    // Start with the first choice selected (this also resets the caller's int).
    m_selectedChoice = m_mouseChoice = 0;
    // Guarantee at least one choice so the selected index stays valid.
    if(numChoices <=1)
        numChoices = 1;
    m_hasFocus=false;
    m_title = name;
    for(int i=0; i<numChoices; i++){
        // Fall back to the stringified index when no titles were supplied.
        addChoice(choiceTitles ? choiceTitles[i] : ofToString(i));
    }
    controlType = "ComboBox";
    setup();
}
// Nothing to release: all members clean up via their own destructors.
ofxSimpleGuiComboBox::~ofxSimpleGuiComboBox() {
}
// Replaces the title at `index`; out-of-range indices are ignored.
void ofxSimpleGuiComboBox::setTitleForIndex(int index, string title) {
    bool inRange = (index >= 0) && (index < (int)m_choices.size());
    if(inRange) {
        m_choices[index] = title;
    }
}
// Returns the title at `index`. For an out-of-range index this deliberately
// falls back to the currently selected choice's title, or a placeholder
// string when there are no choices at all.
string ofxSimpleGuiComboBox::getTitleForIndex(int index) {
    if(index < 0 || index >= m_choices.size()) return m_choices.size() ? m_choices[m_selectedChoice] : "No Choices Available";
    return m_choices[index];
}
// Inserts `title` at `index`; any out-of-range index appends at the end.
void ofxSimpleGuiComboBox::addChoice(string title, int index) {
    bool validIndex = (index >= 0) && (index < (int)m_choices.size());
    int where = validIndex ? index : (int)m_choices.size();
    m_choices.insert(m_choices.begin() + where, title);
}
// Linear search for a choice with the given title; returns -1 when absent.
int ofxSimpleGuiComboBox::getIndexForTitle(string title) {
    for(size_t i = 0; i < m_choices.size(); ++i) {
        if(m_choices[i] == title) {
            return (int)i;
        }
    }
    return -1;
}
// Removes the first choice whose title matches; unknown titles are ignored.
void ofxSimpleGuiComboBox::removeChoice(string title) {
    int found = getIndexForTitle(title);
    if(found < 0) return;
    removeChoice(found);
}
// Removes the choice at `index`. Out-of-range indices remove the last entry
// (preserving the historical behavior). Fixes two defects:
//  - calling this on an empty list previously erased at begin()-1 (UB);
//  - removing the selected entry at index 0 previously decremented
//    m_selectedChoice to -1, which is later used to index m_choices in draw().
void ofxSimpleGuiComboBox::removeChoice(int index) {
    if(m_choices.empty()) return;
    int removeIndex = (int)m_choices.size() - 1;
    if(index >= 0 && index < (int)m_choices.size())
        removeIndex = index;
    m_choices.erase(m_choices.begin() + removeIndex);
    // Shift the selection down so it keeps pointing at the same title,
    // clamping at 0 so the selection never goes negative.
    if(m_selectedChoice >= removeIndex && m_selectedChoice > 0)
        m_selectedChoice--;
    // m_mouseChoice may legitimately be -1 ("no hover"), so a plain decrement
    // is fine here; the >= guard already excludes the -1 case.
    if(m_mouseChoice >= removeIndex && m_mouseChoice > 0)
        m_mouseChoice--;
}
// Sizes the control to one grid cell wide and the configured collapsed height.
void ofxSimpleGuiComboBox::setup() {
    setSize(config->gridSize.x - config->padding.x, config->comboBoxHeight);
}
#ifndef OFXMSAGUI_DONT_USE_XML
// Restores the selected index from XML; defaults to 0 when the key is absent.
void ofxSimpleGuiComboBox::loadFromXML(ofxXmlSettings &XML) {
    setValue(XML.getValue(controlType + "_" + key + ":value", 0));
}
// Persists the control name and selected index under a "<type>_<key>" tag.
void ofxSimpleGuiComboBox::saveToXML(ofxXmlSettings &XML) {
    XML.addTag(controlType + "_" + key);
    XML.pushTag(controlType + "_" + key);
    XML.addValue("name", name);
    XML.addValue("value", getValue());
    XML.popTag();
}
#endif
// Keyboard input is intentionally ignored by the combo box.
void ofxSimpleGuiComboBox::keyPressed( int key ) {
}
// Returns the index of the currently selected choice.
int ofxSimpleGuiComboBox::getValue() {
    return m_selectedChoice;
}
// Selects the choice at `index`, clamped into the valid range.
// Fixes an off-by-one: the upper clamp bound was m_choices.size() (one past
// the end), so setValue(size()) could leave an out-of-range selection that
// is later used to index m_choices.
void ofxSimpleGuiComboBox::setValue(int index) {
    if(m_choices.empty()) {
        // Matches the old result for an empty list (clamp collapsed to 0).
        m_selectedChoice = 0;
        return;
    }
    m_selectedChoice = ofClamp(index, 0, (int)m_choices.size() - 1);
}
// Selects the choice whose title matches `title`. Unknown titles are now
// ignored; previously getIndexForTitle()'s -1 was clamped to 0, silently
// selecting the first choice instead of leaving the selection unchanged.
void ofxSimpleGuiComboBox::setValue(string title) {
    int index = getIndexForTitle(title);
    if(index >= 0) setValue(index);
}
//press was outside - handle.
// NOTE(review): this is a FREE function, not a member — the real handler is
// ofxSimpleGuiComboBox::onPressOutside below. It looks like leftover dead
// code; confirm nothing links against it before removing.
void onPressOutside(int x, int y, int button) {
}
// Mouse press: an unfocused click opens the dropdown and steals the page's
// events; a focused click closes it again.
void ofxSimpleGuiComboBox::onPress(int x, int y, int button) {
    //	beenPressed = true;
    // Track whether the user drags before releasing; onRelease() only closes
    // the dropdown when movement happened.
    m_mouseMovedSinceClick=false;
    //a click toggles focus state if we are off
    if(!m_hasFocus) {
        //expand the height for all choices
        //		setSize(config->gridSize.x - config->padding.x, config->comboBoxHeight * m_choices.size());
        m_hasFocus = true;
        //notify that we want to steal all events from the page
        m_page->SetEventStealingControl(*this);
    } else {
        //if we have focus, a click signals that we should lose it
        releaseEventStealingFocus();
    }
}
// A press outside the control closes the dropdown when it is open.
void ofxSimpleGuiComboBox::onPressOutside(int x, int y, int button){
    if(!m_hasFocus) return;
    releaseEventStealingFocus();
}
// Dragging over the control tracks the hovered choice, same as a plain move.
void ofxSimpleGuiComboBox::onDragOver(int x, int y, int button){
    //same behavior as mouse move
    onMouseMove(x,y);
}
// Dragging outside still tracks the hover so the highlight clears correctly.
void ofxSimpleGuiComboBox::onDragOutside(int x, int y, int button){
    //same behavior as mouse move
    onMouseMove(x,y);
}
// When closed, hit testing uses the base control rectangle; when open, the
// hit area extends downward to cover the popped-up choice list.
bool ofxSimpleGuiComboBox::hitTest(int tx, int ty) {
    if(!m_hasFocus)
        return ofxMSAInteractiveObject::hitTest(tx,ty);
    int fullheight = height + config->comboBoxTextHeight * m_choices.size();
    return ((tx > x) && (tx < x + width) && (ty > y) && (ty < y + fullheight));
}
// While the dropdown is open, map the cursor's y position to the hovered
// choice index; -1 means the cursor is outside the list.
void ofxSimpleGuiComboBox::onMouseMove(int x, int y) {
    m_mouseMovedSinceClick=true;
    if(m_hasFocus) {
        //see which index was selected.
        // NOTE(review): the formula divides the cursor offset below the header
        // row by the per-row text height — confirm against draw()'s row layout.
        float fChoice = (y - (height - config->comboBoxTextHeight) - (this->y + config->comboBoxTextHeight))/config->comboBoxTextHeight;
        //TODO:replace with OF constrain macro.
        m_mouseChoice = fChoice < 0?-1:(fChoice>= m_choices.size()? -1:fChoice);
    }
}
// A release outside the control behaves exactly like a release inside it.
void ofxSimpleGuiComboBox::onReleaseOutside(int x, int y, int button) {
    onRelease(x, y, button);
}
// Releasing after a drag commits the hovered choice and closes the dropdown.
// A plain click-release (no movement) leaves the dropdown open.
void ofxSimpleGuiComboBox::onRelease(int x, int y, int button) {
    bool commitSelection = m_hasFocus && m_mouseMovedSinceClick;
    if(commitSelection) {
        releaseEventStealingFocus();
    }
}
// Closes the dropdown: commits the hovered choice (if any), drops focus, and
// returns event routing to the page.
void ofxSimpleGuiComboBox::releaseEventStealingFocus(){
    //see which index was selected, but only if the user actually moved around.
    m_selectedChoice = m_mouseChoice >= 0? m_mouseChoice : m_selectedChoice;
    //a release toggles focus state if we are on - TODO: unless x and y don't change
    m_hasFocus = false;
    //	setSize(config->gridSize.x - config->padding.x, config->comboBoxHeight);
    //also let the page know we don't need to steal all the events and draw over anymore
    m_page->ReleaseEventStealingControl();
}
//special overloads - this is a hack - later think about making ofxSimpleGuiControl's methods virtual.
// Text color helper: highlighted while the combo box has focus.
void ofxSimpleGuiComboBox::setCBTextColor() {
    ofSetHexColor(m_hasFocus ? config->textOverColor : config->textColor);
}
// Background color helper: highlighted while the combo box has focus.
void ofxSimpleGuiComboBox::setCBTextBGColor() {
    ofSetHexColor(m_hasFocus ? config->textBGOverColor : config->textBGColor);
}
#define kSGCBTriangleWidth 10
#define KSGCBTrianglePadding 5
#define kSGCBTextPaddingX 3
#define kSGCBTextPaddingY 15
// Renders the collapsed header (title + selection + dropdown triangle), and,
// when focused, the expanded choice list with the hovered row inverted.
void ofxSimpleGuiComboBox::draw(float x, float y) {
    //we assume a max of 256 characters.
    // NOTE(review): only used by the commented-out sprintf below; dead local.
    char choiceBuf[256];
    setPosition(x, y);
    glPushMatrix();
    glTranslatef(x, y, 0);
    ofEnableAlphaBlending();
    ofFill();
    // Header background and text.
    setTextBGColor();
    ofRect(0, 0, width, height);
    setTextColor();
    //	sprintf(choiceBuf, "%s: %s", m_title, m_choices.size() ? m_choices[m_selectedChoice] : "(No Choices Available)");
    ofDrawBitmapString(m_title + "\n" + (m_choices.size() ? m_choices[m_selectedChoice] : "N/A"), kSGCBTextPaddingX, kSGCBTextPaddingY);
    //draw a combobox down triangle icon so the users know to click
    ofTriangle(width - (kSGCBTriangleWidth + KSGCBTrianglePadding), kSGCBTextPaddingY/2,
               width - (KSGCBTrianglePadding), kSGCBTextPaddingY/2,
               width - (kSGCBTriangleWidth/2 + KSGCBTrianglePadding), kSGCBTextPaddingY);
    if(m_hasFocus) {
        // Expanded dropdown: one row of comboBoxTextHeight per choice.
        setCBTextBGColor();
        ofRect(0, height, width, config->comboBoxTextHeight * m_choices.size());
        setTextColor();
        ofLine(0, config->comboBoxHeight-1, width, config->comboBoxHeight-1);
        for(int i=0; i < m_choices.size(); i++) {
            setCBTextColor();
            //invert for selected choice
            float curY = height + i*config->comboBoxTextHeight;
            if(i==m_mouseChoice){
                //draw a text colored rect so we can see the inverse
                ofRect(0, curY, width, config->comboBoxTextHeight);
                setCBTextBGColor();
            }
            ofDrawBitmapString(m_choices[i], kSGCBTextPaddingX, curY + kSGCBTextPaddingY);
        }
    }
    ofDisableAlphaBlending();
    glPopMatrix();
}
|
/*
The MIT License (MIT)
Copyright (c) 2017 Lancaster University.
Copyright (c) 2018 Paul ADAM, Europe.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
#include "Gyroscope.h"
#include "ErrorNo.h"
#include "Event.h"
#include "CodalCompat.h"
#include "CodalFiber.h"
using namespace codal;
/**
 * Constructor.
 * Create a software abstraction of a generic gyroscope.
 *
 * (The previous comment described an FXSO8700 accelerometer/magnetometer and
 * I2C parameters that this constructor does not take — copy-paste leftover.)
 *
 * @param cspace the coordinate space transform applied to incoming samples.
 *
 * @param id the unique event identifier of this component.
 *
 */
Gyroscope::Gyroscope(CoordinateSpace &cspace, uint16_t id) : sample(), sampleENU(), coordinateSpace(cspace)
{
    // Store our identifiers.
    this->id = id;
    this->status = 0;
    // Default sample period of 20ms (50Hz) and a range of 2 (units are
    // driver-specific; subclasses reconfigure via setRange()).
    this->samplePeriod = 20;
    this->sampleRange = 2;
}
/**
 * Stores the given sample in this object's buffers.
 *
 * The raw (ENU) sample is kept alongside a copy transformed into the
 * coordinate space supplied at construction, and listeners are notified that
 * fresh data is available.
 *
 * @param s the latest gyroscope reading.
 *
 * @return DEVICE_OK on success.
 */
int Gyroscope::update(Sample3D s)
{
    // Store the new data, after performing any necessary coordinate transformations.
    sampleENU = s;
    sample = coordinateSpace.transform(s);
    // Indicate that pitch and roll data is now stale, and needs to be recalculated if needed.
    status &= ~GYROSCOPE_IMU_DATA_VALID;
    // Indicate that a new sample is available
    // NOTE(review): constructing a CODAL Event fires it as a side effect, so
    // `e` being unused afterwards is presumably intentional — confirm.
    Event e(id, GYROSCOPE_EVT_DATA_UPDATE);
    return DEVICE_OK;
};
/**
 * A service function.
 * It calculates the squared magnitude of the latest sample (x^2 + y^2 + z^2).
 * It does not, however, square root the result, as this is a relatively high cost operation.
 *
 * This is left to application code should it be needed.
 *
 * @return the sum of the squares of the latest sample across all axes,
 *         computed modulo 2^32 (large axis values may wrap).
 */
uint32_t Gyroscope::instantaneousAccelerationSquared()
{
    requestUpdate();
    // Use pythagoras theorem to determine the combined magnitude of the sample.
    // Squaring via uint32_t is exact modulo 2^32 even for negative axis values.
    return (uint32_t)sample.x*(uint32_t)sample.x + (uint32_t)sample.y*(uint32_t)sample.y + (uint32_t)sample.z*(uint32_t)sample.z;
}
/**
* Attempts to set the sample rate of the gyroscope to the specified value (in ms).
*
* @param period the requested time between samples, in milliseconds.
*
* @return DEVICE_OK on success, DEVICE_I2C_ERROR is the request fails.
*
* @code
* // sample rate is now 20 ms.
* gyroscope.setPeriod(20);
* @endcode
*
* @note The requested rate may not be possible on the hardware. In this case, the
* nearest lower rate is chosen.
*/
// Requests a new sample period (ms). configure() pushes the request to the
// hardware driver; samplePeriod is then re-read so it reflects what the
// driver actually accepted.
int Gyroscope::setPeriod(int period)
{
    samplePeriod = period;
    const int result = configure();
    samplePeriod = getPeriod();
    return result;
}
/**
 * Reads the currently configured sample rate of the gyroscope.
 *
 * @return The time between samples, in milliseconds.
 */
int Gyroscope::getPeriod()
{
    return (int)samplePeriod;
}
/**
* Attempts to set the sample range of the gyroscope to the specified value.
*
* @param range The requested sample range of samples, in g.
*
* @return DEVICE_OK on success, DEVICE_I2C_ERROR is the request fails.
*
* @code
* // the sample range of the accelerometer is now 8G.
* gyroscope.setRange(8);
* @endcode
*
* @note The requested range may not be possible on the hardware. In this case, the
* nearest lower range is chosen.
*/
// Requests a new sample range. configure() pushes the request to the hardware
// driver; sampleRange is then re-read so it reflects what the driver accepted.
int Gyroscope::setRange(int range)
{
    sampleRange = range;
    const int result = configure();
    sampleRange = getRange();
    return result;
}
/**
 * Reads the currently configured sample range of the gyroscope.
 *
 * @return The sample range (units are driver-specific).
 */
int Gyroscope::getRange()
{
    return (int)sampleRange;
}
/**
 * Configures the gyroscope for the range and sample rate defined
 * in this object. The nearest values are chosen to those defined
 * that are supported by the hardware. The instance variables are then
 * updated to reflect reality.
 *
 * @return DEVICE_OK on success, DEVICE_I2C_ERROR if the gyroscope could not be configured.
 *
 * @note This method should be overidden by the hardware driver to implement the requested
 * changes in hardware.
 */
int Gyroscope::configure()
{
    // The base class has no hardware access; drivers must override this.
    return DEVICE_NOT_SUPPORTED;
}
/**
 * Poll to see if new data is available from the hardware. If so, update it.
 * n.b. it is not necessary to explicitly call this function to update data
 * (it normally happens in the background when the scheduler is idle), but a check is performed
 * if the user explicitly requests up to date data.
 *
 * @return DEVICE_OK on success, DEVICE_I2C_ERROR if the update fails.
 *
 * @note This method should be overidden by the hardware driver to implement the requested
 * changes in hardware.
 */
int Gyroscope::requestUpdate()
{
    // The base class has no hardware access; drivers must override this.
    return DEVICE_NOT_SUPPORTED;
}
/**
 * Reads the last sample stored, and provides it in the coordinate system requested.
 *
 * @param coordinateSystem The coordinate system to use.
 * @return The latest sample, transformed into the requested coordinate system.
 */
Sample3D Gyroscope::getSample(CoordinateSystem coordinateSystem)
{
    requestUpdate();
    // Transform from the raw ENU sample so any coordinate system can be served.
    return coordinateSpace.transform(sampleENU, coordinateSystem);
}
/**
 * Reads the last sample stored, in the coordinate system defined in the constructor.
 * @return The latest sample in the default coordinate space.
 */
Sample3D Gyroscope::getSample()
{
    requestUpdate();
    return sample;
}
/**
 * Reads the x axis value of the latest sample, in the default coordinate
 * system specified in the constructor.
 *
 * @return the latest reading on the x axis.
 */
int Gyroscope::getX()
{
    requestUpdate();
    return sample.x;
}
/**
 * Reads the y axis value of the latest sample, in the default coordinate
 * system specified in the constructor.
 *
 * @return the latest reading on the y axis.
 */
int Gyroscope::getY()
{
    requestUpdate();
    return sample.y;
}
/**
 * Reads the z axis value of the latest sample, in the default coordinate
 * system specified in the constructor.
 *
 * @return the latest reading on the z axis.
 */
int Gyroscope::getZ()
{
    requestUpdate();
    return sample.z;
}
/**
 * Destructor. Nothing to clean up here; present so subclasses destruct
 * correctly. (The previous comment about deregistering fiber components
 * described code that does not exist in this body.)
 */
Gyroscope::~Gyroscope()
{
}
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author @shugeo
//
#include <op_boilerplate.h>
#if NOT_EXCLUDED(OP_weighted_cross_entropy_with_logits)
#include <ops/declarable/headers/parity_ops.h>
#include <ops/declarable/helpers/legacy_helpers.h>
namespace nd4j {
namespace ops {
// weighted_cross_entropy_with_logits(targets, logits, weights) -> output.
// targets and logits must share a shape; weights is either a scalar or a
// vector whose length equals the size of targets' last dimension.
OP_IMPL(weighted_cross_entropy_with_logits, 3, 1, true) {
    auto targets = INPUT_VARIABLE(0);
    auto input = INPUT_VARIABLE(1);
    auto weights = INPUT_VARIABLE(2);
    auto output = OUTPUT_VARIABLE(0);
    // Validate shapes up front so the helper can assume conforming inputs.
    REQUIRE_TRUE(targets->isSameShape(input), 0, "WEIGHTED_CROSS_ENTROPY_WITH_LOGITS op: The shape of both input params should be equal, but got input_shape=%s and targets_shape=%s !", ShapeUtils::shapeAsString(input).c_str(), ShapeUtils::shapeAsString(targets).c_str());
    REQUIRE_TRUE(weights->isScalar() || targets->sizeAt(-1) == weights->lengthOf(), 0, "WEIGHTED_CROSS_ENTROPY_WITH_LOGITS op: The weights should be scalar or vector with length equal to size of last targets dimension, but got weights_shape=%s instead!", ShapeUtils::shapeAsString(weights).c_str());
    // Delegate the element-wise computation to the shared helper.
    helpers::weightedCrossEntropyWithLogitsFunctor(block.launchContext(), targets, input, weights, output);
    return Status::OK();
}
// Type registration: any input type is accepted; output is floating point.
DECLARE_TYPES(weighted_cross_entropy_with_logits) {
    getOpDescriptor()
            ->setAllowedInputTypes(nd4j::DataType::ANY)
            ->setAllowedOutputTypes({ALL_FLOATS});
}
}
}
#endif
|
/////////////////////////////////////////////////////////////////////////////
// Name: src/common/sckipc.cpp
// Purpose: Interprocess communication implementation (wxSocket version)
// Author: Julian Smart
// Modified by: Guilhem Lavaux (big rewrite) May 1997, 1998
// Guillermo Rodriguez (updated for wxSocket v2) Jan 2000
// (callbacks deprecated) Mar 2000
// Vadim Zeitlin (added support for Unix sockets) Apr 2002
// (use buffering, many fixes/cleanup) Oct 2008
// Created: 1993
// RCS-ID: $Id$
// Copyright: (c) Julian Smart 1993
// (c) Guilhem Lavaux 1997, 1998
// (c) 2000 Guillermo Rodriguez <guille@iies.es>
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// ==========================================================================
// declarations
// ==========================================================================
// --------------------------------------------------------------------------
// headers
// --------------------------------------------------------------------------
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#ifdef __BORLANDC__
#pragma hdrstop
#endif
#if wxUSE_SOCKETS && wxUSE_IPC && wxUSE_STREAMS
#include "wx/sckipc.h"
#ifndef WX_PRECOMP
#include "wx/log.h"
#include "wx/event.h"
#include "wx/module.h"
#endif
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include "wx/socket.h"
// --------------------------------------------------------------------------
// macros and constants
// --------------------------------------------------------------------------
// Anonymous namespace keeps the wire-protocol constants private to this
// translation unit.
namespace
{
// Message codes (don't change them to avoid breaking the existing code using
// wxIPC protocol!)
//
// Each IPC message starts with one of these codes; the payload that follows
// depends on the code (see Client_OnRequest/Server_OnRequest below).
enum IPCCode
{
IPC_EXECUTE = 1,
IPC_REQUEST = 2,
IPC_POKE = 3,
IPC_ADVISE_START = 4,
IPC_ADVISE_REQUEST = 5,
IPC_ADVISE = 6,
IPC_ADVISE_STOP = 7,
IPC_REQUEST_REPLY = 8,
IPC_FAIL = 9,
IPC_CONNECT = 10,
IPC_DISCONNECT = 11,
IPC_MAX
};
} // anonymous namespace
// headers needed for umask()
#ifdef __UNIX_LIKE__
#include <sys/types.h>
#include <sys/stat.h>
#endif // __UNIX_LIKE__
// ----------------------------------------------------------------------------
// private functions
// ----------------------------------------------------------------------------
// Get the address object for the given server name; the caller must delete
// the returned pointer. Under Unix a server name containing '/' is treated
// as a filesystem path and yields an AF_UNIX address; otherwise (and always
// on non-Unix systems) an IPv4 address is created with the server name used
// as the service (port) and, if non-empty, "host" as the host name.
static wxSockAddress *
GetAddressFromName(const wxString& serverName,
const wxString& host = wxEmptyString)
{
// we always use INET sockets under non-Unix systems
#if defined(__UNIX__) && !defined(__WINDOWS__) && !defined(__WINE__)
// under Unix, if the server name looks like a path, create a AF_UNIX
// socket instead of AF_INET one
if ( serverName.Find(wxT('/')) != wxNOT_FOUND )
{
wxUNIXaddress *addr = new wxUNIXaddress;
addr->Filename(serverName);
return addr;
}
#endif // Unix/!Unix
{
wxIPV4address *addr = new wxIPV4address;
// Service() accepts either a numeric port or a service name.
addr->Service(serverName);
if ( !host.empty() )
{
addr->Hostname(host);
}
return addr;
}
}
// --------------------------------------------------------------------------
// wxTCPEventHandler stuff (private class)
// --------------------------------------------------------------------------
// Event handler shared by all IPC sockets: Client_OnRequest() serves events
// on established connections (both sides), Server_OnRequest() serves the
// listening socket of a wxTCPServer. A single global instance is created on
// demand by wxTCPEventHandlerModule below.
class wxTCPEventHandler : public wxEvtHandler
{
public:
wxTCPEventHandler() : wxEvtHandler() { }
// Handle input/disconnect events on a connection socket.
void Client_OnRequest(wxSocketEvent& event);
// Handle incoming connection events on a server (listening) socket.
void Server_OnRequest(wxSocketEvent& event);
private:
// Common tear-down logic when a connection is closed by the peer or lost.
void HandleDisconnect(wxTCPConnection *connection);
DECLARE_EVENT_TABLE()
wxDECLARE_NO_COPY_CLASS(wxTCPEventHandler);
};
// Socket event IDs used to route events to the two handlers in the event
// table below (connection sockets vs. the listening socket).
enum
{
_CLIENT_ONREQUEST_ID = 1000,
_SERVER_ONREQUEST_ID
};
// --------------------------------------------------------------------------
// wxTCPEventHandlerModule (private class)
// --------------------------------------------------------------------------
// wxModule wrapper owning the global wxTCPEventHandler: the handler is
// created lazily on first use and destroyed automatically on library
// shutdown, avoiding both static-init order problems and leaks.
class wxTCPEventHandlerModule : public wxModule
{
public:
wxTCPEventHandlerModule() : wxModule() { }
// get the global wxTCPEventHandler creating it if necessary
static wxTCPEventHandler& GetHandler()
{
if ( !ms_handler )
ms_handler = new wxTCPEventHandler;
return *ms_handler;
}
// as ms_handler is initialized on demand, don't do anything in OnInit()
virtual bool OnInit() { return true; }
virtual void OnExit() { wxDELETE(ms_handler); }
private:
// The unique global handler instance, NULL until first requested.
static wxTCPEventHandler *ms_handler;
DECLARE_DYNAMIC_CLASS(wxTCPEventHandlerModule)
wxDECLARE_NO_COPY_CLASS(wxTCPEventHandlerModule);
};
IMPLEMENT_DYNAMIC_CLASS(wxTCPEventHandlerModule, wxModule)
wxTCPEventHandler *wxTCPEventHandlerModule::ms_handler = NULL;
// --------------------------------------------------------------------------
// wxIPCSocketStreams
// --------------------------------------------------------------------------
// When defined, outgoing data is buffered and written to the socket in one
// batch on Flush(); undefine to write through directly (debugging aid).
#define USE_BUFFER
// this class contains the various (related) streams used by wxTCPConnection
// and also provides a way to read from the socket stream directly
//
// for writing to the stream use the IPCOutput class below
class wxIPCSocketStreams
{
public:
// ctor initializes all the streams on top of the given socket
//
// note that we use a bigger than default buffer size which matches the
// typical Ethernet MTU (minus TCP header overhead)
wxIPCSocketStreams(wxSocketBase& sock)
: m_socketStream(sock),
#ifdef USE_BUFFER
m_bufferedOut(m_socketStream, 1448),
#else
m_bufferedOut(m_socketStream),
#endif
m_dataIn(m_socketStream),
m_dataOut(m_bufferedOut)
{
}
// expose the IO methods needed by IPC code (notice that writing is only
// done via IPCOutput)
// flush output
void Flush()
{
#ifdef USE_BUFFER
m_bufferedOut.Sync();
#endif
}
// simple wrappers around the functions with the same name in
// wxDataInputStream
//
// NB: each read flushes pending output first so that a request we have
// just written is actually sent before we block waiting for its reply
wxUint8 Read8()
{
Flush();
return m_dataIn.Read8();
}
wxUint32 Read32()
{
Flush();
return m_dataIn.Read32();
}
wxString ReadString()
{
Flush();
return m_dataIn.ReadString();
}
// read arbitrary (size-prepended) data
//
// connection parameter is needed to call its GetBufferAtLeast() method
void *ReadData(wxConnectionBase *conn, size_t *size)
{
Flush();
wxCHECK_MSG( conn, NULL, "NULL connection parameter" );
wxCHECK_MSG( size, NULL, "NULL size parameter" );
*size = Read32();
// the buffer is owned by the connection, not by us
void * const data = conn->GetBufferAtLeast(*size);
wxCHECK_MSG( data, NULL, "IPC buffer allocation failed" );
m_socketStream.Read(data, *size);
return data;
}
// same as above but for data preceded by the format
void *
ReadFormatData(wxConnectionBase *conn, wxIPCFormat *format, size_t *size)
{
wxCHECK_MSG( format, NULL, "NULL format parameter" );
*format = static_cast<wxIPCFormat>(Read8());
return ReadData(conn, size);
}
// these methods are only used by IPCOutput and not directly
wxDataOutputStream& GetDataOut() { return m_dataOut; }
wxOutputStream& GetUnformattedOut() { return m_bufferedOut; }
private:
// this is the low-level underlying stream using the connection socket
wxSocketStream m_socketStream;
// the buffered stream is used to avoid writing all pieces of an IPC
// request to the socket one by one but to instead do it all at once when
// we're done with it
#ifdef USE_BUFFER
wxBufferedOutputStream m_bufferedOut;
#else
wxOutputStream& m_bufferedOut;
#endif
// finally the data streams are used to be able to write typed data into
// the above streams easily
wxDataInputStream m_dataIn;
wxDataOutputStream m_dataOut;
wxDECLARE_NO_COPY_CLASS(wxIPCSocketStreams);
};
namespace
{
// an object of this class should be instantiated on the stack to write to the
// underlying socket stream
//
// this class is intentionally separated from wxIPCSocketStreams to ensure that
// Flush() is always called
class IPCOutput
{
public:
// construct an object associated with the given streams (which must have
// life time greater than ours as we keep a reference to it)
IPCOutput(wxIPCSocketStreams *streams)
: m_streams(*streams)
{
wxASSERT_MSG( streams, "NULL streams pointer" );
}
// dtor calls Flush() really sending the IPC data to the network
~IPCOutput() { m_streams.Flush(); }
// write a byte
void Write8(wxUint8 i)
{
m_streams.GetDataOut().Write8(i);
}
// write the reply code and a string
void Write(IPCCode code, const wxString& str)
{
Write8(code);
m_streams.GetDataOut().WriteString(str);
}
// write the reply code, a string and a format in this order
void Write(IPCCode code, const wxString& str, wxIPCFormat format)
{
Write(code, str);
Write8(format);
}
// write arbitrary data
void WriteData(const void *data, size_t size)
{
m_streams.GetDataOut().Write32(size);
m_streams.GetUnformattedOut().Write(data, size);
}
private:
wxIPCSocketStreams& m_streams;
wxDECLARE_NO_COPY_CLASS(IPCOutput);
};
} // anonymous namespace
// ==========================================================================
// implementation
// ==========================================================================
// Register the IPC classes with the wxWidgets RTTI system so that they can
// be identified (IsKindOf) and, for the first two, created dynamically.
IMPLEMENT_DYNAMIC_CLASS(wxTCPServer, wxServerBase)
IMPLEMENT_DYNAMIC_CLASS(wxTCPClient, wxClientBase)
IMPLEMENT_CLASS(wxTCPConnection, wxConnectionBase)
// --------------------------------------------------------------------------
// wxTCPClient
// --------------------------------------------------------------------------
// Trivial default ctor: all real work happens in MakeConnection().
wxTCPClient::wxTCPClient()
: wxClientBase()
{
}
bool wxTCPClient::ValidHost(const wxString& host)
{
wxIPV4address addr;
return addr.Hostname(host);
}
// Establish a connection to the given server/topic: connects a socket,
// performs the IPC_CONNECT handshake and, on success, returns a fully wired
// wxTCPConnection (which takes ownership of the socket and the streams).
// Returns NULL on any failure, in which case everything created here is
// cleaned up again.
wxConnectionBase *wxTCPClient::MakeConnection(const wxString& host,
const wxString& serverName,
const wxString& topic)
{
wxSockAddress *addr = GetAddressFromName(serverName, host);
if ( !addr )
return NULL;
wxSocketClient * const client = new wxSocketClient(wxSOCKET_WAITALL);
wxIPCSocketStreams * const streams = new wxIPCSocketStreams(*client);
bool ok = client->Connect(*addr);
// The address is only needed for Connect(); free it unconditionally.
delete addr;
if ( ok )
{
// Send topic name, and enquire whether this has succeeded
IPCOutput(streams).Write(IPC_CONNECT, topic);
unsigned char msg = streams->Read8();
// OK! Confirmation.
if (msg == IPC_CONNECT)
{
// OnMakeConnection() may be overridden to return a derived class.
wxTCPConnection *
connection = (wxTCPConnection *)OnMakeConnection ();
if (connection)
{
if (connection->IsKindOf(CLASSINFO(wxTCPConnection)))
{
// Hand the socket and streams over to the connection and start
// listening for incoming data/disconnection on it.
connection->m_topic = topic;
connection->m_sock = client;
connection->m_streams = streams;
client->SetEventHandler(wxTCPEventHandlerModule::GetHandler(),
_CLIENT_ONREQUEST_ID);
client->SetClientData(connection);
client->SetNotify(wxSOCKET_INPUT_FLAG | wxSOCKET_LOST_FLAG);
client->Notify(true);
return connection;
}
else
{
delete connection;
// and fall through to delete everything else
}
}
}
}
// Something went wrong, delete everything
delete streams;
client->Destroy();
return NULL;
}
// Factory hook called from MakeConnection(); override to return an object
// of a wxTCPConnection-derived class.
wxConnectionBase *wxTCPClient::OnMakeConnection()
{
return new wxTCPConnection();
}
// --------------------------------------------------------------------------
// wxTCPServer
// --------------------------------------------------------------------------
// The listening socket is only created in Create(), not here.
wxTCPServer::wxTCPServer()
: wxServerBase()
{
m_server = NULL;
}
// Start listening on the given server name (a port/service for TCP or a
// filesystem path for Unix domain sockets). Replaces any previously created
// listening socket. Returns false if the address could not be resolved or
// the socket could not be bound.
bool wxTCPServer::Create(const wxString& serverName)
{
// Destroy previous server, if any
if (m_server)
{
// Clear the back pointer first so pending events are ignored.
m_server->SetClientData(NULL);
m_server->Destroy();
m_server = NULL;
}
wxSockAddress *addr = GetAddressFromName(serverName);
if ( !addr )
return false;
#ifdef __UNIX_LIKE__
mode_t umaskOld;
if ( addr->Type() == wxSockAddress::UNIX )
{
// ensure that the file doesn't exist as otherwise calling socket()
// would fail
int rc = remove(serverName.fn_str());
if ( rc < 0 && errno != ENOENT )
{
delete addr;
return false;
}
// also set the umask to prevent the others from reading our file
umaskOld = umask(077);
}
else
{
// unused anyhow but shut down the compiler warnings
umaskOld = 0;
}
#endif // __UNIX_LIKE__
// Create a socket listening on the specified port (reusing it to allow
// restarting the server listening on the same port as was used by the
// previous instance of this server)
m_server = new wxSocketServer(*addr, wxSOCKET_WAITALL | wxSOCKET_REUSEADDR);
#ifdef __UNIX_LIKE__
if ( addr->Type() == wxSockAddress::UNIX )
{
// restore the umask
umask(umaskOld);
// save the file name to remove it later
m_filename = serverName;
}
#endif // __UNIX_LIKE__
delete addr;
if (!m_server->Ok())
{
m_server->Destroy();
m_server = NULL;
return false;
}
// Route incoming connection events to the global handler.
m_server->SetEventHandler(wxTCPEventHandlerModule::GetHandler(),
_SERVER_ONREQUEST_ID);
m_server->SetClientData(this);
m_server->SetNotify(wxSOCKET_CONNECTION_FLAG);
m_server->Notify(true);
return true;
}
// Destroy the listening socket and, for Unix domain sockets, remove the
// socket file created in Create().
wxTCPServer::~wxTCPServer()
{
if ( m_server )
{
// Detach ourselves before destruction: events may still be delivered.
m_server->SetClientData(NULL);
m_server->Destroy();
}
#ifdef __UNIX_LIKE__
if ( !m_filename.empty() )
{
if ( remove(m_filename.fn_str()) != 0 )
{
wxLogDebug(wxT("Stale AF_UNIX file '%s' left."), m_filename.c_str());
}
}
#endif // __UNIX_LIKE__
}
// Factory hook called when a client connects; override to accept only some
// topics and/or to return a wxTCPConnection-derived object.
wxConnectionBase *
wxTCPServer::OnAcceptConnection(const wxString& WXUNUSED(topic))
{
return new wxTCPConnection();
}
// --------------------------------------------------------------------------
// wxTCPConnection
// --------------------------------------------------------------------------
// Common initialization: socket and streams are attached later by
// wxTCPClient::MakeConnection() or wxTCPEventHandler::Server_OnRequest().
void wxTCPConnection::Init()
{
m_sock = NULL;
m_streams = NULL;
}
// Disconnect gracefully (if still connected) and release the socket and
// the streams owned by this connection.
wxTCPConnection::~wxTCPConnection()
{
Disconnect();
if ( m_sock )
{
// Unhook ourselves: the socket may outlive us until pending events
// for it are processed.
m_sock->SetClientData(NULL);
m_sock->Destroy();
}
delete m_streams;
}
// Compression of the IPC stream is not implemented; the flag is ignored.
void wxTCPConnection::Compress(bool WXUNUSED(on))
{
// TODO
}
// Calls that CLIENT can make.
// Gracefully close the connection: notify the peer with IPC_DISCONNECT,
// then stop event delivery and close the socket. Safe to call repeatedly
// (no-op when already disconnected).
bool wxTCPConnection::Disconnect()
{
if ( !GetConnected() )
return true;
// Send the disconnect message to the peer.
IPCOutput(m_streams).Write8(IPC_DISCONNECT);
if ( m_sock )
{
m_sock->Notify(false);
m_sock->Close();
}
SetConnected(false);
return true;
}
bool wxTCPConnection::DoExecute(const void *data,
size_t size,
wxIPCFormat format)
{
if ( !m_sock->IsConnected() )
return false;
// Prepare EXECUTE message
IPCOutput out(m_streams);
out.Write8(IPC_EXECUTE);
out.Write8(format);
out.WriteData(data, size);
return true;
}
// Synchronously request the value of "item" from the peer: sends
// IPC_REQUEST and blocks reading the reply. Returns a pointer to a buffer
// owned by this connection (valid until the next read), or NULL on failure.
// "size" receives the data length and may be NULL for NUL-terminated data.
const void *wxTCPConnection::Request(const wxString& item,
size_t *size,
wxIPCFormat format)
{
if ( !m_sock->IsConnected() )
return NULL;
IPCOutput(m_streams).Write(IPC_REQUEST, item, format);
const int ret = m_streams->Read8();
if ( ret != IPC_REQUEST_REPLY )
return NULL;
// ReadData() needs a non-NULL size pointer but the client code can call us
// with NULL pointer (this makes sense if it knows that it always works
// with NUL-terminated strings)
size_t sizeFallback;
return m_streams->ReadData(this, size ? size : &sizeFallback);
}
bool wxTCPConnection::DoPoke(const wxString& item,
const void *data,
size_t size,
wxIPCFormat format)
{
if ( !m_sock->IsConnected() )
return false;
IPCOutput out(m_streams);
out.Write(IPC_POKE, item, format);
out.WriteData(data, size);
return true;
}
bool wxTCPConnection::StartAdvise(const wxString& item)
{
    if ( !m_sock->IsConnected() )
        return false;

    // Ask the peer to start the advise loop for this item; it echoes the
    // same code back on success and IPC_FAIL otherwise.
    IPCOutput(m_streams).Write(IPC_ADVISE_START, item);

    return m_streams->Read8() == IPC_ADVISE_START;
}
bool wxTCPConnection::StopAdvise(const wxString& item)
{
    if ( !m_sock->IsConnected() )
        return false;

    // Ask the peer to stop the advise loop for this item; it echoes the
    // same code back on success and IPC_FAIL otherwise.
    IPCOutput(m_streams).Write(IPC_ADVISE_STOP, item);

    return m_streams->Read8() == IPC_ADVISE_STOP;
}
// Calls that SERVER can make
bool wxTCPConnection::DoAdvise(const wxString& item,
const void *data,
size_t size,
wxIPCFormat format)
{
if ( !m_sock->IsConnected() )
return false;
IPCOutput out(m_streams);
out.Write(IPC_ADVISE, item, format);
out.WriteData(data, size);
return true;
}
// --------------------------------------------------------------------------
// wxTCPEventHandler (private class)
// --------------------------------------------------------------------------
// Route socket events by ID: connection sockets go to Client_OnRequest(),
// the listening socket goes to Server_OnRequest().
BEGIN_EVENT_TABLE(wxTCPEventHandler, wxEvtHandler)
EVT_SOCKET(_CLIENT_ONREQUEST_ID, wxTCPEventHandler::Client_OnRequest)
EVT_SOCKET(_SERVER_ONREQUEST_ID, wxTCPEventHandler::Server_OnRequest)
END_EVENT_TABLE()
// Tear down a connection whose socket was closed by the peer or lost:
// stop event delivery, close the socket, detach it from the connection and
// notify the user code via OnDisconnect().
void wxTCPEventHandler::HandleDisconnect(wxTCPConnection *connection)
{
// connection was closed (either gracefully or not): destroy everything
connection->m_sock->Notify(false);
connection->m_sock->Close();
// don't leave references to this soon-to-be-dangling connection in the
// socket as it won't be destroyed immediately as its destruction will be
// delayed in case there are more events pending for it
connection->m_sock->SetClientData(NULL);
connection->SetConnected(false);
connection->OnDisconnect();
}
// Serve one event on an established connection socket (used by both the
// client and the server side): handles disconnection, then reads exactly
// one IPC message from the stream and dispatches it to the corresponding
// wxTCPConnection callback. On a malformed message an IPC_FAIL reply is
// sent back to the peer.
void wxTCPEventHandler::Client_OnRequest(wxSocketEvent &event)
{
wxSocketBase *sock = event.GetSocket();
if (!sock)
return;
wxSocketNotify evt = event.GetSocketEvent();
wxTCPConnection * const
connection = static_cast<wxTCPConnection *>(sock->GetClientData());
// This socket is being deleted; skip this event
if (!connection)
return;
if ( evt == wxSOCKET_LOST )
{
HandleDisconnect(connection);
return;
}
// Receive message number.
wxIPCSocketStreams * const streams = connection->m_streams;
// Copy the topic: the callbacks below could conceivably destroy the
// connection object.
const wxString topic = connection->m_topic;
wxString item;
// Set to true to send an IPC_FAIL reply after the switch.
bool error = false;
const int msg = streams->Read8();
switch ( msg )
{
case IPC_EXECUTE:
{
// EXECUTE payload: format byte + size-prefixed data.
wxIPCFormat format;
size_t size wxDUMMY_INITIALIZE(0);
void * const
data = streams->ReadFormatData(connection, &format, &size);
if ( data )
connection->OnExecute(topic, data, size, format);
else
error = true;
}
break;
case IPC_ADVISE:
{
// ADVISE payload: item name + format byte + size-prefixed data.
item = streams->ReadString();
wxIPCFormat format;
size_t size wxDUMMY_INITIALIZE(0);
void * const
data = streams->ReadFormatData(connection, &format, &size);
if ( data )
connection->OnAdvise(topic, item, data, size, format);
else
error = true;
}
break;
case IPC_ADVISE_START:
{
// Echo the request code back on acceptance, IPC_FAIL on refusal.
item = streams->ReadString();
IPCOutput(streams).Write8(connection->OnStartAdvise(topic, item)
? IPC_ADVISE_START
: IPC_FAIL);
}
break;
case IPC_ADVISE_STOP:
{
item = streams->ReadString();
IPCOutput(streams).Write8(connection->OnStopAdvise(topic, item)
? IPC_ADVISE_STOP
: IPC_FAIL);
}
break;
case IPC_POKE:
{
// POKE payload: item name + format byte + size-prefixed data.
item = streams->ReadString();
wxIPCFormat format = (wxIPCFormat)streams->Read8();
size_t size wxDUMMY_INITIALIZE(0);
void * const data = streams->ReadData(connection, &size);
if ( data )
connection->OnPoke(topic, item, data, size, format);
else
error = true;
}
break;
case IPC_REQUEST:
{
item = streams->ReadString();
wxIPCFormat format = (wxIPCFormat)streams->Read8();
// wxNO_LEN means the handler didn't fill in the size; for the text
// formats it is computed from the data itself below.
size_t user_size = wxNO_LEN;
const void *user_data = connection->OnRequest(topic,
item,
&user_size,
format);
if ( !user_data )
{
IPCOutput(streams).Write8(IPC_FAIL);
break;
}
IPCOutput out(streams);
out.Write8(IPC_REQUEST_REPLY);
if ( user_size == wxNO_LEN )
{
switch ( format )
{
case wxIPC_TEXT:
case wxIPC_UTF8TEXT:
user_size = strlen((const char *)user_data) + 1; // includes final NUL
break;
case wxIPC_UNICODETEXT:
user_size = (wcslen((const wchar_t *)user_data) + 1) * sizeof(wchar_t); // includes final NUL
break;
default:
user_size = 0;
}
}
out.WriteData(user_data, user_size);
}
break;
case IPC_DISCONNECT:
HandleDisconnect(connection);
break;
case IPC_FAIL:
// IPC_FAIL is only ever sent as a reply, never spontaneously.
wxLogDebug("Unexpected IPC_FAIL received");
error = true;
break;
default:
wxLogDebug("Unknown message code %d received.", msg);
error = true;
break;
}
if ( error )
IPCOutput(streams).Write8(IPC_FAIL);
}
// Serve an event on the listening socket: accept the incoming connection,
// perform the IPC_CONNECT handshake and, if the server accepts the topic,
// wire up a new wxTCPConnection which takes ownership of the accepted
// socket and the streams. On any failure IPC_FAIL is sent to the peer and
// everything created here is destroyed again.
void wxTCPEventHandler::Server_OnRequest(wxSocketEvent &event)
{
wxSocketServer *server = (wxSocketServer *) event.GetSocket();
if (!server)
return;
wxTCPServer *ipcserv = (wxTCPServer *) server->GetClientData();
// This socket is being deleted; skip this event
if (!ipcserv)
return;
if (event.GetSocketEvent() != wxSOCKET_CONNECTION)
return;
// Accept the connection, getting a new socket
wxSocketBase *sock = server->Accept();
if (!sock)
return;
if (!sock->Ok())
{
sock->Destroy();
return;
}
wxIPCSocketStreams *streams = new wxIPCSocketStreams(*sock);
{
IPCOutput out(streams);
// The very first message must be IPC_CONNECT with the topic name.
const int msg = streams->Read8();
if ( msg == IPC_CONNECT )
{
const wxString topic = streams->ReadString();
// OnAcceptConnection() may be overridden to refuse the topic or to
// return a wxTCPConnection-derived object.
wxTCPConnection *new_connection =
(wxTCPConnection *)ipcserv->OnAcceptConnection (topic);
if (new_connection)
{
if (new_connection->IsKindOf(CLASSINFO(wxTCPConnection)))
{
// Acknowledge success
out.Write8(IPC_CONNECT);
// Transfer ownership of socket and streams to the connection
// and start listening for data/disconnection on it.
new_connection->m_sock = sock;
new_connection->m_streams = streams;
new_connection->m_topic = topic;
sock->SetEventHandler(wxTCPEventHandlerModule::GetHandler(),
_CLIENT_ONREQUEST_ID);
sock->SetClientData(new_connection);
sock->SetNotify(wxSOCKET_INPUT_FLAG | wxSOCKET_LOST_FLAG);
sock->Notify(true);
return;
}
else
{
delete new_connection;
// and fall through to delete everything else
}
}
}
// Something went wrong, send failure message and delete everything
out.Write8(IPC_FAIL);
} // IPCOutput object is destroyed here, before destroying stream
delete streams;
sock->Destroy();
}
#endif // wxUSE_SOCKETS && wxUSE_IPC && wxUSE_STREAMS
|
// RUN: clang-cc -fsyntax-only -verify %s
// Parser regression test: a new-expression with a runtime array bound must
// be accepted. (The allocation is intentionally not freed -- this file only
// needs to compile under -fsyntax-only.)
int *use_new(int N) {
return new int [N];
}
// Deliberately uses 'std' as an ordinary variable name: the compiler must
// accept it as an identifier at namespace scope, not treat it specially.
int std = 17;
|
/* WARNING: This is auto-generated file. Do not modify, since changes will
* be lost! Modify the generating script instead.
*/
const char* getPipelineCacheHeaderVersionName (VkPipelineCacheHeaderVersion value);
const char* getResultName (VkResult value);
const char* getStructureTypeName (VkStructureType value);
const char* getSystemAllocationScopeName (VkSystemAllocationScope value);
const char* getInternalAllocationTypeName (VkInternalAllocationType value);
const char* getFormatName (VkFormat value);
const char* getImageTypeName (VkImageType value);
const char* getImageTilingName (VkImageTiling value);
const char* getPhysicalDeviceTypeName (VkPhysicalDeviceType value);
const char* getQueryTypeName (VkQueryType value);
const char* getSharingModeName (VkSharingMode value);
const char* getImageLayoutName (VkImageLayout value);
const char* getImageViewTypeName (VkImageViewType value);
const char* getComponentSwizzleName (VkComponentSwizzle value);
const char* getVertexInputRateName (VkVertexInputRate value);
const char* getPrimitiveTopologyName (VkPrimitiveTopology value);
const char* getPolygonModeName (VkPolygonMode value);
const char* getFrontFaceName (VkFrontFace value);
const char* getCompareOpName (VkCompareOp value);
const char* getStencilOpName (VkStencilOp value);
const char* getLogicOpName (VkLogicOp value);
const char* getBlendFactorName (VkBlendFactor value);
const char* getBlendOpName (VkBlendOp value);
const char* getDynamicStateName (VkDynamicState value);
const char* getFilterName (VkFilter value);
const char* getSamplerMipmapModeName (VkSamplerMipmapMode value);
const char* getSamplerAddressModeName (VkSamplerAddressMode value);
const char* getBorderColorName (VkBorderColor value);
const char* getDescriptorTypeName (VkDescriptorType value);
const char* getAttachmentLoadOpName (VkAttachmentLoadOp value);
const char* getAttachmentStoreOpName (VkAttachmentStoreOp value);
const char* getPipelineBindPointName (VkPipelineBindPoint value);
const char* getCommandBufferLevelName (VkCommandBufferLevel value);
const char* getIndexTypeName (VkIndexType value);
const char* getSubpassContentsName (VkSubpassContents value);
const char* getColorSpaceKHRName (VkColorSpaceKHR value);
const char* getPresentModeKHRName (VkPresentModeKHR value);
const char* getDebugReportObjectTypeEXTName (VkDebugReportObjectTypeEXT value);
const char* getDebugReportErrorEXTName (VkDebugReportErrorEXT value);
const char* getRasterizationOrderAMDName (VkRasterizationOrderAMD value);
const char* getValidationCheckEXTName (VkValidationCheckEXT value);
const char* getIndirectCommandsTokenTypeNVXName (VkIndirectCommandsTokenTypeNVX value);
const char* getObjectEntryTypeNVXName (VkObjectEntryTypeNVX value);
const char* getDescriptorUpdateTemplateTypeKHRName (VkDescriptorUpdateTemplateTypeKHR value);
// Auto-generated convenience wrappers: each get<Enum>Str() bundles an enum value with
// its matching get<Enum>Name() lookup function inside a tcu::Format::Enum decorator,
// so streaming the result prints the value's symbolic Vulkan name.
inline tcu::Format::Enum<VkPipelineCacheHeaderVersion> getPipelineCacheHeaderVersionStr (VkPipelineCacheHeaderVersion value) { return tcu::Format::Enum<VkPipelineCacheHeaderVersion>(getPipelineCacheHeaderVersionName, value); }
inline tcu::Format::Enum<VkResult> getResultStr (VkResult value) { return tcu::Format::Enum<VkResult>(getResultName, value); }
inline tcu::Format::Enum<VkStructureType> getStructureTypeStr (VkStructureType value) { return tcu::Format::Enum<VkStructureType>(getStructureTypeName, value); }
inline tcu::Format::Enum<VkSystemAllocationScope> getSystemAllocationScopeStr (VkSystemAllocationScope value) { return tcu::Format::Enum<VkSystemAllocationScope>(getSystemAllocationScopeName, value); }
inline tcu::Format::Enum<VkInternalAllocationType> getInternalAllocationTypeStr (VkInternalAllocationType value) { return tcu::Format::Enum<VkInternalAllocationType>(getInternalAllocationTypeName, value); }
inline tcu::Format::Enum<VkFormat> getFormatStr (VkFormat value) { return tcu::Format::Enum<VkFormat>(getFormatName, value); }
inline tcu::Format::Enum<VkImageType> getImageTypeStr (VkImageType value) { return tcu::Format::Enum<VkImageType>(getImageTypeName, value); }
inline tcu::Format::Enum<VkImageTiling> getImageTilingStr (VkImageTiling value) { return tcu::Format::Enum<VkImageTiling>(getImageTilingName, value); }
inline tcu::Format::Enum<VkPhysicalDeviceType> getPhysicalDeviceTypeStr (VkPhysicalDeviceType value) { return tcu::Format::Enum<VkPhysicalDeviceType>(getPhysicalDeviceTypeName, value); }
inline tcu::Format::Enum<VkQueryType> getQueryTypeStr (VkQueryType value) { return tcu::Format::Enum<VkQueryType>(getQueryTypeName, value); }
inline tcu::Format::Enum<VkSharingMode> getSharingModeStr (VkSharingMode value) { return tcu::Format::Enum<VkSharingMode>(getSharingModeName, value); }
inline tcu::Format::Enum<VkImageLayout> getImageLayoutStr (VkImageLayout value) { return tcu::Format::Enum<VkImageLayout>(getImageLayoutName, value); }
inline tcu::Format::Enum<VkImageViewType> getImageViewTypeStr (VkImageViewType value) { return tcu::Format::Enum<VkImageViewType>(getImageViewTypeName, value); }
inline tcu::Format::Enum<VkComponentSwizzle> getComponentSwizzleStr (VkComponentSwizzle value) { return tcu::Format::Enum<VkComponentSwizzle>(getComponentSwizzleName, value); }
inline tcu::Format::Enum<VkVertexInputRate> getVertexInputRateStr (VkVertexInputRate value) { return tcu::Format::Enum<VkVertexInputRate>(getVertexInputRateName, value); }
inline tcu::Format::Enum<VkPrimitiveTopology> getPrimitiveTopologyStr (VkPrimitiveTopology value) { return tcu::Format::Enum<VkPrimitiveTopology>(getPrimitiveTopologyName, value); }
inline tcu::Format::Enum<VkPolygonMode> getPolygonModeStr (VkPolygonMode value) { return tcu::Format::Enum<VkPolygonMode>(getPolygonModeName, value); }
inline tcu::Format::Enum<VkFrontFace> getFrontFaceStr (VkFrontFace value) { return tcu::Format::Enum<VkFrontFace>(getFrontFaceName, value); }
inline tcu::Format::Enum<VkCompareOp> getCompareOpStr (VkCompareOp value) { return tcu::Format::Enum<VkCompareOp>(getCompareOpName, value); }
inline tcu::Format::Enum<VkStencilOp> getStencilOpStr (VkStencilOp value) { return tcu::Format::Enum<VkStencilOp>(getStencilOpName, value); }
inline tcu::Format::Enum<VkLogicOp> getLogicOpStr (VkLogicOp value) { return tcu::Format::Enum<VkLogicOp>(getLogicOpName, value); }
inline tcu::Format::Enum<VkBlendFactor> getBlendFactorStr (VkBlendFactor value) { return tcu::Format::Enum<VkBlendFactor>(getBlendFactorName, value); }
inline tcu::Format::Enum<VkBlendOp> getBlendOpStr (VkBlendOp value) { return tcu::Format::Enum<VkBlendOp>(getBlendOpName, value); }
inline tcu::Format::Enum<VkDynamicState> getDynamicStateStr (VkDynamicState value) { return tcu::Format::Enum<VkDynamicState>(getDynamicStateName, value); }
inline tcu::Format::Enum<VkFilter> getFilterStr (VkFilter value) { return tcu::Format::Enum<VkFilter>(getFilterName, value); }
inline tcu::Format::Enum<VkSamplerMipmapMode> getSamplerMipmapModeStr (VkSamplerMipmapMode value) { return tcu::Format::Enum<VkSamplerMipmapMode>(getSamplerMipmapModeName, value); }
inline tcu::Format::Enum<VkSamplerAddressMode> getSamplerAddressModeStr (VkSamplerAddressMode value) { return tcu::Format::Enum<VkSamplerAddressMode>(getSamplerAddressModeName, value); }
inline tcu::Format::Enum<VkBorderColor> getBorderColorStr (VkBorderColor value) { return tcu::Format::Enum<VkBorderColor>(getBorderColorName, value); }
inline tcu::Format::Enum<VkDescriptorType> getDescriptorTypeStr (VkDescriptorType value) { return tcu::Format::Enum<VkDescriptorType>(getDescriptorTypeName, value); }
inline tcu::Format::Enum<VkAttachmentLoadOp> getAttachmentLoadOpStr (VkAttachmentLoadOp value) { return tcu::Format::Enum<VkAttachmentLoadOp>(getAttachmentLoadOpName, value); }
inline tcu::Format::Enum<VkAttachmentStoreOp> getAttachmentStoreOpStr (VkAttachmentStoreOp value) { return tcu::Format::Enum<VkAttachmentStoreOp>(getAttachmentStoreOpName, value); }
inline tcu::Format::Enum<VkPipelineBindPoint> getPipelineBindPointStr (VkPipelineBindPoint value) { return tcu::Format::Enum<VkPipelineBindPoint>(getPipelineBindPointName, value); }
inline tcu::Format::Enum<VkCommandBufferLevel> getCommandBufferLevelStr (VkCommandBufferLevel value) { return tcu::Format::Enum<VkCommandBufferLevel>(getCommandBufferLevelName, value); }
inline tcu::Format::Enum<VkIndexType> getIndexTypeStr (VkIndexType value) { return tcu::Format::Enum<VkIndexType>(getIndexTypeName, value); }
inline tcu::Format::Enum<VkSubpassContents> getSubpassContentsStr (VkSubpassContents value) { return tcu::Format::Enum<VkSubpassContents>(getSubpassContentsName, value); }
inline tcu::Format::Enum<VkColorSpaceKHR> getColorSpaceKHRStr (VkColorSpaceKHR value) { return tcu::Format::Enum<VkColorSpaceKHR>(getColorSpaceKHRName, value); }
inline tcu::Format::Enum<VkPresentModeKHR> getPresentModeKHRStr (VkPresentModeKHR value) { return tcu::Format::Enum<VkPresentModeKHR>(getPresentModeKHRName, value); }
inline tcu::Format::Enum<VkDebugReportObjectTypeEXT> getDebugReportObjectTypeEXTStr (VkDebugReportObjectTypeEXT value) { return tcu::Format::Enum<VkDebugReportObjectTypeEXT>(getDebugReportObjectTypeEXTName, value); }
inline tcu::Format::Enum<VkDebugReportErrorEXT> getDebugReportErrorEXTStr (VkDebugReportErrorEXT value) { return tcu::Format::Enum<VkDebugReportErrorEXT>(getDebugReportErrorEXTName, value); }
inline tcu::Format::Enum<VkRasterizationOrderAMD> getRasterizationOrderAMDStr (VkRasterizationOrderAMD value) { return tcu::Format::Enum<VkRasterizationOrderAMD>(getRasterizationOrderAMDName, value); }
inline tcu::Format::Enum<VkValidationCheckEXT> getValidationCheckEXTStr (VkValidationCheckEXT value) { return tcu::Format::Enum<VkValidationCheckEXT>(getValidationCheckEXTName, value); }
inline tcu::Format::Enum<VkIndirectCommandsTokenTypeNVX> getIndirectCommandsTokenTypeNVXStr (VkIndirectCommandsTokenTypeNVX value) { return tcu::Format::Enum<VkIndirectCommandsTokenTypeNVX>(getIndirectCommandsTokenTypeNVXName, value); }
inline tcu::Format::Enum<VkObjectEntryTypeNVX> getObjectEntryTypeNVXStr (VkObjectEntryTypeNVX value) { return tcu::Format::Enum<VkObjectEntryTypeNVX>(getObjectEntryTypeNVXName, value); }
inline tcu::Format::Enum<VkDescriptorUpdateTemplateTypeKHR> getDescriptorUpdateTemplateTypeKHRStr (VkDescriptorUpdateTemplateTypeKHR value) { return tcu::Format::Enum<VkDescriptorUpdateTemplateTypeKHR>(getDescriptorUpdateTemplateTypeKHRName, value); }
// Auto-generated stream-insertion operators for the Vulkan enum types: each one
// delegates to the corresponding get*Str() wrapper so enum values log by name.
// NOTE: generated code — do not edit by hand; regenerate from the Vulkan registry.
inline std::ostream& operator<< (std::ostream& s, VkPipelineCacheHeaderVersion value) { return s << getPipelineCacheHeaderVersionStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkResult value) { return s << getResultStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkStructureType value) { return s << getStructureTypeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkSystemAllocationScope value) { return s << getSystemAllocationScopeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkInternalAllocationType value) { return s << getInternalAllocationTypeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkFormat value) { return s << getFormatStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkImageType value) { return s << getImageTypeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkImageTiling value) { return s << getImageTilingStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkPhysicalDeviceType value) { return s << getPhysicalDeviceTypeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkQueryType value) { return s << getQueryTypeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkSharingMode value) { return s << getSharingModeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkImageLayout value) { return s << getImageLayoutStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkImageViewType value) { return s << getImageViewTypeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkComponentSwizzle value) { return s << getComponentSwizzleStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkVertexInputRate value) { return s << getVertexInputRateStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkPrimitiveTopology value) { return s << getPrimitiveTopologyStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkPolygonMode value) { return s << getPolygonModeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkFrontFace value) { return s << getFrontFaceStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkCompareOp value) { return s << getCompareOpStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkStencilOp value) { return s << getStencilOpStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkLogicOp value) { return s << getLogicOpStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkBlendFactor value) { return s << getBlendFactorStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkBlendOp value) { return s << getBlendOpStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkDynamicState value) { return s << getDynamicStateStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkFilter value) { return s << getFilterStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkSamplerMipmapMode value) { return s << getSamplerMipmapModeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkSamplerAddressMode value) { return s << getSamplerAddressModeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkBorderColor value) { return s << getBorderColorStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkDescriptorType value) { return s << getDescriptorTypeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkAttachmentLoadOp value) { return s << getAttachmentLoadOpStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkAttachmentStoreOp value) { return s << getAttachmentStoreOpStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkPipelineBindPoint value) { return s << getPipelineBindPointStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkCommandBufferLevel value) { return s << getCommandBufferLevelStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkIndexType value) { return s << getIndexTypeStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkSubpassContents value) { return s << getSubpassContentsStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkColorSpaceKHR value) { return s << getColorSpaceKHRStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkPresentModeKHR value) { return s << getPresentModeKHRStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkDebugReportObjectTypeEXT value) { return s << getDebugReportObjectTypeEXTStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkDebugReportErrorEXT value) { return s << getDebugReportErrorEXTStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkRasterizationOrderAMD value) { return s << getRasterizationOrderAMDStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkValidationCheckEXT value) { return s << getValidationCheckEXTStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkIndirectCommandsTokenTypeNVX value) { return s << getIndirectCommandsTokenTypeNVXStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkObjectEntryTypeNVX value) { return s << getObjectEntryTypeNVXStr(value); }
inline std::ostream& operator<< (std::ostream& s, VkDescriptorUpdateTemplateTypeKHR value) { return s << getDescriptorUpdateTemplateTypeKHRStr(value); }
tcu::Format::Bitfield<32> getFormatFeatureFlagsStr (VkFormatFeatureFlags value);
tcu::Format::Bitfield<32> getImageUsageFlagsStr (VkImageUsageFlags value);
tcu::Format::Bitfield<32> getImageCreateFlagsStr (VkImageCreateFlags value);
tcu::Format::Bitfield<32> getSampleCountFlagsStr (VkSampleCountFlags value);
tcu::Format::Bitfield<32> getQueueFlagsStr (VkQueueFlags value);
tcu::Format::Bitfield<32> getMemoryPropertyFlagsStr (VkMemoryPropertyFlags value);
tcu::Format::Bitfield<32> getMemoryHeapFlagsStr (VkMemoryHeapFlags value);
tcu::Format::Bitfield<32> getPipelineStageFlagsStr (VkPipelineStageFlags value);
tcu::Format::Bitfield<32> getImageAspectFlagsStr (VkImageAspectFlags value);
tcu::Format::Bitfield<32> getSparseImageFormatFlagsStr (VkSparseImageFormatFlags value);
tcu::Format::Bitfield<32> getSparseMemoryBindFlagsStr (VkSparseMemoryBindFlags value);
tcu::Format::Bitfield<32> getFenceCreateFlagsStr (VkFenceCreateFlags value);
tcu::Format::Bitfield<32> getQueryPipelineStatisticFlagsStr (VkQueryPipelineStatisticFlags value);
tcu::Format::Bitfield<32> getQueryResultFlagsStr (VkQueryResultFlags value);
tcu::Format::Bitfield<32> getBufferCreateFlagsStr (VkBufferCreateFlags value);
tcu::Format::Bitfield<32> getBufferUsageFlagsStr (VkBufferUsageFlags value);
tcu::Format::Bitfield<32> getPipelineCreateFlagsStr (VkPipelineCreateFlags value);
tcu::Format::Bitfield<32> getShaderStageFlagsStr (VkShaderStageFlags value);
tcu::Format::Bitfield<32> getCullModeFlagsStr (VkCullModeFlags value);
tcu::Format::Bitfield<32> getColorComponentFlagsStr (VkColorComponentFlags value);
tcu::Format::Bitfield<32> getDescriptorSetLayoutCreateFlagsStr (VkDescriptorSetLayoutCreateFlags value);
tcu::Format::Bitfield<32> getDescriptorPoolCreateFlagsStr (VkDescriptorPoolCreateFlags value);
tcu::Format::Bitfield<32> getAttachmentDescriptionFlagsStr (VkAttachmentDescriptionFlags value);
tcu::Format::Bitfield<32> getAccessFlagsStr (VkAccessFlags value);
tcu::Format::Bitfield<32> getDependencyFlagsStr (VkDependencyFlags value);
tcu::Format::Bitfield<32> getCommandPoolCreateFlagsStr (VkCommandPoolCreateFlags value);
tcu::Format::Bitfield<32> getCommandPoolResetFlagsStr (VkCommandPoolResetFlags value);
tcu::Format::Bitfield<32> getCommandBufferUsageFlagsStr (VkCommandBufferUsageFlags value);
tcu::Format::Bitfield<32> getQueryControlFlagsStr (VkQueryControlFlags value);
tcu::Format::Bitfield<32> getCommandBufferResetFlagsStr (VkCommandBufferResetFlags value);
tcu::Format::Bitfield<32> getStencilFaceFlagsStr (VkStencilFaceFlags value);
tcu::Format::Bitfield<32> getSurfaceTransformFlagsKHRStr (VkSurfaceTransformFlagsKHR value);
tcu::Format::Bitfield<32> getCompositeAlphaFlagsKHRStr (VkCompositeAlphaFlagsKHR value);
tcu::Format::Bitfield<32> getDisplayPlaneAlphaFlagsKHRStr (VkDisplayPlaneAlphaFlagsKHR value);
tcu::Format::Bitfield<32> getDebugReportFlagsEXTStr (VkDebugReportFlagsEXT value);
tcu::Format::Bitfield<32> getExternalMemoryHandleTypeFlagsNVStr (VkExternalMemoryHandleTypeFlagsNV value);
tcu::Format::Bitfield<32> getExternalMemoryFeatureFlagsNVStr (VkExternalMemoryFeatureFlagsNV value);
tcu::Format::Bitfield<32> getIndirectCommandsLayoutUsageFlagsNVXStr (VkIndirectCommandsLayoutUsageFlagsNVX value);
tcu::Format::Bitfield<32> getObjectEntryUsageFlagsNVXStr (VkObjectEntryUsageFlagsNVX value);
tcu::Format::Bitfield<32> getInstanceCreateFlagsStr (VkInstanceCreateFlags value);
tcu::Format::Bitfield<32> getDeviceCreateFlagsStr (VkDeviceCreateFlags value);
tcu::Format::Bitfield<32> getDeviceQueueCreateFlagsStr (VkDeviceQueueCreateFlags value);
tcu::Format::Bitfield<32> getMemoryMapFlagsStr (VkMemoryMapFlags value);
tcu::Format::Bitfield<32> getSemaphoreCreateFlagsStr (VkSemaphoreCreateFlags value);
tcu::Format::Bitfield<32> getEventCreateFlagsStr (VkEventCreateFlags value);
tcu::Format::Bitfield<32> getQueryPoolCreateFlagsStr (VkQueryPoolCreateFlags value);
tcu::Format::Bitfield<32> getBufferViewCreateFlagsStr (VkBufferViewCreateFlags value);
tcu::Format::Bitfield<32> getImageViewCreateFlagsStr (VkImageViewCreateFlags value);
tcu::Format::Bitfield<32> getShaderModuleCreateFlagsStr (VkShaderModuleCreateFlags value);
tcu::Format::Bitfield<32> getPipelineCacheCreateFlagsStr (VkPipelineCacheCreateFlags value);
tcu::Format::Bitfield<32> getPipelineShaderStageCreateFlagsStr (VkPipelineShaderStageCreateFlags value);
tcu::Format::Bitfield<32> getPipelineVertexInputStateCreateFlagsStr (VkPipelineVertexInputStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineInputAssemblyStateCreateFlagsStr (VkPipelineInputAssemblyStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineTessellationStateCreateFlagsStr (VkPipelineTessellationStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineViewportStateCreateFlagsStr (VkPipelineViewportStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineRasterizationStateCreateFlagsStr (VkPipelineRasterizationStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineMultisampleStateCreateFlagsStr (VkPipelineMultisampleStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineDepthStencilStateCreateFlagsStr (VkPipelineDepthStencilStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineColorBlendStateCreateFlagsStr (VkPipelineColorBlendStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineDynamicStateCreateFlagsStr (VkPipelineDynamicStateCreateFlags value);
tcu::Format::Bitfield<32> getPipelineLayoutCreateFlagsStr (VkPipelineLayoutCreateFlags value);
tcu::Format::Bitfield<32> getSamplerCreateFlagsStr (VkSamplerCreateFlags value);
tcu::Format::Bitfield<32> getDescriptorPoolResetFlagsStr (VkDescriptorPoolResetFlags value);
tcu::Format::Bitfield<32> getFramebufferCreateFlagsStr (VkFramebufferCreateFlags value);
tcu::Format::Bitfield<32> getRenderPassCreateFlagsStr (VkRenderPassCreateFlags value);
tcu::Format::Bitfield<32> getSubpassDescriptionFlagsStr (VkSubpassDescriptionFlags value);
tcu::Format::Bitfield<32> getSwapchainCreateFlagsKHRStr (VkSwapchainCreateFlagsKHR value);
tcu::Format::Bitfield<32> getDisplayModeCreateFlagsKHRStr (VkDisplayModeCreateFlagsKHR value);
tcu::Format::Bitfield<32> getDisplaySurfaceCreateFlagsKHRStr (VkDisplaySurfaceCreateFlagsKHR value);
tcu::Format::Bitfield<32> getXlibSurfaceCreateFlagsKHRStr (VkXlibSurfaceCreateFlagsKHR value);
tcu::Format::Bitfield<32> getXcbSurfaceCreateFlagsKHRStr (VkXcbSurfaceCreateFlagsKHR value);
tcu::Format::Bitfield<32> getWaylandSurfaceCreateFlagsKHRStr (VkWaylandSurfaceCreateFlagsKHR value);
tcu::Format::Bitfield<32> getMirSurfaceCreateFlagsKHRStr (VkMirSurfaceCreateFlagsKHR value);
tcu::Format::Bitfield<32> getAndroidSurfaceCreateFlagsKHRStr (VkAndroidSurfaceCreateFlagsKHR value);
tcu::Format::Bitfield<32> getWin32SurfaceCreateFlagsKHRStr (VkWin32SurfaceCreateFlagsKHR value);
tcu::Format::Bitfield<32> getCommandPoolTrimFlagsKHRStr (VkCommandPoolTrimFlagsKHR value);
tcu::Format::Bitfield<32> getDescriptorUpdateTemplateCreateFlagsKHRStr (VkDescriptorUpdateTemplateCreateFlagsKHR value);
std::ostream& operator<< (std::ostream& s, const VkApplicationInfo& value);
std::ostream& operator<< (std::ostream& s, const VkInstanceCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkAllocationCallbacks& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceFeatures& value);
std::ostream& operator<< (std::ostream& s, const VkFormatProperties& value);
std::ostream& operator<< (std::ostream& s, const VkExtent3D& value);
std::ostream& operator<< (std::ostream& s, const VkImageFormatProperties& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceLimits& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceSparseProperties& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceProperties& value);
std::ostream& operator<< (std::ostream& s, const VkQueueFamilyProperties& value);
std::ostream& operator<< (std::ostream& s, const VkMemoryType& value);
std::ostream& operator<< (std::ostream& s, const VkMemoryHeap& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceMemoryProperties& value);
std::ostream& operator<< (std::ostream& s, const VkDeviceQueueCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkDeviceCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkExtensionProperties& value);
std::ostream& operator<< (std::ostream& s, const VkLayerProperties& value);
std::ostream& operator<< (std::ostream& s, const VkSubmitInfo& value);
std::ostream& operator<< (std::ostream& s, const VkMemoryAllocateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkMappedMemoryRange& value);
std::ostream& operator<< (std::ostream& s, const VkMemoryRequirements& value);
std::ostream& operator<< (std::ostream& s, const VkSparseImageFormatProperties& value);
std::ostream& operator<< (std::ostream& s, const VkSparseImageMemoryRequirements& value);
std::ostream& operator<< (std::ostream& s, const VkSparseMemoryBind& value);
std::ostream& operator<< (std::ostream& s, const VkSparseBufferMemoryBindInfo& value);
std::ostream& operator<< (std::ostream& s, const VkSparseImageOpaqueMemoryBindInfo& value);
std::ostream& operator<< (std::ostream& s, const VkImageSubresource& value);
std::ostream& operator<< (std::ostream& s, const VkOffset3D& value);
std::ostream& operator<< (std::ostream& s, const VkSparseImageMemoryBind& value);
std::ostream& operator<< (std::ostream& s, const VkSparseImageMemoryBindInfo& value);
std::ostream& operator<< (std::ostream& s, const VkBindSparseInfo& value);
std::ostream& operator<< (std::ostream& s, const VkFenceCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkSemaphoreCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkEventCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkQueryPoolCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkBufferCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkBufferViewCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkImageCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkSubresourceLayout& value);
std::ostream& operator<< (std::ostream& s, const VkComponentMapping& value);
std::ostream& operator<< (std::ostream& s, const VkImageSubresourceRange& value);
std::ostream& operator<< (std::ostream& s, const VkImageViewCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkShaderModuleCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineCacheCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkSpecializationMapEntry& value);
std::ostream& operator<< (std::ostream& s, const VkSpecializationInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineShaderStageCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkVertexInputBindingDescription& value);
std::ostream& operator<< (std::ostream& s, const VkVertexInputAttributeDescription& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineVertexInputStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineInputAssemblyStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineTessellationStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkViewport& value);
std::ostream& operator<< (std::ostream& s, const VkOffset2D& value);
std::ostream& operator<< (std::ostream& s, const VkExtent2D& value);
std::ostream& operator<< (std::ostream& s, const VkRect2D& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineViewportStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineRasterizationStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineMultisampleStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkStencilOpState& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineDepthStencilStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineColorBlendAttachmentState& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineColorBlendStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineDynamicStateCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkGraphicsPipelineCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkComputePipelineCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkPushConstantRange& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineLayoutCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkSamplerCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorSetLayoutBinding& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorSetLayoutCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorPoolSize& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorPoolCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorSetAllocateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorImageInfo& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorBufferInfo& value);
std::ostream& operator<< (std::ostream& s, const VkWriteDescriptorSet& value);
std::ostream& operator<< (std::ostream& s, const VkCopyDescriptorSet& value);
std::ostream& operator<< (std::ostream& s, const VkFramebufferCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkAttachmentDescription& value);
std::ostream& operator<< (std::ostream& s, const VkAttachmentReference& value);
std::ostream& operator<< (std::ostream& s, const VkSubpassDescription& value);
std::ostream& operator<< (std::ostream& s, const VkSubpassDependency& value);
std::ostream& operator<< (std::ostream& s, const VkRenderPassCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkCommandPoolCreateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkCommandBufferAllocateInfo& value);
std::ostream& operator<< (std::ostream& s, const VkCommandBufferInheritanceInfo& value);
std::ostream& operator<< (std::ostream& s, const VkCommandBufferBeginInfo& value);
std::ostream& operator<< (std::ostream& s, const VkBufferCopy& value);
std::ostream& operator<< (std::ostream& s, const VkImageSubresourceLayers& value);
std::ostream& operator<< (std::ostream& s, const VkImageCopy& value);
std::ostream& operator<< (std::ostream& s, const VkImageBlit& value);
std::ostream& operator<< (std::ostream& s, const VkBufferImageCopy& value);
std::ostream& operator<< (std::ostream& s, const VkClearColorValue& value);
std::ostream& operator<< (std::ostream& s, const VkClearDepthStencilValue& value);
std::ostream& operator<< (std::ostream& s, const VkClearValue& value);
std::ostream& operator<< (std::ostream& s, const VkClearAttachment& value);
std::ostream& operator<< (std::ostream& s, const VkClearRect& value);
std::ostream& operator<< (std::ostream& s, const VkImageResolve& value);
std::ostream& operator<< (std::ostream& s, const VkMemoryBarrier& value);
std::ostream& operator<< (std::ostream& s, const VkBufferMemoryBarrier& value);
std::ostream& operator<< (std::ostream& s, const VkImageMemoryBarrier& value);
std::ostream& operator<< (std::ostream& s, const VkRenderPassBeginInfo& value);
std::ostream& operator<< (std::ostream& s, const VkDispatchIndirectCommand& value);
std::ostream& operator<< (std::ostream& s, const VkDrawIndexedIndirectCommand& value);
std::ostream& operator<< (std::ostream& s, const VkDrawIndirectCommand& value);
std::ostream& operator<< (std::ostream& s, const VkSurfaceCapabilitiesKHR& value);
std::ostream& operator<< (std::ostream& s, const VkSurfaceFormatKHR& value);
std::ostream& operator<< (std::ostream& s, const VkSwapchainCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkPresentInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDisplayPropertiesKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDisplayModeParametersKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDisplayModePropertiesKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDisplayModeCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDisplayPlaneCapabilitiesKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDisplayPlanePropertiesKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDisplaySurfaceCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDisplayPresentInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkXlibSurfaceCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkXcbSurfaceCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkWaylandSurfaceCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkMirSurfaceCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkAndroidSurfaceCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkWin32SurfaceCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceFeatures2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceProperties2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkFormatProperties2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkImageFormatProperties2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceImageFormatInfo2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkQueueFamilyProperties2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceMemoryProperties2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkSparseImageFormatProperties2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceSparseImageFormatInfo2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkRectLayerKHR& value);
std::ostream& operator<< (std::ostream& s, const VkPresentRegionKHR& value);
std::ostream& operator<< (std::ostream& s, const VkPresentRegionsKHR& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceSurfaceInfo2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkSurfaceCapabilities2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkSurfaceFormat2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkSharedPresentSurfaceCapabilitiesKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDebugReportCallbackCreateInfoEXT& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineRasterizationStateRasterizationOrderAMD& value);
std::ostream& operator<< (std::ostream& s, const VkDebugMarkerObjectNameInfoEXT& value);
std::ostream& operator<< (std::ostream& s, const VkDebugMarkerObjectTagInfoEXT& value);
std::ostream& operator<< (std::ostream& s, const VkDebugMarkerMarkerInfoEXT& value);
std::ostream& operator<< (std::ostream& s, const VkDedicatedAllocationImageCreateInfoNV& value);
std::ostream& operator<< (std::ostream& s, const VkDedicatedAllocationBufferCreateInfoNV& value);
std::ostream& operator<< (std::ostream& s, const VkDedicatedAllocationMemoryAllocateInfoNV& value);
std::ostream& operator<< (std::ostream& s, const VkExternalImageFormatPropertiesNV& value);
std::ostream& operator<< (std::ostream& s, const VkExternalMemoryImageCreateInfoNV& value);
std::ostream& operator<< (std::ostream& s, const VkExportMemoryAllocateInfoNV& value);
std::ostream& operator<< (std::ostream& s, const VkImportMemoryWin32HandleInfoNV& value);
std::ostream& operator<< (std::ostream& s, const VkExportMemoryWin32HandleInfoNV& value);
std::ostream& operator<< (std::ostream& s, const VkWin32KeyedMutexAcquireReleaseInfoNV& value);
std::ostream& operator<< (std::ostream& s, const VkValidationFlagsEXT& value);
std::ostream& operator<< (std::ostream& s, const VkDeviceGeneratedCommandsFeaturesNVX& value);
std::ostream& operator<< (std::ostream& s, const VkDeviceGeneratedCommandsLimitsNVX& value);
std::ostream& operator<< (std::ostream& s, const VkIndirectCommandsTokenNVX& value);
std::ostream& operator<< (std::ostream& s, const VkIndirectCommandsLayoutTokenNVX& value);
std::ostream& operator<< (std::ostream& s, const VkIndirectCommandsLayoutCreateInfoNVX& value);
std::ostream& operator<< (std::ostream& s, const VkCmdProcessCommandsInfoNVX& value);
std::ostream& operator<< (std::ostream& s, const VkCmdReserveSpaceForCommandsInfoNVX& value);
std::ostream& operator<< (std::ostream& s, const VkObjectTableCreateInfoNVX& value);
std::ostream& operator<< (std::ostream& s, const VkObjectTableEntryNVX& value);
std::ostream& operator<< (std::ostream& s, const VkObjectTablePipelineEntryNVX& value);
std::ostream& operator<< (std::ostream& s, const VkObjectTableDescriptorSetEntryNVX& value);
std::ostream& operator<< (std::ostream& s, const VkObjectTableVertexBufferEntryNVX& value);
std::ostream& operator<< (std::ostream& s, const VkObjectTableIndexBufferEntryNVX& value);
std::ostream& operator<< (std::ostream& s, const VkObjectTablePushConstantEntryNVX& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDevicePushDescriptorPropertiesKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorUpdateTemplateEntryKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorUpdateTemplateCreateInfoKHR& value);
std::ostream& operator<< (std::ostream& s, const VkRefreshCycleDurationGOOGLE& value);
std::ostream& operator<< (std::ostream& s, const VkPastPresentationTimingGOOGLE& value);
std::ostream& operator<< (std::ostream& s, const VkPresentTimeGOOGLE& value);
std::ostream& operator<< (std::ostream& s, const VkPresentTimesInfoGOOGLE& value);
|
/*Copyright 2014 George Karagoulis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.*/
#include "htmlboardgenerator.h"
#include "gkchess_piece.h"
#include "gkchess_board.h"
#include "gkchess_square.h"
#include <QXmlStreamWriter>
NAMESPACE_GKCHESS;
// Produces the inline CSS for the outer <table> element.
// The formatting options are accepted for interface symmetry but currently unused.
static QString __generate_table_style(const HtmlBoardGenerator::HtmlFormattingOptions &f)
{
    GUTIL_UNUSED(f);

    static const char *table_style =
            "text-align:center;"
            "border-spacing:1pt;"
            "font-family:'Arial Unicode MS';"
            "border-collapse:collapse;"
            "border-color:#FFFFFFFF;"
            "border-style:solid;"
            "border-width:0pt 0pt 0pt 0pt;";
    return table_style;
}
// Produces the inline CSS applied to every board row; options are ignored.
static QString __generate_row_style(const HtmlBoardGenerator::HtmlFormattingOptions &)
{
    const QString row_style("vertical-align:bottom;");
    return row_style;
}
// Selects the configured background color for a square based on its shade.
static QColor __get_square_color(Square const &s, const HtmlBoardGenerator::HtmlFormattingOptions &f)
{
    return s.IsDarkSquare() ? f.DarkSquareColor : f.LightSquareColor;
}
// Produces the inline CSS for one board cell: fixed square size, border, and
// the square's background color rendered as a 6-digit hex value (alpha stripped).
static QString __generate_cell_style(const Square &s, const HtmlBoardGenerator::HtmlFormattingOptions &f)
{
    const auto background = 0x00FFFFFF & __get_square_color(s, f).rgb();

    QString cell_style("width:%1pt;"
                       "height:%2pt;"
                       "border-collapse:collapse;"
                       "border-color:#FFFFFFFF;"
                       "border-style:solid;"
                       "border-width:%3pt %3pt %3pt %3pt;"
                       "background-color:#%4;");
    return cell_style
            .arg(f.SquareSize)
            .arg(f.SquareSize)
            .arg(f.BorderSize)
            .arg(background, 6, 16, QChar('0'));
}
// Produces the inline CSS for a piece glyph: configured size and piece color
// as a 6-digit hex value (alpha stripped). The piece itself is not consulted.
static QString __generate_piece_style(Piece const &, const HtmlBoardGenerator::HtmlFormattingOptions &f)
{
    const auto piece_color = 0x00FFFFFF & f.PieceColor.rgb();

    return QString("font-size:%1pt;"
                   "color:#%2")
            .arg(f.PieceSize)
            .arg(piece_color, 6, 16, QChar('0'));
}
// Default formatting: human-readable (indented) HTML, black 30pt pieces on a
// white/gray board of 40pt squares with 1pt borders and 15pt rank/file indices.
HtmlBoardGenerator::HtmlFormattingOptions::HtmlFormattingOptions()
    :HumanReadable(true),
      PieceColor(::Qt::black),
      LightSquareColor(::Qt::white),
      DarkSquareColor(::Qt::gray),
      SquareSize(40),       // side length of one square, in points
      BorderSize(1),        // cell border width, in points
      PieceSize(30),        // piece glyph font size, in points
      IndexSize(15)         // rank number / file letter font size, in points
{}
// Renders the given board as a self-contained HTML <table>: one row per rank
// (top rank first), a leading column with rank numbers, piece glyphs as
// unicode entity references, and a final row with the file letters (a, b, ...).
// Returns an empty string for a degenerate (0-row or 0-column) board.
QString HtmlBoardGenerator::GenerateHtml(const Board &b, const HtmlFormattingOptions &f)
{
    QString html;
    if(b.ColumnCount() > 0 && b.RowCount() > 0)
    {
        QXmlStreamWriter sw(&html);
        sw.setAutoFormatting(f.HumanReadable);

        sw.writeStartElement("table");
        sw.writeAttribute("style", __generate_table_style(f));

        // Write each row to html, from the top rank (highest index) downwards
        for(int i = b.RowCount() - 1; 0 <= i; --i)
        {
            sw.writeStartElement("tr");
            sw.writeAttribute("style", __generate_row_style(f));

            // Write the row number:
            sw.writeStartElement("td");
            sw.writeAttribute("style",
                              QString("vertical-align:middle;width:12pt;font-size:%1pt;")
                              .arg(f.IndexSize));
            sw.writeCharacters(QString("%1").arg(i + 1));
            sw.writeEndElement(); //td

            // Iterate through the columns and write each cell
            for(int j = 0; j < b.ColumnCount(); ++j)
            {
                Square const &s( b.SquareAt(j, i) );

                sw.writeStartElement("td");
                sw.writeAttribute("style", __generate_cell_style(s, f));

                // Put a piece in the square if there is one
                Piece const &p = s.GetPiece();
                if(!p.IsNull())
                {
                    sw.writeStartElement("span");
                    sw.writeAttribute("style", __generate_piece_style(p, f));
                    sw.writeEntityReference(QString("#%1").arg(p.UnicodeValue()));
                    sw.writeEndElement(); //span
                }
                else
                {
                    // Need to write an empty piece to force the xml stream writer to write a
                    // close tag for the cell
                    sw.writeCharacters(" ");
                }

                sw.writeEndElement(); //td
            }

            sw.writeEndElement(); //tr
        }

        // Write the last row which holds the column letters
        sw.writeStartElement("tr");
        {
            // Empty cell under the rank-number column
            sw.writeStartElement("td");
            sw.writeCharacters("");
            sw.writeEndElement(); //td

            char letter = 'a';

            // BUGFIX: iterate over the columns (was RowCount()); on a
            // non-square board the wrong number of file letters was emitted
            for(int j = 0; j < b.ColumnCount(); ++j, ++letter)
            {
                sw.writeStartElement("td");
                sw.writeAttribute("style", QString("font-size:%1pt;").arg(f.IndexSize));
                sw.writeCharacters(QString::fromUtf8(&letter, 1));
                sw.writeEndElement(); //td
            }
        }
        sw.writeEndElement(); //tr

        sw.writeEndElement(); //table
        // BUGFIX: removed a stray writeEndElement() commented "//html" — no
        // "html" element was ever opened, so the writer was asked to close a
        // non-existent tag
    }
    return html;
}
END_NAMESPACE_GKCHESS;
|
#define _CRT_SECURE_NO_WARNINGS
#include <iostream>
#include <string>
#include <algorithm>
#include <vector>
#include <queue>
using namespace std;
// Breadth-first search over a dense adjacency matrix (nonzero = edge).
// Prints the shortest-path length from s to e, a newline, then the path itself
// as 1-based vertex numbers separated by spaces; prints -1 (no newline) if e
// is unreachable. s and e are 0-based and assumed to be valid indices.
void BFS(int s, int e, std::vector<std::vector<int>> &adjmatrix) {
    const int n = static_cast<int>(adjmatrix.size());
    std::vector<bool> visited(n);
    std::vector<int> dist(n), parent(n);

    // FIX: renamed from `queue` — the original local shadowed the std::queue
    // type name, which is confusing and fragile
    std::queue<int> frontier;
    frontier.push(s);
    visited[s] = true;
    parent[s] = -1;

    while (!frontier.empty()) {
        const int v = frontier.front();
        frontier.pop();
        // FIX: loop bound uses int n instead of comparing a signed index
        // against the unsigned size_t returned by size()
        for (int i = 0; i < n; ++i) {
            if (adjmatrix[v][i] != 0 && !visited[i]) {
                visited[i] = true;
                frontier.push(i);
                dist[i] = dist[v] + 1;
                parent[i] = v;
            }
        }
    }

    if (!visited[e]) {
        std::cout << -1;
    } else {
        std::cout << dist[e] << std::endl;
        // Reconstruct the path by walking parent links back from e to s
        std::vector<int> path;
        for (int v = e; v != -1; v = parent[v])
            path.push_back(v);
        std::reverse(path.begin(), path.end());
        for (int v : path)
            std::cout << v + 1 << " ";
    }
}
// Entry point: reads an n x n adjacency matrix followed by 1-based start/end
// vertices from input.txt, writes the BFS shortest-path result to output.txt.
int main() {
    freopen("input.txt", "r", stdin);
    freopen("output.txt", "w", stdout);

    int n;
    std::cin >> n;

    std::vector<std::vector<int>> matrix(n, std::vector<int>(n));
    for (auto &row : matrix)
        for (auto &cell : row)
            std::cin >> cell;

    int s, e;
    std::cin >> s >> e;

    // A vertex trivially reaches itself; otherwise run BFS on 0-based indices.
    if (s == e)
        std::cout << 0 << std::endl;
    else
        BFS(s - 1, e - 1, matrix);

    return 0;
}
|
#include "IRremoteInt.h"
// MagiQuest added by E. Stuart Hicks
// Based off the Magiquest fork of Arduino-IRremote by mpflaga
// https://github.com/mpflaga/Arduino-IRremote/
//==============================================================================
//
//
// M A G I Q U E S T
//
//
//==============================================================================
// MagiQuest packet is both Wand ID and magnitude of swish and flick
union magiquest_t {
    uint64_t llword;            // the whole packet as one 64-bit word, for shift-in decoding
    struct {
        uint16_t magnitude;     // swish/flick strength; occupies the low word on a
                                // little-endian target — TODO confirm on other MCUs
        uint32_t wand_id;       // unique identifier of the wand
        uint8_t padding;
        uint8_t scrap;  // just to pad the struct out to 64 bits so we can union with llword
    } cmd;
};

#define MAGIQUEST_MAGNITUDE_BITS    16      // The number of bits
#define MAGIQUEST_WAND_ID_BITS      32      // The number of bits

// Total payload length: magnitude + wand id (start/stop bits are extra).
#define MAGIQUEST_BITS  (MAGIQUEST_MAGNITUDE_BITS + MAGIQUEST_WAND_ID_BITS)     // The number of bits in the command itself

#define MAGIQUEST_PERIOD        1150    // Length of time a full MQ "bit" consumes (1100 - 1200 usec)
/*
 * 0 = 25% mark & 75% space across 1 period
 *     1150 * 0.25 = 288 usec mark
 *     1150 - 288 = 862 usec space
 * 1 = 50% mark & 50% space across 1 period
 *     1150 * 0.5  = 575 usec mark
 *     1150 - 575 = 575 usec space
 */
#define MAGIQUEST_UNIT          288     // base time unit (one quarter period)

#define MAGIQUEST_ONE_MARK      (2* MAGIQUEST_UNIT) // 576
#define MAGIQUEST_ONE_SPACE     (2* MAGIQUEST_UNIT) // 576
#define MAGIQUEST_ZERO_MARK     MAGIQUEST_UNIT
#define MAGIQUEST_ZERO_SPACE    (3* MAGIQUEST_UNIT) // 864

//#define MAGIQUEST_MASK       (1ULL << (MAGIQUEST_BITS-1))
//+=============================================================================
//
/*
 * Transmits one MagiQuest frame: 2 start bits (zeros), the 32-bit wand id
 * (MSB first), then the 16-bit magnitude (MSB first) followed by a stop bit.
 * Timings are the pulse-width encoding defined by the MAGIQUEST_* constants.
 */
void IRsend::sendMagiQuest(uint32_t wand_id, uint16_t magnitude) {
//    magiquest_t data;
//
//    data.llword = 0;
//    data.cmd.wand_id = wand_id;
//    data.cmd.magnitude = magnitude;

    // Set IR carrier frequency
    enableIROut(38);

    // 2 start bits (value 0, so two zero-bit pulses)
    sendPulseDistanceWidthData(MAGIQUEST_ONE_MARK, MAGIQUEST_ONE_SPACE, MAGIQUEST_ZERO_MARK, MAGIQUEST_ZERO_SPACE, 0, 2, PROTOCOL_IS_MSB_FIRST);

    // Data: wand id first, then magnitude; the stop bit is appended after the
    // final (magnitude) chunk
    sendPulseDistanceWidthData(MAGIQUEST_ONE_MARK, MAGIQUEST_ONE_SPACE, MAGIQUEST_ZERO_MARK, MAGIQUEST_ZERO_SPACE, wand_id,
    MAGIQUEST_WAND_ID_BITS, PROTOCOL_IS_MSB_FIRST);
    sendPulseDistanceWidthData(MAGIQUEST_ONE_MARK, MAGIQUEST_ONE_SPACE, MAGIQUEST_ZERO_MARK, MAGIQUEST_ZERO_SPACE, magnitude,
    MAGIQUEST_MAGNITUDE_BITS, PROTOCOL_IS_MSB_FIRST, SEND_STOP_BIT);

    // Legacy bit-banging implementation, kept for reference:
//    for (unsigned long long mask = MAGIQUEST_MASK; mask > 0; mask >>= 1) {
//        if (data.llword & mask) {
//            mark(MAGIQUEST_ONE_MARK);
//            space(MAGIQUEST_ONE_SPACE);
//        } else {
//            mark(MAGIQUEST_ZERO_MARK);
//            space(MAGIQUEST_ZERO_SPACE);
//        }
//    }
}
//+=============================================================================
//
/*
 * Decodes a received MagiQuest frame, which is not really compatible with the
 * standard decoder layout: the 48 payload bits are split across two fields, so
 * the wand id goes to decodedRawData and the magnitude to the `extra` field
 * (flagged with IRDATA_FLAGS_EXTRA_INFO).
 */
bool IRrecv::decodeMagiQuest() {
    magiquest_t data;  // Somewhere to build our code
    unsigned int offset = 1;  // Skip the gap reading

    unsigned int mark_;
    unsigned int space_;
    unsigned int ratio_;

#ifdef DEBUG
    char bitstring[(2 * MAGIQUEST_BITS) + 6];
    memset(bitstring, 0, sizeof(bitstring));
#endif

    // Check we have enough data (102), + 6 for 2 start and 1 stop bit
    if (decodedIRData.rawDataPtr->rawlen != (2 * MAGIQUEST_BITS) + 6) {
        return false;
    }

    // Read the bits in: each bit is one mark/space pair. The pair must sum to
    // roughly one MAGIQUEST_PERIOD; the space/mark ratio then distinguishes a
    // 0 (25% mark, ratio ~3) from a 1 (50% mark, ratio ~1) — see the duty-cycle
    // table above the MAGIQUEST_UNIT definition.
    data.llword = 0;
    while (offset + 1 < decodedIRData.rawDataPtr->rawlen) {
        mark_ = decodedIRData.rawDataPtr->rawbuf[offset++];
        space_ = decodedIRData.rawDataPtr->rawbuf[offset++];
        ratio_ = space_ / mark_;

        DBG_PRINT("MagiQuest: ");
        DBG_PRINT("mark=");
        DBG_PRINT(mark_ * MICROS_PER_TICK);
        DBG_PRINT(" space=");
        DBG_PRINT(space_ * MICROS_PER_TICK);
        DBG_PRINT(" ratio=");
        DBG_PRINTLN(ratio_);

        if (MATCH_MARK(space_ + mark_, MAGIQUEST_PERIOD)) {
            if (ratio_ > 1) {
                // It's a 0
                data.llword <<= 1;
#ifdef DEBUG
                bitstring[(offset / 2) - 1] = '0';
#endif
            } else {
                // It's a 1
                data.llword = (data.llword << 1) | 1;
#ifdef DEBUG
                bitstring[(offset / 2) - 1] = '1';
#endif
            }
        } else {
            // A pair that doesn't sum to one period means this isn't MagiQuest
            DBG_PRINTLN("MATCH_MARK failed");
            return false;
        }
    }
#ifdef DEBUG
    DBG_PRINTLN(bitstring);
#endif

    // Success: publish the decoded fields (magnitude/wand_id are read back out
    // of the union, so this presumably relies on little-endian layout — see the
    // note on magiquest_t)
    decodedIRData.protocol = MAGIQUEST;
    decodedIRData.numberOfBits = offset / 2;
    decodedIRData.flags = IRDATA_FLAGS_EXTRA_INFO;
    decodedIRData.extra = data.cmd.magnitude;
    decodedIRData.decodedRawData = data.cmd.wand_id;

    return true;
}
|
// system includes
#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
#include <tuple>

extern "C" {
    #include <appimage/appimage.h>
    #include <glib.h>
    // #include <libgen.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <unistd.h>
}
// library includes
#include <QDebug>
#include <QIcon>
#include <QtDBus>
#include <QDirIterator>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonParseError>
#include <QLibraryInfo>
#include <QMap>
#include <QMapIterator>
#include <QMessageBox>
#include <QObject>
#include <QRegularExpression>
#include <QSet>
#include <QSettings>
#include <QStandardPaths>
#include <QWindow>
#include <QPushButton>
#include <QPixmap>
#ifdef ENABLE_UPDATE_HELPER
#include <appimage/update.h>
#endif
// local headers
#include "shared.h"
#include "translationmanager.h"
// Custom deleter for smart pointers wrapping a GKeyFile (glib has no RAII).
static void gKeyFileDeleter(GKeyFile* ptr) {
    if (ptr == nullptr)
        return;

    g_key_file_free(ptr);
}
// Custom deleter for smart pointers wrapping a GError (glib has no RAII).
static void gErrorDeleter(GError* ptr) {
    if (ptr == nullptr)
        return;

    g_error_free(ptr);
}
// Adds the execute bits (u+x, g+x, o+x) to the given file, unless the current
// user can already execute it (e.g., an AppImage on a read-only mount).
// Returns false when stat() or chmod() fails.
bool makeExecutable(const QString& path) {
    const std::string nativePath = path.toStdString();

    struct stat fileStat{};
    if (stat(nativePath.c_str(), &fileStat) != 0) {
        std::cerr << "Failed to call stat() on " << nativePath << std::endl;
        return false;
    }

    // no action required when file is executable already
    // this could happen in scenarios when an AppImage is in a read-only location
    const bool executableAsOwner = fileStat.st_uid == getuid() && (fileStat.st_mode & 0100);
    const bool executableAsGroup = fileStat.st_gid == getgid() && (fileStat.st_mode & 0010);
    const bool executableAsOther = (fileStat.st_mode & 0001) != 0;

    if (executableAsOwner || executableAsGroup || executableAsOther)
        return true;

    return chmod(nativePath.c_str(), fileStat.st_mode | 0111) == 0;
}
// Strips all execute bits (user, group, other) from the given file.
// Returns false when stat() or chmod() fails.
bool makeNonExecutable(const QString& path) {
    const std::string nativePath = path.toStdString();

    struct stat fileStat{};
    if (stat(nativePath.c_str(), &fileStat) != 0) {
        std::cerr << "Failed to call stat() on " << nativePath << std::endl;
        return false;
    }

    // remove executable permissions in a single mask operation
    const auto newPermissions = fileStat.st_mode & ~static_cast<mode_t>(0111);

    return chmod(nativePath.c_str(), newPermissions) == 0;
}
// Replaces a leading "~" (alone, or followed by a slash) with the user's home
// directory; any other path is returned unchanged.
QString expandTilde(QString path) {
    const bool isBareTilde = path.size() == 1 && path[0] == '~';
    const bool hasTildePrefix = path.size() >= 2 && path.startsWith("~/");

    if (isBareTilde || hasTildePrefix) {
        path.remove(0, 1);
        path.prepend(QDir::homePath());
    }

    return path;
}
// Calculates the path to the AppImageLauncher config file inside the user's
// standard (writable) configuration directory.
QString getConfigFilePath() {
    return QStandardPaths::writableLocation(QStandardPaths::ConfigLocation) + "/appimagelauncher.cfg";
}
void createConfigFile(int askToMove,
const QString& destination,
int enableDaemon,
const QStringList& additionalDirsToWatch,
int monitorMountedFilesystems) {
auto configFilePath = getConfigFilePath();
QFile file(configFilePath);
file.open(QIODevice::WriteOnly);
// cannot use QSettings because it doesn't support comments
// let's do it manually and hope for the best
file.write("[AppImageLauncher]\n");
if (askToMove < 0) {
file.write("# ask_to_move = true\n");
} else {
file.write("ask_to_move = ");
if (askToMove == 0) {
file.write("false");
} else {
file.write("true");
}
file.write("\n");
}
if (destination.isEmpty()) {
file.write("# destination = ~/Applications\n");
} else {
file.write("destination = ");
file.write(destination.toUtf8());
file.write("\n");
}
if (enableDaemon < 0) {
file.write("# enable_daemon = true\n");
} else {
file.write("enable_daemon = ");
if (enableDaemon == 0) {
file.write("false");
} else {
file.write("true");
}
file.write("\n");
}
file.write("\n\n");
// daemon configs
file.write("[appimagelauncherd]\n");
if (additionalDirsToWatch.empty()) {
file.write("# additional_directories_to_watch = ~/otherApplications:/even/more/applications\n");
} else {
file.write("additional_directories_to_watch = ");
file.write(additionalDirsToWatch.join(':').toUtf8());
file.write("\n");
}
if (monitorMountedFilesystems < 0) {
file.write("# monitor_mounted_filesystems = false\n");
} else {
file.write("monitor_mounted_filesystems = ");
if (monitorMountedFilesystems == 0) {
file.write("false");
} else {
file.write("true");
}
file.write("\n");
}
}
// Loads the AppImageLauncher config if the config file exists, otherwise
// returns nullptr so callers fall back to built-in defaults.
std::shared_ptr<QSettings> getConfig() {
    const auto configFilePath = getConfigFilePath();

    // if the file does not exist, we'll just use the standard location
    // while in theory it would have been possible to just write the default location to the file, if we'd ever change
    // it again, we'd leave a lot of systems in the old state, and would have to write some complex code to resolve
    // the situation
    // therefore, the file is simply created, but left empty intentionally
    if (!QFileInfo::exists(configFilePath))
        return nullptr;

    auto settings = std::make_shared<QSettings>(configFilePath, QSettings::IniFormat);

    // expand ~ in path-valued keys with $HOME
    for (const QString& pathKey : {QString("AppImageLauncher/destination")}) {
        if (!settings->contains(pathKey))
            continue;

        settings->setValue(pathKey, expandTilde(settings->value(pathKey).toString()));
    }

    return settings;
}
// TODO: check if this works with Wayland
// Detects whether the session has a usable display by probing `xhost`
// (exit 0 = access granted, 1 = denied but server present); if xhost is not
// installed, falls back to checking the DISPLAY environment variable.
bool isHeadless() {
    bool isHeadless = true;

    // not really clean to abuse env vars as "global storage", but hey, it works
    if (getenv("_FORCE_HEADLESS")) {
        return true;
    }

    // run xhost silently, discarding all of its output
    QProcess proc;
    proc.setProgram("xhost");
    proc.setStandardOutputFile(QProcess::nullDevice());
    proc.setStandardErrorFile(QProcess::nullDevice());

    proc.start();
    proc.waitForFinished();

    switch (proc.exitCode()) {
        case 255: {
            // program not found, using fallback method
            // NOTE(review): 255 is assumed to mean "xhost missing" — confirm
            // how QProcess reports a failed start on all target platforms
            isHeadless = (getenv("DISPLAY") == nullptr);
            break;
        }
        case 0:
        case 1:
            // 0 = display reachable, 1 = xhost ran but access failed
            isHeadless = proc.exitCode() == 1;
            break;
        default:
            throw std::runtime_error("Headless detection failed: unexpected exit code from xhost");
    }

    return isHeadless;
}
// avoids code duplication, and works for both graphical and non-graphical environments
// Shows title/message on stderr when headless, otherwise in a modal Qt box.
void displayMessageBox(const QString& title, const QString& message, const QMessageBox::Icon icon) {
    if (isHeadless()) {
        std::cerr << title.toStdString() << ": " << message.toStdString() << std::endl;
    } else {
        // little complex, can't use QMessageBox::{critical,warning,...} for the same reason as in main()
        auto* mb = new QMessageBox(icon, title, message, QMessageBox::Ok, nullptr);

        // FIX: have Qt delete the heap-allocated box once it is closed; the
        // old code leaked one QMessageBox per call
        mb->setAttribute(Qt::WA_DeleteOnClose);

        mb->show();
        QApplication::exec();
    }
}
// Convenience wrapper: shows `message` as an error (translated "Error" title,
// critical icon) via displayMessageBox.
void displayError(const QString& message) {
    displayMessageBox(QObject::tr("Error"), message, QMessageBox::Critical);
}
// Convenience wrapper: shows `message` as a warning (translated "Warning"
// title, warning icon) via displayMessageBox.
void displayWarning(const QString& message) {
    displayMessageBox(QObject::tr("Warning"), message, QMessageBox::Warning);
}
// Returns the directory AppImages are integrated into: the user-configured
// destination if present, otherwise the compile-time default.
QDir integratedAppImagesDestination() {
    static const QString keyName("AppImageLauncher/destination");

    const auto config = getConfig();
    if (config != nullptr && config->contains(keyName))
        return config->value(keyName).toString();

    return DEFAULT_INTEGRATION_DESTINATION;
}
// Immutable value type describing one entry (line) of /proc/mounts.
class Mount {
private:
    QString device_;
    QString mountPoint_;
    QString fsType_;
    QString mountOptions_;

public:
    Mount(QString device, QString mountPoint, QString fsType, QString mountOptions) :
        device_(std::move(device)),
        mountPoint_(std::move(mountPoint)),
        fsType_(std::move(fsType)),
        mountOptions_(std::move(mountOptions)) {}

    Mount(const Mount& other) = default;
    Mount& operator=(const Mount& other) = default;

public:
    // accessors return references into this object — do not outlive it
    const QString& getDevice() const { return device_; }

    const QString& getMountPoint() const { return mountPoint_; }

    const QString& getFsType() const { return fsType_; }

    const QString& getMountOptions() const { return mountOptions_; }
};
// Parses /proc/mounts into a list of Mount entries (device, mount point,
// filesystem type, options — the first four whitespace-separated fields).
QList<Mount> listMounts() {
    QList<Mount> mountedDirectories;

    std::ifstream ifs("/proc/mounts");

    std::string _currentLine;
    while (std::getline(ifs, _currentLine)) {
        const auto currentLine = QString::fromStdString(_currentLine);
        const auto parts = currentLine.split(" ");

        // FIX: guard against short/malformed lines — the old code indexed
        // parts[0..3] unconditionally and would abort on e.g. an empty line
        if (parts.size() < 4)
            continue;

        mountedDirectories << Mount{parts[0], parts[1], parts[2], parts[3]};
    }

    return mountedDirectories;
}
// Builds the set of directories (beyond the main integration destination) that
// may contain AppImages: always "/Applications", plus — when requested — an
// "<mountpoint>/Applications" entry for every suitable mounted filesystem.
QSet<QString> additionalAppImagesLocations(const bool includeAllMountPoints) {
    QSet<QString> additionalLocations;

    additionalLocations << "/Applications";

    // integrate AppImages from mounted filesystems, if requested
    // we don't want to read files from any FUSE mounted filesystems nor from any virtual filesystems,
    // so the mounts are filtered aggressively below
    static const auto validFilesystems = {"ext2", "ext3", "ext4", "ntfs", "vfat"};
    static const auto blacklistedMountPointPrefixes = {
        "/var/lib/schroot",
        "/run/docker",
        "/boot",
        "/sys",
        "/proc",
        "/snap",
    };

    if (includeAllMountPoints) {
        for (const auto& mount : listMounts()) {
            const auto& device = mount.getDevice();
            const auto& mountPoint = mount.getMountPoint();
            const auto& fsType = mount.getFsType();

            // we have to filter out virtual filesystems, i.e., ones which have a "nonsense" device path
            // any device that doesn't start with / is likely virtual, this is the first indicator
            if (device.size() < 1 || device[0] != '/') {
                continue;
            }

            // the device should exist for obvious reasons
            if (!QFileInfo(QFileInfo(device).absoluteFilePath()).exists()) {
                continue;
            }

            // we don't want to mount any loop-mounted or bind-mounted or other devices, only... "native" ones
            // therefore we permit only "real" devices listed within /dev
            if (!device.startsWith("/dev/")) {
                continue;
            }

            // there's a few locations which we know we don't want to search for AppImages in
            // either it's a waste of time or otherwise a bad idea, but it will surely save time *not* to search them
            if (std::find_if(blacklistedMountPointPrefixes.begin(), blacklistedMountPointPrefixes.end(),
                             [&mountPoint](const QString& prefix) {
                                 return mountPoint == prefix || mountPoint.startsWith(prefix + "/");
                             }) != blacklistedMountPointPrefixes.end()) {
                continue;
            }

            // we can skip the root mount point, as we handled it above
            if (mountPoint == "/") {
                continue;
            }

            // we only support a limited set of filesystems
            if (std::find(validFilesystems.begin(), validFilesystems.end(), fsType) == validFilesystems.end()) {
                continue;
            }

            // sanity check -- can likely be removed in the future
            if (mountPoint.isEmpty()) {
                const auto message = "empty mount point for mount with device " + device.toStdString();
                throw std::invalid_argument(message);
            }

            // assemble potential applications location; caller needs to check whether the directory exists before setting
            // up e.g., an inotify watch
            const QString additionalLocation(mountPoint + "/Applications");
            additionalLocations << additionalLocation;
        }
    }

    return additionalLocations;
}
bool shallMonitorMountedFilesystems(std::shared_ptr<QSettings> config) {
return config != nullptr &&
config->value("appimagelauncherd/monitor_mounted_filesystems", "false").toBool();
}
// Reads the colon-separated additional_directories_to_watch config value and
// returns the subset that resolves to existing, absolute directories.
QDirSet getAdditionalDirectoriesFromConfig(const std::shared_ptr<QSettings>& config) {
    // getConfig might've returned a null pointer, therefore we have to check this before proceeding
    if (config == nullptr)
        return {};

    constexpr auto configKey = "appimagelauncherd/additional_directories_to_watch";

    const auto configValue = config->value(configKey, "").toString();

    qDebug() << configKey << "value:" << configValue;

    QDirSet additionalDirs{};

    for (auto dirPath : configValue.split(":")) {
        // empty values will, for some reason, be interpreted as "use the home directory"
        // as we don't want to accidentally monitor the home directory, we need to skip those values
        if (dirPath.isEmpty()) {
            qDebug() << "skipping empty directory path";
            continue;
        }

        // make sure to have full path
        qDebug() << "path before tilde expansion:" << dirPath;
        dirPath = expandTilde(dirPath);
        qDebug() << "path after tilde expansion:" << dirPath;

        // non-absolute paths which don't contain a tilde cannot be resolved safely, they likely depend on the cwd
        // therefore, we need to ignore those
        if (!QFileInfo(dirPath).isAbsolute()) {
            std::cerr << "Warning: path " << dirPath.toStdString() << " can not be resolved, skipping" << std::endl;
            continue;
        }

        const QDir dir(dirPath);

        if (!dir.exists()) {
            std::cerr << "Warning: could not find directory " << dirPath.toStdString() << ", skipping" << std::endl;
            continue;
        }

        additionalDirs.insert(dir);
    }

    return additionalDirs;
}
// Assembles the full set of directories the daemon should watch: the main
// integration destination, system/mounted Applications directories, and any
// extra directories listed in the config file.
QDirSet daemonDirectoriesToWatch(const std::shared_ptr<QSettings>& config) {
    QDirSet watchedDirectories;

    // of course we need to watch the main integration directory
    watchedDirectories.insert(integratedAppImagesDestination());

    // however, there's likely additional ones to watch, like a system-wide Applications directory
    const bool monitorMountedFilesystems = config != nullptr && shallMonitorMountedFilesystems(config);
    for (const auto& location : additionalAppImagesLocations(monitorMountedFilesystems)) {
        watchedDirectories.insert(QDir(location).absolutePath());
    }

    // also, we should include additional directories from the config file
    for (const auto& configProvidedDirectory : getAdditionalDirectoriesFromConfig(config)) {
        watchedDirectories.insert(configProvidedDirectory);
    }

    return watchedDirectories;
}
// Computes the target path an AppImage will be moved/copied to on integration:
// <destination>/<basename>[_<md5 digest>][.<suffix>]. The digest is appended
// only when available and not already present in the original path (see #7).
QString buildPathToIntegratedAppImage(const QString& pathToAppImage) {
    // if type 2 AppImage, we can build a "content-aware" filename
    const auto digest = getAppImageDigestMd5(pathToAppImage);

    const QFileInfo appImageInfo(pathToAppImage);

    QString fileName = appImageInfo.completeBaseName();

    if (!digest.isEmpty()) {
        const auto digestSuffix = "_" + digest;

        // check whether digest is already contained in filename
        if (!pathToAppImage.contains(digestSuffix))
            fileName += digestSuffix;
    }

    // must not use completeSuffix() in combination with completeBaseName(), otherwise the final filename is composed
    // incorrectly
    const auto suffix = appImageInfo.suffix();
    if (!suffix.isEmpty()) {
        fileName += "." + suffix;
    }

    return integratedAppImagesDestination().path() + "/" + fileName;
}
std::map<std::string, std::string> findCollisions(const QString& currentNameEntry) {
std::map<std::string, std::string> collisions{};
// default locations of desktop files on systems
const auto directories = {
QString("/usr/share/applications/"),
QStandardPaths::writableLocation(QStandardPaths::GenericDataLocation) + "/applications/"
};
for (const auto& directory : directories) {
QDirIterator iterator(directory, QDirIterator::FollowSymlinks);
while (iterator.hasNext()) {
const auto filename = iterator.next();
if (!QFileInfo(filename).isFile() || !filename.endsWith(".desktop"))
continue;
std::shared_ptr<GKeyFile> desktopFile(g_key_file_new(), gKeyFileDeleter);
std::shared_ptr<GError*> error(nullptr, gErrorDeleter);
// if the key file parser can't load the file, it's most likely not a valid desktop file, so we just skip this file
if (!g_key_file_load_from_file(desktopFile.get(), filename.toStdString().c_str(), G_KEY_FILE_KEEP_TRANSLATIONS, error.get()))
continue;
auto* nameEntry = g_key_file_get_string(desktopFile.get(), G_KEY_FILE_DESKTOP_GROUP, G_KEY_FILE_DESKTOP_KEY_NAME, error.get());
// invalid desktop file, needs to be skipped
if (nameEntry == nullptr)
continue;
if (QString(nameEntry).trimmed().startsWith(currentNameEntry.trimmed())) {
collisions[filename.toStdString()] = nameEntry;
}
}
}
return collisions;
}
// Refreshes the desktop-entry, icon, and MIME caches by invoking whichever of
// the well-known updater tools are installed. Exit codes of the updaters are
// intentionally ignored; always returns true.
bool updateDesktopDatabaseAndIconCaches() {
    const auto dataLocation = QStandardPaths::writableLocation(QStandardPaths::GenericDataLocation);

    const std::map<std::string, std::string> commands = {
        {"update-desktop-database", dataLocation.toStdString() + "/applications"},
        {"gtk-update-icon-cache-3.0", dataLocation.toStdString() + "/icons/hicolor/ -t"},
        {"gtk-update-icon-cache", dataLocation.toStdString() + "/icons/hicolor/ -t"},
        {"xdg-desktop-menu", "forceupdate"},
        {"update-mime-database", dataLocation.toStdString() + "/mime "},
        {"update-icon-caches", dataLocation.toStdString() + "/icons/"},
    };

    for (const auto& command : commands) {
        // only call if the command exists
        // FIX: redirect stdout to /dev/null *before* duplicating it onto
        // stderr; the old order ("2>&1 1>/dev/null") still leaked `which`'s
        // stderr to the terminal
        if (system(("which " + command.first + " 1>/dev/null 2>&1").c_str()) == 0) {
            // exit codes are not evaluated intentionally
            system((command.first + " " + command.second).c_str());
        }
    }

    return true;
}
// Resolves the absolute path of the running executable via /proc/self/exe.
// Throws std::runtime_error if resolution fails.
std::shared_ptr<char> getOwnBinaryPath() {
    // FIX: realpath(3) allocates the buffer with malloc(); the previous
    // default deleter called `delete` on it, which is undefined behavior —
    // the matching deallocator is free()
    auto path = std::shared_ptr<char>(realpath("/proc/self/exe", nullptr), ::free);

    if (path == nullptr)
        throw std::runtime_error("Could not detect path to own binary; something must be horribly broken");

    return path;
}
#ifndef BUILD_LITE
// Locates the directory holding AppImageLauncher's private helper binaries.
// Resolution order: <install prefix>/PRIVATE_LIBDIR, then a sibling build-tree
// directory (development builds), then the own binary's directory (helpers
// installed next to each other).
QString privateLibDirPath(const QString& srcSubdirName) {
    // PRIVATE_LIBDIR will be a relative path most likely
    // therefore, we need to detect the install prefix based on our own binary path, and then calculate the path to
    // the helper tools based on that
    const QString ownBinaryDirPath = QFileInfo(getOwnBinaryPath().get()).dir().absolutePath();
    const QString installPrefixPath = QFileInfo(ownBinaryDirPath).dir().absolutePath();

    QString privateLibDirPath = installPrefixPath + "/" + PRIVATE_LIBDIR;

    // the following lines make things work during development: here, the build dir path is inserted instead, which
    // allows for testing with the latest changes
    if (!QDir(privateLibDirPath).exists()) {
        // this makes sure that when we're running from a local dev build, we end up in the right directory
        // very important when running this code from the daemon, since it's not in the same directory as the helpers
        privateLibDirPath = ownBinaryDirPath + "/../" + srcSubdirName;
    }

    // if there is no such directory like <prefix>/bin/../lib/... or the binary is not found there, there is a chance
    // the binary is just next to this one (this is the case in the update/remove helpers)
    // therefore we compare the binary directory path with PRIVATE_LIBDIR
    if (!QDir(privateLibDirPath).exists()) {
        if (privateLibDirPath.contains(PRIVATE_LIBDIR)) {
            privateLibDirPath = ownBinaryDirPath;
        }
    }

    return privateLibDirPath;
}
#endif
bool installDesktopFileAndIcons(const QString& pathToAppImage, bool resolveCollisions) {
if (appimage_register_in_system(pathToAppImage.toStdString().c_str(), false) != 0) {
displayError(QObject::tr("Failed to register AppImage in system via libappimage"));
return false;
}
const auto* desktopFilePath = appimage_registered_desktop_file_path(pathToAppImage.toStdString().c_str(), nullptr, false);
// sanity check -- if the file doesn't exist, the function returns NULL
if (desktopFilePath == nullptr) {
displayError(QObject::tr("Failed to find integrated desktop file"));
return false;
}
// check that file exists
if (!QFile(desktopFilePath).exists()) {
displayError(QObject::tr("Couldn't find integrated AppImage's desktop file"));
return false;
}
/* write AppImageLauncher specific entries to desktop file
*
* unfortunately, QSettings doesn't work as a desktop file reader/writer, and libqtxdg isn't really meant to be
* used by projects via add_subdirectory/ExternalProject
* a system dependency is not an option for this project, and we link to glib already anyway, so let's just use
* glib, which is known to work
*/
std::shared_ptr<GKeyFile> desktopFile(g_key_file_new(), gKeyFileDeleter);
std::shared_ptr<GError*> error(nullptr, gErrorDeleter);
const auto flags = GKeyFileFlags(G_KEY_FILE_KEEP_COMMENTS | G_KEY_FILE_KEEP_TRANSLATIONS);
auto handleError = [error, desktopFile]() {
std::ostringstream ss;
ss << QObject::tr("Failed to load desktop file:").toStdString() << std::endl << (*error)->message;
displayError(QString::fromStdString(ss.str()));
};
if (!g_key_file_load_from_file(desktopFile.get(), desktopFilePath, flags, error.get())) {
handleError();
return false;
}
const auto* nameEntry = g_key_file_get_string(desktopFile.get(), G_KEY_FILE_DESKTOP_GROUP, G_KEY_FILE_DESKTOP_KEY_NAME, error.get());
if (nameEntry == nullptr) {
displayWarning(QObject::tr("AppImage has invalid desktop file"));
}
if (resolveCollisions) {
// TODO: support multilingual collisions
auto collisions = findCollisions(nameEntry);
// make sure to remove own entry
collisions.erase(collisions.find(desktopFilePath));
if (!collisions.empty()) {
// collisions are resolved like in the filesystem: a monotonically increasing number in brackets is
// appended to the Name in order to keep the number monotonically increasing, we look for the highest
// number in brackets in the existing entries, add 1 to it, and append it in brackets to the current
// desktop file's Name entry
unsigned int currentNumber = 1;
QRegularExpression regex(R"(^.*\(([0-9]+)\)$)");
for (const auto& collision : collisions) {
const auto& currentNameEntry = collision.second;
auto match = regex.match(QString::fromStdString(currentNameEntry));
if (match.hasMatch()) {
// 0 = entire string
// 1 = first group
const QString numString = match.captured(1);
const int num = numString.toInt();
// monotonic counting, i.e., never try to "be smart" by e.g., filling in the gaps between
// previous numbers
if (num >= currentNumber) {
currentNumber = num + 1;
}
}
}
auto newName = QString(nameEntry) + " (" + QString::number(currentNumber) + ")";
g_key_file_set_string(desktopFile.get(), G_KEY_FILE_DESKTOP_GROUP, G_KEY_FILE_DESKTOP_KEY_NAME, newName.toStdString().c_str());
}
}
auto convertToCharPointerList = [](const std::vector<std::string>& stringList) {
std::vector<const char*> pointerList;
// reserve space to increase efficiency
pointerList.reserve(stringList.size());
// convert string list to list of const char pointers
for (const auto& action : stringList) {
pointerList.push_back(action.c_str());
}
return pointerList;
};
std::vector<std::string> desktopActions;
// we may not just overwrite the existing actions key, as then the actions cannot be used any more from the context menu
{
const auto* actionsEntry = g_key_file_get_string(desktopFile.get(), G_KEY_FILE_DESKTOP_GROUP, G_KEY_FILE_DESKTOP_KEY_ACTIONS, error.get());
for (const QString& action : QString(actionsEntry).split(";")) {
if (action.isEmpty()) {
continue;
}
desktopActions.emplace_back(action.toStdString());
}
}
// use a "vendor prefix" to avoid collisions with existing actions, as "Update" and "Remove" are generic terms
static const std::string removeActionKey{"AppImageLauncher-Remove-AppImage"};
static const std::string updateActionKey{"AppImageLauncher-Update-AppImage"};
desktopActions.emplace_back(removeActionKey);
// load translations from JSON file(s)
QMap<QString, QString> removeActionNameTranslations;
#ifdef ENABLE_UPDATE_HELPER
QMap<QString, QString> updateActionNameTranslations;
{
QDirIterator i18nDirIterator(TranslationManager::getTranslationDir());
while(i18nDirIterator.hasNext()) {
const auto& filePath = i18nDirIterator.next();
const auto& fileName = QFileInfo(filePath).fileName();
if (!QFileInfo(filePath).isFile() || !(fileName.startsWith("desktopfiles.") && fileName.endsWith(".json")))
continue;
// check whether filename's format is alright, otherwise parsing the locale might try to access a
// non-existing (or the wrong) member
auto splitFilename = fileName.split(".");
if (splitFilename.size() != 3)
continue;
// parse locale from filename
auto locale = splitFilename[1];
QFile jsonFile(filePath);
if (!jsonFile.open(QIODevice::ReadOnly)) {
displayWarning(QMessageBox::tr("Could not parse desktop file translations:\nCould not open file for reading:\n\n%1").arg(fileName));
}
// TODO: need to make sure that this doesn't try to read huge files at once
auto data = jsonFile.readAll();
QJsonParseError parseError{};
auto jsonDoc = QJsonDocument::fromJson(data, &parseError);
// show warning on syntax errors and continue
if (parseError.error != QJsonParseError::NoError || jsonDoc.isNull() || !jsonDoc.isObject()) {
displayWarning(QMessageBox::tr("Could not parse desktop file translations:\nInvalid syntax:\n\n%1").arg(parseError.errorString()));
}
auto jsonObj = jsonDoc.object();
for (const auto& key : jsonObj.keys()) {
auto value = jsonObj[key].toString();
auto splitKey = key.split("/");
if (key.startsWith("Desktop Action update")) {
qDebug() << "update: adding" << value << "for locale" << locale;
updateActionNameTranslations[locale] = value;
} else if (key.startsWith("Desktop Action remove")) {
qDebug() << "remove: adding" << value << "for locale" << locale;
removeActionNameTranslations[locale] = value;
}
}
}
}
#endif
#ifndef BUILD_LITE
auto privateLibDir = privateLibDirPath("ui");
const char helperIconName[] = "AppImageLauncher";
#else
const char helperIconName[] = "AppImageLauncher-Lite";
#endif
// add Remove action
{
const auto removeSectionName = "Desktop Action " + removeActionKey;
g_key_file_set_string(desktopFile.get(), removeSectionName.c_str(), "Name", "Delete this AppImage");
g_key_file_set_string(desktopFile.get(), removeSectionName.c_str(), "Icon", helperIconName);
std::ostringstream removeExecPath;
#ifndef BUILD_LITE
removeExecPath << privateLibDir.toStdString() << "/remove";
#else
removeExecPath << getenv("HOME") << "/.local/lib/appimagelauncher-lite/appimagelauncher-lite.AppImage remove";
#endif
removeExecPath << " \"" << pathToAppImage.toStdString() << "\"";
g_key_file_set_string(desktopFile.get(), removeSectionName.c_str(), "Exec", removeExecPath.str().c_str());
// install translations
auto it = QMapIterator<QString, QString>(removeActionNameTranslations);
while (it.hasNext()) {
auto entry = it.next();
g_key_file_set_locale_string(desktopFile.get(), removeSectionName.c_str(), "Name", entry.key().toStdString().c_str(), entry.value().toStdString().c_str());
}
}
#ifdef ENABLE_UPDATE_HELPER
// add Update action
{
appimage::update::Updater updater(pathToAppImage.toStdString());
// but only if there's update information
if (!updater.updateInformation().empty()) {
// section needs to be announced in desktop actions list
desktopActions.emplace_back(updateActionKey);
const auto updateSectionName = "Desktop Action " + updateActionKey;
g_key_file_set_string(desktopFile.get(), updateSectionName.c_str(), "Name", "Update this AppImage");
g_key_file_set_string(desktopFile.get(), updateSectionName.c_str(), "Icon", helperIconName);
std::ostringstream updateExecPath;
#ifndef BUILD_LITE
updateExecPath << privateLibDir.toStdString() << "/update";
#else
updateExecPath << getenv("HOME") << "/.local/lib/appimagelauncher-lite/appimagelauncher-lite.AppImage update";
#endif
updateExecPath << " \"" << pathToAppImage.toStdString() << "\"";
g_key_file_set_string(desktopFile.get(), updateSectionName.c_str(), "Exec", updateExecPath.str().c_str());
// install translations
auto it = QMapIterator<QString, QString>(updateActionNameTranslations);
while (it.hasNext()) {
auto entry = it.next();
g_key_file_set_locale_string(desktopFile.get(), updateSectionName.c_str(), "Name", entry.key().toStdString().c_str(), entry.value().toStdString().c_str());
}
}
}
#endif
// add desktop actions key
g_key_file_set_string_list(
desktopFile.get(),
G_KEY_FILE_DESKTOP_GROUP,
G_KEY_FILE_DESKTOP_KEY_ACTIONS,
convertToCharPointerList(desktopActions).data(),
desktopActions.size()
);
// add version key
const auto version = QApplication::applicationVersion().replace("version ", "").toStdString();
g_key_file_set_string(desktopFile.get(), G_KEY_FILE_DESKTOP_GROUP, "X-AppImageLauncher-Version", version.c_str());
// save desktop file to disk
if (!g_key_file_save_to_file(desktopFile.get(), desktopFilePath, error.get())) {
handleError();
return false;
}
// make desktop file executable ("trustworthy" to some DEs)
// TODO: handle this in libappimage
makeExecutable(desktopFilePath);
// notify KDE/Plasma about icon change
{
auto message = QDBusMessage::createSignal(QStringLiteral("/KIconLoader"), QStringLiteral("org.kde.KIconLoader"), QStringLiteral("iconChanged"));
message.setArguments({0});
QDBusConnection::sessionBus().send(message);
}
return true;
}
// Re-runs desktop integration for an AppImage, passing true as the second
// argument to installDesktopFileAndIcons (presumably a "resolve collisions /
// update existing files" flag — confirm against its declaration).
bool updateDesktopFileAndIcons(const QString& pathToAppImage) {
    return installDesktopFileAndIcons(pathToAppImage, true);
}
// Moves (or, as a fallback, copies) the AppImage into the integration
// directory and installs its desktop file and icons.
// Returns INTEGRATION_ABORTED if the user declines to overwrite an existing
// AppImage of the same name, INTEGRATION_FAILED on move/copy/installation
// errors, INTEGRATION_SUCCESSFUL otherwise.
IntegrationState integrateAppImage(const QString& pathToAppImage, const QString& pathToIntegratedAppImage) {
    // need std::strings to get working pointers with .c_str()
    // NOTE(review): oldPath and newPath are never used in this function
    const auto oldPath = pathToAppImage.toStdString();
    const auto newPath = pathToIntegratedAppImage.toStdString();
    // create target directory
    QDir().mkdir(QFileInfo(QFile(pathToIntegratedAppImage)).dir().absolutePath());
    // check whether AppImage is in integration directory already
    if (QFileInfo(pathToAppImage).absoluteFilePath() != QFileInfo(pathToIntegratedAppImage).absoluteFilePath()) {
        // need to check whether file exists
        // if it does, the existing AppImage needs to be removed before rename can be called
        if (QFile(pathToIntegratedAppImage).exists()) {
            std::ostringstream message;
            message << QObject::tr("AppImage with same filename has already been integrated.").toStdString() << std::endl
                    << std::endl
                    << QObject::tr("Do you wish to overwrite the existing AppImage?").toStdString() << std::endl
                    << QObject::tr("Choosing No will run the AppImage once, and leave the system in its current state.").toStdString();
            // NOTE(review): the QMessageBox instances below are heap allocated
            // and never deleted — consider Qt::WA_DeleteOnClose or stack objects
            auto* messageBox = new QMessageBox(
                QMessageBox::Warning,
                QObject::tr("Warning"),
                QString::fromStdString(message.str()),
                QMessageBox::Yes | QMessageBox::No
            );
            messageBox->setDefaultButton(QMessageBox::No);
            messageBox->show();
            // spins the application event loop until the dialog is dismissed
            QApplication::exec();
            if (messageBox->clickedButton() == messageBox->button(QMessageBox::No)) {
                return INTEGRATION_ABORTED;
            }
            QFile(pathToIntegratedAppImage).remove();
        }
        // rename can fail e.g. across filesystem boundaries; offer to copy instead
        if (!QFile(pathToAppImage).rename(pathToIntegratedAppImage)) {
            auto* messageBox = new QMessageBox(
                QMessageBox::Critical,
                QObject::tr("Error"),
                QObject::tr("Failed to move AppImage to target location.\n"
                            "Try to copy AppImage instead?"),
                QMessageBox::Ok | QMessageBox::Cancel
            );
            messageBox->setDefaultButton(QMessageBox::Ok);
            messageBox->show();
            QApplication::exec();
            if (messageBox->clickedButton() == messageBox->button(QMessageBox::Cancel))
                return INTEGRATION_FAILED;
            if (!QFile(pathToAppImage).copy(pathToIntegratedAppImage)) {
                displayError("Failed to copy AppImage to target location");
                return INTEGRATION_FAILED;
            }
        }
    }
    // finally, install desktop file and icons for the (now in-place) AppImage
    if (!installDesktopFileAndIcons(pathToIntegratedAppImage))
        return INTEGRATION_FAILED;
    return INTEGRATION_SUCCESSFUL;
}
// Returns the AppImage's MD5 digest as a hexadecimal string, or "" on failure.
// Prefers the digest embedded in the ELF ".digest_md5" section; recalculates
// it when the section is missing or contains only zero bytes.
QString getAppImageDigestMd5(const QString& path) {
    // cache the conversion; the original converted the path repeatedly
    const auto pathStdStr = path.toStdString();

    // first of all, digest calculation is supported only for type 2
    if (appimage_get_type(pathStdStr.c_str(), false) != 2)
        return "";

    // try to read embedded MD5 digest
    unsigned long offset = 0, length = 0;
    auto rv = appimage_get_elf_section_offset_and_length(pathStdStr.c_str(), ".digest_md5", &offset, &length);

    // an MD5 digest is always 16 bytes long
    QByteArray buffer(16, '\0');

    if (rv && offset != 0 && length != 0) {
        // open file and read digest from ELF header section
        QFile file(path);

        if (!file.open(QFile::ReadOnly))
            return "";

        if (!file.seek(static_cast<qint64>(offset)))
            return "";

        // QFile::read returns the number of bytes read, or -1 on error; the
        // previous check (!file.read(...)) let errors and partial reads pass
        // silently — require all 16 bytes
        if (file.read(buffer.data(), buffer.size()) != buffer.size())
            return "";

        file.close();
    }

    bool needToCalculateDigest;

    // there seem to be some AppImages out there who actually have the required section embedded, but it's empty
    // therefore we make the assumption that a hash value of zeroes is probably incorrect and recalculate
    // in the extremely rare case in which the AppImage's digest would *really* be that value, we'd waste a bit of
    // computation time, but the chances are so low... who cares, right?
    {
        auto nonZeroCharacterFound = false;

        for (const char i : buffer) {
            if (i != '\0') {
                nonZeroCharacterFound = true;
                break;
            }
        }

        needToCalculateDigest = !nonZeroCharacterFound;
    }

    if (needToCalculateDigest) {
        // calculate digest
        if (!appimage_type2_digest_md5(pathStdStr.c_str(), buffer.data()))
            return "";
    }

    // create hexadecimal representation; the returned C string is owned by us
    auto hexDigest = appimage_hexlify(buffer, static_cast<size_t>(buffer.size()));
    QString hexDigestStr(hexDigest);
    free(hexDigest);

    return hexDigestStr;
}
// Returns true if desktop integration files are already registered in the
// system for the given AppImage.
bool hasAlreadyBeenIntegrated(const QString& pathToAppImage) {
    const auto path = pathToAppImage.toStdString();
    return appimage_is_registered_in_system(path.c_str());
}
bool isInDirectory(const QString& pathToAppImage, const QDir& directory) {
return directory == QFileInfo(pathToAppImage).absoluteDir();
}
bool cleanUpOldDesktopIntegrationResources(bool verbose) {
auto dirPath = QStandardPaths::writableLocation(QStandardPaths::GenericDataLocation) + "/applications";
auto directory = QDir(dirPath);
QStringList filters;
filters << "appimagekit_*.desktop";
directory.setNameFilters(filters);
for (auto desktopFilePath : directory.entryList()) {
desktopFilePath = dirPath + "/" + desktopFilePath;
std::shared_ptr<GKeyFile> desktopFile(g_key_file_new(), [](GKeyFile* p) {
g_key_file_free(p);
});
if (!g_key_file_load_from_file(desktopFile.get(), desktopFilePath.toStdString().c_str(), G_KEY_FILE_NONE, nullptr)) {
continue;
}
std::shared_ptr<char> execValue(g_key_file_get_string(desktopFile.get(), G_KEY_FILE_DESKTOP_GROUP, G_KEY_FILE_DESKTOP_KEY_EXEC, nullptr), [](char* p) {
free(p);
});
// if there is no Exec value in the file, the desktop file is apparently broken, therefore we skip the file
if (execValue == nullptr) {
continue;
}
std::shared_ptr<char> tryExecValue(g_key_file_get_string(desktopFile.get(), G_KEY_FILE_DESKTOP_GROUP, G_KEY_FILE_DESKTOP_KEY_TRY_EXEC, nullptr), [](char* p) {
free(p);
});
// TryExec is optional, although recently the desktop integration functions started to force add such keys
// with a path to the desktop file
// (before, if it existed, the key was replaced with the AppImage's path)
// If it exists, we assume its value is the full path to the AppImage, which can be used to check the existence
// of the AppImage
QString appImagePath;
if (tryExecValue != nullptr) {
appImagePath = QString(tryExecValue.get());
} else {
appImagePath = QString(execValue.get()).split(" ").first();
}
// now, check whether AppImage exists
// FIXME: the split command for the Exec value might not work if there's a space in the filename
// we really need a parser that understands the desktop file escaping
if (!QFile(appImagePath).exists()) {
if (verbose)
std::cout << "AppImage no longer exists, cleaning up resources: " << appImagePath.toStdString() << std::endl;
if (verbose)
std::cout << "Removing desktop file: " << desktopFilePath.toStdString() << std::endl;
QFile(desktopFilePath).remove();
// TODO: clean up related resources such as icons or MIME definitions
auto* iconValue = g_key_file_get_string(desktopFile.get(), G_KEY_FILE_DESKTOP_GROUP, G_KEY_FILE_DESKTOP_KEY_ICON, nullptr);
if (iconValue != nullptr) {
const auto dataLocation = QStandardPaths::writableLocation(QStandardPaths::GenericDataLocation);
const auto iconsPath = QString::fromStdString(dataLocation.toStdString() + "/share/icons/");
for (QDirIterator it(iconsPath, QDirIterator::Subdirectories); it.hasNext();) {
auto path = it.next();
if (QFileInfo(path).completeBaseName().startsWith(iconValue)) {
QFile::remove(path);
}
}
}
}
}
return true;
}
// Returns the modification time (seconds since the epoch) of the given path.
// Displays an error dialog and returns -1 if stat() fails.
time_t getMTime(const QString& path) {
    struct stat fileStatus{};

    const auto nativePath = path.toStdString();

    if (stat(nativePath.c_str(), &fileStatus) != 0) {
        displayError(QObject::tr("Failed to call stat() on path:\n\n%1").arg(path));
        return -1;
    }

    return fileStatus.st_mtim.tv_sec;
}
// Returns true if the AppImage's registered desktop file is newer (by mtime)
// than the AppImageLauncher binary itself; false on any stat failure.
bool desktopFileHasBeenUpdatedSinceLastUpdate(const QString& pathToAppImage) {
    const auto ownBinaryPath = getOwnBinaryPath();
    // NOTE(review): the returned path is presumably heap-allocated by
    // libappimage and never freed here — confirm ownership and free() if so
    const auto desktopFilePath = appimage_registered_desktop_file_path(pathToAppImage.toStdString().c_str(), nullptr, false);
    auto ownBinaryMTime = getMTime(ownBinaryPath.get());
    auto desktopFileMTime = getMTime(desktopFilePath);
    // check if something has failed horribly (getMTime returns -1 on error)
    if (desktopFileMTime < 0 || ownBinaryMTime < 0)
        return false;
    return desktopFileMTime > ownBinaryMTime;
}
// Returns true if the file at the given path is a type 1 or type 2 AppImage.
bool isAppImage(const QString& path) {
    switch (appimage_get_type(path.toUtf8(), false)) {
        case 1:
        case 2:
            return true;
        default:
            return false;
    }
}
// Resolves the full path of an executable by running the "which" command line
// tool. Returns an empty string when the executable cannot be found.
// NOTE(review): name is interpolated into a shell command unescaped — callers
// must only pass trusted, shell-safe names (currently internal constants).
QString which(const std::string& name) {
    std::vector<char> command(4096);
    snprintf(command.data(), command.size() - 1, "which %s", name.c_str());

    auto* proc = popen(command.data(), "r");

    if (proc == nullptr)
        throw std::runtime_error("Failed to start process for which");

    // the buffer is zero-initialized; reading at most size-1 bytes guarantees
    // null termination
    std::vector<char> outBuf(4096);
    // check fread's result (previously ignored): 0 means read error or no
    // output, i.e., the tool was not found
    const auto bytesRead = fread(outBuf.data(), sizeof(char), outBuf.size() - 1, proc);
    pclose(proc);

    if (bytesRead == 0)
        return "";

    QString rv(outBuf.data());
    rv.replace("\n", "");
    return rv;
}
// If the file at path is owned by another user, asks whether to relaunch this
// process through a graphical permissions helper (gksudo/gksu) as the file's
// owner. On a successful relaunch this function never returns (execv replaces
// the process); on user abort, helper failure, or no helper found, exit(1).
void checkAuthorizationAndShowDialogIfNecessary(const QString& path, const QString& question) {
    const uint32_t ownUid = getuid();
    const uint32_t fileOwnerUid = QFileInfo(path).ownerId();
    const auto fileOwnerUsername = QFileInfo(path).owner();
    if (ownUid != fileOwnerUid) {
        qDebug() << "attempting relaunch with root helper";
        QString messageBoxText = QMessageBox::tr("File %1 is owned by another user: %2").arg(path).arg(fileOwnerUsername);
        messageBoxText += "\n\n";
        messageBoxText += question;
        // NOTE(review): the QMessageBox is heap allocated and never deleted
        auto* messageBox = new QMessageBox(
            QMessageBox::Warning,
            QMessageBox::tr("Permissions problem"),
            messageBoxText,
            QMessageBox::Ok | QMessageBox::Abort,
            nullptr
        );
        messageBox->setDefaultButton(QMessageBox::Ok);
        messageBox->show();
        // spins the application event loop until the dialog is dismissed
        QApplication::exec();
        const auto relaunch = messageBox->clickedButton() == messageBox->button(QMessageBox::Ok);
        if (!relaunch) {
            qDebug() << "Dialog aborted";
            exit(1);
        }
        qDebug() << "ok, attempting relaunch with root helper";
        // pkexec doesn't retain $DISPLAY etc., as per the man page, so we can't run UI programs with it
        for (const auto& rootHelperFilename : {/*"pkexec",*/ "gksudo", "gksu"}) {
            const auto rootHelperPath = which(rootHelperFilename);
            qDebug() << "trying root helper " << rootHelperFilename << rootHelperPath;
            if (rootHelperPath.isEmpty())
                continue;
            qDebug() << rootHelperFilename << rootHelperPath;
            // the strdup'd strings below are deliberately not freed: on success
            // execv replaces the process image, on failure we exit(1)
            std::vector<char*> argv = {
                strdup(rootHelperPath.toStdString().c_str()),
            };
            // run as the file's owner unless the owner is root (helper default)
            if (fileOwnerUid != 0) {
                argv.emplace_back(strdup("--user"));
                argv.emplace_back(strdup(std::to_string(fileOwnerUid).c_str()));
            }
            // forward this process's full command line to the relaunched copy
            for (const auto& arg : QCoreApplication::arguments()) {
                argv.emplace_back(strdup(arg.toStdString().c_str()));
            }
            // execv requires a null-terminated argument vector
            argv.emplace_back(nullptr);
            const auto rv = execv(strdup(rootHelperPath.toStdString().c_str()), argv.data());
            // if the execution fails, we should signalize this to the user instead of silently failing over to the
            // next tool
            QMessageBox::critical(
                nullptr,
                QMessageBox::tr("Error"),
                QMessageBox::tr("Failed to run permissions helper, exited with return code %1").arg(rv)
            );
            exit(1);
        }
        QMessageBox::critical(
            nullptr,
            QMessageBox::tr("Error"),
            QMessageBox::tr("Could not find suitable permissions helper, aborting")
        );
        exit(1);
    }
}
// Locates AppImageLauncher's private data directory (share/appimagelauncher).
// Helper tools live in usr/lib/<arch>-linux-gnu/appimagelauncher (or in
// usr/lib/appimagelauncher), the main binary in usr/bin, so several candidate
// locations relative to the running binary are probed in order.
// Returns an empty string (after printing a warning) if none exists.
QString pathToPrivateDataDirectory() {
    const auto binaryDirPath = QApplication::applicationDirPath();

    // probe order matches the lookup described above: lib dir with arch
    // triplet, lib dir without it, then the usr/bin relative location
    const QStringList candidates = {
        binaryDirPath + "/../../share/appimagelauncher/",
        binaryDirPath + "/../../../share/appimagelauncher/",
        binaryDirPath + "/../share/appimagelauncher/",
    };

    for (const auto& candidate : candidates) {
        if (QDir(candidate).exists()) {
            return candidate;
        }
    }

    std::cerr << "[AppImageLauncher] Warning: "
              << "Path to private data directory could not be found" << std::endl;
    return "";
}
// Removes the desktop integration resources registered for the given
// AppImage. Returns true on success.
bool unregisterAppImage(const QString& pathToAppImage) {
    const auto path = pathToAppImage.toStdString();
    return appimage_unregister_in_system(path.c_str(), false) == 0;
}
// Loads the bundled fallback icon for the given icon name, to be used when the
// icon theme does not provide one. Returns a null QIcon when no fallback icon
// directory or no matching .svg file can be found (callers treat that as a
// soft failure).
QIcon loadIconWithFallback(const QString& iconName) {
    const QString subdirName("fallback-icons");

    const auto binaryDir = QApplication::applicationDirPath();

    // first we check the directory that would be expected within the build environment
    QDir fallbackIconDirectory = QDir(binaryDir + "/../../resources/" + subdirName);

    // if that doesn't work, we check the private data directory, which should work when AppImageLauncher is installed
    // through the packages or in Lite's AppImage
    if (!fallbackIconDirectory.exists()) {
        const auto privateDataDir = pathToPrivateDataDirectory();

        // reuse the already-computed path (the previous code called
        // pathToPrivateDataDirectory() a second time here)
        if (privateDataDir.length() > 0 && QDir(privateDataDir).exists()) {
            fallbackIconDirectory = QDir(privateDataDir + "/" + subdirName);
        }
    }

    // fallback icons aren't critical enough to exit the application if they can't be found
    // after all, the theme icons may work just as well
    if (!fallbackIconDirectory.exists()) {
        std::cerr << "[AppImageLauncher] Warning:"
                  << "fallback icons could not be loaded: directory could not be found" << std::endl;
        return QIcon{};
    }

    qDebug() << "Loading fallback for icon" << iconName;

    const auto iconFilename = iconName + ".svg";
    const auto iconPath = fallbackIconDirectory.filePath(iconFilename);

    if (!QFileInfo(iconPath).isFile()) {
        std::cerr << "[AppImageLauncher] Warning: can't find fallback icon for name"
                  << iconName.toStdString() << std::endl;
        return QIcon{};
    }

    const auto fallbackIcon = QIcon(iconPath);
    qDebug() << fallbackIcon;
    return fallbackIcon;
}
/**
 * Reloads the icon of every child button of the given widget, registering a
 * bundled fallback icon for the case that the current icon theme cannot
 * provide one.
 *
 * Qt 5.12 introduced native fallback search paths for icons, but since much
 * older Qt versions must be supported, the fallback has to be wired up
 * manually per widget here.
 */
void setUpFallbackIconPaths(QWidget* parent) {
    // for now only buttons are handled; other widgets exposing an icon
    // property could be added the same way
    const auto buttons = parent->findChildren<QAbstractButton*>();

    for (auto* button : buttons) {
        const auto iconName = button->icon().name();

        // buttons without an icon are left untouched
        if (iconName.isEmpty())
            continue;

        // a null fallback icon (e.g., when the bundled icons are missing) is
        // a perfectly valid second argument to QIcon::fromTheme
        const auto fallbackIcon = loadIconWithFallback(iconName);

        auto themedIcon = QIcon::fromTheme(iconName, fallbackIcon);
        if (themedIcon.isNull() || themedIcon.pixmap(16, 16).isNull())
            themedIcon = fallbackIcon;

        // replace the button's icon with the fallback-enabled one
        button->setIcon(themedIcon);
    }
}
|
//$Id$
//------------------------------------------------------------------------------
// BooleanWrapper
//------------------------------------------------------------------------------
// GMAT: General Mission Analysis Tool
//
// Copyright (c) 2002 - 2017 United States Government as represented by the
// Administrator of the National Aeronautics and Space Administration.
// All Other Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// You may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
// http://www.apache.org/licenses/LICENSE-2.0.
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language
// governing permissions and limitations under the License.
//
// Developed jointly by NASA/GSFC and Thinking Systems, Inc.
//
// Author: Linda Jun/GSFC
// Created: 2007/07/24
//
/**
* Declares BooleanWrapper class.
*/
//------------------------------------------------------------------------------
#ifndef BooleanWrapper_hpp
#define BooleanWrapper_hpp
#include "gmatdefs.hpp"
#include "ElementWrapper.hpp"
/**
 * ElementWrapper around a plain bool value.
 *
 * The boolean accessors (EvaluateBoolean/SetBoolean) read and write the
 * wrapped value; the Real accessors exist to satisfy the ElementWrapper
 * interface.
 */
class GMAT_API BooleanWrapper : public ElementWrapper
{
public:
   BooleanWrapper();
   // fixed mojibake: the copy constructor parameter was garbled to "©"
   // (an HTML-entity artifact of "&copy"), which does not compile
   BooleanWrapper(const BooleanWrapper &copy);
   const BooleanWrapper& operator=(const BooleanWrapper &right);
   virtual ~BooleanWrapper();

   virtual ElementWrapper*     Clone() const;
   virtual Gmat::ParameterType GetDataType() const;

   virtual Real     EvaluateReal() const;
   virtual bool     SetReal(const Real val);
   virtual bool     EvaluateBoolean() const;
   virtual bool     SetBoolean(const bool val);

protected:
   // the bool value
   bool value;

   virtual void     SetupWrapper();
};
#endif // BooleanWrapper_hpp
|
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "binTree.hpp"
// Node payload for the camera-placement tree: a node id plus per-node flags
// used while computing the minimum number of cameras.
struct visibilityType {
    int id;       // node identifier as read from the input line
    bool camera;  // set when a camera is placed on this node
    bool seen;    // set when this node is monitored by some camera

    // comparisons against a bare id; const-qualified (the originals were not,
    // so they could not be called through const references/iterators)
    bool operator==(int x) const {
        return id == x;
    }
    bool operator!=(int x) const {
        return id != x;
    }
};
int camera_count;
// Greedy post-order camera placement. Return codes:
//   1 = node is monitored (or absent),
//   2 = node is NOT monitored and needs a camera on its parent,
//   3 = node holds a camera.
// Increments the global camera_count for every camera placed; the caller must
// add one extra camera if the root itself comes back as 2.
int place_cameras(binTree<visibilityType>::binTreeIterator it) {
    // a null iterator represents a missing child, which counts as monitored
    binTree<visibilityType>::binTreeIterator nil(NULL);
    if (it == nil) return 1;
    int L = place_cameras(it.leftChild());
    int R = place_cameras(it.rightChild());
    // Both the nodes are monitored, but this node is not -> ask the parent
    if (L == 1 && R == 1)
        return 2;
    // If one of the left and the
    // right subtree is not monitored, a camera must go here
    else if (L == 2 || R == 2) {
        camera_count++;
        return 3;
    }
    // If the root node is monitored (a child holds a camera)
    return 1;
}
std::vector<visibilityType> parseLine(std::string line) {
std::stringstream s(line);
std::vector<visibilityType> parsedLine;
visibilityType n;
while (s >> n.id) {
n.seen = false;
n.camera = false;
parsedLine.push_back(n);
}
return parsedLine;
}
int main(int argc, char *argv[]) {
if (argc != 2) {
std::cout << "Usage: ./Assignment05 infile" << std::endl;
return -1;
}
int t_cases = 0;
std::string temp = "";
std::ifstream in(argv[1]);
std::vector<std::vector<visibilityType> > trees(1, std::vector<visibilityType>(1));
if (!in.is_open()) {
std::cout << "Error opening infile" << std::endl;
return -2;
}
getline(in, temp);
t_cases = stoi(temp);
for (int i = 0; i < t_cases; i++) {
getline(in, temp);
trees.push_back(parseLine(temp));
}
trees.erase(trees.begin());
for (size_t i = 0; i < trees.size(); i++) {
binTree<visibilityType> t;
t.buildTree(trees[i]);
camera_count = 0;
int value = place_cameras(t.rootIterator());
std::cout << camera_count + (value == 2 ? 1 : 0) << std::endl;
}
}
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/cpu/cpu_options.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/lib/strings/numbers.h"
namespace {
const char* const kXlaOptimizeForSizeCpuOption = "xla_cpu_optimize_for_size";
const char* const kLlvmIrDotTilingFactor = "xla_llvm_dot_tiling_factor";
const char* const kXlaForceEnableExperimentalLlvmIrGemm =
"xla_force_enable_experimental_llvm_ir_gemm";
const char* const kLlvmIrGemmTileSize = "xla_llvm_ir_gemm_tile_size";
} // namespace
namespace xla {
namespace cpu {
namespace options {
// Returns true if the "xla_cpu_optimize_for_size" backend extra option is set
// in the module config.
bool OptimizeForSizeRequested(const HloModuleConfig& config) {
  const auto& opts = config.debug_options().xla_backend_extra_options();
  return opts.find(kXlaOptimizeForSizeCpuOption) != opts.end();
}
// Returns true if vectorized reductions should be disabled.
// NOTE(review): this keys off the same "optimize for size" option as
// OptimizeForSizeRequested — i.e. vectorized reduce is disabled whenever
// size-optimized code is requested; confirm no dedicated option was intended.
bool VectorizedReduceDisabled(const HloModuleConfig& config) {
  const auto& extra_options_map =
      config.debug_options().xla_backend_extra_options();
  return extra_options_map.count(kXlaOptimizeForSizeCpuOption) > 0;
}
// Parses the "xla_llvm_dot_tiling_factor" backend extra option into an
// integer tiling factor. Returns nullopt when the option is absent or its
// value is not a valid integer.
absl::optional<int64> LlvmIrGemvTilingFactor(const HloModuleConfig& config) {
  const auto& opts = config.debug_options().xla_backend_extra_options();
  const auto it = opts.find(kLlvmIrDotTilingFactor);
  if (it == opts.end()) {
    return absl::nullopt;
  }
  int64 tiling_factor;
  if (!absl::SimpleAtoi(it->second, &tiling_factor)) {
    return absl::nullopt;
  }
  return tiling_factor;
}
// Returns true if the experimental LLVM IR GEMM has been force-enabled via
// the backend extra options.
bool ForceEnableExperimentalLlvmIrGemm(const HloModuleConfig& config) {
  const auto& opts = config.debug_options().xla_backend_extra_options();
  return opts.find(kXlaForceEnableExperimentalLlvmIrGemm) != opts.end();
}
// Strips a required suffix from str; CHECK-fails if str is shorter than the
// suffix or does not end with it.
static absl::string_view RemoveSuffix(absl::string_view str,
                                      absl::string_view suffix) {
  CHECK_GE(str.size(), suffix.size());
  const auto base_size = str.size() - suffix.size();
  CHECK_EQ(str.substr(base_size), suffix);
  return str.substr(0, base_size);
}
// Parses the "xla_llvm_ir_gemm_tile_size" backend extra option, which must
// have the form "<m>:<k>:<n>*vectwidth", into the (m, k, n-in-vector-widths)
// tile dimensions. Returns nullopt when the option is absent; CHECK-fails on
// a malformed value.
absl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
    const HloModuleConfig& config) {
  const auto& extra_options_map =
      config.debug_options().xla_backend_extra_options();
  auto it = extra_options_map.find(kLlvmIrGemmTileSize);
  if (it == extra_options_map.end()) {
    return absl::nullopt;
  }
  // expected format: exactly three colon-separated components
  std::vector<string> tile_components = absl::StrSplit(it->second, ':');
  CHECK_EQ(tile_components.size(), 3);
  int64 tile_size_m;
  int64 tile_size_k;
  int64 tile_size_n_in_vector_width;
  CHECK(absl::SimpleAtoi(tile_components[0], &tile_size_m));
  CHECK(absl::SimpleAtoi(tile_components[1], &tile_size_k));
  // the third component carries a mandatory "*vectwidth" suffix: the parsed
  // number is a multiple of the target vector width, not an absolute size
  absl::string_view tile_size_n_in_vector_width_str =
      RemoveSuffix(tile_components[2], "*vectwidth");
  CHECK(absl::SimpleAtoi(tile_size_n_in_vector_width_str,
                         &tile_size_n_in_vector_width));
  return std::tuple<int64, int64, int64>(tile_size_m, tile_size_k,
                                         tile_size_n_in_vector_width);
}
} // namespace options
} // namespace cpu
} // namespace xla
|
#include <shift/parser/proto/proto.hpp>
#include <shift/core/boost_disable_warnings.hpp>
#include <boost/test/unit_test.hpp>
#include <shift/core/boost_restore_warnings.hpp>
#include <iostream>
#include <sstream>
#include "test.hpp"
using namespace shift;
using namespace shift::parser;
using namespace shift::parser::proto;
// Exercises the parser's error reporting for every way an incomplete "using"
// alias declaration can be truncated. Each sub-case feeds a malformed source
// string and asserts the exact diagnostic text (error code, message and caret
// location) produced by the parser.
BOOST_AUTO_TEST_CASE(using_errors)
{
  // bare "using" keyword, with and without terminating semicolon
  test_parser_and_ast(
    R"(using)", "using_errors_1.pro2",
    R"(using_errors_1.pro2(1): error P1100: parse error: Expected whitespace.
using
^
)");
  test_parser_and_ast(
    R"(using;)", "using_errors_2.pro2",
    R"(using_errors_2.pro2(1): error P1100: parse error: Expected whitespace.
using;
^
)");
  // whitespace present but alias name missing
  test_parser_and_ast(
    R"(using )", "using_errors_3.pro2",
    R"(using_errors_3.pro2(1): error P1116: parse error: Expected identifier.
using
^
)");
  test_parser_and_ast(
    R"(using ;)", "using_errors_4.pro2",
    R"(using_errors_4.pro2(1): error P1116: parse error: Expected identifier.
using ;
^
)");
  // alias name present but '=' missing
  test_parser_and_ast(
    R"(using foo)", "using_errors_5.pro2",
    R"(using_errors_5.pro2(1): error P1105: parse error: Expected equality sign ('=').
using foo
^
)");
  test_parser_and_ast(
    R"(using foo;)", "using_errors_6.pro2",
    R"(using_errors_6.pro2(1): error P1105: parse error: Expected equality sign ('=').
using foo;
^
)");
  // '=' present but aliased type path missing
  test_parser_and_ast(
    R"(using foo =)", "using_errors_7.pro2",
    R"(using_errors_7.pro2(1): error P1120: parse error: Expected type path.
using foo =
^
)");
  test_parser_and_ast(
    R"(using foo =;)", "using_errors_8.pro2",
    R"(using_errors_8.pro2(1): error P1120: parse error: Expected type path.
using foo =;
^
)");
}
|
#ifndef SAMPLE_RENDERER_HPP
#define SAMPLE_RENDERER_HPP
#include <GL3/Renderer.hpp>
//! Sample renderer built on the GL3::Renderer base class; overrides the
//! frame-lifecycle hooks (initialize, update, begin/end draw, input, cleanup).
class SampleRenderer : public GL3::Renderer
{
 public:
    //! Default constructor
    SampleRenderer();
    //! Default destructor
    ~SampleRenderer();

 protected:
    //! Called once at startup with the parsed command line configuration.
    bool OnInitialize(const cxxopts::ParseResult& configure) override;
    //! Called at shutdown to release renderer resources.
    void OnCleanUp() override;
    //! Per-frame update; dt is the frame delta time (units — TODO confirm).
    void OnUpdateFrame(double dt) override;
    //! Hook invoked before the frame's draw commands.
    void OnBeginDraw() override;
    //! Hook invoked after the frame's draw commands.
    void OnEndDraw() override;
    //! Keyboard/input event handler.
    void OnProcessInput(unsigned int key) override;
};
#endif //! end of SampleRenderer.hpp
|
//===-- Log.cpp -----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "lldb/Utility/Log.h"
#include "lldb/Utility/VASPrintf.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/raw_ostream.h"
#include <chrono>
#include <cstdarg>
#include <mutex>
#include <utility>
#include <cassert>
#if defined(_WIN32)
#include <process.h>
#else
#include <unistd.h>
#include <pthread.h>
#endif
using namespace lldb_private;
llvm::ManagedStatic<Log::ChannelMap> Log::g_channel_map;
// Invokes the callback once per enable-able category of the given channel,
// including the two synthetic entries "all" and "default".
void Log::ForEachCategory(
    const Log::ChannelMap::value_type &entry,
    llvm::function_ref<void(llvm::StringRef, llvm::StringRef)> lambda) {
  lambda("all", "all available logging categories");
  lambda("default", "default set of logging categories");
  for (const auto &category : entry.second.m_channel.categories)
    lambda(category.name, category.description);
}
// Prints a human-readable list of the channel's categories to the stream.
void Log::ListCategories(llvm::raw_ostream &stream,
                         const ChannelMap::value_type &entry) {
  stream << llvm::formatv("Logging categories for '{0}':\n", entry.first());
  ForEachCategory(entry,
                  [&stream](llvm::StringRef name, llvm::StringRef description) {
                    stream << llvm::formatv("  {0} - {1}\n", name, description);
                  });
}
// Translates user-supplied category names into the channel's flag mask.
// Unknown names are reported to the stream; if any occurred, the channel's
// valid categories are listed once at the end.
uint32_t Log::GetFlags(llvm::raw_ostream &stream, const ChannelMap::value_type &entry,
                       llvm::ArrayRef<const char *> categories) {
  bool list_categories = false;
  uint32_t flags = 0;
  for (const char *category : categories) {
    // "all" turns on every category bit
    if (llvm::StringRef("all").equals_insensitive(category)) {
      flags |= UINT32_MAX;
      continue;
    }
    // "default" turns on the channel's default set
    if (llvm::StringRef("default").equals_insensitive(category)) {
      flags |= entry.second.m_channel.default_flags;
      continue;
    }
    // otherwise look the name up among the channel's categories
    auto cat = llvm::find_if(entry.second.m_channel.categories,
                             [&](const Log::Category &c) {
                               return c.name.equals_insensitive(category);
                             });
    if (cat != entry.second.m_channel.categories.end()) {
      flags |= cat->flag;
      continue;
    }
    // unknown category: report it and remember to print the valid list
    stream << llvm::formatv("error: unrecognized log category '{0}'\n",
                            category);
    list_categories = true;
  }
  if (list_categories)
    ListCategories(stream, entry);
  return flags;
}
// Enables the given category flags on this log and publishes the log on its
// channel. Writers are serialized through m_mutex; readers use the atomics.
void Log::Enable(const std::shared_ptr<llvm::raw_ostream> &stream_sp,
                 uint32_t options, uint32_t flags) {
  llvm::sys::ScopedWriter lock(m_mutex);
  // fetch_or returns the mask value *before* the new flags were merged in
  uint32_t mask = m_mask.fetch_or(flags, std::memory_order_relaxed);
  // NOTE(review): "mask | flags" is true whenever any bit was or is set, so
  // stream/options are refreshed on every call with nonzero state — confirm
  // this was not meant to detect a *change* in the mask
  if (mask | flags) {
    m_options.store(options, std::memory_order_relaxed);
    m_stream_sp = stream_sp;
    m_channel.log_ptr.store(this, std::memory_order_relaxed);
  }
}
// Clears the given category flags; if no enabled flags remain, releases the
// output stream and unpublishes this log from its channel.
void Log::Disable(uint32_t flags) {
  llvm::sys::ScopedWriter lock(m_mutex);
  // fetch_and returns the mask value *before* the flags were cleared
  uint32_t mask = m_mask.fetch_and(~flags, std::memory_order_relaxed);
  if (!(mask & ~flags)) {
    m_stream_sp.reset();
    m_channel.log_ptr.store(nullptr, std::memory_order_relaxed);
  }
}
// Returns the current option flags (lock-free read of the atomic).
const Flags Log::GetOptions() const {
  return m_options.load(std::memory_order_relaxed);
}
// Returns the currently enabled category mask (lock-free read of the atomic).
const Flags Log::GetMask() const {
  return m_mask.load(std::memory_order_relaxed);
}
// Logs a plain C string via the Printf pipeline (header and trailing newline
// are added by VAPrintf).
void Log::PutCString(const char *cstr) { Printf("%s", cstr); }
// StringRef convenience overload; materializes a null-terminated copy.
void Log::PutString(llvm::StringRef str) { PutCString(str.str().c_str()); }
// Simple variable argument logging with flags.
// printf-style entry point; forwards the varargs to VAPrintf.
void Log::Printf(const char *format, ...) {
  va_list args;
  va_start(args, format);
  VAPrintf(format, args);
  va_end(args);
}
// All logging eventually boils down to this function call. If we have a
// callback registered, then we call the logging callback. If we have a valid
// file handle, we also log to the file.
void Log::VAPrintf(const char *format, va_list args) {
  // assemble header + formatted content + newline into one buffer so the
  // message is emitted as a single write
  llvm::SmallString<64> FinalMessage;
  llvm::raw_svector_ostream Stream(FinalMessage);
  WriteHeader(Stream, "", "");
  llvm::SmallString<64> Content;
  lldb_private::VASprintf(Content, format, args);
  Stream << Content << "\n";
  WriteMessage(std::string(FinalMessage.str()));
}
// Log a non-fatal error; forwards the variadic arguments to VAError.
void Log::Error(const char *format, ...) {
  va_list arg_list;
  va_start(arg_list, format);
  VAError(format, arg_list);
  va_end(arg_list);
}
void Log::VAError(const char *format, va_list args) {
llvm::SmallString<64> Content;
VASprintf(Content, format, args);
Printf("error: %s", Content.c_str());
}
// Verbose-only logging: emits nothing unless the verbose option bit is set
// on this log.
void Log::Verbose(const char *format, ...) {
  if (!GetVerbose())
    return;
  va_list arg_list;
  va_start(arg_list, format);
  VAPrintf(format, arg_list);
  va_end(arg_list);
}
// Printing of warnings that are not fatal.
void Log::Warning(const char *format, ...) {
llvm::SmallString<64> Content;
va_list args;
va_start(args, format);
VASprintf(Content, format, args);
va_end(args);
Printf("warning: %s", Content.c_str());
}
// One-time logging subsystem initialization.
void Log::Initialize() {
#ifdef LLVM_ON_UNIX
  // Register a post-fork handler in the child: if we fork while another
  // thread holds a logging mutex, any logging in the child would deadlock,
  // so the child disables all logging (see DisableLoggingChild).
  pthread_atfork(nullptr, nullptr, &Log::DisableLoggingChild);
#endif
  InitializeLldbChannel();
}
// Register a named channel. Registering the same name twice is a programming
// error (asserted in debug builds).
void Log::Register(llvm::StringRef name, Channel &channel) {
  auto insertion = g_channel_map->try_emplace(name, channel);
  assert(insertion.second == true);
  (void)insertion;
}
// Remove a named channel from the registry, fully disabling it first so no
// dangling stream or log pointer survives the erase.
void Log::Unregister(llvm::StringRef name) {
  auto pos = g_channel_map->find(name);
  assert(pos != g_channel_map->end());
  pos->second.Disable(UINT32_MAX);
  g_channel_map->erase(pos);
}
// Enable categories on a named channel. Unknown channel names are reported
// to error_stream and rejected; an empty category list selects the channel's
// default category flags.
bool Log::EnableLogChannel(
    const std::shared_ptr<llvm::raw_ostream> &log_stream_sp,
    uint32_t log_options, llvm::StringRef channel,
    llvm::ArrayRef<const char *> categories, llvm::raw_ostream &error_stream) {
  auto entry = g_channel_map->find(channel);
  if (entry == g_channel_map->end()) {
    error_stream << llvm::formatv("Invalid log channel '{0}'.\n", channel);
    return false;
  }
  uint32_t flags;
  if (categories.empty())
    flags = entry->second.m_channel.default_flags;
  else
    flags = GetFlags(error_stream, *entry, categories);
  entry->second.Enable(log_stream_sp, log_options, flags);
  return true;
}
// Disable categories on a named channel. Unknown channel names are reported
// to error_stream and rejected; an empty category list disables everything.
bool Log::DisableLogChannel(llvm::StringRef channel,
                            llvm::ArrayRef<const char *> categories,
                            llvm::raw_ostream &error_stream) {
  auto entry = g_channel_map->find(channel);
  if (entry == g_channel_map->end()) {
    error_stream << llvm::formatv("Invalid log channel '{0}'.\n", channel);
    return false;
  }
  uint32_t flags;
  if (categories.empty())
    flags = UINT32_MAX;
  else
    flags = GetFlags(error_stream, *entry, categories);
  entry->second.Disable(flags);
  return true;
}
// Print the category list of one channel; returns false (after printing an
// error) when the channel name is unknown.
bool Log::ListChannelCategories(llvm::StringRef channel,
                                llvm::raw_ostream &stream) {
  auto pos = g_channel_map->find(channel);
  if (pos == g_channel_map->end()) {
    stream << llvm::formatv("Invalid log channel '{0}'.\n", channel);
    return false;
  }
  ListCategories(stream, *pos);
  return true;
}
// Turn off every category of every registered channel.
void Log::DisableAllLogChannels() {
  for (auto it = g_channel_map->begin(); it != g_channel_map->end(); ++it)
    it->second.Disable(UINT32_MAX);
}
// Invoke 'lambda' with (name, description) for each category of the named
// channel; silently does nothing for unknown channels.
void Log::ForEachChannelCategory(
    llvm::StringRef channel,
    llvm::function_ref<void(llvm::StringRef, llvm::StringRef)> lambda) {
  auto pos = g_channel_map->find(channel);
  if (pos != g_channel_map->end())
    ForEachCategory(*pos, lambda);
}
std::vector<llvm::StringRef> Log::ListChannels() {
std::vector<llvm::StringRef> result;
for (const auto &channel : *g_channel_map)
result.push_back(channel.first());
return result;
}
// Print every channel together with its categories, or a notice when no
// channel has been registered yet.
void Log::ListAllLogChannels(llvm::raw_ostream &stream) {
  if (g_channel_map->empty()) {
    stream << "No logging channels are currently registered.\n";
    return;
  }
  for (const auto &entry : *g_channel_map)
    ListCategories(stream, entry);
}
// True when the verbose option bit is set for this log.
bool Log::GetVerbose() const {
  const uint32_t option_bits = m_options.load(std::memory_order_relaxed);
  return (option_bits & LLDB_LOG_OPTION_VERBOSE) != 0;
}
// Write the optional per-message prefix (sequence id, timestamp, pid/tid,
// thread name, backtrace, file:function) onto OS, according to the option
// bits currently set on this log. Each piece is emitted in this fixed order.
void Log::WriteHeader(llvm::raw_ostream &OS, llvm::StringRef file,
                      llvm::StringRef function) {
  Flags options = GetOptions();
  // NOTE(review): plain (non-atomic) static shared by all threads — ids can
  // race/duplicate under concurrent logging. Confirm this is acceptable.
  static uint32_t g_sequence_id = 0;
  // Add a sequence ID if requested
  if (options.Test(LLDB_LOG_OPTION_PREPEND_SEQUENCE))
    OS << ++g_sequence_id << " ";
  // Timestamp if requested
  if (options.Test(LLDB_LOG_OPTION_PREPEND_TIMESTAMP)) {
    // Seconds since the system clock epoch, printed with ns precision.
    auto now = std::chrono::duration<double>(
        std::chrono::system_clock::now().time_since_epoch());
    OS << llvm::formatv("{0:f9} ", now.count());
  }
  // Add the process and thread if requested
  if (options.Test(LLDB_LOG_OPTION_PREPEND_PROC_AND_THREAD))
    OS << llvm::formatv("[{0,0+4}/{1,0+4}] ", getpid(),
                        llvm::get_threadid());
  // Add the thread name if requested
  if (options.Test(LLDB_LOG_OPTION_PREPEND_THREAD_NAME)) {
    llvm::SmallString<32> thread_name;
    llvm::get_thread_name(thread_name);
    // Build a formatv spec like "{0,-N} " where N is the name length rounded
    // up to a multiple of 16, so columns line up across messages.
    llvm::SmallString<12> format_str;
    llvm::raw_svector_ostream format_os(format_str);
    format_os << "{0,-" << llvm::alignTo<16>(thread_name.size()) << "} ";
    OS << llvm::formatv(format_str.c_str(), thread_name);
  }
  if (options.Test(LLDB_LOG_OPTION_BACKTRACE))
    llvm::sys::PrintStackTrace(OS);
  if (options.Test(LLDB_LOG_OPTION_PREPEND_FILE_FUNCTION) &&
      (!file.empty() || !function.empty())) {
    // Truncate both components to 40 chars, then left-justify the pair in a
    // 60-character column.
    file = llvm::sys::path::filename(file).take_front(40);
    function = function.take_front(40);
    OS << llvm::formatv("{0,-60:60} ", (file + ":" + function).str());
  }
}
// Deliver a fully-formatted message to the log's stream and flush it.
void Log::WriteMessage(const std::string &message) {
  // Copy the stream shared pointer so a concurrent Disable() releasing the
  // stream cannot pull it out from under us mid-write.
  auto stream_sp = GetStream();
  if (!stream_sp)
    return;
  // Guards writes only when the THREADSAFE option is set; the lock is a
  // process-wide recursive mutex shared by all logs.
  static std::recursive_mutex g_LogThreadedMutex;
  std::unique_lock<std::recursive_mutex> guard(g_LogThreadedMutex,
                                               std::defer_lock);
  Flags options = GetOptions();
  if (options.Test(LLDB_LOG_OPTION_THREADSAFE))
    guard.lock();
  *stream_sp << message;
  stream_sp->flush();
}
// Render a formatv payload with the standard header and emit it as one
// message terminated by a newline.
void Log::Format(llvm::StringRef file, llvm::StringRef function,
                 const llvm::formatv_object_base &payload) {
  std::string text;
  llvm::raw_string_ostream stream(text);
  WriteHeader(stream, file, function);
  stream << payload << "\n";
  WriteMessage(stream.str());
}
void Log::DisableLoggingChild() {
// Disable logging by clearing out the atomic variable after forking -- if we
// forked while another thread held the channel mutex, we would deadlock when
// trying to write to the log.
for (auto &c: *g_channel_map)
c.second.m_channel.log_ptr.store(nullptr, std::memory_order_relaxed);
}
|
/* ----------------------------------------------------------------------
CSlib - Client/server library for code coupling
http://cslib.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright 2018 National Technology & Engineering Solutions of
Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
NTESS, the U.S. Government retains certain rights in this software.
This software is distributed under the modified Berkeley Software
Distribution (BSD) License.
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include "msg.h"
using namespace CSLIB_NS;
/* ---------------------------------------------------------------------- */
// Construct a messenger on an explicit MPI communicator. csflag selects the
// role (0 = client, 1 = server); the unused pointer parameter matches the
// signatures of the other Msg subclasses' constructors.
Msg::Msg(int csflag, const void * /* ptr */, MPI_Comm cworld)
{
  world = cworld;
  // Cache this process's rank and the communicator size.
  MPI_Comm_rank(world,&me);
  MPI_Comm_size(world,&nprocs);
  init(csflag);
}
/* ---------------------------------------------------------------------- */
// Construct a messenger without a caller-supplied communicator: behaves as a
// single-process (serial) setup with rank 0 of 1.
Msg::Msg(int csflag, const void * /* ptr */)
{
  world = 0;
  me = 0;
  nprocs = 1;
  init(csflag);
}
/* ---------------------------------------------------------------------- */
// Shared constructor tail: set the role flags (csflag 0 = client,
// 1 = server; any other value leaves both clear) and zero the send/receive
// counters.
void Msg::init(int csflag)
{
  client = (csflag == 0) ? 1 : 0;
  server = (csflag == 1) ? 1 : 0;
  nsend = 0;
  nrecv = 0;
}
/* ---------------------------------------------------------------------- */
// Grow-only (re)allocation of the header and data buffers: each buffer is
// freed and reallocated only when the requested size exceeds its recorded
// capacity (maxheader / maxbuf), which is updated in place.
void Msg::allocate(int nheader, int &maxheader, int *&header,
                   int nbuf, int &maxbuf, char *&buf)
{
  const bool grow_header = nheader > maxheader;
  if (grow_header) {
    sfree(header);
    maxheader = nheader;
    header = (int *) smalloc(maxheader*sizeof(int));
  }
  const bool grow_buf = nbuf > maxbuf;
  if (grow_buf) {
    sfree(buf);
    maxbuf = nbuf;
    buf = (char *) smalloc(maxbuf*sizeof(char));
  }
}
/* ---------------------------------------------------------------------- */
void *Msg::smalloc(int nbytes)
{
if (nbytes == 0) return NULL;
void *ptr = (void *) malloc(nbytes);
if (ptr == NULL) {
char str[128];
sprintf(str,"Failed to allocate %d bytes",nbytes);
}
return ptr;
}
/* ---------------------------------------------------------------------- */
// Safe free wrapper: ignores NULL pointers.
void Msg::sfree(void *ptr)
{
  if (ptr != NULL) free(ptr);
}
/* ---------------------------------------------------------------------- */
// Collective fatal error: rank 0 prints the message, then every rank aborts
// the communicator.
void Msg::error_all(const char *str)
{
  if (me == 0) {
    printf("CSlib ERROR: %s\n",str);
  }
  MPI_Abort(world,1);
}
/* ---------------------------------------------------------------------- */
// Single-rank fatal error: the calling rank prints the message and aborts
// the communicator.
void Msg::error_one(const char *str)
{
  printf("CSlib ERROR: %s\n",str);
  MPI_Abort(world,1);
}
|
/*
gpe_sfml_converter.cpp
This file is part of:
GAME PENCIL ENGINE
https://www.pawbyte.com/gamepencilengine
Copyright (c) 2014-2021 Nathan Hurde, Chase Lee.
Copyright (c) 2014-2021 PawByte LLC.
Copyright (c) 2014-2021 Game Pencil Engine contributors ( Contributors Page )
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-Game Pencil Engine <https://www.pawbyte.com/gamepencilengine>
*/
#include "gpe_sfml_converter.h"
// Currently empty: the only conversion helper is disabled below.
namespace gpe
{
    // NOTE(review): this helper is commented out, and as written it would be
    // unsafe to enable — it returns the address of 'color_new', a stack
    // local, which dangles as soon as the function returns. If revived, it
    // should return sfml_Color by value (or fill a caller-provided struct).
    /*sfml_Color * to_sfml_color( color * color_from_gpe )
    {
        if( color_from_gpe == nullptr )
        {
            return nullptr;
        }
        sfml_Color color_new = { color_from_gpe->get_r(), color_from_gpe->get_g(), color_from_gpe->get_b(), color_from_gpe->get_a() };
        return &color_new;
    }*/
}
|
#ifndef DATE_TIME_DATE_NAMES_PUT_HPP___
#define DATE_TIME_DATE_NAMES_PUT_HPP___
/* Copyright (c) 2002-2005 CrystalClear Software, Inc.
* Use, modification and distribution is subject to the
* Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
* Author: Jeff Garland, Bart Garst
* $Date$
*/
#include <boost/date_time/locale_config.hpp> // set BOOST_DATE_TIME_NO_LOCALE
#ifndef BOOST_DATE_TIME_NO_LOCALE
#include <boost/date_time/compiler_config.hpp>
#include <boost/date_time/special_defs.hpp>
#include <boost/date_time/date_defs.hpp>
#include <boost/date_time/parse_format_base.hpp>
#include <boost/lexical_cast.hpp>
#include <locale>
namespace lslboost {
namespace date_time {
//! Output facet base class for gregorian dates.
/*! This class is a base class for date facets used to localize the
* names of months and the names of days in the week.
*
* Requirements of Config
* - define an enumeration month_enum that enumerates the months.
* The enumeration should be '1' based eg: Jan==1
* - define as_short_string and as_long_string
*
* (see langer & kreft p334).
*
*/
    template<class Config,
             class charT = char,
             class OutputIterator = std::ostreambuf_iterator<charT> >
    class BOOST_SYMBOL_VISIBLE date_names_put : public std::locale::facet
    {
    public:
      date_names_put() {}
      typedef OutputIterator iter_type;
      typedef typename Config::month_type month_type;
      typedef typename Config::month_enum month_enum;
      typedef typename Config::weekday_enum weekday_enum;
      typedef typename Config::special_value_enum special_value_enum;
      //typedef typename Config::format_type format_type;
      typedef std::basic_string<charT> string_type;
      typedef charT char_type;
      // Default names for not_a_date_time / -infinity / +infinity; 17-wide
      // rows so each initializer is implicitly NUL-padded.
      static const char_type default_special_value_names[3][17];
      static const char_type separator[2];
      static std::locale::id id;
#if defined (__SUNPRO_CC) && defined (_RWSTD_VER)
      std::locale::id& __get_id (void) const { return id; }
#endif
      // Public non-virtual interface; each method forwards to the matching
      // do_* virtual so derived facets can customize behavior (NVI pattern).
      void put_special_value(iter_type& oitr, special_value_enum sv) const
      {
        do_put_special_value(oitr, sv);
      }
      void put_month_short(iter_type& oitr, month_enum moy) const
      {
        do_put_month_short(oitr, moy);
      }
      void put_month_long(iter_type& oitr, month_enum moy) const
      {
        do_put_month_long(oitr, moy);
      }
      void put_weekday_short(iter_type& oitr, weekday_enum wd) const
      {
        do_put_weekday_short(oitr, wd);
      }
      void put_weekday_long(iter_type& oitr, weekday_enum wd) const
      {
        do_put_weekday_long(oitr, wd);
      }
      bool has_date_sep_chars() const
      {
        return do_has_date_sep_chars();
      }
      void year_sep_char(iter_type& oitr) const
      {
        do_year_sep_char(oitr);
      }
      //! char between year-month
      void month_sep_char(iter_type& oitr) const
      {
        do_month_sep_char(oitr);
      }
      //! Char to separate month-day
      void day_sep_char(iter_type& oitr) const
      {
        do_day_sep_char(oitr);
      }
      //! Determines the order to put the date elements
      ymd_order_spec date_order() const
      {
        return do_date_order();
      }
      //! Determines if month is displayed as integer, short or long string
      month_format_spec month_format() const
      {
        return do_month_format();
      }
    protected:
      //! Default facet implementation uses month_type defaults
      virtual void do_put_month_short(iter_type& oitr, month_enum moy) const
      {
        month_type gm(moy);
        charT c = '\0';
        put_string(oitr, gm.as_short_string(c));
      }
      //! Default facet implementation uses month_type defaults
      virtual void do_put_month_long(iter_type& oitr,
                                     month_enum moy) const
      {
        month_type gm(moy);
        charT c = '\0';
        put_string(oitr, gm.as_long_string(c));
      }
      //! Default facet implementation for special value types
      virtual void do_put_special_value(iter_type& oitr, special_value_enum sv) const
      {
        if(sv <= 2) { // only output not_a_date_time, neg_infin, or pos_infin
          string_type s(default_special_value_names[sv]);
          put_string(oitr, s);
        }
      }
      // Weekday output is a no-op in the base facet; derived facets supply
      // the actual names (see all_date_names_put below).
      virtual void do_put_weekday_short(iter_type&, weekday_enum) const
      {
      }
      virtual void do_put_weekday_long(iter_type&, weekday_enum) const
      {
      }
      virtual bool do_has_date_sep_chars() const
      {
        return true;
      }
      virtual void do_year_sep_char(iter_type& oitr) const
      {
        string_type s(separator);
        put_string(oitr, s);
      }
      //! char between year-month
      virtual void do_month_sep_char(iter_type& oitr) const
      {
        string_type s(separator);
        put_string(oitr, s);
      }
      //! Char to separate month-day
      virtual void do_day_sep_char(iter_type& oitr) const
      {
        string_type s(separator); //put in '-'
        put_string(oitr, s);
      }
      //! Default for date order
      virtual ymd_order_spec do_date_order() const
      {
        return ymd_order_iso;
      }
      //! Default month format
      virtual month_format_spec do_month_format() const
      {
        return month_as_short_string;
      }
      // Copy a C string to the output iterator, converting the character
      // type via lexical_cast when charT differs from the source type.
      void put_string(iter_type& oi, const charT* const s) const
      {
        string_type s1(lslboost::lexical_cast<string_type>(s));
        typename string_type::iterator si,end;
        for (si=s1.begin(), end=s1.end(); si!=end; si++, oi++) {
          *oi = *si;
        }
      }
      // Copy a string to the output iterator character by character.
      void put_string(iter_type& oi, const string_type& s1) const
      {
        typename string_type::const_iterator si,end;
        for (si=s1.begin(), end=s1.end(); si!=end; si++, oi++) {
          *oi = *si;
        }
      }
    };
    // Out-of-line definitions for the static members declared above. The
    // name strings are shorter than the 17-char rows, so the remainder of
    // each row is implicitly zero-filled (guaranteeing NUL termination).
    template<class Config, class charT, class OutputIterator>
    const typename date_names_put<Config, charT, OutputIterator>::char_type
    date_names_put<Config, charT, OutputIterator>::default_special_value_names[3][17] = {
      {'n','o','t','-','a','-','d','a','t','e','-','t','i','m','e'},
      {'-','i','n','f','i','n','i','t','y'},
      {'+','i','n','f','i','n','i','t','y'} };
    // Default date-component separator: "-" as a NUL-terminated array.
    template<class Config, class charT, class OutputIterator>
    const typename date_names_put<Config, charT, OutputIterator>::char_type
    date_names_put<Config, charT, OutputIterator>::separator[2] =
      {'-', '\0'} ;
    //! Generate storage location for a std::locale::id
    template<class Config, class charT, class OutputIterator>
    std::locale::id date_names_put<Config, charT, OutputIterator>::id;
//! A date name output facet that takes an array of char* to define strings
    template<class Config,
             class charT = char,
             class OutputIterator = std::ostreambuf_iterator<charT> >
    class BOOST_SYMBOL_VISIBLE all_date_names_put : public date_names_put<Config, charT, OutputIterator>
    {
    public:
      // NOTE: the name arrays are stored by pointer, not copied — callers
      // must keep them alive for the lifetime of the facet.
      all_date_names_put(const charT* const month_short_names[],
                         const charT* const month_long_names[],
                         const charT* const special_value_names[],
                         const charT* const weekday_short_names[],
                         const charT* const weekday_long_names[],
                         charT separator_char = '-',
                         ymd_order_spec order_spec = ymd_order_iso,
                         month_format_spec month_format = month_as_short_string) :
        month_short_names_(month_short_names),
        month_long_names_(month_long_names),
        special_value_names_(special_value_names),
        weekday_short_names_(weekday_short_names),
        weekday_long_names_(weekday_long_names),
        order_spec_(order_spec),
        month_format_spec_(month_format)
      {
        separator_char_[0] = separator_char;
        separator_char_[1] = '\0';
      }
      typedef OutputIterator iter_type;
      typedef typename Config::month_enum month_enum;
      typedef typename Config::weekday_enum weekday_enum;
      typedef typename Config::special_value_enum special_value_enum;
      // Accessors expose the raw name arrays supplied at construction.
      const charT* const* get_short_month_names() const
      {
        return month_short_names_;
      }
      const charT* const* get_long_month_names() const
      {
        return month_long_names_;
      }
      const charT* const* get_special_value_names() const
      {
        return special_value_names_;
      }
      const charT* const* get_short_weekday_names()const
      {
        return weekday_short_names_;
      }
      const charT* const* get_long_weekday_names()const
      {
        return weekday_long_names_;
      }
    protected:
      //! Generic facet that takes array of chars
      // Month enums are 1-based (Jan==1), hence the moy-1 indexing below.
      virtual void do_put_month_short(iter_type& oitr, month_enum moy) const
      {
        this->put_string(oitr, month_short_names_[moy-1]);
      }
      //! Long month names
      virtual void do_put_month_long(iter_type& oitr, month_enum moy) const
      {
        this->put_string(oitr, month_long_names_[moy-1]);
      }
      //! Special values names
      virtual void do_put_special_value(iter_type& oitr, special_value_enum sv) const
      {
        this->put_string(oitr, special_value_names_[sv]);
      }
      virtual void do_put_weekday_short(iter_type& oitr, weekday_enum wd) const
      {
        this->put_string(oitr, weekday_short_names_[wd]);
      }
      virtual void do_put_weekday_long(iter_type& oitr, weekday_enum wd) const
      {
        this->put_string(oitr, weekday_long_names_[wd]);
      }
      //! char between year-month
      virtual void do_month_sep_char(iter_type& oitr) const
      {
        this->put_string(oitr, separator_char_);
      }
      //! Char to separate month-day
      virtual void do_day_sep_char(iter_type& oitr) const
      {
        this->put_string(oitr, separator_char_);
      }
      //! Set the date ordering
      virtual ymd_order_spec do_date_order() const
      {
        return order_spec_;
      }
      //! Set the date ordering
      virtual month_format_spec do_month_format() const
      {
        return month_format_spec_;
      }
    private:
      const charT* const* month_short_names_;
      const charT* const* month_long_names_;
      const charT* const* special_value_names_;
      const charT* const* weekday_short_names_;
      const charT* const* weekday_long_names_;
      charT separator_char_[2];
      ymd_order_spec order_spec_;
      month_format_spec month_format_spec_;
    };
} } //namespace lslboost::date_time
#endif //BOOST_NO_STD_LOCALE
#endif
|
//===-- ThreadPlanStepInRange.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "lldb/Target/ThreadPlanStepInRange.h"
#include "lldb/Core/Architecture.h"
#include "lldb/Core/Module.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/SectionLoadList.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlanStepOut.h"
#include "lldb/Target/ThreadPlanStepThrough.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RegularExpression.h"
#include "lldb/Utility/Stream.h"
using namespace lldb;
using namespace lldb_private;
// Class-wide default for new step-in plans: by default, stepping in avoids
// frames without debug information (can be changed via SetDefaultFlagValue).
uint32_t ThreadPlanStepInRange::s_default_flag_values =
    ThreadPlanShouldStopHere::eStepInAvoidNoDebug;
// ThreadPlanStepInRange: Step through a stack range, either stepping over or
// into based on the value of \a type.
// Construct a step-in plan over 'range' with no specific step-into target.
// The LazyBool parameters let callers override (or defer to thread defaults
// for) the avoid-code-without-debug-info behavior.
ThreadPlanStepInRange::ThreadPlanStepInRange(
    Thread &thread, const AddressRange &range,
    const SymbolContext &addr_context, lldb::RunMode stop_others,
    LazyBool step_in_avoids_code_without_debug_info,
    LazyBool step_out_avoids_code_without_debug_info)
    : ThreadPlanStepRange(ThreadPlan::eKindStepInRange,
                          "Step Range stepping in", thread, range, addr_context,
                          stop_others),
      ThreadPlanShouldStopHere(this), m_step_past_prologue(true),
      m_virtual_step(false) {
  SetCallbacks();
  SetFlagsToDefault();
  SetupAvoidNoDebug(step_in_avoids_code_without_debug_info,
                    step_out_avoids_code_without_debug_info);
}
// Construct a step-in plan that only stops in functions whose name matches
// 'step_into_target' (see DefaultShouldStopHereCallback for the matching).
ThreadPlanStepInRange::ThreadPlanStepInRange(
    Thread &thread, const AddressRange &range,
    const SymbolContext &addr_context, const char *step_into_target,
    lldb::RunMode stop_others, LazyBool step_in_avoids_code_without_debug_info,
    LazyBool step_out_avoids_code_without_debug_info)
    : ThreadPlanStepRange(ThreadPlan::eKindStepInRange,
                          "Step Range stepping in", thread, range, addr_context,
                          stop_others),
      ThreadPlanShouldStopHere(this), m_step_past_prologue(true),
      m_virtual_step(false), m_step_into_target(step_into_target) {
  SetCallbacks();
  SetFlagsToDefault();
  SetupAvoidNoDebug(step_in_avoids_code_without_debug_info,
                    step_out_avoids_code_without_debug_info);
}
// No owned resources beyond the members; defaulted destructor suffices.
ThreadPlanStepInRange::~ThreadPlanStepInRange() = default;
// Translate the two LazyBool preferences into ShouldStopHere flag bits.
// eLazyBoolCalculate defers to the owning thread's settings; note the thread
// getters are only invoked in that case, so the lookup stays lazy.
void ThreadPlanStepInRange::SetupAvoidNoDebug(
    LazyBool step_in_avoids_code_without_debug_info,
    LazyBool step_out_avoids_code_without_debug_info) {
  bool avoid_nodebug = true;
  // Step-IN preference.
  switch (step_in_avoids_code_without_debug_info) {
  case eLazyBoolYes:
    avoid_nodebug = true;
    break;
  case eLazyBoolNo:
    avoid_nodebug = false;
    break;
  case eLazyBoolCalculate:
    avoid_nodebug = m_thread.GetStepInAvoidsNoDebug();
    break;
  }
  if (avoid_nodebug)
    GetFlags().Set(ThreadPlanShouldStopHere::eStepInAvoidNoDebug);
  else
    GetFlags().Clear(ThreadPlanShouldStopHere::eStepInAvoidNoDebug);
  // Step-OUT preference (reuses the same local).
  switch (step_out_avoids_code_without_debug_info) {
  case eLazyBoolYes:
    avoid_nodebug = true;
    break;
  case eLazyBoolNo:
    avoid_nodebug = false;
    break;
  case eLazyBoolCalculate:
    avoid_nodebug = m_thread.GetStepOutAvoidsNoDebug();
    break;
  }
  if (avoid_nodebug)
    GetFlags().Set(ThreadPlanShouldStopHere::eStepOutAvoidNoDebug);
  else
    GetFlags().Clear(ThreadPlanShouldStopHere::eStepOutAvoidNoDebug);
}
// Describe this plan on 's'. Brief level prints a one-liner; other levels
// add the line entry, the step-into target (if any), and optionally the
// address ranges. Any failure recorded in m_status is appended.
void ThreadPlanStepInRange::GetDescription(Stream *s,
                                           lldb::DescriptionLevel level) {
  // Appends " failed (...)" when the plan has recorded an error.
  auto PrintFailureIfAny = [&]() {
    if (m_status.Success())
      return;
    s->Printf(" failed (%s)", m_status.AsCString());
  };
  if (level == lldb::eDescriptionLevelBrief) {
    s->Printf("step in");
    PrintFailureIfAny();
    return;
  }
  s->Printf("Stepping in");
  bool printed_line_info = false;
  if (m_addr_context.line_entry.IsValid()) {
    s->Printf(" through line ");
    m_addr_context.line_entry.DumpStopContext(s, false);
    printed_line_info = true;
  }
  const char *step_into_target = m_step_into_target.AsCString();
  if (step_into_target && step_into_target[0] != '\0')
    s->Printf(" targeting %s", m_step_into_target.AsCString());
  // Fall back to (or, in verbose mode, additionally dump) the raw ranges.
  if (!printed_line_info || level == eDescriptionLevelVerbose) {
    s->Printf(" using ranges:");
    DumpRanges(s);
  }
  PrintFailureIfAny();
  s->PutChar('.');
}
// Decide whether this plan stops at the current PC or queues a sub-plan to
// keep going. Returns true to stop (plan complete), false to continue with
// whatever sub-plan was queued. The frame-comparison logic below is highly
// order-dependent; the inline comments explain each branch.
bool ThreadPlanStepInRange::ShouldStop(Event *event_ptr) {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
  if (log) {
    StreamString s;
    s.Address(
        m_thread.GetRegisterContext()->GetPC(),
        m_thread.CalculateTarget()->GetArchitecture().GetAddressByteSize());
    LLDB_LOGF(log, "ThreadPlanStepInRange reached %s.", s.GetData());
  }
  if (IsPlanComplete())
    return true;
  m_no_more_plans = false;
  // A sub-plan that completed unsuccessfully terminates this plan too.
  if (m_sub_plan_sp && m_sub_plan_sp->IsPlanComplete()) {
    if (!m_sub_plan_sp->PlanSucceeded()) {
      SetPlanComplete();
      m_no_more_plans = true;
      return true;
    } else
      m_sub_plan_sp.reset();
  }
  if (m_virtual_step) {
    // If we've just completed a virtual step, all we need to do is check for a
    // ShouldStopHere plan, and otherwise we're done.
    // FIXME - This can be both a step in and a step out. Probably should
    // record which in the m_virtual_step.
    m_sub_plan_sp =
        CheckShouldStopHereAndQueueStepOut(eFrameCompareYounger, m_status);
  } else {
    // Stepping through should be done running other threads in general, since
    // we're setting a breakpoint and continuing. So only stop others if we
    // are explicitly told to do so.
    bool stop_others = (m_stop_others == lldb::eOnlyThisThread);
    FrameComparison frame_order = CompareCurrentFrameToStartFrame();
    if (frame_order == eFrameCompareOlder ||
        frame_order == eFrameCompareSameParent) {
      // If we're in an older frame then we should stop.
      //
      // A caveat to this is if we think the frame is older but we're actually
      // in a trampoline.
      // I'm going to make the assumption that you wouldn't RETURN to a
      // trampoline. So if we are in a trampoline we think the frame is older
      // because the trampoline confused the backtracer.
      m_sub_plan_sp = m_thread.QueueThreadPlanForStepThrough(
          m_stack_id, false, stop_others, m_status);
      if (!m_sub_plan_sp) {
        // Otherwise check the ShouldStopHere for step out:
        m_sub_plan_sp =
            CheckShouldStopHereAndQueueStepOut(frame_order, m_status);
        if (log) {
          if (m_sub_plan_sp)
            LLDB_LOGF(log,
                      "ShouldStopHere found plan to step out of this frame.");
          else
            LLDB_LOGF(log, "ShouldStopHere no plan to step out of this frame.");
        }
      } else if (log) {
        LLDB_LOGF(
            log, "Thought I stepped out, but in fact arrived at a trampoline.");
      }
    } else if (frame_order == eFrameCompareEqual && InSymbol()) {
      // If we are not in a place we should step through, we're done. One
      // tricky bit here is that some stubs don't push a frame, so we have to
      // check both the case of a frame that is younger, or the same as this
      // frame. However, if the frame is the same, and we are still in the
      // symbol we started in, the we don't need to do this. This first check
      // isn't strictly necessary, but it is more efficient.
      // If we're still in the range, keep going, either by running to the next
      // branch breakpoint, or by stepping.
      if (InRange()) {
        SetNextBranchBreakpoint();
        return false;
      }
      SetPlanComplete();
      m_no_more_plans = true;
      return true;
    }
    // If we get to this point, we're not going to use a previously set "next
    // branch" breakpoint, so delete it:
    ClearNextBranchBreakpoint();
    // We may have set the plan up above in the FrameIsOlder section:
    if (!m_sub_plan_sp)
      m_sub_plan_sp = m_thread.QueueThreadPlanForStepThrough(
          m_stack_id, false, stop_others, m_status);
    if (log) {
      if (m_sub_plan_sp)
        LLDB_LOGF(log, "Found a step through plan: %s",
                  m_sub_plan_sp->GetName());
      else
        LLDB_LOGF(log, "No step through plan found.");
    }
    // If not, give the "should_stop" callback a chance to push a plan to get
    // us out of here. But only do that if we actually have stepped in.
    if (!m_sub_plan_sp && frame_order == eFrameCompareYounger)
      m_sub_plan_sp = CheckShouldStopHereAndQueueStepOut(frame_order, m_status);
    // If we've stepped in and we are going to stop here, check to see if we
    // were asked to run past the prologue, and if so do that.
    if (!m_sub_plan_sp && frame_order == eFrameCompareYounger &&
        m_step_past_prologue) {
      lldb::StackFrameSP curr_frame = m_thread.GetStackFrameAtIndex(0);
      if (curr_frame) {
        size_t bytes_to_skip = 0;
        lldb::addr_t curr_addr = m_thread.GetRegisterContext()->GetPC();
        Address func_start_address;
        SymbolContext sc = curr_frame->GetSymbolContext(eSymbolContextFunction |
                                                        eSymbolContextSymbol);
        // Prefer the function's prologue size; fall back to the symbol's.
        if (sc.function) {
          func_start_address = sc.function->GetAddressRange().GetBaseAddress();
          if (curr_addr ==
              func_start_address.GetLoadAddress(
                  m_thread.CalculateTarget().get()))
            bytes_to_skip = sc.function->GetPrologueByteSize();
        } else if (sc.symbol) {
          func_start_address = sc.symbol->GetAddress();
          if (curr_addr ==
              func_start_address.GetLoadAddress(
                  m_thread.CalculateTarget().get()))
            bytes_to_skip = sc.symbol->GetPrologueByteSize();
        }
        // Architecture plugin gets a final say (e.g. arch-specific stubs).
        if (bytes_to_skip == 0 && sc.symbol) {
          TargetSP target = m_thread.CalculateTarget();
          const Architecture *arch = target->GetArchitecturePlugin();
          if (arch) {
            Address curr_sec_addr;
            target->GetSectionLoadList().ResolveLoadAddress(curr_addr,
                                                            curr_sec_addr);
            bytes_to_skip = arch->GetBytesToSkip(*sc.symbol, curr_sec_addr);
          }
        }
        if (bytes_to_skip != 0) {
          func_start_address.Slide(bytes_to_skip);
          log = lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP);
          LLDB_LOGF(log, "Pushing past prologue ");
          m_sub_plan_sp = m_thread.QueueThreadPlanForRunToAddress(
              false, func_start_address, true, m_status);
        }
      }
    }
  }
  // No sub-plan queued anywhere above means we are done; otherwise run the
  // (private) sub-plan and revisit on the next stop.
  if (!m_sub_plan_sp) {
    m_no_more_plans = true;
    SetPlanComplete();
    return true;
  } else {
    m_no_more_plans = false;
    m_sub_plan_sp->SetPrivate(true);
    return false;
  }
}
void ThreadPlanStepInRange::SetAvoidRegexp(const char *name) {
auto name_ref = llvm::StringRef::withNullAsEmpty(name);
if (!m_avoid_regexp_up)
m_avoid_regexp_up.reset(new RegularExpression(name_ref));
m_avoid_regexp_up->Compile(name_ref);
}
void ThreadPlanStepInRange::SetDefaultFlagValue(uint32_t new_value) {
// TODO: Should we test this for sanity?
ThreadPlanStepInRange::s_default_flag_values = new_value;
}
// Returns true when the current (frame 0) function should be stepped out of:
// either its module is on the thread's libraries-to-avoid list, or its
// demangled name matches the avoid regexp (plan-local one first, falling
// back to the thread-wide setting).
bool ThreadPlanStepInRange::FrameMatchesAvoidCriteria() {
  StackFrame *frame = GetThread().GetStackFrameAtIndex(0).get();
  // Check the library list first, as that's cheapest:
  bool libraries_say_avoid = false;
  FileSpecList libraries_to_avoid(GetThread().GetLibrariesToAvoid());
  size_t num_libraries = libraries_to_avoid.GetSize();
  if (num_libraries > 0) {
    // NOTE(review): sc.module_sp is dereferenced without a null check here;
    // presumably eSymbolContextModule always yields one for a real frame —
    // confirm.
    SymbolContext sc(frame->GetSymbolContext(eSymbolContextModule));
    FileSpec frame_library(sc.module_sp->GetFileSpec());
    if (frame_library) {
      for (size_t i = 0; i < num_libraries; i++) {
        const FileSpec &file_spec(libraries_to_avoid.GetFileSpecAtIndex(i));
        if (FileSpec::Equal(file_spec, frame_library, false)) {
          libraries_say_avoid = true;
          break;
        }
      }
    }
  }
  if (libraries_say_avoid)
    return true;
  // Plan-local regexp wins over the thread's global one.
  const RegularExpression *avoid_regexp_to_use = m_avoid_regexp_up.get();
  if (avoid_regexp_to_use == nullptr)
    avoid_regexp_to_use = GetThread().GetSymbolsToAvoidRegexp();
  if (avoid_regexp_to_use != nullptr) {
    SymbolContext sc = frame->GetSymbolContext(
        eSymbolContextFunction | eSymbolContextBlock | eSymbolContextSymbol);
    if (sc.symbol != nullptr) {
      const char *frame_function_name =
          sc.GetFunctionName(Mangled::ePreferDemangledWithoutArguments)
              .GetCString();
      if (frame_function_name) {
        // Only capture match groups when logging wants to report them.
        size_t num_matches = 0;
        Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
        if (log)
          num_matches = 1;
        RegularExpression::Match regex_match(num_matches);
        bool return_value =
            avoid_regexp_to_use->Execute(frame_function_name, &regex_match);
        if (return_value) {
          if (log) {
            std::string match;
            regex_match.GetMatchAtIndex(frame_function_name, 0, match);
            LLDB_LOGF(log,
                      "Stepping out of function \"%s\" because it matches "
                      "the avoid regexp \"%s\" - match substring: \"%s\".",
                      frame_function_name,
                      avoid_regexp_to_use->GetText().str().c_str(),
                      match.c_str());
          }
        }
        return return_value;
      }
    }
  }
  return false;
}
// ShouldStopHere callback for step-in plans. Runs the base-class policy
// first; then, when we actually stepped into a younger frame, applies the
// optional step-into-target name filter and the avoid-criteria check.
// Returns false to request stepping out of the current frame.
bool ThreadPlanStepInRange::DefaultShouldStopHereCallback(
    ThreadPlan *current_plan, Flags &flags, FrameComparison operation,
    Status &status, void *baton) {
  bool should_stop_here = true;
  StackFrame *frame = current_plan->GetThread().GetStackFrameAtIndex(0).get();
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
  // First see if the ThreadPlanShouldStopHere default implementation thinks we
  // should get out of here:
  should_stop_here = ThreadPlanShouldStopHere::DefaultShouldStopHereCallback(
      current_plan, flags, operation, status, baton);
  if (!should_stop_here)
    return should_stop_here;
  if (should_stop_here && current_plan->GetKind() == eKindStepInRange &&
      operation == eFrameCompareYounger) {
    ThreadPlanStepInRange *step_in_range_plan =
        static_cast<ThreadPlanStepInRange *>(current_plan);
    if (step_in_range_plan->m_step_into_target) {
      SymbolContext sc = frame->GetSymbolContext(
          eSymbolContextFunction | eSymbolContextBlock | eSymbolContextSymbol);
      if (sc.symbol != nullptr) {
        // First try an exact match, since that's cheap with ConstStrings.
        // Then do a strstr compare.
        if (step_in_range_plan->m_step_into_target == sc.GetFunctionName()) {
          should_stop_here = true;
        } else {
          const char *target_name =
              step_in_range_plan->m_step_into_target.AsCString();
          const char *function_name = sc.GetFunctionName().AsCString();
          if (function_name == nullptr)
            should_stop_here = false;
          else if (strstr(function_name, target_name) == nullptr)
            should_stop_here = false;
        }
        if (log && !should_stop_here)
          LLDB_LOGF(log,
                    "Stepping out of frame %s which did not match step into "
                    "target %s.",
                    sc.GetFunctionName().AsCString(),
                    step_in_range_plan->m_step_into_target.AsCString());
      }
    }
    // Even if the target matched (or none was set), the avoid lists/regexp
    // can still veto stopping here.
    if (should_stop_here) {
      ThreadPlanStepInRange *step_in_range_plan =
          static_cast<ThreadPlanStepInRange *>(current_plan);
      // Don't log the should_step_out here, it's easier to do it in
      // FrameMatchesAvoidCriteria.
      should_stop_here = !step_in_range_plan->FrameMatchesAvoidCriteria();
    }
  }
  return should_stop_here;
}
bool ThreadPlanStepInRange::DoPlanExplainsStop(Event *event_ptr) {
  // We always explain a stop. Either we've just done a single step, in which
  // case we'll do our ordinary processing, or we stopped for some reason that
  // isn't handled by our sub-plans, in which case we want to just stop right
  // away. In general, we don't want to mark the plan as complete for
  // unexplained stops. For instance, if you step in to some code with no debug
  // info, so you step out and in the course of that hit a breakpoint, then you
  // want to stop & show the user the breakpoint, but not unship the step in
  // plan, since you still may want to complete that plan when you continue.
  // This is particularly true when doing "step in to target function."
  // stepping.
  //
  // The only variation is that if we are doing "step by running to next
  // branch" in which case if we hit our branch breakpoint we don't set the
  // plan to complete.

  // A virtual step (inlined-depth adjustment) is always ours.
  if (m_virtual_step)
    return true;

  StopInfoSP stop_info_sp = GetPrivateStopInfo();
  if (!stop_info_sp)
    return true;

  const StopReason reason = stop_info_sp->GetStopReason();
  if (reason == eStopReasonBreakpoint)
    return NextRangeBreakpointExplainsStop(stop_info_sp);

  if (IsUsuallyUnexplainedStopReason(reason)) {
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log)
      log->PutCString("ThreadPlanStepInRange got asked if it explains the "
                      "stop for some reason other than step.");
    return false;
  }

  return true;
}
// DoWillResume: called just before the thread resumes. If we are stepping and
// are on top of a virtual inlined-call frame, we can "step" simply by
// decrementing the inlined depth without actually running the thread; in that
// case we record a virtual step and return false to suppress the resume.
bool ThreadPlanStepInRange::DoWillResume(lldb::StateType resume_state,
                                         bool current_plan) {
  m_virtual_step = false;
  if (resume_state == eStateStepping && current_plan) {
    // See if we are about to step over a virtual inlined call.
    bool step_without_resume = m_thread.DecrementCurrentInlinedDepth();
    if (step_without_resume) {
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      LLDB_LOGF(log,
                "ThreadPlanStepInRange::DoWillResume: returning false, "
                "inline_depth: %d",
                m_thread.GetCurrentInlinedDepth());
      // Report the virtual step as a "trace" stop for now.
      SetStopInfo(StopInfo::CreateStopReasonToTrace(m_thread));
      // FIXME: Maybe it would be better to create a InlineStep stop reason, but
      // then
      // the whole rest of the world would have to handle that stop reason.
      m_virtual_step = true;
    }
    return !step_without_resume;
  }
  return true;
}
// IsVirtualStep: report whether the last "step" was performed virtually by
// adjusting the current inlined frame depth (see DoWillResume) rather than by
// actually resuming the thread.
bool ThreadPlanStepInRange::IsVirtualStep() { return m_virtual_step; }
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Importer XX
XX XX
XX Imports the given method and converts it to semantic trees XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "corexcep.h"
// Verify: if "cond" is false, raise a verification exception when the method
// is being verified (see verRaiseVerifyExceptionIfNeeded); otherwise just log
// and continue importing.
#define Verify(cond, msg)                                                                                              \
    do                                                                                                                 \
    {                                                                                                                  \
        if (!(cond))                                                                                                   \
        {                                                                                                              \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
        }                                                                                                              \
    } while (0)

// VerifyOrReturn: like Verify, but additionally returns from the enclosing
// (void-returning) function when the condition fails.
#define VerifyOrReturn(cond, msg)                                                                                      \
    do                                                                                                                 \
    {                                                                                                                  \
        if (!(cond))                                                                                                   \
        {                                                                                                              \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
            return;                                                                                                    \
        }                                                                                                              \
    } while (0)

// VerifyOrReturnSpeculative: for use in bool-returning verification helpers.
// When "speculative" is true a failure quietly returns false (no exception);
// otherwise it behaves like VerifyOrReturn but returns false.
#define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
    do                                                                                                                 \
    {                                                                                                                  \
        if (speculative)                                                                                               \
        {                                                                                                              \
            if (!(cond))                                                                                               \
            {                                                                                                          \
                return false;                                                                                          \
            }                                                                                                          \
        }                                                                                                              \
        else                                                                                                           \
        {                                                                                                              \
            if (!(cond))                                                                                               \
            {                                                                                                          \
                verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
                return false;                                                                                          \
            }                                                                                                          \
        }                                                                                                              \
    } while (0)
/*****************************************************************************/
// impInit: Reset the importer's per-method state.
void Compiler::impInit()
{
    // Start with an empty pending statement list.
    impStmtList = nullptr;
    impLastStmt = nullptr;
#ifdef DEBUG
    impInlinedCodeSize = 0;
#endif // DEBUG
}
/*****************************************************************************
 *
 *  Pushes the given tree on the stack.
 *
 *  Arguments:
 *     tree - the value tree to push.
 *     ti   - the verification type info to record alongside the tree.
 *
 *  Throws BADCODE on IL stack overflow. Also tracks whether the method uses
 *  long or floating-point values (compLongUsed / compFloatingPointUsed).
 */
void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
{
    /* Check for overflow. If inlining, we may be using a bigger stack */
    if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
        (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
    {
        BADCODE("stack overflow");
    }
#ifdef DEBUG
    // If we are pushing a struct, make certain we know the precise type!
    if (tree->TypeGet() == TYP_STRUCT)
    {
        assert(ti.IsType(TI_STRUCT));
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
        assert(clsHnd != NO_CLASS_HANDLE);
    }
    // Under verification, check that the typeInfo is consistent with the tree.
    if (tiVerificationNeeded && !ti.IsDead())
    {
        assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
        // The ti type is consistent with the tree type.
        //
        // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
        // In the verification type system, we always transform "native int" to "TI_INT".
        // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
        // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
        // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
        // method used in the last disjunct allows exactly this mismatch.
        assert(ti.IsDead() || (ti.IsByRef() && ((tree->TypeGet() == TYP_I_IMPL) || (tree->TypeGet() == TYP_BYREF))) ||
               (ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF) ||
               (ti.IsObjRef() && tree->TypeGet() == TYP_REF) || (ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL) ||
               (ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF) ||
               typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
                                                      NormaliseForStack(typeInfo(tree->TypeGet()))));
        // If it is a struct type, make certain we normalized the primitive types
        assert(!ti.IsType(TI_STRUCT) ||
               info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
    }
#if VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf("\n");
        printf(TI_DUMP_PADDING);
        printf("About to push to stack: ");
        ti.Dump();
    }
#endif // VERBOSE_VERIFY
#endif // DEBUG
    // Record the entry and bump the stack depth.
    verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
    verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
    // Note long/floating-point usage so downstream phases can skip work
    // for methods that never touch those types.
    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    {
        compLongUsed = true;
    }
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
    {
        compFloatingPointUsed = true;
    }
}
// impPushNullObjRefOnStack: push a null object reference (a zero TYP_REF
// constant, with TI_NULL verification type) onto the importer's stack.
inline void Compiler::impPushNullObjRefOnStack()
{
    impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}
// This method gets called when we run into unverifiable code
// (and we are verifying the method)
//
// It always logs the failure; it only raises the actual verification
// exception when the JIT is running in import-only (verification) mode.
inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                          DEBUGARG(unsigned line))
{
#ifdef DEBUG
    // Trim the path so logs show just the file name.
    const char* tail = strrchr(file, '\\');
    if (tail)
    {
        file = tail + 1;
    }
    if (JitConfig.JitBreakOnUnsafeCode())
    {
        assert(!"Unsafe code detected");
    }
#endif
    JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));
    if (compIsForImportOnly())
    {
        JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
                msg, info.compFullName, impCurOpcName, impCurOpcOffs));
        verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
    }
}
// verRaiseVerifyException: unconditionally raise a verification failure.
// Logs the failure and then throws SEH_VERIFICATION_EXCEPTION; never returns.
inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                                    DEBUGARG(unsigned line))
{
    JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));
#ifdef DEBUG
    // BreakIfDebuggerPresent();
    if (getBreakOnBadCode())
    {
        assert(!"Typechecking error");
    }
#endif
    RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
    UNREACHABLE();
}
// helper function that will tell us if the IL instruction at the addr passed
// by param consumes an address at the top of the stack. We use it to save
// us lvAddrTaken
//
// Arguments:
//    codeAddr - pointer to the IL opcode to inspect.
//
// Return Value:
//    true if the instruction consumes the address (currently only CEE_LDFLD
//    of a non-small field), false otherwise.
bool Compiler::impILConsumesAddr(const BYTE* codeAddr)
{
    assert(!compIsForInlining());
    OPCODE opcode;
    opcode = (OPCODE)getU1LittleEndian(codeAddr);
    switch (opcode)
    {
        // case CEE_LDFLDA: We're taking this one out as if you have a sequence
        // like
        //
        //          ldloca.0
        //          ldflda whatever
        //
        // of a primitivelike struct, you end up after morphing with addr of a local
        // that's not marked as addrtaken, which is wrong. Also ldflda is usually used
        // for structs that contain other structs, which isnt a case we handle very
        // well now for other reasons.
        case CEE_LDFLD:
        {
            // We won't collapse small fields. This is probably not the right place to have this
            // check, but we're only using the function for this purpose, and is easy to factor
            // out if we need to do so.
            CORINFO_RESOLVED_TOKEN resolvedToken;
            impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
            var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField));
            // Note: the original code normalized non-small types with genActualType and then
            // re-tested varTypeIsSmall; since normalization never produces a small type, the
            // whole dance reduces to a single small-type check.
            return !varTypeIsSmall(lclTyp);
        }
        default:
            break;
    }
    return false;
}
// impResolveToken: Read a 4-byte metadata token at "addr" and resolve it via
// the EE, filling in "pResolvedToken". Under verification, a resolution
// failure raises a verification error instead of an EE exception.
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
    // Fill in the token descriptor before asking the EE to resolve it.
    pResolvedToken->tokenContext = impTokenLookupContextHandle;
    pResolvedToken->tokenScope   = info.compScopeHnd;
    pResolvedToken->token        = getU4LittleEndian(addr);
    pResolvedToken->tokenType    = kind;

    if (tiVerificationNeeded)
    {
        Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
    }
    else
    {
        info.compCompHnd->resolveToken(pResolvedToken);
    }
}
/*****************************************************************************
 *
 *  Pop one tree from the stack.
 *
 *  Returns the popped StackEntry (tree plus its verification type info).
 *  Throws BADCODE on IL stack underflow.
 */
StackEntry Compiler::impPopStack()
{
    if (verCurrentState.esStackDepth == 0)
    {
        BADCODE("stack underflow");
    }
#ifdef DEBUG
#if VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        JITDUMP("\n");
        printf(TI_DUMP_PADDING);
        printf("About to pop from the stack: ");
        const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
        ti.Dump();
    }
#endif // VERBOSE_VERIFY
#endif // DEBUG
    return verCurrentState.esStack[--verCurrentState.esStackDepth];
}
/*****************************************************************************
 *
 *  Peep at n'th (0-based) tree on the top of the stack.
 *  Throws BADCODE if the stack does not hold at least n + 1 entries.
 */
StackEntry& Compiler::impStackTop(unsigned n)
{
    if (n >= verCurrentState.esStackDepth)
    {
        BADCODE("stack underflow");
    }
    const unsigned slot = verCurrentState.esStackDepth - 1 - n;
    return verCurrentState.esStack[slot];
}
// impStackHeight: return the current depth of the importer's evaluation stack.
unsigned Compiler::impStackHeight()
{
    return verCurrentState.esStackDepth;
}
/*****************************************************************************
 *  Some of the trees are spilled specially. While unspilling them, or
 *  making a copy, these need to be handled specially. The function
 *  enumerates the operators possible after spilling.
 */
#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTree* tree)
{
    // After spilling, a stack entry must be either a local variable
    // reference or a constant node.
    return (tree->gtOper == GT_LCL_VAR) || tree->OperIsConst();
}
#endif
/*****************************************************************************
 *
 *  The following logic is used to save/restore stack contents.
 *  If 'copy' is true, then we make a copy of the trees on the stack. These
 *  have to all be cloneable/spilled values.
 *
 *  Arguments:
 *     savePtr - receives the saved depth and (possibly cloned) entries.
 *     copy    - clone each tree instead of sharing the nodes.
 */
void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
    savePtr->ssDepth = verCurrentState.esStackDepth;
    if (verCurrentState.esStackDepth)
    {
        savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
        size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
        if (copy)
        {
            StackEntry* table = savePtr->ssTrees;
            /* Make a fresh copy of all the stack entries */
            for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
            {
                table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
                GenTree* tree     = verCurrentState.esStack[level].val;
                // Only spilled entry shapes (locals/constants) are cloneable here.
                assert(impValidSpilledStackEntry(tree));
                switch (tree->gtOper)
                {
                    case GT_CNS_INT:
                    case GT_CNS_LNG:
                    case GT_CNS_DBL:
                    case GT_CNS_STR:
                    case GT_LCL_VAR:
                        table->val = gtCloneExpr(tree);
                        break;
                    default:
                        assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
                        break;
                }
            }
        }
        else
        {
            // No copy requested: a shallow bitwise save of the entries suffices.
            memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
        }
    }
}
// impRestoreStackState: restore the importer stack from a state previously
// captured by impSaveStackState (shallow bitwise copy of the saved entries).
void Compiler::impRestoreStackState(SavedStack* savePtr)
{
    verCurrentState.esStackDepth = savePtr->ssDepth;
    if (verCurrentState.esStackDepth)
    {
        memcpy(verCurrentState.esStack, savePtr->ssTrees,
               verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
    }
}
//------------------------------------------------------------------------
// impBeginTreeList: Get the tree list started for a new basic block.
//
// The list must already be empty (impEndTreeList clears it), so this only
// asserts the invariant.
//
inline void Compiler::impBeginTreeList()
{
    assert(impStmtList == nullptr && impLastStmt == nullptr);
}
/*****************************************************************************
 *
 *  Store the given start and end stmt in the given basic block. This is
 *  mostly called by impEndTreeList(BasicBlock *block). It is called
 *  directly only for handling CEE_LEAVEs out of finally-protected try's.
 *
 *  Also marks the block BBF_IMPORTED; the block must not already carry it.
 */
inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt)
{
    /* Make the list circular, so that we can easily walk it backwards */
    firstStmt->SetPrevStmt(lastStmt);
    /* Store the tree list in the basic block */
    block->bbStmtList = firstStmt;
    /* The block should not already be marked as imported */
    assert((block->bbFlags & BBF_IMPORTED) == 0);
    block->bbFlags |= BBF_IMPORTED;
}
//------------------------------------------------------------------------
// impEndTreeList: Store the current tree list in the given basic block.
//
// Arguments:
//    block - the basic block to store into.
//
// Notes:
//    Marks the block BBF_IMPORTED and resets the importer's pending
//    statement list. In DEBUG, also closes out the IL offset range of the
//    last statement that recorded one.
//
inline void Compiler::impEndTreeList(BasicBlock* block)
{
    if (impStmtList == nullptr)
    {
        // The block should not already be marked as imported.
        assert((block->bbFlags & BBF_IMPORTED) == 0);
        // Empty block. Just mark it as imported.
        block->bbFlags |= BBF_IMPORTED;
    }
    else
    {
        impEndTreeList(block, impStmtList, impLastStmt);
    }
#ifdef DEBUG
    if (impLastILoffsStmt != nullptr)
    {
        impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
        impLastILoffsStmt = nullptr;
    }
#endif
    impStmtList = impLastStmt = nullptr;
}
/*****************************************************************************
 *
 *  Check that storing the given tree doesn't mess up the semantic order. Note
 *  that this has only limited value as we can only check [0..chkLevel).
 *
 *  DEBUG-only sanity check; a no-op in release builds.
 */
inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel)
{
#ifndef DEBUG
    return;
#else
    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }
    if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
    {
        return;
    }
    GenTree* tree = stmt->GetRootNode();
    // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
    if (tree->gtFlags & GTF_CALL)
    {
        for (unsigned level = 0; level < chkLevel; level++)
        {
            assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
        }
    }
    if (tree->gtOper == GT_ASG)
    {
        // For an assignment to a local variable, all references of that
        // variable have to be spilled. If it is aliased, all calls and
        // indirect accesses have to be spilled
        if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
        {
            unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
                assert(!lvaTable[lclNum].lvAddrExposed ||
                       (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
            }
        }
        // If the access may be to global memory, all side effects have to be spilled.
        else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF)
        {
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
            }
        }
    }
#endif
}
/*****************************************************************************
 *
 *  Append the given statement to the current block's tree list.
 *  [0..chkLevel) is the portion of the stack which we will check for
 *    interference with stmt and spill if needed.
 *
 *  Side effects on the checked portion of the stack that could be reordered
 *  past "stmt" are spilled to temps first to preserve IL evaluation order.
 */
inline void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel)
{
    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }
    if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE))
    {
        assert(chkLevel <= verCurrentState.esStackDepth);
        /* If the statement being appended has any side-effects, check the stack
           to see if anything needs to be spilled to preserve correct ordering. */
        GenTree* expr  = stmt->GetRootNode();
        unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
        // Assignment to (unaliased) locals don't count as a side-effect as
        // we handle them specially using impSpillLclRefs(). Temp locals should
        // be fine too.
        if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
            ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2))
        {
            unsigned op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT;
            assert(flags == (op2Flags | GTF_ASG));
            flags = op2Flags;
        }
        if (flags != 0)
        {
            bool spillGlobEffects = false;
            if ((flags & GTF_CALL) != 0)
            {
                // If there is a call, we have to spill global refs
                spillGlobEffects = true;
            }
            else if (!expr->OperIs(GT_ASG))
            {
                if ((flags & GTF_ASG) != 0)
                {
                    // The expression is not an assignment node but it has an assignment side effect, it
                    // must be an atomic op, HW intrinsic or some other kind of node that stores to memory.
                    // Since we don't know what it assigns to, we need to spill global refs.
                    spillGlobEffects = true;
                }
            }
            else
            {
                GenTree* lhs = expr->gtGetOp1();
                GenTree* rhs = expr->gtGetOp2();
                if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0)
                {
                    // Either side of the assignment node has an assignment side effect.
                    // Since we don't know what it assigns to, we need to spill global refs.
                    spillGlobEffects = true;
                }
                else if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
                {
                    spillGlobEffects = true;
                }
            }
            impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
        }
        else
        {
            // No global effects in "stmt": only special side effects need spilling.
            impSpillSpecialSideEff();
        }
    }
    impAppendStmtCheck(stmt, chkLevel);
    impAppendStmt(stmt);
#ifdef FEATURE_SIMD
    impMarkContiguousSIMDFieldAssignments(stmt);
#endif
    /* Once we set impCurStmtOffs in an appended tree, we are ready to
       report the following offsets. So reset impCurStmtOffs */
    if (impLastStmt->GetILOffsetX() == impCurStmtOffs)
    {
        impCurStmtOffsSet(BAD_IL_OFFSET);
    }
#ifdef DEBUG
    if (impLastILoffsStmt == nullptr)
    {
        impLastILoffsStmt = stmt;
    }
    if (verbose)
    {
        printf("\n\n");
        gtDispStmt(stmt);
    }
#endif
}
//------------------------------------------------------------------------
// impAppendStmt: Add the statement to the current stmts list.
//
// Arguments:
//    stmt - the statement to add.
//
inline void Compiler::impAppendStmt(Statement* stmt)
{
    if (impStmtList != nullptr)
    {
        // Link the new statement onto the tail of the existing list.
        stmt->SetPrevStmt(impLastStmt);
        impLastStmt->SetNextStmt(stmt);
    }
    else
    {
        // First statement of the block.
        impStmtList = stmt;
    }
    impLastStmt = stmt;
}
//------------------------------------------------------------------------
// impExtractLastStmt: Extract the last statement from the current stmts list.
//
// Return Value:
//    The extracted statement.
//
// Notes:
//    It assumes that the stmt will be reinserted later.
//
Statement* Compiler::impExtractLastStmt()
{
    assert(impLastStmt != nullptr);

    Statement* const extracted = impLastStmt;
    impLastStmt                = extracted->GetPrevStmt();
    if (impLastStmt == nullptr)
    {
        // The list is now empty.
        impStmtList = nullptr;
    }
    return extracted;
}
//-------------------------------------------------------------------------
// impInsertStmtBefore: Insert the given "stmt" before "stmtBefore".
//
// Arguments:
//    stmt       - a statement to insert;
//    stmtBefore - an insertion point to insert "stmt" before.
//
inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore)
{
    assert(stmt != nullptr);
    assert(stmtBefore != nullptr);

    if (stmtBefore != impStmtList)
    {
        // Splice between the predecessor and "stmtBefore".
        Statement* const prevStmt = stmtBefore->GetPrevStmt();
        stmt->SetPrevStmt(prevStmt);
        prevStmt->SetNextStmt(stmt);
    }
    else
    {
        // Inserting at the head of the list.
        impStmtList = stmt;
    }
    stmt->SetNextStmt(stmtBefore);
    stmtBefore->SetPrevStmt(stmt);
}
/*****************************************************************************
 *
 *  Append the given expression tree to the current block's tree list.
 *  Return the newly created statement.
 */
Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
{
    assert(tree);

    // Wrap the tree in a statement and append it, spilling the checked
    // portion of the stack if needed.
    Statement* const newStmt = gtNewStmt(tree, offset);
    impAppendStmt(newStmt, chkLevel);
    return newStmt;
}
/*****************************************************************************
 *
 *  Insert the given expression tree before "stmtBefore"
 */
void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, Statement* stmtBefore)
{
    // Wrap the tree in a statement node, then splice it in ahead of "stmtBefore".
    Statement* const newStmt = gtNewStmt(tree, offset);
    impInsertStmtBefore(newStmt, stmtBefore);
}
/*****************************************************************************
*
* Append an assignment of the given value to a temp to the current tree list.
* curLevel is the stack level for which the spill to the temp is being done.
*/
void Compiler::impAssignTempGen(unsigned tmp,
GenTree* val,
unsigned curLevel,
Statement** pAfterStmt, /* = NULL */
IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
BasicBlock* block /* = NULL */
)
{
GenTree* asg = gtNewTempAssign(tmp, val);
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
Statement* asgStmt = gtNewStmt(asg, ilOffset);
fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
*pAfterStmt = asgStmt;
}
else
{
impAppendTree(asg, curLevel, impCurStmtOffs);
}
}
}
/*****************************************************************************
 * same as above, but handle the valueclass case too
 *
 * When "val" is a struct, the temp's layout is established from "structType"
 * (via lvaSetStruct) and the assignment goes through impAssignStruct.
 */
void Compiler::impAssignTempGen(unsigned             tmpNum,
                                GenTree*             val,
                                CORINFO_CLASS_HANDLE structType,
                                unsigned             curLevel,
                                Statement**          pAfterStmt, /* = NULL */
                                IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock*          block       /* = NULL */
                                )
{
    GenTree* asg;
    assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE);
    if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE))
    {
        assert(tmpNum < lvaCount);
        assert(structType != NO_CLASS_HANDLE);
        // if the method is non-verifiable the assert is not true
        // so at least ignore it in the case when verification is turned on
        // since any block that tries to use the temp would have failed verification.
        var_types varType = lvaTable[tmpNum].lvType;
        assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
        lvaSetStruct(tmpNum, structType, false);
        varType = lvaTable[tmpNum].lvType;
        // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
        // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
        // that has been passed in for the value being assigned to the temp, in which case we
        // need to set 'val' to that same type.
        // Note also that if we always normalized the types of any node that might be a struct
        // type, this would not be necessary - but that requires additional JIT/EE interface
        // calls that may not actually be required - e.g. if we only access a field of a struct.
        if (compDoOldStructRetyping())
        {
            val->gtType = varType;
        }
        GenTree* dst = gtNewLclvNode(tmpNum, varType);
        asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, ilOffset, block);
    }
    else
    {
        asg = gtNewTempAssign(tmpNum, val);
    }
    // A "nothing" node means there is nothing to append.
    if (!asg->IsNothingNode())
    {
        if (pAfterStmt)
        {
            // Insert after the caller-specified statement and advance the cursor.
            Statement* asgStmt = gtNewStmt(asg, ilOffset);
            fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
            *pAfterStmt = asgStmt;
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtOffs);
        }
    }
}
/*****************************************************************************
 *
 *  Pop the given number of values from the stack and return a list node with
 *  their values.
 *  The 'prefixTree' argument may optionally contain an argument
 *  list that is prepended to the list returned from this function.
 *
 *  The notion of prepended is a bit misleading in that the list is backwards
 *  from the way I would expect: The first element popped is at the end of
 *  the returned list, and prefixTree is 'before' that, meaning closer to
 *  the end of the list.  To get to prefixTree, you have to walk to the
 *  end of the list.
 *
 *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
 *  such we reverse its meaning such that returnValue has a reversed
 *  prefixTree at the head of the list.
 *
 *  When "sig" is supplied, each popped argument is additionally checked and
 *  coerced against the signature (implicit float<->double casts, widening
 *  casts, and GT_PUTARG_TYPE wrappers for small-type ABI requirements).
 */
GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs)
{
    assert(sig == nullptr || count == sig->numArgs);
    CORINFO_CLASS_HANDLE structType;
    GenTreeCall::Use*    argList;
    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        argList = nullptr;
    }
    else
    { // ARG_ORDER_L2R
        argList = prefixArgs;
    }
    while (count--)
    {
        StackEntry se   = impPopStack();
        typeInfo   ti   = se.seTypeInfo;
        GenTree*   temp = se.val;
        if (varTypeIsStruct(temp))
        {
            // Morph trees that aren't already OBJs or MKREFANY to be OBJs
            assert(ti.IsType(TI_STRUCT));
            structType = ti.GetClassHandleForValueClass();
            bool forceNormalization = false;
            if (varTypeIsSIMD(temp))
            {
                // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper
                // ABI handling of this argument.
                // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type
                // with a different baseType than we've seen.
                // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD
                // or a plain GT_IND.
                // TODO-Cleanup: Consider whether we can eliminate all of these cases.
                if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD))
                {
                    forceNormalization = true;
                }
            }
#ifdef DEBUG
            if (verbose)
            {
                printf("Calling impNormStructVal on:\n");
                gtDispTree(temp);
            }
#endif
            temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization);
#ifdef DEBUG
            if (verbose)
            {
                printf("resulting tree:\n");
                gtDispTree(temp);
            }
#endif
        }
        /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
        argList = gtPrependNewCallArg(temp, argList);
    }
    if (sig != nullptr)
    {
        if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
            sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
        {
            // Make sure that all valuetypes (including enums) that we push are loaded.
            // This is to guarantee that if a GC is triggerred from the prestub of this methods,
            // all valuetypes in the method signature are already loaded.
            // We need to be able to find the size of the valuetypes, but we cannot
            // do a class-load from within GC.
            info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
        }
        CORINFO_ARG_LIST_HANDLE sigArgs = sig->args;
        GenTreeCall::Use*       arg;
        // Walk the (now signature-ordered) argument list in parallel with the
        // signature's argument handles.
        for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--)
        {
            PREFIX_ASSUME(arg != nullptr);
            CORINFO_CLASS_HANDLE classHnd;
            CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd));
            var_types jitSigType = JITtype2varType(corType);
            if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet()))
            {
                BADCODE("the call argument has a type that can't be implicitly converted to the signature type");
            }
            // insert implied casts (from float to double or double to float)
            if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT))
            {
                arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE));
            }
            else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE))
            {
                arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT));
            }
            // insert any widening or narrowing casts for backwards compatibility
            arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType));
            if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
                corType != CORINFO_TYPE_VAR)
            {
                CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs);
                if (argRealClass != nullptr)
                {
                    // Make sure that all valuetypes (including enums) that we push are loaded.
                    // This is to guarantee that if a GC is triggered from the prestub of this methods,
                    // all valuetypes in the method signature are already loaded.
                    // We need to be able to find the size of the valuetypes, but we cannot
                    // do a class-load from within GC.
                    info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
                }
            }
            const var_types nodeArgType = arg->GetNode()->TypeGet();
            if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType))
            {
                assert(!varTypeIsStruct(nodeArgType));
                // Some ABI require precise size information for call arguments less than target pointer size,
                // for example arm64 OSX. Create a special node to keep this information until morph
                // consumes it into `fgArgInfo`.
                GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode());
                arg->SetNode(putArgType);
            }
            sigArgs = info.compCompHnd->getArgNext(sigArgs);
        }
    }
    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        // Prepend the prefixTree
        // Simple in-place reversal to place treeList
        // at the end of a reversed prefixTree
        while (prefixArgs != nullptr)
        {
            GenTreeCall::Use* next = prefixArgs->GetNext();
            prefixArgs->SetNext(argList);
            argList    = prefixArgs;
            prefixArgs = next;
        }
    }
    return argList;
}
// TypeIs: check whether two var_types values are the same (base case for the
// variadic overload below).
static bool TypeIs(var_types type1, var_types type2)
{
    return type1 == type2;
}
// Check if type1 matches any type from the list.
template <typename... T>
static bool TypeIs(var_types type1, var_types type2, T... rest)
{
    return TypeIs(type1, type2) || TypeIs(type1, rest...);
}
//------------------------------------------------------------------------
// impCheckImplicitArgumentCoercion: check that the node's type is compatible with
// the signature's type using ECMA implicit argument coercion table.
//
// Arguments:
//    sigType  - the type in the call signature;
//    nodeType - the node type.
//
// Return Value:
//    true if they are compatible, false otherwise.
//
// Notes:
//   - it is currently allowing byref->long passing, should be fixed in VM;
//   - it can't check long -> native int case on 64-bit platforms,
//      so the behavior is different depending on the target bitness.
//
bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const
{
    // Identical types always match.
    if (sigType == nodeType)
    {
        return true;
    }
    // Any small-int/int signature type accepts any small-int/int/native-int node.
    if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT))
    {
        if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL))
        {
            return true;
        }
    }
    else if (TypeIs(sigType, TYP_ULONG, TYP_LONG))
    {
        if (TypeIs(nodeType, TYP_LONG))
        {
            return true;
        }
    }
    else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE))
    {
        if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE))
        {
            return true;
        }
    }
    else if (TypeIs(sigType, TYP_BYREF))
    {
        if (TypeIs(nodeType, TYP_I_IMPL))
        {
            return true;
        }
        // This condition tolerates such IL:
        // ;  V00 this              ref  this class-hnd
        // ldarg.0
        // call(byref)
        if (TypeIs(nodeType, TYP_REF))
        {
            return true;
        }
    }
    else if (varTypeIsStruct(sigType))
    {
        if (varTypeIsStruct(nodeType))
        {
            return true;
        }
    }
    // This condition should not be under `else` because `TYP_I_IMPL`
    // intersects with `TYP_LONG` or `TYP_INT`.
    if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL))
    {
        // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms,
        // but we can't distinguish `nint` from `long` there.
        if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT))
        {
            return true;
        }
        // It tolerates IL that ECMA does not allow but that is commonly used.
        // Example:
        //   V02 loc1           struct <RTL_OSVERSIONINFOEX, 32>
        //   ldloca.s     0x2
        //   call(native int)
        if (TypeIs(nodeType, TYP_BYREF))
        {
            return true;
        }
    }
    return false;
}
/*****************************************************************************
 *
 *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
 *  The first "skipReverseCount" items are not reversed.
 *
 *  Pops the args via impPopCallArgs (which yields them in push order after its
 *  own processing) and then reverses the tail of the resulting use list in place.
 */
GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
{
    assert(skipReverseCount <= count);

    GenTreeCall::Use* list = impPopCallArgs(count, sig);

    // reverse the list
    if (list == nullptr || skipReverseCount == count)
    {
        // Nothing to reverse: either there are no args at all,
        // or every arg is in the "do not reverse" prefix.
        return list;
    }

    GenTreeCall::Use* ptr          = nullptr; // Initialized to the first node that needs to be reversed
    GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed

    if (skipReverseCount == 0)
    {
        // Reverse the entire list.
        ptr = list;
    }
    else
    {
        lastSkipNode = list;
        // Get to the first node that needs to be reversed
        for (unsigned i = 0; i < skipReverseCount - 1; i++)
        {
            lastSkipNode = lastSkipNode->GetNext();
        }

        PREFIX_ASSUME(lastSkipNode != nullptr);
        ptr = lastSkipNode->GetNext();
    }

    // Classic in-place singly-linked-list reversal of the suffix starting at 'ptr'.
    // 'ptr' is non-null here because skipReverseCount < count and the list is non-empty.
    GenTreeCall::Use* reversedList = nullptr;

    do
    {
        GenTreeCall::Use* tmp = ptr->GetNext();
        ptr->SetNext(reversedList);
        reversedList = ptr;
        ptr          = tmp;
    } while (ptr != nullptr);

    if (skipReverseCount)
    {
        // Re-attach the reversed suffix after the unreversed prefix.
        lastSkipNode->SetNext(reversedList);
        return list;
    }
    else
    {
        return reversedList;
    }
}
//------------------------------------------------------------------------
// impAssignStruct: Create a struct assignment
//
// Arguments:
//    dest         - the destination of the assignment
//    src          - the value to be assigned
//    structHnd    - handle representing the struct type
//    curLevel     - stack level for which a spill may be being done
//    pAfterStmt   - statement to insert any additional statements after
//    ilOffset     - il offset for new statements
//    block        - block to insert any additional statements in
//
// Return Value:
//    The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
//    Temp assignments may be appended to impStmtList if spilling is necessary.
GenTree* Compiler::impAssignStruct(GenTree*             dest,
                                   GenTree*             src,
                                   CORINFO_CLASS_HANDLE structHnd,
                                   unsigned             curLevel,
                                   Statement**          pAfterStmt, /* = nullptr */
                                   IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                   BasicBlock*          block       /* = nullptr */
                                   )
{
    assert(varTypeIsStruct(dest));

    // If the caller didn't supply an IL offset, attribute any new statements
    // to the current statement's offset.
    if (ilOffset == BAD_IL_OFFSET)
    {
        ilOffset = impCurStmtOffs;
    }

    // Peel GT_COMMA wrappers off the destination, emitting each comma's op1
    // (the side effect) as its own statement so evaluation order is preserved.
    while (dest->gtOper == GT_COMMA)
    {
        // Second thing is the struct.
        assert(varTypeIsStruct(dest->AsOp()->gtOp2));

        // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
        if (pAfterStmt)
        {
            // Caller supplied an insertion point: chain the side effect after it.
            Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, ilOffset);
            fgInsertStmtAfter(block, *pAfterStmt, newStmt);
            *pAfterStmt = newStmt;
        }
        else
        {
            impAppendTree(dest->AsOp()->gtOp1, curLevel, ilOffset); // do the side effect
        }

        // set dest to the second thing
        dest = dest->AsOp()->gtOp2;
    }

    assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
           dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);

    // Return a NOP if this is a self-assignment.
    if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
        src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum())
    {
        return gtNewNothingNode();
    }

    // TODO-1stClassStructs: Avoid creating an address if it is not needed,
    // or re-creating a Blk node if it is.
    GenTree* destAddr;

    if (dest->gtOper == GT_IND || dest->OperIsBlk())
    {
        // Indirections and block nodes already hold the address as op1; reuse it.
        destAddr = dest->AsOp()->gtOp1;
    }
    else
    {
        // Otherwise take the address of the destination tree.
        destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
    }

    // Delegate the actual copy to the address-based helper.
    return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, ilOffset, block));
}
//------------------------------------------------------------------------
// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
//
// Arguments:
//    destAddr     - address of the destination of the assignment
//    src          - source of the assignment
//    structHnd    - handle representing the struct type
//    curLevel     - stack level for which a spill may be being done
//    pAfterStmt   - statement to insert any additional statements after
//    ilOffset     - il offset for new statements
//    block        - block to insert any additional statements in
//
// Return Value:
//    The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
//    Temp assignments may be appended to impStmtList if spilling is necessary.
GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
                                      GenTree*             src,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      unsigned             curLevel,
                                      Statement**          pAfterStmt, /* = NULL */
                                      IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                      BasicBlock*          block       /* = NULL */
                                      )
{
    GenTree* dest      = nullptr;
    unsigned destFlags = 0;

    if (ilOffset == BAD_IL_OFFSET)
    {
        ilOffset = impCurStmtOffs;
    }

    assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) ||
           (src->TypeGet() != TYP_STRUCT && src->OperIsSimdOrHWintrinsic()));

    var_types asgType = src->TypeGet();

    if (src->gtOper == GT_CALL)
    {
        GenTreeCall* srcCall = src->AsCall();
        if (srcCall->TreatAsHasRetBufArg(this))
        {
            // Case of call returning a struct via hidden retbuf arg
            CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM)
            // Unmanaged instance methods on Windows need the retbuf arg after the first (this) parameter
            if (srcCall->IsUnmanaged())
            {
                if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv()))
                {
                    // Insert the return buffer as the second argument, right after 'this'.
                    // (The returned Use* is not needed, so it is intentionally discarded.)
                    gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs);
                }
                else
                {
#ifdef TARGET_X86
                    // The argument list has already been reversed.
                    // Insert the return buffer as the last node so it will be pushed on to the stack last
                    // as required by the native ABI.
                    GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
                    if (lastArg == nullptr)
                    {
                        srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
                    }
                    else
                    {
                        // Walk to the tail of the argument list.
                        for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
                            ;
                        gtInsertNewCallArgAfter(destAddr, lastArg);
                    }
#else
                    // insert the return value buffer into the argument list as first byref parameter
                    srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
#endif
                }
            }
            else
#endif
            {
                // insert the return value buffer into the argument list as first byref parameter
                srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
            }

            // now returns void, not a struct
            src->gtType = TYP_VOID;

            // return the morphed call node
            return src;
        }
        else
        {
            // Case of call returning a struct in one or more registers.

            var_types returnType = (var_types)srcCall->gtReturnType;

            if (compDoOldStructRetyping())
            {
                // We're not using a return buffer, so if we're retyping we'll change the type of 'src' to 'returnType'.
                src->gtType = genActualType(returnType);
            }

            // First we try to change this to "LclVar/LclFld = call"
            //
            if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR))
            {
                // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
                // That is, the IR will be of the form lclVar = call for multi-reg return
                //
                GenTreeLclVar* lcl    = destAddr->AsOp()->gtOp1->AsLclVar();
                unsigned       lclNum = lcl->GetLclNum();
                LclVarDsc*     varDsc = lvaGetDesc(lclNum);
                if (src->AsCall()->HasMultiRegRetVal())
                {
                    // Mark the struct LclVar as used in a MultiReg return context
                    //  which currently makes it non promotable.
                    // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                    // handle multireg returns.
                    lcl->gtFlags |= GTF_DONT_CSE;
                    varDsc->lvIsMultiRegRet = true;
                }
                else if ((lcl->gtType != src->gtType) && compDoOldStructRetyping())
                {
                    // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
                    lcl->ChangeOper(GT_LCL_FLD);
                    fgLclFldAssign(lclNum);
                    lcl->gtType = src->gtType;
                    asgType     = src->gtType;
                }

                dest = lcl;

#if defined(TARGET_ARM)
                // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
                // but that method has not been updated to include ARM.
                impMarkLclDstNotPromotable(lclNum, src, structHnd);
                lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(UNIX_AMD64_ABI)
                // Not allowed for FEATURE_CORECLR which is the only SKU available for System V OSs.
                assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs.");

                // Make the struct non promotable. The eightbytes could contain multiple fields.
                // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                // handle multireg returns.
                // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
                // non-multireg returns.
                lcl->gtFlags |= GTF_DONT_CSE;
                varDsc->lvIsMultiRegRet = true;
#endif
            }
            else // we don't have a GT_ADDR of a GT_LCL_VAR
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                asgType   = returnType;
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->gtOper == GT_RET_EXPR)
    {
        // The source is a placeholder for an inline candidate's return value;
        // operate on the underlying call.
        GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall();
        noway_assert(call->gtOper == GT_CALL);

        if (call->HasRetBufArg())
        {
            // insert the return value buffer into the argument list as first byref parameter
            call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs);

            // now returns void, not a struct
            src->gtType  = TYP_VOID;
            call->gtType = TYP_VOID;

            // We already have appended the write to 'dest' GT_CALL's args
            // So now we just return an empty node (pruning the GT_RET_EXPR)
            return src;
        }
        else
        {
            // Case of inline method returning a struct in one or more registers.
            // We won't need a return buffer
            if (compDoOldStructRetyping())
            {
                var_types returnType = (var_types)call->gtReturnType;
                asgType              = returnType;
                src->gtType          = genActualType(returnType);
                call->gtType         = src->gtType;
            }
            else
            {
                asgType = src->gtType;
            }

            if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR))
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->OperIsBlk())
    {
        // Block source: normalize the assignment type from the class handle.
        asgType = impNormStructType(structHnd);
        if (src->gtOper == GT_OBJ)
        {
            assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd);
        }
    }
    else if (src->gtOper == GT_INDEX)
    {
        asgType = impNormStructType(structHnd);
        assert(src->AsIndex()->gtStructElemClass == structHnd);
    }
    else if (src->gtOper == GT_MKREFANY)
    {
        // Since we are assigning the result of a GT_MKREFANY,
        // "destAddr" must point to a refany.
        // A refany has two fields (data pointer and type), so emit two scalar
        // assignments instead of a struct copy.

        GenTree* destAddrClone;
        destAddr =
            impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));

        assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
        assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);

        // The data-pointer field is at offset 0; attach a zero-offset field sequence.
        fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));

        GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
        GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
        typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
        GenTree* typeSlot =
            gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));

        // append the assign of the pointer value
        GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1);
        if (pAfterStmt)
        {
            Statement* newStmt = gtNewStmt(asg, ilOffset);
            fgInsertStmtAfter(block, *pAfterStmt, newStmt);
            *pAfterStmt = newStmt;
        }
        else
        {
            impAppendTree(asg, curLevel, ilOffset);
        }

        // return the assign of the type value, to be appended
        return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2);
    }
    else if (src->gtOper == GT_COMMA)
    {
        // The second thing is the struct or its address.
        assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF);
        if (pAfterStmt)
        {
            // Insert op1 after '*pAfterStmt'
            Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, ilOffset);
            fgInsertStmtAfter(block, *pAfterStmt, newStmt);
            *pAfterStmt = newStmt;
        }
        else if (impLastStmt != nullptr)
        {
            // Do the side-effect as a separate statement.
            impAppendTree(src->AsOp()->gtOp1, curLevel, ilOffset);
        }
        else
        {
            // In this case we have neither been given a statement to insert after, nor are we
            // in the importer where we can append the side effect.
            // Instead, we're going to sink the assignment below the COMMA.
            src->AsOp()->gtOp2 =
                impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, ilOffset, block);
            return src;
        }

        // Evaluate the second thing using recursion.
        return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, ilOffset, block);
    }
    else if (src->IsLocal())
    {
        asgType = src->TypeGet();
    }
    else if (asgType == TYP_STRUCT)
    {
        // It should already have the appropriate type.
        assert(asgType == impNormStructType(structHnd));
    }

    // If the destination address is ADDR(x), try to use 'x' directly as the
    // destination when its type is compatible with the assignment type.
    if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR))
    {
        GenTree* destNode = destAddr->gtGetOp1();
        // If the actual destination is a local, a GT_INDEX or a block node, or is a node that
        // will be morphed, don't insert an OBJ(ADDR) if it already has the right type.
        if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk())
        {
            var_types destType = destNode->TypeGet();
            // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible
            // iff their handles are the same.
            // Otherwise, they are compatible if their types are the same.
            bool typesAreCompatible =
                ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT))
                    ? ((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType))
                    : (destType == asgType);
            if (typesAreCompatible)
            {
                dest = destNode;
                if (destType != TYP_STRUCT)
                {
                    // Use a normalized type if available. We know from above that they're equivalent.
                    asgType = destType;
                }
            }
        }
    }

    if (dest == nullptr)
    {
        if (asgType == TYP_STRUCT)
        {
            // Wrap the address in an OBJ so the destination has struct shape.
            dest = gtNewObjNode(structHnd, destAddr);
            gtSetObjGcInfo(dest->AsObj());
            // Although an obj as a call argument was always assumed to be a globRef
            // (which is itself overly conservative), that is not true of the operands
            // of a block assignment.
            dest->gtFlags &= ~GTF_GLOB_REF;
            dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
        }
        else
        {
            dest = gtNewOperNode(GT_IND, asgType, destAddr);
        }
    }
    else if (compDoOldStructRetyping())
    {
        dest->gtType = asgType;
    }

    if (dest->OperIs(GT_LCL_VAR) &&
        (src->IsMultiRegNode() ||
         (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal())))
    {
        if (lvaEnregMultiRegVars && varTypeIsStruct(dest))
        {
            dest->AsLclVar()->SetMultiReg();
        }
        if (src->OperIs(GT_CALL))
        {
            lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true;
        }
    }

    dest->gtFlags |= destFlags;
    // Capture the final flags (including any GTF_DONT_CSE set above or by construction)
    // before gtBlockOpInit potentially adds more.
    destFlags = dest->gtFlags;

    // return an assignment node, to be appended
    GenTree* asgNode = gtNewAssignNode(dest, src);
    gtBlockOpInit(asgNode, dest, src, false);

    // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
    // of assignments.
    if ((destFlags & GTF_DONT_CSE) == 0)
    {
        dest->gtFlags &= ~(GTF_DONT_CSE);
    }
    return asgNode;
}
/*****************************************************************************
   Given a struct value, and the class handle for that structure, return
   the expression for the address for that structure value.

   willDeref - does the caller guarantee to dereference the pointer.
*/
GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 willDeref)
{
    assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));

    var_types type = structVal->TypeGet();

    genTreeOps oper = structVal->gtOper;

    if (oper == GT_OBJ && willDeref)
    {
        // The OBJ already wraps the address; since the caller promises to
        // dereference it, return the underlying address directly.
        assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd);
        return (structVal->AsObj()->Addr());
    }
    else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
             structVal->OperIsSimdOrHWintrinsic())
    {
        // These values have no addressable home: spill the value into a new
        // temp and return the address of that temp.
        unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

        impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

        // The 'return value' is now the temp itself

        type          = genActualType(lvaTable[tmpNum].TypeGet());
        GenTree* temp = gtNewLclvNode(tmpNum, type);
        temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
        return temp;
    }
    else if (oper == GT_COMMA)
    {
        assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct

        // Recurse on op2 (the struct), turning the comma's value into an address.
        Statement* oldLastStmt   = impLastStmt;
        structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref);
        structVal->gtType        = TYP_BYREF;

        if (oldLastStmt != impLastStmt)
        {
            // Some temp assignment statement was placed on the statement list
            // for Op2, but that would be out of order with op1, so we need to
            // spill op1 onto the statement list after whatever was last
            // before we recursed on Op2 (i.e. before whatever Op2 appended).
            Statement* beforeStmt;
            if (oldLastStmt == nullptr)
            {
                // The op1 stmt should be the first in the list.
                beforeStmt = impStmtList;
            }
            else
            {
                // Insert after the oldLastStmt before the first inserted for op2.
                beforeStmt = oldLastStmt->GetNextStmt();
            }

            impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtOffs, beforeStmt);
            // op1's side effect now lives in its own statement; replace it with a NOP.
            structVal->AsOp()->gtOp1 = gtNewNothingNode();
        }

        return (structVal);
    }

    // Default case (e.g. a local): the value is addressable in place.
    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
//------------------------------------------------------------------------
// impNormStructType: Normalize the type of a (known to be) struct class handle.
//
// Arguments:
//    structHnd     - The class handle for the struct type of interest.
//    pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                    type, set to the SIMD base type
//
// Return Value:
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Notes:
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known
//    call structSizeMightRepresentSIMDType to determine if this api needs to be called.
var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types* pSimdBaseType)
{
    assert(structHnd != NO_CLASS_HANDLE);

#ifdef FEATURE_SIMD
    if (supportSIMDTypes())
    {
        const DWORD classAttribs = info.compCompHnd->getClassAttribs(structHnd);

        // A struct containing GC pointers or byrefs can never be a SIMD type.
        if ((classAttribs & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_CONTAINS_STACK_PTR)) == 0)
        {
            const unsigned classSize = info.compCompHnd->getClassSize(structHnd);
            if (structSizeMightRepresentSIMDType(classSize))
            {
                unsigned int simdSize;
                var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &simdSize);
                if (simdBaseType != TYP_UNKNOWN)
                {
                    // It is a recognized SIMD type: report the base type if requested,
                    // note the FP register usage, and return the corresponding TYP_SIMD*.
                    assert(simdSize == classSize);
                    if (pSimdBaseType != nullptr)
                    {
                        *pSimdBaseType = simdBaseType;
                    }

                    // Also indicate that we use floating point registers.
                    compFloatingPointUsed = true;

                    return getSIMDTypeForSize(simdSize);
                }
            }
        }
    }
#endif // FEATURE_SIMD

    // Not a SIMD candidate: it remains an opaque struct.
    return TYP_STRUCT;
}
//------------------------------------------------------------------------
// Compiler::impNormStructVal: Normalize a struct value
//
// Arguments:
//    structVal          - the node we are going to normalize
//    structHnd          - the class handle for the node
//    curLevel           - the current stack level
//    forceNormalization - Force the creation of an OBJ node (default is false).
//
// Notes:
//    Given struct value 'structVal', make sure it is 'canonical', that is
//    it is either:
//    - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8)
//    - an OBJ or a MKREFANY node, or
//    - a node (e.g. GT_INDEX) that will be morphed.
//    If the node is a CALL or RET_EXPR, a copy will be made to a new temp.
//
GenTree* Compiler::impNormStructVal(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 forceNormalization /*=false*/)
{
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);

    // Normalize TYP_STRUCT to a specific SIMD type if the handle describes one.
    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;
    if (structType == TYP_STRUCT)
    {
        structType = impNormStructType(structHnd);
    }

    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();
    switch (oper)
    {
        // GT_RETURN and GT_MKREFANY don't capture the handle.
        case GT_RETURN:
            break;
        case GT_MKREFANY:
            alreadyNormalized = true;
            break;

        case GT_CALL:
            // Record the return class handle; the call result will be spilled to a temp below.
            structVal->AsCall()->gtRetClsHnd = structHnd;
            makeTemp                         = true;
            break;

        case GT_RET_EXPR:
            // Same treatment as GT_CALL: record the handle and spill to a temp.
            structVal->AsRetExpr()->gtRetClsHnd = structHnd;
            makeTemp                            = true;
            break;

        case GT_ARGPLACE:
            // Placeholder node for a deferred argument; just record the handle.
            structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd;
            break;

        case GT_INDEX:
            // This will be transformed to an OBJ later.
            alreadyNormalized                    = true;
            structVal->AsIndex()->gtStructElemClass = structHnd;
            structVal->AsIndex()->gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
            break;

        case GT_FIELD:
            // Wrap it in a GT_OBJ, if needed.
            structVal->gtType = structType;
            if ((structType == TYP_STRUCT) || forceNormalization)
            {
                structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            }
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
            // Remember the local so that GTF_GLOB_REF can be cleared at the end.
            structLcl = structVal->AsLclVarCommon();
            // Wrap it in a GT_OBJ.
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            FALLTHROUGH;

        case GT_OBJ:
        case GT_BLK:
        case GT_DYN_BLK:
        case GT_ASG:
            // These should already have the appropriate type.
            assert(structVal->gtType == structType);
            alreadyNormalized = true;
            break;

        case GT_IND:
            // Convert the bare indirection into an OBJ over the same address.
            assert(structVal->gtType == structType);
            structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
            alreadyNormalized = true;
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
        case GT_HWINTRINSIC:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif

        case GT_COMMA:
        {
            // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
            GenTree* blockNode = structVal->AsOp()->gtOp2;
            assert(blockNode->gtType == structType);

            // Is this GT_COMMA(op1, GT_COMMA())?
            GenTree* parent = structVal;
            if (blockNode->OperGet() == GT_COMMA)
            {
                // Find the last node in the comma chain.
                do
                {
                    assert(blockNode->gtType == structType);
                    parent    = blockNode;
                    blockNode = blockNode->AsOp()->gtOp2;
                } while (blockNode->OperGet() == GT_COMMA);
            }

            if (blockNode->OperGet() == GT_FIELD)
            {
                // If we have a GT_FIELD then wrap it in a GT_OBJ.
                blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
            }

#ifdef FEATURE_SIMD
            if (blockNode->OperIsSimdOrHWintrinsic())
            {
                // Normalize the SIMD value in place at the tail of the comma chain.
                parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
                alreadyNormalized     = true;
            }
            else
#endif
            {
                noway_assert(blockNode->OperIsBlk());

                // Sink the GT_COMMA below the blockNode addr.
                // That is GT_COMMA(op1, op2=blockNode) is tranformed into
                // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
                //
                // In case of a chained GT_COMMA case, we sink the last
                // GT_COMMA below the blockNode addr.
                GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1;
                assert(blockNodeAddr->gtType == TYP_BYREF);
                GenTree* commaNode       = parent;
                commaNode->gtType        = TYP_BYREF;
                commaNode->AsOp()->gtOp2 = blockNodeAddr;
                blockNode->AsOp()->gtOp1 = commaNode;
                if (parent == structVal)
                {
                    // The block node is now the root of the tree.
                    structVal = blockNode;
                }
                alreadyNormalized = true;
            }
        }
        break;

        default:
            noway_assert(!"Unexpected node in impNormStructVal()");
            break;
    }

    // Stamp the (possibly SIMD-normalized) type on the result.
    structVal->gtType = structType;

    if (!alreadyNormalized || forceNormalization)
    {
        if (makeTemp)
        {
            // Spill call/ret-expr results into a temp and use the temp as the value.
            unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

            impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

            // The structVal is now the temp itself

            structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
            structVal = structLcl;
        }
        if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk())
        {
            // Wrap it in a GT_OBJ
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
        }
    }

    if (structLcl != nullptr)
    {
        // A OBJ on a ADDR(LCL_VAR) can never raise an exception
        // so we don't set GTF_EXCEPT here.
        if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum()))
        {
            structVal->gtFlags &= ~GTF_GLOB_REF;
        }
    }
    else if (structVal->OperIsBlk())
    {
        // In general a OBJ is an indirection and could raise an exception.
        structVal->gtFlags |= GTF_EXCEPT;
    }
    return structVal;
}
/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    BOOL*                   pRuntimeLookup /* = NULL */,
                                    BOOL                    mustRestoreHandle /* = FALSE */,
                                    BOOL                    importParent /* = FALSE */)
{
    assert(!fgGlobalMorph);

    // Ask the EE how to embed this token's handle (direct or via runtime lookup).
    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);

    // Report back to the caller whether a runtime lookup is needed, if requested.
    if (pRuntimeLookup)
    {
        *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
    }

    if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        // The caller requires the handle's owning entity to be fully loaded before
        // the generated code runs; tell the EE depending on the handle kind.
        switch (embedInfo.handleType)
        {
            case CORINFO_HANDLETYPE_CLASS:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_METHOD:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_FIELD:
                // For a field handle, it is the declaring class that must be loaded.
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
                break;

            default:
                break;
        }
    }

    // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
    GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
                                      embedInfo.compileTimeHandle);

    // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
    if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
    }

    return result;
}
//------------------------------------------------------------------------
// impLookupToTree: Build a tree that evaluates to the handle described by a lookup.
//
// Arguments:
//    pResolvedToken    - the resolved token the lookup came from
//    pLookup           - lookup descriptor: either a constant (direct or one
//                        indirection) or a runtime dictionary lookup
//    handleFlags       - GTF_ICON_* flags describing the handle kind
//    compileTimeHandle - the compile-time handle (for debug tracking and
//                        runtime-lookup construction)
//
// Return Value:
//    The handle tree, or nullptr if inlining must be aborted because the
//    runtime cannot support the required runtime-lookup shape.
//
GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                   CORINFO_LOOKUP*         pLookup,
                                   unsigned                handleFlags,
                                   void*                   compileTimeHandle)
{
    if (!pLookup->lookupKind.needsRuntimeLookup)
    {
        // No runtime lookup is required.
        // Access is direct or memory-indirect (of a fixed address) reference

        CORINFO_GENERIC_HANDLE handle       = nullptr;
        void*                  pIndirection = nullptr;
        assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);

        if (pLookup->constLookup.accessType == IAT_VALUE)
        {
            // Direct: embed the handle value itself.
            handle = pLookup->constLookup.handle;
        }
        else if (pLookup->constLookup.accessType == IAT_PVALUE)
        {
            // Indirect: embed the address of the cell holding the handle.
            pIndirection = pLookup->constLookup.addr;
        }
        GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);

#ifdef DEBUG
        // Record the compile-time handle on the icon node for JIT dump/diagnostic purposes.
        size_t handleToTrack;
        if (handleFlags == GTF_ICON_TOKEN_HDL)
        {
            // Token handles have no meaningful compile-time handle to track.
            handleToTrack = 0;
        }
        else
        {
            handleToTrack = (size_t)compileTimeHandle;
        }

        if (handle != nullptr)
        {
            addr->AsIntCon()->gtTargetHandle = handleToTrack;
        }
        else
        {
            // For the indirect case the icon is wrapped in an indirection; tag its operand.
            addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack;
        }
#endif
        return addr;
    }

    if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED)
    {
        // Runtime does not support inlining of all shapes of runtime lookups
        // Inlining has to be aborted in such a case
        assert(compIsForInlining());
        compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
        return nullptr;
    }

    // Need to use dictionary-based access which depends on the typeContext
    // which is only available at runtime, not at compile-time.
    return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
}
#ifdef FEATURE_READYTORUN_COMPILER
//------------------------------------------------------------------------
// impReadyToRunLookupToTree: Build a handle tree from an R2R constant lookup.
//
// Arguments:
//    pLookup           - the constant lookup (direct value or one indirection)
//    handleFlags       - GTF_ICON_* flags describing the handle kind
//    compileTimeHandle - the compile-time handle, recorded for debug tracking
//
// Return Value:
//    An icon (or indirected icon) node for the handle.
//
GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
                                             unsigned              handleFlags,
                                             void*                 compileTimeHandle)
{
    // Double-indirect access types are not expected here.
    assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);

    CORINFO_GENERIC_HANDLE handle       = nullptr;
    void*                  pIndirection = nullptr;

    if (pLookup->accessType == IAT_VALUE)
    {
        // Direct: the handle value itself is known.
        handle = pLookup->handle;
    }
    else if (pLookup->accessType == IAT_PVALUE)
    {
        // Indirect: only the address of the cell holding the handle is known.
        pIndirection = pLookup->addr;
    }

    GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);

#ifdef DEBUG
    assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL));

    // Tag the underlying icon node with the compile-time handle for diagnostics.
    GenTreeIntCon* iconNode   = (handle != nullptr) ? addr->AsIntCon() : addr->gtGetOp1()->AsIntCon();
    iconNode->gtTargetHandle  = (size_t)compileTimeHandle;
#endif // DEBUG

    return addr;
}
//------------------------------------------------------------------------
// impReadyToRunHelperToTree: Build a call to an R2R helper, with its entry point.
//
// Arguments:
//    pResolvedToken     - token the helper operates on
//    helper             - the helper to call
//    type               - return type of the helper call
//    args               - optional argument list for the call
//    pGenericLookupKind - optional lookup kind (generics only)
//
// Return Value:
//    The helper call node, or nullptr if the runtime cannot provide an
//    R2R entry point for this helper.
//
GenTreeCall* Compiler::impReadyToRunHelperToTree(
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CorInfoHelpFunc         helper,
    var_types               type,
    GenTreeCall::Use*       args /* = nullptr */,
    CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
{
    // Ask the runtime for the ready-to-run entry point of this helper.
    CORINFO_CONST_LOOKUP lookup;
    const bool haveEntryPoint = info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup);
    if (!haveEntryPoint)
    {
        return nullptr;
    }

    GenTreeCall* helperCall = gtNewHelperCallNode(helper, type, args);
    helperCall->setEntryPoint(lookup);

    return helperCall;
}
#endif
//------------------------------------------------------------------------
// impMethodPointer: Build a tree that evaluates to a method's code pointer.
//
// Arguments:
//    pResolvedToken - the resolved method token
//    pCallInfo      - call info describing how the method address is obtained
//
// Return Value:
//    The function-pointer tree (nullptr only on an unreachable unknown kind).
//
GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTree* methodPtr = nullptr;

    switch (pCallInfo->kind)
    {
        case CORINFO_CALL:
            // The address is known: embed it as a function-pointer constant.
            methodPtr = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);

#ifdef FEATURE_READYTORUN_COMPILER
            if (opts.IsReadyToRun())
            {
                // R2R: also record the entry-point lookup on the node.
                methodPtr->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
            }
#endif
            break;

        case CORINFO_CALL_CODE_POINTER:
            // The code pointer must be obtained via a (possibly runtime) lookup.
            methodPtr =
                impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
            break;

        default:
            noway_assert(!"unknown call kind");
            break;
    }

    return methodPtr;
}
//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
// Arguments:
//    kind - lookup kind.
//
// Return Value:
//    Return GenTree pointer to generic shared context.
//
// Notes:
//    Reports about generic context using.
GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
    // Collectible types requires that for shared generic code, if we use the generic context parameter
    // that we report it. (This is a conservative approach, we could detect some cases particularly when the
    // context parameter is this that we don't need the eager reporting logic.)
    lvaGenericsContextInUse = true;

    Compiler* rootCompiler = impInlineRoot();

    GenTree* ctxTree;
    if (kind == CORINFO_LOOKUP_THISOBJ)
    {
        // this Object
        ctxTree = gtNewLclvNode(rootCompiler->info.compThisArg, TYP_REF);
        ctxTree->gtFlags |= GTF_VAR_CONTEXT;

        // context is the method table pointer of the this object
        ctxTree = gtNewMethodTableLookup(ctxTree);
    }
    else
    {
        assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);

        // Exact method descriptor as passed in
        ctxTree = gtNewLclvNode(rootCompiler->info.compTypeCtxtArg, TYP_I_IMPL);
        ctxTree->gtFlags |= GTF_VAR_CONTEXT;
    }

    return ctxTree;
}
/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
generic instantiations.
The lookup depends on the typeContext which is only available at
runtime, and not at compile-time.
pLookup->token1 and pLookup->token2 specify the handle that is needed.
The cases are:
1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
instantiation-specific handle, and the tokens to lookup the handle.
2. pLookup->indirections != CORINFO_USEHELPER :
2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
to get the handle.
2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
If it is non-NULL, it is the handle required. Else, call a helper
to lookup the handle.
*/
GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                          CORINFO_LOOKUP*         pLookup,
                                          void*                   compileTimeHandle)
{
    // Context the lookup chain starts from: "this"'s method table or the
    // hidden method/class descriptor argument (see getRuntimeContextTree).
    GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);

#if 0
    ctxTree->gtFlags |= GTF_DONT_CSE; // ToDo Remove this
#endif

    CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;

    // It's available only via the run-time helper function
    if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
    {
#ifdef FEATURE_READYTORUN_COMPILER
        if (opts.IsReadyToRun())
        {
            return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
                                             gtNewCallArgs(ctxTree), &pLookup->lookupKind);
        }
#endif
        return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle);
    }

    // Slot pointer
    GenTree* slotPtrTree = ctxTree;

    if (pRuntimeLookup->testForNull)
    {
        // Keep a second copy of the context: the helper fallback path (below)
        // needs the original context as an argument.
        slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                   nullptr DEBUGARG("impRuntimeLookup slot"));
    }

    GenTree* indOffTree    = nullptr;
    GenTree* lastIndOfTree = nullptr;

    // Apply repeated indirections
    for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
    {
        if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
        {
            // This offset must be added AFTER the dereference below, so clone
            // the current pointer before it gets indirected.
            indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                      nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
        }

        // The last indirection could be subject to a size check (dynamic dictionary expansion)
        bool isLastIndirectionWithSizeCheck =
            ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK));

        if (i != 0)
        {
            slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
            slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
            if (!isLastIndirectionWithSizeCheck)
            {
                // Intermediate dictionary pointers never change once published,
                // so these loads may be CSE'd/hoisted.
                slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
            }
        }

        if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
        {
            slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
        }

        if (pRuntimeLookup->offsets[i] != 0)
        {
            if (isLastIndirectionWithSizeCheck)
            {
                // Remember the dictionary pointer itself: the size check below
                // reads the dictionary's size slot through it.
                lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                             nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
            }
            slotPtrTree =
                gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
        }
    }

    // No null test required
    if (!pRuntimeLookup->testForNull)
    {
        if (pRuntimeLookup->indirections == 0)
        {
            return slotPtrTree;
        }

        slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
        slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;

        if (!pRuntimeLookup->testForFixup)
        {
            return slotPtrTree;
        }

        // testForFixup: the slot may hold a tagged (low-bit-set) fixup pointer
        // that must be dereferenced once to get the real handle.
        impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));

        unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
        impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);

        GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        // downcast the pointer to a TYP_INT on 64-bit targets
        slot = impImplicitIorI4Cast(slot, TYP_INT);
        // Use a GT_AND to check for the lowest bit and indirect if it is set
        GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
        GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));

        // slot = GT_IND(slot - 1)
        slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
        GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
        indir->gtFlags |= GTF_IND_NONFAULTING;
        indir->gtFlags |= GTF_IND_INVARIANT;

        // qmark: if the low bit was set, overwrite the temp with the
        // untagged, dereferenced value; otherwise do nothing.
        slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        GenTree* asg   = gtNewAssignNode(slot, indir);
        GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
        GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
        impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);

        return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
    }

    assert(pRuntimeLookup->indirections != 0);

    impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));

    // Extract the handle
    GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
    handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING;

    // Call the helper
    // - Setup argNode with the pointer to the signature returned by the lookup
    GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle);
    GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode);
    GenTreeCall*      helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);

    // Check for null and possibly call helper
    GenTree* nullCheck       = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL));
    GenTree* handleForResult = gtCloneExpr(handleForNullCheck);

    GenTree* result = nullptr;

    if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
    {
        // Dynamic dictionary expansion support
        assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0));

        // sizeValue = dictionary[pRuntimeLookup->sizeOffset]
        GenTreeIntCon* sizeOffset      = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL);
        GenTree*       sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset);
        GenTree*       sizeValue       = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset);
        sizeValue->gtFlags |= GTF_IND_NONFAULTING;

        // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i]
        GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL);
        GenTree* sizeCheck   = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue);

        // revert null check condition.
        nullCheck->ChangeOperUnchecked(GT_EQ);

        // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle).
        // Add checks and the handle as call arguments, indirect call transformer will handle this.
        helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs);
        helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs);
        helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs);

        result = helperCall;
        addExpRuntimeLookupCandidate(helperCall);
    }
    else
    {
        // handle != null ? handle : helperCall
        GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall);
        result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck);
    }

    // Spill the whole qmark/helper tree to a temp so the result is a simple local.
    unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree"));

    impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE);
    return gtNewLclvNode(tmp, TYP_I_IMPL);
}
/******************************************************************************
* Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
* else, grab a new temp.
* For structs (which can be pushed on the stack using obj, etc),
* special handling is needed
*/
//------------------------------------------------------------------------
// RecursiveGuard: RAII helper that detects (via assert) recursive entry
// into a region of code. Init() attaches the guard to a caller-owned bool
// and optionally raises it; the destructor always lowers the flag again
// on scope exit.
//
struct RecursiveGuard
{
public:
    RecursiveGuard() = default;

    // Guards must not be copied: a copy would lower the flag twice.
    RecursiveGuard(const RecursiveGuard&) = delete;
    RecursiveGuard& operator=(const RecursiveGuard&) = delete;

    ~RecursiveGuard()
    {
        if (m_pAddress)
        {
            // Lower the recursion flag so the guarded region may be re-entered.
            *m_pAddress = false;
        }
    }

    //------------------------------------------------------------------------
    // Init: attach the guard to a recursion flag.
    //
    // Arguments:
    //    pAddress    - the flag to guard; asserts if it is already raised
    //    bInitialize - if true, raise the flag now
    //
    void Init(bool* pAddress, bool bInitialize)
    {
        assert(pAddress && *pAddress == false && "Recursive guard violation");
        m_pAddress = pAddress;

        if (bInitialize)
        {
            *m_pAddress = true;
        }
    }

protected:
    // The guarded flag; nullptr until Init() is called (dtor then does nothing).
    bool* m_pAddress = nullptr;
};
//------------------------------------------------------------------------
// impSpillStackEntry: spill the given importer-stack entry to a local temp
// and replace the stack entry with a use of that temp.
//
// Arguments:
//    level              - stack level of the entry to spill
//    tnum               - temp to spill into, or BAD_VAR_NUM to grab a fresh one
//    bAssertOnRecursion - (DEBUG) assert if this is entered recursively
//    reason             - (DEBUG) reason string for a freshly grabbed temp
//
// Return Value:
//    true if the entry was spilled; false only on a bad temp request, which
//    can happen with invalid IL.
//
bool Compiler::impSpillStackEntry(unsigned level,
                                  unsigned tnum
#ifdef DEBUG
                                  ,
                                  bool        bAssertOnRecursion,
                                  const char* reason
#endif
                                  )
{

#ifdef DEBUG
    RecursiveGuard guard;
    guard.Init(&impNestedStackSpill, bAssertOnRecursion);
#endif

    GenTree* tree = verCurrentState.esStack[level].val;

    /* Allocate a temp if we haven't been asked to use a particular one */

    if (tiVerificationNeeded)
    {
        // Ignore bad temp requests (they will happen with bad code and will be
        // caught when importing the destblock)
        if (tnum != BAD_VAR_NUM && tnum >= lvaCount)
        {
            return false;
        }
    }
    else
    {
        if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
        {
            return false;
        }
    }

    bool isNewTemp = false;

    if (tnum == BAD_VAR_NUM)
    {
        tnum      = lvaGrabTemp(true DEBUGARG(reason));
        isNewTemp = true;
    }
    else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
    {
        // if verification is needed and tnum's type is incompatible with
        // type on that stack, we grab a new temp. This is safe since
        // we will throw a verification exception in the dest block.

        var_types valTyp = tree->TypeGet();
        var_types dstTyp = lvaTable[tnum].TypeGet();

        // if the two types are different, we return. This will only happen with bad code and will
        // be caught when importing the destblock. We still allow int/byrefs and float/double differences.
        if ((genActualType(valTyp) != genActualType(dstTyp)) &&
            !(
#ifndef TARGET_64BIT
                // On 32-bit, native int and byref are interchangeable here.
                (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
#endif // !TARGET_64BIT
                (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
        {
            return false;
        }
    }

    /* Assign the spilled entry to the temp */
    impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);

    // If temp is newly introduced and a ref type, grab what type info we can.
    if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
    {
        // A fresh spill temp is assigned exactly once.
        assert(lvaTable[tnum].lvSingleDef == 0);
        lvaTable[tnum].lvSingleDef = 1;
        JITDUMP("Marked V%02u as a single def temp\n", tnum);
        CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
        lvaSetClass(tnum, tree, stkHnd);

        // If we're assigning a GT_RET_EXPR, note the temp over on the call,
        // so the inliner can use it in case it needs a return spill temp.
        if (tree->OperGet() == GT_RET_EXPR)
        {
            JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum);
            GenTree*             call = tree->AsRetExpr()->gtInlineCandidate;
            InlineCandidateInfo* ici  = call->AsCall()->gtInlineCandidateInfo;
            ici->preexistingSpillTemp = tnum;
        }
    }

    // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
    var_types type                     = genActualType(lvaTable[tnum].TypeGet());
    GenTree*  temp                     = gtNewLclvNode(tnum, type);
    verCurrentState.esStack[level].val = temp;

    return true;
}
/*****************************************************************************
*
* Ensure that the stack has only spilled values
*/
void Compiler::impSpillStackEnsure(bool spillLeaves)
{
assert(!spillLeaves || opts.compDbgCode);
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
if (!spillLeaves && tree->OperIsLeaf())
{
continue;
}
// Temps introduced by the importer itself don't need to be spilled
bool isTempLcl =
(tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount);
if (isTempLcl)
{
continue;
}
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
}
}
// Spill every entry currently on the importer's evaluation stack to a temp,
// leaving the stack populated with uses of those temps.
void Compiler::impSpillEvalStack()
{
    for (unsigned i = 0; i < verCurrentState.esStackDepth; i++)
    {
        impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
    }
}
/*****************************************************************************
 *
 *  If the stack contains any trees with side effects in them, assign those
 *  trees to temps and append the assignments to the statement list.
 *  On return the stack is guaranteed to be empty.
 */
inline void Compiler::impEvalSideEffects()
{
    // First force all side effects to execute in order by spilling them...
    impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
    // ...then discard the (now side-effect-free) stack contents.
    verCurrentState.esStackDepth = 0;
}
/*****************************************************************************
 *
 *  If the stack contains any trees with side effects in them, assign those
 *  trees to temps and replace them on the stack with refs to their temps.
 *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
 */
inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
{
    assert(chkLevel != (unsigned)CHECK_SPILL_NONE);

    /* Before we make any appends to the tree list we must spill the
     * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */

    impSpillSpecialSideEff();

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    assert(chkLevel <= verCurrentState.esStackDepth);

    // GTF_GLOB_EFFECT additionally covers global (heap/static) reads/writes.
    unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;

    for (unsigned i = 0; i < chkLevel; i++)
    {
        GenTree* tree = verCurrentState.esStack[i].val;

        GenTree* lclVarTree;

        if ((tree->gtFlags & spillFlags) != 0 ||
            (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
             !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
             gtHasLocalsWithAddrOp(tree)))              // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
                                                        // lvAddrTaken flag.
        {
            impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
        }
    }
}
/*****************************************************************************
 *
 *  If the stack contains any trees with special side effects in them, assign
 *  those trees to temps and replace them on the stack with refs to their temps.
 */
inline void Compiler::impSpillSpecialSideEff()
{
    // Only blocks that begin a catch/filter handler can have a live
    // GT_CATCH_ARG that needs this careful handling.
    if (!compCurBB->bbCatchTyp)
    {
        return;
    }

    for (unsigned i = 0; i < verCurrentState.esStackDepth; i++)
    {
        // Spill any stack entry whose subtree contains the exception object.
        if (gtHasCatchArg(verCurrentState.esStack[i].val))
        {
            impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
        }
    }
}
/*****************************************************************************
*
* Spill all stack references to value classes (TYP_STRUCT nodes)
*/
void Compiler::impSpillValueClasses()
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
{
// Tree walk was aborted, which means that we found a
// value class on the stack. Need to spill that
// stack entry.
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
}
}
}
/*****************************************************************************
 *
 *  Callback that checks if a tree node is TYP_STRUCT
 */
Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
{
    // Abort the walk on the first TYP_STRUCT node; otherwise keep walking.
    return ((*pTree)->gtType == TYP_STRUCT) ? WALK_ABORT : WALK_CONTINUE;
}
/*****************************************************************************
 *
 *  If the stack contains any trees with references to local #lclNum, assign
 *  those trees to temps and replace their place on the stack with refs to
 *  their temps.
 */
void Compiler::impSpillLclRefs(ssize_t lclNum)
{
    /* Before we make any appends to the tree list we must spill the
     * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */

    impSpillSpecialSideEff();

    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        GenTree* tree = verCurrentState.esStack[level].val;

        /* If the tree may throw an exception, and the block has a handler,
           then we need to spill assignments to the local if the local is
           live on entry to the handler.
           Just spill 'em all without considering the liveness */

        bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));

        /* Skip the tree if it doesn't have an affected reference,
           unless xcptnCaught */

        if (xcptnCaught || gtHasRef(tree, lclNum, false))
        {
            impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
        }
    }
}
/*****************************************************************************
 *
 *  Push catch arg onto the stack.
 *  If there are jumps to the beginning of the handler, insert basic block
 *  and spill catch arg to a temp. Update the handler block if necessary.
 *
 *  Returns the basic block of the actual handler.
 */
BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
{
    // Do not inject the basic block twice on reimport. This should be
    // hit only under JIT stress. See if the block is the one we injected.
    // Note that EH canonicalization can inject internal blocks here. We might
    // be able to re-use such a block (but we don't, right now).
    if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
        (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
    {
        Statement* stmt = hndBlk->firstStmt();

        if (stmt != nullptr)
        {
            GenTree* tree = stmt->GetRootNode();
            assert(tree != nullptr);

            // Our previously-injected spill block begins with "tempLcl = GT_CATCH_ARG".
            if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
                (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG))
            {
                // Re-push the spill temp and return the block after the injected one.
                tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF);

                impPushOnStack(tree, typeInfo(TI_REF, clsHnd));

                return hndBlk->bbNext;
            }
        }

        // If we get here, it must have been some other kind of internal block. It's possible that
        // someone prepended something to our injected block, but that's unlikely.
    }

    /* Push the exception address value on the stack */
    GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);

    /* Mark the node as having a side-effect - i.e. cannot be
       moved around since it is tied to a fixed location (EAX) */
    arg->gtFlags |= GTF_ORDER_SIDEEFF;

#if defined(JIT32_GCENCODER)
    const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
#else
    const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
#endif // defined(JIT32_GCENCODER)

    /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
    if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
    {
        if (hndBlk->bbRefs == 1)
        {
            hndBlk->bbRefs++;
        }

        /* Create extra basic block for the spill */
        BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
        newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
        newBlk->setBBWeight(hndBlk->bbWeight);
        newBlk->bbCodeOffs = hndBlk->bbCodeOffs;

        /* Account for the new link we are about to create */
        hndBlk->bbRefs++;

        // Spill into a temp.
        unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
        lvaTable[tempNum].lvType = TYP_REF;
        GenTree* argAsg          = gtNewTempAssign(tempNum, arg);
        arg                      = gtNewLclvNode(tempNum, TYP_REF);

        // Record the incoming stack temp so reimports find it (see check above).
        hndBlk->bbStkTempsIn = tempNum;

        Statement* argStmt;

        if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
        {
            // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus
            // won't do it for us.
            impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
            argStmt        = gtNewStmt(argAsg, impCurStmtOffs);
        }
        else
        {
            argStmt = gtNewStmt(argAsg);
        }

        fgInsertStmtAtEnd(newBlk, argStmt);
    }

    impPushOnStack(arg, typeInfo(TI_REF, clsHnd));

    return hndBlk;
}
/*****************************************************************************
 *
 *  Given a tree, clone it. *pClone is set to the cloned tree.
 *  Returns the original tree if the cloning was easy,
 *  else returns the temp to which the tree had to be spilled to.
 *  If the tree has side-effects, it will be spilled to a temp.
 */
GenTree* Compiler::impCloneExpr(GenTree*             tree,
                                GenTree**            pClone,
                                CORINFO_CLASS_HANDLE structHnd,
                                unsigned             curLevel,
                                Statement** pAfterStmt DEBUGARG(const char* reason))
{
    // Side-effect-free trees can simply be duplicated node-for-node.
    if (!(tree->gtFlags & GTF_GLOB_EFFECT))
    {
        GenTree* clone = gtClone(tree, true);

        if (clone)
        {
            *pClone = clone;
            return tree;
        }
    }

    /* Store the operand in a temp and return the temp */

    unsigned temp = lvaGrabTemp(true DEBUGARG(reason));

    // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
    // return a struct type. It also may modify the struct type to a more
    // specialized type (e.g. a SIMD type). So we will get the type from
    // the lclVar AFTER calling impAssignTempGen().
    impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);

    var_types type = genActualType(lvaTable[temp].TypeGet());

    // Both the result and *pClone become uses of the same temp.
    *pClone = gtNewLclvNode(temp, type);
    return gtNewLclvNode(temp, type);
}
/*****************************************************************************
 * Remember the IL offset (including stack-empty info) for the trees we will
 * generate now.
 */
inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
{
    if (compIsForInlining())
    {
        // Inlinees report the IL offset of the call-site statement instead.
        Statement* const callStmt = impInlineInfo->iciStmt;
        impCurStmtOffs             = callStmt->GetILOffsetX();
        return;
    }

    assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);

    // Fold in whether the IL stack is non-empty at this offset.
    const IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
    impCurStmtOffs          = offs | stkBit;
}
/*****************************************************************************
 * Returns current IL offset with stack-empty and call-instruction info incorporated
 */
inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
{
    if (compIsForInlining())
    {
        // Inlinees do not report per-statement IL offsets.
        return BAD_IL_OFFSET;
    }

    assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);

    IL_OFFSETX result = offs;
    if (verCurrentState.esStackDepth > 0)
    {
        // The IL stack is non-empty at this offset.
        result |= IL_OFFSETX_STKBIT;
    }
    if (callInstruction)
    {
        // The offset corresponds to a call instruction.
        result |= IL_OFFSETX_CALLINSTRUCTIONBIT;
    }
    return result;
}
//------------------------------------------------------------------------
// impCanSpillNow: check is it possible to spill all values from eeStack to local variables.
//
// Arguments:
//    prevOpcode - last importer opcode
//
// Return Value:
//    true if it is legal, false if it could be a sequence that we do not want to divide.
bool Compiler::impCanSpillNow(OPCODE prevOpcode)
{
    // Don't spill after ldtoken, newarr and newobj, because it could be a part
    // of the InitializeArray sequence. Avoid breaking up the sequence so that
    // impInitializeArrayIntrinsic can still recognize it.
    switch (prevOpcode)
    {
        case CEE_LDTOKEN:
        case CEE_NEWARR:
        case CEE_NEWOBJ:
            return false;
        default:
            return true;
    }
}
/*****************************************************************************
*
* Remember the instr offset for the statements
*
* When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs),
* if the append was done because of a partial stack spill,
* as some of the trees corresponding to code up to impCurOpcOffs might
* still be sitting on the stack.
* So we delay calling of SetLastILOffset() until impNoteLastILoffs().
* This should be called when an opcode finally/explicitly causes
* impAppendTree(tree) to be called (as opposed to being called because of
* a spill caused by the opcode)
*/
#ifdef DEBUG
void Compiler::impNoteLastILoffs()
{
if (impLastILoffsStmt == nullptr)
{
// We should have added a statement for the current basic block
// Is this assert correct ?
assert(impLastStmt);
impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
}
else
{
impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
impLastILoffsStmt = nullptr;
}
}
#endif // DEBUG
/*****************************************************************************
* We don't create any GenTree (excluding spills) for a branch.
* For debugging info, we need a placeholder so that we can note
* the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
*/
void Compiler::impNoteBranchOffs()
{
    // Only needed for debuggable code: append an empty statement so the
    // branch's IL offset gets recorded in the statement list.
    if (!opts.compDbgCode)
    {
        return;
    }
    impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
/*****************************************************************************
 * Locate the next stmt boundary for which we need to record info.
 * We will have to spill the stack at such boundaries if it is not
 * already empty.
 * Returns the next stmt boundary (after the start of the block)
 */
unsigned Compiler::impInitBlockLineInfo()
{
    /* Assume the block does not correspond with any IL offset. This prevents
       us from reporting extra offsets. Extra mappings can cause confusing
       stepping, especially if the extra mapping is a jump-target, and the
       debugger does not ignore extra mappings, but instead rewinds to the
       nearest known offset */

    impCurStmtOffsSet(BAD_IL_OFFSET);

    // Inlinees do not report statement boundaries at all.
    if (compIsForInlining())
    {
        return ~0;
    }

    IL_OFFSET blockOffs = compCurBB->bbCodeOffs;

    if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
    {
        impCurStmtOffsSet(blockOffs);
    }

    // NOTE: deliberately disabled ("false &&"); call-site boundaries are
    // reported elsewhere during importation. Kept here for reference.
    if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
    {
        impCurStmtOffsSet(blockOffs);
    }

    /* Always report IL offset 0 or some tests get confused.
       Probably a good idea anyways */

    if (blockOffs == 0)
    {
        impCurStmtOffsSet(blockOffs);
    }

    if (!info.compStmtOffsetsCount)
    {
        return ~0;
    }

    /* Find the lowest explicit stmt boundary within the block */

    /* Start looking at an entry that is based on our instr offset */
    // Initial guess: boundaries are roughly evenly distributed over the IL.
    unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;

    if (index >= info.compStmtOffsetsCount)
    {
        index = info.compStmtOffsetsCount - 1;
    }

    /* If we've guessed too far, back up */

    while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
    {
        index--;
    }

    /* If we guessed short, advance ahead */

    while (info.compStmtOffsets[index] < blockOffs)
    {
        index++;

        if (index == info.compStmtOffsetsCount)
        {
            // No boundary at or after this block's start.
            return info.compStmtOffsetsCount;
        }
    }

    assert(index < info.compStmtOffsetsCount);

    if (info.compStmtOffsets[index] == blockOffs)
    {
        /* There is an explicit boundary for the start of this basic block.
           So we will start with bbCodeOffs. Else we will wait until we
           get to the next explicit boundary */

        impCurStmtOffsSet(blockOffs);

        index++;
    }

    return index;
}
/*****************************************************************************/
// Returns true for the IL opcodes that perform a call (call/calli/callvirt).
static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
{
    return (opcode == CEE_CALL) || (opcode == CEE_CALLI) || (opcode == CEE_CALLVIRT);
}
/*****************************************************************************/
// Returns true for opcodes that form a call-site boundary for debug-info
// purposes: the call opcodes plus jmp, newobj and newarr.
static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
{
    return (opcode == CEE_CALL) || (opcode == CEE_CALLI) || (opcode == CEE_CALLVIRT) || (opcode == CEE_JMP) ||
           (opcode == CEE_NEWOBJ) || (opcode == CEE_NEWARR);
}
/*****************************************************************************/
// One might think it is worth caching these values, but results indicate
// that it isn't.
// In addition, caching them causes SuperPMI to be unable to completely
// encapsulate an individual method context.
// Fetch the System.TypedReference class handle from the EE (uncached; see note above).
CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
{
    CORINFO_CLASS_HANDLE const hnd = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
    assert(hnd != (CORINFO_CLASS_HANDLE) nullptr);
    return hnd;
}
// Fetch the System.RuntimeTypeHandle class handle from the EE (uncached).
CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
{
    CORINFO_CLASS_HANDLE const hnd = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
    assert(hnd != (CORINFO_CLASS_HANDLE) nullptr);
    return hnd;
}
// Fetch the RuntimeArgumentHandle (arglist iterator) class handle from the EE (uncached).
CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
{
    CORINFO_CLASS_HANDLE const hnd = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
    assert(hnd != (CORINFO_CLASS_HANDLE) nullptr);
    return hnd;
}
// Fetch the System.String class handle from the EE (uncached).
CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
{
    CORINFO_CLASS_HANDLE const hnd = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
    assert(hnd != (CORINFO_CLASS_HANDLE) nullptr);
    return hnd;
}
// Fetch the System.Object class handle from the EE (uncached).
CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
{
    CORINFO_CLASS_HANDLE const hnd = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
    assert(hnd != (CORINFO_CLASS_HANDLE) nullptr);
    return hnd;
}
/*****************************************************************************
* "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
* set its type to TYP_BYREF when we create it. We know if it can be
* changed to TYP_I_IMPL only at the point where we use it
*/
/* static */
void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
{
    // Retype "&localVar" from TYP_BYREF to TYP_I_IMPL: the address of a local
    // never points into the GC heap, so no byref tracking is required.
    if (tree1->IsLocalAddrExpr() != nullptr)
    {
        tree1->gtType = TYP_I_IMPL;
    }

    // tree2 is optional and may be nullptr.
    if (tree2 && (tree2->IsLocalAddrExpr() != nullptr))
    {
        tree2->gtType = TYP_I_IMPL;
    }
}
/*****************************************************************************
 *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
 *  to make that an explicit cast in our trees, so any implicit casts that
 *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
 *  turned into explicit casts here.
 *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
 */
GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
{
    var_types currType   = genActualType(tree->gtType);
    var_types wantedType = genActualType(dstTyp);

    if (wantedType != currType)
    {
        // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
        if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
        {
            // Constants are simply retyped (no cast node needed); a null
            // constant (TYP_REF 0) may also become a TYP_I_IMPL zero.
            if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0)))
            {
                tree->gtType = TYP_I_IMPL;
            }
        }
#ifdef TARGET_64BIT
        else if (varTypeIsI(wantedType) && (currType == TYP_INT))
        {
            // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
            tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
        }
        else if ((wantedType == TYP_INT) && varTypeIsI(currType))
        {
            // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
            tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
        }
#endif // TARGET_64BIT
    }

    return tree;
}
/*****************************************************************************
 *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
 *  but we want to make that an explicit cast in our trees, so any implicit casts
 *  that exist in the IL are turned into explicit casts here.
 */
GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
{
    // Only insert a cast when both sides are floating-point and the
    // precision actually differs; anything else passes through unchanged.
    const bool needCast = varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType);
    if (needCast)
    {
        tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
    }
    return tree;
}
//------------------------------------------------------------------------
// impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
// with a GT_COPYBLK node.
//
// Arguments:
// sig - The InitializeArray signature.
//
// Return Value:
// A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
// nullptr otherwise.
//
// Notes:
// The function recognizes the following IL pattern:
// ldc <length> or a list of ldc <lower bound>/<length>
// newarr or newobj
// dup
// ldtoken <field handle>
// call InitializeArray
// The lower bounds need not be constant except when the array rank is 1.
// The function recognizes all kinds of arrays thus enabling a small runtime
// such as CoreRT to skip providing an implementation for InitializeArray.
GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
    assert(sig->numArgs == 2);

    // InitializeArray(array, fldHandle): the field-handle token is on top of
    // the evaluation stack and the array reference is just below it.
    GenTree* fieldTokenNode = impStackTop(0).val;
    GenTree* arrayLocalNode = impStackTop(1).val;

    //
    // Verify that the field token is known and valid. Note that it's also
    // possible for the token to come from reflection, in which case we cannot do
    // the optimization and must therefore revert to calling the helper. You can
    // see an example of this in bvt\DynIL\initarray2.exe (in Main).
    //

    // Check to see if the ldtoken helper call is what we see here.
    if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
        (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
    {
        return nullptr;
    }

    // Strip helper call away
    fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();

    // The helper argument may itself be wrapped in an indirection; look through it.
    if (fieldTokenNode->gtOper == GT_IND)
    {
        fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
    }

    // Check for constant
    if (fieldTokenNode->gtOper != GT_CNS_INT)
    {
        return nullptr;
    }

    // The constant must be a compile-time field handle; otherwise bail out.
    CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
    if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
    {
        return nullptr;
    }

    //
    // We need to get the number of elements in the array and the size of each element.
    // We verify that the newarr statement is exactly what we expect it to be.
    // If it's not then we just return NULL and we don't optimize this call
    //

    // It is possible that we don't have any statements in the block yet.
    if (impLastStmt == nullptr)
    {
        return nullptr;
    }

    //
    // We start by looking at the last statement, making sure it's an assignment, and
    // that the target of the assignment is the array passed to InitializeArray.
    //
    GenTree* arrayAssignment = impLastStmt->GetRootNode();
    if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
        (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() !=
                                                   arrayLocalNode->AsLclVarCommon()->GetLclNum()))
    {
        return nullptr;
    }

    //
    // Make sure that the object being assigned is a helper call.
    //
    GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2;
    if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER))
    {
        return nullptr;
    }

    //
    // Verify that it is one of the new array helpers.
    //
    bool isMDArray = false;

    if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
#ifdef FEATURE_READYTORUN_COMPILER
        && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
#endif
            )
    {
        // Not a single-dimensional array helper; the only other accepted shape
        // is the multi-dimensional (non-vararg) array allocation helper.
        if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
        {
            return nullptr;
        }

        isMDArray = true;
    }

    CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle;

    //
    // Make sure we found a compile time handle to the array
    //
    if (!arrayClsHnd)
    {
        return nullptr;
    }

    unsigned rank = 0;
    S_UINT32 numElements;

    if (isMDArray)
    {
        rank = info.compCompHnd->getArrayRank(arrayClsHnd);

        if (rank == 0)
        {
            return nullptr;
        }

        // The MD-array helper call carries (type token, arg count, args address).
        GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs;
        assert(tokenArg != nullptr);
        GenTreeCall::Use* numArgsArg = tokenArg->GetNext();
        assert(numArgsArg != nullptr);
        GenTreeCall::Use* argsArg = numArgsArg->GetNext();
        assert(argsArg != nullptr);

        //
        // The number of arguments should be a constant between 1 and 64. The rank can't be 0
        // so at least one length must be present and the rank can't exceed 32 so there can
        // be at most 64 arguments - 32 lengths and 32 lower bounds.
        //

        if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) ||
            (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64))
        {
            return nullptr;
        }

        unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue());
        bool     lowerBoundsSpecified;

        // numArgs == 2*rank means each dimension has both a lower bound and a
        // length; numArgs == rank means lengths only. Anything else is rejected.
        if (numArgs == rank * 2)
        {
            lowerBoundsSpecified = true;
        }
        else if (numArgs == rank)
        {
            lowerBoundsSpecified = false;

            //
            // If the rank is 1 and a lower bound isn't specified then the runtime creates
            // a SDArray. Note that even if a lower bound is specified it can be 0 and then
            // we get a SDArray as well, see the for loop below.
            //

            if (rank == 1)
            {
                isMDArray = false;
            }
        }
        else
        {
            return nullptr;
        }

        //
        // The rank is known to be at least 1 so we can start with numElements being 1
        // to avoid the need to special case the first dimension.
        //

        numElements = S_UINT32(1);

        // Pattern-matching helpers for the trees that store the newobj MD-array
        // arguments into the lvaNewObjArrayArgs temp: each store has the shape
        //   ASG(IND(ADD(ADDR(LCL_VAR lvaNewObjArrayArgs), 4 * index)), value)
        struct Match
        {
            static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
            {
                return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
                       IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
            }

            static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
            {
                return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
                       (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
                       IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
            }

            static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
            {
                return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
                       (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
            }

            static bool IsComma(GenTree* tree)
            {
                return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
            }
        };

        unsigned argIndex = 0;
        GenTree* comma;

        // Walk the comma chain of argument stores; each comma's op1 stores one
        // dimension argument (optional lower bound followed by a length).
        for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2())
        {
            if (lowerBoundsSpecified)
            {
                //
                // In general lower bounds can be ignored because they're not needed to
                // calculate the total number of elements. But for single dimensional arrays
                // we need to know if the lower bound is 0 because in this case the runtime
                // creates a SDArray and this affects the way the array data offset is calculated.
                //

                if (rank == 1)
                {
                    GenTree* lowerBoundAssign = comma->gtGetOp1();
                    assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
                    GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();

                    if (lowerBoundNode->IsIntegralConst(0))
                    {
                        isMDArray = false;
                    }
                }

                // Skip past the lower-bound store to reach the length store.
                comma = comma->gtGetOp2();
                argIndex++;
            }

            GenTree* lengthNodeAssign = comma->gtGetOp1();
            assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
            GenTree* lengthNode = lengthNodeAssign->gtGetOp2();

            // All lengths must be compile-time constants for the total element
            // count to be computable here.
            if (!lengthNode->IsCnsIntOrI())
            {
                return nullptr;
            }

            numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
            argIndex++;
        }

        assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));

        if (argIndex != numArgs)
        {
            return nullptr;
        }
    }
    else
    {
        //
        // Make sure there are exactly two arguments: the array class and
        // the number of elements.
        //

        GenTree* arrayLengthNode;

        GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs;
#ifdef FEATURE_READYTORUN_COMPILER
        if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
        {
            // Array length is 1st argument for readytorun helper
            arrayLengthNode = args->GetNode();
        }
        else
#endif
        {
            // Array length is 2nd argument for regular helper
            arrayLengthNode = args->GetNext()->GetNode();
        }

        //
        // Make sure that the number of elements look valid.
        //
        if (arrayLengthNode->gtOper != GT_CNS_INT)
        {
            return nullptr;
        }

        numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal);

        if (!info.compCompHnd->isSDArray(arrayClsHnd))
        {
            return nullptr;
        }
    }

    CORINFO_CLASS_HANDLE elemClsHnd;
    var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));

    //
    // Note that genTypeSize will return zero for non primitive types, which is exactly
    // what we want (size will then be 0, and we will catch this in the conditional below).
    // Note that we don't expect this to fail for valid binaries, so we assert in the
    // non-verification case (the verification case should not assert but rather correctly
    // handle bad binaries). This assert is not guarding any specific invariant, but rather
    // saying that we don't expect this to happen, and if it is hit, we need to investigate
    // why.
    //
    S_UINT32 elemSize(genTypeSize(elementType));
    S_UINT32 size = elemSize * S_UINT32(numElements);

    // Overflow-checked arithmetic: bail out rather than initializing a
    // wrongly-sized block.
    if (size.IsOverflow())
    {
        return nullptr;
    }

    // Zero size means a non-primitive element type; GC element types cannot be
    // initialized by a raw memory copy.
    if ((size.Value() == 0) || (varTypeIsGC(elementType)))
    {
        return nullptr;
    }

    void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
    if (!initData)
    {
        return nullptr;
    }

    //
    // At this point we are ready to commit to implementing the InitializeArray
    // intrinsic using a struct assignment. Pop the arguments from the stack and
    // return the struct assignment node.
    //

    impPopStack();
    impPopStack();

    const unsigned blkSize = size.Value();
    unsigned       dataOffset;

    // The first element's offset differs between MD arrays (which carry
    // per-dimension bounds) and SD arrays.
    if (isMDArray)
    {
        dataOffset = eeGetMDArrayDataOffset(elementType, rank);
    }
    else
    {
        dataOffset = eeGetArrayDataOffset(elementType);
    }

    // Build: copy blkSize bytes from the static init data to array + dataOffset.
    GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
    GenTree* dst     = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize));
    GenTree* src     = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true);

#ifdef DEBUG
    src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics;
#endif

    return gtNewBlkOpNode(dst,    // dst
                          src,    // src
                          false,  // volatile
                          true);  // copyBlock
}
//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
// newobjThis - for constructor calls, the tree for the newly allocated object
// clsHnd - handle for the intrinsic method's class
// method - handle for the intrinsic method
// sig - signature of the intrinsic method
// methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
// memberRef - the token for the intrinsic method
// readonlyCall - true if call has a readonly prefix
// tailCall - true if call is in tail position
// pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
// if call is not constrained
// constraintCallThisTransform -- this transform to apply for a constrained call
// pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
// for "traditional" jit intrinsics
// isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
// that is amenable to special downstream optimization opportunities
//
// Returns:
// IR tree to use in place of the call, or nullptr if the jit should treat
// the intrinsic call like a normal call.
//
// pIntrinsicID set to non-illegal value if the call is recognized as a
//    traditional jit intrinsic, even if the intrinsic is not expanded.
//
// isSpecial set true if the expansion is subject to special
// optimizations later in the jit processing
//
// Notes:
// On success the IR tree may be a call to a different method or an inline
// sequence. If it is a call, then the intrinsic processing here is responsible
// for handling all the special cases, as upon return to impImportCall
// expanded intrinsics bypass most of the normal call processing.
//
// Intrinsics are generally not recognized in minopts and debug codegen.
//
//    However, certain traditional intrinsics are identified as "must expand"
//    if there is no fallback implementation to invoke; these must be handled
// in all codegen modes.
//
// New style intrinsics (where the fallback implementation is in IL) are
// identified as "must expand" if they are invoked from within their
// own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef,
bool readonlyCall,
bool tailCall,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM constraintCallThisTransform,
CorInfoIntrinsics* pIntrinsicID,
bool* isSpecialIntrinsic)
{
assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
bool mustExpand = false;
bool isSpecial = false;
CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
NamedIntrinsic ni = NI_Illegal;
if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
{
intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
}
if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
{
// The recursive non-virtual calls to Jit intrinsics are must-expand by convention.
mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL));
if (intrinsicID == CORINFO_INTRINSIC_Illegal)
{
ni = lookupNamedIntrinsic(method);
// We specially support the following on all platforms to allow for dead
// code optimization and to more generally support recursive intrinsics.
if (ni == NI_IsSupported_True)
{
assert(sig->numArgs == 0);
return gtNewIconNode(true);
}
if (ni == NI_IsSupported_False)
{
assert(sig->numArgs == 0);
return gtNewIconNode(false);
}
if (ni == NI_Throw_PlatformNotSupportedException)
{
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
}
#ifdef FEATURE_HW_INTRINSICS
if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
{
GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand);
if (mustExpand && (hwintrinsic == nullptr))
{
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand);
}
return hwintrinsic;
}
if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END))
{
// These intrinsics aren't defined recursively and so they will never be mustExpand
// Instead, they provide software fallbacks that will be executed instead.
assert(!mustExpand);
return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis);
}
#endif // FEATURE_HW_INTRINSICS
}
}
*pIntrinsicID = intrinsicID;
#ifndef TARGET_ARM
genTreeOps interlockedOperator;
#endif
if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
{
// must be done regardless of DbgCode and MinOpts
return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
}
#ifdef TARGET_64BIT
if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
{
// must be done regardless of DbgCode and MinOpts
return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
}
#else
assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
#endif
if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_NextCallReturnAddress)
{
// For now we just avoid inlining anything into these methods since
// this intrinsic is only rarely used. We could do this better if we
// wanted to by trying to match which call is the one we need to get
// the return address of.
info.compHasNextCallRetAddr = true;
return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL);
}
GenTree* retNode = nullptr;
// Under debug and minopts, only expand what is required.
// NextCallReturnAddress intrinsic returns the return address of the next call.
// If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail.
// To avoid that we conservatively expand only required intrinsics in methods that call
// the NextCallReturnAddress intrinsic.
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr))
{
*pIntrinsicID = CORINFO_INTRINSIC_Illegal;
return retNode;
}
var_types callType = JITtype2varType(sig->retType);
/* First do the intrinsics which are always smaller than a call */
switch (intrinsicID)
{
GenTree* op1;
GenTree* op2;
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
// TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
// Note that CORINFO_INTRINSIC_InterlockedAdd32/64 are not actually used.
// Anyway, we can import them as XADD and leave it to lowering/codegen to perform
// whatever optimizations may arise from the fact that result value is not used.
case CORINFO_INTRINSIC_InterlockedAdd32:
case CORINFO_INTRINSIC_InterlockedXAdd32:
interlockedOperator = GT_XADD;
goto InterlockedBinOpCommon;
case CORINFO_INTRINSIC_InterlockedXchg32:
interlockedOperator = GT_XCHG;
goto InterlockedBinOpCommon;
#ifdef TARGET_64BIT
case CORINFO_INTRINSIC_InterlockedAdd64:
case CORINFO_INTRINSIC_InterlockedXAdd64:
interlockedOperator = GT_XADD;
goto InterlockedBinOpCommon;
case CORINFO_INTRINSIC_InterlockedXchg64:
interlockedOperator = GT_XCHG;
goto InterlockedBinOpCommon;
#endif // TARGET_AMD64
InterlockedBinOpCommon:
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 2);
op2 = impPopStack().val;
op1 = impPopStack().val;
// This creates:
// val
// XAdd
// addr
// field (for example)
//
// In the case where the first argument is the address of a local, we might
// want to make this *not* make the var address-taken -- but atomic instructions
// on a local are probably pretty useless anyway, so we probably don't care.
op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
retNode = op1;
break;
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
case CORINFO_INTRINSIC_MemoryBarrier:
case CORINFO_INTRINSIC_MemoryBarrierLoad:
assert(sig->numArgs == 0);
op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
// On XARCH `CORINFO_INTRINSIC_MemoryBarrierLoad` fences need not be emitted.
// However, we still need to capture the effect on reordering.
if (intrinsicID == CORINFO_INTRINSIC_MemoryBarrierLoad)
{
op1->gtFlags |= GTF_MEMORYBARRIER_LOAD;
}
retNode = op1;
break;
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
// TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
case CORINFO_INTRINSIC_InterlockedCmpXchg32:
#ifdef TARGET_64BIT
case CORINFO_INTRINSIC_InterlockedCmpXchg64:
#endif
{
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 3);
GenTree* op3;
op3 = impPopStack().val; // comparand
op2 = impPopStack().val; // value
op1 = impPopStack().val; // location
GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE;
retNode = node;
break;
}
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
case CORINFO_INTRINSIC_StringLength:
op1 = impPopStack().val;
if (opts.OptimizationEnabled())
{
if (op1->OperIs(GT_CNS_STR))
{
// Optimize `ldstr + String::get_Length()` to CNS_INT
// e.g. "Hello".Length => 5
int length = -1;
LPCWSTR str = info.compCompHnd->getStringLiteral(op1->AsStrCon()->gtScpHnd,
op1->AsStrCon()->gtSconCPX, &length);
if (length >= 0)
{
retNode = gtNewIconNode(length);
if (str != nullptr) // can be NULL for dynamic context
{
JITDUMP("Optimizing '\"%ws\".Length' to just '%d'\n", str, length);
}
else
{
JITDUMP("Optimizing 'CNS_STR.Length' to just '%d'\n", length);
}
break;
}
}
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB);
op1 = arrLen;
}
else
{
/* Create the expression "*(str_addr + stringLengthOffset)" */
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
gtNewIconNode(OFFSETOF__CORINFO_String__stringLen, TYP_I_IMPL));
op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
}
// Getting the length of a null string should throw
op1->gtFlags |= GTF_EXCEPT;
retNode = op1;
break;
case CORINFO_INTRINSIC_StringGetChar:
op2 = impPopStack().val;
op1 = impPopStack().val;
op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
op1->gtFlags |= GTF_INX_STRING_LAYOUT;
retNode = op1;
break;
case CORINFO_INTRINSIC_InitializeArray:
retNode = impInitializeArrayIntrinsic(sig);
break;
case CORINFO_INTRINSIC_Array_Address:
case CORINFO_INTRINSIC_Array_Get:
case CORINFO_INTRINSIC_Array_Set:
retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
break;
case CORINFO_INTRINSIC_GetTypeFromHandle:
op1 = impStackTop(0).val;
CorInfoHelpFunc typeHandleHelper;
if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) &&
gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper))
{
op1 = impPopStack().val;
// Replace helper with a more specialized helper that returns RuntimeType
if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE)
{
typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
}
else
{
assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL);
typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL;
}
assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr);
op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs);
op1->gtType = TYP_REF;
retNode = op1;
}
// Call the regular function.
break;
case CORINFO_INTRINSIC_RTH_GetValueInternal:
op1 = impStackTop(0).val;
if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) &&
gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall()))
{
// Old tree
// Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
//
// New tree
// TreeToGetNativeTypeHandle
// Remove call to helper and return the native TypeHandle pointer that was the parameter
// to that helper.
op1 = impPopStack().val;
// Get native TypeHandle argument to old helper
GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs;
assert(arg->GetNext() == nullptr);
op1 = arg->GetNode();
retNode = op1;
}
// Call the regular function.
break;
case CORINFO_INTRINSIC_Object_GetType:
{
JITDUMP("\n impIntrinsic: call to Object.GetType\n");
op1 = impStackTop(0).val;
// If we're calling GetType on a boxed value, just get the type directly.
if (op1->IsBoxedValue())
{
JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
// Try and clean up the box. Obtain the handle we
// were going to pass to the newobj.
GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
if (boxTypeHandle != nullptr)
{
// Note we don't need to play the TYP_STRUCT games here like
// do for LDTOKEN since the return value of this operator is Type,
// not RuntimeTypeHandle.
impPopStack();
GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
}
}
// If we have a constrained callvirt with a "box this" transform
// we know we have a value class and hence an exact type.
//
// If so, instead of boxing and then extracting the type, just
// construct the type directly.
if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
(constraintCallThisTransform == CORINFO_BOX_THIS))
{
// Ensure this is one of the is simple box cases (in particular, rule out nullables).
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
if (isSafeToOptimize)
{
JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
impPopStack();
GenTree* typeHandleOp =
impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
if (typeHandleOp == nullptr)
{
assert(compDonotInline());
return nullptr;
}
GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
}
}
#ifdef DEBUG
if (retNode != nullptr)
{
JITDUMP("Optimized result for call to GetType is\n");
if (verbose)
{
gtDispTree(retNode);
}
}
#endif
// Else expand as an intrinsic, unless the call is constrained,
// in which case we defer expansion to allow impImportCall do the
// special constraint processing.
if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
{
JITDUMP("Expanding as special intrinsic\n");
impPopStack();
op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, ni, method);
// Set the CALL flag to indicate that the operator is implemented by a call.
// Set also the EXCEPTION flag because the native implementation of
// CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
retNode = op1;
// Might be further optimizable, so arrange to leave a mark behind
isSpecial = true;
}
if (retNode == nullptr)
{
JITDUMP("Leaving as normal call\n");
// Might be further optimizable, so arrange to leave a mark behind
isSpecial = true;
}
break;
}
// Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
// in a value type. The canonical example of this is Span<T>. In effect this is just a
// substitution. The parameter byref will be assigned into the newly allocated object.
case CORINFO_INTRINSIC_ByReference_Ctor:
{
// Remove call to constructor and directly assign the byref passed
// to the call to the first slot of the ByReference struct.
op1 = impPopStack().val;
GenTree* thisptr = newobjThis;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0);
GenTree* assign = gtNewAssignNode(field, op1);
GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
assert(byReferenceStruct != nullptr);
impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
retNode = assign;
break;
}
// Implement ptr value getter for ByReference struct.
case CORINFO_INTRINSIC_ByReference_Value:
{
op1 = impPopStack().val;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0);
retNode = field;
break;
}
case CORINFO_INTRINSIC_Span_GetItem:
case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
{
// Have index, stack pointer-to Span<T> s on the stack. Expand to:
//
// For Span<T>
// Comma
// BoundsCheck(index, s->_length)
// s->_pointer + index * sizeof(T)
//
// For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
//
// Signature should show one class type parameter, which
// we need to examine.
assert(sig->sigInst.classInstCount == 1);
CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd);
assert(elemSize > 0);
const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
info.compCompHnd->getClassName(spanElemHnd), elemSize);
GenTree* index = impPopStack().val;
GenTree* ptrToSpan = impPopStack().val;
GenTree* indexClone = nullptr;
GenTree* ptrToSpanClone = nullptr;
assert(varTypeIsIntegral(index));
assert(ptrToSpan->TypeGet() == TYP_BYREF);
#if defined(DEBUG)
if (verbose)
{
printf("with ptr-to-span\n");
gtDispTree(ptrToSpan);
printf("and index\n");
gtDispTree(index);
}
#endif // defined(DEBUG)
// We need to use both index and ptr-to-span twice, so clone or spill.
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Span.get_Item index"));
ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Span.get_Item ptrToSpan"));
// Bounds check
CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1);
const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset);
GenTree* boundsCheck = new (this, GT_ARR_BOUNDS_CHECK)
GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
// Element access
GenTree* indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
GenTree* sizeofNode = gtNewIconNode(elemSize);
GenTree* mulNode = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset);
GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
// Prepare result
var_types resultType = JITtype2varType(sig->retType);
assert(resultType == result->TypeGet());
retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
break;
}
case CORINFO_INTRINSIC_GetRawHandle:
{
noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
CORINFO_RESOLVED_TOKEN resolvedToken;
resolvedToken.tokenContext = impTokenLookupContextHandle;
resolvedToken.tokenScope = info.compScopeHnd;
resolvedToken.token = memberRef;
resolvedToken.tokenType = CORINFO_TOKENKIND_Method;
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
embedInfo.compileTimeHandle);
if (rawHandle == nullptr)
{
return nullptr;
}
noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
var_types resultType = JITtype2varType(sig->retType);
retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
break;
}
case CORINFO_INTRINSIC_TypeEQ:
case CORINFO_INTRINSIC_TypeNEQ:
{
JITDUMP("Importing Type.op_*Equality intrinsic\n");
op1 = impStackTop(1).val;
op2 = impStackTop(0).val;
GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
if (optTree != nullptr)
{
// Success, clean up the evaluation stack.
impPopStack();
impPopStack();
// See if we can optimize even further, to a handle compare.
optTree = gtFoldTypeCompare(optTree);
// See if we can now fold a handle compare to a constant.
optTree = gtFoldExpr(optTree);
retNode = optTree;
}
else
{
// Retry optimizing these later
isSpecial = true;
}
break;
}
default:
/* Unknown intrinsic */
intrinsicID = CORINFO_INTRINSIC_Illegal;
break;
}
// Look for new-style jit intrinsics by name
if (ni != NI_Illegal)
{
assert(retNode == nullptr);
switch (ni)
{
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = impStackTop(1).val;
GenTree* flagOp = impStackTop(0).val;
GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (optTree != nullptr)
{
// Optimization successful. Pop the stack for real.
impPopStack();
impPopStack();
retNode = optTree;
}
else
{
// Retry optimizing this during morph.
isSpecial = true;
}
break;
}
case NI_System_Type_IsAssignableFrom:
{
GenTree* typeTo = impStackTop(1).val;
GenTree* typeFrom = impStackTop(0).val;
retNode = impTypeIsAssignable(typeTo, typeFrom);
break;
}
case NI_System_Type_IsAssignableTo:
{
GenTree* typeTo = impStackTop(0).val;
GenTree* typeFrom = impStackTop(1).val;
retNode = impTypeIsAssignable(typeTo, typeFrom);
break;
}
case NI_System_Type_get_IsValueType:
{
// Optimize
//
// call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)
// call Type.IsValueType
//
// to `true` or `false`
// e.g. `typeof(int).IsValueType` => `true`
if (impStackTop().val->IsCall())
{
GenTreeCall* call = impStackTop().val->AsCall();
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE))
{
CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode());
if (hClass != NO_CLASS_HANDLE)
{
retNode =
gtNewIconNode((eeIsValueClass(hClass) &&
// pointers are not value types (e.g. typeof(int*).IsValueType is false)
info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR)
? 1
: 0);
impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call
}
}
}
break;
}
case NI_System_Threading_Thread_get_ManagedThreadId:
{
if (opts.OptimizationEnabled() && impStackTop().val->OperIs(GT_RET_EXPR))
{
GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall();
if (call->gtFlags & CORINFO_FLG_JIT_INTRINSIC)
{
if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread)
{
// drop get_CurrentThread() call
impPopStack();
call->ReplaceWith(gtNewNothingNode(), this);
retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT);
}
}
}
break;
}
#ifdef FEATURE_HW_INTRINSICS
case NI_System_Math_FusedMultiplyAdd:
{
#ifdef TARGET_XARCH
if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes())
{
assert(varTypeIsFloating(callType));
// We are constructing a chain of intrinsics similar to:
// return FMA.MultiplyAddScalar(
// Vector128.CreateScalarUnsafe(x),
// Vector128.CreateScalarUnsafe(y),
// Vector128.CreateScalarUnsafe(z)
// ).ToScalar();
GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callType, 16);
GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callType, 16);
GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callType, 16);
GenTree* res =
gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callType, 16);
retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callType, 16);
}
#elif defined(TARGET_ARM64)
if (compExactlyDependsOn(InstructionSet_AdvSimd))
{
assert(varTypeIsFloating(callType));
// We are constructing a chain of intrinsics similar to:
// return AdvSimd.FusedMultiplyAddScalar(
// Vector64.Create{ScalarUnsafe}(z),
// Vector64.Create{ScalarUnsafe}(y),
// Vector64.Create{ScalarUnsafe}(x)
// ).ToScalar();
NamedIntrinsic createVector64 =
(callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe;
constexpr unsigned int simdSize = 8;
GenTree* op3 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callType, simdSize);
GenTree* op2 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callType, simdSize);
GenTree* op1 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callType, simdSize);
// Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3
// while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar,
callType, simdSize);
retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callType, simdSize);
}
#endif
break;
}
#endif // FEATURE_HW_INTRINSICS
case NI_System_Math_Sin:
case NI_System_Math_Cbrt:
case NI_System_Math_Sqrt:
case NI_System_Math_Abs:
case NI_System_Math_Cos:
case NI_System_Math_Round:
case NI_System_Math_Cosh:
case NI_System_Math_Sinh:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Atan:
case NI_System_Math_Atan2:
case NI_System_Math_Atanh:
case NI_System_Math_Log10:
case NI_System_Math_Pow:
case NI_System_Math_Exp:
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
{
retNode = impMathIntrinsic(method, sig, callType, ni, tailCall);
break;
}
case NI_System_Collections_Generic_EqualityComparer_get_Default:
{
// Flag for later handling during devirtualization.
isSpecial = true;
break;
}
case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness:
{
assert(sig->numArgs == 1);
// We expect the return type of the ReverseEndianness routine to match the type of the
// one and only argument to the method. We use a special instruction for 16-bit
// BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally,
// we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a
// 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below.
switch (sig->retType)
{
case CorInfoType::CORINFO_TYPE_SHORT:
case CorInfoType::CORINFO_TYPE_USHORT:
retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false,
callType);
break;
case CorInfoType::CORINFO_TYPE_INT:
case CorInfoType::CORINFO_TYPE_UINT:
#ifdef TARGET_64BIT
case CorInfoType::CORINFO_TYPE_LONG:
case CorInfoType::CORINFO_TYPE_ULONG:
#endif // TARGET_64BIT
retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val);
break;
default:
// This default case gets hit on 32-bit archs when a call to a 64-bit overload
// of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard
// method call, where the implementation decomposes the operation into two 32-bit
// bswap routines. If the input to the 64-bit function is a constant, then we rely
// on inlining + constant folding of 32-bit bswaps to effectively constant fold
// the 64-bit call site.
break;
}
break;
}
// Fold PopCount for constant input
case NI_System_Numerics_BitOperations_PopCount:
{
assert(sig->numArgs == 1);
if (impStackTop().val->IsIntegralConst())
{
typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
ssize_t cns = impPopStack().val->AsIntConCommon()->IconValue();
if (argType.IsType(TI_LONG))
{
retNode = gtNewIconNode(genCountBits(cns), callType);
}
else
{
assert(argType.IsType(TI_INT));
retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType);
}
}
break;
}
case NI_System_GC_KeepAlive:
{
retNode = gtNewOperNode(GT_KEEPALIVE, TYP_VOID, impPopStack().val);
// Prevent both reordering and removal. Invalid optimizations of GC.KeepAlive are
// very subtle and hard to observe. Thus we are conservatively marking it with both
// GTF_CALL and GTF_GLOB_REF side-effects even though it may be more than strictly
// necessary. The conservative side-effects are unlikely to have negative impact
// on code quality in this case.
retNode->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
break;
}
default:
break;
}
}
if (mustExpand && (retNode == nullptr))
{
assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException");
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
}
// Optionally report if this intrinsic is special
// (that is, potentially re-optimizable during morph).
if (isSpecialIntrinsic != nullptr)
{
*isSpecialIntrinsic = isSpecial;
}
return retNode;
}
//------------------------------------------------------------------------
// impTypeIsAssignable: attempt to fold Type.IsAssignableFrom/IsAssignableTo
//    to a constant when both operands are `typeof()` helper calls.
//
// Arguments:
//    typeTo   -- tree producing the destination System.Type
//    typeFrom -- tree producing the source System.Type
//
// Return Value:
//    A constant 0/1 node when the answer is known at jit time;
//    nullptr when the call must remain (runtime check required).
//
GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom)
{
    // Recognize patterns such as:
    //
    //   typeof(TTo).IsAssignableFrom(typeof(TTFrom))
    //   valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom))
    //   typeof(TTFrom).IsAssignableTo(typeof(TTo))
    //   typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType())
    //
    // and fold them to true/false.
    if (!typeTo->IsCall() || !typeFrom->IsCall())
    {
        return nullptr;
    }

    // Both operands must be CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE helper
    // calls, i.e. the expansion of `typeof()` (or GetType() on a value type).
    CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
    if ((typeTo->AsCall()->gtCallMethHnd != hTypeof) || (typeFrom->AsCall()->gtCallMethHnd != hTypeof))
    {
        return nullptr;
    }

    CORINFO_CLASS_HANDLE hClassTo   = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode());
    CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode());
    if ((hClassTo == NO_CLASS_HANDLE) || (hClassFrom == NO_CLASS_HANDLE))
    {
        // Could not recover a class handle from one of the helper arguments.
        return nullptr;
    }

    TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo);
    if (castResult == TypeCompareState::May)
    {
        // The answer requires a runtime check (e.g. __Canon, COM objects, Nullable).
        return nullptr;
    }

    GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0);

    // Drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls from the stack.
    impPopStack();
    impPopStack();

    return retNode;
}
//------------------------------------------------------------------------
// impMathIntrinsic: import a System.Math/MathF call as a GT_INTRINSIC node
//
// Arguments:
//    method        -- handle of the intrinsic method
//    sig           -- signature of the call (one or two floating-point args)
//    callType      -- the call's return type (must not be TYP_STRUCT)
//    intrinsicName -- named intrinsic id for the math operation
//    tailCall      -- true if the call carries an explicit "tail." prefix
//
// Return Value:
//    A GT_INTRINSIC node for the operation, or nullptr when the call should
//    be left as a regular user call.
//
GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
                                    CORINFO_SIG_INFO*     sig,
                                    var_types             callType,
                                    NamedIntrinsic        intrinsicName,
                                    bool                  tailCall)
{
    GenTree* op1;
    GenTree* op2;
    assert(callType != TYP_STRUCT);
    assert(IsMathIntrinsic(intrinsicName));
    op1 = nullptr;
#if !defined(TARGET_X86)
    // Intrinsics that are not implemented directly by target instructions will
    // be re-materialized as users calls in rationalizer. For prefixed tail calls,
    // don't do this optimization, because
    //  a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1
    //  b) It will be non-trivial task or too late to re-materialize a surviving
    //     tail prefixed GT_INTRINSIC as tail call in rationalizer.
    if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall)
#else
    // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
    // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
    // code generation for certain EH constructs.
    if (!IsIntrinsicImplementedByUserCall(intrinsicName))
#endif
    {
        switch (sig->numArgs)
        {
            case 1:
                op1 = impPopStack().val;
                assert(varTypeIsFloating(op1));
                // Insert a cast if the argument's stack type differs from the
                // call's return type (e.g. float arg to a double intrinsic).
                if (op1->TypeGet() != callType)
                {
                    op1 = gtNewCastNode(callType, op1, false, callType);
                }
                op1 = new (this, GT_INTRINSIC)
                    GenTreeIntrinsic(genActualType(callType), op1, CORINFO_INTRINSIC_Illegal, intrinsicName, method);
                break;
            case 2:
                // Pop in reverse order: second argument is on top of the stack.
                op2 = impPopStack().val;
                op1 = impPopStack().val;
                assert(varTypeIsFloating(op1));
                assert(varTypeIsFloating(op2));
                // Normalize both operands to the call's return type.
                if (op2->TypeGet() != callType)
                {
                    op2 = gtNewCastNode(callType, op2, false, callType);
                }
                if (op1->TypeGet() != callType)
                {
                    op1 = gtNewCastNode(callType, op1, false, callType);
                }
                op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2,
                                                               CORINFO_INTRINSIC_Illegal, intrinsicName, method);
                break;
            default:
                NO_WAY("Unsupported number of args for Math Intrinsic");
        }
        // If the intrinsic may be re-materialized as a user call later, mark
        // the node with GTF_CALL so it is treated as having call side effects.
        if (IsIntrinsicImplementedByUserCall(intrinsicName))
        {
            op1->gtFlags |= GTF_CALL;
        }
    }
    return op1;
}
//------------------------------------------------------------------------
// lookupNamedIntrinsic: map method to jit named intrinsic value
//
// Arguments:
//    method -- method handle for method
//
// Return Value:
//    Id for the named intrinsic, or Illegal if none.
//
// Notes:
//    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
//    otherwise it is not a named jit intrinsic.
//
NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
{
    const char* className          = nullptr;
    const char* namespaceName      = nullptr;
    const char* enclosingClassName = nullptr;
    const char* methodName =
        info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName);

    // Dump the fully qualified name of the candidate method.
    JITDUMP("Named Intrinsic ");
    if (namespaceName != nullptr)
    {
        JITDUMP("%s.", namespaceName);
    }
    if (enclosingClassName != nullptr)
    {
        JITDUMP("%s.", enclosingClassName);
    }
    if (className != nullptr)
    {
        JITDUMP("%s.", className);
    }
    if (methodName != nullptr)
    {
        JITDUMP("%s", methodName);
    }
    JITDUMP(": ");

    // Without full metadata we cannot match a named intrinsic.
    if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
    {
        JITDUMP("Not recognized, not enough metadata\n");
        return NI_Illegal;
    }

    NamedIntrinsic result = NI_Illegal;
    if (strcmp(namespaceName, "System") == 0)
    {
        if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
        {
            result = NI_System_Enum_HasFlag;
        }
        else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0)
        {
            if (strcmp(methodName, "FusedMultiplyAdd") == 0)
            {
                result = NI_System_Math_FusedMultiplyAdd;
            }
            else if (strcmp(methodName, "Round") == 0)
            {
                result = NI_System_Math_Round;
            }
            else if (strcmp(methodName, "Sin") == 0)
            {
                result = NI_System_Math_Sin;
            }
            else if (strcmp(methodName, "Cos") == 0)
            {
                result = NI_System_Math_Cos;
            }
            else if (strcmp(methodName, "Cbrt") == 0)
            {
                result = NI_System_Math_Cbrt;
            }
            else if (strcmp(methodName, "Sqrt") == 0)
            {
                result = NI_System_Math_Sqrt;
            }
            else if (strcmp(methodName, "Abs") == 0)
            {
                result = NI_System_Math_Abs;
            }
            else if (strcmp(methodName, "Cosh") == 0)
            {
                result = NI_System_Math_Cosh;
            }
            else if (strcmp(methodName, "Sinh") == 0)
            {
                result = NI_System_Math_Sinh;
            }
            else if (strcmp(methodName, "Tan") == 0)
            {
                result = NI_System_Math_Tan;
            }
            else if (strcmp(methodName, "Tanh") == 0)
            {
                result = NI_System_Math_Tanh;
            }
            else if (strcmp(methodName, "Asin") == 0)
            {
                result = NI_System_Math_Asin;
            }
            else if (strcmp(methodName, "Asinh") == 0)
            {
                result = NI_System_Math_Asinh;
            }
            else if (strcmp(methodName, "Acos") == 0)
            {
                result = NI_System_Math_Acos;
            }
            else if (strcmp(methodName, "Acosh") == 0)
            {
                result = NI_System_Math_Acosh;
            }
            else if (strcmp(methodName, "Atan") == 0)
            {
                result = NI_System_Math_Atan;
            }
            else if (strcmp(methodName, "Atan2") == 0)
            {
                result = NI_System_Math_Atan2;
            }
            else if (strcmp(methodName, "Atanh") == 0)
            {
                result = NI_System_Math_Atanh;
            }
            else if (strcmp(methodName, "Log10") == 0)
            {
                result = NI_System_Math_Log10;
            }
            else if (strcmp(methodName, "Pow") == 0)
            {
                result = NI_System_Math_Pow;
            }
            else if (strcmp(methodName, "Exp") == 0)
            {
                result = NI_System_Math_Exp;
            }
            else if (strcmp(methodName, "Ceiling") == 0)
            {
                result = NI_System_Math_Ceiling;
            }
            else if (strcmp(methodName, "Floor") == 0)
            {
                result = NI_System_Math_Floor;
            }
        }
        else if (strcmp(className, "GC") == 0)
        {
            if (strcmp(methodName, "KeepAlive") == 0)
            {
                result = NI_System_GC_KeepAlive;
            }
        }
        else if (strcmp(className, "Type") == 0)
        {
            if (strcmp(methodName, "get_IsValueType") == 0)
            {
                result = NI_System_Type_get_IsValueType;
            }
            else if (strcmp(methodName, "IsAssignableFrom") == 0)
            {
                result = NI_System_Type_IsAssignableFrom;
            }
            else if (strcmp(methodName, "IsAssignableTo") == 0)
            {
                result = NI_System_Type_IsAssignableTo;
            }
        }
    }
    else if (strcmp(namespaceName, "System.Threading") == 0)
    {
        if (strcmp(className, "Thread") == 0)
        {
            if (strcmp(methodName, "get_CurrentThread") == 0)
            {
                result = NI_System_Threading_Thread_get_CurrentThread;
            }
            else if (strcmp(methodName, "get_ManagedThreadId") == 0)
            {
                result = NI_System_Threading_Thread_get_ManagedThreadId;
            }
        }
    }
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
    else if (strcmp(namespaceName, "System.Buffers.Binary") == 0)
    {
        if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0))
        {
            result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness;
        }
    }
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
    else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
    {
        if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
        {
            result = NI_System_Collections_Generic_EqualityComparer_get_Default;
        }
    }
    // Note: this BitOperations check must precede the generic System.Numerics
    // SIMD check below so PopCount is not misrouted to the SIMD lookup.
    else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0))
    {
        if (strcmp(methodName, "PopCount") == 0)
        {
            result = NI_System_Numerics_BitOperations_PopCount;
        }
    }
#ifdef FEATURE_HW_INTRINSICS
    else if (strcmp(namespaceName, "System.Numerics") == 0)
    {
        CORINFO_SIG_INFO sig;
        info.compCompHnd->getMethodSig(method, &sig);
        int sizeOfVectorT = getSIMDVectorRegisterByteLength();
        result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT);
    }
#endif // FEATURE_HW_INTRINSICS
    else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0)
    {
        // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled
        // so we can specially handle IsSupported and recursive calls.
        // This is required to appropriately handle the intrinsics on platforms
        // which don't support them. On such a platform methods like Vector64.Create
        // will be seen as `Intrinsic` and `mustExpand` due to having a code path
        // which is recursive. When such a path is hit we expect it to be handled by
        // the importer and we fire an assert if it wasn't and in previous versions
        // of the JIT would fail fast. This was changed to throw a PNSE instead but
        // we still assert as most intrinsics should have been recognized/handled.
        // In order to avoid the assert, we specially handle the IsSupported checks
        // (to better allow dead-code optimizations) and we explicitly throw a PNSE
        // as we know that is the desired behavior for the HWIntrinsics when not
        // supported. For cases like Vector64.Create, this is fine because it will
        // be behind a relevant IsSupported check and will never be hit and the
        // software fallback will be executed instead.
        CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef FEATURE_HW_INTRINSICS
        namespaceName += 25;
        const char* platformNamespaceName;
#if defined(TARGET_XARCH)
        platformNamespaceName = ".X86";
#elif defined(TARGET_ARM64)
        platformNamespaceName = ".Arm";
#else
#error Unsupported platform
#endif
        // Only consult the HW intrinsic tables for the platform-agnostic
        // namespace or the current platform's namespace.
        if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0))
        {
            CORINFO_SIG_INFO sig;
            info.compCompHnd->getMethodSig(method, &sig);
            result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName);
        }
#endif // FEATURE_HW_INTRINSICS
        if (result == NI_Illegal)
        {
            if (strcmp(methodName, "get_IsSupported") == 0)
            {
                // This allows the relevant code paths to be dropped as dead code even
                // on platforms where FEATURE_HW_INTRINSICS is not supported.
                result = NI_IsSupported_False;
            }
            else if (gtIsRecursiveCall(method))
            {
                // For the framework itself, any recursive intrinsics will either be
                // only supported on a single platform or will be guarded by a relevant
                // IsSupported check so the throw PNSE will be valid or dropped.
                result = NI_Throw_PlatformNotSupportedException;
            }
        }
    }

    if (result == NI_Illegal)
    {
        JITDUMP("Not recognized\n");
    }
    else if (result == NI_IsSupported_False)
    {
        // Note: the trailing newline keeps the dump aligned with the other branches.
        JITDUMP("Unsupported - return false\n");
    }
    else if (result == NI_Throw_PlatformNotSupportedException)
    {
        JITDUMP("Unsupported - throw PlatformNotSupportedException\n");
    }
    else
    {
        JITDUMP("Recognized\n");
    }
    return result;
}
//------------------------------------------------------------------------
// impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic
//
// Arguments:
//    helper     - JIT helper ID for the exception to be thrown
//    method     - method handle of the intrinsic function.
//    sig        - signature of the intrinsic call
//    mustExpand - true if the intrinsic must return a GenTree*; otherwise, false
//
// Return Value:
//    a gtNewMustThrowException if mustExpand is true; otherwise, nullptr
//
GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned              helper,
                                                CORINFO_METHOD_HANDLE method,
                                                CORINFO_SIG_INFO*     sig,
                                                bool                  mustExpand)
{
    // We've hit some error case and may need to return a node for the given error.
    //
    // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. In this
    // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to
    // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the
    // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't
    // match that).
    //
    // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally
    // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning
    // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node.
    if (!mustExpand)
    {
        return nullptr;
    }

    // Discard the intrinsic's arguments; the throw consumes nothing.
    for (unsigned argIndex = 0; argIndex < sig->numArgs; argIndex++)
    {
        impPopStack();
    }

    return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass);
}
/*****************************************************************************/
//------------------------------------------------------------------------
// impArrayAccessIntrinsic: expand a multi-dimensional array Get/Set/Address
//    intrinsic into a GT_ARR_ELEM based tree.
//
// Arguments:
//    clsHnd       -- class handle of the array type
//    sig          -- signature of the intrinsic call
//    memberRef    -- member token of the call site
//    readonlyCall -- true if the call is prefixed with "readonly."
//    intrinsicID  -- one of Array_Get / Array_Set / Array_Address
//
// Return Value:
//    The expanded tree, or nullptr when expansion is not profitable or not
//    possible (the call then remains a regular call).
//
GenTree* Compiler::impArrayAccessIntrinsic(
    CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
{
    /* If we are generating SMALL_CODE, we don't want to use intrinsics for
       the following, as it generates fatter code.
    */
    if (compCodeOpt() == SMALL_CODE)
    {
        return nullptr;
    }
    /* These intrinsics generate fatter (but faster) code and are only
       done if we don't need SMALL_CODE */
    // For Array_Set the trailing argument is the value, not an index.
    unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
    // The rank 1 case is special because it has to handle two array formats
    // we will simply not do that case
    if (rank > GT_ARR_MAX_RANK || rank <= 1)
    {
        return nullptr;
    }
    CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
    var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
    // For the ref case, we will only be able to inline if the types match
    // (verifier checks for this, we don't care for the nonverified case and the
    // type is final (so we don't need to do the cast)
    if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
    {
        // Get the call site signature
        CORINFO_SIG_INFO LocalSig;
        eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
        assert(LocalSig.hasThis());
        CORINFO_CLASS_HANDLE actualElemClsHnd;
        if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
        {
            // Fetch the last argument, the one that indicates the type we are setting.
            CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
            for (unsigned r = 0; r < rank; r++)
            {
                argType = info.compCompHnd->getArgNext(argType);
            }
            typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
            actualElemClsHnd = argInfo.GetClassHandle();
        }
        else
        {
            assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
            // Fetch the return type
            typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
            assert(retInfo.IsByRef());
            actualElemClsHnd = retInfo.GetClassHandle();
        }
        // if it's not final, we can't do the optimization
        if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
        {
            return nullptr;
        }
    }
    unsigned arrayElemSize;
    if (elemType == TYP_STRUCT)
    {
        assert(arrElemClsHnd);
        arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
    }
    else
    {
        arrayElemSize = genTypeSize(elemType);
    }
    // GT_ARR_ELEM stores the element size in an unsigned char field.
    if ((unsigned char)arrayElemSize != arrayElemSize)
    {
        // arrayElemSize would be truncated as an unsigned char.
        // This means the array element is too large. Don't do the optimization.
        return nullptr;
    }
    GenTree* val = nullptr;
    if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
    {
        // Assignment of a struct is more work, and there are more gets than sets.
        if (elemType == TYP_STRUCT)
        {
            return nullptr;
        }
        val = impPopStack().val;
        assert(genActualType(elemType) == genActualType(val->gtType) ||
               (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
               (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
               (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
    }
    noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
    // Pop the indices in reverse order: the last index is on top of the stack.
    GenTree* inds[GT_ARR_MAX_RANK];
    for (unsigned k = rank; k > 0; k--)
    {
        inds[k - 1] = impPopStack().val;
    }
    // Finally the array object itself ("this").
    GenTree* arr = impPopStack().val;
    assert(arr->gtType == TYP_REF);
    GenTree* arrElem =
        new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
                                               static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
    // Array_Address yields the byref itself; Get/Set need a load of the element.
    if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
    {
        if (varTypeIsStruct(elemType))
        {
            arrElem = gtNewObjNode(sig->retTypeClass, arrElem);
        }
        else
        {
            arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
        }
    }
    if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
    {
        assert(val != nullptr);
        return gtNewAssignNode(arrElem, val);
    }
    else
    {
        return arrElem;
    }
}
//------------------------------------------------------------------------
// verMergeEntryStates: merge the current verification state into the entry
//    state recorded for `block`.
//
// Arguments:
//    block   -- successor block whose entry state is being merged into
//    changed -- [out] set to true if the block's recorded state was widened
//
// Return Value:
//    TRUE if the states are compatible (merge succeeded), FALSE otherwise.
//
BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
{
    unsigned i;
    // do some basic checks first
    if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
    {
        // Stack depths must agree on every path into a block.
        return FALSE;
    }
    if (verCurrentState.esStackDepth > 0)
    {
        // merge stack types
        StackEntry* parentStack = block->bbStackOnEntry();
        StackEntry* childStack  = verCurrentState.esStack;
        for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
        {
            // Widen each recorded entry to the common parent of both types;
            // fail the merge if no common parent exists.
            if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
            {
                return FALSE;
            }
        }
    }
    // merge initialization status of this ptr
    if (verTrackObjCtorInitState)
    {
        // If we're tracking the CtorInitState, then it must not be unknown in the current state.
        assert(verCurrentState.thisInitialized != TIS_Bottom);
        // If the successor block's thisInit state is unknown, copy it from the current state.
        if (block->bbThisOnEntry() == TIS_Bottom)
        {
            *changed = true;
            verSetThisInit(block, verCurrentState.thisInitialized);
        }
        else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
        {
            // States disagree: widen the block's recorded state to TIS_Top
            // (unless it is already there).
            if (block->bbThisOnEntry() != TIS_Top)
            {
                *changed = true;
                verSetThisInit(block, TIS_Top);
                if (block->bbFlags & BBF_FAILED_VERIFICATION)
                {
                    // The block is bad. Control can flow through the block to any handler that catches the
                    // verification exception, but the importer ignores bad blocks and therefore won't model
                    // this flow in the normal way. To complete the merge into the bad block, the new state
                    // needs to be manually pushed to the handlers that may be reached after the verification
                    // exception occurs.
                    //
                    // Usually, the new state was already propagated to the relevant handlers while processing
                    // the predecessors of the bad block. The exception is when the bad block is at the start
                    // of a try region, meaning it is protected by additional handlers that do not protect its
                    // predecessors.
                    //
                    if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
                    {
                        // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
                        // recursive calls back into this code path (if successors of the current bad block are
                        // also bad blocks).
                        //
                        ThisInitState origTIS           = verCurrentState.thisInitialized;
                        verCurrentState.thisInitialized = TIS_Top;
                        impVerifyEHBlock(block, true);
                        verCurrentState.thisInitialized = origTIS;
                    }
                }
            }
        }
    }
    else
    {
        // Not tracking ctor-init state: both states must be at bottom.
        assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
    }
    return TRUE;
}
/*****************************************************************************
 * Rewrite 'block' so it unconditionally throws a verification exception
 * (via the CORINFO_HELP_VERIFICATION helper), discarding its imported IR.
 *
 * 'logMsg' is true if a log message needs to be logged. false if the caller has
 * already logged it (presumably in a more detailed fashion than done here)
 * 'bVerificationException' is true for a verification exception, false for a
 * "call unauthorized by host" exception.
 */
void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
    // Turn the block into a throw and mark it as failed verification.
    block->bbJumpKind = BBJ_THROW;
    block->bbFlags |= BBF_FAILED_VERIFICATION;
    impCurStmtOffsSet(block->bbCodeOffs);
#ifdef DEBUG
    // we need this since BeginTreeList asserts otherwise
    impStmtList = impLastStmt = nullptr;
    block->bbFlags &= ~BBF_IMPORTED;
    if (logMsg)
    {
        JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
                block->bbCodeOffs, block->bbCodeOffsEnd));
        if (verbose)
        {
            printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
        }
    }
    if (JitConfig.DebugBreakOnVerificationFailure())
    {
        DebugBreak();
    }
#endif
    // Start a fresh statement list for the throw.
    impBeginTreeList();
    // if the stack is non-empty evaluate all the side-effects
    if (verCurrentState.esStackDepth > 0)
    {
        impEvalSideEffects();
    }
    assert(verCurrentState.esStackDepth == 0);
    // Call the verification-failure helper, passing the IL offset of the block.
    GenTree* op1 =
        gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs)));
    // verCurrentState.esStackDepth = 0;
    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
    // The inliner is not able to handle methods that require throw block, so
    // make sure this methods never gets inlined.
    info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
}
/*****************************************************************************
 * Handle a verification failure in 'block': reset the importer's state for
 * the block, then convert it into one that throws a verification exception.
 */
void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
{
    // Re-establish the block's recorded entry state before rewriting it.
    verResetCurrentState(block, &verCurrentState);
    verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
#ifdef DEBUG
    impNoteLastILoffs(); // Remember at which BC offset the tree was finished
#endif // DEBUG
}
/******************************************************************************/
// Build a verifier typeInfo for the given CorInfoType / class handle pair.
// Returns an empty typeInfo() on error (e.g. inconsistent or unsupported types).
typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
{
    assert(ciType < CORINFO_TYPE_COUNT);
    typeInfo tiResult;
    switch (ciType)
    {
        case CORINFO_TYPE_STRING:
        case CORINFO_TYPE_CLASS:
            tiResult = verMakeTypeInfo(clsHnd);
            if (!tiResult.IsType(TI_REF))
            { // type must be consistent with element type
                return typeInfo();
            }
            break;
#ifdef TARGET_64BIT
        case CORINFO_TYPE_NATIVEINT:
        case CORINFO_TYPE_NATIVEUINT:
            if (clsHnd)
            {
                // If we have more precise information, use it
                return verMakeTypeInfo(clsHnd);
            }
            else
            {
                return typeInfo::nativeInt();
            }
            break;
#endif // TARGET_64BIT
        case CORINFO_TYPE_VALUECLASS:
        case CORINFO_TYPE_REFANY:
            tiResult = verMakeTypeInfo(clsHnd);
            // type must be constant with element type;
            if (!tiResult.IsValueClass())
            {
                return typeInfo();
            }
            break;
        case CORINFO_TYPE_VAR:
            // Generic type variable: defer to the class-handle overload.
            return verMakeTypeInfo(clsHnd);
        case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
        case CORINFO_TYPE_VOID:
            return typeInfo();
            break;
        case CORINFO_TYPE_BYREF:
        {
            // Recurse on the pointed-to type and wrap the result as a byref.
            CORINFO_CLASS_HANDLE childClassHandle;
            CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
            return ByRef(verMakeTypeInfo(childType, childClassHandle));
        }
        break;
        default:
            if (clsHnd)
            { // If we have more precise information, use it
                return typeInfo(TI_STRUCT, clsHnd);
            }
            else
            {
                return typeInfo(JITtype2tiType(ciType));
            }
    }
    return tiResult;
}
/******************************************************************************/
// Build a verifier typeInfo from a class handle alone.
// If bashStructToRef is true, value classes with no primitive mapping are
// reported as object references instead of structs.
typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
{
    if (clsHnd == nullptr)
    {
        // No class information available: report an error type.
        return typeInfo();
    }
    // Byrefs should only occur in method and local signatures, which are accessed
    // using ICorClassInfo and ICorClassInfo.getChildType.
    // So findClass() and getClassAttribs() should not be called for byrefs
    if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
    {
        assert(!"Did findClass() return a Byref?");
        return typeInfo();
    }
    unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
    if (attribs & CORINFO_FLG_VALUECLASS)
    {
        CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
        // Meta-data validation should ensure that CORINF_TYPE_BYREF should
        // not occur here, so we may want to change this to an assert instead.
        if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
        {
            return typeInfo();
        }
#ifdef TARGET_64BIT
        if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
        {
            return typeInfo::nativeInt();
        }
#endif // TARGET_64BIT
        if (t != CORINFO_TYPE_UNDEF)
        {
            // Primitive value class (e.g. System.Int32): use its primitive type.
            return (typeInfo(JITtype2tiType(t)));
        }
        else if (bashStructToRef)
        {
            return (typeInfo(TI_REF, clsHnd));
        }
        else
        {
            return (typeInfo(TI_STRUCT, clsHnd));
        }
    }
    else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
    {
        // See comment in _typeInfo.h for why we do it this way.
        return (typeInfo(TI_REF, clsHnd, true));
    }
    else
    {
        return (typeInfo(TI_REF, clsHnd));
    }
}
/******************************************************************************/
// Returns TRUE if 'ti' is (or may be) a single-dimensional array reference.
BOOL Compiler::verIsSDArray(const typeInfo& ti)
{
    // A null object reference is compatible with any SD array type.
    if (ti.IsNullObjRef())
    {
        return TRUE;
    }
    // Anything else must be an object reference whose class the EE reports
    // as a single-dimensional array.
    if (ti.IsType(TI_REF) && info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
    {
        return TRUE;
    }
    return FALSE;
}
/******************************************************************************/
/* Given 'arrayObjectType' which is an array type, fetch the element type. */
/* Returns an error type if anything goes wrong */
typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType)
{
    assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
    // Only single-dimensional arrays are supported here.
    if (!verIsSDArray(arrayObjectType))
    {
        return typeInfo();
    }
    // Ask the EE for the element type and wrap it as a verifier typeInfo.
    CORINFO_CLASS_HANDLE childClassHandle = nullptr;
    CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
    return verMakeTypeInfo(ciType, childClassHandle);
}
/*****************************************************************************
 * Build a verifier typeInfo for one argument/local from a signature.
 */
typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
{
    CORINFO_CLASS_HANDLE classHandle;
    CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
    var_types type = JITtype2varType(ciType);
    if (varTypeIsGC(type))
    {
        // For efficiency, getArgType only returns something in classHandle for
        // value types.  For other types that have addition type info, you
        // have to call back explicitly
        classHandle = info.compCompHnd->getArgClass(sig, args);
        if (!classHandle)
        {
            NO_WAY("Could not figure out Class specified in argument or local signature");
        }
    }
    return verMakeTypeInfo(ciType, classHandle);
}
// Returns TRUE if 'ti' is a byref or a byref-like (stack-pointer-containing)
// value class.
BOOL Compiler::verIsByRefLike(const typeInfo& ti)
{
    // Managed pointers are byref-like by definition.
    if (ti.IsByRef())
    {
        return TRUE;
    }
    // Otherwise only value classes flagged by the EE as containing stack
    // pointers qualify.
    if (ti.IsType(TI_STRUCT))
    {
        return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
    }
    return FALSE;
}
// Returns TRUE if the byref 'ti' may legally escape via a return statement:
// only byrefs pointing to a permanent home (e.g. a static field or the heap)
// are safe to return.
BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
{
    return ti.IsPermanentHomeByRef() ? TRUE : FALSE;
}
// Returns TRUE if a value of type 'ti' may be boxed: primitives, object
// references (including boxed generic type variables), unboxed generic type
// variables, and non-byref-like structs.
BOOL Compiler::verIsBoxable(const typeInfo& ti)
{
    return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
            || ti.IsUnboxedGenericTypeVar() ||
            (ti.IsType(TI_STRUCT) &&
             // exclude byreflike structs
             !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
}
// Is it a boxed value type? A boxed value type appears as an object
// reference (TI_REF) whose underlying class is a value class.
bool Compiler::verIsBoxedValueType(const typeInfo& ti)
{
    if (ti.GetType() != TI_REF)
    {
        return false;
    }

    CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
    return eeIsValueClass(clsHnd) != 0;
}
/*****************************************************************************
 *
 *  Check if a TailCall is legal.
 *
 *  The call's arguments are still on the IL stack (impImportCall needs them),
 *  so instead of popping for real this routine counts items "virtually
 *  popped" in popCount and compares against the stack depth at the end.
 *  Returns true if the tail call is legal; on failure either throws or
 *  returns false, depending on 'speculative'.
 */
bool Compiler::verCheckTailCallConstraint(
    OPCODE opcode,
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
    bool speculative // If true, won't throw if verification fails. Instead it will
                     // return false to the caller.
                     // If false, it will throw.
    )
{
    DWORD mflags;
    CORINFO_SIG_INFO sig;
    unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
                               // this counter is used to keep track of how many items have been
                               // virtually popped

    CORINFO_METHOD_HANDLE methodHnd = nullptr;
    CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
    unsigned methodClassFlgs = 0;

    assert(impOpcodeIsCallOpcode(opcode));

    // Tail call constraints are not checked when inlining.
    if (compIsForInlining())
    {
        return false;
    }

    // for calli, VerifyOrReturn that this is not a virtual method
    if (opcode == CEE_CALLI)
    {
        /* Get the call sig */
        eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);

        // We don't know the target method, so we have to infer the flags, or
        // assume the worst-case.
        mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
    }
    else
    {
        methodHnd = pResolvedToken->hMethod;
        mflags = info.compCompHnd->getMethodAttribs(methodHnd);

        // When verifying generic code we pair the method handle with its
        // owning class to get the exact method signature.
        methodClassHnd = pResolvedToken->hClass;
        assert(methodClassHnd);

        eeGetMethodSig(methodHnd, &sig, methodClassHnd);

        // opcode specific check
        methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
    }

    // We must have got the methodClassHnd if opcode is not CEE_CALLI
    assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);

    // For varargs the declared signature lacks the trailing call-site args,
    // so fetch the full call-site signature instead.
    if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
    }

    // check compatibility of the arguments
    unsigned int argCount;
    argCount = sig.numArgs;
    CORINFO_ARG_LIST_HANDLE args;
    args = sig.args;
    while (argCount--)
    {
        typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();

        // check that the argument is not a byref for tailcalls
        VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);

        // For unsafe code, we might have parameters containing pointer to the stack location.
        // Disallow the tailcall for this kind.
        CORINFO_CLASS_HANDLE classHandle;
        CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
        VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);

        args = info.compCompHnd->getArgNext(args);
    }

    // update popCount
    popCount += sig.numArgs;

    // check for 'this' which is on non-static methods, not called via NEWOBJ
    if (!(mflags & CORINFO_FLG_STATIC))
    {
        // Always update the popCount.
        // This is crucial for the stack calculation to be correct.
        typeInfo tiThis = impStackTop(popCount).seTypeInfo;
        popCount++;

        if (opcode == CEE_CALLI)
        {
            // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
            // on the stack.
            if (tiThis.IsValueClass())
            {
                tiThis.MakeByRef();
            }
            VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
        }
        else
        {
            // Check type compatibility of the this argument
            typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
            if (tiDeclaredThis.IsValueClass())
            {
                tiDeclaredThis.MakeByRef();
            }

            VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
        }
    }

    // Tail calls on constrained calls should be illegal too:
    // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
    VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);

    // Get the exact view of the signature for an array method
    if (sig.retType != CORINFO_TYPE_VOID)
    {
        if (methodClassFlgs & CORINFO_FLG_ARRAY)
        {
            assert(opcode != CEE_CALLI);
            eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
        }
    }

    typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
    typeInfo tiCallerRetType =
        verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);

    // void return type gets morphed into the error type, so we have to treat them specially here
    if (sig.retType == CORINFO_TYPE_VOID)
    {
        VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
                                  speculative);
    }
    else
    {
        VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
                                                   NormaliseForStack(tiCallerRetType), true),
                                  "tailcall return mismatch", speculative);
    }

    // for tailcall, stack must be empty
    VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);

    return true; // Yes, tailcall is legal
}
/*****************************************************************************
 *
 *  Checks the IL verification rules for the call.
 *
 *  The call's arguments remain on the IL stack (impImportCall needs them);
 *  popCount tracks how many items have been "virtually" popped so the right
 *  stack slots are inspected. On a verification failure this either raises
 *  the verification exception or returns after marking the method, depending
 *  on the Verify/VerifyOrReturn macros' behavior.
 */
void Compiler::verVerifyCall(OPCODE opcode,
                             CORINFO_RESOLVED_TOKEN* pResolvedToken,
                             CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                             bool tailCall,
                             bool readonlyCall,
                             const BYTE* delegateCreateStart,
                             const BYTE* codeAddr,
                             CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
{
    DWORD mflags;
    CORINFO_SIG_INFO* sig = nullptr;
    unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
                               // this counter is used to keep track of how many items have been
                               // virtually popped

    // for calli, VerifyOrReturn that this is not a virtual method
    if (opcode == CEE_CALLI)
    {
        Verify(false, "Calli not verifiable");
        return;
    }

    //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
    mflags = callInfo->verMethodFlags;

    sig = &callInfo->verSig;

    // Vararg call sites carry the full signature at the call site, not on the method.
    if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
    }

    // opcode specific check
    unsigned methodClassFlgs = callInfo->classFlags;
    switch (opcode)
    {
        case CEE_CALLVIRT:
            // cannot do callvirt on valuetypes
            VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
            VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
            break;

        case CEE_NEWOBJ:
        {
            assert(!tailCall); // Importer should not allow this
            VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
                           "newobj must be on instance");

            if (methodClassFlgs & CORINFO_FLG_DELEGATE)
            {
                // Delegate construction: the ctor takes exactly (object, native int ftn).
                VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
                typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
                typeInfo tiDeclaredFtn =
                    verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
                VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");

                assert(popCount == 0);
                typeInfo tiActualObj = impStackTop(1).seTypeInfo;
                typeInfo tiActualFtn = impStackTop(0).seTypeInfo;

                VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
                VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
                VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
                               "delegate object type mismatch");

                CORINFO_CLASS_HANDLE objTypeHandle =
                    tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();

                // the method signature must be compatible with the delegate's invoke method

                // check that for virtual functions, the type of the object used to get the
                // ftn ptr is the same as the type of the object passed to the delegate ctor.
                // since this is a bit of work to determine in general, we pattern match stylized
                // code sequences

                // the delegate creation code check, which used to be done later, is now done here
                // so we can read delegateMethodRef directly from
                // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
                // we then use it in our call to isCompatibleDelegate().

                mdMemberRef delegateMethodRef = mdMemberRefNil;
                VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
                               "must create delegates with certain IL");

                CORINFO_RESOLVED_TOKEN delegateResolvedToken;
                delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
                delegateResolvedToken.tokenScope = info.compScopeHnd;
                delegateResolvedToken.token = delegateMethodRef;
                delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
                info.compCompHnd->resolveToken(&delegateResolvedToken);

                CORINFO_CALL_INFO delegateCallInfo;
                eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
                              addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);

                bool isOpenDelegate = false;
                VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
                                                                     tiActualFtn.GetMethod(), pResolvedToken->hClass,
                                                                     &isOpenDelegate),
                               "function incompatible with delegate");

                // check the constraints on the target method
                VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
                               "delegate target has unsatisfied class constraints");
                VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
                                                                            tiActualFtn.GetMethod()),
                               "delegate target has unsatisfied method constraints");

                // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
                // for additional verification rules for delegates
                CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
                DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
                if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
                {
                    // ldftn on a non-final virtual: restrict the object to the
                    // (read-only) 'this' of the caller or a boxed value type.
                    if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0))
                    {
                        VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) ||
                                           verIsBoxedValueType(tiActualObj),
                                       "The 'this' parameter to the call must be either the calling method's "
                                       "'this' parameter or "
                                       "a boxed value type.");
                    }
                }

                if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
                {
                    BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;

                    Verify(targetIsStatic || !isOpenDelegate,
                           "Unverifiable creation of an open instance delegate for a protected member.");

                    CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
                                                                ? info.compClassHnd
                                                                : tiActualObj.GetClassHandleForObjRef();

                    // In the case of protected methods, it is a requirement that the 'this'
                    // pointer be a subclass of the current context. Perform this check.
                    Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                           "Accessing protected method through wrong type.");
                }
                goto DONE_ARGS;
            }
        }
            // fall thru to default checks
            FALLTHROUGH;
        default:
            VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
    }
    VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
                   "can only newobj a delegate constructor");

    // check compatibility of the arguments
    unsigned int argCount;
    argCount = sig->numArgs;
    CORINFO_ARG_LIST_HANDLE args;
    args = sig->args;
    while (argCount--)
    {
        typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;

        typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
        VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");

        args = info.compCompHnd->getArgNext(args);
    }

DONE_ARGS:

    // update popCount
    popCount += sig->numArgs;

    // check for 'this' which is on non-static methods, not called via NEWOBJ
    CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
    if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
    {
        typeInfo tiThis = impStackTop(popCount).seTypeInfo;
        popCount++;

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis.IsType(TI_REF))
        {
            instanceClassHnd = tiThis.GetClassHandleForObjRef();
        }

        // Check type compatibility of the this argument
        typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();
        }

        // If this is a call to the base class .ctor, set thisPtr Init for
        // this block.
        if (mflags & CORINFO_FLG_CONSTRUCTOR)
        {
            if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
                verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
            {
                assert(verCurrentState.thisInitialized !=
                       TIS_Bottom); // This should never be the case just from the logic of the verifier.

                VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
                               "Call to base class constructor when 'this' is possibly initialized");
                // Otherwise, 'this' is now initialized.
                verCurrentState.thisInitialized = TIS_Init;
                tiThis.SetInitialisedObjRef();
            }
            else
            {
                // We allow direct calls to value type constructors
                // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
                // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
                VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
                               "Bad call to a constructor");
            }
        }

        if (pConstrainedResolvedToken != nullptr)
        {
            VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");

            typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);

            // We just dereference this and test for equality
            tiThis.DereferenceByRef();
            VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
                           "this type mismatch with constrained type operand");

            // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
            tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
        }

        // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
        if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
        {
            tiDeclaredThis.SetIsReadonlyByRef();
        }

        VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");

        if (tiThis.IsByRef())
        {
            // Find the actual type where the method exists (as opposed to what is declared
            // in the metadata). This is to prevent passing a byref as the "this" argument
            // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.

            CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
            VerifyOrReturn(eeIsValueClass(actualClassHnd),
                           "Call to base type of valuetype (which is never a valuetype)");
        }

        // Rules for non-virtual call to a non-final virtual method:

        // Define:
        // The "this" pointer is considered to be "possibly written" if
        //   1. Its address have been taken (LDARGA 0) anywhere in the method.
        //   (or)
        //   2. It has been stored to (STARG.0) anywhere in the method.

        // A non-virtual call to a non-final virtual method is only allowed if
        //   1. The this pointer passed to the callee is an instance of a boxed value type.
        //   (or)
        //   2. The this pointer passed to the callee is the current method's this pointer.
        //      (and) The current method's this pointer is not "possibly written".

        // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
        // virtual methods. (Luckily this does affect .ctors, since they are not virtual).
        // This is stronger that is strictly needed, but implementing a laxer rule is significantly
        // hard and more error prone.
        if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0))
        {
            VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis),
                           "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
                           "a boxed value type.");
        }
    }

    // check any constraints on the callee's class and type parameters
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
                   "method has unsatisfied class constraints");
    VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
                   "method has unsatisfied method constraints");

    if (mflags & CORINFO_FLG_PROTECTED)
    {
        VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                       "Can't access protected method");
    }

    // Get the exact view of the signature for an array method
    if (sig->retType != CORINFO_TYPE_VOID)
    {
        eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
    }

    // "readonly." prefixed calls only allowed for the Address operation on arrays.
    // The methods supported by array types are under the control of the EE
    // so we can trust that only the Address operation returns a byref.
    if (readonlyCall)
    {
        typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
        VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
                       "unexpected use of readonly prefix");
    }

    // Verify the tailcall
    if (tailCall)
    {
        verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
    }
}
/*****************************************************************************
* Checks that a delegate creation is done using the following pattern:
* dup
* ldvirtftn targetMemberRef
* OR
* ldftn targetMemberRef
*
* 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
* not in this basic block)
*
* targetMemberRef is read from the code sequence.
* targetMemberRef is validated iff verificationNeeded.
*/
BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
                                        const BYTE* codeAddr,
                                        mdMemberRef& targetMemberRef)
{
    // ldftn <token>: the 4-byte token follows the 2-byte opcode encoding.
    if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
    {
        targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
        return TRUE;
    }

    // dup; ldvirtftn <token>: one extra leading byte for the dup.
    if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
    {
        targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
        return TRUE;
    }

    // Neither recognized pattern: not a verifiable delegate creation sequence.
    return FALSE;
}
// Verify an indirect store (st*ind): 'tiTo' is the pointer stored through,
// 'value' is the value being stored, and 'instrType' is the type implied by
// the opcode. Returns the type the pointer refers to.
typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
{
    Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
    // Reuse the LDIND checks to validate the pointer itself.
    typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
    typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
    if (!tiCompatibleWith(value, normPtrVal, true))
    {
        // Report the mismatch but keep importing, recording that an
        // unverifiable cast was seen.
        // NOTE(review): tiCompatibleWith is evaluated a second time here only
        // to feed Verify's failure path; presumably it is side-effect free —
        // confirm before simplifying to Verify(false, ...).
        Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
        compUnsafeCastUsed = true;
    }
    return ptrVal;
}
// Verify an indirect load (ld*ind): 'ptr' must be a byref whose referent is
// consistent with the type implied by the opcode. Returns the referent type.
typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
{
    assert(!instrType.IsStruct());

    typeInfo ptrVal;

    if (!ptr.IsByRef())
    {
        Verify(false, "pointer not byref");
        compUnsafeCastUsed = true;
        return ptrVal; // default-constructed result
    }

    ptrVal = DereferenceByRef(ptr);

    if (instrType.IsObjRef())
    {
        // An object-ref load must see an object-ref referent.
        if (!ptrVal.IsObjRef())
        {
            Verify(false, "bad pointer");
            compUnsafeCastUsed = true;
        }
    }
    else if (!typeInfo::AreEquivalent(instrType, ptrVal))
    {
        // Non-objref loads require an exact type match with the referent.
        Verify(false, "pointer not consistent with instr");
        compUnsafeCastUsed = true;
    }

    return ptrVal;
}
// Verify that the field is used properly. 'tiThis' is NULL for statics,
// 'fieldInfo.fieldFlags' gives the field's attributes, and 'mutator' is TRUE
// if the access is a ld*flda or a st*fld.
// 'enclosingClass' is given if we are accessing a field in some specific type.
void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                              const CORINFO_FIELD_INFO& fieldInfo,
                              const typeInfo* tiThis,
                              BOOL mutator,
                              BOOL allowPlainStructAsThis)
{
    CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
    unsigned fieldFlags = fieldInfo.fieldFlags;
    CORINFO_CLASS_HANDLE instanceClass =
        info.compClassHnd; // for statics, we imagine the instance is the current class.

    bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);

    if (mutator)
    {
        Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA bases static");
        if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
        {
            // initonly fields may only be written (or have their address
            // taken) inside the declaring class's own constructor of the
            // matching staticness.
            Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
                       info.compIsStatic == isStaticField,
                   "bad use of initonly field (set or address taken)");
        }
    }

    if (tiThis == nullptr)
    {
        Verify(isStaticField, "used static opcode with non-static field");
    }
    else
    {
        typeInfo tThis = *tiThis;

        if (allowPlainStructAsThis && tThis.IsValueClass())
        {
            tThis.MakeByRef();
        }

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis->IsType(TI_REF))
        {
            instanceClass = tiThis->GetClassHandleForObjRef();
        }

        // Note that even if the field is static, we require that the this pointer
        // satisfy the same constraints as a non-static field. This happens to
        // be simpler and seems reasonable
        typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();

            // we allow read-only tThis, on any field access (even stores!), because if the
            // class implementor wants to prohibit stores he should make the field private.
            // we do this by setting the read-only bit on the type we compare tThis to.
            tiDeclaredThis.SetIsReadonlyByRef();
        }
        else if (verTrackObjCtorInitState && tThis.IsThisPtr())
        {
            // Any field access is legal on "uninitialized" this pointers.
            // The easiest way to implement this is to simply set the
            // initialized bit for the duration of the type check on the
            // field access only. It does not change the state of the "this"
            // for the function as a whole. Note that the "tThis" is a copy
            // of the original "this" type (*tiThis) passed in.
            tThis.SetInitialisedObjRef();
        }

        Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
    }

    // Presently the JIT does not check that we don't store or take the address of init-only fields
    // since we cannot guarantee their immutability and it is not a security issue.

    // check any constraints on the fields's class --- accessing the field might cause a class constructor to run.
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
                   "field has unsatisfied class constraints");
    if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
    {
        // Protected fields require the instance to be a subclass of the
        // accessing method's class.
        Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
               "Accessing protected method through wrong type.");
    }
}
//------------------------------------------------------------------------
// verVerifyCond: verify the operand types of a comparison or conditional
// branch opcode.
//
void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
{
    if (tiOp1.IsNumberType())
    {
#ifdef TARGET_64BIT
        Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
#else  // TARGET_64BIT
        // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
        // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
        // but compatible, since we can coalesce native int with int32 (see section III.1.5).
        Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
#endif // !TARGET_64BIT
        return;
    }

    if (tiOp1.IsObjRef())
    {
        // Object references admit only the (in)equality-style comparisons.
        switch (opcode)
        {
            case CEE_BEQ_S:
            case CEE_BEQ:
            case CEE_BNE_UN_S:
            case CEE_BNE_UN:
            case CEE_CEQ:
            case CEE_CGT_UN:
                break;
            default:
                Verify(FALSE, "Cond not allowed on object types");
        }
        Verify(tiOp2.IsObjRef(), "Cond type mismatch");
        return;
    }

    if (tiOp1.IsByRef())
    {
        Verify(tiOp2.IsByRef(), "Cond type mismatch");
        return;
    }

    // Anything else must be a pair of method pointers.
    Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
}
// When 'this'-initialization tracking is active (i.e. in a .ctor being
// verified), check that 'this' has been initialized at this point.
void Compiler::verVerifyThisPtrInitialised()
{
    if (!verTrackObjCtorInitState)
    {
        return; // nothing to check outside a tracked constructor
    }

    Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
}
// Returns TRUE if a .ctor call from 'context' to 'target' initializes 'this':
// either target == context (chaining to an alternate .ctor), or target is the
// immediate parent of context (base-class .ctor).
BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
{
    if (target == context)
    {
        return TRUE;
    }

    return target == info.compCompHnd->getParentType(context);
}
//------------------------------------------------------------------------
// impImportLdvirtftn: import LDVIRTFTN, producing a tree that computes the
// address of the virtual method slot for 'thisPtr'.
//
// Arguments:
//    thisPtr        - tree for the object whose virtual method is looked up
//    pResolvedToken - resolved token for the target method
//    pCallInfo      - EE-provided call info for the method
//
// Return Value:
//    Tree yielding the function pointer, or nullptr if importation must be
//    aborted (compDonotInline).
//
GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr,
                                      CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                      CORINFO_CALL_INFO* pCallInfo)
{
    if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
    {
        NO_WAY("Virtual call to a function added via EnC is not supported");
    }

    // CoreRT generic virtual method: resolve the slot at runtime via the
    // GVM lookup helper, passing the runtime method handle.
    if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
    {
        GenTree* runtimeMethodHandle =
            impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod);
        return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
                                   gtNewCallArgs(thisPtr, runtimeMethodHandle));
    }

#ifdef FEATURE_READYTORUN_COMPILER
    if (opts.IsReadyToRun())
    {
        if (!pCallInfo->exactContextNeedsRuntimeLookup)
        {
            // Fixed entry point known at compile time: use the R2R helper
            // with the precomputed lookup.
            GenTreeCall* call =
                gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr));
            call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
            return call;
        }

        // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
        if (IsTargetAbi(CORINFO_CORERT_ABI))
        {
            GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);

            return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
                                             gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
        }
    }
#endif

    // Get the exact descriptor for the static callsite
    GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
    if (exactTypeDesc == nullptr)
    { // compDonotInline()
        return nullptr;
    }

    GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
    if (exactMethodDesc == nullptr)
    { // compDonotInline()
        return nullptr;
    }

    // Helper args are prepended, so the final order is (thisPtr, type, method).
    GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc);

    helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs);

    helpArgs = gtPrependNewCallArg(thisPtr, helpArgs);

    // Call helper function. This gets the target address of the final destination callsite.

    return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}
//------------------------------------------------------------------------
// impBoxPatternMatch: match and import common box idioms
//
// Arguments:
// pResolvedToken - resolved token from the box operation
// codeAddr - position in IL stream after the box instruction
// codeEndp - end of IL stream
//
// Return Value:
// Number of IL bytes matched and imported, -1 otherwise
//
// Notes:
// pResolvedToken is known to be a value type; ref type boxing
// is handled in the CEE_BOX clause.
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp)
{
    if (codeAddr >= codeEndp)
    {
        return -1;
    }

    // Peek at the IL following the box to recognize idioms that let the
    // box itself be elided or folded.
    switch (codeAddr[0])
    {
        case CEE_UNBOX_ANY:
            // box + unbox.any
            if (codeAddr + 1 + sizeof(mdToken) <= codeEndp)
            {
                CORINFO_RESOLVED_TOKEN unboxResolvedToken;

                impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);

                // See if the resolved tokens describe types that are equal.
                const TypeCompareState compare =
                    info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass);

                // If so, box/unbox.any is a nop.
                if (compare == TypeCompareState::Must)
                {
                    JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
                    // Skip the next unbox.any instruction
                    return 1 + sizeof(mdToken);
                }
            }
            break;

        case CEE_BRTRUE:
        case CEE_BRTRUE_S:
        case CEE_BRFALSE:
        case CEE_BRFALSE_S:
            // box + br_true/false
            if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
            {
                GenTree* const treeToBox = impStackTop().val;
                bool canOptimize = true;
                GenTree* treeToNullcheck = nullptr;

                // Can the thing being boxed cause a side effect?
                if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0)
                {
                    // Is this a side effect we can replicate cheaply?
                    if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) &&
                        treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND))
                    {
                        // Yes, we just need to perform a null check if needed.
                        GenTree* const addr = treeToBox->AsOp()->gtGetOp1();
                        if (fgAddrCouldBeNull(addr))
                        {
                            treeToNullcheck = addr;
                        }
                    }
                    else
                    {
                        canOptimize = false;
                    }
                }

                if (canOptimize)
                {
                    CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
                    if (boxHelper == CORINFO_HELP_BOX)
                    {
                        // A normally-boxed value is never null, so the null
                        // test folds to the constant 1 (plus any needed
                        // null check on the source address).
                        JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n",
                                treeToNullcheck == nullptr ? "" : "nullcheck+");

                        impPopStack();

                        GenTree* result = gtNewIconNode(1);

                        if (treeToNullcheck != nullptr)
                        {
                            GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB);
                            result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result);
                        }

                        impPushOnStack(result, typeInfo(TI_INT));
                        return 0;
                    }
                }
            }
            break;

        case CEE_ISINST:
            if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp)
            {
                const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken);

                switch (nextCodeAddr[0])
                {
                    // box + isinst + br_true/false
                    case CEE_BRTRUE:
                    case CEE_BRTRUE_S:
                    case CEE_BRFALSE:
                    case CEE_BRFALSE_S:
                        if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
                        {
                            if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT))
                            {
                                CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
                                if (boxHelper == CORINFO_HELP_BOX)
                                {
                                    CORINFO_RESOLVED_TOKEN isInstResolvedToken;

                                    impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);

                                    TypeCompareState castResult =
                                        info.compCompHnd->compareTypesForCast(pResolvedToken->hClass,
                                                                              isInstResolvedToken.hClass);

                                    // Fold only if the cast outcome is statically known either way.
                                    if (castResult != TypeCompareState::May)
                                    {
                                        JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n");
                                        impPopStack();

                                        impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0),
                                                       typeInfo(TI_INT));

                                        // Skip the next isinst instruction
                                        return 1 + sizeof(mdToken);
                                    }
                                }
                            }
                        }
                        break;

                    // box + isinst + unbox.any
                    case CEE_UNBOX_ANY:
                        if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp)
                        {
                            // See if the resolved tokens in box, isinst and unbox.any describe types that are equal.
                            CORINFO_RESOLVED_TOKEN isinstResolvedToken = {};
                            impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class);

                            if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass,
                                                                          pResolvedToken->hClass) ==
                                TypeCompareState::Must)
                            {
                                CORINFO_RESOLVED_TOKEN unboxResolvedToken = {};
                                impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);

                                // If so, box + isinst + unbox.any is a nop.
                                if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass,
                                                                              pResolvedToken->hClass) ==
                                    TypeCompareState::Must)
                                {
                                    JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n");
                                    // Skip both following instructions.
                                    return 2 + sizeof(mdToken) * 2;
                                }
                            }
                        }
                        break;
                }
            }
            break;

        default:
            break;
    }

    return -1;
}
//------------------------------------------------------------------------
// impImportAndPushBox: build and import a value-type box
//
// Arguments:
//   pResolvedToken - resolved token from the box operation
//
// Return Value:
//   None.
//
// Side Effects:
//   The value to be boxed is popped from the stack, and a tree for
//   the boxed value is pushed. This method may create upstream
//   statements, spill side effecting trees, and create new temps.
//
//   If importing an inlinee, we may also discover the inline must
//   fail. If so there is no new value pushed on the stack. Callers
//   should use CompDoNotInline after calling this method to see if
//   ongoing importation should be aborted.
//
// Notes:
//   Boxing of ref classes results in the same value as the value on
//   the top of the stack, so is handled inline in impImportBlockCode
//   for the CEE_BOX case. Only value or primitive type boxes make it
//   here.
//
//   Boxing for nullable types is done via a helper call; boxing
//   of other value types is expanded inline or handled via helper
//   call, depending on the jit's codegen mode.
//
//   When the jit is operating in size and time constrained modes,
//   using a helper call here can save jit time and code size. But it
//   also may inhibit cleanup optimizations that could have also had a
//   even greater benefit effect on code size and jit time. An optimal
//   strategy may need to peek ahead and see if it is easy to tell how
//   the box is being used. For now, we defer.
void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
    // Spill any special side effects
    impSpillSpecialSideEff();
    // Get the expression to box from the stack.
    GenTree*             op1       = nullptr;
    GenTree*             op2       = nullptr;
    StackEntry           se        = impPopStack();
    CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
    GenTree*             exprToBox = se.val;
    // Look at what helper we should use.
    CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
    // Determine what expansion to prefer.
    //
    // In size/time/debuggable constrained modes, the helper call
    // expansion for box is generally smaller and is preferred, unless
    // the value to box is a struct that comes from a call. In that
    // case the call can construct its return value directly into the
    // box payload, saving possibly some up-front zeroing.
    //
    // Currently primitive type boxes always get inline expanded. We may
    // want to do the same for small structs if they don't come from
    // calls and don't have GC pointers, since explicitly copying such
    // structs is cheap.
    JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
    // CORINFO_HELP_BOX is the "plain" box helper; anything else (e.g. the
    // nullable box helper) cannot be expanded inline.
    bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
    bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled();
    bool expandInline    = canExpandInline && !optForSize;
    if (expandInline)
    {
        JITDUMP(" inline allocate/copy sequence\n");
        // we are doing 'normal' boxing.  This means that we can inline the box operation
        // Box(expr) gets morphed into
        // temp = new(clsHnd)
        // cpobj(temp+4, expr, clsHnd)
        // push temp
        // The code paths differ slightly below for structs and primitives because
        // "cpobj" differs in these cases.  In one case you get
        //    impAssignStructPtr(temp+4, expr, clsHnd)
        // and the other you get
        //    *(temp+4) = expr
        if (opts.OptimizationDisabled())
        {
            // For minopts/debug code, try and minimize the total number
            // of box temps by reusing an existing temp when possible.
            if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
            {
                impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
            }
        }
        else
        {
            // When optimizing, use a new temp for each box operation
            // since we then know the exact class of the box temp.
            impBoxTemp                       = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
            lvaTable[impBoxTemp].lvType      = TYP_REF;
            lvaTable[impBoxTemp].lvSingleDef = 1;
            JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
            const bool isExact = true;
            lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
        }
        // needs to stay in use until this box expression is appended
        // some other node.  We approximate this by keeping it alive until
        // the opcode stack becomes empty
        impBoxTempInUse = true;
        // NOTE(review): useParent semantics assumed to mean "allocate the class
        // from the token itself, not its parent" -- confirm against gtNewAllocObjNode.
        const BOOL useParent = FALSE;
        op1                  = gtNewAllocObjNode(pResolvedToken, useParent);
        if (op1 == nullptr)
        {
            // Allocation node creation failed (e.g. mid-inline abort); nothing pushed.
            return;
        }
        /* Remember that this basic block contains 'new' of an object, and so does this method */
        compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
        optMethodFlags |= OMF_HAS_NEWOBJ;
        // First statement: assign the freshly allocated object to the box temp.
        GenTree*   asg     = gtNewTempAssign(impBoxTemp, op1);
        Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
        // Compute the address of the box payload: temp + sizeof(void*) skips the
        // method table pointer at the start of the object.
        op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
        op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
        op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
        if (varTypeIsStruct(exprToBox))
        {
            // The payload must be exactly as large as the struct being boxed.
            assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
            op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
        }
        else
        {
            var_types lclTyp = exprToBox->TypeGet();
            if (lclTyp == TYP_BYREF)
            {
                lclTyp = TYP_I_IMPL;
            }
            CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
            if (impIsPrimitive(jitType))
            {
                lclTyp = JITtype2varType(jitType);
            }
            // Src and dst types must agree up to actual-type widening, except that a
            // float/double mismatch is allowed and fixed up with a cast below.
            assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
                   varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
            var_types srcTyp = exprToBox->TypeGet();
            var_types dstTyp = lclTyp;
            if (srcTyp != dstTyp)
            {
                assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
                       (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
                exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
            }
            op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
        }
        // Spill eval stack to flush out any pending side effects.
        impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
        // Set up this copy as a second assignment.
        Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
        op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
        // Record that this is a "box" node and keep track of the matching parts.
        op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
        // If it is a value class, mark the "box" node.  We can use this information
        // to optimise several cases:
        //    "box(x) == null" --> false
        //    "(box(x)).CallAnInterfaceMethod(...)" -->  "(&x).CallAValueTypeMethod"
        //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
        op1->gtFlags |= GTF_BOX_VALUE;
        assert(op1->IsBoxedValue());
        assert(asg->gtOper == GT_ASG);
    }
    else
    {
        // Don't optimize, just call the helper and be done with it.
        // Reason string: if the plain helper was usable, we must be here
        // because of optForSize; otherwise the type needed the nullable helper.
        JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
        assert(operCls != nullptr);
        // Ensure that the value class is restored
        op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
        if (op2 == nullptr)
        {
            // We must be backing out of an inline.
            assert(compDonotInline());
            return;
        }
        GenTreeCall::Use* args =
            gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
        op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
    }
    /* Push the result back on the stack, */
    /* even if clsHnd is a value class we want the TI_REF */
    typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
    impPushOnStack(op1, tiRetVal);
}
//------------------------------------------------------------------------
// impImportNewObjArray: Build and import `new` of multi-dimmensional array
//
// Arguments:
//    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
//                     by a call to CEEInfo::resolveToken().
//    pCallInfo - The CORINFO_CALL_INFO that has been initialized
//                by a call to CEEInfo::getCallInfo().
//
// Assumptions:
//    The multi-dimensional array constructor arguments (array dimensions) are
//    pushed on the IL stack on entry to this method.
//
// Notes:
//    Multi-dimensional array constructors are imported as calls to a JIT
//    helper, not as regular calls. The result (a TYP_REF to the new array)
//    is pushed on the IL stack.
void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
    if (classHandle == nullptr)
    { // compDonotInline()
        return;
    }
    assert(pCallInfo->sig.numArgs);
    GenTree* node;
    //
    // There are two different JIT helpers that can be used to allocate
    // multi-dimensional arrays:
    //
    // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
    //      This variant is deprecated. It should be eventually removed.
    //
    // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
    //      pointer to block of int32s. This variant is more portable.
    //
    // The non-varargs helper is enabled for CoreRT only for now. Enabling this
    // unconditionally would require ReadyToRun version bump.
    //
    CLANG_FORMAT_COMMENT_ANCHOR;
    if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
    {
        // Reuse the temp used to pass the array dimensions to avoid bloating
        // the stack frame in case there are multiple calls to multi-dim array
        // constructors within a single method.
        if (lvaNewObjArrayArgs == BAD_VAR_NUM)
        {
            lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
            lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
            lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
        }
        // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
        // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
        lvaTable[lvaNewObjArrayArgs].lvExactSize =
            max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
        // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
        // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
        // to one allocation at a time.
        impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
        //
        // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
        //  - Array class handle
        //  - Number of dimension arguments
        //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
        //
        node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
        node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
        // Pop dimension arguments from the stack one at a time and store it
        // into lvaNewObjArrayArgs temp. Each store is prepended via GT_COMMA so
        // that, when the tree is evaluated, the stores execute before the
        // address of the temp is produced as the helper argument.
        for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
        {
            GenTree* arg  = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
            GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
            dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
            dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
            dest = gtNewOperNode(GT_IND, TYP_INT, dest);
            node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
        }
        GenTreeCall::Use* args = gtNewCallArgs(node);
        // pass number of arguments to the helper
        args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args);
        args = gtPrependNewCallArg(classHandle, args);
        node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
    }
    else
    {
        //
        // The varargs helper needs the type and method handles as last
        // and  last-1 param (this is a cdecl call, so args will be
        // pushed in reverse order on the CPU stack)
        //
        GenTreeCall::Use* args = gtNewCallArgs(classHandle);
        // pass number of arguments to the helper
        args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args);
        args = impPopCallArgs(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
        node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
        // varargs, so we pop the arguments
        node->gtFlags |= GTF_CALL_POP_ARGS;
#ifdef DEBUG
        // At the present time we don't track Caller pop arguments
        // that have GC references in them
        for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
        {
            assert(use.GetNode()->TypeGet() != TYP_REF);
        }
#endif
    }
    // Propagate side-effect flags from the arguments up to the call node.
    for (GenTreeCall::Use& use : node->AsCall()->Args())
    {
        node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
    }
    node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
    // Remember that this basic block contains 'new' of a md array
    compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
    impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
}
//------------------------------------------------------------------------
// impTransformThis: transform the `this` pointer for a constrained call
//    as directed by the EE-supplied transform kind.
//
// Arguments:
//    thisPtr                   - tree for the incoming this pointer
//                                (a byref or native int pointing at the value)
//    pConstrainedResolvedToken - resolved token for the constraint type
//    transform                 - which transformation the EE requested
//
// Return Value:
//    Tree for the transformed this pointer, or nullptr if boxing caused
//    an ongoing inline to be aborted (callers must check compDonotInline).
GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
                                    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                    CORINFO_THIS_TRANSFORM  transform)
{
    switch (transform)
    {
        case CORINFO_DEREF_THIS:
        {
            GenTree* obj = thisPtr;
            // This does a LDIND on the obj, which should be a byref. pointing to a ref
            impBashVarAddrsToI(obj);
            assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
            CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
            obj                       = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
            // ldind could point anywhere, example a boxed class static int
            obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
            return obj;
        }
        case CORINFO_BOX_THIS:
        {
            // Constraint calls where there might be no
            // unboxed entry point require us to implement the call via helper.
            // These only occur when a possible target of the call
            // may have inherited an implementation of an interface
            // method from System.Object or System.ValueType.  The EE does not provide us with
            // "unboxed" versions of these methods.
            GenTree* obj = thisPtr;
            assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
            obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
            obj->gtFlags |= GTF_EXCEPT;
            CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
            var_types   objType = JITtype2varType(jitTyp);
            if (impIsPrimitive(jitTyp))
            {
                // For primitives, reduce the OBJ node to a plain indirection of
                // the primitive type.
                if (obj->OperIsBlk())
                {
                    obj->ChangeOperUnchecked(GT_IND);
                    // Obj could point anywhere, example a boxed class static int
                    obj->gtFlags |= GTF_IND_TGTANYWHERE;
                    obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers
                }
                obj->gtType = JITtype2varType(jitTyp);
                assert(varTypeIsArithmetic(obj->gtType));
            }
            // This pushes on the dereferenced byref
            // This is then used immediately to box.
            impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
            // This pops off the byref-to-a-value-type remaining on the stack and
            // replaces it with a boxed object.
            // This is then used as the object to the virtual call immediately below.
            impImportAndPushBox(pConstrainedResolvedToken);
            if (compDonotInline())
            {
                // Boxing aborted an inline attempt; nothing valid to return.
                return nullptr;
            }
            obj = impPopStack().val;
            return obj;
        }
        case CORINFO_NO_THIS_TRANSFORM:
        default:
            return thisPtr;
    }
}
//------------------------------------------------------------------------
// impCanPInvokeInline: check whether PInvoke inlining should enabled in current method.
//
// Return Value:
//    true if PInvoke inlining should be enabled in current method, false otherwise
//
// Notes:
//    Checks a number of ambient conditions where we could pinvoke but choose not to
bool Compiler::impCanPInvokeInline()
{
    // The runtime must have inline pinvokes enabled at all.
    if (!getInlinePInvokeEnabled())
    {
        return false;
    }
    // Avoid the inline expansion when generating debuggable or size-optimized code.
    if (opts.compDbgCode || (compCodeOpt() == SMALL_CODE))
    {
        return false;
    }
    // A profiler callback may also be preventing inline pinvoke.
    return !opts.compNoPInvokeInlineCB;
}
//------------------------------------------------------------------------
// impCanPInvokeInlineCallSite: basic legality checks using information
// from a call to see if the call qualifies as an inline pinvoke.
//
// Arguments:
//    block      - block contaning the call, or for inlinees, block
//                 containing the call being inlined
//
// Return Value:
//    true if this call can legally qualify as an inline pinvoke, false otherwise
//
// Notes:
//    For runtimes that support exception handling interop there are
//    restrictions on using inline pinvoke in handler regions.
//
//    * We have to disable pinvoke inlining inside of filters because
//    in case the main execution (i.e. in the try block) is inside
//    unmanaged code, we cannot reuse the inlined stub (we still need
//    the original state until we are in the catch handler)
//
//    * We disable pinvoke inlining inside handlers since the GSCookie
//    is in the inlined Frame (see
//    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
//    this would not protect framelets/return-address of handlers.
//
//    These restrictions are currently also in place for CoreCLR but
//    can be relaxed when coreclr/#8459 is addressed.
bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
{
    // No inline pinvoke anywhere inside a handler region -- see notes above.
    if (block->hasHndIndex())
    {
        return false;
    }
    // The remaining limitations do not apply to CoreRT
    if (IsTargetAbi(CORINFO_CORERT_ABI))
    {
        return true;
    }
#ifdef TARGET_64BIT
    // On 64-bit platforms, we disable pinvoke inlining inside of try regions.
    // Note that this could be needed on other architectures too, but we
    // haven't done enough investigation to know for sure at this point.
    //
    // Here is the comment from JIT64 explaining why:
    //   [VSWhidbey: 611015] - because the jitted code links in the
    //   Frame (instead of the stub) we rely on the Frame not being
    //   'active' until inside the stub.  This normally happens by the
    //   stub setting the return address pointer in the Frame object
    //   inside the stub.  On a normal return, the return address
    //   pointer is zeroed out so the Frame can be safely re-used, but
    //   if an exception occurs, nobody zeros out the return address
    //   pointer.  Thus if we re-used the Frame object, it would go
    //   'active' as soon as we link it into the Frame chain.
    //
    //   Technically we only need to disable PInvoke inlining if we're
    //   in a handler or if we're in a try body with a catch or
    //   filter/except where other non-handler code in this method
    //   might run and try to re-use the dirty Frame object.
    //
    //   A desktop test case where this seems to matter is
    //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
    if (block->hasTryIndex())
    {
        // This does not apply to the raw pinvoke call that is inside the pinvoke
        // ILStub. In this case, we have to inline the raw pinvoke call into the stub,
        // otherwise we would end up with a stub that recursively calls itself, and end
        // up with a stack overflow.
        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
        {
            return true;
        }
        return false;
    }
#endif // TARGET_64BIT
    return true;
}
//------------------------------------------------------------------------
// impCheckForPInvokeCall examine call to see if it is a pinvoke and if so
// if it can be expressed as an inline pinvoke.
//
// Arguments:
//    call - tree for the call
//    methHnd - handle for the method being called (may be null)
//    sig - signature of the method being called
//    mflags - method flags for the method being called
//    block - block contaning the call, or for inlinees, block
//    containing the call being inlined
//
// Notes:
//   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
//
//   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
//   call passes a combination of legality and profitabilty checks.
//
//   If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition
void Compiler::impCheckForPInvokeCall(
    GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
{
    CorInfoCallConvExtension unmanagedCallConv;
    // If VM flagged it as Pinvoke, flag the call node accordingly
    if ((mflags & CORINFO_FLG_PINVOKE) != 0)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
    }
    bool suppressGCTransition = false;
    if (methHnd)
    {
        // Direct call: only pinvoke-flagged methods are candidates.
        if ((mflags & CORINFO_FLG_PINVOKE) == 0)
        {
            return;
        }
        unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition);
    }
    else
    {
        // Indirect (CALLI) call: managed calling conventions are not pinvokes.
        if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG)
        {
            return;
        }
        unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition);
        assert(!call->gtCallCookie);
    }
    if (suppressGCTransition)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION;
    }
    // Only C, stdcall, and thiscall conventions are inlined below.
    if (unmanagedCallConv != CorInfoCallConvExtension::C && unmanagedCallConv != CorInfoCallConvExtension::Stdcall &&
        unmanagedCallConv != CorInfoCallConvExtension::Thiscall)
    {
        return;
    }
    optNativeCallCount++;
    if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
    {
        // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been
        // converted to regular method calls earlier using convertPInvokeCalliToCall.
        // PInvoke CALLI in IL stubs must be inlined
    }
    else
    {
        // Check legality
        if (!impCanPInvokeInlineCallSite(block))
        {
            return;
        }
        // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
        // inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
        if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
        {
            if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
            {
                // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite
                // recursive calls to the stub.
            }
            else
            {
                if (!impCanPInvokeInline())
                {
                    return;
                }
                // Size-speed tradeoff: don't use inline pinvoke at rarely
                // executed call sites.  The non-inline version is more
                // compact.
                if (block->isRunRarely())
                {
                    return;
                }
            }
        }
        // The expensive check should be last
        if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
        {
            return;
        }
    }
    // NOTE(review): this log line says "CALLI PINVOKE" even for direct
    // (methHnd != nullptr) pinvoke calls -- message may be misleading.
    JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
    call->gtFlags |= GTF_CALL_UNMANAGED;
    call->unmgdCallConv = unmanagedCallConv;
    if (!call->IsSuppressGCTransition())
    {
        info.compUnmanagedCallCountWithGCTransition++;
    }
    // AMD64 convention is same for native and managed
    if (unmanagedCallConv == CorInfoCallConvExtension::C)
    {
        call->gtFlags |= GTF_CALL_POP_ARGS;
    }
    if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
    }
}
//------------------------------------------------------------------------
// impImportIndirectCall: create a call node for a call through a function
//    pointer (CALLI).
//
// Arguments:
//    sig      - signature of the call target
//    ilOffset - IL offset to associate with the call
//
// Return Value:
//    New indirect-call node; arguments are not attached here.
//
// Side Effects:
//    Pops the function pointer from the IL stack, possibly spilling it
//    to a temp first.
GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
{
    var_types callRetTyp = JITtype2varType(sig->retType);
    /* The function pointer is on top of the stack - It may be a
     * complex expression. As it is evaluated after the args,
     * it may cause registered args to be spilled. Simply spill it.
     */
    // Ignore this trivial case.
    if (impStackTop().val->gtOper != GT_LCL_VAR)
    {
        impSpillStackEntry(verCurrentState.esStackDepth - 1,
                           BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
    }
    /* Get the function pointer */
    GenTree* fptr = impPopStack().val;
    // The function pointer is typically a sized to match the target pointer size
    // However, stubgen IL optimization can change LDC.I8 to LDC.I4
    // See ILCodeStream::LowerOpcode
    assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
#ifdef DEBUG
    // This temporary must never be converted to a double in stress mode,
    // because that can introduce a call to the cast helper after the
    // arguments have already been evaluated.
    if (fptr->OperGet() == GT_LCL_VAR)
    {
        lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1;
    }
#endif
    /* Create the call node */
    GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
    // Indirect calls can throw (bad fptr) and inherit any global effects
    // from the function-pointer expression.
    call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
    return call;
}
/*****************************************************************************/
//------------------------------------------------------------------------
// impPopArgsForUnmanagedCall: pop IL-stack arguments for an inline pinvoke
//    call, spilling side effects as needed so that reverse-order evaluation
//    is safe, and attach them to the call.
//
// Arguments:
//    call - the unmanaged call node (GTF_CALL_UNMANAGED must be set)
//    sig  - signature of the unmanaged target
void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
{
    assert(call->gtFlags & GTF_CALL_UNMANAGED);
    /* Since we push the arguments in reverse order (i.e. right -> left)
     * spill any side effects from the stack
     *
     * OBS: If there is only one side effect we do not need to spill it
     *      thus we have to spill all side-effects except last one
     */
    unsigned lastLevelWithSideEffects = UINT_MAX;
    unsigned argsToReverse = sig->numArgs;
    // For "thiscall", the first argument goes in a register. Since its
    // order does not need to be changed, we do not need to spill it
    if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
    {
        assert(argsToReverse);
        argsToReverse--;
    }
#ifndef TARGET_X86
    // Don't reverse args on ARM or x64 - first four args always placed in regs in order
    argsToReverse = 0;
#endif
    // Walk the stack entries that will be reordered and spill all but the
    // last side-effecting one, so evaluation order is preserved.
    for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
    {
        if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
        {
            // Ordering side effects (e.g. volatile) must always be spilled.
            assert(lastLevelWithSideEffects == UINT_MAX);
            impSpillStackEntry(level,
                               BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
        }
        else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
        {
            if (lastLevelWithSideEffects != UINT_MAX)
            {
                /* We had a previous side effect - must spill it */
                impSpillStackEntry(lastLevelWithSideEffects,
                                   BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
                /* Record the level for the current side effect in case we will spill it */
                lastLevelWithSideEffects = level;
            }
            else
            {
                /* This is the first side effect encountered - record its level */
                lastLevelWithSideEffects = level;
            }
        }
    }
    /* The argument list is now "clean" - no out-of-order side effects
     * Pop the argument list in reverse order */
    GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse);
    call->AsCall()->gtCallArgs = args;
    if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
    {
        GenTree* thisPtr = args->GetNode();
        impBashVarAddrsToI(thisPtr);
        assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
    }
    for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args))
    {
        GenTree* arg = argUse.GetNode();
        call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT;
        // We should not be passing gc typed args to an unmanaged call.
        if (varTypeIsGC(arg->TypeGet()))
        {
            // Tolerate byrefs by retyping to native int.
            //
            // This is needed or we'll generate inconsistent GC info
            // for this arg at the call site (gc info says byref,
            // pinvoke sig says native int).
            //
            if (arg->TypeGet() == TYP_BYREF)
            {
                arg->ChangeType(TYP_I_IMPL);
            }
            else
            {
                assert(!"*** invalid IL: gc ref passed to unmanaged call");
            }
        }
    }
}
//------------------------------------------------------------------------
// impInitClass: Build a node to initialize the class before accessing the
//               field if necessary
//
// Arguments:
//    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
//                     by a call to CEEInfo::resolveToken().
//
// Return Value: If needed, a pointer to the node that will perform the class
//               initializtion.  Otherwise, nullptr.
//
GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
    // NOTE(review): the field handle (hField) is passed so the EE can decide
    // whether this particular field access requires running the cctor --
    // confirm against ICorStaticInfo::initClass.
    CorInfoInitClassResult initClassResult =
        info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
    if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
    {
        // No helper call needed -- either already initialized or handled elsewhere.
        return nullptr;
    }
    BOOL runtimeLookup;
    GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
    if (node == nullptr)
    {
        assert(compDonotInline());
        return nullptr;
    }
    if (runtimeLookup)
    {
        // Class handle is only known at runtime: call the generic init helper.
        node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node));
    }
    else
    {
        // Call the shared non gc static helper, as its the fastest
        node = fgGetSharedCCtor(pResolvedToken->hClass);
    }
    return node;
}
//------------------------------------------------------------------------
// impImportStaticReadOnlyField: materialize the compile-time value of a
//    read-only static field as a constant node.
//
// Arguments:
//    fldAddr - address of the field's data
//    lclTyp  - type of the field
//
// Return Value:
//    Constant node holding the field's current value, or nullptr for an
//    unexpected type.
GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
{
    GenTree* tree = nullptr;
    switch (lclTyp)
    {
        // Small integer types are widened to int and emitted as int constants.
        case TYP_BOOL:
            tree = gtNewIconNode(*((bool*)fldAddr));
            break;
        case TYP_BYTE:
            tree = gtNewIconNode(*((signed char*)fldAddr));
            break;
        case TYP_UBYTE:
            tree = gtNewIconNode(*((unsigned char*)fldAddr));
            break;
        case TYP_SHORT:
            tree = gtNewIconNode(*((short*)fldAddr));
            break;
        case TYP_USHORT:
            tree = gtNewIconNode(*((unsigned short*)fldAddr));
            break;
        case TYP_UINT:
        case TYP_INT:
            tree = gtNewIconNode(*((int*)fldAddr));
            break;
        case TYP_LONG:
        case TYP_ULONG:
            tree = gtNewLconNode(*((__int64*)fldAddr));
            break;
        case TYP_FLOAT:
            // Float constants are stored in a double-typed node; retype it.
            tree         = gtNewDconNode(*((float*)fldAddr));
            tree->gtType = TYP_FLOAT;
            break;
        case TYP_DOUBLE:
            tree = gtNewDconNode(*((double*)fldAddr));
            break;
        default:
            assert(!"Unexpected lclTyp");
            break;
    }
    return tree;
}
//------------------------------------------------------------------------
// impImportStaticFieldAccess: build a tree yielding a static field's
//    address or value, using the access strategy chosen by the EE.
//
// Arguments:
//    pResolvedToken - resolved field token
//    access         - access flags; CORINFO_ACCESS_ADDRESS means the caller
//                     wants the field's address rather than its value
//    pFieldInfo     - field info from the EE (fieldAccessor selects strategy)
//    lclTyp         - type of the field
//
// Return Value:
//    Tree for the field address or value.
GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                              CORINFO_ACCESS_FLAGS    access,
                                              CORINFO_FIELD_INFO*     pFieldInfo,
                                              var_types               lclTyp)
{
    GenTree* op1;
    switch (pFieldInfo->fieldAccessor)
    {
        case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
        {
            assert(!compIsForInlining());
            // We first call a special helper to get the statics base pointer
            op1 = impParentClassTokenToHandle(pResolvedToken);
            // compIsForInlining() is false so we should not get NULL here
            assert(op1 != nullptr);
            // Thread-local non-GC statics return a native int base; the
            // other generics statics helpers return a byref.
            var_types type = TYP_BYREF;
            switch (pFieldInfo->helper)
            {
                case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
                    type = TYP_I_IMPL;
                    break;
                case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
                case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
                case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
                    break;
                default:
                    assert(!"unknown generic statics helper");
                    break;
            }
            op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1));
            // Add the field's offset (annotated with a field sequence) to the base.
            FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
            op1              = gtNewOperNode(GT_ADD, type, op1,
                                new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
        }
        break;
        case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
        {
#ifdef FEATURE_READYTORUN_COMPILER
            if (opts.IsReadyToRun())
            {
                unsigned callFlags = 0;
                // BeforeFieldInit classes allow the base-getter call to be
                // hoisted out of loops.
                if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
                {
                    callFlags |= GTF_CALL_HOISTABLE;
                }
                op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
                op1->gtFlags |= callFlags;
                op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup);
            }
            else
#endif
            {
                op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
            }
            {
                // Add the field offset to the statics base.
                FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
                op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
                                    new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
            }
            break;
        }
        case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
        {
#ifdef FEATURE_READYTORUN_COMPILER
            assert(opts.IsReadyToRun());
            assert(!compIsForInlining());
            CORINFO_LOOKUP_KIND kind;
            info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);
            assert(kind.needsRuntimeLookup);
            // The generic static base helper needs the runtime generics context.
            GenTree*          ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
            GenTreeCall::Use* args    = gtNewCallArgs(ctxTree);
            unsigned callFlags = 0;
            if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
            {
                callFlags |= GTF_CALL_HOISTABLE;
            }
            var_types type = TYP_BYREF;
            op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
            op1->gtFlags |= callFlags;
            op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup);
            FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
            op1              = gtNewOperNode(GT_ADD, type, op1,
                                new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
#else
            unreached();
#endif // FEATURE_READYTORUN_COMPILER
        }
        break;
        default:
        {
            // Do we need the address of a static field?
            //
            if (access & CORINFO_ACCESS_ADDRESS)
            {
                void** pFldAddr = nullptr;
                void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
                // We should always be able to access this static's address directly
                //
                assert(pFldAddr == nullptr);
                FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
                /* Create the data member node */
                // NOTE(review): given the assert above, the pFldAddr branch of this
                // conditional appears dead -- kept for safety/history?
                op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
                                          fldSeq);
#ifdef DEBUG
                op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal;
#endif
                if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
                {
                    op1->gtFlags |= GTF_ICON_INITCLASS;
                }
            }
            else // We need the value of a static field
            {
                // In future, it may be better to just create the right tree here instead of folding it later.
                op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
                if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
                {
                    op1->gtFlags |= GTF_FLD_INITCLASS;
                }
                if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
                {
                    // Field data lives in a boxed object on the GC heap; the
                    // payload starts one pointer past the object header.
                    op1->gtType = TYP_REF; // points at boxed object
                    FieldSeqNode* firstElemFldSeq =
                        GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
                    op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
                                        new (this, GT_CNS_INT)
                                            GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
                    if (varTypeIsStruct(lclTyp))
                    {
                        // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
                        op1 = gtNewObjNode(pFieldInfo->structType, op1);
                    }
                    else
                    {
                        op1 = gtNewOperNode(GT_IND, lclTyp, op1);
                        op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
                    }
                }
                return op1;
            }
            break;
        }
    }
    // Helper-based paths: if the field is boxed on the heap, indirect through
    // the box reference and step past the object header to the payload.
    if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
    {
        op1                  = gtNewOperNode(GT_IND, TYP_REF, op1);
        FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
        op1                  = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
                            new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
    }
    if (!(access & CORINFO_ACCESS_ADDRESS))
    {
        if (varTypeIsStruct(lclTyp))
        {
            // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
            op1 = gtNewObjNode(pFieldInfo->structType, op1);
        }
        else
        {
            op1 = gtNewOperNode(GT_IND, lclTyp, op1);
            op1->gtFlags |= GTF_GLOB_REF;
        }
    }
    return op1;
}
// In general try to call this before most of the verification work.  Most people expect the access
// exceptions before the verification exceptions.  If you do this after, that usually doesn't happen.  Turns
// out if you can't access something we also think that you're unverifiable for other reasons.
void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
    // Fast path: nothing to do when access is allowed.
    if (result == CORINFO_ACCESS_ALLOWED)
    {
        return;
    }
    impHandleAccessAllowedInternal(result, helperCall);
}
void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
    // Access was granted: nothing to emit.
    if (result == CORINFO_ACCESS_ALLOWED)
    {
        return;
    }

    if (result == CORINFO_ACCESS_ILLEGAL)
    {
        // If we're verifying (import-only compilation), reject the illegal access
        // right away so we don't consider the method verifiable. Otherwise delay
        // the exception to runtime by inserting a throwing helper call.
        if (compIsForImportOnly())
        {
            info.compCompHnd->ThrowExceptionForHelper(helperCall);
        }
        else
        {
            impInsertHelperCall(helperCall);
        }
    }

    // Any other result value is intentionally a no-op, matching the original
    // switch which listed only the two cases above.
}
//------------------------------------------------------------------------
// impInsertHelperCall: build and append a call to the EE-described helper.
//
// Arguments:
//    helperInfo - EE-supplied descriptor naming the helper to call and the
//                 arguments (field/method/class/module handles, or constants)
//                 to pass to it.
//
// Notes:
//    The argument array is walked in reverse and each argument is prepended,
//    so the resulting call-argument list preserves the order given in
//    helperInfo->args. Handle arguments also notify the EE that the
//    corresponding class/method must be loaded before this code runs.
void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
{
    // Construct the argument list
    GenTreeCall::Use* args = nullptr;
    assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
    for (unsigned i = helperInfo->numArgs; i > 0; --i)
    {
        const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
        GenTree*                  currentArg = nullptr;
        switch (helperArg.argType)
        {
            case CORINFO_HELPER_ARG_TYPE_Field:
                // The field's owning class must be restored/loaded before the code runs.
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass(helperArg.fieldHandle));
                currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Method:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
                currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Class:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
                currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Module:
                currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Const:
                currentArg = gtNewIconNode(helperArg.constant);
                break;
            default:
                NO_WAY("Illegal helper arg type");
        }
        // Prepending while iterating backwards yields the original argument order.
        args = gtPrependNewCallArg(currentArg, args);
    }

    /* TODO-Review:
     * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
     * Also, consider sticking this in the first basic block.
     */
    // The helper returns nothing; append it to the current statement list as a
    // stand-alone side-effecting call.
    GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
    impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
// Checks whether the return types of caller and callee are compatible
// so that callee can be tail called. Note that here we don't check
// compatibility in IL Verifier sense, but on the lines of return type
// sizes are equal and get returned in the same return register.
//
// Arguments:
//    callerRetType      - return type of the caller
//    callerRetTypeClass - class handle for the caller's return type (may be null)
//    callerCallConv     - calling convention of the caller
//    calleeRetType      - return type of the callee
//    calleeRetTypeClass - class handle for the callee's return type (may be null)
//    calleeCallConv     - calling convention of the callee
//
// Return Value:
//    true if the callee's return value can stand in for the caller's:
//    identical var_types, identical non-null class handles, or (on
//    AMD64/ARM64 only) same size returned in the same register.
bool Compiler::impTailCallRetTypeCompatible(var_types                callerRetType,
                                            CORINFO_CLASS_HANDLE     callerRetTypeClass,
                                            CorInfoCallConvExtension callerCallConv,
                                            var_types                calleeRetType,
                                            CORINFO_CLASS_HANDLE     calleeRetTypeClass,
                                            CorInfoCallConvExtension calleeCallConv)
{
    // Note that we can not relax this condition with genActualType() as the
    // calling convention dictates that the caller of a function with a small
    // typed return value is responsible for normalizing the return val.
    if (callerRetType == calleeRetType)
    {
        return true;
    }

    // If the class handles are the same and not null, the return types are compatible.
    if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
    {
        return true;
    }

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    // Jit64 compat:
    if (callerRetType == TYP_VOID)
    {
        // This needs to be allowed to support the following IL pattern that Jit64 allows:
        //     tail.call
        //     pop
        //     ret
        //
        // Note that the above IL pattern is not valid as per IL verification rules.
        // Therefore, only full trust code can take advantage of this pattern.
        return true;
    }

    // These checks return true if the return value type sizes are the same and
    // get returned in the same return register i.e. caller doesn't need to normalize
    // return value. Some of the tail calls permitted by below checks would have
    // been rejected by IL Verifier before we reached here. Therefore, only full
    // trust code can make those tail calls.
    unsigned callerRetTypeSize = 0;
    unsigned calleeRetTypeSize = 0;
    // These also compute the register-returned sizes as a side effect.
    bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize,
                                                               true, info.compIsVarArgs, callerCallConv);
    bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize,
                                                               true, info.compIsVarArgs, calleeCallConv);

    if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
    {
        return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
    }
#endif // TARGET_AMD64 || TARGET_ARM64

    return false;
}
// For prefixFlags: bit flags recording which IL prefixes were seen on the
// current call instruction, and how any tail-call decision was reached.
enum
{
    PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
    PREFIX_TAILCALL_IMPLICIT =
        0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
    PREFIX_TAILCALL_STRESS =
        0x00000100, // call doesn't have the "tail" IL prefix but is treated as explicit because of tail call stress
    PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), // any of the above
    PREFIX_VOLATILE    = 0x00001000, // "volatile." IL prefix
    PREFIX_UNALIGNED   = 0x00010000, // "unaligned." IL prefix
    PREFIX_CONSTRAINED = 0x00100000, // "constrained." IL prefix
    PREFIX_READONLY    = 0x01000000  // "readonly." IL prefix
};
/********************************************************************************
*
 * Returns true if the current opcode and the opcodes following it correspond
* to a supported tail call IL pattern.
*
*/
// Returns true when 'curOpcode' is a call whose IL suffix matches the
// supported tail-call shape (the next opcode in the allowed window is CEE_RET).
// Note: 'tailPrefixed' is not consulted in this body.
bool Compiler::impIsTailCallILPattern(
    bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive)
{
    // Bail out if the current opcode is not a call.
    if (!impOpcodeIsCallOpcode(curOpcode))
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // If shared ret tail opt is not enabled, we will enable
    // it for recursive methods.
    if (isRecursive)
#endif
    {
        // When FEATURE_TAILCALL_OPT_SHARED_RETURN is defined the 'if' above is
        // compiled out, so this block runs unconditionally; otherwise it runs
        // only for recursive calls.
        // we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the
        // sequence. Make sure we don't go past the end of the IL however.
        codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
    }

    // Bail out if there is no next opcode after call
    if (codeAddrOfNextOpcode >= codeEnd)
    {
        return false;
    }

    // The only accepted successor opcode is an immediate return.
    OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);

    return (nextOpcode == CEE_RET);
}
/*****************************************************************************
*
* Determine whether the call could be converted to an implicit tail call
*
*/
bool Compiler::impIsImplicitTailCallCandidate(
    OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
{
#if FEATURE_TAILCALL_OPT
    // Implicit tail calls are considered only when the opt is enabled and we
    // are producing optimized code.
    if (!opts.compTailCallOpt || opts.OptimizationDisabled())
    {
        return false;
    }

    // An explicit "tail." prefix rules out the implicit form.
    if ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0)
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // The block containing the call must be marked BBJ_RETURN, except that we
    // allow shared-ret tail call optimization on recursive calls even under
    // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
    if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
    {
        return false;
    }
#endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN

    // Finally the IL must look like call+ret (or call+pop+ret).
    return impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive);
#else
    return false;
#endif // FEATURE_TAILCALL_OPT
}
//------------------------------------------------------------------------
// impImportCall: import a call-inspiring opcode
//
// Arguments:
// opcode - opcode that inspires the call
// pResolvedToken - resolved token for the call target
// pConstrainedResolvedToken - resolved constraint token (or nullptr)
//    newobjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
// prefixFlags - IL prefix flags for the call
// callInfo - EE supplied info for the call
// rawILOffset - IL offset of the opcode
//
// Returns:
// Type of the call's return value.
// If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
// However we can't assert for this here yet because there are cases we miss. See issue #13272.
//
//
// Notes:
// opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
//
// For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
//    uninitialized object.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
var_types Compiler::impImportCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset)
{
assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
var_types callRetTyp = TYP_COUNT;
CORINFO_SIG_INFO* sig = nullptr;
CORINFO_METHOD_HANDLE methHnd = nullptr;
CORINFO_CLASS_HANDLE clsHnd = nullptr;
unsigned clsFlags = 0;
unsigned mflags = 0;
unsigned argFlags = 0;
GenTree* call = nullptr;
GenTreeCall::Use* args = nullptr;
CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
bool exactContextNeedsRuntimeLookup = false;
bool canTailCall = true;
const char* szCanTailCallFailReason = nullptr;
const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL);
const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
// Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
// do that before tailcalls, but that is probably not the intended
// semantic. So just disallow tailcalls from synchronized methods.
// Also, popping arguments in a varargs function is more work and NYI
// If we have a security object, we have to keep our frame around for callers
// to see any imperative security.
// Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT
// at the end, so tailcalls should be disabled.
if (info.compFlags & CORINFO_FLG_SYNCH)
{
canTailCall = false;
szCanTailCallFailReason = "Caller is synchronized";
}
else if (opts.IsReversePInvoke())
{
canTailCall = false;
szCanTailCallFailReason = "Caller is Reverse P/Invoke";
}
#if !FEATURE_FIXED_OUT_ARGS
else if (info.compIsVarArgs)
{
canTailCall = false;
szCanTailCallFailReason = "Caller is varargs";
}
#endif // FEATURE_FIXED_OUT_ARGS
// We only need to cast the return value of pinvoke inlined calls that return small types
// TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
// widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
// The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
// the time being that the callee might be compiled by the other JIT and thus the return
// value will need to be widened by us (or not widened at all...)
// ReadyToRun code sticks with default calling convention that does not widen small return types.
bool checkForSmallType = opts.IsReadyToRun();
bool bIntrinsicImported = false;
CORINFO_SIG_INFO calliSig;
GenTreeCall::Use* extraArg = nullptr;
/*-------------------------------------------------------------------------
* First create the call node
*/
if (opcode == CEE_CALLI)
{
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
// See comment in impCheckForPInvokeCall
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block)))
{
eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo);
return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset);
}
}
/* Get the call site sig */
eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig);
callRetTyp = JITtype2varType(calliSig.retType);
call = impImportIndirectCall(&calliSig, ilOffset);
// We don't know the target method, so we have to infer the flags, or
// assume the worst-case.
mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
#ifdef DEBUG
if (verbose)
{
unsigned structSize =
(callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
}
#endif
// This should be checked in impImportBlockCode.
assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
sig = &calliSig;
if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0)
{
addFatPointerCandidate(call->AsCall());
}
}
else // (opcode != CEE_CALLI)
{
CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
// Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
// supply the instantiation parameters necessary to make direct calls to underlying
// shared generic code, rather than calling through instantiating stubs. If the
// returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
// must indeed pass an instantiation parameter.
methHnd = callInfo->hMethod;
sig = &(callInfo->sig);
callRetTyp = JITtype2varType(sig->retType);
mflags = callInfo->methodFlags;
#ifdef DEBUG
if (verbose)
{
unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
}
#endif
if (compIsForInlining())
{
/* Does this call site have security boundary restrictions? */
if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
return TYP_UNDEF;
}
/* Does the inlinee use StackCrawlMark */
if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
return TYP_UNDEF;
}
/* For now ignore varargs */
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
return TYP_UNDEF;
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
return TYP_UNDEF;
}
if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
return TYP_UNDEF;
}
}
clsHnd = pResolvedToken->hClass;
clsFlags = callInfo->classFlags;
#ifdef DEBUG
// If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
// This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
// These should be in corelib.h, and available through a JIT/EE interface call.
const char* modName;
const char* className;
const char* methodName;
if ((className = eeGetClassName(clsHnd)) != nullptr &&
strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
(methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
{
return impImportJitTestLabelMark(sig->numArgs);
}
#endif // DEBUG
// <NICE> Factor this into getCallInfo </NICE>
bool isSpecialIntrinsic = false;
if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
{
const bool isTailCall = canTailCall && (tailCallFlags != 0);
call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall,
isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID,
&isSpecialIntrinsic);
if (compDonotInline())
{
return TYP_UNDEF;
}
if (call != nullptr)
{
#ifdef FEATURE_READYTORUN_COMPILER
if (call->OperGet() == GT_INTRINSIC)
{
if (opts.IsReadyToRun())
{
noway_assert(callInfo->kind == CORINFO_CALL);
call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup;
}
else
{
call->AsIntrinsic()->gtEntryPoint.addr = nullptr;
call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE;
}
}
#endif
bIntrinsicImported = true;
goto DONE_CALL;
}
}
#ifdef FEATURE_SIMD
if (featureSIMD)
{
call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token);
if (call != nullptr)
{
bIntrinsicImported = true;
goto DONE_CALL;
}
}
#endif // FEATURE_SIMD
if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
{
NO_WAY("Virtual call to a function added via EnC is not supported");
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
{
BADCODE("Bad calling convention");
}
//-------------------------------------------------------------------------
// Construct the call node
//
// Work out what sort of call we're making.
// Dispense with virtual calls implemented via LDVIRTFTN immediately.
constraintCallThisTransform = callInfo->thisTransform;
exactContextHnd = callInfo->contextHandle;
exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
switch (callInfo->kind)
{
case CORINFO_VIRTUALCALL_STUB:
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
{
if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED)
{
// Runtime does not support inlining of all shapes of runtime lookups
// Inlining has to be aborted in such a case
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
return TYP_UNDEF;
}
GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
assert(!compDonotInline());
// This is the rough code to set up an indirect stub call
assert(stubAddr != nullptr);
// The stubAddr may be a
// complex expression. As it is evaluated after the args,
// it may cause registered args to be spilled. Simply spill it.
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE);
stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
// Create the actual call node
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
call->gtFlags |= GTF_CALL_VIRT_STUB;
#ifdef TARGET_X86
// No tailcalls allowed for these yet...
canTailCall = false;
szCanTailCallFailReason = "VirtualCall with runtime lookup";
#endif
}
else
{
// The stub address is known at compile time
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
call->gtFlags |= GTF_CALL_VIRT_STUB;
assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
}
}
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
// Null check is sometimes needed for ready to run to handle
// non-virtual <-> virtual changes between versions
if (callInfo->nullInstanceCheck)
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
}
#endif
break;
}
case CORINFO_VIRTUALCALL_VTABLE:
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
call->gtFlags |= GTF_CALL_VIRT_VTABLE;
break;
}
case CORINFO_VIRTUALCALL_LDVIRTFTN:
{
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
return TYP_UNDEF;
}
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
// OK, We've been told to call via LDVIRTFTN, so just
// take the call now....
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig);
GenTree* thisPtr = impPopStack().val;
thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
assert(thisPtr != nullptr);
// Clone the (possibly transformed) "this" pointer
GenTree* thisPtrCopy;
thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("LDVIRTFTN this pointer"));
GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
assert(fptr != nullptr);
thisPtr = nullptr; // can't reuse it
// Now make an indirect call through the function pointer
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
// Create the actual call node
call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
{
// CoreRT generic virtual method: need to handle potential fat function pointers
addFatPointerCandidate(call->AsCall());
}
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
// Null check is needed for ready to run to handle
// non-virtual <-> virtual changes between versions
call->gtFlags |= GTF_CALL_NULLCHECK;
}
#endif
// Since we are jumping over some code, check that it's OK to skip that code
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
goto DONE;
}
case CORINFO_CALL:
{
// This is for a non-virtual, non-interface etc. call
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
// We remove the nullcheck for the GetType call intrinsic.
// TODO-CQ: JIT64 does not introduce the null check for many more helper calls
// and intrinsics.
if (callInfo->nullInstanceCheck &&
!((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup);
}
#endif
break;
}
case CORINFO_CALL_CODE_POINTER:
{
// The EE has asked us to call by computing a code pointer and then doing an
// indirect call. This is because a runtime lookup is required to get the code entry point.
// These calls always follow a uniform calling convention, i.e. no extra hidden params
assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
GenTree* fptr =
impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
if (compDonotInline())
{
return TYP_UNDEF;
}
// Now make an indirect call through the function pointer
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
if (callInfo->nullInstanceCheck)
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
break;
}
default:
assert(!"unknown call kind");
break;
}
//-------------------------------------------------------------------------
// Set more flags
PREFIX_ASSUME(call != nullptr);
if (mflags & CORINFO_FLG_NOGCCHECK)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
}
// Mark call if it's one of the ones we will maybe treat as an intrinsic
if (isSpecialIntrinsic)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
}
}
assert(sig);
assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
/* Some sanity checks */
// CALL_VIRT and NEWOBJ must have a THIS pointer
assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
// static bit and hasThis are negations of one another
assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
assert(call != nullptr);
/*-------------------------------------------------------------------------
* Check special-cases etc
*/
/* Special case - Check if it is a call to Delegate.Invoke(). */
if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(mflags & CORINFO_FLG_FINAL);
/* Set the delegate flag */
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
if (callInfo->wrapperDelegateInvoke)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV;
}
if (opcode == CEE_CALLVIRT)
{
assert(mflags & CORINFO_FLG_FINAL);
/* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
assert(call->gtFlags & GTF_CALL_NULLCHECK);
call->gtFlags &= ~GTF_CALL_NULLCHECK;
}
}
CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
actualMethodRetTypeSigClass = sig->retTypeSigClass;
if (varTypeIsStruct(callRetTyp) && compDoOldStructRetyping())
{
callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
call->gtType = callRetTyp;
}
#if !FEATURE_VARARG
/* Check for varargs */
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
(sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
BADCODE("Varargs not supported.");
}
#endif // !FEATURE_VARARG
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
(sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
assert(!compIsForInlining());
/* Set the right flags */
call->gtFlags |= GTF_CALL_POP_ARGS;
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
/* Can't allow tailcall for varargs as it is caller-pop. The caller
will be expecting to pop a certain number of arguments, but if we
tailcall to a function with a different number of arguments, we
are hosed. There are ways around this (caller remembers esp value,
varargs is not caller-pop, etc), but not worth it. */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "Callee is varargs";
}
#endif
/* Get the total number of arguments - this is already correct
* for CALLI - for methods we have to get it from the call site */
if (opcode != CEE_CALLI)
{
#ifdef DEBUG
unsigned numArgsDef = sig->numArgs;
#endif
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
// For vararg calls we must be sure to load the return type of the
// method actually being called, as well as the return types of the
// specified in the vararg signature. With type equivalency, these types
// may not be the same.
if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
{
if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
sig->retType != CORINFO_TYPE_VAR)
{
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
}
}
assert(numArgsDef <= sig->numArgs);
}
/* We will have "cookie" as the last argument but we cannot push
* it on the operand stack because we may overflow, so we append it
* to the arg list next after we pop them */
}
//--------------------------- Inline NDirect ------------------------------
// For inline cases we technically should look at both the current
// block and the call site block (or just the latter if we've
// fused the EH trees). However the block-related checks pertain to
// EH and we currently won't inline a method with EH. So for
// inlinees, just checking the call site block is sufficient.
{
// New lexical block here to avoid compilation errors because of GOTOs.
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
}
#ifdef UNIX_X86_ABI
// On Unix x86 we use caller-cleaned convention.
if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0)
call->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
if (call->gtFlags & GTF_CALL_UNMANAGED)
{
// We set up the unmanaged call by linking the frame, disabling GC, etc
// This needs to be cleaned up on return
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "Callee is native";
}
checkForSmallType = true;
impPopArgsForUnmanagedCall(call, sig);
goto DONE;
}
else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) &&
((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG))
{
if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
{
// Normally this only happens with inlining.
// However, a generic method (or type) being NGENd into another module
// can run into this issue as well. There's not an easy fall-back for NGEN
// so instead we fallback to JIT.
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
}
else
{
IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
}
return TYP_UNDEF;
}
GenTree* cookie = eeGetPInvokeCookie(sig);
// This cookie is required to be either a simple GT_CNS_INT or
// an indirection of a GT_CNS_INT
//
GenTree* cookieConst = cookie;
if (cookie->gtOper == GT_IND)
{
cookieConst = cookie->AsOp()->gtOp1;
}
assert(cookieConst->gtOper == GT_CNS_INT);
// Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
// we won't allow this tree to participate in any CSE logic
//
cookie->gtFlags |= GTF_DONT_CSE;
cookieConst->gtFlags |= GTF_DONT_CSE;
call->AsCall()->gtCallCookie = cookie;
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "PInvoke calli";
}
}
/*-------------------------------------------------------------------------
* Create the argument list
*/
//-------------------------------------------------------------------------
// Special case - for varargs we have an implicit last argument
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
assert(!compIsForInlining());
void *varCookie, *pVarCookie;
if (!info.compCompHnd->canGetVarArgsHandle(sig))
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
return TYP_UNDEF;
}
varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
assert((!varCookie) != (!pVarCookie));
GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
assert(extraArg == nullptr);
extraArg = gtNewCallArgs(cookie);
}
//-------------------------------------------------------------------------
// Extra arg for shared generic code and array methods
//
// Extra argument containing instantiation information is passed in the
// following circumstances:
// (a) To the "Address" method on array classes; the extra parameter is
// the array's type handle (a TypeDesc)
// (b) To shared-code instance methods in generic structs; the extra parameter
// is the struct's type handle (a vtable ptr)
// (c) To shared-code per-instantiation non-generic static methods in generic
// classes and structs; the extra parameter is the type handle
// (d) To shared-code generic methods; the extra parameter is an
// exact-instantiation MethodDesc
//
// We also set the exact type context associated with the call so we can
// inline the call correctly later on.
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
{
assert(call->AsCall()->gtCallType == CT_USER_FUNC);
if (clsHnd == nullptr)
{
NO_WAY("CALLI on parameterized type");
}
assert(opcode != CEE_CALLI);
GenTree* instParam;
BOOL runtimeLookup;
// Instantiated generic method
if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
{
assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT());
CORINFO_METHOD_HANDLE exactMethodHandle =
(CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
if (!exactContextNeedsRuntimeLookup)
{
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
instParam =
impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
else
#endif
{
instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
}
}
else
{
instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
}
// otherwise must be an instance method in a generic struct,
// a static method in a generic type, or a runtime-generated array method
else
{
assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd);
if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
return TYP_UNDEF;
}
if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall)
{
// We indicate "readonly" to the Address operation by using a null
// instParam.
instParam = gtNewIconNode(0, TYP_REF);
}
else if (!exactContextNeedsRuntimeLookup)
{
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
instParam =
impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
else
#endif
{
instParam = gtNewIconEmbClsHndNode(exactClassHandle);
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
}
}
else
{
// If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
// by which the call was constrained with. We embed pConstrainedResolvedToken as the extra argument
// because pResolvedToken is an interface method and interface types make a poor generic context.
if (pConstrainedResolvedToken)
{
instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
FALSE /* importParent */);
}
else
{
instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
}
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
}
assert(extraArg == nullptr);
extraArg = gtNewCallArgs(instParam);
}
if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
{
// Only verifiable cases are supported.
// dup; ldvirtftn; newobj; or ldftn; newobj.
// IL test could contain unverifiable sequence, in this case optimization should not be done.
if (impStackHeight() > 0)
{
typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
if (delegateTypeInfo.IsToken())
{
ldftnToken = delegateTypeInfo.GetToken();
}
}
}
//-------------------------------------------------------------------------
// The main group of arguments
args = impPopCallArgs(sig->numArgs, sig, extraArg);
call->AsCall()->gtCallArgs = args;
for (GenTreeCall::Use& use : call->AsCall()->Args())
{
call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
}
//-------------------------------------------------------------------------
// The "this" pointer
if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) &&
!((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
{
GenTree* obj;
if (opcode == CEE_NEWOBJ)
{
obj = newobjThis;
}
else
{
obj = impPopStack().val;
obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
if (compDonotInline())
{
return TYP_UNDEF;
}
}
// Store the "this" value in the call
call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
call->AsCall()->gtCallThisArg = gtNewCallArgs(obj);
// Is this a virtual or interface call?
if (call->AsCall()->IsVirtual())
{
// only true object pointers can be virtual
assert(obj->gtType == TYP_REF);
// See if we can devirtualize.
const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
const bool isLateDevirtualization = false;
impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
&exactContextHnd, isLateDevirtualization, isExplicitTailCall, rawILOffset);
}
if (impIsThis(obj))
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
}
}
//-------------------------------------------------------------------------
// The "this" pointer for "newobj"
if (opcode == CEE_NEWOBJ)
{
if (clsFlags & CORINFO_FLG_VAROBJSIZE)
{
assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
// This is a 'new' of a variable sized object, wher
// the constructor is to return the object. In this case
// the constructor claims to return VOID but we know it
// actually returns the new object
assert(callRetTyp == TYP_VOID);
callRetTyp = TYP_REF;
call->gtType = TYP_REF;
impSpillSpecialSideEff();
impPushOnStack(call, typeInfo(TI_REF, clsHnd));
}
else
{
if (clsFlags & CORINFO_FLG_DELEGATE)
{
// New inliner morph it in impImportCall.
// This will allow us to inline the call to the delegate constructor.
call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
}
if (!bIntrinsicImported)
{
#if defined(DEBUG) || defined(INLINE_DATA)
// Keep track of the raw IL offset of the call
call->AsCall()->gtRawILOffset = rawILOffset;
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Is it an inline candidate?
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
}
// append the call node.
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
// Now push the value of the 'new onto the stack
// This is a 'new' of a non-variable sized object.
// Append the new node (op1) to the statement list,
// and then push the local holding the value of this
// new instruction on the stack.
if (clsFlags & CORINFO_FLG_VALUECLASS)
{
assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR);
unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
}
else
{
if (newobjThis->gtOper == GT_COMMA)
{
// In coreclr the callout can be inserted even if verification is disabled
// so we cannot rely on tiVerificationNeeded alone
// We must have inserted the callout. Get the real newobj.
newobjThis = newobjThis->AsOp()->gtOp2;
}
assert(newobjThis->gtOper == GT_LCL_VAR);
impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF),
typeInfo(TI_REF, clsHnd));
}
}
return callRetTyp;
}
DONE:
#ifdef DEBUG
// In debug we want to be able to register callsites with the EE.
assert(call->AsCall()->callSig == nullptr);
call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO;
*call->AsCall()->callSig = *sig;
#endif
// Final importer checks for calls flagged as tail calls.
//
if (tailCallFlags != 0)
{
const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0;
const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0;
// Exactly one of these should be true.
assert(isExplicitTailCall != isImplicitTailCall);
// This check cannot be performed for implicit tail calls for the reason
// that impIsImplicitTailCallCandidate() is not checking whether return
// types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
// As a result it is possible that in the following case, we find that
// the type stack is non-empty if Callee() is considered for implicit
// tail calling.
// int Caller(..) { .... void Callee(); ret val; ... }
//
// Note that we cannot check return type compatibility before ImpImportCall()
// as we don't have required info or need to duplicate some of the logic of
// ImpImportCall().
//
// For implicit tail calls, we perform this check after return types are
// known to be compatible.
if (isExplicitTailCall && (verCurrentState.esStackDepth != 0))
{
BADCODE("Stack should be empty after tailcall");
}
// Note that we can not relax this condition with genActualType() as
// the calling convention dictates that the caller of a function with
// a small-typed return value is responsible for normalizing the return val
if (canTailCall &&
!impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv,
callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv()))
{
canTailCall = false;
szCanTailCallFailReason = "Return types are not tail call compatible";
}
// Stack empty check for implicit tail calls.
if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0))
{
#ifdef TARGET_AMD64
// JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
// in JIT64, not an InvalidProgramException.
Verify(false, "Stack should be empty after tailcall");
#else // TARGET_64BIT
BADCODE("Stack should be empty after tailcall");
#endif //! TARGET_64BIT
}
// assert(compCurBB is not a catch, finally or filter block);
// assert(compCurBB is not a try block protected by a finally block);
assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
// Ask VM for permission to tailcall
if (canTailCall)
{
// True virtual or indirect calls, shouldn't pass in a callee handle.
CORINFO_METHOD_HANDLE exactCalleeHnd =
((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd;
if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall))
{
if (isExplicitTailCall)
{
// In case of explicit tail calls, mark it so that it is not considered
// for in-lining.
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));
if (isStressTailCall)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL;
JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call));
}
}
else
{
#if FEATURE_TAILCALL_OPT
// Must be an implicit tail call.
assert(isImplicitTailCall);
// It is possible that a call node is both an inline candidate and marked
// for opportunistic tail calling. In-lining happens before morhphing of
// trees. If in-lining of an in-line candidate gets aborted for whatever
// reason, it will survive to the morphing stage at which point it will be
// transformed into a tail call after performing additional checks.
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));
#else //! FEATURE_TAILCALL_OPT
NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
#endif // FEATURE_TAILCALL_OPT
}
// This might or might not turn into a tailcall. We do more
// checks in morph. For explicit tailcalls we need more
// information in morph in case it turns out to be a
// helper-based tailcall.
if (isExplicitTailCall)
{
assert(call->AsCall()->tailCallInfo == nullptr);
call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo;
switch (opcode)
{
case CEE_CALLI:
call->AsCall()->tailCallInfo->SetCalli(sig);
break;
case CEE_CALLVIRT:
call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken);
break;
default:
call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken);
break;
}
}
}
else
{
// canTailCall reported its reasons already
canTailCall = false;
JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call));
}
}
else
{
// If this assert fires it means that canTailCall was set to false without setting a reason!
assert(szCanTailCallFailReason != nullptr);
JITDUMP("\nRejecting %splicit tail call for [%06u]\n", isExplicitTailCall ? "ex" : "im", dspTreeID(call),
szCanTailCallFailReason);
info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL,
szCanTailCallFailReason);
}
}
// A tail recursive call is a potential loop from the current block to the start of the method.
if ((tailCallFlags != 0) && canTailCall && gtIsRecursiveCall(methHnd))
{
assert(verCurrentState.esStackDepth == 0);
BasicBlock* loopHead = nullptr;
if (opts.IsOSR())
{
// We might not have been planning on importing the method
// entry block, but now we must.
// We should have remembered the real method entry block.
assert(fgEntryBB != nullptr);
JITDUMP("\nOSR: found tail recursive call in the method, scheduling " FMT_BB " for importation\n",
fgEntryBB->bbNum);
impImportBlockPending(fgEntryBB);
loopHead = fgEntryBB;
}
else
{
// For normal jitting we'll branch back to the firstBB; this
// should already be imported.
loopHead = fgFirstBB;
}
JITDUMP("\nFound tail recursive call in the method. Mark " FMT_BB " to " FMT_BB
" as having a backward branch.\n",
loopHead->bbNum, compCurBB->bbNum);
fgMarkBackwardJump(loopHead, compCurBB);
}
// Note: we assume that small return types are already normalized by the managed callee
// or by the pinvoke stub for calls to unmanaged code.
if (!bIntrinsicImported)
{
//
// Things needed to be checked when bIntrinsicImported is false.
//
assert(call->gtOper == GT_CALL);
assert(callInfo != nullptr);
if (compIsForInlining() && opcode == CEE_CALLVIRT)
{
GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode();
if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj,
impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
#if defined(DEBUG) || defined(INLINE_DATA)
// Keep track of the raw IL offset of the call
call->AsCall()->gtRawILOffset = rawILOffset;
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Is it an inline candidate?
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
}
DONE_CALL:
// Push or append the result of the call
if (callRetTyp == TYP_VOID)
{
if (opcode == CEE_NEWOBJ)
{
// we actually did push something, so don't spill the thing we just pushed.
assert(verCurrentState.esStackDepth > 0);
impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
}
else
{
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
}
}
else
{
impSpillSpecialSideEff();
if (clsFlags & CORINFO_FLG_ARRAY)
{
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
}
// Find the return type used for verification by interpreting the method signature.
// NB: we are clobbering the already established sig.
if (tiVerificationNeeded)
{
// Actually, we never get the sig for the original method.
sig = &(callInfo->verSig);
}
typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
tiRetVal.NormaliseForStack();
// The CEE_READONLY prefix modifies the verification semantics of an Address
// operation on an array type.
if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef())
{
tiRetVal.SetIsReadonlyByRef();
}
if (tiVerificationNeeded)
{
// We assume all calls return permanent home byrefs. If they
// didn't they wouldn't be verifiable. This is also covering
// the Address() helper for multidimensional arrays.
if (tiRetVal.IsByRef())
{
tiRetVal.SetIsPermanentHomeByRef();
}
}
if (call->IsCall())
{
// Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
GenTreeCall* origCall = call->AsCall();
const bool isFatPointerCandidate = origCall->IsFatPointerCandidate();
const bool isInlineCandidate = origCall->IsInlineCandidate();
const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate();
if (varTypeIsStruct(callRetTyp))
{
// Need to treat all "split tree" cases here, not just inline candidates
call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
}
// TODO: consider handling fatcalli cases this way too...?
if (isInlineCandidate || isGuardedDevirtualizationCandidate)
{
// We should not have made any adjustments in impFixupCallStructReturn
// as we defer those until we know the fate of the call.
assert(call == origCall);
assert(opts.OptEnabled(CLFLG_INLINING));
assert(!isFatPointerCandidate); // We should not try to inline calli.
// Make the call its own tree (spill the stack if needed).
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
// TODO: Still using the widened type.
GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags);
// Link the retExpr to the call so if necessary we can manipulate it later.
origCall->gtInlineCandidateInfo->retExpr = retExpr;
// Propagate retExpr as the placeholder for the call.
call = retExpr;
}
else
{
if (isFatPointerCandidate)
{
// fatPointer candidates should be in statements of the form call() or var = call().
// Such form allows to find statements with fat calls without walking through whole trees
// and removes problems with cutting trees.
assert(!bIntrinsicImported);
assert(IsTargetAbi(CORINFO_CORERT_ABI));
if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
{
unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
LclVarDsc* varDsc = &lvaTable[calliSlot];
varDsc->lvVerTypeInfo = tiRetVal;
impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
// impAssignTempGen can change src arg list and return type for call that returns struct.
var_types type = genActualType(lvaTable[calliSlot].TypeGet());
call = gtNewLclvNode(calliSlot, type);
}
}
// For non-candidates we must also spill, since we
// might have locals live on the eval stack that this
// call can modify.
//
// Suppress this for certain well-known call targets
// that we know won't modify locals, eg calls that are
// recognized in gtCanOptimizeTypeEquality. Otherwise
// we may break key fragile pattern matches later on.
bool spillStack = true;
if (call->IsCall())
{
GenTreeCall* callNode = call->AsCall();
if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) ||
gtIsTypeHandleToRuntimeTypeHandleHelper(callNode)))
{
spillStack = false;
}
else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
{
spillStack = false;
}
}
if (spillStack)
{
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
}
}
}
if (!bIntrinsicImported)
{
//-------------------------------------------------------------------------
//
/* If the call is of a small type and the callee is managed, the callee will normalize the result
before returning.
However, we need to normalize small type values returned by unmanaged
functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
if we use the shorter inlined pinvoke stub. */
if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
{
call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
}
}
impPushOnStack(call, tiRetVal);
}
// VSD functions get a new call target each time we getCallInfo, so clear the cache.
// Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
// callInfoCache.uncacheCallInfo();
return callRetTyp;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// impMethodInfo_hasRetBuffArg: Check whether a method's return value must
//    be passed back through a hidden return buffer argument.
//
// Arguments:
//    methInfo - method info for the callee whose return is being classified
//    callConv - calling convention of the call
//
// Return Value:
//    true if the method returns a struct that the ABI returns by reference
//    (i.e. via a retbuf arg); false otherwise.
//
bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv)
{
    CorInfoType corType = methInfo->args.retType;

    // Only struct-like returns (value classes and TypedReference) can need a retbuf.
    if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
    {
        // We have some kind of STRUCT being returned. Ask the ABI classifier
        // how it is returned; the returned var_types itself is not needed here.
        structPassingKind howToReturnStruct = SPK_Unknown;
        getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct);
        if (howToReturnStruct == SPK_ByReference)
        {
            return true;
        }
    }
    return false;
}
#ifdef DEBUG
//
//------------------------------------------------------------------------
// impImportJitTestLabelMark: Import a call to the JitTestLabel intrinsic,
//    which attaches a test-label annotation to the tree currently on top
//    of the importer's evaluation stack (DEBUG builds only).
//
// Arguments:
//    numArgs - number of arguments to the intrinsic; 2 means (node, label)
//              and 3 means (node, number, label), popped in reverse order.
//
// Return Value:
//    The type of the annotated node, which is pushed back on the stack.
//
var_types Compiler::impImportJitTestLabelMark(int numArgs)
{
    TestLabelAndNum tlAndN;
    if (numArgs == 2)
    {
        // (node, label): the numeric payload defaults to zero.
        tlAndN.m_num  = 0;
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else if (numArgs == 3)
    {
        // (node, number, label): arguments pop off in reverse order, so the
        // label's numeric payload comes first, then the label itself.
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_num = val->AsIntConCommon()->IconValue();
        se           = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else
    {
        assert(false);
    }

    StackEntry expSe = impPopStack();
    GenTree*   node  = expSe.val;

    // There are a small number of special cases, where we actually put the annotation on a subnode.
    if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
    {
        // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
        // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
        // offset within the static field block whose address is returned by the helper call.
        // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
        assert(node->OperGet() == GT_IND);
        tlAndN.m_num -= 100;
        // Move the annotation to the address computation and drop it from the access itself.
        GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN);
        GetNodeTestData()->Remove(node);
    }
    else
    {
        GetNodeTestData()->Set(node, tlAndN);
    }

    // The annotated expression stays on the stack for the surrounding code.
    impPushOnStack(node, expSe.seTypeInfo);
    return node->TypeGet();
}
#endif // DEBUG
//-----------------------------------------------------------------------------------
// impFixupCallStructReturn: For a call node that returns a struct type either
//    adjust the return type to an enregisterable type, or set the flag to indicate
//    struct return via retbuf arg.
//
// Arguments:
//    call      - GT_CALL GenTree node
//    retClsHnd - Class handle of return type of the call
//
// Return Value:
//    Returns new GenTree node after fixing struct return of call node
//
GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
{
    // Nothing to fix up for non-struct returns.
    if (!varTypeIsStruct(call))
    {
        return call;
    }
    call->gtRetClsHnd = retClsHnd;
#if FEATURE_MULTIREG_RET
    call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv());
#endif // FEATURE_MULTIREG_RET
#ifdef UNIX_AMD64_ABI
    // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
    assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
    const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
    const unsigned        retRegCount = retTypeDesc->GetReturnRegCount();
    if (retRegCount == 0)
    {
        // struct not returned in registers i.e returned via hidden retbuf arg.
        call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
    }
    else if (retRegCount == 1)
    {
        if (!compDoOldStructRetyping())
        {
            // New retyping scheme: leave single-reg struct calls untouched here.
            return call;
        }
        // See if the struct size is smaller than the return
        // type size...
        if (retTypeDesc->IsEnclosingType())
        {
            // If we know for sure this call will remain a call,
            // retype and return value via a suitable temp.
            if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
            {
                call->gtReturnType = retTypeDesc->GetReturnRegType(0);
                return impAssignSmallStructTypeToVar(call, retClsHnd);
            }
            else
            {
                // Tail-call / inline candidate: defer; keep the wider type for now.
                call->gtReturnType = call->gtType;
            }
        }
        else
        {
            // Return type is same size as struct, so we can
            // simply retype the call.
            call->gtReturnType = retTypeDesc->GetReturnRegType(0);
        }
    }
    else
    {
        // must be a struct returned in two registers
        assert(retRegCount == 2);
        if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
        {
            // Force a call returning multi-reg struct to be always of the IR form
            //   tmp = call
            //
            // No need to assign a multi-reg struct to a local var if:
            //  - It is a tail call or
            //  - The call is marked for in-lining later
            return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
        }
    }
#else // not UNIX_AMD64_ABI
    // Check for TYP_STRUCT type that wraps a primitive type
    // Such structs are returned using a single register
    // and we change the return type on those calls here.
    //
    structPassingKind howToReturnStruct;
    var_types         returnType;
    returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
    if (howToReturnStruct == SPK_ByReference)
    {
        // Returned via hidden retbuf arg; the call itself stays TYP_STRUCT/void-like.
        assert(returnType == TYP_UNKNOWN);
        call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
    }
    else
    {
#if FEATURE_MULTIREG_RET
        const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
        const unsigned        retRegCount = retTypeDesc->GetReturnRegCount();
        assert(retRegCount != 0);
        if (!compDoOldStructRetyping() && retRegCount == 1)
        {
            // New retyping scheme: single-reg struct calls are left as-is here.
            return call;
        }
#else  // !FEATURE_MULTIREG_RET
        if (!compDoOldStructRetyping())
        {
            return call;
        }
#endif // !FEATURE_MULTIREG_RET
        assert(returnType != TYP_UNKNOWN);
        // See if the struct size is smaller than the return
        // type size...
        if (howToReturnStruct == SPK_EnclosingType)
        {
            // If we know for sure this call will remain a call,
            // retype and return value via a suitable temp.
            if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
            {
                call->gtReturnType = returnType;
                return impAssignSmallStructTypeToVar(call, retClsHnd);
            }
        }
        else
        {
            // Return type is same size as struct, so we can
            // simply retype the call.
            call->gtReturnType = returnType;
        }
        // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
        // Track that the method now uses a long / floating-point value.
        if ((returnType == TYP_LONG) && (compLongUsed == false))
        {
            compLongUsed = true;
        }
        else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
        {
            compFloatingPointUsed = true;
        }
#if FEATURE_MULTIREG_RET
        if (retRegCount >= 2)
        {
            if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
            {
                // Force a call returning multi-reg struct to be always of the IR form
                //   tmp = call
                //
                // No need to assign a multi-reg struct to a local var if:
                //  - It is a tail call or
                //  - The call is marked for in-lining later
                return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
            }
        }
#endif // FEATURE_MULTIREG_RET
    }
#endif // not UNIX_AMD64_ABI
    return call;
}
/*****************************************************************************
   For struct return values, re-type the operand in the case where the ABI
   does not use a struct return buffer
 */
//------------------------------------------------------------------------
// impFixupStructReturnType: Re-type a struct-valued return operand for ABIs
//    that return the struct in registers rather than via a return buffer.
//
// Arguments:
//    op            - the operand of the GT_RETURN being built
//    retClsHnd     - class handle of the method's declared struct return type
//    unmgdCallConv - the method's (possibly unmanaged) calling convention
//
// Return Value:
//    The (possibly replaced) operand to use for the return.
//
GenTree* Compiler::impFixupStructReturnType(GenTree*                 op,
                                            CORINFO_CLASS_HANDLE     retClsHnd,
                                            CorInfoCallConvExtension unmgdCallConv)
{
    // Only called for struct returns that do not use a hidden retbuf arg.
    assert(varTypeIsStruct(info.compRetType));
    assert(info.compRetBuffArg == BAD_VAR_NUM);
    JITDUMP("\nimpFixupStructReturnType: retyping\n");
    DISPTREE(op);
#if defined(TARGET_XARCH)
#if FEATURE_MULTIREG_RET
    // No VarArgs for CoreCLR on x64 Unix
    UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs));
    // Is method returning a multi-reg struct?
    if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
    {
        // In case of multi-reg struct return, we force IR to be one of the following:
        // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
        // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
        if (op->gtOper == GT_LCL_VAR)
        {
            // Note that this is a multi-reg return.
            unsigned lclNum                  = op->AsLclVarCommon()->GetLclNum();
            lvaTable[lclNum].lvIsMultiRegRet = true;
            // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
            op->gtFlags |= GTF_DONT_CSE;
            return op;
        }
        if (op->gtOper == GT_CALL)
        {
            return op;
        }
        return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
    }
#else
    assert(info.compRetNativeType != TYP_STRUCT);
#endif // FEATURE_MULTIREG_RET
#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM)
    // On ARM, HFAs are returned in multiple floating-point registers.
    if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
    {
        if (op->gtOper == GT_LCL_VAR)
        {
            // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
            unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
            // Make sure this struct type stays as struct so that we can return it as an HFA
            lvaTable[lclNum].lvIsMultiRegRet = true;
            // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
            op->gtFlags |= GTF_DONT_CSE;
            return op;
        }
        if (op->gtOper == GT_CALL)
        {
            if (op->AsCall()->IsVarargs())
            {
                // We cannot tail call because control needs to return to fixup the calling
                // convention for result return.
                op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
                op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
            }
            else
            {
                return op;
            }
        }
        return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
    }
#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64)
    // Is method returning a multi-reg struct?
    if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
    {
        if (op->gtOper == GT_LCL_VAR)
        {
            // This LCL_VAR stays as a TYP_STRUCT
            unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
            if (!lvaIsImplicitByRefLocal(lclNum))
            {
                // Make sure this struct type is not struct promoted
                lvaTable[lclNum].lvIsMultiRegRet = true;
                // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
                op->gtFlags |= GTF_DONT_CSE;
                return op;
            }
        }
        if (op->gtOper == GT_CALL)
        {
            if (op->AsCall()->IsVarargs())
            {
                // We cannot tail call because control needs to return to fixup the calling
                // convention for result return.
                op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
                op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
            }
            else
            {
                return op;
            }
        }
        return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
    }
#endif // per-target multi-reg return cases
    if (!compDoOldStructRetyping() && (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)))
    {
        // Don't retype `struct` as a primitive type in `ret` instruction.
        return op;
    }
REDO_RETURN_NODE:
    // adjust the type away from struct to integral
    // and no normalizing
    if (op->gtOper == GT_LCL_VAR)
    {
        // It is possible that we now have a lclVar of scalar type.
        // If so, don't transform it to GT_LCL_FLD.
        LclVarDsc* varDsc = lvaGetDesc(op->AsLclVarCommon());
        if (genActualType(varDsc->TypeGet()) != genActualType(info.compRetNativeType))
        {
            op->ChangeOper(GT_LCL_FLD);
        }
    }
    else if (op->gtOper == GT_OBJ)
    {
        GenTree* op1 = op->AsObj()->Addr();
        // We will fold away OBJ/ADDR, except for OBJ/ADDR/INDEX
        //
        // In the latter case the OBJ type may have a different type
        // than the array element type, and we need to preserve the
        // array element type for now.
        //
        if ((op1->gtOper == GT_ADDR) && (op1->AsOp()->gtOp1->gtOper != GT_INDEX))
        {
            // Change '*(&X)' to 'X' and see if we can do better
            op = op1->AsOp()->gtOp1;
            goto REDO_RETURN_NODE;
        }
        op->ChangeOperUnchecked(GT_IND);
        op->gtFlags |= GTF_IND_TGTANYWHERE;
    }
    else if (op->gtOper == GT_CALL)
    {
        if (op->AsCall()->TreatAsHasRetBufArg(this))
        {
            // This must be one of those 'special' helpers that don't
            // really have a return buffer, but instead use it as a way
            // to keep the trees cleaner with fewer address-taken temps.
            //
            // Well now we have to materialize the return buffer as
            // an address-taken temp. Then we can return the temp.
            //
            // NOTE: this code assumes that since the call directly
            // feeds the return, then the call must be returning the
            // same structure/class/type.
            //
            unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
            // No need to spill anything as we're about to return.
            impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
            if (compDoOldStructRetyping())
            {
                // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
                // jump directly to a GT_LCL_FLD.
                op = gtNewLclvNode(tmpNum, info.compRetNativeType);
                op->ChangeOper(GT_LCL_FLD);
            }
            else
            {
                op = gtNewLclvNode(tmpNum, info.compRetType);
                JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n");
                DISPTREE(op);
                return op;
            }
        }
        else
        {
            // Don't change the gtType of the call just yet, it will get changed later.
            return op;
        }
    }
    else if (op->gtOper == GT_COMMA)
    {
        // Retype the value-producing side of the comma recursively.
        op->AsOp()->gtOp2 = impFixupStructReturnType(op->AsOp()->gtOp2, retClsHnd, unmgdCallConv);
    }
    op->gtType = info.compRetNativeType;
    JITDUMP("\nimpFixupStructReturnType: result of retyping is\n");
    DISPTREE(op);
    return op;
}
/*****************************************************************************
CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
finally-protected try. We find the finally blocks protecting the current
offset (in order) by walking over the complete exception table and
finding enclosing clauses. This assumes that the table is sorted.
This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
If we are leaving a catch handler, we need to attach the
CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
After this function, the BBJ_LEAVE block has been converted to a different type.
*/
#if !defined(FEATURE_EH_FUNCLETS)
//------------------------------------------------------------------------
// impImportLeave: import a CEE_LEAVE opcode (non-funclet EH model).
//
// Arguments:
//    block - the BBJ_LEAVE block being imported; on return it has been
//            converted to BBJ_CALLFINALLY or BBJ_ALWAYS.
//
// Notes:
//    Walks the (sorted, inner-to-outer) EH table. For each catch handler
//    being left, a CORINFO_HELP_ENDCATCH call is accumulated; for each
//    finally-protected try being left, a BBJ_CALLFINALLY/BBJ_ALWAYS step
//    pair is created, producing the chain described in the comment above.
void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nBefore import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG
    bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
    unsigned blkAddr = block->bbCodeOffs;
    BasicBlock* leaveTarget = block->bbJumpDest;
    unsigned jmpAddr = leaveTarget->bbCodeOffs;
    // LEAVE clears the stack, spill side effects, and set stack to 0
    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
    verCurrentState.esStackDepth = 0;
    assert(block->bbJumpKind == BBJ_LEAVE);
    assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
    BasicBlock* step = DUMMY_INIT(NULL);
    unsigned encFinallies = 0; // Number of enclosing finallies.
    GenTree* endCatches = NULL;
    Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally.
    unsigned XTnum;
    EHblkDsc* HBtab;
    // Walk every EH clause; the table is sorted, so clauses are visited from
    // innermost to outermost.
    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        // Grab the handler offsets
        IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
        IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
        IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
        IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
        /* Is this a catch-handler we are CEE_LEAVEing out of?
         * If so, we need to call CORINFO_HELP_ENDCATCH.
         */
        if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
        {
            // Can't CEE_LEAVE out of a finally/fault handler
            if (HBtab->HasFinallyOrFaultHandler())
                BADCODE("leave out of fault/finally block");
            // Create the call to CORINFO_HELP_ENDCATCH
            GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
            // Make a list of all the currently pending endCatches
            if (endCatches)
                endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
            else
                endCatches = endCatch;
#ifdef DEBUG
            if (verbose)
            {
                printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
                       "CORINFO_HELP_ENDCATCH\n",
                       block->bbNum, XTnum);
            }
#endif
        }
        else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
                 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
        {
            /* This is a finally-protected try we are jumping out of */
            /* If there are any pending endCatches, and we have already
               jumped out of a finally-protected try, then the endCatches
               have to be put in a block in an outer try for async
               exceptions to work correctly.
               Else, just use append to the original block */
            BasicBlock* callBlock;
            assert(!encFinallies ==
                   !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa
            if (encFinallies == 0)
            {
                // First (innermost) finally being left: reuse the LEAVE block
                // itself as the call-to-finally block.
                assert(step == DUMMY_INIT(NULL));
                callBlock = block;
                callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
                if (endCatches)
                    impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
                           "block %s\n",
                           callBlock->dspToString());
                }
#endif
            }
            else
            {
                // A nested finally was already processed: chain a new
                // call-to-finally block off the previous step block.
                assert(step != DUMMY_INIT(NULL));
                /* Calling the finally block */
                callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
                assert(step->bbJumpKind == BBJ_ALWAYS);
                step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
                                              // finally in the chain)
                step->bbJumpDest->bbRefs++;
                /* The new block will inherit this block's weight */
                callBlock->setBBWeight(block->bbWeight);
                callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
                           callBlock->dspToString());
                }
#endif
                // The end-of-local-finally marker (and any pending endCatches)
                // become the statement list of the new call block.
                Statement* lastStmt;
                if (endCatches)
                {
                    lastStmt = gtNewStmt(endCatches);
                    endLFinStmt->SetNextStmt(lastStmt);
                    lastStmt->SetPrevStmt(endLFinStmt);
                }
                else
                {
                    lastStmt = endLFinStmt;
                }
                // note that this sets BBF_IMPORTED on the block
                impEndTreeList(callBlock, endLFinStmt, lastStmt);
            }
            // Create the BBJ_ALWAYS step block the finally will return to.
            step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
            /* The new block will inherit this block's weight */
            step->setBBWeight(block->bbWeight);
            step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
#ifdef DEBUG
            if (verbose)
            {
                printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
                       step->dspToString());
            }
#endif
            unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
            assert(finallyNesting <= compHndBBtabCount);
            callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
            GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
            endLFinStmt = gtNewStmt(endLFin);
            endCatches = NULL;
            encFinallies++;
            invalidatePreds = true;
        }
    }
    /* Append any remaining endCatches, if any */
    assert(!encFinallies == !endLFinStmt);
    if (encFinallies == 0)
    {
        // No finally was involved: the LEAVE degenerates to a plain jump.
        assert(step == DUMMY_INIT(NULL));
        block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
        if (endCatches)
            impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
#ifdef DEBUG
        if (verbose)
        {
            printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
                   "block %s\n",
                   block->dspToString());
        }
#endif
    }
    else
    {
        // If leaveTarget is the start of another try block, we want to make sure that
        // we do not insert finalStep into that try block. Hence, we find the enclosing
        // try block.
        unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
        // Insert a new BB either in the try region indicated by tryIndex or
        // the handler region indicated by leaveTarget->bbHndIndex,
        // depending on which is the inner region.
        BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
        finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
        step->bbJumpDest = finalStep;
        /* The new block will inherit this block's weight */
        finalStep->setBBWeight(block->bbWeight);
        finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
#ifdef DEBUG
        if (verbose)
        {
            printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
                   finalStep->dspToString());
        }
#endif
        // The last GT_END_LFIN (and any pending endCatches) go into finalStep.
        Statement* lastStmt;
        if (endCatches)
        {
            lastStmt = gtNewStmt(endCatches);
            endLFinStmt->SetNextStmt(lastStmt);
            lastStmt->SetPrevStmt(endLFinStmt);
        }
        else
        {
            lastStmt = endLFinStmt;
        }
        impEndTreeList(finalStep, endLFinStmt, lastStmt);
        finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
        // Queue up the jump target for importing
        impImportBlockPending(leaveTarget);
        invalidatePreds = true;
    }
    if (invalidatePreds && fgComputePredsDone)
    {
        JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
        fgRemovePreds();
    }
#ifdef DEBUG
    fgVerifyHandlerTab();
    if (verbose)
    {
        printf("\nAfter import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG
}
#else // FEATURE_EH_FUNCLETS
//------------------------------------------------------------------------
// impImportLeave: import a CEE_LEAVE opcode (funclet EH model).
//
// Arguments:
//    block - the BBJ_LEAVE block being imported; on return it has been
//            converted to BBJ_EHCATCHRET, BBJ_CALLFINALLY or BBJ_ALWAYS.
//
// Notes:
//    Walks the (sorted, inner-to-outer) EH table, threading a chain of
//    "step" blocks: a catch exit becomes BBJ_EHCATCHRET, each left
//    finally-protected try gets a BBJ_CALLFINALLY/BBJ_ALWAYS pair, and a
//    left catch-protected try may get an extra BBJ_ALWAYS step (see the
//    ThreadAbortException discussion below). `stepType` records what kind
//    of block the current `step` is so the right transition is applied.
void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum,
               block->bbJumpDest->bbNum);
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG
    bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
    unsigned blkAddr = block->bbCodeOffs;
    BasicBlock* leaveTarget = block->bbJumpDest;
    unsigned jmpAddr = leaveTarget->bbCodeOffs;
    // LEAVE clears the stack, spill side effects, and set stack to 0
    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
    verCurrentState.esStackDepth = 0;
    assert(block->bbJumpKind == BBJ_LEAVE);
    assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
    BasicBlock* step = nullptr;
    enum StepType
    {
        // No step type; step == NULL.
        ST_None,
        // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
        // That is, is step->bbJumpDest where a finally will return to?
        ST_FinallyReturn,
        // The step block is a catch return.
        ST_Catch,
        // The step block is in a "try", created as the target for a finally return or the target for a catch return.
        ST_Try
    };
    StepType stepType = ST_None;
    unsigned XTnum;
    EHblkDsc* HBtab;
    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        // Grab the handler offsets
        IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
        IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
        IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
        IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
        /* Is this a catch-handler we are CEE_LEAVEing out of?
         */
        if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
        {
            // Can't CEE_LEAVE out of a finally/fault handler
            if (HBtab->HasFinallyOrFaultHandler())
            {
                BADCODE("leave out of fault/finally block");
            }
            /* We are jumping out of a catch */
            if (step == nullptr)
            {
                step = block;
                step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
                stepType = ST_Catch;
#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB
                           " to BBJ_EHCATCHRET "
                           "block\n",
                           XTnum, step->bbNum);
                }
#endif
            }
            else
            {
                BasicBlock* exitBlock;
                /* Create a new catch exit block in the catch region for the existing step block to jump to in this
                 * scope */
                exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
                assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
                step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
                                              // exit) returns to this block
                step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
                if (stepType == ST_FinallyReturn)
                {
                    assert(step->bbJumpKind == BBJ_ALWAYS);
                    // Mark the target of a finally return
                    step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
                }
#endif // defined(TARGET_ARM)
                /* The new block will inherit this block's weight */
                exitBlock->setBBWeight(block->bbWeight);
                exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
                /* This exit block is the new step */
                step = exitBlock;
                stepType = ST_Catch;
                invalidatePreds = true;
#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n",
                           XTnum, exitBlock->bbNum);
                }
#endif
            }
        }
        else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
                 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
        {
            /* We are jumping out of a finally-protected try */
            BasicBlock* callBlock;
            if (step == nullptr)
            {
#if FEATURE_EH_CALLFINALLY_THUNKS
                // Put the call to the finally in the enclosing region.
                unsigned callFinallyTryIndex =
                    (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
                unsigned callFinallyHndIndex =
                    (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
                callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
                // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
                // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
                // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
                // next block, and flow optimizations will remove it.
                block->bbJumpKind = BBJ_ALWAYS;
                block->bbJumpDest = callBlock;
                block->bbJumpDest->bbRefs++;
                /* The new block will inherit this block's weight */
                callBlock->setBBWeight(block->bbWeight);
                callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
                           " to "
                           "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n",
                           XTnum, block->bbNum, callBlock->bbNum);
                }
#endif
#else // !FEATURE_EH_CALLFINALLY_THUNKS
                callBlock = block;
                callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
                           " to "
                           "BBJ_CALLFINALLY block\n",
                           XTnum, callBlock->bbNum);
                }
#endif
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
            }
            else
            {
                // Calling the finally block. We already have a step block that is either the call-to-finally from a
                // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
                // a 'finally'), or the step block is the return from a catch.
                //
                // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
                // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
                // automatically re-raise the exception, using the return address of the catch (that is, the target
                // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
                // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
                // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
                // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
                // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
                // within the 'try' region protected by the finally, since we generate code in such a way that execution
                // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
                // stack walks.)
                assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
#if FEATURE_EH_CALLFINALLY_THUNKS
                if (step->bbJumpKind == BBJ_EHCATCHRET)
                {
                    // Need to create another step block in the 'try' region that will actually branch to the
                    // call-to-finally thunk.
                    BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
                    step->bbJumpDest = step2;
                    step->bbJumpDest->bbRefs++;
                    step2->setBBWeight(block->bbWeight);
                    step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
#ifdef DEBUG
                    if (verbose)
                    {
                        printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
                               "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n",
                               XTnum, step->bbNum, step2->bbNum);
                    }
#endif
                    step = step2;
                    assert(stepType == ST_Catch); // Leave it as catch type for now.
                }
#endif // FEATURE_EH_CALLFINALLY_THUNKS
#if FEATURE_EH_CALLFINALLY_THUNKS
                unsigned callFinallyTryIndex =
                    (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
                unsigned callFinallyHndIndex =
                    (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
#else // !FEATURE_EH_CALLFINALLY_THUNKS
                unsigned callFinallyTryIndex = XTnum + 1;
                unsigned callFinallyHndIndex = 0; // don't care
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
                callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
                step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
                                              // finally in the chain)
                step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
                if (stepType == ST_FinallyReturn)
                {
                    assert(step->bbJumpKind == BBJ_ALWAYS);
                    // Mark the target of a finally return
                    step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
                }
#endif // defined(TARGET_ARM)
                /* The new block will inherit this block's weight */
                callBlock->setBBWeight(block->bbWeight);
                callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY "
                           "block " FMT_BB "\n",
                           XTnum, callBlock->bbNum);
                }
#endif
            }
            // Create the BBJ_ALWAYS block the finally will return to; it becomes
            // the new step block.
            step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
            stepType = ST_FinallyReturn;
            /* The new block will inherit this block's weight */
            step->setBBWeight(block->bbWeight);
            step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
#ifdef DEBUG
            if (verbose)
            {
                printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
                       "block " FMT_BB "\n",
                       XTnum, step->bbNum);
            }
#endif
            callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
            invalidatePreds = true;
        }
        else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
                 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
        {
            // We are jumping out of a catch-protected try.
            //
            // If we are returning from a call to a finally, then we must have a step block within a try
            // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
            // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
            // and invoke the appropriate catch.
            //
            // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
            // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
            // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
            // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
            // address of the catch return as the new exception address. That is, the re-raised exception appears to
            // occur at the catch return address. If this exception return address skips an enclosing try/catch that
            // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
            // For example:
            //
            // try {
            //    try {
            //       // something here raises ThreadAbortException
            //       LEAVE LABEL_1; // no need to stop at LABEL_2
            //    } catch (Exception) {
            //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
            //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
            //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
            //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
            //       // need to do this transformation if the current EH block is a try/catch that catches
            //       // ThreadAbortException (or one of its parents), however we might not be able to find that
            //       // information, so currently we do it for all catch types.
            //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
            //    }
            //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
            // } catch (ThreadAbortException) {
            // }
            // LABEL_1:
            //
            // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
            // compiler.
            if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
            {
                BasicBlock* catchStep;
                assert(step);
                if (stepType == ST_FinallyReturn)
                {
                    assert(step->bbJumpKind == BBJ_ALWAYS);
                }
                else
                {
                    assert(stepType == ST_Catch);
                    assert(step->bbJumpKind == BBJ_EHCATCHRET);
                }
                /* Create a new exit block in the try region for the existing step block to jump to in this scope */
                catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
                step->bbJumpDest = catchStep;
                step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
                if (stepType == ST_FinallyReturn)
                {
                    // Mark the target of a finally return
                    step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
                }
#endif // defined(TARGET_ARM)
                /* The new block will inherit this block's weight */
                catchStep->setBBWeight(block->bbWeight);
                catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
#ifdef DEBUG
                if (verbose)
                {
                    if (stepType == ST_FinallyReturn)
                    {
                        printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
                               "BBJ_ALWAYS block " FMT_BB "\n",
                               XTnum, catchStep->bbNum);
                    }
                    else
                    {
                        assert(stepType == ST_Catch);
                        printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
                               "BBJ_ALWAYS block " FMT_BB "\n",
                               XTnum, catchStep->bbNum);
                    }
                }
#endif // DEBUG
                /* This block is the new step */
                step = catchStep;
                stepType = ST_Try;
                invalidatePreds = true;
            }
        }
    }
    if (step == nullptr)
    {
        // No EH region was left: the LEAVE degenerates to a plain jump.
        block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
#ifdef DEBUG
        if (verbose)
        {
            printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
                   "block " FMT_BB " to BBJ_ALWAYS\n",
                   block->bbNum);
        }
#endif
    }
    else
    {
        step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
#if defined(TARGET_ARM)
        if (stepType == ST_FinallyReturn)
        {
            assert(step->bbJumpKind == BBJ_ALWAYS);
            // Mark the target of a finally return
            step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
        }
#endif // defined(TARGET_ARM)
#ifdef DEBUG
        if (verbose)
        {
            printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum);
        }
#endif
        // Queue up the jump target for importing
        impImportBlockPending(leaveTarget);
    }
    if (invalidatePreds && fgComputePredsDone)
    {
        JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
        fgRemovePreds();
    }
#ifdef DEBUG
    fgVerifyHandlerTab();
    if (verbose)
    {
        printf("\nAfter import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG
}
#endif // FEATURE_EH_FUNCLETS
/*****************************************************************************/
// This is called when reimporting a leave block. It resets the JumpKind,
// JumpDest, and bbNext to the original values
void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
{
#if defined(FEATURE_EH_FUNCLETS)
    // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
    // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0,
    // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
    // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
    // only predecessor are also considered orphans and attempted to be deleted.
    //
    //  try {
    //     ....
    //     try
    //     {
    //         ....
    //         leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
    //     } finally { }
    //  } finally { }
    //  OUTSIDE:
    //
    // In the above nested try-finally example, we create a step block (call it Bstep) which in branches to a block
    // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block.
    // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
    // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
    // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
    // will be treated as pair and handled correctly.
    if (block->bbJumpKind == BBJ_CALLFINALLY)
    {
        // Clone the BBJ_CALLFINALLY so the orphaned paired BBJ_ALWAYS still has a partner.
        BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
        dupBlock->bbFlags = block->bbFlags;
        dupBlock->bbJumpDest = block->bbJumpDest;
        dupBlock->copyEHRegion(block);
        dupBlock->bbCatchTyp = block->bbCatchTyp;
        // Mark this block as
        // a) not referenced by any other block to make sure that it gets deleted
        // b) weight zero
        // c) prevent from being imported
        // d) as internal
        // e) as rarely run
        dupBlock->bbRefs = 0;
        dupBlock->bbWeight = 0;
        dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
        // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
        // will be next to each other.
        fgInsertBBafter(block, dupBlock);
#ifdef DEBUG
        if (verbose)
        {
            printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
        }
#endif
    }
#endif // FEATURE_EH_FUNCLETS
    // Restore the original LEAVE shape: jump kind and the IL-offset-derived target.
    block->bbJumpKind = BBJ_LEAVE;
    fgInitBBLookup();
    block->bbJumpDest = fgLookupBB(jmpAddr);
    // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
    // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
    // reason we don't want to remove the block at this point is that if we call
    // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
    // added and the linked list length will be different than fgBBcount.
}
/*****************************************************************************/
// Get the first non-prefix opcode. Used for verification of valid combinations
// of prefixes and actual opcodes.
//------------------------------------------------------------------------
// impGetNonPrefixOpcode: scan forward from codeAddr and return the first
// opcode that is not one of the ignorable IL prefixes (unaligned.,
// volatile., tail., constrained., readonly.).
//
// Arguments:
//    codeAddr - current position in the IL stream
//    codeEndp - one past the end of the IL stream
//
// Return Value:
//    The first non-prefix opcode, or CEE_ILLEGAL if the stream ends (or is
//    truncated mid two-byte opcode) before one is found.
static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
{
    const BYTE* cursor = codeAddr;
    while (cursor < codeEndp)
    {
        OPCODE opcode = (OPCODE)getU1LittleEndian(cursor);
        cursor += sizeof(__int8);
        if (opcode == CEE_PREFIX1)
        {
            // Two-byte opcode; bail out if the second byte is missing.
            if (cursor >= codeEndp)
            {
                break;
            }
            opcode = (OPCODE)(getU1LittleEndian(cursor) + 256);
            cursor += sizeof(__int8);
        }
        const bool isIgnorablePrefix = (opcode == CEE_UNALIGNED) || (opcode == CEE_VOLATILE) ||
                                       (opcode == CEE_TAILCALL) || (opcode == CEE_CONSTRAINED) ||
                                       (opcode == CEE_READONLY);
        if (!isIgnorablePrefix)
        {
            return opcode;
        }
        // Skip the prefix's inline operand (if any) and keep scanning.
        cursor += opcodeSizes[opcode];
    }
    return CEE_ILLEGAL;
}
/*****************************************************************************/
// Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
{
OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (!(
// Opcode of all ldind and stdind happen to be in continuous, except stind.i.
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
(opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
(opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
// volatile. prefix is allowed with the ldsfld and stsfld
(volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
{
BADCODE("Invalid opcode for unaligned. or volatile. prefix");
}
}
/*****************************************************************************/
#ifdef DEBUG
#undef RETURN // undef contracts RETURN macro
// Debug-only classification of how each IL opcode affects control flow.
enum controlFlow_t
{
    NEXT,
    CALL,
    RETURN,
    THROW,
    BRANCH,
    COND_BRANCH,
    BREAK,
    PHI,
    META,
};
// Per-opcode control-flow table, indexed by opcode value; each entry is the
// "flow" column of the corresponding OPDEF row in opcode.def.
const static controlFlow_t controlFlow[] = {
#define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
#include "opcode.def"
#undef OPDEF
};
#endif // DEBUG
/*****************************************************************************
 * Determine the result type of an arithmetic operation
 * On 64-bit inserts upcasts when native int is mixed with int32
 */
//------------------------------------------------------------------------
// impGetByRefResultType: compute the stack type produced by a binary
// arithmetic operation, handling the byref-specific rules; on 64-bit
// targets, rewrites *pOp1/*pOp2 with explicit upcasts to native int where
// an int32 operand is mixed with a native int or byref.
//
// Arguments:
//    oper      - the operator (only GT_SUB/GT_ADD get byref treatment)
//    fUnsigned - true if the operation is unsigned (selects TYP_U_IMPL casts)
//    pOp1      - [in/out] first operand; may be replaced by a cast node
//    pOp2      - [in/out] second operand; may be replaced by a cast node
//
// Return Value:
//    One of TYP_BYREF, TYP_DOUBLE, TYP_FLOAT, TYP_LONG, TYP_INT.
var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
{
    var_types type = TYP_UNDEF;
    GenTree* op1 = *pOp1;
    GenTree* op2 = *pOp2;
    // Arithmetic operations are generally only allowed with
    // primitive types, but certain operations are allowed
    // with byrefs
    if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
    {
        if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
        {
            // byref1-byref2 => gives a native int
            type = TYP_I_IMPL;
        }
        else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
        {
            // [native] int - byref => gives a native int
            //
            // The reason is that it is possible, in managed C++,
            // to have a tree like this:
            //
            //              -
            //             / \.
            //            /   \.
            //           /     \.
            //          /       \.
            // const(h) int     addr byref
            //
            // <BUGNUM> VSW 318822 </BUGNUM>
            //
            // So here we decide to make the resulting type to be a native int.
            CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
            if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
            {
                // insert an explicit upcast
                op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
#endif // TARGET_64BIT
            type = TYP_I_IMPL;
        }
        else
        {
            // byref - [native] int => gives a byref
            assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
#ifdef TARGET_64BIT
            if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
            {
                // insert an explicit upcast
                op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
#endif // TARGET_64BIT
            type = TYP_BYREF;
        }
    }
    else if ((oper == GT_ADD) &&
             (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
    {
        // byref + [native] int => gives a byref
        // (or)
        // [native] int + byref => gives a byref
        // only one can be a byref : byref op byref not allowed
        assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
        assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
#ifdef TARGET_64BIT
        if (genActualType(op2->TypeGet()) == TYP_BYREF)
        {
            if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
            {
                // insert an explicit upcast
                op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
        }
        else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
        {
            // insert an explicit upcast
            op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
        }
#endif // TARGET_64BIT
        type = TYP_BYREF;
    }
#ifdef TARGET_64BIT
    else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
    {
        assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
        // int + long => gives long
        // long + int => gives long
        // we get this because in the IL the long isn't Int64, it's just IntPtr
        if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
        {
            // insert an explicit upcast
            op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
        }
        else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
        {
            // insert an explicit upcast
            op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
        }
        type = TYP_I_IMPL;
    }
#else // 32-bit TARGET
    else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
    {
        assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
        // int + long => gives long
        // long + int => gives long
        type = TYP_LONG;
    }
#endif // TARGET_64BIT
    else
    {
        // int + int => gives an int
        assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
        assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
               (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
        type = genActualType(op1->gtType);
        // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
        // Otherwise, turn floats into doubles
        if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
        {
            assert(genActualType(op2->gtType) == TYP_DOUBLE);
            type = TYP_DOUBLE;
        }
    }
    assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
    return type;
}
//------------------------------------------------------------------------
// impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
//
// Arguments:
//   op1 - value to cast
//   pResolvedToken - resolved token for type to cast to
//   isCastClass - true if this is a castclass, false if isinst
//
// Return Value:
//   tree representing optimized cast, or null if no optimization possible
//
// Notes:
//   Consults the EE's static type comparison (compareTypesForCast) between
//   the known source class of op1 (if any) and the cast target. A "Must"
//   answer lets the cast collapse to op1 itself; a "MustNot" answer on an
//   exact source class lets an isinst collapse to null (and may also remove
//   a feeding box). Everything else falls back to a runtime test.
GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
{
    assert(op1->TypeGet() == TYP_REF);

    // Don't optimize for minopts or debug codegen.
    if (opts.OptimizationDisabled())
    {
        return nullptr;
    }

    // See what we know about the type of the object being cast.
    bool                 isExact   = false;
    bool                 isNonNull = false;
    CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);

    // With no static knowledge of the source type there is nothing to do.
    if (fromClass == nullptr)
    {
        JITDUMP("\nCan't optimize since fromClass is unknown\n");
        return nullptr;
    }

    CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;

    JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
            isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
            info.compCompHnd->getClassName(toClass));

    // Perhaps we know if the cast will succeed or fail.
    const TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);

    switch (castResult)
    {
        case TypeCompareState::Must:
            // Cast will succeed, result is simply op1.
            JITDUMP("Cast will succeed, optimizing to simply return input\n");
            return op1;

        case TypeCompareState::MustNot:
        {
            // See if we can sharpen exactness by looking for final classes
            if (!isExact)
            {
                isExact = impIsClassExact(fromClass);
            }

            // Cast to exact type will fail. Handle case where we have
            // an exact type (that is, fromClass is not a subtype)
            // and we're not going to throw on failure.
            if (isExact && !isCastClass)
            {
                JITDUMP("Cast will fail, optimizing to return null\n");
                GenTree* nullResult = gtNewIconNode(0, TYP_REF);

                // If the cast was fed by a box, we can remove that too.
                if (op1->IsBoxedValue())
                {
                    JITDUMP("Also removing upstream box\n");
                    gtTryRemoveBoxUpstreamEffects(op1);
                }

                return nullResult;
            }

            if (isExact)
            {
                // A failing castclass must throw; leave that to the runtime helper.
                JITDUMP("Not optimizing failing castclass (yet)\n");
            }
            else
            {
                JITDUMP("Can't optimize since fromClass is inexact\n");
            }
            break;
        }

        default:
            JITDUMP("Result of cast unknown, must generate runtime test\n");
            break;
    }

    return nullptr;
}
//------------------------------------------------------------------------
// impCastClassOrIsInstToTree: build and import castclass/isinst
//
// Arguments:
//   op1 - value to cast
//   op2 - type handle for type to cast to
//   pResolvedToken - resolved token from the cast operation
//   isCastClass - true if this is castclass, false means isinst
//
// Return Value:
//   Tree representing the cast
//
// Notes:
//   May expand into a series of runtime checks or a helper call.
//   The inline expansion is a pair of nested QMARKs: an outer null check
//   (null input short-circuits both castclass and isinst), and an inner
//   method-table identity compare whose failure path is either the special
//   castclass helper (castclass) or a null constant (isinst).
GenTree* Compiler::impCastClassOrIsInstToTree(GenTree*                op1,
                                              GenTree*                op2,
                                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                              bool                    isCastClass)
{
    assert(op1->TypeGet() == TYP_REF);

    // Optimistically assume the jit should expand this as an inline test
    bool shouldExpandInline = true;

    // Profitability check.
    //
    // Don't bother with inline expansion when jit is trying to
    // generate code quickly, or the cast is in code that won't run very
    // often, or the method already is pretty big.
    if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
    {
        // not worth the code expansion if jitting fast or in a rarely run block
        shouldExpandInline = false;
    }
    else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
    {
        // not worth creating an untracked local variable
        shouldExpandInline = false;
    }

    // Pessimistically assume the jit cannot expand this as an inline test
    bool canExpandInline = false;

    // Ask the EE which helper implements this particular cast.
    const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);

    // Legality check.
    //
    // Not all classclass/isinst operations can be inline expanded.
    // Check legality only if an inline expansion is desirable.
    if (shouldExpandInline)
    {
        if (isCastClass)
        {
            // Jit can only inline expand the normal CHKCASTCLASS helper.
            canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
        }
        else
        {
            if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
            {
                // If the class is exact, the jit can expand the IsInst check inline.
                canExpandInline = impIsClassExact(pResolvedToken->hClass);
            }
        }
    }

    const bool expandInline = canExpandInline && shouldExpandInline;

    if (!expandInline)
    {
        // Fall back to a plain helper call: helper(typeHandle, object).
        JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
                canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");

        // If we CSE this class handle we prevent assertionProp from making SubType assertions
        // so instead we force the CSE logic to not consider CSE-ing this class handle.
        //
        op2->gtFlags |= GTF_DONT_CSE;

        return gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1));
    }

    JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");

    // Spill pending side effects so nothing can be reordered across the
    // QMARK trees we are about to build.
    impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));

    GenTree* temp;
    GenTree* condMT;
    //
    // expand the methodtable match:
    //
    //  condMT ==>   GT_NE
    //               /    \.
    //           GT_IND   op2 (typically CNS_INT)
    //              |
    //           op1Copy
    //
    // This can replace op1 with a GT_COMMA that evaluates op1 into a local
    //
    op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
    //
    // op1 is now known to be a non-complex tree
    // thus we can use gtClone(op1) from now on
    //

    // For castclass, op2 is consumed twice (inline compare + fallback helper
    // call), so evaluate it into a comma-form temp and reuse the temp.
    GenTree* op2Var = op2;
    if (isCastClass)
    {
        op2Var                                                  = fgInsertCommaFormTemp(&op2);
        lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
    }

    // temp = method table of op1's object.
    temp   = gtNewMethodTableLookup(temp);
    condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);

    GenTree* condNull;
    //
    // expand the null check:
    //
    //  condNull ==>   GT_EQ
    //                 /    \.
    //             op1Copy CNS_INT
    //                      null
    //
    condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));

    //
    // expand the true and false trees for the condMT
    //
    GenTree* condFalse = gtClone(op1);
    GenTree* condTrue;
    if (isCastClass)
    {
        //
        // use the special helper that skips the cases checked by our inlined cast
        //
        const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;

        condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1)));
    }
    else
    {
        // isinst: method table mismatch on an exact class means "not an instance".
        condTrue = gtNewIconNode(0, TYP_REF);
    }

// Always defined; kept as a named toggle around the QMARK-based expansion.
#define USE_QMARK_TREES

#ifdef USE_QMARK_TREES

    GenTree* qmarkMT;
    //
    // Generate first QMARK - COLON tree
    //
    //  qmarkMT ==>   GT_QMARK
    //                 /     \.
    //            condMT   GT_COLON
    //                      /     \.
    //                condFalse  condTrue
    //
    temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
    qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);

    GenTree* qmarkNull;
    //
    // Generate second QMARK - COLON tree
    //
    //  qmarkNull ==>  GT_QMARK
    //                 /     \.
    //           condNull  GT_COLON
    //                      /     \.
    //                qmarkMT   op1Copy
    //
    temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
    qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
    qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;

    // Make QMark node a top level node by spilling it.
    unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
    impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);

    // TODO-CQ: Is it possible op1 has a better type?
    //
    // See also gtGetHelperCallClassHandle where we make the same
    // determination for the helper call variants.
    LclVarDsc* lclDsc = lvaGetDesc(tmp);
    assert(lclDsc->lvSingleDef == 0);
    // The temp is assigned exactly once (just above), so it can carry an
    // exact class annotation.
    lclDsc->lvSingleDef = 1;
    JITDUMP("Marked V%02u as a single def temp\n", tmp);
    // The result of a successful castclass/isinst is (a subtype of) the
    // cast target class.
    lvaSetClass(tmp, pResolvedToken->hClass);

    return gtNewLclvNode(tmp, TYP_REF);
#endif
}
// assertImp(cond): importer-specific assert.
//
// On failure in DEBUG builds it aborts with a message carrying the failing
// condition text, the current IL opcode name and offset (impCurOpcName /
// impCurOpcOffs), the types of op1/op2 when they are non-null, and the current
// evaluation-stack depth -- context that helps distinguish malformed IL from
// jit bugs. In non-DEBUG builds it expands to a no-op.
#ifndef DEBUG

#define assertImp(cond) ((void)0)

#else

// The message buffer is alloca'd per failure so the macro needs no static
// state; _snprintf_s bounds the write to cchAssertImpBuf - 1 characters.
#define assertImp(cond)                                                                                                \
    do                                                                                                                 \
    {                                                                                                                  \
        if (!(cond))                                                                                                   \
        {                                                                                                              \
            const int cchAssertImpBuf = 600;                                                                           \
            char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
            _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
                        "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
                        impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
                        op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
            assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
        }                                                                                                              \
    } while (0)

#endif // DEBUG
//------------------------------------------------------------------------
// impBlockIsInALoop: check if a block might be in a loop
//
// Arguments:
//   block - block to check
//
// Returns:
//   true if the block might be in a loop.
//
// Notes:
//   Conservatively correct; may return true for some blocks that are
//   not actually in loops. A block is treated as possibly-in-a-loop when
//   it is itself a backward branch target, or -- when inlining -- when the
//   call site's block is one (the whole inlinee then sits inside that loop).
//
bool Compiler::impBlockIsInALoop(BasicBlock* block)
{
    // The block itself carries the backward-jump flag?
    if ((block->bbFlags & BBF_BACKWARD_JUMP) != 0)
    {
        return true;
    }

    // Inlining: inherit the answer from the block containing the call site.
    return compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0);
}
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
/*****************************************************************************
* Import the instr for the given basic block
*/
void Compiler::impImportBlockCode(BasicBlock* block)
{
#define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
#ifdef DEBUG
if (verbose)
{
printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
}
#endif
unsigned nxtStmtIndex = impInitBlockLineInfo();
IL_OFFSET nxtStmtOffs;
CorInfoHelpFunc helper;
CorInfoIsAccessAllowedResult accessAllowedResult;
CORINFO_HELPER_DESC calloutHelper;
const BYTE* lastLoadToken = nullptr;
// reject cyclic constraints
if (tiVerificationNeeded)
{
Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
}
/* Get the tree list started */
impBeginTreeList();
#ifdef FEATURE_ON_STACK_REPLACEMENT
// Are there any places in the method where we might add a patchpoint?
if (compHasBackwardJump)
{
// Are patchpoints enabled?
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
{
// We don't inline at Tier0, if we do, we may need rethink our approach.
// Could probably support inlines that don't introduce flow.
assert(!compIsForInlining());
// Is the start of this block a suitable patchpoint?
// Current strategy is blocks that are stack-empty and backwards branch targets
if (block->bbFlags & BBF_BACKWARD_JUMP_TARGET && (verCurrentState.esStackDepth == 0))
{
block->bbFlags |= BBF_PATCHPOINT;
setMethodHasPatchpoint();
}
}
}
else
{
// Should not see backward branch targets w/o backwards branches
assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0);
}
#endif // FEATURE_ON_STACK_REPLACEMENT
/* Walk the opcodes that comprise the basic block */
const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
IL_OFFSET opcodeOffs = block->bbCodeOffs;
IL_OFFSET lastSpillOffs = opcodeOffs;
signed jmpDist;
/* remember the start of the delegate creation sequence (used for verification) */
const BYTE* delegateCreateStart = nullptr;
int prefixFlags = 0;
bool explicitTailCall, constraintCall, readonlyCall;
typeInfo tiRetVal;
unsigned numArgs = info.compArgsCount;
/* Now process all the opcodes in the block */
var_types callTyp = TYP_COUNT;
OPCODE prevOpcode = CEE_ILLEGAL;
if (block->bbCatchTyp)
{
if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
{
impCurStmtOffsSet(block->bbCodeOffs);
}
// We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
// to a temp. This is a trade off for code simplicity
impSpillSpecialSideEff();
}
while (codeAddr < codeEndp)
{
bool usingReadyToRunHelper = false;
CORINFO_RESOLVED_TOKEN resolvedToken;
CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
CORINFO_CALL_INFO callInfo;
CORINFO_FIELD_INFO fieldInfo;
tiRetVal = typeInfo(); // Default type info
//---------------------------------------------------------------------
/* We need to restrict the max tree depth as many of the Compiler
functions are recursive. We do this by spilling the stack */
if (verCurrentState.esStackDepth)
{
/* Has it been a while since we last saw a non-empty stack (which
guarantees that the tree depth isnt accumulating. */
if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
{
impSpillStackEnsure();
lastSpillOffs = opcodeOffs;
}
}
else
{
lastSpillOffs = opcodeOffs;
impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
}
/* Compute the current instr offset */
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
#ifndef DEBUG
if (opts.compDbgInfo)
#endif
{
if (!compIsForInlining())
{
nxtStmtOffs =
(nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
/* Have we reached the next stmt boundary ? */
if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
{
assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
{
/* We need to provide accurate IP-mapping at this point.
So spill anything on the stack so that it will form
gtStmts with the correct stmt offset noted */
impSpillStackEnsure(true);
}
// Has impCurStmtOffs been reported in any tree?
if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
{
GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
assert(impCurStmtOffs == BAD_IL_OFFSET);
}
if (impCurStmtOffs == BAD_IL_OFFSET)
{
/* Make sure that nxtStmtIndex is in sync with opcodeOffs.
If opcodeOffs has gone past nxtStmtIndex, catch up */
while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
{
nxtStmtIndex++;
}
/* Go to the new stmt */
impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
/* Update the stmt boundary index */
nxtStmtIndex++;
assert(nxtStmtIndex <= info.compStmtOffsetsCount);
/* Are there any more line# entries after this one? */
if (nxtStmtIndex < info.compStmtOffsetsCount)
{
/* Remember where the next line# starts */
nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
}
else
{
/* No more line# entries */
nxtStmtOffs = BAD_IL_OFFSET;
}
}
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
(verCurrentState.esStackDepth == 0))
{
/* At stack-empty locations, we have already added the tree to
the stmt list with the last offset. We just need to update
impCurStmtOffs
*/
impCurStmtOffsSet(opcodeOffs);
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
impOpcodeIsCallSiteBoundary(prevOpcode))
{
/* Make sure we have a type cached */
assert(callTyp != TYP_COUNT);
if (callTyp == TYP_VOID)
{
impCurStmtOffsSet(opcodeOffs);
}
else if (opts.compDbgCode)
{
impSpillStackEnsure(true);
impCurStmtOffsSet(opcodeOffs);
}
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
{
if (opts.compDbgCode)
{
impSpillStackEnsure(true);
}
impCurStmtOffsSet(opcodeOffs);
}
assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
}
}
CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
var_types lclTyp, ovflType = TYP_UNKNOWN;
GenTree* op1 = DUMMY_INIT(NULL);
GenTree* op2 = DUMMY_INIT(NULL);
GenTree* newObjThisPtr = DUMMY_INIT(NULL);
bool uns = DUMMY_INIT(false);
bool isLocal = false;
/* Get the next opcode and the size of its parameters */
OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
#ifdef DEBUG
impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
#endif
DECODE_OPCODE:
// Return if any previous code has caused inline to fail.
if (compDonotInline())
{
return;
}
/* Get the size of additional parameters */
signed int sz = opcodeSizes[opcode];
#ifdef DEBUG
clsHnd = NO_CLASS_HANDLE;
lclTyp = TYP_COUNT;
callTyp = TYP_COUNT;
impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
impCurOpcName = opcodeNames[opcode];
if (verbose && (opcode != CEE_PREFIX1))
{
printf("%s", impCurOpcName);
}
/* Use assertImp() to display the opcode */
op1 = op2 = nullptr;
#endif
/* See what kind of an opcode we have, then */
unsigned mflags = 0;
unsigned clsFlags = 0;
switch (opcode)
{
unsigned lclNum;
var_types type;
GenTree* op3;
genTreeOps oper;
unsigned size;
int val;
CORINFO_SIG_INFO sig;
IL_OFFSET jmpAddr;
bool ovfl, unordered, callNode;
bool ldstruct;
CORINFO_CLASS_HANDLE tokenType;
union {
int intVal;
float fltVal;
__int64 lngVal;
double dblVal;
} cval;
case CEE_PREFIX1:
opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
codeAddr += sizeof(__int8);
goto DECODE_OPCODE;
SPILL_APPEND:
// We need to call impSpillLclRefs() for a struct type lclVar.
// This is because there may be loads of that lclVar on the evaluation stack, and
// we need to ensure that those loads are completed before we modify it.
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1()))
{
GenTree* lhs = op1->gtGetOp1();
GenTreeLclVarCommon* lclVar = nullptr;
if (lhs->gtOper == GT_LCL_VAR)
{
lclVar = lhs->AsLclVarCommon();
}
else if (lhs->OperIsBlk())
{
// Check if LHS address is within some struct local, to catch
// cases where we're updating the struct by something other than a stfld
GenTree* addr = lhs->AsBlk()->Addr();
// Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT))
lclVar = addr->IsLocalAddrExpr();
// Catches ADDR(FIELD(... ADDR(LCL_VAR)))
if (lclVar == nullptr)
{
GenTree* lclTree = nullptr;
if (impIsAddressInLocal(addr, &lclTree))
{
lclVar = lclTree->AsLclVarCommon();
}
}
}
if (lclVar != nullptr)
{
impSpillLclRefs(lclVar->GetLclNum());
}
}
/* Append 'op1' to the list of statements */
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
goto DONE_APPEND;
APPEND:
/* Append 'op1' to the list of statements */
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
goto DONE_APPEND;
DONE_APPEND:
#ifdef DEBUG
// Remember at which BC offset the tree was finished
impNoteLastILoffs();
#endif
break;
case CEE_LDNULL:
impPushNullObjRefOnStack();
break;
case CEE_LDC_I4_M1:
case CEE_LDC_I4_0:
case CEE_LDC_I4_1:
case CEE_LDC_I4_2:
case CEE_LDC_I4_3:
case CEE_LDC_I4_4:
case CEE_LDC_I4_5:
case CEE_LDC_I4_6:
case CEE_LDC_I4_7:
case CEE_LDC_I4_8:
cval.intVal = (opcode - CEE_LDC_I4_0);
assert(-1 <= cval.intVal && cval.intVal <= 8);
goto PUSH_I4CON;
case CEE_LDC_I4_S:
cval.intVal = getI1LittleEndian(codeAddr);
goto PUSH_I4CON;
case CEE_LDC_I4:
cval.intVal = getI4LittleEndian(codeAddr);
goto PUSH_I4CON;
PUSH_I4CON:
JITDUMP(" %d", cval.intVal);
impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
break;
case CEE_LDC_I8:
cval.lngVal = getI8LittleEndian(codeAddr);
JITDUMP(" 0x%016llx", cval.lngVal);
impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
break;
case CEE_LDC_R8:
cval.dblVal = getR8LittleEndian(codeAddr);
JITDUMP(" %#.17g", cval.dblVal);
impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
break;
case CEE_LDC_R4:
cval.dblVal = getR4LittleEndian(codeAddr);
JITDUMP(" %#.17g", cval.dblVal);
{
GenTree* cnsOp = gtNewDconNode(cval.dblVal);
cnsOp->gtType = TYP_FLOAT;
impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
}
break;
case CEE_LDSTR:
if (compIsForInlining())
{
if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
return;
}
}
val = getU4LittleEndian(codeAddr);
JITDUMP(" %08X", val);
if (tiVerificationNeeded)
{
Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
tiRetVal = typeInfo(TI_REF, impGetStringClass());
}
impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
break;
case CEE_LDARG:
lclNum = getU2LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDARG_S:
lclNum = getU1LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDARG_0:
case CEE_LDARG_1:
case CEE_LDARG_2:
case CEE_LDARG_3:
lclNum = (opcode - CEE_LDARG_0);
assert(lclNum >= 0 && lclNum < 4);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC:
lclNum = getU2LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC_S:
lclNum = getU1LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC_0:
case CEE_LDLOC_1:
case CEE_LDLOC_2:
case CEE_LDLOC_3:
lclNum = (opcode - CEE_LDLOC_0);
assert(lclNum >= 0 && lclNum < 4);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_STARG:
lclNum = getU2LittleEndian(codeAddr);
goto STARG;
case CEE_STARG_S:
lclNum = getU1LittleEndian(codeAddr);
STARG:
JITDUMP(" %u", lclNum);
if (tiVerificationNeeded)
{
Verify(lclNum < info.compILargsCount, "bad arg num");
}
if (compIsForInlining())
{
op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
noway_assert(op1->gtOper == GT_LCL_VAR);
lclNum = op1->AsLclVar()->GetLclNum();
goto VAR_ST_VALID;
}
lclNum = compMapILargNum(lclNum); // account for possible hidden param
assertImp(lclNum < numArgs);
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
// We should have seen this arg write in the prescan
assert(lvaTable[lclNum].lvHasILStoreOp);
if (tiVerificationNeeded)
{
typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
"type mismatch");
if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
{
Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
}
}
goto VAR_ST;
case CEE_STLOC:
lclNum = getU2LittleEndian(codeAddr);
isLocal = true;
JITDUMP(" %u", lclNum);
goto LOC_ST;
case CEE_STLOC_S:
lclNum = getU1LittleEndian(codeAddr);
isLocal = true;
JITDUMP(" %u", lclNum);
goto LOC_ST;
case CEE_STLOC_0:
case CEE_STLOC_1:
case CEE_STLOC_2:
case CEE_STLOC_3:
isLocal = true;
lclNum = (opcode - CEE_STLOC_0);
assert(lclNum >= 0 && lclNum < 4);
LOC_ST:
if (tiVerificationNeeded)
{
Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
Verify(tiCompatibleWith(impStackTop().seTypeInfo,
NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
"type mismatch");
}
if (compIsForInlining())
{
lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
/* Have we allocated a temp for this local? */
lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
goto _PopValue;
}
lclNum += numArgs;
VAR_ST:
if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
BADCODE("Bad IL");
}
VAR_ST_VALID:
/* if it is a struct assignment, make certain we don't overflow the buffer */
assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
if (lvaTable[lclNum].lvNormalizeOnLoad())
{
lclTyp = lvaGetRealType(lclNum);
}
else
{
lclTyp = lvaGetActualType(lclNum);
}
_PopValue:
/* Pop the value being assigned */
{
StackEntry se = impPopStack();
clsHnd = se.seTypeInfo.GetClassHandle();
op1 = se.val;
tiRetVal = se.seTypeInfo;
}
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
{
assert(op1->TypeGet() == TYP_STRUCT);
op1->gtType = lclTyp;
}
#endif // FEATURE_SIMD
op1 = impImplicitIorI4Cast(op1, lclTyp);
#ifdef TARGET_64BIT
// Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity
if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
}
#endif // TARGET_64BIT
// We had better assign it a value of the correct type
assertImp(
genActualType(lclTyp) == genActualType(op1->gtType) ||
(genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) ||
(genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
(genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
(varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
/* If op1 is "&var" then its type is the transient "*" and it can
be used either as TYP_BYREF or TYP_I_IMPL */
if (op1->IsLocalAddrExpr() != nullptr)
{
assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
/* When "&var" is created, we assume it is a byref. If it is
being assigned to a TYP_I_IMPL var, change the type to
prevent unnecessary GC info */
if (genActualType(lclTyp) == TYP_I_IMPL)
{
op1->gtType = TYP_I_IMPL;
}
}
// If this is a local and the local is a ref type, see
// if we can improve type information based on the
// value being assigned.
if (isLocal && (lclTyp == TYP_REF))
{
// We should have seen a stloc in our IL prescan.
assert(lvaTable[lclNum].lvHasILStoreOp);
// Is there just one place this local is defined?
const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef;
// Conservative check that there is just one
// definition that reaches this store.
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
if (isSingleDefLocal && hasSingleReachingDef)
{
lvaUpdateClass(lclNum, op1, clsHnd);
}
}
/* Filter out simple assignments to itself */
if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum())
{
if (opts.compDbgCode)
{
op1 = gtNewNothingNode();
goto SPILL_APPEND;
}
else
{
break;
}
}
/* Create the assignment node */
op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1));
/* If the local is aliased or pinned, we need to spill calls and
indirections from the stack. */
if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
(verCurrentState.esStackDepth > 0))
{
impSpillSideEffects(false,
(unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
}
/* Spill any refs to the local from the stack */
impSpillLclRefs(lclNum);
// We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
// We insert a cast to the dest 'op2' type
//
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
}
if (varTypeIsStruct(lclTyp))
{
op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
}
else
{
// The code generator generates GC tracking information
// based on the RHS of the assignment. Later the LHS (which is
// is a BYREF) gets used and the emitter checks that that variable
// is being tracked. It is not (since the RHS was an int and did
// not need tracking). To keep this assert happy, we change the RHS
if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
{
op1->gtType = TYP_BYREF;
}
op1 = gtNewAssignNode(op2, op1);
}
goto SPILL_APPEND;
case CEE_LDLOCA:
lclNum = getU2LittleEndian(codeAddr);
goto LDLOCA;
case CEE_LDLOCA_S:
lclNum = getU1LittleEndian(codeAddr);
LDLOCA:
JITDUMP(" %u", lclNum);
if (tiVerificationNeeded)
{
Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
Verify(info.compInitMem, "initLocals not set");
}
if (compIsForInlining())
{
// Get the local type
lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
/* Have we allocated a temp for this local? */
lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
goto _PUSH_ADRVAR;
}
lclNum += numArgs;
assertImp(lclNum < info.compLocalsCount);
goto ADRVAR;
case CEE_LDARGA:
lclNum = getU2LittleEndian(codeAddr);
goto LDARGA;
case CEE_LDARGA_S:
lclNum = getU1LittleEndian(codeAddr);
LDARGA:
JITDUMP(" %u", lclNum);
Verify(lclNum < info.compILargsCount, "bad arg num");
if (compIsForInlining())
{
// In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument,
// followed by a ldfld to load the field.
op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
if (op1->gtOper != GT_LCL_VAR)
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
return;
}
assert(op1->gtOper == GT_LCL_VAR);
goto _PUSH_ADRVAR;
}
lclNum = compMapILargNum(lclNum); // account for possible hidden param
assertImp(lclNum < numArgs);
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
goto ADRVAR;
ADRVAR:
op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum) DEBUGARG(opcodeOffs + sz + 1));
_PUSH_ADRVAR:
assert(op1->gtOper == GT_LCL_VAR);
/* Note that this is supposed to create the transient type "*"
which may be used as a TYP_I_IMPL. However we catch places
where it is used as a TYP_I_IMPL and change the node if needed.
Thus we are pessimistic and may report byrefs in the GC info
where it was not absolutely needed, but it is safer this way.
*/
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
// &aliasedVar doesnt need GTF_GLOB_REF, though alisasedVar does
assert((op1->gtFlags & GTF_GLOB_REF) == 0);
tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
if (tiVerificationNeeded)
{
// Don't allow taking address of uninit this ptr.
if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
{
Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
}
if (!tiRetVal.IsByRef())
{
tiRetVal.MakeByRef();
}
else
{
Verify(false, "byref to byref");
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_ARGLIST:
if (!info.compIsVarArgs)
{
BADCODE("arglist in non-vararg method");
}
if (tiVerificationNeeded)
{
tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
}
assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
/* The ARGLIST cookie is a hidden 'last' parameter, we have already
adjusted the arg count cos this is like fetching the last param */
assertImp(0 < numArgs);
assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
lclNum = lvaVarargsHandleArg;
op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1));
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
impPushOnStack(op1, tiRetVal);
break;
case CEE_ENDFINALLY:
if (compIsForInlining())
{
assert(!"Shouldn't have exception handlers in the inliner!");
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
return;
}
if (verCurrentState.esStackDepth > 0)
{
impEvalSideEffects();
}
if (info.compXcptnsCount == 0)
{
BADCODE("endfinally outside finally");
}
assert(verCurrentState.esStackDepth == 0);
op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
goto APPEND;
case CEE_ENDFILTER:
if (compIsForInlining())
{
assert(!"Shouldn't have exception handlers in the inliner!");
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
return;
}
block->bbSetRunRarely(); // filters are rare
if (info.compXcptnsCount == 0)
{
BADCODE("endfilter outside filter");
}
if (tiVerificationNeeded)
{
Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
}
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_INT);
if (!bbInFilterILRange(block))
{
BADCODE("EndFilter outside a filter handler");
}
/* Mark current bb as end of filter */
assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
/* Mark catch handler as successor */
op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
if (verCurrentState.esStackDepth != 0)
{
verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
DEBUGARG(__LINE__));
}
goto APPEND;
case CEE_RET:
prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
RET:
if (!impReturnInstruction(prefixFlags, opcode))
{
return; // abort
}
else
{
break;
}
case CEE_JMP:
assert(!compIsForInlining());
if (tiVerificationNeeded)
{
Verify(false, "Invalid opcode: CEE_JMP");
}
if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
{
/* CEE_JMP does not make sense in some "protected" regions. */
BADCODE("Jmp not allowed in protected region");
}
if (opts.IsReversePInvoke())
{
BADCODE("Jmp not allowed in reverse P/Invoke");
}
if (verCurrentState.esStackDepth != 0)
{
BADCODE("Stack must be empty after CEE_JMPs");
}
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
/* The signature of the target has to be identical to ours.
At least check that argCnt and returnType match */
eeGetMethodSig(resolvedToken.hMethod, &sig);
if (sig.numArgs != info.compMethodInfo->args.numArgs ||
sig.retType != info.compMethodInfo->args.retType ||
sig.callConv != info.compMethodInfo->args.callConv)
{
BADCODE("Incompatible target for CEE_JMPs");
}
op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
/* Mark the basic block as being a JUMP instead of RETURN */
block->bbFlags |= BBF_HAS_JMP;
/* Set this flag to make sure register arguments have a location assigned
* even if we don't use them inside the method */
compJmpOpUsed = true;
fgNoStructPromotion = true;
goto APPEND;
case CEE_LDELEMA:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
ldelemClsHnd = resolvedToken.hClass;
if (tiVerificationNeeded)
{
typeInfo tiArray = impStackTop(1).seTypeInfo;
typeInfo tiIndex = impStackTop().seTypeInfo;
// As per ECMA 'index' specified can be either int32 or native int.
Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
Verify(tiArray.IsNullObjRef() ||
typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
"bad array");
tiRetVal = arrayElemType;
tiRetVal.MakeByRef();
if (prefixFlags & PREFIX_READONLY)
{
tiRetVal.SetIsReadonlyByRef();
}
// an array interior pointer is always in the heap
tiRetVal.SetIsPermanentHomeByRef();
}
// If it's a value class array we just do a simple address-of
if (eeIsValueClass(ldelemClsHnd))
{
CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
if (cit == CORINFO_TYPE_UNDEF)
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = JITtype2varType(cit);
}
goto ARR_LD_POST_VERIFY;
}
// Similarly, if its a readonly access, we can do a simple address-of
// without doing a runtime type-check
if (prefixFlags & PREFIX_READONLY)
{
lclTyp = TYP_REF;
goto ARR_LD_POST_VERIFY;
}
// Otherwise we need the full helper function with run-time type check
op1 = impTokenToHandle(&resolvedToken);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
{
GenTreeCall::Use* args = gtNewCallArgs(op1); // Type
args = gtPrependNewCallArg(impPopStack().val, args); // index
args = gtPrependNewCallArg(impPopStack().val, args); // array
op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
}
impPushOnStack(op1, tiRetVal);
break;
// ldelem for reference and value types
case CEE_LDELEM:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
ldelemClsHnd = resolvedToken.hClass;
if (tiVerificationNeeded)
{
typeInfo tiArray = impStackTop(1).seTypeInfo;
typeInfo tiIndex = impStackTop().seTypeInfo;
// As per ECMA 'index' specified can be either int32 or native int.
Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
tiRetVal = verMakeTypeInfo(ldelemClsHnd);
Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
"type of array incompatible with type operand");
tiRetVal.NormaliseForStack();
}
// If it's a reference type or generic variable type
// then just generate code as though it's a ldelem.ref instruction
if (!eeIsValueClass(ldelemClsHnd))
{
lclTyp = TYP_REF;
opcode = CEE_LDELEM_REF;
}
else
{
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
lclTyp = JITtype2varType(jitTyp);
tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
tiRetVal.NormaliseForStack();
}
goto ARR_LD_POST_VERIFY;
case CEE_LDELEM_I1:
lclTyp = TYP_BYTE;
goto ARR_LD;
case CEE_LDELEM_I2:
lclTyp = TYP_SHORT;
goto ARR_LD;
case CEE_LDELEM_I:
lclTyp = TYP_I_IMPL;
goto ARR_LD;
// Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
// and treating it as TYP_INT avoids other asserts.
case CEE_LDELEM_U4:
lclTyp = TYP_INT;
goto ARR_LD;
case CEE_LDELEM_I4:
lclTyp = TYP_INT;
goto ARR_LD;
case CEE_LDELEM_I8:
lclTyp = TYP_LONG;
goto ARR_LD;
case CEE_LDELEM_REF:
lclTyp = TYP_REF;
goto ARR_LD;
case CEE_LDELEM_R4:
lclTyp = TYP_FLOAT;
goto ARR_LD;
case CEE_LDELEM_R8:
lclTyp = TYP_DOUBLE;
goto ARR_LD;
case CEE_LDELEM_U1:
lclTyp = TYP_UBYTE;
goto ARR_LD;
case CEE_LDELEM_U2:
lclTyp = TYP_USHORT;
goto ARR_LD;
ARR_LD:
if (tiVerificationNeeded)
{
typeInfo tiArray = impStackTop(1).seTypeInfo;
typeInfo tiIndex = impStackTop().seTypeInfo;
// As per ECMA 'index' specified can be either int32 or native int.
Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
if (tiArray.IsNullObjRef())
{
if (lclTyp == TYP_REF)
{ // we will say a deref of a null array yields a null ref
tiRetVal = typeInfo(TI_NULL);
}
else
{
tiRetVal = typeInfo(lclTyp);
}
}
else
{
tiRetVal = verGetArrayElemType(tiArray);
typeInfo arrayElemTi = typeInfo(lclTyp);
#ifdef TARGET_64BIT
if (opcode == CEE_LDELEM_I)
{
arrayElemTi = typeInfo::nativeInt();
}
if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
{
Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
}
else
#endif // TARGET_64BIT
{
Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
}
}
tiRetVal.NormaliseForStack();
}
ARR_LD_POST_VERIFY:
/* Pull the index value and array address */
op2 = impPopStack().val;
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
/* Check for null pointer - in the inliner case we simply abort */
if (compIsForInlining())
{
if (op1->gtOper == GT_CNS_INT)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
return;
}
}
op1 = impCheckForNullPointer(op1);
/* Mark the block as containing an index expression */
if (op1->gtOper == GT_LCL_VAR)
{
if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
{
block->bbFlags |= BBF_HAS_IDX_LEN;
optMethodFlags |= OMF_HAS_ARRAYREF;
}
}
/* Create the index node and push it on the stack */
op1 = gtNewIndexRef(lclTyp, op1, op2);
ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
if ((opcode == CEE_LDELEMA) || ldstruct ||
(ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
{
assert(ldelemClsHnd != DUMMY_INIT(NULL));
// remember the element size
if (lclTyp == TYP_REF)
{
op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE;
}
else
{
// If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
{
op1->AsIndex()->gtStructElemClass = ldelemClsHnd;
}
assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr);
if (lclTyp == TYP_STRUCT)
{
size = info.compCompHnd->getClassSize(ldelemClsHnd);
op1->AsIndex()->gtIndElemSize = size;
op1->gtType = lclTyp;
}
}
if ((opcode == CEE_LDELEMA) || ldstruct)
{
// wrap it in a &
lclTyp = TYP_BYREF;
op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
}
else
{
assert(lclTyp != TYP_STRUCT);
}
}
if (ldstruct)
{
// Create an OBJ for the result
op1 = gtNewObjNode(ldelemClsHnd, op1);
op1->gtFlags |= GTF_EXCEPT;
}
impPushOnStack(op1, tiRetVal);
break;
// stelem for reference and value types
case CEE_STELEM:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
stelemClsHnd = resolvedToken.hClass;
if (tiVerificationNeeded)
{
typeInfo tiArray = impStackTop(2).seTypeInfo;
typeInfo tiIndex = impStackTop(1).seTypeInfo;
typeInfo tiValue = impStackTop().seTypeInfo;
// As per ECMA 'index' specified can be either int32 or native int.
Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
"type operand incompatible with array element type");
arrayElem.NormaliseForStack();
Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
}
// If it's a reference type just behave as though it's a stelem.ref instruction
if (!eeIsValueClass(stelemClsHnd))
{
goto STELEM_REF_POST_VERIFY;
}
// Otherwise extract the type
{
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
lclTyp = JITtype2varType(jitTyp);
goto ARR_ST_POST_VERIFY;
}
case CEE_STELEM_REF:
if (tiVerificationNeeded)
{
typeInfo tiArray = impStackTop(2).seTypeInfo;
typeInfo tiIndex = impStackTop(1).seTypeInfo;
typeInfo tiValue = impStackTop().seTypeInfo;
// As per ECMA 'index' specified can be either int32 or native int.
Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
Verify(tiValue.IsObjRef(), "bad value");
// we only check that it is an object reference; the helper does additional checks
Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
}
STELEM_REF_POST_VERIFY:
if (opts.OptimizationEnabled())
{
GenTree* array = impStackTop(2).val;
GenTree* value = impStackTop().val;
// Is this a case where we can skip the covariant store check?
if (impCanSkipCovariantStoreCheck(value, array))
{
lclTyp = TYP_REF;
goto ARR_ST_POST_VERIFY;
}
}
// Else call a helper function to do the assignment
op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr));
goto SPILL_APPEND;
case CEE_STELEM_I1:
lclTyp = TYP_BYTE;
goto ARR_ST;
case CEE_STELEM_I2:
lclTyp = TYP_SHORT;
goto ARR_ST;
case CEE_STELEM_I:
lclTyp = TYP_I_IMPL;
goto ARR_ST;
case CEE_STELEM_I4:
lclTyp = TYP_INT;
goto ARR_ST;
case CEE_STELEM_I8:
lclTyp = TYP_LONG;
goto ARR_ST;
case CEE_STELEM_R4:
lclTyp = TYP_FLOAT;
goto ARR_ST;
case CEE_STELEM_R8:
lclTyp = TYP_DOUBLE;
goto ARR_ST;
ARR_ST:
if (tiVerificationNeeded)
{
typeInfo tiArray = impStackTop(2).seTypeInfo;
typeInfo tiIndex = impStackTop(1).seTypeInfo;
typeInfo tiValue = impStackTop().seTypeInfo;
// As per ECMA 'index' specified can be either int32 or native int.
Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
typeInfo arrayElem = typeInfo(lclTyp);
#ifdef TARGET_64BIT
if (opcode == CEE_STELEM_I)
{
arrayElem = typeInfo::nativeInt();
}
#endif // TARGET_64BIT
Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
"bad array");
Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
"bad value");
}
ARR_ST_POST_VERIFY:
/* The strict order of evaluation is LHS-operands, RHS-operands,
   range-check, and then assignment. However, codegen currently
   does the range-check before evaluating the RHS-operands. So to
   maintain strict ordering, we spill the stack. */
if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Strict ordering of exceptions for Array store"));
}
/* Pull the new value from the stack */
op2 = impPopStack().val;
/* Pull the index value */
op1 = impPopStack().val;
/* Pull the array address */
op3 = impPopStack().val;
assertImp(op3->gtType == TYP_REF);
if (op2->IsLocalAddrExpr() != nullptr)
{
op2->gtType = TYP_I_IMPL;
}
op3 = impCheckForNullPointer(op3);
// Mark the block as containing an index expression
if (op3->gtOper == GT_LCL_VAR)
{
if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
{
block->bbFlags |= BBF_HAS_IDX_LEN;
optMethodFlags |= OMF_HAS_ARRAYREF;
}
}
/* Create the index node */
op1 = gtNewIndexRef(lclTyp, op3, op1);
/* Create the assignment node and append it */
if (lclTyp == TYP_STRUCT)
{
assert(stelemClsHnd != DUMMY_INIT(NULL));
op1->AsIndex()->gtStructElemClass = stelemClsHnd;
op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
}
if (varTypeIsStruct(op1))
{
op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
}
else
{
op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
op1 = gtNewAssignNode(op1, op2);
}
/* Mark the expression as containing an assignment */
op1->gtFlags |= GTF_ASG;
goto SPILL_APPEND;
case CEE_ADD:
oper = GT_ADD;
goto MATH_OP2;
case CEE_ADD_OVF:
uns = false;
goto ADD_OVF;
case CEE_ADD_OVF_UN:
uns = true;
goto ADD_OVF;
ADD_OVF:
ovfl = true;
callNode = false;
oper = GT_ADD;
goto MATH_OP2_FLAGS;
case CEE_SUB:
oper = GT_SUB;
goto MATH_OP2;
case CEE_SUB_OVF:
uns = false;
goto SUB_OVF;
case CEE_SUB_OVF_UN:
uns = true;
goto SUB_OVF;
SUB_OVF:
ovfl = true;
callNode = false;
oper = GT_SUB;
goto MATH_OP2_FLAGS;
case CEE_MUL:
oper = GT_MUL;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_MUL_OVF:
uns = false;
goto MUL_OVF;
case CEE_MUL_OVF_UN:
uns = true;
goto MUL_OVF;
MUL_OVF:
ovfl = true;
oper = GT_MUL;
goto MATH_MAYBE_CALL_OVF;
// Other binary math operations
case CEE_DIV:
oper = GT_DIV;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_DIV_UN:
oper = GT_UDIV;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_REM:
oper = GT_MOD;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_REM_UN:
oper = GT_UMOD;
goto MATH_MAYBE_CALL_NO_OVF;
MATH_MAYBE_CALL_NO_OVF:
ovfl = false;
MATH_MAYBE_CALL_OVF:
// Morpher has some complex logic about when to turn different
// typed nodes on different platforms into helper calls. We
// need to either duplicate that logic here, or just
// pessimistically make all the nodes large enough to become
// call nodes. Since call nodes aren't that much larger and
// these opcodes are infrequent enough I chose the latter.
callNode = true;
goto MATH_OP2_FLAGS;
case CEE_AND:
oper = GT_AND;
goto MATH_OP2;
case CEE_OR:
oper = GT_OR;
goto MATH_OP2;
case CEE_XOR:
oper = GT_XOR;
goto MATH_OP2;
MATH_OP2: // For default values of 'ovfl' and 'callNode'
ovfl = false;
callNode = false;
MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
/* Pull two values and push back the result */
if (tiVerificationNeeded)
{
const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
const typeInfo& tiOp2 = impStackTop().seTypeInfo;
Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
{
Verify(tiOp1.IsNumberType(), "not number");
}
else
{
Verify(tiOp1.IsIntegerType(), "not integer");
}
Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
tiRetVal = tiOp1;
#ifdef TARGET_64BIT
if (tiOp2.IsNativeIntType())
{
tiRetVal = tiOp2;
}
#endif // TARGET_64BIT
}
op2 = impPopStack().val;
op1 = impPopStack().val;
#if !CPU_HAS_FP_SUPPORT
if (varTypeIsFloating(op1->gtType))
{
callNode = true;
}
#endif
/* Can't do arithmetic with references */
assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
// Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only
// if it is in the stack)
impBashVarAddrsToI(op1, op2);
type = impGetByRefResultType(oper, uns, &op1, &op2);
assert(!ovfl || !varTypeIsFloating(op1->gtType));
/* Special case: "int+0", "int-0", "int*1", "int/1" */
if (op2->gtOper == GT_CNS_INT)
{
if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
(op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
{
impPushOnStack(op1, tiRetVal);
break;
}
}
// We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
//
if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
{
if (op1->TypeGet() != type)
{
// We insert a cast of op1 to 'type'
op1 = gtNewCastNode(type, op1, false, type);
}
if (op2->TypeGet() != type)
{
// We insert a cast of op2 to 'type'
op2 = gtNewCastNode(type, op2, false, type);
}
}
if (callNode)
{
/* These operators can later be transformed into 'GT_CALL' */
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
#ifndef TARGET_ARM
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
#endif
// It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
// that we'll need to transform into a general large node, but rather specifically
// to a call: by doing it this way, things keep working if there are multiple sizes,
// and a CALL is no longer the largest.
// That said, as of now it *is* a large node, so we'll do this with an assert rather
// than an "if".
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
}
else
{
op1 = gtNewOperNode(oper, type, op1, op2);
}
/* Special case: integer/long division may throw an exception */
if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
{
op1->gtFlags |= GTF_EXCEPT;
}
if (ovfl)
{
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
if (ovflType != TYP_UNKNOWN)
{
op1->gtType = ovflType;
}
op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
if (uns)
{
op1->gtFlags |= GTF_UNSIGNED;
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_SHL:
oper = GT_LSH;
goto CEE_SH_OP2;
case CEE_SHR:
oper = GT_RSH;
goto CEE_SH_OP2;
case CEE_SHR_UN:
oper = GT_RSZ;
goto CEE_SH_OP2;
CEE_SH_OP2:
if (tiVerificationNeeded)
{
const typeInfo& tiVal = impStackTop(1).seTypeInfo;
const typeInfo& tiShift = impStackTop(0).seTypeInfo;
Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
tiRetVal = tiVal;
}
op2 = impPopStack().val;
op1 = impPopStack().val; // operand to be shifted
impBashVarAddrsToI(op1, op2);
type = genActualType(op1->TypeGet());
op1 = gtNewOperNode(oper, type, op1, op2);
impPushOnStack(op1, tiRetVal);
break;
case CEE_NOT:
if (tiVerificationNeeded)
{
tiRetVal = impStackTop().seTypeInfo;
Verify(tiRetVal.IsIntegerType(), "bad int value");
}
op1 = impPopStack().val;
impBashVarAddrsToI(op1, nullptr);
type = genActualType(op1->TypeGet());
impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
break;
case CEE_CKFINITE:
if (tiVerificationNeeded)
{
tiRetVal = impStackTop().seTypeInfo;
Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
}
op1 = impPopStack().val;
type = op1->TypeGet();
op1 = gtNewOperNode(GT_CKFINITE, type, op1);
op1->gtFlags |= GTF_EXCEPT;
impPushOnStack(op1, tiRetVal);
break;
case CEE_LEAVE:
val = getI4LittleEndian(codeAddr); // jump distance
jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
goto LEAVE;
case CEE_LEAVE_S:
val = getI1LittleEndian(codeAddr); // jump distance
jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
LEAVE:
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
return;
}
JITDUMP(" %04X", jmpAddr);
if (block->bbJumpKind != BBJ_LEAVE)
{
impResetLeaveBlock(block, jmpAddr);
}
assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
impImportLeave(block);
impNoteBranchOffs();
break;
case CEE_BR:
case CEE_BR_S:
jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
if (compIsForInlining() && jmpDist == 0)
{
break; /* NOP */
}
impNoteBranchOffs();
break;
case CEE_BRTRUE:
case CEE_BRTRUE_S:
case CEE_BRFALSE:
case CEE_BRFALSE_S:
/* Pop the comparand (now there's a neat term) from the stack */
if (tiVerificationNeeded)
{
typeInfo& tiVal = impStackTop().seTypeInfo;
Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
"bad value");
}
op1 = impPopStack().val;
type = op1->TypeGet();
// Per ECMA-335, brfalse and brtrue are only specified for nint, ref, and byref.
//
// We've historically been a bit more permissive, so here we allow
// any type that gtNewZeroConNode can handle.
if (!varTypeIsArithmetic(type) && !varTypeIsGC(type))
{
BADCODE("invalid type for brtrue/brfalse");
}
if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
if (op1->gtFlags & GTF_GLOB_EFFECT)
{
op1 = gtUnusedValNode(op1);
goto SPILL_APPEND;
}
else
{
break;
}
}
if (op1->OperIsCompare())
{
if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
{
// Flip the sense of the compare
op1 = gtReverseCond(op1);
}
}
else
{
// We'll compare against an equally-sized integer 0
// For small types, we always compare against int
op2 = gtNewZeroConNode(genActualType(op1->gtType));
// Create the comparison operator and try to fold it
oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
}
// fall through
COND_JUMP:
/* Fold comparison if we can */
op1 = gtFoldExpr(op1);
/* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
/* Don't make any blocks unreachable in import only mode */
if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
{
/* gtFoldExpr() should prevent this as we don't want to make any blocks
unreachable under compDbgCode */
assert(!opts.compDbgCode);
BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
assertImp((block->bbJumpKind == BBJ_COND) // normal case
|| (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
// block for the second time
block->bbJumpKind = foldedJumpKind;
#ifdef DEBUG
if (verbose)
{
if (op1->AsIntCon()->gtIconVal)
{
printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
block->bbJumpDest->bbNum);
}
else
{
printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
}
}
#endif
break;
}
op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
/* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
in impImportBlock(block). For correct line numbers, spill stack. */
if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
{
impSpillStackEnsure(true);
}
goto SPILL_APPEND;
case CEE_CEQ:
oper = GT_EQ;
uns = false;
goto CMP_2_OPs;
case CEE_CGT_UN:
oper = GT_GT;
uns = true;
goto CMP_2_OPs;
case CEE_CGT:
oper = GT_GT;
uns = false;
goto CMP_2_OPs;
case CEE_CLT_UN:
oper = GT_LT;
uns = true;
goto CMP_2_OPs;
case CEE_CLT:
oper = GT_LT;
uns = false;
goto CMP_2_OPs;
CMP_2_OPs:
if (tiVerificationNeeded)
{
verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
tiRetVal = typeInfo(TI_INT);
}
op2 = impPopStack().val;
op1 = impPopStack().val;
#ifdef TARGET_64BIT
if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
{
op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
(varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) ||
(varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
/* Create the comparison node */
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
/* TODO: setting both flags when only one is appropriate */
if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
{
op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
}
// Fold result, if possible.
op1 = gtFoldExpr(op1);
impPushOnStack(op1, tiRetVal);
break;
case CEE_BEQ_S:
case CEE_BEQ:
oper = GT_EQ;
goto CMP_2_OPs_AND_BR;
case CEE_BGE_S:
case CEE_BGE:
oper = GT_GE;
goto CMP_2_OPs_AND_BR;
case CEE_BGE_UN_S:
case CEE_BGE_UN:
oper = GT_GE;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BGT_S:
case CEE_BGT:
oper = GT_GT;
goto CMP_2_OPs_AND_BR;
case CEE_BGT_UN_S:
case CEE_BGT_UN:
oper = GT_GT;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BLE_S:
case CEE_BLE:
oper = GT_LE;
goto CMP_2_OPs_AND_BR;
case CEE_BLE_UN_S:
case CEE_BLE_UN:
oper = GT_LE;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BLT_S:
case CEE_BLT:
oper = GT_LT;
goto CMP_2_OPs_AND_BR;
case CEE_BLT_UN_S:
case CEE_BLT_UN:
oper = GT_LT;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BNE_UN_S:
case CEE_BNE_UN:
oper = GT_NE;
goto CMP_2_OPs_AND_BR_UN;
CMP_2_OPs_AND_BR_UN:
uns = true;
unordered = true;
goto CMP_2_OPs_AND_BR_ALL;
CMP_2_OPs_AND_BR:
uns = false;
unordered = false;
goto CMP_2_OPs_AND_BR_ALL;
CMP_2_OPs_AND_BR_ALL:
if (tiVerificationNeeded)
{
verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
}
/* Pull two values */
op2 = impPopStack().val;
op1 = impPopStack().val;
#ifdef TARGET_64BIT
if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
{
op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
(varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) ||
(varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
if (op1->gtFlags & GTF_GLOB_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Branch to next Optimization, op1 side effect"));
impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
if (op2->gtFlags & GTF_GLOB_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Branch to next Optimization, op2 side effect"));
impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
#ifdef DEBUG
if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
{
impNoteLastILoffs();
}
#endif
break;
}
// We can generate an compare of different sized floating point op1 and op2
// We insert a cast
//
if (varTypeIsFloating(op1->TypeGet()))
{
if (op1->TypeGet() != op2->TypeGet())
{
assert(varTypeIsFloating(op2->TypeGet()));
// say op1=double, op2=float. To avoid loss of precision
// while comparing, op2 is converted to double and double
// comparison is done.
if (op1->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op2 to TYP_DOUBLE
op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op1 to TYP_DOUBLE
op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
}
/* Create and append the operator */
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
if (uns)
{
op1->gtFlags |= GTF_UNSIGNED;
}
if (unordered)
{
op1->gtFlags |= GTF_RELOP_NAN_UN;
}
goto COND_JUMP;
case CEE_SWITCH:
assert(!compIsForInlining());
if (tiVerificationNeeded)
{
Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
}
/* Pop the switch value off the stack */
op1 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
/* We can create a switch node */
op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
val = (int)getU4LittleEndian(codeAddr);
codeAddr += 4 + val * 4; // skip over the switch-table
goto SPILL_APPEND;
/************************** Casting OPCODES ***************************/
case CEE_CONV_OVF_I1:
lclTyp = TYP_BYTE;
goto CONV_OVF;
case CEE_CONV_OVF_I2:
lclTyp = TYP_SHORT;
goto CONV_OVF;
case CEE_CONV_OVF_I:
lclTyp = TYP_I_IMPL;
goto CONV_OVF;
case CEE_CONV_OVF_I4:
lclTyp = TYP_INT;
goto CONV_OVF;
case CEE_CONV_OVF_I8:
lclTyp = TYP_LONG;
goto CONV_OVF;
case CEE_CONV_OVF_U1:
lclTyp = TYP_UBYTE;
goto CONV_OVF;
case CEE_CONV_OVF_U2:
lclTyp = TYP_USHORT;
goto CONV_OVF;
case CEE_CONV_OVF_U:
lclTyp = TYP_U_IMPL;
goto CONV_OVF;
case CEE_CONV_OVF_U4:
lclTyp = TYP_UINT;
goto CONV_OVF;
case CEE_CONV_OVF_U8:
lclTyp = TYP_ULONG;
goto CONV_OVF;
case CEE_CONV_OVF_I1_UN:
lclTyp = TYP_BYTE;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I2_UN:
lclTyp = TYP_SHORT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I_UN:
lclTyp = TYP_I_IMPL;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I4_UN:
lclTyp = TYP_INT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I8_UN:
lclTyp = TYP_LONG;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U1_UN:
lclTyp = TYP_UBYTE;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U2_UN:
lclTyp = TYP_USHORT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U_UN:
lclTyp = TYP_U_IMPL;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U4_UN:
lclTyp = TYP_UINT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U8_UN:
lclTyp = TYP_ULONG;
goto CONV_OVF_UN;
CONV_OVF_UN:
uns = true;
goto CONV_OVF_COMMON;
CONV_OVF:
uns = false;
goto CONV_OVF_COMMON;
CONV_OVF_COMMON:
ovfl = true;
goto _CONV;
case CEE_CONV_I1:
lclTyp = TYP_BYTE;
goto CONV;
case CEE_CONV_I2:
lclTyp = TYP_SHORT;
goto CONV;
case CEE_CONV_I:
lclTyp = TYP_I_IMPL;
goto CONV;
case CEE_CONV_I4:
lclTyp = TYP_INT;
goto CONV;
case CEE_CONV_I8:
lclTyp = TYP_LONG;
goto CONV;
case CEE_CONV_U1:
lclTyp = TYP_UBYTE;
goto CONV;
case CEE_CONV_U2:
lclTyp = TYP_USHORT;
goto CONV;
#if (REGSIZE_BYTES == 8)
case CEE_CONV_U:
lclTyp = TYP_U_IMPL;
goto CONV_UN;
#else
case CEE_CONV_U:
lclTyp = TYP_U_IMPL;
goto CONV;
#endif
case CEE_CONV_U4:
lclTyp = TYP_UINT;
goto CONV;
case CEE_CONV_U8:
lclTyp = TYP_ULONG;
goto CONV_UN;
case CEE_CONV_R4:
lclTyp = TYP_FLOAT;
goto CONV;
case CEE_CONV_R8:
lclTyp = TYP_DOUBLE;
goto CONV;
case CEE_CONV_R_UN:
lclTyp = TYP_DOUBLE;
goto CONV_UN;
CONV_UN:
uns = true;
ovfl = false;
goto _CONV;
CONV:
uns = false;
ovfl = false;
goto _CONV;
_CONV:
// just check that we have a number on the stack
if (tiVerificationNeeded)
{
const typeInfo& tiVal = impStackTop().seTypeInfo;
Verify(tiVal.IsNumberType(), "bad arg");
#ifdef TARGET_64BIT
bool isNative = false;
switch (opcode)
{
case CEE_CONV_OVF_I:
case CEE_CONV_OVF_I_UN:
case CEE_CONV_I:
case CEE_CONV_OVF_U:
case CEE_CONV_OVF_U_UN:
case CEE_CONV_U:
isNative = true;
break;
default:
// leave 'isNative' = false;
break;
}
if (isNative)
{
tiRetVal = typeInfo::nativeInt();
}
else
#endif // TARGET_64BIT
{
tiRetVal = typeInfo(lclTyp).NormaliseForStack();
}
}
// only converts from FLOAT or DOUBLE to an integer type
// and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls
if (varTypeIsFloating(lclTyp))
{
callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
#ifdef TARGET_64BIT
// TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
// TYP_BYREF could be used as TYP_I_IMPL which is long.
// TODO-CQ: remove this when we lower casts long/ulong --> float/double
// and generate SSE2 code instead of going through helper calls.
|| (impStackTop().val->TypeGet() == TYP_BYREF)
#endif
;
}
else
{
callNode = varTypeIsFloating(impStackTop().val->TypeGet());
}
// At this point uns, ovf, callNode all set
op1 = impPopStack().val;
impBashVarAddrsToI(op1);
if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
{
op2 = op1->AsOp()->gtOp2;
if (op2->gtOper == GT_CNS_INT)
{
ssize_t ival = op2->AsIntCon()->gtIconVal;
ssize_t mask, umask;
switch (lclTyp)
{
case TYP_BYTE:
case TYP_UBYTE:
mask = 0x00FF;
umask = 0x007F;
break;
case TYP_USHORT:
case TYP_SHORT:
mask = 0xFFFF;
umask = 0x7FFF;
break;
default:
assert(!"unexpected type");
return;
}
if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
{
/* Toss the cast, it's a waste of time */
impPushOnStack(op1, tiRetVal);
break;
}
else if (ival == mask)
{
/* Toss the masking, it's a waste of time, since
we sign-extend from the small value anyways */
op1 = op1->AsOp()->gtOp1;
}
}
}
/* The 'op2' sub-operand of a cast is the 'real' type number,
since the result of a cast to one of the 'small' integer
types is an integer.
*/
type = genActualType(lclTyp);
// If this is a no-op cast, just use op1.
if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp)))
{
// Nothing needs to change
}
// Work is evidently required, add cast node
else
{
if (callNode)
{
op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
}
else
{
op1 = gtNewCastNode(type, op1, uns, lclTyp);
}
if (ovfl)
{
op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_NEG:
if (tiVerificationNeeded)
{
tiRetVal = impStackTop().seTypeInfo;
Verify(tiRetVal.IsNumberType(), "Bad arg");
}
op1 = impPopStack().val;
impBashVarAddrsToI(op1, nullptr);
impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
break;
case CEE_POP:
{
/* Pull the top value from the stack */
StackEntry se = impPopStack();
clsHnd = se.seTypeInfo.GetClassHandle();
op1 = se.val;
/* Get hold of the type of the value being duplicated */
lclTyp = genActualType(op1->gtType);
/* Does the value have any side effects? */
if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
{
// Since we are throwing away the value, just normalize
// it to its address. This is more efficient.
if (varTypeIsStruct(op1))
{
JITDUMP("\n ... CEE_POP struct ...\n");
DISPTREE(op1);
#ifdef UNIX_AMD64_ABI
// Non-calls, such as obj or ret_expr, have to go through this.
// Calls with large struct return value have to go through this.
// Helper calls with small struct return value also have to go
// through this since they do not follow Unix calling convention.
if (op1->gtOper != GT_CALL ||
!IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) ||
op1->AsCall()->gtCallType == CT_HELPER)
#endif // UNIX_AMD64_ABI
{
// If the value being produced comes from loading
// via an underlying address, just null check the address.
if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ))
{
gtChangeOperToNullCheck(op1, block);
}
else
{
op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
}
JITDUMP("\n ... optimized to ...\n");
DISPTREE(op1);
}
}
// If op1 is non-overflow cast, throw it away since it is useless.
// Another reason for throwing away the useless cast is in the context of
// implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
// The cast gets added as part of importing GT_CALL, which gets in the way
// of fgMorphCall() on the forms of tail call nodes that we assert.
if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
{
op1 = op1->AsOp()->gtOp1;
}
if (op1->gtOper != GT_CALL)
{
if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0)
{
op1 = gtUnusedValNode(op1);
}
else
{
op1->gtBashToNOP();
}
}
/* Append the value to the tree list */
goto SPILL_APPEND;
}
/* No side effects - just throw the <BEEP> thing away */
}
break;
case CEE_DUP:
{
if (tiVerificationNeeded)
{
// Dup could start the beginning of delegate creation sequence, remember that
delegateCreateStart = codeAddr - 1;
impStackTop(0);
}
// If the expression to dup is simple, just clone it.
// Otherwise spill it to a temp, and reload the temp
// twice.
StackEntry se = impPopStack();
GenTree* tree = se.val;
tiRetVal = se.seTypeInfo;
op1 = tree;
if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
{
const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
var_types type = genActualType(lvaTable[tmpNum].TypeGet());
op1 = gtNewLclvNode(tmpNum, type);
// Propagate type info to the temp from the stack and the original tree
if (type == TYP_REF)
{
assert(lvaTable[tmpNum].lvSingleDef == 0);
lvaTable[tmpNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def local\n", tmpNum);
lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
}
}
op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("DUP instruction"));
assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
impPushOnStack(op1, tiRetVal);
impPushOnStack(op2, tiRetVal);
}
break;
case CEE_STIND_I1:
lclTyp = TYP_BYTE;
goto STIND;
case CEE_STIND_I2:
lclTyp = TYP_SHORT;
goto STIND;
case CEE_STIND_I4:
lclTyp = TYP_INT;
goto STIND;
case CEE_STIND_I8:
lclTyp = TYP_LONG;
goto STIND;
case CEE_STIND_I:
lclTyp = TYP_I_IMPL;
goto STIND;
case CEE_STIND_REF:
lclTyp = TYP_REF;
goto STIND;
case CEE_STIND_R4:
lclTyp = TYP_FLOAT;
goto STIND;
case CEE_STIND_R8:
lclTyp = TYP_DOUBLE;
goto STIND;
STIND:
if (tiVerificationNeeded)
{
typeInfo instrType(lclTyp);
#ifdef TARGET_64BIT
if (opcode == CEE_STIND_I)
{
instrType = typeInfo::nativeInt();
}
#endif // TARGET_64BIT
verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
}
else
{
compUnsafeCastUsed = true; // Have to go conservative
}
STIND_POST_VERIFY:
op2 = impPopStack().val; // value to store
op1 = impPopStack().val; // address to store to
// you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
impBashVarAddrsToI(op1, op2);
op2 = impImplicitR4orR8Cast(op2, lclTyp);
#ifdef TARGET_64BIT
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
{
op2->gtType = TYP_I_IMPL;
}
else
{
// Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity
//
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
if (opcode == CEE_STIND_REF)
{
// STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
lclTyp = genActualType(op2->TypeGet());
}
// Check target type.
#ifdef DEBUG
if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
{
if (op2->gtType == TYP_BYREF)
{
assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
}
else if (lclTyp == TYP_BYREF)
{
assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
}
}
else
{
assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
(varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
}
#endif
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
// stind could point anywhere, example a boxed class static int
op1->gtFlags |= GTF_IND_TGTANYWHERE;
if (prefixFlags & PREFIX_VOLATILE)
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;
}
op1 = gtNewAssignNode(op1, op2);
op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
// Spill side-effects AND global-data-accesses
if (verCurrentState.esStackDepth > 0)
{
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
}
goto APPEND;
case CEE_LDIND_I1:
lclTyp = TYP_BYTE;
goto LDIND;
case CEE_LDIND_I2:
lclTyp = TYP_SHORT;
goto LDIND;
case CEE_LDIND_U4:
case CEE_LDIND_I4:
lclTyp = TYP_INT;
goto LDIND;
case CEE_LDIND_I8:
lclTyp = TYP_LONG;
goto LDIND;
case CEE_LDIND_REF:
lclTyp = TYP_REF;
goto LDIND;
case CEE_LDIND_I:
lclTyp = TYP_I_IMPL;
goto LDIND;
case CEE_LDIND_R4:
lclTyp = TYP_FLOAT;
goto LDIND;
case CEE_LDIND_R8:
lclTyp = TYP_DOUBLE;
goto LDIND;
case CEE_LDIND_U1:
lclTyp = TYP_UBYTE;
goto LDIND;
case CEE_LDIND_U2:
lclTyp = TYP_USHORT;
goto LDIND;
LDIND:
if (tiVerificationNeeded)
{
typeInfo lclTiType(lclTyp);
#ifdef TARGET_64BIT
if (opcode == CEE_LDIND_I)
{
lclTiType = typeInfo::nativeInt();
}
#endif // TARGET_64BIT
tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
tiRetVal.NormaliseForStack();
}
else
{
compUnsafeCastUsed = true; // Have to go conservative
}
LDIND_POST_VERIFY:
op1 = impPopStack().val; // address to load from
impBashVarAddrsToI(op1);
#ifdef TARGET_64BIT
// Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
//
if (genActualType(op1->gtType) == TYP_INT)
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
}
#endif
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
// ldind could point anywhere, example a boxed class static int
op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
if (prefixFlags & PREFIX_VOLATILE)
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_UNALIGNED:
assert(sz == 1);
val = getU1LittleEndian(codeAddr);
++codeAddr;
JITDUMP(" %u", val);
if ((val != 1) && (val != 2) && (val != 4))
{
BADCODE("Alignment unaligned. must be 1, 2, or 4");
}
Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
prefixFlags |= PREFIX_UNALIGNED;
impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
PREFIX:
opcode = (OPCODE)getU1LittleEndian(codeAddr);
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
codeAddr += sizeof(__int8);
goto DECODE_OPCODE;
case CEE_VOLATILE:
Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
prefixFlags |= PREFIX_VOLATILE;
impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
assert(sz == 0);
goto PREFIX;
case CEE_LDFTN:
{
// Need to do a lookup here so that we perform an access check
// and do a NOWAY if protections are violated
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
&callInfo);
// This check really only applies to intrinsic Array.Address methods
if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
NO_WAY("Currently do not support LDFTN of Parameterized functions");
}
// Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
if (tiVerificationNeeded)
{
// LDFTN could start the beginning of delegate creation sequence, remember that
delegateCreateStart = codeAddr - 2;
// check any constraints on the callee's class and type parameters
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
"method has unsatisfied class constraints");
VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
resolvedToken.hMethod),
"method has unsatisfied method constraints");
mflags = callInfo.verMethodFlags;
Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
}
DO_LDFTN:
op1 = impMethodPointer(&resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
// Call info may have more precise information about the function than
// the resolved token.
CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
assert(callInfo.hMethod != nullptr);
heapToken->hMethod = callInfo.hMethod;
impPushOnStack(op1, typeInfo(heapToken));
break;
}
case CEE_LDVIRTFTN:
{
/* Get the method token */
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
CORINFO_CALLINFO_CALLVIRT)),
&callInfo);
// This check really only applies to intrinsic Array.Address methods
if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
NO_WAY("Currently do not support LDFTN of Parameterized functions");
}
mflags = callInfo.methodFlags;
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
if (compIsForInlining())
{
if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
return;
}
}
CORINFO_SIG_INFO& ftnSig = callInfo.sig;
if (tiVerificationNeeded)
{
Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
// JIT32 verifier rejects verifiable ldvirtftn pattern
typeInfo declType =
verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
typeInfo arg = impStackTop().seTypeInfo;
Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
"bad ldvirtftn");
CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
{
instanceClassHnd = arg.GetClassHandleForObjRef();
}
// check any constraints on the method's class and type parameters
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
"method has unsatisfied class constraints");
VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
resolvedToken.hMethod),
"method has unsatisfied method constraints");
if (mflags & CORINFO_FLG_PROTECTED)
{
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
"Accessing protected method through wrong type.");
}
}
/* Get the object-ref */
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
if (opts.IsReadyToRun())
{
if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
{
if (op1->gtFlags & GTF_SIDE_EFFECT)
{
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
}
goto DO_LDFTN;
}
}
else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
{
if (op1->gtFlags & GTF_SIDE_EFFECT)
{
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
}
goto DO_LDFTN;
}
GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
assert(callInfo.hMethod != nullptr);
heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
heapToken->hMethod = callInfo.hMethod;
impPushOnStack(fptr, typeInfo(heapToken));
break;
}
case CEE_CONSTRAINED:
assertImp(sz == sizeof(unsigned));
impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
JITDUMP(" (%08X) ", constrainedResolvedToken.token);
Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
prefixFlags |= PREFIX_CONSTRAINED;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (actualOpcode != CEE_CALLVIRT)
{
BADCODE("constrained. has to be followed by callvirt");
}
}
goto PREFIX;
case CEE_READONLY:
JITDUMP(" readonly.");
Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
prefixFlags |= PREFIX_READONLY;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
{
BADCODE("readonly. has to be followed by ldelema or call");
}
}
assert(sz == 0);
goto PREFIX;
case CEE_TAILCALL:
JITDUMP(" tail.");
Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (!impOpcodeIsCallOpcode(actualOpcode))
{
BADCODE("tailcall. has to be followed by call, callvirt or calli");
}
}
assert(sz == 0);
goto PREFIX;
case CEE_NEWOBJ:
/* Since we will implicitly insert newObjThisPtr at the start of the
argument list, spill any GTF_ORDER_SIDEEFF */
impSpillSpecialSideEff();
/* NEWOBJ does not respond to TAIL */
prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
/* NEWOBJ does not respond to CONSTRAINED */
prefixFlags &= ~PREFIX_CONSTRAINED;
_impResolveToken(CORINFO_TOKENKIND_NewObj);
eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
&callInfo);
if (compIsForInlining())
{
if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
{
// Check to see if this call violates the boundary.
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
return;
}
}
mflags = callInfo.methodFlags;
if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
{
BADCODE("newobj on static or abstract method");
}
// Insert the security callout before any actual code is generated
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
// There are three different cases for new
// Object size is variable (depends on arguments)
// 1) Object is an array (arrays treated specially by the EE)
// 2) Object is some other variable sized object (e.g. String)
// 3) Class Size can be determined beforehand (normal case)
// In the first case, we need to call a NEWOBJ helper (multinewarray)
// in the second case we call the constructor with a '0' this pointer
// In the third case we alloc the memory, then call the constuctor
clsFlags = callInfo.classFlags;
if (clsFlags & CORINFO_FLG_ARRAY)
{
if (tiVerificationNeeded)
{
CORINFO_CLASS_HANDLE elemTypeHnd;
INDEBUG(CorInfoType corType =)
info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
Verify(elemTypeHnd == nullptr ||
!(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
"newarr of byref-like objects");
verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
&callInfo DEBUGARG(info.compFullName));
}
// Arrays need to call the NEWOBJ helper.
assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
impImportNewObjArray(&resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
callTyp = TYP_REF;
break;
}
// At present this can only be String
else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
{
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
// The dummy argument does not exist in CoreRT
newObjThisPtr = nullptr;
}
else
{
// This is the case for variable-sized objects that are not
// arrays. In this case, call the constructor with a null 'this'
// pointer
newObjThisPtr = gtNewIconNode(0, TYP_REF);
}
/* Remember that this basic block contains 'new' of an object */
block->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
}
else
{
// This is the normal case where the size of the object is
// fixed. Allocate the memory and call the constructor.
// Note: We cannot add a peep to avoid use of temp here
// becase we don't have enough interference info to detect when
// sources and destination interfere, example: s = new S(ref);
// TODO: We find the correct place to introduce a general
// reverse copy prop for struct return values from newobj or
// any function returning structs.
/* get a temporary for the new object */
lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
if (compDonotInline())
{
// Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
return;
}
// In the value class case we only need clsHnd for size calcs.
//
// The lookup of the code pointer will be handled by CALL in this case
if (clsFlags & CORINFO_FLG_VALUECLASS)
{
if (compIsForInlining())
{
// If value class has GC fields, inform the inliner. It may choose to
// bail out on the inline.
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
{
compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
if (compInlineResult->IsFailure())
{
return;
}
// Do further notification in the case where the call site is rare;
// some policies do not track the relative hotness of call sites for
// "always" inline cases.
if (impInlineInfo->iciBlock->isRunRarely())
{
compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
if (compInlineResult->IsFailure())
{
return;
}
}
}
}
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
}
else
{
// The local variable itself is the allocated space.
// Here we need unsafe value cls check, since the address of struct is taken for further use
// and potentially exploitable.
lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
}
bool bbInALoop = impBlockIsInALoop(block);
bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) &&
(!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN));
LclVarDsc* const lclDsc = lvaGetDesc(lclNum);
if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn))
{
// Append a tree to zero-out the temp
newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet());
newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
gtNewIconNode(0), // Value
false, // isVolatile
false); // not copyBlock
impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
else
{
JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum);
lclDsc->lvSuppressedZeroInit = 1;
compSuppressedZeroInit = true;
}
// Obtain the address of the temp
newObjThisPtr =
gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
}
else
{
const BOOL useParent = TRUE;
op1 = gtNewAllocObjNode(&resolvedToken, useParent);
if (op1 == nullptr)
{
return;
}
// Remember that this basic block contains 'new' of an object
block->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
// Append the assignment to the temp/local. Dont need to spill
// at all as we are just calling an EE-Jit helper which can only
// cause an (async) OutOfMemoryException.
// We assign the newly allocated object (by a GT_ALLOCOBJ node)
// to a temp. Note that the pattern "temp = allocObj" is required
// by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
// without exhaustive walk over all expressions.
impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
assert(lvaTable[lclNum].lvSingleDef == 0);
lvaTable[lclNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def local\n", lclNum);
lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
}
}
goto CALL;
case CEE_CALLI:
/* CALLI does not respond to CONSTRAINED */
prefixFlags &= ~PREFIX_CONSTRAINED;
if (compIsForInlining())
{
// CALLI doesn't have a method handle, so assume the worst.
if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
return;
}
}
FALLTHROUGH;
case CEE_CALLVIRT:
case CEE_CALL:
// We can't call getCallInfo on the token from a CALLI, but we need it in
// many other places. We unfortunately embed that knowledge here.
if (opcode != CEE_CALLI)
{
_impResolveToken(CORINFO_TOKENKIND_Method);
eeGetCallInfo(&resolvedToken,
(prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
// this is how impImportCall invokes getCallInfo
addVerifyFlag(
combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
(opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
: CORINFO_CALLINFO_NONE)),
&callInfo);
}
else
{
// Suppress uninitialized use warning.
memset(&resolvedToken, 0, sizeof(resolvedToken));
memset(&callInfo, 0, sizeof(callInfo));
resolvedToken.token = getU4LittleEndian(codeAddr);
resolvedToken.tokenContext = impTokenLookupContextHandle;
resolvedToken.tokenScope = info.compScopeHnd;
}
CALL: // memberRef should be set.
// newObjThisPtr should be set for CEE_NEWOBJ
JITDUMP(" %08X", resolvedToken.token);
constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
bool newBBcreatedForTailcallStress;
bool passedStressModeValidation;
newBBcreatedForTailcallStress = false;
passedStressModeValidation = true;
if (compIsForInlining())
{
if (compDonotInline())
{
return;
}
// We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
}
else
{
if (compTailCallStress())
{
// Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
// Tail call stress only recognizes call+ret patterns and forces them to be
// explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
// doesn't import 'ret' opcode following the call into the basic block containing
// the call instead imports it to a new basic block. Note that fgMakeBasicBlocks()
// is already checking that there is an opcode following call and hence it is
// safe here to read next opcode without bounds check.
newBBcreatedForTailcallStress =
impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
// make it jump to RET.
(OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
if (newBBcreatedForTailcallStress && !hasTailPrefix)
{
// Do a more detailed evaluation of legality
const bool returnFalseIfInvalid = true;
const bool passedConstraintCheck =
verCheckTailCallConstraint(opcode, &resolvedToken,
constraintCall ? &constrainedResolvedToken : nullptr,
returnFalseIfInvalid);
if (passedConstraintCheck)
{
// Now check with the runtime
CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod;
bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) ||
(callInfo.kind == CORINFO_VIRTUALCALL_VTABLE);
CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd;
if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd,
hasTailPrefix)) // Is it legal to do tailcall?
{
// Stress the tailcall.
JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
prefixFlags |= PREFIX_TAILCALL_STRESS;
}
else
{
// Runtime disallows this tail call
JITDUMP(" (Tailcall stress: runtime preventing tailcall)");
passedStressModeValidation = false;
}
}
else
{
// Constraints disallow this tail call
JITDUMP(" (Tailcall stress: constraint check failed)");
passedStressModeValidation = false;
}
}
}
}
// This is split up to avoid goto flow warnings.
bool isRecursive;
isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
// If we've already disqualified this call as a tail call under tail call stress,
// don't consider it for implicit tail calling either.
//
// When not running under tail call stress, we may mark this call as an implicit
// tail call candidate. We'll do an "equivalent" validation during impImportCall.
//
// Note that when running under tail call stress, a call marked as explicit
// tail prefixed will not be considered for implicit tail calling.
if (passedStressModeValidation &&
impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
{
if (compIsForInlining())
{
#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// Are we inlining at an implicit tail call site? If so the we can flag
// implicit tail call sites in the inline body. These call sites
// often end up in non BBJ_RETURN blocks, so only flag them when
// we're able to handle shared returns.
if (impInlineInfo->iciCall->IsImplicitTailCall())
{
JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
}
#endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
}
else
{
JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
}
}
// Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
{
// All calls and delegates need a security callout.
// For delegates, this is the call to the delegate constructor, not the access check on the
// LD(virt)FTN.
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
}
if (tiVerificationNeeded)
{
verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
&callInfo DEBUGARG(info.compFullName));
}
callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
if (compDonotInline())
{
// We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue.
assert((callTyp == TYP_UNDEF) ||
(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
return;
}
if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
// have created a new BB after the "call"
// instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
{
assert(!compIsForInlining());
goto RET;
}
break;
case CEE_LDFLD:
case CEE_LDSFLD:
case CEE_LDFLDA:
case CEE_LDSFLDA:
{
BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
/* Get the CP_Fieldref index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Field);
JITDUMP(" %08X", resolvedToken.token);
int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
GenTree* obj = nullptr;
typeInfo* tiObj = nullptr;
CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
{
tiObj = &impStackTop().seTypeInfo;
StackEntry se = impPopStack();
objType = se.seTypeInfo.GetClassHandle();
obj = se.val;
if (impIsThis(obj))
{
aflags |= CORINFO_ACCESS_THIS;
}
}
eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
// Figure out the type of the member. We always call canAccessField, so you always need this
// handle
CorInfoType ciType = fieldInfo.fieldType;
clsHnd = fieldInfo.structType;
lclTyp = JITtype2varType(ciType);
if (compIsForInlining())
{
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_STATIC_TLS:
compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
return;
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic
* methods */
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
return;
default:
break;
}
if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
clsHnd)
{
if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
!(info.compFlags & CORINFO_FLG_FORCEINLINE))
{
// Loading a static valuetype field usually will cause a JitHelper to be called
// for the static base. This will bloat the code.
compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
if (compInlineResult->IsFailure())
{
return;
}
}
}
}
tiRetVal = verMakeTypeInfo(ciType, clsHnd);
if (isLoadAddress)
{
tiRetVal.MakeByRef();
}
else
{
tiRetVal.NormaliseForStack();
}
// Perform this check always to ensure that we get field access exceptions even with
// SkipVerification.
impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
if (tiVerificationNeeded)
{
// You can also pass the unboxed struct to LDFLD
BOOL bAllowPlainValueTypeAsThis = FALSE;
if (opcode == CEE_LDFLD && impIsValueType(tiObj))
{
bAllowPlainValueTypeAsThis = TRUE;
}
verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
// If we're doing this on a heap object or from a 'safe' byref
// then the result is a safe byref too
if (isLoadAddress) // load address
{
if (fieldInfo.fieldFlags &
CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
{
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
{
tiRetVal.SetIsPermanentHomeByRef();
}
}
else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
{
// ldflda of byref is safe if done on a gc object or on a
// safe byref
tiRetVal.SetIsPermanentHomeByRef();
}
}
}
else
{
// tiVerificationNeeded is false.
// Raise InvalidProgramException if static load accesses non-static field
if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
{
BADCODE("static access on an instance field");
}
}
// We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
{
if (obj->gtFlags & GTF_SIDE_EFFECT)
{
obj = gtUnusedValNode(obj);
impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
}
obj = nullptr;
}
/* Preserve 'small' int types */
if (!varTypeIsSmall(lclTyp))
{
lclTyp = genActualType(lclTyp);
}
bool usesHelper = false;
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN_COMPILER
case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif
{
obj = impCheckForNullPointer(obj);
// If the object is a struct, what we really want is
// for the field to operate on the address of the struct.
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
{
assert(opcode == CEE_LDFLD && objType != nullptr);
obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
}
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
#ifdef FEATURE_READYTORUN_COMPILER
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
{
op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup;
}
#endif
op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
if (fgAddrCouldBeNull(obj))
{
op1->gtFlags |= GTF_EXCEPT;
}
// If gtFldObj is a BYREF then our target is a value class and
// it could point anywhere, example a boxed class static int
if (obj->gtType == TYP_BYREF)
{
op1->gtFlags |= GTF_IND_TGTANYWHERE;
}
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if (StructHasOverlappingFields(typeFlags))
{
op1->AsField()->gtFldMayOverlap = true;
}
// wrap it in a address of operator if necessary
if (isLoadAddress)
{
op1 = gtNewOperNode(GT_ADDR,
(var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
}
else
{
if (compIsForInlining() &&
impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj,
impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
}
break;
case CORINFO_FIELD_STATIC_TLS:
#ifdef TARGET_X86
// Legacy TLS access is implemented as intrinsic on x86 only
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
if (isLoadAddress)
{
op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
}
break;
#else
fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
FALLTHROUGH;
#endif
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
clsHnd, nullptr);
usesHelper = true;
break;
case CORINFO_FIELD_STATIC_ADDRESS:
// Replace static read-only fields with constant if possible
if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
!(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
(varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
{
CorInfoInitClassResult initClassResult =
info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
impTokenLookupContextHandle);
if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
{
void** pFldAddr = nullptr;
void* fldAddr =
info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
// We should always be able to access this static's address directly
//
assert(pFldAddr == nullptr);
op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
// Widen small types since we're propagating the value
// instead of producing an indir.
//
op1->gtType = genActualType(lclTyp);
goto FIELD_DONE;
}
}
FALLTHROUGH;
case CORINFO_FIELD_STATIC_RVA_ADDRESS:
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
lclTyp);
break;
case CORINFO_FIELD_INTRINSIC_ZERO:
{
assert(aflags & CORINFO_ACCESS_GET);
// Widen to stack type
lclTyp = genActualType(lclTyp);
op1 = gtNewIconNode(0, lclTyp);
goto FIELD_DONE;
}
break;
case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
{
assert(aflags & CORINFO_ACCESS_GET);
LPVOID pValue;
InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
op1 = gtNewStringLiteralNode(iat, pValue);
goto FIELD_DONE;
}
break;
case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
{
assert(aflags & CORINFO_ACCESS_GET);
// Widen to stack type
lclTyp = genActualType(lclTyp);
#if BIGENDIAN
op1 = gtNewIconNode(0, lclTyp);
#else
op1 = gtNewIconNode(1, lclTyp);
#endif
goto FIELD_DONE;
}
break;
default:
assert(!"Unexpected fieldAccessor");
}
if (!isLoadAddress)
{
if (prefixFlags & PREFIX_VOLATILE)
{
op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
if (!usesHelper)
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
(op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_VOLATILE;
}
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
if (!usesHelper)
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
(op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_UNALIGNED;
}
}
}
/* Check if the class needs explicit initialization */
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
GenTree* helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
{
return;
}
if (helperNode != nullptr)
{
op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
}
}
FIELD_DONE:
impPushOnStack(op1, tiRetVal);
}
break;
            case CEE_STFLD:
            case CEE_STSFLD:
            {
                // Import a field store: CEE_STFLD (instance field; pops value then object)
                // or CEE_STSFLD (static field; pops only the value). Builds the destination
                // tree in op1 according to the EE-reported access kind, then either appends
                // the assignment directly (helper-based access) or falls through to APPEND.
                BOOL isStoreStatic = (opcode == CEE_STSFLD);
                CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
                /* Get the CP_Fieldref index */
                assertImp(sz == sizeof(unsigned));
                _impResolveToken(CORINFO_TOKENKIND_Field);
                JITDUMP(" %08X", resolvedToken.token);
                int aflags = CORINFO_ACCESS_SET;
                GenTree* obj = nullptr;
                typeInfo* tiObj = nullptr;
                typeInfo tiVal;
                /* Pull the value from the stack */
                StackEntry se = impPopStack();
                op2 = se.val;
                tiVal = se.seTypeInfo;
                clsHnd = tiVal.GetClassHandle();
                if (opcode == CEE_STFLD)
                {
                    // Peek at the object's type info before popping it; accesses through
                    // 'this' are flagged so the EE can apply 'this'-specific access rules.
                    tiObj = &impStackTop().seTypeInfo;
                    obj = impPopStack().val;
                    if (impIsThis(obj))
                    {
                        aflags |= CORINFO_ACCESS_THIS;
                    }
                }
                eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
                // Figure out the type of the member. We always call canAccessField, so you always need this
                // handle
                CorInfoType ciType = fieldInfo.fieldType;
                fieldClsHnd = fieldInfo.structType;
                lclTyp = JITtype2varType(ciType);
                if (compIsForInlining())
                {
                    /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or
                     * per-inst static? */
                    switch (fieldInfo.fieldAccessor)
                    {
                        case CORINFO_FIELD_INSTANCE_HELPER:
                        case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
                        case CORINFO_FIELD_STATIC_ADDR_HELPER:
                        case CORINFO_FIELD_STATIC_TLS:
                            // Helper-based field stores are not supported in an inlinee; abandon the inline.
                            compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
                            return;
                        case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
                        case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
                            /* We may be able to inline the field accessors in specific instantiations of generic
                             * methods */
                            compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
                            return;
                        default:
                            break;
                    }
                }
                impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
                if (tiVerificationNeeded)
                {
                    verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
                    typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
                    Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
                }
                else
                {
                    // tiVerificationNeed is false.
                    // Raise InvalidProgramException if static store accesses non-static field
                    if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
                    {
                        BADCODE("static access on an instance field");
                    }
                }
                // We are using stfld on a static field.
                // We allow it, but need to eval any side-effects for obj
                if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
                {
                    if (obj->gtFlags & GTF_SIDE_EFFECT)
                    {
                        // Keep the object expression for its side effects only, then drop it.
                        obj = gtUnusedValNode(obj);
                        impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
                    }
                    obj = nullptr;
                }
                /* Preserve 'small' int types */
                if (!varTypeIsSmall(lclTyp))
                {
                    lclTyp = genActualType(lclTyp);
                }
                // Build the store destination (op1) based on how the EE says this field is accessed.
                switch (fieldInfo.fieldAccessor)
                {
                    case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN_COMPILER
                    case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif
                    {
                        obj = impCheckForNullPointer(obj);
                        /* Create the data member node */
                        op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
                        DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
                        if (StructHasOverlappingFields(typeFlags))
                        {
                            op1->AsField()->gtFldMayOverlap = true;
                        }
#ifdef FEATURE_READYTORUN_COMPILER
                        if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
                        {
                            op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup;
                        }
#endif
                        op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
                        if (fgAddrCouldBeNull(obj))
                        {
                            op1->gtFlags |= GTF_EXCEPT;
                        }
                        // If gtFldObj is a BYREF then our target is a value class and
                        // it could point anywhere, example a boxed class static int
                        if (obj->gtType == TYP_BYREF)
                        {
                            op1->gtFlags |= GTF_IND_TGTANYWHERE;
                        }
                        if (compIsForInlining() &&
                            impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj,
                                                                               impInlineInfo->inlArgInfo))
                        {
                            impInlineInfo->thisDereferencedFirst = true;
                        }
                    }
                    break;
                    case CORINFO_FIELD_STATIC_TLS:
#ifdef TARGET_X86
                        // Legacy TLS access is implemented as intrinsic on x86 only
                        /* Create the data member node */
                        op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
                        op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
                        break;
#else
                        // Non-x86: fall back to the address-helper path for TLS statics.
                        fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
                        FALLTHROUGH;
#endif
                    case CORINFO_FIELD_STATIC_ADDR_HELPER:
                    case CORINFO_FIELD_INSTANCE_HELPER:
                    case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
                        // gtNewRefCOMfield builds the whole store (op2 is the value), so the
                        // assignment construction below is skipped via SPILL_APPEND.
                        op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
                                               clsHnd, op2);
                        goto SPILL_APPEND;
                    case CORINFO_FIELD_STATIC_ADDRESS:
                    case CORINFO_FIELD_STATIC_RVA_ADDRESS:
                    case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
                    case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
                    case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
                        op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
                                                         lclTyp);
                        break;
                    default:
                        assert(!"Unexpected fieldAccessor");
                }
                // Create the member assignment, unless we have a TYP_STRUCT.
                // Struct assignments are deferred until after the spill logic below.
                bool deferStructAssign = (lclTyp == TYP_STRUCT);
                if (!deferStructAssign)
                {
                    if (prefixFlags & PREFIX_VOLATILE)
                    {
                        assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
                        op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
                        op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
                        op1->gtFlags |= GTF_IND_VOLATILE;
                    }
                    if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
                    {
                        assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
                        op1->gtFlags |= GTF_IND_UNALIGNED;
                    }
                    /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
                       trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during
                       importation and reads from the union as if it were a long during code generation. Though this
                       can potentially read garbage, one can get lucky to have this working correctly.
                       This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
                       /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
                       dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
                       it works correctly always.
                       Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
                       for V4.0.
                    */
                    CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
                    // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
                    // generated for ARM as well as x86, so the following IR will be accepted:
                    // STMTx (IL 0x... ???)
                    //   * ASG long
                    //   +--* CLS_VAR long
                    //   \--* CNS_INT int 2
                    if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
                        varTypeIsLong(op1->TypeGet()))
                    {
                        op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
                    }
#endif
#ifdef TARGET_64BIT
                    // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
                    if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
                    {
                        op2->gtType = TYP_I_IMPL;
                    }
                    else
                    {
                        // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity
                        //
                        if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
                        {
                            op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
                        }
                        // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
                        //
                        if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
                        {
                            op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
                        }
                    }
#endif
                    // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
                    // We insert a cast to the dest 'op1' type
                    //
                    if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
                        varTypeIsFloating(op2->gtType))
                    {
                        op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
                    }
                    op1 = gtNewAssignNode(op1, op2);
                    /* Mark the expression as containing an assignment */
                    op1->gtFlags |= GTF_ASG;
                }
                /* Check if the class needs explicit initialization */
                if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
                {
                    GenTree* helperNode = impInitClass(&resolvedToken);
                    if (compDonotInline())
                    {
                        return;
                    }
                    if (helperNode != nullptr)
                    {
                        // Sequence the class-init helper before the store via a comma.
                        op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
                    }
                }
                /* stfld can interfere with value classes (consider the sequence
                   ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
                   spill all value class references from the stack. */
                if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
                {
                    assert(tiObj);
                    if (impIsValueType(tiObj))
                    {
                        impSpillEvalStack();
                    }
                    else
                    {
                        impSpillValueClasses();
                    }
                }
                /* Spill any refs to the same member from the stack */
                impSpillLclRefs((ssize_t)resolvedToken.hField);
                /* stsfld also interferes with indirect accesses (for aliased
                   statics) and calls. But don't need to spill other statics
                   as we have explicitly spilled this particular static field. */
                impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
                if (deferStructAssign)
                {
                    // Struct-typed stores are built only after the spills above.
                    op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
                }
            }
            goto APPEND;
            case CEE_NEWARR:
            {
                // Import 'newarr': pop the element count, call the array-allocation
                // helper (R2R variant when available), and push the TYP_REF result.
                /* Get the class type index operand */
                _impResolveToken(CORINFO_TOKENKIND_Newarr);
                JITDUMP(" %08X", resolvedToken.token);
                if (!opts.IsReadyToRun())
                {
                    // Need to restore array classes before creating array objects on the heap
                    op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
                    if (op1 == nullptr)
                    { // compDonotInline()
                        return;
                    }
                }
                if (tiVerificationNeeded)
                {
                    // As per ECMA 'numElems' specified can be either int32 or native int.
                    Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
                    CORINFO_CLASS_HANDLE elemTypeHnd;
                    info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
                    Verify(elemTypeHnd == nullptr ||
                               !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
                           "array of byref-like type");
                }
                tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
                accessAllowedResult =
                    info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
                impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
                /* Form the arglist: array class handle, size */
                op2 = impPopStack().val;
                assertImp(genActualTypeIsIntOrI(op2->gtType));
#ifdef TARGET_64BIT
                // The array helper takes a native int for array length.
                // So if we have an int, explicitly extend it to be a native int.
                if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
                {
                    if (op2->IsIntegralConst())
                    {
                        // Constants can be retyped in place; no cast node needed.
                        op2->gtType = TYP_I_IMPL;
                    }
                    else
                    {
                        bool isUnsigned = false;
                        op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL);
                    }
                }
#endif // TARGET_64BIT
#ifdef FEATURE_READYTORUN_COMPILER
                if (opts.IsReadyToRun())
                {
                    op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
                                                    gtNewCallArgs(op2));
                    usingReadyToRunHelper = (op1 != nullptr);
                    if (!usingReadyToRunHelper)
                    {
                        // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
                        // and the newarr call with a single call to a dynamic R2R cell that will:
                        //   1) Load the context
                        //   2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
                        //   3) Allocate the new array
                        // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
                        // Need to restore array classes before creating array objects on the heap
                        op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
                        if (op1 == nullptr)
                        { // compDonotInline()
                            return;
                        }
                    }
                }
                if (!usingReadyToRunHelper)
#endif
                {
                    GenTreeCall::Use* args = gtNewCallArgs(op1, op2);
                    /* Create a call to 'new' */
                    // Note that this only works for shared generic code because the same helper is used for all
                    // reference array types
                    op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
                }
                op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
                /* Remember that this basic block contains 'new' of an sd array */
                block->bbFlags |= BBF_HAS_NEWARRAY;
                optMethodFlags |= OMF_HAS_NEWARRAY;
                /* Push the result of the call on the stack */
                impPushOnStack(op1, tiRetVal);
                callTyp = TYP_REF;
            }
            break;
            case CEE_LOCALLOC:
                // Import 'localloc' (C# stackalloc): pop the byte count and push a
                // native-int pointer. Small, non-looping, constant-size allocations are
                // converted to a TYP_BLK local; otherwise a GT_LCLHEAP node is created.
                if (tiVerificationNeeded)
                {
                    Verify(false, "bad opcode");
                }
                // We don't allow locallocs inside handlers
                if (block->hasHndIndex())
                {
                    BADCODE("Localloc can't be inside handler");
                }
                // Get the size to allocate
                op2 = impPopStack().val;
                assertImp(genActualTypeIsIntOrI(op2->gtType));
                if (verCurrentState.esStackDepth != 0)
                {
                    BADCODE("Localloc can only be used when the stack is empty");
                }
                // If the localloc is not in a loop and its size is a small constant,
                // create a new local var of TYP_BLK and return its address.
                {
                    bool convertedToLocal = false;
                    // Need to aggressively fold here, as even fixed-size locallocs
                    // will have casts in the way.
                    op2 = gtFoldExpr(op2);
                    if (op2->IsIntegralConst())
                    {
                        const ssize_t allocSize = op2->AsIntCon()->IconValue();
                        bool bbInALoop = impBlockIsInALoop(block);
                        if (allocSize == 0)
                        {
                            // Result is nullptr
                            JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
                            op1 = gtNewIconNode(0, TYP_I_IMPL);
                            convertedToLocal = true;
                        }
                        else if ((allocSize > 0) && !bbInALoop)
                        {
                            // Get the size threshold for local conversion
                            ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
#ifdef DEBUG
                            // Optionally allow this to be modified
                            maxSize = JitConfig.JitStackAllocToLocalSize();
#endif // DEBUG
                            if (allocSize <= maxSize)
                            {
                                const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
                                JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
                                        stackallocAsLocal);
                                lvaTable[stackallocAsLocal].lvType = TYP_BLK;
                                lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize;
                                lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
                                op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
                                op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
                                convertedToLocal = true;
                                if (!this->opts.compDbgEnC)
                                {
                                    // Ensure we have stack security for this method.
                                    // Reorder layout since the converted localloc is treated as an unsafe buffer.
                                    setNeedsGSSecurityCookie();
                                    compGSReorderStackLayout = true;
                                }
                            }
                        }
                    }
                    if (!convertedToLocal)
                    {
                        // Bail out if inlining and the localloc was not converted.
                        //
                        // Note we might consider allowing the inline, if the call
                        // site is not in a loop.
                        if (compIsForInlining())
                        {
                            InlineObservation obs = op2->IsIntegralConst()
                                                        ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
                                                        : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
                            compInlineResult->NoteFatal(obs);
                            return;
                        }
                        op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
                        // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
                        op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
                        // Ensure we have stack security for this method.
                        setNeedsGSSecurityCookie();
                        /* The FP register may not be back to the original value at the end
                           of the method, even if the frame size is 0, as localloc may
                           have modified it. So we will HAVE to reset it */
                        compLocallocUsed = true;
                    }
                    else
                    {
                        compLocallocOptimized = true;
                    }
                }
                impPushOnStack(op1, tiRetVal);
                break;
            case CEE_ISINST:
            {
                // Import 'isinst': pop an object reference and push it back (or null)
                // after a type test. Tries a compile-time optimization first, then the
                // R2R helper, and finally the general cast/isinst expansion.
                /* Get the type token */
                assertImp(sz == sizeof(unsigned));
                _impResolveToken(CORINFO_TOKENKIND_Casting);
                JITDUMP(" %08X", resolvedToken.token);
                if (!opts.IsReadyToRun())
                {
                    op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
                    if (op2 == nullptr)
                    { // compDonotInline()
                        return;
                    }
                }
                if (tiVerificationNeeded)
                {
                    Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
                    // Even if this is a value class, we know it is boxed.
                    tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
                }
                accessAllowedResult =
                    info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
                impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
                op1 = impPopStack().val;
                // 'false' distinguishes isinst from castclass in the shared expansion helpers.
                GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
                if (optTree != nullptr)
                {
                    // The type test was resolved at jit time; push the simplified tree.
                    impPushOnStack(optTree, tiRetVal);
                }
                else
                {
#ifdef FEATURE_READYTORUN_COMPILER
                    if (opts.IsReadyToRun())
                    {
                        GenTreeCall* opLookup =
                            impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
                                                      gtNewCallArgs(op1));
                        usingReadyToRunHelper = (opLookup != nullptr);
                        op1 = (usingReadyToRunHelper ? opLookup : op1);
                        if (!usingReadyToRunHelper)
                        {
                            // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
                            // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
                            //   1) Load the context
                            //   2) Perform the generic dictionary lookup and caching, and generate the appropriate
                            //      stub
                            //   3) Perform the 'is instance' check on the input object
                            // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
                            op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
                            if (op2 == nullptr)
                            { // compDonotInline()
                                return;
                            }
                        }
                    }
                    if (!usingReadyToRunHelper)
#endif
                    {
                        op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
                    }
                    if (compDonotInline())
                    {
                        return;
                    }
                    impPushOnStack(op1, tiRetVal);
                }
                break;
            }
            case CEE_REFANYVAL:
                // Import 'refanyval': pop a TypedReference struct and push a byref to
                // its value, via the CORINFO_HELP_GETREFANY helper (which also performs
                // the type check against the class handle in op2).
                // get the class handle and make a ICON node out of it
                _impResolveToken(CORINFO_TOKENKIND_Class);
                JITDUMP(" %08X", resolvedToken.token);
                op2 = impTokenToHandle(&resolvedToken);
                if (op2 == nullptr)
                { // compDonotInline()
                    return;
                }
                if (tiVerificationNeeded)
                {
                    Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
                           "need refany");
                    tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
                }
                op1 = impPopStack().val;
                // make certain it is normalized;
                op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
                // Call helper GETREFANY(classHandle, op1);
                op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1));
                impPushOnStack(op1, tiRetVal);
                break;
            case CEE_REFANYTYPE:
                // Import 'refanytype': pop a TypedReference and push its type as a
                // RuntimeTypeHandle. The native type handle is read from the struct's
                // type slot (or taken directly from a GT_MKREFANY), then converted by helper.
                if (tiVerificationNeeded)
                {
                    Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
                           "need refany");
                }
                op1 = impPopStack().val;
                // make certain it is normalized;
                op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
                if (op1->gtOper == GT_OBJ)
                {
                    // Get the address of the refany
                    op1 = op1->AsOp()->gtOp1;
                    // Fetch the type from the correct slot
                    op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
                                        gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
                    op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
                }
                else
                {
                    assertImp(op1->gtOper == GT_MKREFANY);
                    // The pointer may have side-effects
                    if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT)
                    {
                        // Evaluate the pointer operand for its side effects before discarding it.
                        impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
#ifdef DEBUG
                        impNoteLastILoffs();
#endif
                    }
                    // We already have the class handle
                    op1 = op1->AsOp()->gtOp2;
                }
                // convert native TypeHandle to RuntimeTypeHandle
                {
                    GenTreeCall::Use* helperArgs = gtNewCallArgs(op1);
                    op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT,
                                              helperArgs);
                    CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass();
                    // The handle struct is returned in register
                    op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
                    if (!compDoOldStructRetyping())
                    {
                        op1->AsCall()->gtRetClsHnd = classHandle;
#if FEATURE_MULTIREG_RET
                        op1->AsCall()->InitializeStructReturnType(this, classHandle,
                                                                  op1->AsCall()->GetUnmanagedCallConv());
#endif
                    }
                    tiRetVal = typeInfo(TI_STRUCT, classHandle);
                }
                impPushOnStack(op1, tiRetVal);
                break;
            case CEE_LDTOKEN:
            {
                // Import 'ldtoken': resolve a metadata token and push the corresponding
                // Runtime{Type,Method,Field}Handle struct, produced by the matching
                // handle-conversion helper.
                /* Get the Class index */
                assertImp(sz == sizeof(unsigned));
                lastLoadToken = codeAddr;
                _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
                tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
                op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
                if (op1 == nullptr)
                { // compDonotInline()
                    return;
                }
                // Pick the helper by token kind: method and field tokens override the
                // type-handle default below.
                helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
                assert(resolvedToken.hClass != nullptr);
                if (resolvedToken.hMethod != nullptr)
                {
                    helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
                }
                else if (resolvedToken.hField != nullptr)
                {
                    helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
                }
                GenTreeCall::Use* helperArgs = gtNewCallArgs(op1);
                op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
                // The handle struct is returned in register and
                // it could be consumed both as `TYP_STRUCT` and `TYP_REF`.
                op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
                if (!compDoOldStructRetyping())
                {
#if FEATURE_MULTIREG_RET
                    op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv());
#endif
                    op1->AsCall()->gtRetClsHnd = tokenType;
                }
                tiRetVal = verMakeTypeInfo(tokenType);
                impPushOnStack(op1, tiRetVal);
            }
            break;
case CEE_UNBOX:
case CEE_UNBOX_ANY:
{
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
BOOL runtimeLookup;
op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
if (op2 == nullptr)
{
assert(compDonotInline());
return;
}
// Run this always so we can get access exceptions even with SkipVerification.
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
{
if (tiVerificationNeeded)
{
typeInfo tiUnbox = impStackTop().seTypeInfo;
Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
tiRetVal.NormaliseForStack();
}
JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
op1 = impPopStack().val;
goto CASTCLASS;
}
/* Pop the object and create the unbox helper call */
/* You might think that for UNBOX_ANY we need to push a different */
/* (non-byref) type, but here we're making the tiRetVal that is used */
/* for the intermediate pointer which we then transfer onto the OBJ */
/* instruction. OBJ then creates the appropriate tiRetVal. */
if (tiVerificationNeeded)
{
typeInfo tiUnbox = impStackTop().seTypeInfo;
Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
Verify(tiRetVal.IsValueClass(), "not value class");
tiRetVal.MakeByRef();
// We always come from an objref, so this is safe byref
tiRetVal.SetIsPermanentHomeByRef();
tiRetVal.SetIsReadonlyByRef();
}
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
// Check legality and profitability of inline expansion for unboxing.
const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled();
if (canExpandInline && shouldExpandInline)
{
// See if we know anything about the type of op1, the object being unboxed.
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull);
// We can skip the "exact" bit here as we are comparing to a value class.
// compareTypesForEquality should bail on comparisions for shared value classes.
if (clsHnd != NO_CLASS_HANDLE)
{
const TypeCompareState compare =
info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd);
if (compare == TypeCompareState::Must)
{
JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd));
// For UNBOX, null check (if necessary), and then leave the box payload byref on the stack.
if (opcode == CEE_UNBOX)
{
GenTree* cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("optimized unbox clone"));
GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* boxPayloadAddress =
gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset);
GenTree* nullcheck = gtNewNullCheck(op1, block);
GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress);
impPushOnStack(result, tiRetVal);
break;
}
// For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck)
assert(opcode == CEE_UNBOX_ANY);
GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset);
impPushOnStack(boxPayloadAddress, tiRetVal);
oper = GT_OBJ;
goto OBJ;
}
else
{
JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
}
}
else
{
JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1));
}
JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
// we are doing normal unboxing
// inline the common case of the unbox helper
// UNBOX(exp) morphs into
// clone = pop(exp);
// ((*clone == typeToken) ? nop : helper(clone, typeToken));
// push(clone + TARGET_POINTER_SIZE)
//
GenTree* cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("inline UNBOX clone1"));
op1 = gtNewMethodTableLookup(op1);
GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("inline UNBOX clone2"));
op2 = impTokenToHandle(&resolvedToken);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1));
op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
// QMARK nodes cannot reside on the evaluation stack. Because there
// may be other trees on the evaluation stack that side-effect the
// sources of the UNBOX operation we must spill the stack.
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
// Create the address-expression to reference past the object header
// to the beginning of the value-type. Today this means adjusting
// past the base of the objects vtable field which is pointer sized.
op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
}
else
{
JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
// Don't optimize, just call the helper and be done with it
op1 = gtNewHelperCallNode(helper,
(var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
gtNewCallArgs(op2, op1));
if (!compDoOldStructRetyping())
{
if (op1->gtType == TYP_STRUCT)
{
op1->AsCall()->gtRetClsHnd = resolvedToken.hClass;
}
}
}
assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref.
(helper == CORINFO_HELP_UNBOX_NULLABLE &&
varTypeIsStruct(op1)) // UnboxNullable helper returns a struct.
);
/*
----------------------------------------------------------------------
| \ helper | | |
| \ | | |
| \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE |
| \ | (which returns a BYREF) | (which returns a STRUCT) | |
| opcode \ | | |
|---------------------------------------------------------------------
| UNBOX | push the BYREF | spill the STRUCT to a local, |
| | | push the BYREF to this local |
|---------------------------------------------------------------------
| UNBOX_ANY | push a GT_OBJ of | push the STRUCT |
| | the BYREF | For Linux when the |
| | | struct is returned in two |
| | | registers create a temp |
| | | which address is passed to |
| | | the unbox_nullable helper. |
|---------------------------------------------------------------------
*/
if (opcode == CEE_UNBOX)
{
if (helper == CORINFO_HELP_UNBOX_NULLABLE)
{
// Unbox nullable helper returns a struct type.
// We need to spill it to a temp so than can take the address of it.
// Here we need unsafe value cls check, since the address of struct is taken to be used
// further along and potetially be exploitable.
unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
}
assert(op1->gtType == TYP_BYREF);
assert(!tiVerificationNeeded || tiRetVal.IsByRef());
}
else
{
assert(opcode == CEE_UNBOX_ANY);
if (helper == CORINFO_HELP_UNBOX)
{
// Normal unbox helper returns a TYP_BYREF.
impPushOnStack(op1, tiRetVal);
oper = GT_OBJ;
goto OBJ;
}
assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(op1) &&
IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed))
{
// Unbox nullable helper returns a TYP_STRUCT.
// For the multi-reg case we need to spill it to a temp so that
// we can pass the address to the unbox_nullable jit helper.
unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
lvaTable[tmp].lvIsMultiRegArg = true;
lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
// In this case the return value of the unbox helper is TYP_BYREF.
// Make sure the right type is placed on the operand type stack.
impPushOnStack(op1, tiRetVal);
// Load the struct.
oper = GT_OBJ;
assert(op1->gtType == TYP_BYREF);
assert(!tiVerificationNeeded || tiRetVal.IsByRef());
goto OBJ;
}
else
#endif // !FEATURE_MULTIREG_RET
{
// If non register passable struct we have it materialized in the RetBuf.
assert(op1->gtType == TYP_STRUCT);
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
assert(tiRetVal.IsValueClass());
}
}
impPushOnStack(op1, tiRetVal);
}
break;
case CEE_BOX:
{
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Box);
JITDUMP(" %08X", resolvedToken.token);
if (tiVerificationNeeded)
{
typeInfo tiActual = impStackTop().seTypeInfo;
typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
Verify(verIsBoxable(tiBox), "boxable type expected");
// check the class constraints of the boxed type in case we are boxing an uninitialized value
Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
"boxed type has unsatisfied class constraints");
Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
// Observation: the following code introduces a boxed value class on the stack, but,
// according to the ECMA spec, one would simply expect: tiRetVal =
// typeInfo(TI_REF,impGetObjectClass());
// Push the result back on the stack,
// even if clsHnd is a value class we want the TI_REF
// we call back to the EE to get find out what hte type we should push (for nullable<T> we push T)
tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
// Note BOX can be used on things that are not value classes, in which
// case we get a NOP. However the verifier's view of the type on the
// stack changes (in generic code a 'T' becomes a 'boxed T')
if (!eeIsValueClass(resolvedToken.hClass))
{
JITDUMP("\n Importing BOX(refClass) as NOP\n");
verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
break;
}
// Look ahead for box idioms
int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp);
if (matched >= 0)
{
// Skip the matched IL instructions
sz += matched;
break;
}
impImportAndPushBox(&resolvedToken);
if (compDonotInline())
{
return;
}
}
break;
case CEE_SIZEOF:
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
if (tiVerificationNeeded)
{
tiRetVal = typeInfo(TI_INT);
}
op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
impPushOnStack(op1, tiRetVal);
break;
case CEE_CASTCLASS:
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Casting);
JITDUMP(" %08X", resolvedToken.token);
if (!opts.IsReadyToRun())
{
op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
if (tiVerificationNeeded)
{
Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
// box it
tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
op1 = impPopStack().val;
/* Pop the address and create the 'checked cast' helper call */
// At this point we expect typeRef to contain the token, op1 to contain the value being cast,
// and op2 to contain code that creates the type handle corresponding to typeRef
CASTCLASS:
{
GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
if (optTree != nullptr)
{
impPushOnStack(optTree, tiRetVal);
}
else
{
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
GenTreeCall* opLookup =
impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
gtNewCallArgs(op1));
usingReadyToRunHelper = (opLookup != nullptr);
op1 = (usingReadyToRunHelper ? opLookup : op1);
if (!usingReadyToRunHelper)
{
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the chkcastany call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate
// stub
// 3) Check the object on the stack for the type-cast
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
}
if (!usingReadyToRunHelper)
#endif
{
op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
}
if (compDonotInline())
{
return;
}
/* Push the result back on the stack */
impPushOnStack(op1, tiRetVal);
}
}
break;
case CEE_THROW:
if (tiVerificationNeeded)
{
tiRetVal = impStackTop().seTypeInfo;
Verify(tiRetVal.IsObjRef(), "object ref expected");
if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
{
Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
}
}
// Any block with a throw is rarely executed.
block->bbSetRunRarely();
// Pop the exception object and create the 'throw' helper call
op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val));
// Fall through to clear out the eval stack.
EVAL_APPEND:
if (verCurrentState.esStackDepth > 0)
{
impEvalSideEffects();
}
assert(verCurrentState.esStackDepth == 0);
goto APPEND;
case CEE_RETHROW:
assert(!compIsForInlining());
if (info.compXcptnsCount == 0)
{
BADCODE("rethrow outside catch");
}
if (tiVerificationNeeded)
{
Verify(block->hasHndIndex(), "rethrow outside catch");
if (block->hasHndIndex())
{
EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
if (HBtab->HasFilter())
{
// we better be in the handler clause part, not the filter part
Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
"rethrow in filter");
}
}
}
/* Create the 'rethrow' helper call */
op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
goto EVAL_APPEND;
case CEE_INITOBJ:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
if (tiVerificationNeeded)
{
typeInfo tiTo = impStackTop().seTypeInfo;
typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
Verify(tiTo.IsByRef(), "byref expected");
Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
"type operand incompatible with type of address");
}
size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
op2 = gtNewIconNode(0); // Value
op1 = impPopStack().val; // Dest
op1 = gtNewBlockVal(op1, size);
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
goto SPILL_APPEND;
case CEE_INITBLK:
if (tiVerificationNeeded)
{
Verify(false, "bad opcode");
}
op3 = impPopStack().val; // Size
op2 = impPopStack().val; // Value
op1 = impPopStack().val; // Dest
if (op3->IsCnsIntOrI())
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
}
else
{
op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
size = 0;
}
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
goto SPILL_APPEND;
case CEE_CPBLK:
if (tiVerificationNeeded)
{
Verify(false, "bad opcode");
}
op3 = impPopStack().val; // Size
op2 = impPopStack().val; // Src
op1 = impPopStack().val; // Dest
if (op3->IsCnsIntOrI())
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
}
else
{
op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
size = 0;
}
if (op2->OperGet() == GT_ADDR)
{
op2 = op2->AsOp()->gtOp1;
}
else
{
op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
}
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true);
goto SPILL_APPEND;
case CEE_CPOBJ:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
if (tiVerificationNeeded)
{
typeInfo tiFrom = impStackTop().seTypeInfo;
typeInfo tiTo = impStackTop(1).seTypeInfo;
typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
Verify(tiFrom.IsByRef(), "expected byref source");
Verify(tiTo.IsByRef(), "expected byref destination");
Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
"type of source address incompatible with type operand");
Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
"type operand incompatible with type of destination address");
}
if (!eeIsValueClass(resolvedToken.hClass))
{
op1 = impPopStack().val; // address to load from
impBashVarAddrsToI(op1);
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
impPushOnStack(op1, typeInfo());
opcode = CEE_STIND_REF;
lclTyp = TYP_REF;
goto STIND_POST_VERIFY;
}
op2 = impPopStack().val; // Src
op1 = impPopStack().val; // Dest
op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
goto SPILL_APPEND;
case CEE_STOBJ:
{
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
if (eeIsValueClass(resolvedToken.hClass))
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = TYP_REF;
}
if (tiVerificationNeeded)
{
typeInfo tiPtr = impStackTop(1).seTypeInfo;
// Make sure we have a good looking byref
Verify(tiPtr.IsByRef(), "pointer not byref");
Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
{
compUnsafeCastUsed = true;
}
typeInfo ptrVal = DereferenceByRef(tiPtr);
typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
{
Verify(false, "type of value incompatible with type operand");
compUnsafeCastUsed = true;
}
if (!tiCompatibleWith(argVal, ptrVal, false))
{
Verify(false, "type operand incompatible with type of address");
compUnsafeCastUsed = true;
}
}
else
{
compUnsafeCastUsed = true;
}
if (lclTyp == TYP_REF)
{
opcode = CEE_STIND_REF;
goto STIND_POST_VERIFY;
}
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
lclTyp = JITtype2varType(jitTyp);
goto STIND_POST_VERIFY;
}
op2 = impPopStack().val; // Value
op1 = impPopStack().val; // Ptr
assertImp(varTypeIsStruct(op2));
op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
{
op1->gtFlags |= GTF_BLK_UNALIGNED;
}
goto SPILL_APPEND;
}
case CEE_MKREFANY:
assert(!compIsForInlining());
// Being lazy here. Refanys are tricky in terms of gc tracking.
// Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
JITDUMP("disabling struct promotion because of mkrefany\n");
fgNoStructPromotion = true;
oper = GT_MKREFANY;
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
if (tiVerificationNeeded)
{
typeInfo tiPtr = impStackTop().seTypeInfo;
typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
op1 = impPopStack().val;
// @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
// But JIT32 allowed it, so we continue to allow it.
assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
// MKREFANY returns a struct. op2 is the class token.
op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
break;
case CEE_LDOBJ:
{
oper = GT_OBJ;
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
OBJ:
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
if (tiVerificationNeeded)
{
typeInfo tiPtr = impStackTop().seTypeInfo;
// Make sure we have a byref
if (!tiPtr.IsByRef())
{
Verify(false, "pointer not byref");
compUnsafeCastUsed = true;
}
typeInfo tiPtrVal = DereferenceByRef(tiPtr);
if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
{
Verify(false, "type of address incompatible with type operand");
compUnsafeCastUsed = true;
}
tiRetVal.NormaliseForStack();
}
else
{
compUnsafeCastUsed = true;
}
if (eeIsValueClass(resolvedToken.hClass))
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = TYP_REF;
opcode = CEE_LDIND_REF;
goto LDIND_POST_VERIFY;
}
op1 = impPopStack().val;
assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
// Could point anywhere, example a boxed class static int
op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
assertImp(varTypeIsArithmetic(op1->gtType));
}
else
{
// OBJ returns a struct
// and an inline argument which is the class token of the loaded obj
op1 = gtNewObjNode(resolvedToken.hClass, op1);
}
op1->gtFlags |= GTF_EXCEPT;
if (prefixFlags & PREFIX_UNALIGNED)
{
op1->gtFlags |= GTF_IND_UNALIGNED;
}
impPushOnStack(op1, tiRetVal);
break;
}
case CEE_LDLEN:
if (tiVerificationNeeded)
{
typeInfo tiArray = impStackTop().seTypeInfo;
Verify(verIsSDArray(tiArray), "bad array");
tiRetVal = typeInfo(TI_INT);
}
op1 = impPopStack().val;
if (opts.OptimizationEnabled())
{
/* Use GT_ARR_LENGTH operator so rng check opts see this */
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block);
op1 = arrLen;
}
else
{
/* Create the expression "*(array_addr + ArrLenOffs)" */
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL));
op1 = gtNewIndir(TYP_INT, op1);
}
/* Push the result back on the stack */
impPushOnStack(op1, tiRetVal);
break;
case CEE_BREAK:
op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
goto SPILL_APPEND;
case CEE_NOP:
if (opts.compDbgCode)
{
op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
goto SPILL_APPEND;
}
break;
/******************************** NYI *******************************/
case 0xCC:
OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
FALLTHROUGH;
case CEE_ILLEGAL:
case CEE_MACRO_END:
default:
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR);
return;
}
BADCODE3("unknown opcode", ": %02X", (int)opcode);
}
codeAddr += sz;
prevOpcode = opcode;
prefixFlags = 0;
}
return;
#undef _impResolveToken
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
// Push a local/argument tree on the operand stack
void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
{
    // Stack entries always carry the stack-normalized verification type.
    tiRetVal.NormaliseForStack();

    // While verifying a constructor, 'this' is not yet a fully initialized
    // object reference; mark the pushed type accordingly.
    const bool thisNotYetInit = verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init);
    if (thisNotYetInit && tiRetVal.IsThisPtr())
    {
        tiRetVal.SetUninitialisedObjRef();
    }

    impPushOnStack(op, tiRetVal);
}
// Load a local/argument on the operand stack
// lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal)
{
var_types lclTyp;
if (lvaTable[lclNum].lvNormalizeOnLoad())
{
lclTyp = lvaGetRealType(lclNum);
}
else
{
lclTyp = lvaGetActualType(lclNum);
}
impPushVar(gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset)), tiRetVal);
}
// Load an argument on the operand stack
// Shared by the various CEE_LDARG opcodes
// ilArgNum is the argument index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
{
Verify(ilArgNum < info.compILargsCount, "bad arg num");
if (compIsForInlining())
{
if (ilArgNum >= info.compArgsCount)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
return;
}
impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
}
else
{
if (ilArgNum >= info.compArgsCount)
{
BADCODE("Bad IL");
}
unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
impLoadVar(lclNum, offset);
}
}
// Load a local on the operand stack
// Shared by the various CEE_LDLOC opcodes
// ilLclNum is the local index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
{
if (tiVerificationNeeded)
{
Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
Verify(info.compInitMem, "initLocals not set");
}
if (compIsForInlining())
{
if (ilLclNum >= info.compMethodInfo->locals.numArgs)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
return;
}
// Get the local type
var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
/* Have we allocated a temp for this local? */
unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
// All vars of inlined methods should be !lvNormalizeOnLoad()
assert(!lvaTable[lclNum].lvNormalizeOnLoad());
lclTyp = genActualType(lclTyp);
impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
}
else
{
if (ilLclNum >= info.compMethodInfo->locals.numArgs)
{
BADCODE("Bad IL");
}
unsigned lclNum = info.compArgsCount + ilLclNum;
impLoadVar(lclNum, offset);
}
}
#ifdef TARGET_ARM
/**************************************************************************************
*
* When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
* dst struct, because struct promotion will turn it into a float/double variable while
* the rhs will be an int/long variable. We don't code generate assignment of int into
* a float, but there is nothing that might prevent us from doing so. The tree however
 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
*
* tmpNum - the lcl dst variable num that is a struct.
* src - the src tree assigned to the dest that is a struct/int (when varargs call.)
* hClass - the type handle for the struct variable.
*
* TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
* however, we could do a codegen of transferring from int to float registers
* (transfer, not a cast.)
*
*/
void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
{
if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass))
{
int hfaSlots = GetHfaCount(hClass);
var_types hfaType = GetHfaType(hClass);
// If we have varargs we morph the method's return type to be "int" irrespective of its original
// type: struct/float at importer because the ABI calls out return in integer registers.
// We don't want struct promotion to replace an expression like this:
// lclFld_int = callvar_int() into lclFld_float = callvar_int();
// This means an int is getting assigned to a float without a cast. Prevent the promotion.
if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
(hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
{
// Make sure this struct type stays as struct so we can receive the call in a struct.
lvaTable[tmpNum].lvIsMultiRegRet = true;
}
}
}
#endif // TARGET_ARM
//------------------------------------------------------------------------
// impAssignSmallStructTypeToVar: ensure calls that return small structs whose
// sizes are not supported integral type sizes return values to temps.
//
// Arguments:
// op -- call returning a small struct in a register
// hClass -- class handle for struct
//
// Returns:
// Tree with reference to struct local to use as call return value.
//
// Remarks:
// The call will be spilled into a preceding statement.
// Currently handles struct returns for 3, 5, 6, and 7 byte structs.
GenTree* Compiler::impAssignSmallStructTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
{
    // Spill the struct-returning call into a fresh temp (as a preceding
    // statement) and hand back a use of that temp as the return value.
    const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for small struct return"));
    impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
    return gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
}
#if FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
// registers return values to suitable temps.
//
// Arguments:
// op -- call returning a struct in registers
// hClass -- class handle for struct
//
// Returns:
// Tree with reference to struct local to use as call return value.
GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op,
                                              CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv))
{
    // Spill the multi-reg value into a fresh temp as a preceding statement.
    const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return"));
    impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);

    GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);

    // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
    result->gtFlags |= GTF_DONT_CSE;

    assert(IsMultiRegReturnedType(hClass, callConv));

    // Mark the var so that fields are not promoted and stay together.
    lvaTable[tmpNum].lvIsMultiRegRet = true;

    return result;
}
#endif // FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impReturnInstruction: import a return or an explicit tail call
//
// Arguments:
// prefixFlags -- active IL prefixes
// opcode -- [in, out] IL opcode
//
// Returns:
// True if import was successful (may fail for some inlinees)
//
bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
{
const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0;
if (tiVerificationNeeded)
{
verVerifyThisPtrInitialised();
unsigned expectedStack = 0;
if (info.compRetType != TYP_VOID)
{
typeInfo tiVal = impStackTop().seTypeInfo;
typeInfo tiDeclared =
verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
expectedStack = 1;
}
Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
}
#ifdef DEBUG
// If we are importing an inlinee and have GC ref locals we always
// need to have a spill temp for the return value. This temp
// should have been set up in advance, over in fgFindBasicBlocks.
if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
{
assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
}
#endif // DEBUG
GenTree* op2 = nullptr;
GenTree* op1 = nullptr;
CORINFO_CLASS_HANDLE retClsHnd = nullptr;
if (info.compRetType != TYP_VOID)
{
StackEntry se = impPopStack();
retClsHnd = se.seTypeInfo.GetClassHandle();
op2 = se.val;
if (!compIsForInlining())
{
impBashVarAddrsToI(op2);
op2 = impImplicitIorI4Cast(op2, info.compRetType);
op2 = impImplicitR4orR8Cast(op2, info.compRetType);
assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
(varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
(varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
#ifdef DEBUG
if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF))
{
// DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
// VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
// one-return BB.
assert(op2->gtType == TYP_REF);
// confirm that the argument is a GC pointer (for debugging (GC stress))
GenTreeCall::Use* args = gtNewCallArgs(op2);
op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
if (verbose)
{
printf("\ncompGcChecks tree:\n");
gtDispTree(op2);
}
}
#endif
}
else
{
// inlinee's stack should be empty now.
assert(verCurrentState.esStackDepth == 0);
#ifdef DEBUG
if (verbose)
{
printf("\n\n Inlinee Return expression (before normalization) =>\n");
gtDispTree(op2);
}
#endif
// Make sure the type matches the original call.
var_types returnType = genActualType(op2->gtType);
var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
{
originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
}
if (returnType != originalCallType)
{
// Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa
if (((returnType == TYP_BYREF) && (originalCallType == TYP_I_IMPL)) ||
((returnType == TYP_I_IMPL) && (originalCallType == TYP_BYREF)))
{
JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType),
varTypeName(originalCallType));
}
else
{
JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType),
varTypeName(originalCallType));
compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
return false;
}
}
// Below, we are going to set impInlineInfo->retExpr to the tree with the return
// expression. At this point, retExpr could already be set if there are multiple
// return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
// the other blocks already set it. If there is only a single return block,
// retExpr shouldn't be set. However, this is not true if we reimport a block
// with a return. In that case, retExpr will be set, then the block will be
// reimported, but retExpr won't get cleared as part of setting the block to
// be reimported. The reimported retExpr value should be the same, so even if
// we don't unconditionally overwrite it, it shouldn't matter.
if (info.compRetNativeType != TYP_STRUCT)
{
// compRetNativeType is not TYP_STRUCT.
// This implies it could be either a scalar type or SIMD vector type or
// a struct type that can be normalized to a scalar type.
if (varTypeIsStruct(info.compRetType))
{
noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
// adjust the type away from struct to integral
// and no normalizing
op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
}
else
{
// Do we have to normalize?
var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
fgCastNeeded(op2, fncRealRetType))
{
// Small-typed return values are normalized by the callee
op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
}
}
if (fgNeedReturnSpillTemp())
{
assert(info.compRetNativeType != TYP_VOID &&
(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
// If this method returns a ref type, track the actual types seen
// in the returns.
if (info.compRetType == TYP_REF)
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
if (impInlineInfo->retExpr == nullptr)
{
// This is the first return, so best known type is the type
// of this return value.
impInlineInfo->retExprClassHnd = returnClsHnd;
impInlineInfo->retExprClassHndIsExact = isExact;
}
else if (impInlineInfo->retExprClassHnd != returnClsHnd)
{
// This return site type differs from earlier seen sites,
// so reset the info and we'll fall back to using the method's
// declared return type for the return spill temp.
impInlineInfo->retExprClassHnd = nullptr;
impInlineInfo->retExprClassHndIsExact = false;
}
}
// This is a bit of a workaround...
// If we are inlining a call that returns a struct, where the actual "native" return type is
// not a struct (for example, the struct is composed of exactly one int, and the native
// return type is thus an int), and the inlinee has multiple return blocks (thus,
// fgNeedReturnSpillTemp() == true, and is the index of a local var that is set
// to the *native* return type), and at least one of the return blocks is the result of
// a call, then we have a problem. The situation is like this (from a failed test case):
//
// inliner:
// // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
// call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
// plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
//
// inlinee:
// ...
// ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
// ret
// ...
// call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
// object&, class System.Func`1<!!0>)
// ret
//
// In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
// of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
// morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
// inlining properly by leaving the correct type on the GT_CALL node through importing.
//
// To fix this, for this case, we temporarily change the GT_CALL node type to the
// native return type, which is what it will be set to eventually. We generate the
// assignment to the return temp, using the correct type, and then restore the GT_CALL
// node type. During morphing, the GT_CALL will get the correct, final, native return type.
bool restoreType = false;
if (compDoOldStructRetyping())
{
if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
{
noway_assert(op2->TypeGet() == TYP_STRUCT);
op2->gtType = info.compRetNativeType;
restoreType = true;
}
}
impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
(unsigned)CHECK_SPILL_ALL);
var_types lclRetType = op2->TypeGet();
if (!compDoOldStructRetyping())
{
LclVarDsc* varDsc = lvaGetDesc(lvaInlineeReturnSpillTemp);
lclRetType = varDsc->lvType;
}
GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType);
if (compDoOldStructRetyping())
{
if (restoreType)
{
op2->gtType = TYP_STRUCT; // restore it to what it was
}
}
op2 = tmpOp2;
#ifdef DEBUG
if (impInlineInfo->retExpr)
{
// Some other block(s) have seen the CEE_RET first.
// Better they spilled to the same temp.
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() ==
op2->AsLclVarCommon()->GetLclNum());
}
#endif
}
// If we are inlining a method that returns a struct byref, check whether we are "reinterpreting" the
// struct.
GenTree* effectiveRetVal = op2->gtEffectiveVal();
if ((returnType == TYP_BYREF) && (info.compRetType == TYP_BYREF) &&
(effectiveRetVal->OperGet() == GT_ADDR))
{
GenTree* addrChild = effectiveRetVal->gtGetOp1();
if (addrChild->OperGet() == GT_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(addrChild->AsLclVarCommon());
if (varTypeIsStruct(addrChild->TypeGet()) && !isOpaqueSIMDLclVar(varDsc))
{
CORINFO_CLASS_HANDLE referentClassHandle;
CorInfoType referentType =
info.compCompHnd->getChildType(info.compMethodInfo->args.retTypeClass,
&referentClassHandle);
if (varTypeIsStruct(JITtype2varType(referentType)) &&
(varDsc->GetStructHnd() != referentClassHandle))
{
// We are returning a byref to struct1; the method signature specifies return type as
// byref
// to struct2. struct1 and struct2 are different so we are "reinterpreting" the struct.
// This may happen in, for example, System.Runtime.CompilerServices.Unsafe.As<TFrom,
// TTo>.
// We need to mark the source struct variable as having overlapping fields because its
// fields may be accessed using field handles of a different type, which may confuse
// optimizations, in particular, value numbering.
JITDUMP("\nSetting lvOverlappingFields to true on V%02u because of struct "
"reinterpretation\n",
addrChild->AsLclVarCommon()->GetLclNum());
varDsc->lvOverlappingFields = true;
}
}
}
}
#ifdef DEBUG
if (verbose)
{
printf("\n\n Inlinee Return expression (after normalization) =>\n");
gtDispTree(op2);
}
#endif
// Report the return expression
impInlineInfo->retExpr = op2;
}
else
{
// compRetNativeType is TYP_STRUCT.
// This implies that struct return via RetBuf arg or multi-reg struct return
GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
// Assign the inlinee return into a spill temp.
// spill temp only exists if there are multiple return points
if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
{
// in this case we have to insert multiple struct copies to the temp
// and the retexpr is just the temp.
assert(info.compRetNativeType != TYP_VOID);
assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
(unsigned)CHECK_SPILL_ALL);
}
#if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI)
#if defined(TARGET_ARM)
// TODO-ARM64-NYI: HFA
// TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
// next ifdefs could be refactored in a single method with the ifdef inside.
if (IsHfa(retClsHnd))
{
// Same as !IsHfa but just don't bother with impAssignStructPtr.
#else // defined(UNIX_AMD64_ABI)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
// If single eightbyte, the return type would have been normalized and there won't be a temp var.
// This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
// max allowed.)
assert(retRegCount == MAX_RET_REG_COUNT);
// Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
CLANG_FORMAT_COMMENT_ANCHOR;
#endif // defined(UNIX_AMD64_ABI)
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
#if defined(TARGET_ARM)
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
#else // defined(UNIX_AMD64_ABI)
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
#endif // defined(UNIX_AMD64_ABI)
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#elif defined(TARGET_ARM64)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
assert(!iciCall->HasRetBufArg());
assert(retRegCount >= 2);
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#elif defined(TARGET_X86)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
assert(!iciCall->HasRetBufArg());
assert(retRegCount == MAX_RET_REG_COUNT);
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#endif // defined(TARGET_ARM64)
{
assert(iciCall->HasRetBufArg());
GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode());
// spill temp only exists if there are multiple return points
if (fgNeedReturnSpillTemp())
{
// if this is the first return we have seen set the retExpr
if (!impInlineInfo->retExpr)
{
impInlineInfo->retExpr =
impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
retClsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
else
{
impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
}
if (impInlineInfo->retExpr != nullptr)
{
impInlineInfo->retBB = compCurBB;
}
}
}
if (compIsForInlining())
{
return true;
}
if (info.compRetType == TYP_VOID)
{
// return void
op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
}
else if (info.compRetBuffArg != BAD_VAR_NUM)
{
// Assign value to return buff (first param)
GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtOffs));
op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
// There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_AMD64)
// x64 (System V and Win64) calling convention requires to
// return the implicit return buffer explicitly (in RAX).
// Change the return type to be BYREF.
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
#else // !defined(TARGET_AMD64)
// In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX).
// In such case the return value of the function is changed to BYREF.
// If profiler hook is not needed the return type of the function is TYP_VOID.
if (compIsProfilerHookNeeded())
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
// On ARM64, the native instance calling convention variant
// requires the implicit ByRef to be explicitly returned.
else if (callConvIsInstanceMethodCallConv(info.compCallConv))
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
#endif
else
{
// return void
op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
}
#endif // !defined(TARGET_AMD64)
}
else if (varTypeIsStruct(info.compRetType))
{
#if !FEATURE_MULTIREG_RET
// For both ARM architectures the HFA native types are maintained as structs.
// Also on System V AMD64 the multireg structs returns are also left as structs.
noway_assert(info.compRetNativeType != TYP_STRUCT);
#endif
op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
// return op2
var_types returnType;
if (compDoOldStructRetyping())
{
returnType = info.compRetNativeType;
}
else
{
returnType = info.compRetType;
}
op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2);
}
else
{
// return op2
op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
}
// We must have imported a tailcall and jumped to RET
if (isTailCall)
{
assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
// impImportCall() would have already appended TYP_VOID calls
if (info.compRetType == TYP_VOID)
{
return true;
}
}
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
#ifdef DEBUG
// Remember at which BC offset the tree was finished
impNoteLastILoffs();
#endif
return true;
}
/*****************************************************************************
* Mark the block as unimported.
* Note that the caller is responsible for calling impImportBlockPending(),
* with the appropriate stack-state
*/
//------------------------------------------------------------------------
// impReimportMarkBlock: flag a basic block so the importer will process
//    it again.
//
// Arguments:
//    block - the basic block to mark as unimported
//
// Notes:
//    The caller is responsible for re-queueing the block via
//    impImportBlockPending(), with the appropriate stack state.
//
inline void Compiler::impReimportMarkBlock(BasicBlock* block)
{
#ifdef DEBUG
    const bool wasImported = (block->bbFlags & BBF_IMPORTED) != 0;
    if (wasImported && verbose)
    {
        printf("\n" FMT_BB " will be reimported\n", block->bbNum);
    }
#endif
    // Drop the "imported" flag; the importer will visit the block again.
    block->bbFlags &= ~BBF_IMPORTED;
}
/*****************************************************************************
* Mark the successors of the given block as unimported.
* Note that the caller is responsible for calling impImportBlockPending()
* for all the successors, with the appropriate stack-state.
*/
//------------------------------------------------------------------------
// impReimportMarkSuccessors: mark every successor of "block" as unimported.
//
// Arguments:
//    block - the block whose successors are to be re-imported
//
// Notes:
//    The caller is responsible for calling impImportBlockPending() for each
//    successor, with the appropriate stack state.
//
void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
    // Clear BBF_IMPORTED on each successor in turn.
    for (unsigned succIndex = 0, succCount = block->NumSucc(); succIndex < succCount; succIndex++)
    {
        impReimportMarkBlock(block->GetSucc(succIndex));
    }
}
/*****************************************************************************
*
 *  SEH filter wrapper that handles only the verification exception code
 *  (all other exception codes continue the handler search).
*/
//------------------------------------------------------------------------
// FilterVerificationExceptions: SEH filter that accepts only verification
//    exceptions; every other exception code continues the handler search.
//
// Arguments:
//    pExceptionPointers - exception information supplied by the SEH machinery
//    lpvParam           - filter parameter (unused)
//
// Return Value:
//    EXCEPTION_EXECUTE_HANDLER for SEH_VERIFICATION_EXCEPTION,
//    EXCEPTION_CONTINUE_SEARCH otherwise.
//
LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
    const DWORD exceptionCode = pExceptionPointers->ExceptionRecord->ExceptionCode;
    return (exceptionCode == SEH_VERIFICATION_EXCEPTION) ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
}
//------------------------------------------------------------------------
// impVerifyEHBlock: perform EH-related verification and importer bookkeeping
//    for a block inside a 'try' region.
//
// Arguments:
//    block      - a block with a try index (asserted below)
//    isTryStart - true if "block" is the first block of its try region
//
// Notes:
//    Walks outward from the block's innermost try region through each
//    enclosing try region, queueing handler and filter blocks for import
//    (constructing their entry stack state as it goes). The current
//    verification stack state is saved on entry and restored on exit, since
//    the loop body overwrites verCurrentState.esStackDepth for each handler.
//
void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
{
    assert(block->hasTryIndex());
    assert(!compIsForInlining());
    unsigned  tryIndex = block->getTryIndex();
    EHblkDsc* HBtab    = ehGetDsc(tryIndex);
    if (isTryStart)
    {
        assert(block->bbFlags & BBF_TRY_BEG);
        // The Stack must be empty
        //
        if (block->bbStkDepth != 0)
        {
            BADCODE("Evaluation stack must be empty on entry into a try block");
        }
    }
    // Save the stack contents, we'll need to restore it later
    //
    SavedStack blockState;
    impSaveStackState(&blockState, false);
    // Walk from the innermost try region out through every enclosing region.
    while (HBtab != nullptr)
    {
        if (isTryStart)
        {
            // Are we verifying that an instance constructor properly initializes it's 'this' pointer once?
            // We do not allow the 'this' pointer to be uninitialized when entering most kinds try regions
            //
            if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
            {
                // We trigger an invalid program exception here unless we have a try/fault region.
                //
                if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
                {
                    BADCODE(
                        "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
                }
                else
                {
                    // Allow a try/fault region to proceed.
                    assert(HBtab->HasFaultHandler());
                }
            }
        }
        // Recursively process the handler block, if we haven't already done so.
        BasicBlock* hndBegBB = HBtab->ebdHndBeg;
        if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0))
        {
            // Construct the proper verification stack state
            // either empty or one that contains just
            // the Exception Object that we are dealing with
            //
            verCurrentState.esStackDepth = 0;
            if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
            {
                CORINFO_CLASS_HANDLE clsHnd;
                if (HBtab->HasFilter())
                {
                    // Filter-handlers always receive a plain System.Object.
                    clsHnd = impGetObjectClass();
                }
                else
                {
                    // Resolve the catch clause's class token to get the exact
                    // exception type pushed on the handler's entry stack.
                    CORINFO_RESOLVED_TOKEN resolvedToken;
                    resolvedToken.tokenContext = impTokenLookupContextHandle;
                    resolvedToken.tokenScope   = info.compScopeHnd;
                    resolvedToken.token        = HBtab->ebdTyp;
                    resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
                    info.compCompHnd->resolveToken(&resolvedToken);
                    clsHnd = resolvedToken.hClass;
                }
                // push catch arg the stack, spill to a temp if necessary
                // Note: can update HBtab->ebdHndBeg!
                hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
            }
            // Queue up the handler for importing
            //
            impImportBlockPending(hndBegBB);
        }
        // Process the filter block, if we haven't already done so.
        if (HBtab->HasFilter())
        {
            /* @VERIFICATION : Ideally the end of filter state should get
               propagated to the catch handler, this is an incompleteness,
               but is not a security/compliance issue, since the only
               interesting state is the 'thisInit' state.
            */
            BasicBlock* filterBB = HBtab->ebdFilter;
            if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0))
            {
                verCurrentState.esStackDepth = 0;
                // push catch arg the stack, spill to a temp if necessary
                // Note: can update HBtab->ebdFilter!
                const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
                filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
                impImportBlockPending(filterBB);
            }
        }
        // This seems redundant ....??
        if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
        {
            /* Recursively process the handler block */
            verCurrentState.esStackDepth = 0;
            // Queue up the fault handler for importing
            //
            impImportBlockPending(HBtab->ebdHndBeg);
        }
        // Now process our enclosing try index (if any)
        //
        tryIndex = HBtab->ebdEnclosingTryIndex;
        if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
        {
            HBtab = nullptr;
        }
        else
        {
            HBtab = ehGetDsc(tryIndex);
        }
    }
    // Restore the stack contents
    impRestoreStackState(&blockState);
}
//***************************************************************
// Import the instructions for the given basic block. Perform
// verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
// time, or whose verification pre-state is changed.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
//------------------------------------------------------------------------
// impImportBlock: import the IL for one basic block into GenTrees.
//
// Arguments:
//    block - the basic block to import
//
// Notes:
//    Performs verification (throwing on failure, caught by the SEH filter
//    below), spills any stack entries left at the end of the block into
//    temps shared with successor blocks (the "spill clique"), and queues
//    successor blocks for import. If spilling reveals a type mismatch
//    between this block and the clique (int vs native int, int vs byref,
//    float vs double), the clique temp is retyped and previously imported
//    clique members are re-queued for import.
//
void Compiler::impImportBlock(BasicBlock* block)
{
    // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
    // handle them specially. In particular, there is no IL to import for them, but we do need
    // to mark them as imported and put their successors on the pending import list.
    if (block->bbFlags & BBF_INTERNAL)
    {
        JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
        block->bbFlags |= BBF_IMPORTED;
        const unsigned numSuccs = block->NumSucc();
        for (unsigned i = 0; i < numSuccs; i++)
        {
            impImportBlockPending(block->GetSucc(i));
        }
        return;
    }
    bool markImport;
    assert(block);
    /* Make the block globaly available */
    compCurBB = block;
#ifdef DEBUG
    /* Initialize the debug variables */
    impCurOpcName = "unknown";
    impCurOpcOffs = block->bbCodeOffs;
#endif
    /* Set the current stack state to the merged result */
    verResetCurrentState(block, &verCurrentState);
    /* Now walk the code and import the IL into GenTrees */
    // Parameter struct for the PAL_TRY body below (PAL_TRY bodies cannot
    // capture locals directly).
    struct FilterVerificationExceptionsParam
    {
        Compiler*   pThis;
        BasicBlock* block;
    };
    FilterVerificationExceptionsParam param;
    param.pThis = this;
    param.block = block;
    PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
    {
        /* @VERIFICATION : For now, the only state propagation from try
           to it's handler is "thisInit" state (stack is empty at start of try).
           In general, for state that we track in verification, we need to
           model the possibility that an exception might happen at any IL
           instruction, so we really need to merge all states that obtain
           between IL instructions in a try block into the start states of
           all handlers.
           However we do not allow the 'this' pointer to be uninitialized when
           entering most kinds try regions (only try/fault are allowed to have
           an uninitialized this pointer on entry to the try)
           Fortunately, the stack is thrown away when an exception
           leads to a handler, so we don't have to worry about that.
           We DO, however, have to worry about the "thisInit" state.
           But only for the try/fault case.
           The only allowed transition is from TIS_Uninit to TIS_Init.
           So for a try/fault region for the fault handler block
           we will merge the start state of the try begin
           and the post-state of each block that is part of this try region
        */
        // merge the start state of the try begin
        //
        if (pParam->block->bbFlags & BBF_TRY_BEG)
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, true);
        }
        pParam->pThis->impImportBlockCode(pParam->block);
        // As discussed above:
        // merge the post-state of each block that is part of this try region
        //
        if (pParam->block->hasTryIndex())
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, false);
        }
    }
    PAL_EXCEPT_FILTER(FilterVerificationExceptions)
    {
        // A verification exception was thrown during import; record the
        // failure for this block and continue.
        verHandleVerificationFailure(block DEBUGARG(false));
    }
    PAL_ENDTRY
    if (compDonotInline())
    {
        return;
    }
    assert(!compDonotInline());
    markImport = false;
// We may jump back here from below if impSpillStackEntry fails and we need
// to retry the spill after marking the block as failing verification.
SPILLSTACK:
    unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
    bool        reimportSpillClique = false;
    BasicBlock* tgtBlock            = nullptr;
    /* If the stack is non-empty, we might have to spill its contents */
    if (verCurrentState.esStackDepth != 0)
    {
        impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
                                  // on the stack, its lifetime is hard to determine, simply
                                  // don't reuse such temps.
        Statement* addStmt = nullptr;
        /* Do the successors of 'block' have any other predecessors ?
           We do not want to do some of the optimizations related to multiRef
           if we can reimport blocks */
        unsigned multRef = impCanReimport ? unsigned(~0) : 0;
        switch (block->bbJumpKind)
        {
            case BBJ_COND:
                // Temporarily remove the trailing JTRUE so the spilled trees
                // can be appended before it; it is re-appended below.
                addStmt = impExtractLastStmt();
                assert(addStmt->GetRootNode()->gtOper == GT_JTRUE);
                /* Note if the next block has more than one ancestor */
                multRef |= block->bbNext->bbRefs;
                /* Does the next block have temps assigned? */
                baseTmp  = block->bbNext->bbStkTempsIn;
                tgtBlock = block->bbNext;
                if (baseTmp != NO_BASE_TMP)
                {
                    break;
                }
                /* Try the target of the jump then */
                multRef |= block->bbJumpDest->bbRefs;
                baseTmp  = block->bbJumpDest->bbStkTempsIn;
                tgtBlock = block->bbJumpDest;
                break;
            case BBJ_ALWAYS:
                multRef |= block->bbJumpDest->bbRefs;
                baseTmp  = block->bbJumpDest->bbStkTempsIn;
                tgtBlock = block->bbJumpDest;
                break;
            case BBJ_NONE:
                multRef |= block->bbNext->bbRefs;
                baseTmp  = block->bbNext->bbStkTempsIn;
                tgtBlock = block->bbNext;
                break;
            case BBJ_SWITCH:
                BasicBlock** jmpTab;
                unsigned     jmpCnt;
                // Temporarily remove the trailing GT_SWITCH, same as for JTRUE.
                addStmt = impExtractLastStmt();
                assert(addStmt->GetRootNode()->gtOper == GT_SWITCH);
                jmpCnt = block->bbJumpSwt->bbsCount;
                jmpTab = block->bbJumpSwt->bbsDstTab;
                do
                {
                    tgtBlock = (*jmpTab);
                    multRef |= tgtBlock->bbRefs;
                    // Thanks to spill cliques, we should have assigned all or none
                    assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
                    baseTmp = tgtBlock->bbStkTempsIn;
                    if (multRef > 1)
                    {
                        break;
                    }
                } while (++jmpTab, --jmpCnt);
                break;
            case BBJ_CALLFINALLY:
            case BBJ_EHCATCHRET:
            case BBJ_RETURN:
            case BBJ_EHFINALLYRET:
            case BBJ_EHFILTERRET:
            case BBJ_THROW:
                NO_WAY("can't have 'unreached' end of BB with non-empty stack");
                break;
            default:
                noway_assert(!"Unexpected bbJumpKind");
                break;
        }
        assert(multRef >= 1);
        /* Do we have a base temp number? */
        bool newTemps = (baseTmp == NO_BASE_TMP);
        if (newTemps)
        {
            /* Grab enough temps for the whole stack */
            baseTmp = impGetSpillTmpBase(block);
        }
        /* Spill all stack entries into temps */
        unsigned level, tempNum;
        JITDUMP("\nSpilling stack entries into temps\n");
        for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
        {
            GenTree* tree = verCurrentState.esStack[level].val;
            /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
               the other. This should merge to a byref in unverifiable code.
               However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
               successor would be imported assuming there was a TYP_I_IMPL on
               the stack. Thus the value would not get GC-tracked. Hence,
               change the temp to TYP_BYREF and reimport the successors.
               Note: We should only allow this in unverifiable code.
            */
            if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL)
            {
                lvaTable[tempNum].lvType = TYP_BYREF;
                impReimportMarkSuccessors(block);
                markImport = true;
            }
#ifdef TARGET_64BIT
            if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
            {
                if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
                    (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
                {
                    // Merge the current state into the entry state of block;
                    // the call to verMergeEntryStates must have changed
                    // the entry state of the block by merging the int local var
                    // and the native-int stack entry.
                    bool changed = false;
                    if (verMergeEntryStates(tgtBlock, &changed))
                    {
                        impRetypeEntryStateTemps(tgtBlock);
                        impReimportBlockPending(tgtBlock);
                        assert(changed);
                    }
                    else
                    {
                        tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
                        break;
                    }
                }
                // Some other block in the spill clique set this to "int", but now we have "native int".
                // Change the type and go back to re-import any blocks that used the wrong type.
                lvaTable[tempNum].lvType = TYP_I_IMPL;
                reimportSpillClique      = true;
            }
            else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
            {
                // Spill clique has decided this should be "native int", but this block only pushes an "int".
                // Insert a sign-extension to "native int" so we match the clique.
                verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
            }
            // Consider the case where one branch left a 'byref' on the stack and the other leaves
            // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
            // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
            // behavior instead of asserting and then generating bad code (where we save/restore the
            // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
            // imported already, we need to change the type of the local and reimport the spill clique.
            // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
            // the 'byref' size.
            if (!tiVerificationNeeded)
            {
                if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
                {
                    // Some other block in the spill clique set this to "int", but now we have "byref".
                    // Change the type and go back to re-import any blocks that used the wrong type.
                    lvaTable[tempNum].lvType = TYP_BYREF;
                    reimportSpillClique      = true;
                }
                else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
                {
                    // Spill clique has decided this should be "byref", but this block only pushes an "int".
                    // Insert a sign-extension to "native int" so we match the clique size.
                    verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
                }
            }
#endif // TARGET_64BIT
            if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
            {
                // Some other block in the spill clique set this to "float", but now we have "double".
                // Change the type and go back to re-import any blocks that used the wrong type.
                lvaTable[tempNum].lvType = TYP_DOUBLE;
                reimportSpillClique      = true;
            }
            else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
            {
                // Spill clique has decided this should be "double", but this block only pushes a "float".
                // Insert a cast to "double" so we match the clique.
                verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
            }
            /* If addStmt has a reference to tempNum (can only happen if we
               are spilling to the temps already used by a previous block),
               we need to spill addStmt */
            if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum, false))
            {
                GenTree* addTree = addStmt->GetRootNode();
                if (addTree->gtOper == GT_JTRUE)
                {
                    // Spill whichever JTRUE operand(s) reference the temp into
                    // fresh temps so the comparison still sees the old value.
                    GenTree* relOp = addTree->AsOp()->gtOp1;
                    assert(relOp->OperIsCompare());
                    var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet());
                    if (gtHasRef(relOp->AsOp()->gtOp1, tempNum, false))
                    {
                        unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
                        impAssignTempGen(temp, relOp->AsOp()->gtOp1, level);
                        type                 = genActualType(lvaTable[temp].TypeGet());
                        relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type);
                    }
                    if (gtHasRef(relOp->AsOp()->gtOp2, tempNum, false))
                    {
                        unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
                        impAssignTempGen(temp, relOp->AsOp()->gtOp2, level);
                        type                 = genActualType(lvaTable[temp].TypeGet());
                        relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type);
                    }
                }
                else
                {
                    assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet()));
                    unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
                    impAssignTempGen(temp, addTree->AsOp()->gtOp1, level);
                    addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet()));
                }
            }
            /* Spill the stack entry, and replace with the temp */
            if (!impSpillStackEntry(level, tempNum
#ifdef DEBUG
                                    ,
                                    true, "Spill Stack Entry"
#endif
                                    ))
            {
                if (markImport)
                {
                    BADCODE("bad stack state");
                }
                // Oops. Something went wrong when spilling. Bad code.
                verHandleVerificationFailure(block DEBUGARG(true));
                goto SPILLSTACK;
            }
        }
        /* Put back the 'jtrue'/'switch' if we removed it earlier */
        if (addStmt != nullptr)
        {
            impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
        }
    }
    // Some of the append/spill logic works on compCurBB
    assert(compCurBB == block);
    /* Save the tree list in the block */
    impEndTreeList(block);
    // impEndTreeList sets BBF_IMPORTED on the block
    // We do *NOT* want to set it later than this because
    // impReimportSpillClique might clear it if this block is both a
    // predecessor and successor in the current spill clique
    assert(block->bbFlags & BBF_IMPORTED);
    // If we had a int/native int, or float/double collision, we need to re-import
    if (reimportSpillClique)
    {
        // This will re-import all the successors of block (as well as each of their predecessors)
        impReimportSpillClique(block);
        // For blocks that haven't been imported yet, we still need to mark them as pending import.
        const unsigned numSuccs = block->NumSucc();
        for (unsigned i = 0; i < numSuccs; i++)
        {
            BasicBlock* succ = block->GetSucc(i);
            if ((succ->bbFlags & BBF_IMPORTED) == 0)
            {
                impImportBlockPending(succ);
            }
        }
    }
    else // the normal case
    {
        // otherwise just import the successors of block
        /* Does this block jump to any other blocks? */
        const unsigned numSuccs = block->NumSucc();
        for (unsigned i = 0; i < numSuccs; i++)
        {
            impImportBlockPending(block->GetSucc(i));
        }
    }
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Merges the current verification state into the verification state of "block"
// (its "pre-state").
//------------------------------------------------------------------------
// impImportBlockPending: ensure "block" is on the list of blocks waiting
//    to be imported.
//
// Arguments:
//    block - the block to queue for import
//
// Notes:
//    Pushes the block on the pending list if it has not already been
//    imported, or if (when verifying) merging the current state into the
//    block's pre-state changes it. Also records the block's entry stack
//    state the first time the block is queued, and asserts that all
//    predecessors enter the block with the same stack depth.
//
void Compiler::impImportBlockPending(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum);
    }
#endif
    // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
    // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
    // (When we're doing verification, we always attempt the merge to detect verification errors.)
    // If the block has not been imported, add to pending set.
    bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
    // Initialize bbEntryState just the first time we try to add this block to the pending list
    // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set
    // We use NULL to indicate the 'common' state to avoid memory allocation
    if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
        (impGetPendingBlockMember(block) == 0))
    {
        // First time this block is queued: snapshot the current state as its pre-state.
        verInitBBEntryState(block, &verCurrentState);
        assert(block->bbStkDepth == 0);
        block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
        assert(addToPending);
        assert(impGetPendingBlockMember(block) == 0);
    }
    else
    {
        // The stack should have the same height on entry to the block from all its predecessors.
        if (block->bbStkDepth != verCurrentState.esStackDepth)
        {
#ifdef DEBUG
            char buffer[400];
            sprintf_s(buffer, sizeof(buffer),
                      "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n"
                      "Previous depth was %d, current depth is %d",
                      block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
                      verCurrentState.esStackDepth);
            buffer[400 - 1] = 0;
            NO_WAY(buffer);
#else
            NO_WAY("Block entered with different stack depths");
#endif
        }
        // Additionally, if we need to verify, merge the verification state.
        if (tiVerificationNeeded)
        {
            // Merge the current state into the entry state of block; if this does not change the entry state
            // by merging, do not add the block to the pending-list.
            bool changed = false;
            if (!verMergeEntryStates(block, &changed))
            {
                block->bbFlags |= BBF_FAILED_VERIFICATION;
                addToPending = true; // We will pop it off, and check the flag set above.
            }
            else if (changed)
            {
                addToPending = true;
                JITDUMP("Adding " FMT_BB " to pending set due to new merge result\n", block->bbNum);
            }
        }
        if (!addToPending)
        {
            return;
        }
        if (block->bbStkDepth > 0)
        {
            // We need to fix the types of any spill temps that might have changed:
            // int->native int, float->double, int->byref, etc.
            impRetypeEntryStateTemps(block);
        }
        // OK, we must add to the pending list, if it's not already in it.
        if (impGetPendingBlockMember(block) != 0)
        {
            return;
        }
    }
    // Get an entry to add to the pending list
    PendingDsc* dsc;
    if (impPendingFree)
    {
        // We can reuse one of the freed up dscs.
        dsc            = impPendingFree;
        impPendingFree = dsc->pdNext;
    }
    else
    {
        // We have to create a new dsc
        dsc = new (this, CMK_Unknown) PendingDsc;
    }
    dsc->pdBB                 = block;
    dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
    dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
    // Save the stack trees for later
    if (verCurrentState.esStackDepth)
    {
        impSaveStackState(&dsc->pdSavedStack, false);
    }
    // Add the entry to the pending list
    dsc->pdNext    = impPendingList;
    impPendingList = dsc;
    impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
    // Various assertions require us to now to consider the block as not imported (at least for
    // the final time...)
    block->bbFlags &= ~BBF_IMPORTED;
#ifdef DEBUG
    if (verbose && 0)
    {
        printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
    }
#endif
}
/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
//------------------------------------------------------------------------
// impReimportBlockPending: ensure "block" is on the list of blocks waiting
//    to be imported, WITHOUT modifying its recorded pre-state.
//
// Arguments:
//    block - an already-imported block that must be imported again
//
void Compiler::impReimportBlockPending(BasicBlock* block)
{
    JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum);
    assert(block->bbFlags & BBF_IMPORTED);

    // Already queued? Then there is nothing to do.
    if (impGetPendingBlockMember(block) != 0)
    {
        return;
    }

    // Obtain a pending-list descriptor, recycling one from the free list
    // when available.
    PendingDsc* dsc = impPendingFree;
    if (dsc != nullptr)
    {
        impPendingFree = dsc->pdNext;
    }
    else
    {
        dsc = new (this, CMK_ImpStack) PendingDsc;
    }

    dsc->pdBB = block;

    // Copy the block's existing pre-state into the descriptor; when the
    // block has no recorded entry state, use the "common" (bottom) state.
    if (block->bbEntryState != nullptr)
    {
        dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
        dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
        dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
    }
    else
    {
        dsc->pdThisPtrInit        = TIS_Bottom;
        dsc->pdSavedStack.ssDepth = 0;
        dsc->pdSavedStack.ssTrees = nullptr;
    }

    // Push the descriptor onto the head of the pending list and record
    // the block's membership in the pending set.
    dsc->pdNext    = impPendingList;
    impPendingList = dsc;
    impSetPendingBlockMember(block, 1);

    // Various assertions require us to now to consider the block as not imported (at least for
    // the final time...)
    block->bbFlags &= ~BBF_IMPORTED;

#ifdef DEBUG
    if (verbose && 0)
    {
        printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
    }
#endif
}
void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
if (comp->impBlockListNodeFreeList == nullptr)
{
return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
}
else
{
BlockListNode* res = comp->impBlockListNodeFreeList;
comp->impBlockListNodeFreeList = res->m_next;
return res;
}
}
// Returns "node" to the free list so a later BlockListNode allocation can reuse it.
// Nodes are never actually deallocated; their storage lives for the duration of
// the compiler's arena.
void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
{
    node->m_next             = impBlockListNodeFreeList;
    impBlockListNodeFreeList = node;
}
// Walks every member of the spill clique containing "block", invoking
// "callback->Visit" once per (direction, block) membership discovered.
//
// Starting from "block" as a predecessor, the walk alternately adds the
// successors of known predecessors and the predecessors of known successors
// until a fixed point is reached (the outer while(toDo) loop). Membership is
// tracked in the SpillClique{Pred,Succ}Members sets so each block is visited
// at most once per direction.
void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
{
    bool toDo = true;

    noway_assert(!fgComputePredsDone);
    if (!fgCheapPredsValid)
    {
        // The walk relies on cheap preds; compute them on demand.
        fgComputeCheapPreds();
    }

    BlockListNode* succCliqueToDo = nullptr;
    BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
    while (toDo)
    {
        toDo = false;
        // Look at the successors of every member of the predecessor to-do list.
        while (predCliqueToDo != nullptr)
        {
            BlockListNode* node = predCliqueToDo;
            predCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            const unsigned numSuccs = blk->NumSucc();
            for (unsigned succNum = 0; succNum < numSuccs; succNum++)
            {
                BasicBlock* succ = blk->GetSucc(succNum);
                // If it's not already in the clique, add it, and also add it
                // as a member of the successor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
                {
                    callback->Visit(SpillCliqueSucc, succ);
                    impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
                    succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
                    toDo           = true;
                }
            }
        }
        // Look at the predecessors of every member of the successor to-do list.
        while (succCliqueToDo != nullptr)
        {
            BlockListNode* node = succCliqueToDo;
            succCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
            {
                BasicBlock* predBlock = pred->block;
                // If it's not already in the clique, add it, and also add it
                // as a member of the predecessor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
                {
                    callback->Visit(SpillCliquePred, predBlock);
                    impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
                    predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
                    toDo           = true;
                }
            }
        }
    }

    // If this fails, it means we didn't walk the spill clique properly and somehow managed
    // miss walking back to include the predecessor we started from.
    // This most likely cause: missing or out of date bbPreds
    assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
}
// Spill-clique walk callback that stamps the chosen spill temp base number
// (m_baseTmp) into each visited block, on the side matching its role:
// predecessors spill to the temps (bbStkTempsOut), successors reload from
// them (bbStkTempsIn).
void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    if (predOrSucc == SpillCliquePred)
    {
        // A block can be the predecessor of at most one spill clique.
        assert(blk->bbStkTempsOut == NO_BASE_TMP);
        blk->bbStkTempsOut = m_baseTmp;
    }
    else
    {
        assert(predOrSucc == SpillCliqueSucc);
        // A block can be the successor of at most one spill clique.
        assert(blk->bbStkTempsIn == NO_BASE_TMP);
        blk->bbStkTempsIn = m_baseTmp;
    }
}
// Spill-clique walk callback used when entry stack types turned out to be
// wrong on first import: queues clique members for re-importation so their
// entry states can be corrected.
void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    // For Preds we could be a little smarter and just find the existing store
    // and re-type it/add a cast, but that is complicated and hopefully very rare, so
    // just re-import the whole block (just like we do for successors)

    if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
    {
        // If we haven't imported this block and we're not going to (because it isn't on
        // the pending list) then just ignore it for now.

        // This block has either never been imported (EntryState == NULL) or it failed
        // verification. Neither state requires us to force it to be imported now.
        assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
        return;
    }

    // For successors we have a valid verCurrentState, so just mark them for reimport
    // the 'normal' way
    // Unlike predecessors, we *DO* need to reimport the current block because the
    // initial import had the wrong entry state types.
    // Similarly, blocks that are currently on the pending list, still need to call
    // impImportBlockPending to fixup their entry state.
    if (predOrSucc == SpillCliqueSucc)
    {
        m_pComp->impReimportMarkBlock(blk);

        // Set the current stack state to that of the blk->bbEntryState
        m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
        assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());

        m_pComp->impImportBlockPending(blk);
    }
    else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
    {
        // As described above, we are only visiting predecessors so they can
        // add the appropriate casts, since we have already done that for the current
        // block, it does not need to be reimported.
        // Nor do we need to reimport blocks that are still pending, but not yet
        // imported.
        //
        // For predecessors, we have no state to seed the EntryState, so we just have
        // to assume the existing one is correct.
        // If the block is also a successor, it will get the EntryState properly
        // updated when it is visited as a successor in the above "if" block.
        assert(predOrSucc == SpillCliquePred);
        m_pComp->impReimportBlockPending(blk);
    }
}
// Re-type the incoming lclVar nodes to match the varDsc.
//
// When a local's declared type has changed since the entry state was saved
// (e.g. int -> native int during spill-clique retyping), the saved stack
// trees that refer to that local must be updated to carry its current type.
void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
{
    EntryState* const entryState = blk->bbEntryState;
    if (entryState == nullptr)
    {
        // No recorded entry state, so there is nothing to re-type.
        return;
    }

    for (unsigned i = 0; i < entryState->esStackDepth; i++)
    {
        GenTree* const stackTree = entryState->esStack[i].val;
        if ((stackTree->gtOper == GT_LCL_VAR) || (stackTree->gtOper == GT_LCL_FLD))
        {
            const unsigned lclNum = stackTree->AsLclVarCommon()->GetLclNum();
            noway_assert(lclNum < lvaCount);
            // Propagate the local's current type onto the saved stack tree.
            stackTree->gtType = lvaTable[lclNum].TypeGet();
        }
    }
}
// Returns the base temp number for the spill clique containing "block".
// On the first request for a clique this grabs one temp per live stack slot
// and records the base in every clique member; later requests just read the
// value stamped on the block.
unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
{
    // A base already assigned by an earlier walk is simply reused.
    if (block->bbStkTempsOut != NO_BASE_TMP)
    {
        return block->bbStkTempsOut;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum);
    }
#endif // DEBUG

    // First request: grab enough temps to cover the whole evaluation stack,
    // then propagate the base to all members of the spill clique.
    const unsigned newBaseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));

    // The SpillClique*Members sets need no reset here: a block can be the
    // predecessor of at most one spill clique, and likewise the successor
    // of at most one spill clique.
    SetSpillTempsBase tempsSetter(newBaseTmp);
    impWalkSpillCliqueFromPred(block, &tempsSetter);

    return newBaseTmp;
}
// Marks every member of the spill clique containing "block" for
// re-importation. Called when a clique member's stack types turned out to be
// incompatible with what an earlier import assumed (e.g. one predecessor
// produced int where another produced native int).
void Compiler::impReimportSpillClique(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum);
    }
#endif // DEBUG

    // The membership sets were populated by the earlier walk that discovered
    // the clique; clear them so the new walk visits every member again.
    Compiler* const root = impInlineRoot();
    root->impSpillCliquePredMembers.Reset();
    root->impSpillCliqueSuccMembers.Reset();

    ReimportSpillClique reimporter(this);
    impWalkSpillCliqueFromPred(block, &reimporter);
}
// Set the pre-state of "block" (which should not have a pre-state allocated) to
// a copy of "srcState", cloning tree pointers as required.
//
// An empty source state (no stack, TIS_Bottom) is represented by a null
// bbEntryState rather than an allocated-but-empty one. Stack trees are deep
// cloned so later mutation of the importer's current state cannot alias the
// saved entry state.
void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
{
    if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
    {
        block->bbEntryState = nullptr;
        return;
    }

    block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);

    // block->bbEntryState.esRefcount = 1;

    block->bbEntryState->esStackDepth = srcState->esStackDepth;
    // thisInitialized stays TIS_Bottom unless verSetThisInit is called below.
    block->bbEntryState->thisInitialized = TIS_Bottom;

    if (srcState->esStackDepth > 0)
    {
        block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
        unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);

        // Bulk-copy the entries (for the non-pointer fields), then replace
        // each tree pointer with a clone.
        memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
        for (unsigned level = 0; level < srcState->esStackDepth; level++)
        {
            GenTree* tree                           = srcState->esStack[level].val;
            block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
        }
    }

    if (verTrackObjCtorInitState)
    {
        verSetThisInit(block, srcState->thisInitialized);
    }

    return;
}
// Records the 'this'-pointer initialization state on entry to "block",
// allocating an entry state on demand. TIS_Bottom is not a valid value here
// (a bottom state is represented by having no entry state at all).
void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
{
    assert(tis != TIS_Bottom); // Precondition.
    if (block->bbEntryState == nullptr)
    {
        block->bbEntryState = new (this, CMK_Unknown) EntryState();
    }

    block->bbEntryState->thisInitialized = tis;
}
/*
 * Resets the current state to the state at the start of the basic block
 */
void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
{
    EntryState* const entry = block->bbEntryState;
    if (entry == nullptr)
    {
        // No recorded entry state: the block begins with an empty stack and
        // an unknown 'this' initialization state.
        destState->esStackDepth    = 0;
        destState->thisInitialized = TIS_Bottom;
        return;
    }

    destState->esStackDepth = entry->esStackDepth;
    if (destState->esStackDepth > 0)
    {
        // Bulk-copy the saved stack entries into the destination state.
        unsigned bytesToCopy = destState->esStackDepth * sizeof(StackEntry);
        memcpy(destState->esStack, block->bbStackOnEntry(), bytesToCopy);
    }

    destState->thisInitialized = block->bbThisOnEntry();
}
// Returns the 'this'-pointer initialization state recorded for entry to this
// block, or TIS_Bottom when no entry state has been recorded.
ThisInitState BasicBlock::bbThisOnEntry()
{
    if (bbEntryState == nullptr)
    {
        return TIS_Bottom;
    }
    return bbEntryState->thisInitialized;
}
// Returns the evaluation-stack depth recorded for entry to this block
// (zero when no entry state has been recorded).
unsigned BasicBlock::bbStackDepthOnEntry()
{
    if (bbEntryState == nullptr)
    {
        return 0;
    }
    return bbEntryState->esStackDepth;
}
void BasicBlock::bbSetStack(void* stackBuffer)
{
assert(bbEntryState);
assert(stackBuffer);
bbEntryState->esStack = (StackEntry*)stackBuffer;
}
// Returns the saved entry stack for this block; the block must have an
// allocated entry state (see verInitBBEntryState / bbSetStack).
StackEntry* BasicBlock::bbStackOnEntry()
{
    assert(bbEntryState);
    return bbEntryState->esStack;
}
// Initializes the importer's current verification state for the start of the
// method: 'this'-init tracking, an empty evaluation stack, and the entry
// state of the first basic block.
void Compiler::verInitCurrentState()
{
    verTrackObjCtorInitState        = FALSE;
    verCurrentState.thisInitialized = TIS_Bottom;

    if (tiVerificationNeeded)
    {
        // Track this ptr initialization
        // Only instance constructors of reference types need 'this'-init tracking.
        if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
        {
            verTrackObjCtorInitState        = TRUE;
            verCurrentState.thisInitialized = TIS_Uninit;
        }
    }

    // initialize stack info
    verCurrentState.esStackDepth = 0;
    // The stack storage itself must have been allocated by impImport before this runs.
    assert(verCurrentState.esStack != nullptr);

    // copy current state to entry state of first BB
    verInitBBEntryState(fgFirstBB, &verCurrentState);
}
// Returns the root compiler of the inlining tree: "this" when not compiling
// an inlinee, otherwise the compiler instance for the outermost method.
Compiler* Compiler::impInlineRoot()
{
    return (impInlineInfo == nullptr) ? this : impInlineInfo->InlineRoot;
}
// Returns this block's membership byte in the predecessor or successor
// spill-clique set. The sets live on the root compiler of the inline tree.
BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    Compiler* const root = impInlineRoot();
    if (predOrSucc == SpillCliquePred)
    {
        return root->impSpillCliquePredMembers.Get(blk->bbInd());
    }
    assert(predOrSucc == SpillCliqueSucc);
    return root->impSpillCliqueSuccMembers.Get(blk->bbInd());
}
// Sets this block's membership byte in the predecessor or successor
// spill-clique set. The sets live on the root compiler of the inline tree.
void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
{
    Compiler* const root = impInlineRoot();
    if (predOrSucc == SpillCliquePred)
    {
        root->impSpillCliquePredMembers.Set(blk->bbInd(), val);
    }
    else
    {
        assert(predOrSucc == SpillCliqueSucc);
        root->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
    }
}
/*****************************************************************************
 *
 * Convert the instrs ("import") into our internal format (trees). The
 * basic flowgraph has already been constructed and is passed in.
 */
void Compiler::impImport()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In impImport() for %s\n", info.compFullName);
    }
#endif

    Compiler* inlineRoot = impInlineRoot();

    // Size the importer's evaluation stack from the method's declared maxstack,
    // with a small lower bound.
    if (info.compMaxStack <= SMALL_STACK_SIZE)
    {
        impStkSize = SMALL_STACK_SIZE;
    }
    else
    {
        impStkSize = info.compMaxStack;
    }

    if (this == inlineRoot)
    {
        // Allocate the stack contents
        verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
    }
    else
    {
        // This is the inlinee compiler, steal the stack from the inliner compiler
        // (after ensuring that it is large enough).
        if (inlineRoot->impStkSize < impStkSize)
        {
            inlineRoot->impStkSize              = impStkSize;
            inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
        }

        verCurrentState.esStack = inlineRoot->verCurrentState.esStack;
    }

    // initialize the entry state at start of method
    verInitCurrentState();

    // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
    if (this == inlineRoot) // These are only used on the root of the inlining tree.
    {
        // We have initialized these previously, but to size 0. Make them larger.
        impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
        impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
        impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
    }
    inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
    inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
    inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
    impBlockListNodeFreeList = nullptr;

#ifdef DEBUG
    impLastILoffsStmt   = nullptr;
    impNestedStackSpill = false;
#endif
    impBoxTemp = BAD_VAR_NUM;

    impPendingList = impPendingFree = nullptr;

    // Skip leading internal blocks.
    // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects.
    //
    // We expect a linear flow to the first non-internal block. But not necessarily straight-line flow.
    BasicBlock* entryBlock = fgFirstBB;

    while (entryBlock->bbFlags & BBF_INTERNAL)
    {
        JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum);
        entryBlock->bbFlags |= BBF_IMPORTED;

        if (entryBlock->bbJumpKind == BBJ_NONE)
        {
            entryBlock = entryBlock->bbNext;
        }
        else if (entryBlock->bbJumpKind == BBJ_ALWAYS)
        {
            // Only expected for OSR
            assert(opts.IsOSR());
            entryBlock = entryBlock->bbJumpDest;
        }
        else
        {
            assert(!"unexpected bbJumpKind in entry sequence");
        }
    }

    // Note for OSR we'd like to be able to verify this block must be
    // stack empty, but won't know that until we've imported...so instead
    // we'll BADCODE out if we mess up.
    //
    // (the concern here is that the runtime asks us to OSR a
    // different IL version than the one that matched the method that
    // triggered OSR).  This should not happen but I might have the
    // IL versioning stuff wrong.
    //
    // TODO: we also currently expect this block to be a join point,
    // which we should verify over when we find jump targets.
    impImportBlockPending(entryBlock);

    /* Import blocks in the worker-list until there are no more */

    while (impPendingList)
    {
        /* Remove the entry at the front of the list */

        PendingDsc* dsc = impPendingList;
        impPendingList  = impPendingList->pdNext;
        impSetPendingBlockMember(dsc->pdBB, 0);

        /* Restore the stack state */

        verCurrentState.thisInitialized = dsc->pdThisPtrInit;
        verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
        if (verCurrentState.esStackDepth)
        {
            impRestoreStackState(&dsc->pdSavedStack);
        }

        /* Add the entry to the free list for reuse */

        dsc->pdNext   = impPendingFree;
        impPendingFree = dsc;

        /* Now import the block */

        if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
        {
#ifdef TARGET_64BIT
            // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
            // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
            // method for further explanation on why we raise this exception instead of making the jitted
            // code throw the verification exception during execution.
            if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
            {
                BADCODE("Basic block marked as not verifiable");
            }
            else
#endif // TARGET_64BIT
            {
                verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
                impEndTreeList(dsc->pdBB);
            }
        }
        else
        {
            impImportBlock(dsc->pdBB);

            if (compDonotInline())
            {
                return;
            }
            if (compIsForImportOnly() && !tiVerificationNeeded)
            {
                return;
            }
        }
    }

#ifdef DEBUG
    if (verbose && info.compXcptnsCount)
    {
        printf("\nAfter impImport() added block for try,catch,finally");
        fgDispBasicBlocks();
        printf("\n");
    }

    // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
    for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
    {
        block->bbFlags &= ~BBF_VISITED;
    }
#endif

    assert(!compIsForInlining() || !tiVerificationNeeded);
}
// Checks if a typeinfo (usually stored in the type stack) is a struct.
// The invariant here is that if it's not a ref or a method and has a class handle
// it's a valuetype. A null typeinfo is not a struct.
bool Compiler::impIsValueType(typeInfo* pTypeInfo)
{
    return (pTypeInfo != nullptr) && pTypeInfo->IsValueClassWithClsHnd();
}
/*****************************************************************************
 * Check to see if the tree is the address of a local or
   the address of a field in a local.
 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
 *
 * Walks ADDR(FIELD(FIELD(...(LCL_VAR)))) chains; a static-field link (null
 * gtFldObj) or a non-local base terminates the walk with FALSE.
 */
BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
{
    if (tree->gtOper != GT_ADDR)
    {
        return FALSE;
    }

    GenTree* op = tree->AsOp()->gtOp1;
    while (op->gtOper == GT_FIELD)
    {
        op = op->AsField()->gtFldObj;
        if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
        {
            op = op->AsOp()->gtOp1;
        }
        else
        {
            // Consistency fix: this BOOL-returning function used lowercase
            // 'false' only on this path; use FALSE like every other return.
            return FALSE;
        }
    }

    if (op->gtOper == GT_LCL_VAR)
    {
        *lclVarTreeOut = op;
        return TRUE;
    }
    else
    {
        return FALSE;
    }
}
//------------------------------------------------------------------------
// impMakeDiscretionaryInlineObservations: make observations that help
// determine the profitability of a discretionary inline
//
// Arguments:
//    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
//    inlineResult -- InlineResult accumulating information about this inline
//
// Notes:
//    If inlining or prejitting the root, this method also makes
//    various observations about the method that factor into inline
//    decisions. It sets `compNativeSizeEstimate` as a side effect.
void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
{
    assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining.
           (pInlineInfo == nullptr && !compIsForInlining())   // Calculate the static inlining hint for ngen.
           );

    // If we're really inlining, we should just have one result in play.
    assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));

    // If this is a "forceinline" method, the JIT probably shouldn't have gone
    // to the trouble of estimating the native code size. Even if it did, it
    // shouldn't be relying on the result of this method.
    assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);

    // Note if the caller contains NEWOBJ or NEWARR.
    Compiler* rootCompiler = impInlineRoot();

    if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
    {
        inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
    }

    if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
    {
        inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
    }

    bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
    bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;

    if (isSpecialMethod)
    {
        if (calleeIsStatic)
        {
            inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
        }
        else
        {
            inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
        }
    }
    else if (!calleeIsStatic)
    {
        // Callee is an instance method.
        //
        // Check if the callee has the same 'this' as the root.
        if (pInlineInfo != nullptr)
        {
            GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode();
            assert(thisArg);
            bool isSameThis = impIsThis(thisArg);
            inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
        }
    }

    // Note if the callee's class is a promotable struct
    if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
    {
        assert(structPromotionHelper != nullptr);
        if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
        {
            inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
        }
    }

#ifdef FEATURE_SIMD
    // Note if this method is has SIMD args or return value
    if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
    {
        inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
    }
#endif // FEATURE_SIMD

    // Roughly classify callsite frequency.
    InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;

    // If this is a prejit root, or a maximally hot block...
    if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
    {
        frequency = InlineCallsiteFrequency::HOT;
    }
    // No training data.  Look for loop-like things.
    // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
    // However, give it to things nearby.
    else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
             (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
    {
        frequency = InlineCallsiteFrequency::LOOP;
    }
    else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
    {
        frequency = InlineCallsiteFrequency::WARM;
    }
    // Now modify the multiplier based on where we're called from.
    else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
    {
        frequency = InlineCallsiteFrequency::RARE;
    }
    else
    {
        frequency = InlineCallsiteFrequency::BORING;
    }

    // Also capture the block weight of the call site.
    //
    // In the prejit root case, assume at runtime there might be a hot call site
    // for this method, so we won't prematurely conclude this method should never
    // be inlined.
    //
    BasicBlock::weight_t weight = 0;

    if (pInlineInfo != nullptr)
    {
        weight = pInlineInfo->iciBlock->bbWeight;
    }
    else
    {
        const float prejitHotCallerWeight = 1000000.0f;
        weight                            = prejitHotCallerWeight;
    }

    inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
    inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight));

    // If the call site has profile data, report the relative frequency of the site.
    //
    if ((pInlineInfo != nullptr) && pInlineInfo->iciBlock->hasProfileWeight())
    {
        double callSiteWeight = (double)pInlineInfo->iciBlock->bbWeight;
        double entryWeight    = (double)impInlineRoot()->fgFirstBB->bbWeight;

        assert(callSiteWeight >= 0);
        assert(entryWeight >= 0);

        if (entryWeight != 0)
        {
            inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, true);

            // Renamed from "frequency" to avoid shadowing the
            // InlineCallsiteFrequency local declared above.
            double profileFreq = callSiteWeight / entryWeight;
            inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq);
        }
    }
}
/*****************************************************************************
   This method makes STATIC inlining decision based on the IL code.
   It should not make any inlining decision based on the context.
   If forceInline is true, then the inlining decision should not depend on
   performance heuristics (code size, etc.).

   Observations are noted on inlineResult in a fixed order; a fatal note or a
   failing heuristic note causes an early return, and callers must check
   inlineResult afterwards.
 */
void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
                              CORINFO_METHOD_INFO*  methInfo,
                              bool                  forceInline,
                              InlineResult*         inlineResult)
{
    const unsigned ilCodeSize = methInfo->ILCodeSize;

    // No decision may have been reached before the static screen runs.
    assert(!inlineResult->IsDecided());

    // Candidates containing exception handlers are never inlined.
    if (methInfo->EHcount != 0)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
        return;
    }

    // A method with no IL body cannot be inlined.
    if ((methInfo->ILCode == nullptr) || (ilCodeSize == 0))
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
        return;
    }

    // For now we don't inline varargs (import code can't handle it)
    if (methInfo->args.isVarArg())
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
        return;
    }

    // Reject if it has too many locals: an implementation limit imposed by
    // fixed-size arrays in the inline info, not a performance heuristic.
    inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);

    if (methInfo->locals.numArgs > MAX_INL_LCLS)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
        return;
    }

    // Likewise reject too many arguments: the same fixed-size-array limit.
    inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);

    if (methInfo->args.numArgs > MAX_INL_ARGS)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
        return;
    }

    // Record the force-inline state and the IL size; the size note may fail
    // the result via the heuristics, so check before continuing.
    inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
    inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, ilCodeSize);

    if (inlineResult->IsFailure())
    {
        return;
    }

    // Finally note maxstack, which may also fail the result.
    inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);

    if (inlineResult->IsFailure())
    {
        return;
    }
}
/*****************************************************************************
 * impCheckCanInline: determine whether "call" to "fncHandle" may be inlined,
 * consulting both static IL screening (impCanInlineIL) and the EE.  On
 * success, fills in *ppInlineCandidateInfo; on failure, notes the reason on
 * inlineResult.  Runs under an EE error trap so exceptions simply fail the
 * inline rather than the compilation.
 */
void Compiler::impCheckCanInline(GenTreeCall*           call,
                                 CORINFO_METHOD_HANDLE  fncHandle,
                                 unsigned               methAttr,
                                 CORINFO_CONTEXT_HANDLE exactContextHnd,
                                 InlineCandidateInfo**  ppInlineCandidateInfo,
                                 InlineResult*          inlineResult)
{
    // Either EE or JIT might throw exceptions below.
    // If that happens, just don't inline the method.
    struct Param
    {
        Compiler*              pThis;
        GenTreeCall*           call;
        CORINFO_METHOD_HANDLE  fncHandle;
        unsigned               methAttr;
        CORINFO_CONTEXT_HANDLE exactContextHnd;
        InlineResult*          result;
        InlineCandidateInfo**  ppInlineCandidateInfo;
    } param;
    // Fix: restore "&param" — the address-of operator had been corrupted to
    // the mojibake sequence "¶m" here and in the eeRunWithErrorTrap call.
    memset(&param, 0, sizeof(param));

    param.pThis                 = this;
    param.call                  = call;
    param.fncHandle             = fncHandle;
    param.methAttr              = methAttr;
    param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
    param.result                = inlineResult;
    param.ppInlineCandidateInfo = ppInlineCandidateInfo;

    bool success = eeRunWithErrorTrap<Param>(
        [](Param* pParam) {
            DWORD                  dwRestrictions = 0;
            CorInfoInitClassResult initClassResult;

#ifdef DEBUG
            const char* methodName;
            const char* className;
            methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);

            if (JitConfig.JitNoInline())
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
                goto _exit;
            }
#endif

            /* Try to get the code address/size for the method */

            CORINFO_METHOD_INFO methInfo;
            if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
                goto _exit;
            }

            bool forceInline;
            forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);

            pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);

            if (pParam->result->IsFailure())
            {
                assert(pParam->result->IsNever());
                goto _exit;
            }

            // Speculatively check if initClass() can be done.
            // If it can be done, we will try to inline the method.
            initClassResult =
                pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
                                                           pParam->exactContextHnd /* context */);

            if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
            {
                pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT);
                goto _exit;
            }

            // Given the EE the final say in whether to inline or not.
            // This should be last since for verifiable code, this can be expensive

            /* VM Inline check also ensures that the method is verifiable if needed */
            CorInfoInline vmResult;
            vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
                                                                  &dwRestrictions);

            if (vmResult == INLINE_FAIL)
            {
                pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
            }
            else if (vmResult == INLINE_NEVER)
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
            }

            if (pParam->result->IsFailure())
            {
                // Make sure not to report this one.  It was already reported by the VM.
                pParam->result->SetReported();
                goto _exit;
            }

            // check for unsupported inlining restrictions
            assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);

            if (dwRestrictions & INLINE_SAME_THIS)
            {
                GenTree* thisArg = pParam->call->AsCall()->gtCallThisArg->GetNode();
                assert(thisArg);

                if (!pParam->pThis->impIsThis(thisArg))
                {
                    pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
                    goto _exit;
                }
            }

            /* Get the method properties */

            CORINFO_CLASS_HANDLE clsHandle;
            clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
            unsigned clsAttr;
            clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);

            /* Get the return type */

            var_types fncRetType;
            fncRetType = pParam->call->TypeGet();

#ifdef DEBUG
            var_types fncRealRetType;
            fncRealRetType = JITtype2varType(methInfo.args.retType);

            assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
                   // <BUGNUM> VSW 288602 </BUGNUM>
                   // In case of IJW, we allow to assign a native pointer to a BYREF.
                   (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
                   (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
#endif

            // Allocate an InlineCandidateInfo structure,
            //
            // Or, reuse the existing GuardedDevirtualizationCandidateInfo,
            // which was pre-allocated to have extra room.
            //
            InlineCandidateInfo* pInfo;

            if (pParam->call->IsGuardedDevirtualizationCandidate())
            {
                pInfo = pParam->call->gtInlineCandidateInfo;
            }
            else
            {
                pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;

                // Null out bits we don't use when we're just inlining
                pInfo->guardedClassHandle  = nullptr;
                pInfo->guardedMethodHandle = nullptr;
                pInfo->stubAddr            = nullptr;
            }

            pInfo->methInfo                       = methInfo;
            pInfo->ilCallerHandle                 = pParam->pThis->info.compMethodHnd;
            pInfo->clsHandle                      = clsHandle;
            pInfo->exactContextHnd                = pParam->exactContextHnd;
            pInfo->retExpr                        = nullptr;
            pInfo->dwRestrictions                 = dwRestrictions;
            pInfo->preexistingSpillTemp           = BAD_VAR_NUM;
            pInfo->clsAttr                        = clsAttr;
            pInfo->methAttr                       = pParam->methAttr;
            pInfo->initClassResult                = initClassResult;
            pInfo->fncRetType                     = fncRetType;
            pInfo->exactContextNeedsRuntimeLookup = false;

            // Note exactContextNeedsRuntimeLookup is reset later on,
            // over in impMarkInlineCandidate.

            *(pParam->ppInlineCandidateInfo) = pInfo;

        _exit:;
        },
        &param);
    if (!success)
    {
        param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
    }
}
//------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about an inline candidate argument
//
// Arguments:
// pInlineInfo - inline info for the inline candidate
// curArgVal - tree for the caller actual argument value
// argNum - logical index of this argument
// inlineResult - result of ongoing inline evaluation
//
// Notes:
//
// Checks for various inline blocking conditions and makes notes in
// the inline info arg table about the properties of the actual. These
// properties are used later by impInlineFetchArg to determine how best to
// pass the argument into the inlinee.
void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
                                      GenTree*      curArgVal,
                                      unsigned      argNum,
                                      InlineResult* inlineResult)
{
    InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];

    // Save the original tree, with PUT_ARG and RET_EXPR.
    inlCurArgInfo->argNode = curArgVal;

    // Peel off any PUTARG_TYPE wrapper and resolve RET_EXPR chains so the
    // checks below examine the actual value-producing node.
    curArgVal = curArgVal->gtSkipPutArgType();
    curArgVal = curArgVal->gtRetExprVal();

    // MKREFANY (typed reference) arguments block inlining entirely.
    if (curArgVal->gtOper == GT_MKREFANY)
    {
        inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
        return;
    }

    GenTree*   lclVarTree;
    const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree);

    // Note byrefs to caller struct locals; these can often be optimized
    // away after substitution (see impInlineFetchArg).
    if (isAddressInLocal && varTypeIsStruct(lclVarTree))
    {
        inlCurArgInfo->argIsByRefToStructLocal = true;
#ifdef FEATURE_SIMD
        if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType)
        {
            pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
        }
#endif // FEATURE_SIMD
    }

    // Record effect flags: side effects other than global refs are tracked
    // separately from global refs themselves.
    if (curArgVal->gtFlags & GTF_ALL_EFFECT)
    {
        inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
        inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
    }

    if (curArgVal->gtOper == GT_LCL_VAR)
    {
        inlCurArgInfo->argIsLclVar = true;

        /* Remember the "original" argument number */
        INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;)
    }

    // Constants and local addresses are invariant and can be substituted
    // directly into the inlinee body.
    if ((curArgVal->OperKind() & GTK_CONST) || isAddressInLocal)
    {
        inlCurArgInfo->argIsInvariant = true;
        if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0))
        {
            // Abort inlining at this call site: a constant-null "this" would
            // fault in the inlinee.
            inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
            return;
        }
    }

    // If the arg is a local that is address-taken, we can't safely
    // directly substitute it into the inlinee.
    //
    // Previously we'd accomplish this by setting "argHasLdargaOp" but
    // that has a stronger meaning: that the arg value can change in
    // the method body. Using that flag prevents type propagation,
    // which is safe in this case.
    //
    // Instead mark the arg as having a caller local ref.
    if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
    {
        inlCurArgInfo->argHasCallerLocalRef = true;
    }

#ifdef DEBUG
    // Dump a summary of the properties recorded for this argument.
    if (verbose)
    {
        if (inlCurArgInfo->argIsThis)
        {
            printf("thisArg:");
        }
        else
        {
            printf("\nArgument #%u:", argNum);
        }
        if (inlCurArgInfo->argIsLclVar)
        {
            printf(" is a local var");
        }
        if (inlCurArgInfo->argIsInvariant)
        {
            printf(" is a constant");
        }
        if (inlCurArgInfo->argHasGlobRef)
        {
            printf(" has global refs");
        }
        if (inlCurArgInfo->argHasCallerLocalRef)
        {
            printf(" has caller local ref");
        }
        if (inlCurArgInfo->argHasSideEff)
        {
            printf(" has side effects");
        }
        if (inlCurArgInfo->argHasLdargaOp)
        {
            printf(" has ldarga effect");
        }
        if (inlCurArgInfo->argHasStargOp)
        {
            printf(" has starg effect");
        }
        if (inlCurArgInfo->argIsByRefToStructLocal)
        {
            printf(" is byref to a struct local");
        }
        printf("\n");
        gtDispTree(curArgVal);
        printf("\n");
    }
#endif
}
//------------------------------------------------------------------------
// impInlineInitVars: setup inline information for inlinee args and locals
//
// Arguments:
// pInlineInfo - inline info for the inline candidate
//
// Notes:
// This method primarily adds caller-supplied info to the inlArgInfo
// and sets up the lclVarInfo table.
//
// For args, the inlArgInfo records properties of the actual argument
// including the tree node that produces the arg value. This node is
// usually the tree node present at the call, but may also differ in
// various ways:
// - when the call arg is a GT_RET_EXPR, we search back through the ret
// expr chain for the actual node. Note this will either be the original
// call (which will be a failed inline by this point), or the return
// expression from some set of inlines.
// - when argument type casting is needed the necessary casts are added
// around the argument node.
// - if an argument can be simplified by folding then the node here is the
// folded value.
//
// The method may make observations that lead to marking this candidate as
// a failed inline. If this happens the initialization is abandoned immediately
// to try and reduce the jit time cost for a failed inline.
void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
{
    // Runs in the context of the inlining root compiler, not the inlinee.
    assert(!compIsForInlining());

    GenTreeCall*         call         = pInlineInfo->iciCall;
    CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
    unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
    InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
    InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
    InlineResult*        inlineResult = pInlineInfo->inlineResult;

    // Inlined methods always use the managed calling convention
    const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed);

    /* init the argument struct */
    memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));

    GenTreeCall::Use* thisArg = call->gtCallThisArg;
    unsigned          argCnt  = 0; // Count of the arguments

    assert((methInfo->args.hasThis()) == (thisArg != nullptr));

    // Slot 0 of inlArgInfo is reserved for "this" when present.
    if (thisArg != nullptr)
    {
        inlArgInfo[0].argIsThis = true;
        impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult);

        if (inlineResult->IsFailure())
        {
            return;
        }

        /* Increment the argument count */
        argCnt++;
    }

    /* Record some information about each of the arguments */
    bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;

    // The position of the generic type-context argument in the arg list is
    // target-convention dependent.
#if USER_ARGS_COME_LAST
    unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0;
#else  // USER_ARGS_COME_LAST
    unsigned typeCtxtArg = methInfo->args.totalILArgs();
#endif // USER_ARGS_COME_LAST

    for (GenTreeCall::Use& use : call->Args())
    {
        // Skip the return-buffer argument: it is not an IL-level argument.
        if (hasRetBuffArg && (&use == call->gtCallArgs))
        {
            continue;
        }

        // Ignore the type context argument; remember where it was and mark
        // it consumed so it is matched at most once.
        if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
        {
            pInlineInfo->typeContextArg = typeCtxtArg;
            typeCtxtArg                 = 0xFFFFFFFF;
            continue;
        }

        GenTree* actualArg = use.GetNode();
        impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);

        if (inlineResult->IsFailure())
        {
            return;
        }

        /* Increment the argument count */
        argCnt++;
    }

    /* Make sure we got the arg number right */
    assert(argCnt == methInfo->args.totalILArgs());

#ifdef FEATURE_SIMD
    bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
#endif // FEATURE_SIMD

    /* We have typeless opcodes, get type information from the signature */

    if (thisArg != nullptr)
    {
        lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
        lclVarInfo[0].lclHasLdlocaOp = false;

#ifdef FEATURE_SIMD
        // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
        // the inlining multiplier) for anything in that assembly.
        // But we only need to normalize it if it is a TYP_STRUCT
        // (which we need to do even if we have already set foundSIMDType).
        if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
        {
            foundSIMDType = true;
        }
#endif // FEATURE_SIMD

        // "this" is a byref for value classes, an object ref otherwise.
        var_types sigType         = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF;
        lclVarInfo[0].lclTypeInfo = sigType;

        GenTree* thisArgNode = thisArg->GetNode();

        assert(varTypeIsGC(thisArgNode->TypeGet()) ||     // "this" is managed
               ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesn't care
                (clsAttr & CORINFO_FLG_VALUECLASS)));

        if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType))
        {
            if (sigType == TYP_REF)
            {
                /* The argument cannot be bashed into a ref (see bug 750871) */
                inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
                return;
            }

            /* This can only happen with byrefs <-> ints/shorts */
            assert(sigType == TYP_BYREF);
            assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF));

            lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
        }
    }

    /* Init the types of the arguments and make sure the types
     * from the trees match the types in the signature */

    CORINFO_ARG_LIST_HANDLE argLst;
    argLst = methInfo->args.args;

    // Walk the signature in parallel with the recorded arg table; index 0 is
    // "this" (handled above), so start at 1 when a this-arg exists.
    unsigned i;
    for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
    {
        var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);

        lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);

#ifdef FEATURE_SIMD
        if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
        {
            // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
            // found a SIMD type, even if this may not be a type we recognize (the assumption is that
            // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
            foundSIMDType = true;
            if (sigType == TYP_STRUCT)
            {
                var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
                sigType              = structType;
            }
        }
#endif // FEATURE_SIMD

        lclVarInfo[i].lclTypeInfo    = sigType;
        lclVarInfo[i].lclHasLdlocaOp = false;

        /* Does the tree type match the signature type? */

        GenTree* inlArgNode = inlArgInfo[i].argNode;

        if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE))
        {
            assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType));
            assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType) &&
                   genTypeSize(inlArgNode->gtType) == genTypeSize(sigType));

            /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
               but in bad IL cases with caller-callee signature mismatches we can see other types.
               Intentionally reject cases with mismatches so the jit is more flexible when
               encountering bad IL. */

            bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
                                        (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
                                        (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));

            if (!isPlausibleTypeMatch)
            {
                inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
                return;
            }

            // pInlArgNode points at the slot to overwrite if we end up
            // replacing or retyping the argument node below.
            GenTree** pInlArgNode;
            if (inlArgNode->OperIs(GT_PUTARG_TYPE))
            {
                // There was a widening or narrowing cast.
                GenTreeUnOp* putArgType = inlArgNode->AsUnOp();
                pInlArgNode             = &putArgType->gtOp1;
                inlArgNode              = putArgType->gtOp1;
            }
            else
            {
                // The same size but different type of the arguments.
                pInlArgNode = &inlArgInfo[i].argNode;
            }

            /* Is it a narrowing or widening cast?
             * Widening casts are ok since the value computed is already
             * normalized to an int (on the IL stack) */
            if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
            {
                if (sigType == TYP_BYREF)
                {
                    lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
                }
                else if (inlArgNode->gtType == TYP_BYREF)
                {
                    assert(varTypeIsIntOrI(sigType));

                    /* If possible bash the BYREF to an int */
                    if (inlArgNode->IsLocalAddrExpr() != nullptr)
                    {
                        inlArgNode->gtType           = TYP_I_IMPL;
                        lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
                    }
                    else
                    {
                        /* Arguments 'int <- byref' cannot be changed */
                        inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
                        return;
                    }
                }
                else if (genTypeSize(sigType) < EA_PTRSIZE)
                {
                    // Narrowing cast.
                    if (inlArgNode->OperIs(GT_LCL_VAR))
                    {
                        const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum();
                        if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum))
                        {
                            // We don't need to insert a cast here as the variable
                            // was assigned a normalized value of the right type.
                            continue;
                        }
                    }

                    inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);

                    // The node is no longer a bare local after wrapping in a cast.
                    inlArgInfo[i].argIsLclVar = false;

                    // Try to fold the node in case we have constant arguments.
                    if (inlArgInfo[i].argIsInvariant)
                    {
                        inlArgNode = gtFoldExprConst(inlArgNode);
                        assert(inlArgNode->OperIsConst());
                    }
                    *pInlArgNode = inlArgNode;
                }
#ifdef TARGET_64BIT
                else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
                {
                    // This should only happen for int -> native int widening
                    inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);

                    inlArgInfo[i].argIsLclVar = false;

                    /* Try to fold the node in case we have constant arguments */
                    if (inlArgInfo[i].argIsInvariant)
                    {
                        inlArgNode = gtFoldExprConst(inlArgNode);
                        assert(inlArgNode->OperIsConst());
                    }
                    *pInlArgNode = inlArgNode;
                }
#endif // TARGET_64BIT
            }
        }
    }

    /* Init the types of the local variables */

    CORINFO_ARG_LIST_HANDLE localsSig;
    localsSig = methInfo->locals.args;

    // Locals are stored in lclVarInfo after the args, hence the i + argCnt
    // indexing throughout this loop.
    for (i = 0; i < methInfo->locals.numArgs; i++)
    {
        bool      isPinned;
        var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);

        lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
        lclVarInfo[i + argCnt].lclTypeInfo    = type;

        if (varTypeIsGC(type))
        {
            if (isPinned)
            {
                JITDUMP("Inlinee local #%02u is pinned\n", i);
                lclVarInfo[i + argCnt].lclIsPinned = true;

                // Pinned locals may cause inlines to fail.
                inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
                if (inlineResult->IsFailure())
                {
                    return;
                }
            }

            pInlineInfo->numberOfGcRefLocals++;
        }
        else if (isPinned)
        {
            JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i);
        }

        lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);

        // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
        // out on the inline.
        if (type == TYP_STRUCT)
        {
            CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
            DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
            if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
            {
                inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
                if (inlineResult->IsFailure())
                {
                    return;
                }

                // Do further notification in the case where the call site is rare; some policies do
                // not track the relative hotness of call sites for "always" inline cases.
                if (pInlineInfo->iciBlock->isRunRarely())
                {
                    inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
                    if (inlineResult->IsFailure())
                    {
                        return;
                    }
                }
            }
        }

        localsSig = info.compCompHnd->getArgNext(localsSig);

#ifdef FEATURE_SIMD
        if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
        {
            foundSIMDType = true;
            if (supportSIMDTypes() && type == TYP_STRUCT)
            {
                var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
                lclVarInfo[i + argCnt].lclTypeInfo = structType;
            }
        }
#endif // FEATURE_SIMD
    }

#ifdef FEATURE_SIMD
    // Also consider the return type when deciding whether SIMD is involved.
    if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
    {
        foundSIMDType = true;
    }
    pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// impInlineFetchLocal: get a local var that represents an inlinee local
//
// Arguments:
// lclNum -- number of the inlinee local
// reason -- debug string describing purpose of the local var
//
// Returns:
// Number of the local to use
//
// Notes:
// This method is invoked only for locals actually used in the
// inlinee body.
//
// Allocates a new temp if necessary, and copies key properties
// over from the inlinee local var info.
unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
{
    assert(compIsForInlining());

    // Reuse a previously-allocated temp for this inlinee local if one exists.
    unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];

    if (tmpNum == BAD_VAR_NUM)
    {
        // Inlinee locals are stored after the args in lclVarInfo.
        const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
        const var_types      lclTyp       = inlineeLocal.lclTypeInfo;

        // The lifetime of this local might span multiple BBs.
        // So it is a long lifetime local.
        impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));

        // Copy over key info
        lvaTable[tmpNum].lvType                 = lclTyp;
        lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
        lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
        lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
        lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;

        // Copy over class handle for ref types. Note this may be a
        // shared type -- someday perhaps we can get the exact
        // signature and pass in a more precise type.
        if (lclTyp == TYP_REF)
        {
            assert(lvaTable[tmpNum].lvSingleDef == 0);

            // A local stored once and never address-taken is a single-def temp,
            // which enables better type propagation.
            lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp;
            if (lvaTable[tmpNum].lvSingleDef)
            {
                JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
            }

            lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
        }

        if (inlineeLocal.lclVerTypeInfo.IsStruct())
        {
            if (varTypeIsStruct(lclTyp))
            {
                lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
            }
            else
            {
                // This is a wrapped primitive. Make sure the verstate knows that
                lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
            }
        }

#ifdef DEBUG
        // Sanity check that we're properly prepared for gc ref locals.
        if (varTypeIsGC(lclTyp))
        {
            // Since there are gc locals we should have seen them earlier
            // and if there was a return value, set up the spill temp.
            assert(impInlineInfo->HasGcRefLocals());
            assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
        }
        else
        {
            // Make sure all pinned locals count as gc refs.
            assert(!inlineeLocal.lclIsPinned);
        }
#endif // DEBUG
    }

    return tmpNum;
}
//------------------------------------------------------------------------
// impInlineFetchArg: return tree node for argument value in an inlinee
//
// Arguments:
// lclNum -- argument number in inlinee IL
// inlArgInfo -- argument info for inlinee
// lclVarInfo -- var info for inlinee
//
// Returns:
// Tree for the argument's value. Often an inlinee-scoped temp
// GT_LCL_VAR but can be other tree kinds, if the argument
// expression from the caller can be directly substituted into the
// inlinee body.
//
// Notes:
// Must be used only for arguments -- use impInlineFetchLocal for
// inlinee locals.
//
// Direct substitution is performed when the formal argument cannot
// change value in the inlinee body (no starg or ldarga), and the
// actual argument expression's value cannot be changed if it is
// substituted it into the inlinee body.
//
// Even if an inlinee-scoped temp is returned here, it may later be
// "bashed" to a caller-supplied tree when arguments are actually
// passed (see fgInlinePrependStatements). Bashing can happen if
// the argument ends up being single use and other conditions are
// met. So the contents of the tree returned here may not end up
// being the ones ultimately used for the argument.
//
// This method will side effect inlArgInfo. It should only be called
// for actual uses of the argument in the inlinee.
GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
{
    // Cache the relevant arg and lcl info for this argument.
    // We will modify argInfo but not lclVarInfo.
    InlArgInfo&          argInfo          = inlArgInfo[lclNum];
    const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
    const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
    const var_types      lclTyp           = lclInfo.lclTypeInfo;
    GenTree*             op1              = nullptr;

    // Look past PUTARG_TYPE wrappers and RET_EXPR chains to the underlying
    // value-producing node.
    GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal();

    if (argInfo.argIsInvariant && !argCanBeModified)
    {
        // Directly substitute constants or addresses of locals
        //
        // Clone the constant. Note that we cannot directly use
        // argNode in the trees even if !argInfo.argIsUsed as this
        // would introduce aliasing between inlArgInfo[].argNode and
        // impInlineExpr. Then gtFoldExpr() could change it, causing
        // further references to the argument working off of the
        // bashed copy.
        op1 = gtCloneExpr(argNode);
        PREFIX_ASSUME(op1 != nullptr);
        argInfo.argTmpNum = BAD_VAR_NUM;

        // We may need to retype to ensure we match the callee's view of the type.
        // Otherwise callee-pass throughs of arguments can create return type
        // mismatches that block inlining.
        //
        // Note argument type mismatches that prevent inlining should
        // have been caught in impInlineInitVars.
        if (op1->TypeGet() != lclTyp)
        {
            op1->gtType = genActualType(lclTyp);
        }
    }
    else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
    {
        // Directly substitute unaliased caller locals for args that cannot be modified
        //
        // Use the caller-supplied node if this is the first use.
        op1               = argNode;
        argInfo.argTmpNum = op1->AsLclVarCommon()->GetLclNum();

        // Use an equivalent copy if this is the second or subsequent
        // use, or if we need to retype.
        //
        // Note argument type mismatches that prevent inlining should
        // have been caught in impInlineInitVars.
        if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
        {
            assert(op1->gtOper == GT_LCL_VAR);
            assert(lclNum == op1->AsLclVar()->gtLclILoffs);

            // Normalize-on-load locals keep their small type; others widen to
            // the actual (stack-normalized) type.
            var_types newTyp = lclTyp;

            if (!lvaTable[op1->AsLclVarCommon()->GetLclNum()].lvNormalizeOnLoad())
            {
                newTyp = genActualType(lclTyp);
            }

            // Create a new lcl var node - remember the argument lclNum
            op1 = gtNewLclvNode(op1->AsLclVarCommon()->GetLclNum(), newTyp DEBUGARG(op1->AsLclVar()->gtLclILoffs));
        }
    }
    else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
    {
        /* Argument is a by-ref address to a struct, a normed struct, or its field.
           In these cases, don't spill the byref to a local, simply clone the tree and use it.
           This way we will increase the chance for this byref to be optimized away by
           a subsequent "dereference" operation.

           From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
           (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
           For example, if the caller is:
                ldloca.s   V_1  // V_1 is a local struct
                call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
           and the callee being inlined has:
                .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
                    ldarga.s   ptrToInts
                    call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
           then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
           soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
        */
        assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL);
        op1 = gtCloneExpr(argNode);
    }
    else
    {
        /* Argument is a complex expression - it must be evaluated into a temp */

        if (argInfo.argHasTmp)
        {
            assert(argInfo.argIsUsed);
            assert(argInfo.argTmpNum < lvaCount);

            /* Create a new lcl var node - remember the argument lclNum */
            op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));

            /* This is the second or later use of the this argument,
            so we have to use the temp (instead of the actual arg) */
            argInfo.argBashTmpNode = nullptr;
        }
        else
        {
            /* First time use */
            assert(!argInfo.argIsUsed);

            /* Reserve a temp for the expression.
            * Use a large size node as we may change it later */

            const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));

            lvaTable[tmpNum].lvType = lclTyp;

            // For ref types, determine the type of the temp.
            if (lclTyp == TYP_REF)
            {
                if (!argCanBeModified)
                {
                    // If the arg can't be modified in the method
                    // body, use the type of the value, if
                    // known. Otherwise, use the declared type.
                    assert(lvaTable[tmpNum].lvSingleDef == 0);
                    lvaTable[tmpNum].lvSingleDef = 1;
                    JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
                    lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
                }
                else
                {
                    // Arg might be modified, use the declared type of
                    // the argument.
                    lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
                }
            }

            assert(lvaTable[tmpNum].lvAddrExposed == 0);
            if (argInfo.argHasLdargaOp)
            {
                lvaTable[tmpNum].lvHasLdAddrOp = 1;
            }

            if (lclInfo.lclVerTypeInfo.IsStruct())
            {
                if (varTypeIsStruct(lclTyp))
                {
                    lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
                    if (info.compIsVarArgs)
                    {
                        lvaSetStructUsedAsVarArg(tmpNum);
                    }
                }
                else
                {
                    // This is a wrapped primitive. Make sure the verstate knows that
                    lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
                }
            }

            argInfo.argHasTmp = true;
            argInfo.argTmpNum = tmpNum;

            // If we require strict exception order, then arguments must
            // be evaluated in sequence before the body of the inlined method.
            // So we need to evaluate them to a temp.
            // Also, if arguments have global or local references, we need to
            // evaluate them to a temp before the inlined body as the
            // inlined body may be modifying the global ref.
            // TODO-1stClassStructs: We currently do not reuse an existing lclVar
            // if it is a struct, because it requires some additional handling.

            if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
                !argInfo.argHasCallerLocalRef)
            {
                /* Get a *LARGE* LCL_VAR node */
                op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum));

                /* Record op1 as the very first use of this argument.
                If there are no further uses of the arg, we may be
                able to use the actual arg node instead of the temp.
                If we do see any further uses, we will clear this. */
                argInfo.argBashTmpNode = op1;
            }
            else
            {
                /* Get a small LCL_VAR node */
                op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));

                /* No bashing of this argument */
                argInfo.argBashTmpNode = nullptr;
            }
        }
    }

    // Mark this argument as used.
    argInfo.argIsUsed = true;

    return op1;
}
/******************************************************************************
Is this the original "this" argument to the call being inlined?
Note that we do not inline methods with "starg 0", and so we do not need to
worry about it.
*/
BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
{
    assert(compIsForInlining());

    // Anything other than a plain local-variable use cannot be "this".
    if (tree->gtOper != GT_LCL_VAR)
    {
        return FALSE;
    }

    // "this" is always recorded in arg slot 0; compare against the local
    // number noted there by impInlineFetchArg.
    return tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum;
}
//-----------------------------------------------------------------------------
// impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in
// the inlinee can guarantee that the "this" pointer is non-NULL.
//
// Arguments:
// additionalTree - a tree to check for side effects
// additionalCallArgs - a list of call args to check for side effects
// dereferencedAddress - address expression being dereferenced
// inlArgInfo - inlinee argument information
//
// Notes:
// If we haven't hit a branch or a side effect, and we are dereferencing
// from 'this' to access a field or make GTF_CALL_NULLCHECK call,
// then we can avoid a separate null pointer check.
//
// The importer stack and current statement list are searched for side effects.
// Trees that have been popped of the stack but haven't been appended to the
// statement list and have to be checked for side effects may be provided via
// additionalTree and additionalCallArgs.
//
BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree*          additionalTree,
                                                                  GenTreeCall::Use* additionalCallArgs,
                                                                  GenTree*          dereferencedAddress,
                                                                  InlArgInfo*       inlArgInfo)
{
    assert(compIsForInlining());
    assert(opts.OptEnabled(CLFLG_INLINING));

    BasicBlock* block = compCurBB;

    // Only the inlinee's first block can guarantee the deref executes before
    // any side effect: later blocks are only reached after a branch.
    if (block != fgFirstBB)
    {
        return FALSE;
    }

    // The dereferenced address must be the original "this" argument.
    if (!impInlineIsThis(dereferencedAddress, inlArgInfo))
    {
        return FALSE;
    }

    // Check trees popped from the stack but not yet appended to the
    // statement list for globally visible side effects.
    if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags))
    {
        return FALSE;
    }

    for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs))
    {
        if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags))
        {
            // Fixed: was lowercase `false`; use FALSE for consistency with
            // the BOOL return convention used by every other exit here.
            return FALSE;
        }
    }

    // Check statements already appended during importation of the inlinee.
    for (Statement* stmt : StatementList(impStmtList))
    {
        GenTree* expr = stmt->GetRootNode();
        if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
        {
            return FALSE;
        }
    }

    // Finally check everything still pending on the importer's eval stack.
    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
        if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
        {
            return FALSE;
        }
    }

    return TRUE;
}
//------------------------------------------------------------------------
// impMarkInlineCandidate: determine if this call can be subsequently inlined
//
// Arguments:
// callNode -- call under scrutiny
// exactContextHnd -- context handle for inlining
// exactContextNeedsRuntimeLookup -- true if context required runtime lookup
// callInfo -- call info from VM
//
// Notes:
// Mostly a wrapper for impMarkInlineCandidateHelper that also undoes
// guarded devirtualization for virtual calls where the method we'd
// devirtualize to cannot be inlined.
void Compiler::impMarkInlineCandidate(GenTree*               callNode,
                                      CORINFO_CONTEXT_HANDLE exactContextHnd,
                                      bool                   exactContextNeedsRuntimeLookup,
                                      CORINFO_CALL_INFO*     callInfo)
{
    GenTreeCall* const candidate = callNode->AsCall();

    // Run the main candidacy assessment.
    impMarkInlineCandidateHelper(candidate, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);

    // The only follow-up work is for guarded devirtualization candidates
    // whose devirtualized target turned out not to be inlinable.
    const bool mustRevokeGuardedDevirt =
        !candidate->IsInlineCandidate() && candidate->IsGuardedDevirtualizationCandidate();

    if (!mustRevokeGuardedDevirt)
    {
        return;
    }

    // Guarded devirtualization without inlining rarely pays for the extra
    // jit time and code size, so withdraw the candidacy.
    //
    // TODO: it is possibly interesting to allow this, but requires
    // fixes elsewhere too...
    JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n",
            dspTreeID(candidate));

    candidate->ClearGuardedDevirtualizationCandidate();

    // The stub address shares a union with the candidate info, so it must be
    // restored now that the candidate info is gone.
    if (candidate->IsVirtualStub())
    {
        JITDUMP("Restoring stub addr %p from guarded devirt candidate info\n",
                dspPtr(candidate->gtGuardedDevirtualizationCandidateInfo->stubAddr));
        candidate->gtStubCallStubAddr = candidate->gtGuardedDevirtualizationCandidateInfo->stubAddr;
    }
}
//------------------------------------------------------------------------
// impMarkInlineCandidateHelper: determine if this call can be subsequently
// inlined
//
// Arguments:
// callNode -- call under scrutiny
// exactContextHnd -- context handle for inlining
// exactContextNeedsRuntimeLookup -- true if context required runtime lookup
// callInfo -- call info from VM
//
// Notes:
// If callNode is an inline candidate, this method sets the flag
// GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
// filled in the associated InlineCandidateInfo.
//
// If callNode is not an inline candidate, and the reason is
// something that is inherent to the method being called, the
// method may be marked as "noinline" to short-circuit any
// future assessments of calls to this method.
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo)
{
// Let the strategy know there's another call
impInlineRoot()->m_inlineStrategy->NoteCall();
if (!opts.OptEnabled(CLFLG_INLINING))
{
/* XXX Mon 8/18/2008
* This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
* calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
* CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
* figure out why we did not set MAXOPT for this compile.
*/
assert(!compIsForInlining());
return;
}
if (compIsForImportOnly())
{
// Don't bother creating the inline candidate during verification.
// Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
// that leads to the creation of multiple instances of Compiler.
return;
}
InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
// Don't inline if not optimizing root method
if (opts.compDbgCode)
{
inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
return;
}
// Don't inline if inlining into this method is disabled.
if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled())
{
inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
return;
}
// Don't inline into callers that use the NextCallReturnAddress intrinsic.
if (info.compHasNextCallRetAddr)
{
inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR);
return;
}
// Inlining candidate determination needs to honor only IL tail prefix.
// Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
if (call->IsTailPrefixedCall())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
return;
}
// Tail recursion elimination takes precedence over inlining.
// TODO: We may want to do some of the additional checks from fgMorphCall
// here to reduce the chance we don't inline a call that won't be optimized
// as a fast tail call or turned into a loop.
if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
return;
}
if (call->IsVirtual())
{
// Allow guarded devirt calls to be treated as inline candidates,
// but reject all other virtual calls.
if (!call->IsGuardedDevirtualizationCandidate())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
return;
}
}
/* Ignore helper calls */
if (call->gtCallType == CT_HELPER)
{
assert(!call->IsGuardedDevirtualizationCandidate());
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
return;
}
/* Ignore indirect calls */
if (call->gtCallType == CT_INDIRECT)
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
return;
}
/* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
* restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
* inlining in throw blocks. I should consider the same thing for catch and filter regions. */
CORINFO_METHOD_HANDLE fncHandle;
unsigned methAttr;
if (call->IsGuardedDevirtualizationCandidate())
{
fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle;
methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
}
else
{
fncHandle = call->gtCallMethHnd;
// Reuse method flags from the original callInfo if possible
if (fncHandle == callInfo->hMethod)
{
methAttr = callInfo->methodFlags;
}
else
{
methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
}
}
#ifdef DEBUG
if (compStressCompile(STRESS_FORCE_INLINE, 0))
{
methAttr |= CORINFO_FLG_FORCEINLINE;
}
#endif
// Check for COMPlus_AggressiveInlining
if (compDoAggressiveInlining)
{
methAttr |= CORINFO_FLG_FORCEINLINE;
}
if (!(methAttr & CORINFO_FLG_FORCEINLINE))
{
/* Don't bother inline blocks that are in the filter region */
if (bbInCatchHandlerILRange(compCurBB))
{
#ifdef DEBUG
if (verbose)
{
printf("\nWill not inline blocks that are in the catch handler region\n");
}
#endif
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
return;
}
if (bbInFilterILRange(compCurBB))
{
#ifdef DEBUG
if (verbose)
{
printf("\nWill not inline blocks that are in the filter region\n");
}
#endif
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
return;
}
}
/* Check if we tried to inline this method before */
if (methAttr & CORINFO_FLG_DONT_INLINE)
{
inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
return;
}
/* Cannot inline synchronized methods */
if (methAttr & CORINFO_FLG_SYNCH)
{
inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
return;
}
/* Check legality of PInvoke callsite (for inlining of marshalling code) */
if (methAttr & CORINFO_FLG_PINVOKE)
{
// See comment in impCheckForPInvokeCall
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
if (!impCanPInvokeInlineCallSite(block))
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH);
return;
}
}
InlineCandidateInfo* inlineCandidateInfo = nullptr;
impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
if (inlineResult.IsFailure())
{
return;
}
// The old value should be null OR this call should be a guarded devirtualization candidate.
assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate());
// The new value should not be null.
assert(inlineCandidateInfo != nullptr);
inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
call->gtInlineCandidateInfo = inlineCandidateInfo;
// Mark the call node as inline candidate.
call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
// Let the strategy know there's another candidate.
impInlineRoot()->m_inlineStrategy->NoteCandidate();
// Since we're not actually inlining yet, and this call site is
// still just an inline candidate, there's nothing to report.
inlineResult.SetReported();
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions
bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName)
{
#if defined(TARGET_XARCH)
    // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
    // instructions to directly compute round/ceiling/floor.
    //
    // TODO: Because the x86 backend only targets SSE for floating-point code,
    //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
    //       implemented those intrinsics as x87 instructions). If this poses
    //       a CQ problem, it may be necessary to change the implementation of
    //       the helper calls to decrease call overhead or switch back to the
    //       x87 instructions. This is tracked by #7097.
    if ((intrinsicName == NI_System_Math_Sqrt) || (intrinsicName == NI_System_Math_Abs))
    {
        // Always expandable via SSE2.
        return true;
    }

    if ((intrinsicName == NI_System_Math_Round) || (intrinsicName == NI_System_Math_Ceiling) ||
        (intrinsicName == NI_System_Math_Floor))
    {
        // Only expandable when the rounding instructions (SSE4.1) are available.
        return compOpportunisticallyDependsOn(InstructionSet_SSE41);
    }

    return false;
#elif defined(TARGET_ARM64)
    // ARM64 has dedicated instructions for all five of these.
    switch (intrinsicName)
    {
        case NI_System_Math_Sqrt:
        case NI_System_Math_Abs:
        case NI_System_Math_Round:
        case NI_System_Math_Floor:
        case NI_System_Math_Ceiling:
            return true;

        default:
            return false;
    }
#elif defined(TARGET_ARM)
    // ARM32 can expand sqrt/abs/round directly.
    switch (intrinsicName)
    {
        case NI_System_Math_Sqrt:
        case NI_System_Math_Abs:
        case NI_System_Math_Round:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of logic is not implemented for other arch.
    // The reason for returning true is that on all other arch the only intrinsic
    // enabled are target intrinsics.
    return true;
#endif
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.
bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName)
{
    // Currently a math intrinsic is either expanded to target-specific
    // instructions or implemented by a System.Math call -- there is no third
    // option. In the future, if we turn to implementing some of them with
    // helper calls, this predicate needs to be revisited.
    const bool expandedByTarget = IsTargetIntrinsic(intrinsicName);
    return !expandedByTarget;
}
// Returns true if `intrinsicName` names one of the System.Math/System.MathF
// methods the jit recognizes as a math intrinsic (whether it ends up expanded
// to instructions or kept as a user call).
bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName)
{
    bool isMathIntrinsic;

    switch (intrinsicName)
    {
        case NI_System_Math_Sin:
        case NI_System_Math_Cbrt:
        case NI_System_Math_Sqrt:
        case NI_System_Math_Abs:
        case NI_System_Math_Cos:
        case NI_System_Math_Round:
        case NI_System_Math_Cosh:
        case NI_System_Math_Sinh:
        case NI_System_Math_Tan:
        case NI_System_Math_Tanh:
        case NI_System_Math_Asin:
        case NI_System_Math_Asinh:
        case NI_System_Math_Acos:
        case NI_System_Math_Acosh:
        case NI_System_Math_Atan:
        case NI_System_Math_Atan2:
        case NI_System_Math_Atanh:
        case NI_System_Math_Log10:
        case NI_System_Math_Pow:
        case NI_System_Math_Exp:
        case NI_System_Math_Ceiling:
        case NI_System_Math_Floor:
            isMathIntrinsic = true;
            break;

        default:
            isMathIntrinsic = false;
            break;
    }

    return isMathIntrinsic;
}
// Returns true if `tree` is a GT_INTRINSIC node whose intrinsic id is one of
// the recognized System.Math methods.
bool Compiler::IsMathIntrinsic(GenTree* tree)
{
    if (tree->OperGet() != GT_INTRINSIC)
    {
        return false;
    }

    return IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName);
}
//------------------------------------------------------------------------
// impDevirtualizeCall: Attempt to change a virtual vtable call into a
//   normal call
//
// Arguments:
//     call -- the call node to examine/modify
//     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
//     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
//     pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
//     pExactContextHandle -- [OUT] updated context handle iff call devirtualized
//     isLateDevirtualization -- if devirtualization is happening after importation
//     isExplicitTailCall -- [IN] true if we plan on using an explicit tail call
//     ilOffset -- IL offset of the call
//
// Notes:
//     Virtual calls in IL will always "invoke" the base class method.
//
//     This transformation looks for evidence that the type of 'this'
//     in the call is exactly known, is a final class or would invoke
//     a final method, and if that and other safety checks pan out,
//     modifies the call and the call info to create a direct call.
//
//     This transformation is initially done in the importer and not
//     in some subsequent optimization pass because we want it to be
//     upstream of inline candidate identification.
//
//     However, later phases may supply improved type information that
//     can enable further devirtualization. We currently reinvoke this
//     code after inlining, if the return value of the inlined call is
//     the 'this obj' of a subsequent virtual call.
//
//     If devirtualization succeeds and the call's this object is the
//     result of a box, the jit will ask the EE for the unboxed entry
//     point. If this exists, the jit will see if it can rework the box
//     to instead make a local copy. If that is doable, the call is
//     updated to invoke the unboxed entry on the local copy.
//
//     When guarded devirtualization is enabled, this method will mark
//     calls as guarded devirtualization candidates, if the type of `this`
//     is not exactly known, and there is a plausible guess for the type.
void Compiler::impDevirtualizeCall(GenTreeCall*            call,
                                   CORINFO_METHOD_HANDLE*  method,
                                   unsigned*               methodFlags,
                                   CORINFO_CONTEXT_HANDLE* pContextHandle,
                                   CORINFO_CONTEXT_HANDLE* pExactContextHandle,
                                   bool                    isLateDevirtualization,
                                   bool                    isExplicitTailCall,
                                   IL_OFFSETX              ilOffset)
{
    assert(call != nullptr);
    assert(method != nullptr);
    assert(methodFlags != nullptr);
    assert(pContextHandle != nullptr);

    // This should be a virtual vtable or virtual stub call.
    assert(call->IsVirtual());

    // Possibly instrument, if not optimizing.
    //
    if (opts.OptimizationDisabled() && (call->gtCallType != CT_INDIRECT))
    {
        // During importation, optionally flag this block as one that
        // contains calls requiring class profiling. Ideally perhaps
        // we'd just keep track of the calls themselves, so we don't
        // have to search for them later.
        //
        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) &&
            (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization)
        {
            JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call),
                    compCurBB->bbNum);
            ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo;

            // Record some info needed for the class profiling probe.
            //
            pInfo->ilOffset   = ilOffset;
            pInfo->probeIndex = info.compClassProbeCount++;
            pInfo->stubAddr   = call->gtStubCallStubAddr;

            // note this overwrites gtCallStubAddr, so it needs to be undone
            // during the instrumentation phase, or we won't generate proper
            // code for vsd calls.
            //
            call->gtClassProfileCandidateInfo = pInfo;

            // Flag block as needing scrutiny
            //
            compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE;
        }

        return;
    }

#if defined(DEBUG)
    // Bail if devirt is disabled.
    if (JitConfig.JitEnableDevirtualization() == 0)
    {
        return;
    }

    // Optionally, print info on devirtualization
    Compiler* const rootCompiler = impInlineRoot();
    const bool      doPrint      = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName,
                                                                          rootCompiler->info.compClassName,
                                                                          &rootCompiler->info.compMethodInfo->args);
#endif // DEBUG

    // Fetch information about the virtual method we're calling.
    CORINFO_METHOD_HANDLE baseMethod        = *method;
    unsigned              baseMethodAttribs = *methodFlags;

    if (baseMethodAttribs == 0)
    {
        // For late devirt we may not have method attributes, so fetch them.
        baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
    }
    else
    {
#if defined(DEBUG)
        // Validate that callInfo has up to date method flags
        const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);

        // All the base method attributes should agree, save that
        // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
        // because of concurrent jitting activity.
        //
        // Note we don't look at this particular flag bit below, and
        // later on (if we do try and inline) we will rediscover why
        // the method can't be inlined, so there's no danger here in
        // seeing this particular flag bit in different states between
        // the cached and fresh values.
        if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
        {
            assert(!"mismatched method attributes");
        }
#endif // DEBUG
    }

    // In R2R mode, we might see virtual stub calls to
    // non-virtuals. For instance cases where the non-virtual method
    // is in a different assembly but is called via CALLVIRT. For
    // version resilience we must allow for the fact that the method
    // might become virtual in some update.
    //
    // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
    // regular call+nullcheck upstream, so we won't reach this
    // point.
    if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
    {
        assert(call->IsVirtualStub());
        assert(opts.IsReadyToRun());
        JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
        return;
    }

    // See what we know about the type of 'this' in the call.
    GenTree*             thisObj      = call->gtCallThisArg->GetNode()->gtEffectiveVal(false);
    GenTree*             actualThisObj = nullptr;
    bool                 isExact      = false;
    bool                 objIsNonNull = false;
    CORINFO_CLASS_HANDLE objClass     = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);

    // See if we have special knowledge that can get us a type or a better type.
    if ((objClass == nullptr) || !isExact)
    {
        // Walk back through any return expression placeholders
        actualThisObj = thisObj->gtRetExprVal();

        // See if we landed on a call to a special intrinsic method
        if (actualThisObj->IsCall())
        {
            GenTreeCall* thisObjCall = actualThisObj->AsCall();
            if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
            {
                assert(thisObjCall->gtCallType == CT_USER_FUNC);
                CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
                CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
                if (specialObjClass != nullptr)
                {
                    objClass     = specialObjClass;
                    isExact      = true;
                    objIsNonNull = true;
                }
            }
        }
    }

    // Bail if we know nothing.
    if (objClass == nullptr)
    {
        JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
        return;
    }

    // Fetch information about the class that introduced the virtual method.
    CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
    const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);

    // Is the call an interface call?
    const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;

    // If the objClass is sealed (final), then we may be able to devirtualize.
    const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
    const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;

#if defined(DEBUG)
    const char* callKind       = isInterface ? "interface" : "virtual";
    const char* objClassNote   = "[?]";
    const char* objClassName   = "?objClass";
    const char* baseClassName  = "?baseClass";
    const char* baseMethodName = "?baseMethod";

    if (verbose || doPrint)
    {
        objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
        objClassName   = info.compCompHnd->getClassName(objClass);
        baseClassName  = info.compCompHnd->getClassName(baseClass);
        baseMethodName = eeGetMethodName(baseMethod, nullptr);

        if (verbose)
        {
            printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
                   " class for 'this' is %s%s (attrib %08x)\n"
                   " base method is %s::%s\n",
                   callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
        }
    }
#endif // defined(DEBUG)

    // See if the jit's best type for `obj` is an interface.
    // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
    //   IL_021d:  ldloc.0
    //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
    //
    // If so, we can't devirtualize, but we may be able to do guarded devirtualization.
    //
    if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
    {
        // Don't try guarded devirtualization when we're doing late devirtualization.
        //
        if (isLateDevirtualization)
        {
            JITDUMP("No guarded devirt during late devirtualization\n");
            return;
        }

        JITDUMP("Considering guarded devirt...\n");

        // See if the runtime can provide a class to guess for.
        //
        const unsigned interfaceLikelihoodThreshold = 25;
        unsigned       likelihood                   = 0;
        unsigned       numberOfClasses              = 0;

        CORINFO_CLASS_HANDLE likelyClass =
            info.compCompHnd->getLikelyClass(info.compMethodHnd, baseClass, ilOffset, &likelihood, &numberOfClasses);

        if (likelyClass == NO_CLASS_HANDLE)
        {
            JITDUMP("No likely implementor of interface %p (%s), sorry\n", dspPtr(objClass), objClassName);
            return;
        }
        else
        {
            JITDUMP("Likely implementor of interface %p (%s) is %p (%s) [likelihood:%u classes seen:%u]\n",
                    dspPtr(objClass), objClassName, likelyClass, eeGetClassName(likelyClass), likelihood,
                    numberOfClasses);
        }

        // Todo: a more advanced heuristic using likelihood, number of
        // classes, and the profile count for this block.
        //
        // For now we will guess if the likelihood is 25% or more, as studies
        // have shown this should pay off for interface calls.
        //
        if (likelihood < interfaceLikelihoodThreshold)
        {
            JITDUMP("Not guessing for class; likelihood is below interface call threshold %u\n",
                    interfaceLikelihoodThreshold);
            return;
        }

        // Ask the runtime to determine the method that would be called based on the likely type.
        //
        CORINFO_DEVIRTUALIZATION_INFO dvInfo;
        dvInfo.virtualMethod = baseMethod;
        dvInfo.objClass      = likelyClass;
        dvInfo.context       = *pContextHandle;

        bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo);

        if (!canResolve)
        {
            JITDUMP("Can't figure out which method would be invoked, sorry\n");
            return;
        }

        CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod;
        JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr));

        // Some of these may be redundant
        //
        DWORD likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod);
        DWORD likelyClassAttribs  = info.compCompHnd->getClassAttribs(likelyClass);

        // Try guarded devirtualization.
        //
        addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs,
                                            likelihood);
        return;
    }

    // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch.
    // It may or may not know enough to devirtualize...
    if (isInterface)
    {
        assert(call->IsVirtualStub());
        JITDUMP("--- base class is interface\n");
    }

    // Fetch the method that would be called based on the declared type of 'this',
    // and prepare to fetch the method attributes.
    //
    CORINFO_DEVIRTUALIZATION_INFO dvInfo;
    dvInfo.virtualMethod = baseMethod;
    dvInfo.objClass      = objClass;
    dvInfo.context       = *pContextHandle;

    info.compCompHnd->resolveVirtualMethod(&dvInfo);

    CORINFO_METHOD_HANDLE  derivedMethod = dvInfo.devirtualizedMethod;
    CORINFO_CONTEXT_HANDLE exactContext  = dvInfo.exactContext;
    CORINFO_CLASS_HANDLE   derivedClass  = NO_CLASS_HANDLE;

    if (exactContext != nullptr)
    {
        // We currently expect the context to always be a class context.
        assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
        derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK);
    }

    DWORD derivedMethodAttribs = 0;
    bool  derivedMethodIsFinal = false;
    bool  canDevirtualize      = false;

#if defined(DEBUG)
    const char* derivedClassName  = "?derivedClass";
    const char* derivedMethodName = "?derivedMethod";
    const char* note              = "inexact or not final";
#endif

    // If we failed to get a method handle, we can't directly devirtualize.
    //
    // This can happen when prejitting, if the devirtualization crosses
    // servicing bubble boundaries, or if objClass is a shared class.
    //
    if (derivedMethod == nullptr)
    {
        JITDUMP("--- no derived method\n");
    }
    else
    {
        // Fetch method attributes to see if method is marked final.
        derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
        derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);

#if defined(DEBUG)
        if (isExact)
        {
            note = "exact";
        }
        else if (objClassIsFinal)
        {
            note = "final class";
        }
        else if (derivedMethodIsFinal)
        {
            note = "final method";
        }

        if (verbose || doPrint)
        {
            derivedMethodName = eeGetMethodName(derivedMethod, nullptr);
            derivedClassName  = eeGetClassName(derivedClass);
            if (verbose)
            {
                printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
                gtDispTree(call);
            }
        }
#endif // defined(DEBUG)

        // Devirtualization is safe when the dispatch target cannot change at
        // runtime: exact type, final (sealed) class, or (for non-interface
        // calls) final method.
        canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal);
    }

    // We still might be able to do a guarded devirtualization.
    // Note the call might be an interface call or a virtual call.
    //
    if (!canDevirtualize)
    {
        JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final");

        // Don't try guarded devirtualization if we're doing late devirtualization.
        //
        if (isLateDevirtualization)
        {
            JITDUMP("No guarded devirt during late devirtualization\n");
            return;
        }

        JITDUMP("Considering guarded devirt...\n");

        // See if there's a likely guess for the class.
        //
        const unsigned likelihoodThreshold = isInterface ? 25 : 30;
        unsigned       likelihood          = 0;
        unsigned       numberOfClasses     = 0;

        CORINFO_CLASS_HANDLE likelyClass =
            info.compCompHnd->getLikelyClass(info.compMethodHnd, baseClass, ilOffset, &likelihood, &numberOfClasses);

        if (likelyClass != NO_CLASS_HANDLE)
        {
            JITDUMP("Likely class for %p (%s) is %p (%s) [likelihood:%u classes seen:%u]\n", dspPtr(objClass),
                    objClassName, likelyClass, eeGetClassName(likelyClass), likelihood, numberOfClasses);
        }
        else if (derivedMethod != nullptr)
        {
            // If we have a derived method we can optionally guess for
            // the class that introduces the method.
            //
            bool guessJitBestClass = true;
            INDEBUG(guessJitBestClass = (JitConfig.JitGuardedDevirtualizationGuessBestClass() > 0););

            if (!guessJitBestClass)
            {
                JITDUMP("No guarded devirt: no likely class and guessing for jit best class disabled\n");
                return;
            }

            // We will use the class that introduced the method as our guess
            // for the runtime class of the object.
            //
            // We don't know how likely this is; just choose a value that gets
            // us past the threshold.
            likelyClass = info.compCompHnd->getMethodClass(derivedMethod);
            likelihood  = likelihoodThreshold;
            JITDUMP("Will guess implementing class for class %p (%s) is %p (%s)!\n", dspPtr(objClass), objClassName,
                    likelyClass, eeGetClassName(likelyClass));
        }

        // Todo: a more advanced heuristic using likelihood, number of
        // classes, and the profile count for this block.
        //
        // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies
        // have shown this transformation should pay off even if we guess wrong sometimes.
        //
        if (likelihood < likelihoodThreshold)
        {
            JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind,
                    likelihoodThreshold);
            return;
        }

        // Figure out which method will be called.
        //
        CORINFO_DEVIRTUALIZATION_INFO dvInfo;
        dvInfo.virtualMethod = baseMethod;
        dvInfo.objClass      = likelyClass;
        dvInfo.context       = *pContextHandle;

        bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo);

        if (!canResolve)
        {
            JITDUMP("Can't figure out which method would be invoked, sorry\n");
            return;
        }

        CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod;
        JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr));

        // Some of these may be redundant
        //
        DWORD likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod);
        DWORD likelyClassAttribs  = info.compCompHnd->getClassAttribs(likelyClass);

        // Try guarded devirtualization.
        //
        addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs,
                                            likelihood);
        return;
    }

    // All checks done. Time to transform the call.
    assert(canDevirtualize);

    JITDUMP(" %s; can devirtualize\n", note);

    // See if the method we're devirtualizing to is an intrinsic.
    //
    if (derivedMethodAttribs & (CORINFO_FLG_JIT_INTRINSIC | CORINFO_FLG_INTRINSIC))
    {
        JITDUMP("!!! Devirt to intrinsic in %s, calling %s::%s\n", impInlineRoot()->info.compFullName, derivedClassName,
                derivedMethodName);
    }

    // Make the updates.
    call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
    call->gtFlags &= ~GTF_CALL_VIRT_STUB;
    call->gtCallMethHnd = derivedMethod;
    call->gtCallType    = CT_USER_FUNC;
    call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED;

    // Virtual calls include an implicit null check, which we may
    // now need to make explicit.
    if (!objIsNonNull)
    {
        call->gtFlags |= GTF_CALL_NULLCHECK;
    }

    // Clear the inline candidate info (may be non-null since
    // it's a union field used for other things by virtual
    // stubs)
    call->gtInlineCandidateInfo = nullptr;

#if defined(DEBUG)
    if (verbose)
    {
        printf("... after devirt...\n");
        gtDispTree(call);
    }

    if (doPrint)
    {
        printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
               baseMethodName, derivedClassName, derivedMethodName, note);
    }
#endif // defined(DEBUG)

    // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
    if (thisObj->IsBoxedValue())
    {
        JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");

        if (isExplicitTailCall)
        {
            JITDUMP("Call is an explicit tail call, we cannot perform an unbox\n");
            return;
        }

        // Note for some shared methods the unboxed entry point requires an extra parameter.
        bool                  requiresInstMethodTableArg = false;
        CORINFO_METHOD_HANDLE unboxedEntryMethod =
            info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);

        if (unboxedEntryMethod != nullptr)
        {
            // Since the call is the only consumer of the box, we know the box can't escape
            // since it is being passed an interior pointer.
            //
            // So, revise the box to simply create a local copy, use the address of that copy
            // as the this pointer, and update the entry point to the unboxed entry.
            //
            // Ideally, we then inline the boxed method and if it turns out not to modify
            // the copy, we can undo the copy too.
            if (requiresInstMethodTableArg)
            {
                // Perform a trial box removal and ask for the type handle tree.
                JITDUMP("Unboxed entry needs method table arg...\n");
                GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);

                if (methodTableArg != nullptr)
                {
                    // If that worked, turn the box into a copy to a local var
                    JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
                    GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);

                    if (localCopyThis != nullptr)
                    {
                        // Pass the local var as this and the type handle as a new arg
                        JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
                        call->gtCallThisArg = gtNewCallArgs(localCopyThis);
                        call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;

                        // Prepend for R2L arg passing or empty L2R passing
                        if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
                        {
                            call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs);
                        }
                        // Append for non-empty L2R
                        else
                        {
                            GenTreeCall::Use* beforeArg = call->gtCallArgs;
                            while (beforeArg->GetNext() != nullptr)
                            {
                                beforeArg = beforeArg->GetNext();
                            }

                            beforeArg->SetNext(gtNewCallArgs(methodTableArg));
                        }

                        call->gtCallMethHnd = unboxedEntryMethod;
                        derivedMethod       = unboxedEntryMethod;

                        // Method attributes will differ because unboxed entry point is shared
                        const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
                        JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
                                unboxedMethodAttribs);
                        derivedMethodAttribs = unboxedMethodAttribs;
                    }
                    else
                    {
                        JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
                    }
                }
                else
                {
                    JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
                }
            }
            else
            {
                JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
                GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);

                if (localCopyThis != nullptr)
                {
                    JITDUMP("Success! invoking unboxed entry point on local copy\n");
                    call->gtCallThisArg = gtNewCallArgs(localCopyThis);
                    call->gtCallMethHnd = unboxedEntryMethod;
                    call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
                    derivedMethod = unboxedEntryMethod;

#if FEATURE_TAILCALL_OPT
                    if (call->IsImplicitTailCall())
                    {
                        JITDUMP("Clearing the implicit tail call flag\n");

                        // If set, we clear the implicit tail call flag
                        // as we just introduced a new address taken local variable
                        //
                        call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
                    }
#endif // FEATURE_TAILCALL_OPT
                }
                else
                {
                    JITDUMP("Sorry, failed to undo the box\n");
                }
            }
        }
        else
        {
            // Many of the low-level methods on value classes won't have unboxed entries,
            // as they need access to the type of the object.
            //
            // Note this may be a cue for us to stack allocate the boxed object, since
            // we probably know that these objects don't escape.
            JITDUMP("Sorry, failed to find unboxed entry point\n");
        }
    }

    // Need to update call info too.
    //
    *method      = derivedMethod;
    *methodFlags = derivedMethodAttribs;

    // Update context handle
    //
    *pContextHandle = MAKE_METHODCONTEXT(derivedMethod);

    // Update exact context handle.
    //
    if (pExactContextHandle != nullptr)
    {
        *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass);
    }

#ifdef FEATURE_READYTORUN_COMPILER
    if (opts.IsReadyToRun())
    {
        // For R2R, getCallInfo triggers bookkeeping on the zap
        // side so we need to call it here.
        //
        // First, cons up a suitable resolved token.
        CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};

        derivedResolvedToken.tokenScope   = info.compCompHnd->getMethodModule(derivedMethod);
        derivedResolvedToken.tokenContext = *pContextHandle;
        derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
        derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
        derivedResolvedToken.hClass       = derivedClass;
        derivedResolvedToken.hMethod      = derivedMethod;

        // Look up the new call info.
        CORINFO_CALL_INFO derivedCallInfo;
        eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);

        // Update the call.
        call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
        call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
        call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
    }
#endif // FEATURE_READYTORUN_COMPILER
}
//------------------------------------------------------------------------
// impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
//   to an intrinsic returns an exact type
//
// Arguments:
//     methodHnd -- handle for the special intrinsic method
//
// Returns:
//     Exact class handle returned by the intrinsic call, if known.
//     Nullptr if not known, or not likely to lead to beneficial optimization.
CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
{
    JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));

    CORINFO_CLASS_HANDLE result = nullptr;

    // See what intrinsic we have...
    const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
    switch (ni)
    {
        case NI_System_Collections_Generic_EqualityComparer_get_Default:
        {
            // Expect one class generic parameter; figure out which it is.
            CORINFO_SIG_INFO sig;
            info.compCompHnd->getMethodSig(methodHnd, &sig);
            assert(sig.sigInst.classInstCount == 1);
            CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
            assert(typeHnd != nullptr);

            // Lookup can be incorrect when we have __Canon as it won't appear
            // to implement any interface types.
            //
            // And if we do not have a final type, devirt & inlining is
            // unlikely to result in much simplification.
            //
            // We can use CORINFO_FLG_FINAL to screen out both of these cases.
            const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
            const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);

            if (isFinalType)
            {
                // Ask the EE which concrete comparer class EqualityComparer<T>.Default
                // will return for this T.
                result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
                JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
                        result != nullptr ? eeGetClassName(result) : "unknown");
            }
            else
            {
                JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
            }

            break;
        }

        default:
        {
            JITDUMP("This special intrinsic not handled, sorry...\n");
            break;
        }
    }

    return result;
}
//------------------------------------------------------------------------
// impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
//
// Arguments:
//    token - init value for the allocated token.
//
// Return Value:
//    pointer to token into jit-allocated memory.
CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token)
{
    // Grab storage from the jit allocator and copy the caller's token into it,
    // giving the copy a lifetime that spans the whole compilation.
    CORINFO_RESOLVED_TOKEN* const tokenCopy = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
    *tokenCopy                              = token;
    return tokenCopy;
}
//------------------------------------------------------------------------
// SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables.
//
class SpillRetExprHelper
{
public:
    SpillRetExprHelper(Compiler* comp) : comp(comp)
    {
    }

    // Walk every argument of `call` (including the 'this' argument) and
    // replace each GT_RET_EXPR node found with a read of a freshly spilled
    // local variable.
    void StoreRetExprResultsInArgs(GenTreeCall* call)
    {
        for (GenTreeCall::Use& use : call->Args())
        {
            comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this);
        }

        if (call->gtCallThisArg != nullptr)
        {
            comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this);
        }
    }

private:
    // Pre-order tree-walk callback. Skips subtrees that cannot contain a
    // ret_expr (GT_RET_EXPR nodes are flagged GTF_CALL, so an unflagged
    // subtree has none) and spills each ret_expr it reaches.
    static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
    {
        assert((pTree != nullptr) && (*pTree != nullptr));
        GenTree* tree = *pTree;
        if ((tree->gtFlags & GTF_CALL) == 0)
        {
            // Trees with ret_expr are marked as GTF_CALL.
            return Compiler::WALK_SKIP_SUBTREES;
        }
        if (tree->OperGet() == GT_RET_EXPR)
        {
            SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
            walker->StoreRetExprAsLocalVar(pTree);
        }
        return Compiler::WALK_CONTINUE;
    }

    // Assign the ret_expr to a new temp and replace the tree in place with a
    // use of that temp; for ref-typed results, also record single-def status
    // and any known class handle on the temp.
    void StoreRetExprAsLocalVar(GenTree** pRetExpr)
    {
        GenTree* retExpr = *pRetExpr;
        assert(retExpr->OperGet() == GT_RET_EXPR);
        const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
        JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp);
        comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
        *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());

        if (retExpr->TypeGet() == TYP_REF)
        {
            // The temp is written exactly once (just above), so it qualifies
            // as a single-def local; this enables better type tracking.
            assert(comp->lvaTable[tmp].lvSingleDef == 0);
            comp->lvaTable[tmp].lvSingleDef = 1;
            JITDUMP("Marked V%02u as a single def temp\n", tmp);

            bool                 isExact   = false;
            bool                 isNonNull = false;
            CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull);
            if (retClsHnd != nullptr)
            {
                comp->lvaSetClass(tmp, retClsHnd, isExact);
            }
        }
    }

private:
    Compiler* comp;
};
//------------------------------------------------------------------------
// addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate.
//    Spill ret_expr in the call node, because they can't be cloned.
//
// Arguments:
//    call - fat calli candidate
//
void Compiler::addFatPointerCandidate(GenTreeCall* call)
{
    JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call));
    setMethodHasFatPointer();
    call->SetFatPointerCandidate();
    // GT_RET_EXPR nodes can't be cloned, so spill them to temps now in case
    // this call must be cloned later during the fat-call transformation.
    SpillRetExprHelper helper(this);
    helper.StoreRetExprResultsInArgs(call);
}
//------------------------------------------------------------------------
// addGuardedDevirtualizationCandidate: potentially mark the call as a guarded
//    devirtualization candidate
//
// Notes:
//
//    We currently do not mark calls as candidates when prejitting. This was done
//    to simplify bringing up the associated transformation. It is worth revisiting
//    if we think we can come up with a good guess for the class when prejitting.
//
//    Call sites in rare or unoptimized code, and calls that require cookies are
//    also not marked as candidates.
//
//    As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any
//    child tree, because we need to clone all these trees when we clone the call
//    as part of guarded devirtualization, and these IR nodes can't be cloned.
//
// Arguments:
//    call - potential guarded devirtualization candidate
//    methodHandle - method that will be invoked if the class test succeeds
//    classHandle - class that will be tested for at runtime
//    methodAttr - attributes of the method
//    classAttr - attributes of the class
//    likelihood - odds that this class is the class seen at runtime
//
void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall*          call,
                                                   CORINFO_METHOD_HANDLE methodHandle,
                                                   CORINFO_CLASS_HANDLE  classHandle,
                                                   unsigned              methodAttr,
                                                   unsigned              classAttr,
                                                   unsigned              likelihood)
{
    // This transformation only makes sense for virtual calls
    assert(call->IsVirtual());

    // Only mark calls if the feature is enabled.
    const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0;
    if (!isEnabled)
    {
        JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n",
                dspTreeID(call));
        return;
    }

    // Bail when prejitting. We only do this for jitted code.
    // We should revisit this if we think we can come up with good class guesses when prejitting.
    if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
    {
        JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- prejitting", dspTreeID(call));
        return;
    }

    // Bail if not optimizing or the call site is very likely cold
    if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
    {
        JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n",
                dspTreeID(call));
        return;
    }

    // CT_INDIRECT calls may use the cookie, bail if so...
    //
    // If transforming these provides a benefit, we could save this off in the same way
    // we save the stub address below.
    if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr))
    {
        return;
    }

#ifdef DEBUG

    // See if disabled by range
    //
    static ConfigMethodRange JitGuardedDevirtualizationRange;
    JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange());
    assert(!JitGuardedDevirtualizationRange.Error());
    if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash()))
    {
        JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by "
                "JitGuardedDevirtualizationRange",
                dspTreeID(call));
        return;
    }

#endif

    // We're all set, proceed with candidate creation.
    //
    JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call),
            eeGetClassName(classHandle));
    setMethodHasGuardedDevirtualization();
    call->SetGuardedDevirtualizationCandidate();

    // Spill off any GT_RET_EXPR subtrees so we can clone the call.
    //
    SpillRetExprHelper helper(this);
    helper.StoreRetExprResultsInArgs(call);

    // Gather some information for later. Note we actually allocate InlineCandidateInfo
    // here, as the devirtualized half of this call will likely become an inline candidate.
    //
    GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo;

    pInfo->guardedMethodHandle = methodHandle;
    pInfo->guardedClassHandle  = classHandle;
    pInfo->likelihood          = likelihood;

    // Save off the stub address since it shares a union with the candidate info.
    //
    if (call->IsVirtualStub())
    {
        JITDUMP("Saving stub addr %p in candidate info\n", dspPtr(call->gtStubCallStubAddr));
        pInfo->stubAddr = call->gtStubCallStubAddr;
    }
    else
    {
        pInfo->stubAddr = nullptr;
    }

    call->gtGuardedDevirtualizationCandidateInfo = pInfo;
}
//------------------------------------------------------------------------
// addExpRuntimeLookupCandidate: mark this call (and the method) as a
//    candidate for later expansion of its runtime lookup.
//
// Arguments:
//    call - the call to mark
//
void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call)
{
    setMethodHasExpRuntimeLookup();
    call->SetExpRuntimeLookup();
}
//------------------------------------------------------------------------
// impIsClassExact: check if a class handle can only describe values
//    of exactly one class.
//
// Arguments:
//    classHnd - handle for class in question
//
// Returns:
//    true if class is final and not subject to special casting from
//    variance or similar.
//
// Note:
//    We are conservative on arrays of primitive types here.
bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd)
{
    const DWORD flags       = info.compCompHnd->getClassAttribs(classHnd);
    const DWORD flagsMask   = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
    const DWORD maskedFlags = flags & flagsMask;

    // A final, non-variant, non-array class is exact.
    if (maskedFlags == CORINFO_FLG_FINAL)
    {
        return true;
    }

    // A final array type is exact when its element class is itself exact.
    if (maskedFlags == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY))
    {
        CORINFO_CLASS_HANDLE arrayElementHandle = nullptr;
        const CorInfoType    elemType           = info.compCompHnd->getChildType(classHnd, &arrayElementHandle);

        if ((elemType == CORINFO_TYPE_CLASS) || (elemType == CORINFO_TYPE_VALUECLASS))
        {
            return impIsClassExact(arrayElementHandle);
        }
    }

    return false;
}
//------------------------------------------------------------------------
// impCanSkipCovariantStoreCheck: see if storing a ref type value to an array
//    can skip the array store covariance check.
//
// Arguments:
//    value -- tree producing the value to store
//    array -- tree representing the array to store to
//
// Returns:
//    true if the store does not require a covariance check.
//
bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array)
{
    // We should only call this when optimizing.
    assert(opts.OptimizationEnabled());

    // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j]
    if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR))
    {
        GenTree* valueIndex = value->AsIndex()->Arr();
        if (valueIndex->OperIs(GT_LCL_VAR))
        {
            unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum();
            unsigned arrayLcl = array->AsLclVar()->GetLclNum();
            // Same local and not address-exposed: both sides must refer to
            // the very same array object, so the element type trivially fits.
            if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->lvAddrExposed)
            {
                JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
                return true;
            }
        }
    }

    // Check for assignment of NULL. This requires no type check at all.
    if (value->OperIs(GT_CNS_INT))
    {
        assert(value->gtType == TYP_REF);
        if (value->AsIntCon()->gtIconVal == 0)
        {
            JITDUMP("\nstelem of null: skipping covariant store check\n");
            return true;
        }
        // Non-0 const refs can only occur with frozen objects
        assert(value->IsIconHandle(GTF_ICON_STR_HDL));
        assert(doesMethodHaveFrozenString() ||
               (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString()));
    }

    // Try and get a class handle for the array
    if (value->gtType != TYP_REF)
    {
        return false;
    }

    bool                 arrayIsExact   = false;
    bool                 arrayIsNonNull = false;
    CORINFO_CLASS_HANDLE arrayHandle    = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull);

    if (arrayHandle == NO_CLASS_HANDLE)
    {
        return false;
    }

    // There are some methods in corelib where we're storing to an array but the IL
    // doesn't reflect this (see SZArrayHelper). Avoid.
    DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle);
    if ((attribs & CORINFO_FLG_ARRAY) == 0)
    {
        return false;
    }

    CORINFO_CLASS_HANDLE arrayElementHandle = nullptr;
    CorInfoType          arrayElemType      = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle);

    // Verify array type handle is really an array of ref type
    assert(arrayElemType == CORINFO_TYPE_CLASS);

    // Check for exactly object[]: any ref may be stored into it.
    if (arrayIsExact && (arrayElementHandle == impGetObjectClass()))
    {
        JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n");
        return true;
    }

    // Check for T[] with T exact.
    if (!impIsClassExact(arrayElementHandle))
    {
        return false;
    }

    bool                 valueIsExact   = false;
    bool                 valueIsNonNull = false;
    CORINFO_CLASS_HANDLE valueHandle    = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull);

    // T is exact, so the store is safe when the value's static type is T.
    if (valueHandle == arrayElementHandle)
    {
        JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n");
        return true;
    }

    return false;
}
|
/*! @file
@brief 置換ダイアログ
@author Norio Nakatani
@date 2001/06/23 N.Nakatani 単語単位で検索する機能を実装
*/
/*
Copyright (C) 1998-2001, Norio Nakatani
Copyright (C) 2001, genta, Stonee, hor, YAZAKI
Copyright (C) 2002, MIK, hor, novice, genta, aroka, YAZAKI
Copyright (C) 2006, かろと, ryoji
Copyright (C) 2007, ryoji
Copyright (C) 2009, ryoji
Copyright (C) 2012, Uchi
This source code is designed for sakura editor.
Please contact the copyright holder to use this code for other purpose.
*/
#include "StdAfx.h"
#include "dlg/CDlgReplace.h"
#include "view/CEditView.h"
#include "util/shell.h"
#include "util/window.h"
#include "sakura_rc.h"
#include "sakura.hh"
// Replace dialog, CDlgReplace.cpp //@@@ 2002.01.07 add start MIK
// Maps each dialog control ID to its help topic ID; the table is
// terminated by the {0, 0} sentinel.
const DWORD p_helpids[] = { //11900
    IDC_BUTTON_SEARCHNEXT,          HIDC_REP_BUTTON_SEARCHNEXT,          // Search down
    IDC_BUTTON_SEARCHPREV,          HIDC_REP_BUTTON_SEARCHPREV,          // Search up
    IDC_BUTTON_REPALCE,             HIDC_REP_BUTTON_REPALCE,             // Replace
    IDC_BUTTON_REPALCEALL,          HIDC_REP_BUTTON_REPALCEALL,          // Replace all
    IDCANCEL,                       HIDCANCEL_REP,                       // Cancel
    IDC_BUTTON_HELP,                HIDC_REP_BUTTON_HELP,                // Help
    IDC_CHK_PASTE,                  HIDC_REP_CHK_PASTE,                  // Paste from clipboard
    IDC_CHK_WORD,                   HIDC_REP_CHK_WORD,                   // Whole-word match
    IDC_CHK_LOHICASE,               HIDC_REP_CHK_LOHICASE,               // Match case
    IDC_CHK_REGULAREXP,             HIDC_REP_CHK_REGULAREXP,             // Regular expression
    IDC_CHECK_NOTIFYNOTFOUND,       HIDC_REP_CHECK_NOTIFYNOTFOUND,       // Notify when not found
    IDC_CHECK_bAutoCloseDlgReplace, HIDC_REP_CHECK_bAutoCloseDlgReplace, // Close dialog automatically
    IDC_COMBO_TEXT,                 HIDC_REP_COMBO_TEXT,                 // Search text
    IDC_COMBO_TEXT2,                HIDC_REP_COMBO_TEXT2,                // Replacement text
    IDC_RADIO_REPLACE,              HIDC_REP_RADIO_REPLACE,              // Target: replace
    IDC_RADIO_INSERT,               HIDC_REP_RADIO_INSERT,               // Target: insert
    IDC_RADIO_ADD,                  HIDC_REP_RADIO_ADD,                  // Target: append
    IDC_RADIO_LINEDELETE,           HIDC_REP_RADIO_LINEDELETE,           // Target: delete line
    IDC_RADIO_SELECTEDAREA,         HIDC_REP_RADIO_SELECTEDAREA,         // Scope: selection (original comment had this and ALLAREA swapped)
    IDC_RADIO_ALLAREA,              HIDC_REP_RADIO_ALLAREA,              // Scope: whole file
    IDC_STATIC_JRE32VER,            HIDC_REP_STATIC_JRE32VER,            // Regex engine version
    IDC_BUTTON_SETMARK,             HIDC_REP_BUTTON_SETMARK,             // 2002.01.16 hor bookmark matched lines
    IDC_CHECK_SEARCHALL,            HIDC_REP_CHECK_SEARCHALL,            // 2002.01.26 hor search again from top (bottom)
    IDC_CHECK_CONSECUTIVEALL,       HIDC_REP_CHECK_CONSECUTIVEALL,       // "Replace all" repeats single replace // 2007.01.16 ryoji
    // IDC_STATIC, -1,
    0, 0
}; //@@@ 2002.01.07 add end MIK
// Construct the replace dialog with default search/replace settings.
CDlgReplace::CDlgReplace()
{
    m_sSearchOption.Reset();   // search options
    m_bConsecutiveAll = FALSE; // "Replace all" repeats single replace // 2007.01.16 ryoji
    m_bSelectedArea   = FALSE; // replace within selection only
    m_nReplaceTarget  = 0;     // replace target // 2001.12.03 hor
    m_nPaste          = FALSE; // paste from clipboard? // 2001.12.03 hor
    m_nReplaceCnt     = 0;     // result count of "replace all" // 2002.02.08 hor
    m_bCanceled       = false; // was "replace all" interrupted? // 2002.02.08 hor
}
/*!
    Catch combo box drop-down notifications.
    @date 2013.03.24 novice created
*/
BOOL CDlgReplace::OnCbnDropDown( HWND hwndCtl, int wID )
{
    // Lazily populate the dropdown from the shared history the first time
    // it is opened (the list is empty until then).
    auto fillIfEmpty = [hwndCtl]( const auto& keys ){
        if( ::SendMessage( hwndCtl, CB_GETCOUNT, 0L, 0L ) == 0 ){
            const int nCount = (int)keys.size();
            for( int i = 0; i < nCount; ++i ){
                Combo_AddString( hwndCtl, keys[i] );
            }
        }
    };

    switch( wID ){
    case IDC_COMBO_TEXT:    // search-text history
        fillIfEmpty( m_pShareData->m_sSearchKeywords.m_aSearchKeys );
        break;
    case IDC_COMBO_TEXT2:   // replacement-text history
        fillIfEmpty( m_pShareData->m_sSearchKeywords.m_aReplaceKeys );
        break;
    }
    return CDialog::OnCbnDropDown( hwndCtl, wID );
}
/* Show the modeless replace dialog. lParam is the CEditView* to operate on. */
HWND CDlgReplace::DoModeless( HINSTANCE hInstance, HWND hwndParent, LPARAM lParam, BOOL bSelected )
{
    m_sSearchOption   = m_pShareData->m_Common.m_sSearch.m_sSearchOption;   // search options
    m_bConsecutiveAll = m_pShareData->m_Common.m_sSearch.m_bConsecutiveAll; // "Replace all" repeats single replace // 2007.01.16 ryoji
    m_bSelectedArea   = m_pShareData->m_Common.m_sSearch.m_bSelectedArea;   // replace within selection
    m_bNOTIFYNOTFOUND = m_pShareData->m_Common.m_sSearch.m_bNOTIFYNOTFOUND; // show message when search/replace finds nothing
    m_bSelected = bSelected;
    m_ptEscCaretPos_PHY = ((CEditView*)lParam)->GetCaret().GetCaretLogicPos(); // save caret position at search/replace start
    ((CEditView*)lParam)->m_bSearch = TRUE; // whether the search/replace start position is registered 02/07/28 ai
    return CDialog::DoModeless( hInstance, hwndParent, IDD_REPLACE, lParam, SW_SHOW );
}
/* Modeless use: switch the view that replace/search operates on. */
void CDlgReplace::ChangeView( LPARAM pcEditView )
{
    m_lParam = pcEditView;
}
/* Load the stored search/replace settings into the dialog controls. */
void CDlgReplace::SetData( void )
{
    // Set up the search/replace string combo lists (factored out) 2010/5/26 Uchi
    SetCombosList();

    /* Match case */
    ::CheckDlgButton( GetHwnd(), IDC_CHK_LOHICASE, m_sSearchOption.bLoHiCase );

    // 2001/06/23 N.Nakatani
    /* Whole-word search */
    ::CheckDlgButton( GetHwnd(), IDC_CHK_WORD, m_sSearchOption.bWordOnly );

    /* "Replace all" repeats single replace */ // 2007.01.16 ryoji
    ::CheckDlgButton( GetHwnd(), IDC_CHECK_CONSECUTIVEALL, m_bConsecutiveAll );

    // From Here Jun. 29, 2001 genta
    // Reworked for the regex library replacement. Control flow and
    // conditions revised so that CheckRegexpVersion is always called,
    // regardless of the regex checkbox state.
    if( CheckRegexpVersion( GetHwnd(), IDC_STATIC_JRE32VER, false )
        && m_sSearchOption.bRegularExp){
        /* Regex engine present and regex mode requested */
        ::CheckDlgButton( GetHwnd(), IDC_CHK_REGULAREXP, 1 );
        // 2001/06/23 N.Nakatani
        /* Whole-word search is not supported together with regex */
        ::EnableWindow( GetItemHwnd( IDC_CHK_WORD ), FALSE );
    }
    else {
        ::CheckDlgButton( GetHwnd(), IDC_CHK_REGULAREXP, 0 );
        /* "Replace all" repeats single replace */
        ::EnableWindow( GetItemHwnd( IDC_CHECK_CONSECUTIVEALL ), FALSE ); // 2007.01.16 ryoji
    }
    // To Here Jun. 29, 2001 genta

    /* Show a message when the search/replace target is not found */
    ::CheckDlgButton( GetHwnd(), IDC_CHECK_NOTIFYNOTFOUND, m_bNOTIFYNOTFOUND );

    /* Close the replace dialog automatically */
    ::CheckDlgButton( GetHwnd(), IDC_CHECK_bAutoCloseDlgReplace, m_pShareData->m_Common.m_sSearch.m_bAutoCloseDlgReplace );

    /* Search again from top (bottom) 2002.01.26 hor */
    ::CheckDlgButton( GetHwnd(), IDC_CHECK_SEARCHALL, m_pShareData->m_Common.m_sSearch.m_bSearchAll );

    // From Here 2001.12.03 hor
    // Paste from clipboard?
    ::CheckDlgButton( GetHwnd(), IDC_CHK_PASTE, m_nPaste );
    // Replace-target radio buttons (0=replace, 1=insert, 2=append, 3=delete line)
    if(m_nReplaceTarget==0){
        ::CheckDlgButton( GetHwnd(), IDC_RADIO_REPLACE, TRUE );
    }else
    if(m_nReplaceTarget==1){
        ::CheckDlgButton( GetHwnd(), IDC_RADIO_INSERT, TRUE );
    }else
    if(m_nReplaceTarget==2){
        ::CheckDlgButton( GetHwnd(), IDC_RADIO_ADD, TRUE );
    }else
    if(m_nReplaceTarget==3){
        // Line delete: replacement text and paste option are meaningless
        ::CheckDlgButton( GetHwnd(), IDC_RADIO_LINEDELETE, TRUE );
        ::EnableWindow( GetItemHwnd( IDC_COMBO_TEXT2 ), FALSE );
        ::EnableWindow( GetItemHwnd( IDC_CHK_PASTE ), FALSE );
    }
    // To Here 2001.12.03 hor
    return;
}
// 検索文字列/置換後文字列リストの設定
// 2010/5/26 Uchi
void CDlgReplace::SetCombosList( void )
{
HWND hwndCombo;
/* 検索文字列 */
hwndCombo = GetItemHwnd( IDC_COMBO_TEXT );
while (Combo_GetCount(hwndCombo) > 0) {
Combo_DeleteString( hwndCombo, 0);
}
std::wstring strText;
if( !ApiWrap::DlgItem_GetText( GetHwnd(), IDC_COMBO_TEXT, strText ) || strText != m_strText ) {
::DlgItem_SetText( GetHwnd(), IDC_COMBO_TEXT, m_strText.c_str() );
}
/* 置換後文字列 */
hwndCombo = GetItemHwnd( IDC_COMBO_TEXT2 );
while (Combo_GetCount(hwndCombo) > 0) {
Combo_DeleteString( hwndCombo, 0);
}
std::wstring strText2;
if( !ApiWrap::DlgItem_GetText( GetHwnd(), IDC_COMBO_TEXT2, strText2 ) || strText2 != m_strText2 ) {
::DlgItem_SetText( GetHwnd(), IDC_COMBO_TEXT2, m_strText2.c_str() );
}
}
/* Read the settings back from the dialog controls. */
/* 0 == no search string entered; >0 == OK; <0 == input error */
int CDlgReplace::GetData( void )
{
    /* Match case */
    m_sSearchOption.bLoHiCase = (0!=IsDlgButtonChecked( GetHwnd(), IDC_CHK_LOHICASE ));

    // 2001/06/23 N.Nakatani
    /* Whole-word search */
    m_sSearchOption.bWordOnly = (0!=IsDlgButtonChecked( GetHwnd(), IDC_CHK_WORD ));

    /* "Replace all" repeats single replace */ // 2007.01.16 ryoji
    m_bConsecutiveAll = ::IsDlgButtonChecked( GetHwnd(), IDC_CHECK_CONSECUTIVEALL );

    /* Regular expression */
    m_sSearchOption.bRegularExp = (0!=IsDlgButtonChecked( GetHwnd(), IDC_CHK_REGULAREXP ));

    /* Replace within selection only */
    m_bSelectedArea = ::IsDlgButtonChecked( GetHwnd(), IDC_RADIO_SELECTEDAREA );

    /* Show a message when the search/replace target is not found */
    m_bNOTIFYNOTFOUND = ::IsDlgButtonChecked( GetHwnd(), IDC_CHECK_NOTIFYNOTFOUND );

    m_pShareData->m_Common.m_sSearch.m_bConsecutiveAll = m_bConsecutiveAll; // 1 == "Replace all" repeats single replace // 2007.01.16 ryoji
    m_pShareData->m_Common.m_sSearch.m_bSelectedArea = m_bSelectedArea; // replace within selection
    m_pShareData->m_Common.m_sSearch.m_bNOTIFYNOTFOUND = m_bNOTIFYNOTFOUND; // show message when not found

    /* Search string */
    ApiWrap::DlgItem_GetText( GetHwnd(), IDC_COMBO_TEXT, m_strText );
    /* Replacement string (empty when deleting whole lines) */
    if( ::IsDlgButtonChecked( GetHwnd(), IDC_RADIO_LINEDELETE ) ){
        m_strText2 = L"";
    }else{
        ApiWrap::DlgItem_GetText( GetHwnd(), IDC_COMBO_TEXT2, m_strText2 );
    }

    /* Close the replace dialog automatically */
    m_pShareData->m_Common.m_sSearch.m_bAutoCloseDlgReplace = ::IsDlgButtonChecked( GetHwnd(), IDC_CHECK_bAutoCloseDlgReplace );
    /* Search again from top (bottom) 2002.01.26 hor */
    m_pShareData->m_Common.m_sSearch.m_bSearchAll = ::IsDlgButtonChecked( GetHwnd(), IDC_CHECK_SEARCHALL );

    if( 0 < m_strText.size() ){
        /* Regular expression? */
        // From Here Jun. 26, 2001 genta
        // Reworked for the regex library replacement
        int nFlag = 0x00;
        nFlag |= m_sSearchOption.bLoHiCase ? 0x01 : 0x00;
        if( m_sSearchOption.bRegularExp && !CheckRegexpSyntax( m_strText.c_str(), GetHwnd(), true, nFlag ) ){
            return -1; // invalid regex: report an input error
        }
        // To Here Jun. 26, 2001 genta (regex library replacement)

        /* Search string */
        //@@@ 2002.2.2 YAZAKI updated for the new CShareData.AddToSearchKeyArr()
        if( m_strText.size() < _MAX_PATH ){
            CSearchKeywordManager().AddToSearchKeyArr( m_strText.c_str() );
            m_pShareData->m_Common.m_sSearch.m_sSearchOption = m_sSearchOption; // search options
        }
        // 2011.12.18 store the current search key directly on the view
        CEditView* pcEditView = (CEditView*)m_lParam;
        if( pcEditView->m_strCurSearchKey == m_strText && pcEditView->m_sCurSearchOption == m_sSearchOption ){
            // unchanged: nothing to update
        }else{
            pcEditView->m_strCurSearchKey = m_strText;
            pcEditView->m_sCurSearchOption = m_sSearchOption;
            pcEditView->m_bCurSearchUpdate = true;
        }
        pcEditView->m_nCurSearchKeySequence = GetDllShareData().m_Common.m_sSearch.m_nSearchKeySequence;

        /* Replacement string */
        //@@@ 2002.2.2 YAZAKI updated for the new CShareData.AddToReplaceKeyArr()
        if( m_strText2.size() < _MAX_PATH ){
            CSearchKeywordManager().AddToReplaceKeyArr( m_strText2.c_str() );
        }
        m_nReplaceKeySequence = GetDllShareData().m_Common.m_sSearch.m_nReplaceKeySequence;

        // From Here 2001.12.03 hor
        // Paste from clipboard?
        m_nPaste=IsDlgButtonChecked( GetHwnd(), IDC_CHK_PASTE );
        ::EnableWindow( GetItemHwnd( IDC_COMBO_TEXT2 ), !m_nPaste );

        // Replace target (0=replace, 1=insert, 2=append, 3=delete line)
        m_nReplaceTarget=0;
        if(::IsDlgButtonChecked( GetHwnd(), IDC_RADIO_INSERT )){
            m_nReplaceTarget=1;
        }else
        if(::IsDlgButtonChecked( GetHwnd(), IDC_RADIO_ADD )){
            m_nReplaceTarget=2;
        }else
        if(::IsDlgButtonChecked( GetHwnd(), IDC_RADIO_LINEDELETE )){
            m_nReplaceTarget=3;
            m_nPaste = FALSE;
            ::EnableWindow( GetItemHwnd( IDC_COMBO_TEXT2 ), FALSE );
        }
        // To Here 2001.12.03 hor

        // Reset the search/replace combo lists 2010/5/26 Uchi
        if (!m_bModal) {
            SetCombosList();
        }
        return 1;
    }else{
        return 0;
    }
}
// Initialize the replace dialog: combo box setup, radio-button defaults
// depending on whether text is selected, history item deleters, and fonts.
BOOL CDlgReplace::OnInitDialog( HWND hwndDlg, WPARAM wParam, LPARAM lParam )
{
    _SetHwnd( hwndDlg );
    // Jun. 26, 2001 genta
    // No need to initialize the regex library here;
    // removed for consistency with the other dialogs.

    /* Limit the text length the user can type into the combo box edit controls */
    // Combo_LimitText( GetItemHwnd( IDC_COMBO_TEXT ), _MAX_PATH - 1 );
    // Combo_LimitText( GetItemHwnd( IDC_COMBO_TEXT2 ), _MAX_PATH - 1 );

    /* Switch the combo boxes to the extended user interface */
    Combo_SetExtendedUI( GetItemHwnd( IDC_COMBO_TEXT ), TRUE );
    Combo_SetExtendedUI( GetItemHwnd( IDC_COMBO_TEXT2 ), TRUE );

    /* Is text currently selected? */
    if( m_bSelected ){
        // Selection exists: default the scope to the selection and disable
        // the single-step search/replace buttons.
        ::EnableWindow( ::GetDlgItem( hwndDlg, IDC_BUTTON_SEARCHPREV ), FALSE ); // 2001.12.03 hor uncommented
        ::EnableWindow( ::GetDlgItem( hwndDlg, IDC_BUTTON_SEARCHNEXT ), FALSE ); // 2001.12.03 hor uncommented
        ::EnableWindow( ::GetDlgItem( hwndDlg, IDC_BUTTON_REPALCE ), FALSE ); // 2001.12.03 hor uncommented
        ::CheckDlgButton( GetHwnd(), IDC_RADIO_SELECTEDAREA, TRUE );
        // ::CheckDlgButton( GetHwnd(), IDC_RADIO_ALLAREA, FALSE ); // 2001.12.03 hor commented out
    }else{
        // ::EnableWindow( ::GetDlgItem( hwndDlg, IDC_RADIO_SELECTEDAREA ), FALSE ); // 2001.12.03 hor commented out
        // ::CheckDlgButton( GetHwnd(), IDC_RADIO_SELECTEDAREA, FALSE ); // 2001.12.03 hor commented out
        ::CheckDlgButton( GetHwnd(), IDC_RADIO_ALLAREA, TRUE );
    }

    // Allow deleting individual history entries from the dropdown lists.
    m_comboDelText = SComboBoxItemDeleter();
    m_comboDelText.pRecent = &m_cRecentSearch;
    SetComboBoxDeleter(GetItemHwnd(IDC_COMBO_TEXT), &m_comboDelText);

    m_comboDelText2 = SComboBoxItemDeleter();
    m_comboDelText2.pRecent = &m_cRecentReplace;
    SetComboBoxDeleter(GetItemHwnd(IDC_COMBO_TEXT2), &m_comboDelText2);

    BOOL bRet = CDialog::OnInitDialog( hwndDlg, wParam, lParam );
    if( !bRet ) return bRet;

    // Set fonts 2012/11/27 Uchi
    HFONT hFontOld = (HFONT)::SendMessageAny( GetItemHwnd( IDC_COMBO_TEXT ), WM_GETFONT, 0, 0 );
    HFONT hFont = SetMainFont( GetItemHwnd( IDC_COMBO_TEXT ) );
    m_cFontText.SetFont( hFontOld, hFont, GetItemHwnd( IDC_COMBO_TEXT ) );

    hFontOld = (HFONT)::SendMessageAny( GetItemHwnd( IDC_COMBO_TEXT2 ), WM_GETFONT, 0, 0 );
    hFont = SetMainFont( GetItemHwnd( IDC_COMBO_TEXT2 ) );
    m_cFontText2.SetFont( hFontOld, hFont, GetItemHwnd( IDC_COMBO_TEXT2 ) );

    return bRet;
}
// Release the fonts set on the combo boxes before the window goes away.
BOOL CDlgReplace::OnDestroy()
{
    m_cFontText.ReleaseOnDestroy();
    m_cFontText2.ReleaseOnDestroy();
    return CDialog::OnDestroy();
}
// Handle button / checkbox / radio clicks on the replace dialog.
// Returns TRUE when the click was handled here, otherwise defers to the base.
BOOL CDlgReplace::OnBnClicked( int wID )
{
    int nRet;
    CEditView* pcEditView = (CEditView*)m_lParam;
    switch( wID ){
    case IDC_CHK_PASTE:
        /* Paste text from the clipboard */
        // Reject the option when the clipboard has nothing pastable.
        if( ::IsDlgButtonChecked( GetHwnd(), IDC_CHK_PASTE ) &&
            !pcEditView->m_pcEditDoc->m_cDocEditor.IsEnablePaste() ){
            OkMessage( GetHwnd(), LS(STR_DLGREPLC_CLIPBOARD) );
            ::CheckDlgButton( GetHwnd(), IDC_CHK_PASTE, FALSE );
        }
        // The replacement-text combo is meaningless while pasting.
        ::EnableWindow( GetItemHwnd( IDC_COMBO_TEXT2 ), !(::IsDlgButtonChecked( GetHwnd(), IDC_CHK_PASTE)) );
        return TRUE;

    // Replace-target radio buttons
    case IDC_RADIO_REPLACE:
    case IDC_RADIO_INSERT:
    case IDC_RADIO_ADD:
    case IDC_RADIO_LINEDELETE:
        // Line delete needs neither replacement text nor the paste option.
        if( ::IsDlgButtonChecked( GetHwnd(), IDC_RADIO_LINEDELETE ) ){
            ::EnableWindow( GetItemHwnd( IDC_COMBO_TEXT2 ), FALSE );
            ::EnableWindow( GetItemHwnd( IDC_CHK_PASTE ), FALSE );
        }else{
            ::EnableWindow( GetItemHwnd( IDC_COMBO_TEXT2 ), TRUE );
            ::EnableWindow( GetItemHwnd( IDC_CHK_PASTE ), TRUE );
        }
        return TRUE;

    case IDC_RADIO_SELECTEDAREA:
        /* Scope: selection */
        if( ::IsDlgButtonChecked( GetHwnd(), IDC_RADIO_ALLAREA ) ){
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_SEARCHPREV ), TRUE );
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_SEARCHNEXT ), TRUE );
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_REPALCE ), TRUE );
        }else{
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_SEARCHPREV ), FALSE );
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_SEARCHNEXT ), FALSE );
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_REPALCE ), FALSE );
        }
        return TRUE;

    case IDC_RADIO_ALLAREA:
        /* Scope: whole file */
        if( ::IsDlgButtonChecked( GetHwnd(), IDC_RADIO_ALLAREA ) ){
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_SEARCHPREV ), TRUE );
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_SEARCHNEXT ), TRUE );
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_REPALCE ), TRUE );
        }else{
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_SEARCHPREV ), FALSE );
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_SEARCHNEXT ), FALSE );
            ::EnableWindow( GetItemHwnd( IDC_BUTTON_REPALCE ), FALSE );
        }
        return TRUE;
    // To Here 2001.12.03 hor

    case IDC_BUTTON_HELP:
        /* Help for the replace dialog */
        // Stonee, 2001/03/12 the fourth argument now maps the function ID to a help topic number
        MyWinHelp( GetHwnd(), HELP_CONTEXT, ::FuncID_To_HelpContextID(F_REPLACE_DIALOG) ); // 2006.10.10 ryoji changed to MyWinHelp
        return TRUE;

    // case IDC_CHK_LOHICASE: /* match case */
    //     MYTRACE( L"IDC_CHK_LOHICASE\n" );
    //     return TRUE;

    // case IDC_CHK_WORDONLY: /* whole-word match only */
    //     MYTRACE( L"IDC_CHK_WORDONLY\n" );
    //     break;

    case IDC_CHK_REGULAREXP: /* Regular expression */
        // MYTRACE( L"IDC_CHK_REGULAREXP ::IsDlgButtonChecked( GetHwnd(), IDC_CHK_REGULAREXP ) = %d\n", ::IsDlgButtonChecked( GetHwnd(), IDC_CHK_REGULAREXP ) );
        if( ::IsDlgButtonChecked( GetHwnd(), IDC_CHK_REGULAREXP ) ){
            // From Here Jun. 26, 2001 genta
            // Reworked for the regex library replacement
            if( !CheckRegexpVersion( GetHwnd(), IDC_STATIC_JRE32VER, true ) ){
                // Regex engine unavailable: revert the checkbox.
                ::CheckDlgButton( GetHwnd(), IDC_CHK_REGULAREXP, 0 );
            }else{
                // To Here Jun. 26, 2001 genta
                /* Match case */
                // Jan. 31, 2002 genta
                // case sensitivity is preserved regardless of the regex setting
                //::CheckDlgButton( GetHwnd(), IDC_CHK_LOHICASE, 1 );
                //::EnableWindow( GetItemHwnd( IDC_CHK_LOHICASE ), FALSE );
                // 2001/06/23 N.Nakatani
                /* Whole-word search is not supported together with regex */
                ::EnableWindow( GetItemHwnd( IDC_CHK_WORD ), FALSE );
                /* "Replace all" repeats single replace */
                ::EnableWindow( GetItemHwnd( IDC_CHECK_CONSECUTIVEALL ), TRUE ); // 2007.01.16 ryoji
            }
        }else{
            /* Match case */
            //::EnableWindow( GetItemHwnd( IDC_CHK_LOHICASE ), TRUE );
            // Jan. 31, 2002 genta
            // case sensitivity is preserved regardless of the regex setting
            //::CheckDlgButton( GetHwnd(), IDC_CHK_LOHICASE, 0 );
            // 2001/06/23 N.Nakatani
            /* Whole-word search */
            ::EnableWindow( GetItemHwnd( IDC_CHK_WORD ), TRUE );
            /* "Replace all" repeats single replace */
            ::EnableWindow( GetItemHwnd( IDC_CHECK_CONSECUTIVEALL ), FALSE ); // 2007.01.16 ryoji
        }
        return TRUE;

    // case IDOK: /* search down */
    //     /* read the dialog data */
    //     nRet = GetData();
    //     if( 0 < nRet ){
    //         ::EndDialog( hwndDlg, 2 );
    //     }else
    //     if( 0 == nRet ){
    //         ::EndDialog( hwndDlg, 0 );
    //     }
    //     return TRUE;

    case IDC_BUTTON_SEARCHPREV: /* Search up */
        nRet = GetData();
        if( 0 < nRet ){
            // Register the search start position 02/07/28 ai start
            if( FALSE != pcEditView->m_bSearch ){
                pcEditView->m_ptSrchStartPos_PHY = m_ptEscCaretPos_PHY;
                pcEditView->m_bSearch = FALSE;
            }// 02/07/28 ai end
            /* Dispatch by command code */
            /* Search backward */
            pcEditView->GetCommander().HandleCommand( F_SEARCH_PREV, true, (LPARAM)GetHwnd(), 0, 0, 0 );
            /* Redraw (to show the caret on a zero-width match) */
            pcEditView->Redraw(); // also needed to erase a previous zero-width match
        }else if(nRet == 0){
            OkMessage( GetHwnd(), LS(STR_DLGREPLC_STR) );
        }
        return TRUE;

    case IDC_BUTTON_SEARCHNEXT: /* Search down */
        nRet = GetData();
        if( 0 < nRet ){
            // Register the search start position 02/07/28 ai start
            if( FALSE != pcEditView->m_bSearch ){
                pcEditView->m_ptSrchStartPos_PHY = m_ptEscCaretPos_PHY;
                pcEditView->m_bSearch = FALSE;
            }// 02/07/28 ai end
            /* Dispatch by command code */
            /* Search forward */
            pcEditView->GetCommander().HandleCommand( F_SEARCH_NEXT, true, (LPARAM)GetHwnd(), 0, 0, 0 );
            /* Redraw (to show the caret on a zero-width match) */
            pcEditView->Redraw(); // also needed to erase a previous zero-width match
        }else if(nRet == 0){
            OkMessage( GetHwnd(), LS(STR_DLGREPLC_STR) );
        }
        return TRUE;

    case IDC_BUTTON_SETMARK: // 2002.01.16 hor bookmark matched lines
        nRet = GetData();
        if( 0 < nRet ){
            pcEditView->GetCommander().HandleCommand( F_BOOKMARK_PATTERN, false, 0, 0, 0, 0 );
            ::SendMessage(GetHwnd(),WM_NEXTDLGCTL,(WPARAM)GetItemHwnd( IDC_COMBO_TEXT ),TRUE);
        }
        return TRUE;

    case IDC_BUTTON_REPALCE: /* Replace */
        nRet = GetData();
        if( 0 < nRet ){
            // Register the replace start position 02/07/28 ai start
            if( FALSE != pcEditView->m_bSearch ){
                pcEditView->m_ptSrchStartPos_PHY = m_ptEscCaretPos_PHY;
                pcEditView->m_bSearch = FALSE;
            }// 02/07/28 ai end
            /* Replace */
            //@@@ 2002.2.2 YAZAKI replace command moved into CEditView
            //@@@ 2002/04/08 YAZAKI pass the parent window handle
            pcEditView->GetCommander().HandleCommand( F_REPLACE, true, (LPARAM)GetHwnd(), 0, 0, 0 );
            /* Redraw */
            pcEditView->GetCommander().HandleCommand( F_REDRAW, true, 0, 0, 0, 0 );
        }else if(nRet == 0){
            OkMessage( GetHwnd(), LS(STR_DLGREPLC_STR) );
        }
        return TRUE;

    case IDC_BUTTON_REPALCEALL: /* Replace all */
        nRet = GetData();
        if( 0 < nRet ){
            // Register the replace start position 02/07/28 ai start
            if( FALSE != pcEditView->m_bSearch ){
                pcEditView->m_ptSrchStartPos_PHY = m_ptEscCaretPos_PHY;
                pcEditView->m_bSearch = FALSE;
            }// 02/07/28 ai end
            /* Special whole-line handling removed; covered by the "Replace all repeats single replace" option being OFF 2007.01.16 ryoji */
            pcEditView->GetCommander().HandleCommand( F_REPLACE_ALL, true, 0, 0, 0, 0 );
            pcEditView->GetCommander().HandleCommand( F_REDRAW, true, 0, 0, 0, 0 );
            /* Bring the dialog back to the front */
            ActivateFrameWindow( GetHwnd() );
            TopOkMessage( GetHwnd(), LS(STR_DLGREPLC_REPLACE), m_nReplaceCnt);
            if( !m_bCanceled ){
                if( m_bModal ){ /* modal dialog? */
                    /* Close the replace dialog */
                    ::EndDialog( GetHwnd(), 0 );
                }else{
                    /* Close the replace dialog automatically */
                    if( m_pShareData->m_Common.m_sSearch.m_bAutoCloseDlgReplace ){
                        ::DestroyWindow( GetHwnd() );
                    }
                }
            }
            return TRUE;
        }else if(nRet == 0){
            OkMessage( GetHwnd(), LS(STR_DLGREPLC_REPSTR) );
        }
        return TRUE;

    // case IDCANCEL:
    //     ::EndDialog( hwndDlg, 0 );
    //     return TRUE;
    }
    /* Base class handling */
    return CDialog::OnBnClicked( wID );
}
BOOL CDlgReplace::OnActivate( WPARAM wParam, LPARAM lParam )
{
    // Toggle drawing of zero-width matches on (de)activation // 2009.11.29 ryoji
    CEditView* pcEditView = (CEditView*)m_lParam;
    CLayoutRange cRangeSel = pcEditView->GetSelectionInfo().m_sSelect;
    if( cRangeSel.IsValid() && cRangeSel.IsLineOne() && cRangeSel.IsOne() )
        pcEditView->InvalidateRect(NULL); // repaint once activation/deactivation has completed
    return CDialog::OnActivate(wParam, lParam);
}
//@@@ 2002.01.18 add start
// Return the control-ID -> help-topic-ID table used for context help.
LPVOID CDlgReplace::GetHelpIdTable(void)
{
    return (LPVOID)p_helpids;
}
//@@@ 2002.01.18 add end
|
// String Tester
// Demonstrates string objects
#include <iostream>
#include <string>
using namespace std;

int main()
{
    // Build "Game Over!!!" using three different string constructors.
    string first = "Game";
    string second("Over");
    string bangs(3, '!');
    string phrase = first + " " + second + bangs;

    cout << "The phrase is: " << phrase << "\n\n";
    cout << "The phrase has " << phrase.size() << " characters in it.\n\n";
    cout << "The character at position 0 is: " << phrase[0] << "\n\n";

    cout << "Changing the character at position 0.\n";
    phrase[0] = 'L';
    cout << "The phrase is now: " << phrase << "\n\n";

    // Walk the string and print each character with its index.
    for (string::size_type pos = 0; pos != phrase.size(); ++pos)
    {
        cout << "Character at position " << pos << " is: " << phrase[pos] << endl;
    }

    cout << "\nThe sequence 'Over' begins at location ";
    cout << phrase.find("Over") << endl;

    // find() returns string::npos when the substring is absent.
    if (phrase.find("eggplant") == string::npos)
    {
        cout << "'eggplant' is not in the phrase.\n\n";
    }

    // Demonstrate the three erase() overloads: a range, a tail, everything.
    phrase.erase(4, 5);
    cout << "The phrase is now: " << phrase << endl;
    phrase.erase(4);
    cout << "The phrase is now: " << phrase << endl;
    phrase.erase();
    cout << "The phrase is now: " << phrase << endl;

    if (phrase.empty())
    {
        cout << "\nThe phrase is no more.\n";
    }

    return 0;
}
|
#include "capi/capi.hpp"
#include "capi/include/ruby.h"
using namespace rubinius;
using namespace rubinius::capi;
extern "C" {
// Create a new, empty Hash by invoking Hash.new.
VALUE rb_hash_new() {
  return rb_funcall(rb_cHash, rb_intern("new"), 0);
}
// Fetch the value for key via Hash#[] (so the hash's default applies).
VALUE rb_hash_aref(VALUE self, VALUE key) {
  return rb_funcall(self, rb_intern("[]"), 1, key);
}
// Store value under key via Hash#[]=; returns the stored value.
VALUE rb_hash_aset(VALUE self, VALUE key, VALUE value) {
  return rb_funcall(self, rb_intern("[]="), 2, key, value);
}
// Remove key via Hash#delete; returns the removed value (or nil).
VALUE rb_hash_delete(VALUE self, VALUE key) {
  return rb_funcall(self, rb_intern("delete"), 1, key);
}
// Invoke Hash#delete_if, forwarding the block attached to the current
// native-method call frame.
VALUE rb_hash_delete_if(VALUE self) {
  NativeMethodEnvironment* env = NativeMethodEnvironment::get();
  VALUE block_handle = env->get_handle(env->block());
  return rb_funcall2b(self, rb_intern("delete_if"), 0, 0, block_handle);
}
// Number of entries, as returned by Hash#size.
VALUE rb_hash_size(VALUE self) {
  return rb_funcall(self, rb_intern("size"), 0);
}
// Like rb_hash_aref, but bypasses the hash's default value: returns nil
// when the key has no entry.
VALUE rb_hash_lookup(VALUE self, VALUE key) {
  VALUE entry = rb_funcall(self, rb_intern("find_entry"), 1, key);
  return (entry == Qnil) ? Qnil : rb_funcall(entry, rb_intern("value"), 0);
}
/* Iterate over every entry of the hash, calling func(key, value, farg)
   for each one. Follows the MRI st_foreach callback protocol for the
   return value: 0 (ST_CONTINUE) keeps going, 1 (ST_STOP) ends the walk.
   NOTE(review): ST_DELETE (2) is not supported here and aborts. */
void rb_hash_foreach(VALUE self,
                     int (*func)(ANYARGS),
                     VALUE farg)
{
  // Drive iteration via the Ruby-side iterator: each #next call is fed
  // the previous entry and returns the following one (falsy at the end).
  VALUE iter = rb_funcall(self, rb_intern("to_iter"), 0);
  VALUE entry = Qnil;
  while(RTEST(entry = rb_funcall(iter, rb_intern("next"), 1, entry))) {
    VALUE key = rb_funcall(entry, rb_intern("key"), 0);
    VALUE val = rb_funcall(entry, rb_intern("value"), 0);

    int ret = (*func)(key, val, farg);
    switch(ret) {
    case 0:   // ST_CONTINUE:
      continue;
    case 1:   // ST_STOP:
      return;
    default:
      // Any other callback result is a caller bug; fail loudly.
      rubinius::bug("unsupported hash_foreach value");
    }
  }
}
}
|
// Copyright (c) 2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
#include <validation.h>
#include <bech32.h>
#include <utilstrencodings.h>
#include <vector>
#include <string>
// Benchmark bech32::Encode over a fixed 32-byte payload, pre-converted
// from 8-bit bytes to the 5-bit groups the encoder consumes.
static void Bech32Encode(benchmark::State& state)
{
    std::vector<uint8_t> raw = ParseHex("c97f5a67ec381b760aeaf67573bc164845ff39a3bb26a1cee401ac67243b48db");
    std::vector<unsigned char> groups = {0};
    // One leading version byte plus ceil(32 * 8 / 5) data groups.
    groups.reserve(1 + 32 * 8 / 5);
    ConvertBits<8, 5, true>([&](unsigned char c) { groups.push_back(c); }, raw.begin(), raw.end());
    while (state.KeepRunning()) {
        bech32::Encode("bc", groups);
    }
}
// Benchmark bech32::Decode on a fixed address-shaped string.
// Fix: removed the unused local `std::vector<unsigned char> vch;` which
// was declared but never read or written.
static void Bech32Decode(benchmark::State& state)
{
    std::string addr = "bc1qkallence7tjawwvy0dwt4twc62qjgaw8f4vlhyd006d99f09";
    while (state.KeepRunning()) {
        bech32::Decode(addr);
    }
}
// Register the benchmarks with the framework; the second argument is the
// iteration count hint used by the harness.
BENCHMARK(Bech32Encode, 800 * 1000);
BENCHMARK(Bech32Decode, 800 * 1000);
|
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_X64)
#include "codegen-inl.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
// Shared entry stub for C++-implemented builtins: optionally slips the
// called function in as a hidden extra stack argument, then tail-calls
// into the runtime via JumpToExternalReference.
void Builtins::Generate_Adaptor(MacroAssembler* masm,
                                CFunctionId id,
                                BuiltinExtraArguments extra_args) {
  // ----------- S t a t e -------------
  //  -- rax                : number of arguments excluding receiver
  //  -- rdi                : called function (only guaranteed when
  //                          extra_args requires it)
  //  -- rsi                : context
  //  -- rsp[0]             : return address
  //  -- rsp[8]             : last argument
  //  -- ...
  //  -- rsp[8 * argc]      : first argument (argc == rax)
  //  -- rsp[8 * (argc +1)] : receiver
  // -----------------------------------

  // Insert extra arguments.
  int num_extra_args = 0;
  if (extra_args == NEEDS_CALLED_FUNCTION) {
    num_extra_args = 1;
    // Tuck rdi underneath the return address so it becomes the last
    // stack argument seen by the callee.
    __ pop(kScratchRegister);  // Save return address.
    __ push(rdi);
    __ push(kScratchRegister);  // Restore return address.
  } else {
    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
  }

  // JumpToExternalReference expects rax to contain the number of arguments
  // including the receiver and the extra arguments.
  __ addq(rax, Immediate(num_extra_args + 1));
  __ JumpToExternalReference(ExternalReference(id), 1);
}
// Builds an arguments adaptor frame: saves rbp, marks the frame with the
// ARGUMENTS_ADAPTOR sentinel, and records the function and the (smi-tagged)
// argument count on the stack.
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
  __ push(rbp);
  __ movq(rbp, rsp);

  // Store the arguments adaptor context sentinel.
  __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));

  // Push the function on the stack.
  __ push(rdi);

  // Preserve the number of arguments on the stack. Must preserve both
  // rax and rbx because these registers are used when copying the
  // arguments and the receiver.
  __ Integer32ToSmi(rcx, rax);
  __ push(rcx);
}
// Tears down the adaptor frame built by EnterArgumentsAdaptorFrame and
// removes the caller's arguments (count recovered from the saved smi).
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
  // Retrieve the number of arguments from the stack. Number is a Smi.
  __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Leave the frame.
  __ movq(rsp, rbp);
  __ pop(rbp);

  // Remove caller arguments from the stack.
  __ pop(rcx);  // Return address.
  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
  // Drop the arguments plus the receiver slot (1 * kPointerSize).
  __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
  __ push(rcx);  // Return address.
}
// Adapts the actual argument count to the callee's expected count: copies
// the arguments it has, pads missing ones with undefined, then calls the
// code entry. Fix: the label references `__ bind(©)` / `__ j(less, ©)`
// were HTML-entity mojibake ("&copy" decoded to the copyright sign) and did
// not compile; restored to `&copy`.
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax : actual number of arguments
  //  -- rbx : expected number of arguments
  //  -- rdx : code entry to call
  // -----------------------------------

  Label invoke, dont_adapt_arguments;
  __ IncrementCounter(&Counters::arguments_adaptors, 1);

  Label enough, too_few;
  __ cmpq(rax, rbx);
  __ j(less, &too_few);
  __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
  __ j(equal, &dont_adapt_arguments);

  {  // Enough parameters: Actual >= expected.
    __ bind(&enough);
    EnterArgumentsAdaptorFrame(masm);

    // Copy receiver and all expected arguments.
    const int offset = StandardFrameConstants::kCallerSPOffset;
    __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
    __ movq(rcx, Immediate(-1));  // account for receiver

    Label copy;
    __ bind(&copy);
    __ incq(rcx);
    __ push(Operand(rax, 0));
    __ subq(rax, Immediate(kPointerSize));
    __ cmpq(rcx, rbx);
    __ j(less, &copy);
    __ jmp(&invoke);
  }

  {  // Too few parameters: Actual < expected.
    __ bind(&too_few);
    EnterArgumentsAdaptorFrame(masm);

    // Copy receiver and all actual arguments.
    const int offset = StandardFrameConstants::kCallerSPOffset;
    __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
    __ movq(rcx, Immediate(-1));  // account for receiver

    Label copy;
    __ bind(&copy);
    __ incq(rcx);
    __ push(Operand(rdi, 0));
    __ subq(rdi, Immediate(kPointerSize));
    __ cmpq(rcx, rax);
    __ j(less, &copy);

    // Fill remaining expected arguments with undefined values.
    Label fill;
    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
    __ bind(&fill);
    __ incq(rcx);
    __ push(kScratchRegister);
    __ cmpq(rcx, rbx);
    __ j(less, &fill);

    // Restore function pointer.
    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  }

  // Call the entry point.
  __ bind(&invoke);
  __ call(rdx);

  // Leave frame and return.
  LeaveArgumentsAdaptorFrame(masm);
  __ ret(0);

  // -------------------------------------------
  // Dont adapt arguments.
  // -------------------------------------------
  __ bind(&dont_adapt_arguments);
  __ jmp(rdx);
}
// Implements Function.prototype.call: ensures at least one argument,
// patches the receiver (converting to object / substituting the global
// receiver as needed), shifts arguments down one slot, and dispatches
// either to the function's code or to CALL_NON_FUNCTION.
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  // Stack Layout:
  //   rsp[0]:   Return address
  //   rsp[1]:   Argument n
  //   rsp[2]:   Argument n-1
  //   ...
  //   rsp[n]:   Argument 1
  //   rsp[n+1]: Receiver (function to call)
  //
  // rax contains the number of arguments, n, not counting the receiver.

  // 1. Make sure we have at least one argument.
  { Label done;
    __ testq(rax, rax);
    __ j(not_zero, &done);
    // No arguments: push undefined beneath the return address so it
    // becomes the (dummy) first argument.
    __ pop(rbx);
    __ Push(Factory::undefined_value());
    __ push(rbx);
    __ incq(rax);
    __ bind(&done);
  }

  // 2. Get the function to call (passed as receiver) from the stack, check
  //    if it is a function.
  Label non_function;
  // The function to call is at position n+1 on the stack.
  __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
  __ JumpIfSmi(rdi, &non_function);
  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
  __ j(not_equal, &non_function);

  // 3a. Patch the first argument if necessary when calling a function.
  Label shift_arguments;
  { Label convert_to_object, use_global_receiver, patch_receiver;
    // Change context eagerly in case we need the global receiver.
    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

    __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
    __ JumpIfSmi(rbx, &convert_to_object);

    // null and undefined receivers are replaced by the global receiver.
    __ CompareRoot(rbx, Heap::kNullValueRootIndex);
    __ j(equal, &use_global_receiver);
    __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
    __ j(equal, &use_global_receiver);

    // Receivers already in the JS-object range need no conversion.
    __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
    __ j(below, &convert_to_object);
    __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
    __ j(below_equal, &shift_arguments);

    __ bind(&convert_to_object);
    __ EnterInternalFrame();  // In order to preserve argument count.
    __ Integer32ToSmi(rax, rax);
    __ push(rax);

    __ push(rbx);
    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
    __ movq(rbx, rax);

    __ pop(rax);
    __ SmiToInteger32(rax, rax);
    __ LeaveInternalFrame();
    // Restore the function to rdi.
    __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
    __ jmp(&patch_receiver);

    // Use the global receiver object from the called function as the
    // receiver.
    __ bind(&use_global_receiver);
    const int kGlobalIndex =
        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
    __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
    __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));

    __ bind(&patch_receiver);
    __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);

    __ jmp(&shift_arguments);
  }

  // 3b. Patch the first argument when calling a non-function. The
  //     CALL_NON_FUNCTION builtin expects the non-function callee as
  //     receiver, so overwrite the first argument which will ultimately
  //     become the receiver.
  __ bind(&non_function);
  __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
  __ xor_(rdi, rdi);  // rdi == 0 marks the non-function case below.

  // 4. Shift arguments and return address one slot down on the stack
  //    (overwriting the original receiver). Adjust argument count to make
  //    the original first argument the new receiver.
  __ bind(&shift_arguments);
  { Label loop;
    __ movq(rcx, rax);
    __ bind(&loop);
    __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
    __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
    __ decq(rcx);
    __ j(not_sign, &loop);  // While non-negative (to copy return address).
    __ pop(rbx);  // Discard copy of return address.
    __ decq(rax);  // One fewer argument (first argument is new receiver).
  }

  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
  { Label function;
    __ testq(rdi, rdi);
    __ j(not_zero, &function);
    // Expected argument count is zero for CALL_NON_FUNCTION.
    __ xor_(rbx, rbx);
    __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
            RelocInfo::CODE_TARGET);
    __ bind(&function);
  }

  // 5b. Get the code to call from the function and check that the number of
  //     expected arguments matches what we're providing. If so, jump
  //     (tail-call) to the code in register edx without checking arguments.
  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
  __ movsxlq(rbx,
             FieldOperand(rdx,
                          SharedFunctionInfo::kFormalParameterCountOffset));
  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
  __ cmpq(rax, rbx);
  // Mismatched counts go through the arguments adaptor instead.
  __ j(not_equal,
       Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
       RelocInfo::CODE_TARGET);

  ParameterCount expected(0);
  __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
}
// Implements Function.prototype.apply: checks stack space for the unrolled
// arguments, patches the receiver, copies each element of the arguments
// object onto the stack via the keyed-load IC, and invokes the function.
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
  // Stack at entry:
  //    rsp:    return address
  //  rsp+8:    arguments
  //  rsp+16:   receiver ("this")
  //  rsp+24:   function
  __ EnterInternalFrame();
  // Stack frame:
  //    rbp:    Old base pointer
  //  rbp[1]:   return address
  //  rbp[2]:   function arguments
  //  rbp[3]:   receiver
  //  rbp[4]:   function
  static const int kArgumentsOffset = 2 * kPointerSize;
  static const int kReceiverOffset = 3 * kPointerSize;
  static const int kFunctionOffset = 4 * kPointerSize;
  __ push(Operand(rbp, kFunctionOffset));
  __ push(Operand(rbp, kArgumentsOffset));
  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);

  // Check the stack for overflow. We are not trying to catch
  // interruptions (e.g. debug break and preemption) here, so the "real stack
  // limit" is checked.
  Label okay;
  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
  __ movq(rcx, rsp);
  // Make rcx the space we have left. The stack might already be overflowed
  // here which will cause rcx to become negative.
  __ subq(rcx, kScratchRegister);
  // Make rdx the space we need for the array when it is unrolled onto the
  // stack.
  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
  // Check if the arguments will overflow the stack.
  __ cmpq(rcx, rdx);
  __ j(greater, &okay);  // Signed comparison.

  // Out of stack space.
  __ push(Operand(rbp, kFunctionOffset));
  __ push(rax);
  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
  __ bind(&okay);
  // End of stack check.

  // Push current index and limit.
  const int kLimitOffset =
      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
  __ push(rax);  // limit
  __ push(Immediate(0));  // index

  // Change context eagerly to get the right global object if
  // necessary.
  __ movq(rdi, Operand(rbp, kFunctionOffset));
  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

  // Compute the receiver.
  Label call_to_object, use_global_receiver, push_receiver;
  __ movq(rbx, Operand(rbp, kReceiverOffset));
  __ JumpIfSmi(rbx, &call_to_object);
  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
  __ j(equal, &use_global_receiver);
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(equal, &use_global_receiver);

  // If given receiver is already a JavaScript object then there's no
  // reason for converting it.
  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
  __ j(below, &call_to_object);
  __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
  __ j(below_equal, &push_receiver);

  // Convert the receiver to an object.
  __ bind(&call_to_object);
  __ push(rbx);
  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
  __ movq(rbx, rax);
  __ jmp(&push_receiver);

  // Use the current global receiver object as the receiver.
  __ bind(&use_global_receiver);
  const int kGlobalOffset =
      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));

  // Push the receiver.
  __ bind(&push_receiver);
  __ push(rbx);

  // Copy all arguments from the array to the stack.
  Label entry, loop;
  __ movq(rax, Operand(rbp, kIndexOffset));
  __ jmp(&entry);
  __ bind(&loop);
  __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments

  // Use inline caching to speed up access to arguments.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  __ Call(ic, RelocInfo::CODE_TARGET);
  // It is important that we do not have a test instruction after the
  // call. A test instruction after the call is used to indicate that
  // we have generated an inline version of the keyed load. In this
  // case, we know that we are not generating a test instruction next.

  // Push the nth argument.
  __ push(rax);

  // Update the index on the stack and in register rax.
  __ movq(rax, Operand(rbp, kIndexOffset));
  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
  __ movq(Operand(rbp, kIndexOffset), rax);

  __ bind(&entry);
  __ cmpq(rax, Operand(rbp, kLimitOffset));
  __ j(not_equal, &loop);

  // Invoke the function.
  ParameterCount actual(rax);
  __ SmiToInteger32(rax, rax);
  __ movq(rdi, Operand(rbp, kFunctionOffset));
  __ InvokeFunction(rdi, actual, CALL_FUNCTION);
  __ LeaveInternalFrame();
  __ ret(3 * kPointerSize);  // remove function, receiver, and arguments
}
// Load the built-in Array function from the current context into `result`.
// Clobbers only `result`; reads the context from rsi.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
  // Load the global context.
  __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));

  // Load the Array function from the global context.
  __ movq(result,
          Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}
// Number of empty elements to allocate for an empty array. Also used as
// the minimum backing-store size when a zero-length array is requested
// (see AllocateJSArray below).
static const int kPreallocatedArrayElements = 4;
// Allocate an empty JSArray. The allocated array is put into the result
// register. If the parameter initial_capacity is larger than zero an elements
// backing store is allocated with this size and filled with the hole values.
// Otherwise the elements backing store is set to the empty FixedArray.
// Jumps to gc_required when new-space allocation fails.
static void AllocateEmptyJSArray(MacroAssembler* masm,
                                 Register array_function,
                                 Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Register scratch3,
                                 int initial_capacity,
                                 Label* gc_required) {
  ASSERT(initial_capacity >= 0);

  // Load the initial map from the array function.
  __ movq(scratch1, FieldOperand(array_function,
                                 JSFunction::kPrototypeOrInitialMapOffset));

  // Allocate the JSArray object together with space for a fixed array with the
  // requested elements.
  int size = JSArray::kSize;
  if (initial_capacity > 0) {
    size += FixedArray::SizeFor(initial_capacity);
  }
  __ AllocateInNewSpace(size,
                        result,
                        scratch2,
                        scratch3,
                        gc_required,
                        TAG_OBJECT);

  // Allocated the JSArray. Now initialize the fields except for the elements
  // array.
  // result: JSObject
  // scratch1: initial map
  // scratch2: start of next object
  __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
  __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
          Factory::empty_fixed_array());
  // Field JSArray::kElementsOffset is initialized later.
  __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));

  // If no storage is requested for the elements array just set the empty
  // fixed array.
  if (initial_capacity == 0) {
    __ Move(FieldOperand(result, JSArray::kElementsOffset),
            Factory::empty_fixed_array());
    return;
  }

  // Calculate the location of the elements array and set elements array member
  // of the JSArray.
  // result: JSObject
  // scratch2: start of next object
  __ lea(scratch1, Operand(result, JSArray::kSize));
  __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);

  // Initialize the FixedArray and fill it with holes. FixedArray length is
  // stored as a smi.
  // result: JSObject
  // scratch1: elements array
  // scratch2: start of next object
  __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
          Factory::fixed_array_map());
  __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
          Smi::FromInt(initial_capacity));

  // Fill the FixedArray with the hole value. Inline the code if short.
  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
  static const int kLoopUnfoldLimit = 4;
  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
  __ Move(scratch3, Factory::the_hole_value());
  if (initial_capacity <= kLoopUnfoldLimit) {
    // Use a scratch register here to have only one reloc info when unfolding
    // the loop.
    for (int i = 0; i < initial_capacity; i++) {
      __ movq(FieldOperand(scratch1,
                           FixedArray::kHeaderSize + i * kPointerSize),
              scratch3);
    }
  } else {
    Label loop, entry;
    __ jmp(&entry);
    __ bind(&loop);
    __ movq(Operand(scratch1, 0), scratch3);
    __ addq(scratch1, Immediate(kPointerSize));
    __ bind(&entry);
    __ cmpq(scratch1, scratch2);
    __ j(below, &loop);
  }
}
// Allocate a JSArray with the number of elements stored in a register. The
// register array_function holds the built-in Array function and the register
// array_size holds the size of the array as a smi. The allocated array is put
// into the result register and beginning and end of the FixedArray elements
// storage is put into registers elements_array and elements_array_end (see
// below for when that is not the case). If the parameter fill_with_holes is
// true the allocated elements backing store is filled with the hole values
// otherwise it is left uninitialized. When the backing store is filled the
// register elements_array is scratched.
//
// Fix: the label references `¬_empty` / `¬_empty_2` were HTML-entity
// mojibake ("&not" decoded to the negation sign) and did not compile;
// restored to `&not_empty` / `&not_empty_2`.
static void AllocateJSArray(MacroAssembler* masm,
                            Register array_function,  // Array function.
                            Register array_size,  // As a smi.
                            Register result,
                            Register elements_array,
                            Register elements_array_end,
                            Register scratch,
                            bool fill_with_hole,
                            Label* gc_required) {
  Label not_empty, allocated;

  // Load the initial map from the array function.
  __ movq(elements_array,
          FieldOperand(array_function,
                       JSFunction::kPrototypeOrInitialMapOffset));

  // Check whether an empty sized array is requested.
  __ testq(array_size, array_size);
  __ j(not_zero, &not_empty);

  // If an empty array is requested allocate a small elements array anyway. This
  // keeps the code below free of special casing for the empty array.
  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
  __ AllocateInNewSpace(size,
                        result,
                        elements_array_end,
                        scratch,
                        gc_required,
                        TAG_OBJECT);
  __ jmp(&allocated);

  // Allocate the JSArray object together with space for a FixedArray with the
  // requested elements.
  __ bind(&not_empty);
  SmiIndex index =
      masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
                        index.scale,
                        index.reg,
                        result,
                        elements_array_end,
                        scratch,
                        gc_required,
                        TAG_OBJECT);

  // Allocated the JSArray. Now initialize the fields except for the elements
  // array.
  // result: JSObject
  // elements_array: initial map
  // elements_array_end: start of next object
  // array_size: size of array (smi)
  __ bind(&allocated);
  __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
  __ Move(elements_array, Factory::empty_fixed_array());
  __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
  // Field JSArray::kElementsOffset is initialized later.
  __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);

  // Calculate the location of the elements array and set elements array member
  // of the JSArray.
  // result: JSObject
  // elements_array_end: start of next object
  // array_size: size of array (smi)
  __ lea(elements_array, Operand(result, JSArray::kSize));
  __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);

  // Initialize the fixed array. FixedArray length is stored as a smi.
  // result: JSObject
  // elements_array: elements array
  // elements_array_end: start of next object
  // array_size: size of array (smi)
  __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
          Factory::fixed_array_map());
  Label not_empty_2, fill_array;
  __ SmiTest(array_size);
  __ j(not_zero, &not_empty_2);
  // Length of the FixedArray is the number of pre-allocated elements even
  // though the actual JSArray has length 0.
  __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
          Smi::FromInt(kPreallocatedArrayElements));
  __ jmp(&fill_array);
  __ bind(&not_empty_2);
  // For non-empty JSArrays the length of the FixedArray and the JSArray is the
  // same.
  __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);

  // Fill the allocated FixedArray with the hole value if requested.
  // result: JSObject
  // elements_array: elements array
  // elements_array_end: start of next object
  __ bind(&fill_array);
  if (fill_with_hole) {
    Label loop, entry;
    __ Move(scratch, Factory::the_hole_value());
    __ lea(elements_array, Operand(elements_array,
                                   FixedArray::kHeaderSize - kHeapObjectTag));
    __ jmp(&entry);
    __ bind(&loop);
    __ movq(Operand(elements_array, 0), scratch);
    __ addq(elements_array, Immediate(kPointerSize));
    __ bind(&entry);
    __ cmpq(elements_array, elements_array_end);
    __ j(below, &loop);
  }
}
// Create a new array for the built-in Array function. This function allocates
// the JSArray object and the FixedArray elements array and initializes these.
// If the Array cannot be constructed in native code the runtime is called. This
// function assumes the following state:
//   rdi: constructor (built-in Array function)
//   rax: argc
//   rsp[0]: return address
//   rsp[8]: last argument
// This function is used for both construct and normal calls of Array. The only
// difference between handling a construct call and a normal call is that for a
// construct call the constructor function in rdi needs to be preserved for
// entering the generic code. In both cases argc in rax needs to be preserved.
// Both registers are preserved by this code so no need to differentiate between
// a construct call and a normal call.
static void ArrayNativeCode(MacroAssembler* masm,
                            Label *call_generic_code) {
  Label argc_one_or_more, argc_two_or_more;

  // Check for array construction with zero arguments.
  __ testq(rax, rax);
  __ j(not_zero, &argc_one_or_more);

  // Handle construction of an empty array.
  AllocateEmptyJSArray(masm,
                       rdi,
                       rbx,
                       rcx,
                       rdx,
                       r8,
                       kPreallocatedArrayElements,
                       call_generic_code);
  __ IncrementCounter(&Counters::array_function_native, 1);
  __ movq(rax, rbx);
  __ ret(kPointerSize);

  // Check for one argument. Bail out if argument is not smi or if it is
  // negative.
  __ bind(&argc_one_or_more);
  __ cmpq(rax, Immediate(1));
  __ j(not_equal, &argc_two_or_more);
  __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
  __ JumpIfNotPositiveSmi(rdx, call_generic_code);

  // Handle construction of an empty array of a certain size. Bail out if size
  // is too large to actually allocate an elements array.
  __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
  __ j(greater_equal, call_generic_code);

  // rax: argc
  // rdx: array_size (smi)
  // rdi: constructor
  // esp[0]: return address
  // esp[8]: argument
  AllocateJSArray(masm,
                  rdi,
                  rdx,
                  rbx,
                  rcx,
                  r8,
                  r9,
                  true,
                  call_generic_code);
  __ IncrementCounter(&Counters::array_function_native, 1);
  __ movq(rax, rbx);
  __ ret(2 * kPointerSize);

  // Handle construction of an array from a list of arguments.
  __ bind(&argc_two_or_more);
  __ movq(rdx, rax);
  __ Integer32ToSmi(rdx, rdx);  // Convert argc to a smi.
  // rax: argc
  // rdx: array_size (smi)
  // rdi: constructor
  // esp[0] : return address
  // esp[8] : last argument
  AllocateJSArray(masm,
                  rdi,
                  rdx,
                  rbx,
                  rcx,
                  r8,
                  r9,
                  false,
                  call_generic_code);
  __ IncrementCounter(&Counters::array_function_native, 1);

  // rax: argc
  // rbx: JSArray
  // rcx: elements_array
  // r8: elements_array_end (untagged)
  // esp[0]: return address
  // esp[8]: last argument

  // Location of the last argument
  __ lea(r9, Operand(rsp, kPointerSize));

  // Location of the first array element (Parameter fill_with_hole to
  // AllocateJSArray is false, so the FixedArray is returned in rcx).
  __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));

  // rax: argc
  // rbx: JSArray
  // rdx: location of the first array element
  // r9: location of the last argument
  // esp[0]: return address
  // esp[8]: last argument
  Label loop, entry;
  __ movq(rcx, rax);
  __ jmp(&entry);
  __ bind(&loop);
  // Copy arguments from the stack (last-to-first) into the elements array.
  __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
  __ movq(Operand(rdx, 0), kScratchRegister);
  __ addq(rdx, Immediate(kPointerSize));
  __ bind(&entry);
  __ decq(rcx);
  __ j(greater_equal, &loop);

  // Remove caller arguments from the stack and return.
  // rax: argc
  // rbx: JSArray
  // esp[0]: return address
  // esp[8]: last argument
  __ pop(rcx);
  __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
  __ push(rcx);
  __ movq(rax, rbx);
  __ ret(0);
}
// Entry point for the Array function called as a normal function; falls
// back to the generic array code when the native fast path bails out.
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : argc
  //  -- rsp[0] : return address
  //  -- rsp[8] : last argument
  // -----------------------------------
  Label generic_array_code;

  // Get the Array function.
  GenerateLoadArrayFunction(masm, rdi);

  if (FLAG_debug_code) {
    // Initial map for the builtin Array function should be a map.
    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    ASSERT(kSmiTag == 0);
    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
    __ Check(not_smi, "Unexpected initial map for Array function");
    __ CmpObjectType(rbx, MAP_TYPE, rcx);
    __ Check(equal, "Unexpected initial map for Array function");
  }

  // Run the native code for the Array function called as a normal function.
  ArrayNativeCode(masm, &generic_array_code);

  // Jump to the generic array code in case the specialized code cannot handle
  // the construction.
  __ bind(&generic_array_code);
  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
  Handle<Code> array_code(code);
  __ Jump(array_code, RelocInfo::CODE_TARGET);
}
// Entry point for the Array function called as a constructor; falls back
// to the generic construct stub when the native fast path bails out.
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : argc
  //  -- rdi    : constructor
  //  -- rsp[0] : return address
  //  -- rsp[8] : last argument
  // -----------------------------------
  Label generic_constructor;

  if (FLAG_debug_code) {
    // The array construct code is only set for the builtin Array function which
    // does always have a map.
    GenerateLoadArrayFunction(masm, rbx);
    __ cmpq(rdi, rbx);
    __ Check(equal, "Unexpected Array function");
    // Initial map for the builtin Array function should be a map.
    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    ASSERT(kSmiTag == 0);
    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
    __ Check(not_smi, "Unexpected initial map for Array function");
    __ CmpObjectType(rbx, MAP_TYPE, rcx);
    __ Check(equal, "Unexpected initial map for Array function");
  }

  // Run the native code for the Array function called as constructor.
  ArrayNativeCode(masm, &generic_constructor);

  // Jump to the generic construct code in case the specialized code cannot
  // handle the construction.
  __ bind(&generic_constructor);
  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
  Handle<Code> generic_construct_stub(code);
  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
// Dispatches a construct call: jumps to the function's construct stub for
// real JSFunctions, otherwise routes through
// CALL_NON_FUNCTION_AS_CONSTRUCTOR via the arguments adaptor.
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax: number of arguments
  //  -- rdi: constructor function
  // -----------------------------------

  Label non_function_call;
  // Check that function is not a smi.
  __ JumpIfSmi(rdi, &non_function_call);
  // Check that function is a JSFunction.
  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
  __ j(not_equal, &non_function_call);

  // Jump to the function-specific construct stub.
  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
  __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
  __ jmp(rbx);

  // edi: called object
  // eax: number of arguments
  __ bind(&non_function_call);
  // CALL_NON_FUNCTION expects the non-function constructor as receiver
  // (instead of the original receiver from the call site). The receiver is
  // stack element argc+1.
  __ movq(Operand(rsp, rax, times_pointer_size, kPointerSize), rdi);
  // Set expected number of arguments to zero (not changing rax).
  __ movq(rbx, Immediate(0));
  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
          RelocInfo::CODE_TARGET);
}
// Emits the JSConstructStub: the code that runs when a JSFunction is
// invoked as a constructor ("new f(...)").
// On entry (established by the code above that jumps here):
//   rax: number of arguments
//   rdi: constructor function
// The stub allocates the receiver object -- inline when FLAG_inline_new
// permits, otherwise via Runtime::kNewObject -- copies the call's
// arguments onto the expression stack, invokes the constructor, and
// returns either the constructor's object result or the allocated
// receiver per ECMA-262 section 13.2.2.
// is_api_function selects the API-call construct path (HandleApiCallConstruct).
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function) {
// Enter a construct frame.
__ EnterConstructFrame();
// Store a smi-tagged arguments count on the stack.
__ Integer32ToSmi(rax, rax);
__ push(rax);
// Push the function to invoke on the stack.
__ push(rdi);
// Try to allocate the object without transitioning into C code. If any of the
// preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
// If the debugger is single-stepping, always take the runtime path so
// the debugger can observe the allocation.
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address();
__ movq(kScratchRegister, debug_step_in_fp);
__ cmpq(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// rdi: constructor
__ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
ASSERT(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
// rdi: constructor
// rax: initial map (if proven valid below)
__ CmpObjectType(rax, MAP_TYPE, rbx);
__ j(not_equal, &rt_call);
// Check that the constructor is not constructing a JSFunction (see comments
// in Runtime_NewObject in runtime.cc). In which case the initial map's
// instance type would be JS_FUNCTION_TYPE.
// rdi: constructor
// rax: initial map
__ CmpInstanceType(rax, JS_FUNCTION_TYPE);
__ j(equal, &rt_call);
// Now allocate the JSObject on the heap.
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
// rdi: size of new object
__ AllocateInNewSpace(rdi,
rbx,
rdi,
no_reg,
&rt_call,
NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
// rdi: start of next object
__ movq(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
__ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
__ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
// rdi: start of next object
// Fill all in-object property slots with undefined.
{ Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
__ movq(Operand(rcx, 0), rdx);
__ addq(rcx, Immediate(kPointerSize));
__ bind(&entry);
__ cmpq(rcx, rdi);
__ j(less, &loop);
}
// Add the object tag to make the JSObject real, so that we can continue and
// jump into the continuation code at any time from now on. Any failures
// need to undo the allocation, so that the heap is in a consistent state
// and verifiable.
// rax: initial map
// rbx: JSObject
// rdi: start of next object
__ or_(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
// rax: initial map
// rbx: JSObject
// rdi: start of next object
// Calculate total properties described map.
__ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
__ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
__ addq(rdx, rcx);
// Calculate unused properties past the end of the in-object properties.
__ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
__ subq(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
// A negative count would mean the map is inconsistent (debug-only check).
__ Assert(positive, "Property allocation count failed.");
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
// rbx: JSObject
// rdi: start of next object (will be start of FixedArray)
// rdx: number of elements in properties array
__ AllocateInNewSpace(FixedArray::kHeaderSize,
times_pointer_size,
rdx,
rdi,
rax,
no_reg,
&undo_allocation,
RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// rbx: JSObject
// rdi: FixedArray
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
__ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
__ Integer32ToSmi(rdx, rdx);
__ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject
// rdi: FixedArray
// rax: start of next object
// rdx: number of elements
{ Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
__ movq(Operand(rcx, 0), rdx);
__ addq(rcx, Immediate(kPointerSize));
__ bind(&entry);
__ cmpq(rcx, rax);
__ j(below, &loop);
}
// Store the initialized FixedArray into the properties field of
// the JSObject
// rbx: JSObject
// rdi: FixedArray
__ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
__ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
// Continue with JSObject being successfully allocated
// rbx: JSObject
__ jmp(&allocated);
// Undo the setting of the new top so that the heap is verifiable. For
// example, the map's unused properties potentially do not match the
// allocated objects unused properties.
// rbx: JSObject (previous new top)
__ bind(&undo_allocation);
__ UndoAllocationInNewSpace(rbx);
}
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
// Must restore rdi (constructor) before calling runtime.
__ movq(rdi, Operand(rsp, 0));
__ push(rdi);
__ CallRuntime(Runtime::kNewObject, 1);
__ movq(rbx, rax); // store result in rbx
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
// Retrieve the function from the stack.
__ pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
__ movq(rax, Operand(rsp, 0));
__ SmiToInteger32(rax, rax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
__ push(rbx);
__ push(rbx);
// Setup pointer to last argument.
__ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
// Copies from index rax-1 down to 0 (rcx counts down).
Label loop, entry;
__ movq(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
__ push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
__ decq(rcx);
__ j(greater_equal, &loop);
// Call the function.
if (is_api_function) {
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
Handle<Code> code = Handle<Code>(
Builtins::builtin(Builtins::HandleApiCallConstruct));
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
RelocInfo::CODE_TARGET, CALL_FUNCTION);
} else {
ParameterCount actual(rax);
__ InvokeFunction(rdi, actual, CALL_FUNCTION);
}
// Restore context from the frame.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
// on page 74.
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
__ JumpIfSmi(rax, &use_receiver);
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
__ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
__ movq(rax, Operand(rsp, 0));
// Restore the arguments count and leave the construct frame.
__ bind(&exit);
__ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count
__ LeaveConstructFrame();
// Remove caller arguments from the stack and return.
__ pop(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ push(rcx);
__ IncrementCounter(&Counters::constructed_objects, 1);
__ ret(0);
}
// Public entry point: the generic (non-API) construct stub.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false);
}
// Public entry point: the construct stub for API functions
// (routes the call through HandleApiCallConstruct).
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true);
}
// Emits the JS entry trampoline: the bridge from C++ (execution.cc)
// into generated JS code. Normalizes the platform calling convention
// (Win64 vs. System V) into the registers JS code expects, copies the
// argv handles onto the stack, and either calls the function or, when
// is_construct is set, the JSConstructCall builtin.
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Expects five C++ function parameters.
// - Address entry (ignored)
// - JSFunction* function
// - Object* receiver
// - int argc
// - Object*** argv
// (see Handle::Invoke in execution.cc).
// Platform specific argument handling. After this, the stack contains
// an internal frame and the pushed function and receiver, and
// register rax and rbx holds the argument count and argument array,
// while rdi holds the function pointer and rsi the context.
#ifdef _WIN64
// MSVC parameters in:
// rcx : entry (ignored)
// rdx : function
// r8 : receiver
// r9 : argc
// [rsp+0x20] : argv
// Clear the context before we push it when entering the JS frame.
__ xor_(rsi, rsi);
__ EnterInternalFrame();
// Load the function context into rsi.
__ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
__ push(rdx);
__ push(r8);
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, r9);
// Load the previous frame pointer to access C argument on stack
__ movq(kScratchRegister, Operand(rbp, 0));
__ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
__ movq(rdi, rdx);
#else // _WIN64
// GCC parameters in:
// rdi : entry (ignored)
// rsi : function
// rdx : receiver
// rcx : argc
// r8 : argv
__ movq(rdi, rsi);
// rdi : function
// Clear the context before we push it when entering the JS frame.
__ xor_(rsi, rsi);
// Enter an internal frame.
__ EnterInternalFrame();
// Push the function and receiver and setup the context.
__ push(rdi);
__ push(rdx);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, rcx);
__ movq(rbx, r8);
#endif // _WIN64
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
__ movq(kRootRegister, roots_address);
// Current stack contents:
// [rsp + 2 * kPointerSize ... ]: Internal frame
// [rsp + kPointerSize] : function
// [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
// rsi : context
// rdi : function
// Copy arguments to the stack in a loop.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
Label loop, entry;
__ xor_(rcx, rcx); // Set loop variable to 0.
__ jmp(&entry);
__ bind(&loop);
__ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
__ push(Operand(kScratchRegister, 0)); // dereference handle
__ addq(rcx, Immediate(1));
__ bind(&entry);
__ cmpq(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
if (is_construct) {
// Expects rdi to hold function pointer.
__ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
// Function must be in rdi.
__ InvokeFunction(rdi, actual, CALL_FUNCTION);
}
// Exit the JS frame. Notice that this also removes the empty
// context and the function left on the stack by the code
// invocation.
__ LeaveInternalFrame();
// TODO(X64): Is argument correct? Is there a receiver to remove?
__ ret(1 * kPointerSize); // remove receiver
}
// Public entry point: the trampoline for ordinary (non-construct) calls.
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
// Public entry point: the trampoline for construct ("new") calls.
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
|
#ifndef INCLUDED_ZENO_GUI_COMBO_BOX_HPP
#define INCLUDED_ZENO_GUI_COMBO_BOX_HPP
#include <zeno/GUI/GuiBase.hpp>
namespace zeno {
// A drop-down (combo-box style) GUI element: a closed box showing the
// current choice plus a triangle glyph; when extended it shows one
// option box per choice. Construction is restricted to Desktop
// (private ctor + friend), via createElement.
class DropdownMenu : public GuiBase
{
public:
////////////////////////////////////////////////////////////
//
// Virtual function which returns whether or not the
// element can handle the given event, and if it can handle
// it, it does so
//
////////////////////////////////////////////////////////////
virtual bool processEvent(const GUIEvent& _event);
////////////////////////////////////////////////////////////
//
// Renders the element
//
////////////////////////////////////////////////////////////
virtual void render(Mat4x4 _transform) const;
////////////////////////////////////////////////////////////
//
// Returns the Rect object that contains all the points of
// the GUI element.
//
////////////////////////////////////////////////////////////
virtual FloatRect getBounds(void);
//~ Adds a selectable option. Expensive, causes combo box to be rebuilt
void addChoice(const std::string& _choice);
// Returns the text of the currently selected option.
std::string getCurrentChoice(void) const;
// Sets the size of the (closed) box.
void setSize(const zeno::Vector2f& _size);
private:
// Only Desktop may construct/destroy instances (see createElement).
friend class Desktop;
////////////////////////////////////////////////////////////
//
// Default constructor
//
////////////////////////////////////////////////////////////
DropdownMenu(const std::string& _id, GuiBase *_parent, Desktop& _desktop);
////////////////////////////////////////////////////////////
//
// Destructor
//
////////////////////////////////////////////////////////////
~DropdownMenu(void);
// Factory used by Desktop to instantiate this element.
static DropdownMenu *createElement(const std::string& _id, GuiBase *_parent, Desktop& _desktop);
// Geometry/colour (re)builders for the sub-parts of the widget.
void createBoxPositions(void);
void createBoxColours(void);
void setTrianglePositions(void);
void setTriangleColours(void);
void setBackgroundPositions(void);
void setBackgroundColours(void);
void setOptionBoxPositions(void);
void setOptionBoxColours(void);
virtual void initialise(void);
private:
unsigned int m_VAO; // vertex array object handle (presumably OpenGL -- confirm)
unsigned int m_PositionVBO; // vertex buffer for positions
unsigned int m_ColourVBO; // vertex buffer for colours
Vector2f m_BoxSize; // size of the closed box
bool m_Extended; // true while the option list is open
unsigned int m_Options; // number of choices added
std::vector<std::string> m_OptionStrings; // the choice labels
int m_CurrentChoice; // index into m_OptionStrings of the selection
};
} //~ namespace zeno
#endif //~ INCLUDED_ZENO_GUI_COMBO_BOX_HPP
|
#include "Language.h"
#include "sort.h"
#include "Speller.h"
#include "Sections.h"
// word/phrase must be in at least this many docs to be included in our dict
#define MIN_DOCS 3
// ROUTINES NEEDED FOR GBSORT
// The dict is stored as a tuple of ( original word, phonetic, (lang, score)..)
// gbsort comparator: orders dictionary tuples by their phonetic field.
// Each element is a char* into a tuple laid out as
// "phrase\0phonetic\0(lang,score)...": the phonetic immediately follows
// the NUL-terminated phrase.
int cmpPhonet (const void *v1, const void *v2) {
    char *tuple1 = *(char **)v1;
    char *tuple2 = *(char **)v2;
    // step over each phrase (and its NUL) to reach the phonetic
    char *phonet1 = tuple1 + gbstrlen(tuple1) + 1;
    char *phonet2 = tuple2 + gbstrlen(tuple2) + 1;
    return strcmp ( phonet1 , phonet2 );
}
int cmpScores (const void *v1, const void *v2) {
Reco r1 = *(Reco *) v1;
Reco r2 = *(Reco *) v2;
return ( r1.score > r2.score );
}
// gbsort comparator: forward lexicographic compare of two phrases.
// Elements are char** pointing into the tuple buffer.
int cmpFrnt (const void *v1, const void *v2) {
    const char *phrase1 = *(char **) v1;
    const char *phrase2 = *(char **) v2;
    return strcmp ( phrase1 , phrase2 );
}
// gbsort comparator: lexicographic compare of two phrases read BACKWARD
// (last character first), used to group phrases by their endings.
// NOTE(review): the backward walk steps before the first character of a
// phrase; termination relies on a '\0' byte preceding each phrase in the
// tuple buffer, and phrases are assumed non-empty -- confirm against the
// buffer layout built in loadSpellerDict.
// BUG FIX: the old version returned -1 for two IDENTICAL strings (both
// pointers reach '\0' together, and the "*p1 == '\0'" check fired first).
// That makes cmp(a,b) == cmp(b,a) == -1, an inconsistent comparator,
// which is undefined behavior for qsort-style sorts. Equal strings now
// compare equal.
int cmpBck (const void *v1, const void *v2) {
    char *p1 = *(char **) v1;
    char *p2 = *(char **) v2;
    // start at the last character of each phrase
    p1 += gbstrlen(p1) - 1;
    p2 += gbstrlen(p2) - 1;
    // walk both strings back-to-front until they differ or one ends
    while ( *p1 != '\0' && *p2 != '\0' ) {
        if ( *p1 > *p2 )
            return 1;
        else if ( *p1 < *p2 )
            return -1;
        p1--;
        p2--;
    }
    // both exhausted simultaneously => the strings are reverse-equal
    if ( *p1 == '\0' && *p2 == '\0' )
        return 0;
    // the shorter (reversed prefix) string sorts first
    if ( *p1 == '\0' )
        return -1;
    return 1;
}
// s_keyMap[c - 'a'] gives the index of letter c within s_keyboard below
// (e.g. 'a' -> 10, 'q' -> 0, 'b' -> 24). Together the two tables model
// the physical QWERTY layout, 3 rows of 10 slots ('\0' pads the short
// rows), so code can reason about which keys are adjacent.
static char s_keyMap[] = { 10, 24, 22, 12, 2, 13, 14, 15, 7, 16,
17, 18, 26, 25, 8, 9 , 0 , 3 , 11, 4,
6 , 23, 1 , 21, 5, 20 };
static char s_keyboard[] = {'q' ,'w','e','r','t','y','u','i','o' ,'p' ,
'a' ,'s','d','f','g','h','j','k','l' ,'\0',
'z','x','c','v','b','n','m','\0','\0','\0'};
//static void gotSummaryWrapper ( void *state );
//static void gotIndexListWrapper( void *state , RdbList *list );
//static void gotTermFreqsWrapper( void *state );
/*static void gotAffinityFreqs1Wrapper(void *state);
static void gotAffinityFreqs2Wrapper(void *state);*/
// Constructor: zero out all buffer pointers/sizes, seed the edit-distance
// weights with aspell's defaults, then reset() to establish the rest of
// the initial state. No I/O happens here; init() does the loading.
Language::Language(){
m_rulesBuf = NULL;
m_rulesBufSize = 0;
m_rulesPtr = NULL;
m_rulesPtrSize = 0;
m_distributedBuf = NULL;
m_distributedBufSize = 0;
m_tuplePtr = NULL;
m_tuplePtrSize = 0;
m_narrowBuf = NULL;
m_narrowBufSize = 0;
m_numNarrowPtrs = 0;
// Set to the default aspell parms
m_editDistanceWeightsDel1 = 95;
m_editDistanceWeightsDel2 = 95;
m_editDistanceWeightsSwap = 90;
m_editDistanceWeightsSub = 100;
m_editDistanceWeightsSimilar = 10;
m_editDistanceWeightsMin = 95;
m_editDistanceWeightsMax = 100;
m_soundslikeWeight = 15;
m_wordWeight = 85;
m_span = 50;
// . set m_map
// . this maps an ascii char to a char in dict space
// . used in loadNarrow
/*
for ( int32_t i = 0 ; i < 256 ; i++ ) {
unsigned char d = to_upper_ascii(i);
if ( is_alpha(d) ) {
// some like char 254 aren't really ascii!!
// so make them into Z's, a rare letter, which
// probably isn't in the same alphabet as 222 and 254
if ( d == 222 ) m_map[i] = 'Z' - 'A' + 12;
else if ( d == 254 ) m_map[i] = 'Z' - 'A' + 12;
else if ( d < 'A' ) m_map[i] = 38; // use apostrophes
else if ( d > 'Z' ) m_map[i] = 38; // use apostrophes
else m_map[i] = d - 'A' + 12;
continue;
}
if ( is_digit(d) ) m_map[i] = d - '0' + 2;
else if ( d == 0 ) m_map[i] = 0;
else if ( d == '\'' ) m_map[i] = 38;
else if ( d == '-' ) m_map[i] = 39;
else if ( d == '\n' ) m_map[i] = 0;
else m_map[i] = 1; // a space
}
*/
reset();
}
/*
bool Language::convertLatin1DictToUTF8( char *infile ){
// open the file for reading
FILE *fdr = fopen ( infile , "r" );
if ( ! fdr )
return log( "lang: Failed to open %s for reading: "
"%s.",infile, strerror(errno) );
char ff[1024];
// open for writing
sprintf ( ff , "%s.utf8", infile );
// delete it first
unlink ( ff );
// then open a new one for appending
int fdw = open ( ff ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fdw < 0 ){
return log("lang: Could not open for %s "
"writing: %s.",ff, strerror(errno));
}
char buf[1024];
char out[4*1024];
// this loop goes through all the words and only adds those
// words into the phonetic dict that have phonets.
while ( fgets ( buf , 1024 , fdr ) ) {
int32_t wlen = gbstrlen(buf);
if ( wlen <= 0 || wlen > MAX_PHRASE_LEN )
continue;
// remove the newline \n
buf [wlen - 1] = '\0';
int32_t outLen = latin1ToUtf8(out, 4*1024, buf, gbstrlen(buf));
// write out the trailing \n as well
out[outLen] = '\n';
outLen++;
int32_t wn = write ( fdw , out , outLen ) ;
if ( wn != outLen )
return log("lang: write: %s",
strerror(errno));
}
fclose(fdr);
close(fdw);
return true;
}
*/
// Destructor: reset() frees every buffer this instance owns.
Language::~Language(){
reset();
}
// Free every buffer this instance owns and restore the flags to their
// defaults. Idempotent: pointers are NULLed and sizes zeroed after each
// free, so calling reset() twice (ctor + dtor both call it) is safe.
void Language::reset(){
    if ( m_rulesBuf && m_rulesBufSize > 0 ){
        mfree( m_rulesBuf, m_rulesBufSize, "LanguageBuf" );
        m_rulesBuf = NULL;
        m_rulesBufSize = 0;
    }
    if ( m_rulesPtr && m_rulesPtrSize > 0 ){
        mfree( m_rulesPtr, m_rulesPtrSize, "LanguagePtrBuf" );
        m_rulesPtr = NULL;
        m_rulesPtrSize = 0;
    }
    if ( m_distributedBuf && m_distributedBufSize > 0 ){
        mfree( m_distributedBuf, m_distributedBufSize,
               "DistributedPtrBuf" );
        m_distributedBuf = NULL;
        m_distributedBufSize = 0;
    }
    if ( m_tuplePtr && m_tuplePtrSize > 0 ){
        // BUG FIX: this buffer is allocated in loadSpellerDict() with
        // the tag "LanguageTuplePtr"; freeing it under the old tag
        // "LanguageWordsPtr" mismatched the allocation label used by
        // the memory accounting.
        mfree(m_tuplePtr, m_tuplePtrSize, "LanguageTuplePtr");
        m_tuplePtr = NULL;
        m_tuplePtrSize = 0;
    }
    if ( m_narrowBuf && m_narrowBufSize > 0 ){
        mfree(m_narrowBuf, m_narrowBufSize, "LanguageNarrowBuf");
        m_narrowBuf = NULL;
        m_narrowBufSize = 0;
    }
    m_numRules = 0;
    m_numTuples = 0;
    // defaults for the phonet-rule header directives; loadRules() may
    // override them from the rules file
    m_followup = true;
    m_collapseResult = false;
    m_removeAccents = true;
}
// Initialize this language: size the hash tables, record the language id
// and charset, then load the phonetic rules file and the speller
// dictionary (partitioned across hosts via hostsPerSplit/myHash).
// unifiedBuf/unifiedBufSize: the shared speller dictionary buffer.
// Returns false (after logging) if either load fails.
bool Language::init( char *unifiedBuf, int32_t unifiedBufSize, int32_t lang,
int32_t hostsPerSplit, uint32_t myHash ){
// drop any previously loaded state first
reset();
if ( ! m_phonetics.set(256) ) return false;
if ( ! m_dict.set(256) ) return false;
if ( ! m_distributedPopPhrases.set(256) ) return false;
m_lang = lang;
m_charset = getLanguageCharset(m_lang);
// load the hashtable for getPhrasePopularity
//if ( !loadDict() )
// load the rules dictionary
if ( !loadRules( ) ||
!loadSpellerDict( unifiedBuf, unifiedBufSize, hostsPerSplit,
myHash ) ){
log ( LOG_INIT,"lang: Error initializing for "
"language %s", getLanguageAbbr(m_lang) );
return false;
}
//if ( g_conf.m_doNarrowSearch &&
// !loadNarrow( unifiedBuf, unifiedBufSize, hostsPerSplit, myHash) ){
// log ( LOG_INIT,"lang: Error initializing narrow search for "
// "language %s", getLanguageAbbr(m_lang) );
// // don't return since this isn't critical
// //return false
//}
return true;
}
///////////////////////////////////////////////////////
// DICTIONARY LOADING ROUTINES BELOW HERE
//
// These load dictionary files from g_hostdb.m_dir/dict/
///////////////////////////////////////////////////////
// Load the per-language phonetic rules file
// ("<dir>dict/<ab>/<ab>_phonet.dat") into m_rulesBuf and build
// m_rulesPtr, an array of (pattern, replacement) string pairs. Also
// parses the "followup", "collapse_result" and "version" header
// directives into m_followup / m_collapseResult / m_removeAccents, and
// fills m_ruleStarts / m_ruleChars for fast per-character rule lookup.
// Returns false (after logging) on I/O or allocation failure.
bool Language::loadRules ( ) {
    char ff[1024];
    File f;
    sprintf ( ff , "%sdict/%s/%s_phonet.dat", g_hostdb.m_dir,
              getLanguageAbbr(m_lang), getLanguageAbbr(m_lang));
    f.set ( ff );
    // open file
    if ( ! f.open ( O_RDONLY ) ) {
        log("lang: open: %s",mstrerror(g_errno));
        return false;
    }
    // get file size
    int32_t fileSize = f.getFileSize() ;
    // store a \0 at the end
    m_rulesBufSize = fileSize + 1;
    // make buffer to hold all
    m_rulesBuf = (char *) mmalloc( m_rulesBufSize, "LanguageBuf" );
    if ( !m_rulesBuf ) {
        g_errno = ENOMEM;
        log("lang: mmalloc: %s",mstrerror(errno));
        return false;
    }
    // read em all in
    if ( ! f.read ( m_rulesBuf , fileSize , 0 ) ) {
        log("lang: read: %s", mstrerror(g_errno));
        return false;
    }
    m_rulesBuf[fileSize] = '\0';
    // split the buffer into NUL-terminated lines
    for ( int32_t i = 0 ; i < m_rulesBufSize ; i++ ) {
        if ( m_rulesBuf[i] != '\n' )
            continue;
        m_rulesBuf[i] = '\0';
    }
    f.close();
    m_numRules = 0;
    char *p = m_rulesBuf;
    // First pass: count rule entries (each rule contributes 2 pointers:
    // pattern and replacement). Header directives and comments are not
    // counted but are parsed for their flag values.
    while ( p < ( m_rulesBuf + m_rulesBufSize ) ){
        // if it is a comment, skip
        // if no line, skip
        if ( *p == '#' || gbstrlen(p) == 0 || *p == ' ' ){
            p += gbstrlen(p) + 1;
            continue;
        }
        // header directive: "followup <0|1>"
        if ( strstr(p, "followup") == p ){
            while ( *p != ' ' )
                p++;
            while ( *p == ' ' )
                p++;
            if ( *p != '1' )
                m_followup = false;
        }
        // header directive: "collapse_result <0|1>"
        else if ( strstr(p, "collapse_result") == p ){
            while ( *p != ' ' )
                p++;
            while ( *p == ' ' )
                p++;
            if ( *p == '1' )
                m_collapseResult = true;
        }
        // header directive: "version <n>"
        else if ( strstr(p, "version") == p ){
            while ( *p != ' ' )
                p++;
            while ( *p == ' ' )
                p++;
            if ( *p != '1' )
                m_removeAccents = false;
        }
        // else it is a rule line: pattern + replacement
        else
            m_numRules += 2;
        p += gbstrlen(p) + 1;
    }
    // Allocate one pointer per counted entry. BUG FIX: the old code
    // computed m_numRules * sizeof(char*) * m_numRules -- multiplying by
    // the rule count twice -- which over-allocated quadratically and
    // risked int32 overflow for large rule files.
    m_rulesPtrSize = m_numRules * sizeof ( char* );
    m_rulesPtr = (char **) mmalloc(m_rulesPtrSize,"LanguagePtrBuf");
    if ( !m_rulesPtr ){
        g_errno = ENOMEM;
        log("lang: mmalloc: %s",mstrerror(errno));
        return false;
    }
    // init the per-character lookup tables
    for ( int32_t i = 0; i < MAX_CHARS; i++) {
        m_ruleStarts[i] = -1;
        m_ruleChars[i] = false;
    }
    // Second pass: NUL-split each rule line in place and record the
    // pattern/replacement pointers.
    p = m_rulesBuf;
    int32_t numRules = 0;
    while ( p < ( m_rulesBuf + m_rulesBufSize ) ){
        char *start = p;
        // if it is a comment, skip
        // if no line, skip
        if ( *p == '#' || gbstrlen(p) == 0 || *p == ' ' ){
            p += gbstrlen(p) + 1;
            continue;
        }
        // terminate the first token and skip the separating spaces
        while ( *p != ' ' )
            p++;
        while ( *p == ' ' ){
            *p = '\0';
            p++;
        }
        // if the rule converts a letter into a '_' (blank)
        if ( *p == '_' )
            *p = '\0';
        if ( strstr(start, "followup") == start ){
            if ( *p != '1' )
                m_followup = false;
        }
        else if ( strstr(start, "collapse_result") == start ){
            if ( *p == '1' )
                m_collapseResult = true;
        }
        else if ( strstr(start, "version") == start ){
            if ( *p != '1' )
                m_removeAccents = false;
        }
        // else it is a rule: record (pattern, replacement)
        else{
            m_rulesPtr[numRules++] = start;
            m_rulesPtr[numRules++] = p;
            // mark the chars that occur in the rule
            // lets just mark the first char. It seems to suffice.
            // BUG FIX: cast through UChar8 like the m_ruleStarts loop
            // below -- plain char is signed, so bytes >= 0x80 (accented
            // characters) would have indexed the array negatively.
            if ( *p )
                m_ruleChars[(UChar8)*p] = true;
        }
        p += gbstrlen(p) + 1;
    }
    // m_ruleStarts[i] points to the index of the m_rulesPtr where the
    // rule of character i starts
    for ( int32_t i = 0; i < numRules; i += 2) {
        int32_t k = (UChar8) m_rulesPtr[i][0];
        if ( m_ruleStarts[k] < 0 )
            m_ruleStarts[k] = i;
    }
    // if ( m_lang == 2 || m_lang == 3 ) makeDict();
    return true;
}
// Build this host's slice of the phonetic dictionary. Tuples come from
// two sources: (a) the shared speller buffer (spellerBuf), filtered to
// phrases popular in this language whose phonet hashes to this host, and
// (b) this host's distributed query-phonet file, generated on demand
// from the common pop file. The tuples are sorted by phonet and indexed
// in m_phonetics: phonetKey -> (startIndex << 32 | count) into m_tuplePtr.
// Tuple layout in both buffers: "phrase\0phonet\0(lang,score)\0".
bool Language::loadSpellerDict( char *spellerBuf, int32_t spellerBufSize,
int32_t hostsPerSplit, uint32_t myHash ){
File distributedPopFile;
char ff[1024];
// load the distributed pop file
sprintf ( ff , "%sdict/%s/%s.query.phonet.%"INT32"", g_hostdb.m_dir,
getLanguageAbbr(m_lang), getLanguageAbbr(m_lang), myHash);
distributedPopFile.set ( ff );
if ( ! distributedPopFile.open ( O_RDONLY ) ) {
log("lang: open: %s. Generating from common pop file",
mstrerror(g_errno));
sprintf ( ff , "%sdict/%s/%s.query.phonet", g_hostdb.m_dir,
getLanguageAbbr(m_lang), getLanguageAbbr(m_lang));
// If we don't have the distributed pop file, open the
// common pop file and generate the distributed one
if ( !genDistributedPopFile( ff, myHash ))
return false;
// try opening the file now
if ( ! distributedPopFile.open ( O_RDONLY ) ) {
log("lang: open: %s",mstrerror(g_errno));
return false;
}
}
// get file sizes
int32_t distributedPopFileSize = distributedPopFile.getFileSize();
// store a \0 at the end
m_distributedBufSize = distributedPopFileSize + 1;
// make buffer to hold all
m_distributedBuf = (char *) mmalloc(m_distributedBufSize,
"DistributedPtrBuf");
if ( !m_distributedBuf) {
log("lang: mmalloc: %s",mstrerror(errno));return false;
}
char *p = m_distributedBuf;
// read em all in
if ( ! distributedPopFile.read ( p , distributedPopFileSize , 0 ) ){
log("lang: read: %s", mstrerror(g_errno));
return false;
}
m_distributedBuf[distributedPopFileSize] = '\0';
distributedPopFile.close();
// count the tuples that belong to this language that come from
// the wordlist and query file (i.e. that are not negative )
p = spellerBuf;
while ( p < spellerBuf + spellerBufSize - 1){
// first is the phrase
char *phrase = p;
// if line is a comment skip it
if ( *p == '#' ){
p += gbstrlen(p) + 1;
continue;
}
// skip phrase and move to phonet
p += gbstrlen(p) + 1 ;
char *phonet = p;
if ( p >= spellerBuf + spellerBufSize-1 ) break;
// skip phonet and move to (lang,score) tuples
p += gbstrlen(p) + 1;
if ( p >= spellerBuf + spellerBufSize-1 ) break;
// skip (lang, score) tuple
p += gbstrlen(p) + 1;
// check if phonet it present
if ( *phonet == '\0' )
continue;
uint64_t phonetKey = hash64Lower_utf8(phonet);
// check if this phonet belongs to this host
if ( phonetKey % hostsPerSplit != myHash )
continue;
uint64_t h = hash64d(phrase, gbstrlen(phrase));
// check if this phrase belongs to this language
// can do that by calling spellers getphrasepopularity
if ( g_speller.getPhrasePopularity( phrase, h, false,
m_lang ) <= 0 )
continue;
m_numTuples++;
}
// count the distributed tuples too (one per line) and NUL-split each
// line's tab-separated fields in place
p = m_distributedBuf;
while ( p < m_distributedBuf + m_distributedBufSize ){
m_numTuples++;
while ( *p != '\n' &&
p < m_distributedBuf + m_distributedBufSize - 1) {
if ( *p == '\t' )
*p = '\0';
p++;
}
*p = '\0';
p++;
}
// tuples have already been counted
m_tuplePtrSize = m_numTuples * sizeof(char *);
m_tuplePtr = (char **) mmalloc ( m_tuplePtrSize, "LanguageTuplePtr" );
if ( !m_tuplePtr ) {
log("lang: mmalloc: %s",mstrerror(errno));return false;}
int32_t numTuples = 0;
// Second pass over the speller buffer: identical filtering to the
// counting pass above, but this time record the phrase pointers.
p = spellerBuf;
while ( p < spellerBuf + spellerBufSize - 1){
// first is the phrase
char *phrase = p;
// if line is a comment skip it
if ( *p == '#' ){
p += gbstrlen(p) + 1;
continue;
}
// skip phrase and move to phonet
p += gbstrlen(p) + 1;
char *phonet = p;
if ( p >= spellerBuf + spellerBufSize - 1 ) break;
// skip phonet and move to (lang,score) tuples
p += gbstrlen(p) + 1;
if ( p >= spellerBuf + spellerBufSize - 1 ) break;
// skip (lang, score) tuple
p += gbstrlen(p) + 1;
if ( *phonet == '\0' )
continue;
uint64_t phonetKey = hash64Lower_utf8(phonet);
// check if this phonet belongs to this host
if ( phonetKey % hostsPerSplit != myHash )
continue;
uint64_t h = hash64d(phrase, gbstrlen(phrase));
// check if this phrase belongs to this language
// can do that by calling spellers getphrasepopularity
if ( g_speller.getPhrasePopularity( phrase, h, false,
m_lang ) <= 0 )
continue;
m_tuplePtr[numTuples] = phrase;
numTuples++;
}
// go through the distributed dict and assign the pointers
p = m_distributedBuf;
while ( p < m_distributedBuf + m_distributedBufSize ){
m_tuplePtr[numTuples++] = p;
// skip phrase
p += gbstrlen(p) + 1;
if ( p >= m_distributedBuf + m_distributedBufSize ) break;
// skip phonet
p += gbstrlen(p) + 1;
if ( p >= m_distributedBuf + m_distributedBufSize ) break;
// skip popularity
p += gbstrlen(p) + 1;
}
// sanity: touch every pointer so a bad one faults here, not later
for ( int32_t j = 0 ; j< numTuples ; j++ )
gbstrlen(m_tuplePtr[j]) ;
// sanity check: both passes must have agreed on the tuple count
if ( numTuples != m_numTuples ){
char *xx = NULL; *xx = 0;
}
// kill last one seems problemtic with #define EFENCE in Mem.cpp
numTuples--;
m_numTuples--;
// sort the wordsPtrs accoding to their phonetics
gbsort( m_tuplePtr, m_numTuples, sizeof(char*), cmpPhonet );
char *tuple;
m_numPhonets = 0;
int32_t startIndex = 0;
int32_t index = 0;
// Walk the sorted tuples, grouping runs that share a phonet key, and
// index each run in m_phonetics as (startIndex << 32 | runLength).
while ( index < m_numTuples ) {
// The distributed dict is stored as a tuple of
// ( original phrase, phonetic, lang, score )
// first to come is the phrase
tuple = m_tuplePtr[index];
// move to the phonet
tuple += gbstrlen(tuple) + 1;
uint64_t phonetKey = hash64Lower_utf8 ( tuple );
if ( phonetKey % hostsPerSplit != myHash ){
index++;
continue;
}
int32_t numWordsInPhonet = 0;
startIndex = index;
while ( index < m_numTuples ){
// first to come is the phrase
tuple = m_tuplePtr[index];
char *phrase = m_tuplePtr[index];
// move to the phonet
tuple += gbstrlen(tuple) + 1;
uint64_t pKey = hash64Lower_utf8(tuple);
if ( pKey != phonetKey )
break;
// move to the popularity
tuple += gbstrlen(tuple) + 1;
// only add the distributed pop words if they come
// out of the distributed pop words dict
if (phrase > m_distributedBuf &&
phrase < m_distributedBuf + m_distributedBufSize){
// add the distributed pop words
uint64_t h = hash64d( phrase,
gbstrlen(phrase));
int32_t slot = m_distributedPopPhrases.
getSlot(h);
int32_t pop = atoi(tuple);
if ( slot == -1 )
m_distributedPopPhrases.addKey(h, pop);
}
numWordsInPhonet++;
index++;
}
// a phonet key must not already be present (keys are grouped by
// the sort above, so a hit here means a hash collision or bug)
int32_t slot = m_phonetics.getSlot ( phonetKey );
if ( slot != -1 ){
log(LOG_LOGIC, "speller: %"INT32" != -1, %16"XINT64", %s",
slot, phonetKey, tuple);
char *xx = NULL; *xx = 0;
}
// make the composite value
uint64_t value = startIndex;
// make it the higher 32 bits
value <<= 32;
value += numWordsInPhonet;
m_phonetics.addKey( phonetKey, value );
m_numPhonets++;
}
log(LOG_INIT,"lang: Read %"INT32" words and %"INT32" phonets into memory",
m_numTuples, m_numPhonets );
return true;
}
/*
bool Language::loadNarrow( char *spellerBuf, int32_t spellerBufSize,
int32_t hostsPerSplit, uint32_t myHash ){
// don't load for any other language except english
if ( m_lang != langEnglish )
return true;
// first find out how many phrases have more than 1 word
// count the tuples that belong to this language that come from
// the wordlist and query file (i.e. that are not negative )
char *p = spellerBuf;
while ( p < spellerBuf + spellerBufSize - 1){
// first is the phrase
char *phrase = p;
// if line is a comment skip it
if ( *p == '#' ){
p += gbstrlen(p) + 1;
continue;
}
// skip phrase and move to phonet
p += gbstrlen(p) + 1;
char *phonet = p;
// skip phonet and move to (lang,score) tuples
p += gbstrlen(p) + 1;
// skip (lang, score) tuple
p += gbstrlen(p) + 1;
uint64_t h = hash64d(phrase, gbstrlen(phrase));
// check if this phrase belongs to this language
// can do that by calling spellers getphrasepopularity
if ( g_speller.
getPhrasePopularity( phrase, h, false, m_lang ) <= 0 ){
continue;
}
// check if phonet it present
if ( *phonet == '\0' ){
continue;
}
uint64_t phonetKey = hash64Lower_utf8(phonet);
// check if this phonet belongs to this host
if ( phonetKey % hostsPerSplit != myHash ){
continue;
}
// make sure the phrase has 3 or more letters
if ( gbstrlen(phrase) < 3 )
continue;
// check if the phrase has more than 1 word
bool isPhrase = false;
char *q = phrase;
while ( *q != '\0' ){
if ( *q == ' ' )
isPhrase = true;
q++;
}
if ( !isPhrase )
continue;
m_numNarrowPtrs++;
}
p = m_distributedBuf;
while ( p < m_distributedBuf + m_distributedBufSize ){
// first is the phrase
char *phrase = p;
// if line is a comment skip it
if ( *p == '#' ){
p += gbstrlen(p) + 1;
continue;
}
// skip phrase and move to phonet
p += gbstrlen(p) + 1;
// skip phonet
p += gbstrlen(p) + 1;
// skip popularity
p += gbstrlen(p) + 1;
// make sure the phrase has 3 or more letters
if ( gbstrlen(phrase) < 3 )
continue;
// check if the phrase has more than 1 word
bool isPhrase = false;
char *q = phrase;
while ( *q != '\0' ){
if ( *q == ' ' )
isPhrase = true;
q++;
}
if ( !isPhrase )
continue;
m_numNarrowPtrs++;
}
// allocate memory for that
// also allocate memory for the m_frntCharPtrs and m_bckCharPtrs
m_narrowBufSize = 2 * sizeof (char *) * m_numNarrowPtrs +
( NUM_CHARS * NUM_CHARS * NUM_CHARS * 4 * 2 );
m_narrowBuf = (char *) mmalloc( m_narrowBufSize, "LanguageNarrowBuf" );
if ( !m_narrowBuf ){
log("lang: Could not allocate %"INT32" bytes for narrow buf",
m_narrowBufSize);
g_errno = ENOMEM;
return false;
}
p = m_narrowBuf;
m_frntPtrs = (char **) p;
p += sizeof(char **) * m_numNarrowPtrs;
m_bckPtrs = (char **) p;
p += sizeof(char *) * m_numNarrowPtrs;
m_frntCharPtrs = (int32_t *) p;
p += NUM_CHARS * NUM_CHARS * NUM_CHARS * 4;
m_bckCharPtrs = (int32_t *)p;
p += NUM_CHARS * NUM_CHARS * NUM_CHARS * 4;
int32_t numNarrowPtrs = 0;
// go through the loop again and set the positions
p = spellerBuf;
while ( p < spellerBuf + spellerBufSize - 1){
// first is the phrase
char *phrase = p;
// if line is a comment skip it
if ( *p == '#' ){
p += gbstrlen(p) + 1;
continue;
}
// skip phrase and move to phonet
p += gbstrlen(p) + 1;
char *phonet = p;
// skip phonet and move to (lang,score) tuples
p += gbstrlen(p) + 1;
// skip (lang, score) tuple
p += gbstrlen(p) + 1;
uint64_t h = hash64d(phrase, gbstrlen(phrase));
// check if this phrase belongs to this language
// can do that by calling spellers getphrasepopularity
if ( g_speller.
getPhrasePopularity( phrase, h, false, m_lang ) <= 0 ){
continue;
}
// check if phonet it present
if ( *phonet == '\0' ){
continue;
}
uint64_t phonetKey = hash64Lower_utf8(phonet);
// check if this phonet belongs to this host
if ( phonetKey % hostsPerSplit != myHash ){
continue;
}
// make sure the phrase has 3 or more letters
if ( gbstrlen(phrase) < 3 )
continue;
// check if the phrase has more than 1 word
bool isPhrase = false;
char *q = phrase;
while ( *q != '\0' ){
if ( *q == ' ' )
isPhrase = true;
q++;
}
if ( !isPhrase )
continue;
m_frntPtrs[numNarrowPtrs] = phrase;
m_bckPtrs[numNarrowPtrs] = phrase;
numNarrowPtrs++;
}
p = m_distributedBuf;
while ( p < m_distributedBuf + m_distributedBufSize ){
// skip phrase
char *phrase = p;
// if line is a comment skip it
if ( *p == '#' ){
p += gbstrlen(p) + 1;
continue;
}
p += gbstrlen(p) + 1;
// skip phonet
p += gbstrlen(p) + 1;
// skip popularity
p += gbstrlen(p) + 1;
// make sure the phrase has 3 or more letters
if ( gbstrlen(phrase) < 3 )
continue;
// check if the phrase has more than 1 word
bool isPhrase = false;
char *q = phrase;
while ( *q != '\0' ){
if ( *q == ' ' )
isPhrase = true;
q++;
}
if ( !isPhrase )
continue;
m_frntPtrs[numNarrowPtrs] = phrase;
m_bckPtrs[numNarrowPtrs] = phrase;
numNarrowPtrs++;
}
// sanity check
if ( numNarrowPtrs != m_numNarrowPtrs ){
log(LOG_LOGIC, "speller: %"INT32" != %"INT32" numNarrowPtrs",
numNarrowPtrs, m_numNarrowPtrs);
char *xx=NULL; *xx=0;
}
// sort the front pointers and back pointers
gbsort ( m_frntPtrs, m_numNarrowPtrs, sizeof(char*), cmpFrnt );
gbsort ( m_bckPtrs, m_numNarrowPtrs, sizeof(char*), cmpBck );
// printing them out
//for ( int32_t i = 0; i < m_numNarrowPtrs; i++ )
// log ( "lang: frnt=%s\t\t bck=%s",
// m_frntPtrs[i] + gbstrlen(m_frntPtrs[i]) + 1,
// m_bckPtrs[i] + gbstrlen(m_bckPtrs[i]) + 1);
// now set the m_frntCharPtrs and m_bckCharPtrs
for ( int32_t i = 0; i < NUM_CHARS * NUM_CHARS * NUM_CHARS; i++ ){
m_frntCharPtrs[i] = -1;
m_bckCharPtrs[i] = -1;
}
for ( int32_t i = 0; i < m_numNarrowPtrs; i++ ){
// align to the phrase
char *frnt = m_frntPtrs[i];
char *bck = m_bckPtrs[i];
bck += gbstrlen(bck) - 1;
char f0 = to_dict_char(frnt[0]);
char f1 = to_dict_char(frnt[1]);
char f2 = to_dict_char(frnt[2]);
char b0 = to_dict_char(bck[0]);
char b1 = to_dict_char(bck[-1]);
char b2 = to_dict_char(bck[-2]);
int32_t fx = f0 * NUM_CHARS * NUM_CHARS + f1 * NUM_CHARS + f2;
int32_t bx = b0 * NUM_CHARS * NUM_CHARS + b1 * NUM_CHARS + b2;
if ( m_frntCharPtrs[fx] == -1 )
m_frntCharPtrs[fx]= i;
if ( m_bckCharPtrs[bx] == -1 )
m_bckCharPtrs[bx] = i;
}
return true;
}
*/
// . load this language's dictionary hash table (m_dict)
// . merges three sources, keeping the highest popularity seen per phrase:
//     1. dict/<lang>/<lang>.wl.phonet        - word list with phonets
//     2. dict/<lang>/<lang>.query.phonet.top - top phrases from the query log
//     3. dict/<lang>/<lang>.dict.<i>         - title-rec dicts, one per char
// . each line looks like "pop\tphrase\tphonet..."; the phrase is keyed by
//   hash64d() and mapped to its popularity
// . a phrase found ONLY in a title-rec dict is stored with a NEGATIVE
//   popularity so callers can tell the sources apart
// . returns false and logs on error (missing file)
bool Language::loadDictHashTable( ){
	char ff[MAX_FRAG_SIZE];
	// first load the language dict. build the path and open it.
	FILE *fdr;
	sprintf ( ff , "%sdict/%s/%s.wl.phonet", g_hostdb.m_dir,
		  getLanguageAbbr(m_lang), getLanguageAbbr(m_lang) );
	fdr = fopen ( ff, "r" );
	if ( !fdr )
		return log("lang: Could not open %s for reading: "
			   "%s.", ff, strerror(errno));
	char buf[1024];
	// this loop goes through all the words
	while ( fgets ( buf , 1024 , fdr ) ) {
		int32_t wlen = gbstrlen(buf);
		if ( wlen <= 0 || wlen > MAX_PHRASE_LEN )
			continue;
		// remove the trailing newline
		buf [wlen - 1] = '\0';
		char *p = buf;
		// popularity is the first column
		int32_t pop = atoi(p);
		// move to the phrase; skip malformed lines with no tab so
		// we never scan past the end of buf
		while ( *p && *p != '\t' )
			p++;
		if ( ! *p )
			continue;
		p++;
		char *phrase = p;
		// find the tab that ends the phrase (before the phonetic)
		while ( *p && *p != '\t' )
			p++;
		if ( ! *p )
			continue;
		uint64_t key = hash64d( phrase, p - phrase);
		// keep only the highest popularity for a phrase
		int32_t slot = m_dict.getSlot(key);
		if ( slot != -1 ){
			int32_t value = m_dict.getValueFromSlot(slot);
			if ( pop < value )
				continue;
		}
		m_dict.addKey( key, pop );
	}
	fclose(fdr);
	// now for the top pop words from the query log
	sprintf ( ff , "%sdict/%s/%s.query.phonet.top", g_hostdb.m_dir,
		  getLanguageAbbr(m_lang), getLanguageAbbr(m_lang) );
	// then open
	fdr = fopen ( ff, "r" );
	if ( !fdr )
		return log("lang: Could not open %s for reading: "
			   "%s.", ff, strerror(errno));
	// this loop goes through all the words
	while ( fgets ( buf , 1024 , fdr ) ) {
		int32_t wlen = gbstrlen(buf);
		if ( wlen <= 0 || wlen > MAX_PHRASE_LEN )
			continue;
		// remove the trailing newline
		buf [wlen - 1] = '\0';
		char *p = buf;
		int32_t pop = atoi(p);
		// move to the phrase; guard against missing tabs
		while ( *p && *p != '\t' )
			p++;
		if ( ! *p )
			continue;
		p++;
		char *phrase = p;
		// move to the next tab before the phonetic
		while ( *p && *p != '\t' )
			p++;
		if ( ! *p )
			continue;
		// BUGFIX: hash the phrase itself. the old code hashed "p"
		// (the tab ending the phrase) with length p - phrase, so
		// query-log popularity was filed under garbage keys
		uint64_t key = hash64d( phrase, p - phrase);
		int32_t slot = m_dict.getSlot(key);
		if ( slot != -1 ){
			int32_t value = m_dict.getValueFromSlot(slot);
			if ( pop < value )
				continue;
		}
		m_dict.addKey( key, pop );
	}
	fclose(fdr);
	// now for the title rec dicts. If the phrase is only present in the
	// titlerec dict then store it as a negative value
	for ( int32_t i = 0; i < NUM_CHARS; i++ ){
		// open the input file
		FILE *fdr;
		sprintf ( ff , "%sdict/%s/%s.dict.%"INT32"", g_hostdb.m_dir,
			  getLanguageAbbr(m_lang), getLanguageAbbr(m_lang), i);
		// then open
		fdr = fopen ( ff, "r" );
		if ( !fdr )
			return log("lang: Could not open %s for reading: "
				   "%s.", ff, strerror(errno));
		// this loop goes through all the words and only adds those
		// words into the phonetic dict that have phonets.
		while ( fgets ( buf , 1024 , fdr ) ) {
			int32_t wlen = gbstrlen(buf);
			if ( wlen <= 0 || wlen > MAX_PHRASE_LEN )
				continue;
			// remove the trailing newline
			buf [wlen - 1] = '\0';
			char *p = buf;
			// scale the title-rec popularity into the same
			// range as the word-list popularity
			int32_t pop = ( atoi(p) * 32000 )/ 10000;
			// move to the phrase; guard against missing tab
			while ( *p && *p != '\t' )
				p++;
			if ( ! *p )
				continue;
			p++;
			uint64_t key = hash64d( p, gbstrlen(p) );
			// add only if it is not found in english dict and
			// query dict
			int32_t slot = m_dict.getSlot(key);
			if ( slot != -1 ){
				int32_t value = m_dict.getValueFromSlot(slot);
				if ( pop < value )
					continue;
			}
			// if phrase is only present in the title rec, store
			// as a negative value
			else
				pop *= -1;
			m_dict.addKey( key, pop );
		}
		fclose(fdr);
	}
	return true;
}
// . load the wikipedia word/title list for this language into m_wiki
// . file format: one entry per line in dict/<lang>/<lang>.wiki, keyed
//   by hash32d(); duplicate lines are silently skipped
// . returns false and logs if the file cannot be opened
bool Language::loadWikipediaWords(){
	// build the path to the wikipedia word list
	char ff[1024];
	sprintf ( ff , "%sdict/%s/%s.wiki", g_hostdb.m_dir,
		  getLanguageAbbr(m_lang), getLanguageAbbr(m_lang));
	FILE *fdr = fopen ( ff, "r" );
	if ( ! fdr ) {
		// BUGFIX: the old message was copy-pasted from the
		// misspelled-words loader and omitted the filename
		return log("lang: Could not open wikipedia words file %s "
			   "for reading: %s.", ff, strerror(errno));
	}
	m_wiki.set(1024);
	char buf[1024];
	// go through the words, one per line
	while ( fgets ( buf , 1024 , fdr ) ) {
		// length of word(s), including the terminating \n
		int32_t wlen = gbstrlen(buf) ;
		// skip if empty
		if ( wlen <= 0 ) continue;
		// strip the trailing newline
		buf[wlen-1]='\0';
		uint32_t key = hash32d(buf, gbstrlen(buf));
		// skip duplicates (old code had an unreachable sanity
		// crash after this continue; removed)
		if ( m_wiki.getSlot ( key ) != -1 )
			continue;
		m_wiki.addKey(key,1);
	}
	fclose(fdr);
	return true;
}
// . load the commonly-misspelled-words file for this language into m_misp
// . file format: one word per line in dict/<lang>/<lang>.misp, keyed by
//   hash32d()
// . a duplicate line means the file is corrupt and intentionally crashes
//   (sanity check, matching the original behavior)
// . returns false and logs if the file cannot be opened
bool Language::loadMispelledWords(){
	char ff [1024];
	// build the path to the commonly misspelled words file
	sprintf ( ff , "%sdict/%s/%s.misp", g_hostdb.m_dir,
		  getLanguageAbbr(m_lang), getLanguageAbbr(m_lang));
	FILE *fdr = fopen ( ff, "r" );
	if ( ! fdr ) {
		// BUGFIX: include the filename in the message like the
		// other dict loaders do
		return log("lang: Could not open mispelled words file %s "
			   "for reading: %s.", ff, strerror(errno));
	}
	m_misp.set(1024);
	char buf[1024];
	// go through the words, one per line
	while ( fgets ( buf , 1024 , fdr ) ) {
		// length of word(s), including the terminating \n
		int32_t wlen = gbstrlen(buf) ;
		// skip if empty
		if ( wlen <= 0 ) continue;
		// strip the trailing newline
		buf[wlen-1]='\0';
		uint32_t key = hash32d(buf, gbstrlen(buf));
		int32_t slot = m_misp.getSlot ( key );
		// a duplicate word means the file is corrupt; crash on
		// purpose so it gets noticed
		if ( slot != -1 ){
			char *xx=NULL; *xx=0;
		}
		m_misp.addKey(key,1);
	}
	fclose(fdr);
	return true;
}
///////////////////////////////////////////////////////
// LANGUAGE RECOMMENDATION ROUTINES BELOW HERE
//
///////////////////////////////////////////////////////
/*
int32_t Language::narrowPhrase ( char *request, char *phrases, int32_t *pops,
int32_t maxPhrases ){
// if we haven't been loaded, just return
if ( m_numNarrowPtrs == 0 )
return 0;
int32_t numPhrases = 0;
int32_t requestLen = gbstrlen(request);
// don't check for narrow phrase if the original phrase is more than
// MAX_PHRASE_LEN - 3 OR less than 3 chars.
// Why MAX_PHRASE_LEN - 3 ? Because then only can we find a narrow
// phrase
if ( requestLen > MAX_PHRASE_LEN - 3 || requestLen < 3 )
return numPhrases;
// get the start and end two chars and convert them to dict_char
char f0 = to_dict_char(request[0]);
char f1 = to_dict_char(request[1]);
char f2 = to_dict_char(request[2]);
char *bck = request + requestLen - 1;
char b0 = to_dict_char(bck[0]);
char b1 = to_dict_char(bck[-1]);
char b2 = to_dict_char(bck[-2]);
uint64_t start = gettimeofdayInMilliseconds();
int32_t minPop = 0;
char req[MAX_PHRASE_LEN];
// first get all the ones in the front
strcpy(req, request);
// add a space so that we match the exact phrase
req[requestLen] = ' ';
req[requestLen + 1] = '\0';
int32_t fx = f0 * NUM_CHARS * NUM_CHARS + f1 * NUM_CHARS + f2;
int32_t index = m_frntCharPtrs[fx];
if ( index == -1 )
goto skipFrnt;
while ( index < m_numNarrowPtrs ){
char *tuple = m_frntPtrs[index++];
char *phrase = tuple;
//check if we have gone over the phrase (if present) or not
int32_t cmp = strncasecmp (phrase, req, gbstrlen(req));
if ( cmp > 0 )
break;
if ( cmp < 0 )
continue;
// found it. get the popularity
int32_t pop = 0;
// if its from the distributed dict, get it directly
if ( tuple > m_distributedBuf &&
tuple < m_distributedBuf + m_distributedBufSize ){
// skip the phrase
tuple += gbstrlen(tuple) + 1;
// skip the phonet
tuple += gbstrlen(tuple) + 1;
pop = atoi(tuple);
}
// else get it by getphrasePopularity
else {
uint64_t h = hash64d(phrase, gbstrlen(phrase));
pop = g_speller.getPhrasePopularity(phrase, h, false,
m_lang);
}
int32_t indx = numPhrases;
// if not full
if ( numPhrases < maxPhrases )
numPhrases++;
// if full
else{
if ( minPop >= pop )
continue;
int32_t minIndx = 0;
minPop = pops[0];
for ( int32_t j = 1; j < maxPhrases; j++ ){
if ( minPop < pops[j] )
continue;
minPop = pops[j];
minIndx = j;
}
if ( minPop >= pop )
continue;
indx = minIndx;
minPop = pop;
}
// store the pop
pops[indx] = pop;
strcpy ( &phrases[MAX_FRAG_SIZE * indx],phrase );
log (LOG_DEBUG,"speller: Narrow phrase=%s, pop=%"INT32"",
&phrases[MAX_FRAG_SIZE * indx], pops[indx]);
}
skipFrnt:
// now get the back
req[0] = ' ';
strcpy(&req[1],request);
int32_t bx = b0 * NUM_CHARS * NUM_CHARS + b1 * NUM_CHARS + b2;
index = m_bckCharPtrs[bx];
if ( index == -1 )
return numPhrases;
while ( index < m_numNarrowPtrs ){
char *tuple = m_bckPtrs[index++];
char *phrase = tuple;
//check if we have gone over the phrase (if present) or not
// cannot use strcasecmp because we compare from the back
char *p1 = phrase + gbstrlen(phrase) - 1;
char *p2 = req + gbstrlen(req) - 1;
while ( p1 >= phrase && p2 >= req ) {
if ( *p1 != *p2 )
break;
p1--;
p2--;
}
if ( p2 >= req || p1 < phrase ){
if ( *p1 > *p2 )
break;
continue;
}
// found it
int32_t pop = 0;
// if its from the distributed dict, get it directly
if ( tuple > m_distributedBuf &&
tuple < m_distributedBuf + m_distributedBufSize ){
// skip the phrase
tuple += gbstrlen(tuple) + 1;
// skip the phonet
tuple += gbstrlen(tuple) + 1;
pop = atoi(tuple);
}
// else get it by getphrasePopularity
else {
uint64_t h = hash64d(phrase, gbstrlen(phrase));
pop = g_speller.getPhrasePopularity(phrase, h, false,
m_lang);
}
int32_t indx = numPhrases;
// if not full
if ( numPhrases < maxPhrases )
numPhrases++;
// if full
else{
if ( minPop >= pop )
continue;
int32_t minIndx = 0;
minPop = pops[0];
for ( int32_t j = 1; j < maxPhrases; j++ ){
if ( minPop < pops[j] )
continue;
minPop = pops[j];
minIndx = j;
}
if ( minPop >= pop )
continue;
indx = minIndx;
minPop = pop;
}
// store the pop
pops[indx] = pop;
strcpy ( &phrases[MAX_FRAG_SIZE * indx],phrase );
log (LOG_DEBUG,"speller: Narrow phrase=%s, pop=%"INT32"",
&phrases[MAX_FRAG_SIZE * indx], pops[indx]);
}
uint64_t took = gettimeofdayInMilliseconds() - start;
if ( took > 5)
log ( LOG_WARN,"lang: Finding narrow phrases took %"INT64" ms",
took );
return numPhrases;
}
*/
// . copy "src" (srcSize bytes of utf8) into "dst" in a canonical,
//   spellcheck-friendly form: every character upper-cased
// . returns false if src contains any character that is not alnum,
//   whitespace, apostrophe, space or hyphen, or if dst is too small
bool Language::makeClean( char *src, int32_t srcSize,
			  char *dst, int32_t dstSize ) {
	char *in     = src;
	char *inEnd  = src + srcSize;
	char *out    = dst;
	char *outEnd = dst + dstSize;
	while ( in < inEnd ) {
		char clen = getUtf8CharSize ( in );
		// only letters, digits, whitespace and a few joiner
		// characters survive; anything else aborts the clean
		bool keep = is_alnum_utf8  ( in ) ||
			    is_wspace_utf8 ( in ) ||
			    *in == '\''           ||
			    *in == ' '            ||
			    *in == '-'            ;
		if ( ! keep )
			return false;
		// bail before we could overflow the output buffer
		if ( out + 5 >= outEnd )
			return false;
		// emit the character upper-cased
		if ( clen == 1 ) *out++ = to_upper_a ( *in );
		else             out   += to_upper_utf8 ( out , in );
		in += clen;
	}
	// null terminate
	*out = '\0';
	return true;
}
// returns the number of recommendations that were found
// First finds recommendations by the soundslike (phonetic) score
// Then tries to split the word and finds recommendations by the word score
// Stores the top MAX_RECOMMENDATIONS in the array, and then returns the
// highest popularity recommendation out of them
// . *found is set true (and false is returned) if origWord is already in
//   a dictionary and forceReco is false
// . on success *score / *popularity describe the chosen recommendation
// . NOTE(review): recommendationLen is never used below; the strcpy into
//   "recommendation" is unchecked -- callers presumably pass a buffer of
//   at least MAX_PHRASE_LEN, confirm
bool Language::getRecommendation( char *origWord, int32_t origWordLen,
char *recommendation, int32_t recommendationLen,
bool *found, int32_t *score, int32_t *popularity,
bool forceReco ){
// if rules and words are not loaded, return
if ( m_numRules == 0 || m_numTuples == 0 )
return true;
// don't check for recommendation if the original phrase is more than
// MAX_PHRASE_LEN - 1
if ( origWordLen > MAX_PHRASE_LEN - 1 )
return false;
char origPhonet[MAX_PHRASE_LEN];
char origClean[MAX_PHRASE_LEN];
char possiblePhonet[ MAX_PHRASE_LEN ];
// candidate recommendations collected so far (reco text + score)
Reco recos[MAX_RECOMMENDATIONS];
// also keep the lowest score that we've found.
int32_t lowestScore = LARGE_SCORE;
/*char recos[MAX_RECOMMENDATIONS][MAX_PHRASE_LEN];
int32_t recoScores[MAX_RECOMMENDATIONS];*/
int32_t numRecos = 0;
// null end recommendation in case we don't find anything.
*recommendation = '\0';
*found = false;
*score = LARGE_SCORE;
*popularity = 0;
// no recommendations for 1 letter words
if ( origWordLen < 2 )
return false;
// no recommendation if the word is found in the dictionary
if ( !forceReco ){
// if we are spell checking a query then we start with the
// phrases and then move on to individual words. This should
// eliminate bugs like saying "brittany spears" is correct
// because the phrase shall be checked before individual words
uint64_t h = hash64d( origWord, gbstrlen(origWord));
if ( g_speller.getPhrasePopularity( origWord,
h, false ) != 0 ){
*found = true;
return false;
}
// check if it is present in the distributed dictionary
if ( m_distributedPopPhrases.getSlot ( h ) != -1 ){
*found = true;
return false;
}
}
//int32_t minRecoScore = LARGE_SCORE;
// clean the word, i.e. convert word to uppercase and
// remove possible accents
if ( !makeClean ( origWord, origWordLen, origClean, MAX_PHRASE_LEN) )
return false;
// memset ( phonet, '\0', MAX_PHRASE_LEN );
// get the phonetic
getPhonetic ( origClean, gbstrlen(origClean), origPhonet,
MAX_PHRASE_LEN );
log ( LOG_DEBUG,"speller: original - %s %s %s",origWord,
origClean, origPhonet );
// this is the max score that we are trying to get
// this is the radius around the misspelled word that we are checking
int32_t tryForScore = 3 * ( m_wordWeight * m_editDistanceWeightsMax )/100;
// decrease score by 50pc if the length of the phonet is less than 5
// decrease score by 20pc if the length of the phonet is less than 7
if ( gbstrlen(origPhonet) < 5 ) tryForScore -= tryForScore / 2;
else if ( gbstrlen(origPhonet) < 7 ) tryForScore -= tryForScore / 5;
// first try the same phonetic as the original word
int32_t origLen = gbstrlen(origPhonet);
// first add the original
strcpy ( possiblePhonet, origPhonet );
// get recos from this phonet
numRecos = tryPhonet( possiblePhonet, origPhonet,
origClean, tryForScore,
recos, numRecos, &lowestScore );
// generate different phonets using addition, deletion, substitution
// and swapping.
// ADDITION: insert each rule char at every position (incl. the end)
for ( int32_t i = 0; i < origLen + 1; i++ ){
for ( int32_t j = 0; j < MAX_CHARS; j++ ){
if ( !m_ruleChars[j] ) continue;
char *p = possiblePhonet;
// first put in all the chars the are before the char
// to be added
gbmemcpy ( p, origPhonet, i ); p += i;
// the index of m_ruleChars[] is the char to be added
*p++ = j;
gbmemcpy ( p, origPhonet + i, origLen - i );
p += origLen - i;
*p++ = '\0';
numRecos = tryPhonet( possiblePhonet, origPhonet,
origClean, tryForScore,
recos, numRecos, &lowestScore );
}
}
// DELETION: drop each char of the phonet in turn
for ( int32_t i = 0; i < origLen; i++ ){
char *p = possiblePhonet;
// put the chars that come before the deleted char
gbmemcpy ( p, origPhonet, i ); p += i;
// put the chars that come after the deleted char
gbmemcpy ( p, origPhonet + i + 1, origLen - i - 1 );
p += origLen - i - 1;
*p++ = '\0';
numRecos = tryPhonet( possiblePhonet, origPhonet,
origClean, tryForScore,
recos, numRecos, &lowestScore );
}
// SUBSTITUTION: replace each char with every other rule char
for ( int32_t i = 0; i < origLen; i++ ){
for ( int32_t j = 0; j < MAX_CHARS; j++ ){
if ( !m_ruleChars[j] ) continue;
char *p = possiblePhonet;
// cannot substitue if both chars are the same
if ( j == *( origPhonet + i ) ) continue;
// put the chars that come before the substituted char
gbmemcpy ( p, origPhonet, i ); p += i;
// substitute the char
*p++ = j;
// put the chars that come after the deleted char
gbmemcpy ( p, origPhonet + i + 1, origLen - i - 1);
p += origLen - i - 1;
*p++ = '\0';
numRecos = tryPhonet( possiblePhonet, origPhonet,
origClean, tryForScore,
recos, numRecos, &lowestScore );
}
}
// SWAPPING: transpose each adjacent pair of chars
for ( int32_t i = 0; i < origLen - 1; i++ ){
char *p = possiblePhonet;
// cannot swap if both chars are the same
if ( *( origPhonet + i ) == *( origPhonet + i + 1 ) ) continue;
// put the chars that come before the swapped char
gbmemcpy ( p, origPhonet, i ); p += i;
//swap the chars
*p++ = *( origPhonet + i + 1);
*p++ = *( origPhonet + i );
// put the chars that come after the deleted char
gbmemcpy ( p, origPhonet + i + 2, origLen - i - 2);
p += origLen - i - 2;
*p++ = '\0';
numRecos = tryPhonet( possiblePhonet, origPhonet,
origClean, tryForScore,
recos, numRecos, &lowestScore );
}
// check if splitting the word gives us any good recommendations
// this works like the try_split() function of aspell in suggest.cpp
// dont split the word if its less than 4 chars
if ( gbstrlen(origWord) < 4 )
goto skipSplit;
// copy it over to another string
char splitWord[MAX_PHRASE_LEN];
strcpy ( splitWord, origWord );
// extend the string by one char (duplicate the last one) so the
// shift-right in the loop below has room to work
splitWord[ gbstrlen(splitWord) + 1 ] = '\0';
splitWord[ gbstrlen(splitWord) ] = splitWord[ gbstrlen(splitWord) - 1 ];
// slide a '\0' split point from right to left; at each position the
// two halves must both be dictionary words
for ( int32_t i = gbstrlen( origWord ) - 2; i >= 2; --i) {
splitWord[i+1] = splitWord[i];
splitWord[i] = '\0';
uint64_t h = hash64d ( splitWord, gbstrlen(splitWord));
// check if the split words exist in the dictionary
int32_t pop = g_speller.getPhrasePopularity(splitWord,h,false);
if ( pop == 0 ){
// check the distributed dict also
int32_t slot = m_distributedPopPhrases.getSlot(h);
if ( slot != -1 )
pop = m_distributedPopPhrases.
getValueFromSlot(slot);
if ( pop == 0 )
continue;
}
h = hash64d ( splitWord + i + 1, gbstrlen(splitWord + i + 1));
pop = g_speller.getPhrasePopularity( splitWord + i + 1, h,
false );
if ( pop == 0 ){
// check the distributed dict also
int32_t slot = m_distributedPopPhrases.getSlot(h);
if ( slot != -1 )
pop = m_distributedPopPhrases.
getValueFromSlot(slot);
if ( pop == 0 )
continue;
}
// replace the '\0' in between the split with a ' '
splitWord[i] = ' ';
// fixed word-edit cost for a split suggestion
int32_t wordScore = m_editDistanceWeightsDel2 * 3 / 2;
char phonetReco[MAX_PHRASE_LEN];
// get phonetic
getPhonetic ( splitWord, gbstrlen(splitWord), phonetReco,
MAX_PHRASE_LEN );
int32_t soundslikeScore = editDistance ( origPhonet,
phonetReco );
// the final score taking into consideration the
// phonetic score as well as the word score
int32_t score = weightedAverage ( soundslikeScore, wordScore );
if ( score > tryForScore + m_span )
continue;
// also continue if the score is greater than 2*lowestScore,
// because then this reco doesn't have a chance
if ( score > lowestScore * 2 )
continue;
// change the lowest score if needed
if ( score < lowestScore )
lowestScore = score;
// try to add this to the recommendations
/*log ( LOG_WARN, "lang: reco=%s wordScore=%"INT32" "
"phonetScore=%"INT32" score=%"INT32"",
splitWord, wordScore, soundslikeScore, score );*/
if ( numRecos < MAX_RECOMMENDATIONS ){
strcpy ( recos[numRecos].reco, splitWord );
recos[numRecos].score = score;
numRecos++;
continue;
}
// reco array is full: evict the worst (highest) score if this
// candidate beats it
int32_t maxScore = 0;
int32_t maxIndex = 0;
// find the largest score
for ( int32_t k = 0; k < numRecos; k++ ){
if ( recos[k].score > maxScore ){
maxScore = recos[k].score;
maxIndex = k;
}
}
// boot out the largest score if it is more than this
// score
if ( score > maxScore )
continue;
strcpy ( recos[maxIndex].reco, splitWord );
recos[maxIndex].score = score;
}
skipSplit:
// if no recos return
if ( numRecos == 0 )
return false;
// sort the recos according to their scores
gbsort ( recos, numRecos, sizeof(Reco), cmpScores );
log ( LOG_DEBUG, "speller: --------Top Recos--------" );
// select the best recommendation among them by score
// (popularity is allowed to override a somewhat worse score)
int32_t bestRecoIndex = 0;
int32_t bestRecoPop = -1;
for ( int32_t i = 0; i < numRecos; i++ ){
uint64_t h = hash64d ( recos[i].reco,
gbstrlen(recos[i].reco));
int32_t pop = g_speller.getPhrasePopularity(recos[i].reco, h,
false);
if ( pop == 0 ){
// check the distributed dict also
int32_t slot = m_distributedPopPhrases.getSlot(h);
if ( slot != -1 )
pop = m_distributedPopPhrases.
getValueFromSlot(slot);
}
// prefer a reco whose pop is 4x the current best as long as
// its score is within 2x, or a strictly more popular reco at
// the same score
if ( ( recos[i].score < ( recos[bestRecoIndex].score * 2 ) &&
pop > ( bestRecoPop * 4 ) ) ||
( recos[i].score == recos[bestRecoIndex].score &&
pop > bestRecoPop ) ){
bestRecoPop = pop;
bestRecoIndex = i;
}
log ( LOG_DEBUG,"speller: %"INT32") reco=%s score=%"INT32" pop=%"INT32"",
i, recos[i].reco, recos[i].score, pop );
}
log ( LOG_DEBUG, "speller: the best reco found is %s for word %s",
recos[bestRecoIndex].reco, origWord );
// put the best reco into the recommendation
strcpy ( recommendation, recos[bestRecoIndex].reco );
*score = recos[bestRecoIndex].score;
*popularity = bestRecoPop;
return true;
}
// . score every dictionary word filed under the phonet "phonetTmp" and
//   merge acceptable candidates into the "recos" array (capacity
//   MAX_RECOMMENDATIONS; when full, the worst entry is evicted)
// . "origPhonet"/"origClean" describe the misspelled input word;
//   "tryForScore" is the score radius we are willing to accept
// . returns the updated reco count; *lowestScore tracks the best (lowest)
//   score seen so far across all tryPhonet calls
int32_t Language::tryPhonet( char *phonetTmp, char *origPhonet,
char *origClean, int32_t tryForScore,
Reco *recos, int32_t numRecos, int32_t *lowestScore ){
// go through all the phonetics and select those that have score <= 100
uint64_t key = hash64Lower_utf8(phonetTmp);
int32_t slot = m_phonetics.getSlot ( key );
// phonet not in the table: nothing to try
if ( slot == -1 )
return numRecos;
// the value is a combination of the index and the number of
// words having the same phonet
uint64_t value = m_phonetics.getValueFromSlot(slot);
int32_t index = value >> 32;
int32_t numWordsInPhonet = value & 0xffffffff;
log ( LOG_DEBUG,"speller: next phonet is %s, index=%"INT32", numWords=%"INT32"",
phonetTmp, index, numWordsInPhonet );
//if ( strcmp(phonetTmp,"WST") == 0 )
//log(LOG_WARN,"BRTNSPS");
// check the score to see if this phonet is any good.
// phonet score is 100 for phonets that do not contain all
// the letters of the word phonet. e.g. word Phonet = "PLKN",
// phonet = "PLKS" phonet score is 95 for phonets that contain
// all letters, and 0 where the phonets are same.
int32_t phonetScore = limit1EditDistance( phonetTmp, origPhonet );
if ( phonetScore >= LARGE_SCORE )
return numRecos;
//log ( LOG_WARN,"lang: checking phonet %s, "
//"numWords=%"INT32"",phonetTmp, numWordsInPhonet);
// this phonet works, for all the words under this phonet,
// get their score.
for ( int32_t j = 0; j < numWordsInPhonet; j++ ){
// The dict is stored as a tuple of
// ( original phrase, phonetic, (lang, score)... )
char *wordReco = m_tuplePtr[j + index];
// make the clean Reco
char cleanReco[MAX_PHRASE_LEN];
// sanity check, this is in the dict, so we should be able to
// make the word into clean
if ( !makeClean( wordReco, gbstrlen(wordReco), cleanReco,
MAX_PHRASE_LEN ) ){
char *xx = NULL; *xx = 0;
}
// now the phonetic
char *phonetReco = wordReco + gbstrlen(wordReco) + 1;
// sanity check
if ( !cleanReco[0] || !phonetReco ){
char *xx = NULL; *xx = 0;
}
// we want the min Score, so this is init'ed to max
int32_t wordScore = LARGE_SCORE;
// init this to phonetScore
int32_t soundslikeScore = phonetScore;
//log (LOG_WARN,"lang: %s\t%s\t%s %"INT32" %"INT32"",
// wordReco, cleanReco, phonetReco,
// wordScore, soundslikeScore);
// compute the word edit distance, but only up to the search
// level that could still produce an acceptable overall score
if ( wordScore >= LARGE_SCORE ){
int32_t slScore = soundslikeScore;
if ( slScore >= LARGE_SCORE )
slScore = 0;
int32_t level = ( 100 * tryForScore -
m_soundslikeWeight * slScore )/
(m_wordWeight *
m_editDistanceWeightsMin);
if ( level < 0 )
level = 0;
if ( level >= int32_t(slScore/
m_editDistanceWeightsMin))
wordScore = editDistance ( origClean,
cleanReco,
level,
level );
}
// no acceptable word edit distance found at that level
if ( wordScore >= LARGE_SCORE )
continue;
// this is needed for split words, that are taken
// care of after this loop
/*if ( soundslikeScore >= LARGE_SCORE ){
if ( weightedAverage( 0, wordScore ) >
tryForScore )
continue;
soundslikeScore = editDistance ( origPhonet,
phonetReco );
}*/
// the final score taking into consideration the
// phonetic score as well as the word score
int32_t score = weightedAverage ( soundslikeScore,
wordScore );
// score == 0 means an exact match, which is not a useful
// recommendation; also reject scores outside the radius
if ( score > tryForScore + m_span || score == 0)
continue;
// also continue if the score is greater than 2*lowestScore,
// because then this reco doesn't have a chance
if ( score > *lowestScore * 2 )
continue;
// change the lowest score if needed
if ( score < *lowestScore )
*lowestScore = score;
/*int32_t reduceScore=reduceScore(origClean,cleanReco);
if ( reduceScore > 0 )
log ( LOG_DEBUG,"lang: reducing score request=%s, "
"reco=%s, score=%"INT32", reduce=%"INT32"", origClean,
cleanReco, score, reduceScore );
score -= reduceScore;*/
//log ( LOG_WARN, "lang: reco=%s phonet=%s "
//"wordScore=%"INT32" phonetScore=%"INT32" score=%"INT32"",
//wordReco, phonetReco, wordScore,
//soundslikeScore, score );
/*if ( minRecoScore < score )
continue;
// this is our best recommendation yet
minRecoScore = score;
strcpy ( recommendation, wordReco );*/
// room left: just append
if ( numRecos < MAX_RECOMMENDATIONS ){
strcpy ( recos[numRecos].reco, wordReco );
recos[numRecos].score = score;
numRecos++;
continue;
}
// array full: evict the worst (highest) score if beaten
int32_t maxScore = 0;
int32_t maxIndex = 0;
// find the largest score
for ( int32_t k = 0; k < numRecos; k++ ){
if ( recos[k].score > maxScore ){
maxScore = recos[k].score;
maxIndex = k;
}
}
// boot out the largest score if it is more than this
// score
if ( score > maxScore )
continue;
strcpy ( recos[maxIndex].reco, wordReco );
recos[maxIndex].score = score;
}
return numRecos;
}
// . compute the edit distance between "a" and "b", trying successively
//   deeper search levels until one yields a score below LARGE_SCORE
// . "level" is the level to start at, "limit" the deepest level allowed;
//   level 2 uses the specialized two-edit routine, levels 1/3/4 use the
//   general bounded search; levels >= 5 are not implemented and abort
int32_t Language::editDistance( char *a, char *b, int32_t level, // starting level
				int32_t limit ) { // maximum level
	// sanity: the starting level must be positive and within the limit
	if ( level <= 0 || limit < level ) {
		char *xx = NULL; *xx = 0;
	}
	int32_t best = LARGE_SCORE;
	for ( ; level <= limit && best >= LARGE_SCORE ; ++level ) {
		if ( level == 2 ) {
			best = limit2EditDistance( a, b );
			continue;
		}
		if ( level < 5 ) {
			best = limitEditDistance( a, b, level );
			continue;
		}
		// deeper search levels were never implemented
		char *xx = NULL; *xx = 0;
		//score = editDistance(a,b,w);
	}
	return best;
}
// . blend the phonetic score and the word-edit score using the configured
//   percentage weights (m_wordWeight / m_soundslikeWeight)
int32_t Language::weightedAverage(int32_t soundslikeScore, int32_t wordScore) {
	const int32_t weighted = wordScore       * m_wordWeight +
				 soundslikeScore * m_soundslikeWeight;
	return weighted / 100;
}
// . bounded edit distance between "a" and "b" (aspell-style): explores
//   delete/insert/substitute/swap edits up to a total cost of
//   limit * m_editDistanceWeightsMax, using an explicit work stack of
//   pending branches instead of recursion
// . returns the minimum weighted cost found, or LARGE_SCORE if none
// . NOTE(review): the branch stack is fixed at 10 entries with no bounds
//   check on "++i" -- presumably the score/limit pruning keeps the depth
//   under 10, but confirm for large limits
int32_t Language::limitEditDistance( char * a, char * b,
int32_t limit ) {
// convert the level count into a total cost budget
limit = limit * m_editDistanceWeightsMax;
static const int size = 10;
// one pending branch: resume point in both strings plus cost so far
struct Edit {
char * a;
char * b;
int score;
};
Edit begin[size];
Edit * i = begin;
// const char * a0;
// const char * b0;
int32_t score = 0;
int32_t min = LARGE_SCORE;
while (true) {
// advance past the common prefix of the current branch
while (*a == *b) {
if (*a == '\0') {
// exact match for the remainder: record cost
if (score < min) min = score;
goto FINISH;
}
++a;
++b;
}
// "a" exhausted: the rest of "b" must be inserted
if (*a == '\0') {
do {
score += m_editDistanceWeightsDel2;
if (score >= min) goto FINISH;
++b;
} while (*b != '\0');
min = score;
}
// "b" exhausted: the rest of "a" must be deleted
else if (*b == '\0') {
do {
score += m_editDistanceWeightsDel1;
if (score >= min)
goto FINISH;
++a;
} while (*a != '\0');
min = score;
}
// if floor(score/max)=limit/max-1 then this edit is only good
// if it makes the rest of the string match. So check if
// the rest of the string matches to avoid the overhead of
// pushing it on then off the stack
else if ( score + m_editDistanceWeightsMax <= limit ) {
if ( limit * m_editDistanceWeightsMin <=
m_editDistanceWeightsMax *
( m_editDistanceWeightsMin + score ) ) {
// delete a character from a
min = checkRest( a+1, b,
score +
m_editDistanceWeightsDel1,
NULL, min );
// delete a character from b
min = checkRest( a, b+1,
score +
m_editDistanceWeightsDel2,
NULL, min );
if (*a == *(b+1) && *b == *(a+1)) {
// swap two characters
min=checkRest(a+2, b+2,
score +
m_editDistanceWeightsSwap,
NULL, min );
}
// substitute one character for another which
// is the same thing as deleting a character
// from both a & b
else {
min=checkRest(a+1, b+1,
score +
m_editDistanceWeightsSub,
NULL, min );
}
}
else {
// delete a character from a
i->a = a + 1;
i->b = b;
i->score = score + m_editDistanceWeightsDel1;
++i;
// delete a character from b
i->a = a;
i->b = b + 1;
i->score = score + m_editDistanceWeightsDel2;
++i;
// If two characters can be swapped and make
// a match then the substitution is pointless.
// Also, there is no need to push this on
// the stack as it is going to be imminently
// removed.
if (*a == *(b+1) && *b == *(a+1)) {
// swap two characters
a = a + 2;
b = b + 2;
score += m_editDistanceWeightsSwap;
continue;
}
// substitute one character for another
// which is the same thing as deleting a
// character from both a & b
else {
a = a + 1;
b = b + 1;
score += m_editDistanceWeightsSub;
continue;
}
}
}
FINISH:
// current branch done: pop the next pending branch, or return
// the best score found if the stack is empty
if (i == begin) return min;
--i;
a = i->a;
b = i->b;
score = i->score;
}
}
// . edit distance between "a" and "b" allowing at most ONE edit
//   (delete, insert, substitute or swap)
// . returns 0 for an exact match, the weight of the single edit if one
//   edit suffices, or LARGE_SCORE otherwise
int32_t Language::limit1EditDistance( char *a, char *b ){
int32_t min = LARGE_SCORE;
char * amax = a;
// skip the common prefix
while(*a == *b) {
if (*a == '\0')
return 0; //EditDist(0, a);
++a; ++b;
}
// "a" ended first: at most one trailing char of "b" may be dropped
if (*a == '\0') {
++b;
if (*b == '\0')
return m_editDistanceWeightsDel2;
//EditDist(ws.del2, a);
return LARGE_SCORE;
// EditDist(LARGE_SCORE, a);
}
// "b" ended first: at most one trailing char of "a" may be dropped
else if (*b == '\0') {
++a;
if (*a == '\0')
return m_editDistanceWeightsDel1;
//EditDist(ws.del1, a);
return LARGE_SCORE;
//EditDist(LARGE_SCORE, a);
}
// mismatch in the middle: try each single edit and require the
// remainder to match exactly
else {
// delete a character from a
min = checkRest( a+1, b, m_editDistanceWeightsDel1,
amax, min );
// delete a character from b
min = checkRest( a, b+1, m_editDistanceWeightsDel2,
amax, min );
if (*a == *(b+1) && *b == *(a+1)) {
// swap two characters
min = checkRest( a+2, b+2, m_editDistanceWeightsSwap,
amax, min );
}
else {
// substitute one character for another which is the
// same thing as deleting a character from both a & b
min = checkRest( a+1, b+1, m_editDistanceWeightsSub,
amax, min );
}
}
return min;
//EditDist(min, amax);
}
// . edit distance between "a" and "b" allowing at most TWO edits
// . returns 0 for an exact match, the summed weight of the edits if two
//   or fewer suffice, or LARGE_SCORE otherwise
int32_t Language::limit2EditDistance( char *a, char *b ) {
int min = LARGE_SCORE;
char * amax = a;
// skip the common prefix
while(*a == *b) {
if (*a == '\0')
return 0;
//return EditDist(0, a);
++a; ++b;
}
// "a" ended first: up to two trailing chars of "b" may be dropped
if (*a == '\0') {
++b;
if (*b == '\0')
return m_editDistanceWeightsDel2;
//return EditDist(ws.del2,a);
++b;
if (*b == '\0')
return 2 * m_editDistanceWeightsDel2;
//return EditDist(2*ws.del2, a);
return LARGE_SCORE;//EditDist(LARGE_SCORE, a);
}
// "b" ended first: up to two trailing chars of "a" may be dropped
else if (*b == '\0') {
++a;
if (*a == '\0')
return m_editDistanceWeightsDel1;
//return EditDist(ws.del1, a);
++a;
if (*a == '\0')
return 2 * m_editDistanceWeightsDel1;
//return EditDist(2*ws.del1, a);
return LARGE_SCORE;
//return EditDist(LARGE_SCORE, a);
}
// mismatch in the middle: try each first edit; check2() then allows
// one further edit on the remainder
else {
// delete a character from a
min = check2( a+1, b, m_editDistanceWeightsDel1, amax, min );
// delete a character from b
min = check2( a, b+1, m_editDistanceWeightsDel2, amax, min );
if (*a == *(b+1) && *b == *(a+1)) {
// swap two characters
min = check2( a+2, b+2, m_editDistanceWeightsSwap,
amax, min );
}
else {
// substitute one character for another which is the
// same thing as deleting a character from both a & b
min = check2( a+1, b+1, m_editDistanceWeightsSub,
amax, min );
}
}
return min;
//return EditDist(min, amax);
}
// Helper for limit1EditDistance()/check2(): if the remaining tails "a"
// and "b" match exactly, the candidate edit whose weight is "w" fully
// reconciles the two strings, so return min(w, min); otherwise return
// "min" unchanged.
// "amax" is kept only for signature compatibility with the old
// EditDist() bookkeeping (see the commented-out calls at the call
// sites). It is passed by value, so the original
// "if (amax && amax < a0) amax = a0;" was a dead store that could never
// reach the caller; it has been removed.
int32_t Language::checkRest( char *a, char *b,
			     int32_t w, char *amax, int32_t min ){
	(void)amax; // unused; see note above
	char *a0 = a;
	char *b0 = b;
	while(*a0 == *b0) {
		if (*a0 == '\0') {
			// tails are identical -- this edit works
			if (w < min) min = w;
			break;
		}
		++a0;
		++b0;
	}
	return min;
}
// Helper for limit2EditDistance(): after a first edit of weight "w" has
// been hypothesized, see whether the tails "a" and "b" can be reconciled
// with at most ONE more edit. Returns the smaller of "min" and the total
// weight of any working edit sequence found.
// NOTE(review): "amax" is a by-value parameter, so every "amax = aa"
// below is a dead store that never reaches the caller -- apparently a
// leftover from the commented-out EditDist() out-param design. Confirm
// before relying on amax for anything.
int32_t Language::check2( char *a, char *b, int32_t w, char *amax, int32_t min ){
	char *aa = a;
	char *bb = b;
	// skip the common prefix of the two tails
	while(*aa == *bb) {
		if (*aa == '\0') {
			// tails identical: the first edit alone suffices
			if (amax < aa) amax = aa;
			if (w < min) min = w;
			break;
		}
		++aa;
		++bb;
	}
	if (*aa == '\0') {
		if (amax < aa) amax = aa;
		// "a" exhausted: a second deletion from "b" may finish the job
		if (*bb == '\0') {}
		else if (*(bb+1) == '\0' &&
			 w + m_editDistanceWeightsDel2 < min)
			min = w + m_editDistanceWeightsDel2;
	}
	else if (*bb == '\0') {
		// "b" exhausted: a second deletion from "a" may finish the job
		++aa;
		if (amax < aa) amax = aa;
		if (*aa == '\0' &&
		    w + m_editDistanceWeightsDel1 < min)
			min = w + m_editDistanceWeightsDel1;
	}
	else {
		// both tails continue: try each possible second edit and let
		// checkRest() verify the remainders match exactly
		min = checkRest( aa+1, bb,
				 w + m_editDistanceWeightsDel1, amax, min );
		min = checkRest( aa, bb+1,
				 w + m_editDistanceWeightsDel2, amax, min );
		if (*aa == *(bb+1) && *bb == *(aa+1))
			// swap two adjacent characters
			min = checkRest( aa+2, bb+2,
					 w + m_editDistanceWeightsSwap,
					 amax, min);
		else
			// substitute one character for another
			min = checkRest( aa+1, bb+1,
					 w + m_editDistanceWeightsSub,
					 amax, min );
	}
	return min;
}
// Full Damerau-Levenshtein edit distance between the NUL-terminated
// strings a0 and b0, using the configured per-operation weights:
//   m_editDistanceWeightsDel1 - delete a character from "a"
//   m_editDistanceWeightsDel2 - delete a character from "b"
//   m_editDistanceWeightsSub  - substitute one character
//   m_editDistanceWeightsSwap - transpose two adjacent characters
// The DP matrix is flattened as e[i + j*aSize]: e(i,j) is the cheapest
// way to turn the first i chars of a0 into the first j chars of b0.
// Returns e(aSize-1, bSize-1), the cost for the whole strings.
int16_t Language::editDistance( char *a0, char *b0 ){
	int32_t aSize = gbstrlen(a0) + 1;
	int32_t bSize = gbstrlen(b0) + 1;
	// variable-length array on the stack (gcc extension, as before);
	// callers are expected to pass short words or this can blow the
	// stack for very long inputs
	int16_t e[aSize * bSize];
	e[0] = 0;
	// first row: turning "" into b0[0..j) consumes j chars of "b".
	// BUGFIX: this used m_editDistanceWeightsDel1, but every other
	// j-axis move (consuming a char of "b") in the loop below is
	// charged Del2 -- the two initializations were swapped.
	for ( int32_t j = 1; j != bSize; ++j )
		e[0 + j * aSize] = e[(j-1) * aSize] +
			m_editDistanceWeightsDel2;
	int16_t te;
	for (int32_t i = 1; i != aSize; ++i) {
		// first column: turning a0[0..i) into "" consumes i chars
		// of "a" -- charged Del1 to match the i-axis moves below
		// (was Del2, the other half of the swapped initialization)
		e[i] = e[i-1] + m_editDistanceWeightsDel1;
		for (int32_t j = 1; j != bSize; ++j) {
			// a0[i-1]/b0[j-1] are the i'th/j'th characters;
			// indexing directly avoids the old "a0 - 1"
			// before-the-start pointers (undefined behavior)
			if (a0[i-1] == b0[j-1]) {
				// match: carry the diagonal cost forward
				e[i + j * aSize] = e[(i-1) + (j-1) * aSize];
			}
			else {
				// substitution
				e[i + j * aSize] = m_editDistanceWeightsSub +
					e[(i-1) + (j-1) * aSize];
				// adjacent transposition (needs two chars
				// on each side)
				if (i != 1 && j != 1 &&
				    a0[i-1] == b0[j-2] &&
				    a0[i-2] == b0[j-1]) {
					te = m_editDistanceWeightsSwap +
						e[(i-2) + (j-2) * aSize];
					if (te < e[i + j * aSize])
						e[i + j * aSize] = te;
				}
				// deletion from "a"
				te = m_editDistanceWeightsDel1 +
					e[i-1 + j * aSize];
				if (te < e[i + j * aSize])
					e[i + j * aSize] = te;
				// deletion from "b"
				te = m_editDistanceWeightsDel2 +
					e[i + (j-1) * aSize];
				if (te < e[i + j * aSize])
					e[i + j * aSize] = te;
			}
		}
	}
	return e[(aSize - 1) + (bSize - 1) * aSize];
}
// reduces score for substitutions that are close on the key board
// eg. we want "hakt" --> "halt", but it used to give "hakt"->"hat"
// string 'a' is the mispelling, string 'b' is the recommendation
// Returns 45 when exactly one substituted character in "a" sits next to
// the intended character of "b" on the keyboard tables
// (s_keyMap maps a letter to its slot, s_keyboard maps a slot back to a
// letter; the slots appear to be laid out in rows of 10 -- see the 'j'
// example below). Returns 0 in every other case (no hit, or several).
int16_t Language::reduceScore ( char *a, char *b ){
	// reduce score only for substitutions and for 1 edit hop away
	// so essentially both strings should be of the same length
	if ( gbstrlen(a) != gbstrlen(b) )
		return 0;
	int16_t reduceScore = 0;
	while ( *a && *b ){
		if ( *a == *b ){
			a++;
			b++;
			continue;
		}
		// c = lowercased character the user actually typed
		char c = to_lower_a(*a);
		// bplace = keyboard slot of the character they meant
		char bplace = s_keyMap[to_lower_a(*b) - 'a'];
		// check for all chars around it. For eg. for the letter
		// 'j'(16); check 'u'(6),'i'(7),'h'(15),'k'(17),'n'(25),'m'(26)
		if ( bplace - 10 >= 0 ) {
			// row above: upper-left / upper-right neighbors
			if ( ( s_keyboard[bplace - 10] == c ) ||
			     ( s_keyboard[bplace - 9 ] == c ) )
				reduceScore += 45;
		}
		if ( bplace < 10 ) {
			// NOTE(review): the right-hand neighbor is only
			// checked for the top row (bplace < 10); this looks
			// like it was meant to be a right-edge guard such as
			// "bplace % 10 < 9" -- confirm against s_keyboard.
			if ( s_keyboard[bplace + 1] == c )
				reduceScore += 45;
		}
		if ( bplace % 10 > 0 ) {
			// left-hand neighbor, guarded against the row edge
			if ( s_keyboard[bplace - 1] == c )
				reduceScore += 45;
		}
		if ( bplace - 10 < 28 ) {
			// row below: lower-right / lower-left neighbors.
			// NOTE(review): this condition holds for every
			// bplace < 38, so verify s_keyboard is large enough
			// for bplace+10 when bplace is on the bottom row.
			if ( ( s_keyboard[bplace + 10] == c ) ||
			     ( s_keyboard[bplace + 9 ] == c ) )
				reduceScore += 45;
		}
		a++;
		b++;
	}
	// exactly one adjacent-key substitution earns the discount;
	// zero hits or multiple hits earn nothing
	if ( reduceScore == 45 )
		return 45;
	return 0;
}
// Computes a phonetic key for origWord by repeatedly applying the
// language's rewrite rules and writes the key, NUL-terminated, into
// "target". Rules live in pairs in m_rulesPtr: m_rulesPtr[n] is a search
// pattern (letters plus control characters '(', ')', '-', '<', '^', '$'
// and an optional priority digit, as described inline below) and
// m_rulesPtr[n+1] is its replacement; m_ruleStarts[ch] indexes the first
// rule whose pattern begins with ch, or is negative if there is none.
// Returns false only when makeClean() rejects the word; true otherwise.
// NOTE(review): the control-character grammar documented here is taken
// from the pre-existing inline comments; confirm against the rule files.
bool Language::getPhonetic( char *origWord, int32_t origWordLen,
			    char *target, int32_t targetLen ){
	*target = '\0';
	char word[MAX_PHRASE_LEN];
	// normalize the input into "word"; bail out if it cannot be cleaned
	if ( !makeClean(origWord, origWordLen, word, targetLen ) )
		return false;
	int32_t wordLen = gbstrlen(word);
	int32_t i = 0;          // read position in word[]
	int32_t j = 0;          // write position in target[]
	int32_t k = 0; // number of letters found
	int32_t n = 0; // index of m_rulesPtr where the rules for the char starts
	int32_t p = 0; // priority of the rule
	int32_t z = 0;          // 1 while inside a '<' (continue-in-place) rule
	int32_t k0 = -333;
	int32_t n0 = -333;
	int32_t p0 = -333;
	int32_t z0 = 0;         // 1 when this iteration consumed input itself
	char c,c0;
	const char *s;
	while ( word[i] ){
		c = word[i];
		//log ( LOG_WARN,"lang: Checking Position %"INT32", word=%s "
		//    "\ttarget=%s", j, word, target );
		z0 = 0;
		n = m_ruleStarts[(UChar8) c];
		// while the rule exists
		if ( n >= 0 ){
			// check all rules that start with the same letter
			while ( m_rulesPtr[n] && m_rulesPtr[n][0] == (UChar8) c ){
				//log( LOG_WARN, "lang: Checking rule "
				//    "No.%"INT32", \"%s\"\t--> \"%\"s", n,
				//    m_rulesPtr[n], m_rulesPtr[n+1]);
				/** check whole string **/
				k = 1; /** number of found letters **/
				p = 5; /** default priority **/
				s = m_rulesPtr[n];
				s++; /** important for (see below) "*(s-1)" **/
				// while we are not at the end of the rule and
				// the next character of the word is s and
				// s is not a digit (priority) and
				// s is not (-<^$, we are on the right track
				// so keep on checking the next char's.
				while (*s != '\0' && word[i+k] == *s &&
				       !isdigit (*s) &&
				       strchr ("(-<^$", *s) == NULL) {
					k++;
					s++;
				}
				// letters in brackets means only one of these
				// chars must fit (OR)
				// eg. rule OH(AEIOUY) means A OR E OR I....
				if (*s == '(') {
					/** check letters in "(..)" **/
					// isalpha makes sure that we check
					// only letters, and letters are only
					// inside the brackets
					if ( isalpha(word[i+k] ) &&
					     strchr(s+1, word[i+k]) != NULL ) {
						k++;
						while (*s != ')')
							s++;
						s++;
					}
				}
				p0 = (int) *s;
				k0 = k;
				// The number of dashes determines how many
				// characters from the end will not be replaced
				while (*s == '-' && k > 1) {
					k--;
					s++;
				}
				// if a `<' is appended to the search string,
				// the search for replacement rules will
				// continue with the replacement string
				// and not with the next character of the word.
				if (*s == '<')
					s++;
				// the priority is the digit
				if (isdigit (*s)) {
					p = *s - '0';
					s++;
				}
				// The control character `^' says that the
				// search string only matches at the beginning
				// of words
				if (*s == '^' && *(s+1) == '^')
					s++;
				/* FOR FOLLOWUP RULES
				   if not at the end of the rule OR
				   ( not on rule that applies only to beginning
				   of word AND
				   ( i is 0 OR word[i-1] is not alphabet ) AND
				   ( not on rule that applies only to end of
				   word AND i > 0 AND word[i-1] is not alphabet
				   AND word[i+k0] is not alphabet ) */
				if (*s == '\0' ||
				    ( *s == '^' &&
				      ( i == 0 || !isalpha(word[i-1])) &&
				      (*(s+1) != '$' ||
				       (!isalpha(word[i+k0]) ))) ||
				    (*s == '$' && i > 0 &&
				     isalpha(word[i-1]) &&
				     (!isalpha(word[i+k0]) ))) {
					/** search for followup rules, if: **/
					/** parms.followup and k > 1 and NO '-' in searchstring **/
					c0 = word[i+k-1];
					n0 = m_ruleStarts[(UChar8)c0];
					// followup gives better results.
					if ( //parms.followup &&
					     k > 1 && n0 >= 0 &&
					     p0 != (int) '-' &&
					     word[i+k] != '\0' ) {
						/** test follow-up rule for "word[i+k]" **/
						// a follow-up rule starting at the last
						// matched character can veto this rule
						// if its priority is at least as high
						while (m_rulesPtr[n0][0]==c0) {
							/*log (LOG_WARN,
							  "lang: "
							  "follow-up rule "
							  "No.%"INT32"....%s\t -->
							  %s",n0,
							  m_rulesPtr[n0],
							  m_rulesPtr[n0+1] );*/
							/** check whole string **/
							k0 = k;
							p0 = 5;
							s = m_rulesPtr[n0];
							s++;
							while (*s != '\0' &&
							       word[i+k0] == *s &&
							       !isdigit(*s) &&
							       strchr("(-<^$",*s) == NULL) {
								k0++;
								s++;
							}
							if (*s == '(') {
								/** check letters **/
								if ( isalpha(word[i+k0]) &&
								     strchr (s+1, word[i+k0] ) != NULL) {
									k0++;
									while (*s != ')' && *s != '\0')
										s++;
									if (*s == ')')
										s++;
								}
							}
							while (*s == '-') {
								/** "k0" gets NOT reduced **/
								/** because "if (k0 == k)" **/
								s++;
							}
							if (*s == '<')
								s++;
							if (isdigit (*s)) {
								p0 = *s - '0';
								s++;
							}
							if (*s == '\0' ||
							    /** *s == '^' cuts **/
							    (*s == '$' && !isalpha(word[i+k0]))) {
								if (k0 == k) {
									/** this is just a piece of the string **/
									//log(LOG_WARN,"lang: discarded (too int16_t)");
									n0 += 2;
									continue;
								}
								if (p0 < p) {
									/** priority too low **/
									//log(LOG_WARN,"lang: discarded (priority)");
									n0 += 2;
									continue;
								}
								/** rule fits; stop search **/
								break;
							}
							// log(LOG_WARN,"lang: discarded");
							n0 += 2;
						} /** End of "while (parms.rules[n0][0] == c0)" **/
						if (p0 >= p && m_rulesPtr[n0][0] == c0) {
							/*log(LOG_WARN,"lang: Rule No.%"INT32", %s",n, m_rulesPtr[n]);
							  log(LOG_WARN,"lang: not used because of follow-up Rule No.%"INT32", %s",
							  n0,m_rulesPtr[n0]);*/
							n += 2;
							continue;
						}
					} /** end of follow-up stuff **/
					/** replace string **/
					/*log(LOG_WARN,"lang: Using rule "
					  "No.%"INT32", %s\t --> %s", n,
					  m_rulesPtr[n],m_rulesPtr[n+1]);*/
					s = m_rulesPtr[n+1];
					p0 = ( m_rulesPtr[n][0] != '\0' &&
					       strchr ( m_rulesPtr[n]+1,'<') != NULL) ? 1:0;
					if (p0 == 1 && z == 0) {
						/** rule with '<' is used **/
						// write the replacement back into
						// word[] and rescan it in place
						if (j > 0 && *s != '\0' &&
						    (target[j-1] == c ||
						     target[j-1] == *s)) {
							j--;
						}
						z0 = 1;
						z = 1;
						k0 = 0;
						while (*s != '\0' && word[i+k0] != '\0') {
							word[i+k0] = *s;
							k0++;
							s++;
						}
						if (k > k0){
							//strmove (&word[0]+i+k0, &word[0]+i+k);
							// shift the unconsumed tail left
							char *to = &word[0]+i+k0;
							char *from = &word[0]+i+k;
							while (( *to++ = *from++ ) != 0 )
								;
						}
						/** new "actual letter" **/
						c = word[i];
					}
					else { /** no '<' rule used **/
						// emit the replacement into target[],
						// collapsing immediate duplicates
						i += k - 1;
						z = 0;
						while (*s != '\0'
						       && *(s+1) != '\0' && j < wordLen) {
							if (j == 0 || target[j-1] != *s) {
								target[j] = *s;
								j++;
							}
							s++;
						}
						/** new "actual letter" **/
						c = *s;
						if (m_rulesPtr[n][0] != '\0'
						    && strstr (m_rulesPtr[n]+1, "^^") != NULL) {
							if (c != '\0') {
								target[j] = c;
								j++;
							}
							//strmove (&word[0], &word[0]+i+1);
							char *to = &word[0];
							char *from = &word[0]+i+1;
							while (( *to++ = *from++ ) != 0 )
								;
							i = 0;
							z0 = 1;
						}
					}
					break;
				} /** end of follow-up stuff **/
				n += 2;
			} /** end of while (parms.rules[n][0] == c) **/
		} /** end of if (n >= 0) **/
		if (z0 == 0) {
			// no rule consumed input this round: copy the current
			// letter through (double letters collapse via p0)
			// collapse_result is false for english
			if (k && p0 != -333 && !p0 &&
			    //(assert(p0!=-333),!p0) &&
			    j < wordLen && c != '\0' ) { //&&
				//(!parms.collapse_result ||
				// j == 0 || target[j-1] != c))
				/** condense only double letters **/
				target[j] = c;
				///printf("\n setting \n");
				j++;
			}
			/*else if (p0 || !k)
			  log( LOG_WARN,"lang: no rule found; "
			  "character \"%c\" skipped",word[i] );*/
			// goto the next character of the word
			i++;
			z = 0;
			k=0;
		}
	} /** end of while ((c = word[i]) != '\0') **/
	target[j] = '\0';
	return true;
}
bool Language::hasMispelling(char *phrase, int32_t phraseLen){
char *p = phrase;
char *pend = p;
while ( pend < phrase + phraseLen ){
while ( *pend != ' ' && pend < phrase + phraseLen )
pend++;
char word[1024];
gbmemcpy(word, p, pend - p);
word[pend - p] = '\0';
uint32_t key = hash32d(p, pend - p);
int32_t slot = m_misp.getSlot(key);
if ( slot != -1 ){
log(LOG_WARN,"lang: found mispelling in %s", word);
return true;
}
pend++;
p = pend;
}
return false;
}
///////////////////////////////////////////////////////
// DICTIONARY GENERATION ROUTINES BELOW HERE
//
///////////////////////////////////////////////////////
/*
// . return false and set g_errno on error, true on success
bool Language::generateDicts ( int32_t numWordsToDump , char *coll ) {
log(LOG_INIT,
"lang: Reading first %"INT32" words from titledb records in "
"collection '%s'.",
numWordsToDump,coll);
// ensure we got a dict dir in our working dir
char dd[1024];
if ( gbstrlen ( g_hostdb.m_dir ) > 1000 ) {
g_errno = EBADENGINEER;
log("lang: Working directory %s is too long.",
g_hostdb.m_dir);
return false;
}
sprintf ( dd , "mkdir %sdict.new/" , g_hostdb.m_dir );
log(LOG_INIT,"lang: %s",dd);
if ( gbsystem ( dd ) == -1 ) return false;
sprintf ( dd , "mkdir %stmp/" , g_hostdb.m_dir );
log(LOG_INIT,"lang: %s",dd);
if ( gbsystem ( dd ) == -1 ) return false;
// . loop through all titleRecs
// . put all words/phrases that begin with letter X in file
// words.Y, where Y is the numeric value of to_dict_char(X)
// . don't dump out more than "100,000" words/phrases
// . only dump out one title rec per IP
// . do not dump out a word/phrase more than once for the same titleRec
// . stores files in /tmp/ dir
if (!ucInit(g_hostdb.m_dir))
return log("Unicode initialization failed!");
g_conf.m_spiderdbMaxTreeMem = 1024*1024*30;
g_titledb.init ();
g_collectiondb.init(true);
g_titledb.addColl ( coll );
// load the mispellings file first
//if ( !loadMispelledWords() )
// log (LOG_WARN,"lang: mispelled file could not be loaded");
//log(LOG_DEBUG, "lang: making query files");
//if( !makeQueryFiles ( ) )
// return log("lang: had error: %s.",
// mstrerror(g_errno));
log(LOG_DEBUG, "lang: making word files");
if( ! makeWordFiles ( numWordsToDump , MAX_WORDS_PER_PHRASE , coll ) )
return log("lang: had error: %s.",
mstrerror(g_errno));
log(LOG_DEBUG, "lang: making pop files");
if ( ! makePopFiles ( numWordsToDump , MAX_WORDS_PER_PHRASE , coll ) )
return log("lang: had error: %s.",
mstrerror(g_errno));
// add words from /usr/dict/words to the word files
//if ( ! addDictWords ( ) ) return false;
// sort each file
for ( int32_t i = 0 ; i < NUM_CHARS ; i++ ) {
char tmp[1024];
// . sort should treat all lower chars as upper
// . sort in reverse order so longer fragments are on top
// of their int16_ter sub fragments so if they have the
// same score in the end, we'll keep the longer fragment
sprintf(tmp,"sort -f -r %stmp/%s/%s.words.%"INT32" > "
"%stmp/%s/%s.words.%"INT32".sorted",
g_hostdb.m_dir, getLanguageAbbr(m_lang),
getLanguageAbbr(m_lang), i, g_hostdb.m_dir,
getLanguageAbbr(m_lang), getLanguageAbbr(m_lang), i);
log(LOG_INIT,"lang: %s",tmp);
gbsystem ( tmp );
}
// . now convert each sorted file into a unique list of word/phrases
// with scores
// . score is number of times that word/phrase was found in the file
// . truncate each file to the top "1000000" words/phrases
if ( ! makeScoreFiles ( 180000 ))//numWordsToDump, max # words per file
return log(
"lang: had error: %s.",mstrerror(g_errno));
loadRules();
// success
return true;
}
// . TODO: remove bad words
// . loop through all titleRecs
// . put all words/phrases that begin with letter X in file
// words.Y, where Y = to_dict_char(X) [that compress the char value]
// . don't dump out more than "100,000" words/phrases
// . only dump out one title rec per IP
// . do not dump out a word/phrase more than once for the same titleRec
// . stores files in /tmp/ dir
// . return false and set g_errno on error, true on success
bool Language::makeWordFiles ( int32_t numWordsToDump , int32_t numWordsPerPhrase ,
char *coll ) {
int32_t numDumped = 0;
// message
log(LOG_INIT,"lang: Dumping first %"INT32" words/phrases.",
numWordsToDump );
// . only allow 1 vote per ip domain
// . assume each titlerec has about 50 words in it
uint32_t maxNumIps = numWordsToDump / 50 ;
if ( maxNumIps < 100000 ) maxNumIps = 100000;
int32_t iptableSize = maxNumIps * 4;
log(LOG_INIT,"lang: Allocating %"INT32" bytes.", iptableSize );
int32_t *iptable = (int32_t *) mmalloc ( iptableSize , "Language" );
if ( ! iptable ) {
return log(
"lang: Could not allocate %"INT32" bytes: %s",
iptableSize,mstrerror(g_errno));
}
memset ( iptable , 0 , iptableSize );
// get the default siteRec
//SiteRec sr;
//Url dummy;
//dummy.set ( "www.jinx.com" , gbstrlen("www.jinx.com") );
//sr.set ( &dummy , coll , gbstrlen(coll) , 7 ); // filenum
// read in 12 byte key, 4 byte size then data of that size
uint32_t ip;
int32_t totalVoters = 0;
uint32_t h;
// buffer used for storing de-tagged doc content
// JAB: warning abatement
// int32_t xbufSize ;
// declare up here so we can jump to done: label
int32_t nw;
//XmlDoc doc;
Words w;
Xml xml;
Url *u;
TitleRec tr;
// JAB: warning abatement
//char xbuf [ 1024*512 ] ; //1024 ];
//int32_t jx = numWordsPerPhrase * 2;
// the word vote table to ensure one vote per word per doc
int32_t vnumEntries ;
int32_t vtableSize = 0 ;
int32_t *vtable = NULL;
// display titlerec # we are scanning
int32_t count = 0;
// open all files for appending
int fds [ NUM_CHARS ];
for ( int32_t i = 0 ; i < NUM_CHARS ; i++ ) {
char ff[1024];
sprintf ( ff , "%stmp/%s/%s.words.%"INT32"", g_hostdb.m_dir,
getLanguageAbbr(m_lang),getLanguageAbbr(m_lang), i );
// delete it first
unlink ( ff );
// then open a new one for appending
fds[i] = open ( ff ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fds[i] < 0 )
return log("lang: Could not open %s for writing: "
"%s.",ff, strerror(errno));
}
// message
//log(LOG_INIT,"lang: Scanning title recs for words and phrases in "
// "%s",colldir);
//
// THE TITLE SCAN LOOP
//
//g_conf.m_spiderdbMaxTreeMem = 1024*1024*30;
//g_titledb.init ();
//g_collectiondb.init(true);
//g_titledb.addColl ( coll );
key_t startKey ;
key_t endKey ;
startKey.setMin();
endKey.setMax();
startKey = g_titledb.makeFirstTitleRecKey ( 0 ); // docid );
// turn off threads
g_threads.disableThreads();
// get a meg at a time
int32_t minRecSizes = 1024*1024;
Msg5 msg5;
Msg5 msg5b;
RdbList list;
key_t k ;
char *rec ;
int32_t recSize ;
int32_t sameip = 0;
int32_t y;
char quality;
loop:
// use msg5 to get the list, should ALWAYS block since no threads
if ( ! msg5.getList ( RDB_TITLEDB ,
//"main" , // coll ,
coll ,
&list ,
startKey ,
endKey ,
minRecSizes ,
false , // includeTree ,
false , // add to cache?
0 , // max cache age
0 , // startFileNum ,
1 , // numFiles ,
NULL , // state
NULL , // callback
0 , // niceness
false , // err correction?
NULL , // cache key ptr
0 , // retry num
-1 , // maxRetries
true , // compensate for merge
-1LL , // sync point
&msg5b )){
log(LOG_LOGIC,"lang: getList did not block.");
return false;
}
// all done if empty
log(LOG_INIT, "lang: got list: %"INT32" recs", list.getNumRecs());
if ( list.isEmpty() ) goto done;
k = list.getCurrentKey();
rec = list.getCurrentRec();
recSize = list.getCurrentRecSize();
startKey = *(key_t *)list.getLastKey();
startKey += (uint32_t) 1;
// watch out for wrap around
if ( startKey < *(key_t *)list.getLastKey() ) goto done;
//
// END SCAN LOOP
//
// parse out and decompress the TitleRec
tr.set ( rec , recSize , false ) ; // owndata?
// if quality is low, skip this doc
quality = tr.getDocQuality();
if ( quality < 60 )
goto loop;
// only do your language
if ( tr.m_language != m_lang )
goto loop;
// extract the url
u = tr.getUrl();
// get ip
ip = u->getIp();
// look up in ip table
h = ip % maxNumIps;
y = 0;
ipchain:
if ( iptable[h] ) {
// skip if already voted
if ( iptable[h] == (int32_t)ip ) { sameip++; goto loop; }
// chain to next bucket
if ( ++h >= maxNumIps ) h = 0;
if ( ++y > (int32_t)maxNumIps ) {
log(LOG_LOGIC,"spell: IP table is too small. "
"Exiting.");
char *xx = NULL; *xx = 0;
}
goto ipchain;
}
// store in bucket so no doc from this ip votes again
iptable[h] = ip;
// count the voters
totalVoters++;
// parse all the tags out
//doc.set ( &tr , &sr );
// store in this xbuf w/o tags
xml.set ( tr.getCharset(),tr.getContent() , tr.getContentLen() ,
false , 0, false ,
tr.getVersion() );
//xml = doc.getXml();
// xbufSize = xml.getText ( xbuf ,
// 1024*512 ,
// 0 ,
// 999999 ,
// false ,
// true ,
// true );
// convert non-tag content into words
w.set(&xml, true, true);
// hash each phrase
nw = w.getNumWords();
// TODO: make the above a getWords(&w) routine!!
// so it can take from titleRecs or query logs
// . don't hash a word from this doc more than once
// . wvtable = word vote table
vnumEntries = (nw * numWordsPerPhrase * 130) / 100;
vtableSize = vnumEntries * 4;
//log("mallocing2b %"INT32" bytes", vtableSize );
if ( (count % 100) == 0 )
log(LOG_INIT,"lang: Scanning document %"INT32" "
"(%"INT32" dup ips, %"INT32" words dumped).",
count,sameip,numDumped);
count++;
vtable = (int32_t *) mmalloc ( vtableSize , "Language" );
if ( ! vtable ) {
mfree ( iptable , iptableSize , "Language" );
return log("lang: Failed to allocate %"INT32" "
"bytes: %s.",iptableSize,mstrerror(g_errno));
}
memset ( vtable , 0 , vtableSize );
// every other word is punctuation, so step by 2
for ( int32_t i = 0 ; i < nw ; i ++ ) {
// skip punct. wordId is 0.
if ( w.isPunct(i) ) continue;
// is the ith word a stop word?
// tmp buffer to hold word/phrase
char tmp[1024];
char *tmpp = tmp;
char *tmpend = tmp + 1024 - 3;
char *ww = w.getWord(i);
int32_t wwlen = w.getWordLen(i);
if ( wwlen < 2 )
continue;
bool isStop = ::isStopWord ( ww, wwlen, w.getWordId (i));
// BUT ok if Capitalized or number
if ( isStop ) {
if ( is_digit (ww[0]) ) isStop = false;
if ( is_cap (ww,wwlen) ) isStop = false;
// e-mail, c file, c. s. lewis
if ( wwlen == 1 && ww[0] != 'a' ) isStop = false;
}
// loop over # of words per phrase
for ( int32_t k = 1 ; k < numWordsPerPhrase ; k++ ) {
tmpp = tmp;
// stop words cannot start dictionary phrases
if ( k > 1 && isStop ) break;
int32_t lastj = -1;
// do not end on stop word either
for ( int32_t j = i ; j < i + k * 2 ; j++ ) {
// skip if overflow
if ( j >= nw ) continue;
// skip punct
if ( w.isPunct(j) ) continue;
// point to word
char *ww = w.getWord(j);
int32_t wwlen = w.getWordLen(j);
// if no room to store word, skip it
if ( tmpp + wwlen >= tmpend ) {
tmpp = tmp; break; }
// write word into buf
// convert to lower case so our sort works
// they way it should
char tx[1024];
// n is how many bytes we wrote into "tx"
int32_t n = to_lower_utf8(tmpp,tmpend,ww,wwlen);
// advance it
tmpp += n;
// no longer convert to utf8, cuz title rec
// is now already in utf8 by default!!
//tmpp += latin1ToUtf8( tmpp,
// tmpend - tmpp,
// tx, wwlen );
// remember last word # we added
lastj = j;
// followed by space, apostrophe or hyphen
if ( ww[wwlen] == '-' ) *tmpp = '-';
else if ( ww[wwlen] == '\'' ) *tmpp = '\'';
else *tmpp = ' ';
tmpp++;
}
// bail if nothing to add
if ( tmpp <= tmp )
continue;
// don't add dict phrase if last word is a stop word
if ( k > 1 && lastj >= 0 ) {
char *ww = w.getWord ( lastj );
int32_t wwlen = w.getWordLen ( lastj );
int64_t wid = w.getWordId ( lastj );
bool isStop = ::isStopWord(ww,wwlen,wid);
// BUT ok if Capitalized or number
if ( isStop ) {
if (is_digit (ww[0]) ) isStop=false;
if (is_cap (ww,wwlen)) isStop=false;
}
if ( isStop ) continue;
}
// point to last space
tmpp--;
// overwrite it, terminate with a \n
*tmpp = '\n';
// how long is it? does not include terminating \n
int32_t tmplen = tmpp - tmp;
// skip if nothing
if ( tmplen <= 0 )
continue;
// skip word if it has binary chars in it
if ( has_binary ( tmp , tmplen ) )
continue;
// debug
//if ( strncasecmp ( tmp , "a zero" , 6 ) == 0 )
// log("shit");
// get hash of word/phrase
// we need to preserve distinguish between proper
// and improper accent marks, so don't do just ascii
// by using wh = w.getWordId(j)
uint64_t hh = hash64Lower_utf8 (tmp,tmplen );
// don't allow more than one vote per doc for a word
int32_t ii = hh % vnumEntries;
vchain:
if ( vtable[ii] && vtable[ii] != (int32_t)hh ) {
if ( ++ii >= vnumEntries ) ii = 0 ;
goto vchain;
}
if ( vtable[ii] ) continue;
// store it
vtable[ii] = (int32_t)hh;
// a new word for this doc
// append the word out to file
int32_t fn = to_dict_char(tmp[0]);
// write the hash before the word
//char tt[32];
//sprintf ( tt , "%016"XINT64" ", hh );
//if ( write ( fds[fn], tt , 17 ) != 17 )
// return log("spell: makeWordFiles: write: %s",
// strerror(errno));
char tmpx[2080];
tmpp++;
*tmpp = '\0';
sprintf(tmpx,"%s", tmp);
int32_t tmpxlen = gbstrlen(tmpx);
// write out the trailing \n as well
int32_t wn = write ( fds[fn] , tmpx , tmpxlen ) ;
if ( wn != tmpxlen )
return log("spell: makeWordFiles: write: %s",
strerror(errno));
numDumped++;
if ( numDumped >= numWordsToDump ) goto done;
}
}
// breakout:
// don't need the word voting table anymore
if ( vtable ) mfree ( vtable , vtableSize , "Language");
vtable = NULL;
// get more titlerecs so we can hash more words/phrases
goto loop;
done:
// don't need the word voting table anymore
if ( vtable ) mfree ( vtable , vtableSize , "Language");
vtable = NULL;
// close all files
for ( int32_t i = 0 ; i < NUM_CHARS ; i++ )
close ( fds[i] );
return true;
}
#define NUM_UNIFILES MAX_LANGUAGES
bool Language::makePopFiles ( int32_t numWordsToDump , int32_t numWordsPerPhrase ,
char *coll) {
int32_t numDumped = 0;
int32_t docCount = 0;
// message
log(LOG_INIT,"lang: Dumping first %"INT32" words/phrases.",
numWordsToDump );
// . only allow 1 vote per ip domain
// . assume each titlerec has about 50 words in it
uint32_t maxNumIps = numWordsToDump / 50 ;
if ( maxNumIps < 100000 ) maxNumIps = 100000;
int32_t iptableSize = maxNumIps * 4;
log(LOG_INIT,"lang: Allocating %"INT32" bytes.", iptableSize );
int32_t *iptable = (int32_t *) mmalloc ( iptableSize , "Language" );
if ( ! iptable ) {
return log(
"lang: Could not allocate %"INT32" bytes: %s",
iptableSize,mstrerror(g_errno));
}
memset ( iptable , 0 , iptableSize );
// get the default siteRec
//SiteRec sr;
//Url dummy;
//dummy.set ( "www.jinx.com" , gbstrlen("www.jinx.com") );
//sr.set ( &dummy , coll , gbstrlen(coll) , 7 ); // filenum
// read in 12 byte key, 4 byte size then data of that size
uint32_t ip;
int32_t totalVoters = 0;
uint32_t h;
// buffer used for storing de-tagged doc content
int32_t xbufSize ;
// declare up here so we can jump to done: label
int32_t nw;
//XmlDoc doc;
Words w;
Xml xml;
//Scores s;
Url *u;
TitleRec tr;
char xbuf [ 1024*512 ] ; //1024 ];
//int32_t jx = numWordsPerPhrase * 2;
// the word vote table to ensure one vote per word per doc
int32_t vnumEntries ;
int32_t vtableSize = 0 ;
int32_t *vtable = NULL;
// display titlerec # we are scanning
int32_t count = 0;
// open all files for appending
int fds [ NUM_UNIFILES ];
for ( int32_t i = 0 ; i < NUM_UNIFILES ; i++ ) {
char ff[1024];
sprintf ( ff , "%stmp/%s/%s.popwords.%"INT32"", g_hostdb.m_dir ,
getLanguageAbbr(m_lang),getLanguageAbbr(m_lang), i );
// delete it first
unlink ( ff );
// then open a new one for appending
fds[i] = open ( ff ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fds[i] < 0 )
return log("lang: Could not open %s for writing: "
"%s.",ff, strerror(errno));
}
// message
//log(LOG_INIT,"lang: Scanning title recs for words and phrases in "
// "%s",colldir);
//
// THE TITLE SCAN LOOP
//
//g_conf.m_spiderdbMaxTreeMem = 1024*1024*30;
//g_titledb.init ();
//g_collectiondb.init(true);
//g_titledb.addColl ( coll );
key_t startKey ;
key_t endKey ;
startKey.setMin();
endKey.setMax();
startKey = g_titledb.makeFirstTitleRecKey ( 0 ); // docid );
// turn off threads
g_threads.disableThreads();
// get a meg at a time
int32_t minRecSizes = 1024*1024;
Msg5 msg5;
Msg5 msg5b;
RdbList list;
key_t k ;
char *rec ;
int32_t recSize ;
int32_t sameip = 0;
int32_t y;
char quality;
int32_t badFlags = SEC_SCRIPT|SEC_STYLE|SEC_SELECT;
Sections ss;
loop:
// use msg5 to get the list, should ALWAYS block since no threads
if ( ! msg5.getList ( RDB_TITLEDB ,
//"main" , // coll ,
coll ,
&list ,
startKey ,
endKey ,
minRecSizes ,
false , // includeTree ,
false , // add to cache?
0 , // max cache age
0 , // startFileNum ,
-1 , // numFiles ,
NULL , // state
NULL , // callback
0 , // niceness
false , // err correction?
NULL , // cache key ptr
0 , // retry num
-1 , // maxRetries
true , // compensate for merge
-1LL , // sync point
&msg5b )){
log(LOG_LOGIC,"lang: getList did not block.");
return false;
}
// all done if empty
log(LOG_INIT, "lang: got list: %"INT32" recs", list.getNumRecs());
if ( list.isEmpty() ) goto done;
list.resetListPtr();
docloop:
k = list.getCurrentKey();
rec = list.getCurrentRec();
recSize = list.getCurrentRecSize();
//
// END SCAN LOOP
//
docCount++;
// parse out and decompress the TitleRec
tr.set ( rec , recSize , false ) ; // owndata?
// if quality is low, skip this doc
quality = tr.getDocQuality();
if ( quality < 60 )
goto docdone;
if ( tr.m_language != m_lang )
goto docdone;
// extract the url
u = tr.getUrl();
// get ip
ip = u->getIp();
// look up in ip table
h = ip % maxNumIps;
y = 0;
ipchain:
if ( iptable[h] ) {
// skip if already voted
if ( iptable[h] == (int32_t)ip ) { sameip++; goto docdone; }
// chain to next bucket
if ( ++h >= maxNumIps ) h = 0;
if ( ++y > (int32_t)maxNumIps ) {
log(LOG_LOGIC,"spell: IP table is too small. "
"Exiting.");
char *xx = NULL; *xx = 0;
}
goto ipchain;
}
// store in bucket so no doc from this ip votes again
iptable[h] = ip;
// count the voters
totalVoters++;
// parse all the tags out
//doc.set ( &tr , &sr );
// store in this xbuf w/o tags
xml.set ( tr.getCharset(),tr.getContent() , tr.getContentLen() ,
false , 0, false ,
tr.getVersion() );
//xml = doc.getXml();
xbufSize = xml.getText ( xbuf ,
1024*512 ,
0 ,
999999 ,
false ,
true ,
true );
// convert non-tag content into words
//w.set ( true, (char*)xbuf , xbufSize );
w.set ( &xml, true, true);
//s.set ( &w, &xml , TITLEREC_CURRENT_VERSION );
//s.set ( &w, TITLEREC_CURRENT_VERSION , false );
ss.set ( &w,NULL,0,NULL,0,NULL,NULL,&tr,NULL,0);
// hash each phrase
nw = w.getNumWords();
// TODO: make the above a getWords(&w) routine!!
// so it can take from titleRecs or query logs
// . don't hash a word from this doc more than once
// . wvtable = word vote table
vnumEntries = (nw * numWordsPerPhrase * 130) / 100;
vtableSize = vnumEntries * 4;
//log("mallocing2b %"INT32" bytes", vtableSize );
if ( (count % 100) == 0 )
log(LOG_INIT,"lang: Scanning document %"INT32" "
"(%"INT32" dup ips, %"INT32" words dumped).",
count,sameip,numDumped);
count++;
vtable = (int32_t *) mmalloc ( vtableSize , "Language" );
if ( ! vtable ) {
mfree ( iptable , iptableSize , "Language" );
return log("lang: Failed to allocate %"INT32" "
"bytes: %s.",iptableSize,mstrerror(g_errno));
}
memset ( vtable , 0 , vtableSize );
// every other word is punctuation, so step by 2
//log("Adding %d words", nw);
for ( int32_t i = 0 ; i < nw ; i ++ ) {
// skip punct
//if ( w.isPunct(i) ) continue;
//if ( !s.getScore(i) ) continue;
if ( ss.m_sectionPtrs[i]->m_flags & badFlags ) continue;
// is the ith word a stop word?
// tmp buffer to hold word/phrase
char tmp[2048];
char *tmpp = tmp;
char *tmpend = tmp + 2048 - 3;
char *ww = w.getWord(i);
int32_t wwlen = w.getWordLen(i);
bool isStop = ::isStopWord ( ww, wwlen, w.getWordId (i));
// BUT ok if Capitalized or number
if ( isStop ) {
if ( w.isNum(i) ) isStop = false;
if ( w.isUpper(i)) isStop = false;
// e-mail, c file, c. s. lewis
if ( wwlen == 1 && ww[0] != 'a' )
isStop = false;
}
// loop over # of words per phrase
for ( int32_t k = 1 ; k < numWordsPerPhrase ; k++ ) {
tmpp = tmp;
// stop words cannot start dictionary phrases
if ( k > 1 && isStop ) break;
int32_t lastj = -1;
// do not end on stop word either
for ( int32_t j = i ; j < i + k * 2 ; j++ ) {
// skip if overflow
if ( j >= nw ) continue;
// skip punct
//if ( w.isPunct(i+j) ) continue;
//if ( !s.getScore(i+j) ) continue;
if ( ss.m_sectionPtrs[j]->m_flags &badFlags )
continue;
// point to word
char *ww = w.getWord(j);
int32_t wwlen = w.getWordLen(j);
// if no room to store word, skip it
if ( tmpp + wwlen >= tmpend ) {
tmpp = tmp; break; }
// write word into buf
// convert to lower case so our sort works
// they way it should
// n is how many bytes we wrote into "tx"
int32_t n = to_lower_utf8(tmpp,tmpend,ww,wwlen);
// advance it
tmpp += n;
// remember last word # we added
lastj = j;
// followed by space, apostrophe or hyphen
if ( ww[wwlen] == '-' ) *tmpp = '-';
else if ( ww[wwlen] == '\'' ) *tmpp = '\'';
else *tmpp = ' ';
tmpp++;
}
// bail if nothing to add
if ( tmpp <= tmp ) continue;
// don't add dict phrase if last word is a stop word
if ( k > 1 && lastj >= 0 ) {
char *ww = w.getWord ( lastj );
int32_t wwlen = w.getWordLen ( lastj );
int64_t wid = w.getWordId ( lastj );
isStop =::isStopWord(ww,wwlen,wid);
// BUT ok if Capitalized or number
if ( isStop ) {
if ( w.isNum(lastj) ) isStop=false;
if ( w.isUpper( lastj ) ) isStop=false;
}
if ( isStop ) continue;
}
// point to last space
//tmpp--;
// overwrite it, terminate with a \n
*tmpp = '\n';
// how long is it? does not include terminating \n
int32_t tmplen = tmpp - tmp;
// skip if nothing
if ( tmplen <= 0 ) continue;
// skip word if it has binary chars in it
if ( has_binary ( tmp , tmplen ) ) continue;
// debug
//if ( strncasecmp ( tmp , "a zero" , 6 ) == 0 )
// log("shit");
// get hash of word/phrase
// we need to preserve distinguish between proper
// and improper accent marks, so don't do just ascii
// by using wh = w.getWordId(i+j)
uint64_t hh = hash64Lower_utf8 (tmp,tmplen );
// don't allow more than one vote per doc for a word
int32_t ii = hh % vnumEntries;
vchain:
if ( vtable[ii] && vtable[ii] != (int32_t)hh ) {
if ( ++ii >= vnumEntries ) ii = 0 ;
goto vchain;
}
if ( vtable[ii] ) continue;
// store it
vtable[ii] = (int32_t)hh;
// a new word for this doc
// append the word out to file
//int32_t fn = to_dict_char(tmp[0]);
int32_t fn = tr.getLanguage();
// write the hash before the word
//char tt[32];
//sprintf ( tt , "%016"XINT64" ", hh );
//if ( write ( fds[fn], tt , 17 ) != 17 )
// return log("spell: makeWordFiles: write: %s",
// strerror(errno));
// write out the trailing \n as well
int32_t wn = write ( fds[fn] , tmp , tmplen + 1) ;
if ( wn != tmplen + 1 )
return log("spell: makePopFiles: "
"write: %s",
strerror(errno));
numDumped++;
if ( numDumped >= numWordsToDump )
goto done;
}
}
//log(LOG_INIT, "lang: got %"INT32" docs, %"INT32" words",
//docCount, numDumped);
// breakout:
// don't need the word voting table anymore
if ( vtable ) mfree ( vtable , vtableSize , "Language");
vtable = NULL;
docdone:
// get more titlerecs so we can hash more words/phrases
list.skipCurrentRecord();
if (!list.isExhausted())
goto docloop;
startKey = *(key_t *)list.getLastKey();
startKey += (uint32_t) 1;
// watch out for wrap around
if ( startKey < *(key_t *)list.getLastKey() ) goto done;
goto loop;
done:
// don't need the word voting table anymore
log(LOG_INIT, "lang: got %"INT32" docs total", docCount);
if ( vtable ) mfree ( vtable , vtableSize , "Language");
vtable = NULL;
// close all files
for ( int32_t i = 0 ; i < NUM_UNIFILES ; i++ ) close ( fds[i] );
return true;
}
// . now convert each sorted file into a unique list of word/phrases
// with scores
// . score is number of times that word/phrase was found in the file
// . truncate each file to the top "maxWordsPerFile" words/phrases
// Collapse each sorted word file into (score, word/phrase) tuples.
// Pass 1: count runs of identical lines in "<lang>.words.N.sorted" (the
//         producer de-duped per document, so a run length == doc count)
//         into a ".prescored" file, dropping ultra-common leaders.
// Pass 2: subtract a shorter phrase's score from any longer phrase that
//         extends it, writing survivors to ".scored".
// Pass 3: shell out to sort/head to keep the top maxWordsPerFile entries.
// Returns true on success; returns the (false) result of log() on error.
// NOTE(review): error returns leave fdr/fdw open — fd leak on failure paths.
bool Language::makeScoreFiles ( int32_t maxWordsPerFile ) {
// convert each file
for ( int32_t i = 0 ; i < NUM_CHARS ; i++ ) {
// open the file for reading
char ff[1024];
sprintf ( ff , "%stmp/%s/%s.words.%"INT32".sorted", g_hostdb.m_dir,
getLanguageAbbr(m_lang),getLanguageAbbr(m_lang), i );
FILE *fdr = fopen ( ff , "r" );
if ( ! fdr )
return log(
"lang: Failed to open %s for reading: "
"%s.",ff, strerror(errno));
// and one for writing out score/word pairs
sprintf ( ff, "%stmp/%s/%s.words.%"INT32".prescored",g_hostdb.m_dir,
getLanguageAbbr(m_lang),getLanguageAbbr(m_lang), i );
FILE *fdw = fopen ( ff , "w" );
if ( ! fdw )
return log(
"lang: Failed to open %s for writing: "
"%s.",ff, strerror(errno));
log(LOG_INIT,"lang: Making %s.", ff );
// ongoing score count
int32_t score = 0;
int32_t oldscore = 0;
// store last word/phrase in here
char lastw [ 1029];
lastw[0] = '\0';
// and its hash in here
uint64_t lasthh = 0;
char pbuf[1024];
//int32_t bonus = 0;
//bool gotit = false; // do we start w/ '*'? means in dict.
// read in each line
while ( fgets ( pbuf , 1024 , fdr ) ) {
char *p = pbuf;
// skip '*'
//if ( *p == '*' ) { gotit = true ; p++; }
//else gotit = false;
// skip lines beginning with "the " TOO COMMON
if ( (p[0] == 't' || p[0] == 'T') &&
strncasecmp ( p , "the ", 4 ) == 0 )
continue;
// also, "and "
if ( (p[0] == 'a' || p[0] == 'A') &&
strncasecmp ( p , "and ", 4 ) == 0 )
continue;
// and, "a "
if ( (p[0] == 'a' || p[0] == 'A') && p[1] == ' ')
continue;
// don't include terminating \n in the length
int32_t plen = gbstrlen(p) - 1;
if ( plen <= 0 ) continue;
// skip if too big and might have been truncated
if ( plen >= 1000 ) continue;
// NULL terminate it to take off ending * and/or \n
p [plen] = '\0';
// get the hash of this word/phrase
uint64_t hh = hash64Lower_utf8 ( p , plen );
//sscanf ( buf , "%"XINT64"" , &hh );
// was it same as last? if so, tally and continue
if ( hh == lasthh ) {
score++;
//if ( gotit ) bonus = IN_DICT_BONUS;
continue;
}
// add bonus to score to get final score
//score += bonus;
// . otherwise, we're starting a new word
// . print out the word before us
if ( score >= MIN_DOCS ) {
//if ( gotit ) // bonus )
// fprintf(fdw,"%05"INT32" *%s\n",score,lastw);
//else
fprintf(fdw,"%05"INT32" %s\n" ,score,lastw);
}
// we are now the new word
lasthh = hh;
// NOTE(review): strncpy with 1010 does not guarantee NUL
// termination, but plen < 1000 above keeps p short enough
strncpy ( lastw , p , 1010 );
//if ( gotit ) bonus = IN_DICT_BONUS;
//else bonus = 0;
// give us score 1
score = 1;
}
// write out the last
// skip if too big and might have been truncated
//score += bonus;
if ( score >= MIN_DOCS && gbstrlen(lastw) < 1000) {
//if (gotit) fprintf (fdw,"%05"INT32" *%s\n",score,lastw );
// else fprintf (fdw,"%05"INT32" %s\n" ,score,lastw );
fprintf (fdw,"%05"INT32" %s\n" ,score,lastw );
}
fclose ( fdr );
fclose ( fdw );
//
// now remove small phrases in there just because the
// big phrase containing them is the popular one
//
// open the file for reading
sprintf ( ff, "%stmp/%s/%s.words.%"INT32".prescored",g_hostdb.m_dir,
getLanguageAbbr(m_lang),getLanguageAbbr(m_lang), i );
fdr = fopen ( ff , "r" );
if ( ! fdr )
return log(
"lang: Failed to open %s for reading: "
"%s.",ff, strerror(errno));
// and one for writing out score/word pairs
sprintf ( ff , "%stmp/%s/%s.words.%"INT32".scored", g_hostdb.m_dir,
getLanguageAbbr(m_lang),getLanguageAbbr(m_lang), i );
fdw = fopen ( ff , "w" );
if ( ! fdw )
return log(
"lang: Failed to open %s for writing: "
"%s.",ff, strerror(errno));
lastw[0] = '\0';
// read in each line
while ( fgets ( pbuf , 1024 , fdr ) ) {
char *p = pbuf;
// don't include terminating \n in the length
// NOTE(review): unlike pass 1 there is no "plen <= 0"
// guard here, so an empty line would write p[-1] below
int32_t plen = gbstrlen(p) - 1;
// NULL terminate it to take off ending * and/or \n
p [plen] = '\0';
// get score (this local shadows the pass-1 "score" above)
int32_t score = atoi(p);
// advance p over score and separating space
while ( isdigit(*p) ) p++;
p++;
// skip '*'
//if ( *p == '*' ) { gotit = true ; p++; }
//else gotit = false;
// debug point
//if ( strcmp ( p , "a wide variety of topics" )==0)
// log("got it");
// does the new chunk match the last one?
int32_t n;
for ( n = 0 ; p[n] &&
to_lower_a(p[n]) ==
to_lower_a(lastw[n]); n++ );
// cancel match if doesn't fail on a word boundary
if ( p[n] ) n = 0;
if ( is_alnum(lastw[n]) ) n = 0;
// if match subtract score so we don't leech our
// points from him
if ( n > 0 ) score -= oldscore;
// if our score is now too low, don't add ourselves
if ( score < MIN_DOCS ) continue;
// . save it to disk
// . this puts the asterisk back at the end of the
// word for easier reading
//if ( gotit) fprintf(fdw,"%05"INT32" %s*\n",score,p);
//else fprintf(fdw,"%05"INT32" %s\n" ,score,p);
fprintf(fdw,"%05"INT32"\t%s\n" ,score,p);
// store as last
oldscore = score;
strncpy ( lastw , p , 1010 );
}
fclose ( fdr );
fclose ( fdw );
// sort the score file and output to dict.%"INT32"
char bb[1024];
sprintf( bb,
"sort -f -r %stmp/%s/%s.words.%"INT32".scored | "
"head -%"INT32" > %sdict.new/%s/%s.dict.%"INT32"",
g_hostdb.m_dir, getLanguageAbbr(m_lang),
getLanguageAbbr(m_lang), i, maxWordsPerFile,
g_hostdb.m_dir, getLanguageAbbr(m_lang),
getLanguageAbbr(m_lang), i );
log(LOG_INIT,"lang: %s",bb);
gbsystem ( bb );
// make the phonets for it too
//sprintf(bb,"%sdict.new/dict.%"INT32"",g_hostdb.m_dir,i);
//makePhonet ( bb );
}
return true;
}
// Get the queries from the http query requests and use them as phrases
// Harvest search phrases from logged HTTP query requests
// ("dict/queries.mammaN"). For each logged request this re-parses it as an
// HttpRequest, extracts the "uip" (voter id) and "q" (query) parameters,
// takes the first usable alnum fragment of the query, filters out
// misspellings/binary junk, and appends "<uip>\t<phrase>\n" to
// tmp/dict.queries.N. Returns true on success; returns the (false) result
// of log() on open/write failure.
bool Language::makeQueryFiles ( ) {
char buf [1024*10];
// NOTE(review): loop runs for i == 1 only; presumably more query logs
// were planned — confirm before widening the range
for ( int32_t i = 1; i < 2; i++ ){
//fdr = fopen ( "dict/queries.mamma","r" );
char fx[1024];
sprintf( fx,"%sdict/queries.mamma%"INT32"",g_hostdb.m_dir, i );
FILE *fdr = fopen ( fx,"r" );
if ( ! fdr ) {
return log("lang: Could not open query file for "
"reading: %s.",strerror(errno));
}
// open for writing
char ff[1024];
sprintf ( ff , "%stmp/dict.queries.%"INT32"", g_hostdb.m_dir, i );
// delete it first
unlink ( ff );
// then open a new one for appending
int fdw = open ( ff ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fdw < 0 ){
return log("lang: Could not open for %s "
"writing: %s.",ff, strerror(errno));
}
Url u;
Query q;
while ( fgets ( buf , 1024 * 10, fdr ) ) {
buf[1024 * 10 - 1] = '\0';
// length of word(s), including the terminating \n
int32_t wlen = gbstrlen(buf) ;
// skip if empty
if ( wlen <= 0 ) continue;
buf[wlen-1]='\0';
u.set(buf,gbstrlen(buf));
HttpRequest r1,r2;
bool status = r1.set ( &u ) ;
if ( !status )
continue;
r2.set( r1.getRequest(), r1.getRequestLen(), NULL );
char frag[1024];
int32_t flen;
// "uip" is prepended to the fragment as a vote key
char *query = r2.getString( "uip",&flen );
gbmemcpy ( frag, query, flen );
frag[flen++] = '\t';
int32_t queryLen;
query = r2.getString( "q",&queryLen );
q.set(query, queryLen, NULL, 0, true);
// don't use truncated queries
if ( q.m_truncated )
continue;
if ( q.m_isBoolean )
continue;
int32_t nqw = q.m_numWords;
// NOTE(review): this "i" shadows the file-loop "i" above
for ( int32_t i = 0 ; i < nqw ; i++ ) {
int32_t fragLen = flen;
// get a word in the Query to start a fragment
// with
QueryWord *qw = &q.m_qwords[i];
// can he start the phrase?
bool canStart = true;
if (!qw->isAlphaWord())
canStart = false;
// MDW: wtf is this?
//UCScript script = qw->wordScript();
//if ((script != ucScriptCommon) &&
// (script != ucScriptLatin))
// canStart = false;
if ( qw->m_ignoreWord &&
qw->m_ignoreWord != IGNORE_CONNECTED &&
qw->m_ignoreWord != IGNORE_QUOTED )
canStart = false;
// if he can't start our fragment,
// just copy over to "dst"
if ( ! canStart ) {
continue;
}
bool inQuotes = qw->m_inQuotes;
char fieldCode = qw->m_fieldCode;
// . get longest continual fragment that
// . starts with word #i. get the following
// words that can be in a fragment
// that starts with word #i start of the frag
char *p = qw->m_word;
int32_t plen = 0;
int32_t lastLen = 0;
for ( ; i < nqw ; i++ ) {
// . skip if we should
// . keep punct, however
QueryWord *qw = &q.m_qwords[i];
if ( qw->m_opcode )
break;
if ( qw->m_inQuotes != inQuotes )
break;
if ( qw->m_fieldCode != fieldCode )
break;
// are we punct?
lastLen = 0;
if ( is_alnum_utf8 ( qw->m_word ) )
lastLen=plen;
// inc the ptr
plen += qw->m_wordLen;
}
// revisit this i in big loop since we did not
// include it
i--;
// if last thing we added was punct, roll back
// over it
if ( lastLen ) { plen = lastLen; i--; }
bool lastPunct = false;
char *pend = p + plen;
for ( ; p < pend ; p += getUtf8CharSize(p) ) {
//skip anything but latin-1
//if (c > 255) continue;
if ( getUtf8CharSize(p) != 1) continue;
// only works on a single character
if ( ! to_dict_char ( *p ) )
continue;
// skip back to back punct/spaces
if ( ! is_alnum_utf8(p) && lastPunct )
continue;
if ( ! is_alnum_utf8(p) )
lastPunct = true;
else
lastPunct=false;
// check for a breech
// NOTE(review): the two statements after
// "break" are unreachable — g_errno is
// never set and false is never returned
if ( fragLen+4>=1023) {
break;
g_errno = EBUFTOOSMALL;
return false; }
// language phrases are looking
// for latin-1
char cs = getUtf8CharSize(p);
if ( cs == 1 ) {
frag[fragLen++] = *p;
continue;
}
// otherwise, more than 1 byte char
// NOTE(review): dead path — the size-1
// check above already did "continue"
// for multi-byte chars
gbmemcpy(frag+fragLen,p,cs);
fragLen += cs;
}
// if any part of the phrase has a mispelling,
// discard the query
if ( hasMispelling( &frag[flen],
fragLen - flen) ){
break;
}
frag[fragLen++] = '\n';
frag[fragLen] = '\0';
// write out the trailing \n as well
int32_t wn = write ( fdw, frag, fragLen ) ;
if ( wn != fragLen )
return log("spell: makeWordFiles: "
"write: %s",
strerror(errno));
// break here so that we only print one phrase
// per query
break;
}
}
fclose (fdr);
close (fdw);
// each ip can only vote once for a particular query.
// Each ip vote counts as one popular vote
//char cmd[2048];
// sort, then uniquify so that each ip can have only 1 occurrence
// of each phrase. Then awk to get just the phrase.
// Then sort again and uniquify with count and remove single
// occurrence phrases. Then sort on the count to get the most
// common phrases on top.
//sprintf( cmd, "sort -f %s | uniq -i | "
//"awk -F \'\\t\' \'{print $2}\' "
//"| sort -f | uniq -i -c -d | sort -g -r -k 1,1 "
//"> %s.uniq.sorted", ff, ff );
//log ( LOG_INIT,"lang: %s", cmd );
//gbsystem(cmd);
}
return true;
}
// Make a list of the wikipedia titles of docs found by the query
// "site:xx.wikipedia.org", where xx is the abbr of the language.
// Store in xx.wiki
// Kick off an async pipeline that collects Wikipedia page titles for this
// language: queries the index for "site:xx.wikipedia.org", then fetches
// titles via Msg20 summaries (see StateWik) and appends them to
// dict/xx/xx.wiki. Follows the codebase's blocking convention: a false
// return from the helpers appears to mean "blocked, callback will resume"
// — TODO confirm against Msg0/Msg20 docs.
bool Language::makeWikiFiles( ) {
// open for writing
char ff[1024];
sprintf ( ff , "%sdict/%s/%s.wiki", g_hostdb.m_dir,
getLanguageAbbr(m_lang), getLanguageAbbr(m_lang) );
// delete it first
unlink ( ff );
// then open a new one for appending
int fdw = open ( ff ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fdw < 0 ){
log("lang: Could not open for %s "
"writing: %s.",ff, strerror(errno));
// note: open failure is reported but treated as "done" (true)
return true;
}
// make a state
StateWik *st ;
try { st = new (StateWik); }
catch ( ... ) {
g_errno = ENOMEM;
// NOTE(review): "%i" with a size_t argument is a format
// mismatch on LP64 platforms
log("Lang: new(%i): %s", sizeof(StateWik),
mstrerror(g_errno));
return false;
}
mnew ( st , sizeof(StateWik) , "LanguageWik" );
st->m_fdw = fdw;
char query [MAX_QUERY_LEN];
sprintf(query,"site:%s.wikipedia.org",getLanguageAbbr(m_lang));
st->m_coll = g_conf.m_defaultColl;
st->m_collLen = gbstrlen(st->m_coll);
// . a boolFlag of 0 means query is not boolean
st->m_q.set ( query, gbstrlen(query), st->m_coll, st->m_collLen,
0 ); // boolFlag
st->m_termId = st->m_q.getTermId(0);
st->m_startKey = g_indexdb.makeStartKey ( st->m_termId );
st->m_endKey = g_indexdb.makeEndKey ( st->m_termId );
st->m_minRecSize = 500 * 1024;
if ( !st->getIndexList( ) )
return false;
return st->getSummary();
}
// Fetch the next chunk of the indexdb termlist for the wikipedia query via
// Msg0. Returns false if the read blocked (gotIndexListWrapper resumes the
// pipeline later); otherwise falls through to getSummary() immediately.
bool StateWik::getIndexList( ) {
// get the rdb ptr to titledb's rdb
//Rdb *rdb = g_indexdb.getRdb();
// -1 means read from all files in Indexdb
// get the title rec at or after this docId
if ( ! m_msg0.getList ( -1 ,
0 ,
0 ,
0 , // max cache age
false , // add to cache?
RDB_INDEXDB , // rdbId of 2 = indexdb
m_coll ,
&m_list ,
m_startKey ,
m_endKey ,
m_minRecSize, // recSizes
//st->m_useTree , // include tree?
//st->m_useCache , // include cache?
//false , // add to cache?
//0 , // startFileNum
//numFiles , // numFiles
this , // state
gotIndexListWrapper ,
0 ) ) // niceness
return false;
return getSummary( );
}
void gotIndexListWrapper( void *state , RdbList *list ){
StateWik *st = (StateWik *) state;
list->resetListPtr();
st->getSummary();
return;
}
// Launch up to MAX_FRAG_SIZE Msg20 title/summary requests, one per docId
// remaining in the current termlist chunk. Counts only the requests that
// blocked in m_numMsg20sOutstanding; if none blocked, invokes
// gotSummaryWrapper synchronously. Always returns false (the callback
// chain drives completion).
bool StateWik::getSummary( ){
m_numMsg20sOutstanding = 0;
m_numMsg20sReceived = 0;
int32_t numLaunched = 0;
// launch MAX_FRAG_SIZE msg20's at a time, wait for all of them
while ( numLaunched < MAX_FRAG_SIZE && !m_list.isExhausted() ){
int64_t docId = m_list.getCurrentDocId () ;
// set the summary request then get it!
Msg20Request req;
Query *q = &m_q;
//int32_t nt = q->m_numTerms;
req.ptr_qbuf = q->getQuery();
req.size_qbuf = q->getQueryLen()+1;
req.ptr_coll = m_coll;
req.size_coll = m_collLen+1;
req.m_docId = docId;
req.m_numSummaryLines = 3;
req.m_maxCacheAge = g_conf.m_indexdbMaxIndexListAge;
req.m_wcache = true; // addToCache
req.m_state = this;
req.m_callback = gotSummaryWrapper;
req.m_niceness = 0;
req.m_expected = true;
req.m_boolFlag = q->m_isBoolean; // 2 means auto?
req.m_allowPunctInPhrase = true;
req.m_showBanned = false;
// false return = blocked; only blocked requests are "outstanding"
if ( ! m_msg20s[numLaunched].getSummary ( &req ) )
m_numMsg20sOutstanding++;
// legacy pre-Msg20Request call kept for reference builds only
#ifdef _OLDMSG20_
if ( !m_msg20s[numLaunched].
getSummary(&m_q,
NULL,
NULL,
docId,
-1, //clusterLevel
3,//numLinesInSummary,
g_conf.m_indexdbMaxIndexListAge,
1 , //addToCache
m_coll ,
m_collLen ,
this ,
gotSummaryWrapper ,
0 ,// niceness
//m_sequentialTitledbLookup,
false ,// titledb restrict?
NULL,//m_si->m_displayMetas ,
0,//m_si->m_displayMetasLen ,
0,//bigSampleRadius ,
0,//bigSampleMaxLen ,
true,//m_si->m_isMasterAdmin ,
true , //requireallterms
false , //count links
0,
NULL, //url
false, //just get link info
false,//considerTitlesFromBody
true,// usenewsummaries
0,
NULL, //link info
NULL, //hostdb
true,//expect 2b there?
NULL,
0,
0,
true,//getvectorrec
false,//deduping
true,// allowPunctinPhrase
false,//showbanned
false,//excludeLinkText,
false,//hackFixWords,
false,//hackFixPhrases,
0,//includeCachedCopy
false))// justgetlinkquality
m_numMsg20sOutstanding++;
#endif
m_list.skipCurrentRecord();
numLaunched++;
}
m_numMsg20sLaunched = numLaunched;
if ( m_numMsg20sOutstanding > 0 )
return false;
// nothing blocked: process results right away
gotSummaryWrapper( this );
return false;
}
void gotSummaryWrapper ( void *state ){
StateWik *st = (StateWik *) state;
st->m_numMsg20sReceived++;
if ( !st->m_list.isExhausted() &&
st->m_numMsg20sLaunched < MAX_FRAG_SIZE )
return;
if ( st->m_numMsg20sReceived < st->m_numMsg20sOutstanding )
return;
if ( !st->gotSummary( ) )
return;
return;
}
// Process one batch of Msg20 replies: filter each title (no doubled
// punctuation, must contain at least one alphabetic char) and append it
// plus '\n' to the m_fdw wiki file. Then either launches another batch,
// reads the next termlist chunk, or closes the file and finishes.
bool StateWik::gotSummary ( ){
for ( int32_t i = 0; i < m_numMsg20sLaunched; i++ ){
if ( m_msg20s[i].m_errno )
continue;
char frag[MAX_FRAG_SIZE];
int32_t flen = 0;
strcpy(frag, m_msg20s[i].getTitle());
flen = gbstrlen(frag);
//log ( LOG_WARN,"lang: Got url %s with title %s",
// m_msg20s[i].getUrl(),
// m_msg20s[i].getTitle() );
// check for two or more consecutive puncts
bool lastPunct = false;
bool skip = false;
char *p = frag;
char *pend = frag + flen;
for ( ; p < pend ; p += getUtf8CharSize(p) ) {
if ( lastPunct && !is_alnum_utf8(p) ){
skip = true;
break;
}
if ( !is_alnum_utf8 ( p ) )
lastPunct = true;
}
if ( skip )
continue;
// check if all the letters are not alphabets
// NOTE(review): despite its name, numAlphas counts the
// NON-alpha characters; the test below skips titles made
// entirely of non-alpha chars
int32_t numAlphas = 0;
// another loop
p = frag;
for ( ; p < pend ; p += getUtf8CharSize(p) ) {
if ( !is_alpha_utf8 ( p ) )
numAlphas++;
}
if ( numAlphas >= flen )
continue;
frag[flen++] = '\n';
frag[flen] = '\0';
//log ( LOG_WARN,"lang: Got url %s with title %s",
// m_msg20s[i].getUrl(),frag );
// write out the trailing \n as well
// note: a short/failed write is silently skipped
int32_t wn = write ( m_fdw, frag, flen ) ;
if ( wn != flen )
continue;
}
// see if u can launch more
if ( !m_list.isExhausted() )
return getSummary();
// see if the termlist is over
if ( m_list.getListSize() >= m_minRecSize ){
// see if u can get some more of the list.
m_startKey = *(key_t *)m_list.getLastKey();
m_startKey += (uint32_t) 1;
// watch out for wrap around
if ( m_startKey >= *(key_t *)m_list.getLastKey() )
return getIndexList();
}
// close the file
close(m_fdw);
return true;
}
// Generates the phonetics of the words of the dictionary.
// Finds the term frequency and then put it as the popularity after adjusting
// Load the word list dict/xx/xx.wl into memory, compute a termId for each
// word, then issue a bulk Msg37 term-frequency lookup; gotTermFreqs()
// finishes the job (writes xx.wl.phonet and frees the state).
// Return semantics are inconsistent across error paths (some true, one
// false) — NOTE(review): confirm what the caller expects.
bool Language::makeDict(){
StateDict *st ;
try { st = new (StateDict); }
catch ( ... ) {
g_errno = ENOMEM;
// NOTE(review): "%i" with a size_t argument is a format mismatch
log("Lang: new(%i): %s", sizeof(StateDict),
mstrerror(g_errno));
return true;
}
mnew ( st , sizeof(StateDict) , "StateDict" );
m_stateDict = st;
char ff[1024];
sprintf(ff,"%sdict/%s/%s.wl", g_hostdb.m_dir,
getLanguageAbbr(m_lang), getLanguageAbbr(m_lang));
File f;
f.set (ff);
// open file
if ( ! f.open ( O_RDONLY ) ) {
log("lang: open: %s",mstrerror(g_errno));
return true;
}
// TODO : CHANGE THIS TO USE fgets
// get file size
int32_t fileSize = f.getFileSize() ;
// store a \0 at the end
// NOTE(review): the trailing byte is allocated but never actually
// written before the scan loop below reads it — confirm
st->m_dictBufSize = fileSize + 1;
// make buffer to hold all
st->m_dictBuf = (char *) mmalloc ( st->m_dictBufSize ,
"LanguageWordsBuf" );
if ( ! st->m_dictBuf) {
log("lang: mmalloc: %s",mstrerror(errno));return false;
}
// read em all in
if ( ! f.read ( st->m_dictBuf , fileSize , 0 ) ) {
log("lang: read: %s", mstrerror(g_errno));
return true;
}
// change \n to \0 so each line becomes a C string
st->m_numTuples = 0;
for ( int32_t i = 0 ; i < st->m_dictBufSize ; i++ ) {
if ( st->m_dictBuf[i] != '\n' ) continue;
st->m_dictBuf[i] = '\0';
st->m_numTuples++;
}
f.close();
// log a msg
log(LOG_INIT,"lang: read %"INT32" words into memory", st->m_numTuples );
// alloc space to make them into termids
// layout: [numTuples char* ptrs][numTuples termIds][numTuples freqs]
st->m_bufSize = st->m_numTuples * ( sizeof (char*) +
2 * sizeof (int64_t) );
st->m_buf = (char *) mmalloc ( st->m_bufSize, "LanguagePtrs" );
if ( !st->m_buf ) {
log ( LOG_WARN,"lang: could not alloc %"INT32" bytes",
st->m_bufSize );
g_errno = ENOMEM;
return true;
}
char *p = st->m_buf;
st->m_wordsPtr = (char **) p;
p += st->m_numTuples * sizeof(char *);
st->m_termIds = (int64_t *)p;
p += st->m_numTuples * sizeof(int64_t);
st->m_termFreqs = (int64_t *)p;
p += st->m_numTuples * sizeof(int64_t);
char *coll = g_conf.m_defaultColl;
int32_t collLen = gbstrlen(coll);
p = st->m_dictBuf;
for ( int32_t i = 0; i < st->m_numTuples; i++ ){
st->m_wordsPtr[i] = p;
p += gbstrlen(p) + 1;
int32_t wordLen = gbstrlen(st->m_wordsPtr[i]);
// . set query class
// . a boolFlag of 0 means query is not boolean
Query q;
q.set ( st->m_wordsPtr[i], wordLen , coll , collLen , 0 );
st->m_termIds[i] = q.getTermId(0);
st->m_termFreqs[i] = 0;
}
// false return = blocked; the wrapper is invoked on completion
if ( !st->m_msg37.getTermFreqs ( coll ,
0 , // maxAge
st->m_termIds ,
st->m_numTuples ,
st->m_termFreqs ,
this ,
gotTermFreqsWrapper,
0 , // niceness
false ))// exact count?
return false;
gotTermFreqsWrapper(this);
return true;
}
void gotTermFreqsWrapper(void *state){
Language *lang = (Language *) state;
lang->gotTermFreqs(lang->m_stateDict);
}
// Completion stage of makeDict(): normalizes each word's term frequency to
// a 0..32000 popularity, computes its phonetic form, and writes
// "freq\tword\tphonet" lines to dict/xx/xx.wl.phonet. Always frees the
// StateDict buffers and returns true.
bool Language::gotTermFreqs( StateDict *st ){
int fd;
char ff[1024];
sprintf ( ff , "%sdict/%s/%s.wl.phonet",g_hostdb.m_dir,
getLanguageAbbr(m_lang), getLanguageAbbr(m_lang));
// delete it first
unlink ( ff );
// then open a new one for appending
fd = open ( ff ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fd < 0 ){
log("lang: Could not open %s for writing: "
"%s.",ff, strerror(errno));
// zero the tuple count so the loop below is skipped
// (close(fd) below will still be called with fd < 0)
st->m_numTuples = 0;
}
int64_t max = 0LL;
for ( int32_t i = 0; i < st->m_numTuples; i++ ){
if ( st->m_termFreqs[i] > max )
max = st->m_termFreqs[i];
}
char cleanWord[MAX_PHRASE_LEN];
char phonetic[MAX_PHRASE_LEN];
int32_t wordLen = 0;
char tmp[1024];
for ( int32_t i = 0; i < st->m_numTuples; i++ ){
wordLen = gbstrlen(st->m_wordsPtr[i]);
// clean the word, i.e. convert word to uppercase and
// remove possible accents
makeClean( st->m_wordsPtr[i], wordLen,
cleanWord, MAX_PHRASE_LEN );
getPhonetic ( cleanWord, gbstrlen(cleanWord),
phonetic, MAX_PHRASE_LEN );
// NOTE(review): if every term freq is 0, max stays 0 and this
// divides by zero
int64_t freq = ( st->m_termFreqs[i] * 32000 ) / max ;
sprintf(tmp,"%"INT64"\t%s\t%s\n", freq,
st->m_wordsPtr[i], phonetic);
uint32_t wn = write ( fd , tmp , gbstrlen(tmp) ) ;
if ( wn != gbstrlen(tmp) ){
log("lang: makeWordFiles: write: %s",
strerror(errno));
break;
}
}
close(fd);
mfree ( st->m_dictBuf, st->m_dictBufSize,"LanguageDictBuf" );
mfree ( st->m_buf, st->m_bufSize,"LanguageBuf");
mdelete(st,sizeof(StateDict),"StateDict");
delete(st);
return true;
}
// DISABLED CODE (#if 0): phrase-affinity generator. Left for reference.
// NOTE(review): this region would not compile as written if re-enabled —
// e.g. openAffinityFile()/gotAffinityFreqs1() take no parameters yet pass
// an undeclared "st", and launchAffinity is defined on Language but called
// as a StateAff member. Treat as abandoned; rewrite before resurrecting.
#if 0
bool Language::makeAffinities(){
// make a state
StateAff *st ;
try { st = new (StateAff); }
catch ( ... ) {
g_errno = ENOMEM;
log("Lang: new(%i): %s", sizeof(StateAff),
mstrerror(g_errno));
return false;
}
mnew ( st , sizeof(StateAff) , "LanguageAffinity" );
st->m_fileNum = 12;
// blocked
if ( !openAffinityFile(st) )
return false;
return st->doneAffinities(st);
}
// Open dict.N for reading and dict.N.aff for writing, then start the
// affinity computation for that file; recurses to the next file number.
bool StateAff::openAffinityFile( ){
if ( m_fileNum >= NUM_CHARS )
return true;
// open for reading
char ff[1024];
sprintf ( ff , "%sdict/dict.%"INT32"", g_hostdb.m_dir, m_fileNum );
m_fdr = fopen ( ff, "r" );
if ( !m_fdr ) {
log("lang: test: Could not open %s for "
"reading: %s.", ff,strerror(errno));
return true;
}
// open for writing
sprintf ( ff , "%sdict.new/dict.%"INT32".aff", g_hostdb.m_dir,
m_fileNum );
// delete it first
unlink ( ff );
// then open a new one for appending
m_fdw = open ( ff , O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( m_fdw < 0 ){
log("lang: Could not open for %s "
"writing: %s.",ff, strerror(errno));
return true;
}
// NOTE(review): "st" is not declared in this scope
if ( !launchAffinity(st) ){
return false;
}
m_fileNum++;
return openAffinityFile(st);
}
// For each phrase in the current dict file, count hits for the unquoted
// query (denominator); gotAffinityFreqs1 then counts the quoted form.
bool Language::launchAffinity(StateAff *st){
//char dst[1026];
// go through the words in dict/words
while ( fgets ( m_buf , MAX_FRAG_SIZE , m_fdr ) ){
// length of word(s), including the terminating \n
int32_t wlen = gbstrlen(m_buf) ;
// skip if empty
if ( wlen <= 0 )
return launchAffinity(st);
m_buf[wlen-1]='\0';
// skip to the phrase. titlerec dict have space as a separator
char *p = m_buf;
while ( *p != ' ' )
p++;
p++;
char *coll = g_conf.m_defaultColl;
int32_t collLen = gbstrlen(coll);
// . set query class
// . a boolFlag of 0 means query is not boolean
int32_t numTerms = 0;
Query *q = &m_q;
if ( q->set ( p, gbstrlen(p), coll, collLen, 0 ) )
numTerms = q->getNumTerms();
// no use doing affinities on 1 word phrases
if ( numTerms <= 1 ){
char dst[1096];
sprintf( dst, "00000\t%s\n", m_buf );
log("%s",dst);
uint32_t wn = write(m_fdw, dst, gbstrlen(dst));
if ( wn != gbstrlen(dst) )
log("lang: genTopPopFile: write: %s",
strerror(errno));
continue;
}
m_msg3a.reset();
if ( !m_msg3a.
getDocIds( q ,
coll ,
collLen ,
100.0 ,
g_conf.m_indexdbMaxIndexListAge,
true ,
0 ,//stage0
30,
0 ,
this,
gotAffinityFreqs1Wrapper ) )
return false;
return gotAffinityFreqs1(st);
}
fclose(m_fdr);
close(m_fdw);
return true;
}
void gotAffinityFreqs1Wrapper(void *state){
StateAff *st = (StateAff *) state;
// NOTE(review): gotAffinityFreqs1() is declared with no parameters
st->gotAffinityFreqs1(st);
return;
}
// Got the unquoted hit count; rewrite the buffer as a quoted phrase query
// and count exact-phrase hits (numerator).
bool StateAff::gotAffinityFreqs1( ){
m_denominator = m_msg3a.getNumTotalHits();
// now get the phrase hits
char *p = m_buf;
while ( *p != ' ' )
p++;
// change the space to a quote
*p = '\"';
//go to the end
while ( *p != '\0' )
p++;
//change that to quote
*p = '\"';
p++;
// null end
*p = '\0';
p = m_buf;
while ( *p != '\"')
p++;
char *coll = g_conf.m_defaultColl;
int32_t collLen = gbstrlen(coll);
// . set query class
// . a boolFlag of 0 means query is not boolean
Query *q = &m_q;
q->set ( p, gbstrlen(p), coll, collLen, 0 );
m_msg3a.reset();
if ( !m_msg3a.
getDocIds( q ,
coll ,
collLen ,
100.0 ,
g_conf.m_indexdbMaxIndexListAge,
true ,
0 ,//stage0
30,
0 ,
this ,
gotAffinityFreqs2Wrapper ) )
return false;
return gotAffinityFreqs2(st);
}
void gotAffinityFreqs2Wrapper(void *state){
StateAff *st = (StateAff *) state;
st->gotAffinityFreqs2(st);
return;
}
// affinity = quoted hits / unquoted hits, scaled to 0..10000; write the
// "affinity<TAB>phrase" line and advance to the next phrase/file.
bool StateAff::gotAffinityFreqs2(StateAff *st){
m_numerator = m_msg3a.getNumTotalHits();
double affinity = 0;
if ( m_denominator > 0 )
affinity = (double)m_numerator / (double)m_denominator;
affinity *= 10000;
char dst[1096];
sprintf( dst, "%05.0f\t%s\n", affinity, m_buf );
log("num=%"INT64", denom=%"INT64", %s",m_numerator,m_denominator,dst);
uint32_t wn = write ( m_fdw , dst , gbstrlen(dst) ) ;
if ( wn != gbstrlen(dst) )
log("lang: genTopPopFile: write: %s",strerror(errno));
//blocked
if ( !launchAffinity(st) )
return false;
// didn't block means the file ended
m_fileNum++;
if ( !openAffinityFile(st) )
return false;
return doneAffinities(st);
}
bool StateAff::doneAffinities(StateAff *st){
mdelete(st,sizeof(StateAff), "StateAff");
delete(st);
return true;
}
#endif
///////////////////////////////////////////////////////
// DICTIONARY MANIPULATION ROUTINES BELOW HERE
//
///////////////////////////////////////////////////////
// Clean query dict file of misspellings
// NOTE: This function shall only compare each word to see if the phrase
// is present in the most commonly mispelled words list, that is present
// in the file mispelled_words. For spellchecking, use spellcheckDict()
// NOTE: Whenever you use these functions, please check the infile, outfile
// and the text format is correct
// Filter dict/xx/xx.query.phonet: copy every line whose phrase (the field
// between the first and second tab) is NOT in the common-misspellings list
// into tmp/query.phonet.clean. Returns true on success; returns the
// (false) result of log() on open/write failure.
// NOTE(review): fdr and fdw are never closed on the success path.
bool Language::cleanDictFile ( ) {
char buf [1024*10];
char fx[1024];
sprintf( fx,"%sdict/%s/%s.query.phonet",g_hostdb.m_dir,
getLanguageAbbr(m_lang),getLanguageAbbr(m_lang) );
FILE *fdr = fopen ( fx,"r" );
if ( ! fdr ) {
return log("lang: Could not open query file for "
"reading: %s.",strerror(errno));
}
// open for writing
char ff[1024];
sprintf ( ff , "%stmp/query.phonet.clean", g_hostdb.m_dir );
// delete it first
unlink ( ff );
// then open a new one for appending
int fdw = open ( ff ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fdw < 0 ){
return log("lang: Could not open for %s "
"writing: %s.",ff, strerror(errno));
}
while ( fgets ( buf , 1024 * 10, fdr ) ) {
buf[1024 * 10 - 1] = '\0';
// length of word(s), including the terminating \n
int32_t wlen = gbstrlen(buf) ;
// skip if empty
if ( wlen <= 0 ) continue;
//buf[wlen-1]='\0';
// NOTE(review): these scans assume each line has two tabs;
// a malformed line would run past the buffer
char *p = buf;
while ( *p != '\t' )
p++;
p++;
char *str = p;
while ( *p != '\t' )
p++;
if ( hasMispelling(str, p - str) )
continue;
// write out the trailing \n as well
int32_t wn = write ( fdw, buf, wlen ) ;
if ( wn != wlen )
return log("spell: makeWordFiles: "
"write: %s",
strerror(errno));
// (stale comment from makeQueryFiles — every clean line is kept)
}
return true;
}
// opens each file and creates the (score, word, phonet) tuple and stores
// in phonet file. Normalizes scores to a high score of 32000. Also removes
// tuples for which there are no phonets and tuples that are adult.
// The incoming file is supposed to be a tuple of (score, word)
// Convert a "(score, word)" tuple file into "<infile>.phonet" holding
// "(score, word, phonet)" tuples: first pass finds the max score for
// normalization to 0..32000, second pass emits only tuples that clean up
// and produce a non-empty phonetic form. Returns true on success; returns
// the (false) result of log() on open/write failure.
bool Language::makePhonet( char *infile){
loadRules();
// create the output file
int fdw;
char outfile[1024];
sprintf ( outfile , "%s.phonet", infile);
// delete it first
unlink ( outfile );
// then open a new one for appending
fdw = open ( outfile ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fdw < 0 )
return log("lang: Could not open %s for writing: "
"%s.", outfile, strerror(errno));
char buf[1024];
int32_t max = 0;
// open the input file
FILE *fdr;
// then open
fdr = fopen ( infile, "r" );
if ( !fdr )
// note: message says "for writing" but this is the read open
return log("lang: Could not open %s for writing: "
"%s.", outfile, strerror(errno));
// this loop goes through all the tuples and finds max score
while ( fgets ( buf , 1024 , fdr ) ) {
int32_t wlen = gbstrlen(buf);
if ( wlen <= 0 || wlen > MAX_PHRASE_LEN )
continue;
// remove the newline \n
buf [wlen - 1] = '\0';
char *p = buf;
while ( *p == ' ' )
p++;
// first is the popularity score
if ( atoi (p) > max )
max = atoi(p);
}
// close
fclose(fdr);
// then open
fdr = fopen ( infile, "r" );
if ( !fdr )
return log("lang: Could not open %s for writing: "
"%s.", outfile, strerror(errno));
char *scorePtr;
char *wordPtr;
char cleanWord[MAX_PHRASE_LEN];
char phonetic[MAX_PHRASE_LEN];
int32_t wordLen = 0;
char tmp[1024];
// this loop goes through all the tuples and only adds those
// tuples into the phonetic dict that have phonets. Normalizes scores.
while ( fgets ( buf , 1024 , fdr ) ) {
int32_t wlen = gbstrlen(buf);
if ( wlen <= 0 || wlen > MAX_PHRASE_LEN )
continue;
// remove the newline \n
buf [wlen - 1] = '\0';
char *p = buf;
while ( *p == ' ' )
p++;
// first is the popularity score
scorePtr = p;
int64_t score = (int64_t ) atoi(scorePtr);
// normalize score
// NOTE(review): divides by zero if every score was 0
score = ( score * 32000 )/ max;
// skip it
while ( *p != '\t' )
p++;
// null end it
*p = '\0';
p++;
wordPtr = p;
wordLen = gbstrlen( wordPtr );
// make the all letters in lower case
to_lower1(p);
// clean the word, i.e. convert word to uppercase and
// remove possible accents
if (!makeClean(wordPtr, wordLen, cleanWord, MAX_PHRASE_LEN)){
log ( "removed unclean phrase %s", p );
continue;
}
if ( !getPhonetic ( cleanWord, gbstrlen(cleanWord), phonetic,
MAX_PHRASE_LEN ) ){
log ( "could not get phonetic of phrase %s", p );
continue;
}
if ( gbstrlen(phonetic) == 0 ){
log ( "got 0 len phonetic of phrase %s", p );
continue;
}
sprintf(tmp,"%"INT64"\t%s\t%s\n",score, wordPtr, phonetic);
uint32_t wn = write ( fdw , tmp , gbstrlen(tmp) ) ;
if ( wn != gbstrlen(tmp) )
return log("lang: makePopPhonet: write: "
"%s",strerror(errno));
}
close(fdw);
fclose(fdr);
// all done
return true;
}
// Copy the first TOP_POP_PHRASES lines of "infile" (assumed already sorted
// by popularity) into "<infile>.top". Returns true on success; returns the
// (false) result of log() on open/write failure.
bool Language::genTopPopFile ( char *infile ){
// open the input file
FILE *fdr;
// then open
fdr = fopen ( infile, "r" );
if ( !fdr )
return log("lang: Could not open %s for reading: "
"%s.", infile, strerror(errno));
// create the output file
int fdw;
char outfile[1024];
sprintf ( outfile , "%s.top", infile );
// delete it first
unlink ( outfile );
// then open a new one for appending
fdw = open ( outfile ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fdw < 0 )
return log("lang: Could not open %s for writing: "
"%s.", outfile, strerror(errno));
char buf[1024];
int32_t count = 0;
// this loop goes through all the words and only adds those
// tuples into the distributed file that belong to this host.
while ( fgets ( buf , 1024 , fdr ) ) {
// put the first TOP_POP_PHRASES words
if ( count++ >= TOP_POP_PHRASES )
break;
int32_t wlen = gbstrlen(buf);
if ( wlen <= 0 || wlen > MAX_PHRASE_LEN )
continue;
uint32_t wn = write ( fdw , buf , gbstrlen(buf) ) ;
if ( wn != gbstrlen(buf) )
return log("lang: genTopPopFile: write: "
"%s",strerror(errno));
}
close(fdw);
fclose(fdr);
return true;
}
*/
// the distributed pop file is stored as a tuple of (phrase, phonet, lang, pop)
// to comply with the unified dict
// . split the (score, phrase, phonet, ...) tuples of "infile" across hosts
// . a tuple is kept only when hash(phonet) % hostsPerSplit == myHash, so
//   each host ends up with a disjoint slice of the dictionary
// . skips the first TOP_POP_PHRASES lines (those go in the top-pop file)
// . output file is "<infile>.<myHash>"
// . returns true on success, or the (false) result of log() on error
// fixes vs. previous revision: the read-open error message wrongly said
// "for writing", and fdr/fdw leaked on the two early-return error paths
bool Language::genDistributedPopFile ( char *infile, uint32_t myHash ){
// open the input file
FILE *fdr;
// then open
fdr = fopen ( infile, "r" );
if ( !fdr )
return log("lang: Could not open %s for reading: "
"%s.", infile, strerror(errno));
// create the output file
int fdw;
char outfile[1024];
sprintf ( outfile , "%s.%"INT32"", infile, myHash );
// delete it first
unlink ( outfile );
// then open a new one for appending
fdw = open ( outfile ,
O_CREAT | O_RDWR | O_APPEND ,
S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
if ( fdw < 0 ){
// don't leak the input stream on this error path
fclose ( fdr );
return log("lang: Could not open %s for writing: "
"%s.", outfile, strerror(errno));
}
char buf[1024];
// number of dictionary shards the phrases are split across
// NOTE(review): a modulo by zero below if this computes to 0 —
// confirm the hosts.conf invariants guarantee it is positive
int32_t hostsPerSplit = g_hostdb.m_numHosts / g_hostdb.m_indexSplits;
hostsPerSplit /= g_hostdb.m_numHostsPerShard;
int32_t count = 0;
// this loop goes through all the words and only adds those
// tuples into the distributed file that belong to this host.
while ( fgets ( buf , 1024 , fdr ) ) {
// skip the first TOP_POP_PHRASES words because they shall be
// put in the top pop file
if ( count++ < TOP_POP_PHRASES )
continue;
int32_t wlen = gbstrlen(buf);
if ( wlen <= 0 || wlen > MAX_PHRASE_LEN )
continue;
// remove the newline \n
buf [wlen - 1] = '\0';
char *p = buf;
char *pend = p + wlen - 1;
// first is the popularity score
char *score = p;
while ( *p != '\t' && p < pend )
p++;
// null end the score
*p = '\0';
p++;
// next is the phrase
// NOTE(review): the phrase's trailing tab is deliberately NOT
// NUL'd, so "%s" below emits phrase + tab + phonet together —
// this matches the tuple format described above; confirm
char *phrase = p;
while ( *p != '\t' && p < pend )
p++;
p++;
// check if we're at the phonet
if ( p >= pend )
continue;
char *phonet = p;
uint64_t phonetKey = hash64Lower_utf8(phonet);
if ( phonetKey % hostsPerSplit != myHash )
continue;
char tmp[1024];
sprintf(tmp,"%s\t%s\n", phrase, score);
uint32_t wn = write ( fdw , tmp , gbstrlen(tmp) ) ;
if ( (int32_t)wn != gbstrlen(tmp) ) {
// don't leak the descriptors on the error path
close ( fdw );
fclose ( fdr );
return log("lang: genDistributedPop: write: "
"%s",strerror(errno));
}
}
close(fdw);
fclose(fdr);
return true;
}
// heuristic code to spellcheck the dictionary
// spellcheck each word in the pop words dictionary with forceReco on so that
// we get a recommendation. Output words that have a recommendation that has
// 4 times the popularity of the word
int32_t Language::spellcheckDict(){
	// . heuristic spellcheck of the popular-phrase dictionary
	// . for each phrase in <lang>.query.phonet, ask the speller for a
	//   recommendation with forceReco on; phrases that survive the checks
	//   below are written to <file>.spellcheck, the rest are "kicked out"
	// . returns the number of kicked-out (notFound) phrases, 0 on error
	if ( !loadWikipediaWords() )
		return 0;
	char ff[1024];
	sprintf ( ff , "%sdict/%s/%s.query.phonet", g_hostdb.m_dir,
		  getLanguageAbbr(m_lang), getLanguageAbbr(m_lang));
	FILE *fd = fopen ( ff, "r" );
	if ( ! fd ) {
		log("lang: test: Could not open %s for "
		    "reading: %s.", "query.phonet",strerror(errno));
		return 0;
	}
	// create the output file
	int fdw;
	char outfile[1024];
	sprintf ( outfile , "%s.spellcheck", ff );
	// delete it first
	unlink ( outfile );
	// then open a new one for appending
	fdw = open ( outfile ,
		     O_CREAT | O_RDWR | O_APPEND ,
		     S_IRUSR |S_IWUSR |S_IRGRP |S_IWGRP| S_IROTH);
	if ( fdw < 0 ) {
		// fix: do not leak the input FILE* on this error path
		fclose(fd);
		return log("lang: Could not open %s for writing: "
			   "%s.", outfile, strerror(errno));
	}
	HashTableT <int32_t,int32_t> kickedOutPhrases;
	kickedOutPhrases.set(256);
	int32_t notFound = 0;
	char buf[1026];
	//char dst[1026];
	// go through the words in dict/words
	// NOTE(review): fgets reads up to MAX_FRAG_SIZE bytes into a
	// 1026-byte buffer — assumes MAX_FRAG_SIZE <= 1026; confirm.
	while ( fgets ( buf , MAX_FRAG_SIZE , fd ) ) {
		// length of word(s), including the terminating \n
		int32_t wlen = gbstrlen(buf) ;
		// skip if empty
		if ( wlen <= 0 ) continue;
		buf[wlen-1]='\0';
		// split the tab-separated line in place:
		// buf becomes "<score>\0<phrase>\0<phonet>\0..."
		for ( int32_t j = 0; j < wlen; j++ )
			if ( buf[j] == '\t')
				buf[j] = '\0';
		char *tuple = buf;
		//skip score and go to phrase
		tuple += gbstrlen(tuple) + 1;
		char *word = tuple;
		// . make the all letters in lower case
		// . TODO: fix for utf8 words?
		to_lower1_a(word);
		// check for adult words
		/*if ( isAdult (word) ){
			log(LOG_WARN,"lang: kicking out adult phrase=%s",
			    word);
			continue;
		}*/
		uint64_t h = hash64d ( word, gbstrlen(word));
		bool isInWiki = false;
		// if the phrase is in wikipedia, its safe
		int32_t slot = m_wiki.getSlot(h);
		if ( slot != -1 )
			isInWiki = true;
		int32_t wordPop = g_speller.getPhrasePopularity( word, h, false );
		// fall back to the distributed pop table if the speller
		// has no popularity for this phrase
		if ( wordPop == 0 ) {
			slot = m_distributedPopPhrases.getSlot(h);
			if ( slot != -1 ){
				wordPop = m_distributedPopPhrases.
					getValueFromSlot(slot);
			}
		}
		// a space inside the word means it is a multi-word phrase
		bool isPhrase = false;
		while ( *tuple != '\0' ){
			if ( *tuple == ' ' )
				isPhrase = true;
			tuple++;
		}
		// point back to the phrase
		tuple = word;
		char recommendation[MAX_PHRASE_LEN];
		bool found;
		int32_t score;
		int32_t pop;
		/*
		if ( !isPhrase && !isInWiki ){
			// just the the best narrow phrase we can find
			int32_t numNarrow = 0;
			char narrow[MAX_PHRASE_LEN];
			int32_t narrowPop;
			numNarrow = narrowPhrase ( word, narrow,
						   &narrowPop, 1 );
			if ( numNarrow == 0 ){
				log (LOG_WARN,"lang: no Narrow Searches "
				     "for %s",word);
				continue;
			}
			word = narrow;
			wordPop = narrowPop;
		}
		*/
		bool reco = getRecommendation( word, gbstrlen(word),
					       recommendation, MAX_PHRASE_LEN,
					       &found, &score, &pop,
					       true );// forceReco
		// if a kicked out phrase is the recommendation, then DON'T
		// kick out this one too, because it probably means that the
		// kicked out phrase was good. BUT should we put the kicked
		// out phrase back ??
		if ( reco && !isInWiki ){
			int32_t h1 = hash32d ( recommendation,
					       gbstrlen(recommendation) );
			slot = m_wiki.getSlot(h1);
			// if the recommendation is in wiki, then double the
			// pop of the recommendation
			if ( slot != -1 && !isInWiki ){
				log (LOG_WARN,"lang: recommendation=%s "
				     "is in the wiki. kicks out phrase %s",
				     recommendation, buf+gbstrlen(buf)+1);
				pop *= 2;
			}
			slot = kickedOutPhrases.getSlot(h1);
			if ( slot != -1 ){
				log (LOG_WARN,"lang: recommendation has "
				     "already been kicked out, word=%s, "
				     "reco=%s",buf+gbstrlen(buf)+1,
				     recommendation );
				reco = false;
			}
		}
		// if it is found in wikipedia OR
		// if no reco is found (even though it is a phrase) OR
		// if phrase popularity is 4x the recommendation popularity
		// if score is less than 99.
		// NOTE(review): the last comment line says "less than 99"
		// but the code tests score > 99 — confirm which is intended.
		if ( isInWiki || !reco || wordPop * 4 > pop || score > 99 ){
			char tmp[MAX_FRAG_SIZE];
			// re-emit the tuple: score (buf), phrase (tuple) and
			// the field after the phrase's terminating NUL
			sprintf(tmp,"%s\t%s\t%s\n",buf, tuple,
				tuple + gbstrlen(tuple) + 1);
			uint32_t wn = write ( fdw , tmp , gbstrlen(tmp) );
			if ( (int32_t)wn != gbstrlen(tmp) ) {
				// fix: close both files before error return
				close(fdw);
				fclose(fd);
				return log("spell: spellCheckDict: write: "
					   "%s",strerror(errno));
			}
			continue;
		}
		kickedOutPhrases.addKey(h,1);
		log ( LOG_WARN,"lang: not found=%s, reco=%s, "
		      "score=%"INT32", wordPop=%"INT32", recoPop=%"INT32"",
		      buf + gbstrlen(buf) + 1, recommendation, score,
		      wordPop, pop );
		notFound++;
	}
	close (fdw);
	fclose(fd);
	return notFound;
}
|
#include "gula/net/Socket.h"
#include "gula/base/Logging.h"
#include "gula/net/InetAddress.h"
#include "gula/net/SocketsOps.h"
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h> // snprintf
using namespace gula;
using namespace gula::net;
// RAII: the socket owns its fd; closing it here ties the fd's lifetime
// to this object.
Socket::~Socket()
{
sockets::close(sockfd_);
}
bool Socket::getTcpInfo(struct tcp_info* tcpi) const
{
  // Zero the caller's struct first so no stale bytes survive a partial
  // fill, then ask the kernel for the connection's TCP statistics.
  socklen_t infoLen = sizeof(*tcpi);
  memZero(tcpi, infoLen);
  const int rc = ::getsockopt(sockfd_, SOL_TCP, TCP_INFO, tcpi, &infoLen);
  return rc == 0;
}
bool Socket::getTcpInfoString(char* buf, int len) const
{
  // Render the kernel TCP stats as a single space-separated line into the
  // caller-supplied buffer. Returns false if the stats could not be read.
  struct tcp_info tcpi;
  if (!getTcpInfo(&tcpi))
  {
    return false;
  }
  snprintf(buf, len, "unrecovered=%u "
           "rto=%u ato=%u snd_mss=%u rcv_mss=%u "
           "lost=%u retrans=%u rtt=%u rttvar=%u "
           "sshthresh=%u cwnd=%u total_retrans=%u",
           tcpi.tcpi_retransmits,  // Number of unrecovered [RTO] timeouts
           tcpi.tcpi_rto,          // Retransmit timeout in usec
           tcpi.tcpi_ato,          // Predicted tick of soft clock in usec
           tcpi.tcpi_snd_mss,
           tcpi.tcpi_rcv_mss,
           tcpi.tcpi_lost,         // Lost packets
           tcpi.tcpi_retrans,      // Retransmitted packets out
           tcpi.tcpi_rtt,          // Smoothed round trip time in usec
           tcpi.tcpi_rttvar,       // Medium deviation
           tcpi.tcpi_snd_ssthresh,
           tcpi.tcpi_snd_cwnd,
           tcpi.tcpi_total_retrans);  // Total retransmits for entire connection
  return true;
}
// Bind this socket to the given local address; aborts the process on
// failure (see sockets::bindOrDie).
void Socket::bindAddress(const InetAddress& addr)
{
sockets::bindOrDie(sockfd_, addr.getSockAddr());
}
// Put the socket into listening state; aborts the process on failure
// (see sockets::listenOrDie).
void Socket::listen()
{
sockets::listenOrDie(sockfd_);
}
int Socket::accept(InetAddress* peeraddr)
{
  // Accept one pending connection. On success the peer's address is
  // stored into *peeraddr and the new connection fd is returned; on
  // failure the (negative) result is passed through unchanged.
  struct sockaddr_in6 peerAddr6;
  memZero(&peerAddr6, sizeof peerAddr6);
  const int connfd = sockets::accept(sockfd_, &peerAddr6);
  if (connfd < 0)
  {
    return connfd;
  }
  peeraddr->setSockAddrInet6(peerAddr6);
  return connfd;
}
// Half-close: shut down the write side only, so the peer sees EOF while
// we can still read any data in flight.
void Socket::shutdownWrite()
{
sockets::shutdownWrite(sockfd_);
}
void Socket::setTcpNoDelay(bool on)
{
int optval = on ? 1 : 0;
::setsockopt(sockfd_, IPPROTO_TCP, TCP_NODELAY,
&optval, static_cast<socklen_t>(sizeof optval));
// FIXME CHECK
}
void Socket::setReuseAddr(bool on)
{
int optval = on ? 1 : 0;
::setsockopt(sockfd_, SOL_SOCKET, SO_REUSEADDR,
&optval, static_cast<socklen_t>(sizeof optval));
// FIXME CHECK
}
void Socket::setReusePort(bool on)
{
  // Enable/disable SO_REUSEPORT where the platform provides it.
#ifdef SO_REUSEPORT
  int optval = on ? 1 : 0;
  const int rc = ::setsockopt(sockfd_, SOL_SOCKET, SO_REUSEPORT,
                              &optval, static_cast<socklen_t>(sizeof optval));
  // Only a failed *enable* is worth reporting.
  if (rc < 0 && on)
  {
    LOG_SYSERR << "SO_REUSEPORT failed.";
  }
#else
  if (on)
  {
    LOG_ERROR << "SO_REUSEPORT is not supported.";
  }
#endif
}
void Socket::setKeepAlive(bool on)
{
int optval = on ? 1 : 0;
::setsockopt(sockfd_, SOL_SOCKET, SO_KEEPALIVE,
&optval, static_cast<socklen_t>(sizeof optval));
// FIXME CHECK
}
|
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All right reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Mike Taylor, Radu Serban
// =============================================================================
//
// Test for the rotational spring damper
//
// Recall that Irrlicht uses a left-hand frame, so everything is rendered with
// left and right flipped.
//
// =============================================================================
#include <ostream>
#include <fstream>
#include <cmath>
#include "chrono/physics/ChSystemNSC.h"
#include "chrono/physics/ChBody.h"
#include "chrono/utils/ChUtilsInputOutput.h"
#include "chrono/utils/ChUtilsValidation.h"
#include "chrono_irrlicht/ChIrrApp.h"
#include "chrono_thirdparty/filesystem/path.h"
#include "ChronoValidation_config.h"
using namespace chrono;
using namespace chrono::irrlicht;
using namespace irr;
// =============================================================================
// Local variables
//
static const std::string val_dir = "../RESULTS/";
static const std::string out_dir = val_dir + "rotspring_force/";
static const std::string ref_dir = "rotspring_force/";
// =============================================================================
// Functor class for a custom rotaional spring constant modifier (function of
// position only)
class ChFunction_CustomSpring : public ChFunction {
  public:
    /// Return a heap-allocated copy. The functor is stateless, so a
    /// default-constructed instance is an exact clone.
    virtual ChFunction_CustomSpring* Clone() const override { return new ChFunction_CustomSpring; }

    /// Evaluate the spring coefficient k(x) = 50 + 10 * |x|.
    virtual double Get_y(double x) const override {
        const double k_linear = 50;
        const double k_nonlinear = 10;
        return k_linear + k_nonlinear * std::fabs(x);
    }
};
// =============================================================================
// Prototypes of local functions
//
bool TestRotSpring(const ChVector<>& jointLoc, const ChQuaternion<>& jointRot,
const int customSpringType,
double simTimeStep, double outTimeStep,
const std::string& testName, bool animate, bool save);
bool ValidateReference(const std::string& testName, const std::string& what, double tolerance);
bool ValidateConstraints(const std::string& testName, double tolerance);
bool ValidateEnergy(const std::string& testName, double tolerance);
utils::CSV_writer OutStream();
// =============================================================================
//
// Main driver function for running the simulation and validating the results.
//
// Main driver: run both rotational-spring test cases and validate the
// recorded results against the ADAMS reference data. Returns 0 when all
// validations pass, 1 otherwise. argv: any 1st arg enables animation,
// any 2nd arg enables saving animation data.
int main(int argc, char* argv[])
{
    bool animate = (argc > 1);
    bool save = (argc > 2);

    // Set the path to the Chrono data folder
    SetChronoDataPath(CHRONO_DATA_DIR);

    // Create output directory (if it does not already exist)
    if (!filesystem::create_directory(filesystem::path(val_dir))) {
        std::cout << "Error creating directory " << val_dir << std::endl;
        return 1;
    }
    if (!filesystem::create_directory(filesystem::path(out_dir))) {
        std::cout << "Error creating directory " << out_dir << std::endl;
        return 1;
    }

    // Set the simulation and output step sizes
    double sim_step = 2e-4;
    double out_step = 1e-2;

    std::string test_name;
    bool test_passed = true;

    // Both cases compare the same quantities against the same tolerances;
    // factor the nine checks into one helper to avoid duplication.
    auto validate_case = [](const std::string& name) {
        bool ok = true;
        ok &= ValidateReference(name, "Pos", 1e-3);
        ok &= ValidateReference(name, "Vel", 5e-4);
        ok &= ValidateReference(name, "Acc", 2e-2);
        ok &= ValidateReference(name, "Quat", 1e-3);
        ok &= ValidateReference(name, "Avel", 1e-3);
        ok &= ValidateReference(name, "Aacc", 5e-3);
        ok &= ValidateReference(name, "Rforce", 5e-3);
        ok &= ValidateReference(name, "Rtorque", 1e-2);
        ok &= ValidateConstraints(name, 1e-5);
        return ok;
    };

    // Case 1 - Revolute Joint at the origin and aligned with the global Y axis.
    // Since the axis of rotation of a revolute joint is the Z-axis, the joint
    // must be rotated -pi/2 about the global X-axis.
    // Simple Spring
    test_name = "RotSpring_Case01";
    TestRotSpring(ChVector<>(0, 0, 0), Q_from_AngX(-CH_C_PI_2), 1, sim_step, out_step, test_name, animate, save);
    if (!animate) {
        test_passed &= validate_case(test_name);
    }

    // Case 2 - Same as Case01 except a nonlinear spring coefficent is used
    test_name = "RotSpring_Case02";
    TestRotSpring(ChVector<>(0, 0, 0), Q_from_AngX(-CH_C_PI_2), 2, sim_step, out_step, test_name, animate, save);
    if (!animate) {
        test_passed &= validate_case(test_name);
    }

    // Return 0 if all tests passed and 1 otherwise
    std::cout << std::endl << "UNIT TEST: " << (test_passed ? "PASSED" : "FAILED") << std::endl;
    return !test_passed;
}
// =============================================================================
//
// Worker function for performing the simulation with specified parameters.
//
bool TestRotSpring(const ChVector<>& jointLoc, // absolute location of joint
const ChQuaternion<>& jointRot, // orientation of joint
const int customSpringType, // Flag for selecting a spring
double simTimeStep, // simulation time step
double outTimeStep, // output time step
const std::string& testName, // name of this test
bool animate, // if true, animate with Irrlich
bool save) // if true, also save animation data
{
std::cout << "TEST: " << testName << std::endl;
// Settings
//---------
// There are no units in Chrono, so values must be consistent
// (MKS is used in this example)
double mass = 1.0; // mass of pendulum
double length = 4.0; // length of pendulum
ChVector<> inertiaXX(0.04, 0.1, 0.1); // mass moments of inertia of pendulum (centroidal frame)
double g = 9.80665;
double timeRecord = 5; // simulation length
// Create the mechanical system
// ----------------------------
// Create a ChronoENGINE physical system: all bodies and constraints will be
// handled by this ChSystem object.
ChSystemNSC my_system;
my_system.Set_G_acc(ChVector<>(0.0, 0.0, -g));
my_system.SetTimestepperType(ChTimestepper::Type::EULER_IMPLICIT_LINEARIZED);
my_system.SetMaxItersSolverSpeed(100);
my_system.SetMaxItersSolverStab(100); //Tasora stepper uses this, Anitescu does not
my_system.SetSolverType(ChSolver::Type::SOR);
my_system.SetTol(1e-6);
my_system.SetTolForce(1e-4);
// Create the ground body
auto ground = chrono_types::make_shared<ChBody>();
my_system.AddBody(ground);
ground->SetBodyFixed(true);
// Add some geometry to the ground body for visualizing the revolute joint
auto cyl_g = chrono_types::make_shared<ChCylinderShape>();
cyl_g->GetCylinderGeometry().p1 = jointLoc + jointRot.Rotate(ChVector<>(0, 0, -0.4));
cyl_g->GetCylinderGeometry().p2 = jointLoc + jointRot.Rotate(ChVector<>(0, 0, 0.4));
cyl_g->GetCylinderGeometry().rad = 0.05;
ground->AddAsset(cyl_g);
// Create the pendulum body in an initial configuration at rest, with an
// orientation that matches the specified joint orientation and a position
// consistent with the specified joint location.
// The pendulum CG is assumed to be at half its length.
auto pendulum = chrono_types::make_shared<ChBody>();
my_system.AddBody(pendulum);
pendulum->SetPos(jointLoc + jointRot.Rotate(ChVector<>(length / 2, 0, 0)));
pendulum->SetRot(jointRot);
pendulum->SetMass(mass);
pendulum->SetInertiaXX(inertiaXX);
// Add some geometry to the pendulum for visualization
auto cyl_p1 = chrono_types::make_shared<ChCylinderShape>();
cyl_p1->GetCylinderGeometry().p1 = ChVector<>(-length / 2, 0, 0);
cyl_p1->GetCylinderGeometry().p2 = ChVector<>(length / 2, 0, 0);
cyl_p1->GetCylinderGeometry().rad = 0.1;
pendulum->AddAsset(cyl_p1);
auto cyl_p2 = chrono_types::make_shared<ChCylinderShape>();
cyl_p2->GetCylinderGeometry().p1 = ChVector<>(-length / 2, 0, -0.2);
cyl_p2->GetCylinderGeometry().p2 = ChVector<>(-length / 2, 0, 0.2);
cyl_p2->GetCylinderGeometry().rad = 0.1;
pendulum->AddAsset(cyl_p2);
// Create revolute joint between pendulum and ground at "loc" in the global
// reference frame. The revolute joint's axis of rotation will be the Z axis
// of the specified rotation matrix.
auto revoluteJoint = chrono_types::make_shared<ChLinkLockRevolute>();
revoluteJoint->Initialize(pendulum, ground, ChCoordsys<>(jointLoc, jointRot));
my_system.AddLink(revoluteJoint);
// Add a rotational spring damper to the revolute joint
auto force = std::make_unique<ChLinkForce>();
auto customSpring = chrono_types::make_shared<ChFunction_CustomSpring>();
revoluteJoint->GetForce_Rz().SetActive(true);
revoluteJoint->GetForce_Rz().SetK(200);
revoluteJoint->GetForce_Rz().SetR(10);
if (customSpringType == 2) {
revoluteJoint->GetForce_Rz().SetK(1);
revoluteJoint->GetForce_Rz().SetModulationK(customSpring);
}
// Perform the simulation (animation with Irrlicht option)
// -------------------------------------------------------
if (animate) {
// Create the Irrlicht application for visualization
ChIrrApp *application =
new ChIrrApp(&my_system, L"ChLinkRevolute demo",
core::dimension2d<u32>(800, 600), false, true);
application->AddTypicalLogo();
application->AddTypicalSky();
application->AddTypicalLights();
core::vector3df lookat((f32)jointLoc.x(), (f32)jointLoc.y(), (f32)jointLoc.z());
application->AddTypicalCamera(lookat + core::vector3df(0, 3, -6), lookat);
// Now have the visulization tool (Irrlicht) create its geometry from the
// assets defined above
application->AssetBindAll();
application->AssetUpdateAll();
application->SetTimestep(simTimeStep);
// Simulation loop
double outTime = 0;
int outFrame = 1;
std::string pov_dir = out_dir + "POVRAY_" + testName;
if (!filesystem::create_directory(filesystem::path(pov_dir))) {
std::cout << "Error creating directory " << pov_dir << std::endl;
return false;
}
while (application->GetDevice()->run())
{
if (save && my_system.GetChTime() >= outTime - simTimeStep / 2) {
char filename[100];
sprintf(filename, "%s/data_%03d.dat", pov_dir.c_str(), outFrame);
utils::WriteShapesPovray(&my_system, filename);
outTime += outTimeStep;
outFrame++;
}
application->BeginScene();
application->DrawAll();
// Draw an XZ grid at the global origin to add in visualization
ChIrrTools::drawGrid(
application->GetVideoDriver(), 1, 1, 20, 20,
ChCoordsys<>(ChVector<>(0, 0, 0), Q_from_AngX(CH_C_PI_2)),
video::SColor(255, 80, 100, 100), true);
application->DoStep(); //Take one step in time
application->EndScene();
}
return true;
}
// Perform the simulation (record results option)
// ------------------------------------------------
// Create the CSV_Writer output objects (TAB delimited)
utils::CSV_writer out_pos = OutStream();
utils::CSV_writer out_vel = OutStream();
utils::CSV_writer out_acc = OutStream();
utils::CSV_writer out_quat = OutStream();
utils::CSV_writer out_avel = OutStream();
utils::CSV_writer out_aacc = OutStream();
utils::CSV_writer out_rfrc = OutStream();
utils::CSV_writer out_rtrq = OutStream();
utils::CSV_writer out_energy = OutStream();
utils::CSV_writer out_cnstr = OutStream();
// Write headers
out_pos << "Time" << "X_Pos" << "Y_Pos" << "Z_Pos" << std::endl;
out_vel << "Time" << "X_Vel" << "Y_Vel" << "Z_Vel" << std::endl;
out_acc << "Time" << "X_Acc" << "Y_Acc" << "Z_Acc" << std::endl;
out_quat << "Time" << "e0" << "e1" << "e2" << "e3" << std::endl;
out_avel << "Time" << "X_AngVel" << "Y_AngVel" << "Z_AngVel" << std::endl;
out_aacc << "Time" << "X_AngAcc" << "Y_AngAcc" << "Z_AngAcc" << std::endl;
out_rfrc << "Time" << "X_Force" << "Y_Force" << "Z_Force" << std::endl;
out_rtrq << "Time" << "X_Torque" << "Y_Torque" << "Z_Torque" << std::endl;
out_energy << "Time" << "Transl_KE" << "Rot_KE" << "Delta_PE" << "KE+PE" << std::endl;
out_cnstr << "Time" << "Cnstr_1" << "Cnstr_2" << "Cnstr_3" << "Constraint_4" << "Cnstr_5" << std::endl;
// Perform a system assembly to ensure we have the correct accelerations at
// the initial time.
my_system.DoFullAssembly();
// Total energy at initial time.
ChMatrix33<> inertia = pendulum->GetInertia();
ChVector<> angVelLoc = pendulum->GetWvel_loc();
double transKE = 0.5 * mass * pendulum->GetPos_dt().Length2();
double rotKE = 0.5 * Vdot(angVelLoc, inertia * angVelLoc);
double deltaPE = mass * g * (pendulum->GetPos().z() - jointLoc.z());
double totalE0 = transKE + rotKE + deltaPE;
// Simulation loop
double simTime = 0;
double outTime = 0;
while (simTime <= timeRecord + simTimeStep / 2)
{
// Ensure that the final data point is recorded.
if (simTime >= outTime - simTimeStep / 2)
{
// CM position, velocity, and acceleration (expressed in global frame).
const ChVector<>& position = pendulum->GetPos();
const ChVector<>& velocity = pendulum->GetPos_dt();
out_pos << simTime << position << std::endl;
out_vel << simTime << velocity << std::endl;
out_acc << simTime << pendulum->GetPos_dtdt() << std::endl;
// Orientation, angular velocity, and angular acceleration (expressed in
// global frame).
out_quat << simTime << pendulum->GetRot() << std::endl;
out_avel << simTime << pendulum->GetWvel_par() << std::endl;
out_aacc << simTime << pendulum->GetWacc_par() << std::endl;
// Reaction Force and Torque: acting on the ground body, as applied at the
// joint location and expressed in the global frame.
// Chrono returns the reaction force and torque on body 2 (as specified in
// the joint Initialize() function), as applied at the joint location and
// expressed in the joint frame. Here, the 2nd body is the ground.
// joint frame on 2nd body (ground), expressed in the body frame
ChCoordsys<> linkCoordsys = revoluteJoint->GetLinkRelativeCoords();
// reaction force and torque on ground, expressed in joint frame
ChVector<> reactForce = revoluteJoint->Get_react_force();
ChVector<> reactTorque = revoluteJoint->Get_react_torque();
// force and torque from the spring damper on ground, expressed in joint frame
ChVector<> springForce = revoluteJoint->GetC_force();
ChVector<> springTorque = revoluteJoint->GetC_torque();
// Combine the joint reactions with the spring force and torque to match
// ADAMS comparison files
ChVector<> jointForce = reactForce - springForce;
ChVector<> jointTorque = reactTorque - springTorque;
// reaction force and torque on ground, expressed in ground frame
jointForce = linkCoordsys.TransformDirectionLocalToParent(jointForce);
jointTorque = linkCoordsys.TransformDirectionLocalToParent(jointTorque);
// since the ground body frame coincides with the global (absolute)
// frame, the above quantities also represent the reaction force and
// torque on ground, expressed in the global frame
out_rfrc << simTime << jointForce << std::endl;
out_rtrq << simTime << jointTorque << std::endl;
// Conservation of Energy
// Translational Kinetic Energy (1/2*m*||v||^2)
// Rotational Kinetic Energy (1/2 w'*I*w)
// Delta Potential Energy (m*g*dz)
ChMatrix33<> inertia = pendulum->GetInertia();
ChVector<> angVelLoc = pendulum->GetWvel_loc();
double transKE = 0.5 * mass * velocity.Length2();
double rotKE = 0.5 * Vdot(angVelLoc, inertia * angVelLoc);
double deltaPE = mass * g * (position.z() - jointLoc.z());
double totalE = transKE + rotKE + deltaPE;
out_energy << simTime << transKE << rotKE << deltaPE << totalE - totalE0 << std::endl;;
// Constraint violations
ChVectorDynamic<> C = revoluteJoint->GetC();
out_cnstr << simTime << C(0) << C(1) << C(2) << C(3) << C(4) << std::endl;
// Increment output time
outTime += outTimeStep;
}
// Advance simulation by one step
my_system.DoStepDynamics(simTimeStep);
// Increment simulation time
simTime += simTimeStep;
}
// Write output files
out_pos.write_to_file(out_dir + testName + "_CHRONO_Pos.txt", testName + "\n\n");
out_vel.write_to_file(out_dir + testName + "_CHRONO_Vel.txt", testName + "\n\n");
out_acc.write_to_file(out_dir + testName + "_CHRONO_Acc.txt", testName + "\n\n");
out_quat.write_to_file(out_dir + testName + "_CHRONO_Quat.txt", testName + "\n\n");
out_avel.write_to_file(out_dir + testName + "_CHRONO_Avel.txt", testName + "\n\n");
out_aacc.write_to_file(out_dir + testName + "_CHRONO_Aacc.txt", testName + "\n\n");
out_rfrc.write_to_file(out_dir + testName + "_CHRONO_Rforce.txt", testName + "\n\n");
out_rtrq.write_to_file(out_dir + testName + "_CHRONO_Rtorque.txt", testName + "\n\n");
out_energy.write_to_file(out_dir + testName + "_CHRONO_Energy.txt", testName + "\n\n");
out_cnstr.write_to_file(out_dir + testName + "_CHRONO_Constraints.txt", testName + "\n\n");
return true;
}
// =============================================================================
//
// Wrapper function for comparing the specified simulation quantities against a
// reference file.
//
bool ValidateReference(const std::string& testName, // name of this test
                       const std::string& what,     // identifier for test quantity
                       double tolerance)            // validation tolerance
{
    // Compare the recorded Chrono output against the ADAMS reference data
    // using an RMS norm, then report pass/fail plus the per-column norms.
    const std::string sim_file = out_dir + testName + "_CHRONO_" + what + ".txt";
    const std::string ref_file = ref_dir + testName + "_ADAMS_" + what + ".txt";

    utils::DataVector norms;
    const bool check =
        utils::Validate(sim_file, utils::GetValidationDataFile(ref_file), utils::RMS_NORM, tolerance, norms);

    std::cout << " validate " << what << (check ? ": Passed" : ": Failed") << " [ ";
    for (const auto& nrm : norms)
        std::cout << nrm << " ";
    std::cout << " ]" << std::endl;

    return check;
}
// Wrapper function for checking constraint violations.
//
bool ValidateConstraints(const std::string& testName, // name of this test
                         double tolerance)            // validation tolerance
{
    // Constraint violations are validated against zero (RMS norm under
    // tolerance), so no reference file is needed here.
    const std::string sim_file = out_dir + testName + "_CHRONO_Constraints.txt";

    utils::DataVector norms;
    const bool check = utils::Validate(sim_file, utils::RMS_NORM, tolerance, norms);

    std::cout << " validate Constraints" << (check ? ": Passed" : ": Failed") << " [ ";
    for (const auto& nrm : norms)
        std::cout << nrm << " ";
    std::cout << " ]" << std::endl;

    return check;
}
// =============================================================================
//
// Utility function to create a CSV output stream and set output format options.
//
utils::CSV_writer OutStream()
{
    // Build a TAB-delimited CSV writer emitting 6-digit scientific
    // notation with an explicit sign on every value.
    utils::CSV_writer csv("\t");
    csv.stream().precision(6);
    csv.stream().setf(std::ios::scientific | std::ios::showpos);
    return csv;
}
|
// Copyright Oliver Kowalke 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_FIBERS_CONTEXT_H
#define BOOST_FIBERS_CONTEXT_H
#include <atomic>
#include <chrono>
#include <cstdint>
#include <exception>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <tuple>
#include <type_traits>
#include <boost/assert.hpp>
#include <boost/config.hpp>
#if defined(BOOST_NO_CXX17_STD_APPLY)
#include <boost/context/detail/apply.hpp>
#endif
#include <boost/context/continuation.hpp>
#include <boost/context/stack_context.hpp>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/parent_from_member.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/slist.hpp>
#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/data.hpp>
#include <boost/fiber/detail/decay_copy.hpp>
#include <boost/fiber/detail/fss.hpp>
#include <boost/fiber/detail/spinlock.hpp>
#include <boost/fiber/exceptions.hpp>
#include <boost/fiber/fixedsize_stack.hpp>
#include <boost/fiber/policy.hpp>
#include <boost/fiber/properties.hpp>
#include <boost/fiber/segmented_stack.hpp>
#include <boost/fiber/type.hpp>
#ifdef BOOST_HAS_ABI_HEADERS
# include BOOST_ABI_PREFIX
#endif
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable:4251)
#endif
namespace boost {
namespace fibers {
class context;
class fiber;
class scheduler;
namespace detail {
// A fiber context participates in several intrusive containers at once
// (wait list, ready list, sleep set, worker list, ...). Each container
// uses its own member hook, distinguished by a unique tag type.
// auto_unlink hooks remove themselves from their container on
// destruction; safe_link hooks do not.

// hook for the wait queue of another context
struct wait_tag;
typedef intrusive::list_member_hook<
intrusive::tag< wait_tag >,
intrusive::link_mode<
intrusive::auto_unlink
>
> wait_hook;
// declaration of the functor that converts between
// the context class and the wait-hook
struct wait_functor {
// required types
typedef wait_hook hook_type;
typedef hook_type * hook_ptr;
typedef const hook_type * const_hook_ptr;
typedef context value_type;
typedef value_type * pointer;
typedef const value_type * const_pointer;
// required static functions
static hook_ptr to_hook_ptr( value_type &value);
static const_hook_ptr to_hook_ptr( value_type const& value);
static pointer to_value_ptr( hook_ptr n);
static const_pointer to_value_ptr( const_hook_ptr n);
};

// hook for the scheduler's ready queue
struct ready_tag;
typedef intrusive::list_member_hook<
intrusive::tag< ready_tag >,
intrusive::link_mode<
intrusive::auto_unlink
>
> ready_hook;

// hook for the sleep set (intrusive::set — presumably keyed by wake-up
// time point; confirm against the scheduler implementation)
struct sleep_tag;
typedef intrusive::set_member_hook<
intrusive::tag< sleep_tag >,
intrusive::link_mode<
intrusive::auto_unlink
>
> sleep_hook;

// hook for the scheduler's list of worker contexts
struct worker_tag;
typedef intrusive::list_member_hook<
intrusive::tag< worker_tag >,
intrusive::link_mode<
intrusive::auto_unlink
>
> worker_hook;

// hook for the singly-linked list of terminated contexts (safe_link:
// must be explicitly unlinked before destruction)
struct terminated_tag;
typedef intrusive::slist_member_hook<
intrusive::tag< terminated_tag >,
intrusive::link_mode<
intrusive::safe_link
>
> terminated_hook;

// hook for the singly-linked remote-ready list (safe_link)
struct remote_ready_tag;
typedef intrusive::slist_member_hook<
intrusive::tag< remote_ready_tag >,
intrusive::link_mode<
intrusive::safe_link
>
> remote_ready_hook;
}
class BOOST_FIBERS_DECL context {
public:
typedef intrusive::list<
context,
intrusive::function_hook< detail::wait_functor >,
intrusive::constant_time_size< false >
> wait_queue_t;
private:
friend class dispatcher_context;
friend class main_context;
template< typename Fn, typename ... Arg > friend class worker_context;
friend class scheduler;
struct fss_data {
void * vp{ nullptr };
detail::fss_cleanup_function::ptr_t cleanup_function{};
fss_data() noexcept {
}
fss_data( void * vp_,
detail::fss_cleanup_function::ptr_t const& fn) noexcept :
vp( vp_),
cleanup_function( fn) {
BOOST_ASSERT( cleanup_function);
}
void do_cleanup() {
( * cleanup_function)( vp);
}
};
typedef std::map< uintptr_t, fss_data > fss_data_t;
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
std::atomic< std::size_t > use_count_;
#else
std::size_t use_count_;
#endif
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
detail::remote_ready_hook remote_ready_hook_{};
#endif
detail::spinlock splk_{};
bool terminated_{ false };
wait_queue_t wait_queue_{};
public:
detail::wait_hook wait_hook_{};
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
std::atomic< std::intptr_t > twstatus{ 0 };
#endif
private:
scheduler * scheduler_{ nullptr };
fss_data_t fss_data_{};
detail::sleep_hook sleep_hook_{};
detail::ready_hook ready_hook_{};
detail::terminated_hook terminated_hook_{};
detail::worker_hook worker_hook_{};
fiber_properties * properties_{ nullptr };
std::chrono::steady_clock::time_point tp_{ (std::chrono::steady_clock::time_point::max)() };
boost::context::continuation c_{};
type type_;
launch policy_;
context( std::size_t initial_count, type t, launch policy) noexcept :
use_count_{ initial_count },
type_{ t },
policy_{ policy } {
}
public:
// Opaque fiber identifier: wraps the context pointer and exposes only
// equality/ordering/printing, mirroring std::thread::id.
class id {
private:
context * impl_{ nullptr };
public:
// Default-constructed id represents "not a fiber".
id() = default;
explicit id( context * impl) noexcept :
impl_{ impl } {
}
bool operator==( id const& other) const noexcept {
return impl_ == other.impl_;
}
bool operator!=( id const& other) const noexcept {
return impl_ != other.impl_;
}
// Ordering is by raw pointer value; it is arbitrary but stable, which is
// all that associative containers require.
bool operator<( id const& other) const noexcept {
return impl_ < other.impl_;
}
bool operator>( id const& other) const noexcept {
return other.impl_ < impl_;
}
bool operator<=( id const& other) const noexcept {
return ! ( * this > other);
}
bool operator>=( id const& other) const noexcept {
return ! ( * this < other);
}
// Prints the pointer value, or "{not-valid}" for a default-constructed id.
template< typename charT, class traitsT >
friend std::basic_ostream< charT, traitsT > &
operator<<( std::basic_ostream< charT, traitsT > & os, id const& other) {
if ( nullptr != other.impl_) {
return os << other.impl_;
} else {
return os << "{not-valid}";
}
}
// True when this id refers to an actual fiber context.
explicit operator bool() const noexcept {
return nullptr != impl_;
}
bool operator!() const noexcept {
return nullptr == impl_;
}
};
// Active fiber of the calling thread / reset of that thread-local pointer.
static context * active() noexcept;
static void reset_active() noexcept;
// Non-copyable: a context's identity is its address.
context( context const&) = delete;
context & operator=( context const&) = delete;
// Two contexts are equal exactly when they are the same object.
friend bool
operator==( context const& lhs, context const& rhs) noexcept {
return & lhs == & rhs;
}
virtual ~context();
scheduler * get_scheduler() const noexcept {
return scheduler_;
}
id get_id() const noexcept;
// A context is resumable while it still holds a valid continuation.
bool is_resumable() const noexcept {
if ( c_) return true;
else return false;
}
// Suspend/resume primitives; the spinlock_lock overloads release the given
// lock as part of the switch (see their definitions elsewhere).
void resume() noexcept;
void resume( detail::spinlock_lock &) noexcept;
void resume( context *) noexcept;
void suspend() noexcept;
void suspend( detail::spinlock_lock &) noexcept;
boost::context::continuation suspend_with_cc() noexcept;
boost::context::continuation terminate() noexcept;
void join();
void yield() noexcept;
// Sleep until the given steady-clock time point; returns via scheduler.
bool wait_until( std::chrono::steady_clock::time_point const&) noexcept;
bool wait_until( std::chrono::steady_clock::time_point const&,
detail::spinlock_lock &) noexcept;
void schedule( context *) noexcept;
// Tests whether this context's type matches any bit in `t`.
bool is_context( type t) const noexcept {
return type::none != ( type_ & t);
}
// Fiber-specific storage access; key is the address passed as `vp`.
void * get_fss_data( void const * vp) const;
void set_fss_data(
void const * vp,
detail::fss_cleanup_function::ptr_t const& cleanup_fn,
void * data,
bool cleanup_existing);
void set_properties( fiber_properties * props) noexcept;
fiber_properties * get_properties() const noexcept {
return properties_;
}
launch get_policy() const noexcept {
return policy_;
}
// Linkage queries for each intrusive hook (defined elsewhere).
bool worker_is_linked() const noexcept;
bool ready_is_linked() const noexcept;
bool remote_ready_is_linked() const noexcept;
bool sleep_is_linked() const noexcept;
bool terminated_is_linked() const noexcept;
bool wait_is_linked() const noexcept;
// The *_link templates statically verify the destination container uses the
// matching hook type, assert the context is not already linked, then insert.
template< typename List >
void worker_link( List & lst) noexcept {
static_assert( std::is_same< typename List::value_traits::hook_type, detail::worker_hook >::value, "not a worker-queue");
BOOST_ASSERT( ! worker_is_linked() );
lst.push_back( * this);
}
template< typename List >
void ready_link( List & lst) noexcept {
static_assert( std::is_same< typename List::value_traits::hook_type, detail::ready_hook >::value, "not a ready-queue");
BOOST_ASSERT( ! ready_is_linked() );
lst.push_back( * this);
}
template< typename List >
void remote_ready_link( List & lst) noexcept {
static_assert( std::is_same< typename List::value_traits::hook_type, detail::remote_ready_hook >::value, "not a remote-ready-queue");
BOOST_ASSERT( ! remote_ready_is_linked() );
lst.push_back( * this);
}
// Sleep queue is an ordered set (by deadline), hence insert() not push_back().
template< typename Set >
void sleep_link( Set & set) noexcept {
static_assert( std::is_same< typename Set::value_traits::hook_type,detail::sleep_hook >::value, "not a sleep-queue");
BOOST_ASSERT( ! sleep_is_linked() );
set.insert( * this);
}
template< typename List >
void terminated_link( List & lst) noexcept {
static_assert( std::is_same< typename List::value_traits::hook_type, detail::terminated_hook >::value, "not a terminated-queue");
BOOST_ASSERT( ! terminated_is_linked() );
lst.push_back( * this);
}
template< typename List >
void wait_link( List & lst) noexcept {
static_assert( std::is_same< typename List::value_traits::hook_type, detail::wait_hook >::value, "not a wait-queue");
BOOST_ASSERT( ! wait_is_linked() );
lst.push_back( * this);
}
// Corresponding unlink operations and scheduler attach/detach.
void worker_unlink() noexcept;
void ready_unlink() noexcept;
void sleep_unlink() noexcept;
void wait_unlink() noexcept;
void detach() noexcept;
void attach( context *) noexcept;
// intrusive_ptr support: relaxed increment is sufficient because acquiring a
// reference implies the caller already owns one.
friend void intrusive_ptr_add_ref( context * ctx) noexcept {
BOOST_ASSERT( nullptr != ctx);
ctx->use_count_.fetch_add( 1, std::memory_order_relaxed);
}
// Release/acquire pairing: the release on fetch_sub orders all prior writes
// by other owners before the acquire fence seen by the final releaser.
friend void intrusive_ptr_release( context * ctx) noexcept {
BOOST_ASSERT( nullptr != ctx);
if ( 1 == ctx->use_count_.fetch_sub( 1, std::memory_order_release) ) {
std::atomic_thread_fence( std::memory_order_acquire);
// The context lives *on* the fiber stack, so the continuation must be
// moved out first: destroying the context in place, then resuming the
// continuation lets the fiber unwind and free its own stack.
boost::context::continuation c = std::move( ctx->c_);
// destruct context
ctx->~context();
// deallocated stack
c.resume();
}
}
};
// Orders two fiber contexts by their opaque ids (stable, arbitrary order).
inline
bool operator<( context const& l, context const& r) noexcept {
    context::id const left_id = l.get_id();
    context::id const right_id = r.get_id();
    return left_id < right_id;
}
// Context subtype that runs a user-supplied callable with bound arguments.
template< typename Fn, typename ... Arg >
class worker_context final : public context {
private:
typename std::decay< Fn >::type fn_;
std::tuple< Arg ... > arg_;
// Trampoline executed on the fiber's own stack. The first c.resume() hands
// control back to the constructor's callcc; the callable is invoked only
// when the fiber is resumed again later.
boost::context::continuation
run_( boost::context::continuation && c) {
{
// fn and tpl must be destroyed before calling terminate()
auto fn = std::move( fn_);
auto arg = std::move( arg_);
c.resume();
#if defined(BOOST_NO_CXX17_STD_APPLY)
boost::context::detail::apply( std::move( fn), std::move( arg) );
#else
std::apply( std::move( fn), std::move( arg) );
#endif
}
// terminate context
return terminate();
}
public:
// Creates the fiber's execution context on preallocated stack memory and
// immediately enters run_() up to its first suspension point.
template< typename StackAlloc >
worker_context( launch policy,
boost::context::preallocated const& palloc, StackAlloc const& salloc,
Fn && fn, Arg ... arg) :
context{ 1, type::worker_context, policy },
fn_( std::forward< Fn >( fn) ),
arg_( std::forward< Arg >( arg) ... ) {
c_ = boost::context::callcc(
std::allocator_arg, palloc, salloc,
std::bind( & worker_context::run_, this, std::placeholders::_1) );
}
};
// Allocates a fiber stack and placement-constructs the worker_context control
// structure at its top, returning an owning intrusive_ptr.
template< typename StackAlloc, typename Fn, typename ... Arg >
static intrusive_ptr< context > make_worker_context( launch policy,
StackAlloc salloc,
Fn && fn, Arg ... arg) {
typedef worker_context< Fn, Arg ... > context_t;
auto sctx = salloc.allocate();
// reserve space for control structure
// Carve sizeof(context_t) bytes below the stack top, then round the address
// down to a 256-byte boundary (mask ~0xff) for alignment.
void * storage = reinterpret_cast< void * >(
( reinterpret_cast< uintptr_t >( sctx.sp) - static_cast< uintptr_t >( sizeof( context_t) ) )
& ~ static_cast< uintptr_t >( 0xff) );
void * stack_bottom = reinterpret_cast< void * >(
reinterpret_cast< uintptr_t >( sctx.sp) - static_cast< uintptr_t >( sctx.size) );
// Remaining usable stack is everything between the bottom and the control block.
const std::size_t size = reinterpret_cast< uintptr_t >( storage) - reinterpret_cast< uintptr_t >( stack_bottom);
// placement new of context on top of fiber's stack
return intrusive_ptr< context >{
new ( storage) context_t{
policy,
boost::context::preallocated{ storage, size, sctx },
salloc,
std::forward< Fn >( fn),
std::forward< Arg >( arg) ... } };
}
namespace detail {
// intrusive value-traits glue: convert between a context and its wait_hook_
// member so contexts can live directly in intrusive wait queues.
inline
wait_functor::hook_ptr wait_functor::to_hook_ptr( wait_functor::value_type & value) {
return & value.wait_hook_;
}
inline
wait_functor::const_hook_ptr wait_functor::to_hook_ptr( wait_functor::value_type const& value) {
return & value.wait_hook_;
}
// Recover the enclosing context from a hook pointer via member-offset math.
inline
wait_functor::pointer wait_functor::to_value_ptr( wait_functor::hook_ptr n) {
return intrusive::get_parent_from_member< context >( n, & context::wait_hook_);
}
inline
wait_functor::const_pointer wait_functor::to_value_ptr( wait_functor::const_hook_ptr n) {
return intrusive::get_parent_from_member< context >( n, & context::wait_hook_);
}
}
}}}
#ifdef _MSC_VER
# pragma warning(pop)
#endif
#ifdef BOOST_HAS_ABI_HEADERS
# include BOOST_ABI_SUFFIX
#endif
#endif // BOOST_FIBERS_CONTEXT_H
|
/*!
******************************************************************************
*
* \file
*
* \brief Header file for loop kernel internals: LoopData structure and
* related helper functions.
*
******************************************************************************
*/
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
// Copyright (c) 2016-20, Lawrence Livermore National Security, LLC
// and RAJA project contributors. See the RAJA/COPYRIGHT file for details.
//
// SPDX-License-Identifier: (BSD-3-Clause)
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
#ifndef RAJA_pattern_kernel_internal_LoopData_HPP
#define RAJA_pattern_kernel_internal_LoopData_HPP
#include "RAJA/config.hpp"
#include "RAJA/index/IndexSet.hpp"
#include "RAJA/util/macros.hpp"
#include "RAJA/util/types.hpp"
#include "camp/camp.hpp"
#include "RAJA/pattern/detail/privatizer.hpp"
#include "RAJA/pattern/kernel/internal/StatementList.hpp"
#include <iterator>
#include <type_traits>
namespace RAJA
{
namespace internal
{
// Universal base of all For wrappers for type traits
// Tag bases used only for type-trait dispatch (is-a checks); no behavior.
struct ForList {
};
struct ForBase {
};
struct CollapseBase {
};
// Common base for For-statement traits: records which loop argument this For
// binds and which execution policy it carries, in camp-compatible form.
template <camp::idx_t ArgumentId, typename Policy>
struct ForTraitBase : public ForBase {
constexpr static camp::idx_t index_val = ArgumentId;
using index = camp::num<ArgumentId>;
using index_type = camp::nil; // default to invalid type
using policy_type = Policy;
using type = ForTraitBase; // make camp::value compatible
};
// Metafunction: the difference_type of a segment's iterator.
template <typename Iterator>
struct iterable_difftype_getter {
using type = typename std::iterator_traits<
typename Iterator::iterator>::difference_type;
};
// List of difference_types, one per segment in the tuple.
template <typename Segments>
using difftype_list_from_segments =
typename camp::transform<iterable_difftype_getter, Segments>::type;
// camp::tuple of those difference_types (used for loop offsets).
template <typename Segments>
using difftype_tuple_from_segments =
typename camp::apply_l<camp::lambda<camp::tuple>,
difftype_list_from_segments<Segments>>::type;
// Metafunction: the value_type of a segment's iterator (i.e. its index type).
template <typename Iterator>
struct iterable_value_type_getter {
using type =
typename std::iterator_traits<typename Iterator::iterator>::value_type;
};
template <typename Segments>
using value_type_list_from_segments =
typename camp::transform<iterable_value_type_getter, Segments>::type;
// camp::tuple of segment index types (the loop's logical index tuple).
template <typename Segments>
using index_tuple_from_segments =
typename camp::apply_l<camp::lambda<camp::tuple>,
value_type_list_from_segments<Segments>>::type;
// Central state object for a RAJA::kernel invocation: holds the iteration
// segments, user parameters, lambda bodies, and the current loop offsets.
template <typename SegmentTuple,
typename ParamTuple,
typename... Bodies>
struct LoopData {
using Self = LoopData<SegmentTuple, ParamTuple, Bodies...>;
// Tuple of iterator difference_types, one slot per segment.
using offset_tuple_t =
difftype_tuple_from_segments<typename SegmentTuple::TList>;
using index_tuple_t = index_tuple_from_segments<typename SegmentTuple::TList>;
using segment_tuple_t = SegmentTuple;
SegmentTuple segment_tuple;
using param_tuple_t = ParamTuple;
ParamTuple param_tuple;
using BodiesTuple = camp::tuple<Bodies...>;
const BodiesTuple bodies;
// Current position within each segment, updated by assign_offset().
offset_tuple_t offset_tuple;
RAJA_INLINE RAJA_HOST_DEVICE constexpr
LoopData(SegmentTuple const &s, ParamTuple const &p, Bodies const &... b)
: segment_tuple(s), param_tuple(p), bodies(b...)
{
//assign_begin_all();
}
constexpr LoopData(LoopData const &) = default;
constexpr LoopData(LoopData &&) = default;
// Sets the offset for loop argument Idx.
template <camp::idx_t Idx, typename IndexT>
RAJA_HOST_DEVICE RAJA_INLINE void assign_offset(IndexT const &i)
{
camp::get<Idx>(offset_tuple) = i;
}
// Assigns parameter ParamId, converting `i` to the stored parameter type.
template <typename ParamId, typename IndexT>
RAJA_HOST_DEVICE RAJA_INLINE void assign_param(IndexT const &i)
{
using param_t = camp::at_v<typename param_tuple_t::TList, ParamId::param_idx>;
camp::get<ParamId::param_idx>(param_tuple) = param_t(i);
}
// Returns a copy of parameter ParamId.
template <typename ParamId>
RAJA_HOST_DEVICE RAJA_INLINE
auto get_param() ->
camp::at_v<typename param_tuple_t::TList, ParamId::param_idx>
{
return camp::get<ParamId::param_idx>(param_tuple);
}
};
// difference_type of the iterator of segment ArgumentId inside Data's
// segment tuple — the natural type for lengths and offsets of that segment.
template <camp::idx_t ArgumentId, typename Data>
using segment_diff_type =
typename std::iterator_traits<
typename camp::at_v<typename Data::segment_tuple_t::TList,
ArgumentId>::iterator>::difference_type;
// Number of iterations in segment ArgumentId, i.e. the distance between the
// segment's begin and end iterators.
template <camp::idx_t ArgumentId, typename Data>
RAJA_INLINE RAJA_HOST_DEVICE auto segment_length(Data const &data) ->
segment_diff_type<ArgumentId, Data>
{
  auto const &segment = camp::get<ArgumentId>(data.segment_tuple);
  return segment.end() - segment.begin();
}
// Callable wrapper binding a LoopData reference to a list of enclosed
// statements; exec() runs those statements against the data.
template <typename Data, typename Types, typename... EnclosedStmts>
struct GenericWrapper : GenericWrapperBase {
using data_t = camp::decay<Data>;
// Non-owning: the LoopData lives in the enclosing kernel invocation.
data_t &data;
RAJA_INLINE
constexpr explicit GenericWrapper(data_t &d) : data{d} {}
RAJA_INLINE
void exec() { execute_statement_list<camp::list<EnclosedStmts...>, Types>(data); }
};
/*!
* Convenience object used to create a thread-private copy of a LoopData object.
*/
template <typename T>
struct NestedPrivatizer {
using data_t = typename T::data_t;
using value_type = camp::decay<T>;
using reference_type = value_type &;
// Private copy of the wrapper's LoopData, plus a wrapper re-bound to it,
// so each thread mutates its own offsets/params.
data_t privatized_data;
value_type privatized_wrapper;
// Copies o.data and points the new wrapper at that private copy.
RAJA_INLINE
constexpr NestedPrivatizer(const T &o)
: privatized_data{o.data}, privatized_wrapper(privatized_data)
{
}
// Accessor for the privatized wrapper used by the executing thread.
RAJA_INLINE
reference_type get_priv() { return privatized_wrapper; }
};
} // end namespace internal
} // end namespace RAJA
#endif /* RAJA_pattern_kernel_internal_LoopData_HPP */
|
// stdafx.cpp : source file that includes just the standard includes
// t2Test.pch will be the pre-compiled header
// stdafx.obj will contain the pre-compiled type information
//#include "pch.h"
#include "stdafx.h"
// TODO: reference any additional headers you need in STDAFX.H
// and not in this file
|
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2018, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Author(s): Filip Strugar (filip.strugar@intel.com)
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
#include "vaDebugCanvas.h"
#include "Rendering/vaRenderDeviceContext.h"
//#include "Rendering/DirectX/vaDirectXIncludes.h"
//#include "Rendering/DirectX/vaRenderBuffersDX11.h"
//#include "Rendering/DirectX/vaRenderDeviceContextDX11.h"
using namespace Vanilla;
// Sets up the 2D debug canvas: dynamic vertex buffer, the Canvas2D vertex/pixel
// shaders, and the vertex input layout they consume.
vaDebugCanvas2D::vaDebugCanvas2D( const vaRenderingModuleParams & params )
: m_vertexBuffer( params, m_vertexBufferSize, nullptr, true ),
m_pixelShader( params ),
m_vertexShader( params )
{
// Input layout must match CanvasVertex2D and VS_Canvas2D's input signature.
std::vector<vaVertexInputElementDesc> inputElements;
inputElements.push_back( { "SV_Position", 0, vaResourceFormat::R32G32B32A32_FLOAT, 0, vaVertexInputElementDesc::AppendAlignedElement, vaVertexInputElementDesc::InputClassification::PerVertexData, 0 } );
inputElements.push_back( { "COLOR", 0, vaResourceFormat::B8G8R8A8_UNORM, 0, vaVertexInputElementDesc::AppendAlignedElement, vaVertexInputElementDesc::InputClassification::PerVertexData, 0 } );
inputElements.push_back( { "TEXCOORD", 0, vaResourceFormat::R32G32B32A32_FLOAT, 0, vaVertexInputElementDesc::AppendAlignedElement, vaVertexInputElementDesc::InputClassification::PerVertexData, 0 } );
inputElements.push_back( { "TEXCOORD", 1, vaResourceFormat::R32G32_FLOAT, 0, vaVertexInputElementDesc::AppendAlignedElement, vaVertexInputElementDesc::InputClassification::PerVertexData, 0 } );
m_vertexShader->CreateShaderAndILFromFile( L"vaCanvas.hlsl", "vs_5_0", "VS_Canvas2D", inputElements, vaShaderMacroContaner(), false );
m_pixelShader->CreateShaderFromFile( L"vaCanvas.hlsl", "ps_5_0", "PS_Canvas2D", vaShaderMacroContaner(), false );
// Start filling the ring-style vertex buffer from the beginning.
// (Fix: the original assigned this twice; the duplicate was removed.)
m_vertexBufferCurrentlyUsed = 0;
}
// All members are RAII-managed; nothing to release explicitly.
vaDebugCanvas2D::~vaDebugCanvas2D( )
{
}
// Varargs-formatting helpers for the DrawString overloads below. Each expands
// to local declarations: `szBuffer` holds the formatted text of the enclosing
// function's `text, ...` arguments.
// Fixes vs. original:
//  - the wide variant asserted `nBuf < sizeof(szBuffer)` — that compares a
//    character count against a BYTE count (2x/4x too large for wchar_t), so
//    the assert could never fire; both now use _countof and also catch the
//    -1 returned by the _s formatters on error/truncation.
//  - the narrow variant's last line ended in a stray continuation backslash;
//    both macros now terminate cleanly.
#define vaDirectXCanvas2D_FORMAT_WSTR() \
va_list args; \
va_start(args, text); \
int nBuf; \
wchar_t szBuffer[2048]; \
nBuf = _vsnwprintf_s(szBuffer, _countof(szBuffer), _countof(szBuffer)-1, text, args); \
assert(nBuf >= 0 && nBuf < (int)_countof(szBuffer)); \
va_end(args);
//
#define vaDirectXCanvas2D_FORMAT_STR() \
va_list args; \
va_start(args, text); \
int nBuf; \
char szBuffer[2048]; \
nBuf = _vsnprintf_s(szBuffer, _countof(szBuffer), _countof(szBuffer)-1, text, args); \
assert(nBuf >= 0 && nBuf < (int)_countof(szBuffer)); \
va_end(args);
// Queues a printf-formatted wide string at pixel (x, y); default pen is opaque
// black (0xFF000000) and no shadow. Rendering happens later in Render().
void vaDebugCanvas2D::DrawString( int x, int y, const wchar_t * text, ... )
{
vaDirectXCanvas2D_FORMAT_WSTR( );
m_drawStringLines.push_back( DrawStringItem( x, y, 0xFF000000, 0x00000000, szBuffer ) );
}
//
// Same, with an explicit ARGB pen color (no shadow).
void vaDebugCanvas2D::DrawString( int x, int y, unsigned int penColor, const wchar_t * text, ... )
{
vaDirectXCanvas2D_FORMAT_WSTR( );
m_drawStringLines.push_back( DrawStringItem( x, y, penColor, 0x00000000, szBuffer ) );
}
//
// Same, with explicit pen and drop-shadow colors.
void vaDebugCanvas2D::DrawString( int x, int y, unsigned int penColor, unsigned int shadowColor, const wchar_t * text, ... )
{
vaDirectXCanvas2D_FORMAT_WSTR( );
m_drawStringLines.push_back( DrawStringItem( x, y, penColor, shadowColor, szBuffer ) );
}
//
// Narrow-string overloads: format, widen via vaStringTools::SimpleWiden, then
// queue like the wide variants. Default pen is opaque black, no shadow.
void vaDebugCanvas2D::DrawString( int x, int y, const char * text, ... )
{
vaDirectXCanvas2D_FORMAT_STR( );
m_drawStringLines.push_back( DrawStringItem( x, y, 0xFF000000, 0x00000000, vaStringTools::SimpleWiden( std::string( szBuffer ) ).c_str( ) ) );
}
//
// Same, with an explicit ARGB pen color (no shadow).
void vaDebugCanvas2D::DrawString( int x, int y, unsigned int penColor, const char * text, ... )
{
vaDirectXCanvas2D_FORMAT_STR( );
m_drawStringLines.push_back( DrawStringItem( x, y, penColor, 0x00000000, vaStringTools::SimpleWiden( std::string( szBuffer ) ).c_str( ) ) );
}
//
// Same, with explicit pen and drop-shadow colors.
void vaDebugCanvas2D::DrawString( int x, int y, unsigned int penColor, unsigned int shadowColor, const char * text, ... )
{
vaDirectXCanvas2D_FORMAT_STR( );
m_drawStringLines.push_back( DrawStringItem( x, y, penColor, shadowColor, vaStringTools::SimpleWiden( std::string( szBuffer ) ).c_str( ) ) );
}
//
void vaDebugCanvas2D::DrawLine( float x0, float y0, float x1, float y1, unsigned int penColor )
{
m_drawLines.push_back( DrawLineItem( x0, y0, x1, y1, penColor ) );
}
//
// Queues the four edges of an axis-aligned rectangle outline.
// NOTE(review): only the top edge starts at x0 - 0.5f — presumably a pixel-
// coverage tweak for the top-left corner, but the asymmetry looks suspicious;
// confirm against the rasterization rules before "fixing".
void vaDebugCanvas2D::DrawRectangle( float x0, float y0, float width, float height, unsigned int penColor )
{
DrawLine( x0 - 0.5f, y0, x0 + width, y0, penColor );
DrawLine( x0 + width, y0, x0 + width, y0 + height, penColor );
DrawLine( x0 + width, y0 + height, x0, y0 + height, penColor );
DrawLine( x0, y0 + height, x0, y0, penColor );
}
//
void vaDebugCanvas2D::FillRectangle( float x0, float y0, float width, float height, unsigned int brushColor )
{
m_drawRectangles.push_back( DrawRectangleItem( x0, y0, width, height, brushColor ) );
}
//
// Approximates a circle outline with line segments. `tess` in [0,1] scales the
// segment count derived from the circumference; the count is clamped to
// [5, 32768] so tiny circles stay recognizable and huge ones stay bounded.
void vaDebugCanvas2D::DrawCircle( float x, float y, float radius, unsigned int penColor, float tess )
{
    tess = vaMath::Clamp( tess, 0.0f, 1.0f );
    const float circumference = 2 * VA_PIf * radius;
    int segmentCount = (int)( circumference / 4.0f * tess );
    segmentCount = vaMath::Clamp( segmentCount, 5, 32768 );
    // Start at angle 0 and walk the parameterization, emitting one line per step.
    float prevX = x + cos( 0 * 2 * VA_PIf ) * radius;
    float prevY = y + sin( 0 * 2 * VA_PIf ) * radius;
    for( int seg = 1; seg <= segmentCount; seg++ )
    {
        const float t = seg / (float)segmentCount;
        const float currX = x + cos( t * 2 * VA_PIf ) * radius;
        const float currY = y + sin( t * 2 * VA_PIf ) * radius;
        DrawLine( prevX, prevY, currX, currY, penColor );
        prevX = currX;
        prevY = currY;
    }
}
// Discards every queued primitive; invoked at the end of each Render() pass.
void vaDebugCanvas2D::CleanQueued( )
{
    m_drawStringLines.clear( );
    m_drawLines.clear( );
    m_drawRectangles.clear( );
}
// Flushes all queued 2D primitives to the GPU and clears the queues.
// The shared vertex buffer is used as a ring: when the remaining space cannot
// hold the next primitive the cursor wraps to 0 and the buffer is mapped with
// WriteDiscard; otherwise WriteNoOverwrite appends after previous draws.
// When bJustClearData is true nothing is drawn — queues are just dropped.
void vaDebugCanvas2D::Render( vaRenderDeviceContext & renderContext, int canvasWidth, int canvasHeight, bool bJustClearData )
{
//ID3D11DeviceContext * context = renderContext.SafeCast<vaRenderDeviceContextDX11*>( )->GetDXContext();
// Fill shapes first
if( !bJustClearData )
{
uint32 rectsDrawn = 0;
while( rectsDrawn < m_drawRectangles.size( ) )
{
// Not enough room for one more quad (6 verts) — wrap to the start.
if( ( m_vertexBufferCurrentlyUsed + 6 ) >= m_vertexBufferSize )
{
m_vertexBufferCurrentlyUsed = 0;
}
// Discard on wrap, no-overwrite when appending behind in-flight draws.
vaResourceMapType mapType = ( m_vertexBufferCurrentlyUsed == 0 ) ? ( vaResourceMapType::WriteDiscard ) : ( vaResourceMapType::WriteNoOverwrite );
if( m_vertexBuffer.Map( renderContext, mapType ) )
{
CanvasVertex2D * vertices = m_vertexBuffer.GetMappedData( );
int drawFromVertex = m_vertexBufferCurrentlyUsed;
// Pack as many rectangles as fit: two CCW/CW triangles per rectangle.
while( ( rectsDrawn < m_drawRectangles.size( ) ) && ( ( m_vertexBufferCurrentlyUsed + 6 ) < m_vertexBufferSize ) )
{
const int index = m_vertexBufferCurrentlyUsed;
const DrawRectangleItem & rect = m_drawRectangles[rectsDrawn];
vertices[index + 0] = CanvasVertex2D( canvasWidth, canvasHeight, vaVector2( rect.x, rect.y ), rect.color );
vertices[index + 1] = CanvasVertex2D( canvasWidth, canvasHeight, vaVector2( rect.x + rect.width, rect.y ), rect.color );
vertices[index + 2] = CanvasVertex2D( canvasWidth, canvasHeight, vaVector2( rect.x, rect.y + rect.height ), rect.color );
vertices[index + 3] = CanvasVertex2D( canvasWidth, canvasHeight, vaVector2( rect.x, rect.y + rect.height ), rect.color );
vertices[index + 4] = CanvasVertex2D( canvasWidth, canvasHeight, vaVector2( rect.x + rect.width, rect.y ), rect.color );
vertices[index + 5] = CanvasVertex2D( canvasWidth, canvasHeight, vaVector2( rect.x + rect.width, rect.y + rect.height ), rect.color );
m_vertexBufferCurrentlyUsed += 6;
rectsDrawn++;
}
int drawVertexCount = m_vertexBufferCurrentlyUsed - drawFromVertex;
m_vertexBuffer.Unmap( renderContext );
// Submit the packed range as an alpha-blended triangle list.
vaGraphicsItem renderItem;
renderItem.CullMode = vaFaceCull::None;
renderItem.BlendMode = vaBlendMode::AlphaBlend;
renderItem.VertexShader = m_vertexShader;
renderItem.VertexBuffer = m_vertexBuffer.GetBuffer();
renderItem.Topology = vaPrimitiveTopology::TriangleList;
renderItem.PixelShader = m_pixelShader;
renderItem.SetDrawSimple( drawVertexCount, drawFromVertex );
renderContext.ExecuteSingleItem( renderItem, nullptr );
}
else
{
assert( false );
}
}
}
// Lines
if( !bJustClearData )
{
uint32 linesDrawn = 0;
while( linesDrawn < m_drawLines.size( ) )
{
// Not enough room for one more line (2 verts) — wrap to the start.
if( ( m_vertexBufferCurrentlyUsed + 2 ) >= m_vertexBufferSize )
{
m_vertexBufferCurrentlyUsed = 0;
}
vaResourceMapType mapType = ( m_vertexBufferCurrentlyUsed == 0 ) ? ( vaResourceMapType::WriteDiscard ) : ( vaResourceMapType::WriteNoOverwrite );
if( m_vertexBuffer.Map( renderContext, mapType ) )
{
CanvasVertex2D * vertices = m_vertexBuffer.GetMappedData( );
int drawFromVertex = m_vertexBufferCurrentlyUsed;
while( ( linesDrawn < m_drawLines.size( ) ) && ( ( m_vertexBufferCurrentlyUsed + 2 ) < m_vertexBufferSize ) )
{
const int index = m_vertexBufferCurrentlyUsed;
vertices[index + 0] = CanvasVertex2D( canvasWidth, canvasHeight, vaVector2( m_drawLines[linesDrawn].x0, m_drawLines[linesDrawn].y0 ), m_drawLines[linesDrawn].penColor );
vertices[index + 1] = CanvasVertex2D( canvasWidth, canvasHeight, vaVector2( m_drawLines[linesDrawn].x1, m_drawLines[linesDrawn].y1 ), m_drawLines[linesDrawn].penColor );
m_vertexBufferCurrentlyUsed += 2;
linesDrawn++;
}
int drawVertexCount = m_vertexBufferCurrentlyUsed - drawFromVertex;
m_vertexBuffer.Unmap( renderContext );
// Submit the packed range as an alpha-blended line list.
vaGraphicsItem renderItem;
renderItem.CullMode = vaFaceCull::None;
renderItem.BlendMode = vaBlendMode::AlphaBlend;
renderItem.VertexShader = m_vertexShader;
renderItem.VertexBuffer = m_vertexBuffer.GetBuffer();
renderItem.Topology = vaPrimitiveTopology::LineList;
renderItem.PixelShader = m_pixelShader;
renderItem.SetDrawSimple( drawVertexCount, drawFromVertex );
renderContext.ExecuteSingleItem( renderItem, nullptr );
}
else
{
assert( false );
}
}
}
// Text
if( !bJustClearData && m_drawStringLines.size( ) > 0 )
{
assert( false ); // not implemented for DX12 so remove it for now :(
// m_font.Begin( );
// m_font.SetInsertionPos( 5, 5 );
// m_font.SetForegroundColor( 0xFFFFFFFF );
//
// for( size_t i = 0; i < m_drawStringLines.size( ); i++ )
// {
// if( ( m_drawStringLines[i].shadowColor & 0xFF000000 ) == 0 ) continue;
//
// m_font.SetInsertionPos( m_drawStringLines[i].x + 1, m_drawStringLines[i].y + 1 );
// m_font.SetForegroundColor( m_drawStringLines[i].shadowColor );
// m_font.DrawTextLine( m_drawStringLines[i].text.c_str( ) );
// }
//
// for( size_t i = 0; i < m_drawStringLines.size( ); i++ )
// {
// m_font.SetInsertionPos( m_drawStringLines[i].x, m_drawStringLines[i].y );
// m_font.SetForegroundColor( m_drawStringLines[i].penColor );
// m_font.DrawTextLine( m_drawStringLines[i].text.c_str( ) );
// }
//
// m_font.End( );
}
// All queued primitives have been consumed (or intentionally dropped).
CleanQueued( );
}
/*
//struct Circle2D
//{
// float fX, fY, fRadiusFrom, fRadiusTo;
// u_int uColour;
// Circle2D( ) {}
// Circle2D( float fX, float fY, float fRadiusFrom, float fRadiusTo, u_int uColour ) : fX(fX), fY(fY), fRadiusFrom(fRadiusFrom), fRadiusTo(fRadiusTo), uColour(uColour) {}
//};
//struct PolyLinePoint2D
//{
// float fX, fY;
// float fThickness;
// u_int uColour;
// PolyLinePoint2D( ) {}
// PolyLinePoint2D( float fX, float fY, float fThickness, u_int uColour ) : fX(fX), fY(fY), fThickness(fThickness), uColour(uColour) { }
//};
//static const u_int g_nItemBufferSize = 4096;
//static Collection_Vector<Direct3D_2DRenderer::Circle2D> g_xCircles;
//static Collection_Vector<Direct3D_2DRenderer::Line2D> g_xLines;
//static Direct3D_2DRenderer::SlightlyLessSimpleVertex g_xDrawVertices[g_nItemBufferSize*6];
*/
/*
void Direct3D_2DRenderer::Flush( )
{
IDirect3DSurface9* pSurf = NULL;
Direct3D::D3DDevice->GetRenderTarget( 0, &pSurf );
D3DSURFACE_DESC xDesc;
pSurf->GetDesc( &xDesc );
SAFE_RELEASE( pSurf );
Direct3D::D3DDevice->SetRenderState( D3DRS_SRGBWRITEENABLE, FALSE );
Render::CurrentStates.RequestZBufferEnabled(false);
Render::CurrentStates.RequestZBufferWriteEnabled(false);
Render::CurrentStates.RequestCullMode( _CULLMODE_NONE );
Render::CurrentStates.RequestWireFrameMode( false );
Render::CurrentStates.RequestTranslucencyMode( _TRANSLUCENCY_NORMAL );
Render::CurrentStates.RequestZBufferEnabled(false);
g_pxEffect->SetParameterByName( "g_xScreenSize", D3DXVECTOR4( (float)xDesc.Width, (float)xDesc.Height, 1.0f / (float)xDesc.Width, 1.0f / (float)xDesc.Height ) );
Direct3D::D3DDevice->SetVertexDeclaration( g_pxVertDecl );
if( g_xLines.GetSize() > 0 )
{
u_int uVertexCount = g_xLines.GetSize() * 2;
for( u_int i = 0; i < g_xLines.GetSize(); i++ )
{
Line2D& xLine = g_xLines[i];
SlightlyLessSimpleVertex* pVerts = &g_xDrawVertices[i*2];
pVerts[0] = SlightlyLessSimpleVertex( Vector_2( xLine.fXFrom, xLine.fYFrom ), Vector_2( 0.0f, 0.0f ), Vector_2( 0.0f, 0.0f ), xLine.uColour );
pVerts[1] = SlightlyLessSimpleVertex( Vector_2( xLine.fXTo, xLine.fYTo ), Vector_2( 0.0f, 0.0f ), Vector_2( 0.0f, 0.0f ), xLine.uColour );
}
g_pxEffect->Begin( false, 2 );
g_pxEffect->RenderAllPrimitivePassesUp( D3DPT_LINELIST, uVertexCount/2, g_xDrawVertices, sizeof( SlightlyLessSimpleVertex ) );
g_pxEffect->End( );
}
if( g_xCircles.GetSize() > 0 )
{
u_int uVertexCount = g_xCircles.GetSize() * 6;
for( u_int i = 0; i < g_xCircles.GetSize(); i++ )
{
Circle2D& xCircle = g_xCircles[i];
SlightlyLessSimpleVertex* pVerts = &g_xDrawVertices[i*6];
pVerts[0] = SlightlyLessSimpleVertex( Vector_2( xCircle.fX - xCircle.fRadiusTo - 1.0f, xCircle.fY - xCircle.fRadiusTo - 1.0f ), Vector_2( xCircle.fX, xCircle.fY ), Vector_2( xCircle.fRadiusFrom, xCircle.fRadiusTo ), xCircle.uColour );
pVerts[1] = SlightlyLessSimpleVertex( Vector_2( xCircle.fX + xCircle.fRadiusTo + 1.0f, xCircle.fY - xCircle.fRadiusTo - 1.0f ), Vector_2( xCircle.fX, xCircle.fY ), Vector_2( xCircle.fRadiusFrom, xCircle.fRadiusTo ), xCircle.uColour );
pVerts[2] = SlightlyLessSimpleVertex( Vector_2( xCircle.fX - xCircle.fRadiusTo - 1.0f, xCircle.fY + xCircle.fRadiusTo + 1.0f ), Vector_2( xCircle.fX, xCircle.fY ), Vector_2( xCircle.fRadiusFrom, xCircle.fRadiusTo ), xCircle.uColour );
pVerts[3] = SlightlyLessSimpleVertex( Vector_2( xCircle.fX - xCircle.fRadiusTo - 1.0f, xCircle.fY + xCircle.fRadiusTo + 1.0f ), Vector_2( xCircle.fX, xCircle.fY ), Vector_2( xCircle.fRadiusFrom, xCircle.fRadiusTo ), xCircle.uColour );
pVerts[4] = SlightlyLessSimpleVertex( Vector_2( xCircle.fX + xCircle.fRadiusTo + 1.0f, xCircle.fY - xCircle.fRadiusTo - 1.0f ), Vector_2( xCircle.fX, xCircle.fY ), Vector_2( xCircle.fRadiusFrom, xCircle.fRadiusTo ), xCircle.uColour );
pVerts[5] = SlightlyLessSimpleVertex( Vector_2( xCircle.fX + xCircle.fRadiusTo + 1.0f, xCircle.fY + xCircle.fRadiusTo + 1.0f ), Vector_2( xCircle.fX, xCircle.fY ), Vector_2( xCircle.fRadiusFrom, xCircle.fRadiusTo ), xCircle.uColour );
}
g_pxEffect->Begin( false, 0 );
g_pxEffect->RenderAllPrimitivePassesUp( D3DPT_TRIANGLELIST, uVertexCount/3, g_xDrawVertices, sizeof( SlightlyLessSimpleVertex ) );
g_pxEffect->End( );
}
g_xLines.Clear();
g_xCircles.Clear();
const bool bSRGB = Direct3D::UseGammaCorrection();
Direct3D::D3DDevice->SetRenderState( D3DRS_SRGBWRITEENABLE, bSRGB );
}
void Direct3D_2DRenderer::DrawPolyline( PolyLinePoint2D* axPoints, int iPointCount )
{
const int iMaxPolylinePointCount = g_nItemBufferSize-2;
Assert( iPointCount < iMaxPolylinePointCount, "Direct3D_2DRenderer::DrawPolyline does not support as many points (will be clamped)" );
iPointCount = Maths::Min( iPointCount, iMaxPolylinePointCount );
IDirect3DSurface9* pSurf = NULL;
Direct3D::D3DDevice->GetRenderTarget( 0, &pSurf );
D3DSURFACE_DESC xDesc;
pSurf->GetDesc( &xDesc );
SAFE_RELEASE( pSurf );
Direct3D::D3DDevice->SetRenderState( D3DRS_SRGBWRITEENABLE, FALSE );
Render::CurrentStates.RequestZBufferEnabled(false);
Render::CurrentStates.RequestZBufferWriteEnabled(false);
Render::CurrentStates.RequestCullMode( _CULLMODE_NONE );
Render::CurrentStates.RequestWireFrameMode( false );
Render::CurrentStates.RequestTranslucencyMode( _TRANSLUCENCY_NORMAL );
Render::CurrentStates.RequestZBufferEnabled(false);
g_pxEffect->SetParameterByName( "g_xScreenSize", D3DXVECTOR4( (float)xDesc.Width, (float)xDesc.Height, 1.0f / (float)xDesc.Width, 1.0f / (float)xDesc.Height ) );
Direct3D::D3DDevice->SetVertexDeclaration( g_pxVertDecl );
u_int uVertexCount = (iPointCount-1) * 6;
Vector_2 xDirPrev;
Vector_2 xDirCurr;
for( int i = -1; i < (iPointCount-1); i++ )
{
Vector_2 xDirNext;
const PolyLinePoint2D& xPtNext = axPoints[i+1];
if( i < (iPointCount-2) )
{
const PolyLinePoint2D& xPtNextNext = axPoints[i+2];
xDirNext = Vector_2( xPtNextNext.fX, xPtNextNext.fY ) - Vector_2( xPtNext.fX, xPtNext.fY );
xDirNext.Normalise();
}
if( i >= 0 )
{
const PolyLinePoint2D& xPtCurrent = axPoints[i];
float fThicknessIn = xPtCurrent.fThickness;
float fThicknessOut = xPtNext.fThickness;
float fDotIn = xDirPrev * xDirCurr;
float fDotOut = xDirCurr * xDirNext;
float fInAngle = Maths::ArcCosine( Maths::ClampToRange( fDotIn, -0.9999f, 1.0f ) );
float fOutAngle = Maths::ArcCosine( Maths::ClampToRange( fDotOut, -0.9999f, 1.0f ) );
float fInDist = Maths::Tangent( fInAngle*0.5f );
float fOutDist = Maths::Tangent( fOutAngle*0.5f );
Vector_2 xDirCurrLeft = Vector_2( +xDirCurr.y, -xDirCurr.x );
Vector_2 xFrom( xPtCurrent.fX, xPtCurrent.fY );
Vector_2 xTo( xPtNext.fX, xPtNext.fY );
float fThicknessInMod = fThicknessIn * 0.5f + 1.0f;
float fThicknessOutMod = fThicknessOut * 0.5f + 1.0f;
fInDist *= Maths::Sign( xDirCurrLeft * xDirPrev );
fOutDist *= Maths::Sign( xDirCurrLeft * xDirNext );
Vector_2 xCFromLeft = xFrom - xDirCurr * (fThicknessInMod * fInDist);
Vector_2 xCFromRight = xFrom + xDirCurr * (fThicknessInMod * fInDist);
Vector_2 xCToLeft = xTo - xDirCurr * (fThicknessOutMod * fOutDist);
Vector_2 xCToRight = xTo + xDirCurr * (fThicknessOutMod * fOutDist);
Vector_2 xFromLeft = xCFromLeft + xDirCurrLeft * fThicknessInMod;
Vector_2 xFromRight = xCFromRight - xDirCurrLeft * fThicknessInMod;
Vector_2 xToLeft = xCToLeft + xDirCurrLeft * fThicknessOutMod;
Vector_2 xToRight = xCToRight - xDirCurrLeft * fThicknessOutMod;
SlightlyLessSimpleVertex* pVerts = &g_xDrawVertices[i*6];
pVerts[0] = SlightlyLessSimpleVertex( xFromLeft, xCFromLeft, Vector_2( fThicknessIn * 0.5f, 0.0f ), xPtCurrent.uColour );
pVerts[1] = SlightlyLessSimpleVertex( xToLeft, xCToLeft, Vector_2( fThicknessOut * 0.5f, 0.0f ), xPtNext.uColour );
pVerts[2] = SlightlyLessSimpleVertex( xFromRight, xCFromRight, Vector_2( fThicknessIn * 0.5f, 0.0f ), xPtCurrent.uColour );
pVerts[3] = SlightlyLessSimpleVertex( xFromRight, xCFromRight, Vector_2( fThicknessIn * 0.5f, 0.0f ), xPtCurrent.uColour );
pVerts[4] = SlightlyLessSimpleVertex( xToLeft, xCToLeft, Vector_2( fThicknessOut * 0.5f, 0.0f ), xPtNext.uColour );
pVerts[5] = SlightlyLessSimpleVertex( xToRight, xCToRight, Vector_2( fThicknessOut * 0.5f, 0.0f ), xPtNext.uColour );
}
else
{
xDirCurr = xDirNext;
}
xDirPrev = xDirCurr;
xDirCurr = xDirNext;
}
g_pxEffect->Begin( false, 3 );
g_pxEffect->RenderAllPrimitivePassesUp( D3DPT_TRIANGLELIST, uVertexCount/3, g_xDrawVertices, sizeof( SlightlyLessSimpleVertex ) );
g_pxEffect->End( );
const bool bSRGB = Direct3D::UseGammaCorrection();
Direct3D::D3DDevice->SetRenderState( D3DRS_SRGBWRITEENABLE, bSRGB );
}
#endif
*/
/*
float4 g_xScreenSize;
void VShader( inout float4 xColour : COLOR, inout float4 xPos : Position, inout float4 xUV : TEXCOORD0, out float2 xOrigScreenPos : TEXCOORD1 )
{
xOrigScreenPos = xPos.xy;
xPos.xy *= g_xScreenSize.zw;
xPos.xy *= float2( 2.0, -2.0 );
xPos.xy += float2( -1.0, 1.0 );
}
void PShader_Circle( inout float4 xColour : COLOR, in float4 xUV : TEXCOORD0, in float2 xOrigScreenPos : TEXCOORD1 )
{
float2 xDelta = xOrigScreenPos.xy - xUV.xy;
float fDistSq = dot( xDelta, xDelta );
float fRadius1 = xUV.z;
float fRadius2 = xUV.w;
if( !((fDistSq >= fRadius1*fRadius1) && (fDistSq < fRadius2*fRadius2)) )
discard;
}
void PShader_Rectangle( inout float4 xColour : COLOR, in float4 xUV : TEXCOORD0, in float2 xOrigScreenPos : TEXCOORD1 )
{
}
void PShader_Line( inout float4 xColour : COLOR, in float4 xUV : TEXCOORD0, in float2 xOrigScreenPos : TEXCOORD1 )
{
}
void PShader_LineAA( inout float4 xColour : COLOR, in float4 xUV : TEXCOORD0, in float2 xOrigScreenPos : TEXCOORD1 )
{
float2 xDist = xOrigScreenPos - xUV.xy;
xColour.a *= saturate( xUV.z - length( xDist ) + 0.5 );
}
technique Circle
{
pass p0
{
VertexShader = compile vs_3_0 VShader();
PixelShader = compile ps_3_0 PShader_Circle();
}
}
technique Rectangle
{
pass p0
{
VertexShader = compile vs_3_0 VShader();
PixelShader = compile ps_3_0 PShader_Rectangle();
}
}
technique Line
{
pass p0
{
VertexShader = compile vs_3_0 VShader();
PixelShader = compile ps_3_0 PShader_Line();
}
}
technique LineAA
{
pass p0
{
VertexShader = compile vs_3_0 VShader();
PixelShader = compile ps_3_0 PShader_LineAA();
}
}
*/
#include "Rendering/vaStandardShapes.h"
using namespace Vanilla;
// Debug canvas for immediate-mode 3D drawing: creates the dynamic line/triangle
// vertex buffers, compiles the canvas shaders, and pre-builds a unit sphere
// mesh used when rendering queued sphere items.
vaDebugCanvas3D::vaDebugCanvas3D( const vaRenderingModuleParams & params )
    : m_triVertexBuffer( params, m_triVertexBufferSizeInVerts, nullptr, true ),
    m_lineVertexBuffer( params, m_lineVertexBufferSizeInVerts, nullptr, true ),
    m_vertexShader( params ),
    m_pixelShader( params )
{
    // Vertex layout shared by the line and triangle paths: float4 position + packed BGRA colour.
    std::vector<vaVertexInputElementDesc> inputElements;
    inputElements.push_back( { "SV_Position", 0, vaResourceFormat::R32G32B32A32_FLOAT, 0, vaVertexInputElementDesc::AppendAlignedElement, vaVertexInputElementDesc::InputClassification::PerVertexData, 0 } );
    inputElements.push_back( { "COLOR", 0, vaResourceFormat::B8G8R8A8_UNORM, 0, vaVertexInputElementDesc::AppendAlignedElement, vaVertexInputElementDesc::InputClassification::PerVertexData, 0 } );
    m_vertexShader->CreateShaderAndILFromFile( L"vaCanvas.hlsl", "vs_5_0", "VS_Canvas3D", inputElements, vaShaderMacroContaner{}, false );
    m_pixelShader->CreateShaderFromFile( L"vaCanvas.hlsl", "ps_5_0", "PS_Canvas3D", vaShaderMacroContaner{}, false );

    // Reset ring-buffer cursors (the original initialized these twice - the
    // duplicate block has been removed).
    m_triVertexBufferCurrentlyUsed = 0;
    m_triVertexBufferStart = 0;
    m_lineVertexBufferCurrentlyUsed = 0;
    m_lineVertexBufferStart = 0;

    // Pre-tessellated sphere used by sphere draw items.
    // NOTE(review): assumes (2, true) means 2 subdivision levels with shared
    // vertices - TODO confirm against vaStandardShapes.h.
    vaStandardShapes::CreateSphere( m_sphereVertices, m_sphereIndices, 2, true );
}
// Destructor: buffers are released by their own destructors (RAII); only the
// ring-buffer cursors are reset here. The commented-out calls are remnants of
// an earlier manual-destroy design.
vaDebugCanvas3D::~vaDebugCanvas3D( )
{
    //m_triVertexBuffer.Destroy( );
    m_triVertexBufferCurrentlyUsed = 0;
    //m_triVertexBufferSizeInVerts = 0;
    m_triVertexBufferStart = 0;
    //m_lineVertexBuffer.Destroy( );
    m_lineVertexBufferCurrentlyUsed = 0;
    //m_lineVertexBufferSizeInVerts = 0;
    m_lineVertexBufferStart = 0;
}
// Empties all queued draw items without rendering them; called at the end of
// every Render() pass.
void vaDebugCanvas3D::CleanQueued( )
{
    m_drawItems.clear( );
    m_drawItemsTransforms.clear( );
    m_drawLines.clear( );
    m_drawLinesTransformed.clear( );
    m_drawTrianglesTransformed.clear( );
}
// Appends one line (two vertices) to the dynamic line vertex buffer, flushing
// first if the pair would not fit.
void vaDebugCanvas3D::RenderLine( vaRenderDeviceContext & renderContext, const vaCameraBase & camera, const CanvasVertex3D & a, const CanvasVertex3D & b )
{
    if( ( m_lineVertexBufferCurrentlyUsed + 2 ) >= m_lineVertexBufferSizeInVerts )
    {
        // Buffer full: draw what is queued, then rewind to the start.
        FlushLines( renderContext, camera );
        m_lineVertexBufferCurrentlyUsed = 0;
        m_lineVertexBufferStart = 0;
    }
    // Classic dynamic-buffer ring usage: DISCARD when rewound to the start,
    // NO_OVERWRITE when appending behind in-flight data.
    vaResourceMapType mapType = ( m_lineVertexBufferCurrentlyUsed == 0 ) ? ( vaResourceMapType::WriteDiscard ) : ( vaResourceMapType::WriteNoOverwrite );
    if( m_lineVertexBuffer.Map( renderContext, mapType ) )
    {
        CanvasVertex3D * vertices = m_lineVertexBuffer.GetMappedData( );
        vertices[m_lineVertexBufferCurrentlyUsed++] = a;
        vertices[m_lineVertexBufferCurrentlyUsed++] = b;
        m_lineVertexBuffer.Unmap( renderContext );
    }
    // NOTE(review): if Map() fails the line is silently dropped.
}
// Appends 'count' pre-transformed lines to the line vertex buffer in a single
// map, flushing first if they would not fit.
// NOTE(review): assumes count*2 <= m_lineVertexBufferSizeInVerts (callers batch
// in groups of 512) - a larger batch would overflow even after the flush.
void vaDebugCanvas3D::RenderLineBatch( vaRenderDeviceContext & renderContext, const vaCameraBase & camera, DrawLineTransformed * itemFrom, size_t count )
{
    if( ( m_lineVertexBufferCurrentlyUsed + count * 2 ) >= m_lineVertexBufferSizeInVerts )
    {
        FlushLines( renderContext, camera );
        m_lineVertexBufferCurrentlyUsed = 0;
        m_lineVertexBufferStart = 0;
    }
    // DISCARD when rewound to the start, NO_OVERWRITE when appending.
    vaResourceMapType mapType = ( m_lineVertexBufferCurrentlyUsed == 0 ) ? ( vaResourceMapType::WriteDiscard ) : ( vaResourceMapType::WriteNoOverwrite );
    if( m_lineVertexBuffer.Map( renderContext, mapType ) )
    {
        CanvasVertex3D * vertices = m_lineVertexBuffer.GetMappedData( );
        for( size_t i = 0; i < count; i++ )
        {
            DrawLineTransformed & line = itemFrom[i];
            vertices[m_lineVertexBufferCurrentlyUsed++] = line.v0;
            vertices[m_lineVertexBufferCurrentlyUsed++] = line.v1;
        }
        m_lineVertexBuffer.Unmap( renderContext );
    }
}
// Issues a draw call for all line vertices appended since the previous flush,
// then advances the flushed-up-to cursor. Depth-tested, no depth writes,
// alpha-blended, no culling.
void vaDebugCanvas3D::FlushLines( vaRenderDeviceContext & renderContext, const vaCameraBase & camera )
{
    int verticesToRender = m_lineVertexBufferCurrentlyUsed - m_lineVertexBufferStart;
    if( verticesToRender > 0 )
    {
        vaGraphicsItem renderItem;
        renderItem.DepthEnable = true;
        renderItem.DepthWriteEnable = false;
        // Reversed-Z cameras invert the depth comparison.
        renderItem.DepthFunc = ( camera.GetUseReversedZ() )?( vaComparisonFunc::GreaterEqual ):( vaComparisonFunc::LessEqual );
        renderItem.CullMode = vaFaceCull::None;
        renderItem.BlendMode = vaBlendMode::AlphaBlend;
        renderItem.VertexShader = m_vertexShader;
        renderItem.VertexBuffer = m_lineVertexBuffer.GetBuffer();
        renderItem.Topology = vaPrimitiveTopology::LineList;
        renderItem.PixelShader = m_pixelShader;
        renderItem.SetDrawSimple( verticesToRender, m_lineVertexBufferStart );
        renderContext.ExecuteSingleItem( renderItem, nullptr );
    }
    m_lineVertexBufferStart = m_lineVertexBufferCurrentlyUsed;
}
// Appends one triangle (three vertices) to the dynamic triangle vertex buffer,
// flushing first if it would not fit. Mirrors RenderLine().
void vaDebugCanvas3D::RenderTriangle( vaRenderDeviceContext & renderContext, const vaCameraBase & camera, const CanvasVertex3D & a, const CanvasVertex3D & b, const CanvasVertex3D & c )
{
    if( ( m_triVertexBufferCurrentlyUsed + 3 ) >= m_triVertexBufferSizeInVerts )
    {
        // Buffer full: draw what is queued, then rewind to the start.
        FlushTriangles( renderContext, camera );
        m_triVertexBufferCurrentlyUsed = 0;
        m_triVertexBufferStart = 0;
    }
    // DISCARD when rewound to the start, NO_OVERWRITE when appending.
    vaResourceMapType mapType = ( m_triVertexBufferCurrentlyUsed == 0 ) ? ( vaResourceMapType::WriteDiscard ) : ( vaResourceMapType::WriteNoOverwrite );
    if( m_triVertexBuffer.Map( renderContext, mapType ) )
    {
        CanvasVertex3D * vertices = m_triVertexBuffer.GetMappedData( );
        vertices[m_triVertexBufferCurrentlyUsed++] = a;
        vertices[m_triVertexBufferCurrentlyUsed++] = b;
        vertices[m_triVertexBufferCurrentlyUsed++] = c;
        m_triVertexBuffer.Unmap( renderContext );
    }
    // NOTE(review): if Map() fails the triangle is silently dropped.
}
// Appends 'count' pre-transformed triangles to the triangle vertex buffer in a
// single map, flushing first if they would not fit.
// NOTE(review): assumes count*3 <= m_triVertexBufferSizeInVerts (callers batch
// in groups of 512) - a larger batch would overflow even after the flush.
void vaDebugCanvas3D::RenderTrianglesBatch( vaRenderDeviceContext & renderContext, const vaCameraBase & camera, DrawTriangleTransformed * itemFrom, size_t count )
{
    if( ( m_triVertexBufferCurrentlyUsed + count * 3 ) >= m_triVertexBufferSizeInVerts )
    {
        FlushTriangles( renderContext, camera );
        m_triVertexBufferCurrentlyUsed = 0;
        m_triVertexBufferStart = 0;
    }
    // DISCARD when rewound to the start, NO_OVERWRITE when appending.
    vaResourceMapType mapType = ( m_triVertexBufferCurrentlyUsed == 0 ) ? ( vaResourceMapType::WriteDiscard ) : ( vaResourceMapType::WriteNoOverwrite );
    if( m_triVertexBuffer.Map( renderContext, mapType ) )
    {
        CanvasVertex3D * vertices = m_triVertexBuffer.GetMappedData( );
        for( size_t i = 0; i < count; i++ )
        {
            DrawTriangleTransformed & triangle = itemFrom[i];
            vertices[m_triVertexBufferCurrentlyUsed++] = triangle.v0;
            vertices[m_triVertexBufferCurrentlyUsed++] = triangle.v1;
            vertices[m_triVertexBufferCurrentlyUsed++] = triangle.v2;
        }
        m_triVertexBuffer.Unmap( renderContext );
    }
}
// Issues a draw call for all triangle vertices appended since the previous
// flush, then advances the flushed-up-to cursor. Same render state as
// FlushLines() but with a triangle-list topology.
void vaDebugCanvas3D::FlushTriangles( vaRenderDeviceContext & renderContext, const vaCameraBase & camera )
{
    int verticesToRender = m_triVertexBufferCurrentlyUsed - m_triVertexBufferStart;
    if( verticesToRender > 0 )
    {
        vaGraphicsItem renderItem;
        renderItem.DepthEnable = true;
        renderItem.DepthWriteEnable = false;
        // Reversed-Z cameras invert the depth comparison.
        renderItem.DepthFunc = ( camera.GetUseReversedZ() )?( vaComparisonFunc::GreaterEqual ):( vaComparisonFunc::LessEqual );
        renderItem.CullMode = vaFaceCull::None;
        renderItem.BlendMode = vaBlendMode::AlphaBlend;
        renderItem.VertexShader = m_vertexShader;
        renderItem.VertexBuffer = m_triVertexBuffer.GetBuffer();
        renderItem.Topology = vaPrimitiveTopology::TriangleList;
        renderItem.PixelShader = m_pixelShader;
        renderItem.SetDrawSimple( verticesToRender, m_triVertexBufferStart );
        renderContext.ExecuteSingleItem( renderItem, nullptr );
    }
    m_triVertexBufferStart = m_triVertexBufferCurrentlyUsed;
}
// Draws all queued debug primitives for the given camera, then clears the
// queues. Triangles are drawn first, then lines. If 'bJustClearData' is true,
// rendering is skipped and only the queues are emptied.
void vaDebugCanvas3D::Render( vaRenderDeviceContext & renderContext, const vaCameraBase & camera, bool bJustClearData )
{
    vaMatrix4x4 viewProj = camera.GetViewMatrix( ) * camera.GetProjMatrix( );
    if( !bJustClearData )
    {
        vaMatrix4x4 tempMat;
        // first do triangles
        for( size_t i = 0; i < m_drawItems.size( ); i++ )
        {
            DrawItem & item = m_drawItems[i];
            // use viewProj by default
            const vaMatrix4x4 * trans = &viewProj;
            // or if the object has its own transform matrix, 'add' it to the viewProj
            if( item.transformIndex != -1 )
            {
                //assert( false ); // this is broken; lines are different from triangles; add InternalDrawLine that accepts already transformed lines...
                vaMatrix4x4 &local = m_drawItemsTransforms[item.transformIndex];
                tempMat = local * viewProj;
                trans = &tempMat;
            }
            // For every item type: a filled primitive is emitted when the brush
            // alpha is non-zero, a wireframe outline when the pen alpha is non-zero.
            if( item.type == Triangle )
            {
                CanvasVertex3D a0( item.v0, item.brushColor, trans );
                CanvasVertex3D a1( item.v1, item.brushColor, trans );
                CanvasVertex3D a2( item.v2, item.brushColor, trans );
                if( ( item.brushColor & 0xFF000000 ) != 0 )
                {
                    InternalDrawTriangle( a0, a1, a2 );
                }
                if( ( item.penColor & 0xFF000000 ) != 0 )
                {
                    a0.color = item.penColor;
                    a1.color = item.penColor;
                    a2.color = item.penColor;
                    InternalDrawLine( a0, a1 );
                    InternalDrawLine( a1, a2 );
                    InternalDrawLine( a2, a0 );
                }
            }
            if( item.type == Box )
            {
                // item.v0/item.v1 carry the box min/max corners; the 'a' ring is
                // the min-z face, the 'b' ring the max-z face.
                const vaVector3 & boxMin = item.v0;
                const vaVector3 & boxMax = item.v1;
                vaVector3 va0( boxMin.x, boxMin.y, boxMin.z );
                vaVector3 va1( boxMax.x, boxMin.y, boxMin.z );
                vaVector3 va2( boxMax.x, boxMax.y, boxMin.z );
                vaVector3 va3( boxMin.x, boxMax.y, boxMin.z );
                vaVector3 vb0( boxMin.x, boxMin.y, boxMax.z );
                vaVector3 vb1( boxMax.x, boxMin.y, boxMax.z );
                vaVector3 vb2( boxMax.x, boxMax.y, boxMax.z );
                vaVector3 vb3( boxMin.x, boxMax.y, boxMax.z );
                CanvasVertex3D a0( va0, item.brushColor, trans );
                CanvasVertex3D a1( va1, item.brushColor, trans );
                CanvasVertex3D a2( va2, item.brushColor, trans );
                CanvasVertex3D a3( va3, item.brushColor, trans );
                CanvasVertex3D b0( vb0, item.brushColor, trans );
                CanvasVertex3D b1( vb1, item.brushColor, trans );
                CanvasVertex3D b2( vb2, item.brushColor, trans );
                CanvasVertex3D b3( vb3, item.brushColor, trans );
                if( ( item.brushColor & 0xFF000000 ) != 0 )
                {
                    // 12 triangles: two per box face.
                    InternalDrawTriangle( a0, a2, a1 );
                    InternalDrawTriangle( a2, a0, a3 );
                    InternalDrawTriangle( b0, b1, b2 );
                    InternalDrawTriangle( b2, b3, b0 );
                    InternalDrawTriangle( a0, a1, b1 );
                    InternalDrawTriangle( b1, b0, a0 );
                    InternalDrawTriangle( a1, a2, b2 );
                    InternalDrawTriangle( b1, a1, b2 );
                    InternalDrawTriangle( a2, a3, b3 );
                    InternalDrawTriangle( b3, b2, a2 );
                    InternalDrawTriangle( a3, a0, b0 );
                    InternalDrawTriangle( b0, b3, a3 );
                }
                if( ( item.penColor & 0xFF000000 ) != 0 )
                {
                    // 12 edges of the box.
                    a0.color = item.penColor;
                    a1.color = item.penColor;
                    a2.color = item.penColor;
                    a3.color = item.penColor;
                    b0.color = item.penColor;
                    b1.color = item.penColor;
                    b2.color = item.penColor;
                    b3.color = item.penColor;
                    InternalDrawLine( a0, a1 );
                    InternalDrawLine( a1, a2 );
                    InternalDrawLine( a2, a3 );
                    InternalDrawLine( a3, a0 );
                    InternalDrawLine( a0, b0 );
                    InternalDrawLine( a1, b1 );
                    InternalDrawLine( a2, b2 );
                    InternalDrawLine( a3, b3 );
                    InternalDrawLine( b0, b1 );
                    InternalDrawLine( b1, b2 );
                    InternalDrawLine( b2, b3 );
                    InternalDrawLine( b3, b0 );
                }
            }
            if( item.type == Sphere )
            {
                // Sphere items store the center in v0 and the radius in v1.x;
                // the pre-tessellated unit sphere is scaled/offset per vertex.
                if( ( item.brushColor & 0xFF000000 ) != 0 )
                {
                    for( size_t j = 0; j < m_sphereIndices.size( ); j += 3 )
                    {
                        vaVector3 sCenter = item.v0;
                        float sRadius = item.v1.x;
                        CanvasVertex3D a0( m_sphereVertices[m_sphereIndices[j + 0]] * sRadius + sCenter, item.brushColor, trans );
                        CanvasVertex3D a1( m_sphereVertices[m_sphereIndices[j + 1]] * sRadius + sCenter, item.brushColor, trans );
                        CanvasVertex3D a2( m_sphereVertices[m_sphereIndices[j + 2]] * sRadius + sCenter, item.brushColor, trans );
                        InternalDrawTriangle( a0, a1, a2 );
                    }
                }
                if( ( item.penColor & 0xFF000000 ) != 0 )
                {
                    for( size_t j = 0; j < m_sphereIndices.size( ); j += 3 )
                    {
                        vaVector3 sCenter = item.v0;
                        float sRadius = item.v1.x;
                        CanvasVertex3D a0( m_sphereVertices[m_sphereIndices[j + 0]] * sRadius + sCenter, item.penColor, trans );
                        CanvasVertex3D a1( m_sphereVertices[m_sphereIndices[j + 1]] * sRadius + sCenter, item.penColor, trans );
                        CanvasVertex3D a2( m_sphereVertices[m_sphereIndices[j + 2]] * sRadius + sCenter, item.penColor, trans );
                        InternalDrawLine( a0, a1 );
                        InternalDrawLine( a1, a2 );
                        InternalDrawLine( a2, a0 );
                    }
                }
            }
        }
        // Submit transformed triangles in fixed-size batches, then flush.
        size_t batchSize = 512;
        for( size_t i = 0; i < m_drawTrianglesTransformed.size( ); i += batchSize )
            RenderTrianglesBatch( renderContext, camera, m_drawTrianglesTransformed.data( ) + i, vaMath::Min( batchSize, m_drawTrianglesTransformed.size( ) - i ) );
        FlushTriangles( renderContext, camera );
        // then add the lines (non-transformed to transformed)
        for( size_t i = 0; i < m_drawLines.size( ); i++ )
            m_drawLinesTransformed.push_back( DrawLineTransformed( CanvasVertex3D(m_drawLines[i].v0, m_drawLines[i].penColor0, &viewProj), CanvasVertex3D(m_drawLines[i].v1, m_drawLines[i].penColor1, &viewProj) ) );
        for( size_t i = 0; i < m_drawLinesTransformed.size( ); i += batchSize )
            RenderLineBatch( renderContext, camera, m_drawLinesTransformed.data( ) + i, vaMath::Min( batchSize, m_drawLinesTransformed.size( ) - i ) );
        FlushLines( renderContext, camera );
    }
    CleanQueued( );
}
|
/**
* WAP to compute the factors of a given integer
*
* Written by: Sudipto Ghosh for University of Delhi
* Date: 16 - 08 - 2019
*/
#include <iostream>
using namespace std;
/**
 * Reads an integer from stdin and prints all of its positive factors.
 * Fixes over the original: the unused variable `factor` is removed, and
 * negative input is handled by factoring the magnitude (the original loop
 * printed nothing for num <= 0). A long long magnitude avoids the
 * -INT_MIN overflow of a plain int negation.
 */
int main()
{
    int num;

    // prompt user and accept num
    cout << "Enter an integer: ";
    cin >> num;

    cout << "Factors: ";

    // Factors are defined on the magnitude; widen first so that
    // negating INT_MIN cannot overflow.
    long long n = num;
    if (n < 0)
        n = -n;

    // iterating over the interval [1, n]; integral values in the range
    // that divide n completely are printed to stdout
    for (long long i = 1; i <= n; i++)
        if (n % i == 0)
            cout << i << " ";

    cout << endl;
    return 0;
}
|
/*******************************************************************\
Module: Pointer Logic
Author: Daniel Kroening, kroening@kroening.com
\*******************************************************************/
#include <cassert>
#include <util/arith_tools.h>
#include <util/std_expr.h>
#include <util/prefix.h>
#include <util/pointer_offset_size.h>
#include "pointer_logic.h"
/*******************************************************************\
Function: pointer_logict::is_dynamic_object
Inputs:
Outputs:
Purpose:
\*******************************************************************/
bool pointer_logict::is_dynamic_object(const exprt &expr) const
{
  // Heap objects carry the "#dynamic" flag on their type.
  if(expr.type().get_bool("#dynamic"))
    return true;

  // Symbols introduced by symbolic execution for dynamic allocations
  // are identified by their "symex_dynamic::" prefix.
  if(expr.id()==ID_symbol)
    return has_prefix(
      id2string(to_symbol_expr(expr).get_identifier()),
      "symex_dynamic::");

  return false;
}
/*******************************************************************\
Function: pointer_logict::get_dynamic_objects
Inputs:
Outputs:
Purpose:
\*******************************************************************/
void pointer_logict::get_dynamic_objects(std::vector<std::size_t> &o) const
{
o.clear();
std::size_t nr=0;
for(pointer_logict::objectst::const_iterator
it=objects.begin();
it!=objects.end();
it++, nr++)
if(is_dynamic_object(*it))
o.push_back(nr);
}
/*******************************************************************\
Function: pointer_logict::add_object
Inputs:
Outputs:
Purpose:
\*******************************************************************/
// Registers an expression as a pointer target and returns its object number.
// Index and member expressions are first stripped down (recursively) to the
// underlying compound object, so &a[i] and &a.x map to the same object as &a.
std::size_t pointer_logict::add_object(const exprt &expr)
{
  // remove any index/member
  if(expr.id()==ID_index)
  {
    assert(expr.operands().size()==2);
    return add_object(expr.op0());
  }
  else if(expr.id()==ID_member)
  {
    assert(expr.operands().size()==1);
    return add_object(expr.op0());
  }

  // objects.number() assigns a fresh number or returns the existing one.
  return objects.number(expr);
}
/*******************************************************************\
Function: pointer_logict::pointer_expr
Inputs:
Outputs:
Purpose:
\*******************************************************************/
exprt pointer_logict::pointer_expr(
  std::size_t object,
  const typet &type) const
{
  // Convenience overload: the given object with a zero offset.
  return pointer_expr(pointert(object, 0), type);
}
/*******************************************************************\
Function: pointer_logict::pointer_expr
Inputs:
Outputs:
Purpose:
\*******************************************************************/
// Builds a source-level expression of the given pointer type for an abstract
// (object, offset) pair: NULL / NULL+offset for the null object, an "INVALID"
// constant for invalid or unknown objects, and otherwise an address-of (or
// reference-to) expression pointing into the numbered object at the offset.
exprt pointer_logict::pointer_expr(
  const pointert &pointer,
  const typet &type) const
{
  if(pointer.object==null_object) // NULL?
  {
    if(pointer.offset==0)
    {
      constant_exprt result(type);
      result.set_value(ID_NULL);
      return result;
    }
    else
    {
      // NULL with a non-zero offset is rendered as NULL + offset.
      constant_exprt null(type);
      null.set_value(ID_NULL);
      return plus_exprt(null,
        from_integer(pointer.offset, integer_typet()));
    }
  }
  else if(pointer.object==invalid_object) // INVALID?
  {
    constant_exprt result(type);
    result.set_value("INVALID");
    return result;
  }

  // Object number out of range: tag the number into the INVALID constant
  // so the failure is diagnosable.
  if(pointer.object>=objects.size())
  {
    constant_exprt result(type);
    result.set_value("INVALID-"+std::to_string(pointer.object));
    return result;
  }

  const exprt &object_expr=objects[pointer.object];

  // Descend into arrays/structs to find the member the offset points at.
  exprt deep_object=object_rec(pointer.offset, type, object_expr);

  exprt result;

  if(type.id()==ID_pointer)
    result=exprt(ID_address_of, type);
  else if(type.id()==ID_reference)
    result=exprt("reference_to", type);
  else
    assert(0); // only pointer/reference types are expected here

  result.copy_to_operands(deep_object);
  return result;
}
/*******************************************************************\
Function: pointer_logict::object_rec
Inputs:
Outputs:
Purpose:
\*******************************************************************/
// Recursively descends into 'src' following a byte offset: for arrays it
// selects the element the offset falls into, for structs the component whose
// byte range contains the offset, recursing with the remaining offset.
// Unions and scalars stop the descent and return 'src' unchanged.
exprt pointer_logict::object_rec(
  const mp_integer &offset,
  const typet &pointer_type,
  const exprt &src) const
{
  if(src.type().id()==ID_array)
  {
    mp_integer size=
      pointer_offset_size(src.type().subtype(), ns);

    // Zero-sized element: cannot divide, give up at this level.
    if(size==0) return src;

    mp_integer index=offset/size;
    mp_integer rest=offset%size;
    if(rest<0) rest=-rest;

    index_exprt tmp(src.type().subtype());
    tmp.index()=from_integer(index, typet(ID_integer));
    tmp.array()=src;

    // Recurse with the within-element remainder.
    return object_rec(rest, pointer_type, tmp);
  }
  else if(src.type().id()==ID_struct)
  {
    const struct_typet::componentst &components=
      to_struct_type(src.type()).components();

    if(offset<0) return src;

    // Scan components in declaration order, accumulating their sizes until
    // the component containing 'offset' is found.
    mp_integer current_offset=0;

    for(struct_typet::componentst::const_iterator
        it=components.begin();
        it!=components.end();
        it++)
    {
      assert(offset>=current_offset);

      const typet &subtype=it->type();

      mp_integer sub_size=pointer_offset_size(subtype, ns);
      mp_integer new_offset=current_offset+sub_size;

      if(new_offset>offset)
      {
        // found it
        member_exprt tmp(subtype);
        tmp.set_component_name(it->get_name());
        tmp.op0()=src;

        return object_rec(
          offset-current_offset, pointer_type, tmp);
      }

      assert(new_offset<=offset);
      current_offset=new_offset;
      assert(current_offset<=offset);
    }

    // Offset past the last component: return the struct itself.
    return src;
  }
  else if(src.type().id()==ID_union)
    return src; // unions are not descended into

  return src;
}
/*******************************************************************\
Function: pointer_logict::pointer_logict
Inputs:
Outputs:
Purpose:
\*******************************************************************/
// Constructor: pre-registers the two distinguished objects so that the NULL
// object is always number 0 and the INVALID object always number 1.
pointer_logict::pointer_logict(const namespacet &_ns):ns(_ns)
{
  // add NULL
  null_object=objects.number(exprt(ID_NULL));
  assert(null_object==0);

  // add INVALID
  invalid_object=objects.number(exprt("INVALID"));
}
/*******************************************************************\
Function: pointer_logict::~pointer_logict
Inputs:
Outputs:
Purpose:
\*******************************************************************/
// Destructor: nothing to release; all members clean up themselves.
pointer_logict::~pointer_logict()
{
}
|
#ifndef _BLASR_TITLE_TABLE_HPP_
#define _BLASR_TITLE_TABLE_HPP_
#include <cstring>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <pbdata/utils.hpp>
// Table of C-string titles with index-based lookup; used to map sequence
// titles to integer indices and back. Storage is a raw char** owned by the
// table and released via Free().
class TitleTable
{
public:
    char **table;       // array of heap-allocated, NUL-terminated titles
    int tableLength;    // number of entries in 'table'

    TitleTable();
    ~TitleTable();

    // Deep-copies nSrc strings from src into this table.
    void Copy(char **src, int nSrc);

    // Serialization: write the table to a named file or an open stream.
    void Write(std::string &name);
    void Write(std::ofstream &out);

    // Deserialization: read the table from a named file or an open stream.
    void Read(std::string &inFileName);
    void CopyFromVector(std::vector<std::string> &titles);
    void Read(std::ifstream &in);

    // Releases all owned strings and the table array itself.
    void Free();

    // Finds 'title' in the table; on success stores its position in 'index'.
    bool Lookup(std::string title, int &index);

    // Replaces each entry with its numeric index rendered as a string.
    static void ResetTableToIntegers(char **table, int *tableLengths, int nTable);
};
#endif
|
/*
* Copyright (c) 2011-2014, University of Delaware
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include "MSchedPolicy.h"
#include "MicroScheduler.h"
#include "TPScheduler.h"
#include "TPSchedPolicy.h"
#include "Codelet.h"
#ifdef TRACE
#include "getClock.h"
#endif
namespace darts
{
// Standard micro-scheduler loop: drain the local codelet queue, firing each
// codelet in turn; when the queue runs dry, back off with a linearly growing
// usleep until new work appears or the scheduler is shut down.
void
MicroStandard::policy(void)
{
    // Back-off sleep: reset to 'range' whenever work is found, grows by
    // 'range' after every empty poll.
    useconds_t usecs = 1,
        range = 1;
#ifdef TRACE
    addRecord(getTime(), (void*) &MicroStandard::policy);
#endif
    while (alive())
    {
        Codelet * tempCodelet = popCodelet();
        while (tempCodelet)
        {
            usecs = range; // reset sleep time
            ThreadedProcedure * checkTP = tempCodelet->getTP();
            //Does our codelet have a TP (not final codelet)
            //If yes then does that TP have a parent (means not a serial loop)
            //If yes then delete the TP
            //Else do not delete the TP
            bool deleteTP = (checkTP) ? checkTP->checkParent() : false;
#ifdef TRACE
            addRecord(getTime(), tempCodelet->returnFunct());
#endif
#ifdef COUNT
            if(getAffinity()) getAffinity()->startCounters(getID());
#endif
            tempCodelet->fire();
#ifdef COUNT
            if(getAffinity()) getAffinity()->incrementCounters(getID());
#endif
#ifdef TRACE
            addRecord(getTime(), (void*) &MicroStandard::policy);
#endif
            if (deleteTP)
            {
                // Reclaim the TP once its reference count hits zero.
                if (checkTP->decRef())
                    delete checkTP;
            }
            tempCodelet = popCodelet();
        }
        if (!tempCodelet) {
            usleep(usecs);
            usecs += range;
        }
    }
}
// Static-policy micro-scheduler loop: identical structure to
// MicroStandard::policy - drain the local queue, fire codelets, reclaim
// finished TPs, and back off with a linearly growing sleep when idle.
void
MicroStatic::policy(void)
{
    // Back-off sleep: reset to 'range' on work, grows by 'range' when idle.
    useconds_t usecs = 1,
        range = 1;
#ifdef TRACE
    addRecord(getTime(), (void*) &MicroStatic::policy);
#endif
    while (alive())
    {
        Codelet * tempCodelet = popCodelet();
        while (tempCodelet)
        {
            usecs = range; // reset sleep time
            ThreadedProcedure * checkTP = tempCodelet->getTP();
            //Does our codelet have a TP (not final codelet)
            //If yes then does that TP have a parent (means not a serial loop)
            //If yes then delete the TP
            //Else do not delete the TP
            bool deleteTP = (checkTP) ? checkTP->checkParent() : false;
#ifdef TRACE
            addRecord(getTime(), tempCodelet->returnFunct());
#endif
#ifdef COUNT
            if(getAffinity()) getAffinity()->startCounters(getID());
#endif
            tempCodelet->fire();
#ifdef COUNT
            if(getAffinity()) getAffinity()->incrementCounters(getID());
#endif
#ifdef TRACE
            addRecord(getTime(), (void*) &MicroStatic::policy);
#endif
            if (deleteTP)
            {
                // Reclaim the TP once its reference count hits zero.
                if (checkTP->decRef())
                    delete checkTP;
            }
            tempCodelet = popCodelet();
        }
        if (!tempCodelet) {
            usleep(usecs);
            usecs += range;
        }
    }
}
// Dynamic-policy micro-scheduler loop: unlike the standard/static variants,
// codelets are pulled from the parent TP scheduler's queue rather than the
// local one; otherwise the fire/reclaim/back-off structure is the same.
void
MicroDynamic::policy()
{
    // Back-off sleep: reset to 'range' on work, grows by 'range' when idle.
    useconds_t usecs = 1,
        range = 1;
#ifdef TRACE
    addRecord(getTime(), (void*) &MicroDynamic::policy);
#endif
    while (alive())
    {
        // Re-fetch the parent scheduler each outer iteration.
        TPScheduler * myTPSched = getParentScheduler();
        Codelet * tempCodelet = myTPSched->popCodelet();
        while (tempCodelet)
        {
            usecs = range; // reset sleep time
            ThreadedProcedure * checkTP = tempCodelet->getTP();
            //Does our codelet have a TP (not final codelet)
            //If yes then does that TP have a parent (means not a serial loop)
            //If yes then delete the TP
            //Else do not delete the TP
            bool deleteTP = (checkTP) ? checkTP->checkParent() : false;
#ifdef TRACE
            addRecord(getTime(), tempCodelet->returnFunct());
#endif
#ifdef COUNT
            if(getAffinity()) getAffinity()->startCounters(getID());
#endif
            tempCodelet->fire();
#ifdef COUNT
            if(getAffinity()) getAffinity()->incrementCounters(getID());
#endif
#ifdef TRACE
            addRecord(getTime(), (void*) &MicroDynamic::policy);
#endif
            if (deleteTP)
            {
                // Reclaim the TP once its reference count hits zero.
                if (checkTP->decRef())
                    delete checkTP;
            }
            tempCodelet = myTPSched->popCodelet();
        }
        if (!tempCodelet) {
            usleep(usecs);
            usecs += range;
        }
    }
}
// Work-stealing micro-scheduler loop: tries the local queue first and falls
// back to stealing a codelet from another scheduler; fires at most one
// codelet per outer iteration, backing off with a growing sleep when neither
// source yields work.
void
MicroSteal::policy()
{
    // Back-off sleep: reset to 'range' on work, grows by 'range' when idle.
    useconds_t usecs = 1,
        range = 1;
#ifdef TRACE
    addRecord(getTime(), (void*) &MicroSteal::policy);
#endif
    while (alive())
    {
        Codelet * tempCodelet = popCodelet();
        if(!tempCodelet)
            tempCodelet = stealCodelet();
        if(tempCodelet)
        {
            usecs = range; // reset sleep time
            ThreadedProcedure * checkTP = tempCodelet->getTP();
            // Only TPs with a parent may be reclaimed after the codelet fires.
            bool deleteTP = (checkTP) ? checkTP->checkParent() : false;
#ifdef TRACE
            addRecord(getTime(), tempCodelet->returnFunct());
#endif
#ifdef COUNT
            if(getAffinity()) getAffinity()->startCounters(getID());
#endif
            tempCodelet->fire();
#ifdef COUNT
            if(getAffinity()) getAffinity()->incrementCounters(getID());
#endif
#ifdef TRACE
            addRecord(getTime(), (void*) &MicroSteal::policy);
#endif
            if (deleteTP)
            {
                // Reclaim the TP once its reference count hits zero.
                if (checkTP->decRef())
                    delete checkTP;
            }
        } else {
            usleep(usecs);
            usecs += range;
        }
    }
}
/**
 * Factory for micro-scheduler policies: returns a freshly allocated scheduler
 * matching the requested policy type, or NULL for an unknown type. The caller
 * owns the returned object.
 *
 * The original chained the `else` onto only the last `if`, which worked but
 * read as if it covered the whole chain; each branch now returns explicitly
 * with a single unconditional fallback at the end.
 */
MScheduler *
MScheduler::create(unsigned int type)
{
    if (type==MCSTANDARD) return new MicroStandard;
    if (type==MCSTATIC) return new MicroStatic;
    if (type==MCDYNAMIC) return new MicroDynamic;
    if (type==MCSTEAL) return new MicroSteal;
    return NULL;
}
}
|
#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file
#include "catch2.hpp"
#include <iostream>
#include "LinkedList.h"
// Exercises push_back/push_front/pop_back/pop_front, empty(), and forward
// iteration order.
TEST_CASE("Create List, push&pop") {
    LinkedList testList = LinkedList();
    // Push two then pop from both ends -> list must be empty again.
    testList.push_back(5);
    testList.push_back(6);
    testList.pop_back();
    testList.pop_front();
    REQUIRE(testList.empty() == 1);
    // Rebuild as {1, 2, 3} and verify iteration visits elements in order.
    testList.push_back(2);
    testList.push_back(3);
    testList.push_front(1);
    char rightList[3] = {1, 2, 3};
    char pos = 0;
    for (LinkedList::iterator i = testList.begin(); i != testList.end(); i++){
        REQUIRE((*i) == rightList[pos]);
        pos++;
    }
}
// Exercises remove() (all occurrences of a value), erase() via iterator,
// size(), clear(), and empty().
TEST_CASE("Clean up") {
    LinkedList testList = LinkedList();
    // Build {8 7 6 6 5} by front-pushing, then remove both 6s.
    testList.push_front(5);
    testList.push_front(6);
    testList.push_front(6);
    testList.push_front(7);
    testList.push_front(8);
    testList.remove(6);
    REQUIRE(testList.size() == 3); // {8 7 5}
    // Erase the middle element (7); erase() returns the next iterator.
    LinkedList::iterator testIterator = testList.begin();
    testIterator++;
    testIterator = testList.erase(testIterator);
    char rightList[2] = {8, 5};
    char pos = 0;
    for (LinkedList::iterator i = testList.begin(); i != testList.end(); i++){
        REQUIRE((*i) == rightList[pos]);
        pos++;
    }
    testList.clear();
    REQUIRE(testList.empty());
}
// Exercises insert() before a given iterator position: inserting 0 before
// begin() must yield {0, 1, 2}.
TEST_CASE("Insert"){
    LinkedList testList = LinkedList();
    testList.push_front(1);
    testList.push_back(2);
    LinkedList::iterator i = testList.begin();
    testList.insert(i,0);
    char rightList[3] = {0, 1, 2};
    char pos = 0;
    for (LinkedList::iterator i = testList.begin(); i != testList.end(); i++){
        REQUIRE((*i) == rightList[pos]);
        pos++;
    }
}
// Exercises copy assignment and the equality/inequality operators: two lists
// with identical contents compare equal; diverging then reassigning restores
// equality.
TEST_CASE (" = & == "){
    LinkedList firstList = LinkedList();
    LinkedList secondList = LinkedList();
    // Both lists hold {1, 2, 3}.
    firstList.push_front(1);
    firstList.push_back(2);
    firstList.push_back(3);
    secondList.push_front(1);
    secondList.push_back(2);
    secondList.push_back(3);
    REQUIRE(firstList == secondList);
    // Diverge: firstList becomes {0, 1, 2, 3}.
    firstList.push_front(0);
    REQUIRE(firstList != secondList);
    // Assignment must make them equal again.
    secondList = firstList;
    REQUIRE(firstList == secondList);
}
|
#pragma once
#include "common.hpp"
#include "float3.hpp"
namespace hlml {

// 3x3 single-precision matrix stored column-major as three float3 columns.
struct float3x3 {
  float3 c0, c1, c2;

  // Zero matrix.
  HLML_INLINEF float3x3()
    : c0()
    , c1()
    , c2() {}
  // Loads nine consecutive floats, three per column.
  HLML_INLINEF explicit float3x3( const f32* p )
    : c0( p )
    , c1( p + 3 )
    , c2( p + 6 ) {}
  // Component-wise construction, column by column.
  HLML_INLINEF float3x3( f32 x0, f32 y0, f32 z0, f32 x1, f32 y1, f32 z1, f32 x2, f32 y2, f32 z2 )
    : c0( x0, y0, z0 )
    , c1( x1, y1, z1 )
    , c2( x2, y2, z2 ) {}
  HLML_INLINEF float3x3( float3 col0, float3 col1, float3 col2 )
    : c0( col0 )
    , c1( col1 )
    , c2( col2 ) {}

  // Returns the identity matrix (cached in a function-local static).
  HLML_INLINEF static float3x3 identity() {
    static float3x3 i( { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f } );
    return i;
  }

  HLML_INLINEF float3x3& operator=( float3x3 rhs ) {
    c0 = rhs.c0;
    c1 = rhs.c1;
    c2 = rhs.c2;
    return *this;
  }
  // Broadcast-assign: every component of every column becomes s.
  HLML_INLINEF float3x3& operator=( f32 s ) {
    c0 = c1 = c2 = float3( s );
    return *this;
  }
};

// Transpose via SIMD shuffles of the three column registers.
HLML_INLINEF float3x3 transpose( float3x3 m ) {
  float3 t0 = m.c0, t1 = m.c1, t2 = m.c2, vT( funcs::azbzawbw( t0.m, t1.m ) );
  t0.m = funcs::axbxayby( t0.m, t1.m );
  m.c0.m = funcs::axaybxbw( t0.m, t2.m );
  m.c1.m = funcs::azawbybw( t0.m, t2.m );
  m.c2.m = funcs::axaybzbw( vT.m, t2.m );
  return m;
}
// Inverse via Cramer's rule: the columns of the inverse-transpose are the
// pairwise cross products of the columns scaled by 1/det (det is the scalar
// triple product dot(c0, cross(c1, c2))); a final transpose yields the inverse.
// NOTE(review): no singularity check - a zero determinant produces inf/nan.
HLML_INLINEF float3x3 inverse( float3x3 m ) {
  float3 t0 = m.c0, t1 = m.c1, t2 = m.c2;
  float3 dets = rcp( dotv( t0, cross( t1, t2 ) ) );
  dets.m = funcs::axaxazaz( dets.m );
  m.c0 = cross( t1, t2 ) * dets;
  m.c1 = cross( t2, t0 ) * dets;
  m.c2 = cross( t0, t1 ) * dets;
  return transpose( m );
}

// Component-wise comparisons: equal only when every component of every column matches.
HLML_INLINEF b8 operator==( float3x3 lhs, float3x3 rhs ) { return all( lhs.c0 == rhs.c0 ) && all( lhs.c1 == rhs.c1 ) && all( lhs.c2 == rhs.c2 ); }
HLML_INLINEF b8 operator!=( float3x3 lhs, float3x3 rhs ) { return !( lhs == rhs ); }

// Unary plus is a no-op.
HLML_INLINEF float3x3 operator+( float3x3 m ) { return m; }
// Component-wise addition; scalar overloads broadcast s to every component.
HLML_INLINEF float3x3 operator+( float3x3 a, float3x3 b ) {
  a.c0 += b.c0;
  a.c1 += b.c1;
  a.c2 += b.c2;
  return a;
}
HLML_INLINEF float3x3 operator+( float3x3 a, f32 s ) {
  a.c0 += s;
  a.c1 += s;
  a.c2 += s;
  return a;
}
HLML_INLINEF float3x3 operator+( f32 s, float3x3 a ) { return a + s; }
HLML_INLINEF float3x3& operator+=( float3x3& a, float3x3 b ) {
  a = a + b;
  return a;
}
HLML_INLINEF float3x3& operator+=( float3x3& a, f32 s ) {
  a = a + s;
  return a;
}

// Unary negation and component-wise subtraction (scalar forms broadcast).
HLML_INLINEF float3x3 operator-( float3x3 m ) {
  m.c0 = -m.c0;
  m.c1 = -m.c1;
  m.c2 = -m.c2;
  return m;
}
HLML_INLINEF float3x3 operator-( float3x3 a, float3x3 b ) {
  a.c0 -= b.c0;
  a.c1 -= b.c1;
  a.c2 -= b.c2;
  return a;
}
HLML_INLINEF float3x3 operator-( float3x3 a, f32 s ) {
  float3 tmp( s );
  return a - float3x3( tmp, tmp, tmp );
}
HLML_INLINEF float3x3 operator-( f32 s, float3x3 a ) {
  float3 tmp( s );
  return float3x3( tmp, tmp, tmp ) - a;
}
HLML_INLINEF float3x3& operator-=( float3x3& a, float3x3 b ) {
  a = a - b;
  return a;
}
HLML_INLINEF float3x3& operator-=( float3x3& a, f32 s ) {
  a = a - s;
  return a;
}

// Matrix product: each result column is a linear combination of a's columns
// weighted by the corresponding column of b (column-major convention).
HLML_INLINEF float3x3 operator*( float3x3 a, float3x3 b ) {
  float3 lc0 = a.c0, lc1 = a.c1, lc2 = a.c2, rc0 = b.c0, rc1 = b.c1, rc2 = b.c2;
  a.c0 = lc0 * rc0.xxx() + lc1 * rc0.yyy() + lc2 * rc0.zzz();
  a.c1 = lc0 * rc1.xxx() + lc1 * rc1.yyy() + lc2 * rc1.zzz();
  a.c2 = lc0 * rc2.xxx() + lc1 * rc2.yyy() + lc2 * rc2.zzz();
  return a;
}
// Uniform scale of all columns.
HLML_INLINEF float3x3 operator*( float3x3 a, f32 s ) {
  a.c0 *= s;
  a.c1 *= s;
  a.c2 *= s;
  return a;
}
HLML_INLINEF float3x3 operator*( f32 s, float3x3 a ) { return a * s; }
// Matrix * column vector.
HLML_INLINEF float3 operator*( float3x3 a, float3 v ) { return v.xxx() * a.c0 + v.yyy() * a.c1 + v.zzz() * a.c2; }
// Row vector * matrix: dot the vector with each column, then shuffle the
// three scalar results into one register.
HLML_INLINEF float3 operator*( float3 v, float3x3 a ) {
  float3 xxx( dotv( v, a.c0 ) ), yyy( dotv( v, a.c1 ) ), zzz( dotv( v, a.c2 ) );
  float3 xyxy( funcs::axbxayby( xxx.m, yyy.m ) ), xyz0( funcs::bzbwazaw( zzz.m, xyxy.m ) );
  return xyz0;
}
HLML_INLINEF float3x3& operator*=( float3x3& a, float3x3 b ) {
  a = a * b;
  return a;
}
HLML_INLINEF float3x3& operator*=( float3x3& a, f32 s ) {
  a = a * s;
  return a;
}
} // namespace hlml
|
/**
* @file load_save_test.cpp
* @author Ryan Curtin
*
* Tests for data::Load() and data::Save().
*
* mlpack is free software; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
* 3-clause BSD license along with mlpack. If not, see
* http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/
#include <sstream>
#include <utility>

#include <mlpack/core.hpp>
#include <mlpack/core/data/load_arff.hpp>
#include <mlpack/core/data/map_policies/missing_policy.hpp>

#include <boost/test/unit_test.hpp>
#include "test_tools.hpp"
using namespace mlpack;
using namespace mlpack::data;
using namespace std;
BOOST_AUTO_TEST_SUITE(LoadSaveTest);
/**
* Make sure failure occurs when no extension given.
*/
BOOST_AUTO_TEST_CASE(NoExtensionLoad)
{
  // Without an extension, the loader cannot pick a format and must fail.
  arma::mat out;
  BOOST_REQUIRE_EQUAL(data::Load("noextension", out), false);
}
/**
* Make sure failure occurs when no extension given.
*/
BOOST_AUTO_TEST_CASE(NoExtensionSave)
{
  // Without an extension, the saver cannot pick a format and must fail.
  arma::mat out;
  BOOST_REQUIRE_EQUAL(data::Save("noextension", out), false);
}
/**
* Make sure load fails if the file does not exist.
*/
BOOST_AUTO_TEST_CASE(NotExistLoad)
{
  // Loading a file that does not exist must report failure, not throw.
  arma::mat out;
  BOOST_REQUIRE_EQUAL(data::Load("nonexistentfile_______________.csv", out),
      false);
}
/**
* Make sure a CSV is loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadCSVTest)
{
  // Write a 2x4 comma-separated file.
  ofstream f("test_file.csv");
  f << "1, 2, 3, 4" << endl;
  f << "5, 6, 7, 8" << endl;
  f.close();

  // Loading transposes by default, so points become columns.
  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure a TSV is loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadTSVTest)
{
  // Tab-separated data deliberately written to a .csv-named file; the loader
  // should still handle the tab separator.
  ofstream f("test_file.csv");
  f << "1\t2\t3\t4" << endl;
  f << "5\t6\t7\t8" << endl;
  f.close();

  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Test TSV loading with .tsv extension.
*/
BOOST_AUTO_TEST_CASE(LoadTSVExtensionTest)
{
  // Tab-separated data with the canonical .tsv extension.
  ofstream f("test_file.tsv");
  f << "1\t2\t3\t4" << endl;
  f << "5\t6\t7\t8" << endl;
  f.close();

  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.tsv", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.tsv");
}
/**
* Make sure a CSV is saved correctly.
*/
BOOST_AUTO_TEST_CASE(SaveCSVTest)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";
  BOOST_REQUIRE_EQUAL(data::Save("test_file.csv", test), true);

  // Reload and verify the round trip reproduces the matrix.
  arma::mat reloaded;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", reloaded), true);
  BOOST_REQUIRE_EQUAL(reloaded.n_rows, 4);
  BOOST_REQUIRE_EQUAL(reloaded.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(reloaded[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure CSVs can be loaded in transposed form.
*/
BOOST_AUTO_TEST_CASE(LoadTransposedCSVTest)
{
  ofstream f("test_file.csv");
  f << "1, 2, 3, 4" << endl;
  f << "5, 6, 7, 8" << endl;
  f.close();

  // Explicitly request transposition (fatal = false, transpose = true).
  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test, false, true), true);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure ColVec can be loaded.
*/
BOOST_AUTO_TEST_CASE(LoadColVecCSVTest)
{
  // Write eight values, one per line.
  ofstream f("test_file.csv");
  for (size_t i = 0; i < 8; ++i)
    f << i << endl;
  f.close();

  // A single column loads directly into a colvec.
  arma::colvec test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test, false), true);
  BOOST_REQUIRE_EQUAL(test.n_cols, 1);
  BOOST_REQUIRE_EQUAL(test.n_rows, 8);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) i, 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure we can load a transposed column vector.
*/
BOOST_AUTO_TEST_CASE(LoadColVecTransposedCSVTest)
{
  // Write nine values on a single comma-separated line.
  ofstream f("test_file.csv");
  for (size_t i = 0; i < 8; ++i)
    f << i << ", ";
  f << "8" << endl;
  f.close();

  // A single row should be transposed into a column vector.
  arma::colvec test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test, false), true);
  BOOST_REQUIRE_EQUAL(test.n_cols, 1);
  BOOST_REQUIRE_EQUAL(test.n_rows, 9);
  for (size_t i = 0; i < 9; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) i, 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure besides numeric data "quoted strings" or
* 'quoted strings' in csv files are loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadQuotedStringInCSVTest)
{
  ofstream f("test_file.csv");
  f << "1,field 2,field 3" << endl;
  f << "2,\"field 2, with comma\",field 3" << endl;
  f << "3,field 2 with \"embedded quote\",field 3" << endl;
  f << "4, field 2 with embedded \\ ,field 3" << endl;
  f << "5, ,field 3" << endl;
  f.close();

  // Expected strings for the second (categorical) dimension, in file order.
  const vector<string> elements = { "field 2", "\"field 2, with comma\"",
      "field 2 with \"embedded quote\"", "field 2 with embedded \\", "" };

  arma::mat test;
  data::DatasetInfo info;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test, info, false, true),
      true);

  BOOST_REQUIRE_EQUAL(test.n_rows, 3);
  BOOST_REQUIRE_EQUAL(test.n_cols, 5);
  BOOST_REQUIRE_EQUAL(info.Dimensionality(), 3);

  // Dimension 0 is numeric; dimensions 1 and 2 hold mapped strings.
  for (size_t i = 0; i < 5; ++i)
  {
    BOOST_REQUIRE_CLOSE(test.at(0, i), (double) (i + 1), 1e-5);
    BOOST_REQUIRE_EQUAL(info.UnmapString(test.at(1, i), 1, 0), elements[i]);
    BOOST_REQUIRE_EQUAL(info.UnmapString(test.at(2, i), 2, 0), "field 3");
  }

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure besides numeric data "quoted strings" or
* 'quoted strings' in txt files are loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadQuotedStringInTXTTest)
{
  ofstream f("test_file.txt");
  f << "1 field2 field3" << endl;
  f << "2 \"field 2 with space\" field3" << endl;
  f.close();

  // Expected strings for the second (categorical) dimension, in file order.
  const vector<string> elements = { "field2", "\"field 2 with space\"" };

  arma::mat test;
  data::DatasetInfo info;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.txt", test, info, false, true),
      true);

  BOOST_REQUIRE_EQUAL(test.n_rows, 3);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  BOOST_REQUIRE_EQUAL(info.Dimensionality(), 3);

  // Dimension 0 is numeric; dimensions 1 and 2 hold mapped strings.
  for (size_t i = 0; i < 2; ++i)
  {
    BOOST_REQUIRE_CLOSE(test.at(0, i), (double) (i + 1), 1e-5);
    BOOST_REQUIRE_EQUAL(info.UnmapString(test.at(1, i), 1, 0), elements[i]);
    BOOST_REQUIRE_EQUAL(info.UnmapString(test.at(2, i), 2, 0), "field3");
  }

  // Remove the file.
  remove("test_file.txt");
}
/**
* Make sure besides numeric data "quoted strings" or
* 'quoted strings' in tsv files are loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadQuotedStringInTSVTest)
{
  ofstream f("test_file.tsv");
  f << "1\tfield 2\tfield 3" << endl;
  f << "2\t\"field 2\t with tab\"\tfield 3" << endl;
  f << "3\tfield 2 with \"embedded quote\"\tfield 3" << endl;
  f << "4\t field 2 with embedded \\ \tfield 3" << endl;
  f << "5\t \tfield 3" << endl;
  f.close();

  // Expected strings for the second (categorical) dimension, in file order.
  const vector<string> elements = { "field 2", "\"field 2\t with tab\"",
      "field 2 with \"embedded quote\"", "field 2 with embedded \\", "" };

  arma::mat test;
  data::DatasetInfo info;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.tsv", test, info, false, true),
      true);

  BOOST_REQUIRE_EQUAL(test.n_rows, 3);
  BOOST_REQUIRE_EQUAL(test.n_cols, 5);
  BOOST_REQUIRE_EQUAL(info.Dimensionality(), 3);

  // Dimension 0 is numeric; dimensions 1 and 2 hold mapped strings.
  for (size_t i = 0; i < 5; ++i)
  {
    BOOST_REQUIRE_CLOSE(test.at(0, i), (double) (i + 1), 1e-5);
    BOOST_REQUIRE_EQUAL(info.UnmapString(test.at(1, i), 1, 0), elements[i]);
    BOOST_REQUIRE_EQUAL(info.UnmapString(test.at(2, i), 2, 0), "field 3");
  }

  // Remove the file.
  remove("test_file.tsv");
}
/**
* Make sure Load() throws an exception when trying to load a matrix into a
* colvec or rowvec.
*/
BOOST_AUTO_TEST_CASE(LoadMatinVec)
{
  ofstream f("test_file.csv");
  f << "1, 2" << endl;
  f << "3, 4" << endl;
  f.close();

  // Loading a 2x2 matrix into a vector is a size mismatch: Log::Fatal is
  // invoked, which throws std::runtime_error.
  Log::Fatal.ignoreInput = true;

  arma::vec coltest;
  BOOST_REQUIRE_THROW(data::Load("test_file.csv", coltest, true),
      std::runtime_error);

  arma::rowvec rowtest;
  BOOST_REQUIRE_THROW(data::Load("test_file.csv", rowtest, true),
      std::runtime_error);

  Log::Fatal.ignoreInput = false;
  remove("test_file.csv");
}
/**
* Make sure that rowvecs can be loaded successfully.
*/
BOOST_AUTO_TEST_CASE(LoadRowVecCSVTest)
{
  // Write eight values on a single comma-separated line.
  ofstream f("test_file.csv");
  for (size_t i = 0; i < 7; ++i)
    f << i << ", ";
  f << "7" << endl;
  f.close();

  // A single line loads directly into a rowvec.
  arma::rowvec test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test, false), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 1);
  BOOST_REQUIRE_EQUAL(test.n_cols, 8);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) i, 1e-5);

  remove("test_file.csv");
}
/**
* Make sure that we can load transposed row vectors.
*/
BOOST_AUTO_TEST_CASE(LoadRowVecTransposedCSVTest)
{
  // Write eight values, one per line.
  ofstream f("test_file.csv");
  for (size_t i = 0; i < 8; ++i)
    f << i << endl;
  f.close();

  // A single column should be transposed into a row vector.
  arma::rowvec test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test, false), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 1);
  BOOST_REQUIRE_EQUAL(test.n_cols, 8);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) i, 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure TSVs can be loaded in transposed form.
*/
BOOST_AUTO_TEST_CASE(LoadTransposedTSVTest)
{
  // Tab-separated data deliberately written to a .csv-named file.
  ofstream f("test_file.csv");
  f << "1\t2\t3\t4" << endl;
  f << "5\t6\t7\t8" << endl;
  f.close();

  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test, false, true), true);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Check TSV loading with .tsv extension.
*/
BOOST_AUTO_TEST_CASE(LoadTransposedTSVExtensionTest)
{
  // Tab-separated data with the canonical .tsv extension.
  ofstream f("test_file.tsv");
  f << "1\t2\t3\t4" << endl;
  f << "5\t6\t7\t8" << endl;
  f.close();

  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.tsv", test, false, true), true);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.tsv");
}
/**
* Make sure CSVs can be loaded in non-transposed form.
*/
BOOST_AUTO_TEST_CASE(LoadNonTransposedCSVTest)
{
  ofstream f("test_file.csv");
  f << "1, 3, 5, 7" << endl;
  f << "2, 4, 6, 8" << endl;
  f.close();

  // With transpose disabled, the file layout is preserved.
  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", test, false, false), true);
  BOOST_REQUIRE_EQUAL(test.n_cols, 4);
  BOOST_REQUIRE_EQUAL(test.n_rows, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure CSVs can be saved in non-transposed form.
*/
BOOST_AUTO_TEST_CASE(SaveNonTransposedCSVTest)
{
  arma::mat test = "1 2;"
                   "3 4;"
                   "5 6;"
                   "7 8;";
  BOOST_REQUIRE_EQUAL(data::Save("test_file.csv", test, false, false), true);

  // Load it back (also non-transposed) and make sure it is the same.
  arma::mat reloaded;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.csv", reloaded, false, false),
      true);
  BOOST_REQUIRE_EQUAL(reloaded.n_rows, 4);
  BOOST_REQUIRE_EQUAL(reloaded.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], reloaded[i], 1e-5);

  // Remove the file.
  remove("test_file.csv");
}
/**
* Make sure arma_ascii is loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadArmaASCIITest)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";
  // Save the transposed matrix in Armadillo's own ASCII format.
  arma::mat testTrans = trans(test);
  BOOST_REQUIRE(testTrans.save("test_file.txt", arma::arma_ascii));

  // Loading should transpose it back to the original layout.
  BOOST_REQUIRE_EQUAL(data::Load("test_file.txt", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.txt");
}
/**
* Make sure a CSV is saved correctly.
*/
BOOST_AUTO_TEST_CASE(SaveArmaASCIITest)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";
  BOOST_REQUIRE_EQUAL(data::Save("test_file.txt", test), true);

  // Load it back in; the round trip must be lossless.
  BOOST_REQUIRE_EQUAL(data::Load("test_file.txt", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.txt");
}
/**
* Make sure raw_ascii is loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadRawASCIITest)
{
  // Write space-separated values (raw ASCII layout).
  ofstream f("test_file.txt");
  f << "1 2 3 4" << endl;
  f << "5 6 7 8" << endl;
  f.close();

  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.txt", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.txt");
}
/**
* Make sure CSV is loaded correctly as .txt.
*/
BOOST_AUTO_TEST_CASE(LoadCSVTxtTest)
{
  // Comma-separated data deliberately written to a .txt-named file.
  ofstream f("test_file.txt");
  f << "1, 2, 3, 4" << endl;
  f << "5, 6, 7, 8" << endl;
  f.close();

  arma::mat test;
  BOOST_REQUIRE_EQUAL(data::Load("test_file.txt", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.txt");
}
/**
* Make sure arma_binary is loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadArmaBinaryTest)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";
  // Save the transposed matrix in Armadillo's binary format.
  arma::mat testTrans = trans(test);
  BOOST_REQUIRE_EQUAL(testTrans.quiet_save("test_file.bin", arma::arma_binary),
      true);

  // Reload through our interface; it should be transposed back.
  BOOST_REQUIRE_EQUAL(data::Load("test_file.bin", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.bin");
}
/**
* Make sure arma_binary is saved correctly.
*/
BOOST_AUTO_TEST_CASE(SaveArmaBinaryTest)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";
  BOOST_REQUIRE_EQUAL(data::Save("test_file.bin", test), true);

  // Round trip through the binary format must be lossless.
  BOOST_REQUIRE_EQUAL(data::Load("test_file.bin", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.bin");
}
/**
* Make sure raw_binary is loaded correctly.
*/
BOOST_AUTO_TEST_CASE(LoadRawBinaryTest)
{
  arma::mat test = "1 2;"
                   "3 4;"
                   "5 6;"
                   "7 8;";
  arma::mat testTrans = trans(test);
  BOOST_REQUIRE_EQUAL(testTrans.quiet_save("test_file.bin", arma::raw_binary),
      true);

  // Raw binary carries no dimension information, so the data comes back as a
  // single row of eight elements.
  BOOST_REQUIRE_EQUAL(data::Load("test_file.bin", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 1);
  BOOST_REQUIRE_EQUAL(test.n_cols, 8);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.bin");
}
/**
* Make sure load as PGM is successful.
*/
BOOST_AUTO_TEST_CASE(LoadPGMBinaryTest)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";
  // Save the transposed matrix as a binary PGM image.
  arma::mat testTrans = trans(test);
  BOOST_REQUIRE_EQUAL(testTrans.quiet_save("test_file.pgm", arma::pgm_binary),
      true);

  // Reload through our interface; it should be transposed back.
  BOOST_REQUIRE_EQUAL(data::Load("test_file.pgm", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.pgm");
}
/**
* Make sure save as PGM is successful.
*/
BOOST_AUTO_TEST_CASE(SavePGMBinaryTest)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";
  BOOST_REQUIRE_EQUAL(data::Save("test_file.pgm", test), true);

  // Round trip through the PGM format must be lossless.
  BOOST_REQUIRE_EQUAL(data::Load("test_file.pgm", test), true);
  BOOST_REQUIRE_EQUAL(test.n_rows, 4);
  BOOST_REQUIRE_EQUAL(test.n_cols, 2);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);

  // Remove the file.
  remove("test_file.pgm");
}
#if defined(ARMA_USE_HDF5)
/**
* Make sure load as HDF5 is successful.
*/
BOOST_AUTO_TEST_CASE(LoadHDF5Test)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";
  arma::mat testTrans = trans(test);

  // Every HDF5-style extension should be recognized by the loader.
  const vector<string> files = { "test_file.h5", "test_file.hdf5",
      "test_file.hdf", "test_file.he5" };

  for (const string& file : files)
    BOOST_REQUIRE_EQUAL(testTrans.quiet_save(file, arma::hdf5_binary), true);

  for (const string& file : files)
  {
    BOOST_REQUIRE_EQUAL(data::Load(file, test), true);
    BOOST_REQUIRE_EQUAL(test.n_rows, 4);
    BOOST_REQUIRE_EQUAL(test.n_cols, 2);
    for (size_t i = 0; i < 8; ++i)
      BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);
  }

  for (const string& file : files)
    remove(file.c_str());
}
/**
* Make sure save as HDF5 is successful.
*/
BOOST_AUTO_TEST_CASE(SaveHDF5Test)
{
  arma::mat test = "1 5;"
                   "2 6;"
                   "3 7;"
                   "4 8;";

  // Every HDF5-style extension should be recognized by the saver.
  const vector<string> files = { "test_file.h5", "test_file.hdf5",
      "test_file.hdf", "test_file.he5" };

  for (const string& file : files)
    BOOST_REQUIRE_EQUAL(data::Save(file, test), true);

  // Load each one back and verify the round trip.
  for (const string& file : files)
  {
    BOOST_REQUIRE_EQUAL(data::Load(file, test), true);
    BOOST_REQUIRE_EQUAL(test.n_rows, 4);
    BOOST_REQUIRE_EQUAL(test.n_cols, 2);
    for (size_t i = 0; i < 8; ++i)
      BOOST_REQUIRE_CLOSE(test[i], (double) (i + 1), 1e-5);
  }

  for (const string& file : files)
    remove(file.c_str());
}
#endif
/**
* Test normalization of labels.
*/
BOOST_AUTO_TEST_CASE(NormalizeLabelSmallDatasetTest)
{
  arma::irowvec labels("-1 1 1 -1 -1 -1 1 1");

  arma::Row<size_t> newLabels;
  arma::ivec mappings;
  data::NormalizeLabels(labels, newLabels, mappings);

  // The first distinct label (-1) maps to 0 and the second (1) maps to 1.
  BOOST_REQUIRE_EQUAL(mappings[0], -1);
  BOOST_REQUIRE_EQUAL(mappings[1], 1);
  const size_t expected[8] = { 0, 1, 1, 0, 0, 0, 1, 1 };
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_EQUAL(newLabels[i], expected[i]);

  // Reverting the mapping must reproduce the original labels.
  arma::irowvec revertedLabels;
  data::RevertLabels(newLabels, mappings, revertedLabels);
  for (size_t i = 0; i < labels.n_elem; ++i)
    BOOST_REQUIRE_EQUAL(labels[i], revertedLabels[i]);
}
/**
* Harder label normalization test.
*/
BOOST_AUTO_TEST_CASE(NormalizeLabelTest)
{
  // Build a large random label vector, including one non-integer value.
  arma::rowvec randLabels(5000);
  for (size_t i = 0; i < 5000; ++i)
    randLabels[i] = math::RandInt(-50, 50);
  randLabels[0] = 0.65; // Hey, doubles work too!

  arma::Row<size_t> newLabels;
  arma::vec mappings;
  data::NormalizeLabels(randLabels, newLabels, mappings);

  // Normalize then revert must be the identity transformation.
  arma::rowvec revertedLabels(5000);
  data::RevertLabels(newLabels, mappings, revertedLabels);
  for (size_t i = 0; i < 5000; ++i)
    BOOST_REQUIRE_EQUAL(randLabels[i], revertedLabels[i]);
}
// Test structures.
/**
 * Simple serializable object (a char and a string) used to test
 * data::Save() and data::Load() for arbitrary objects.
 */
class TestInner
{
 public:
  // The string parameter is a sink: take it by value and move it into the
  // member instead of copying it a second time.
  TestInner(char c, string s) : c(c), s(std::move(s)) { }

  //! Serialize both members by name for boost::serialization.
  template<typename Archive>
  void serialize(Archive& ar, const unsigned int /* version */)
  {
    ar & BOOST_SERIALIZATION_NVP(c);
    ar & BOOST_SERIALIZATION_NVP(s);
  }

  // Public members for testing.
  char c;
  string s;
};
// Composite serializable test object: two ints plus two nested TestInner
// members, exercising recursive serialization through data::Save()/Load().
// NOTE(review): the serialize() member order defines the archive layout; do
// not reorder the fields below.
class Test
{
 public:
  Test(int x, int y) : x(x), y(y), ina('a', "hello"), inb('b', "goodbye") { }

  //! Serialize all members, including the nested objects.
  template<typename Archive>
  void serialize(Archive& ar, const unsigned int /* version */)
  {
    ar & BOOST_SERIALIZATION_NVP(x);
    ar & BOOST_SERIALIZATION_NVP(y);
    ar & BOOST_SERIALIZATION_NVP(ina);
    ar & BOOST_SERIALIZATION_NVP(inb);
  }

  // Public members for testing.
  int x;
  int y;
  TestInner ina;
  TestInner inb;
};
/**
* Make sure we can load and save.
*/
BOOST_AUTO_TEST_CASE(LoadBinaryTest)
{
  // Save an object to a binary archive.
  Test x(10, 12);
  BOOST_REQUIRE_EQUAL(data::Save("test.bin", "x", x, false), true);

  // Reload into an object holding different values; every member (including
  // the nested ones) must come back equal to the saved object.
  Test y(11, 14);
  BOOST_REQUIRE_EQUAL(data::Load("test.bin", "x", y, false), true);

  BOOST_REQUIRE_EQUAL(y.x, x.x);
  BOOST_REQUIRE_EQUAL(y.y, x.y);
  BOOST_REQUIRE_EQUAL(y.ina.c, x.ina.c);
  BOOST_REQUIRE_EQUAL(y.ina.s, x.ina.s);
  BOOST_REQUIRE_EQUAL(y.inb.c, x.inb.c);
  BOOST_REQUIRE_EQUAL(y.inb.s, x.inb.s);
}
/**
* Make sure we can load and save.
*/
BOOST_AUTO_TEST_CASE(LoadXMLTest)
{
  // Save an object to an XML archive.
  Test x(10, 12);
  BOOST_REQUIRE_EQUAL(data::Save("test.xml", "x", x, false), true);

  // Reload into an object holding different values; every member (including
  // the nested ones) must come back equal to the saved object.
  Test y(11, 14);
  BOOST_REQUIRE_EQUAL(data::Load("test.xml", "x", y, false), true);

  BOOST_REQUIRE_EQUAL(y.x, x.x);
  BOOST_REQUIRE_EQUAL(y.y, x.y);
  BOOST_REQUIRE_EQUAL(y.ina.c, x.ina.c);
  BOOST_REQUIRE_EQUAL(y.ina.s, x.ina.s);
  BOOST_REQUIRE_EQUAL(y.inb.c, x.inb.c);
  BOOST_REQUIRE_EQUAL(y.inb.s, x.inb.s);
}
/**
* Make sure we can load and save.
*/
BOOST_AUTO_TEST_CASE(LoadTextTest)
{
  // Save an object to a text archive.
  Test x(10, 12);
  BOOST_REQUIRE_EQUAL(data::Save("test.txt", "x", x, false), true);

  // Reload into an object holding different values; every member (including
  // the nested ones) must come back equal to the saved object.
  Test y(11, 14);
  BOOST_REQUIRE_EQUAL(data::Load("test.txt", "x", y, false), true);

  BOOST_REQUIRE_EQUAL(y.x, x.x);
  BOOST_REQUIRE_EQUAL(y.y, x.y);
  BOOST_REQUIRE_EQUAL(y.ina.c, x.ina.c);
  BOOST_REQUIRE_EQUAL(y.ina.s, x.ina.s);
  BOOST_REQUIRE_EQUAL(y.inb.c, x.inb.c);
  BOOST_REQUIRE_EQUAL(y.inb.s, x.inb.s);
}
/**
* Test DatasetInfo by making a map for a dimension.
*/
BOOST_AUTO_TEST_CASE(DatasetInfoTest)
{
  DatasetInfo di(100);

  // Every dimension should start out numeric with no mappings.
  for (size_t dim = 0; dim < 100; ++dim)
  {
    BOOST_REQUIRE(di.Type(dim) == Datatype::numeric);
    BOOST_REQUIRE_EQUAL(di.NumMappings(dim), 0);
  }

  // Add three mappings to dimension 3; they are numbered in insertion order.
  const size_t first = di.MapString<size_t>("test_mapping_1", 3);
  const size_t second = di.MapString<size_t>("test_mapping_2", 3);
  const size_t third = di.MapString<size_t>("test_mapping_3", 3);

  BOOST_REQUIRE_EQUAL(first, 0);
  BOOST_REQUIRE_EQUAL(second, 1);
  BOOST_REQUIRE_EQUAL(third, 2);

  // Only dimension 3 should now be categorical.
  for (size_t dim = 0; dim < 100; ++dim)
  {
    if (dim == 3)
    {
      BOOST_REQUIRE(di.Type(dim) == Datatype::categorical);
      BOOST_REQUIRE_EQUAL(di.NumMappings(dim), 3);
    }
    else
    {
      BOOST_REQUIRE(di.Type(dim) == Datatype::numeric);
      BOOST_REQUIRE_EQUAL(di.NumMappings(dim), 0);
    }
  }

  // Unmapping must return the original strings.
  BOOST_REQUIRE_EQUAL(di.UnmapString(first, 3), "test_mapping_1");
  BOOST_REQUIRE_EQUAL(di.UnmapString(second, 3), "test_mapping_2");
  BOOST_REQUIRE_EQUAL(di.UnmapString(third, 3), "test_mapping_3");
}
/**
* Test loading regular CSV with DatasetInfo. Everything should be numeric.
*/
BOOST_AUTO_TEST_CASE(RegularCSVDatasetInfoLoad)
{
  const vector<string> testFiles = { "fake.csv", "german.csv", "iris.csv",
      "vc2.csv", "johnson8-4-4.csv", "lars_dependent_y.csv",
      "vc2_test_labels.txt" };

  for (const string& file : testFiles)
  {
    // Load each dataset both with and without a DatasetInfo.
    arma::mat one, two;
    DatasetInfo info;
    data::Load(file, one);
    data::Load(file, two, info);

    // Both load paths must produce identical matrices.
    BOOST_REQUIRE_EQUAL(one.n_elem, two.n_elem);
    BOOST_REQUIRE_EQUAL(one.n_rows, two.n_rows);
    BOOST_REQUIRE_EQUAL(one.n_cols, two.n_cols);
    for (size_t j = 0; j < one.n_elem; ++j)
    {
      if (std::abs(one[j]) < 1e-8)
        BOOST_REQUIRE_SMALL(two[j], 1e-8);
      else
        BOOST_REQUIRE_CLOSE(one[j], two[j], 1e-8);
    }

    // These datasets are purely numeric, so no dimension is categorical.
    for (size_t d = 0; d < two.n_rows; ++d)
      BOOST_REQUIRE(info.Type(d) == Datatype::numeric);
  }
}
/**
* Test non-transposed loading of regular CSVs with DatasetInfo. Everything
* should be numeric.
*/
BOOST_AUTO_TEST_CASE(NontransposedCSVDatasetInfoLoad)
{
  const vector<string> testFiles = { "fake.csv", "german.csv", "iris.csv",
      "vc2.csv", "johnson8-4-4.csv", "lars_dependent_y.csv",
      "vc2_test_labels.txt" };

  for (const string& file : testFiles)
  {
    // Load each dataset non-transposed, with and without a DatasetInfo.
    arma::mat one, two;
    DatasetInfo info;
    data::Load(file, one, true, false); // No transpose.
    data::Load(file, two, info, true, false);

    // Both load paths must produce identical matrices.
    BOOST_REQUIRE_EQUAL(one.n_elem, two.n_elem);
    BOOST_REQUIRE_EQUAL(one.n_rows, two.n_rows);
    BOOST_REQUIRE_EQUAL(one.n_cols, two.n_cols);
    for (size_t j = 0; j < one.n_elem; ++j)
    {
      if (std::abs(one[j]) < 1e-8)
        BOOST_REQUIRE_SMALL(two[j], 1e-8);
      else
        BOOST_REQUIRE_CLOSE(one[j], two[j], 1e-8);
    }

    // These datasets are purely numeric, so no dimension is categorical.
    for (size_t d = 0; d < two.n_rows; ++d)
      BOOST_REQUIRE(info.Type(d) == Datatype::numeric);
  }
}
/**
* Create a file with a categorical string feature, then load it.
*/
BOOST_AUTO_TEST_CASE(CategoricalCSVLoadTest00)
{
  ofstream f("test.csv");
  f << "1, 2, hello" << endl;
  f << "3, 4, goodbye" << endl;
  f << "5, 6, coffee" << endl;
  f << "7, 8, confusion" << endl;
  f << "9, 10, hello" << endl;
  f << "11, 12, confusion" << endl;
  f << "13, 14, confusion" << endl;
  f.close();

  // Load the test CSV.
  arma::umat matrix;
  DatasetInfo info;
  data::Load("test.csv", matrix, info);

  BOOST_REQUIRE_EQUAL(matrix.n_cols, 7);
  BOOST_REQUIRE_EQUAL(matrix.n_rows, 3);

  // Strings in dimension 2 map in first-seen order:
  // hello -> 0, goodbye -> 1, coffee -> 2, confusion -> 3.
  const arma::uword categories[7] = { 0, 1, 2, 3, 0, 3, 3 };
  for (size_t j = 0; j < 7; ++j)
  {
    BOOST_REQUIRE_EQUAL(matrix(0, j), 2 * j + 1);
    BOOST_REQUIRE_EQUAL(matrix(1, j), 2 * j + 2);
    BOOST_REQUIRE_EQUAL(matrix(2, j), categories[j]);
  }

  BOOST_REQUIRE(info.Type(0) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(2) == Datatype::categorical);

  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("hello", 2), 0);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("goodbye", 2), 1);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("coffee", 2), 2);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("confusion", 2), 3);
  BOOST_REQUIRE_EQUAL(info.UnmapString(0, 2), "hello");
  BOOST_REQUIRE_EQUAL(info.UnmapString(1, 2), "goodbye");
  BOOST_REQUIRE_EQUAL(info.UnmapString(2, 2), "coffee");
  BOOST_REQUIRE_EQUAL(info.UnmapString(3, 2), "confusion");

  remove("test.csv");
}
BOOST_AUTO_TEST_CASE(CategoricalCSVLoadTest01)
{
  ofstream f("test.csv");
  f << "1, 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f << " , 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f.close();

  // Load the test CSV.
  arma::umat matrix;
  DatasetInfo info;
  data::Load("test.csv", matrix, info, true);

  BOOST_REQUIRE_EQUAL(matrix.n_cols, 4);
  BOOST_REQUIRE_EQUAL(matrix.n_rows, 3);

  // Dimension 0 becomes categorical because of the missing value in point 2:
  // "1" -> 0 and the empty token -> 1.  All other entries are plain 1s.
  for (size_t j = 0; j < 4; ++j)
  {
    BOOST_REQUIRE_EQUAL(matrix(0, j), (j == 2) ? 1 : 0);
    BOOST_REQUIRE_EQUAL(matrix(1, j), 1);
    BOOST_REQUIRE_EQUAL(matrix(2, j), 1);
  }

  BOOST_REQUIRE(info.Type(0) == Datatype::categorical);
  BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(2) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(3) == Datatype::numeric);

  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 0), 0);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("", 0), 1);
  BOOST_REQUIRE_EQUAL(info.UnmapString(0, 0), "1");
  BOOST_REQUIRE_EQUAL(info.UnmapString(1, 0), "");

  remove("test.csv");
}
BOOST_AUTO_TEST_CASE(CategoricalCSVLoadTest02)
{
  ofstream f("test.csv");
  f << "1, 1, 1" << endl;
  f << ", 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f.close();

  // Load the test CSV.
  arma::umat matrix;
  DatasetInfo info;
  data::Load("test.csv", matrix, info, true);

  BOOST_REQUIRE_EQUAL(matrix.n_cols, 4);
  BOOST_REQUIRE_EQUAL(matrix.n_rows, 3);

  // Dimension 0 becomes categorical because of the missing value in point 1:
  // "1" -> 0 and the empty token -> 1.  All other entries are plain 1s.
  for (size_t j = 0; j < 4; ++j)
  {
    BOOST_REQUIRE_EQUAL(matrix(0, j), (j == 1) ? 1 : 0);
    BOOST_REQUIRE_EQUAL(matrix(1, j), 1);
    BOOST_REQUIRE_EQUAL(matrix(2, j), 1);
  }

  BOOST_REQUIRE(info.Type(0) == Datatype::categorical);
  BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(2) == Datatype::numeric);

  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("", 0), 1);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 0), 0);
  BOOST_REQUIRE_EQUAL(info.UnmapString(0, 0), "1");
  BOOST_REQUIRE_EQUAL(info.UnmapString(1, 0), "");

  remove("test.csv");
}
BOOST_AUTO_TEST_CASE(CategoricalCSVLoadTest03)
{
  ofstream f("test.csv");
  f << ", 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f.close();

  // Load the test CSV.
  arma::umat matrix;
  DatasetInfo info;
  data::Load("test.csv", matrix, info, true);

  BOOST_REQUIRE_EQUAL(matrix.n_cols, 4);
  BOOST_REQUIRE_EQUAL(matrix.n_rows, 3);

  // The missing value comes first here, so the empty token maps to 0 and
  // "1" maps to 1 in dimension 0.  All other entries are plain 1s.
  for (size_t j = 0; j < 4; ++j)
  {
    BOOST_REQUIRE_EQUAL(matrix(0, j), (j == 0) ? 0 : 1);
    BOOST_REQUIRE_EQUAL(matrix(1, j), 1);
    BOOST_REQUIRE_EQUAL(matrix(2, j), 1);
  }

  BOOST_REQUIRE(info.Type(0) == Datatype::categorical);
  BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(2) == Datatype::numeric);

  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("", 0), 0);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 0), 1);
  BOOST_REQUIRE_EQUAL(info.UnmapString(0, 0), "");
  BOOST_REQUIRE_EQUAL(info.UnmapString(1, 0), "1");

  remove("test.csv");
}
BOOST_AUTO_TEST_CASE(CategoricalCSVLoadTest04)
{
  ofstream f("test.csv");
  f << "200-DM, 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f << "1, 1, 1" << endl;
  f.close();

  // Load the test CSV.
  arma::umat matrix;
  DatasetInfo info;
  data::Load("test.csv", matrix, info, true);

  BOOST_REQUIRE_EQUAL(matrix.n_cols, 4);
  BOOST_REQUIRE_EQUAL(matrix.n_rows, 3);

  // The non-numeric token comes first, so "200-DM" maps to 0 and "1" maps to
  // 1 in dimension 0.  All other entries are plain 1s.
  for (size_t j = 0; j < 4; ++j)
  {
    BOOST_REQUIRE_EQUAL(matrix(0, j), (j == 0) ? 0 : 1);
    BOOST_REQUIRE_EQUAL(matrix(1, j), 1);
    BOOST_REQUIRE_EQUAL(matrix(2, j), 1);
  }

  BOOST_REQUIRE(info.Type(0) == Datatype::categorical);
  BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(2) == Datatype::numeric);

  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("200-DM", 0), 0);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 0), 1);
  BOOST_REQUIRE_EQUAL(info.UnmapString(0, 0), "200-DM");
  BOOST_REQUIRE_EQUAL(info.UnmapString(1, 0), "1");

  remove("test.csv");
}
/**
 * Non-transposed categorical CSV load: each CSV row becomes one dimension.
 * Every row containing a non-numeric token becomes categorical; the
 * all-numeric row 5 ("11, 12, 15") must remain numeric.
 */
BOOST_AUTO_TEST_CASE(CategoricalNontransposedCSVLoadTest00)
{
fstream f;
f.open("test.csv", fstream::out);
f << "1, 2, hello" << endl;
f << "3, 4, goodbye" << endl;
f << "5, 6, coffee" << endl;
f << "7, 8, confusion" << endl;
f << "9, 10, hello" << endl;
f << "11, 12, 15" << endl;
f << "13, 14, confusion" << endl;
f.close();
// Load the test CSV.
arma::umat matrix;
DatasetInfo info;
data::Load("test.csv", matrix, info, true, false); // No transpose.
BOOST_REQUIRE_EQUAL(matrix.n_cols, 3);
BOOST_REQUIRE_EQUAL(matrix.n_rows, 7);
// In each categorical row the three tokens map, in order of appearance,
// to 0, 1, 2.
BOOST_REQUIRE_EQUAL(matrix(0, 0), 0);
BOOST_REQUIRE_EQUAL(matrix(0, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(0, 2), 2);
BOOST_REQUIRE_EQUAL(matrix(1, 0), 0);
BOOST_REQUIRE_EQUAL(matrix(1, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(1, 2), 2);
BOOST_REQUIRE_EQUAL(matrix(2, 0), 0);
BOOST_REQUIRE_EQUAL(matrix(2, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(2, 2), 2);
BOOST_REQUIRE_EQUAL(matrix(3, 0), 0);
BOOST_REQUIRE_EQUAL(matrix(3, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(3, 2), 2);
BOOST_REQUIRE_EQUAL(matrix(4, 0), 0);
BOOST_REQUIRE_EQUAL(matrix(4, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(4, 2), 2);
// Row 5 is numeric, so its values load unchanged.
BOOST_REQUIRE_EQUAL(matrix(5, 0), 11);
BOOST_REQUIRE_EQUAL(matrix(5, 1), 12);
BOOST_REQUIRE_EQUAL(matrix(5, 2), 15);
BOOST_REQUIRE_EQUAL(matrix(6, 0), 0);
BOOST_REQUIRE_EQUAL(matrix(6, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(6, 2), 2);
BOOST_REQUIRE(info.Type(0) == Datatype::categorical);
BOOST_REQUIRE(info.Type(1) == Datatype::categorical);
BOOST_REQUIRE(info.Type(2) == Datatype::categorical);
BOOST_REQUIRE(info.Type(3) == Datatype::categorical);
BOOST_REQUIRE(info.Type(4) == Datatype::categorical);
BOOST_REQUIRE(info.Type(5) == Datatype::numeric);
BOOST_REQUIRE(info.Type(6) == Datatype::categorical);
// Forward mappings: token -> index, per dimension.
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 0), 0);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("2", 0), 1);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("hello", 0), 2);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("3", 1), 0);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("4", 1), 1);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("goodbye", 1), 2);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("5", 2), 0);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("6", 2), 1);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("coffee", 2), 2);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("7", 3), 0);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("8", 3), 1);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("confusion", 3), 2);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("9", 4), 0);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("10", 4), 1);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("hello", 4), 2);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("13", 6), 0);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("14", 6), 1);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("confusion", 6), 2);
// Reverse mappings: index -> token, per dimension.
BOOST_REQUIRE_EQUAL(info.UnmapString(0, 0), "1");
BOOST_REQUIRE_EQUAL(info.UnmapString(1, 0), "2");
BOOST_REQUIRE_EQUAL(info.UnmapString(2, 0), "hello");
BOOST_REQUIRE_EQUAL(info.UnmapString(0, 1), "3");
BOOST_REQUIRE_EQUAL(info.UnmapString(1, 1), "4");
BOOST_REQUIRE_EQUAL(info.UnmapString(2, 1), "goodbye");
BOOST_REQUIRE_EQUAL(info.UnmapString(0, 2), "5");
BOOST_REQUIRE_EQUAL(info.UnmapString(1, 2), "6");
BOOST_REQUIRE_EQUAL(info.UnmapString(2, 2), "coffee");
BOOST_REQUIRE_EQUAL(info.UnmapString(0, 3), "7");
BOOST_REQUIRE_EQUAL(info.UnmapString(1, 3), "8");
BOOST_REQUIRE_EQUAL(info.UnmapString(2, 3), "confusion");
BOOST_REQUIRE_EQUAL(info.UnmapString(0, 4), "9");
BOOST_REQUIRE_EQUAL(info.UnmapString(1, 4), "10");
BOOST_REQUIRE_EQUAL(info.UnmapString(2, 4), "hello");
BOOST_REQUIRE_EQUAL(info.UnmapString(0, 6), "13");
BOOST_REQUIRE_EQUAL(info.UnmapString(1, 6), "14");
BOOST_REQUIRE_EQUAL(info.UnmapString(2, 6), "confusion");
remove("test.csv");
}
/**
 * Non-transposed categorical CSV load where the third row contains an
 * empty field: dimension 2 becomes categorical, the rest stay numeric.
 */
BOOST_AUTO_TEST_CASE(CategoricalNontransposedCSVLoadTest01)
{
  fstream csv;
  csv.open("test.csv", fstream::out);
  csv << "1, 1, 1" << endl;
  csv << "1, 1, 1" << endl;
  csv << " , 1, 1" << endl;
  csv << "1, 1, 1" << endl;
  csv.close();

  // Load the test CSV without transposing: each CSV row is one dimension.
  arma::umat matrix;
  DatasetInfo info;
  data::Load("test.csv", matrix, info, true, false); // No transpose.

  BOOST_REQUIRE_EQUAL(matrix.n_cols, 3);
  BOOST_REQUIRE_EQUAL(matrix.n_rows, 4);

  // The empty token maps to 0 at (2, 0); every other entry is the numeric 1.
  for (size_t r = 0; r < matrix.n_rows; ++r)
    for (size_t c = 0; c < matrix.n_cols; ++c)
      BOOST_REQUIRE_EQUAL(matrix(r, c), (r == 2 && c == 0) ? 0 : 1);

  BOOST_REQUIRE(info.Type(0) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(2) == Datatype::categorical);
  BOOST_REQUIRE(info.Type(3) == Datatype::numeric);

  // Mappings in dimension 2: "" was seen first (0), then "1" (1).
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("", 2), 0);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 2), 1);
  BOOST_REQUIRE_EQUAL(info.UnmapString(0, 2), "");
  BOOST_REQUIRE_EQUAL(info.UnmapString(1, 2), "1");

  remove("test.csv");
}
/**
 * Non-transposed categorical CSV load where the second row starts with an
 * empty field: dimension 1 becomes categorical, the rest stay numeric.
 */
BOOST_AUTO_TEST_CASE(CategoricalNontransposedCSVLoadTest02)
{
  fstream csv;
  csv.open("test.csv", fstream::out);
  csv << "1, 1, 1" << endl;
  csv << ", 1, 1" << endl;
  csv << "1, 1, 1" << endl;
  csv << "1, 1, 1" << endl;
  csv.close();

  // Load the test CSV without transposing: each CSV row is one dimension.
  arma::umat matrix;
  DatasetInfo info;
  data::Load("test.csv", matrix, info, true, false); // No transpose.

  BOOST_REQUIRE_EQUAL(matrix.n_cols, 3);
  BOOST_REQUIRE_EQUAL(matrix.n_rows, 4);

  // The empty token maps to 0 at (1, 0); every other entry is the numeric 1.
  for (size_t r = 0; r < matrix.n_rows; ++r)
    for (size_t c = 0; c < matrix.n_cols; ++c)
      BOOST_REQUIRE_EQUAL(matrix(r, c), (r == 1 && c == 0) ? 0 : 1);

  BOOST_REQUIRE(info.Type(0) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(1) == Datatype::categorical);
  BOOST_REQUIRE(info.Type(2) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(3) == Datatype::numeric);

  // Mappings in dimension 1: "" was seen first (0), then "1" (1).
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("", 1), 0);
  BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 1), 1);
  BOOST_REQUIRE_EQUAL(info.UnmapString(0, 1), "");
  BOOST_REQUIRE_EQUAL(info.UnmapString(1, 1), "1");

  remove("test.csv");
}
/**
 * Non-transposed categorical CSV load where the FIRST row starts with an
 * empty field: dimension 0 becomes categorical ("" -> 0, "1" -> 1) while
 * dimensions 1-3 stay numeric.
 */
BOOST_AUTO_TEST_CASE(CategoricalNontransposedCSVLoadTest03)
{
fstream f;
f.open("test.csv", fstream::out);
f << ", 1, 1" << endl;
f << "1, 1, 1" << endl;
f << "1, 1, 1" << endl;
f << "1, 1, 1" << endl;
f.close();
// Load the test CSV.
arma::umat matrix;
DatasetInfo info;
data::Load("test.csv", matrix, info, true, false); // No transpose.
BOOST_REQUIRE_EQUAL(matrix.n_cols, 3);
BOOST_REQUIRE_EQUAL(matrix.n_rows, 4);
// The empty token maps to 0 at (0, 0); every other entry is the numeric 1.
BOOST_REQUIRE_EQUAL(matrix(0, 0), 0);
BOOST_REQUIRE_EQUAL(matrix(0, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(0, 2), 1);
BOOST_REQUIRE_EQUAL(matrix(1, 0), 1);
BOOST_REQUIRE_EQUAL(matrix(1, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(1, 2), 1);
BOOST_REQUIRE_EQUAL(matrix(2, 0), 1);
BOOST_REQUIRE_EQUAL(matrix(2, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(2, 2), 1);
BOOST_REQUIRE_EQUAL(matrix(3, 0), 1);
BOOST_REQUIRE_EQUAL(matrix(3, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(3, 2), 1);
BOOST_REQUIRE(info.Type(0) == Datatype::categorical);
BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
BOOST_REQUIRE(info.Type(2) == Datatype::numeric);
BOOST_REQUIRE(info.Type(3) == Datatype::numeric);
// Fix: the mappings live in dimension 0 (the categorical one), not
// dimension 1 as previously queried.  MapString() on the numeric dimension 1
// would create fresh mappings there instead of checking the loaded ones,
// matching the sibling tests 01/02 which query the categorical dimension.
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("", 0), 0);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 0), 1);
BOOST_REQUIRE_EQUAL(info.UnmapString(0, 0), "");
BOOST_REQUIRE_EQUAL(info.UnmapString(1, 0), "1");
remove("test.csv");
}
/**
 * Non-transposed categorical CSV load with surrounding whitespace: the
 * trimmed token "200-DM" in the FIRST row makes dimension 0 categorical
 * ("200-DM" -> 0, "1" -> 1) while dimensions 1-3 stay numeric.
 */
BOOST_AUTO_TEST_CASE(CategoricalNontransposedCSVLoadTest04)
{
fstream f;
f.open("test.csv", fstream::out);
f << " 200-DM , 1 , 1 " << endl;
f << " 1 , 1 , 1 " << endl;
f << " 1 , 1 , 1 " << endl;
f << " 1 , 1 , 1 " << endl;
f.close();
// Load the test CSV.
arma::umat matrix;
DatasetInfo info;
data::Load("test.csv", matrix, info, true, false); // No transpose.
BOOST_REQUIRE_EQUAL(matrix.n_cols, 3);
BOOST_REQUIRE_EQUAL(matrix.n_rows, 4);
BOOST_REQUIRE(info.Type(0) == Datatype::categorical);
BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
BOOST_REQUIRE(info.Type(2) == Datatype::numeric);
BOOST_REQUIRE(info.Type(3) == Datatype::numeric);
// "200-DM" maps to 0 at (0, 0); every other entry is the numeric 1.
BOOST_REQUIRE_EQUAL(matrix(0, 0), 0);
BOOST_REQUIRE_EQUAL(matrix(0, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(0, 2), 1);
BOOST_REQUIRE_EQUAL(matrix(1, 0), 1);
BOOST_REQUIRE_EQUAL(matrix(1, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(1, 2), 1);
BOOST_REQUIRE_EQUAL(matrix(2, 0), 1);
BOOST_REQUIRE_EQUAL(matrix(2, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(2, 2), 1);
BOOST_REQUIRE_EQUAL(matrix(3, 0), 1);
BOOST_REQUIRE_EQUAL(matrix(3, 1), 1);
BOOST_REQUIRE_EQUAL(matrix(3, 2), 1);
// Fix: the mappings live in dimension 0 (the categorical one), not
// dimension 1 as previously queried.  MapString() on the numeric dimension 1
// would create fresh mappings there instead of checking the loaded ones,
// matching the sibling tests 01/02 which query the categorical dimension.
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("200-DM", 0), 0);
BOOST_REQUIRE_EQUAL(info.MapString<arma::uword>("1", 0), 1);
BOOST_REQUIRE_EQUAL(info.UnmapString(0, 0), "200-DM");
BOOST_REQUIRE_EQUAL(info.UnmapString(1, 0), "1");
remove("test.csv");
}
/**
 * A harder test CSV based on the concerns in #658: mixed empty fields,
 * tab-only fields, and rows where the trailing field is missing.  Checks
 * the mapping counts both transposed and non-transposed.
 */
BOOST_AUTO_TEST_CASE(HarderKeonTest)
{
fstream f;
f.open("test.csv", fstream::out);
f << "a,, 13,\t, 0" << endl;
f << "b, 3, 14, hello,1" << endl;
f << "b, 4, 15, , 2" << endl;
f << ", 5, 16, ," << endl;
f.close();
// Load transposed.
arma::mat dataset;
data::DatasetInfo info;
data::Load("test.csv", dataset, info, true, true);
BOOST_REQUIRE_EQUAL(dataset.n_rows, 5);
BOOST_REQUIRE_EQUAL(dataset.n_cols, 4);
BOOST_REQUIRE_EQUAL(info.Dimensionality(), 5);
// Per-dimension mapping counts; dimension 2 is fully numeric.
BOOST_REQUIRE_EQUAL(info.NumMappings(0), 3);
BOOST_REQUIRE_EQUAL(info.NumMappings(1), 4);
BOOST_REQUIRE_EQUAL(info.NumMappings(2), 0);
BOOST_REQUIRE_EQUAL(info.NumMappings(3), 2); // \t and "" are equivalent.
BOOST_REQUIRE_EQUAL(info.NumMappings(4), 4);
// Now load non-transposed: dimensions are the CSV rows instead.
data::DatasetInfo ntInfo;
data::Load("test.csv", dataset, ntInfo, true, false);
BOOST_REQUIRE_EQUAL(dataset.n_rows, 4);
BOOST_REQUIRE_EQUAL(dataset.n_cols, 5);
BOOST_REQUIRE_EQUAL(ntInfo.Dimensionality(), 4);
BOOST_REQUIRE_EQUAL(ntInfo.NumMappings(0), 4);
BOOST_REQUIRE_EQUAL(ntInfo.NumMappings(1), 5);
BOOST_REQUIRE_EQUAL(ntInfo.NumMappings(2), 5);
BOOST_REQUIRE_EQUAL(ntInfo.NumMappings(3), 3);
remove("test.csv");
}
/**
 * A simple ARFF load test. Two attributes, both numeric.
 */
BOOST_AUTO_TEST_CASE(SimpleARFFTest)
{
  // Write a minimal ARFF file: two numeric attributes, four observations.
  fstream arff;
  arff.open("test.arff", fstream::out);
  arff << "@relation test" << endl;
  arff << endl;
  arff << "@attribute one NUMERIC" << endl;
  arff << "@attribute two NUMERIC" << endl;
  arff << endl;
  arff << "@data" << endl;
  arff << "1, 2" << endl;
  arff << "3, 4" << endl;
  arff << "5, 6" << endl;
  arff << "7, 8" << endl;
  arff.close();

  arma::mat dataset;
  DatasetInfo info;
  data::Load("test.arff", dataset, info);

  // Both dimensions come back numeric and the data is transposed to 2 x 4.
  BOOST_REQUIRE_EQUAL(info.Dimensionality(), 2);
  BOOST_REQUIRE(info.Type(0) == Datatype::numeric);
  BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
  BOOST_REQUIRE_EQUAL(dataset.n_rows, 2);
  BOOST_REQUIRE_EQUAL(dataset.n_cols, 4);

  // Column-major storage means the elements are simply 1..8 in order.
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_CLOSE(dataset[i], double(i + 1), 1e-5);

  remove("test.arff");
}
/**
 * Another simple ARFF load test. Three attributes, two categorical, one
 * numeric.  Checks type detection, mapping counts, and that equal/unequal
 * tokens map to equal/unequal values.
 */
BOOST_AUTO_TEST_CASE(SimpleARFFCategoricalTest)
{
fstream f;
f.open("test.arff", fstream::out);
f << "@relation test" << endl;
f << endl;
f << "@attribute one STRING" << endl;
f << "@attribute two REAL" << endl;
f << endl;
f << "@attribute three STRING" << endl;
f << endl;
f << "% a comment line " << endl;
f << endl;
f << "@data" << endl;
f << "hello, 1, moo" << endl;
f << "cheese, 2.34, goodbye" << endl;
f << "seven, 1.03e+5, moo" << endl;
f << "hello, -1.3, goodbye" << endl;
f.close();
arma::mat dataset;
DatasetInfo info;
data::Load("test.arff", dataset, info);
// STRING attributes become categorical, REAL stays numeric.
BOOST_REQUIRE_EQUAL(info.Dimensionality(), 3);
BOOST_REQUIRE(info.Type(0) == Datatype::categorical);
BOOST_REQUIRE_EQUAL(info.NumMappings(0), 3);
BOOST_REQUIRE(info.Type(1) == Datatype::numeric);
BOOST_REQUIRE(info.Type(2) == Datatype::categorical);
BOOST_REQUIRE_EQUAL(info.NumMappings(2), 2);
BOOST_REQUIRE_EQUAL(dataset.n_rows, 3);
BOOST_REQUIRE_EQUAL(dataset.n_cols, 4);
// The first dimension must all be different (except the ones that are the
// same): the two "hello" tokens share a value, the rest are distinct.
BOOST_REQUIRE_EQUAL(dataset(0, 0), dataset(0, 3));
BOOST_REQUIRE_NE(dataset(0, 0), dataset(0, 1));
BOOST_REQUIRE_NE(dataset(0, 1), dataset(0, 2));
BOOST_REQUIRE_NE(dataset(0, 2), dataset(0, 0));
// Numeric dimension loads verbatim, including scientific notation.
BOOST_REQUIRE_CLOSE(dataset(1, 0), 1.0, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(1, 1), 2.34, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(1, 2), 1.03e5, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(1, 3), -1.3, 1e-5);
// "moo"/"goodbye" each appear twice; their mapped values must agree.
BOOST_REQUIRE_EQUAL(dataset(2, 0), dataset(2, 2));
BOOST_REQUIRE_EQUAL(dataset(2, 1), dataset(2, 3));
BOOST_REQUIRE_NE(dataset(2, 0), dataset(2, 1));
remove("test.arff");
}
/**
 * A harder ARFF test, where we have each type of supported value, and some
 * random whitespace too: quoted attribute names, inline comments, quoted
 * data fields containing commas.
 */
BOOST_AUTO_TEST_CASE(HarderARFFTest)
{
fstream f;
f.open("test.arff", fstream::out);
f << "@relation \t test" << endl;
f << endl;
f << endl;
f << "@attribute @@@@flfl numeric" << endl;
f << endl;
f << "% comment" << endl;
f << "@attribute \"hello world\" string" << endl;
f << "@attribute 12345 integer" << endl;
f << "@attribute real real" << endl;
f << "@attribute \"blah blah blah \t \" numeric % comment" << endl;
f << "% comment" << endl;
f << "@data" << endl;
f << "1, one, 3, 4.5, 6" << endl;
f << "2, two, 4, 5.5, 7 % comment" << endl;
f << "3, \"three five, six\", 5, 6.5, 8" << endl;
f.close();
arma::mat dataset;
DatasetInfo info;
data::Load("test.arff", dataset, info);
// Only the string attribute (index 1) becomes categorical; numeric,
// integer, and real all map to the numeric type.
BOOST_REQUIRE_EQUAL(info.Dimensionality(), 5);
BOOST_REQUIRE(info.Type(0) == Datatype::numeric);
BOOST_REQUIRE(info.Type(1) == Datatype::categorical);
BOOST_REQUIRE_EQUAL(info.NumMappings(1), 3);
BOOST_REQUIRE(info.Type(2) == Datatype::numeric);
BOOST_REQUIRE(info.Type(3) == Datatype::numeric);
BOOST_REQUIRE(info.Type(4) == Datatype::numeric);
BOOST_REQUIRE_EQUAL(dataset.n_rows, 5);
BOOST_REQUIRE_EQUAL(dataset.n_cols, 3);
BOOST_REQUIRE_CLOSE(dataset(0, 0), 1.0, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(0, 1), 2.0, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(0, 2), 3.0, 1e-5);
// The three string tokens are distinct, so their mapped values must be too.
BOOST_REQUIRE_NE(dataset(1, 0), dataset(1, 1));
BOOST_REQUIRE_NE(dataset(1, 1), dataset(1, 2));
BOOST_REQUIRE_NE(dataset(1, 0), dataset(1, 2));
BOOST_REQUIRE_CLOSE(dataset(2, 0), 3.0, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(2, 1), 4.0, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(2, 2), 5.0, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(3, 0), 4.5, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(3, 1), 5.5, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(3, 2), 6.5, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(4, 0), 6.0, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(4, 1), 7.0, 1e-5);
BOOST_REQUIRE_CLOSE(dataset(4, 2), 8.0, 1e-5);
remove("test.arff");
}
/**
 * Passing a DatasetInfo whose dimensionality does not match the ARFF file
 * must make LoadARFF() throw std::invalid_argument.
 */
BOOST_AUTO_TEST_CASE(BadDatasetInfoARFFTest)
{
  fstream out;
  out.open("test.arff", fstream::out);
  out << "@relation \t test" << endl;
  out << endl;
  out << endl;
  out << "@attribute @@@@flfl numeric" << endl;
  out << endl;
  out << "% comment" << endl;
  out << "@attribute \"hello world\" string" << endl;
  out << "@attribute 12345 integer" << endl;
  out << "@attribute real real" << endl;
  out << "@attribute \"blah blah blah \t \" numeric % comment" << endl;
  out << "% comment" << endl;
  out << "@data" << endl;
  out << "1, one, 3, 4.5, 6" << endl;
  out << "2, two, 4, 5.5, 7 % comment" << endl;
  out << "3, \"three five, six\", 5, 6.5, 8" << endl;
  out.close();

  // The file declares 5 attributes but we claim 6; the loader must refuse.
  arma::mat dataset;
  DatasetInfo info(6);
  BOOST_REQUIRE_THROW(data::LoadARFF("test.arff", dataset, info),
      std::invalid_argument);

  remove("test.arff");
}
/**
 * Loading an ARFF file that does not exist must throw std::runtime_error.
 */
BOOST_AUTO_TEST_CASE(NonExistentFileARFFTest)
{
  arma::mat dataset;
  DatasetInfo info;
  BOOST_REQUIRE_THROW(
      data::LoadARFF("nonexistentfile.arff", dataset, info),
      std::runtime_error);
}
/**
 * A test to check whether the arff loader is case insensitive to declarations:
 * @relation, @attribute, @data.
 */
BOOST_AUTO_TEST_CASE(CaseTest)
{
// NOTE(review): this test relies on the external fixture file
// "casecheck.arff" (not generated here); it fails if that file is missing
// from the working directory -- confirm it ships with the test data.
arma::mat dataset;
DatasetMapper<IncrementPolicy> info;
LoadARFF<double, IncrementPolicy>("casecheck.arff", dataset, info);
// Expect 2 dimensions and 3 points from the fixture.
BOOST_CHECK_EQUAL(dataset.n_rows, 2);
BOOST_CHECK_EQUAL(dataset.n_cols, 3);
}
/**
 * Test that a CSV with the wrong number of columns fails.
 */
BOOST_AUTO_TEST_CASE(MalformedCSVTest)
{
  // The middle line is one field short, so loading must report failure.
  fstream out;
  out.open("test.csv", fstream::out);
  out << "1, 2, 3, 4" << endl;
  out << "5, 6, 7" << endl;
  out << "8, 9, 10, 11" << endl;
  out.close();

  arma::mat dataset;
  DatasetInfo di;
  const bool success = data::Load("test.csv", dataset, di, false);
  BOOST_REQUIRE(!success);

  remove("test.csv");
}
/**
 * Test that a TSV can load with LoadCSV.
 */
BOOST_AUTO_TEST_CASE(LoadCSVTSVTest)
{
  // Tab-separated input must be detected and parsed like a CSV.
  fstream out;
  out.open("test.tsv", fstream::out);
  out << "1\t2\t3\t4" << endl;
  out << "5\t6\t7\t8" << endl;
  out.close();

  arma::mat dataset;
  DatasetInfo di;
  BOOST_REQUIRE(data::Load("test.tsv", dataset, di, false));

  // Transposed load: two points with four dimensions each, elements 1..8.
  BOOST_REQUIRE_EQUAL(dataset.n_cols, 2);
  BOOST_REQUIRE_EQUAL(dataset.n_rows, 4);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_EQUAL(dataset[i], i + 1);

  remove("test.tsv");
}
/**
 * Test that a text file can load with LoadCSV.
 */
BOOST_AUTO_TEST_CASE(LoadCSVTXTTest)
{
  // Space-separated input must be detected and parsed like a CSV.
  fstream out;
  out.open("test.txt", fstream::out);
  out << "1 2 3 4" << endl;
  out << "5 6 7 8" << endl;
  out.close();

  arma::mat dataset;
  DatasetInfo di;
  BOOST_REQUIRE(data::Load("test.txt", dataset, di, false));

  // Transposed load: two points with four dimensions each, elements 1..8.
  BOOST_REQUIRE_EQUAL(dataset.n_cols, 2);
  BOOST_REQUIRE_EQUAL(dataset.n_rows, 4);
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_EQUAL(dataset[i], i + 1);

  remove("test.txt");
}
/**
 * Test that a non-transposed CSV with the wrong number of columns fails.
 */
BOOST_AUTO_TEST_CASE(MalformedNoTransposeCSVTest)
{
  // The middle line is one field short, so loading must report failure.
  fstream out;
  out.open("test.csv", fstream::out);
  out << "1, 2, 3, 4" << endl;
  out << "5, 6, 7" << endl;
  out << "8, 9, 10, 11" << endl;
  out.close();

  arma::mat dataset;
  DatasetInfo di;
  const bool success = data::Load("test.csv", dataset, di, false, false);
  BOOST_REQUIRE(!success);

  remove("test.csv");
}
/**
 * Test that a non-transposed TSV can load with LoadCSV.
 */
BOOST_AUTO_TEST_CASE(LoadCSVNoTransposeTSVTest)
{
  fstream out;
  out.open("test.tsv", fstream::out);
  out << "1\t2\t3\t4" << endl;
  out << "5\t6\t7\t8" << endl;
  out.close();

  arma::mat dataset;
  DatasetInfo di;
  BOOST_REQUIRE(data::Load("test.tsv", dataset, di, false, false));

  // Loaded as-is: 2 rows x 4 columns; column-major storage interleaves
  // the two file rows.
  BOOST_REQUIRE_EQUAL(dataset.n_cols, 4);
  BOOST_REQUIRE_EQUAL(dataset.n_rows, 2);
  const double expected[8] = { 1, 5, 2, 6, 3, 7, 4, 8 };
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_EQUAL(dataset[i], expected[i]);

  remove("test.tsv");
}
/**
 * Test that a non-transposed text file can load with LoadCSV.
 */
BOOST_AUTO_TEST_CASE(LoadCSVNoTransposeTXTTest)
{
  fstream out;
  out.open("test.txt", fstream::out);
  out << "1 2 3 4" << endl;
  out << "5 6 7 8" << endl;
  out.close();

  arma::mat dataset;
  DatasetInfo di;
  BOOST_REQUIRE(data::Load("test.txt", dataset, di, false, false));

  // Loaded as-is: 2 rows x 4 columns; column-major storage interleaves
  // the two file rows.
  BOOST_REQUIRE_EQUAL(dataset.n_cols, 4);
  BOOST_REQUIRE_EQUAL(dataset.n_rows, 2);
  const double expected[8] = { 1, 5, 2, 6, 3, 7, 4, 8 };
  for (size_t i = 0; i < 8; ++i)
    BOOST_REQUIRE_EQUAL(dataset[i], expected[i]);

  remove("test.txt");
}
/**
 * Make sure DatasetMapper properly unmaps from non-unique strings: under
 * MissingPolicy every unparseable token maps to the same NaN value, and the
 * individual original strings must still be recoverable by index.
 */
BOOST_AUTO_TEST_CASE(DatasetMapperNonUniqueTest)
{
DatasetMapper<MissingPolicy> dm(1);
// Map a couple of strings; they'll map to quiet_NaN().
dm.MapString<double>("0.5", 0); // No mapping created.
dm.MapString<double>("hello", 0); // Mapping created.
dm.MapString<double>("goodbye", 0);
dm.MapString<double>("cheese", 0);
double nan = std::numeric_limits<double>::quiet_NaN();
// Three strings share the NaN mapping ("0.5" parsed as a plain number).
BOOST_REQUIRE_EQUAL(dm.NumMappings(0), 3);
BOOST_REQUIRE_EQUAL(dm.NumUnmappings(nan, 0), 3);
// Without an index, the first registered string is returned; with an
// index, each of the non-unique unmappings is available in insertion order.
BOOST_REQUIRE_EQUAL(dm.UnmapString(nan, 0), "hello");
BOOST_REQUIRE_EQUAL(dm.UnmapString(nan, 0, 0), "hello");
BOOST_REQUIRE_EQUAL(dm.UnmapString(nan, 0, 1), "goodbye");
BOOST_REQUIRE_EQUAL(dm.UnmapString(nan, 0, 2), "cheese");
}
BOOST_AUTO_TEST_SUITE_END();
|
/**************************************************************************
* Copyright(c) 1998-2009, ALICE Experiment at CERN, All rights reserved. *
* *
* Author: The ALICE Off-line Project. *
* Contributors are mentioned in the code where appropriate. *
* *
* Permission to use, copy, modify and distribute this software and its *
* documentation strictly for non-commercial purposes is hereby granted *
* without fee, provided that the above copyright notice appears in all *
* copies and that both the copyright notice and this permission notice *
 * appear in the supporting documentation. The authors make no claims   *
* about the suitability of this software for any purpose. It is *
* provided "as is" without express or implied warranty. *
**************************************************************************/
/* $Id$ */
//
//
// Base class for DStar Analysis
//
//
// The D* spectra study is done in pt bins:
// [0,0.5] [0.5,1] [1,2] [2,3] [3,4] [4,5] [5,6] [6,7] [7,8],
// [8,10],[10,12], [12,16], [16,20] and [20,24]
//
// Cuts are centralized in AliRDHFCutsDStartoKpipi
// Side Band and like sign background are implemented in the macro
//
//-----------------------------------------------------------------------
//
// Author A.Grelli
// ERC-QGP Utrecht University - a.grelli@uu.nl,
// Author Y.Wang
// University of Heidelberg - yifei@physi.uni-heidelberg.de
// Author C.Ivan
// ERC-QGP Utrecht University - c.ivan@uu.nl,
//
// modified for EMCAL production check
//-----------------------------------------------------------------------
#include <TSystem.h>
#include <TParticle.h>
#include <TH1I.h>
#include "TROOT.h"
#include <TDatabasePDG.h>
#include <AliAnalysisDataSlot.h>
#include <AliAnalysisDataContainer.h>
#include "AliRDHFCutsDStartoKpipi.h"
#include "AliMCEvent.h"
#include "AliAnalysisManager.h"
#include "AliAODMCHeader.h"
#include "AliAODHandler.h"
#include "AliLog.h"
#include "AliAODVertex.h"
#include "AliAODRecoDecay.h"
#include "AliAODRecoDecayHF.h"
#include "AliAODRecoCascadeHF.h"
#include "AliAODRecoDecayHF2Prong.h"
#include "AliAnalysisVertexingHF.h"
#include "AliESDtrack.h"
#include "AliAODMCParticle.h"
#include "AliNormalizationCounter.h"
#include "AliAODEvent.h"
#include "AliAnalysisTaskSEDStarEMCALProductionCheck.h"
#include "AliEmcalTriggerDecisionContainer.h"
#include "AliInputEventHandler.h"
#include "AliTrackerBase.h"
#include "AliGenPythiaEventHeader.h"
/// \cond CLASSIMP
ClassImp(AliAnalysisTaskSEDStarEMCALProductionCheck); // ROOT dictionary/streamer entry for this task
/// \endcond
//__________________________________________________________________________
AliAnalysisTaskSEDStarEMCALProductionCheck::AliAnalysisTaskSEDStarEMCALProductionCheck():
AliAnalysisTaskSE(),
fEvents(0),
fAnalysis(0),
fD0Window(0),
fPeakWindow(0),
fUseMCInfo(kFALSE),
fDoSearch(kFALSE),
fOutput(0),
fOutputAll(0),
fOutputPID(0),
fOutputProductionCheck(0),
fNSigma(3),
fCuts(0),
fCEvents(0),
fTrueDiff2(0),
fDeltaMassD1(0),
fCounter(0),
fAODProtection(1),
fDoImpParDstar(kFALSE),
fNImpParBins(400),
fLowerImpPar(-2000.),
fHigherImpPar(2000.),
fNPtBins(0),
fAllhist(0x0),
fPIDhist(0x0),
fDoDStarVsY(kFALSE),
fUseEMCalTrigger(kFALSE),
fTriggerSelectionString(0),
fCheckEMCALAcceptance(kFALSE),
fCheckEMCALAcceptanceNumber(0),
fApplyEMCALClusterEventCut(kFALSE)
{
//
/// Default ctor: zero-initializes every member (all owned pointers null)
/// so the object is inert until configured via the named constructor.
//
// Histogram slots not covered by the initializer list are nulled here.
for (Int_t i = 0; i < 5; i++) fHistMassPtImpParTCDs[i] = 0;
}
//___________________________________________________________________________
AliAnalysisTaskSEDStarEMCALProductionCheck::AliAnalysisTaskSEDStarEMCALProductionCheck(const Char_t* name, AliRDHFCutsDStartoKpipi* cuts) :
AliAnalysisTaskSE(name),
fEvents(0),
fAnalysis(0),
fD0Window(0),
fPeakWindow(0),
fUseMCInfo(kFALSE),
fDoSearch(kFALSE),
fOutput(0),
fOutputAll(0),
fOutputPID(0),
fOutputProductionCheck(0),
fNSigma(3),
fCuts(0),
fCEvents(0),
fTrueDiff2(0),
fDeltaMassD1(0),
fCounter(0),
fAODProtection(1),
fDoImpParDstar(kFALSE),
fNImpParBins(400),
fLowerImpPar(-2000.),
fHigherImpPar(2000.),
fNPtBins(0),
fAllhist(0x0),
fPIDhist(0x0),
fDoDStarVsY(kFALSE),
fUseEMCalTrigger(kFALSE),
fTriggerSelectionString(0),
fCheckEMCALAcceptance(kFALSE),
fCheckEMCALAcceptanceNumber(0),
fApplyEMCALClusterEventCut(kFALSE)
{
//
/// Constructor. Initialization of Inputs and Outputs.
/// \param name  task name registered with the analysis manager
/// \param cuts  D*->Kpipi selection cuts; the task takes ownership
///              (deleted in the destructor)
//
Info("AliAnalysisTaskSEDStarEMCALProductionCheck", "Calling Constructor");
fCuts = cuts;
for (Int_t i = 0; i < 5; i++) fHistMassPtImpParTCDs[i] = 0;
// Declare the output slots consumed by UserCreateOutputObjects/PostData.
DefineOutput(1, TList::Class()); //counters
DefineOutput(2, TList::Class()); //All Entries output
DefineOutput(3, TList::Class()); //3sigma PID output
DefineOutput(4, AliRDHFCutsDStartoKpipi::Class()); //My private output
DefineOutput(5, AliNormalizationCounter::Class()); // normalization
DefineOutput(6, TList::Class()); //production check
}
//___________________________________________________________________________
AliAnalysisTaskSEDStarEMCALProductionCheck::~AliAnalysisTaskSEDStarEMCALProductionCheck() {
//
/// Destructor: releases all owned outputs, the cuts object, and the
/// per-pt-bin histogram arrays.
//
Info("~AliAnalysisTaskSEDStarEMCALProductionCheck", "Calling Destructor");
delete fOutput;
delete fOutputAll;
delete fOutputPID;
delete fOutputProductionCheck;
delete fCuts;
delete fCEvents;
delete fDeltaMassD1;
for (Int_t i = 0; i < 5; i++) {
delete fHistMassPtImpParTCDs[i]; // delete on a null pointer is a no-op
}
// Bug fix: both constructors initialize fAllhist/fPIDhist to 0x0, so if the
// task is destroyed before the arrays are allocated, indexing them here
// (fAllhist[i]) dereferences a null array pointer -- undefined behavior.
// Guard each array before walking it.
if (fAllhist) {
for (Int_t i = 0; i < ((fNPtBins + 2) * 18); i++) {
delete fAllhist[i];
}
delete [] fAllhist;
}
if (fPIDhist) {
for (Int_t i = 0; i < ((fNPtBins + 2) * 18); i++) {
delete fPIDhist[i];
}
delete [] fPIDhist;
}
}
//_________________________________________________
void AliAnalysisTaskSEDStarEMCALProductionCheck::Init() {
//
/// Initialization: caches the number of pt bins from the cuts object and
/// posts a copy of the cuts to output slot 4 (as declared in the ctor).
//
if (fDebug > 1) printf("AnalysisTaskSEDStarSpectra::Init() \n");
// NOTE(review): copyfCuts is heap-allocated and handed to PostData without
// an explicit owner in this class -- presumably the output container takes
// care of it; confirm there is no leak on repeated Init() calls.
AliRDHFCutsDStartoKpipi* copyfCuts = new AliRDHFCutsDStartoKpipi(*fCuts);
fNPtBins = fCuts->GetNPtBins();
// Post the data
PostData(4, copyfCuts);
return;
}
//_________________________________________________
void AliAnalysisTaskSEDStarEMCALProductionCheck::UserExec(Option_t *)
{
/// user exec
if (!fInputEvent) {
Error("UserExec", "NO EVENT FOUND!");
return;
}
fCEvents->Fill(0);//all events
if (fAODProtection >= 0) {
// Protection against different number of events in the AOD and deltaAOD
// In case of discrepancy the event is rejected.
Int_t matchingAODdeltaAODlevel = AliRDHFCuts::CheckMatchingAODdeltaAODevents();
if (matchingAODdeltaAODlevel < 0 || (matchingAODdeltaAODlevel == 0 && fAODProtection == 1)) {
// AOD/deltaAOD trees have different number of entries || TProcessID do not match while it was required
fCEvents->Fill(8);
return;
}
fCEvents->Fill(1);
}
fEvents++;
AliAODEvent* aodEvent = dynamic_cast<AliAODEvent*>(fInputEvent);
TClonesArray *arrayDStartoD0pi = 0;
TClonesArray *arrayD0toKpi = 0;
if (!aodEvent && AODEvent() && IsStandardAOD()) {
// In case there is an AOD handler writing a standard AOD, use the AOD
// event in memory rather than the input (ESD) event.
aodEvent = dynamic_cast<AliAODEvent*> (AODEvent());
// in this case the braches in the deltaAOD (AliAOD.VertexingHF.root)
// have to taken from the AOD event hold by the AliAODExtension
AliAODHandler* aodHandler = (AliAODHandler*)
((AliAnalysisManager::GetAnalysisManager())->GetOutputEventHandler());
if (aodHandler->GetExtensions()) {
AliAODExtension *ext = (AliAODExtension*)aodHandler->GetExtensions()->FindObject("AliAOD.VertexingHF.root");
AliAODEvent *aodFromExt = ext->GetAOD();
arrayDStartoD0pi = (TClonesArray*)aodFromExt->GetList()->FindObject("Dstar");
arrayD0toKpi = (TClonesArray*)aodFromExt->GetList()->FindObject("D0toKpi");
}
} else {
arrayDStartoD0pi = (TClonesArray*)aodEvent->GetList()->FindObject("Dstar");
arrayD0toKpi = (TClonesArray*)aodEvent->GetList()->FindObject("D0toKpi");
}
//objects for production check
AliAODMCHeader *mcHeader = nullptr;
AliGenPythiaEventHeader * pythiaHeader = nullptr;
TClonesArray *mcTrackArray = nullptr;
Double_t crossSection = 0.0;
Double_t ptHard = 0.0;
Int_t nTrials = 0;
// Int_t nPtBins = fCuts->GetNPtBins();
// const Int_t nPtBinLimits = nPtBins + 1;
// Int_t PtBinLimits[nPtBinLimits] = fCuts->GetPtBinLimits();
if (fUseMCInfo) {
// load MC header
mcHeader = (AliAODMCHeader*)aodEvent->GetList()->FindObject(AliAODMCHeader::StdBranchName());
if (!mcHeader) {
printf("AliAnalysisTaskSEDStarEMCALProductionCheck::UserExec: MC header branch not found!\n");
return;
}
AliGenPythiaEventHeader * pythiaHeader = (AliGenPythiaEventHeader*)mcHeader->GetCocktailHeader(0);
if (!pythiaHeader) {
printf("AliAnalysisTaskSEDStarEMCALProductionCheck::UserExec: AliGenPythiaEventHeader not found!\n");
return;
}
crossSection = pythiaHeader->GetXsection();
ptHard = pythiaHeader->GetPtHard();
nTrials = pythiaHeader->Trials();
mcTrackArray = dynamic_cast<TClonesArray*>(aodEvent->FindListObject(AliAODMCParticle::StdBranchName()));
if (!mcTrackArray) {std::cout << "no track array" << std::endl; return;};
}
// check before event cut
if (fUseMCInfo) {
for (Int_t j = 0; j < mcTrackArray->GetEntriesFast(); j++) {
AliAODMCParticle *mcTrackParticle = dynamic_cast< AliAODMCParticle*>(mcTrackArray->At(j));
if (!mcTrackParticle) {std::cout << "no particle" << std::endl; continue;}
Int_t pdgCodeMC = TMath::Abs(mcTrackParticle->GetPdgCode());
if (pdgCodeMC == 413)
{ //if the track is a DStar we check if it comes from charm
Double_t ptMC = mcTrackParticle->Pt();
Bool_t fromCharm = kFALSE;
Int_t mother = mcTrackParticle->GetMother();
Int_t istep = 0;
while (mother >= 0 ) {
istep++;
AliAODMCParticle* mcGranma = dynamic_cast<AliAODMCParticle*>(mcTrackArray->At(mother));
if (mcGranma) {
Int_t abspdgGranma = TMath::Abs(mcGranma->GetPdgCode());
if ((abspdgGranma == 4) || (abspdgGranma > 400 && abspdgGranma < 500) || (abspdgGranma > 4000 && abspdgGranma < 5000)) fromCharm = kTRUE;
mother = mcGranma->GetMother();
} else {
printf("AliVertexingHFUtils::IsTrackFromCharm: Failed casting the mother particle!");
break;
}
}
if (fromCharm)
{
Bool_t mcPionDStarPresent = kFALSE;
Bool_t mcPionD0Present = kFALSE;
Bool_t mcKaonPresent = kFALSE;
Int_t nDaughterDStar = mcTrackParticle->GetNDaughters();
if (nDaughterDStar == 2) {
for (Int_t iDaughterDStar = 0; iDaughterDStar < 2; iDaughterDStar++) {
AliAODMCParticle* daughterDStar = (AliAODMCParticle*)mcTrackArray->At(mcTrackParticle->GetDaughterLabel(iDaughterDStar));
if (!daughterDStar) break;
Int_t pdgCodeDaughterDStar = TMath::Abs(daughterDStar->GetPdgCode());
if (pdgCodeDaughterDStar == 211) { //if the track is a pion we save its monte carlo label
mcPionDStarPresent = kTRUE;
} else if (pdgCodeDaughterDStar == 421) { //if the track is a D0 we look at its daughters
Int_t mcLabelD0 = mcTrackParticle->GetDaughterLabel(iDaughterDStar);
Int_t nDaughterD0 = daughterDStar->GetNDaughters();
if (nDaughterD0 == 2) {
for (Int_t iDaughterD0 = 0; iDaughterD0 < 2; iDaughterD0++) {
AliAODMCParticle* daughterD0 = (AliAODMCParticle*)mcTrackArray->At(daughterDStar->GetDaughterLabel(iDaughterD0));
if (!daughterD0) break;
Int_t pdgCodeDaughterD0 = TMath::Abs(daughterD0->GetPdgCode());
if (pdgCodeDaughterD0 == 211) {
mcPionD0Present = kTRUE;
} else if (pdgCodeDaughterD0 == 321) {
mcKaonPresent = kTRUE;
} else break;
}
}
} else break;
}
}
if (mcPionDStarPresent && mcPionD0Present && mcKaonPresent)
{
TString fillthis = "";
fillthis = "DStarPtTruePreEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC);
fillthis = "DStarPtTruePreEventSelectionWeighted";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC, crossSection);
// fillthis = "PtHardPreEventSelection";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptHard);
// fillthis = "PtHardWeightedPreEventSelection";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptHard, crossSection);
// fillthis = "WeightsPreEventSelection";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(crossSection);
// fillthis = "TrialsPreEventSelection";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->AddBinContent(1,nTrials);
fillthis = "DStar_per_bin_true_PreEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC);
fillthis = "DStar_per_bin_true_PreEventSelection_weighted";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC, crossSection);
}
}
}
}
}
TString fillthishist = "";
fillthishist = "PtHardPreEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(ptHard);
fillthishist = "PtHardWeightedPreEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(ptHard, crossSection);
fillthishist = "WeightsPreEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(crossSection);
fillthishist = "TrialsPreEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->AddBinContent(1,nTrials);
// fix for temporary bug in ESDfilter
// the AODs with null vertex pointer didn't pass the PhysSel
if (!aodEvent->GetPrimaryVertex() || TMath::Abs(aodEvent->GetMagneticField()) < 0.001) return;
fCEvents->Fill(2);
fCounter->StoreEvent(aodEvent, fCuts, fUseMCInfo);
// trigger class for PbPb C0SMH-B-NOPF-ALLNOTRD
TString trigclass = aodEvent->GetFiredTriggerClasses();
if (trigclass.Contains("C0SMH-B-NOPF-ALLNOTRD") || trigclass.Contains("C0SMH-B-NOPF-ALL")) fCEvents->Fill(5);
if (!fCuts->IsEventSelected(aodEvent)) {
if (fCuts->GetWhyRejection() == 6) // rejected for Z vertex
fCEvents->Fill(6);
return;
}
// Use simulated EMCal trigger for MC. AliEmcalTriggerMakerTask needs to be run first.
if (fUseEMCalTrigger)
{
auto triggercont = static_cast<PWG::EMCAL::AliEmcalTriggerDecisionContainer*>(fInputEvent->FindListObject("EmcalTriggerDecision"));
if (!triggercont)
{
AliErrorStream() << "Trigger decision container not found in event - not possible to select EMCAL triggers" << std::endl;
return;
}
if (fTriggerSelectionString == "EG1DG1")
{
if (!triggercont->IsEventSelected("EG1") && !triggercont->IsEventSelected("DG1")) return;
} else if (!triggercont->IsEventSelected(fTriggerSelectionString)) return;
}
// Get field for EMCAL acceptance and cut events
AliAnalysisManager *man = AliAnalysisManager::GetAnalysisManager();
AliInputEventHandler* inputHandler = (AliInputEventHandler*) (man->GetInputEventHandler());
inputHandler->SetNeedField();
if (fApplyEMCALClusterEventCut)
{
Int_t numberOfCaloClustersEvent = aodEvent->GetNumberOfCaloClusters();
if (numberOfCaloClustersEvent >= 0)
{
Bool_t passClusterCuts = kFALSE;
for (Int_t iCluster = 0; iCluster < numberOfCaloClustersEvent; ++iCluster)
{
AliAODCaloCluster * trackEMCALCluster = (AliAODCaloCluster*)aodEvent->GetCaloCluster(iCluster);
if (trackEMCALCluster->GetNonLinCorrEnergy() < 9.0) continue;
if (trackEMCALCluster->GetTOF() > 15e-9) continue;
if (trackEMCALCluster->GetTOF() < -20e-9) continue;
if (trackEMCALCluster->GetIsExotic()) continue;
passClusterCuts = kTRUE;
}
if (!passClusterCuts) return;
} else return;
}
Bool_t isEvSel = fCuts->IsEventSelected(aodEvent);
fCEvents->Fill(3);
if (!isEvSel) return;
// Load the event
// AliInfo(Form("Event %d",fEvents));
//if (fEvents%10000 ==0) AliInfo(Form("Event %d",fEvents));
// counters for efficiencies
Int_t icountReco = 0;
//D* and D0 prongs needed to MatchToMC method
Int_t pdgDgDStartoD0pi[2] = {421, 211};
Int_t pdgDgD0toKpi[2] = {321, 211};
// AOD primary vertex
AliAODVertex *vtx1 = (AliAODVertex*)aodEvent->GetPrimaryVertex();
if (!vtx1) return;
if (vtx1->GetNContributors() < 1) return;
fCEvents->Fill(4);
//save cluster information for EMCal trigger selection check
Int_t numberOfCaloClustersEvent = aodEvent->GetNumberOfCaloClusters();
if (numberOfCaloClustersEvent >= 0)
{
for (Int_t iCluster = 0; iCluster < numberOfCaloClustersEvent; ++iCluster)
{
AliAODCaloCluster * trackEMCALCluster = (AliAODCaloCluster*)aodEvent->GetCaloCluster(iCluster);
//save cluster information
Float_t pos[3]={0};
trackEMCALCluster->GetPosition(pos);
TString fillthis = "";
fillthis = "fHistClusPosition";
((TH3F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(pos[0], pos[1], pos[2]);
}
}
if (!arrayDStartoD0pi || !arrayD0toKpi) {
AliInfo("Could not find array of HF vertices, skipping the event");
return;
} else AliDebug(2, Form("Found %d vertices", arrayDStartoD0pi->GetEntriesFast()));
Int_t nSelectedAna = 0;
Int_t nSelectedProd = 0;
// check after event cut
if (fUseMCInfo) {
for (Int_t j = 0; j < mcTrackArray->GetEntriesFast(); j++) {
AliAODMCParticle *mcTrackParticle = dynamic_cast< AliAODMCParticle*>(mcTrackArray->At(j));
if (!mcTrackParticle) {std::cout << "no particle" << std::endl; continue;}
Int_t pdgCodeMC = TMath::Abs(mcTrackParticle->GetPdgCode());
if (pdgCodeMC == 413)
{ //if the track is a DStar we check if it comes from charm
Double_t ptMC = mcTrackParticle->Pt();
Bool_t fromCharm = kFALSE;
Int_t mother = mcTrackParticle->GetMother();
Int_t istep = 0;
while (mother >= 0 ) {
istep++;
AliAODMCParticle* mcGranma = dynamic_cast<AliAODMCParticle*>(mcTrackArray->At(mother));
if (mcGranma) {
Int_t abspdgGranma = TMath::Abs(mcGranma->GetPdgCode());
if ((abspdgGranma == 4) || (abspdgGranma > 400 && abspdgGranma < 500) || (abspdgGranma > 4000 && abspdgGranma < 5000)) fromCharm = kTRUE;
mother = mcGranma->GetMother();
} else {
printf("AliVertexingHFUtils::IsTrackFromCharm: Failed casting the mother particle!");
break;
}
}
if (fromCharm)
{
Bool_t mcPionDStarPresent = kFALSE;
Bool_t mcPionD0Present = kFALSE;
Bool_t mcKaonPresent = kFALSE;
Int_t nDaughterDStar = mcTrackParticle->GetNDaughters();
if (nDaughterDStar == 2) {
for (Int_t iDaughterDStar = 0; iDaughterDStar < 2; iDaughterDStar++) {
AliAODMCParticle* daughterDStar = (AliAODMCParticle*)mcTrackArray->At(mcTrackParticle->GetDaughterLabel(iDaughterDStar));
if (!daughterDStar) break;
Int_t pdgCodeDaughterDStar = TMath::Abs(daughterDStar->GetPdgCode());
if (pdgCodeDaughterDStar == 211) { //if the track is a pion we save its monte carlo label
mcPionDStarPresent = kTRUE;
} else if (pdgCodeDaughterDStar == 421) { //if the track is a D0 we look at its daughters
Int_t mcLabelD0 = mcTrackParticle->GetDaughterLabel(iDaughterDStar);
Int_t nDaughterD0 = daughterDStar->GetNDaughters();
if (nDaughterD0 == 2) {
for (Int_t iDaughterD0 = 0; iDaughterD0 < 2; iDaughterD0++) {
AliAODMCParticle* daughterD0 = (AliAODMCParticle*)mcTrackArray->At(daughterDStar->GetDaughterLabel(iDaughterD0));
if (!daughterD0) break;
Int_t pdgCodeDaughterD0 = TMath::Abs(daughterD0->GetPdgCode());
if (pdgCodeDaughterD0 == 211) {
mcPionD0Present = kTRUE;
} else if (pdgCodeDaughterD0 == 321) {
mcKaonPresent = kTRUE;
} else break;
}
}
} else break;
}
}
if (mcPionDStarPresent && mcPionD0Present && mcKaonPresent)
{
TString fillthis = "";
fillthis = "DStarPtTruePostEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC);
fillthis = "DStarPtTruePostEventSelectionWeighted";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC, crossSection);
// fillthis = "PtHardPostEventSelection";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptHard);
// fillthis = "PtHardWeightedPostEventSelection";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptHard, crossSection);
// fillthis = "WeightsPostEventSelection";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(crossSection);
// fillthis = "TrialsPostEventSelection";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->AddBinContent(1,nTrials);
fillthis = "DStar_per_bin_true_PostEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC);
fillthis = "DStar_per_bin_true_PostEventSelection_weighted";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC, crossSection);
}
}
}
}
}
fillthishist = "";
fillthishist = "PtHardPostEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(ptHard);
fillthishist = "PtHardWeightedPostEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(ptHard, crossSection);
fillthishist = "WeightsPostEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(crossSection);
fillthishist = "TrialsPostEventSelection";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->AddBinContent(1,nTrials);
// vHF object is needed to call the method that refills the missing info of the candidates
// if they have been deleted in dAOD reconstruction phase
// in order to reduce the size of the file
AliAnalysisVertexingHF *vHF = new AliAnalysisVertexingHF();
// loop over the tracks to search for candidates soft pion
for (Int_t iDStartoD0pi = 0; iDStartoD0pi < arrayDStartoD0pi->GetEntriesFast(); iDStartoD0pi++) {
// D* candidates and D0 from D*
AliAODRecoCascadeHF* dstarD0pi = (AliAODRecoCascadeHF*)arrayDStartoD0pi->At(iDStartoD0pi);
AliAODRecoDecayHF2Prong *trackD0;
if (dstarD0pi->GetIsFilled() < 1) {
trackD0 = (AliAODRecoDecayHF2Prong*)arrayD0toKpi->At(dstarD0pi->GetProngID(1));
} else {
trackD0 = (AliAODRecoDecayHF2Prong*)dstarD0pi->Get2Prong();
}
fCEvents->Fill(10);
TObjArray arrTracks(3);
for (Int_t ipr = 0; ipr < 3; ipr++) {
AliAODTrack *tr;
if (ipr == 0) tr = vHF->GetProng(aodEvent, dstarD0pi, ipr); //soft pion
else tr = vHF->GetProng(aodEvent, trackD0, ipr - 1); //D0 daughters
arrTracks.AddAt(tr, ipr);
}
if (!fCuts->PreSelect(arrTracks)) {
fCEvents->Fill(13);
continue;
}
Bool_t isDStarCand = kTRUE;
if (!(vHF->FillRecoCasc(aodEvent, dstarD0pi, isDStarCand))) { //Fill the data members of the candidate only if they are empty.
fCEvents->Fill(12); //monitor how often this fails
continue;
}
if (!dstarD0pi->GetSecondaryVtx()) continue;
AliAODRecoDecayHF2Prong* theD0particle = (AliAODRecoDecayHF2Prong*)dstarD0pi->Get2Prong();
if (!theD0particle) continue;
Int_t isDStar = 0;
TClonesArray *mcArray = 0;
// AliAODMCHeader *mcHeader = 0;
Bool_t isPrimary = kTRUE;
Float_t pdgCode = -2;
Float_t trueImpParXY = 0.;
// mc analysis
if (fUseMCInfo) {
//MC array need for maching
mcArray = dynamic_cast<TClonesArray*>(aodEvent->FindListObject(AliAODMCParticle::StdBranchName()));
if (!mcArray) {
AliError("Could not find Monte-Carlo in AOD");
return;
}
// load MC header
// mcHeader = (AliAODMCHeader*)aodEvent->GetList()->FindObject(AliAODMCHeader::StdBranchName());
// if (!mcHeader) {
// printf("AliAnalysisTaskSEDplus::UserExec: MC header branch not found!\n");
// return;
// }
// find associated MC particle for D* ->D0toKpi
Int_t mcLabel = dstarD0pi->MatchToMC(413, 421, pdgDgDStartoD0pi, pdgDgD0toKpi, mcArray);
if (mcLabel >= 0) {
AliAODMCParticle *partDSt = (AliAODMCParticle*)mcArray->At(mcLabel);
Int_t checkOrigin = CheckOrigin(mcArray, partDSt);
if (checkOrigin == 5) isPrimary = kFALSE;
AliAODMCParticle *dg0 = (AliAODMCParticle*)mcArray->At(partDSt->GetDaughterLabel(0));
// AliAODMCParticle *dg01 = (AliAODMCParticle*)mcArray->At(dg0->GetDaughterLabel(0));
pdgCode = TMath::Abs(partDSt->GetPdgCode());
if (!isPrimary) {
trueImpParXY = GetTrueImpactParameterD0(mcHeader, mcArray, dg0) * 1000.;
}
isDStar = 1;
} else {
pdgCode = -1;
}
}
if (pdgCode == -1) AliDebug(2, "No particle assigned! check\n");
Double_t Dstarpt = dstarD0pi->Pt();
// quality selction on tracks and region of interest
Int_t isTkSelected = fCuts->IsSelected(dstarD0pi, AliRDHFCuts::kTracks); // quality cuts on tracks
if (!isTkSelected) continue;
if (!fCuts->IsInFiducialAcceptance(dstarD0pi->Pt(), dstarD0pi->YDstar())) continue;
// EMCAL acceptance check
if (fCheckEMCALAcceptance)
{
Int_t numberInAcc = 0;
AliAODTrack *track[3];
for (Int_t iDaught = 0; iDaught < 3; iDaught++) {
track[iDaught] = (AliAODTrack*)arrTracks.At(iDaught);
Int_t numberOfCaloClusters = aodEvent->GetNumberOfCaloClusters();
if (numberOfCaloClusters >= 0)
{
Int_t trackEMCALClusterNumber = track[iDaught]->GetEMCALcluster();
if (!(trackEMCALClusterNumber < 0))
{
AliAODCaloCluster * trackEMCALCluster = (AliAODCaloCluster*)aodEvent->GetCaloCluster(trackEMCALClusterNumber);
if (!trackEMCALCluster) continue;
if (trackEMCALCluster->GetNonLinCorrEnergy() < 9.0) continue;
if (trackEMCALCluster->GetTOF() > 15e-9) continue;
if (trackEMCALCluster->GetTOF() < -20e-9) continue;
if (trackEMCALCluster->GetIsExotic()) continue;
numberInAcc++;
}
}
}
// Cut on number of events in EMCAL acceptance
if (numberInAcc < fCheckEMCALAcceptanceNumber) continue;
}
//histos for impact par studies - D0!!!
Double_t ptCand = dstarD0pi->Get2Prong()->Pt();
Double_t invMass = dstarD0pi->InvMassD0();
Double_t impparXY = dstarD0pi->Get2Prong()->ImpParXY() * 10000.;
Double_t arrayForSparse[3] = {invMass, ptCand, impparXY};
Double_t arrayForSparseTrue[3] = {invMass, ptCand, trueImpParXY};
// set the D0 and D* search window bin by bin - D* window useful to speed up the reconstruction and D0 window used *ONLY* to calculate side band bkg for the background subtraction methods, for the standard analysis the value in the cut file is considered
if (0 <= Dstarpt && Dstarpt < 0.5) {
if (fAnalysis == 1) {
fD0Window = 0.035;
fPeakWindow = 0.03;
} else {
fD0Window = 0.020;
fPeakWindow = 0.0018;
}
}
if (0.5 <= Dstarpt && Dstarpt < 1.0) {
if (fAnalysis == 1) {
fD0Window = 0.035;
fPeakWindow = 0.03;
} else {
fD0Window = 0.020;
fPeakWindow = 0.0018;
}
}
if (1.0 <= Dstarpt && Dstarpt < 2.0) {
if (fAnalysis == 1) {
fD0Window = 0.035;
fPeakWindow = 0.03;
} else {
fD0Window = 0.020;
fPeakWindow = 0.0018;
}
}
if (2.0 <= Dstarpt && Dstarpt < 3.0) {
if (fAnalysis == 1) {
fD0Window = 0.035;
fPeakWindow = 0.03;
} else {
fD0Window = 0.022;
fPeakWindow = 0.0016;
}
}
if (3.0 <= Dstarpt && Dstarpt < 4.0) {
if (fAnalysis == 1) {
fD0Window = 0.035;
fPeakWindow = 0.03;
} else {
fD0Window = 0.026;
fPeakWindow = 0.0014;
}
}
if (4.0 <= Dstarpt && Dstarpt < 5.0) {
if (fAnalysis == 1) {
fD0Window = 0.045;
fPeakWindow = 0.03;
} else {
fD0Window = 0.026;
fPeakWindow = 0.0014;
}
}
if (5.0 <= Dstarpt && Dstarpt < 6.0) {
if (fAnalysis == 1) {
fD0Window = 0.045;
fPeakWindow = 0.03;
} else {
fD0Window = 0.026;
fPeakWindow = 0.006;
}
}
if (6.0 <= Dstarpt && Dstarpt < 7.0) {
if (fAnalysis == 1) {
fD0Window = 0.055;
fPeakWindow = 0.03;
} else {
fD0Window = 0.026;
fPeakWindow = 0.006;
}
}
if (Dstarpt >= 7.0) {
if (fAnalysis == 1) {
fD0Window = 0.074;
fPeakWindow = 0.03;
} else {
fD0Window = 0.026;
fPeakWindow = 0.006;
}
}
nSelectedProd++;
nSelectedAna++;
// check that we are close to signal in the DeltaM - here to save time for PbPb
Double_t mPDGD0 = TDatabasePDG::Instance()->GetParticle(421)->Mass();
Double_t mPDGDstar = TDatabasePDG::Instance()->GetParticle(413)->Mass();
Double_t invmassDelta = dstarD0pi->DeltaInvMass();
if (TMath::Abs(invmassDelta - (mPDGDstar - mPDGD0)) > fPeakWindow) continue;
Int_t isSelected = fCuts->IsSelected(dstarD0pi, AliRDHFCuts::kCandidate, aodEvent); //selected
if (isSelected > 0) fCEvents->Fill(11);
// after cuts
if (fDoImpParDstar && isSelected) {
fHistMassPtImpParTCDs[0]->Fill(arrayForSparse);
if (isPrimary) fHistMassPtImpParTCDs[1]->Fill(arrayForSparse);
else {
fHistMassPtImpParTCDs[2]->Fill(arrayForSparse);
fHistMassPtImpParTCDs[3]->Fill(arrayForSparseTrue);
}
}
if (fDoDStarVsY && isSelected) {
((TH3F*) (fOutputPID->FindObject("deltamassVsyVsPt")))->Fill(dstarD0pi->DeltaInvMass(), dstarD0pi->YDstar(), dstarD0pi->Pt() );
}
// check after cuts
if (fUseMCInfo) {
Int_t mcLabel = dstarD0pi->MatchToMC(413, 421, pdgDgDStartoD0pi, pdgDgD0toKpi, mcTrackArray);
if (mcLabel >= 0) {
AliAODMCParticle *mcTrackParticle = dynamic_cast< AliAODMCParticle*>(mcTrackArray->At(mcLabel));
if (!mcTrackParticle) {std::cout << "no particle" << std::endl; continue;}
Int_t pdgCodeMC = TMath::Abs(mcTrackParticle->GetPdgCode());
if (pdgCodeMC == 413)
{ //if the track is a DStar we check if it comes from charm
Double_t ptMC = mcTrackParticle->Pt();
Bool_t fromCharm = kFALSE;
Int_t mother = mcTrackParticle->GetMother();
Int_t istep = 0;
while (mother >= 0 ) {
istep++;
AliAODMCParticle* mcGranma = dynamic_cast<AliAODMCParticle*>(mcTrackArray->At(mother));
if (mcGranma) {
Int_t abspdgGranma = TMath::Abs(mcGranma->GetPdgCode());
if ((abspdgGranma == 4) || (abspdgGranma > 400 && abspdgGranma < 500) || (abspdgGranma > 4000 && abspdgGranma < 5000)) fromCharm = kTRUE;
mother = mcGranma->GetMother();
} else {
printf("AliVertexingHFUtils::IsTrackFromCharm: Failed casting the mother particle!");
break;
}
}
if (fromCharm)
{
Bool_t mcPionDStarPresent = kFALSE;
Bool_t mcPionD0Present = kFALSE;
Bool_t mcKaonPresent = kFALSE;
Int_t nDaughterDStar = mcTrackParticle->GetNDaughters();
if (nDaughterDStar == 2) {
for (Int_t iDaughterDStar = 0; iDaughterDStar < 2; iDaughterDStar++) {
AliAODMCParticle* daughterDStar = (AliAODMCParticle*)mcTrackArray->At(mcTrackParticle->GetDaughterLabel(iDaughterDStar));
if (!daughterDStar) break;
Int_t pdgCodeDaughterDStar = TMath::Abs(daughterDStar->GetPdgCode());
if (pdgCodeDaughterDStar == 211) { //if the track is a pion we save its monte carlo label
mcPionDStarPresent = kTRUE;
} else if (pdgCodeDaughterDStar == 421) { //if the track is a D0 we look at its daughters
Int_t mcLabelD0 = mcTrackParticle->GetDaughterLabel(iDaughterDStar);
Int_t nDaughterD0 = daughterDStar->GetNDaughters();
if (nDaughterD0 == 2) {
for (Int_t iDaughterD0 = 0; iDaughterD0 < 2; iDaughterD0++) {
AliAODMCParticle* daughterD0 = (AliAODMCParticle*)mcTrackArray->At(daughterDStar->GetDaughterLabel(iDaughterD0));
if (!daughterD0) break;
Int_t pdgCodeDaughterD0 = TMath::Abs(daughterD0->GetPdgCode());
if (pdgCodeDaughterD0 == 211) {
mcPionD0Present = kTRUE;
} else if (pdgCodeDaughterD0 == 321) {
mcKaonPresent = kTRUE;
} else break;
}
}
} else break;
}
}
if (mcPionDStarPresent && mcPionD0Present && mcKaonPresent)
{
TString fillthis = "";
fillthis = "DStarPtTruePostCuts";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC);
fillthis = "DStarPtTruePostCutsWeighted";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC, crossSection);
// fillthis = "PtHardPostCuts";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptHard);
// fillthis = "PtHardWeightedPostCuts";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptHard, crossSection);
// fillthis = "WeightsPostCuts";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(crossSection);
// fillthis = "TrialsPostCuts";
// ((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->AddBinContent(1,nTrials);
fillthis = "DStarPtPostCuts";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(Dstarpt);
fillthis = "DStarPtPostCutsWeighted";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(Dstarpt, crossSection);
fillthis = "DStar_per_bin_true_PostCuts";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC);
fillthis = "DStar_per_bin_true_PostCuts_weighted";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(ptMC, crossSection);
fillthis = "DStar_per_bin_PostCuts";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(Dstarpt);
fillthis = "DStar_per_bin_PostCuts_weighted";
((TH1F*)(fOutputProductionCheck->FindObject(fillthis)))->Fill(Dstarpt, crossSection);
}
}
}
}
}
fillthishist = "";
fillthishist = "PtHardPostCuts";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(ptHard);
fillthishist = "PtHardWeightedPostCuts";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(ptHard, crossSection);
fillthishist = "WeightsPostCuts";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->Fill(crossSection);
fillthishist = "TrialsPostCuts";
((TH1F*)(fOutputProductionCheck->FindObject(fillthishist)))->AddBinContent(1,nTrials);
// fill PID
FillSpectrum(dstarD0pi, isDStar, fCuts, isSelected, fOutputPID, fPIDhist);
SideBandBackground(dstarD0pi, fCuts, isSelected, fOutputPID, fPIDhist);
//WrongSignForDStar(dstarD0pi,fCuts,fOutputPID);
//swich off the PID selection
fCuts->SetUsePID(kFALSE);
Int_t isSelectedNoPID = fCuts->IsSelected(dstarD0pi, AliRDHFCuts::kCandidate, aodEvent); //selected
fCuts->SetUsePID(kTRUE);
FillSpectrum(dstarD0pi, isDStar, fCuts, isSelectedNoPID, fOutputAll, fAllhist);
// SideBandBackground(dstarD0pi,fCuts,isSelectedNoPID, fOutputAll);
// rare D search ------
if (fDoSearch) {
TLorentzVector lorentzTrack1(0, 0, 0, 0); // lorentz 4 vector
TLorentzVector lorentzTrack2(0, 0, 0, 0); // lorentz 4 vector
for (Int_t i = 0; i < aodEvent->GetNumberOfTracks(); i++) {
AliAODTrack* aodTrack = dynamic_cast<AliAODTrack*>(aodEvent->GetTrack(i));
if (!aodTrack) AliFatal("Not a standard AOD");
if (dstarD0pi->Charge() == aodTrack->Charge()) continue;
if ((!(aodTrack->GetStatus()&AliESDtrack::kITSrefit) || (!(aodTrack->GetStatus()&AliESDtrack::kTPCrefit)))) continue;
if (TMath::Abs(invmassDelta - (mPDGDstar - mPDGD0)) > 0.02) continue;
//build the D1 mass
Double_t mass = TDatabasePDG::Instance()->GetParticle(211)->Mass();
lorentzTrack1.SetPxPyPzE( dstarD0pi->Px(), dstarD0pi->Py(), dstarD0pi->Pz(), dstarD0pi->E(413) );
lorentzTrack2.SetPxPyPzE( aodTrack->Px(), aodTrack->Py(), aodTrack->Pz(), aodTrack->E(mass) );
//D1 mass
Double_t d1mass = ((lorentzTrack1 + lorentzTrack2).M());
//mass difference - at 0.4117 and 0.4566
fDeltaMassD1->Fill(d1mass - dstarD0pi->InvMassDstarKpipi());
}
}
if (isDStar == 1) {
fTrueDiff2->Fill(dstarD0pi->Pt(), dstarD0pi->DeltaInvMass());
}
}
fCounter->StoreCandidates(aodEvent, nSelectedProd, kTRUE);
fCounter->StoreCandidates(aodEvent, nSelectedAna, kFALSE);
delete vHF;
AliDebug(2, Form("Found %i Reco particles that are D*!!", icountReco));
PostData(1, fOutput);
PostData(2, fOutputAll);
PostData(3, fOutputPID);
PostData(5, fCounter);
PostData(6, fOutputProductionCheck);
}
//________________________________________ terminate ___________________________
void AliAnalysisTaskSEDStarEMCALProductionCheck::Terminate(Option_t*)
{
  /// The Terminate() function is the last function to be called during
  /// a query. It always runs on the client; it can be used to present
  /// the results graphically or save the results to file.
  //Info("Terminate","");
  AliAnalysisTaskSE::Terminate();
  // Retrieve the output lists from the slots they were posted to in
  // UserCreateOutputObjects(): 1 = fOutput, 2 = fOutputAll,
  // 3 = fOutputPID, 6 = fOutputProductionCheck.
  fOutput = dynamic_cast<TList*> (GetOutputData(1));
  if (!fOutput) {
    printf("ERROR: fOutput not available\n");
    return;
  }
  // Histogram lookups must use the names given at creation time in
  // DefineHistograms() ("fCEvents", "DeltaMassD1", "DiffDstar_pt"),
  // not the member-variable names.
  fCEvents = dynamic_cast<TH1F*>(fOutput->FindObject("fCEvents"));
  // BUGFIX: previously looked up "fDeltaMassD1"/"fTrueDiff2", which do not
  // exist in the list, so both pointers were silently left null.
  fDeltaMassD1 = dynamic_cast<TH1F*>(fOutput->FindObject("DeltaMassD1"));
  fTrueDiff2 = dynamic_cast<TH2F*>(fOutput->FindObject("DiffDstar_pt"));
  // BUGFIX: fOutputAll was read from slot 1 and fOutputPID from slot 2,
  // but they are posted to slots 2 and 3 respectively (see PostData calls
  // in UserCreateOutputObjects()).
  fOutputAll = dynamic_cast<TList*> (GetOutputData(2));
  if (!fOutputAll) {
    printf("ERROR: fOutputAll not available\n");
    return;
  }
  fOutputPID = dynamic_cast<TList*> (GetOutputData(3));
  if (!fOutputPID) {
    printf("ERROR: fOutputPID not available\n");
    return;
  }
  fOutputProductionCheck = dynamic_cast<TList*> (GetOutputData(6));
  if (!fOutputProductionCheck) {
    printf("ERROR: fOutputProductionCheck not available\n");
    return;
  }
  return;
}
//___________________________________________________________________________
void AliAnalysisTaskSEDStarEMCALProductionCheck::UserCreateOutputObjects() {
  /// Create the output containers (TLists posted to slots 1, 2, 3, 6)
  /// plus the normalization counter (slot 5) and all analysis histograms.
  Info("UserCreateOutputObjects", "CreateOutputObjects of task %s\n", GetName());
  //slot #1
  //OpenFile(1);
  fOutput = new TList();
  fOutput->SetOwner();          // list deletes its histograms on cleanup
  fOutput->SetName("chist0");
  fOutputAll = new TList();
  fOutputAll->SetOwner();
  fOutputAll->SetName("listAll");
  fOutputPID = new TList();
  fOutputPID->SetOwner();
  fOutputPID->SetName("listPID");
  fOutputProductionCheck = new TList();
  fOutputProductionCheck->SetOwner();
  // BUGFIX: this list was also named "listPID", colliding with fOutputPID
  // above and producing two same-named objects in the output file. Give it
  // a unique name so it can be retrieved unambiguously.
  fOutputProductionCheck->SetName("listProductionCheck");
  // define histograms
  DefineHistograms();
  //Counter for Normalization (named after the slot-5 container)
  fCounter = new AliNormalizationCounter(Form("%s", GetOutputSlot(5)->GetContainer()->GetName()));
  fCounter->Init();
  if (fDoImpParDstar) CreateImpactParameterHistos();
  PostData(1, fOutput);
  PostData(2, fOutputAll);
  PostData(3, fOutputPID);
  PostData(5, fCounter);
  PostData(6, fOutputProductionCheck);
  return;
}
//___________________________________ hiostograms _______________________________________
void AliAnalysisTaskSEDStarEMCALProductionCheck::DefineHistograms() {
/// Create histograms
fCEvents = new TH1F("fCEvents", "counter", 14, 0, 14);
fCEvents->SetStats(kTRUE);
fCEvents->GetXaxis()->SetTitle("1");
fCEvents->GetYaxis()->SetTitle("counts");
fCEvents->GetXaxis()->SetBinLabel(1, "nEventsRead");
fCEvents->GetXaxis()->SetBinLabel(2, "nEvents Matched dAOD");
fCEvents->GetXaxis()->SetBinLabel(3, "good prim vtx and B field");
fCEvents->GetXaxis()->SetBinLabel(4, "no event selected");
fCEvents->GetXaxis()->SetBinLabel(5, "no vtx contributors");
fCEvents->GetXaxis()->SetBinLabel(6, "trigger for PbPb");
fCEvents->GetXaxis()->SetBinLabel(7, "no z vtx");
fCEvents->GetXaxis()->SetBinLabel(9, "nEvents Mismatched dAOD");
fCEvents->GetXaxis()->SetBinLabel(11, "no. of cascade candidates");
fCEvents->GetXaxis()->SetBinLabel(12, "no. of Dstar after selection cuts");
fCEvents->GetXaxis()->SetBinLabel(13, "no. of not on-the-fly rec Dstar");
fCEvents->GetXaxis()->SetBinLabel(14, "no. of Dstar rejected by preselect"); //toadd
fOutput->Add(fCEvents);
fTrueDiff2 = new TH2F("DiffDstar_pt", "True Reco diff vs pt", 200, 0, 15, 900, 0, 0.3);
fOutput->Add(fTrueDiff2);
fDeltaMassD1 = new TH1F("DeltaMassD1", "delta mass d1", 600, 0, 0.8);
fOutput->Add(fDeltaMassD1);
//temp a
fAllhist = new TH1F*[(fNPtBins + 2) * 18];
fPIDhist = new TH1F*[(fNPtBins + 2) * 18];
TString nameMass = " ", nameSgn = " ", nameBkg = " ";
for (Int_t i = -2; i < fNPtBins; i++) {
nameMass = "histDeltaMass_";
nameMass += i + 1;
nameSgn = "histDeltaSgn_";
nameSgn += i + 1;
nameBkg = "histDeltaBkg_";
nameBkg += i + 1;
if (i == -2) {
nameMass = "histDeltaMass";
nameSgn = "histDeltaSgn";
nameBkg = "histDeltaBkg";
}
TH1F* spectrumMass = new TH1F(nameMass.Data(), "D^{*}-D^{0} invariant mass; #DeltaM [GeV/c^{2}]; Entries", 700, 0.13, 0.2);
TH1F* spectrumSgn = new TH1F(nameSgn.Data(), "D^{*}-D^{0} Signal invariant mass - MC; #DeltaM [GeV/c^{2}]; Entries", 700, 0.13, 0.2);
TH1F* spectrumBkg = new TH1F(nameBkg.Data(), "D^{*}-D^{0} Background invariant mass - MC; #DeltaM [GeV/c^{2}]; Entries", 700, 0.13, 0.2);
nameMass = "histD0Mass_";
nameMass += i + 1;
nameSgn = "histD0Sgn_";
nameSgn += i + 1;
nameBkg = "histD0Bkg_";
nameBkg += i + 1;
if (i == -2) {
nameMass = "histD0Mass";
nameSgn = "histD0Sgn";
nameBkg = "histD0Bkg";
}
TH1F* spectrumD0Mass = new TH1F(nameMass.Data(), "D^{0} invariant mass; M(D^{0}) [GeV/c^{2}]; Entries", 200, 1.75, 1.95);
TH1F* spectrumD0Sgn = new TH1F(nameSgn.Data(), "D^{0} Signal invariant mass - MC; M(D^{0}) [GeV/c^{2}]; Entries", 200, 1.75, 1.95);
TH1F* spectrumD0Bkg = new TH1F(nameBkg.Data(), "D^{0} Background invariant mass - MC; M(D^{0}) [GeV/c^{2}]; Entries", 200, 1.75, 1.95);
nameMass = "histDstarMass_";
nameMass += i + 1;
nameSgn = "histDstarSgn_";
nameSgn += i + 1;
nameBkg = "histDstarBkg_";
nameBkg += i + 1;
if (i == -2) {
nameMass = "histDstarMass";
nameSgn = "histDstarSgn";
nameBkg = "histDstarBkg";
}
TH1F* spectrumDstarMass = new TH1F(nameMass.Data(), "D^{*} invariant mass; M(D^{*}) [GeV/c^{2}]; Entries", 200, 1.9, 2.1);
TH1F* spectrumDstarSgn = new TH1F(nameSgn.Data(), "D^{*} Signal invariant mass - MC; M(D^{*}) [GeV/c^{2}]; Entries", 200, 1.9, 2.1);
TH1F* spectrumDstarBkg = new TH1F(nameBkg.Data(), "D^{*} Background invariant mass - MC; M(D^{*}) [GeV/c^{2}]; Entries", 200, 1.9, 2.1);
nameMass = "histSideBandMass_";
nameMass += i + 1;
if (i == -2) {
nameMass = "histSideBandMass";
}
TH1F* spectrumSideBandMass = new TH1F(nameMass.Data(), "D^{*}-D^{0} sideband mass; M(D^{*}) [GeV/c^{2}]; Entries", 200, 0.1, 0.2);
nameMass = "histWrongSignMass_";
nameMass += i + 1;
if (i == -2) {
nameMass = "histWrongSignMass";
}
TH1F* spectrumWrongSignMass = new TH1F(nameMass.Data(), "D^{*}-D^{0} wrongsign mass; M(D^{*}) [GeV/c^{2}]; Entries", 200, 0.1, 0.2);
spectrumMass->Sumw2();
spectrumSgn->Sumw2();
spectrumBkg->Sumw2();
spectrumMass->SetLineColor(6);
spectrumSgn->SetLineColor(2);
spectrumBkg->SetLineColor(4);
spectrumMass->SetMarkerStyle(20);
spectrumSgn->SetMarkerStyle(20);
spectrumBkg->SetMarkerStyle(20);
spectrumMass->SetMarkerSize(0.6);
spectrumSgn->SetMarkerSize(0.6);
spectrumBkg->SetMarkerSize(0.6);
spectrumMass->SetMarkerColor(6);
spectrumSgn->SetMarkerColor(2);
spectrumBkg->SetMarkerColor(4);
spectrumD0Mass->Sumw2();
spectrumD0Sgn->Sumw2();
spectrumD0Bkg->Sumw2();
spectrumD0Mass->SetLineColor(6);
spectrumD0Sgn->SetLineColor(2);
spectrumD0Bkg->SetLineColor(4);
spectrumD0Mass->SetMarkerStyle(20);
spectrumD0Sgn->SetMarkerStyle(20);
spectrumD0Bkg->SetMarkerStyle(20);
spectrumD0Mass->SetMarkerSize(0.6);
spectrumD0Sgn->SetMarkerSize(0.6);
spectrumD0Bkg->SetMarkerSize(0.6);
spectrumD0Mass->SetMarkerColor(6);
spectrumD0Sgn->SetMarkerColor(2);
spectrumD0Bkg->SetMarkerColor(4);
spectrumDstarMass->Sumw2();
spectrumDstarSgn->Sumw2();
spectrumDstarBkg->Sumw2();
spectrumDstarMass->SetLineColor(6);
spectrumDstarSgn->SetLineColor(2);
spectrumDstarBkg->SetLineColor(4);
spectrumDstarMass->SetMarkerStyle(20);
spectrumDstarSgn->SetMarkerStyle(20);
spectrumDstarBkg->SetMarkerStyle(20);
spectrumDstarMass->SetMarkerSize(0.6);
spectrumDstarSgn->SetMarkerSize(0.6);
spectrumDstarBkg->SetMarkerSize(0.6);
spectrumDstarMass->SetMarkerColor(6);
spectrumDstarSgn->SetMarkerColor(2);
spectrumDstarBkg->SetMarkerColor(4);
spectrumSideBandMass->Sumw2();
spectrumSideBandMass->SetLineColor(4);
spectrumSideBandMass->SetMarkerStyle(20);
spectrumSideBandMass->SetMarkerSize(0.6);
spectrumSideBandMass->SetMarkerColor(4);
spectrumWrongSignMass->Sumw2();
spectrumWrongSignMass->SetLineColor(4);
spectrumWrongSignMass->SetMarkerStyle(20);
spectrumWrongSignMass->SetMarkerSize(0.6);
spectrumWrongSignMass->SetMarkerColor(4);
TH1F* allMass = (TH1F*)spectrumMass->Clone();
TH1F* allSgn = (TH1F*)spectrumSgn->Clone();
TH1F* allBkg = (TH1F*)spectrumBkg->Clone();
TH1F* pidMass = (TH1F*)spectrumMass->Clone();
TH1F* pidSgn = (TH1F*)spectrumSgn->Clone();
TH1F* pidBkg = (TH1F*)spectrumBkg->Clone();
fOutputAll->Add(allMass);
fOutputAll->Add(allSgn);
fOutputAll->Add(allBkg);
fAllhist[i + 2 + ((fNPtBins + 2)*kDeltaMass)] = allMass;
fAllhist[i + 2 + ((fNPtBins + 2)*kDeltaSgn)] = allSgn;
fAllhist[i + 2 + ((fNPtBins + 2)*kDeltaBkg)] = allBkg;
fOutputPID->Add(pidMass);
fOutputPID->Add(pidSgn);
fOutputPID->Add(pidBkg);
fPIDhist[i + 2 + ((fNPtBins + 2)*kDeltaMass)] = pidMass;
fPIDhist[i + 2 + ((fNPtBins + 2)*kDeltaSgn)] = pidSgn;
fPIDhist[i + 2 + ((fNPtBins + 2)*kDeltaBkg)] = pidBkg;
TH1F* allD0Mass = (TH1F*)spectrumD0Mass->Clone();
TH1F* allD0Sgn = (TH1F*)spectrumD0Sgn->Clone();
TH1F* allD0Bkg = (TH1F*)spectrumD0Bkg->Clone();
TH1F* pidD0Mass = (TH1F*)spectrumD0Mass->Clone();
TH1F* pidD0Sgn = (TH1F*)spectrumD0Sgn->Clone();
TH1F* pidD0Bkg = (TH1F*)spectrumD0Bkg->Clone();
fOutputAll->Add(allD0Mass);
fOutputAll->Add(allD0Sgn);
fOutputAll->Add(allD0Bkg);
fAllhist[i + 2 + ((fNPtBins + 2)*kDzMass)] = allD0Mass;
fAllhist[i + 2 + ((fNPtBins + 2)*kDzSgn)] = allD0Sgn;
fAllhist[i + 2 + ((fNPtBins + 2)*kDzBkg)] = allD0Bkg;
fOutputPID->Add(pidD0Mass);
fOutputPID->Add(pidD0Sgn);
fOutputPID->Add(pidD0Bkg);
fPIDhist[i + 2 + ((fNPtBins + 2)*kDzMass)] = pidD0Mass;
fPIDhist[i + 2 + ((fNPtBins + 2)*kDzSgn)] = pidD0Sgn;
fPIDhist[i + 2 + ((fNPtBins + 2)*kDzBkg)] = pidD0Bkg;
TH1F* allDstarMass = (TH1F*)spectrumDstarMass->Clone();
TH1F* allDstarSgn = (TH1F*)spectrumDstarSgn->Clone();
TH1F* allDstarBkg = (TH1F*)spectrumDstarBkg->Clone();
TH1F* pidDstarMass = (TH1F*)spectrumDstarMass->Clone();
TH1F* pidDstarSgn = (TH1F*)spectrumDstarSgn->Clone();
TH1F* pidDstarBkg = (TH1F*)spectrumDstarBkg->Clone();
fOutputAll->Add(allDstarMass);
fOutputAll->Add(allDstarSgn);
fOutputAll->Add(allDstarBkg);
fAllhist[i + 2 + ((fNPtBins + 2)*kDstarMass)] = allDstarMass;
fAllhist[i + 2 + ((fNPtBins + 2)*kDstarSgn)] = allDstarSgn;
fAllhist[i + 2 + ((fNPtBins + 2)*kDstarBkg)] = allDstarBkg;
fOutputPID->Add(pidDstarMass);
fOutputPID->Add(pidDstarSgn);
fOutputPID->Add(pidDstarBkg);
fPIDhist[i + 2 + ((fNPtBins + 2)*kDstarMass)] = pidDstarMass;
fPIDhist[i + 2 + ((fNPtBins + 2)*kDstarSgn)] = pidDstarSgn;
fPIDhist[i + 2 + ((fNPtBins + 2)*kDstarBkg)] = pidDstarBkg;
TH1F* allSideBandMass = (TH1F*)spectrumSideBandMass->Clone();
TH1F* pidSideBandMass = (TH1F*)spectrumSideBandMass->Clone();
fOutputAll->Add(allSideBandMass);
fOutputPID->Add(pidSideBandMass);
fAllhist[i + 2 + ((fNPtBins + 2)*kSideBandMass)] = allSideBandMass;
fPIDhist[i + 2 + ((fNPtBins + 2)*kSideBandMass)] = pidSideBandMass;
TH1F* allWrongSignMass = (TH1F*)spectrumWrongSignMass->Clone();
TH1F* pidWrongSignMass = (TH1F*)spectrumWrongSignMass->Clone();
fOutputAll->Add(allWrongSignMass);
fOutputPID->Add(pidWrongSignMass);
fAllhist[i + 2 + ((fNPtBins + 2)*kWrongSignMass)] = allWrongSignMass;
fPIDhist[i + 2 + ((fNPtBins + 2)*kWrongSignMass)] = pidWrongSignMass;
}
// pt spectra
nameMass = "ptMass";
nameSgn = "ptSgn";
nameBkg = "ptBkg";
TH1F* ptspectrumMass = new TH1F(nameMass.Data(), "D^{*} p_{T}; p_{T} [GeV]; Entries", 400, 0, 50);
TH1F* ptspectrumSgn = new TH1F(nameSgn.Data(), "D^{*} Signal p_{T} - MC; p_{T} [GeV]; Entries", 400, 0, 50);
TH1F* ptspectrumBkg = new TH1F(nameBkg.Data(), "D^{*} Background p_{T} - MC; p_{T} [GeV]; Entries", 400, 0, 50);
ptspectrumMass->Sumw2();
ptspectrumSgn->Sumw2();
ptspectrumBkg->Sumw2();
ptspectrumMass->SetLineColor(6);
ptspectrumSgn->SetLineColor(2);
ptspectrumBkg->SetLineColor(4);
ptspectrumMass->SetMarkerStyle(20);
ptspectrumSgn->SetMarkerStyle(20);
ptspectrumBkg->SetMarkerStyle(20);
ptspectrumMass->SetMarkerSize(0.6);
ptspectrumSgn->SetMarkerSize(0.6);
ptspectrumBkg->SetMarkerSize(0.6);
ptspectrumMass->SetMarkerColor(6);
ptspectrumSgn->SetMarkerColor(2);
ptspectrumBkg->SetMarkerColor(4);
TH1F* ptallMass = (TH1F*)ptspectrumMass->Clone();
TH1F* ptallSgn = (TH1F*)ptspectrumSgn->Clone();
TH1F* ptallBkg = (TH1F*)ptspectrumBkg->Clone();
TH1F* ptpidMass = (TH1F*)ptspectrumMass->Clone();
TH1F* ptpidSgn = (TH1F*)ptspectrumSgn->Clone();
TH1F* ptpidBkg = (TH1F*)ptspectrumBkg->Clone();
fOutputAll->Add(ptallMass);
fOutputAll->Add(ptallSgn);
fOutputAll->Add(ptallBkg);
fAllhist[((fNPtBins + 2)*kptMass)] = ptallMass;
fAllhist[((fNPtBins + 2)*kptSgn)] = ptallSgn;
fAllhist[((fNPtBins + 2)*kptBkg)] = ptallBkg;
fOutputPID->Add(ptpidMass);
fOutputPID->Add(ptpidSgn);
fOutputPID->Add(ptpidBkg);
fPIDhist[(fNPtBins + 2)*kptMass] = ptpidMass;
fPIDhist[(fNPtBins + 2)*kptSgn] = ptpidSgn;
fPIDhist[(fNPtBins + 2)*kptBkg] = ptpidBkg;
// eta spectra
nameMass = "etaMass";
nameSgn = "etaSgn";
nameBkg = "etaBkg";
TH1F* etaspectrumMass = new TH1F(nameMass.Data(), "D^{*} #eta; #eta; Entries", 200, -1, 1);
TH1F* etaspectrumSgn = new TH1F(nameSgn.Data(), "D^{*} Signal #eta - MC; #eta; Entries", 200, -1, 1);
TH1F* etaspectrumBkg = new TH1F(nameBkg.Data(), "D^{*} Background #eta - MC; #eta; Entries", 200, -1, 1);
etaspectrumMass->Sumw2();
etaspectrumSgn->Sumw2();
etaspectrumBkg->Sumw2();
etaspectrumMass->SetLineColor(6);
etaspectrumSgn->SetLineColor(2);
etaspectrumBkg->SetLineColor(4);
etaspectrumMass->SetMarkerStyle(20);
etaspectrumSgn->SetMarkerStyle(20);
etaspectrumBkg->SetMarkerStyle(20);
etaspectrumMass->SetMarkerSize(0.6);
etaspectrumSgn->SetMarkerSize(0.6);
etaspectrumBkg->SetMarkerSize(0.6);
etaspectrumMass->SetMarkerColor(6);
etaspectrumSgn->SetMarkerColor(2);
etaspectrumBkg->SetMarkerColor(4);
TH1F* etaallMass = (TH1F*)etaspectrumMass->Clone();
TH1F* etaallSgn = (TH1F*)etaspectrumSgn->Clone();
TH1F* etaallBkg = (TH1F*)etaspectrumBkg->Clone();
TH1F* etapidMass = (TH1F*)etaspectrumMass->Clone();
TH1F* etapidSgn = (TH1F*)etaspectrumSgn->Clone();
TH1F* etapidBkg = (TH1F*)etaspectrumBkg->Clone();
fOutputAll->Add(etaallMass);
fOutputAll->Add(etaallSgn);
fOutputAll->Add(etaallBkg);
fAllhist[(fNPtBins + 2)*ketaMass] = etaallMass;
fAllhist[(fNPtBins + 2)*ketaSgn] = etaallSgn;
fAllhist[(fNPtBins + 2)*ketaBkg] = etaallBkg;
fOutputPID->Add(etapidMass);
fOutputPID->Add(etapidSgn);
fOutputPID->Add(etapidBkg);
fPIDhist[(fNPtBins + 2)*ketaMass] = etapidMass;
fPIDhist[(fNPtBins + 2)*ketaSgn] = etapidSgn;
fPIDhist[(fNPtBins + 2)*ketaBkg] = etapidBkg;
if (fDoDStarVsY) {
TH3F* deltamassVsyVsPtPID = new TH3F("deltamassVsyVsPt", "delta mass Vs y Vs pT; #DeltaM [GeV/c^{2}]; y; p_{T} [GeV/c]", 700, 0.13, 0.2, 40, -1, 1, 36, 0., 36.);
fOutputPID->Add(deltamassVsyVsPtPID);
}
TString name_DStarPtTruePreEventSelection = "DStarPtTruePreEventSelection";
TH1F* hist_DStarPtTruePreEventSelection = new TH1F(name_DStarPtTruePreEventSelection.Data(), "DStarPtTruePreEventSelection; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_DStarPtTruePreEventSelection->Sumw2();
hist_DStarPtTruePreEventSelection->SetLineColor(6);
hist_DStarPtTruePreEventSelection->SetMarkerStyle(20);
hist_DStarPtTruePreEventSelection->SetMarkerSize(0.6);
hist_DStarPtTruePreEventSelection->SetMarkerColor(6);
TH1F* histogram_DStarPtTruePreEventSelection = (TH1F*)hist_DStarPtTruePreEventSelection->Clone();
fOutputProductionCheck->Add(histogram_DStarPtTruePreEventSelection);
TString name_DStarPtTruePreEventSelectionWeighted = "DStarPtTruePreEventSelectionWeighted";
TH1F* hist_DStarPtTruePreEventSelectionWeighted = new TH1F(name_DStarPtTruePreEventSelectionWeighted.Data(), "DStarPtTruePreEventSelectionWeighted; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_DStarPtTruePreEventSelectionWeighted->Sumw2();
hist_DStarPtTruePreEventSelectionWeighted->SetLineColor(6);
hist_DStarPtTruePreEventSelectionWeighted->SetMarkerStyle(20);
hist_DStarPtTruePreEventSelectionWeighted->SetMarkerSize(0.6);
hist_DStarPtTruePreEventSelectionWeighted->SetMarkerColor(6);
TH1F* histogram_DStarPtTruePreEventSelectionWeighted = (TH1F*)hist_DStarPtTruePreEventSelectionWeighted->Clone();
fOutputProductionCheck->Add(histogram_DStarPtTruePreEventSelectionWeighted);
TString name_DStarPtTruePostEventSelection = "DStarPtTruePostEventSelection";
TH1F* hist_DStarPtTruePostEventSelection = new TH1F(name_DStarPtTruePostEventSelection.Data(), "DStarPtTruePostEventSelection; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_DStarPtTruePostEventSelection->Sumw2();
hist_DStarPtTruePostEventSelection->SetLineColor(6);
hist_DStarPtTruePostEventSelection->SetMarkerStyle(20);
hist_DStarPtTruePostEventSelection->SetMarkerSize(0.6);
hist_DStarPtTruePostEventSelection->SetMarkerColor(6);
TH1F* histogram_DStarPtTruePostEventSelection = (TH1F*)hist_DStarPtTruePostEventSelection->Clone();
fOutputProductionCheck->Add(histogram_DStarPtTruePostEventSelection);
TString name_DStarPtTruePostEventSelectionWeighted = "DStarPtTruePostEventSelectionWeighted";
TH1F* hist_DStarPtTruePostEventSelectionWeighted = new TH1F(name_DStarPtTruePostEventSelectionWeighted.Data(), "DStarPtTruePostEventSelectionWeighted; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_DStarPtTruePostEventSelectionWeighted->Sumw2();
hist_DStarPtTruePostEventSelectionWeighted->SetLineColor(6);
hist_DStarPtTruePostEventSelectionWeighted->SetMarkerStyle(20);
hist_DStarPtTruePostEventSelectionWeighted->SetMarkerSize(0.6);
hist_DStarPtTruePostEventSelectionWeighted->SetMarkerColor(6);
TH1F* histogram_DStarPtTruePostEventSelectionWeighted = (TH1F*)hist_DStarPtTruePostEventSelectionWeighted->Clone();
fOutputProductionCheck->Add(histogram_DStarPtTruePostEventSelectionWeighted);
TString name_DStarPtTruePostCuts = "DStarPtTruePostCuts";
TH1F* hist_DStarPtTruePostCuts = new TH1F(name_DStarPtTruePostCuts.Data(), "DStarPtTruePostCuts; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_DStarPtTruePostCuts->Sumw2();
hist_DStarPtTruePostCuts->SetLineColor(6);
hist_DStarPtTruePostCuts->SetMarkerStyle(20);
hist_DStarPtTruePostCuts->SetMarkerSize(0.6);
hist_DStarPtTruePostCuts->SetMarkerColor(6);
TH1F* histogram_DStarPtTruePostCuts = (TH1F*)hist_DStarPtTruePostCuts->Clone();
fOutputProductionCheck->Add(histogram_DStarPtTruePostCuts);
TString name_DStarPtTruePostCutsWeighted = "DStarPtTruePostCutsWeighted";
TH1F* hist_DStarPtTruePostCutsWeighted = new TH1F(name_DStarPtTruePostCutsWeighted.Data(), "DStarPtTruePostCutsWeighted; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_DStarPtTruePostCutsWeighted->Sumw2();
hist_DStarPtTruePostCutsWeighted->SetLineColor(6);
hist_DStarPtTruePostCutsWeighted->SetMarkerStyle(20);
hist_DStarPtTruePostCutsWeighted->SetMarkerSize(0.6);
hist_DStarPtTruePostCutsWeighted->SetMarkerColor(6);
TH1F* histogram_DStarPtTruePostCutsWeighted = (TH1F*)hist_DStarPtTruePostCutsWeighted->Clone();
fOutputProductionCheck->Add(histogram_DStarPtTruePostCutsWeighted);
TString name_DStarPtPostCuts = "DStarPtPostCuts";
TH1F* hist_DStarPtPostCuts = new TH1F(name_DStarPtPostCuts.Data(), "DStarPtPostCuts; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_DStarPtPostCuts->Sumw2();
hist_DStarPtPostCuts->SetLineColor(6);
hist_DStarPtPostCuts->SetMarkerStyle(20);
hist_DStarPtPostCuts->SetMarkerSize(0.6);
hist_DStarPtPostCuts->SetMarkerColor(6);
TH1F* histogram_DStarPtPostCuts = (TH1F*)hist_DStarPtPostCuts->Clone();
fOutputProductionCheck->Add(histogram_DStarPtPostCuts);
TString name_DStarPtPostCutsWeighted = "DStarPtPostCutsWeighted";
TH1F* hist_DStarPtPostCutsWeighted = new TH1F(name_DStarPtPostCutsWeighted.Data(), "DStarPtPostCutsWeighted; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_DStarPtPostCutsWeighted->Sumw2();
hist_DStarPtPostCutsWeighted->SetLineColor(6);
hist_DStarPtPostCutsWeighted->SetMarkerStyle(20);
hist_DStarPtPostCutsWeighted->SetMarkerSize(0.6);
hist_DStarPtPostCutsWeighted->SetMarkerColor(6);
TH1F* histogram_DStarPtPostCutsWeighted = (TH1F*)hist_DStarPtPostCutsWeighted->Clone();
fOutputProductionCheck->Add(histogram_DStarPtPostCutsWeighted);
TString name_PtHardPreEventSelection = "PtHardPreEventSelection";
TH1F* hist_PtHardPreEventSelection = new TH1F(name_PtHardPreEventSelection.Data(), "PtHardPreEventSelection; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_PtHardPreEventSelection->Sumw2();
hist_PtHardPreEventSelection->SetLineColor(6);
hist_PtHardPreEventSelection->SetMarkerStyle(20);
hist_PtHardPreEventSelection->SetMarkerSize(0.6);
hist_PtHardPreEventSelection->SetMarkerColor(6);
TH1F* histogram_PtHardPreEventSelection = (TH1F*)hist_PtHardPreEventSelection->Clone();
fOutputProductionCheck->Add(histogram_PtHardPreEventSelection);
TString name_PtHardPostEventSelection = "PtHardPostEventSelection";
TH1F* hist_PtHardPostEventSelection = new TH1F(name_PtHardPostEventSelection.Data(), "PtHardPostEventSelection; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_PtHardPostEventSelection->Sumw2();
hist_PtHardPostEventSelection->SetLineColor(6);
hist_PtHardPostEventSelection->SetMarkerStyle(20);
hist_PtHardPostEventSelection->SetMarkerSize(0.6);
hist_PtHardPostEventSelection->SetMarkerColor(6);
TH1F* histogram_PtHardPostEventSelection = (TH1F*)hist_PtHardPostEventSelection->Clone();
fOutputProductionCheck->Add(histogram_PtHardPostEventSelection);
TString name_PtHardPostCuts = "PtHardPostCuts";
TH1F* hist_PtHardPostCuts = new TH1F(name_PtHardPostCuts.Data(), "PtHardPostCuts; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_PtHardPostCuts->Sumw2();
hist_PtHardPostCuts->SetLineColor(6);
hist_PtHardPostCuts->SetMarkerStyle(20);
hist_PtHardPostCuts->SetMarkerSize(0.6);
hist_PtHardPostCuts->SetMarkerColor(6);
TH1F* histogram_PtHardPostCuts = (TH1F*)hist_PtHardPostCuts->Clone();
fOutputProductionCheck->Add(histogram_PtHardPostCuts);
TString name_PtHardWeightedPreEventSelection = "PtHardWeightedPreEventSelection";
TH1F* hist_PtHardWeightedPreEventSelection = new TH1F(name_PtHardWeightedPreEventSelection.Data(), "PtHardWeightedPreEventSelection; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_PtHardWeightedPreEventSelection->Sumw2();
hist_PtHardWeightedPreEventSelection->SetLineColor(6);
hist_PtHardWeightedPreEventSelection->SetMarkerStyle(20);
hist_PtHardWeightedPreEventSelection->SetMarkerSize(0.6);
hist_PtHardWeightedPreEventSelection->SetMarkerColor(6);
TH1F* histogram_PtHardWeightedPreEventSelection = (TH1F*)hist_PtHardWeightedPreEventSelection->Clone();
fOutputProductionCheck->Add(histogram_PtHardWeightedPreEventSelection);
TString name_PtHardWeightedPostEventSelection = "PtHardWeightedPostEventSelection";
TH1F* hist_PtHardWeightedPostEventSelection = new TH1F(name_PtHardWeightedPostEventSelection.Data(), "PtHardWeightedPostEventSelection; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_PtHardWeightedPostEventSelection->Sumw2();
hist_PtHardWeightedPostEventSelection->SetLineColor(6);
hist_PtHardWeightedPostEventSelection->SetMarkerStyle(20);
hist_PtHardWeightedPostEventSelection->SetMarkerSize(0.6);
hist_PtHardWeightedPostEventSelection->SetMarkerColor(6);
TH1F* histogram_PtHardWeightedPostEventSelection = (TH1F*)hist_PtHardWeightedPostEventSelection->Clone();
fOutputProductionCheck->Add(histogram_PtHardWeightedPostEventSelection);
TString name_PtHardWeightedPostCuts = "PtHardWeightedPostCuts";
TH1F* hist_PtHardWeightedPostCuts = new TH1F(name_PtHardWeightedPostCuts.Data(), "PtHardWeightedPostCuts; p_{T} [GeV/c]; Entries", 5000, 0, 1000);
hist_PtHardWeightedPostCuts->Sumw2();
hist_PtHardWeightedPostCuts->SetLineColor(6);
hist_PtHardWeightedPostCuts->SetMarkerStyle(20);
hist_PtHardWeightedPostCuts->SetMarkerSize(0.6);
hist_PtHardWeightedPostCuts->SetMarkerColor(6);
TH1F* histogram_PtHardWeightedPostCuts = (TH1F*)hist_PtHardWeightedPostCuts->Clone();
fOutputProductionCheck->Add(histogram_PtHardWeightedPostCuts);
TString name_WeightsPreEventSelection = "WeightsPreEventSelection";
TH1F* hist_WeightsPreEventSelection = new TH1F(name_WeightsPreEventSelection.Data(), "WeightsPreEventSelection; p_{T} [GeV/c]; Entries", 50000, 0, 1000);
hist_WeightsPreEventSelection->Sumw2();
hist_WeightsPreEventSelection->SetLineColor(6);
hist_WeightsPreEventSelection->SetMarkerStyle(20);
hist_WeightsPreEventSelection->SetMarkerSize(0.6);
hist_WeightsPreEventSelection->SetMarkerColor(6);
TH1F* histogram_WeightsPreEventSelection = (TH1F*)hist_WeightsPreEventSelection->Clone();
fOutputProductionCheck->Add(histogram_WeightsPreEventSelection);
TString name_WeightsPostEventSelection = "WeightsPostEventSelection";
TH1F* hist_WeightsPostEventSelection = new TH1F(name_WeightsPostEventSelection.Data(), "WeightsPostEventSelection; p_{T} [GeV/c]; Entries", 50000, 0, 1000);
hist_WeightsPostEventSelection->Sumw2();
hist_WeightsPostEventSelection->SetLineColor(6);
hist_WeightsPostEventSelection->SetMarkerStyle(20);
hist_WeightsPostEventSelection->SetMarkerSize(0.6);
hist_WeightsPostEventSelection->SetMarkerColor(6);
TH1F* histogram_WeightsPostEventSelection = (TH1F*)hist_WeightsPostEventSelection->Clone();
fOutputProductionCheck->Add(histogram_WeightsPostEventSelection);
TString name_WeightsPostCuts = "WeightsPostCuts";
TH1F* hist_WeightsPostCuts = new TH1F(name_WeightsPostCuts.Data(), "WeightsPostCuts; p_{T} [GeV/c]; Entries", 50000, 0, 1000);
hist_WeightsPostCuts->Sumw2();
hist_WeightsPostCuts->SetLineColor(6);
hist_WeightsPostCuts->SetMarkerStyle(20);
hist_WeightsPostCuts->SetMarkerSize(0.6);
hist_WeightsPostCuts->SetMarkerColor(6);
TH1F* histogram_WeightsPostCuts = (TH1F*)hist_WeightsPostCuts->Clone();
fOutputProductionCheck->Add(histogram_WeightsPostCuts);
TString name_TrialsPreEventSelection = "TrialsPreEventSelection";
TH1F* hist_TrialsPreEventSelection = new TH1F(name_TrialsPreEventSelection.Data(), "TrialsPreEventSelection; p_{T} [GeV/c]; Entries", 1, 0, 1);
hist_TrialsPreEventSelection->Sumw2();
hist_TrialsPreEventSelection->SetLineColor(6);
hist_TrialsPreEventSelection->SetMarkerStyle(20);
hist_TrialsPreEventSelection->SetMarkerSize(0.6);
hist_TrialsPreEventSelection->SetMarkerColor(6);
TH1F* histogram_TrialsPreEventSelection = (TH1F*)hist_TrialsPreEventSelection->Clone();
fOutputProductionCheck->Add(histogram_TrialsPreEventSelection);
TString name_TrialsPostEventSelection = "TrialsPostEventSelection";
TH1F* hist_TrialsPostEventSelection = new TH1F(name_TrialsPostEventSelection.Data(), "TrialsPostEventSelection; p_{T} [GeV/c]; Entries", 1, 0, 1);
hist_TrialsPostEventSelection->Sumw2();
hist_TrialsPostEventSelection->SetLineColor(6);
hist_TrialsPostEventSelection->SetMarkerStyle(20);
hist_TrialsPostEventSelection->SetMarkerSize(0.6);
hist_TrialsPostEventSelection->SetMarkerColor(6);
TH1F* histogram_TrialsPostEventSelection = (TH1F*)hist_TrialsPostEventSelection->Clone();
fOutputProductionCheck->Add(histogram_TrialsPostEventSelection);
TString name_TrialsPostCuts = "TrialsPostCuts";
TH1F* hist_TrialsPostCuts = new TH1F(name_TrialsPostCuts.Data(), "TrialsPostCuts; p_{T} [GeV/c]; Entries", 1, 0, 1);
hist_TrialsPostCuts->Sumw2();
hist_TrialsPostCuts->SetLineColor(6);
hist_TrialsPostCuts->SetMarkerStyle(20);
hist_TrialsPostCuts->SetMarkerSize(0.6);
hist_TrialsPostCuts->SetMarkerColor(6);
TH1F* histogram_TrialsPostCuts = (TH1F*)hist_TrialsPostCuts->Clone();
fOutputProductionCheck->Add(histogram_TrialsPostCuts);
Int_t nPtBins = fCuts->GetNPtBins();
// const Int_t nPtBinLimits = nPtBins + 1;
Float_t * PtBinLimits = fCuts->GetPtBinLimits();
TString name_DStar_per_bin_true_PreEventSelection ="DStar_per_bin_true_PreEventSelection";
TH1F* hist_DStar_per_bin_true_PreEventSelection = new TH1F(name_DStar_per_bin_true_PreEventSelection.Data(),"DStar_per_bin_true_PreEventSelection; Entries",nPtBins,PtBinLimits);
TH1F* histogram_DStar_per_bin_true_PreEventSelection = (TH1F*)hist_DStar_per_bin_true_PreEventSelection->Clone();
fOutputProductionCheck->Add(histogram_DStar_per_bin_true_PreEventSelection);
TString name_DStar_per_bin_true_PreEventSelection_weighted ="DStar_per_bin_true_PreEventSelection_weighted";
TH1F* hist_DStar_per_bin_true_PreEventSelection_weighted = new TH1F(name_DStar_per_bin_true_PreEventSelection_weighted.Data(),"DStar_per_bin_true_PreEventSelection_weighted; Entries",nPtBins,PtBinLimits);
TH1F* histogram_DStar_per_bin_true_PreEventSelection_weighted = (TH1F*)hist_DStar_per_bin_true_PreEventSelection_weighted->Clone();
fOutputProductionCheck->Add(histogram_DStar_per_bin_true_PreEventSelection_weighted);
TString name_DStar_per_bin_true_PostEventSelection ="DStar_per_bin_true_PostEventSelection";
TH1F* hist_DStar_per_bin_true_PostEventSelection = new TH1F(name_DStar_per_bin_true_PostEventSelection.Data(),"DStar_per_bin_true_PostEventSelection; Entries",nPtBins,PtBinLimits);
TH1F* histogram_DStar_per_bin_true_PostEventSelection = (TH1F*)hist_DStar_per_bin_true_PostEventSelection->Clone();
fOutputProductionCheck->Add(histogram_DStar_per_bin_true_PostEventSelection);
TString name_DStar_per_bin_true_PostEventSelection_weighted ="DStar_per_bin_true_PostEventSelection_weighted";
TH1F* hist_DStar_per_bin_true_PostEventSelection_weighted = new TH1F(name_DStar_per_bin_true_PostEventSelection_weighted.Data(),"DStar_per_bin_true_PostEventSelection_weighted; Entries",nPtBins,PtBinLimits);
TH1F* histogram_DStar_per_bin_true_PostEventSelection_weighted = (TH1F*)hist_DStar_per_bin_true_PostEventSelection_weighted->Clone();
fOutputProductionCheck->Add(histogram_DStar_per_bin_true_PostEventSelection_weighted);
TString name_DStar_per_bin_true_PostCuts ="DStar_per_bin_true_PostCuts";
TH1F* hist_DStar_per_bin_true_PostCuts = new TH1F(name_DStar_per_bin_true_PostCuts.Data(),"DStar_per_bin_true_PostCuts; Entries",nPtBins,PtBinLimits);
TH1F* histogram_DStar_per_bin_true_PostCuts = (TH1F*)hist_DStar_per_bin_true_PostCuts->Clone();
fOutputProductionCheck->Add(histogram_DStar_per_bin_true_PostCuts);
TString name_DStar_per_bin_true_PostCuts_weighted ="DStar_per_bin_true_PostCuts_weighted";
TH1F* hist_DStar_per_bin_true_PostCuts_weighted = new TH1F(name_DStar_per_bin_true_PostCuts_weighted.Data(),"DStar_per_bin_true_PostCuts_weighted; Entries",nPtBins,PtBinLimits);
TH1F* histogram_DStar_per_bin_true_PostCuts_weighted = (TH1F*)hist_DStar_per_bin_true_PostCuts_weighted->Clone();
fOutputProductionCheck->Add(histogram_DStar_per_bin_true_PostCuts_weighted);
TString name_DStar_per_bin_PostCuts ="DStar_per_bin_PostCuts";
TH1F* hist_DStar_per_bin_PostCuts = new TH1F(name_DStar_per_bin_PostCuts.Data(),"DStar_per_bin_PostCuts; Entries",nPtBins,PtBinLimits);
TH1F* histogram_DStar_per_bin_PostCuts = (TH1F*)hist_DStar_per_bin_PostCuts->Clone();
fOutputProductionCheck->Add(histogram_DStar_per_bin_PostCuts);
TString name_DStar_per_bin_PostCuts_weighted ="DStar_per_bin_PostCuts_weighted";
TH1F* hist_DStar_per_bin_PostCuts_weighted = new TH1F(name_DStar_per_bin_PostCuts_weighted.Data(),"DStar_per_bin_PostCuts_weighted; Entries",nPtBins,PtBinLimits);
TH1F* histogram_DStar_per_bin_PostCuts_weighted = (TH1F*)hist_DStar_per_bin_PostCuts_weighted->Clone();
fOutputProductionCheck->Add(histogram_DStar_per_bin_PostCuts_weighted);
TString name_fHistClusPosition ="fHistClusPosition";
TH3F* hist_fHistClusPosition = new TH3F(name_fHistClusPosition.Data(),";#it{x} (cm);#it{y} (cm);#it{z} (cm)", 50, -500, 500, 50, -500, 500, 50, -500, 500);
TH3F* histogram_fHistClusPosition = (TH3F*)hist_fHistClusPosition->Clone();
fOutputProductionCheck->Add(histogram_fHistClusPosition);
return;
}
//________________________________________________________________________
void AliAnalysisTaskSEDStarEMCALProductionCheck::FillSpectrum(AliAODRecoCascadeHF *part, Int_t isDStar, AliRDHFCutsDStartoKpipi *cuts, Int_t isSel, TList *listout, TH1F** histlist) {
  //
  /// Fill the invariant-mass spectra for one D* candidate: M(Kpi), M(Kpipi)
  /// and the delta mass M(Kpipi) - M(Kpi), both in the candidate's pt bin and
  /// pt-integrated. When MC info is available, the signal/background variants
  /// are filled as well; pt and eta spectra are filled only when the delta
  /// mass lies inside the peak window.
  /// \param part     reconstructed D* -> D0 pi cascade candidate
  /// \param isDStar  MC truth flag (1 = true D*, anything else = background)
  /// \param cuts     cut object, used to locate the candidate's pt bin
  /// \param isSel    selection outcome; nothing is filled when 0
  /// \param listout  unused here; kept for signature compatibility with the
  ///                 other spectrum-filling methods of this task
  /// \param histlist flat histogram array indexed as
  ///                 [ptbin + 1 + (fNPtBins + 2)*kind]; slot
  ///                 [(fNPtBins + 2)*kind] holds the pt-integrated spectrum
  //
  if (!isSel) return;
  // D0 window
  Double_t mPDGD0 = TDatabasePDG::Instance()->GetParticle(421)->Mass();
  Double_t invmassD0 = part->InvMassD0();
  Int_t ptbin = cuts->PtBin(part->Pt());
  Double_t pt = part->Pt();
  Double_t eta = part->Eta();
  Double_t invmassDelta = part->DeltaInvMass();
  Double_t invmassDstar = part->InvMassDstarKpipi();
  Bool_t massInRange = kFALSE;
  Double_t mPDGDstar = TDatabasePDG::Instance()->GetParticle(413)->Mass();
  // delta M(Kpipi)-M(Kpi): the candidate counts for the pt/eta spectra only
  // when the delta mass is within fPeakWindow of the nominal D*-D0 difference
  if (TMath::Abs(invmassDelta - (mPDGDstar - mPDGD0)) < fPeakWindow) massInRange = kTRUE;
  if (fUseMCInfo) {
    if (isDStar == 1) {
      histlist[ptbin + 1 + ((fNPtBins + 2)*kDzSgn)]->Fill(invmassD0);
      histlist[(fNPtBins + 2)*kDzSgn]->Fill(invmassD0);
      histlist[ptbin + 1 + ((fNPtBins + 2)*kDstarSgn)]->Fill(invmassDstar);
      histlist[(fNPtBins + 2)*kDstarSgn]->Fill(invmassDstar);
      histlist[ptbin + 1 + ((fNPtBins + 2)*kDeltaSgn)]->Fill(invmassDelta);
      histlist[(fNPtBins + 2)*kDeltaSgn]->Fill(invmassDelta);
      if (massInRange) {
        histlist[(fNPtBins + 2)*kptSgn]->Fill(pt);
        histlist[(fNPtBins + 2)*ketaSgn]->Fill(eta);
      }
    }
    else {//background
      histlist[ptbin + 1 + ((fNPtBins + 2)*kDzBkg)]->Fill(invmassD0);
      histlist[(fNPtBins + 2)*kDzBkg]->Fill(invmassD0);
      histlist[ptbin + 1 + ((fNPtBins + 2)*kDstarBkg)]->Fill(invmassDstar);
      histlist[(fNPtBins + 2)*kDstarBkg]->Fill(invmassDstar);
      histlist[ptbin + 1 + ((fNPtBins + 2)*kDeltaBkg)]->Fill(invmassDelta);
      histlist[(fNPtBins + 2)*kDeltaBkg]->Fill(invmassDelta);
      if (massInRange) {
        histlist[(fNPtBins + 2)*kptBkg]->Fill(pt);
        histlist[(fNPtBins + 2)*ketaBkg]->Fill(eta);
      }
    }
  }
  //no MC info, just cut selection: "Mass" spectra are filled for every
  //selected candidate regardless of truth information
  histlist[ptbin + 1 + ((fNPtBins + 2)*kDzMass)]->Fill(invmassD0);
  histlist[(fNPtBins + 2)*kDzMass]->Fill(invmassD0);
  histlist[ptbin + 1 + ((fNPtBins + 2)*kDstarMass)]->Fill(invmassDstar);
  histlist[(fNPtBins + 2)*kDstarMass]->Fill(invmassDstar);
  histlist[ptbin + 1 + ((fNPtBins + 2)*kDeltaMass)]->Fill(invmassDelta);
  histlist[(fNPtBins + 2)*kDeltaMass]->Fill(invmassDelta);
  if (massInRange) {
    histlist[(fNPtBins + 2)*kptMass]->Fill(pt);
    histlist[(fNPtBins + 2)*ketaMass]->Fill(eta);
  }
  return;
}
//______________________________ side band background for D*___________________________________
void AliAnalysisTaskSEDStarEMCALProductionCheck::SideBandBackground(AliAODRecoCascadeHF *part, AliRDHFCutsDStartoKpipi *cuts, Int_t isSel, TList *listout, TH1F** histlist) {
  /// D* side-band background method. Two bands in M(Kpi), taken at ~6 sigmas
  /// (expected detector resolution) left and right of the D0 mass, each
  /// ~5 sigmas wide. Candidates inside either band have their delta mass
  /// filled per pt bin and pt-integrated.
  if (!isSel) return;
  const Int_t ptBin = cuts->PtBin(part->Pt());
  // Select the side-band interval: reject candidates inside the D0 peak
  // (< 4 window widths from nominal) or too far outside (> 8 widths).
  const Double_t d0MassOffset = TMath::Abs(part->InvMassD0() - 1.865);
  if (d0MassOffset <= 4 * fD0Window || d0MassOffset >= 8 * fD0Window) return;
  // for pt and eta
  const Double_t deltaMass = part->DeltaInvMass();
  histlist[ptBin + 1 + ((fNPtBins + 2) * kSideBandMass)]->Fill(deltaMass);
  histlist[(fNPtBins + 2) * kSideBandMass]->Fill(deltaMass);
}
//________________________________________________________________________________________________________________
void AliAnalysisTaskSEDStarEMCALProductionCheck::WrongSignForDStar(AliAODRecoCascadeHF *part, AliRDHFCutsDStartoKpipi *cuts, TList *listout) {
  //
  /// Estimate combinatorial background by assigning the wrong mass hypothesis
  /// to the D0 daughters (equivalent to a wrong-charge soft pion) and filling
  /// the resulting delta-mass spectra, looked up in listout by name.
  /// \param part    reconstructed D* cascade candidate
  /// \param cuts    cut object; supplies the pt bin and the candidate selection
  /// \param listout list holding the "histWrongSignMass*" histograms to fill
  //
  Int_t ptbin = cuts->PtBin(part->Pt());
  Double_t mPDGD0 = TDatabasePDG::Instance()->GetParticle(421)->Mass();
  Double_t invmassD0 = part->InvMassD0();
  if (TMath::Abs(invmassD0 - mPDGD0) > fD0Window) return;
  AliAODRecoDecayHF2Prong* theD0particle = (AliAODRecoDecayHF2Prong*)part->Get2Prong();
  Int_t isSelected = cuts->IsSelected(part, AliRDHFCuts::kCandidate); //selected
  if (!isSelected) {
    return;
  }
  // If the candidate is a D*+ then assume the conjugate D0bar hypothesis.
  // The original code had two separate branches for isSelected == 1 and
  // isSelected == 3 with identical bodies; they are merged here.
  Bool_t assumeD0bar = (part->Charge() > 0 && (isSelected == 1 || isSelected == 3));
  // wrong D0 inv mass
  Double_t wrongMassD0 = assumeD0bar ? theD0particle->InvMassD0bar() : theD0particle->InvMassD0();
  if (TMath::Abs(wrongMassD0 - 1.865) < fD0Window) {
    // wrong D* inv mass: recompute from the daughter energies with swapped
    // K/pi mass assignments depending on the candidate charge
    Double_t e[3];
    if (part->Charge() > 0) {
      e[0] = theD0particle->EProng(0, 321);
      e[1] = theD0particle->EProng(1, 211);
    } else {
      e[0] = theD0particle->EProng(0, 211);
      e[1] = theD0particle->EProng(1, 321);
    }
    e[2] = part->EProng(0, 211);
    Double_t esum = e[0] + e[1] + e[2];
    Double_t pds = part->P();
    Double_t wrongMassDstar = TMath::Sqrt(esum * esum - pds * pds);
    TString fillthis = "histWrongSignMass_";
    fillthis += ptbin;
    // Guard the FindObject casts so a missing histogram (e.g. unexpected
    // pt bin) no longer dereferences a null pointer.
    TH1F* hPtBin = (TH1F*)listout->FindObject(fillthis);
    if (hPtBin) hPtBin->Fill(wrongMassDstar - wrongMassD0);
    fillthis = "histWrongSignMass";
    TH1F* hIntegrated = (TH1F*)listout->FindObject(fillthis);
    if (hIntegrated) hIntegrated->Fill(wrongMassDstar - wrongMassD0);
  }
}
//-------------------------------------------------------------------------------
Int_t AliAnalysisTaskSEDStarEMCALProductionCheck::CheckOrigin(TClonesArray* arrayMC, const AliAODMCParticle *mcPartCandidate) const {
  //
  // Walk up the MC ancestry of the candidate and decide whether it descends
  // from a beauty hadron (return 5) or comes from prompt charm (return 4).
  //
  Bool_t beautyAncestorFound = kFALSE;
  Int_t ancestorLabel = mcPartCandidate->GetMother();
  while (ancestorLabel > 0) {
    AliAODMCParticle* ancestor = dynamic_cast<AliAODMCParticle*>(arrayMC->At(ancestorLabel));
    if (!ancestor) {
      AliError("Failed casting the mother particle!");
      break;
    }
    const Int_t absPdg = TMath::Abs(ancestor->GetPdgCode());
    // B mesons (500-599) and b baryons (5000-5999) flag a beauty origin.
    if ((absPdg > 500 && absPdg < 600) || (absPdg > 5000 && absPdg < 6000)) {
      beautyAncestorFound = kTRUE;
    }
    ancestorLabel = ancestor->GetMother();
  }
  return beautyAncestorFound ? 5 : 4;
}
//-------------------------------------------------------------------------------------
Float_t AliAnalysisTaskSEDStarEMCALProductionCheck::GetTrueImpactParameterD0(const AliAODMCHeader *mcHeader, TClonesArray* arrayMC, const AliAODMCParticle *partDp) const {
  /// True (MC) transverse impact parameter of the D0: builds a two-prong
  /// pseudo-candidate from the MC daughter momenta and the true primary /
  /// decay vertices and returns its ImpParXY().
  /// Returns 99999. when the two K/pi daughters cannot be resolved.
  Double_t vtxTrue[3];
  mcHeader->GetVertex(vtxTrue);
  Double_t origD[3];
  partDp->XvYvZv(origD);
  Short_t charge = partDp->Charge();
  // The D0 has exactly two prongs (K and pi); arrays are sized and
  // zero-initialised accordingly so no element is ever read uninitialised.
  Double_t pXdauTrue[2] = {0., 0.}, pYdauTrue[2] = {0., 0.}, pZdauTrue[2] = {0., 0.};
  Int_t labelFirstDau = partDp->GetDaughterLabel(0);
  Int_t nDau = partDp->GetNDaughters();
  Int_t theDau = 0;
  if (nDau == 2) {
    for (Int_t iDau = 0; iDau < 2; iDau++) {
      Int_t ind = labelFirstDau + iDau;
      AliAODMCParticle* part = dynamic_cast<AliAODMCParticle*>(arrayMC->At(ind));
      if (!part) {
        AliError("Daughter particle not found in MC array");
        return 99999.;
      }
      Int_t pdgCode = TMath::Abs(part->GetPdgCode());
      // Keep only the pion (211) / kaon (321) daughters.
      if (pdgCode == 211 || pdgCode == 321) {
        pXdauTrue[theDau] = part->Px();
        pYdauTrue[theDau] = part->Py();
        pZdauTrue[theDau] = part->Pz();
        ++theDau;
      }
    }
  }
  if (theDau != 2) {
    AliError("Wrong number of decay prongs");
    return 99999.;
  }
  Double_t d0dummy[2] = {0., 0.};
  // Bug fix: this pseudo-candidate is a 2-prong D0. The original passed 3 as
  // the prong count, which made AliAODRecoDecayHF read an uninitialised third
  // momentum entry and corrupted the impact-parameter calculation.
  AliAODRecoDecayHF aodD0MC(vtxTrue, origD, 2, charge, pXdauTrue, pYdauTrue, pZdauTrue, d0dummy);
  return aodD0MC.ImpParXY();
}
//______________________________________________________-
void AliAnalysisTaskSEDStarEMCALProductionCheck::CreateImpactParameterHistos() {
/// Histos for impact paramter study
Int_t nbins[3] = {400, 200, fNImpParBins};
Double_t xmin[3] = {1.75, 0., fLowerImpPar};
Double_t xmax[3] = {1.98, 20., fHigherImpPar};
fHistMassPtImpParTCDs[0] = new THnSparseF("hMassPtImpParAll",
"Mass vs. pt vs.imppar - All",
3, nbins, xmin, xmax);
fHistMassPtImpParTCDs[1] = new THnSparseF("hMassPtImpParPrompt",
"Mass vs. pt vs.imppar - promptD",
3, nbins, xmin, xmax);
fHistMassPtImpParTCDs[2] = new THnSparseF("hMassPtImpParBfeed",
"Mass vs. pt vs.imppar - DfromB",
3, nbins, xmin, xmax);
fHistMassPtImpParTCDs[3] = new THnSparseF("hMassPtImpParTrueBfeed",
"Mass vs. pt vs.true imppar -DfromB",
3, nbins, xmin, xmax);
fHistMassPtImpParTCDs[4] = new THnSparseF("hMassPtImpParBkg",
"Mass vs. pt vs.imppar - backgr.",
3, nbins, xmin, xmax);
for (Int_t i = 0; i < 5; i++) {
fOutput->Add(fHistMassPtImpParTCDs[i]);
}
}
|
/*===================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef __itkEnhancedScalarImageToRunLengthMatrixFilter_hxx
#define __itkEnhancedScalarImageToRunLengthMatrixFilter_hxx
#include "itkEnhancedScalarImageToRunLengthMatrixFilter.h"
#include "itkConstNeighborhoodIterator.h"
#include "itkNeighborhood.h"
#include "vnl/vnl_math.h"
#include "itkMacro.h"
namespace itk
{
namespace Statistics
{
/// Default constructor: wires up one required input and one histogram output
/// and seeds the histogram axis bounds from the default member values.
template<typename TImageType, typename THistogramFrequencyContainer>
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::EnhancedScalarImageToRunLengthMatrixFilter() :
m_NumberOfBinsPerAxis( itkGetStaticConstMacro( DefaultBinsPerAxis ) ),
m_Min( NumericTraits<PixelType>::NonpositiveMin() ),
m_Max( NumericTraits<PixelType>::max() ),
m_MinDistance( NumericTraits<RealType>::ZeroValue() ),
m_MaxDistance( NumericTraits<RealType>::max() ),
m_InsidePixelValue( NumericTraits<PixelType>::OneValue() )
{
this->SetNumberOfRequiredInputs( 1 );
this->SetNumberOfRequiredOutputs( 1 );
// Histogram measurement vectors are 2-D: component 0 is bounded by the
// pixel-value members, component 1 by the distance members (see below).
const unsigned int measurementVectorSize = 2;
// Create the histogram output up front so GetOutput() is valid immediately.
this->ProcessObject::SetNthOutput( 0, this->MakeOutput( 0 ) );
// const_cast needed because GetOutput() only exposes a const histogram.
HistogramType *output = const_cast<HistogramType *>( this->GetOutput() );
output->SetMeasurementVectorSize( measurementVectorSize );
// Initial bounds come from the member defaults set in the init-list;
// presumably callers override them via the filter's setters before Update()
// — confirm against the class header.
this->m_LowerBound.SetSize( measurementVectorSize );
this->m_UpperBound.SetSize( measurementVectorSize );
this->m_LowerBound[0] = this->m_Min;
this->m_LowerBound[1] = this->m_MinDistance;
this->m_UpperBound[0] = this->m_Max;
this->m_UpperBound[1] = this->m_MaxDistance;
}
template<typename TImageType, typename THistogramFrequencyContainer>
void
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::SetOffset( const OffsetType offset )
{
  // Convenience overload: wrap the single offset in a one-element vector and
  // route it through the general SetOffsets() entry point.
  OffsetVectorPointer singleOffsetVector = OffsetVector::New();
  singleOffsetVector->push_back( offset );
  this->SetOffsets( singleOffsetVector );
}
/// Set the image to analyze as process-object input 0.
template<typename TImageType, typename THistogramFrequencyContainer>
void
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::SetInput( const ImageType *image )
{
// Process object is not const-correct so the const_cast is required here
this->ProcessObject::SetNthInput( 0, const_cast<ImageType *>( image ) );
}
/// Set the optional mask image as process-object input 1.
template<typename TImageType, typename THistogramFrequencyContainer>
void
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::SetMaskImage( const ImageType *image )
{
// Process object is not const-correct so the const_cast is required here
this->ProcessObject::SetNthInput( 1, const_cast<ImageType *>( image ) );
}
template<typename TImageType, typename THistogramFrequencyContainer>
const TImageType *
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::GetInput() const
{
  // The primary input lives at index 0; it is absent until SetInput() ran.
  if( this->GetNumberOfInputs() > 0 )
    {
    return static_cast<const ImageType *>( this->ProcessObject::GetInput( 0 ) );
    }
  return ITK_NULLPTR;
}
template<typename TImageType, typename THistogramFrequencyContainer>
const TImageType *
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::GetMaskImage() const
{
  // The optional mask lives at input index 1; null when never set.
  if( this->GetNumberOfInputs() > 1 )
    {
    return static_cast<const ImageType *>( this->ProcessObject::GetInput( 1 ) );
    }
  return ITK_NULLPTR;
}
template<typename TImageType, typename THistogramFrequencyContainer>
const typename EnhancedScalarImageToRunLengthMatrixFilter<TImageType,
THistogramFrequencyContainer >::HistogramType *
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::GetOutput() const
{
  // Output 0 is the run-length histogram produced by GenerateData().
  return static_cast<const HistogramType *>( this->ProcessObject::GetOutput( 0 ) );
}
template<typename TImageType, typename THistogramFrequencyContainer>
typename EnhancedScalarImageToRunLengthMatrixFilter<TImageType,
THistogramFrequencyContainer>::DataObjectPointer
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::MakeOutput( DataObjectPointerArraySizeType itkNotUsed( idx ) )
{
// Factory for the filter's single output: a fresh, empty histogram.
// The index is ignored because this filter has exactly one output.
return HistogramType::New().GetPointer();
}
template<typename TImageType, typename THistogramFrequencyContainer>
void
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::GenerateData()
{
// Build the 2-D run-length histogram (axis 0 = intensity bin, axis 1 = run
// length as physical distance) for every configured offset direction.
HistogramType *output =
static_cast<HistogramType *>( this->ProcessObject::GetOutput( 0 ) );
const ImageType * inputImage = this->GetInput();
// First, create an appropriate histogram with the right number of bins
// and mins and maxes correct for the image type.
typename HistogramType::SizeType size( output->GetMeasurementVectorSize() );
size.Fill( this->m_NumberOfBinsPerAxis );
// Refresh the bounds from the current user settings (the constructor only
// seeded them with defaults).
this->m_LowerBound[0] = this->m_Min;
this->m_LowerBound[1] = this->m_MinDistance;
this->m_UpperBound[0] = this->m_Max;
this->m_UpperBound[1] = this->m_MaxDistance;
output->Initialize( size, this->m_LowerBound, this->m_UpperBound );
MeasurementVectorType run( output->GetMeasurementVectorSize() );
typename HistogramType::IndexType hIndex;
// Iterate over all of those pixels and offsets, adding each
// distance/intensity pair to the histogram
typedef ConstNeighborhoodIterator<ImageType> NeighborhoodIteratorType;
typename NeighborhoodIteratorType::RadiusType radius;
radius.Fill( 1 );
NeighborhoodIteratorType neighborIt( radius,
inputImage, inputImage->GetRequestedRegion() );
// this temp image has the same dimension for each offset
// moving the allocation out of loop of offsets
// while keeping FillBuffer with boolean false in each loop
typedef Image<bool, ImageDimension> BoolImageType;
typename BoolImageType::Pointer alreadyVisitedImage = BoolImageType::New();
alreadyVisitedImage->CopyInformation( inputImage );
alreadyVisitedImage->SetRegions( inputImage->GetRequestedRegion() );
alreadyVisitedImage->Allocate();
typename OffsetVector::ConstIterator offsets;
for( offsets = this->GetOffsets()->Begin();
offsets != this->GetOffsets()->End(); offsets++ )
{
alreadyVisitedImage->FillBuffer( false );
neighborIt.GoToBegin();
OffsetType offset = offsets.Value();
// Canonicalize the direction so an offset and its negation produce the same
// scan and are not double-counted.
this->NormalizeOffsetDirection(offset);
for( neighborIt.GoToBegin(); !neighborIt.IsAtEnd(); ++neighborIt )
{
const PixelType centerPixelIntensity = neighborIt.GetCenterPixel();
IndexType centerIndex = neighborIt.GetIndex();
if( centerPixelIntensity < this->m_Min ||
centerPixelIntensity > this->m_Max ||
alreadyVisitedImage->GetPixel( centerIndex ) || ( this->GetMaskImage() &&
this->GetMaskImage()->GetPixel( centerIndex ) !=
this->m_InsidePixelValue ) )
{
continue; // don't put a pixel in the histogram if the value
// is out-of-bounds or is outside the mask.
}
itkDebugMacro("===> offset = " << offset << std::endl);
// A run extends as long as pixel values stay inside the center pixel's
// intensity bin, so fetch that bin's boundaries once.
MeasurementType centerBinMin = this->GetOutput()->
GetBinMinFromValue( 0, centerPixelIntensity );
MeasurementType centerBinMax = this->GetOutput()->
GetBinMaxFromValue( 0, centerPixelIntensity );
MeasurementType lastBinMax = this->GetOutput()->
GetDimensionMaxs( 0 )[ this->GetOutput()->GetSize( 0 ) - 1 ];
PixelType pixelIntensity( NumericTraits<PixelType>::ZeroValue() );
IndexType index;
index = centerIndex + offset;
IndexType lastGoodIndex = centerIndex;
bool runLengthSegmentAlreadyVisited = false;
// Scan from the current pixel at index, following
// the direction of offset. Run length is computed as the
// length of continuous pixels whose pixel values are
// in the same bin.
while ( inputImage->GetRequestedRegion().IsInside(index) )
{
// For the same offset, each run length segment can
// only be visited once
if (alreadyVisitedImage->GetPixel( index ) )
{
runLengthSegmentAlreadyVisited = true;
break;
}
pixelIntensity = inputImage->GetPixel( index );
// Special attention paid to boundaries of bins.
// For the last bin,
// it is left close and right close (following the previous
// gerrit patch).
// For all
// other bins,
// the bin is left close and right open.
if ( pixelIntensity >= centerBinMin
&& ( pixelIntensity < centerBinMax || ( pixelIntensity == centerBinMax && centerBinMax == lastBinMax ) ) )
{
alreadyVisitedImage->SetPixel( index, true );
lastGoodIndex = index;
index += offset;
}
else
{
break;
}
}
if ( runLengthSegmentAlreadyVisited )
{
continue;
}
// Now scan the opposite direction from the center, so the run covers the
// whole segment through the center pixel.
IndexType lastGoodIndex2 = lastGoodIndex;
index = centerIndex - offset;
lastGoodIndex = centerIndex;
while ( inputImage->GetRequestedRegion().IsInside(index) )
{
if (alreadyVisitedImage->GetPixel( index ) )
{
runLengthSegmentAlreadyVisited = true;
break;
}
pixelIntensity = inputImage->GetPixel( index );
if ( pixelIntensity >= centerBinMin
&& ( pixelIntensity < centerBinMax || ( pixelIntensity == centerBinMax && centerBinMax == lastBinMax ) ) )
{
alreadyVisitedImage->SetPixel( index, true );
lastGoodIndex = index;
index -= offset;
}
else
break;
}
if ( runLengthSegmentAlreadyVisited )
continue;
// Record the measurement: the run length is the physical (world-space)
// distance between the two endpoints of the segment.
PointType centerPoint;
inputImage->TransformIndexToPhysicalPoint(
centerIndex, centerPoint );
PointType point;
inputImage->TransformIndexToPhysicalPoint( lastGoodIndex, point );
PointType point2;
inputImage->TransformIndexToPhysicalPoint( lastGoodIndex2, point2 );
run[0] = centerPixelIntensity;
run[1] = point.EuclideanDistanceTo( point2 );
if( run[1] >= this->m_MinDistance && run[1] <= this->m_MaxDistance )
{
output->GetIndex( run, hIndex );
output->IncreaseFrequencyOfIndex( hIndex, 1 );
itkDebugStatement(typename HistogramType::IndexType tempMeasurementIndex;)
itkDebugStatement(output->GetIndex(run,tempMeasurementIndex);)
itkDebugMacro( "centerIndex<->index: "
<< static_cast<int>( centerPixelIntensity )
<< "@"<< centerIndex
<< "<->" << static_cast<int>( pixelIntensity ) << "@" << index
<<", Bin# " << tempMeasurementIndex
<< ", Measurement: (" << run[0] << ", " << run[1] << ")"
<< ", Center bin [" << this->GetOutput()->GetBinMinFromValue( 0, run[0] )
<< "," << this->GetOutput()->GetBinMaxFromValue( 0, run[0] ) << "]"
<< "~[" << this->GetOutput()->GetBinMinFromValue( 1, run[1] )
<< "," << this->GetOutput()->GetBinMaxFromValue( 1, run[1] ) << "]"
<< std::endl );
}
}
}
}
template<typename TImageType, typename THistogramFrequencyContainer>
void
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::SetPixelValueMinMax( PixelType min, PixelType max )
{
  // Set the accepted pixel-intensity range. No-op when unchanged, so the
  // filter's modified time is only bumped on a real change.
  if( this->m_Min != min || this->m_Max != max )
    {
    // Fixed: the debug message lacked a space before "and" ("3and Max to 5").
    itkDebugMacro( "setting Min to " << min << " and Max to " << max );
    this->m_Min = min;
    this->m_Max = max;
    this->Modified();
    }
}
template<typename TImageType, typename THistogramFrequencyContainer>
void
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::SetDistanceValueMinMax( RealType min, RealType max )
{
  // Set the accepted run-length (distance) range. No-op when unchanged, so
  // the filter's modified time is only bumped on a real change.
  if( this->m_MinDistance != min || this->m_MaxDistance != max )
    {
    // Fixed: the debug message lacked a space before "and".
    itkDebugMacro( "setting MinDistance to " << min << " and MaxDistance to "
                   << max );
    this->m_MinDistance = min;
    this->m_MaxDistance = max;
    this->Modified();
    }
}
template<typename TImageType, typename THistogramFrequencyContainer>
void
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::PrintSelf( std::ostream& os, Indent indent ) const
{
// Dump the filter's configuration (after the superclass state) for debugging.
Superclass::PrintSelf( os,indent );
os << indent << "Offsets: " << this->GetOffsets() << std::endl;
os << indent << "Min: " << this->m_Min << std::endl;
os << indent << "Max: " << this->m_Max << std::endl;
os << indent << "Min distance: " << this->m_MinDistance << std::endl;
os << indent << "Max distance: " << this->m_MaxDistance << std::endl;
os << indent << "NumberOfBinsPerAxis: " << this->m_NumberOfBinsPerAxis
<< std::endl;
os << indent << "InsidePixelValue: " << this->m_InsidePixelValue << std::endl;
}
template<typename TImageType, typename THistogramFrequencyContainer>
void
EnhancedScalarImageToRunLengthMatrixFilter<TImageType, THistogramFrequencyContainer>
::NormalizeOffsetDirection(OffsetType &offset)
{
// Canonicalize the offset so that an offset and its negation map to the same
// direction: scanning from the highest dimension down, the last non-zero
// component is made positive, and every lower component is flipped with it.
itkDebugMacro("old offset = " << offset << std::endl);
int sign = 1;
bool metLastNonZero = false;
for (int i = offset.GetOffsetDimension()-1; i>=0; i--)
{
if (metLastNonZero)
{
// All components below the deciding one get the same sign flip.
offset[i] *= sign;
}
else if (offset[i] != 0)
{
// First non-zero component (from the end) decides whether to negate.
sign = (offset[i] > 0 ) ? 1 : -1;
metLastNonZero = true;
offset[i] *= sign;
}
}
itkDebugMacro("new offset = " << offset << std::endl);
}
} // end of namespace Statistics
} // end of namespace itk
#endif
|
/*
Copyright (c) 2013, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef XOR_FAST_HASH_STORAGE_HPP
#define XOR_FAST_HASH_STORAGE_HPP
#include "xor_fast_hash.hpp"
#include <limits>
#include <vector>
template <typename NodeID, typename Key> class XORFastHashStorage
{
  public:
    // One slot of the open-addressed hash table. `time` stamps the generation
    // that wrote the slot, which lets Clear() invalidate the whole table in
    // O(1) by bumping current_timestamp instead of touching every cell.
    struct HashCell
    {
        unsigned time;
        NodeID id;
        Key key;
        HashCell()
            : time(std::numeric_limits<unsigned>::max()), id(std::numeric_limits<unsigned>::max()),
              key(std::numeric_limits<unsigned>::max())
        {
        }
        // BUG FIX: this copy constructor previously cross-initialized the
        // members -- `time` from other.key and `key` from other.time --
        // silently corrupting any cell that got copied.
        HashCell(const HashCell &other) : time(other.time), id(other.id), key(other.key) {}
        operator Key() const { return key; }
        void operator=(const Key key_to_insert) { key = key_to_insert; }
    };

    XORFastHashStorage() = delete;

    // The requested size is ignored; the table always holds 2 << 16 cells.
    // NOTE(review): `position` below is unsigned short, so only the first
    // 2^16 cells are ever addressed and the `%= (2 << 16)` is effectively a
    // no-op (the short wraps on its own) -- confirm the doubled size is
    // intentional headroom.
    explicit XORFastHashStorage(size_t) : positions(2 << 16), current_timestamp(0) {}

    // Find (or claim) the cell for `node` by linear probing, stamp it with
    // the current generation, and return it.
    HashCell &operator[](const NodeID node)
    {
        unsigned short position = fast_hasher(node);
        while ((positions[position].time == current_timestamp) && (positions[position].id != node))
        {
            ++position %= (2 << 16);
        }
        positions[position].time = current_timestamp;
        positions[position].id = node;
        return positions[position];
    }

    // peek into table, get key for node, think of it as a read-only operator[]
    Key peek_index(const NodeID node) const
    {
        unsigned short position = fast_hasher(node);
        while ((positions[position].time == current_timestamp) && (positions[position].id != node))
        {
            ++position %= (2 << 16);
        }
        return positions[position].key;
    }

    // Logically empties the table by advancing the generation counter; the
    // storage itself is only rebuilt when the counter is about to wrap.
    void Clear()
    {
        ++current_timestamp;
        if (std::numeric_limits<unsigned>::max() == current_timestamp)
        {
            positions.clear();
            positions.resize(2 << 16);
        }
    }

  private:
    std::vector<HashCell> positions;
    XORFastHash fast_hasher;
    unsigned current_timestamp;
};
#endif // XOR_FAST_HASH_STORAGE_HPP
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated by the Codegen C++ plugin.
// If you make any local changes, they will be lost.
// source: google/cloud/accessapproval/v1/accessapproval.proto
#include "google/cloud/accessapproval/access_approval_client.h"
#include "google/cloud/accessapproval/internal/access_approval_option_defaults.h"
#include <memory>
namespace google {
namespace cloud {
namespace accessapproval {
GOOGLE_CLOUD_CPP_INLINE_NAMESPACE_BEGIN
// Wraps an existing connection. Client-level options are merged over the
// connection's defaults; individual calls may override them again.
AccessApprovalClient::AccessApprovalClient(
std::shared_ptr<AccessApprovalConnection> connection, Options opts)
: connection_(std::move(connection)),
options_(internal::MergeOptions(
std::move(opts),
accessapproval_internal::AccessApprovalDefaultOptions(
connection_->options()))) {}
// Defaulted out of line (generated-code convention; keeps it in this TU).
AccessApprovalClient::~AccessApprovalClient() = default;
// Convenience overload: builds the List request from `parent` and streams the
// paginated results from the connection.
StreamRange<google::cloud::accessapproval::v1::ApprovalRequest>
AccessApprovalClient::ListApprovalRequests(std::string const& parent,
Options opts) {
// Install the merged (call-level over client-level) options for this call.
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
google::cloud::accessapproval::v1::ListApprovalRequestsMessage request;
request.set_parent(parent);
return connection_->ListApprovalRequests(request);
}
// Full-request overload: forwards the caller-built request to the connection.
StreamRange<google::cloud::accessapproval::v1::ApprovalRequest>
AccessApprovalClient::ListApprovalRequests(
google::cloud::accessapproval::v1::ListApprovalRequestsMessage request,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
return connection_->ListApprovalRequests(std::move(request));
}
// Convenience overload: fetches a single approval request by resource `name`.
StatusOr<google::cloud::accessapproval::v1::ApprovalRequest>
AccessApprovalClient::GetApprovalRequest(std::string const& name,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
google::cloud::accessapproval::v1::GetApprovalRequestMessage request;
request.set_name(name);
return connection_->GetApprovalRequest(request);
}
// Full-request overload: forwards the caller-built request to the connection.
StatusOr<google::cloud::accessapproval::v1::ApprovalRequest>
AccessApprovalClient::GetApprovalRequest(
google::cloud::accessapproval::v1::GetApprovalRequestMessage const& request,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
return connection_->GetApprovalRequest(request);
}
// Forwards an approve request; only the full-request form exists for this RPC.
StatusOr<google::cloud::accessapproval::v1::ApprovalRequest>
AccessApprovalClient::ApproveApprovalRequest(
google::cloud::accessapproval::v1::ApproveApprovalRequestMessage const&
request,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
return connection_->ApproveApprovalRequest(request);
}
// Forwards a dismiss request; only the full-request form exists for this RPC.
StatusOr<google::cloud::accessapproval::v1::ApprovalRequest>
AccessApprovalClient::DismissApprovalRequest(
google::cloud::accessapproval::v1::DismissApprovalRequestMessage const&
request,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
return connection_->DismissApprovalRequest(request);
}
// Convenience overload: fetches the settings resource identified by `name`.
StatusOr<google::cloud::accessapproval::v1::AccessApprovalSettings>
AccessApprovalClient::GetAccessApprovalSettings(std::string const& name,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
google::cloud::accessapproval::v1::GetAccessApprovalSettingsMessage request;
request.set_name(name);
return connection_->GetAccessApprovalSettings(request);
}
// Full-request overload: forwards the caller-built request to the connection.
StatusOr<google::cloud::accessapproval::v1::AccessApprovalSettings>
AccessApprovalClient::GetAccessApprovalSettings(
google::cloud::accessapproval::v1::GetAccessApprovalSettingsMessage const&
request,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
return connection_->GetAccessApprovalSettings(request);
}
// Convenience overload: builds the update request from the new `settings` and
// the `update_mask` naming which fields to change.
StatusOr<google::cloud::accessapproval::v1::AccessApprovalSettings>
AccessApprovalClient::UpdateAccessApprovalSettings(
google::cloud::accessapproval::v1::AccessApprovalSettings const& settings,
google::protobuf::FieldMask const& update_mask, Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
google::cloud::accessapproval::v1::UpdateAccessApprovalSettingsMessage
request;
*request.mutable_settings() = settings;
*request.mutable_update_mask() = update_mask;
return connection_->UpdateAccessApprovalSettings(request);
}
// Full-request overload: forwards the caller-built request to the connection.
StatusOr<google::cloud::accessapproval::v1::AccessApprovalSettings>
AccessApprovalClient::UpdateAccessApprovalSettings(
google::cloud::accessapproval::v1::
UpdateAccessApprovalSettingsMessage const& request,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
return connection_->UpdateAccessApprovalSettings(request);
}
// Convenience overload: builds the delete request from the resource `name`.
Status AccessApprovalClient::DeleteAccessApprovalSettings(
std::string const& name, Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
google::cloud::accessapproval::v1::DeleteAccessApprovalSettingsMessage
request;
request.set_name(name);
return connection_->DeleteAccessApprovalSettings(request);
}
// Full-request overload: forwards the caller-built request to the connection.
Status AccessApprovalClient::DeleteAccessApprovalSettings(
google::cloud::accessapproval::v1::
DeleteAccessApprovalSettingsMessage const& request,
Options opts) {
internal::OptionsSpan span(internal::MergeOptions(std::move(opts), options_));
return connection_->DeleteAccessApprovalSettings(request);
}
GOOGLE_CLOUD_CPP_INLINE_NAMESPACE_END
} // namespace accessapproval
} // namespace cloud
} // namespace google
|
/*
----------------------
BeDC License
----------------------
Copyright 2002, The BeDC team.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions, and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the BeDC team nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "DCStringTokenizer.h"
#include <stdio.h>
DCStringTokenizer::DCStringTokenizer(const BString & str, char token)
{
	// Split `str` on `token` into fTokens. Empty fields between consecutive
	// separators are kept; a trailing separator does NOT produce a trailing
	// empty field. (The original made a full working copy of `str` that was
	// only used for its length -- dropped here.)
	int32 i = 0;
	int32 j = 0;
	while ((i = str.FindFirst(token, j)) != B_ERROR)
	{
		BString cpy;
		str.CopyInto(cpy, j, i - j);
		fTokens.push_back(cpy);
		j = i + 1;
	}
	// Any text after the last separator becomes the final token.
	if (j < str.Length())
	{
		BString cpy;
		str.CopyInto(cpy, j, str.Length() - j);
		fTokens.push_back(cpy);
	}
}
|
/*
SoLoud audio engine
Copyright (c) 2013-2015 Jari Komppa
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
*/
#undef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <string.h>
#include "soloud.h"
#include "soloud_file.h"
namespace SoLoud
{
unsigned int File::read8()
{
	// Read a single byte from the stream, widened to unsigned int.
	unsigned char value = 0;
	read(&value, 1);
	return value;
}
unsigned int File::read16()
{
// Read a 16-bit little-endian value from the stream.
unsigned short d = 0;
read((unsigned char*)&d, 2);
#ifdef __amigaos4__
// AmigaOS 4 is big-endian: byte-swap the little-endian on-disk data.
d = (d >> 8) | (d << 8);
#endif
return d;
}
unsigned int File::read32()
{
// Read a 32-bit little-endian value from the stream.
unsigned int d = 0;
read((unsigned char*)&d, 4);
#ifdef __amigaos4__
// AmigaOS 4 is big-endian: byte-swap the little-endian on-disk data.
d = (d >> 24) | ((d >> 8) & 0xFF00) | ((d << 8) & 0xFF0000) | (d << 24);
#endif
return d;
}
unsigned int DiskFile::read(unsigned char *aDst, unsigned int aBytes)
{
// Read up to aBytes from the stdio stream; returns bytes actually read.
return (unsigned int)fread(aDst, 1, aBytes, mFileHandle);
}
unsigned int DiskFile::length()
{
// File size obtained by seeking to the end, then restoring the position.
// NOTE(review): assumes mFileHandle is non-null (open() succeeded first).
int pos = ftell(mFileHandle);
fseek(mFileHandle, 0, SEEK_END);
int len = ftell(mFileHandle);
fseek(mFileHandle, pos, SEEK_SET);
return len;
}
void DiskFile::seek(int aOffset)
{
// Absolute seek from the beginning of the file.
fseek(mFileHandle, aOffset, SEEK_SET);
}
unsigned int DiskFile::pos()
{
// Current read position within the file.
return ftell(mFileHandle);
}
FILE *DiskFile::getFilePtr()
{
// Expose the raw stdio handle; ownership remains with this DiskFile.
return mFileHandle;
}
DiskFile::~DiskFile()
{
// Close the stream if one was opened; destroying an unopened file is safe.
if (mFileHandle)
fclose(mFileHandle);
}
DiskFile::DiskFile()
{
// Start with no file attached; open() sets the handle.
mFileHandle = 0;
}
result DiskFile::open(const char *aFilename)
{
	// Open aFilename for binary reading.
	// Returns INVALID_PARAMETER, FILE_NOT_FOUND or SO_NO_ERROR.
	if (!aFilename)
		return INVALID_PARAMETER;
	// Close any previously opened stream so repeated open() calls don't leak
	// the old FILE handle (the original overwrote it unconditionally).
	if (mFileHandle)
		fclose(mFileHandle);
	mFileHandle = fopen(aFilename, "rb");
	if (!mFileHandle)
		return FILE_NOT_FOUND;
	return SO_NO_ERROR;
}
int DiskFile::eof()
{
// Nonzero once a read has hit end-of-file (stdio feof semantics).
return feof(mFileHandle);
}
unsigned int MemoryFile::read(unsigned char *aDst, unsigned int aBytes)
{
	// Copy up to aBytes from the in-memory buffer, advancing mOffset.
	// Returns the number of bytes actually copied.
	if (mOffset >= mDataLength)
		return 0; // at (or past) the end: nothing left to read
	// Clamp without computing mOffset + aBytes, which could wrap around for
	// very large aBytes and defeat the bounds check.
	if (aBytes > mDataLength - mOffset)
		aBytes = mDataLength - mOffset;
	memcpy(aDst, mDataPtr + mOffset, aBytes);
	mOffset += aBytes;
	return aBytes;
}
unsigned int MemoryFile::length()
{
// Total size of the attached buffer in bytes.
return mDataLength;
}
void MemoryFile::seek(int aOffset)
{
	// Position the read cursor; negative offsets are relative to the end.
	if (aOffset >= 0)
		mOffset = aOffset;
	else
		mOffset = mDataLength + aOffset;
	// Clamp to the last valid byte. Guard the empty-buffer case first:
	// mDataLength - 1 would wrap to UINT_MAX when mDataLength == 0, leaving
	// mOffset out of range.
	if (mDataLength == 0)
		mOffset = 0;
	else if (mOffset > mDataLength - 1)
		mOffset = mDataLength - 1;
}
unsigned int MemoryFile::pos()
{
// Current read position within the buffer.
return mOffset;
}
unsigned char * MemoryFile::getMemPtr()
{
// Expose the raw buffer; ownership (if any) remains with this MemoryFile.
return mDataPtr;
}
MemoryFile::~MemoryFile()
{
// Free the buffer only when this object owns it (copy / take-ownership modes).
if (mDataOwned)
delete[] mDataPtr;
}
MemoryFile::MemoryFile()
{
// Start detached: no buffer, zero length, cursor at the beginning.
mDataPtr = 0;
mDataLength = 0;
mOffset = 0;
mDataOwned = false;
}
result MemoryFile::openMem(unsigned char *aData, unsigned int aDataLength, bool aCopy, bool aTakeOwnership)
{
// Attach a caller-supplied buffer. With aCopy the data is duplicated into an
// owned buffer; otherwise the pointer is borrowed (or owned, if
// aTakeOwnership) and must outlive this object.
if (aData == NULL || aDataLength == 0)
return INVALID_PARAMETER;
// Free any previously owned buffer before rebinding.
if (mDataOwned)
delete[] mDataPtr;
mDataPtr = 0;
mOffset = 0;
mDataLength = aDataLength;
if (aCopy)
{
mDataOwned = true;
mDataPtr = new unsigned char[aDataLength];
// NOTE(review): plain new throws instead of returning NULL, so this check
// is dead code unless a non-throwing allocator is in play.
if (mDataPtr == NULL)
return OUT_OF_MEMORY;
memcpy(mDataPtr, aData, aDataLength);
return SO_NO_ERROR;
}
mDataPtr = aData;
mDataOwned = aTakeOwnership;
return SO_NO_ERROR;
}
result MemoryFile::openToMem(const char *aFile)
{
	// Load the whole file at path aFile into an owned memory buffer.
	// Returns INVALID_PARAMETER, FILE_NOT_FOUND, OUT_OF_MEMORY or SO_NO_ERROR.
	if (!aFile)
		return INVALID_PARAMETER;
	// Release any previously owned buffer and reset ALL state up front, so a
	// failed open leaves the object empty and consistent (the original left
	// mDataOwned / mDataLength stale on the error paths).
	if (mDataOwned)
		delete[] mDataPtr;
	mDataPtr = 0;
	mDataOwned = false;
	mDataLength = 0;
	mOffset = 0;
	DiskFile df;
	int res = df.open(aFile);
	if (res != SO_NO_ERROR)
		return res;
	mDataLength = df.length();
	mDataPtr = new unsigned char[mDataLength];
	if (mDataPtr == NULL)
	{
		mDataLength = 0;
		return OUT_OF_MEMORY;
	}
	df.read(mDataPtr, mDataLength);
	mDataOwned = true;
	return SO_NO_ERROR;
}
result MemoryFile::openFileToMem(File *aFile)
{
	// Snapshot another (already open) File into an owned memory buffer.
	// Null check added for consistency with openToMem()/openMem(), which both
	// validate their source argument.
	if (!aFile)
		return INVALID_PARAMETER;
	// Reset all state up front so error paths leave the object consistent.
	if (mDataOwned)
		delete[] mDataPtr;
	mDataPtr = 0;
	mDataOwned = false;
	mDataLength = 0;
	mOffset = 0;
	mDataLength = aFile->length();
	mDataPtr = new unsigned char[mDataLength];
	if (mDataPtr == NULL)
	{
		mDataLength = 0;
		return OUT_OF_MEMORY;
	}
	aFile->read(mDataPtr, mDataLength);
	mDataOwned = true;
	return SO_NO_ERROR;
}
int MemoryFile::eof()
{
// 1 once the cursor has reached the end of the buffer, else 0.
if (mOffset >= mDataLength)
return 1;
return 0;
}
}
extern "C"
{
int Soloud_Filehack_fgetc(Soloud_Filehack *f)
{
// fgetc()-style wrapper: next byte as 0..255, or EOF at end of file.
SoLoud::File *fp = (SoLoud::File *)f;
if (fp->eof())
return EOF;
return fp->read8();
}
int Soloud_Filehack_fread(void *dst, int s, int c, Soloud_Filehack *f)
{
	// fread()-style wrapper: read c items of s bytes, return items read.
	SoLoud::File *fp = (SoLoud::File *)f;
	// Match C fread(): a zero (or nonsensical negative) size or count reads
	// nothing -- and avoids the division by zero the original had when s == 0.
	if (s <= 0 || c <= 0)
		return 0;
	return fp->read((unsigned char*)dst, s*c) / s;
}
int Soloud_Filehack_fseek(Soloud_Filehack *f, int idx, int base)
{
// fseek()-style wrapper; always reports success (returns 0).
SoLoud::File *fp = (SoLoud::File *)f;
switch (base)
{
case SEEK_CUR:
// Relative to the current position.
fp->seek(fp->pos() + idx);
break;
case SEEK_END:
// Relative to the end of the file (idx is normally <= 0 here).
fp->seek(fp->length() + idx);
break;
default:
// SEEK_SET and anything else: absolute offset.
fp->seek(idx);
}
return 0;
}
int Soloud_Filehack_ftell(Soloud_Filehack *f)
{
// ftell()-style wrapper: current read position.
SoLoud::File *fp = (SoLoud::File *)f;
return fp->pos();
}
int Soloud_Filehack_fclose(Soloud_Filehack *f)
{
// fclose()-style wrapper: destroys the wrapped File (DiskFile's destructor
// closes the underlying stdio handle).
SoLoud::File *fp = (SoLoud::File *)f;
delete fp;
return 0;
}
Soloud_Filehack * Soloud_Filehack_fopen(const char *aFilename, char *aMode)
{
// fopen()-style wrapper. NOTE: aMode is ignored -- files are always opened
// read-only binary ("rb"). Returns NULL on failure, like fopen().
SoLoud::DiskFile *df = new SoLoud::DiskFile();
int res = df->open(aFilename);
if (res != SoLoud::SO_NO_ERROR)
{
delete df;
df = 0;
}
return (Soloud_Filehack*)df;
}
}
|
#pragma once
#include <chrono>
#include <vector>
#include "../collider/collider.hpp"
#include "../rigidbody/rigidbody.hpp"
#include "../field/field.hpp"
#include "../material/material.hpp"
typedef std::chrono::high_resolution_clock Clock;
typedef std::chrono::duration<double, std::ratio<1, 1>> Duration;
// Result of a collision prediction, filled in by the check_will_collide_*
// queries on PhysicsEngine.
struct CollisionInformation {
// The other rigidbody involved in the collision.
Rigidbody *b;
// Contact surface normal.
arma::vec2 normal;
// Contact point.
arma::vec2 at;
// Time of impact. NOTE(review): units/origin not visible here -- presumably
// seconds relative to the current step; confirm in the implementation.
double when;
// Contact direction for each of the two bodies.
CollisionDirection adir, bdir;
};
class Game;
// Simulation core: advances rigidbodies through time and resolves their
// collisions. Construction is private; access goes through ref(), which
// presumably returns a process-wide singleton.
class PhysicsEngine {
public:
// Singleton accessor.
static PhysicsEngine &ref();
~PhysicsEngine();
// Advance the whole simulation by one step.
void timestep();
// Step only the object states (sub-phase of timestep()).
void timestep_objects();
// Apply a collision response between a and b along `normal`.
void resolve_collision(Rigidbody *a, Rigidbody *b, const arma::vec2 &normal);
// Combine two bodies' materials into effective contact properties.
static PhysicsMaterial get_overall_material_properties(const PhysicsMaterial &a, const PhysicsMaterial &b);
void check_set_time();
// Scale simulated time relative to wall-clock time.
void set_timescale(double v);
[[nodiscard]] double get_time() const;
[[nodiscard]] double get_dt() const;
// True when a and b will collide within the step; details written into ci.
[[nodiscard]] bool check_will_collide(const Rigidbody *a, const Rigidbody *b, CollisionInformation &ci) const;
private:
// Response variants: both bodies free vs. one fixed in place.
void resolve_collision_free_bodies(Rigidbody *a, Rigidbody *b, const arma::vec2 &normal);
void resolve_collision_one_fixed(Rigidbody *free_body, Rigidbody *fixed_body, const arma::vec2 &normal);
// Shape-pair specializations dispatched from check_will_collide().
[[nodiscard]] bool check_will_collide_circle_circle(const Rigidbody *a, const Rigidbody *b, CollisionInformation &ci) const;
[[nodiscard]] bool check_will_collide_circle_rect(const Rigidbody *circle, const Rigidbody *rect, CollisionInformation &ci) const;
[[nodiscard]] bool check_will_collide_rect_rect(const Rigidbody *a, const Rigidbody *b, CollisionInformation &ci) const;
PhysicsEngine();
// dt: step size; time: simulated time; irl_time: wall-clock time consumed;
// timescale: sim-to-real ratio (see set_timescale).
double dt, time, irl_time, timescale;
Clock::time_point epoch;
};
|
#include "WorkerManager.h"
using namespace MyBot;
WorkerManager::WorkerManager()
{
// No repair worker is assigned at startup.
currentRepairWorker = nullptr;
}
WorkerManager & WorkerManager::Instance()
{
// Meyers singleton: constructed on first use, shared thereafter.
static WorkerManager instance;
return instance;
}
void WorkerManager::update()
{
// Run only once per second (every 24 frames at normal game speed).
if (BWAPI::Broodwar->getFrameCount() % 24 != 0) return;
// Refresh job bookkeeping, then hand out jobs by priority.
updateWorkerStatus();
handleGasWorkers();
handleIdleWorkers();
handleMoveWorkers();
handleCombatWorkers();
handleRepairWorkers();
}
void WorkerManager::updateWorkerStatus()
{
// Reconcile WorkerData's job bookkeeping with each worker's in-game state.
// Note on Drones: a Drone travels to its build site with isConstructing ==
// true, briefly reports getBuildType() == None, then becomes
// isConstructing == true and isMorphing == true before construction starts.
// for each of our Workers
for (auto & worker : workerData.getWorkers())
{
//if (workerData.getWorkerJob(worker) == WorkerData::Build && worker->getBuildType() == BWAPI::UnitTypes::None)
//{
// std::cout << "construction worker " << worker->getID() << "buildtype BWAPI::UnitTypes::None " << std::endl;
//}
/*
if (worker->isCarryingMinerals()) {
std::cout << "mineral worker isCarryingMinerals " << worker->getID()
<< " isIdle: " << worker->isIdle()
<< " isCompleted: " << worker->isCompleted()
<< " isInterruptible: " << worker->isInterruptible()
<< " target Name: " << worker->getTarget()->getType().getName()
<< " job: " << workerData.getWorkerJob(worker)
<< " exists " << worker->exists()
<< " isConstructing " << worker->isConstructing()
<< " isMorphing " << worker->isMorphing()
<< " isMoving " << worker->isMoving()
<< " isBeingConstructed " << worker->isBeingConstructed()
<< " isStuck " << worker->isStuck()
<< std::endl;
}
*/
// Skip workers that are still being built/trained.
if (!worker->isCompleted())
{
continue;
}
// If a worker became isIdle in-game (just created, or its previous job
// finished), mark it Idle in WorkerData as well; handleGasWorkers,
// handleIdleWorkers, etc. will then assign it a new job.
if ( worker->isIdle() )
{
/*
if ((workerData.getWorkerJob(worker) == WorkerData::Build)
|| (workerData.getWorkerJob(worker) == WorkerData::Move)
|| (workerData.getWorkerJob(worker) == WorkerData::Scout)) {
std::cout << "idle worker " << worker->getID()
<< " job: " << workerData.getWorkerJob(worker)
<< " exists " << worker->exists()
<< " isConstructing " << worker->isConstructing()
<< " isMorphing " << worker->isMorphing()
<< " isMoving " << worker->isMoving()
<< " isBeingConstructed " << worker->isBeingConstructed()
<< " isStuck " << worker->isStuck()
<< std::endl;
}
*/
// Workers assigned Build / Move / Scout in workerData can become isIdle
// temporarily while the job is still in progress, so keep those jobs.
if ((workerData.getWorkerJob(worker) != WorkerData::Build)
&& (workerData.getWorkerJob(worker) != WorkerData::Move)
&& (workerData.getWorkerJob(worker) != WorkerData::Scout))
{
workerData.setWorkerJob(worker, WorkerData::Idle, nullptr);
}
}
// if its job is gas
if (workerData.getWorkerJob(worker) == WorkerData::Gas)
{
BWAPI::Unit refinery = workerData.getWorkerResource(worker);
// if the refinery doesn't exist anymore (it has been destroyed)
if (!refinery || !refinery->exists() || refinery->getHitPoints() <= 0)
{
workerData.setWorkerJob(worker, WorkerData::Idle, nullptr);
}
}
// if its job is repair
if (workerData.getWorkerJob(worker) == WorkerData::Repair)
{
BWAPI::Unit repairTargetUnit = workerData.getWorkerRepairUnit(worker);
// The repair target has been destroyed, or the repair is complete.
if (!repairTargetUnit || !repairTargetUnit->exists() || repairTargetUnit->getHitPoints() <= 0 || repairTargetUnit->getHitPoints() == repairTargetUnit->getType().maxHitPoints())
{
workerData.setWorkerJob(worker, WorkerData::Idle, nullptr);
}
}
}
}
void WorkerManager::handleGasWorkers()
{
// Keep each completed refinery staffed with Config::Macro::WorkersPerRefinery
// workers, pulling them from mineral duty as needed.
// for each unit we have
for (auto & unit : BWAPI::Broodwar->self()->getUnits())
{
// Only consider refineries whose construction has completed.
if (unit->getType().isRefinery() && unit->isCompleted() )
{
// get the number of workers currently assigned to it
int numAssigned = workerData.getNumAssignedWorkers(unit);
// if it's less than we want it to be, fill 'er up
// If mineral workers run short while gas always keeps 3~4 workers, tune
// the Config::Macro::WorkersPerRefinery value.
for (int i = 0; i<(Config::Macro::WorkersPerRefinery - numAssigned); ++i)
{
BWAPI::Unit gasWorker = chooseGasWorkerFromMineralWorkers(unit);
if (gasWorker)
{
//std::cout << "set gasWorker " << gasWorker->getID() << std::endl;
workerData.setWorkerJob(gasWorker, WorkerData::Gas, unit);
}
}
}
}
}
void WorkerManager::handleIdleWorkers()
{
// Put every Idle/Default worker back on mineral duty.
// for each of our workers
for (auto & worker : workerData.getWorkers())
{
if (!worker) continue;
// if worker's job is idle
if (workerData.getWorkerJob(worker) == WorkerData::Idle || workerData.getWorkerJob(worker) == WorkerData::Default )
{
// send it to the nearest mineral patch
setMineralWorker(worker);
}
}
}
// Drives every worker with a Move job toward its stored destination and
// idles it once it has arrived.
void WorkerManager::handleMoveWorkers()
{
// for each of our workers
for (auto & worker : workerData.getWorkers())
{
if (!worker) continue;
// if it is a move worker
if (workerData.getWorkerJob(worker) == WorkerData::Move)
{
WorkerMoveData data = workerData.getWorkerMoveData(worker);
// If the worker has (nearly) reached the destination, clear the move order;
// a distance of under 4 pixels is treated as "arrived".
if (worker->getPosition().getDistance(data.position) < 4) {
setIdleWorker(worker);
}
else {
CommandUtil::move(worker, data.position);
}
}
}
}
// bad micro for combat workers
// Each combat worker simply attacks the closest enemy unit in range;
// there is no kiting or retreat logic.
void WorkerManager::handleCombatWorkers()
{
for (auto & worker : workerData.getWorkers())
{
if (!worker) continue;
if (workerData.getWorkerJob(worker) == WorkerData::Combat)
{
// debug overlay: mark combat workers with a small yellow dot
BWAPI::Broodwar->drawCircleMap(worker->getPosition().x, worker->getPosition().y, 4, BWAPI::Colors::Yellow, true);
BWAPI::Unit target = getClosestEnemyUnitFromWorker(worker);
if (target)
{
CommandUtil::attackUnit(worker, target);
}
}
}
}
// Returns the nearest enemy unit within 400 range of the worker, or nullptr
// if the worker is null or no enemy is that close.
BWAPI::Unit WorkerManager::getClosestEnemyUnitFromWorker(BWAPI::Unit worker)
{
    if (!worker) return nullptr;

    BWAPI::Unit best = nullptr;
    double bestDist = 1000000000;

    for (auto & enemy : BWAPI::Broodwar->enemy()->getUnits())
    {
        const double d = enemy->getDistance(worker);
        if (d >= 400)
        {
            continue;
        }
        if (best == nullptr || d < bestDist)
        {
            best = enemy;
            bestDist = d;
        }
    }

    return best;
}
void WorkerManager::stopCombat()
{
for (auto & worker : workerData.getWorkers())
{
if (!worker) continue;
if (workerData.getWorkerJob(worker) == WorkerData::Combat)
{
setMineralWorker(worker);
}
}
}
// Terran-only: assigns one repair worker at a time to the first damaged
// building (any distance) or nearby damaged mechanical unit found.
void WorkerManager::handleRepairWorkers()
{
if (BWAPI::Broodwar->self()->getRace() != BWAPI::Races::Terran)
{
return;
}
for (auto & unit : BWAPI::Broodwar->self()->getUnits())
{
// Buildings are always repaired no matter how far away; one worker repairs them in order.
if (unit->getType().isBuilding() && unit->isCompleted() == true && unit->getHitPoints() < unit->getType().maxHitPoints())
{
BWAPI::Unit repairWorker = chooseRepairWorkerClosestTo(unit->getPosition());
setRepairWorker(repairWorker, unit);
break;
}
// Mechanical units (SCV, Siege Tank, Wraith, etc.) are repaired only when an SCV
// is nearby; one worker repairs them in order.
else if (unit->getType().isMechanical() && unit->isCompleted() == true && unit->getHitPoints() < unit->getType().maxHitPoints())
{
// SCVs themselves are excluded; only combat units get repaired.
if (unit->getType() != BWAPI::UnitTypes::Terran_SCV) {
BWAPI::Unit repairWorker = chooseRepairWorkerClosestTo(unit->getPosition(), 10 * TILE_SIZE);
setRepairWorker(repairWorker, unit);
break;
}
}
}
}
// Picks a worker to repair something near position p.
// The current repair worker is reused while it is alive so that a single
// worker finishes repairs sequentially instead of thrashing assignments.
// NOTE(review): the maxRange parameter is accepted but not enforced here,
// matching the original behavior - confirm whether range filtering is wanted.
BWAPI::Unit WorkerManager::chooseRepairWorkerClosestTo(BWAPI::Position p, int maxRange)
{
    if (!p.isValid()) return nullptr;

    // Keep using the current repair worker while it is still alive.
    if (currentRepairWorker != nullptr && currentRepairWorker->exists() && currentRepairWorker->getHitPoints() > 0)
    {
        return currentRepairWorker;
    }

    BWAPI::Unit closestWorker = nullptr;
    double closestDist = 1000000000;

    // for each of our workers
    for (auto & worker : workerData.getWorkers())
    {
        if (!worker)
        {
            continue;
        }
        if (worker->isCompleted()
            && (workerData.getWorkerJob(worker) == WorkerData::Minerals || workerData.getWorkerJob(worker) == WorkerData::Idle || workerData.getWorkerJob(worker) == WorkerData::Move))
        {
            double dist = worker->getDistance(p);
            if (!closestWorker || dist < closestDist)
            {
                closestWorker = worker;
                // BUG FIX: the original assigned "dist = closestDist;" here,
                // so closestDist was never updated and the LAST eligible
                // worker - not the nearest - was returned.
                closestDist = dist;
            }
        }
    }

    // Remember the chosen worker as the current repair worker if we have none.
    if (currentRepairWorker == nullptr || currentRepairWorker->exists() == false || currentRepairWorker->getHitPoints() <= 0) {
        currentRepairWorker = closestWorker;
    }

    return closestWorker;
}
// Returns the first worker currently assigned the Scout job, or nullptr.
BWAPI::Unit WorkerManager::getScoutWorker()
{
    for (auto & w : workerData.getWorkers())
    {
        if (w && workerData.getWorkerJob(w) == WorkerData::Scout)
        {
            return w;
        }
    }
    return nullptr;
}
// set a worker to mine minerals
// Assigns the worker to the best resource depot (see getClosestResourceDepotFromWorker);
// the worker's job is left unchanged if no depot is found.
void WorkerManager::setMineralWorker(BWAPI::Unit unit)
{
if (!unit) return;
// check if there is a mineral available to send the worker to
BWAPI::Unit depot = getClosestResourceDepotFromWorker(unit);
// if there is a valid ResourceDepot (Command Center, Nexus, Hatchery)
if (depot)
{
// update workerData with the new job
workerData.setWorkerJob(unit, WorkerData::Minerals, depot);
}
}
// Returns the nearest of our completed, living mineral/idle workers to `target`,
// or nullptr when we have none.
BWAPI::Unit WorkerManager::getClosestMineralWorkerTo(BWAPI::Position target)
{
    BWAPI::Unit best = nullptr;
    double bestDist = 1000000000;

    for (auto & u : BWAPI::Broodwar->self()->getUnits())
    {
        if (!u)
        {
            continue;
        }

        const bool usable = u->isCompleted()
            && u->getHitPoints() > 0
            && u->exists()
            && u->getType().isWorker()
            && WorkerManager::Instance().isMineralWorker(u);
        if (!usable)
        {
            continue;
        }

        const double d = u->getDistance(target);
        if (best == nullptr || d < bestDist)
        {
            best = u;
            bestDist = d;
        }
    }

    return best;
}
// Picks the resource depot this worker should mine at, in three passes of
// decreasing preference (understaffed -> has minerals -> merely closest).
BWAPI::Unit WorkerManager::getClosestResourceDepotFromWorker(BWAPI::Unit worker)
{
// BasicBot 1.1 Patch Start ////////////////////////////////////////////////
// Bug fix so that worker rebalancing between expansions happens reliably.
if (!worker) return nullptr;
BWAPI::Unit closestDepot = nullptr;
double closestDistance = 1000000000;
// Pass 1: among completed, landed (not lifted) ResourceDepots - including a
// Hatchery morphing into a Lair or Hive - prefer one whose mineral-worker
// count is not yet full, and among those the closest one.
for (auto & unit : BWAPI::Broodwar->self()->getUnits())
{
if (!unit) continue;
if (unit->getType().isResourceDepot()
&& (unit->isCompleted() || unit->getType() == BWAPI::UnitTypes::Zerg_Lair || unit->getType() == BWAPI::UnitTypes::Zerg_Hive)
&& unit->isLifted() == false)
{
if (workerData.depotHasEnoughMineralWorkers(unit) == false) {
double distance = unit->getDistance(worker);
if (closestDistance > distance) {
closestDepot = unit;
closestDistance = distance;
}
}
}
}
// Pass 2: if every depot is fully staffed, or none is completed yet (still
// under construction), fall back to the closest depot that still has
// minerals nearby.
if (closestDepot == nullptr) {
for (auto & unit : BWAPI::Broodwar->self()->getUnits())
{
if (!unit) continue;
if (unit->getType().isResourceDepot())
{
if (workerData.getMineralsNearDepot(unit) > 0) {
double distance = unit->getDistance(worker);
if (closestDistance > distance) {
closestDepot = unit;
closestDistance = distance;
}
}
}
}
}
// Pass 3: if no depot has any minerals left at all, just take the depot
// closest to the worker.
if (closestDepot == nullptr) {
for (auto & unit : BWAPI::Broodwar->self()->getUnits())
{
if (!unit) continue;
if (unit->getType().isResourceDepot())
{
double distance = unit->getDistance(worker);
if (closestDistance > distance) {
closestDepot = unit;
closestDistance = distance;
}
}
}
}
return closestDepot;
// BasicBot 1.1 Patch End //////////////////////////////////////////////////
}
// other managers that need workers call this when they're done with a unit
// Returns the worker to the Idle pool; handleIdleWorkers() re-tasks it next frame.
void WorkerManager::setIdleWorker(BWAPI::Unit unit)
{
if (!unit) return;
workerData.setWorkerJob(unit, WorkerData::Idle, nullptr);
}
// Returns the mineral-mining worker closest to the given refinery,
// or nullptr if the refinery is null or no mineral worker exists.
BWAPI::Unit WorkerManager::chooseGasWorkerFromMineralWorkers(BWAPI::Unit refinery)
{
    if (refinery == nullptr)
    {
        return nullptr;
    }

    BWAPI::Unit best = nullptr;
    double bestDist = 1000000000;

    for (auto & w : workerData.getWorkers())
    {
        if (!w || !w->isCompleted())
        {
            continue;
        }
        if (workerData.getWorkerJob(w) != WorkerData::Minerals)
        {
            continue;
        }

        const double d = w->getDistance(refinery);
        if (best == nullptr || d < bestDist)
        {
            best = w;
            bestDist = d;
        }
    }

    return best;
}
// Marks a worker as the builder for the given building type.
void WorkerManager::setConstructionWorker(BWAPI::Unit worker, BWAPI::UnitType buildingType)
{
if (!worker) return;
workerData.setWorkerJob(worker, WorkerData::Build, buildingType);
}
// Chooses a builder for buildingType at buildingPosition, preferring Move/Idle
// workers, then any other non-Build worker; only ground-connected (BWTA)
// candidates are considered. Optionally assigns the Build job immediately.
BWAPI::Unit WorkerManager::chooseConstuctionWorkerClosestTo(BWAPI::UnitType buildingType, BWAPI::TilePosition buildingPosition, bool setJobAsConstructionWorker, int avoidWorkerID)
{
// variables to hold the closest worker of each type to the building
BWAPI::Unit closestMovingWorker = nullptr;
BWAPI::Unit closestMiningWorker = nullptr;
// BasicBot 1.1 Patch Start ////////////////////////////////////////////////
// Default-value fix for the distance accumulators.
double closestMovingWorkerDistance = 1000000000;
double closestMiningWorkerDistance = 1000000000;
// BasicBot 1.1 Patch End //////////////////////////////////////////////////
// look through each worker that had moved there first
for (auto & unit : workerData.getWorkers())
{
if (!unit) continue;
// If we have at least two workers, skip the worker with avoidWorkerID.
if (workerData.getWorkers().size() >= 2 && avoidWorkerID != 0 && unit->getID() == avoidWorkerID) continue;
// Move / Idle Worker
if (unit->isCompleted() && (workerData.getWorkerJob(unit) == WorkerData::Move || workerData.getWorkerJob(unit) == WorkerData::Idle))
{
// if it is a new closest distance, set the pointer
double distance = unit->getDistance(BWAPI::Position(buildingPosition));
if (!closestMovingWorker || distance < closestMovingWorkerDistance)
{
// only accept workers that can walk to the building site
if (BWTA::isConnected(unit->getTilePosition(), buildingPosition)) {
closestMovingWorker = unit;
closestMovingWorkerDistance = distance;
}
}
}
// Fallback pool: when there is no Move/Idle worker, draft one of the others
// (anything except a worker already on a Build job).
if (unit->isCompleted() && workerData.getWorkerJob(unit) != WorkerData::Move && workerData.getWorkerJob(unit) != WorkerData::Idle && workerData.getWorkerJob(unit) != WorkerData::Build)
{
// if it is a new closest distance, set the pointer
double distance = unit->getDistance(BWAPI::Position(buildingPosition));
if (!closestMiningWorker || distance < closestMiningWorkerDistance)
{
if (BWTA::isConnected(unit->getTilePosition(), buildingPosition)) {
closestMiningWorker = unit;
closestMiningWorkerDistance = distance;
}
}
}
}
/*
if (closestMiningWorker)
std::cout << "closestMiningWorker " << closestMiningWorker->getID() << std::endl;
if (closestMovingWorker)
std::cout << "closestMovingWorker " << closestMovingWorker->getID() << std::endl;
*/
BWAPI::Unit chosenWorker = closestMovingWorker ? closestMovingWorker : closestMiningWorker;
// if the worker exists (one may not have been found in rare cases)
if (chosenWorker && setJobAsConstructionWorker)
{
workerData.setWorkerJob(chosenWorker, WorkerData::Build, buildingType);
}
return chosenWorker;
}
// sets a worker as a scout
// The scout has no associated resource/target unit, hence the nullptr.
void WorkerManager::setScoutWorker(BWAPI::Unit worker)
{
if (!worker) return;
workerData.setWorkerJob(worker, WorkerData::Scout, nullptr);
}
// Returns the mineral or idle worker nearest to position p (nullptr if none).
// The worker's job is not changed by this call.
BWAPI::Unit WorkerManager::chooseMoveWorkerClosestTo(BWAPI::Position p)
{
    BWAPI::Unit best = nullptr;
    double bestDist = 1000000000;

    for (auto & w : workerData.getWorkers())
    {
        if (!w || !w->isCompleted())
        {
            continue;
        }

        const auto job = workerData.getWorkerJob(w);
        if (job != WorkerData::Minerals && job != WorkerData::Idle)
        {
            continue;
        }

        const double d = w->getDistance(p);
        if (best == nullptr || d < bestDist)
        {
            best = w;
            bestDist = d;
        }
    }

    return best;
}
// Finds the mineral/idle worker closest to p and gives it a Move job with
// the given resource requirements.
// NOTE(review): the `worker` parameter is ignored, exactly as in the original;
// the worker is always chosen by proximity to p.
void WorkerManager::setMoveWorker(BWAPI::Unit worker, int mineralsNeeded, int gasNeeded, BWAPI::Position p)
{
    BWAPI::Unit best = nullptr;
    double bestDist = 1000000000;

    for (auto & w : workerData.getWorkers())
    {
        if (!w || !w->isCompleted())
        {
            continue;
        }

        const auto job = workerData.getWorkerJob(w);
        if (job != WorkerData::Minerals && job != WorkerData::Idle)
        {
            continue;
        }

        const double d = w->getDistance(p);
        if (best == nullptr || d < bestDist)
        {
            best = w;
            bestDist = d;
        }
    }

    if (best != nullptr)
    {
        workerData.setWorkerJob(best, WorkerData::Move, WorkerMoveData(mineralsNeeded, gasNeeded, p));
    }
    // If no worker was found we silently do nothing, as before.
}
// Assigns a worker to the Combat job (see handleCombatWorkers / stopCombat).
void WorkerManager::setCombatWorker(BWAPI::Unit worker)
{
if (!worker) return;
workerData.setWorkerJob(worker, WorkerData::Combat, nullptr);
}
// Assigns a worker to repair unitToRepair.
// FIX: added a null check - handleRepairWorkers() passes the result of
// chooseRepairWorkerClosestTo(), which can be nullptr, and every other
// set*Worker method already guards against null workers.
void WorkerManager::setRepairWorker(BWAPI::Unit worker, BWAPI::Unit unitToRepair)
{
    if (!worker) return;
    workerData.setWorkerJob(worker, WorkerData::Repair, unitToRepair);
}
// Releases a repair worker back to the Idle pool.
// FIX: added a null check for consistency with setIdleWorker() and the
// other job setters in this class.
void WorkerManager::stopRepairing(BWAPI::Unit worker)
{
    if (!worker) return;
    workerData.setWorkerJob(worker, WorkerData::Idle, nullptr);
}
// Handles unit morphs: a Zerg Drone morphing into a building must be removed
// from the worker bookkeeping.
void WorkerManager::onUnitMorph(BWAPI::Unit unit)
{
if (!unit) return;
// BasicBot 1.1 Patch Start ////////////////////////////////////////////////
// Bug fix for worker add/remove bookkeeping:
// worker additions are now handled in onUnitComplete instead.
// if something morphs into a worker, add it
//if (unit->getType().isWorker() && unit->getPlayer() == BWAPI::Broodwar->self() && unit->getHitPoints() >= 0)
//{
// workerData.addWorker(unit);
//}
// if something morphs into a building, it was a worker (Zerg Drone)
if (unit->getType().isBuilding() && unit->getPlayer() == BWAPI::Broodwar->self() && unit->getPlayer()->getRace() == BWAPI::Races::Zerg)
{
// remove that worker from workerData
workerData.workerDestroyed(unit);
rebalanceWorkers();
}
// BasicBot 1.1 Patch End //////////////////////////////////////////////////
}
// BasicBot 1.1 Patch Start ////////////////////////////////////////////////
// 일꾼 탄생/파괴 등에 대한 업데이트 로직 버그 수정 : onUnitShow 가 아니라 onUnitComplete 에서 처리하도록 수정
// onUnitShow 메소드 제거
/*
void WorkerManager::onUnitShow(BWAPI::Unit unit)
{
if (!unit) return;
// add the depot if it exists
if (unit->getType().isResourceDepot() && unit->getPlayer() == BWAPI::Broodwar->self())
{
workerData.addDepot(unit);
}
// add the worker
if (unit->getType().isWorker() && unit->getPlayer() == BWAPI::Broodwar->self() && unit->getHitPoints() >= 0)
{
workerData.addWorker(unit);
}
if (unit->getType().isResourceDepot() && unit->getPlayer() == BWAPI::Broodwar->self())
{
rebalanceWorkers();
}
}
*/
// onUnitComplete handler (added in the BasicBot 1.1 patch).
void WorkerManager::onUnitComplete(BWAPI::Unit unit)
{
if (!unit) return;
// When a new ResourceDepot building is completed, register it and rebalance workers.
if (unit->getType().isResourceDepot() && unit->getPlayer() == BWAPI::Broodwar->self())
{
workerData.addDepot(unit);
rebalanceWorkers();
}
// When a new worker is completed, register it and rebalance workers.
if (unit->getType().isWorker() && unit->getPlayer() == BWAPI::Broodwar->self() && unit->getHitPoints() >= 0)
{
workerData.addWorker(unit);
rebalanceWorkers();
}
}
// BasicBot 1.1 Patch End //////////////////////////////////////////////////
// If a mineral worker's depot already has enough mineral workers (or the depot
// is gone), the worker is set Idle; when an idle worker is later given a
// mineral job it migrates to a depot that is short on mineral workers.
void WorkerManager::rebalanceWorkers()
{
    for (auto & worker : workerData.getWorkers())
    {
        // FIX: guard against null workers, matching the other loops over getWorkers().
        if (!worker)
        {
            continue;
        }

        // BUG FIX: the original condition was
        //   if (!workerData.getWorkerJob(worker) == WorkerData::Minerals)
        // which, by operator precedence, compares (!job) == Minerals and so
        // never filtered for mineral workers as intended.
        if (workerData.getWorkerJob(worker) != WorkerData::Minerals)
        {
            continue;
        }

        BWAPI::Unit depot = workerData.getWorkerDepot(worker);

        // Idle the worker when its depot is missing or fully staffed.
        if (!depot || workerData.depotHasEnoughMineralWorkers(depot))
        {
            workerData.setWorkerJob(worker, WorkerData::Idle, nullptr);
        }
    }
}
// BasicBot 1.1 Patch Start ////////////////////////////////////////////////
// Bug fix for worker add/remove bookkeeping, and reliable worker rebalancing
// between multiple bases.
void WorkerManager::onUnitDestroy(BWAPI::Unit unit)
{
if (!unit) return;
// When one of our ResourceDepots is destroyed, drop it from the bookkeeping;
// its workers become Idle, which has the same effect as rebalanceWorkers.
if (unit->getType().isResourceDepot() && unit->getPlayer() == BWAPI::Broodwar->self())
{
workerData.removeDepot(unit);
}
// When one of our workers dies, drop it from the bookkeeping and rebalance.
if (unit->getType().isWorker() && unit->getPlayer() == BWAPI::Broodwar->self())
{
workerData.workerDestroyed(unit);
rebalanceWorkers();
}
// When a mineral field is mined out, rebalance workers.
if (unit->getType() == BWAPI::UnitTypes::Resource_Mineral_Field)
{
rebalanceWorkers();
}
}
// BasicBot 1.1 Patch End //////////////////////////////////////////////////
// True if the worker is mining (or Idle, i.e. about to be sent mining).
bool WorkerManager::isMineralWorker(BWAPI::Unit worker)
{
if (!worker) return false;
return workerData.getWorkerJob(worker) == WorkerData::Minerals || workerData.getWorkerJob(worker) == WorkerData::Idle;
}
// True if the worker currently has the Scout job.
bool WorkerManager::isScoutWorker(BWAPI::Unit worker)
{
if (!worker) return false;
return (workerData.getWorkerJob(worker) == WorkerData::Scout);
}
// True if the worker currently has the Build job.
bool WorkerManager::isConstructionWorker(BWAPI::Unit worker)
{
if (!worker) return false;
return (workerData.getWorkerJob(worker) == WorkerData::Build);
}
// Count of workers on the Minerals job.
int WorkerManager::getNumMineralWorkers()
{
return workerData.getNumMineralWorkers();
}
// Count of workers on the Idle job.
int WorkerManager::getNumIdleWorkers()
{
return workerData.getNumIdleWorkers();
}
// Count of workers on the Gas job.
int WorkerManager::getNumGasWorkers()
{
return workerData.getNumGasWorkers();
}
// NOTE(review): returns workerData by value, i.e. a copy of the whole
// bookkeeping structure - mutations on the returned object do not affect
// the manager. Confirm callers expect a snapshot.
WorkerData WorkerManager::getWorkerData()
{
return workerData;
}
|
/*
* Copyright (c) 2003-2019 Rony Shapiro <ronys@pwsafe.org>.
* All rights reserved. Use of the code is allowed under the
* Artistic License 2.0 terms, as specified in the LICENSE file
* distributed with this code, or available from
* http://www.opensource.org/licenses/artistic-license-2.0.php
*/
/**
* \file Linux-specific implementation of some wide-string related functionality
*/
#include <wchar.h>
#include "pws_str.h"
#include "../utf8conv.h"
#include "../../core/PwsPlatform.h"
#include <algorithm>
// Parses a base-10 integer from a wide string (wcstol, truncated to int).
int pws_os::wctoi(const wchar_t *s)
{
  const long parsed = wcstol(s, nullptr, 10);
  return static_cast<int>(parsed);
}
// Parses a floating-point value from a wide string (wcstold, narrowed to double).
double pws_os::wctof(const wchar_t *s)
{
  const long double parsed = wcstold(s, nullptr);
  return static_cast<double>(parsed);
}
// Converts val to text in the given base (digits 0-9, a-f) into out, which
// must be large enough for the digits plus a terminating NUL; returns out.
// NOTE(review): for negative val the int/unsigned mixing in % and /= makes
// the two's-complement bit pattern be printed rather than a '-'-prefixed
// number - confirm callers only pass non-negative values.
// NOTE(review): if NumberOf counts the trailing NUL of `digits`, the assert
// admits base == 17 - confirm the intended upper bound is 16.
TCHAR* pws_os::pws_itot(int val, TCHAR* out, unsigned base)
{
const TCHAR digits[] = _T("0123456789abcdef");
assert(base > 0 && base <= NumberOf(digits));
TCHAR* p = out;
// emit digits least-significant first...
do {
*p++ = digits[val % base];
}
while( (val /= base) != 0);
*p++ = 0;
// ...then reverse into conventional most-significant-first order
std::reverse(out, p);
return out;
}
|
//
// __ __ __
// / / / /__ __ ____/ /_____ ____
// / /_/ // / / // __ // ___// __ \
// / __ // /_/ // /_/ // / / /_/ /
// /_/ /_/ \__, / \__,_//_/ \____/
// /____/
//
// The Hydro Programming Language
//
#ifndef __h3o_engine_PDocumentPrinter__
#define __h3o_engine_PDocumentPrinter__
#include <iostream>
#include "DocumentVisitor.hpp"
namespace hydro::engine
{
/**
 * The PDocumentPrinter class writes a document representation to an output stream.
 * The stream is held by reference, so it must outlive the printer.
 */
class PDocumentPrinter : public DocumentVisitor
{
public:
/**
 * Creates a PDocumentPrinter object with an output stream.
 */
PDocumentPrinter(std::ostream &out);
/**
 * Destroys the PDocumentPrinter object.
 */
virtual ~PDocumentPrinter();
/**
 * Gets the output stream that the printer writes to.
 */
const std::ostream &getOutputStream() const { return mOut; }
protected:
/**
 * The output stream.
 */
std::ostream &mOut;
};
} // namespace hydro::engine
#endif /* __h3o_engine_PDocumentPrinter__ */
|
//
// request_parser.hpp
// ~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef HTTP_SERVER3_REQUEST_PARSER_HPP
#define HTTP_SERVER3_REQUEST_PARSER_HPP
#include <boost/logic/tribool.hpp>
#include <boost/tuple/tuple.hpp>
namespace http {
namespace server3 {
struct request;
/// Parser for incoming requests.
class request_parser
{
public:
/// Construct ready to parse the request method.
request_parser();
/// Reset to initial parser state.
void reset();
/// Parse some data. The tribool return value is true when a complete request
/// has been parsed, false if the data is invalid, indeterminate when more
/// data is required. The InputIterator return value indicates how much of the
/// input has been consumed.
template <typename InputIterator>
boost::tuple<boost::tribool, InputIterator> parse(request& req,
InputIterator begin, InputIterator end)
{
while (begin != end)
{
boost::tribool result = consume(req, *begin++);
// A determinate tribool (true OR false) means parsing is finished -
// either a complete request or malformed input; only an indeterminate
// result keeps consuming characters.
if (result || !result)
return boost::make_tuple(result, begin);
}
// Input exhausted without a verdict: more data is required.
boost::tribool result = boost::indeterminate;
return boost::make_tuple(result, begin);
}
private:
/// Handle the next character of input.
boost::tribool consume(request& req, char input);
/// Check if a byte is an HTTP character.
static bool is_char(int c);
/// Check if a byte is an HTTP control character.
static bool is_ctl(int c);
/// Check if a byte is defined as an HTTP tspecial character.
static bool is_tspecial(int c);
/// The current state of the parser.
enum state
{
method_start,
method,
uri,
http_version_h,
http_version_t_1,
http_version_t_2,
http_version_p,
http_version_slash,
http_version_major_start,
http_version_major,
http_version_minor_start,
http_version_minor,
expecting_newline_1,
header_line_start,
header_lws,
header_name,
space_before_header_value,
header_value,
expecting_newline_2,
expecting_newline_3
} state_;
};
} // namespace server3
} // namespace http
#endif // HTTP_SERVER3_REQUEST_PARSER_HPP
|
/*************************************************************************/
/* NodePath.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "NodePath.hpp"
#include "GodotGlobal.hpp"
#include "String.hpp"
#include <gdnative/node_path.h>
namespace godot {
// Constructs an empty node path.
NodePath::NodePath() {
String from = "";
godot::api->godot_node_path_new(&_node_path, (godot_string *)&from);
}
// Copy constructor: rebuilds the path from the other path's string form.
NodePath::NodePath(const NodePath &other) {
String from = other;
godot::api->godot_node_path_new(&_node_path, (godot_string *)&from);
}
// Constructs a node path from a Godot String.
NodePath::NodePath(const String &from) {
godot::api->godot_node_path_new(&_node_path, (godot_string *)&from);
}
// Constructs a node path from a C string.
NodePath::NodePath(const char *contents) {
String from = contents;
godot::api->godot_node_path_new(&_node_path, (godot_string *)&from);
}
// Returns the path component at index idx.
String NodePath::get_name(const int idx) const {
godot_string str = godot::api->godot_node_path_get_name(&_node_path, idx);
return String(str);
}
// Number of path components.
int NodePath::get_name_count() const {
return godot::api->godot_node_path_get_name_count(&_node_path);
}
// Returns the subname (after ':') at index idx.
String NodePath::get_subname(const int idx) const {
godot_string str = godot::api->godot_node_path_get_subname(&_node_path, idx);
return String(str);
}
// Number of subnames.
int NodePath::get_subname_count() const {
return godot::api->godot_node_path_get_subname_count(&_node_path);
}
// True if the path starts from the scene root ("/...").
bool NodePath::is_absolute() const {
return godot::api->godot_node_path_is_absolute(&_node_path);
}
// True if the path has no names or subnames.
bool NodePath::is_empty() const {
return godot::api->godot_node_path_is_empty(&_node_path);
}
// Returns this path converted to a pure property path (requires core 1.1 API).
NodePath NodePath::get_as_property_path() const {
godot_node_path path = godot::core_1_1_api->godot_node_path_get_as_property_path(&_node_path);
return NodePath(path);
}
// All subnames joined with ':'.
String NodePath::get_concatenated_subnames() const {
godot_string str = godot::api->godot_node_path_get_concatenated_subnames(&_node_path);
return String(str);
}
// Conversion to the path's string representation.
NodePath::operator String() const {
godot_string str = godot::api->godot_node_path_as_string(&_node_path);
return String(str);
}
// Equality via the GDNative API.
// NOTE(review): not marked const - the matching header declaration would
// also need updating before changing that.
bool NodePath::operator==(const NodePath &other) {
return godot::api->godot_node_path_operator_equal(&_node_path, &other._node_path);
}
// Copy assignment: destroys the current path and rebuilds it from the other
// path's string representation.
// BUG FIX: guard against self-assignment - the original destroyed _node_path
// first and then read it back through `other`, which accesses a destroyed
// godot_node_path when &other == this.
void NodePath::operator=(const NodePath &other) {
	if (this == &other)
		return;
	godot::api->godot_node_path_destroy(&_node_path);
	String other_string = (String)other;
	godot::api->godot_node_path_new(&_node_path, (godot_string *)&other_string);
}
// Releases the underlying godot_node_path.
NodePath::~NodePath() {
godot::api->godot_node_path_destroy(&_node_path);
}
} // namespace godot
|
/*
Copyright (c) 2003-2016, Arvid Norberg, Daniel Wallin
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <string>
#include <cstdio> // for snprintf
#include <cinttypes> // for PRId64 et.al.
#include "libtorrent/config.hpp"
#include "libtorrent/alert.hpp"
#include "libtorrent/alert_types.hpp"
#include "libtorrent/socket_io.hpp"
#include "libtorrent/time.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/torrent.hpp"
#include "libtorrent/performance_counters.hpp"
#include "libtorrent/stack_allocator.hpp"
#include "libtorrent/piece_picker.hpp" // for piece_block
#include "libtorrent/hex.hpp" // to_hex
#include "libtorrent/aux_/escape_string.hpp" // for convert_from_native
#include "libtorrent/aux_/max_path.hpp" // for TORRENT_MAX_PATH
namespace libtorrent {
// Records the alert's creation time at construction.
alert::alert() : m_timestamp(clock_type::now()) {}
alert::~alert() = default;
// Time the alert was posted.
time_point alert::timestamp() const { return m_timestamp; }
// Caches the torrent's display name (or its hex info-hash when unnamed) in
// the stack allocator so the alert remains printable after the torrent is gone.
torrent_alert::torrent_alert(aux::stack_allocator& alloc
, torrent_handle const& h)
: handle(h)
, m_alloc(alloc)
{
boost::shared_ptr<torrent> t = h.native_handle();
if (t)
{
std::string name_str = t->name();
if (!name_str.empty()) {
m_name_idx = alloc.copy_string(name_str);
}
else
{
// no name set: fall back to the 40-char hex info-hash
char msg[41];
aux::to_hex(t->info_hash(), msg);
m_name_idx = alloc.copy_string(msg);
}
}
else
{
m_name_idx = alloc.copy_string("");
}
#ifndef TORRENT_NO_DEPRECATE
name = torrent_name();
#endif
}
// Returns the cached torrent name (deprecated member or allocator-backed string).
char const* torrent_alert::torrent_name() const
{
#ifndef TORRENT_NO_DEPRECATE
return name.c_str();
#else
return m_alloc.get().ptr(m_name_idx);
#endif
}
// Human-readable prefix for derived alerts' messages.
std::string torrent_alert::message() const
{
if (!handle.is_valid()) return " - ";
return torrent_name();
}
// Base for alerts about a specific peer: stores its endpoint and peer id.
peer_alert::peer_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, tcp::endpoint const& i
, peer_id const& pi)
: torrent_alert(alloc, h)
, ip(i)
, pid(pi)
{}
// Torrent message plus the peer's endpoint and client identification.
// FIX: removed an unused local `error_code ec;` that served no purpose.
std::string peer_alert::message() const
{
	return torrent_alert::message() + " peer (" + print_endpoint(ip)
		+ ", " + identify_client(pid) + ")";
}
// Base for tracker-related alerts: stores the tracker URL in the stack allocator.
tracker_alert::tracker_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, std::string const& u)
: torrent_alert(alloc, h)
#ifndef TORRENT_NO_DEPRECATE
, url(u)
#endif
, m_url_idx(alloc.copy_string(u))
{}
// Returns the tracker URL (deprecated member or allocator-backed string).
char const* tracker_alert::tracker_url() const
{
#ifndef TORRENT_NO_DEPRECATE
return url.c_str();
#else
return m_alloc.get().ptr(m_url_idx);
#endif
}
// Torrent message with the tracker URL appended.
std::string tracker_alert::message() const
{
return torrent_alert::message() + " (" + tracker_url() + ")";
}
// Success case: carries the piece buffer and its size.
read_piece_alert::read_piece_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, int p, boost::shared_array<char> d, int s)
: torrent_alert(alloc, h)
, buffer(std::move(d))
, piece(p)
, size(s)
{}
// Failure case: carries the error code; buffer is empty and size is 0.
read_piece_alert::read_piece_alert(aux::stack_allocator& alloc
, torrent_handle h, int p, error_code e)
: torrent_alert(alloc, h)
, ec(e)
, piece(p)
, size(0)
{}
// Formats either the success or failure variant of the read_piece result.
// FIX: `piece` is an int (both constructors take `int p`), so the format
// specifier must be "%d" - the original used "%u", a specifier/type mismatch.
std::string read_piece_alert::message() const
{
	char msg[200];
	if (ec)
	{
		std::snprintf(msg, sizeof(msg), "%s: read_piece %d failed: %s"
			, torrent_alert::message().c_str(), piece
			, convert_from_native(ec.message()).c_str());
	}
	else
	{
		std::snprintf(msg, sizeof(msg), "%s: read_piece %d successful"
			, torrent_alert::message().c_str(), piece);
	}
	return msg;
}
// Posted when a single file within the torrent finishes downloading.
file_completed_alert::file_completed_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, int idx)
: torrent_alert(alloc, h)
, index(idx)
{}
std::string file_completed_alert::message() const
{
char msg[200 + TORRENT_MAX_PATH];
std::snprintf(msg, sizeof(msg), "%s: file %d finished downloading"
, torrent_alert::message().c_str(), index);
return msg;
}
// Posted when a file rename succeeds; the new name is stored in the allocator.
file_renamed_alert::file_renamed_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, std::string const& n
, int idx)
: torrent_alert(alloc, h)
#ifndef TORRENT_NO_DEPRECATE
, name(n)
#endif
, index(idx)
, m_name_idx(alloc.copy_string(n))
{}
// Returns the file's new name (deprecated member or allocator-backed string).
char const* file_renamed_alert::new_name() const
{
#ifndef TORRENT_NO_DEPRECATE
return name.c_str();
#else
return m_alloc.get().ptr(m_name_idx);
#endif
}
std::string file_renamed_alert::message() const
{
char msg[200 + TORRENT_MAX_PATH * 2];
std::snprintf(msg, sizeof(msg), "%s: file %d renamed to %s"
, torrent_alert::message().c_str(), index, new_name());
return msg;
}
// Posted when a file rename fails; carries the file index and error code.
file_rename_failed_alert::file_rename_failed_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, int idx
, error_code ec)
: torrent_alert(alloc, h)
, index(idx)
, error(ec)
{}
std::string file_rename_failed_alert::message() const
{
char ret[200 + TORRENT_MAX_PATH * 2];
std::snprintf(ret, sizeof(ret), "%s: failed to rename file %d: %s"
, torrent_alert::message().c_str(), index, convert_from_native(error.message()).c_str());
return ret;
}
// Posted when the session detects a performance bottleneck of some kind.
performance_alert::performance_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, performance_warning_t w)
: torrent_alert(alloc, h)
, warning_code(w)
{}
std::string performance_alert::message() const
{
// Text for each performance_warning_t value, in enumerator order.
static char const* warning_str[] =
{
"max outstanding disk writes reached",
"max outstanding piece requests reached",
"upload limit too low (download rate will suffer)",
"download limit too low (upload rate will suffer)",
"send buffer watermark too low (upload rate will suffer)",
"too many optimistic unchoke slots",
"using bittyrant unchoker with no upload rate limit set",
"the disk queue limit is too high compared to the cache size. The disk queue eats into the cache size",
"outstanding AIO operations limit reached",
"too few ports allowed for outgoing connections",
"too few file descriptors are allowed for this process. connection limit lowered"
};
// NOTE(review): warning_code indexes warning_str without a bounds check -
// assumes it is always a valid enumerator matching this table's length.
return torrent_alert::message() + ": performance warning: "
+ warning_str[warning_code];
}
// Posted every time a torrent transitions between states; records both the
// new state and the previous one.
state_changed_alert::state_changed_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, torrent_status::state_t st
, torrent_status::state_t prev_st)
: torrent_alert(alloc, h)
, state(st)
, prev_state(prev_st)
{}
std::string state_changed_alert::message() const
{
// One entry per torrent_status::state_t value, in enum order.
static char const* state_str[] =
{"checking (q)", "checking", "dl metadata"
, "downloading", "finished", "seeding", "allocating"
, "checking (r)"};
return torrent_alert::message() + ": state changed to: "
+ state_str[state];
}
// Posted when a tracker announce fails. Records how many times in a row
// this tracker has failed, the HTTP status code and the error message.
tracker_error_alert::tracker_error_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, int times
, int status
, std::string const& u
, error_code const& e
, std::string const& m)
: tracker_alert(alloc, h, u)
, times_in_row(times)
, status_code(status)
, error(e)
#ifndef TORRENT_NO_DEPRECATE
, msg(m)
#endif
, m_msg_idx(alloc.copy_string(m))
{
TORRENT_ASSERT(!u.empty());
}
// Message text as sent by the tracker (deprecated member or allocator copy).
char const* tracker_error_alert::error_message() const
{
#ifndef TORRENT_NO_DEPRECATE
return msg.c_str();
#else
return m_alloc.get().ptr(m_msg_idx);
#endif
}
std::string tracker_error_alert::message() const
{
char ret[400];
std::snprintf(ret, sizeof(ret), "%s (%d) %s \"%s\" (%d)"
, tracker_alert::message().c_str(), status_code
, convert_from_native(error.message()).c_str(), error_message()
, times_in_row);
return ret;
}
// Posted when a tracker reply contains a warning field.
tracker_warning_alert::tracker_warning_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, std::string const& u
, std::string const& m)
: tracker_alert(alloc, h, u)
#ifndef TORRENT_NO_DEPRECATE
, msg(m)
#endif
, m_msg_idx(alloc.copy_string(m))
{
TORRENT_ASSERT(!u.empty());
}
char const* tracker_warning_alert::warning_message() const
{
#ifndef TORRENT_NO_DEPRECATE
return msg.c_str();
#else
return m_alloc.get().ptr(m_msg_idx);
#endif
}
std::string tracker_warning_alert::message() const
{
return tracker_alert::message() + " warning: " + warning_message();
}
// Posted when a scrape request succeeds; carries the tracker's
// incomplete (leechers) and complete (seeds) counters.
scrape_reply_alert::scrape_reply_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, int incomp
, int comp
, std::string const& u)
: tracker_alert(alloc, h, u)
, incomplete(incomp)
, complete(comp)
{
TORRENT_ASSERT(!u.empty());
}
std::string scrape_reply_alert::message() const
{
char ret[400];
std::snprintf(ret, sizeof(ret), "%s scrape reply: %u %u"
, tracker_alert::message().c_str(), incomplete, complete);
return ret;
}
// Scrape failure caused by a transport-level error; the deprecated msg
// member carries the error text, m_msg_idx stays -1 (no tracker message).
scrape_failed_alert::scrape_failed_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, std::string const& u
, error_code const& e)
: tracker_alert(alloc, h, u)
#ifndef TORRENT_NO_DEPRECATE
, msg(convert_from_native(e.message()))
#endif
, error(e)
, m_msg_idx(-1)
{
TORRENT_ASSERT(!u.empty());
}
// Scrape failure reported by the tracker itself, with its message text.
scrape_failed_alert::scrape_failed_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, std::string const& u
, std::string const& m)
: tracker_alert(alloc, h, u)
#ifndef TORRENT_NO_DEPRECATE
, msg(m)
#endif
, error(errors::tracker_failure)
, m_msg_idx(alloc.copy_string(m))
{
TORRENT_ASSERT(!u.empty());
}
char const* scrape_failed_alert::error_message() const
{
#ifndef TORRENT_NO_DEPRECATE
return msg.c_str();
#else
// -1 marks "no tracker-supplied message" (error_code constructor)
if (m_msg_idx == -1) return "";
else return m_alloc.get().ptr(m_msg_idx);
#endif
}
std::string scrape_failed_alert::message() const
{
return tracker_alert::message() + " scrape failed: " + error_message();
}
// Posted on a successful announce; np is the number of peers received.
tracker_reply_alert::tracker_reply_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, int np
, std::string const& u)
: tracker_alert(alloc, h, u)
, num_peers(np)
{
TORRENT_ASSERT(!u.empty());
}
std::string tracker_reply_alert::message() const
{
char ret[400];
std::snprintf(ret, sizeof(ret), "%s received peers: %u"
, tracker_alert::message().c_str(), num_peers);
return ret;
}
// DHT counterpart of tracker_reply_alert; no tracker URL, hence "".
dht_reply_alert::dht_reply_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, int np)
: tracker_alert(alloc, h, "")
, num_peers(np)
{}
std::string dht_reply_alert::message() const
{
char ret[400];
std::snprintf(ret, sizeof(ret), "%s received DHT peers: %u"
, tracker_alert::message().c_str(), num_peers);
return ret;
}
// Posted just before an announce is sent; e is the announce event code.
tracker_announce_alert::tracker_announce_alert(aux::stack_allocator& alloc
, torrent_handle const& h
, std::string const& u, int e)
: tracker_alert(alloc, h, u)
, event(e)
{
TORRENT_ASSERT(!u.empty());
}
std::string tracker_announce_alert::message() const
{
// indexed by the announce event code; the assert below guards the table
static const char* event_str[] = {"none", "completed", "started", "stopped", "paused"};
TORRENT_ASSERT_VAL(event < int(sizeof(event_str)/sizeof(event_str[0])), event);
return tracker_alert::message() + " sending announce (" + event_str[event] + ")";
}
// Posted when a downloaded piece fails its hash check.
hash_failed_alert::hash_failed_alert(
aux::stack_allocator& alloc
, torrent_handle const& h
, int index)
: torrent_alert(alloc, h)
, piece_index(index)
{
TORRENT_ASSERT(index >= 0);
}
std::string hash_failed_alert::message() const
{
char ret[400];
std::snprintf(ret, sizeof(ret), "%s hash for piece %u failed"
, torrent_alert::message().c_str(), piece_index);
return ret;
}
// Posted when a peer is banned (e.g. for sending corrupt data).
peer_ban_alert::peer_ban_alert(aux::stack_allocator& alloc
, torrent_handle h, tcp::endpoint const& ep
, peer_id const& peer_id)
: peer_alert(alloc, h, ep, peer_id)
{}
std::string peer_ban_alert::message() const
{
return peer_alert::message() + " banned peer";
}
// Posted when a previously snubbed peer starts sending data again.
peer_unsnubbed_alert::peer_unsnubbed_alert(aux::stack_allocator& alloc
, torrent_handle h, tcp::endpoint const& ep
, peer_id const& peer_id)
: peer_alert(alloc, h, ep, peer_id)
{}
std::string peer_unsnubbed_alert::message() const
{
return peer_alert::message() + " peer unsnubbed";
}
// Posted when a peer stops sending requested data and is marked snubbed.
peer_snubbed_alert::peer_snubbed_alert(aux::stack_allocator& alloc
, torrent_handle h, tcp::endpoint const& ep
, peer_id const& peer_id)
: peer_alert(alloc, h, ep, peer_id)
{}
std::string peer_snubbed_alert::message() const
{
return peer_alert::message() + " peer snubbed";
}
// Posted when a peer sends a piece request we cannot or will not serve;
// the flags record why (super-seeding withheld / not have / not interested).
invalid_request_alert::invalid_request_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, peer_id const& peer_id, peer_request const& r
, bool _have, bool _peer_interested, bool _withheld)
: peer_alert(alloc, h, ep, peer_id)
, request(r)
, we_have(_have)
, peer_interested(_peer_interested)
, withheld(_withheld)
{}
std::string invalid_request_alert::message() const
{
char ret[200];
std::snprintf(ret, sizeof(ret), "%s peer sent an invalid piece request "
"(piece: %u start: %u len: %u)%s"
, peer_alert::message().c_str(), request.piece, request.start
, request.length
, withheld ? ": super seeding withheld piece"
: !we_have ? ": we don't have piece"
: !peer_interested ? ": peer is not interested"
: "");
return ret;
}
// Posted once when all pieces of a torrent have been downloaded.
torrent_finished_alert::torrent_finished_alert(aux::stack_allocator& alloc
, torrent_handle h)
: torrent_alert(alloc, h)
{}
std::string torrent_finished_alert::message() const
{
return torrent_alert::message() + " torrent finished downloading";
}
// Posted every time a piece completes and passes its hash check.
piece_finished_alert::piece_finished_alert(aux::stack_allocator& alloc
, torrent_handle const& h, int piece_num)
: torrent_alert(alloc, h)
, piece_index(piece_num)
{
TORRENT_ASSERT(piece_index >= 0);
}
std::string piece_finished_alert::message() const
{
char ret[200];
std::snprintf(ret, sizeof(ret), "%s piece: %u finished downloading"
, torrent_alert::message().c_str(), piece_index);
return ret;
}
// Posted when a peer rejects or drops a block request we sent.
request_dropped_alert::request_dropped_alert(aux::stack_allocator& alloc, torrent_handle h
, tcp::endpoint const& ep, peer_id const& peer_id, int block_num
, int piece_num)
: peer_alert(alloc, h, ep, peer_id)
, block_index(block_num)
, piece_index(piece_num)
{
TORRENT_ASSERT(block_index >= 0 && piece_index >= 0);
}
std::string request_dropped_alert::message() const
{
char ret[200];
std::snprintf(ret, sizeof(ret), "%s peer dropped block ( piece: %u block: %u)"
, torrent_alert::message().c_str(), piece_index, block_index);
return ret;
}
// Posted when a block request times out.
block_timeout_alert::block_timeout_alert(aux::stack_allocator& alloc, torrent_handle h
, tcp::endpoint const& ep, peer_id const& peer_id, int block_num
, int piece_num)
: peer_alert(alloc, h, ep, peer_id)
, block_index(block_num)
, piece_index(piece_num)
{
TORRENT_ASSERT(block_index >= 0 && piece_index >= 0);
}
std::string block_timeout_alert::message() const
{
char ret[200];
std::snprintf(ret, sizeof(ret), "%s peer timed out request ( piece: %u block: %u)"
, torrent_alert::message().c_str(), piece_index, block_index);
return ret;
}
// Posted when a block finishes downloading.
block_finished_alert::block_finished_alert(aux::stack_allocator& alloc, torrent_handle h
, tcp::endpoint const& ep, peer_id const& peer_id, int block_num
, int piece_num)
: peer_alert(alloc, h, ep, peer_id)
, block_index(block_num)
, piece_index(piece_num)
{
TORRENT_ASSERT(block_index >= 0 && piece_index >= 0);
}
std::string block_finished_alert::message() const
{
char ret[200];
std::snprintf(ret, sizeof(ret), "%s block finished downloading (piece: %u block: %u)"
, torrent_alert::message().c_str(), piece_index, block_index);
return ret;
}
// Posted when a block request is sent to a peer. The deprecated
// peer_speedmsg member is kept but always empty.
block_downloading_alert::block_downloading_alert(aux::stack_allocator& alloc, torrent_handle h
, tcp::endpoint const& ep
, peer_id const& peer_id, int block_num, int piece_num)
: peer_alert(alloc, h, ep, peer_id)
#ifndef TORRENT_NO_DEPRECATE
, peer_speedmsg("")
#endif
, block_index(block_num)
, piece_index(piece_num)
{
TORRENT_ASSERT(block_index >= 0 && piece_index >= 0);
}
std::string block_downloading_alert::message() const
{
char ret[200];
std::snprintf(ret, sizeof(ret), "%s requested block (piece: %u block: %u)"
, torrent_alert::message().c_str(), piece_index, block_index);
return ret;
}
// Posted when a peer sends a block we never asked for (or no longer want).
unwanted_block_alert::unwanted_block_alert(aux::stack_allocator& alloc, torrent_handle h
, tcp::endpoint const& ep
, peer_id const& peer_id, int block_num, int piece_num)
: peer_alert(alloc, h, ep, peer_id)
, block_index(block_num)
, piece_index(piece_num)
{
TORRENT_ASSERT(block_index >= 0 && piece_index >= 0);
}
std::string unwanted_block_alert::message() const
{
char ret[200];
std::snprintf(ret, sizeof(ret), "%s received block not in download queue (piece: %u block: %u)"
, torrent_alert::message().c_str(), piece_index, block_index);
return ret;
}
// Posted when move_storage() completes; the new path is stored in the
// stack allocator (and mirrored in the deprecated path member).
storage_moved_alert::storage_moved_alert(aux::stack_allocator& alloc
, torrent_handle const& h, std::string const& p)
: torrent_alert(alloc, h)
#ifndef TORRENT_NO_DEPRECATE
, path(p)
#endif
, m_path_idx(alloc.copy_string(p))
{}
std::string storage_moved_alert::message() const
{
return torrent_alert::message() + " moved storage to: "
+ storage_path();
}
char const* storage_moved_alert::storage_path() const
{
#ifndef TORRENT_NO_DEPRECATE
return path.c_str();
#else
return m_alloc.get().ptr(m_path_idx);
#endif
}
// Posted when move_storage() fails; records the failing file and the
// operation that failed (op may be null).
storage_moved_failed_alert::storage_moved_failed_alert(
aux::stack_allocator& alloc
, torrent_handle const& h
, error_code const& e
, std::string const& f
, char const* op)
: torrent_alert(alloc, h)
, error(e)
#ifndef TORRENT_NO_DEPRECATE
, file(f)
#endif
, operation(op)
, m_file_idx(alloc.copy_string(f))
{}
char const* storage_moved_failed_alert::file_path() const
{
#ifndef TORRENT_NO_DEPRECATE
return file.c_str();
#else
return m_alloc.get().ptr(m_file_idx);
#endif
}
std::string storage_moved_failed_alert::message() const
{
// operation may be null, hence the ternary guard
return torrent_alert::message() + " storage move failed. "
+ (operation?operation:"") + " (" + file_path() + "): "
+ convert_from_native(error.message());
}
// Posted when the files of a torrent have been deleted from disk.
torrent_deleted_alert::torrent_deleted_alert(aux::stack_allocator& alloc
, torrent_handle const& h, sha1_hash const& ih)
: torrent_alert(alloc, h)
, info_hash(ih)
{}
std::string torrent_deleted_alert::message() const
{
return torrent_alert::message() + " deleted";
}
// Posted when deleting the torrent's files failed.
torrent_delete_failed_alert::torrent_delete_failed_alert(aux::stack_allocator& alloc
, torrent_handle const& h, error_code const& e, sha1_hash const& ih)
: torrent_alert(alloc, h)
, error(e)
, info_hash(ih)
{
#ifndef TORRENT_NO_DEPRECATE
// deprecated mirror of the error text
msg = convert_from_native(error.message());
#endif
}
std::string torrent_delete_failed_alert::message() const
{
return torrent_alert::message() + " torrent deletion failed: "
+convert_from_native(error.message());
}
// Posted in response to save_resume_data(), carrying the resume entry.
save_resume_data_alert::save_resume_data_alert(aux::stack_allocator& alloc
, boost::shared_ptr<entry> const& rd
, torrent_handle const& h)
: torrent_alert(alloc, h)
, resume_data(rd)
{}
std::string save_resume_data_alert::message() const
{
return torrent_alert::message() + " resume data generated";
}
// Posted when resume data could not be generated.
save_resume_data_failed_alert::save_resume_data_failed_alert(aux::stack_allocator& alloc
, torrent_handle const& h, error_code const& e)
: torrent_alert(alloc, h)
, error(e)
{
#ifndef TORRENT_NO_DEPRECATE
// deprecated mirror of the error text
msg = convert_from_native(error.message());
#endif
}
std::string save_resume_data_failed_alert::message() const
{
return torrent_alert::message() + " resume data was not generated: "
+ convert_from_native(error.message());
}
// Posted when a torrent is paused.
torrent_paused_alert::torrent_paused_alert(aux::stack_allocator& alloc
, torrent_handle const& h)
: torrent_alert(alloc, h)
{}
std::string torrent_paused_alert::message() const
{
return torrent_alert::message() + " paused";
}
// Posted when a torrent is resumed.
torrent_resumed_alert::torrent_resumed_alert(aux::stack_allocator& alloc
, torrent_handle const& h)
: torrent_alert(alloc, h)
{}
std::string torrent_resumed_alert::message() const
{
return torrent_alert::message() + " resumed";
}
// Posted when a torrent finished checking its files.
torrent_checked_alert::torrent_checked_alert(aux::stack_allocator& alloc
, torrent_handle const& h)
: torrent_alert(alloc, h)
{}
std::string torrent_checked_alert::message() const
{
return torrent_alert::message() + " checked";
}
// File-local lookup tables shared by several message() implementations
// below. Each is indexed by the corresponding enum/int stored in the alert.
namespace
{
// socket type names used by listen_failed_alert (socket_type_t order)
static char const* const sock_type_str[] =
{
"TCP", "TCP/SSL", "UDP", "I2P", "Socks5", "uTP/SSL"
};
// port-mapping transport names (map_type: 0 = NAT-PMP, 1 = UPnP)
static char const* const nat_type_str[] = {"NAT-PMP", "UPnP"};
// protocol names for portmap_alert (protocol: 0 = TCP, 1 = UDP)
static char const* const protocol_str[] = {"TCP", "UDP"};
// full socket-type names used by connection-related alerts
static char const* const socket_type_str[] = {
"null",
"TCP",
"Socks5/TCP",
"HTTP",
"uTP",
"i2p",
"SSL/TCP",
"SSL/Socks5",
"HTTPS",
"SSL/uTP"
};
}
// Posted when listening on an interface fails. This primary constructor
// stores the address/port, the failing operation and socket type; the
// interface string lives in the stack allocator.
listen_failed_alert::listen_failed_alert(
aux::stack_allocator& alloc
, std::string const& iface
, libtorrent::address const& listen_addr
, int listen_port
, int op
, error_code const& ec
, socket_type_t t)
: error(ec)
, operation(op)
, sock_type(t)
, address(listen_addr)
, port(listen_port)
#ifndef TORRENT_NO_DEPRECATE
, endpoint(listen_addr, listen_port)
#endif
, m_alloc(alloc)
, m_interface_idx(alloc.copy_string(iface))
{}
// Convenience overload taking a TCP endpoint; delegates to the primary ctor.
listen_failed_alert::listen_failed_alert(
aux::stack_allocator& alloc
, std::string const& iface
, tcp::endpoint const& ep
, int op
, error_code const& ec
, socket_type_t t)
: listen_failed_alert(alloc
, iface
, ep.address()
, ep.port()
, op
, ec
, t)
{}
// Convenience overload taking a UDP endpoint; delegates to the primary ctor.
listen_failed_alert::listen_failed_alert(
aux::stack_allocator& alloc
, std::string const& iface
, udp::endpoint const& ep
, int op
, error_code const& ec
, socket_type_t t)
: listen_failed_alert(alloc
, iface
, ep.address()
, ep.port()
, op
, ec
, t)
{}
// Overload for failures with no concrete endpoint (default address, port 0).
listen_failed_alert::listen_failed_alert(
aux::stack_allocator& alloc
, std::string const& iface
, int op
, error_code const& ec
, socket_type_t t)
: listen_failed_alert(alloc
, iface
, libtorrent::address()
, 0
, op
, ec
, t)
{}
char const* listen_failed_alert::listen_interface() const
{
return m_alloc.get().ptr(m_interface_idx);
}
std::string listen_failed_alert::message() const
{
// names of the operations that can fail, in op_t order
// NOTE(review): operation and sock_type index these tables unchecked —
// assumed always in range at the call sites; verify when adding enum values
static char const* op_str[] =
{
"parse_addr",
"open",
"bind",
"listen",
"get_socket_name",
"accept",
"enum_if",
"bind_to_device"
};
char ret[300];
std::snprintf(ret, sizeof(ret), "listening on %s (device: %s) failed: [%s] [%s] %s"
, print_endpoint(address, port).c_str()
, listen_interface()
, op_str[operation]
, sock_type_str[sock_type]
, convert_from_native(error.message()).c_str());
return ret;
}
// Posted when metadata received from a peer fails validation.
metadata_failed_alert::metadata_failed_alert(aux::stack_allocator& alloc
, const torrent_handle& h, error_code const& e)
: torrent_alert(alloc, h)
, error(e)
{}
std::string metadata_failed_alert::message() const
{
return torrent_alert::message() + " invalid metadata received";
}
// Posted when valid metadata (the .torrent info dict) has been received.
metadata_received_alert::metadata_received_alert(aux::stack_allocator& alloc
, const torrent_handle& h)
: torrent_alert(alloc, h)
{}
std::string metadata_received_alert::message() const
{
return torrent_alert::message() + " metadata successfully received";
}
// Posted on an error on the UDP socket (used by uTP, DHT, UDP trackers).
udp_error_alert::udp_error_alert(
aux::stack_allocator&
, udp::endpoint const& ep
, error_code const& ec)
: endpoint(ep)
, error(ec)
{}
std::string udp_error_alert::message() const
{
// ec absorbs any to_string() failure instead of throwing
error_code ec;
return "UDP error: " + convert_from_native(error.message()) + " from: " + endpoint.address().to_string(ec);
}
// Posted when the session learns its external IP address.
external_ip_alert::external_ip_alert(aux::stack_allocator&
, address const& ip)
: external_address(ip)
{}
std::string external_ip_alert::message() const
{
// ec absorbs any to_string() failure instead of throwing
error_code ec;
return "external IP received: " + external_address.to_string(ec);
}
// Posted when a listen socket was successfully opened.
listen_succeeded_alert::listen_succeeded_alert(aux::stack_allocator&
, libtorrent::address const& listen_addr
, int listen_port
, socket_type_t t)
: address(listen_addr)
, port(listen_port)
#ifndef TORRENT_NO_DEPRECATE
, endpoint(listen_addr, listen_port)
#endif
, sock_type(t)
{}
// Convenience overload taking a TCP endpoint.
listen_succeeded_alert::listen_succeeded_alert(aux::stack_allocator& alloc
, tcp::endpoint const& ep
, socket_type_t t)
: listen_succeeded_alert(alloc
, ep.address()
, ep.port()
, t)
{}
// Convenience overload taking a UDP endpoint.
listen_succeeded_alert::listen_succeeded_alert(aux::stack_allocator& alloc
, udp::endpoint const& ep
, socket_type_t t)
: listen_succeeded_alert(alloc
, ep.address()
, ep.port()
, t)
{}
std::string listen_succeeded_alert::message() const
{
// NOTE(review): this local table differs from the file-local
// sock_type_str ("SSL/TCP" vs "TCP/SSL", "socks5" vs "Socks5") — the
// two should probably be unified; confirm intended wording upstream
char const* type_str[] = { "TCP", "SSL/TCP", "UDP", "i2p", "socks5", "SSL/uTP" };
char ret[200];
std::snprintf(ret, sizeof(ret), "successfully listening on [%s] %s"
, type_str[sock_type], print_endpoint(address, port).c_str());
return ret;
}
// Posted when a NAT-PMP/UPnP port mapping attempt fails.
portmap_error_alert::portmap_error_alert(aux::stack_allocator&
, int i, int t, error_code const& e)
: mapping(i), map_type(t), error(e)
{
#ifndef TORRENT_NO_DEPRECATE
// deprecated mirror of the error text
msg = convert_from_native(error.message());
#endif
}
std::string portmap_error_alert::message() const
{
return std::string("could not map port using ") + nat_type_str[map_type]
+ ": " + convert_from_native(error.message());
}
// Posted when a port mapping succeeds; records which protocol and the
// external port that was mapped.
portmap_alert::portmap_alert(aux::stack_allocator&, int i, int port, int t
, int proto)
: mapping(i), external_port(port), map_type(t), protocol(proto)
{}
std::string portmap_alert::message() const
{
char ret[200];
std::snprintf(ret, sizeof(ret), "successfully mapped port using %s. external port: %s/%u"
, nat_type_str[map_type], protocol_str[protocol], external_port);
return ret;
}
#ifndef TORRENT_DISABLE_LOGGING
// Low-level log line from the NAT-PMP/UPnP implementation.
portmap_log_alert::portmap_log_alert(aux::stack_allocator& alloc, int t, const char* m)
: map_type(t)
#ifndef TORRENT_NO_DEPRECATE
, msg(m)
#endif
, m_alloc(alloc)
, m_log_idx(alloc.copy_string(m))
{}
char const* portmap_log_alert::log_message() const
{
#ifndef TORRENT_NO_DEPRECATE
return msg.c_str();
#else
return m_alloc.get().ptr(m_log_idx);
#endif
}
std::string portmap_log_alert::message() const
{
char ret[600];
std::snprintf(ret, sizeof(ret), "%s: %s", nat_type_str[map_type]
, log_message());
return ret;
}
#endif
// Posted when fast-resume data is rejected; records the error, the file
// involved and the operation that failed (op may be null).
fastresume_rejected_alert::fastresume_rejected_alert(
aux::stack_allocator& alloc
, torrent_handle const& h
, error_code const& ec
, std::string const& f
, char const* op)
: torrent_alert(alloc, h)
, error(ec)
#ifndef TORRENT_NO_DEPRECATE
, file(f)
#endif
, operation(op)
, m_path_idx(alloc.copy_string(f))
{
#ifndef TORRENT_NO_DEPRECATE
// deprecated mirror of the error text
msg = convert_from_native(error.message());
#endif
}
std::string fastresume_rejected_alert::message() const
{
// operation may be null, hence the ternary guard
return torrent_alert::message() + " fast resume rejected. "
+ (operation?operation:"") + "(" + file_path() + "): "
+ convert_from_native(error.message());
}
char const* fastresume_rejected_alert::file_path() const
{
#ifndef TORRENT_NO_DEPRECATE
return file.c_str();
#else
return m_alloc.get().ptr(m_path_idx);
#endif
}
// Posted when an incoming or outgoing peer connection is blocked;
// r is the reason code (see reason_t).
peer_blocked_alert::peer_blocked_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep, int r)
: peer_alert(alloc, h, ep, peer_id(nullptr))
, reason(r)
{}
std::string peer_blocked_alert::message() const
{
char ret[600];
// reason codes, in reason_t order
static char const* reason_str[] =
{
"ip_filter",
"port_filter",
"i2p_mixed",
"privileged_ports",
"utp_disabled",
"tcp_disabled",
"invalid_local_interface"
};
// guard the table lookup; an out-of-range reason would be undefined
// behavior (the removed local "error_code ec" was unused)
char const* why = (reason >= 0
&& reason < int(sizeof(reason_str) / sizeof(reason_str[0])))
? reason_str[reason] : "unknown";
std::snprintf(ret, sizeof(ret), "%s: blocked peer [%s]"
, peer_alert::message().c_str(), why);
return ret;
}
// Posted when the DHT receives an incoming announce_peer for one of
// our torrents.
dht_announce_alert::dht_announce_alert(aux::stack_allocator&
, address const& i, int p
, sha1_hash const& ih)
: ip(i)
, port(p)
, info_hash(ih)
{}
std::string dht_announce_alert::message() const
{
// ec absorbs any to_string() failure instead of throwing
error_code ec;
// 40 hex chars + NUL for a sha1 digest
char ih_hex[41];
aux::to_hex(info_hash, ih_hex);
char msg[200];
std::snprintf(msg, sizeof(msg), "incoming dht announce: %s:%u (%s)"
, ip.to_string(ec).c_str(), port, ih_hex);
return msg;
}
// Posted when the DHT receives an incoming get_peers request.
dht_get_peers_alert::dht_get_peers_alert(aux::stack_allocator&
, sha1_hash const& ih)
: info_hash(ih)
{}
std::string dht_get_peers_alert::message() const
{
// 40 hex chars + NUL for a sha1 digest
char ih_hex[41];
aux::to_hex(info_hash, ih_hex);
char msg[200];
std::snprintf(msg, sizeof(msg), "incoming dht get_peers: %s", ih_hex);
return msg;
}
// Periodic per-torrent transfer statistics; snapshots the counters from
// the stat object into the transferred[] array.
stats_alert::stats_alert(aux::stack_allocator& alloc
, torrent_handle const& h, int in, stat const& s)
: torrent_alert(alloc, h)
, interval(in)
{
transferred[upload_payload] = s[stat::upload_payload].counter();
transferred[upload_protocol] = s[stat::upload_protocol].counter();
transferred[download_payload] = s[stat::download_payload].counter();
transferred[download_protocol] = s[stat::download_protocol].counter();
transferred[upload_ip_protocol] = s[stat::upload_ip_protocol].counter();
transferred[download_ip_protocol] = s[stat::download_ip_protocol].counter();
// the remaining channels are no longer tracked; zeroed for ABI
// compatibility (named slots in deprecated builds, placeholders otherwise)
#ifndef TORRENT_NO_DEPRECATE
transferred[upload_dht_protocol] = 0;
transferred[upload_tracker_protocol] = 0;
transferred[download_dht_protocol] = 0;
transferred[download_tracker_protocol] = 0;
#else
transferred[deprecated1] = 0;
transferred[deprecated2] = 0;
transferred[deprecated3] = 0;
transferred[deprecated4] = 0;
#endif
}
std::string stats_alert::message() const
{
char msg[200];
// deprecated builds print the four extra (always-zero) channels too
std::snprintf(msg, sizeof(msg), "%s: [%d] %d %d %d %d %d %d"
#ifndef TORRENT_NO_DEPRECATE
" %d %d %d %d"
#endif
, torrent_alert::message().c_str()
, interval
, transferred[0]
, transferred[1]
, transferred[2]
, transferred[3]
, transferred[4]
, transferred[5]
#ifndef TORRENT_NO_DEPRECATE
, transferred[6]
, transferred[7]
, transferred[8]
, transferred[9]
#endif
);
return msg;
}
// Posted when the disk cache for a torrent has been flushed.
cache_flushed_alert::cache_flushed_alert(aux::stack_allocator& alloc
, torrent_handle const& h)
: torrent_alert(alloc, h) {}
// Posted in anonymous mode when an action would compromise anonymity;
// k selects the message kind, s carries the offending detail.
anonymous_mode_alert::anonymous_mode_alert(aux::stack_allocator& alloc
, torrent_handle const& h, int k, std::string const& s)
: torrent_alert(alloc, h)
, kind(k)
, str(s)
{}
std::string anonymous_mode_alert::message() const
{
char msg[200];
// one entry per kind value
char const* msgs[] = {
"tracker is not anonymous, set a proxy"
};
std::snprintf(msg, sizeof(msg), "%s: %s: %s"
, torrent_alert::message().c_str()
, msgs[kind], str.c_str());
return msg;
}
// Posted when a peer is discovered via local service discovery (LSD);
// no peer id is known yet, hence peer_id(nullptr).
lsd_peer_alert::lsd_peer_alert(aux::stack_allocator& alloc, torrent_handle const& h
, tcp::endpoint const& i)
: peer_alert(alloc, h, i, peer_id(nullptr))
{}
std::string lsd_peer_alert::message() const
{
char msg[200];
std::snprintf(msg, sizeof(msg), "%s: received peer from local service discovery"
, peer_alert::message().c_str());
return msg;
}
// Posted when a tracker reply contains a "trackerid" field.
trackerid_alert::trackerid_alert(
aux::stack_allocator& alloc
, torrent_handle const& h
, std::string const& u
, const std::string& id)
: tracker_alert(alloc, h, u)
#ifndef TORRENT_NO_DEPRECATE
, trackerid(id)
#endif
, m_tracker_idx(alloc.copy_string(id))
{}
char const* trackerid_alert::tracker_id() const
{
#ifndef TORRENT_NO_DEPRECATE
return trackerid.c_str();
#else
return m_alloc.get().ptr(m_tracker_idx);
#endif
}
std::string trackerid_alert::message() const
{
return std::string("trackerid received: ") + tracker_id();
}
// Posted once the DHT bootstrap phase completes.
dht_bootstrap_alert::dht_bootstrap_alert(aux::stack_allocator&)
{}
std::string dht_bootstrap_alert::message() const
{
return "DHT bootstrap complete";
}
// Posted when a torrent enters an error state; f is the file the error
// relates to (may be empty).
torrent_error_alert::torrent_error_alert(
aux::stack_allocator& alloc
, torrent_handle const& h
, error_code const& e, std::string const& f)
: torrent_alert(alloc, h)
, error(e)
#ifndef TORRENT_NO_DEPRECATE
, error_file(f)
#endif
, m_file_idx(alloc.copy_string(f))
{}
std::string torrent_error_alert::message() const
{
char msg[200];
std::snprintf(msg, sizeof(msg), " ERROR: %s", convert_from_native(error.message()).c_str());
return torrent_alert::message() + msg;
}
char const* torrent_error_alert::filename() const
{
return m_alloc.get().ptr(m_file_idx);
}
// Posted when a torrent is added to the session.
torrent_added_alert::torrent_added_alert(aux::stack_allocator& alloc
, torrent_handle const& h)
: torrent_alert(alloc, h)
{}
std::string torrent_added_alert::message() const
{
return torrent_alert::message() + " added";
}
// Posted when a torrent is removed from the session.
torrent_removed_alert::torrent_removed_alert(aux::stack_allocator& alloc
, torrent_handle const& h, sha1_hash const& ih)
: torrent_alert(alloc, h)
, info_hash(ih)
{}
std::string torrent_removed_alert::message() const
{
return torrent_alert::message() + " removed";
}
// Posted for SSL torrents that need a certificate before connecting.
torrent_need_cert_alert::torrent_need_cert_alert(aux::stack_allocator& alloc
, torrent_handle const& h)
: torrent_alert(alloc, h)
{}
std::string torrent_need_cert_alert::message() const
{
return torrent_alert::message() + " needs SSL certificate";
}
// Posted when an incoming connection is accepted; t is the socket type
// (index into socket_type_str).
incoming_connection_alert::incoming_connection_alert(aux::stack_allocator&, int t
, tcp::endpoint const& i)
: socket_type(t)
, ip(i)
{}
std::string incoming_connection_alert::message() const
{
char msg[600];
// (removed an unused local "error_code ec")
std::snprintf(msg, sizeof(msg), "incoming connection from %s (%s)"
, print_endpoint(ip).c_str(), socket_type_str[socket_type]);
return msg;
}
// Posted when an outgoing connection to a peer is initiated; type is the
// socket type (index into socket_type_str).
peer_connect_alert::peer_connect_alert(aux::stack_allocator& alloc, torrent_handle h
, tcp::endpoint const& ep, peer_id const& peer_id, int type)
: peer_alert(alloc, h, ep, peer_id)
, socket_type(type)
{}
std::string peer_connect_alert::message() const
{
char msg[600];
// (removed an unused local "error_code ec")
std::snprintf(msg, sizeof(msg), "%s connecting to peer (%s)"
, peer_alert::message().c_str(), socket_type_str[socket_type]);
return msg;
}
// Posted in response to async_add_torrent(); carries the original params
// and any error that occurred.
add_torrent_alert::add_torrent_alert(aux::stack_allocator& alloc, torrent_handle h
, add_torrent_params const& p, error_code ec)
: torrent_alert(alloc, h)
, params(p)
, error(ec)
{}
std::string add_torrent_alert::message() const
{
char msg[600];
char info_hash[41];
// pick the best available display name: torrent file name, explicit
// name, URL, or (as a last resort) the hex info-hash
char const* torrent_name = info_hash;
if (params.ti) torrent_name = params.ti->name().c_str();
else if (!params.name.empty()) torrent_name = params.name.c_str();
else if (!params.url.empty()) torrent_name = params.url.c_str();
else aux::to_hex(params.info_hash, info_hash);
if (error)
{
std::snprintf(msg, sizeof(msg), "failed to add torrent \"%s\": [%s] %s"
, torrent_name, error.category().name()
, convert_from_native(error.message()).c_str());
}
else
{
std::snprintf(msg, sizeof(msg), "added torrent: %s", torrent_name);
}
return msg;
}
// Posted in response to post_torrent_updates(); carries a status
// snapshot for each torrent that changed.
state_update_alert::state_update_alert(aux::stack_allocator&
, std::vector<torrent_status> st)
: status(std::move(st))
{}
std::string state_update_alert::message() const
{
char msg[600];
std::snprintf(msg, sizeof(msg), "state updates for %d torrents", int(status.size()));
return msg;
}
// Posted when setting up the mmap disk cache failed.
mmap_cache_alert::mmap_cache_alert(aux::stack_allocator&
, error_code const& ec): error(ec)
{}
std::string mmap_cache_alert::message() const
{
char msg[600];
std::snprintf(msg, sizeof(msg), "mmap cache failed: (%d) %s", error.value()
, convert_from_native(error.message()).c_str());
return msg;
}
// Posted when a peer connection fails; op identifies the operation that
// failed (see operation_name()).
peer_error_alert::peer_error_alert(aux::stack_allocator& alloc, torrent_handle const& h
, tcp::endpoint const& ep, peer_id const& peer_id, int op
, error_code const& e)
: peer_alert(alloc, h, ep, peer_id)
, operation(op)
, error(e)
{
#ifndef TORRENT_NO_DEPRECATE
// deprecated mirror of the error text
msg = convert_from_native(error.message());
#endif
}
std::string peer_error_alert::message() const
{
char buf[200];
std::snprintf(buf, sizeof(buf), "%s peer error [%s] [%s]: %s"
, peer_alert::message().c_str()
, operation_name(operation), error.category().name()
, convert_from_native(error.message()).c_str());
return buf;
}
// Maps an operation code (op_t) to a human-readable name for use in
// alert messages. Out-of-range codes yield "unknown operation".
char const* operation_name(int op)
{
	// one entry per operation code, in enum order
	static char const* names[] = {
		"bittorrent",
		"iocontrol",
		"getpeername",
		"getname",
		"alloc_recvbuf",
		"alloc_sndbuf",
		"file_write",
		"file_read",
		"file",
		"sock_write",
		"sock_read",
		"sock_open",
		"sock_bind",
		"available",
		"encryption",
		"connect",
		"ssl_handshake",
		"get_interface",
	};
	int const num_ops = int(sizeof(names) / sizeof(names[0]));
	bool const in_range = op >= 0 && op < num_ops;
	return in_range ? names[op] : "unknown operation";
}
#ifndef TORRENT_NO_DEPRECATE
// Deprecated: posted when a torrent's info-hash changed (magnet links).
torrent_update_alert::torrent_update_alert(aux::stack_allocator& alloc, torrent_handle h
, sha1_hash const& old_hash, sha1_hash const& new_hash)
: torrent_alert(alloc, h)
, old_ih(old_hash)
, new_ih(new_hash)
{}
std::string torrent_update_alert::message() const
{
char msg[200];
std::snprintf(msg, sizeof(msg), " torrent changed info-hash from: %s to %s"
, aux::to_hex(old_ih).c_str()
, aux::to_hex(new_ih).c_str());
return torrent_alert::message() + msg;
}
#endif
// Posted when a peer connection closes; records the socket type, the
// failing operation, the error and the (extension) close reason.
peer_disconnected_alert::peer_disconnected_alert(aux::stack_allocator& alloc
, torrent_handle const& h, tcp::endpoint const& ep
, peer_id const& peer_id, operation_t op, int type, error_code const& e
, close_reason_t r)
: peer_alert(alloc, h, ep, peer_id)
, socket_type(type)
, operation(op)
, error(e)
, reason(r)
{
#ifndef TORRENT_NO_DEPRECATE
// deprecated mirror of the error text
msg = convert_from_native(error.message());
#endif
}
std::string peer_disconnected_alert::message() const
{
char buf[600];
std::snprintf(buf, sizeof(buf), "%s disconnecting (%s) [%s] [%s]: %s (reason: %d)"
, peer_alert::message().c_str()
, socket_type_str[socket_type]
, operation_name(operation), error.category().name()
, convert_from_native(error.message()).c_str()
, int(reason));
return buf;
}
// Posted on DHT-level errors; op identifies which operation failed.
dht_error_alert::dht_error_alert(aux::stack_allocator&, int op
, error_code const& ec)
: error(ec), operation(op_t(op))
{}
std::string dht_error_alert::message() const
{
// names for op_t values; unknown codes are clamped to index 0
static const char* const operation_names[] =
{
"unknown",
"hostname lookup"
};
int op = operation;
if (op < 0 || op >= int(sizeof(operation_names)/sizeof(operation_names[0])))
op = 0;
char msg[600];
std::snprintf(msg, sizeof(msg), "DHT error [%s] (%d) %s"
, operation_names[op]
, error.value()
, convert_from_native(error.message()).c_str());
return msg;
}
// Posted in response to a DHT get of an immutable item.
dht_immutable_item_alert::dht_immutable_item_alert(aux::stack_allocator&
, sha1_hash const& t, entry const& i)
: target(t), item(i)
{}
std::string dht_immutable_item_alert::message() const
{
char msg[1050];
std::snprintf(msg, sizeof(msg), "DHT immutable item %s [ %s ]"
, aux::to_hex(target).c_str()
, item.to_string().c_str());
return msg;
}
// TODO: 2 the salt here is allocated on the heap. It would be nice to
// allocate it in the stack_allocator
// Posted in response to a DHT get of a mutable item; carries the public
// key, signature, sequence number and whether the value is authoritative.
dht_mutable_item_alert::dht_mutable_item_alert(aux::stack_allocator&
, std::array<char, 32> k
, std::array<char, 64> sig
, std::uint64_t sequence
, std::string const& s
, entry const& i
, bool a)
: key(k), signature(sig), seq(sequence), salt(s), item(i), authoritative(a)
{}
std::string dht_mutable_item_alert::message() const
{
char msg[1050];
std::snprintf(msg, sizeof(msg), "DHT mutable item (key=%s salt=%s seq=%" PRId64 " %s) [ %s ]"
, aux::to_hex(key).c_str()
, salt.c_str()
, seq
, authoritative ? "auth" : "non-auth"
, item.to_string().c_str());
return msg;
}
// Posted when a DHT put of an immutable item completes; n is the number
// of nodes the item was stored on.
dht_put_alert::dht_put_alert(aux::stack_allocator&, sha1_hash const& t, int n)
: target(t)
, seq(0)
, num_success(n)
{}
// Posted when a DHT put of a mutable item completes; target stays
// all-zeros to signal the mutable variant.
dht_put_alert::dht_put_alert(aux::stack_allocator&
, std::array<char, 32> key
, std::array<char, 64> sig
, std::string s
, std::uint64_t sequence_number
, int n)
: target(nullptr)
, public_key(key)
, signature(sig)
, salt(std::move(s))
, seq(sequence_number)
, num_success(n)
{}
std::string dht_put_alert::message() const
{
char msg[1050];
// an all-zero target means this was a mutable-item put
if (target.is_all_zeros())
{
std::snprintf(msg, sizeof(msg), "DHT put complete (success=%d key=%s sig=%s salt=%s seq=%" PRId64 ")"
, num_success
, aux::to_hex(public_key).c_str()
, aux::to_hex(signature).c_str()
, salt.c_str()
, seq);
return msg;
}
// fixed typo: "commplete" -> "complete"
std::snprintf(msg, sizeof(msg), "DHT put complete (success=%d hash=%s)"
, num_success
, aux::to_hex(target).c_str());
return msg;
}
i2p_alert::i2p_alert(aux::stack_allocator&, error_code const& ec)
: error(ec)
{}
std::string i2p_alert::message() const
{
char msg[600];
std::snprintf(msg, sizeof(msg), "i2p_error: [%s] %s"
, error.category().name(), convert_from_native(error.message()).c_str());
return msg;
}
dht_outgoing_get_peers_alert::dht_outgoing_get_peers_alert(aux::stack_allocator&
	, sha1_hash const& ih, sha1_hash const& obfih
	, udp::endpoint ep)
	: info_hash(ih)
	, obfuscated_info_hash(obfih)
	, ip(std::move(ep))
{}

// Describe the outgoing get_peers request, including the obfuscated
// info-hash when one was used.
std::string dht_outgoing_get_peers_alert::message() const
{
	// zero-initialized, so an empty suffix when no obfuscation was used
	char obf_suffix[70] = "";
	if (obfuscated_info_hash != info_hash)
	{
		std::snprintf(obf_suffix, sizeof(obf_suffix), " [obfuscated: %s]"
			, aux::to_hex(obfuscated_info_hash).c_str());
	}

	char buf[600];
	std::snprintf(buf, sizeof(buf), "outgoing dht get_peers : %s%s -> %s"
		, aux::to_hex(info_hash).c_str()
		, obf_suffix
		, print_endpoint(ip).c_str());
	return buf;
}
#ifndef TORRENT_DISABLE_LOGGING
log_alert::log_alert(aux::stack_allocator& alloc, char const* log)
: m_alloc(alloc)
, m_str_idx(alloc.copy_string(log))
{}
log_alert::log_alert(aux::stack_allocator& alloc, char const* fmt, va_list v)
: m_alloc(alloc)
, m_str_idx(alloc.format_string(fmt, v))
{}
char const* log_alert::msg() const
{
return m_alloc.get().ptr(m_str_idx);
}
std::string log_alert::message() const
{
return msg();
}
// Per-torrent log line; the formatted text lives in the stack allocator.
torrent_log_alert::torrent_log_alert(aux::stack_allocator& alloc, torrent_handle const& h
	, char const* fmt, va_list v)
	: torrent_alert(alloc, h)
	, m_str_idx(alloc.format_string(fmt, v))
{}

// Raw log text (pointer into the stack allocator).
char const* torrent_log_alert::msg() const
{
	return m_alloc.get().ptr(m_str_idx);
}

// Prefix the log text with the torrent description from the base class.
std::string torrent_log_alert::message() const
{
	return torrent_alert::message() + ": " + msg();
}
// Per-peer log line: records the peer, an event name, a direction and a
// formatted message (stored in the stack allocator).
peer_log_alert::peer_log_alert(aux::stack_allocator& alloc
	, torrent_handle const& h
	, tcp::endpoint const& i, peer_id const& pi
	, peer_log_alert::direction_t dir
	, char const* event, char const* fmt, va_list v)
	: peer_alert(alloc, h, i, pi)
	, event_type(event)
	, direction(dir)
	, m_str_idx(alloc.format_string(fmt, v))
{}

// Raw log text (pointer into the stack allocator).
char const* peer_log_alert::msg() const
{
	return m_alloc.get().ptr(m_str_idx);
}

std::string peer_log_alert::message() const
{
	// indexed by direction_t; assumes the enum has exactly these five
	// values in this order -- TODO confirm against the direction_t decl
	static char const* mode[] =
	{ "<==", "==>", "<<<", ">>>", "***" };
	return torrent_alert::message() + " [" + print_endpoint(ip) + "] "
		+ mode[direction] + " " + event_type + " [ " + msg() + " ]";
}
#endif
lsd_error_alert::lsd_error_alert(aux::stack_allocator&, error_code const& ec)
: alert()
, error(ec)
{}
std::string lsd_error_alert::message() const
{
return "Local Service Discovery error: " + error.message();
}
// Snapshot all session counters into the alert's values array.
session_stats_alert::session_stats_alert(aux::stack_allocator&, counters const& cnt)
{
	for (int i = 0; i < counters::num_counters; ++i)
		values[i] = cnt[i];
}

std::string session_stats_alert::message() const
{
	// this specific output is parsed by tools/parse_session_stats.py
	// if this is changed, that parser should also be changed
	std::size_t const num_values = sizeof(values)/sizeof(values[0]);
	char msg[100];
	std::snprintf(msg, sizeof(msg), "session stats (%d values): "
		, int(num_values));
	std::string ret = msg;
	bool first = true;
	// sizeof() is unsigned; keep the index unsigned to avoid a
	// signed/unsigned comparison
	for (std::size_t i = 0; i < num_values; ++i)
	{
		std::snprintf(msg, sizeof(msg), first ? "%" PRIu64 : ", %" PRIu64, values[i]);
		first = false;
		ret += msg;
	}
	return ret;
}
dht_stats_alert::dht_stats_alert(aux::stack_allocator&
	, std::vector<dht_routing_bucket> table
	, std::vector<dht_lookup> requests)
	: alert()
	, active_requests(std::move(requests))
	, routing_table(std::move(table))
{}

// Summarize the DHT state: number of outstanding lookups and number of
// routing table buckets.
std::string dht_stats_alert::message() const
{
	int const num_requests = int(active_requests.size());
	int const num_buckets = int(routing_table.size());
	char buf[2048];
	std::snprintf(buf, sizeof(buf), "DHT stats: reqs: %d buckets: %d"
		, num_requests, num_buckets);
	return buf;
}
// Web-seed failure carrying an error_code. The URL (and, in deprecated
// builds, the message) is duplicated into the stack allocator.
url_seed_alert::url_seed_alert(aux::stack_allocator& alloc, torrent_handle const& h
	, std::string const& u, error_code const& e)
	: torrent_alert(alloc, h)
#ifndef TORRENT_NO_DEPRECATE
	, url(u)
	, msg(convert_from_native(e.message()))
#endif
	, error(e)
	, m_url_idx(alloc.copy_string(u))
	, m_msg_idx(-1) // -1 == no server-supplied message for this overload
{}

// Web-seed failure carrying a server-supplied error string instead of an
// error_code.
url_seed_alert::url_seed_alert(aux::stack_allocator& alloc, torrent_handle const& h
	, std::string const& u, std::string const& m)
	: torrent_alert(alloc, h)
#ifndef TORRENT_NO_DEPRECATE
	, url(u)
	, msg(m)
#endif
	, m_url_idx(alloc.copy_string(u))
	, m_msg_idx(alloc.copy_string(m))
{}

std::string url_seed_alert::message() const
{
	return torrent_alert::message() + " url seed ("
		+ server_url() + ") failed: " + convert_from_native(error.message());
}

// URL of the failing web seed (pointer into the stack allocator).
char const* url_seed_alert::server_url() const
{
	return m_alloc.get().ptr(m_url_idx);
}

// Server-supplied error text, or "" when the failure came from an
// error_code (m_msg_idx == -1).
char const* url_seed_alert::error_message() const
{
#ifndef TORRENT_NO_DEPRECATE
	return msg.c_str();
#else
	if (m_msg_idx == -1) return "";
	return m_alloc.get().ptr(m_msg_idx);
#endif
}
// Disk I/O failure on a specific file. `op` names the failed operation
// (e.g. "read"/"write") and must outlive the alert; the filename is
// copied into the stack allocator.
file_error_alert::file_error_alert(aux::stack_allocator& alloc
	, error_code const& ec
	, std::string const& f
	, char const* op
	, torrent_handle const& h)
	: torrent_alert(alloc, h)
#ifndef TORRENT_NO_DEPRECATE
	, file(f)
#endif
	, error(ec)
	, operation(op)
	, m_file_idx(alloc.copy_string(f))
{
#ifndef TORRENT_NO_DEPRECATE
	msg = convert_from_native(error.message());
#endif
}

// Path of the file the error occurred on (pointer into the allocator).
char const* file_error_alert::filename() const
{
	return m_alloc.get().ptr(m_file_idx);
}

std::string file_error_alert::message() const
{
	// operation may be null, in which case it's omitted
	return torrent_alert::message() + " "
		+ (operation?operation:"") + " (" + filename()
		+ ") error: " + convert_from_native(error.message());
}
incoming_request_alert::incoming_request_alert(aux::stack_allocator& alloc
	, peer_request r, torrent_handle h
	, tcp::endpoint const& ep, peer_id const& peer_id)
	: peer_alert(alloc, h, ep, peer_id)
	, req(r)
{}

// Describe the incoming block request: peer info plus piece/start/length.
std::string incoming_request_alert::message() const
{
	std::string const prefix = peer_alert::message();
	char buf[1024];
	std::snprintf(buf, sizeof(buf), "%s: incoming request [ piece: %d start: %d length: %d ]"
		, prefix.c_str(), req.piece, req.start, req.length);
	return buf;
}
// Log line from one of the DHT sub-modules; text lives in the stack
// allocator.
dht_log_alert::dht_log_alert(aux::stack_allocator& alloc
	, dht_log_alert::dht_module_t m, const char* fmt, va_list v)
	: module(m)
	, m_alloc(alloc)
	, m_msg_idx(alloc.format_string(fmt, v))
{}

// Raw log text (pointer into the stack allocator).
char const* dht_log_alert::log_message() const
{
	return m_alloc.get().ptr(m_msg_idx);
}

std::string dht_log_alert::message() const
{
	// indexed by dht_module_t; the order here must match that enum's
	// declaration order -- TODO confirm when editing either side
	static char const* const dht_modules[] =
	{
		"tracker",
		"node",
		"routing_table",
		"rpc_manager",
		"traversal"
	};

	char ret[900];
	std::snprintf(ret, sizeof(ret), "DHT %s: %s", dht_modules[module]
		, log_message());
	return ret;
}
// TODO: 3 use span<> here
// Raw DHT packet (sent or received); the payload bytes are copied into
// the stack allocator.
dht_pkt_alert::dht_pkt_alert(aux::stack_allocator& alloc
	, char const* buf, int size, dht_pkt_alert::direction_t d, udp::endpoint ep)
	: dir(d)
	, node(std::move(ep))
	, m_alloc(alloc)
	, m_msg_idx(alloc.copy_buffer(buf, size))
	, m_size(size)
{}

// Pointer to the copied packet bytes (not NUL-terminated; see pkt_size()).
char const* dht_pkt_alert::pkt_buf() const
{
	return m_alloc.get().ptr(m_msg_idx);
}

int dht_pkt_alert::pkt_size() const
{
	return m_size;
}

std::string dht_pkt_alert::message() const
{
	bdecode_node print;
	error_code ec;

	// ignore errors here. This is best-effort. It may be a broken encoding
	// but at least we'll print the valid parts
	// (the two 100s cap bdecode's recursion depth and token count)
	bdecode(pkt_buf(), pkt_buf() + pkt_size(), print, ec, nullptr, 100, 100);

	std::string msg = print_entry(print, true);

	// indexed by direction_t; assumes two values in this order -- TODO
	// confirm against the direction_t decl
	char const* prefix[2] = { "<==", "==>"};

	char buf[1024];
	std::snprintf(buf, sizeof(buf), "%s [%s] %s", prefix[dir]
		, print_endpoint(node).c_str(), msg.c_str());

	return buf;
}
// Reply to a DHT get_peers lookup. The peer endpoints are serialized into
// the stack allocator as a sequence of [uint8 length][raw endpoint bytes]
// records; peers() below must decode with the exact same layout.
dht_get_peers_reply_alert::dht_get_peers_reply_alert(aux::stack_allocator& alloc
	, sha1_hash const& ih
	, std::vector<tcp::endpoint> const& peers)
	: info_hash(ih)
	, m_alloc(alloc)
	, m_num_peers(int(peers.size()))
{
	// one length byte per peer, plus each endpoint's raw size
	std::size_t total_size = m_num_peers; // num bytes for sizes
	for (int i = 0; i < m_num_peers; i++) {
		total_size += peers[i].size();
	}

	m_peers_idx = alloc.allocate(int(total_size));

	char *ptr = alloc.ptr(m_peers_idx);
	for (int i = 0; i < m_num_peers; i++) {
		tcp::endpoint endp = peers[i];
		std::size_t size = endp.size();

		// the length must fit in the uint8 prefix
		TORRENT_ASSERT(size < 0x100);
		detail::write_uint8(uint8_t(size), ptr); // advances ptr by 1
		memcpy(ptr, endp.data(), size);
		ptr += size;
	}
}

std::string dht_get_peers_reply_alert::message() const
{
	char ih_hex[41];
	aux::to_hex(info_hash, ih_hex);
	char msg[200];
	std::snprintf(msg, sizeof(msg), "incoming dht get_peers reply: %s, peers %d", ih_hex, m_num_peers);
	return msg;
}

int dht_get_peers_reply_alert::num_peers() const
{
	return m_num_peers;
}

#ifndef TORRENT_NO_DEPRECATE
// Deprecated out-parameter variant; appends to v.
void dht_get_peers_reply_alert::peers(std::vector<tcp::endpoint> &v) const {
	std::vector<tcp::endpoint> p(peers());
	v.reserve(p.size());
	std::copy(p.begin(), p.end(), std::back_inserter(v));
}
#endif

// Decode the [uint8 length][endpoint bytes] records written by the ctor.
std::vector<tcp::endpoint> dht_get_peers_reply_alert::peers() const {
	std::vector<tcp::endpoint> peers(m_num_peers);

	const char *ptr = m_alloc.get().ptr(m_peers_idx);
	for (int i = 0; i < m_num_peers; i++) {
		std::size_t size = detail::read_uint8(ptr); // advances ptr by 1
		memcpy(peers[i].data(), ptr, size);
		ptr += size;
	}

	return peers;
}
// Response to a direct DHT request: the raw bencoded reply is copied into
// the stack allocator.
dht_direct_response_alert::dht_direct_response_alert(
	aux::stack_allocator& alloc, void* userdata_
	, udp::endpoint const& addr_, bdecode_node const& response)
	: userdata(userdata_), addr(addr_), m_alloc(alloc)
	, m_response_idx(alloc.copy_buffer(response.data_section().data()
		, int(response.data_section().size())))
	, m_response_size(int(response.data_section().size()))
{}

// Timeout/no-response variant: -1 index and size 0 mean "no payload".
dht_direct_response_alert::dht_direct_response_alert(
	aux::stack_allocator& alloc
	, void* userdata_
	, udp::endpoint const& addr_)
	: userdata(userdata_), addr(addr_), m_alloc(alloc)
	, m_response_idx(-1), m_response_size(0)
{}

std::string dht_direct_response_alert::message() const
{
	char msg[1050];
	// the temporary std::string lives until the end of the full
	// expression, so the c_str() pointer is valid inside snprintf
	std::snprintf(msg, sizeof(msg), "DHT direct response (address=%s) [ %s ]"
		, addr.address().to_string().c_str()
		, m_response_size ? std::string(m_alloc.get().ptr(m_response_idx), m_response_size).c_str() : "");
	return msg;
}

// Re-parse the stored payload; returns a default-constructed node when
// there was no response.
bdecode_node dht_direct_response_alert::response() const
{
	if (m_response_size == 0) return bdecode_node();
	char const* start = m_alloc.get().ptr(m_response_idx);
	char const* end = start + m_response_size;
	error_code ec;
	bdecode_node ret;
	bdecode(start, end, ret, ec);
	TORRENT_ASSERT(!ec);
	return ret;
}
#ifndef TORRENT_DISABLE_LOGGING
picker_log_alert::picker_log_alert(aux::stack_allocator& alloc, torrent_handle const& h
, tcp::endpoint const& ep, peer_id const& peer_id, std::uint32_t flags
, piece_block const* blocks, int num_blocks)
: peer_alert(alloc, h, ep, peer_id)
, picker_flags(flags)
, m_array_idx(alloc.copy_buffer(reinterpret_cast<char const*>(blocks)
, num_blocks * sizeof(piece_block)))
, m_num_blocks(num_blocks)
{}
std::vector<piece_block> picker_log_alert::blocks() const
{
// we need to copy this array to make sure the structures are properly
// aigned, not just to have a nice API
std::vector<piece_block> ret;
ret.resize(m_num_blocks);
char const* start = m_alloc.get().ptr(m_array_idx);
memcpy(&ret[0], start, m_num_blocks * sizeof(piece_block));
return ret;
}
std::string picker_log_alert::message() const
{
static char const* const flag_names[] =
{
"partial_ratio ",
"prioritize_partials ",
"rarest_first_partials ",
"rarest_first ",
"reverse_rarest_first ",
"suggested_pieces ",
"prio_sequential_pieces ",
"sequential_pieces ",
"reverse_pieces ",
"time_critical ",
"random_pieces ",
"prefer_contiguous ",
"reverse_sequential ",
"backup1 ",
"backup2 ",
"end_game "
};
std::string ret = peer_alert::message();
std::uint32_t flags = picker_flags;
int idx = 0;
ret += " picker_log [ ";
for (; flags != 0; flags >>= 1, ++idx)
{
if ((flags & 1) == 0) continue;
ret += flag_names[idx];
}
ret += "] ";
std::vector<piece_block> b = blocks();
for (int i = 0; i < int(b.size()); ++i)
{
char buf[50];
std::snprintf(buf, sizeof(buf), "(%d,%d) "
, b[i].piece_index, b[i].block_index);
ret += buf;
}
return ret;
}
#endif // TORRENT_DISABLE_LOGGING
} // namespace libtorrent
|
//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arithmetic/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/Transforms/Passes.h"
#include "mlir/Dialect/Vector/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
using namespace mlir;
using namespace mlir::sparse_tensor;
//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//
// Assemble the full sparse-compiler pipeline: sparsify linalg ops, lower
// the sparse tensor type to runtime calls, bufferize, and lower the
// result all the way to the LLVM dialect. The pass order is significant.
void mlir::sparse_tensor::buildSparseCompiler(
    OpPassManager &pm, const SparseCompilerOptions &options) {
  // TODO(wrengr): ensure the original `pm` is for ModuleOp
  // Normalize named linalg ops to linalg.generic and fuse elementwise ops
  // so sparsification sees canonical kernels.
  pm.addNestedPass<FuncOp>(createLinalgGeneralizationPass());
  pm.addPass(createLinalgElementwiseOpFusionPass());
  // Core sparse lowering: generate co-iteration loops, then convert the
  // sparse tensor type/ops into opaque pointers plus runtime library calls.
  pm.addPass(createSparsificationPass(options.sparsificationOptions()));
  pm.addPass(createSparseTensorConversionPass());
  // Bufferization of the remaining (dense) tensor code.
  pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
  pm.addNestedPass<FuncOp>(vector::createVectorBufferizePass());
  pm.addNestedPass<FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<FuncOp>(createConvertSCFToCFPass());
  pm.addPass(createFuncBufferizePass());
  pm.addPass(arith::createConstantBufferizePass());
  pm.addNestedPass<FuncOp>(createTensorBufferizePass());
  pm.addNestedPass<FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());
  // Lowering to the LLVM dialect.
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createMemRefToLLVMPass());
  pm.addNestedPass<FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createLowerToLLVMPass()); // --convert-std-to-llvm
  // Clean up any unrealized_conversion_cast ops left by partial conversions.
  pm.addPass(createReconcileUnrealizedCastsPass());
}
//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//
// Register the "sparse-compiler" pipeline with the global pass pipeline
// registry so it is available from mlir-opt style tools.
void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparseCompilerOptions>(
      "sparse-compiler",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparseCompiler);
}
|
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/runtime/vm/jit/frame-state.h"
#include "hphp/util/trace.h"
#include "hphp/runtime/vm/jit/ir-instruction.h"
#include "hphp/runtime/vm/jit/simplifier.h"
#include "hphp/runtime/vm/jit/ssa-tmp.h"
TRACE_SET_MOD(hhir);
namespace HPHP {
namespace JIT {
FrameState::FrameState(IRUnit& unit)
: FrameState(unit, unit.entry()->front().marker())
{
}
FrameState::FrameState(IRUnit& unit, BCMarker marker)
: FrameState(unit, marker.spOff, marker.func)
{
}
FrameState::FrameState(IRUnit& unit, Offset initialSpOffset, const Func* func)
: m_unit(unit)
, m_curFunc(func)
, m_spValue(nullptr)
, m_fpValue(nullptr)
, m_spOffset(initialSpOffset)
, m_thisAvailable(false)
, m_frameSpansCall(false)
, m_locals(func->numLocals())
, m_enableCse(false)
, m_snapshots()
{
}
FrameState::~FrameState() {
}
/*
 * Apply the effects of one instruction to the tracked frame state: stack
 * pointer/offset, frame pointer, this-availability, locals, and the CSE
 * table. Statement order here is significant.
 */
void FrameState::update(const IRInstruction* inst) {
  // Snapshot the current state for the taken branch target before
  // applying this instruction's effects.
  if (auto* taken = inst->taken()) {
    save(taken);
  }

  auto const opc = inst->op();

  // Apply local-variable effects to this state (FrameState is itself a
  // LocalStateHook).
  getLocalEffects(inst, *this);

  switch (opc) {
  case DefInlineFP:    trackDefInlineFP(inst);  break;
  case InlineReturn:   trackInlineReturn(inst); break;

  case Call:
    m_spValue = inst->dst();
    m_frameSpansCall = true;
    // A call pops the ActRec and pushes a return value.
    m_spOffset -= kNumActRecCells;
    m_spOffset += 1;
    assert(m_spOffset >= 0);
    // the callee may define values, so cached CSE results are stale
    clearCse();
    break;

  case CallArray:
    m_spValue = inst->dst();
    m_frameSpansCall = true;
    // A CallArray pops the ActRec an array arg and pushes a return value.
    m_spOffset -= kNumActRecCells;
    assert(m_spOffset >= 0);
    clearCse();
    break;

  case ContEnter:
    clearCse();
    break;

  case DefFP:
  case FreeActRec:
    m_fpValue = inst->dst();
    break;

  case ReDefGeneratorSP:
    m_spValue = inst->dst();
    break;

  case ReDefSP:
    m_spValue = inst->dst();
    m_spOffset = inst->extra<ReDefSP>()->spOffset;
    break;

  case DefInlineSP:
  case DefSP:
    m_spValue = inst->dst();
    m_spOffset = inst->extra<StackOffset>()->offset;
    break;

  // these only produce a new stack pointer value; the offset is unchanged
  case AssertStk:
  case CastStk:
  case CoerceStk:
  case CheckStk:
  case GuardStk:
  case ExceptionBarrier:
    m_spValue = inst->dst();
    break;

  case SpillStack: {
    m_spValue = inst->dst();
    // Push the spilled values but adjust for the popped values
    int64_t stackAdjustment = inst->src(1)->getValInt();
    m_spOffset -= stackAdjustment;
    m_spOffset += spillValueCells(inst);
    break;
  }

  case SpillFrame:
  case CufIterSpillFrame:
    m_spValue = inst->dst();
    m_spOffset += kNumActRecCells;
    break;

  case InterpOne:
  case InterpOneCF: {
    m_spValue = inst->dst();
    auto const& extra = *inst->extra<InterpOneData>();
    int64_t stackAdjustment = extra.cellsPopped - extra.cellsPushed;
    // push the return value if any and adjust for the popped values
    m_spOffset -= stackAdjustment;
    break;
  }

  // these produce a new frame pointer value
  case AssertLoc:
  case GuardLoc:
  case CheckLoc:
    m_fpValue = inst->dst();
    break;

  case LdThis:
    m_thisAvailable = true;
    break;

  default:
    break;
  }

  // instructions with a stack side-effect redefine the stack pointer
  if (inst->modifiesStack()) {
    m_spValue = inst->modifiedStkPtr();
  }

  // update the CSE table
  if (m_enableCse && inst->canCSE()) {
    cseInsert(inst);
  }

  // if the instruction kills any of its sources, remove them from the
  // CSE table
  if (inst->killsSources()) {
    for (int i = 0; i < inst->numSrcs(); ++i) {
      if (inst->killsSource(i)) {
        cseKill(inst->src(i));
      }
    }
  }
}
/*
 * Compute how `inst` affects the tracked locals and report each effect
 * through `hook` (which may be this FrameState itself, or an external
 * observer simulating the effects).
 */
void FrameState::getLocalEffects(const IRInstruction* inst,
                                 LocalStateHook& hook) const {
  // kill the locals whose ids are given by the listed (integer-constant)
  // sources of inst
  auto killIterLocals = [&](const std::initializer_list<uint32_t>& ids) {
    for (auto id : ids) {
      hook.setLocalValue(inst->src(id)->getValInt(), nullptr);
    }
  };

  // calls that are flagged as destroying locals wipe everything up front
  auto killedCallLocals = false;
  if ((inst->is(CallArray) && inst->extra<CallArrayData>()->destroyLocals) ||
      (inst->is(Call, CallBuiltin) && inst->extra<CallData>()->destroyLocals)) {
    clearLocals(hook);
    killedCallLocals = true;
  }

  switch (inst->op()) {
    case Call:
    case CallArray:
    case ContEnter:
      // values may be clobbered across the call; types are kept
      killLocalsForCall(hook, killedCallLocals);
      break;

    case StRefNT:
    case StRef: {
      SSATmp* newRef = inst->dst();
      SSATmp* prevRef = inst->src(0);
      // update other tracked locals that also contain prevRef
      updateLocalRefValues(hook, prevRef, newRef);
      break;
    }

    case StLocNT:
    case StLoc:
      hook.setLocalValue(inst->extra<LocalId>()->locId, inst->src(1));
      break;

    case LdLoc:
      // the load's result now names the local's current value
      hook.setLocalValue(inst->extra<LdLoc>()->locId, inst->dst());
      break;

    case AssertLoc:
    case GuardLoc:
    case CheckLoc:
      hook.refineLocalType(inst->extra<LocalId>()->locId, inst->typeParam());
      break;

    case CheckType: {
      SSATmp* newVal = inst->dst();
      SSATmp* oldVal = inst->src(0);
      // any local tracking oldVal now tracks the narrowed newVal
      refineLocalValues(hook, oldVal, newVal);
      break;
    }

    case IterInitK:
    case WIterInitK:
      // kill the locals to which this instruction stores iter's key and value
      killIterLocals({3, 4});
      break;

    case IterInit:
    case WIterInit:
      // kill the local to which this instruction stores iter's value
      killIterLocals({3});
      break;

    case IterNextK:
    case WIterNextK:
      // kill the locals to which this instruction stores iter's key and value
      killIterLocals({2, 3});
      break;

    case IterNext:
    case WIterNext:
      // kill the local to which this instruction stores iter's value
      killIterLocals({2});
      break;

    case InterpOne:
    case InterpOneCF: {
      auto const& id = *inst->extra<InterpOneData>();
      assert(!id.smashesAllLocals || id.nChangedLocals == 0);
      if (id.smashesAllLocals) {
        clearLocals(hook);
      } else {
        auto it = id.changedLocals;
        auto const end = it + id.nChangedLocals;
        for (; it != end; ++it) {
          auto& loc = *it;
          // If changing the inner type of a boxed local, also drop the
          // information about inner types for any other boxed locals.
          if (loc.type.isBoxed()) dropLocalRefsInnerTypes(hook);
          hook.setLocalType(loc.id, loc.type);
        }
      }
      break;
    }

    default:
      break;
  }

  // member instructions have their own effect-description machinery
  if (MInstrEffects::supported(inst)) MInstrEffects::get(inst, hook);
}
///// Support helpers for getLocalEffects /////
// Forget the tracked value of every local in the current frame.
void FrameState::clearLocals(LocalStateHook& hook) const {
  auto const numLocals = m_locals.size();
  for (unsigned id = 0; id < numLocals; ++id) {
    hook.setLocalValue(id, nullptr);
  }
}

// Propagate a CheckType refinement: any local currently tracking oldVal
// is switched over to the narrowed newVal.
void FrameState::refineLocalValues(LocalStateHook& hook,
                                   SSATmp* oldVal, SSATmp* newVal) const {
  assert(newVal->inst()->is(CheckType));
  assert(newVal->inst()->src(0) == oldVal);

  auto const numLocals = m_locals.size();
  for (unsigned id = 0; id < numLocals; ++id) {
    if (m_locals[id].value != oldVal) continue;
    hook.refineLocalValue(id, oldVal, newVal);
  }
}
// Visit the current frame, then every enclosing (inlined-from) frame,
// innermost first.
void FrameState::forEachFrame(FrameFunc body) const {
  body(m_fpValue, m_spOffset);

  // Saved states were pushed in call order, so iterate in reverse to walk
  // from inner frames out.
  for (auto rit = m_inlineSavedStates.rbegin();
       rit != m_inlineSavedStates.rend(); ++rit) {
    body(rit->fpValue, rit->spOffset);
  }
}

// Invoke body(localId, inlineIdx, state) for every local of every frame.
// inlineIdx 0 is the current frame; i+1 is m_inlineSavedStates[i].
template<typename L>
void FrameState::walkAllInlinedLocals(L body, bool skipThisFrame) const {
  auto visitFrame = [&](const LocalVec& locals, unsigned inlineIdx) {
    uint32_t const count = locals.size();
    for (uint32_t id = 0; id < count; ++id) {
      body(id, inlineIdx, locals[id]);
    }
  };

  if (!skipThisFrame) {
    visitFrame(m_locals, 0);
  }
  auto const numSaved = m_inlineSavedStates.size();
  for (unsigned frame = 0; frame < numSaved; ++frame) {
    visitFrame(m_inlineSavedStates[frame].locals, frame + 1);
  }
}

// Visit each local's id and its (safe) value; unsafe locals report null.
void FrameState::forEachLocal(LocalFunc body) const {
  walkAllInlinedLocals(
    [&](uint32_t id, unsigned, const LocalState& local) {
      body(id, local.unsafe ? nullptr : local.value);
    });
}
/**
 * Called to clear out the tracked local values at a call site. Calls kill all
 * registers, so we don't want to keep locals in registers across calls. We do
 * continue tracking the types in locals, however.
 */
void FrameState::killLocalsForCall(LocalStateHook& hook,
                                   bool skipThisFrame) const {
  walkAllInlinedLocals(
    [&](uint32_t id, unsigned inlineIdx, const LocalState& local) {
      auto* cur = local.value;
      // constants and already-unsafe locals don't need to be killed
      if (local.unsafe || cur == nullptr || cur->inst()->is(DefConst)) return;
      hook.killLocalForCall(id, inlineIdx, cur);
    },
    skipThisFrame);
}

//
// This method updates the tracked values and types of all locals that contain
// oldRef so that they now contain newRef.
// This should only be called for ref/boxed types.
//
void FrameState::updateLocalRefValues(LocalStateHook& hook,
                                      SSATmp* oldRef, SSATmp* newRef) const {
  assert(oldRef->type().isBoxed());
  assert(newRef->type().isBoxed());

  walkAllInlinedLocals(
    [&](uint32_t id, unsigned inlineIdx, const LocalState& local) {
      if (local.value == oldRef) {
        hook.updateLocalRefValue(id, inlineIdx, oldRef, newRef);
      }
    });
}

/**
 * This method changes any boxed local into a BoxedInitCell type. It's safe to
 * assume they're init because you can never have a reference to uninit.
 */
void FrameState::dropLocalRefsInnerTypes(LocalStateHook& hook) const {
  walkAllInlinedLocals(
    [&](uint32_t id, unsigned inlineIdx, const LocalState& local) {
      if (!local.type.isBoxed()) return;
      hook.dropLocalInnerType(id, inlineIdx);
    });
}
///// Methods for managing and merge block state /////
// Begin translating `block`: if a predecessor saved a snapshot for it,
// restore that state and drop the snapshot.
void FrameState::startBlock(Block* block) {
  auto it = m_snapshots.find(block);
  if (it != m_snapshots.end()) {
    load(it->second);
    FTRACE(4, "Loading state for B{}: {}\n", block->id(), show(*this));
    // copy the inline state out of the snapshot before erase(); the
    // iterator and snapshot are invalid afterwards
    m_inlineSavedStates = it->second.inlineSavedStates;
    m_snapshots.erase(it);
  }
}

// Finish `block`: propagate the current state to the fall-through
// successor, if there is one.
void FrameState::finishBlock(Block* block) {
  assert(block->back().isTerminal() == !block->next());

  if (!block->back().isTerminal()) {
    save(block->next());
  }
}
// Capture the current tracked state (minus inline frames; the caller
// copies those separately) into a Snapshot.
FrameState::Snapshot FrameState::createSnapshot() const {
  Snapshot snap;
  snap.spValue = m_spValue;
  snap.fpValue = m_fpValue;
  snap.curFunc = m_curFunc;
  snap.spOffset = m_spOffset;
  snap.thisAvailable = m_thisAvailable;
  snap.locals = m_locals;
  snap.curMarker = m_marker;
  snap.frameSpansCall = m_frameSpansCall;
  assert(snap.curMarker.valid());
  return snap;
}
/*
 * Save current state for block. If this is the first time saving state for
 * block, create a new snapshot. Otherwise merge the current state into the
 * existing snapshot.
 */
void FrameState::save(Block* block) {
  FTRACE(4, "Saving state for B{}: {}\n", block->id(), show(*this));
  auto it = m_snapshots.find(block);
  if (it != m_snapshots.end()) {
    merge(it->second);
    FTRACE(4, "Merged state: {}\n", show(*this));
  } else {
    // operator[] default-constructs the snapshot, then we overwrite it
    auto& snapshot = m_snapshots[block] = createSnapshot();
    snapshot.inlineSavedStates = m_inlineSavedStates;
  }
}

// Replace the current tracked state with a previously saved snapshot.
// Note: the snapshot's locals are moved out, leaving `state` partially
// consumed; frameSpansCall is OR'ed rather than overwritten.
void FrameState::load(Snapshot& state) {
  m_spValue = state.spValue;
  m_fpValue = state.fpValue;
  m_spOffset = state.spOffset;
  m_curFunc = state.curFunc;
  m_thisAvailable = state.thisAvailable;
  m_locals = std::move(state.locals);
  m_marker = state.curMarker;
  m_frameSpansCall = m_frameSpansCall || state.frameSpansCall;

  // If spValue is null, we merged two different but equivalent values. We
  // could define a new sp but that would drop a lot of useful information on
  // the floor. Let's cross this bridge when we reach it.
  always_assert(m_spValue &&
                "Attempted to merge two states with different stack pointers");
}

/*
 * Merge current state into state. Frame pointers and stack depth must match.
 * If the stack pointer tmps are different, clear the tracked value (we can
 * make a new one, given fp and spOffset).
 *
 * thisIsAvailable remains true if it's true in both states.
 * local variable values are preserved if the match in both states.
 * types are combined using Type::unionOf.
 */
void FrameState::merge(Snapshot& state) {
  // cannot merge fp or spOffset state, so assert they match
  assert(state.fpValue == m_fpValue);
  assert(state.spOffset == m_spOffset);
  assert(state.curFunc == m_curFunc);
  if (state.spValue != m_spValue) {
    // we have two different sp definitions but we know they're equal
    // because spOffset matched.
    state.spValue = nullptr;
  }

  // this is available iff it's available in both states
  state.thisAvailable &= m_thisAvailable;

  assert(m_locals.size() == state.locals.size());
  for (unsigned i = 0; i < m_locals.size(); ++i) {
    auto& local = state.locals[i];

    // preserve local values if they're the same in both states,
    // This would be the place to insert phi nodes (jmps+deflabels) if we want
    // to avoid clearing state, which triggers a downstream reload.
    if (local.value != m_locals[i].value) local.value = nullptr;

    // the merged type is the union; unsafe/written are sticky
    local.type = Type::unionOf(local.type, m_locals[i].type);
    local.unsafe = local.unsafe || m_locals[i].unsafe;
    local.written = local.written || m_locals[i].written;
  }

  // We should not be merging states that have different hhbc bytecode
  // boundaries.
  assert(m_marker.valid() && state.curMarker == m_marker);

  // For now, we shouldn't be merging states with different inline states.
  assert(m_inlineSavedStates == state.inlineSavedStates);
}
// Enter an inlined callee frame: save the caller's state (with its sp/fp
// rewound to the pre-ActRec values) and switch tracking to the callee.
void FrameState::trackDefInlineFP(const IRInstruction* inst) {
  auto const target     = inst->extra<DefInlineFP>()->target;
  auto const savedSPOff = inst->extra<DefInlineFP>()->retSPOff;
  auto const calleeFP   = inst->dst();
  auto const calleeSP   = inst->src(0);
  auto const savedSP    = inst->src(1);

  // Saved tracebuilder state will include the "return" fp/sp.
  // Whatever the current fpValue is is good enough, but we have to be
  // passed in the StkPtr that represents the stack prior to the
  // ActRec being allocated.
  m_spOffset = savedSPOff;
  m_spValue = savedSP;

  auto const stackValues = collectStackValues(m_spValue, m_spOffset);
  for (DEBUG_ONLY auto& val : stackValues) {
    FTRACE(4, "    marking caller stack value available: {}\n",
           val->toString());
  }

  // must snapshot after rewinding sp above, so the caller state is saved
  // with the pre-call stack
  m_inlineSavedStates.emplace_back(createSnapshot());

  /*
   * Set up the callee state.
   *
   * We set m_thisIsAvailable to true on any object method, because we
   * just don't inline calls to object methods with a null $this.
   */
  m_fpValue         = calleeFP;
  m_spValue         = calleeSP;
  m_thisAvailable   = target->cls() != nullptr && !target->isStatic();
  m_curFunc         = target;
  m_frameSpansCall  = false;

  m_locals.clear();
  m_locals.resize(target->numLocals());
}

// Leave an inlined frame: restore the caller state saved by
// trackDefInlineFP above.
void FrameState::trackInlineReturn(const IRInstruction* inst) {
  assert(m_inlineSavedStates.size());
  assert(m_inlineSavedStates.back().inlineSavedStates.empty());
  load(m_inlineSavedStates.back());
  m_inlineSavedStates.pop_back();
}
// Constants live in the unit-wide table; everything else is CSE'd in the
// FrameState-local table.
CSEHash* FrameState::cseHashTable(const IRInstruction* inst) {
  if (inst->is(DefConst)) return &m_unit.constTable();
  return &m_cseHash;
}

// Make inst's result available for reuse.
void FrameState::cseInsert(const IRInstruction* inst) {
  auto* table = cseHashTable(inst);
  table->insert(inst->dst());
}

// Remove a value from its CSE table (if its defining instruction is
// CSE-able at all).
void FrameState::cseKill(SSATmp* src) {
  auto* defInst = src->inst();
  if (!defInst->canCSE()) return;
  cseHashTable(defInst)->erase(src);
}

void FrameState::clearCse() {
  m_cseHash.clear();
}

// Look up a value equivalent to inst's result, honoring dominance during
// reoptimization.
SSATmp* FrameState::cseLookup(IRInstruction* inst,
                              const folly::Optional<IdomVector>& idoms) {
  auto tmp = cseHashTable(inst)->lookup(inst);
  if (tmp && idoms) {
    // During a reoptimize pass, we need to make sure that any values
    // we want to reuse for CSE are only reused in blocks dominated by
    // the block that defines it.
    bool const dominated =
      dominates(tmp->inst()->block(), inst->block(), *idoms);
    if (!dominated) return nullptr;
  }
  return tmp;
}
// Reset all tracked state back to a pristine FrameState.
void FrameState::clear() {
  clearCse();
  clearLocals(*this);
  m_frameSpansCall = false;
  m_spValue = nullptr;
  m_fpValue = nullptr;
  m_spOffset = 0;
  m_thisAvailable = false;
  m_marker = BCMarker();
  m_snapshots.clear();
  assert(m_inlineSavedStates.empty());
}
SSATmp* FrameState::localValue(uint32_t id) const {
always_assert(id < m_locals.size());
return m_locals[id].unsafe ? nullptr : m_locals[id].value;
}
SSATmp* FrameState::localValueSource(uint32_t id) const {
always_assert(id < m_locals.size());
auto const& local = m_locals[id];
if (local.value) return local.value;
if (local.written) return nullptr;
return fp();
}
Type FrameState::localType(uint32_t id) const {
always_assert(id < m_locals.size());
assert(m_locals[id].type != Type::None);
return m_locals[id].type;
}
void FrameState::setLocalValue(uint32_t id, SSATmp* value) {
always_assert(id < m_locals.size());
m_locals[id].value = value;
m_locals[id].type = value ? value->type() : Type::Gen;
m_locals[id].written = true;
m_locals[id].unsafe = false;
}
void FrameState::refineLocalValue(uint32_t id, SSATmp* oldVal, SSATmp* newVal) {
always_assert(id < m_locals.size());
auto& local = m_locals[id];
local.value = newVal;
local.type = newVal->type();
}
void FrameState::refineLocalType(uint32_t id, Type type) {
always_assert(id < m_locals.size());
auto& local = m_locals[id];
if (type.isBoxed() && local.type.isBoxed()) {
// It's OK for the old and new inner types of boxed values not to
// intersect, since the inner type is really just a prediction.
local.type = type;
} else {
always_assert((local.type & type) != Type::Bottom);
local.type = local.type & type;
}
}
// Set the tracked type of local |id|, dropping any known value.  The slot
// becomes written and safe.
void FrameState::setLocalType(uint32_t id, Type type) {
  always_assert(id < m_locals.size());
  auto& slot = m_locals[id];
  slot.value = nullptr;
  slot.type = type;
  slot.written = true;
  slot.unsafe = false;
}
/*
 * Get a reference to the LocalVec from an inline index. 0 means the current
 * frame, otherwise it's index (inlineIdx - 1) in m_inlineSavedStates.
 */
FrameState::LocalVec& FrameState::locals(unsigned inlineIdx) {
  if (inlineIdx == 0) return m_locals;
  auto const savedIdx = inlineIdx - 1;
  assert(savedIdx < m_inlineSavedStates.size());
  return m_inlineSavedStates[savedIdx].locals;
}
// Mark local |id| in frame |inlineIdx| as unsafe to reuse across a call.
// NOTE(review): |val| is unused — looks intentional (interface symmetry
// with related hooks), but worth confirming.
void FrameState::killLocalForCall(uint32_t id, unsigned inlineIdx,
                                  SSATmp* val) {
  auto& locs = locals(inlineIdx);
  always_assert(id < locs.size());
  locs[id].unsafe = true;
}
// Swap the tracked reference held by local |id| in frame |inlineIdx| from
// |oldRef| to |newRef|, updating the tracked type to match.  The slot must
// currently hold exactly |oldRef| and must not be marked unsafe.
void FrameState::updateLocalRefValue(uint32_t id, unsigned inlineIdx,
                                     SSATmp* oldRef, SSATmp* newRef) {
  auto& local = locals(inlineIdx)[id];
  assert(!local.unsafe);
  assert(local.value == oldRef);
  local.value = newRef;
  local.type = newRef->type();
}
void FrameState::dropLocalInnerType(uint32_t id, unsigned inlineIdx) {
auto& local = locals(inlineIdx)[id];
assert(local.type.isBoxed());
local.type = Type::BoxedInitCell;
}
// Render a one-line, human-readable summary of |state| for debug output.
std::string show(const FrameState& state) {
  auto const thisPart = state.thisAvailable() ? ", thisAvailable" : "";
  auto const callPart = state.frameSpansCall() ? ", frameSpansCall" : "";
  return folly::format("func: {}, bcOff: {}, spOff: {}{}{}",
                       state.func()->fullName()->data(),
                       state.marker().bcOff,
                       state.spOffset(),
                       thisPart,
                       callPart).str();
}
} }
|
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <miopen/errors.hpp>
#include <miopen/lock_file.hpp>
#include <miopen/logger.hpp>
#include <miopen/md5.hpp>
namespace fs = boost::filesystem;
namespace miopen {
// Log a boost::filesystem error that occurred inside LockFile machinery,
// attributing the message to caller |from|.  The clang-format guards keep
// the hand-aligned streaming expression inside the macro intact.
inline void LogFsError(const fs::filesystem_error& ex, const std::string& from)
{
    // clang-format off
    MIOPEN_LOG_E_FROM(from, "File system operation error in LockFile. "
                            "Error code: " << ex.code() << ". "
                            "Description: '" << ex.what() << "'");
    // clang-format on
}
// Map a target file path to the path of its lock file under the shared
// "miopen-lockfiles" directory in the system temp area.  The lock-file
// name combines an md5 of the parent directory with the original file
// name, so distinct directories never collide.  Filesystem errors are
// logged and rethrown.
std::string LockFilePath(const fs::path& filename_)
{
    try
    {
        const auto lock_dir = fs::temp_directory_path() / "miopen-lockfiles";
        if(!fs::exists(lock_dir))
        {
            fs::create_directories(lock_dir);
            // The directory is shared by every user of the cache.
            fs::permissions(lock_dir, fs::all_all);
        }
        const auto dir_hash  = md5(filename_.parent_path().string());
        const auto lock_name = dir_hash + "_" + filename_.filename().string() + ".lock";
        return (lock_dir / lock_name).string();
    }
    catch(const fs::filesystem_error& ex)
    {
        LogFsError(ex, MIOPEN_GET_FN_NAME());
        throw;
    }
}
// Construct a lock file at |path_|, creating the underlying file (with
// permissions open to all users) if it does not exist yet, then bind the
// interprocess file lock to it.  The PassKey parameter restricts
// construction to LockFile::Get().  Both filesystem and lock-init errors
// are logged and rethrown.
LockFile::LockFile(const char* path_, PassKey) : path(path_)
{
    try
    {
        if(!fs::exists(path))
        {
            // Create an empty file to lock on; fail loudly if we can't.
            if(!std::ofstream{path})
                MIOPEN_THROW(std::string("Error creating file <") + path + "> for locking.");
            // The lock file is shared across all users of the cache.
            fs::permissions(path, fs::all_all);
        }
        // Attach the file lock to the (now guaranteed to exist) file.
        flock = path;
    }
    catch(const fs::filesystem_error& ex)
    {
        LogFsError(ex, MIOPEN_GET_FN_NAME());
        throw;
    }
    catch(const boost::interprocess::interprocess_exception& ex)
    {
        LogFlockError(ex, "lock initialization", MIOPEN_GET_FN_NAME());
        throw;
    }
}
// Return the process-wide LockFile instance for |path|, creating it on
// first use.  A function-local mutex serializes all registry access, so
// each path's LockFile is constructed exactly once.
LockFile& LockFile::Get(const char* path)
{
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);
    { // To guarantee that construction won't be called if not required.
        auto& files   = LockFiles();
        const auto it = files.find(path);
        if(it != files.end())
            return it->second;
    }
    // Not present: construct the LockFile in place, keyed by path.
    const auto inserted = LockFiles().emplace(std::piecewise_construct,
                                              std::forward_as_tuple(path),
                                              std::forward_as_tuple(path, PassKey{}));
    return inserted.first->second;
}
} // namespace miopen
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/app_icon_loader.h"
// Default constructor: leaves profile/icon-size/delegate unset (presumably
// in-class initialized in the header — TODO confirm).
AppIconLoader::AppIconLoader() {}
// Construct a loader that reports icons of |icon_size| for |profile| to
// |delegate|.  The raw pointers are stored as-is (not owned here).
AppIconLoader::AppIconLoader(Profile* profile,
                             int icon_size,
                             AppIconLoaderDelegate* delegate)
    : profile_(profile), icon_size_(icon_size), delegate_(delegate) {}
AppIconLoader::~AppIconLoader() {}
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "extern/beatsaber-hook/shared/utils/typedefs.h"
#include "extern/beatsaber-hook/shared/utils/byref.hpp"
// Including type: System.Enum
#include "System/Enum.hpp"
// Completed includes
// Type namespace:
namespace GlobalNamespace {
// Forward declaring type: NodePose
struct NodePoseSyncState_NodePose;
}
#include "extern/beatsaber-hook/shared/utils/il2cpp-type-check.hpp"
DEFINE_IL2CPP_ARG_TYPE(GlobalNamespace::NodePoseSyncState_NodePose, "", "NodePoseSyncState/NodePose");
// Type namespace:
namespace GlobalNamespace {
// Size: 0x4
#pragma pack(push, 1)
// Autogenerated type: NodePoseSyncState/NodePose
// [TokenAttribute] Offset: FFFFFFFF
// Value-type wrapper for the il2cpp enum NodePoseSyncState/NodePose.
// Autogenerated: layout must stay exactly one packed int (see the
// static_asserts below), so do not add members or virtuals.
struct NodePoseSyncState_NodePose/*, public System::Enum*/ {
  public:
#ifdef USE_CODEGEN_FIELDS
  public:
#else
  protected:
#endif
    // public System.Int32 value__
    // Size: 0x4
    // Offset: 0x0
    int value;
    // Field size check
    static_assert(sizeof(int) == 0x4);
  public:
    // Creating value type constructor for type: NodePoseSyncState_NodePose
    constexpr NodePoseSyncState_NodePose(int value_ = {}) noexcept : value{value_} {}
    // Creating interface conversion operator: operator System::Enum
    // NOTE: reinterprets this object in place; relies on the identical
    // single-int layout asserted below.
    operator System::Enum() noexcept {
      return *reinterpret_cast<System::Enum*>(this);
    }
    // Creating conversion operator: operator int
    constexpr operator int() const noexcept {
      return value;
    }
    // static field const value: static public NodePoseSyncState/NodePose Head
    static constexpr const int Head = 0;
    // Get static field: static public NodePoseSyncState/NodePose Head
    static GlobalNamespace::NodePoseSyncState_NodePose _get_Head();
    // Set static field: static public NodePoseSyncState/NodePose Head
    static void _set_Head(GlobalNamespace::NodePoseSyncState_NodePose value);
    // static field const value: static public NodePoseSyncState/NodePose LeftController
    static constexpr const int LeftController = 1;
    // Get static field: static public NodePoseSyncState/NodePose LeftController
    static GlobalNamespace::NodePoseSyncState_NodePose _get_LeftController();
    // Set static field: static public NodePoseSyncState/NodePose LeftController
    static void _set_LeftController(GlobalNamespace::NodePoseSyncState_NodePose value);
    // static field const value: static public NodePoseSyncState/NodePose RightController
    static constexpr const int RightController = 2;
    // Get static field: static public NodePoseSyncState/NodePose RightController
    static GlobalNamespace::NodePoseSyncState_NodePose _get_RightController();
    // Set static field: static public NodePoseSyncState/NodePose RightController
    static void _set_RightController(GlobalNamespace::NodePoseSyncState_NodePose value);
    // static field const value: static public NodePoseSyncState/NodePose Count
    static constexpr const int Count = 3;
    // Get static field: static public NodePoseSyncState/NodePose Count
    static GlobalNamespace::NodePoseSyncState_NodePose _get_Count();
    // Set static field: static public NodePoseSyncState/NodePose Count
    static void _set_Count(GlobalNamespace::NodePoseSyncState_NodePose value);
    // Get instance field reference: public System.Int32 value__
    int& dyn_value__();
}; // NodePoseSyncState/NodePose
#pragma pack(pop)
static check_size<sizeof(NodePoseSyncState_NodePose), 0 + sizeof(int)> __GlobalNamespace_NodePoseSyncState_NodePoseSizeCheck;
static_assert(sizeof(NodePoseSyncState_NodePose) == 0x4);
}
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/renderer/gpu//compositor_forwarding_message_filter.h"
#include "base/bind.h"
#include "base/location.h"
#include "content/common/view_messages.h"
#include "ipc/ipc_message.h"
namespace content {
// Construct the filter; matched messages are re-posted onto
// |compositor_task_runner| (retained via the ref-counted member, per the
// .get() check below).
CompositorForwardingMessageFilter::CompositorForwardingMessageFilter(
    base::TaskRunner* compositor_task_runner)
    : compositor_task_runner_(compositor_task_runner) {
  DCHECK(compositor_task_runner_.get());
  // This check will only be used by functions running on compositor thread.
  compositor_thread_checker_.DetachFromThread();
}
CompositorForwardingMessageFilter::~CompositorForwardingMessageFilter() {
}
// Register |handler| for messages routed to |routing_id|.  Must be called
// on the compositor thread; multiple handlers per routing id are allowed.
void CompositorForwardingMessageFilter::AddHandlerOnCompositorThread(
    int routing_id,
    const Handler& handler) {
  DCHECK(compositor_thread_checker_.CalledOnValidThread());
  DCHECK(!handler.is_null());
  multi_handlers_.emplace(routing_id, handler);
}
// Unregister one previously-added |handler| for |routing_id|.  Must run on
// the compositor thread; removing a handler that was never added is a bug.
void CompositorForwardingMessageFilter::RemoveHandlerOnCompositorThread(
    int routing_id,
    const Handler& handler) {
  DCHECK(compositor_thread_checker_.CalledOnValidThread());
  auto range = multi_handlers_.equal_range(routing_id);
  for (auto it = range.first; it != range.second; ++it) {
    if (!it->second.Equals(handler))
      continue;
    multi_handlers_.erase(it);
    return;
  }
  NOTREACHED();
}
// Intercept compositor-related view messages on the IPC thread and
// forward them to the compositor task runner.  Returns true when the
// message was claimed (it will not be dispatched elsewhere).
bool CompositorForwardingMessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  const auto type = message.type();
  const bool wanted = type == ViewMsg_BeginFrame::ID ||
                      type == ViewMsg_ReclaimCompositorResources::ID ||
                      type == ViewMsg_SwapCompositorFrameAck::ID ||
                      type == ViewMsg_UpdateVSyncParameters::ID;
  if (!wanted)
    return false;
  compositor_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(
          &CompositorForwardingMessageFilter::ProcessMessageOnCompositorThread,
          this,
          message));
  return true;
}
// Deliver |message| to every handler registered for its routing id.
// Runs on the compositor thread.
void CompositorForwardingMessageFilter::ProcessMessageOnCompositorThread(
    const IPC::Message& message) {
  DCHECK(compositor_thread_checker_.CalledOnValidThread());
  const auto range = multi_handlers_.equal_range(message.routing_id());
  for (auto it = range.first; it != range.second; ++it)
    it->second.Run(message);
}
} // namespace content
|
// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// See the README.md in this directory for documentation.
#include <assert.h>
#include <cpuid.h>
#include <fuchsia/hardware/cpu/insntrace/llcpp/fidl.h>
#include <fuchsia/hardware/platform/device/c/banjo.h>
#include <inttypes.h>
#include <lib/ddk/debug.h>
#include <lib/ddk/device.h>
#include <lib/ddk/driver.h>
#include <lib/ddk/io-buffer.h>
#include <lib/ddk/platform-defs.h>
#include <lib/zircon-internal/device/cpu-trace/intel-pt.h>
#include <lib/zircon-internal/mtrace.h>
#include <lib/zx/bti.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/resource.h>
#include <zircon/types.h>
#include <memory>
#include <new>
#include <ddktl/device.h>
#include <ddktl/fidl.h>
#include <fbl/alloc_checker.h>
namespace insntrace {
namespace fuchsia_insntrace = fuchsia_hardware_cpu_insntrace;
// Shorten some long fidl names.
using BufferConfig = fuchsia_insntrace::wire::BufferConfig;
using BufferState = fuchsia_insntrace::wire::BufferState;
// This is defined in insntrace.fidl but not emitted.
using BufferDescriptor = uint32_t;
// Per-trace bookkeeping: hardware register shadow plus the buffers and
// ToPA tables backing one trace (one cpu or one thread, per the mode).
typedef struct ipt_per_trace_state {
  // the cpu or thread this buffer is assigned to
  // Which value to use is determined by the trace mode.
  union {
    uint32_t cpu;
    zx_handle_t thread;
  } owner;
  // number of chunks, each 2^|chunk_order| pages in size
  uint32_t num_chunks;
  // log2 size of each chunk, in pages
  uint32_t chunk_order;
  // if true then the buffer is circular, otherwise tracing stops when the
  // buffer fills
  bool is_circular;
  // true if allocated
  bool allocated;
  // true if buffer is assigned to a cpu/thread
  bool assigned;
  // number of ToPA tables needed
  uint32_t num_tables;
  // msrs
  uint64_t ctl;
  uint64_t status;
  uint64_t output_base;
  uint64_t output_mask_ptrs;
  uint64_t cr3_match;
  struct {
    uint64_t a, b;
  } addr_ranges[IPT_MAX_NUM_ADDR_RANGES];
  // trace buffers and ToPA tables
  // ToPA: Table of Physical Addresses
  // A "trace buffer" is a set of N chunks.
  std::unique_ptr<io_buffer_t[]> chunks;
  std::unique_ptr<io_buffer_t[]> topas;
} ipt_per_trace_state_t;
// TODO(dje): add unbindable?
class InsntraceDevice;
using DeviceType = ddk::Device<InsntraceDevice, ddk::Openable, ddk::Closable, ddk::Messageable>;
// Driver device for Intel Processor Trace: owns all per-trace state and
// serves the fuchsia.hardware.cpu.insntrace FIDL protocol.
class InsntraceDevice : public DeviceType, fidl::WireInterface<fuchsia_insntrace::Controller> {
 public:
  explicit InsntraceDevice(zx_device_t* parent, zx::bti bti)
      : DeviceType(parent), bti_(std::move(bti)) {}
  ~InsntraceDevice() = default;
  void DdkRelease();
  // Fidl handlers
  zx_status_t IptInitialize(const fuchsia_insntrace::wire::Allocation* allocation);
  zx_status_t IptTerminate();
  zx_status_t IptGetAllocation(fuchsia_insntrace::wire::Allocation* config);
  zx_status_t IptAllocateBuffer(const BufferConfig* config, BufferDescriptor* out_descriptor);
  zx_status_t IptAssignThreadBuffer(BufferDescriptor descriptor, zx_handle_t thread);
  zx_status_t IptReleaseThreadBuffer(BufferDescriptor descriptor, zx_handle_t thread);
  zx_status_t IptGetBufferConfig(BufferDescriptor descriptor, BufferConfig* out_config);
  zx_status_t IptGetBufferState(BufferDescriptor descriptor, BufferState* out_state);
  zx_status_t IptGetChunkHandle(BufferDescriptor descriptor, uint32_t chunk_num,
                                zx_handle_t* out_handle);
  zx_status_t IptFreeBuffer(BufferDescriptor descriptor);
  zx_status_t IptStart();
  zx_status_t IptStop();
  // Fidl server interface implementation
  void Initialize(fuchsia_insntrace::wire::Allocation allocation,
                  InitializeCompleter::Sync& completer) override;
  void Terminate(TerminateCompleter::Sync& completer) override;
  void GetAllocation(GetAllocationCompleter::Sync& completer) override;
  void AllocateBuffer(BufferConfig config, AllocateBufferCompleter::Sync& completer) override;
  void AssignThreadBuffer(BufferDescriptor descriptor, zx::thread thread,
                          AssignThreadBufferCompleter::Sync& completer) override;
  void ReleaseThreadBuffer(BufferDescriptor descriptor, zx::thread thread,
                           ReleaseThreadBufferCompleter::Sync& completer) override;
  void GetBufferConfig(BufferDescriptor descriptor,
                       GetBufferConfigCompleter::Sync& completer) override;
  void GetBufferState(BufferDescriptor descriptor,
                      GetBufferStateCompleter::Sync& completer) override;
  void GetChunkHandle(BufferDescriptor descriptor, uint32_t chunk_num,
                      GetChunkHandleCompleter::Sync& completer) override;
  void FreeBuffer(BufferDescriptor descriptor, FreeBufferCompleter::Sync& completer) override;
  void Start(StartCompleter::Sync& completer) override;
  void Stop(StopCompleter::Sync& completer) override;
  // Device protocol implementation
  zx_status_t DdkOpen(zx_device_t** dev_out, uint32_t flags);
  zx_status_t DdkClose(uint32_t flags);
  zx_status_t DdkMessage(fidl_incoming_msg_t* msg, fidl_txn_t* txn);
 private:
  // Low level routines
  void MakeTopa(ipt_per_trace_state_t* per_trace);
  uint32_t ComputeTopaEntryCount(ipt_per_trace_state_t* per_trace);
  size_t ComputeCaptureSize(const ipt_per_trace_state_t* per_trace);
  zx_status_t X86PtAllocBuffer1(ipt_per_trace_state_t* per_trace, uint32_t num, uint32_t order,
                                bool is_circular);
  void X86PtFreeBuffer1(ipt_per_trace_state_t* per_trace);
  zx_status_t X86PtAllocBuffer(const BufferConfig* config, BufferDescriptor* out_descriptor);
  zx_status_t X86PtAssignThreadBuffer(BufferDescriptor descriptor, zx_handle_t thread);
  zx_status_t X86PtReleaseThreadBuffer(BufferDescriptor descriptor, zx_handle_t thread);
  zx_status_t X86PtFreeBuffer(BufferDescriptor descriptor);
  zx_status_t X86PtStageTraceData(zx_handle_t resource, BufferDescriptor descriptor);
  zx_status_t X86PtGetTraceData(zx_handle_t resource, BufferDescriptor descriptor);
  // Guards device state across FIDL/device entry points.
  mtx_t lock_{};
  // Only one open of this device is supported at a time. KISS for now.
  bool opened_ = false;
  // Once tracing has started various things are not allowed until it stops.
  bool active_ = false;
  zx_insntrace_trace_mode_t mode_ = IPT_MODE_CPU;
  // # of entries in |per_trace_state|.
  // When tracing by cpu, this is the max number of cpus.
  // When tracing by thread, this is the max number of threads.
  // TODO(dje): Add support for dynamically growing the vector.
  uint16_t num_traces_ = 0;
  // one entry for each trace
  std::unique_ptr<ipt_per_trace_state_t[]> per_trace_state_;
  zx::bti bti_;
};
// CPUID-derived processor/PT capability cache, populated once by
// InsntraceInitOnce() and read-only afterwards.
static uint32_t ipt_config_family;
static uint32_t ipt_config_model;
static uint32_t ipt_config_stepping;
static uint32_t ipt_config_addr_cfg_max = 0;
static uint32_t ipt_config_mtc_freq_mask = 0;
static uint32_t ipt_config_cyc_thresh_mask = 0;
static uint32_t ipt_config_psb_freq_mask = 0;
static uint32_t ipt_config_num_addr_ranges = 0;
static uint32_t ipt_config_bus_freq = 0;
static bool ipt_config_supported = false;
static bool ipt_config_cr3_filtering = false;
static bool ipt_config_psb = false;
static bool ipt_config_ip_filtering = false;
static bool ipt_config_mtc = false;
static bool ipt_config_ptwrite = false;
static bool ipt_config_power_events = false;
static bool ipt_config_output_topa = false;
static bool ipt_config_output_topa_multi = false;
static bool ipt_config_output_single = false;
static bool ipt_config_output_transport = false;
static bool ipt_config_lip = false;
// maximum space, in bytes, for trace buffers (per cpu)
// This isn't necessarily
// MAX_NUM_CHUNKS * (1 << (MAX_CHUNK_ORDER + PAGE_SIZE_SHIFT)).
// Buffers have to be naturally aligned contiguous pages, but we can have
// a lot of them. Supporting large buffers and/or lots of them is for
// experimentation.
#define MAX_PER_TRACE_SPACE (256 * 1024 * 1024)
// maximum number of buffers
#define MAX_NUM_CHUNKS 4096
// maximum size of each buffer, in pages (1MB)
#define MAX_CHUNK_ORDER 8
#if PAGE_SIZE == 4096
#define PAGE_SIZE_SHIFT 12
#else
#error "unsupported page size"
#endif
// Test bit |b| of |x|; nonzero iff set.
#define BIT(x, b) ((x) & (1u << (b)))
// The userspace side of the driver
// Probe CPUID for Intel Processor Trace support and cache the chipset's
// PT capabilities in the ipt_config_* globals.  Returns
// ZX_ERR_NOT_SUPPORTED when leaf 0x14 is absent or the PT feature bit
// (leaf 0x07, EBX bit 25) is clear.
static zx_status_t InsntraceInitOnce() {
  unsigned a, b, c, d, max_leaf;
  max_leaf = __get_cpuid_max(0, nullptr);
  if (max_leaf < 0x14) {
    zxlogf(INFO, "IntelPT: No PT support");
    return ZX_ERR_NOT_SUPPORTED;
  }
  // Decode family/model/stepping from leaf 1 EAX, including the
  // extended-family/extended-model adjustments.
  __cpuid(1, a, b, c, d);
  ipt_config_stepping = a & 0xf;
  ipt_config_model = (a >> 4) & 0xf;
  ipt_config_family = (a >> 8) & 0xf;
  if (ipt_config_family == 0xf)
    ipt_config_family += (a >> 20) & 0xff;
  if (ipt_config_family == 6 || ipt_config_family == 0xf)
    ipt_config_model += ((a >> 16) & 0xf) << 4;
  __cpuid_count(0x07, 0, a, b, c, d);
  if (!BIT(b, 25)) {
    zxlogf(INFO, "IntelPT: No PT support");
    return ZX_ERR_NOT_SUPPORTED;
  }
  ipt_config_supported = true;
  // Leaf 0x14 subleaf 0: PT capability flags (read below from b and c).
  __cpuid_count(0x14, 0, a, b, c, d);
  if (BIT(b, 2))
    ipt_config_addr_cfg_max = 2;
  // Subleaf 1 (when advertised): frequency masks and address-range count.
  if (BIT(b, 1) && a >= 1) {
    unsigned a1, b1, c1, d1;
    __cpuid_count(0x14, 1, a1, b1, c1, d1);
    ipt_config_mtc_freq_mask = (a1 >> 16) & 0xffff;
    ipt_config_cyc_thresh_mask = b1 & 0xffff;
    ipt_config_psb_freq_mask = (b1 >> 16) & 0xffff;
    ipt_config_num_addr_ranges = a1 & 0x7;
  }
  // Leaf 0x15: TSC/"core crystal clock" ratio, used to derive bus freq.
  if (max_leaf >= 0x15) {
    unsigned a1 = 0, b1 = 0, c1 = 0, d1 = 0;
    __cpuid(0x15, a1, b1, c1, d1);
    if (a1 && b1)
      ipt_config_bus_freq = static_cast<uint32_t>(1. / ((float)a1 / (float)b1));
  }
  // b and c still hold leaf 0x14 subleaf 0 EBX/ECX here.
  ipt_config_cr3_filtering = !!BIT(b, 0);
  ipt_config_psb = !!BIT(b, 1);
  ipt_config_ip_filtering = !!BIT(b, 2);
  ipt_config_mtc = !!BIT(b, 3);
  ipt_config_ptwrite = !!BIT(b, 4);
  ipt_config_power_events = !!BIT(b, 5);
  ipt_config_output_topa = !!BIT(c, 0);
  ipt_config_output_topa_multi = !!BIT(c, 1);
  ipt_config_output_single = !!BIT(c, 2);
  ipt_config_output_transport = !!BIT(c, 3);
  ipt_config_lip = !!BIT(c, 31);
  zxlogf(INFO, "Intel Processor Trace configuration for this chipset:");
  // No need to print everything, but these are useful.
  zxlogf(INFO, "mtc_freq_mask: 0x%x", ipt_config_mtc_freq_mask);
  zxlogf(INFO, "cyc_thresh_mask: 0x%x", ipt_config_cyc_thresh_mask);
  zxlogf(INFO, "psb_freq_mask: 0x%x", ipt_config_psb_freq_mask);
  zxlogf(INFO, "num addr ranges: %u", ipt_config_num_addr_ranges);
  return ZX_OK;
}
// Create the ToPA for the configured number of pages for |cpu|.
// A circular collection of buffers is set up, even if we're going to apply
// the stop bit to the last entry.
void InsntraceDevice::MakeTopa(ipt_per_trace_state_t* per_trace) {
  const size_t run_len_log2 = per_trace->chunk_order;
  assert(run_len_log2 + PAGE_SIZE_SHIFT <= IPT_TOPA_MAX_SHIFT);
  assert(run_len_log2 + PAGE_SIZE_SHIFT >= IPT_TOPA_MIN_SHIFT);
  uint32_t curr_table = 0;
  uint32_t curr_idx = 0;
  uint64_t* last_entry = nullptr;
  // Note: An early version of this patch auto-computed the desired grouping
  // of pages with sufficient alignment. If you find yourself needing this
  // functionality again, see change 9470.
  // Fill table entries with one entry per chunk, spilling into the next
  // table when the current one is full.
  for (uint32_t i = 0; i < per_trace->num_chunks; ++i) {
    io_buffer_t* buffer = &per_trace->chunks[i];
    io_buffer_t* topa = &per_trace->topas[curr_table];
    zx_paddr_t pa = io_buffer_phys(buffer);
    uint64_t val =
        IPT_TOPA_ENTRY_PHYS_ADDR(pa) | IPT_TOPA_ENTRY_SIZE(run_len_log2 + PAGE_SIZE_SHIFT);
    auto table = reinterpret_cast<uint64_t*>(io_buffer_virt(topa));
    table[curr_idx] = val;
    last_entry = &table[curr_idx];
    // Make sure we leave one at the end of the table for the END marker.
    if (unlikely(curr_idx >= IPT_TOPA_MAX_TABLE_ENTRIES - 2)) {
      curr_idx = 0;
      curr_table++;
    } else {
      curr_idx++;
    }
  }
  assert(curr_table + 1 == per_trace->num_tables ||
         // If the last table is full curr_table will be the next one.
         (curr_table == per_trace->num_tables && curr_idx == 0));
  // Populate END entries for completed tables
  // Assume the table is circular. We'll set the stop bit on the last
  // entry later.
  for (uint32_t i = 0; i < curr_table; ++i) {
    io_buffer_t* this_table = &per_trace->topas[i];
    io_buffer_t* next_table;
    if (i == per_trace->num_tables - 1) {
      next_table = &per_trace->topas[0];
    } else {
      next_table = &per_trace->topas[i + 1];
    }
    zx_paddr_t next_table_pa = io_buffer_phys(next_table);
    uint64_t val = IPT_TOPA_ENTRY_PHYS_ADDR(next_table_pa) | IPT_TOPA_ENTRY_END;
    auto table = reinterpret_cast<uint64_t*>(io_buffer_virt(this_table));
    table[IPT_TOPA_MAX_TABLE_ENTRIES - 1] = val;
  }
  // Populate the END entry for a possibly non-full last table
  if (curr_table < per_trace->num_tables) {
    io_buffer_t* this_table = &per_trace->topas[curr_table];
    io_buffer_t* first_table = &per_trace->topas[0];
    zx_paddr_t first_table_pa = io_buffer_phys(first_table);
    uint64_t val = IPT_TOPA_ENTRY_PHYS_ADDR(first_table_pa) | IPT_TOPA_ENTRY_END;
    auto table = reinterpret_cast<uint64_t*>(io_buffer_virt(this_table));
    table[curr_idx] = val;
  }
  // Add the STOP flag to the last non-END entry in the tables
  assert(last_entry);
  if (!per_trace->is_circular)
    *last_entry |= IPT_TOPA_ENTRY_STOP;
}
// Compute the number of ToPA entries needed for the configured number of
// buffers.
// The output count includes the END entries across all needed tables.
uint32_t InsntraceDevice::ComputeTopaEntryCount(ipt_per_trace_state_t* per_trace) {
  const uint32_t data_entries = per_trace->num_chunks;
  // Each table holds IPT_TOPA_MAX_TABLE_ENTRIES - 1 data entries plus one
  // END entry, so round up when counting the END entries.
  const uint32_t end_entries =
      (data_entries + IPT_TOPA_MAX_TABLE_ENTRIES - 2) / (IPT_TOPA_MAX_TABLE_ENTRIES - 1);
  const uint32_t total = data_entries + end_entries;
  zxlogf(TRACE, "IPT: compute_topa_entry_count: num_entries: %u", data_entries);
  zxlogf(TRACE, "IPT: compute_topa_entry_count: num_end_entries: %u", end_entries);
  zxlogf(TRACE, "IPT: compute_topa_entry_count: total entries: %u", total);
  return total;
}
// Walk the tables to discover how much data has been captured for |per_trace|.
// Note: If this is a circular buffer this is just where tracing stopped.
size_t InsntraceDevice::ComputeCaptureSize(const ipt_per_trace_state_t* per_trace) {
  // output_base / output_mask_ptrs shadow the hardware's current position:
  // table physical address, entry index (bits 7+), and offset within the
  // current entry (upper 32 bits).
  uint64_t curr_table_paddr = per_trace->output_base;
  uint32_t curr_table_entry_idx = (uint32_t)per_trace->output_mask_ptrs >> 7;
  uint32_t curr_entry_offset = (uint32_t)(per_trace->output_mask_ptrs >> 32);
  zxlogf(TRACE, "IPT: compute_capture_size: trace %tu", per_trace - per_trace_state_.get());
  zxlogf(TRACE,
         "IPT: curr_table_paddr 0x%" PRIx64 ", curr_table_entry_idx %u, curr_entry_offset %u\n",
         curr_table_paddr, curr_table_entry_idx, curr_entry_offset);
  size_t total_size = 0;
  // Sum full entries up to the hardware's stopping point, then add the
  // partial offset into the current entry.
  for (uint32_t table = 0; table < per_trace->num_tables; ++table) {
    // Get the physical address so that we can compare it with the value
    // in output_base.
    zx_paddr_t table_paddr = io_buffer_phys(&per_trace->topas[table]);
    for (uint32_t entry = 0; entry < IPT_TOPA_MAX_TABLE_ENTRIES - 1; ++entry) {
      if (table_paddr == curr_table_paddr && entry >= curr_table_entry_idx) {
        total_size += curr_entry_offset;
        return total_size;
      }
      auto table_ptr = reinterpret_cast<uint64_t*>(io_buffer_virt(&per_trace->topas[table]));
      uint64_t topa_entry = table_ptr[entry];
      total_size += 1UL << IPT_TOPA_ENTRY_EXTRACT_SIZE(topa_entry);
    }
  }
  // Should be unreachable.
  // TODO(dje): Later flag state as broken.
  zxlogf(ERROR, "IPT: unexpectedly exited capture loop");
  return 0;
}
// Allocate |num| chunks of 2^|order| pages each, plus the ToPA tables that
// describe them, into |per_trace|.  On failure, num_chunks/num_tables
// record what was allocated so the caller can free via X86PtFreeBuffer1.
zx_status_t InsntraceDevice::X86PtAllocBuffer1(ipt_per_trace_state_t* per_trace, uint32_t num,
                                               uint32_t order, bool is_circular) {
  zx_status_t status;
  size_t chunk_pages = 1 << order;
  fbl::AllocChecker ac;
  per_trace->chunks = std::unique_ptr<io_buffer_t[]>(new (&ac) io_buffer_t[num]{});
  if (!ac.check()) {
    return ZX_ERR_NO_MEMORY;
  }
  for (uint32_t i = 0; i < num; ++i) {
    // ToPA entries of size N must be aligned to N, too.
    uint32_t alignment_log2 = PAGE_SIZE_SHIFT + order;
    status = io_buffer_init_aligned(&per_trace->chunks[i], bti_.get(), chunk_pages * PAGE_SIZE,
                                    alignment_log2, IO_BUFFER_RW | IO_BUFFER_CONTIG);
    if (status != ZX_OK)
      return status;
    // Keep track of allocated buffers as we go in case we later fail:
    // we want to be able to free those that got allocated.
    ++per_trace->num_chunks;
    // Catch bugs in io_buffer_init_aligned. If it doesn't give us a
    // properly aligned buffer we'll get an "operational error" later.
    // See Intel Vol3 36.2.6.2.
    zx_paddr_t pa = io_buffer_phys(&per_trace->chunks[i]);
    zx_paddr_t align_mask = (1ull << alignment_log2) - 1;
    if (pa & align_mask) {
      zxlogf(ERROR, "%s: WARNING: chunk has bad alignment: alignment %u, got 0x%" PRIx64 "",
             __func__, alignment_log2, pa);
      return ZX_ERR_INTERNAL;
    }
  }
  assert(per_trace->num_chunks == num);
  per_trace->chunk_order = order;
  per_trace->is_circular = is_circular;
  // TODO(dje): No need to allocate the max on the last table.
  uint32_t entry_count = ComputeTopaEntryCount(per_trace);
  uint32_t table_count =
      (entry_count + IPT_TOPA_MAX_TABLE_ENTRIES - 1) / IPT_TOPA_MAX_TABLE_ENTRIES;
  if (entry_count < 2) {
    zxlogf(INFO, "IPT: INVALID ENTRY COUNT: %u", entry_count);
    return ZX_ERR_INVALID_ARGS;
  }
  // Some early Processor Trace implementations only supported having a
  // table with a single real entry and an END.
  if (!ipt_config_output_topa_multi && entry_count > 2)
    return ZX_ERR_NOT_SUPPORTED;
  // Allocate Table(s) of Physical Addresses (ToPA) for each cpu.
  per_trace->topas = std::unique_ptr<io_buffer_t[]>(new (&ac) io_buffer_t[table_count]{});
  if (!ac.check()) {
    return ZX_ERR_NO_MEMORY;
  }
  for (uint32_t i = 0; i < table_count; ++i) {
    status = io_buffer_init(&per_trace->topas[i], bti_.get(),
                            sizeof(uint64_t) * IPT_TOPA_MAX_TABLE_ENTRIES,
                            IO_BUFFER_RW | IO_BUFFER_CONTIG);
    if (status != ZX_OK)
      return ZX_ERR_NO_MEMORY;
    // Keep track of allocated tables as we go in case we later fail:
    // we want to be able to free those that got allocated.
    ++per_trace->num_tables;
  }
  assert(per_trace->num_tables == table_count);
  MakeTopa(per_trace);
  return ZX_OK;
}
// Release all chunk buffers and ToPA tables owned by |per_trace| and mark
// it unallocated.  The trace must not currently be assigned to a
// cpu/thread.  Safe to call on a partially-allocated trace: the
// num_chunks/num_tables counters track exactly what was allocated.
void InsntraceDevice::X86PtFreeBuffer1(ipt_per_trace_state_t* per_trace) {
  assert(!per_trace->assigned);
  if (per_trace->chunks) {
    for (uint32_t idx = 0; idx < per_trace->num_chunks; ++idx)
      io_buffer_release(&per_trace->chunks[idx]);
  }
  per_trace->chunks.reset();
  if (per_trace->topas) {
    for (uint32_t idx = 0; idx < per_trace->num_tables; ++idx)
      io_buffer_release(&per_trace->topas[idx]);
  }
  per_trace->topas.reset();
  per_trace->allocated = false;
}
// Validate |config| against the chipset's PT capabilities, find a free
// per-trace slot, allocate its buffers/tables, and return its descriptor
// in |out_descriptor|.  Returns ZX_ERR_INVALID_ARGS on any out-of-range
// or unsupported configuration, ZX_ERR_NO_RESOURCES when all slots are in
// use.
zx_status_t InsntraceDevice::X86PtAllocBuffer(const BufferConfig* config,
                                              BufferDescriptor* out_descriptor) {
  zxlogf(TRACE, "%s: num_chunks %u, chunk_order %u", __func__, config->num_chunks,
         config->chunk_order);
  if (config->num_chunks == 0 || config->num_chunks > MAX_NUM_CHUNKS)
    return ZX_ERR_INVALID_ARGS;
  if (config->chunk_order > MAX_CHUNK_ORDER)
    return ZX_ERR_INVALID_ARGS;
  size_t chunk_pages = 1 << config->chunk_order;
  size_t nr_pages = config->num_chunks * chunk_pages;
  size_t total_per_trace = nr_pages * PAGE_SIZE;
  if (total_per_trace > MAX_PER_TRACE_SPACE)
    return ZX_ERR_INVALID_ARGS;
  // Build the mask of ctl bits the caller may set, gated on each
  // capability this chipset reported during init.
  uint64_t settable_ctl_mask =
      (IPT_CTL_OS_ALLOWED_MASK | IPT_CTL_USER_ALLOWED_MASK | IPT_CTL_TSC_EN_MASK |
       IPT_CTL_DIS_RETC_MASK | IPT_CTL_BRANCH_EN_MASK);
  if (ipt_config_ptwrite)
    settable_ctl_mask |= IPT_CTL_PTW_EN_MASK | IPT_CTL_FUP_ON_PTW_MASK;
  if (ipt_config_cr3_filtering)
    settable_ctl_mask |= IPT_CTL_CR3_FILTER_MASK;
  if (ipt_config_mtc)
    settable_ctl_mask |= IPT_CTL_MTC_EN_MASK | IPT_CTL_MTC_FREQ_MASK;
  if (ipt_config_power_events)
    settable_ctl_mask |= IPT_CTL_POWER_EVENT_EN_MASK;
  if (ipt_config_ip_filtering) {
    if (ipt_config_num_addr_ranges >= 1)
      settable_ctl_mask |= IPT_CTL_ADDR0_MASK;
    if (ipt_config_num_addr_ranges >= 2)
      settable_ctl_mask |= IPT_CTL_ADDR1_MASK;
    if (ipt_config_num_addr_ranges >= 3)
      settable_ctl_mask |= IPT_CTL_ADDR2_MASK;
    if (ipt_config_num_addr_ranges >= 4)
      settable_ctl_mask |= IPT_CTL_ADDR3_MASK;
  }
  if (ipt_config_psb)
    settable_ctl_mask |= (IPT_CTL_CYC_EN_MASK | IPT_CTL_PSB_FREQ_MASK | IPT_CTL_CYC_THRESH_MASK);
  if ((config->ctl & ~settable_ctl_mask) != 0) {
    zxlogf(ERROR, "bad ctl, requested 0x%" PRIx64 ", valid 0x%" PRIx64 "", config->ctl,
           settable_ctl_mask);
    return ZX_ERR_INVALID_ARGS;
  }
  // Each frequency/threshold field must be one of the values the chipset
  // advertised in its capability bitmask.
  uint32_t mtc_freq = (uint32_t)((config->ctl & IPT_CTL_MTC_FREQ_MASK) >> IPT_CTL_MTC_FREQ_SHIFT);
  if (mtc_freq != 0 && ((1 << mtc_freq) & ipt_config_mtc_freq_mask) == 0) {
    zxlogf(ERROR, "bad mtc_freq value, requested 0x%x, valid mask 0x%x", mtc_freq,
           ipt_config_mtc_freq_mask);
    return ZX_ERR_INVALID_ARGS;
  }
  uint32_t cyc_thresh =
      (uint32_t)((config->ctl & IPT_CTL_CYC_THRESH_MASK) >> IPT_CTL_CYC_THRESH_SHIFT);
  if (cyc_thresh != 0 && ((1 << cyc_thresh) & ipt_config_cyc_thresh_mask) == 0) {
    zxlogf(ERROR, "bad cyc_thresh value, requested 0x%x, valid mask 0x%x", cyc_thresh,
           ipt_config_cyc_thresh_mask);
    return ZX_ERR_INVALID_ARGS;
  }
  uint32_t psb_freq = (uint32_t)((config->ctl & IPT_CTL_PSB_FREQ_MASK) >> IPT_CTL_PSB_FREQ_SHIFT);
  if (psb_freq != 0 && ((1 << psb_freq) & ipt_config_psb_freq_mask) == 0) {
    zxlogf(ERROR, "bad psb_freq value, requested 0x%x, valid mask 0x%x", psb_freq,
           ipt_config_psb_freq_mask);
    return ZX_ERR_INVALID_ARGS;
  }
  // Find an unallocated buffer entry.
  BufferDescriptor descriptor;
  for (descriptor = 0; descriptor < num_traces_; ++descriptor) {
    if (!per_trace_state_[descriptor].allocated)
      break;
  }
  if (descriptor == num_traces_)
    return ZX_ERR_NO_RESOURCES;
  ipt_per_trace_state_t* per_trace = &per_trace_state_[descriptor];
  zx_status_t status =
      X86PtAllocBuffer1(per_trace, config->num_chunks, config->chunk_order, config->is_circular);
  if (status != ZX_OK) {
    // X86PtAllocBuffer1 tracks partial progress; free whatever it made.
    X86PtFreeBuffer1(per_trace);
    return status;
  }
  per_trace->ctl = config->ctl;
  per_trace->status = 0;
  per_trace->output_base = io_buffer_phys(&per_trace->topas[0]);
  per_trace->output_mask_ptrs = 0;
  per_trace->cr3_match = config->address_space_match;
  // TODO(dje): insntrace.fidl can't use vectors (yet) so the address ranges
  // are individually spelled out.
  static_assert(fuchsia_insntrace::wire::MAX_NUM_ADDR_RANGES == 2);
  static_assert(fuchsia_insntrace::wire::MAX_NUM_ADDR_RANGES == IPT_MAX_NUM_ADDR_RANGES);
  per_trace->addr_ranges[0].a = config->address_range_0.start;
  per_trace->addr_ranges[0].b = config->address_range_0.end;
  per_trace->addr_ranges[1].a = config->address_range_1.start;
  per_trace->addr_ranges[1].b = config->address_range_1.end;
  per_trace->allocated = true;
  *out_descriptor = descriptor;
  return ZX_OK;
}
zx_status_t InsntraceDevice::X86PtAssignThreadBuffer(BufferDescriptor descriptor,
                                                     zx_handle_t thread) {
  // Thread-mode tracing is not implemented yet. Close the handle so it does
  // not leak, then report the operation as unsupported.
  zx_handle_close(thread);
  // TODO(dje): Thread support is still work-in-progress.
  return ZX_ERR_NOT_SUPPORTED;
}
zx_status_t InsntraceDevice::X86PtReleaseThreadBuffer(BufferDescriptor descriptor,
                                                      zx_handle_t thread) {
  // Thread-mode tracing is not implemented yet. Close the handle so it does
  // not leak, then report the operation as unsupported.
  zx_handle_close(thread);
  // TODO(dje): Thread support is still work-in-progress.
  return ZX_ERR_NOT_SUPPORTED;
}
zx_status_t InsntraceDevice::X86PtFreeBuffer(BufferDescriptor descriptor) {
  // Release the trace buffer named by |descriptor|. Tracing must be stopped,
  // the descriptor must refer to an allocated buffer, and the buffer must not
  // currently be assigned to a cpu/thread.
  if (active_) {
    return ZX_ERR_BAD_STATE;
  }
  if (descriptor >= num_traces_) {
    return ZX_ERR_INVALID_ARGS;
  }
  assert(per_trace_state_);
  ipt_per_trace_state_t* state = &per_trace_state_[descriptor];
  if (!state->allocated) {
    return ZX_ERR_INVALID_ARGS;
  }
  if (state->assigned) {
    return ZX_ERR_BAD_STATE;
  }
  X86PtFreeBuffer1(state);
  return ZX_OK;
}
zx_status_t InsntraceDevice::X86PtStageTraceData(zx_handle_t resource,
                                                 BufferDescriptor descriptor) {
  // Copy the saved per-trace register state for |descriptor| into the kernel
  // so the next trace start programs the hardware with this buffer's settings.
  if (descriptor >= num_traces_)
    return ZX_ERR_INVALID_ARGS;
  assert(per_trace_state_);
  const ipt_per_trace_state_t* per_trace = &per_trace_state_[descriptor];
  zx_x86_pt_regs_t regs;
  regs.ctl = per_trace->ctl;
  // Output always goes through ToPA tables; force the ToPA and trace-enable
  // control bits on regardless of what the client configured.
  regs.ctl |= IPT_CTL_TOPA_MASK | IPT_CTL_TRACE_EN_MASK;
  regs.status = per_trace->status;
  regs.output_base = per_trace->output_base;
  regs.output_mask_ptrs = per_trace->output_mask_ptrs;
  regs.cr3_match = per_trace->cr3_match;
  static_assert(sizeof(regs.addr_ranges) == sizeof(per_trace->addr_ranges),
                "addr range size mismatch");
  memcpy(regs.addr_ranges, per_trace->addr_ranges, sizeof(per_trace->addr_ranges));
  return zx_mtrace_control(resource, MTRACE_KIND_INSNTRACE, MTRACE_INSNTRACE_STAGE_TRACE_DATA,
                           descriptor, &regs, sizeof(regs));
}
zx_status_t InsntraceDevice::X86PtGetTraceData(zx_handle_t resource, BufferDescriptor descriptor) {
  // Read back the hardware register state for |descriptor| from the kernel
  // (after tracing stops) and cache it in |per_trace_state_| so later queries
  // (e.g. buffer-state, operational-error checks) can use it.
  if (descriptor >= num_traces_)
    return ZX_ERR_INVALID_ARGS;
  assert(per_trace_state_);
  ipt_per_trace_state_t* per_trace = &per_trace_state_[descriptor];
  zx_x86_pt_regs_t regs;
  zx_status_t status =
      zx_mtrace_control(resource, MTRACE_KIND_INSNTRACE, MTRACE_INSNTRACE_GET_TRACE_DATA,
                        descriptor, &regs, sizeof(regs));
  if (status != ZX_OK)
    return status;
  per_trace->ctl = regs.ctl;
  per_trace->status = regs.status;
  per_trace->output_base = regs.output_base;
  per_trace->output_mask_ptrs = regs.output_mask_ptrs;
  per_trace->cr3_match = regs.cr3_match;
  static_assert(sizeof(per_trace->addr_ranges) == sizeof(regs.addr_ranges),
                "addr range size mismatch");
  memcpy(per_trace->addr_ranges, regs.addr_ranges, sizeof(regs.addr_ranges));
  return ZX_OK;
}
// fidl message handlers
zx_status_t InsntraceDevice::IptInitialize(const fuchsia_insntrace::wire::Allocation* allocation) {
  // Create the trace session: validate the requested mode and trace count,
  // allocate the per-trace bookkeeping array, then ask the kernel to allocate
  // the trace. Fails with ZX_ERR_BAD_STATE if a session already exists.
  if (!ipt_config_supported)
    return ZX_ERR_NOT_SUPPORTED;
  // For now we only support ToPA, though there are no current plans to
  // support anything else.
  if (!ipt_config_output_topa)
    return ZX_ERR_NOT_SUPPORTED;
  // A non-null |per_trace_state_| means a session is already initialized.
  if (per_trace_state_)
    return ZX_ERR_BAD_STATE;
  // TODO(dje): Until thread tracing is supported.
  if (allocation->mode == fuchsia_insntrace::wire::Mode::THREAD)
    return ZX_ERR_NOT_SUPPORTED;
  zx_insntrace_trace_mode_t internal_mode;
  switch (allocation->mode) {
    case fuchsia_insntrace::wire::Mode::CPU:
      internal_mode = IPT_MODE_CPU;
      break;
    case fuchsia_insntrace::wire::Mode::THREAD:
      // Unreachable today (rejected above); kept for when thread mode lands.
      internal_mode = IPT_MODE_THREAD;
      break;
    default:
      return ZX_ERR_INVALID_ARGS;
  }
  if (allocation->num_traces > fuchsia_insntrace::wire::MAX_NUM_TRACES)
    return ZX_ERR_INVALID_ARGS;
  if (internal_mode == IPT_MODE_CPU) {
    // TODO(dje): KISS. No point in allowing anything else for now.
    if (allocation->num_traces != zx_system_get_num_cpus())
      return ZX_ERR_INVALID_ARGS;
  }
  fbl::AllocChecker ac;
  // Value-initialized ({}), so every entry starts with allocated/assigned
  // cleared.
  auto per_trace_state = new (&ac) ipt_per_trace_state_t[allocation->num_traces]{};
  if (!ac.check()) {
    return ZX_ERR_NO_MEMORY;
  }
  per_trace_state_.reset(per_trace_state);
  // Please do not use get_root_resource() in new code. See fxbug.dev/31358.
  zx_handle_t resource = get_root_resource();
  zx_insntrace_trace_config_t config{};
  config.mode = internal_mode;
  config.num_traces = allocation->num_traces;
  zx_status_t status = zx_mtrace_control(resource, MTRACE_KIND_INSNTRACE,
                                         MTRACE_INSNTRACE_ALLOC_TRACE, 0, &config, sizeof(config));
  if (status != ZX_OK) {
    // Roll back the bookkeeping allocation so a later retry starts clean.
    per_trace_state_.reset();
    return status;
  }
  mode_ = internal_mode;
  num_traces_ = allocation->num_traces;
  return ZX_OK;
}
zx_status_t InsntraceDevice::IptTerminate() {
  // Tear down the trace session. Requires tracing stopped and no buffers
  // still assigned; allocated-but-unassigned buffers are freed here.
  if (!per_trace_state_)
    return ZX_ERR_BAD_STATE;
  if (active_)
    return ZX_ERR_BAD_STATE;
  // Don't make any changes until we know it's going to work.
  for (uint32_t i = 0; i < num_traces_; ++i) {
    ipt_per_trace_state_t* per_trace = &per_trace_state_[i];
    if (per_trace->assigned)
      return ZX_ERR_BAD_STATE;
  }
  // All checks passed; free every allocated buffer.
  for (uint32_t i = 0; i < num_traces_; ++i) {
    ipt_per_trace_state_t* per_trace = &per_trace_state_[i];
    if (per_trace->allocated)
      X86PtFreeBuffer1(per_trace);
  }
  // Please do not use get_root_resource() in new code. See fxbug.dev/31358.
  zx_handle_t resource = get_root_resource();
  zx_status_t status = zx_mtrace_control(resource, MTRACE_KIND_INSNTRACE,
                                         MTRACE_INSNTRACE_FREE_TRACE, 0, nullptr, 0);
  // TODO(dje): This really shouldn't fail. What to do?
  // For now flag things as busted and prevent further use.
  // NOTE(review): on failure |per_trace_state_| is deliberately left set, so
  // subsequent Initialize/Terminate calls fail with ZX_ERR_BAD_STATE, yet the
  // caller is told ZX_OK — confirm this is the intended contract.
  if (status != ZX_OK)
    return ZX_OK;
  per_trace_state_.reset();
  return ZX_OK;
}
zx_status_t InsntraceDevice::IptGetAllocation(fuchsia_insntrace::wire::Allocation* out_config) {
  // Report the current session parameters (mode and number of traces).
  // Fails if no session has been initialized.
  if (!per_trace_state_) {
    return ZX_ERR_BAD_STATE;
  }
  if (mode_ == IPT_MODE_CPU) {
    out_config->mode = fuchsia_insntrace::wire::Mode::CPU;
  } else if (mode_ == IPT_MODE_THREAD) {
    out_config->mode = fuchsia_insntrace::wire::Mode::THREAD;
  } else {
    // |mode_| is only ever set from the two values above.
    __UNREACHABLE;
  }
  out_config->num_traces = num_traces_;
  return ZX_OK;
}
zx_status_t InsntraceDevice::IptAllocateBuffer(const BufferConfig* config,
                                               BufferDescriptor* out_descriptor) {
  // Delegate to the x86 implementation once a session exists.
  return per_trace_state_ ? X86PtAllocBuffer(config, out_descriptor) : ZX_ERR_BAD_STATE;
}
zx_status_t InsntraceDevice::IptAssignThreadBuffer(BufferDescriptor descriptor,
                                                   zx_handle_t thread) {
  // Delegate to the x86 implementation once a session exists.
  return per_trace_state_ ? X86PtAssignThreadBuffer(descriptor, thread) : ZX_ERR_BAD_STATE;
}
zx_status_t InsntraceDevice::IptReleaseThreadBuffer(BufferDescriptor descriptor,
                                                    zx_handle_t thread) {
  // Delegate to the x86 implementation once a session exists.
  return per_trace_state_ ? X86PtReleaseThreadBuffer(descriptor, thread) : ZX_ERR_BAD_STATE;
}
zx_status_t InsntraceDevice::IptGetBufferConfig(BufferDescriptor descriptor,
                                                BufferConfig* out_config) {
  // Return the configuration the buffer was allocated with, reconstructed
  // from the cached per-trace state.
  if (!per_trace_state_)
    return ZX_ERR_BAD_STATE;
  if (descriptor >= num_traces_)
    return ZX_ERR_INVALID_ARGS;
  const ipt_per_trace_state_t* per_trace = &per_trace_state_[descriptor];
  if (!per_trace->allocated)
    return ZX_ERR_INVALID_ARGS;
  // Zero the output first so fields we do not set are well-defined.
  *out_config = {};
  out_config->num_chunks = per_trace->num_chunks;
  out_config->chunk_order = per_trace->chunk_order;
  out_config->is_circular = per_trace->is_circular;
  out_config->ctl = per_trace->ctl;
  out_config->address_space_match = per_trace->cr3_match;
  // TODO(dje): insntrace.fidl can't use vectors (yet) so the address ranges
  // are individually spelled out.
  static_assert(fuchsia_insntrace::wire::MAX_NUM_ADDR_RANGES == 2);
  static_assert(fuchsia_insntrace::wire::MAX_NUM_ADDR_RANGES == IPT_MAX_NUM_ADDR_RANGES);
  out_config->address_range_0.start = per_trace->addr_ranges[0].a;
  out_config->address_range_0.end = per_trace->addr_ranges[0].b;
  out_config->address_range_1.start = per_trace->addr_ranges[1].a;
  out_config->address_range_1.end = per_trace->addr_ranges[1].b;
  return ZX_OK;
}
zx_status_t InsntraceDevice::IptGetBufferState(BufferDescriptor descriptor,
                                               BufferState* out_state) {
  // Report how much of the buffer was filled by the last capture.
  if (!per_trace_state_)
    return ZX_ERR_BAD_STATE;
  // In thread-mode we need to get buffer info while tracing is active.
  // In cpu-mode the data is only captured back from the kernel on stop, so
  // reading it while active would return stale values.
  if (mode_ == IPT_MODE_CPU && active_)
    return ZX_ERR_BAD_STATE;
  if (descriptor >= num_traces_)
    return ZX_ERR_INVALID_ARGS;
  const ipt_per_trace_state_t* per_trace = &per_trace_state_[descriptor];
  if (!per_trace->allocated)
    return ZX_ERR_INVALID_ARGS;
  // Note: If this is a circular buffer this is just where tracing stopped.
  *out_state = {};
  out_state->capture_end = ComputeCaptureSize(per_trace);
  return ZX_OK;
}
zx_status_t InsntraceDevice::IptGetChunkHandle(BufferDescriptor descriptor, uint32_t chunk_num,
                                               zx_handle_t* out_handle) {
  // Hand out a read-only-ish duplicate of one chunk's VMO so the client can
  // map and read the captured trace data. The driver keeps the original.
  if (!per_trace_state_)
    return ZX_ERR_BAD_STATE;
  if (descriptor >= num_traces_)
    return ZX_ERR_INVALID_ARGS;
  const ipt_per_trace_state_t* per_trace = &per_trace_state_[descriptor];
  if (!per_trace->allocated)
    return ZX_ERR_INVALID_ARGS;
  if (chunk_num >= per_trace->num_chunks)
    return ZX_ERR_INVALID_ARGS;
  zx_handle_t vmo_handle = per_trace->chunks[chunk_num].vmo_handle;
  zx_info_handle_basic_t handle_info;
  // Fetch the source handle's rights so the duplicate never exceeds them.
  zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
                                          sizeof(handle_info), nullptr, nullptr);
  if (status != ZX_OK) {
    // This could only fail if vmo_handle is invalid.
    printf("%s: WARNING: unexpected error reading vmo handle rights: %d/%s\n", __func__, status,
           zx_status_get_string(status));
    return status;
  }
  // Strip write/duplicate rights: the client may read and map, not modify.
  zx_rights_t allowed_rights = (ZX_RIGHT_TRANSFER | ZX_RIGHT_WAIT | ZX_RIGHT_INSPECT |
                                ZX_RIGHT_GET_PROPERTY | ZX_RIGHT_READ | ZX_RIGHT_MAP);
  return zx_handle_duplicate(vmo_handle, handle_info.rights & allowed_rights, out_handle);
}
zx_status_t InsntraceDevice::IptFreeBuffer(BufferDescriptor descriptor) {
  // Delegate to the x86 implementation once a session exists.
  return per_trace_state_ ? X86PtFreeBuffer(descriptor) : ZX_ERR_BAD_STATE;
}
// Begin tracing.
// This is basically a nop in thread mode, it is still used for thread-mode
// for consistency and in case we some day need it to do something.
zx_status_t InsntraceDevice::IptStart() {
  // Begin collection. In cpu-mode every cpu must have an allocated,
  // not-yet-assigned buffer; buffers are auto-assigned 1:1 to cpus here
  // before the kernel is told to start tracing.
  if (!per_trace_state_)
    return ZX_ERR_BAD_STATE;
  if (active_)
    return ZX_ERR_BAD_STATE;
  if (mode_ != IPT_MODE_CPU)
    return ZX_ERR_BAD_STATE;
  // Please do not use get_root_resource() in new code. See fxbug.dev/31358.
  zx_handle_t resource = get_root_resource();
  zx_status_t status;
  // In cpu-mode, until we support tracing particular cpus, auto-assign
  // buffers to each cpu.
  if (mode_ == IPT_MODE_CPU) {
    // First verify a buffer has been allocated for each cpu,
    // and not yet assigned.
    for (uint32_t cpu = 0; cpu < num_traces_; ++cpu) {
      const ipt_per_trace_state_t* per_trace = &per_trace_state_[cpu];
      if (!per_trace->allocated)
        return ZX_ERR_BAD_STATE;
      if (per_trace->assigned)
        return ZX_ERR_BAD_STATE;
    }
    // Stage each cpu's register state into the kernel and record the
    // assignment locally.
    for (uint32_t cpu = 0; cpu < num_traces_; ++cpu) {
      status = X86PtStageTraceData(resource, cpu);
      if (status != ZX_OK) {
        // TODO(dje): Unstage ones already done.
        return status;
      }
      ipt_per_trace_state_t* per_trace = &per_trace_state_[cpu];
      per_trace->owner.cpu = cpu;
      per_trace->assigned = true;
    }
  }
  status =
      zx_mtrace_control(resource, MTRACE_KIND_INSNTRACE, MTRACE_INSNTRACE_START, 0, nullptr, 0);
  if (status != ZX_OK)
    return status;
  active_ = true;
  return ZX_OK;
}
// Stop tracing.
// In thread-mode all buffers must be released first. That is how we know that
// if we return ZX_OK then all threads are no longer being traced. Otherwise,
// this is basically a nop in thread-mode.
zx_status_t InsntraceDevice::IptStop() {
  // Stop collection, then (in cpu-mode) read back each cpu's final register
  // state and release the per-cpu buffer assignments made in IptStart().
  if (!per_trace_state_)
    return ZX_ERR_BAD_STATE;
  if (!active_)
    return ZX_ERR_BAD_STATE;
  // Please do not use get_root_resource() in new code. See fxbug.dev/31358.
  zx_handle_t resource = get_root_resource();
  zx_status_t status =
      zx_mtrace_control(resource, MTRACE_KIND_INSNTRACE, MTRACE_INSNTRACE_STOP, 0, nullptr, 0);
  if (status != ZX_OK)
    return status;
  active_ = false;
  // Until we support tracing individual cpus, auto-unassign the buffers
  // in cpu-mode.
  if (mode_ == IPT_MODE_CPU) {
    for (uint32_t cpu = 0; cpu < num_traces_; ++cpu) {
      // Capture the cpu's final trace state before unassigning its buffer.
      status = X86PtGetTraceData(resource, cpu);
      if (status != ZX_OK)
        return status;
      ipt_per_trace_state_t* per_trace = &per_trace_state_[cpu];
      per_trace->assigned = false;
      per_trace->owner.cpu = 0;
      // If there was an operational error, report it.
      if (per_trace->status & IPT_STATUS_ERROR_MASK) {
        printf("%s: WARNING: operational error detected on cpu %d\n", __func__, cpu);
      }
    }
  }
  return ZX_OK;
}
// Fidl interface.
void InsntraceDevice::Initialize(fuchsia_insntrace::wire::Allocation allocation,
                                 InitializeCompleter::Sync& completer) {
  // FIDL entry point: forward to IptInitialize and translate the status into
  // a success/error reply.
  const zx_status_t status = IptInitialize(&allocation);
  if (status != ZX_OK) {
    completer.ReplyError(status);
    return;
  }
  completer.ReplySuccess();
}
void InsntraceDevice::Terminate(TerminateCompleter::Sync& completer) {
  // FIDL entry point: forward to IptTerminate and translate the status into
  // a success/error reply.
  const zx_status_t status = IptTerminate();
  if (status != ZX_OK) {
    completer.ReplyError(status);
    return;
  }
  completer.ReplySuccess();
}
void InsntraceDevice::GetAllocation(GetAllocationCompleter::Sync& completer) {
  // FIDL entry point: reply with the current allocation, or a null view when
  // no session exists.
  fuchsia_insntrace::wire::Allocation config{};
  if (IptGetAllocation(&config) == ZX_OK) {
    completer.Reply(fidl::ObjectView<fuchsia_insntrace::wire::Allocation>::FromExternal(&config));
  } else {
    completer.Reply(nullptr);
  }
}
void InsntraceDevice::AllocateBuffer(BufferConfig config,
                                     AllocateBufferCompleter::Sync& completer) {
  // FIDL entry point: allocate a buffer and reply with its descriptor.
  BufferDescriptor descriptor;
  const zx_status_t status = IptAllocateBuffer(&config, &descriptor);
  if (status != ZX_OK) {
    completer.ReplyError(status);
    return;
  }
  completer.ReplySuccess(descriptor);
}
void InsntraceDevice::AssignThreadBuffer(BufferDescriptor descriptor, zx::thread thread,
                                         AssignThreadBufferCompleter::Sync& completer) {
  // FIDL entry point: assign a buffer to a thread.
  // Bug fix: the arguments were previously passed as (thread, descriptor) —
  // swapped. That compiled silently because BufferDescriptor and zx_handle_t
  // are both 32-bit integer types; IptAssignThreadBuffer's signature is
  // (descriptor, thread).
  zx_status_t status = IptAssignThreadBuffer(descriptor, thread.release());
  if (status == ZX_OK) {
    completer.ReplySuccess();
  } else {
    completer.ReplyError(status);
  }
}
void InsntraceDevice::ReleaseThreadBuffer(BufferDescriptor descriptor, zx::thread thread,
                                          ReleaseThreadBufferCompleter::Sync& completer) {
  // FIDL entry point: release a buffer previously assigned to a thread.
  // Bug fix: the arguments were previously passed as (thread, descriptor) —
  // swapped. That compiled silently because BufferDescriptor and zx_handle_t
  // are both 32-bit integer types; IptReleaseThreadBuffer's signature is
  // (descriptor, thread).
  zx_status_t status = IptReleaseThreadBuffer(descriptor, thread.release());
  if (status == ZX_OK) {
    completer.ReplySuccess();
  } else {
    completer.ReplyError(status);
  }
}
void InsntraceDevice::GetBufferConfig(BufferDescriptor descriptor,
                                      GetBufferConfigCompleter::Sync& completer) {
  // FIDL entry point: reply with the buffer's configuration, or a null view
  // on failure.
  BufferConfig config;
  if (IptGetBufferConfig(descriptor, &config) == ZX_OK) {
    completer.Reply(fidl::ObjectView<BufferConfig>::FromExternal(&config));
  } else {
    completer.Reply(nullptr);
  }
}
void InsntraceDevice::GetBufferState(BufferDescriptor descriptor,
                                     GetBufferStateCompleter::Sync& completer) {
  // FIDL entry point: reply with the buffer's capture state, or a null view
  // on failure.
  BufferState state;
  if (IptGetBufferState(descriptor, &state) == ZX_OK) {
    completer.Reply(fidl::ObjectView<BufferState>::FromExternal(&state));
  } else {
    completer.Reply(nullptr);
  }
}
void InsntraceDevice::GetChunkHandle(BufferDescriptor descriptor, uint32_t chunk_num,
                                     GetChunkHandleCompleter::Sync& completer) {
  // FIDL entry point: reply with a duplicated VMO handle for the requested
  // chunk, or an invalid handle on failure.
  zx_handle_t handle;
  if (IptGetChunkHandle(descriptor, chunk_num, &handle) != ZX_OK) {
    handle = ZX_HANDLE_INVALID;
  }
  completer.Reply(zx::vmo(handle));
}
void InsntraceDevice::FreeBuffer(BufferDescriptor descriptor,
                                 FreeBufferCompleter::Sync& completer) {
  // FIDL entry point: free a previously allocated buffer.
  zx_status_t status = IptFreeBuffer(descriptor);
  // NOTE(review): on failure no reply is sent at all — presumably the FIDL
  // method has no error return; confirm against the protocol definition.
  if (status == ZX_OK) {
    completer.Reply();
  }
}
void InsntraceDevice::Start(StartCompleter::Sync& completer) {
  // FIDL entry point: begin tracing.
  zx_status_t status = IptStart();
  // NOTE(review): on failure no reply is sent at all — presumably the FIDL
  // method has no error return; confirm against the protocol definition.
  if (status == ZX_OK) {
    completer.Reply();
  }
}
void InsntraceDevice::Stop(StopCompleter::Sync& completer) {
  // FIDL entry point: stop tracing.
  zx_status_t status = IptStop();
  // NOTE(review): on failure no reply is sent at all — presumably the FIDL
  // method has no error return; confirm against the protocol definition.
  if (status == ZX_OK) {
    completer.Reply();
  }
}
// Devhost interface.
zx_status_t InsntraceDevice::DdkOpen(zx_device_t** dev_out, uint32_t flags) {
  // Enforce exclusive access: only one client may hold the device open.
  if (opened_) {
    return ZX_ERR_ALREADY_BOUND;
  }
  opened_ = true;
  return ZX_OK;
}
zx_status_t InsntraceDevice::DdkClose(uint32_t flags) {
  // Release the exclusive-open claim taken in DdkOpen.
  opened_ = false;
  return ZX_OK;
}
zx_status_t InsntraceDevice::DdkMessage(fidl_incoming_msg_t* msg, fidl_txn_t* txn) {
  // Dispatch an incoming FIDL message. The device-wide |lock_| serializes all
  // message handling, so the individual handlers need no internal locking.
  DdkTransaction transaction(txn);
  mtx_lock(&lock_);
  fidl::WireDispatch<fuchsia_insntrace::Controller>(this, msg, &transaction);
  mtx_unlock(&lock_);
  return transaction.Status();
}
void InsntraceDevice::DdkRelease() {
  // Best-effort teardown before destruction; the return values are ignored
  // because nothing useful can be done with a failure at release time.
  IptStop();
  IptTerminate();
  delete this;
}
} // namespace insntrace
zx_status_t insntrace_bind(void* ctx, zx_device_t* parent) {
  // Driver bind hook: run one-time global init, fetch the platform-device
  // protocol and a BTI handle, then create and publish the insntrace device.
  zx_status_t status = insntrace::InsntraceInitOnce();
  if (status != ZX_OK) {
    return status;
  }
  pdev_protocol_t pdev;
  status = device_get_protocol(parent, ZX_PROTOCOL_PDEV, &pdev);
  if (status != ZX_OK) {
    return status;
  }
  zx::bti bti;
  status = pdev_get_bti(&pdev, 0, bti.reset_and_get_address());
  if (status != ZX_OK) {
    return status;
  }
  fbl::AllocChecker ac;
  auto dev = std::unique_ptr<insntrace::InsntraceDevice>(
      new (&ac) insntrace::InsntraceDevice(parent, std::move(bti)));
  if (!ac.check()) {
    return ZX_ERR_NO_MEMORY;
  }
  status = dev->DdkAdd("insntrace");
  if (status != ZX_OK) {
    zxlogf(ERROR, "%s: could not add device: %d", __func__, status);
  } else {
    // devmgr owns the memory now
    __UNUSED auto ptr = dev.release();
  }
  return status;
}
|
/**
* @copybrief
* MIT License
* Copyright (c) 2020 NeilKleistGao
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/// @file enum.hpp
#ifndef SILHOUETTES_ENUM_HPP
#define SILHOUETTES_ENUM_HPP
#endif //SILHOUETTES_ENUM_HPP
|
/*
* Copyright (C) 1999 Lars Knoll (knoll@kde.org)
* Copyright (C) 2000 Dirk Mueller (mueller@kde.org)
* Copyright (C) 2004, 2006, 2007 Apple Inc. All rights reserved.
* Copyright (C) Research In Motion Limited 2011-2012. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#include "core/layout/LayoutReplaced.h"
#include "core/editing/PositionWithAffinity.h"
#include "core/layout/LayoutAnalyzer.h"
#include "core/layout/LayoutBlock.h"
#include "core/layout/LayoutImage.h"
#include "core/layout/LayoutInline.h"
#include "core/layout/api/LineLayoutBlockFlow.h"
#include "core/paint/PaintInfo.h"
#include "core/paint/PaintLayer.h"
#include "core/paint/ReplacedPainter.h"
#include "platform/LengthFunctions.h"
namespace blink {
// Fallback intrinsic size (in CSS pixels) used when a replaced element has no
// intrinsic dimensions of its own; see the constructors below.
const int LayoutReplaced::defaultWidth = 300;
const int LayoutReplaced::defaultHeight = 150;
// Constructs a replaced box with the default 300x150 intrinsic size.
LayoutReplaced::LayoutReplaced(Element* element)
    : LayoutBox(element)
    , m_intrinsicSize(defaultWidth, defaultHeight)
{
    // TODO(jchaffraix): We should not set this boolean for block-level
    // replaced elements (crbug.com/567964).
    setIsAtomicInlineLevel(true);
}
// Constructs a replaced box whose intrinsic size is already known.
LayoutReplaced::LayoutReplaced(Element* element, const LayoutSize& intrinsicSize)
    : LayoutBox(element)
    , m_intrinsicSize(intrinsicSize)
{
    // TODO(jchaffraix): We should not set this boolean for block-level
    // replaced elements (crbug.com/567964).
    setIsAtomicInlineLevel(true);
}
LayoutReplaced::~LayoutReplaced()
{
    // Intentionally empty; teardown work is done in willBeDestroyed().
}
void LayoutReplaced::willBeDestroyed()
{
    // Dirty the parent's line boxes before detaching — skipped when the whole
    // document is going away, since the lines are being torn down anyway.
    if (!documentBeingDestroyed() && parent())
        parent()->dirtyLinesFromChangedChild(this);
    LayoutBox::willBeDestroyed();
}
void LayoutReplaced::styleDidChange(StyleDifference diff, const ComputedStyle* oldStyle)
{
    LayoutBox::styleDidChange(diff, oldStyle);
    // A zoom change rescales the intrinsic size; treat a missing old style as
    // having the initial zoom.
    float previousZoom = oldStyle ? oldStyle->effectiveZoom() : ComputedStyle::initialZoom();
    if (style() && style()->effectiveZoom() != previousZoom)
        intrinsicSizeChanged();
}
void LayoutReplaced::layout()
{
    ASSERT(needsLayout());
    LayoutAnalyzer::Scope analyzer(*this);
    // Remember where the content was so we can issue a paint invalidation if
    // layout moves or resizes it.
    LayoutRect oldContentRect = replacedContentRect();
    setHeight(minimumReplacedHeight());
    updateLogicalWidth();
    updateLogicalHeight();
    // Rebuild overflow from scratch: clear it, then re-add visual-effect
    // overflow for the new size.
    m_overflow.reset();
    addVisualEffectOverflow();
    updateLayerTransformAfterLayout();
    invalidateBackgroundObscurationStatus();
    clearNeedsLayout();
    if (!RuntimeEnabledFeatures::slimmingPaintV2Enabled() && replacedContentRect() != oldContentRect)
        setShouldDoFullPaintInvalidation();
}
void LayoutReplaced::intrinsicSizeChanged()
{
    // Re-derive the default intrinsic size under the current effective zoom
    // and schedule a full relayout/repaint.
    const float zoom = style()->effectiveZoom();
    m_intrinsicSize = LayoutSize(static_cast<int>(defaultWidth * zoom),
        static_cast<int>(defaultHeight * zoom));
    setNeedsLayoutAndPrefWidthsRecalcAndFullPaintInvalidation(LayoutInvalidationReason::SizeChanged);
}
void LayoutReplaced::paint(const PaintInfo& paintInfo, const LayoutPoint& paintOffset) const
{
    // All painting is delegated to ReplacedPainter.
    ReplacedPainter(*this).paint(paintInfo, paintOffset);
}
bool LayoutReplaced::hasReplacedLogicalHeight() const
{
if (style()->logicalHeight().isAuto())
return false;
if (style()->logicalHeight().isSpecified()) {
if (hasAutoHeightOrContainingBlockWithAutoHeight())
return false;
return true;
}
if (style()->logicalHeight().isIntrinsic())
return true;
return false;
}
bool LayoutReplaced::needsPreferredWidthsRecalculation() const
{
    // If the height is a percentage and the width is auto, a change to the
    // containing block's height can change this node's preferred width,
    // because the element maintains its aspect ratio.
    if (!hasRelativeLogicalHeight())
        return false;
    if (!style()->logicalWidth().isAuto())
        return false;
    return !hasAutoHeightOrContainingBlockWithAutoHeight();
}
// Returns true for the layout object kinds that carry an intrinsic aspect
// ratio: images, canvases, and videos.
static inline bool layoutObjectHasAspectRatio(const LayoutObject* layoutObject)
{
    ASSERT(layoutObject);
    if (layoutObject->isImage())
        return true;
    return layoutObject->isCanvas() || layoutObject->isVideo();
}
void LayoutReplaced::computeIntrinsicSizingInfoForReplacedContent(LayoutReplaced* contentLayoutObject, IntrinsicSizingInfo& intrinsicSizingInfo) const
{
    // Fills |intrinsicSizingInfo| either from an embedded content layout
    // object (e.g. an embedded document) or from this object itself, adjusting
    // for zoom and vertical writing modes. Also refreshes m_intrinsicSize as a
    // side effect (presumably m_intrinsicSize is mutable, since this method is
    // const — confirm in the header).
    if (contentLayoutObject) {
        contentLayoutObject->computeIntrinsicSizingInfo(intrinsicSizingInfo);
        // Handle zoom & vertical writing modes here, as the embedded document doesn't know about them.
        intrinsicSizingInfo.size.scale(style()->effectiveZoom());
        if (isLayoutImage())
            intrinsicSizingInfo.size.scale(toLayoutImage(this)->imageDevicePixelRatio());
        // Update our intrinsic size to match what the content layoutObject has computed, so that when we
        // constrain the size below, the correct intrinsic size will be obtained for comparison against
        // min and max widths.
        if (!intrinsicSizingInfo.aspectRatio.isEmpty() && !intrinsicSizingInfo.size.isEmpty())
            m_intrinsicSize = LayoutSize(intrinsicSizingInfo.size);
        if (!isHorizontalWritingMode())
            intrinsicSizingInfo.transpose();
    } else {
        computeIntrinsicSizingInfo(intrinsicSizingInfo);
        if (!intrinsicSizingInfo.aspectRatio.isEmpty() && !intrinsicSizingInfo.size.isEmpty())
            m_intrinsicSize = LayoutSize(isHorizontalWritingMode() ? intrinsicSizingInfo.size : intrinsicSizingInfo.size.transposedSize());
    }
}
FloatSize LayoutReplaced::constrainIntrinsicSizeToMinMax(const IntrinsicSizingInfo& intrinsicSizingInfo) const
{
    // Constrain the intrinsic size along each axis according to minimum and maximum width/heights along the opposite
    // axis. So for example a maximum width that shrinks our width will result in the height we compute here having
    // to shrink in order to preserve the aspect ratio. Because we compute these values independently along each
    // axis, the final returned size may in fact not preserve the aspect ratio.
    // TODO(davve): Investigate using only the intrinsic aspect ratio here.
    FloatSize constrainedSize = intrinsicSizingInfo.size;
    // Only applies when the element has an aspect ratio and both dimensions
    // are auto; otherwise the intrinsic size passes through unchanged.
    if (!intrinsicSizingInfo.aspectRatio.isEmpty() && !intrinsicSizingInfo.size.isEmpty() && style()->logicalWidth().isAuto() && style()->logicalHeight().isAuto()) {
        // We can't multiply or divide by 'intrinsicSizingInfo.aspectRatio' here, it breaks tests, like fast/images/zoomed-img-size.html, which
        // can only be fixed once subpixel precision is available for things like intrinsicWidth/Height - which include zoom!
        constrainedSize.setWidth(LayoutBox::computeReplacedLogicalHeight() * intrinsicSizingInfo.size.width() / intrinsicSizingInfo.size.height());
        constrainedSize.setHeight(LayoutBox::computeReplacedLogicalWidth() * intrinsicSizingInfo.size.height() / intrinsicSizingInfo.size.width());
    }
    return constrainedSize;
}
void LayoutReplaced::computePositionedLogicalWidth(LogicalExtentComputedValues& computedValues) const
{
// The following is based off of the W3C Working Draft from April 11, 2006 of
// CSS 2.1: Section 10.3.8 "Absolutely positioned, replaced elements"
// <http://www.w3.org/TR/2005/WD-CSS21-20050613/visudet.html#abs-replaced-width>
// (block-style-comments in this function correspond to text from the spec and
// the numbers correspond to numbers in spec)
// We don't use containingBlock(), since we may be positioned by an enclosing
// relative positioned inline.
const LayoutBoxModelObject* containerBlock = toLayoutBoxModelObject(container());
const LayoutUnit containerLogicalWidth = containingBlockLogicalWidthForPositioned(containerBlock);
const LayoutUnit containerRelativeLogicalWidth = containingBlockLogicalWidthForPositioned(containerBlock, false);
// To match WinIE, in quirks mode use the parent's 'direction' property
// instead of the the container block's.
TextDirection containerDirection = containerBlock->style()->direction();
// Variables to solve.
bool isHorizontal = isHorizontalWritingMode();
Length logicalLeft = style()->logicalLeft();
Length logicalRight = style()->logicalRight();
Length marginLogicalLeft = isHorizontal ? style()->marginLeft() : style()->marginTop();
Length marginLogicalRight = isHorizontal ? style()->marginRight() : style()->marginBottom();
LayoutUnit& marginLogicalLeftAlias = style()->isLeftToRightDirection() ? computedValues.m_margins.m_start : computedValues.m_margins.m_end;
LayoutUnit& marginLogicalRightAlias = style()->isLeftToRightDirection() ? computedValues.m_margins.m_end : computedValues.m_margins.m_start;
/*-----------------------------------------------------------------------*\
* 1. The used value of 'width' is determined as for inline replaced
* elements.
\*-----------------------------------------------------------------------*/
// NOTE: This value of width is final in that the min/max width calculations
// are dealt with in computeReplacedWidth(). This means that the steps to produce
// correct max/min in the non-replaced version, are not necessary.
computedValues.m_extent = computeReplacedLogicalWidth() + borderAndPaddingLogicalWidth();
const LayoutUnit availableSpace = containerLogicalWidth - computedValues.m_extent;
/*-----------------------------------------------------------------------*\
* 2. If both 'left' and 'right' have the value 'auto', then if 'direction'
* of the containing block is 'ltr', set 'left' to the static position;
* else if 'direction' is 'rtl', set 'right' to the static position.
\*-----------------------------------------------------------------------*/
// see FIXME 1
computeInlineStaticDistance(logicalLeft, logicalRight, this, containerBlock, containerLogicalWidth);
/*-----------------------------------------------------------------------*\
* 3. If 'left' or 'right' are 'auto', replace any 'auto' on 'margin-left'
* or 'margin-right' with '0'.
\*-----------------------------------------------------------------------*/
if (logicalLeft.isAuto() || logicalRight.isAuto()) {
if (marginLogicalLeft.isAuto())
marginLogicalLeft.setValue(Fixed, 0);
if (marginLogicalRight.isAuto())
marginLogicalRight.setValue(Fixed, 0);
}
/*-----------------------------------------------------------------------*\
* 4. If at this point both 'margin-left' and 'margin-right' are still
* 'auto', solve the equation under the extra constraint that the two
* margins must get equal values, unless this would make them negative,
* in which case when the direction of the containing block is 'ltr'
* ('rtl'), set 'margin-left' ('margin-right') to zero and solve for
* 'margin-right' ('margin-left').
\*-----------------------------------------------------------------------*/
LayoutUnit logicalLeftValue;
LayoutUnit logicalRightValue;
if (marginLogicalLeft.isAuto() && marginLogicalRight.isAuto()) {
// 'left' and 'right' cannot be 'auto' due to step 3
ASSERT(!(logicalLeft.isAuto() && logicalRight.isAuto()));
logicalLeftValue = valueForLength(logicalLeft, containerLogicalWidth);
logicalRightValue = valueForLength(logicalRight, containerLogicalWidth);
LayoutUnit difference = availableSpace - (logicalLeftValue + logicalRightValue);
if (difference > LayoutUnit()) {
marginLogicalLeftAlias = difference / 2; // split the difference
marginLogicalRightAlias = difference - marginLogicalLeftAlias; // account for odd valued differences
} else {
// Use the containing block's direction rather than the parent block's
// per CSS 2.1 reference test abspos-replaced-width-margin-000.
if (containerDirection == LTR) {
marginLogicalLeftAlias = LayoutUnit();
marginLogicalRightAlias = difference; // will be negative
} else {
marginLogicalLeftAlias = difference; // will be negative
marginLogicalRightAlias = LayoutUnit();
}
}
/*-----------------------------------------------------------------------*\
* 5. If at this point there is an 'auto' left, solve the equation for
* that value.
\*-----------------------------------------------------------------------*/
} else if (logicalLeft.isAuto()) {
marginLogicalLeftAlias = valueForLength(marginLogicalLeft, containerRelativeLogicalWidth);
marginLogicalRightAlias = valueForLength(marginLogicalRight, containerRelativeLogicalWidth);
logicalRightValue = valueForLength(logicalRight, containerLogicalWidth);
// Solve for 'left'
logicalLeftValue = availableSpace - (logicalRightValue + marginLogicalLeftAlias + marginLogicalRightAlias);
} else if (logicalRight.isAuto()) {
marginLogicalLeftAlias = valueForLength(marginLogicalLeft, containerRelativeLogicalWidth);
marginLogicalRightAlias = valueForLength(marginLogicalRight, containerRelativeLogicalWidth);
logicalLeftValue = valueForLength(logicalLeft, containerLogicalWidth);
// Solve for 'right'
logicalRightValue = availableSpace - (logicalLeftValue + marginLogicalLeftAlias + marginLogicalRightAlias);
} else if (marginLogicalLeft.isAuto()) {
marginLogicalRightAlias = valueForLength(marginLogicalRight, containerRelativeLogicalWidth);
logicalLeftValue = valueForLength(logicalLeft, containerLogicalWidth);
logicalRightValue = valueForLength(logicalRight, containerLogicalWidth);
// Solve for 'margin-left'
marginLogicalLeftAlias = availableSpace - (logicalLeftValue + logicalRightValue + marginLogicalRightAlias);
} else if (marginLogicalRight.isAuto()) {
marginLogicalLeftAlias = valueForLength(marginLogicalLeft, containerRelativeLogicalWidth);
logicalLeftValue = valueForLength(logicalLeft, containerLogicalWidth);
logicalRightValue = valueForLength(logicalRight, containerLogicalWidth);
// Solve for 'margin-right'
marginLogicalRightAlias = availableSpace - (logicalLeftValue + logicalRightValue + marginLogicalLeftAlias);
} else {
// Nothing is 'auto', just calculate the values.
marginLogicalLeftAlias = valueForLength(marginLogicalLeft, containerRelativeLogicalWidth);
marginLogicalRightAlias = valueForLength(marginLogicalRight, containerRelativeLogicalWidth);
logicalRightValue = valueForLength(logicalRight, containerLogicalWidth);
logicalLeftValue = valueForLength(logicalLeft, containerLogicalWidth);
// If the containing block is right-to-left, then push the left position as far to the right as possible
if (containerDirection == RTL) {
int totalLogicalWidth = computedValues.m_extent + logicalLeftValue + logicalRightValue + marginLogicalLeftAlias + marginLogicalRightAlias;
logicalLeftValue = containerLogicalWidth - (totalLogicalWidth - logicalLeftValue);
}
}
/*-----------------------------------------------------------------------*\
* 6. If at this point the values are over-constrained, ignore the value
* for either 'left' (in case the 'direction' property of the
* containing block is 'rtl') or 'right' (in case 'direction' is
* 'ltr') and solve for that value.
\*-----------------------------------------------------------------------*/
// NOTE: Constraints imposed by the width of the containing block and its content have already been accounted for above.
// FIXME: Deal with differing writing modes here. Our offset needs to be in the containing block's coordinate space, so that
// can make the result here rather complicated to compute.
// Use computed values to calculate the horizontal position.
// FIXME: This hack is needed to calculate the logical left position for a 'rtl' relatively
// positioned, inline containing block because right now, it is using the logical left position
// of the first line box when really it should use the last line box. When
// this is fixed elsewhere, this block should be removed.
if (containerBlock->isLayoutInline() && !containerBlock->style()->isLeftToRightDirection()) {
const LayoutInline* flow = toLayoutInline(containerBlock);
InlineFlowBox* firstLine = flow->firstLineBox();
InlineFlowBox* lastLine = flow->lastLineBox();
if (firstLine && lastLine && firstLine != lastLine) {
computedValues.m_position = logicalLeftValue + marginLogicalLeftAlias + lastLine->borderLogicalLeft() + (lastLine->logicalLeft() - firstLine->logicalLeft());
return;
}
}
LayoutUnit logicalLeftPos = logicalLeftValue + marginLogicalLeftAlias;
computeLogicalLeftPositionedOffset(logicalLeftPos, this, computedValues.m_extent, containerBlock, containerLogicalWidth);
computedValues.m_position = logicalLeftPos;
}
// Computes the block-axis position, extent and before/after margins for an
// absolutely positioned replaced element, storing the results in
// |computedValues|. The numbered comment boxes below follow the resolution
// steps of the spec section cited in the header comment.
void LayoutReplaced::computePositionedLogicalHeight(LogicalExtentComputedValues& computedValues) const
{
    // The following is based off of the W3C Working Draft from April 11, 2006 of
    // CSS 2.1: Section 10.6.5 "Absolutely positioned, replaced elements"
    // <http://www.w3.org/TR/2005/WD-CSS21-20050613/visudet.html#abs-replaced-height>
    // (block-style-comments in this function correspond to text from the spec and
    // the numbers correspond to numbers in spec)

    // We don't use containingBlock(), since we may be positioned by an enclosing relpositioned inline.
    const LayoutBoxModelObject* containerBlock = toLayoutBoxModelObject(container());

    const LayoutUnit containerLogicalHeight = containingBlockLogicalHeightForPositioned(containerBlock);
    const LayoutUnit containerRelativeLogicalWidth = containingBlockLogicalWidthForPositioned(containerBlock, false);

    // Variables to solve. The two margin aliases are references into
    // |computedValues|, so assigning to them writes the result directly.
    Length marginBefore = style()->marginBefore();
    Length marginAfter = style()->marginAfter();
    LayoutUnit& marginBeforeAlias = computedValues.m_margins.m_before;
    LayoutUnit& marginAfterAlias = computedValues.m_margins.m_after;

    Length logicalTop = style()->logicalTop();
    Length logicalBottom = style()->logicalBottom();

    /*-----------------------------------------------------------------------*\
     * 1. The used value of 'height' is determined as for inline replaced
     *    elements.
    \*-----------------------------------------------------------------------*/
    // NOTE: This value of height is final in that the min/max height calculations
    // are dealt with in computeReplacedHeight(). This means that the steps to produce
    // correct max/min in the non-replaced version, are not necessary.
    computedValues.m_extent = computeReplacedLogicalHeight() + borderAndPaddingLogicalHeight();
    const LayoutUnit availableSpace = containerLogicalHeight - computedValues.m_extent;

    /*-----------------------------------------------------------------------*\
     * 2. If both 'top' and 'bottom' have the value 'auto', replace 'top'
     *    with the element's static position.
    \*-----------------------------------------------------------------------*/
    // see FIXME 1
    computeBlockStaticDistance(logicalTop, logicalBottom, this, containerBlock);

    /*-----------------------------------------------------------------------*\
     * 3. If 'bottom' is 'auto', replace any 'auto' on 'margin-top' or
     *    'margin-bottom' with '0'.
    \*-----------------------------------------------------------------------*/
    // FIXME: The spec. says that this step should only be taken when bottom is
    // auto, but if only top is auto, this makes step 4 impossible.
    if (logicalTop.isAuto() || logicalBottom.isAuto()) {
        if (marginBefore.isAuto())
            marginBefore.setValue(Fixed, 0);
        if (marginAfter.isAuto())
            marginAfter.setValue(Fixed, 0);
    }

    /*-----------------------------------------------------------------------*\
     * 4. If at this point both 'margin-top' and 'margin-bottom' are still
     *    'auto', solve the equation under the extra constraint that the two
     *    margins must get equal values.
    \*-----------------------------------------------------------------------*/
    LayoutUnit logicalTopValue;
    LayoutUnit logicalBottomValue;

    if (marginBefore.isAuto() && marginAfter.isAuto()) {
        // 'top' and 'bottom' cannot be 'auto' due to step 2 and 3 combined.
        ASSERT(!(logicalTop.isAuto() || logicalBottom.isAuto()));

        logicalTopValue = valueForLength(logicalTop, containerLogicalHeight);
        logicalBottomValue = valueForLength(logicalBottom, containerLogicalHeight);

        LayoutUnit difference = availableSpace - (logicalTopValue + logicalBottomValue);
        // NOTE: This may result in negative values.
        marginBeforeAlias = difference / 2; // split the difference
        marginAfterAlias = difference - marginBeforeAlias; // account for odd valued differences

    /*-----------------------------------------------------------------------*\
     * 5. If at this point there is only one 'auto' left, solve the equation
     *    for that value.
    \*-----------------------------------------------------------------------*/
    } else if (logicalTop.isAuto()) {
        marginBeforeAlias = valueForLength(marginBefore, containerRelativeLogicalWidth);
        marginAfterAlias = valueForLength(marginAfter, containerRelativeLogicalWidth);
        logicalBottomValue = valueForLength(logicalBottom, containerLogicalHeight);

        // Solve for 'top'
        logicalTopValue = availableSpace - (logicalBottomValue + marginBeforeAlias + marginAfterAlias);
    } else if (logicalBottom.isAuto()) {
        marginBeforeAlias = valueForLength(marginBefore, containerRelativeLogicalWidth);
        marginAfterAlias = valueForLength(marginAfter, containerRelativeLogicalWidth);
        logicalTopValue = valueForLength(logicalTop, containerLogicalHeight);

        // Solve for 'bottom'
        // NOTE: It is not necessary to solve for 'bottom' because we don't ever
        // use the value.
    } else if (marginBefore.isAuto()) {
        marginAfterAlias = valueForLength(marginAfter, containerRelativeLogicalWidth);
        logicalTopValue = valueForLength(logicalTop, containerLogicalHeight);
        logicalBottomValue = valueForLength(logicalBottom, containerLogicalHeight);

        // Solve for 'margin-top'
        marginBeforeAlias = availableSpace - (logicalTopValue + logicalBottomValue + marginAfterAlias);
    } else if (marginAfter.isAuto()) {
        marginBeforeAlias = valueForLength(marginBefore, containerRelativeLogicalWidth);
        logicalTopValue = valueForLength(logicalTop, containerLogicalHeight);
        logicalBottomValue = valueForLength(logicalBottom, containerLogicalHeight);

        // Solve for 'margin-bottom'
        marginAfterAlias = availableSpace - (logicalTopValue + logicalBottomValue + marginBeforeAlias);
    } else {
        // Nothing is 'auto', just calculate the values.
        marginBeforeAlias = valueForLength(marginBefore, containerRelativeLogicalWidth);
        marginAfterAlias = valueForLength(marginAfter, containerRelativeLogicalWidth);
        logicalTopValue = valueForLength(logicalTop, containerLogicalHeight);
        // NOTE: It is not necessary to solve for 'bottom' because we don't ever
        // use the value.
    }

    /*-----------------------------------------------------------------------*\
     * 6. If at this point the values are over-constrained, ignore the value
     *    for 'bottom' and solve for that value.
    \*-----------------------------------------------------------------------*/
    // NOTE: It is not necessary to do this step because we don't end up using
    // the value of 'bottom' regardless of whether the values are over-constrained
    // or not.

    // Use computed values to calculate the vertical position.
    LayoutUnit logicalTopPos = logicalTopValue + marginBeforeAlias;
    computeLogicalTopPositionedOffset(logicalTopPos, this, computedValues.m_extent, containerBlock, containerLogicalHeight);
    computedValues.m_position = logicalTopPos;
}
// Returns the rect, in local coordinates, in which the replaced content should
// be placed after applying 'object-fit' and 'object-position' to the content
// box. |overriddenIntrinsicSize|, when non-null, substitutes for this object's
// own intrinsicSize().
LayoutRect LayoutReplaced::replacedContentRect(const LayoutSize* overriddenIntrinsicSize) const
{
    LayoutRect contentRect = contentBoxRect();
    ObjectFit objectFit = style()->getObjectFit();

    // Fast path: 'object-fit: fill' with the default 'object-position' means
    // the content exactly covers the content box.
    if (objectFit == ObjectFitFill && style()->objectPosition() == ComputedStyle::initialObjectPosition()) {
        return contentRect;
    }

    // TODO(davve): intrinsicSize doubles as both intrinsic size and intrinsic ratio. In the case of
    // SVG images this isn't correct since they can have intrinsic ratio but no intrinsic size. In
    // order to maintain aspect ratio, the intrinsic size for SVG might be faked from the aspect
    // ratio, see SVGImage::containerSize().
    LayoutSize intrinsicSize = overriddenIntrinsicSize ? *overriddenIntrinsicSize : this->intrinsicSize();
    if (!intrinsicSize.width() || !intrinsicSize.height())
        return contentRect;

    LayoutRect finalRect = contentRect;
    switch (objectFit) {
    case ObjectFitContain:
    case ObjectFitScaleDown:
    case ObjectFitCover:
        finalRect.setSize(finalRect.size().fitToAspectRatio(intrinsicSize, objectFit == ObjectFitCover ? AspectRatioFitGrow : AspectRatioFitShrink));
        // 'scale-down' behaves like 'none' when the aspect-ratio fit would be
        // larger than the intrinsic size; in that case only, fall through.
        if (objectFit != ObjectFitScaleDown || finalRect.width() <= intrinsicSize.width())
            break;
        // fall through
    case ObjectFitNone:
        finalRect.setSize(intrinsicSize);
        break;
    case ObjectFitFill:
        break;
    default:
        ASSERT_NOT_REACHED();
    }

    // Position the fitted rect inside the content box per 'object-position'.
    LayoutUnit xOffset = minimumValueForLength(style()->objectPosition().x(), contentRect.width() - finalRect.width());
    LayoutUnit yOffset = minimumValueForLength(style()->objectPosition().y(), contentRect.height() - finalRect.height());
    finalRect.move(xOffset, yOffset);

    return finalRect;
}
// Fills in |intrinsicSizingInfo| with this object's intrinsic logical size,
// and with an intrinsic ratio when one applies.
void LayoutReplaced::computeIntrinsicSizingInfo(IntrinsicSizingInfo& intrinsicSizingInfo) const
{
    // If there's an embeddedReplacedContent() of a remote, referenced document
    // available, this code-path should never be used.
    ASSERT(!embeddedReplacedContent());

    const FloatSize naturalSize(intrinsicLogicalWidth().toFloat(), intrinsicLogicalHeight().toFloat());
    intrinsicSizingInfo.size = naturalSize;

    // Only expose an intrinsic ratio for non-empty sizes on objects that
    // maintain an aspect ratio.
    const bool wantsAspectRatio = !naturalSize.isEmpty() && layoutObjectHasAspectRatio(this);
    if (wantsAspectRatio)
        intrinsicSizingInfo.aspectRatio = naturalSize;
}
// Derives a logical width from a logical height via the intrinsic aspect
// ratio: width = height * (ratio width / ratio height). The multiply happens
// before the divide so rounding matches the original expression.
static inline LayoutUnit resolveWidthForRatio(LayoutUnit logicalHeight, const FloatSize& ratio)
{
    return LayoutUnit(logicalHeight * ratio.width() / ratio.height());
}
// Derives a logical height from a logical width via the intrinsic aspect
// ratio: height = width * (ratio height / ratio width). The multiply happens
// before the divide so rounding matches the original expression.
static inline LayoutUnit resolveHeightForRatio(LayoutUnit logicalWidth, const FloatSize& ratio)
{
    return LayoutUnit(logicalWidth * ratio.height() / ratio.width());
}
// Computes the used logical width from the block-level constraint equation,
// then clamps it against min/max width. Used when the element has an intrinsic
// ratio but no usable intrinsic dimensions.
LayoutUnit LayoutReplaced::computeConstrainedLogicalWidth(ShouldComputePreferred shouldComputePreferred) const
{
    if (shouldComputePreferred == ComputePreferred)
        return computeReplacedLogicalWidthRespectingMinMaxWidth(LayoutUnit(), ComputePreferred);

    // The 'constraint equation' used for block-level, non-replaced elements in normal flow:
    // 'margin-left' + 'border-left-width' + 'padding-left' + 'width' + 'padding-right' + 'border-right-width' + 'margin-right' = width of containing block
    // Solve it for 'width'.
    const LayoutUnit availableWidth = containingBlock()->availableLogicalWidth();
    const LayoutUnit startMargin = minimumValueForLength(style()->marginStart(), availableWidth);
    const LayoutUnit endMargin = minimumValueForLength(style()->marginEnd(), availableWidth);
    // size().width() - clientWidth() is the border plus padding extent.
    const LayoutUnit constrainedWidth = (availableWidth - (startMargin + endMargin + (size().width() - clientWidth()))).clampNegativeToZero();
    return computeReplacedLogicalWidthRespectingMinMaxWidth(constrainedWidth, shouldComputePreferred);
}
// Computes the used logical width of this replaced element per the inline
// replaced width rules, falling back through specified width -> intrinsic
// width -> aspect ratio -> constraint equation -> legacy intrinsic default.
LayoutUnit LayoutReplaced::computeReplacedLogicalWidth(ShouldComputePreferred shouldComputePreferred) const
{
    // A specified or intrinsic-keyword width wins outright; only clamp it.
    if (style()->logicalWidth().isSpecified() || style()->logicalWidth().isIntrinsic())
        return computeReplacedLogicalWidthRespectingMinMaxWidth(computeReplacedLogicalWidthUsing(MainOrPreferredSize, style()->logicalWidth()), shouldComputePreferred);

    LayoutReplaced* contentLayoutObject = embeddedReplacedContent();

    // 10.3.2 Inline, replaced elements: http://www.w3.org/TR/CSS21/visudet.html#inline-replaced-width
    IntrinsicSizingInfo intrinsicSizingInfo;
    computeIntrinsicSizingInfoForReplacedContent(contentLayoutObject, intrinsicSizingInfo);
    FloatSize constrainedSize = constrainIntrinsicSizeToMinMax(intrinsicSizingInfo);

    if (style()->logicalWidth().isAuto()) {
        bool computedHeightIsAuto = style()->logicalHeight().isAuto();

        // If 'height' and 'width' both have computed values of 'auto' and the element also has an intrinsic width, then that intrinsic width is the used value of 'width'.
        if (computedHeightIsAuto && intrinsicSizingInfo.hasWidth)
            return computeReplacedLogicalWidthRespectingMinMaxWidth(LayoutUnit(constrainedSize.width()), shouldComputePreferred);

        if (!intrinsicSizingInfo.aspectRatio.isEmpty()) {
            // If 'height' and 'width' both have computed values of 'auto' and the element has no intrinsic width, but does have an intrinsic height and intrinsic ratio;
            // or if 'width' has a computed value of 'auto', 'height' has some other computed value, and the element does have an intrinsic ratio; then the used value
            // of 'width' is: (used height) * (intrinsic ratio)
            if ((computedHeightIsAuto && !intrinsicSizingInfo.hasWidth && intrinsicSizingInfo.hasHeight) || !computedHeightIsAuto) {
                // First estimate a used width so the height computation has
                // something to resolve against, then derive width from ratio.
                LayoutUnit estimatedUsedWidth = intrinsicSizingInfo.hasWidth ? LayoutUnit(constrainedSize.width()) : computeConstrainedLogicalWidth(shouldComputePreferred);
                LayoutUnit logicalHeight = computeReplacedLogicalHeight(estimatedUsedWidth);
                return computeReplacedLogicalWidthRespectingMinMaxWidth(resolveWidthForRatio(logicalHeight, intrinsicSizingInfo.aspectRatio), shouldComputePreferred);
            }

            // If 'height' and 'width' both have computed values of 'auto' and the element has an intrinsic ratio but no intrinsic height or width, then the used value of
            // 'width' is undefined in CSS 2.1. However, it is suggested that, if the containing block's width does not itself depend on the replaced element's width, then
            // the used value of 'width' is calculated from the constraint equation used for block-level, non-replaced elements in normal flow.
            if (computedHeightIsAuto && !intrinsicSizingInfo.hasWidth && !intrinsicSizingInfo.hasHeight)
                return computeConstrainedLogicalWidth(shouldComputePreferred);
        }

        // Otherwise, if 'width' has a computed value of 'auto', and the element has an intrinsic width, then that intrinsic width is the used value of 'width'.
        if (intrinsicSizingInfo.hasWidth)
            return computeReplacedLogicalWidthRespectingMinMaxWidth(LayoutUnit(constrainedSize.width()), shouldComputePreferred);

        // Otherwise, if 'width' has a computed value of 'auto', but none of the conditions above are met, then the used value of 'width' becomes 300px. If 300px is too
        // wide to fit the device, UAs should use the width of the largest rectangle that has a 2:1 ratio and fits the device instead.
        // Note: We fall through and instead return intrinsicLogicalWidth() here - to preserve existing WebKit behavior, which might or might not be correct, or desired.
        // Changing this to return cDefaultWidth, will affect lots of test results. Eg. some tests assume that a blank <img> tag (which implies width/height=auto)
        // has no intrinsic size, which is wrong per CSS 2.1, but matches our behavior since a long time.
    }

    return computeReplacedLogicalWidthRespectingMinMaxWidth(intrinsicLogicalWidth(), shouldComputePreferred);
}
// Computes the used logical height of this replaced element. When the height
// must be derived from an aspect ratio, |estimatedUsedWidth| (if non-zero)
// supplies the width to resolve against; otherwise availableLogicalWidth()
// is used.
LayoutUnit LayoutReplaced::computeReplacedLogicalHeight(LayoutUnit estimatedUsedWidth) const
{
    // 10.5 Content height: the 'height' property: http://www.w3.org/TR/CSS21/visudet.html#propdef-height
    if (hasReplacedLogicalHeight())
        return computeReplacedLogicalHeightRespectingMinMaxHeight(computeReplacedLogicalHeightUsing(MainOrPreferredSize, style()->logicalHeight()));

    LayoutReplaced* contentLayoutObject = embeddedReplacedContent();

    // 10.6.2 Inline, replaced elements: http://www.w3.org/TR/CSS21/visudet.html#inline-replaced-height
    IntrinsicSizingInfo intrinsicSizingInfo;
    computeIntrinsicSizingInfoForReplacedContent(contentLayoutObject, intrinsicSizingInfo);
    FloatSize constrainedSize = constrainIntrinsicSizeToMinMax(intrinsicSizingInfo);

    bool widthIsAuto = style()->logicalWidth().isAuto();

    // If 'height' and 'width' both have computed values of 'auto' and the element also has an intrinsic height, then that intrinsic height is the used value of 'height'.
    if (widthIsAuto && intrinsicSizingInfo.hasHeight)
        return computeReplacedLogicalHeightRespectingMinMaxHeight(LayoutUnit(constrainedSize.height()));

    // Otherwise, if 'height' has a computed value of 'auto', and the element has an intrinsic ratio then the used value of 'height' is:
    // (used width) / (intrinsic ratio)
    if (!intrinsicSizingInfo.aspectRatio.isEmpty()) {
        LayoutUnit usedWidth = estimatedUsedWidth ? estimatedUsedWidth : availableLogicalWidth();
        return computeReplacedLogicalHeightRespectingMinMaxHeight(resolveHeightForRatio(usedWidth, intrinsicSizingInfo.aspectRatio));
    }

    // Otherwise, if 'height' has a computed value of 'auto', and the element has an intrinsic height, then that intrinsic height is the used value of 'height'.
    if (intrinsicSizingInfo.hasHeight)
        return computeReplacedLogicalHeightRespectingMinMaxHeight(LayoutUnit(constrainedSize.height()));

    // Otherwise, if 'height' has a computed value of 'auto', but none of the conditions above are met, then the used value of 'height' must be set to the height
    // of the largest rectangle that has a 2:1 ratio, has a height not greater than 150px, and has a width not greater than the device width.
    return computeReplacedLogicalHeightRespectingMinMaxHeight(intrinsicLogicalHeight());
}
// For a replaced element the min and max preferred widths coincide: both are
// the intrinsic logical width.
void LayoutReplaced::computeIntrinsicLogicalWidths(LayoutUnit& minLogicalWidth, LayoutUnit& maxLogicalWidth) const
{
    const LayoutUnit intrinsicWidth = intrinsicLogicalWidth();
    minLogicalWidth = intrinsicWidth;
    maxLogicalWidth = intrinsicWidth;
}
void LayoutReplaced::computePreferredLogicalWidths()
{
ASSERT(preferredLogicalWidthsDirty());
// We cannot resolve some logical width here (i.e. percent, fill-available or fit-content)
// as the available logical width may not be set on our containing block.
const Length& logicalWidth = style()->logicalWidth();
if (logicalWidth.hasPercent() || logicalWidth.isFillAvailable() || logicalWidth.isFitContent())
computeIntrinsicLogicalWidths(m_minPreferredLogicalWidth, m_maxPreferredLogicalWidth);
else
m_minPreferredLogicalWidth = m_maxPreferredLogicalWidth = computeReplacedLogicalWidth(ComputePreferred);
const ComputedStyle& styleToUse = styleRef();
if (styleToUse.logicalWidth().hasPercent() || styleToUse.logicalMaxWidth().hasPercent())
m_minPreferredLogicalWidth = LayoutUnit();
if (styleToUse.logicalMinWidth().isFixed() && styleToUse.logicalMinWidth().value() > 0) {
m_maxPreferredLogicalWidth = std::max(m_maxPreferredLogicalWidth, adjustContentBoxLogicalWidthForBoxSizing(styleToUse.logicalMinWidth().value()));
m_minPreferredLogicalWidth = std::max(m_minPreferredLogicalWidth, adjustContentBoxLogicalWidthForBoxSizing(styleToUse.logicalMinWidth().value()));
}
if (styleToUse.logicalMaxWidth().isFixed()) {
m_maxPreferredLogicalWidth = std::min(m_maxPreferredLogicalWidth, adjustContentBoxLogicalWidthForBoxSizing(styleToUse.logicalMaxWidth().value()));
m_minPreferredLogicalWidth = std::min(m_minPreferredLogicalWidth, adjustContentBoxLogicalWidthForBoxSizing(styleToUse.logicalMaxWidth().value()));
}
LayoutUnit borderAndPadding = borderAndPaddingLogicalWidth();
m_minPreferredLogicalWidth += borderAndPadding;
m_maxPreferredLogicalWidth += borderAndPadding;
clearPreferredLogicalWidthsDirty();
}
// Maps a local point to a caret position: before the element when the point
// is above/left of center, after it otherwise.
PositionWithAffinity LayoutReplaced::positionForPoint(const LayoutPoint& point)
{
    // FIXME: This code is buggy if the replaced element is relative positioned.
    InlineBox* wrapper = inlineBoxWrapper();
    RootInlineBox* rootBox = wrapper ? &wrapper->root() : 0;

    LayoutUnit selectionTop = rootBox ? rootBox->selectionTop() : logicalTop();
    LayoutUnit selectionBottom = rootBox ? rootBox->selectionBottom() : logicalBottom();

    const bool horizontal = isHorizontalWritingMode();
    LayoutUnit blockDirectionPosition = horizontal ? point.y() + location().y() : point.x() + location().x();
    LayoutUnit lineDirectionPosition = horizontal ? point.x() + location().x() : point.y() + location().y();

    if (blockDirectionPosition < selectionTop)
        return createPositionWithAffinity(caretMinOffset()); // coordinates are above

    if (blockDirectionPosition >= selectionBottom)
        return createPositionWithAffinity(caretMaxOffset()); // coordinates are below

    if (node()) {
        // In the first (inline-direction) half, the caret goes before the node;
        // otherwise after it.
        const bool inFirstHalf = lineDirectionPosition <= logicalLeft() + (logicalWidth() / 2);
        return createPositionWithAffinity(inFirstHalf ? 0 : 1);
    }

    return LayoutBox::positionForPoint(point);
}
// Returns the selection highlight rect in this object's local coordinates, or
// an empty rect when nothing is selected.
LayoutRect LayoutReplaced::localSelectionRect() const
{
    if (getSelectionState() == SelectionNone)
        return LayoutRect();

    InlineBox* wrapper = inlineBoxWrapper();
    if (!wrapper) {
        // We're a block-level replaced element. Just return our own dimensions.
        return LayoutRect(LayoutPoint(), size());
    }

    RootInlineBox& root = wrapper->root();
    const ComputedStyle* blockStyle = root.block().style();

    // In flipped-blocks writing modes the selection extends from the box's
    // logical bottom; otherwise from the root line's selection top.
    LayoutUnit newLogicalTop = blockStyle->isFlippedBlocksWritingMode()
        ? wrapper->logicalBottom() - root.selectionBottom()
        : root.selectionTop() - wrapper->logicalTop();

    if (blockStyle->isHorizontalWritingMode())
        return LayoutRect(LayoutUnit(), newLogicalTop, size().width(), root.selectionHeight());
    return LayoutRect(newLogicalTop, LayoutUnit(), root.selectionHeight(), size().height());
}
// Updates this object's selection state and refreshes paint-invalidation
// bookkeeping and root-line-box selection flags that depend on it.
void LayoutReplaced::setSelectionState(SelectionState state)
{
    // The selection state for our containing block hierarchy is updated by the base class call.
    LayoutBox::setSelectionState(state);

    // Without an inline box wrapper there is no line box to mark.
    if (!inlineBoxWrapper())
        return;

    // We only include the space below the baseline in our layer's cached paint invalidation rect if the
    // image is selected. Since the selection state has changed update the rect.
    if (hasLayer()) {
        LayoutRect rect = localOverflowRectForPaintInvalidation();
        PaintLayer::mapRectToPaintInvalidationBacking(*this, containerForPaintInvalidation(), rect);
        setPreviousPaintInvalidationRect(rect);
    }

    if (canUpdateSelectionOnRootLineBoxes())
        inlineBoxWrapper()->root().setHasSelectedChildren(state != SelectionNone);
}
// Swaps the inline and block components of the sizing info, keeping the
// has-width/has-height flags consistent with the transposed size and ratio.
void LayoutReplaced::IntrinsicSizingInfo::transpose()
{
    std::swap(hasWidth, hasHeight);
    aspectRatio = aspectRatio.transposedSize();
    size = size.transposedSize();
}
} // namespace blink
|
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "util.h"
#include "clientversion.h"
#include "primitives/transaction.h"
#include "random.h"
#include "sync.h"
#include "utilstrencodings.h"
#include "utilmoneystr.h"
#include "test/test_BTS.h"
#include <stdint.h>
#include <vector>
#include <boost/test/unit_test.hpp>
using namespace std;
BOOST_FIXTURE_TEST_SUITE(util_tests, BasicTestingSetup)
// Verifies that the LOCK/TRY_LOCK macros expand to constructs that do not
// introduce their own loop/switch scope: a `break` inside the locked region
// must exit the surrounding do/while, not be swallowed by the macro expansion.
BOOST_AUTO_TEST_CASE(util_criticalsection)
{
    CCriticalSection cs;

    do {
        LOCK(cs);
        break; // must leave the do/while immediately

        BOOST_ERROR("break was swallowed!");
    } while(0);

    do {
        TRY_LOCK(cs, lockTest);
        if (lockTest)
            break; // must leave the do/while immediately

        BOOST_ERROR("break was swallowed!");
    } while(0);
}
// 65-byte reference blob shared by the ParseHex and HexStr tests below; its
// hex encoding is the long literal string used in those cases.
static const unsigned char ParseHex_expected[65] = {
    0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7,
    0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde,
    0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12,
    0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d,
    0x5f
};
// Tests ParseHex(): round-trips a known vector, tolerates interior and
// leading whitespace, and stops at the first non-hex token.
BOOST_AUTO_TEST_CASE(util_ParseHex)
{
    std::vector<unsigned char> result;
    std::vector<unsigned char> expected(ParseHex_expected, ParseHex_expected + sizeof(ParseHex_expected));
    // Basic test vector
    result = ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f");
    BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());

    // Spaces between bytes must be supported
    result = ParseHex("12 34 56 78");
    BOOST_CHECK(result.size() == 4 && result[0] == 0x12 && result[1] == 0x34 && result[2] == 0x56 && result[3] == 0x78);

    // Leading space must be supported (used in CDBEnv::Salvage)
    result = ParseHex(" 89 34 56 78");
    BOOST_CHECK(result.size() == 4 && result[0] == 0x89 && result[1] == 0x34 && result[2] == 0x56 && result[3] == 0x78);

    // Stop parsing at invalid value
    result = ParseHex("1234 invalid 1234");
    BOOST_CHECK(result.size() == 2 && result[0] == 0x12 && result[1] == 0x34);
}
// Tests HexStr(): iterator-range and container overloads, with and without
// the byte-separating-spaces flag, including the empty-range case.
BOOST_AUTO_TEST_CASE(util_HexStr)
{
    // Full 65-byte vector, no spaces.
    BOOST_CHECK_EQUAL(
        HexStr(ParseHex_expected, ParseHex_expected + sizeof(ParseHex_expected)),
        "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f");

    // First five bytes with spaces between bytes.
    BOOST_CHECK_EQUAL(
        HexStr(ParseHex_expected, ParseHex_expected + 5, true),
        "04 67 8a fd b0");

    // Empty range produces an empty string even with spaces enabled.
    BOOST_CHECK_EQUAL(
        HexStr(ParseHex_expected, ParseHex_expected, true),
        "");

    // Container overload must match the iterator-range overload.
    std::vector<unsigned char> ParseHex_vec(ParseHex_expected, ParseHex_expected + 5);

    BOOST_CHECK_EQUAL(
        HexStr(ParseHex_vec, true),
        "04 67 8a fd b0");
}
// Tests DateTimeStrFormat() against known Unix timestamps, including the
// epoch and the 32-bit signed-time boundary (2038-01-19).
BOOST_AUTO_TEST_CASE(util_DateTimeStrFormat)
{
    BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%d %H:%M:%S", 0), "1970-01-01 00:00:00");
    BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%d %H:%M:%S", 0x7FFFFFFF), "2038-01-19 03:14:07");
    BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%d %H:%M:%S", 1317425777), "2011-09-30 23:36:17");
    BOOST_CHECK_EQUAL(DateTimeStrFormat("%Y-%m-%d %H:%M", 1317425777), "2011-09-30 23:36");
    BOOST_CHECK_EQUAL(DateTimeStrFormat("%a, %d %b %Y %H:%M:%S +0000", 1317425777), "Fri, 30 Sep 2011 23:36:17 +0000");
}
// Tests ParseParameters(): populates the global mapArgs/mapMultiArgs,
// skipping the program name and everything after the first non-option token.
BOOST_AUTO_TEST_CASE(util_ParseParameters)
{
    const char *argv_test[] = {"-ignored", "-a", "-b", "-ccc=argument", "-ccc=multiple", "f", "-d=e"};

    // argc of 0 or 1 leaves both maps empty (only the program name is seen).
    ParseParameters(0, (char**)argv_test);
    BOOST_CHECK(mapArgs.empty() && mapMultiArgs.empty());

    ParseParameters(1, (char**)argv_test);
    BOOST_CHECK(mapArgs.empty() && mapMultiArgs.empty());

    ParseParameters(5, (char**)argv_test);
    // expectation: -ignored is ignored (program name argument),
    // -a, -b and -ccc end up in map, -d ignored because it is after
    // a non-option argument (non-GNU option parsing)
    BOOST_CHECK(mapArgs.size() == 3 && mapMultiArgs.size() == 3);
    BOOST_CHECK(mapArgs.count("-a") && mapArgs.count("-b") && mapArgs.count("-ccc")
                && !mapArgs.count("f") && !mapArgs.count("-d"));
    BOOST_CHECK(mapMultiArgs.count("-a") && mapMultiArgs.count("-b") && mapMultiArgs.count("-ccc")
                && !mapMultiArgs.count("f") && !mapMultiArgs.count("-d"));

    // mapArgs keeps the last value; mapMultiArgs keeps all of them.
    BOOST_CHECK(mapArgs["-a"] == "" && mapArgs["-ccc"] == "multiple");
    BOOST_CHECK(mapMultiArgs["-ccc"].size() == 2);
}
// Tests GetArg()/GetBoolArg(): string, 64-bit integer and boolean lookups in
// mapArgs, including fallback to the supplied default for missing keys.
BOOST_AUTO_TEST_CASE(util_GetArg)
{
    mapArgs.clear();
    mapArgs["strtest1"] = "string...";
    // strtest2 undefined on purpose
    mapArgs["inttest1"] = "12345";
    mapArgs["inttest2"] = "81985529216486895";
    // inttest3 undefined on purpose
    mapArgs["booltest1"] = "";
    // booltest2 undefined on purpose
    mapArgs["booltest3"] = "0";
    mapArgs["booltest4"] = "1";

    BOOST_CHECK_EQUAL(GetArg("strtest1", "default"), "string...");
    BOOST_CHECK_EQUAL(GetArg("strtest2", "default"), "default");
    BOOST_CHECK_EQUAL(GetArg("inttest1", -1), 12345);
    BOOST_CHECK_EQUAL(GetArg("inttest2", -1), 81985529216486895LL);
    BOOST_CHECK_EQUAL(GetArg("inttest3", -1), -1);
    // An argument present with an empty value counts as "true".
    BOOST_CHECK_EQUAL(GetBoolArg("booltest1", false), true);
    BOOST_CHECK_EQUAL(GetBoolArg("booltest2", false), false);
    BOOST_CHECK_EQUAL(GetBoolArg("booltest3", false), false);
    BOOST_CHECK_EQUAL(GetBoolArg("booltest4", false), true);
}
// Tests FormatMoney(): coin amounts render with at least two decimals, with
// trailing precision extended only as far as needed down to one satoshi.
BOOST_AUTO_TEST_CASE(util_FormatMoney)
{
    BOOST_CHECK_EQUAL(FormatMoney(0), "0.00");
    BOOST_CHECK_EQUAL(FormatMoney((COIN/10000)*123456789), "12345.6789");
    BOOST_CHECK_EQUAL(FormatMoney(-COIN), "-1.00");

    // Whole-coin multiples, descending powers of ten.
    BOOST_CHECK_EQUAL(FormatMoney(COIN*100000000), "100000000.00");
    BOOST_CHECK_EQUAL(FormatMoney(COIN*10000000), "10000000.00");
    BOOST_CHECK_EQUAL(FormatMoney(COIN*1000000), "1000000.00");
    BOOST_CHECK_EQUAL(FormatMoney(COIN*100000), "100000.00");
    BOOST_CHECK_EQUAL(FormatMoney(COIN*10000), "10000.00");
    BOOST_CHECK_EQUAL(FormatMoney(COIN*1000), "1000.00");
    BOOST_CHECK_EQUAL(FormatMoney(COIN*100), "100.00");
    BOOST_CHECK_EQUAL(FormatMoney(COIN*10), "10.00");
    BOOST_CHECK_EQUAL(FormatMoney(COIN), "1.00");

    // Sub-coin fractions, descending powers of ten down to one satoshi.
    BOOST_CHECK_EQUAL(FormatMoney(COIN/10), "0.10");
    BOOST_CHECK_EQUAL(FormatMoney(COIN/100), "0.01");
    BOOST_CHECK_EQUAL(FormatMoney(COIN/1000), "0.001");
    BOOST_CHECK_EQUAL(FormatMoney(COIN/10000), "0.0001");
    BOOST_CHECK_EQUAL(FormatMoney(COIN/100000), "0.00001");
    BOOST_CHECK_EQUAL(FormatMoney(COIN/1000000), "0.000001");
    BOOST_CHECK_EQUAL(FormatMoney(COIN/10000000), "0.0000001");
    BOOST_CHECK_EQUAL(FormatMoney(COIN/100000000), "0.00000001");
}
// Tests ParseMoney(): the inverse of FormatMoney over the same value range,
// plus rejection of 63-bit overflow and negative amounts.
BOOST_AUTO_TEST_CASE(util_ParseMoney)
{
    CAmount ret = 0;
    BOOST_CHECK(ParseMoney("0.0", ret));
    BOOST_CHECK_EQUAL(ret, 0);

    BOOST_CHECK(ParseMoney("12345.6789", ret));
    BOOST_CHECK_EQUAL(ret, (COIN/10000)*123456789);

    // Whole-coin multiples, descending powers of ten.
    BOOST_CHECK(ParseMoney("100000000.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN*100000000);
    BOOST_CHECK(ParseMoney("10000000.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN*10000000);
    BOOST_CHECK(ParseMoney("1000000.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN*1000000);
    BOOST_CHECK(ParseMoney("100000.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN*100000);
    BOOST_CHECK(ParseMoney("10000.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN*10000);
    BOOST_CHECK(ParseMoney("1000.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN*1000);
    BOOST_CHECK(ParseMoney("100.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN*100);
    BOOST_CHECK(ParseMoney("10.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN*10);
    BOOST_CHECK(ParseMoney("1.00", ret));
    BOOST_CHECK_EQUAL(ret, COIN);
    BOOST_CHECK(ParseMoney("1", ret));
    BOOST_CHECK_EQUAL(ret, COIN);

    // Sub-coin fractions, descending powers of ten down to one satoshi.
    BOOST_CHECK(ParseMoney("0.1", ret));
    BOOST_CHECK_EQUAL(ret, COIN/10);
    BOOST_CHECK(ParseMoney("0.01", ret));
    BOOST_CHECK_EQUAL(ret, COIN/100);
    BOOST_CHECK(ParseMoney("0.001", ret));
    BOOST_CHECK_EQUAL(ret, COIN/1000);
    BOOST_CHECK(ParseMoney("0.0001", ret));
    BOOST_CHECK_EQUAL(ret, COIN/10000);
    BOOST_CHECK(ParseMoney("0.00001", ret));
    BOOST_CHECK_EQUAL(ret, COIN/100000);
    BOOST_CHECK(ParseMoney("0.000001", ret));
    BOOST_CHECK_EQUAL(ret, COIN/1000000);
    BOOST_CHECK(ParseMoney("0.0000001", ret));
    BOOST_CHECK_EQUAL(ret, COIN/10000000);
    BOOST_CHECK(ParseMoney("0.00000001", ret));
    BOOST_CHECK_EQUAL(ret, COIN/100000000);

    // Attempted 63 bit overflow should fail
    BOOST_CHECK(!ParseMoney("92233720368.54775808", ret));

    // Parsing negative amounts must fail
    BOOST_CHECK(!ParseMoney("-1", ret));
}
// Tests IsHex(): accepts non-empty even-length hex strings of either case,
// rejects empty, odd-length, non-hex and "0x"-prefixed input.
BOOST_AUTO_TEST_CASE(util_IsHex)
{
    BOOST_CHECK(IsHex("00"));
    BOOST_CHECK(IsHex("00112233445566778899aabbccddeeffAABBCCDDEEFF"));
    BOOST_CHECK(IsHex("ff"));
    BOOST_CHECK(IsHex("FF"));

    BOOST_CHECK(!IsHex(""));
    BOOST_CHECK(!IsHex("0"));  // odd length
    BOOST_CHECK(!IsHex("a"));  // odd length
    BOOST_CHECK(!IsHex("eleven"));
    BOOST_CHECK(!IsHex("00xx00"));
    BOOST_CHECK(!IsHex("0x0000")); // "0x" prefix is not accepted
}
// Statistical smoke test for insecure_rand(): for each modulus in [2,10],
// draw 10000 uniform values in [0,mod) via rejection sampling and check the
// frequency of zero lies within a rough binomial confidence interval.
BOOST_AUTO_TEST_CASE(util_seed_insecure_rand)
{
    int i;
    int count=0;

    seed_insecure_rand(true); // deterministic seed so the test is reproducible

    for (int mod=2;mod<11;mod++)
    {
        int mask = 1;
        // Really rough binomial confidence approximation (~30 standard
        // deviations around the expected count of 10000/mod).
        int err = 30*10000./mod*sqrt((1./mod*(1-1./mod))/10000.);
        //mask is 2^ceil(log2(mod))-1
        while(mask<mod-1)mask=(mask<<1)+1;

        count = 0;
        //How often does it get a zero from the uniform range [0,mod)?
        for (i=0;i<10000;i++)
        {
            uint32_t rval;
            // Rejection sampling: mask down to the next power of two, retry
            // until the value falls inside [0,mod).
            do{
                rval=insecure_rand()&mask;
            }while(rval>=(uint32_t)mod);
            count += rval==0;
        }
        BOOST_CHECK(count<=10000/mod+err);
        BOOST_CHECK(count>=10000/mod-err);
    }
}
BOOST_AUTO_TEST_CASE(util_TimingResistantEqual)
{
    // Identical content compares equal, including the empty string.
    BOOST_CHECK(TimingResistantEqual(std::string(), std::string()));
    BOOST_CHECK(TimingResistantEqual(std::string("abc"), std::string("abc")));
    // Any length mismatch is rejected.
    BOOST_CHECK(!TimingResistantEqual(std::string("abc"), std::string()));
    BOOST_CHECK(!TimingResistantEqual(std::string(), std::string("abc")));
    BOOST_CHECK(!TimingResistantEqual(std::string("a"), std::string("aa")));
    BOOST_CHECK(!TimingResistantEqual(std::string("aa"), std::string("a")));
    // Same length but different content is rejected too.
    BOOST_CHECK(!TimingResistantEqual(std::string("abc"), std::string("aba")));
}
/* Test strprintf formatting directives.
 * Put a string before and after to ensure sanity of element sizes on stack. */
#define B "check_prefix"
#define E "check_postfix"
BOOST_AUTO_TEST_CASE(strprintf_numbers)
{
    // 64-bit types must round-trip through %d/%u/%x without truncation.
    int64_t s64t = -9223372036854775807LL; /* signed 64 bit test value */
    uint64_t u64t = 18446744073709551615ULL; /* unsigned 64 bit test value */
    BOOST_CHECK(strprintf("%s %d %s", B, s64t, E) == B" -9223372036854775807 " E);
    BOOST_CHECK(strprintf("%s %u %s", B, u64t, E) == B" 18446744073709551615 " E);
    BOOST_CHECK(strprintf("%s %x %s", B, u64t, E) == B" ffffffffffffffff " E);
    // size_t / ssize_t formatting.
    size_t st = 12345678; /* unsigned size_t test value */
    ssize_t sst = -12345678; /* signed size_t test value */
    BOOST_CHECK(strprintf("%s %d %s", B, sst, E) == B" -12345678 " E);
    BOOST_CHECK(strprintf("%s %u %s", B, st, E) == B" 12345678 " E);
    BOOST_CHECK(strprintf("%s %x %s", B, st, E) == B" bc614e " E);
    // ptrdiff_t formatting.
    ptrdiff_t pt = 87654321; /* positive ptrdiff_t test value */
    ptrdiff_t spt = -87654321; /* negative ptrdiff_t test value */
    BOOST_CHECK(strprintf("%s %d %s", B, spt, E) == B" -87654321 " E);
    BOOST_CHECK(strprintf("%s %u %s", B, pt, E) == B" 87654321 " E);
    BOOST_CHECK(strprintf("%s %x %s", B, pt, E) == B" 5397fb1 " E);
}
#undef B
#undef E
/* Check for mingw/wine issue #3494
 * Remove this test before time.ctime(0xffffffff) == 'Sun Feb 7 07:28:15 2106'
 */
BOOST_AUTO_TEST_CASE(gettime)
{
    // The current time must fit in the low 32 bits (upper bits all zero).
    const int64_t now = GetTime();
    BOOST_CHECK((now & ~0xFFFFFFFFLL) == 0);
}
BOOST_AUTO_TEST_CASE(test_ParseInt32)
{
    int32_t n;
    // Valid values (a NULL out-pointer only checks parseability)
    BOOST_CHECK(ParseInt32("1234", NULL));
    BOOST_CHECK(ParseInt32("0", &n) && n == 0);
    BOOST_CHECK(ParseInt32("1234", &n) && n == 1234);
    BOOST_CHECK(ParseInt32("01234", &n) && n == 1234); // no octal
    BOOST_CHECK(ParseInt32("2147483647", &n) && n == 2147483647);
    BOOST_CHECK(ParseInt32("-2147483648", &n) && n == -2147483648);
    BOOST_CHECK(ParseInt32("-1234", &n) && n == -1234);
    // Invalid values
    BOOST_CHECK(!ParseInt32("", &n));
    BOOST_CHECK(!ParseInt32(" 1", &n)); // no padding inside
    BOOST_CHECK(!ParseInt32("1 ", &n));
    BOOST_CHECK(!ParseInt32("1a", &n));
    BOOST_CHECK(!ParseInt32("aap", &n));
    BOOST_CHECK(!ParseInt32("0x1", &n)); // no hex (was checked twice; duplicate removed)
    const char test_bytes[] = {'1', 0, '1'};
    std::string teststr(test_bytes, sizeof(test_bytes));
    BOOST_CHECK(!ParseInt32(teststr, &n)); // no embedded NULs
    // Overflow and underflow
    BOOST_CHECK(!ParseInt32("-2147483649", NULL));
    BOOST_CHECK(!ParseInt32("2147483648", NULL));
    BOOST_CHECK(!ParseInt32("-32482348723847471234", NULL));
    BOOST_CHECK(!ParseInt32("32482348723847471234", NULL));
}
BOOST_AUTO_TEST_CASE(test_ParseInt64)
{
    int64_t n;
    // Valid values (a NULL out-pointer only checks parseability)
    BOOST_CHECK(ParseInt64("1234", NULL));
    BOOST_CHECK(ParseInt64("0", &n) && n == 0LL);
    BOOST_CHECK(ParseInt64("1234", &n) && n == 1234LL);
    BOOST_CHECK(ParseInt64("01234", &n) && n == 1234LL); // no octal
    BOOST_CHECK(ParseInt64("2147483647", &n) && n == 2147483647LL);
    BOOST_CHECK(ParseInt64("-2147483648", &n) && n == -2147483648LL);
    BOOST_CHECK(ParseInt64("9223372036854775807", &n) && n == (int64_t)9223372036854775807);
    // INT64_MIN is spelled as -(INT64_MAX)-1 because the literal
    // 9223372036854775808 would not fit in a signed 64-bit constant.
    BOOST_CHECK(ParseInt64("-9223372036854775808", &n) && n == (int64_t)-9223372036854775807-1);
    BOOST_CHECK(ParseInt64("-1234", &n) && n == -1234LL);
    // Invalid values
    BOOST_CHECK(!ParseInt64("", &n));
    BOOST_CHECK(!ParseInt64(" 1", &n)); // no padding inside
    BOOST_CHECK(!ParseInt64("1 ", &n));
    BOOST_CHECK(!ParseInt64("1a", &n));
    BOOST_CHECK(!ParseInt64("aap", &n));
    BOOST_CHECK(!ParseInt64("0x1", &n)); // no hex
    const char test_bytes[] = {'1', 0, '1'};
    std::string teststr(test_bytes, sizeof(test_bytes));
    BOOST_CHECK(!ParseInt64(teststr, &n)); // no embedded NULs
    // Overflow and underflow
    BOOST_CHECK(!ParseInt64("-9223372036854775809", NULL));
    BOOST_CHECK(!ParseInt64("9223372036854775808", NULL));
    BOOST_CHECK(!ParseInt64("-32482348723847471234", NULL));
    BOOST_CHECK(!ParseInt64("32482348723847471234", NULL));
}
BOOST_AUTO_TEST_CASE(test_ParseUInt32)
{
    uint32_t n;
    // Valid values (a NULL out-pointer only checks parseability)
    BOOST_CHECK(ParseUInt32("1234", NULL));
    BOOST_CHECK(ParseUInt32("0", &n) && n == 0);
    BOOST_CHECK(ParseUInt32("1234", &n) && n == 1234);
    BOOST_CHECK(ParseUInt32("01234", &n) && n == 1234); // no octal
    BOOST_CHECK(ParseUInt32("2147483647", &n) && n == 2147483647);
    BOOST_CHECK(ParseUInt32("2147483648", &n) && n == (uint32_t)2147483648);
    BOOST_CHECK(ParseUInt32("4294967295", &n) && n == (uint32_t)4294967295);
    // Invalid values
    BOOST_CHECK(!ParseUInt32("", &n));
    BOOST_CHECK(!ParseUInt32(" 1", &n)); // no padding inside
    BOOST_CHECK(!ParseUInt32(" -1", &n));
    BOOST_CHECK(!ParseUInt32("1 ", &n));
    BOOST_CHECK(!ParseUInt32("1a", &n));
    BOOST_CHECK(!ParseUInt32("aap", &n));
    BOOST_CHECK(!ParseUInt32("0x1", &n)); // no hex (was checked twice; duplicate removed)
    const char test_bytes[] = {'1', 0, '1'};
    std::string teststr(test_bytes, sizeof(test_bytes));
    BOOST_CHECK(!ParseUInt32(teststr, &n)); // no embedded NULs
    // Overflow and underflow; negative numbers never wrap around
    BOOST_CHECK(!ParseUInt32("-2147483648", &n));
    BOOST_CHECK(!ParseUInt32("4294967296", &n));
    BOOST_CHECK(!ParseUInt32("-1234", &n));
    BOOST_CHECK(!ParseUInt32("-32482348723847471234", NULL));
    BOOST_CHECK(!ParseUInt32("32482348723847471234", NULL));
}
BOOST_AUTO_TEST_CASE(test_ParseUInt64)
{
    uint64_t n;
    // Valid values (a NULL out-pointer only checks parseability)
    BOOST_CHECK(ParseUInt64("1234", NULL));
    BOOST_CHECK(ParseUInt64("0", &n) && n == 0LL);
    BOOST_CHECK(ParseUInt64("1234", &n) && n == 1234LL);
    BOOST_CHECK(ParseUInt64("01234", &n) && n == 1234LL); // no octal
    BOOST_CHECK(ParseUInt64("2147483647", &n) && n == 2147483647LL);
    BOOST_CHECK(ParseUInt64("9223372036854775807", &n) && n == 9223372036854775807ULL);
    // Values above INT64_MAX up to UINT64_MAX are valid for the unsigned parser.
    BOOST_CHECK(ParseUInt64("9223372036854775808", &n) && n == 9223372036854775808ULL);
    BOOST_CHECK(ParseUInt64("18446744073709551615", &n) && n == 18446744073709551615ULL);
    // Invalid values
    BOOST_CHECK(!ParseUInt64("", &n));
    BOOST_CHECK(!ParseUInt64(" 1", &n)); // no padding inside
    BOOST_CHECK(!ParseUInt64(" -1", &n));
    BOOST_CHECK(!ParseUInt64("1 ", &n));
    BOOST_CHECK(!ParseUInt64("1a", &n));
    BOOST_CHECK(!ParseUInt64("aap", &n));
    BOOST_CHECK(!ParseUInt64("0x1", &n)); // no hex
    const char test_bytes[] = {'1', 0, '1'};
    std::string teststr(test_bytes, sizeof(test_bytes));
    BOOST_CHECK(!ParseUInt64(teststr, &n)); // no embedded NULs
    // Overflow and underflow; negative input must never wrap to a large value
    BOOST_CHECK(!ParseUInt64("-9223372036854775809", NULL));
    BOOST_CHECK(!ParseUInt64("18446744073709551616", NULL));
    BOOST_CHECK(!ParseUInt64("-32482348723847471234", NULL));
    BOOST_CHECK(!ParseUInt64("-2147483648", &n));
    BOOST_CHECK(!ParseUInt64("-9223372036854775808", &n));
    BOOST_CHECK(!ParseUInt64("-1234", &n));
}
BOOST_AUTO_TEST_CASE(test_ParseDouble)
{
    double n;
    // Valid values (a NULL out-pointer only checks parseability)
    BOOST_CHECK(ParseDouble("1234", NULL));
    BOOST_CHECK(ParseDouble("0", &n) && n == 0.0);
    BOOST_CHECK(ParseDouble("1234", &n) && n == 1234.0);
    BOOST_CHECK(ParseDouble("01234", &n) && n == 1234.0); // no octal
    BOOST_CHECK(ParseDouble("2147483647", &n) && n == 2147483647.0);
    BOOST_CHECK(ParseDouble("-2147483648", &n) && n == -2147483648.0);
    BOOST_CHECK(ParseDouble("-1234", &n) && n == -1234.0);
    // Scientific notation is accepted.
    BOOST_CHECK(ParseDouble("1e6", &n) && n == 1e6);
    BOOST_CHECK(ParseDouble("-1e6", &n) && n == -1e6);
    // Invalid values
    BOOST_CHECK(!ParseDouble("", &n));
    BOOST_CHECK(!ParseDouble(" 1", &n)); // no padding inside
    BOOST_CHECK(!ParseDouble("1 ", &n));
    BOOST_CHECK(!ParseDouble("1a", &n));
    BOOST_CHECK(!ParseDouble("aap", &n));
    BOOST_CHECK(!ParseDouble("0x1", &n)); // no hex
    const char test_bytes[] = {'1', 0, '1'};
    std::string teststr(test_bytes, sizeof(test_bytes));
    BOOST_CHECK(!ParseDouble(teststr, &n)); // no embedded NULs
    // Overflow and underflow: exponents outside double range must fail
    // rather than return +/-inf.
    BOOST_CHECK(!ParseDouble("-1e10000", NULL));
    BOOST_CHECK(!ParseDouble("1e10000", NULL));
}
BOOST_AUTO_TEST_CASE(test_FormatParagraph)
{
    // FormatParagraph(text, width, indent): word-wraps to the given width,
    // indenting continuation lines by the given indent.
    BOOST_CHECK_EQUAL(FormatParagraph("", 79, 0), "");
    BOOST_CHECK_EQUAL(FormatParagraph("test", 79, 0), "test");
    BOOST_CHECK_EQUAL(FormatParagraph(" test", 79, 0), " test");
    BOOST_CHECK_EQUAL(FormatParagraph("test test", 79, 0), "test test");
    BOOST_CHECK_EQUAL(FormatParagraph("test test", 4, 0), "test\ntest");
    // A single word longer than the width is not broken.
    BOOST_CHECK_EQUAL(FormatParagraph("testerde test", 4, 0), "testerde\ntest");
    BOOST_CHECK_EQUAL(FormatParagraph("test test", 4, 4), "test\n test");
    // Make sure we don't indent a fully-new line following a too-long line ending
    BOOST_CHECK_EQUAL(FormatParagraph("test test\nabc", 4, 4), "test\n test\nabc");
    BOOST_CHECK_EQUAL(FormatParagraph("This_is_a_very_long_test_string_without_any_spaces_so_it_should_just_get_returned_as_is_despite_the_length until it gets here", 79), "This_is_a_very_long_test_string_without_any_spaces_so_it_should_just_get_returned_as_is_despite_the_length\nuntil it gets here");
    // Test wrap length is exact
    BOOST_CHECK_EQUAL(FormatParagraph("a b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4 5 6 7 8 9 a b c de f g h i j k l m n o p", 79), "a b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4 5 6 7 8 9 a b c de\nf g h i j k l m n o p");
    BOOST_CHECK_EQUAL(FormatParagraph("x\na b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4 5 6 7 8 9 a b c de f g h i j k l m n o p", 79), "x\na b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4 5 6 7 8 9 a b c de\nf g h i j k l m n o p");
    // Indent should be included in length of lines
    BOOST_CHECK_EQUAL(FormatParagraph("x\na b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4 5 6 7 8 9 a b c de f g h i j k l m n o p q r s t u v w x y z 0 1 2 3 4 5 6 7 8 9 a b c d e fg h i j k", 79, 4), "x\na b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4 5 6 7 8 9 a b c de\n f g h i j k l m n o p q r s t u v w x y z 0 1 2 3 4 5 6 7 8 9 a b c d e fg\n h i j k");
    BOOST_CHECK_EQUAL(FormatParagraph("This is a very long test string. This is a second sentence in the very long test string.", 79), "This is a very long test string. This is a second sentence in the very long\ntest string.");
    BOOST_CHECK_EQUAL(FormatParagraph("This is a very long test string.\nThis is a second sentence in the very long test string. This is a third sentence in the very long test string.", 79), "This is a very long test string.\nThis is a second sentence in the very long test string. This is a third\nsentence in the very long test string.");
    BOOST_CHECK_EQUAL(FormatParagraph("This is a very long test string.\n\nThis is a second sentence in the very long test string. This is a third sentence in the very long test string.", 79), "This is a very long test string.\n\nThis is a second sentence in the very long test string. This is a third\nsentence in the very long test string.");
    BOOST_CHECK_EQUAL(FormatParagraph("Testing that normal newlines do not get indented.\nLike here.", 79), "Testing that normal newlines do not get indented.\nLike here.");
}
BOOST_AUTO_TEST_CASE(test_FormatSubVersion)
{
    // FormatSubVersion builds the BIP-0014 user-agent string
    // "/Name:major.minor.patch(comment1; comment2)/" from a version number
    // and an optional comment list.
    std::vector<std::string> comments;
    comments.push_back(std::string("comment1"));
    std::vector<std::string> comments2;
    comments2.push_back(std::string("comment1"));
    comments2.push_back(SanitizeString(std::string("Comment2; .,_?@-; !\"#$%&'()*+/<=>[]\\^`{|}~"), SAFE_CHARS_UA_COMMENT)); // Semicolon is discouraged but not forbidden by BIP-0014
    BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, std::vector<std::string>()),std::string("/Test:0.9.99/"));
    BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, comments),std::string("/Test:0.9.99(comment1)/"));
    BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, comments2),std::string("/Test:0.9.99(comment1; Comment2; .,_?@-; )/"));
}
BOOST_AUTO_TEST_CASE(test_ParseFixedPoint)
{
    // ParseFixedPoint(str, decimals, out): parses a decimal string into an
    // integer scaled by 10^decimals (8 here, i.e. satoshi-style amounts).
    int64_t amount = 0;
    // Valid values: plain integers, fractions, negatives and exponents.
    BOOST_CHECK(ParseFixedPoint("0", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 0LL);
    BOOST_CHECK(ParseFixedPoint("1", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 100000000LL);
    BOOST_CHECK(ParseFixedPoint("0.0", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 0LL);
    BOOST_CHECK(ParseFixedPoint("-0.1", 8, &amount));
    BOOST_CHECK_EQUAL(amount, -10000000LL);
    BOOST_CHECK(ParseFixedPoint("1.1", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 110000000LL);
    // Trailing zeros beyond 8 decimals are allowed as long as they are zero.
    BOOST_CHECK(ParseFixedPoint("1.10000000000000000", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 110000000LL);
    BOOST_CHECK(ParseFixedPoint("1.1e1", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 1100000000LL);
    BOOST_CHECK(ParseFixedPoint("1.1e-1", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 11000000LL);
    BOOST_CHECK(ParseFixedPoint("1000", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 100000000000LL);
    BOOST_CHECK(ParseFixedPoint("-1000", 8, &amount));
    BOOST_CHECK_EQUAL(amount, -100000000000LL);
    BOOST_CHECK(ParseFixedPoint("0.00000001", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 1LL);
    BOOST_CHECK(ParseFixedPoint("0.0000000100000000", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 1LL);
    BOOST_CHECK(ParseFixedPoint("-0.00000001", 8, &amount));
    BOOST_CHECK_EQUAL(amount, -1LL);
    BOOST_CHECK(ParseFixedPoint("1000000000.00000001", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 100000000000000001LL);
    BOOST_CHECK(ParseFixedPoint("9999999999.99999999", 8, &amount));
    BOOST_CHECK_EQUAL(amount, 999999999999999999LL);
    BOOST_CHECK(ParseFixedPoint("-9999999999.99999999", 8, &amount));
    BOOST_CHECK_EQUAL(amount, -999999999999999999LL);
    // Malformed input: empty, bare sign, stray characters, leading zeros,
    // missing integer part, double sign.
    BOOST_CHECK(!ParseFixedPoint("", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("a-1000", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-a1000", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-1000a", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-01000", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("00.1", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint(".1", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("--0.1", 8, &amount));
    // Too many significant decimals.
    BOOST_CHECK(!ParseFixedPoint("0.000000001", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-0.000000001", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("0.00000001000000001", 8, &amount));
    // Out-of-range magnitudes (beyond the representable 10-digit range).
    BOOST_CHECK(!ParseFixedPoint("-10000000000.00000000", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("10000000000.00000000", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-10000000000.00000001", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("10000000000.00000001", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-10000000000.00000009", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("10000000000.00000009", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-99999999999.99999999", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("99999909999.09999999", 8, &amount));
    // Values around the int64 boundary must be rejected.
    BOOST_CHECK(!ParseFixedPoint("92233720368.54775807", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("92233720368.54775808", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-92233720368.54775808", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("-92233720368.54775809", 8, &amount));
    // Incomplete exponent or fraction.
    BOOST_CHECK(!ParseFixedPoint("1.1e", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("1.1e-", 8, &amount));
    BOOST_CHECK(!ParseFixedPoint("1.", 8, &amount));
}
BOOST_AUTO_TEST_SUITE_END()
|
#include "readFileList.h"
// Opens the checksum file for reading and records whether the open succeeded.
FileList::FileList(std::string checkSumFile)
{
    m_fp = fopen(checkSumFile.c_str(), "r");
    m_bValid = (m_fp != NULL);
    if (!m_bValid)
        std::cout << "Failed to open checksum file!" << std::endl;
}
// Returns true while the checksum file is open and has not yet been
// consumed by lSerialize() (which sets m_bValid back to false).
bool FileList::bIsValid()
{
    return m_bValid;
}
std::vector<sFileEntry> FileList::lSerialize()
{
std::vector<sFileEntry> pVec;
char *md5sum = new char[1000];
char *fileName = new char[1000];
while(!feof(m_fp))
{
fscanf(m_fp,"%s %s",md5sum, fileName);
sFileEntry dummy;
dummy.szMd5sum = md5sum;
dummy.szFileName = fileName;
pVec.push_back(dummy);
}
fclose(m_fp);
delete md5sum;
delete fileName;
m_bValid = false;
return pVec;
}
|
#include "image.h"
#include <TDebug>
#include <cstring>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
#ifdef STANDALONE_USE
# undef tError
# undef tWarn
# undef tDebug
# define tError qCritical
# define tWarn qWarning
# define tDebug qDebug
#endif
// Loads the image at |path|; color=true requests a 3-channel load,
// color=false a single-channel (grayscale) load (imread flag 1 vs 0).
Image::Image(const QString &path, bool color)
    : _mat(imread(qPrintable(path), (color ? 1 : 0)))
{ }

// Copy constructor shares the underlying cv::Mat data. // shallow copy
Image::Image(const Image &other)
    : _mat(other._mat)
{ }
// Shallow-copy assignment: shares pixel data with |other|. // shallow copy
Image &Image::operator=(const Image &other)
{
    // BUG FIX: guard against self-assignment. Releasing first would drop
    // what may be the only reference to our own data before copying it back.
    if (this != &other) {
        _mat.release();
        _mat = other._mat;
    }
    return *this;
}
// Wraps an existing cv::Mat (shallow: shares its data).
Image::Image(const Mat &other)
    : _mat(other)
{ }

Image::~Image() {}

// Deep copy of the pixel data.
Image Image::clone() const
{
    return Image(_mat.clone());
}

// Basic geometry/format accessors.
int Image::width() const { return _mat.cols; }
int Image::height() const { return _mat.rows; }
int Image::channels() const { return _mat.channels(); }
bool Image::isEmpty() const { return _mat.empty(); }
// Normalize brightness in place (no-op on an empty image).
void Image::normalize()
{
    if (!isEmpty()) {
        operator=(normalized());
    }
}
// Returns a brightness-normalized copy: linearly stretches the intensity
// range [min, max] to [0, 255].
Image Image::normalized() const
{
    if (isEmpty()) {
        return clone();
    }
    double min, max;
    cv::minMaxLoc(_mat, &min, &max);
    // Already full-range: nothing to do. BUG FIX: also bail out when the
    // image is flat (min == max); the scale factor 255/(max-min) would
    // otherwise divide by zero.
    if (((int)min == 0 && (int)max == 255) || min == max) {
        return clone();
    }
    Mat dst;
    cv::convertScaleAbs(_mat, dst, 255.0/(max-min), -255.0*min/(max-min));
    return Image(dst);
}
// Rotate in place by |angle| degrees, optionally scaling (no-op when empty).
void Image::rotate(float angle, float scale)
{
    if (!isEmpty()) {
        operator=(rotated(angle, scale));
    }
}
// Returns a copy rotated by |angle| degrees around the center, optionally
// scaled. For scale < 1 the shrunken image is pasted centered onto a canvas
// of the original size before rotating, so the output keeps the original
// dimensions.
Image Image::rotated(float angle, float scale) const
{
    if (isEmpty()) {
        return clone();
    }
    if (scale < 1.0) {
        // Zoom out
        int type = (channels() == 3) ? CV_8UC3 : CV_8U;
        Mat dst = Mat::ones(height(), width(), type); // original size
        int w = _mat.cols * scale;
        int h = _mat.rows * scale;
        int x = (width()-w)/2;
        int y = (height()-h)/2;
        Image img = resized(w, h);
        cv::Rect rect(x,y,w,h);
        cv::Mat submat = dst(rect);
        img._mat.copyTo(submat); // paste into the centered sub-region
        // BUG FIX: rotate the padded canvas, not the shrunken image.
        // Previously `img.rotated(...)` was returned, discarding `dst`
        // entirely and yielding a w x h result instead of the original size.
        return Image(dst).rotated(angle, 1.0);
    }
    // Zoom in (or scale == 1.0): single affine warp about the image center.
    Mat dst;
    Point2f center(_mat.cols/2.0, _mat.rows/2.0);
    Mat matrix = cv::getRotationMatrix2D(center, angle, scale);
    warpAffine(_mat, dst, matrix, _mat.size(), cv::INTER_CUBIC);
    return Image(dst);
}
// Resize in place to w x h (no-op when empty or already that size).
void Image::resize(int w, int h)
{
    const bool unchanged = (w == width() && h == height());
    if (unchanged || isEmpty()) {
        return;
    }
    operator=(resized(w, h)); // shallow copy of the result
}
// Returns a copy resized to w x h. Uses INTER_AREA when shrinking and
// INTER_CUBIC when enlarging.
Image Image::resized(int w, int h) const
{
    if ((w == width() && h == height()) || isEmpty()) {
        return clone();
    }
    // Output image
    int type = (channels() == 3) ? CV_8UC3 : CV_8U;
    Mat dst = Mat::ones(h, w, type);
    if (w < width() || h < height()) {
        // Shrink. fx/fy are ignored when an explicit dsize is given, so pass
        // 0,0 rather than the misleading 0.5,0.5 of the old code.
        cv::resize(_mat, dst, dst.size(), 0, 0, cv::INTER_AREA);
    } else {
        // Enlarge. BUG FIX: interpolation is the 6th parameter of
        // cv::resize; the old call passed INTER_CUBIC as the fx scale
        // factor, so bicubic interpolation was never actually used.
        cv::resize(_mat, dst, dst.size(), 0, 0, cv::INTER_CUBIC);
    }
    return Image(dst); // shallow copy
}
// Crop in place to the rectangle (x, y, width, height); no-op when empty.
void Image::crop(int x, int y, int width, int height)
{
    if (!isEmpty()) {
        operator=(cropped(x, y, width, height)); // shallow copy
    }
}
// Crop in place to |rect|; no-op when empty.
void Image::crop(const cv::Rect &rect)
{
    if (!isEmpty()) {
        operator=(cropped(rect)); // shallow copy
    }
}
// Returns a copy cropped to (x, y, width, height).
Image Image::cropped(int x, int y, int width, int height) const
{
    if (isEmpty()) {
        return clone();
    }
    const cv::Rect region(x, y, width, height);
    return cropped(region);
}
// Returns a copy cropped to |rect|. Out-of-bounds rectangles are rejected
// with an error and yield an empty Image.
Image Image::cropped(const cv::Rect &rect) const
{
    if (isEmpty()) {
        return clone();
    }
    // Validate the rectangle against the image bounds before taking a view.
    if (rect.x + rect.width > width() || rect.y + rect.height > height() || rect.x < 0 || rect.y < 0) {
        tError("Invalid crop size x:%d y:%d w:%d h:%d orig img w:%d, h:%d", rect.x, rect.y,
            rect.width, rect.height, width(), height());
        return Image();
    }
    // Mat(_mat, rect) is a view sharing _mat's data (no pixel copy).
    Image img(Mat(_mat, rect));
    tDebug("cropped image w:%d h:%d", img.width(), img.height());
    return img;
}
// Trim dark borders in place; no-op when empty.
void Image::trim()
{
    if (!isEmpty()) {
        operator=(trimmed()); // shallow copy
    }
}
// Returns a copy with dark borders trimmed away (see getValidRect()).
Image Image::trimmed() const
{
    if (isEmpty()) {
        return clone();
    }
    const QRect valid = getValidRect();
    return cropped(valid.x(), valid.y(), valid.width(), valid.height());
}
// Finds the bounding box of all content brighter than THRESHOLD_MIN,
// expanded by a 1-pixel margin. Used by trim()/trimmed().
QRect Image::getValidRect() const
{
    const int THRESHOLD_MIN = 16;
    if (isEmpty()) {
        return QRect();
    }
    cv::Mat gray, binary;
    // BUG FIX: cvtColor(CV_RGB2GRAY) requires a multi-channel input and
    // would assert on a grayscale image; use the mat directly in that case.
    if (channels() == 1) {
        gray = _mat;
    } else {
        cvtColor(_mat, gray, CV_RGB2GRAY);
    }
    cv::threshold(gray, binary, THRESHOLD_MIN, 255, cv::THRESH_BINARY);
    // Collect every point of every external contour above the threshold.
    vector<vector<Point>> contours;
    findContours(binary, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    vector<Point> points;
    for (auto &con : contours) {
        for (auto &p : con) {
            points.push_back(p);
        }
    }
    // BUG FIX: a fully-dark image yields no contours; boundingRect on an
    // empty point set is invalid, so fall back to the whole image.
    if (points.empty()) {
        return QRect(0, 0, width(), height());
    }
    cv::Rect brect = cv::boundingRect(cv::Mat(points));
    // Widen by one pixel on each side, clamped to the image origin.
    int x = qMax(brect.x-1, 0);
    int y = qMax(brect.y-1, 0);
    tDebug("trim image x:%d ,y:%d w:%d h:%d", x, y, brect.width+2, brect.height+2);
    return QRect(x, y, brect.width+2, brect.height+2);
}
// Draws a rectangle from (x1,y1) to (x2,y2); thickness < 0 fills it.
void Image::drawRectangle(int x1, int y1, int x2, int y2, const Scalar &color, int thickness, int lineType)
{
    cv::rectangle(_mat, cv::Point(x1,y1), cv::Point(x2, y2), color, thickness, lineType);
}
// Draws |text| with its baseline origin at (x, y).
void Image::putText(const QString &text, int x, int y, int fontFace, double fontScale, const Scalar &color, int thickness)
{
    cv::putText(_mat, text.toStdString(), cv::Point(x, y), fontFace, fontScale, color, thickness);
}
// Draws |text| at (x, y) on top of a semi-transparent background box
// (bgColor blended with weight |alpha|).
void Image::drawLabel(const QString &text, int x, int y, int fontFace, double fontScale, const cv::Scalar &color,
    int thickness, const cv::Scalar &bgColor, double alpha)
{
    // Measure the text to size the background rectangle.
    int baseline = 0;
    Size textSize = getTextSize(text.toStdString(), fontFace, fontScale, thickness, &baseline);
    // cv::putText's origin is the text baseline, so shift down by the height.
    int textX = x;
    int textY = y + textSize.height;
    // Blend the filled background box into the image, then draw the text.
    Image alphaMat(cv::Mat(_mat.rows, _mat.cols, _mat.type(), cv::Scalar(0)));
    alphaMat.drawRectangle(x, y, x+textSize.width, y+textSize.height+thickness, bgColor, -1);
    cv::addWeighted(_mat, 1.0, alphaMat.mat(), alpha, 0, _mat);
    putText(text, textX, textY, fontFace, fontScale, color, thickness);
}
// Saves the image as JPEG (quality 95). Returns false for an empty image
// or when imwrite fails.
bool Image::save(const QString &path) const
{
    if (isEmpty()) {
        tError("Cannot save empty image");
        return false;
    }
    std::vector<int> jpegParams;
    jpegParams.push_back(CV_IMWRITE_JPEG_QUALITY); // JPEG quality key
    jpegParams.push_back(95);
    return imwrite(qPrintable(path), _mat, jpegParams);
}
// Encodes the image into the format given by |ext| (file extension, with or
// without the leading dot), e.g. ".png" or "jpg". Returns an empty array on
// failure or for an empty image.
QByteArray Image::toEncoded(const QString &ext, const QVector<int> &params)
{
    QByteArray encoded;
    std::vector<uchar> buf;
    // imencode expects the extension to start with '.'.
    auto e = ext;
    if (! e.startsWith(".")) {
        e.prepend(".");
    }
    if (! isEmpty() && imencode(e.toStdString(), _mat, buf, params.toStdVector())) {
        encoded.resize(buf.size());
        std::memcpy(encoded.data(), buf.data(), buf.size());
    }
    return encoded;
}
|
//
// Copyright 2020 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#include "pxr/imaging/hdSt/subtextureIdentifier.h"
#include "pxr/base/tf/hash.h"
PXR_NAMESPACE_OPEN_SCOPE
////////////////////////////////////////////////////////////////////////////
// HdStSubtextureIdentifier

HdStSubtextureIdentifier::~HdStSubtextureIdentifier() = default;

// Free-function hash hook; dispatches to the subtype's virtual _Hash().
size_t
hash_value(const HdStSubtextureIdentifier &subId)
{
    return subId._Hash();
}
////////////////////////////////////////////////////////////////////////////
// HdStFieldBaseSubtextureIdentifier

HdStFieldBaseSubtextureIdentifier::HdStFieldBaseSubtextureIdentifier(
    TfToken const &fieldName, const int fieldIndex)
  : _fieldName(fieldName), _fieldIndex(fieldIndex)
{
}

HdStFieldBaseSubtextureIdentifier::~HdStFieldBaseSubtextureIdentifier()
    = default;

// Hash over the identifying state (field name and index).
HdStSubtextureIdentifier::ID
HdStFieldBaseSubtextureIdentifier::_Hash() const {
    return TfHash::Combine(_fieldName, _fieldIndex);
}
////////////////////////////////////////////////////////////////////////////
// HdStAssetUvSubtextureIdentifier

HdStAssetUvSubtextureIdentifier::HdStAssetUvSubtextureIdentifier(
    const bool flipVertically,
    const bool premultiplyAlpha,
    const TfToken& sourceColorSpace)
  : _flipVertically(flipVertically),
    _premultiplyAlpha(premultiplyAlpha),
    _sourceColorSpace(sourceColorSpace)
{
}

HdStAssetUvSubtextureIdentifier::~HdStAssetUvSubtextureIdentifier()
    = default;

std::unique_ptr<HdStSubtextureIdentifier>
HdStAssetUvSubtextureIdentifier::Clone() const
{
    return std::make_unique<HdStAssetUvSubtextureIdentifier>(
        GetFlipVertically(), GetPremultiplyAlpha(), GetSourceColorSpace());
}

// Hash mixes a per-type seed with the identifying state so different
// subtypes with equal members never collide.
HdStSubtextureIdentifier::ID
HdStAssetUvSubtextureIdentifier::_Hash() const
{
    static ID typeHash =
        TfHash()(std::string("HdStAssetUvSubtextureIdentifier"));
    return TfHash::Combine(
        typeHash,
        GetFlipVertically(),
        GetPremultiplyAlpha(),
        GetSourceColorSpace());
}
////////////////////////////////////////////////////////////////////////////
// HdStDynamicUvSubtextureIdentifier

HdStDynamicUvSubtextureIdentifier::HdStDynamicUvSubtextureIdentifier()
    = default;

HdStDynamicUvSubtextureIdentifier::~HdStDynamicUvSubtextureIdentifier()
    = default;

std::unique_ptr<HdStSubtextureIdentifier>
HdStDynamicUvSubtextureIdentifier::Clone() const
{
    return std::make_unique<HdStDynamicUvSubtextureIdentifier>();
}

// Stateless identifier: the hash is just the per-type seed.
HdStSubtextureIdentifier::ID
HdStDynamicUvSubtextureIdentifier::_Hash() const
{
    static ID typeHash =
        TfHash()(std::string("HdStDynamicUvSubtextureIdentifier"));
    return typeHash;
}

// Base implementation provides no texture implementation; subclasses
// override to supply one.
HdStDynamicUvTextureImplementation *
HdStDynamicUvSubtextureIdentifier::GetTextureImplementation() const
{
    return nullptr;
}
////////////////////////////////////////////////////////////////////////////
// HdStUdimSubtextureIdentifier

HdStUdimSubtextureIdentifier::HdStUdimSubtextureIdentifier(
    const bool premultiplyAlpha, const TfToken &sourceColorSpace)
  : _premultiplyAlpha(premultiplyAlpha), _sourceColorSpace(sourceColorSpace)
{
}

HdStUdimSubtextureIdentifier::~HdStUdimSubtextureIdentifier()
    = default;

std::unique_ptr<HdStSubtextureIdentifier>
HdStUdimSubtextureIdentifier::Clone() const
{
    return std::make_unique<HdStUdimSubtextureIdentifier>(
        GetPremultiplyAlpha(), GetSourceColorSpace());
}

// Hash mixes a per-type seed with the identifying state.
HdStSubtextureIdentifier::ID
HdStUdimSubtextureIdentifier::_Hash() const
{
    static ID typeHash =
        TfHash()(std::string("HdStUdimSubtextureIdentifier"));
    return TfHash::Combine(
        typeHash,
        GetPremultiplyAlpha(),
        GetSourceColorSpace());
}
////////////////////////////////////////////////////////////////////////////
// HdStPtexSubtextureIdentifier

HdStPtexSubtextureIdentifier::HdStPtexSubtextureIdentifier(
    const bool premultiplyAlpha)
  : _premultiplyAlpha(premultiplyAlpha)
{
}

HdStPtexSubtextureIdentifier::~HdStPtexSubtextureIdentifier()
    = default;

std::unique_ptr<HdStSubtextureIdentifier>
HdStPtexSubtextureIdentifier::Clone() const
{
    return std::make_unique<HdStPtexSubtextureIdentifier>(
        GetPremultiplyAlpha());
}

// Hash mixes a per-type seed with the identifying state.
HdStSubtextureIdentifier::ID
HdStPtexSubtextureIdentifier::_Hash() const
{
    static ID typeHash =
        TfHash()(std::string("HdStPtexSubtextureIdentifier"));
    return TfHash::Combine(
        typeHash,
        GetPremultiplyAlpha());
}
PXR_NAMESPACE_CLOSE_SCOPE
|
#include "IAnimation.h"
#include "AnimationGroup.h"
///////////////////////////////////////////////////////////
// CONSTRUCTORS and DESTRUCTOR
///////////////////////////////////////////////////////////
// Starts in the STOPPED state, detached from any group, with no start delay.
IAnimation::IAnimation()
    : state_(State::STOPPED), parent_(nullptr),
      delay_(0.0f), currentDelay_(0.0f)
{}
///////////////////////////////////////////////////////////
// PUBLIC FUNCTIONS
///////////////////////////////////////////////////////////
// Stops the animation and resets the accumulated start delay.
void IAnimation::stop()
{
    state_ = State::STOPPED;
    currentDelay_ = 0.0f;
}
// Accumulates elapsed time toward the start delay. Returns true while the
// delay has not yet elapsed; once reached, clamps the accumulator to the
// delay and returns false.
bool IAnimation::shouldWaitDelay(float deltaTime)
{
    currentDelay_ += deltaTime;
    if (currentDelay_ >= delay_)
    {
        currentDelay_ = delay_;
        return false;
    }
    return true;
}
int IAnimation::indexInParent() const
{
if (parent_ == nullptr)
return -1;
int index = -1;
const nctl::Array<nctl::UniquePtr<IAnimation>> &anims = parent_->anims();
for (unsigned int i = 0; i < anims.size(); i++)
{
if (anims[i].get() == this)
{
index = static_cast<int>(i);
break;
}
}
return index;
}
///////////////////////////////////////////////////////////
// PROTECTED FUNCTIONS
///////////////////////////////////////////////////////////
// Copies the configurable properties of this animation into |other|.
// The runtime playback state is intentionally not cloned: the copy starts
// stopped with its delay accumulator reset.
void IAnimation::cloneTo(IAnimation &other) const
{
    other.name = name;
    other.enabled = enabled;
    // Animation state is not cloned
    other.state_ = State::STOPPED;
    other.parent_ = parent_;
    other.delay_ = delay_;
    other.currentDelay_ = 0.0f;
}
bool IAnimation::insideSequential() const
{
IAnimation *parent = parent_;
while (parent != nullptr)
{
if (parent->type() == Type::SEQUENTIAL_GROUP)
return true;
parent = parent->parent_;
}
return false;
}
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/command_line.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "content/browser/device_sensors/data_fetcher_shared_memory.h"
#include "content/browser/device_sensors/device_inertial_sensor_service.h"
#include "content/common/device_sensors/device_motion_hardware_buffer.h"
#include "content/common/device_sensors/device_orientation_hardware_buffer.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/web_contents.h"
#include "content/public/common/content_switches.h"
#include "content/public/test/content_browser_test.h"
#include "content/public/test/content_browser_test_utils.h"
#include "content/public/test/test_navigation_observer.h"
#include "content/public/test/test_utils.h"
#include "content/shell/browser/shell.h"
#include "content/shell/browser/shell_javascript_dialog_manager.h"
namespace content {
namespace {
// Test double for DataFetcherSharedMemory: instead of reading real hardware
// sensors it writes fixed sample values into the shared-memory buffers and
// signals WaitableEvents so the test can synchronize with Start/Stop calls.
class FakeDataFetcher : public DataFetcherSharedMemory {
 public:
  FakeDataFetcher()
      : started_orientation_(false, false),
        stopped_orientation_(false, false),
        started_motion_(false, false),
        stopped_motion_(false, false),
        sensor_data_available_(true) {
  }
  virtual ~FakeDataFetcher() { }

  // Fills the consumer's buffer (when sensor data is "available"), marks the
  // buffer ready, and signals the corresponding started_* event.
  virtual bool Start(ConsumerType consumer_type, void* buffer) OVERRIDE {
    EXPECT_TRUE(buffer);
    switch (consumer_type) {
      case CONSUMER_TYPE_MOTION:
        {
          DeviceMotionHardwareBuffer* motion_buffer =
              static_cast<DeviceMotionHardwareBuffer*>(buffer);
          if (sensor_data_available_)
            UpdateMotion(motion_buffer);
          SetMotionBufferReady(motion_buffer);
          started_motion_.Signal();
        }
        break;
      case CONSUMER_TYPE_ORIENTATION:
        {
          DeviceOrientationHardwareBuffer* orientation_buffer =
              static_cast<DeviceOrientationHardwareBuffer*>(buffer);
          if (sensor_data_available_)
            UpdateOrientation(orientation_buffer);
          SetOrientationBufferReady(orientation_buffer);
          started_orientation_.Signal();
        }
        break;
      default:
        return false;
    }
    return true;
  }

  // Signals the matching stopped_* event; returns false for unknown
  // consumer types.
  virtual bool Stop(ConsumerType consumer_type) OVERRIDE {
    switch (consumer_type) {
      case CONSUMER_TYPE_MOTION:
        stopped_motion_.Signal();
        break;
      case CONSUMER_TYPE_ORIENTATION:
        stopped_orientation_.Signal();
        break;
      default:
        return false;
    }
    return true;
  }

  // The default fetcher type is push-based, so polling must never happen.
  virtual void Fetch(unsigned consumer_bitmask) OVERRIDE {
    FAIL() << "fetch should not be called";
  }

  virtual FetcherType GetType() const OVERRIDE {
    return FETCHER_TYPE_DEFAULT;
  }

  // When false, Start() still marks the buffer ready but writes no sample
  // data, simulating missing hardware.
  void SetSensorDataAvailable(bool available) {
    sensor_data_available_ = available;
  }

  // Publish the "buffer ready" flag under the buffer's seqlock.
  void SetMotionBufferReady(DeviceMotionHardwareBuffer* buffer) {
    buffer->seqlock.WriteBegin();
    buffer->data.allAvailableSensorsAreActive = true;
    buffer->seqlock.WriteEnd();
  }

  void SetOrientationBufferReady(DeviceOrientationHardwareBuffer* buffer) {
    buffer->seqlock.WriteBegin();
    buffer->data.allAvailableSensorsAreActive = true;
    buffer->seqlock.WriteEnd();
  }

  // Writes a fixed, easily recognizable motion sample (values 1..9,
  // interval 100) under the buffer's seqlock.
  void UpdateMotion(DeviceMotionHardwareBuffer* buffer) {
    buffer->seqlock.WriteBegin();
    buffer->data.accelerationX = 1;
    buffer->data.hasAccelerationX = true;
    buffer->data.accelerationY = 2;
    buffer->data.hasAccelerationY = true;
    buffer->data.accelerationZ = 3;
    buffer->data.hasAccelerationZ = true;
    buffer->data.accelerationIncludingGravityX = 4;
    buffer->data.hasAccelerationIncludingGravityX = true;
    buffer->data.accelerationIncludingGravityY = 5;
    buffer->data.hasAccelerationIncludingGravityY = true;
    buffer->data.accelerationIncludingGravityZ = 6;
    buffer->data.hasAccelerationIncludingGravityZ = true;
    buffer->data.rotationRateAlpha = 7;
    buffer->data.hasRotationRateAlpha = true;
    buffer->data.rotationRateBeta = 8;
    buffer->data.hasRotationRateBeta = true;
    buffer->data.rotationRateGamma = 9;
    buffer->data.hasRotationRateGamma = true;
    buffer->data.interval = 100;
    buffer->data.allAvailableSensorsAreActive = true;
    buffer->seqlock.WriteEnd();
  }

  // Writes a fixed orientation sample (alpha=1, beta=2, gamma=3) under the
  // buffer's seqlock.
  void UpdateOrientation(DeviceOrientationHardwareBuffer* buffer) {
    buffer->seqlock.WriteBegin();
    buffer->data.alpha = 1;
    buffer->data.hasAlpha = true;
    buffer->data.beta = 2;
    buffer->data.hasBeta = true;
    buffer->data.gamma = 3;
    buffer->data.hasGamma = true;
    buffer->data.allAvailableSensorsAreActive = true;
    buffer->seqlock.WriteEnd();
  }

  // Events the test waits on to observe Start/Stop for each consumer.
  base::WaitableEvent started_orientation_;
  base::WaitableEvent stopped_orientation_;
  base::WaitableEvent started_motion_;
  base::WaitableEvent stopped_motion_;
  bool sensor_data_available_;

 private:
  DISALLOW_COPY_AND_ASSIGN(FakeDataFetcher);
};
// Browser test harness that installs a FakeDataFetcher into the
// DeviceInertialSensorService so that pages under test receive the fake
// (or deliberately absent) sensor readings.
class DeviceInertialSensorBrowserTest : public ContentBrowserTest  {
 public:
  DeviceInertialSensorBrowserTest()
      : fetcher_(NULL),
        io_loop_finished_event_(false, false) {
  }

  // Installs the fake fetcher on the IO thread and blocks until that has
  // happened, so tests only run once the fake is in place.
  virtual void SetUpOnMainThread() OVERRIDE {
    BrowserThread::PostTask(
        BrowserThread::IO, FROM_HERE,
        base::Bind(&DeviceInertialSensorBrowserTest::SetUpOnIOThread, this));
    io_loop_finished_event_.Wait();
  }

  void SetUpOnIOThread() {
    // NOTE(review): ownership of fetcher_ is presumably transferred to the
    // service via SetDataFetcherForTesting — confirm; the test keeps only a
    // raw pointer for synchronization.
    fetcher_ = new FakeDataFetcher();
    DeviceInertialSensorService::GetInstance()->
        SetDataFetcherForTesting(fetcher_);
    io_loop_finished_event_.Signal();
  }

  // Sleeps for |delay| and then quits the current message loop; used as the
  // dialog-request callback to keep a modal alert open for a while.
  void DelayAndQuit(base::TimeDelta delay) {
    base::PlatformThread::Sleep(delay);
    base::MessageLoop::current()->QuitWhenIdle();
  }

  // Runs a nested message loop until the page raises a JavaScript alert,
  // then keeps the dialog pending for |delay| before returning.
  void WaitForAlertDialogAndQuitAfterDelay(base::TimeDelta delay) {
    ShellJavaScriptDialogManager* dialog_manager =
        static_cast<ShellJavaScriptDialogManager*>(
            shell()->GetJavaScriptDialogManager());
    scoped_refptr<MessageLoopRunner> runner = new MessageLoopRunner();
    dialog_manager->set_dialog_request_callback(
        base::Bind(&DeviceInertialSensorBrowserTest::DelayAndQuit, this,
            delay));
    runner->Run();
  }

  // Owned by DeviceInertialSensorService after SetUpOnIOThread (see above).
  FakeDataFetcher* fetcher_;

 private:
  // Signaled once the fake fetcher has been installed on the IO thread.
  base::WaitableEvent io_loop_finished_event_;
};
IN_PROC_BROWSER_TEST_F(DeviceInertialSensorBrowserTest, OrientationTest) {
  // The test page will register an event handler for orientation events,
  // expects to get an event with fake values, then removes the event
  // handler and navigates to #pass.
  GURL test_url = GetTestUrl(
      "device_orientation", "device_orientation_test.html");
  // Two navigations are expected: the initial load and the #pass fragment.
  NavigateToURLBlockUntilNavigationsComplete(shell(), test_url, 2);
  EXPECT_EQ("pass", shell()->web_contents()->GetLastCommittedURL().ref());

  // Verify the fetcher was both started and stopped for orientation.
  fetcher_->started_orientation_.Wait();
  fetcher_->stopped_orientation_.Wait();
}
IN_PROC_BROWSER_TEST_F(DeviceInertialSensorBrowserTest, MotionTest) {
  // The test page will register an event handler for motion events,
  // expects to get an event with fake values, then removes the event
  // handler and navigates to #pass.
  GURL test_url = GetTestUrl(
      "device_orientation", "device_motion_test.html");
  // Two navigations are expected: the initial load and the #pass fragment.
  NavigateToURLBlockUntilNavigationsComplete(shell(), test_url, 2);
  EXPECT_EQ("pass", shell()->web_contents()->GetLastCommittedURL().ref());

  // Verify the fetcher was both started and stopped for motion.
  fetcher_->started_motion_.Wait();
  fetcher_->stopped_motion_.Wait();
}
// Flaking in the android try bot. See http://crbug.com/360578.
#if defined(OS_ANDROID)
#define MAYBE_OrientationNullTestWithAlert DISABLED_OrientationNullTestWithAlert
#else
#define MAYBE_OrientationNullTestWithAlert OrientationNullTestWithAlert
#endif
IN_PROC_BROWSER_TEST_F(DeviceInertialSensorBrowserTest,
                       MAYBE_OrientationNullTestWithAlert) {
  // The test page will register an event handler for orientation events,
  // expects to get an event with null values. The test raises a modal alert
  // dialog with a delay to test that the one-off null-event still propagates
  // to window after the alert is dismissed and the callback is invoked which
  // navigates to #pass.
  fetcher_->SetSensorDataAvailable(false);
  // Expect two navigations: the initial load and the #pass fragment.
  TestNavigationObserver same_tab_observer(shell()->web_contents(), 2);

  GURL test_url = GetTestUrl(
      "device_orientation", "device_orientation_null_test_with_alert.html");
  shell()->LoadURL(test_url);

  // TODO(timvolodine): investigate if it is possible to test this without
  // delay, crbug.com/360044.
  WaitForAlertDialogAndQuitAfterDelay(base::TimeDelta::FromMilliseconds(1000));

  fetcher_->started_orientation_.Wait();
  fetcher_->stopped_orientation_.Wait();
  same_tab_observer.Wait();
  EXPECT_EQ("pass", shell()->web_contents()->GetLastCommittedURL().ref());
}
// Flaking in the android try bot. See http://crbug.com/360578.
#if defined(OS_ANDROID)
#define MAYBE_MotionNullTestWithAlert DISABLED_MotionNullTestWithAlert
#else
#define MAYBE_MotionNullTestWithAlert MotionNullTestWithAlert
#endif
IN_PROC_BROWSER_TEST_F(DeviceInertialSensorBrowserTest,
                       MAYBE_MotionNullTestWithAlert) {
  // The test page will register an event handler for motion events,
  // expects to get an event with null values. The test raises a modal alert
  // dialog with a delay to test that the one-off null-event still propagates
  // to window after the alert is dismissed and the callback is invoked which
  // navigates to #pass.
  fetcher_->SetSensorDataAvailable(false);
  // Expect two navigations: the initial load and the #pass fragment.
  TestNavigationObserver same_tab_observer(shell()->web_contents(), 2);

  GURL test_url = GetTestUrl(
      "device_orientation", "device_motion_null_test_with_alert.html");
  shell()->LoadURL(test_url);

  // TODO(timvolodine): investigate if it is possible to test this without
  // delay, crbug.com/360044.
  WaitForAlertDialogAndQuitAfterDelay(base::TimeDelta::FromMilliseconds(1000));

  fetcher_->started_motion_.Wait();
  fetcher_->stopped_motion_.Wait();
  same_tab_observer.Wait();
  EXPECT_EQ("pass", shell()->web_contents()->GetLastCommittedURL().ref());
}
} // namespace
} // namespace content
|
#pragma once
#include "routine.hpp"
#include "model/graph/undirected_graph.hpp"
#include "model/boundary.hpp"
#include "model/types.hpp"
#include "io/reader/header_reader.hpp"
#include "io/reader/osm_reader.hpp"
#include "io/writer/map_writer.hpp"
#include "io/writer/mapdata_writer.hpp"
#include "mapmaker/assembler.hpp"
#include "mapmaker/builder.hpp"
#include "mapmaker/calculator.hpp"
#include "mapmaker/compressor.hpp"
#include "mapmaker/converter.hpp"
#include "mapmaker/counter.hpp"
#include "mapmaker/filter.hpp"
#include "mapmaker/inspector.hpp"
#include "functions/transform.hpp"
#include "util/log.hpp"
#include "util/title.hpp"
#include "util/validate.hpp"
namespace fs = boost::filesystem;
namespace po = boost::program_options;
using namespace model;
/**
 * The create routine generates a Warzone map from an OpenStreetMap extract.
 *
 * It reads administrative boundaries from the input file, assembles and
 * optionally compresses and filters them, projects the geometries onto a
 * pixel canvas, and exports the resulting map geometry (SVG) together with
 * its metadata (JSON) to the specified output directory.
 */
class Create : public Routine
{

    /* Types */

    using T = double;
    using buffer_t = osmium::memory::Buffer;
    using graph_t = graph::UndirectedGraph;
    using component_t = std::vector<std::set<osmium::object_id_type>>;
    using container_t = std::map<object_id_type, Boundary<T>>;
    using hierarchy_t = std::map<object_id_type, std::set<object_id_type>>;

    /* Members */

    /**
     * The path to the input OSM file.
     */
    fs::path m_input;

    /**
     * The output directory for the warzone map geometry and mapdata.
     */
    fs::path m_outdir;

    /**
     * The admin_level for territories (0 = determine automatically).
     */
    level_type m_territory_level;

    /**
     * The admin_levels for bonuses (sorted ascending in setup()).
     */
    std::vector<level_type> m_bonus_levels;

    /**
     * The width of the generated map in pixels (0 = derive from height).
     */
    int m_width;

    /**
     * The height of the generated map in pixels (0 = derive from width).
     */
    int m_height;

    /**
     * The compression distance tolerance for the Douglas-Peucker algorithm.
     */
    double m_compression_tolerance;

    /**
     * The surface area tolerance for the filter algorithm.
     */
    double m_filter_tolerance;

    /**
     * The verbose logging flag.
     */
    bool m_verbose;

    /**
     * The logger.
     */
    util::Logger<std::ostream> m_log{ std::cout };

public:

    /* Constructors */

    /**
     * Registers the command line options and the positional input argument.
     */
    Create() : Routine()
    {
        m_options.add_options()
            ("input", po::value<fs::path>()->required(), "Sets the input file path.\nAllowed file formats: .osm, .pbf")
            ("outdir,o", po::value<fs::path>()->default_value(""), "Sets the output folder for the generated map files.")
            ("territory-level,t", po::value<level_type>()->default_value(0), "Sets the admin_level of boundaries that will be be used as territories.\nInteger between 1 and 12.")
            ("bonus-levels,b", po::value<std::vector<level_type>>()->multitoken(), "Sets the admin_level of boundaries that will be be used as bonus links.\nInteger between 1 and 12. If none are specified, no bonus links will be generated.")
            ("width", po::value<int>()->default_value(1000), "Sets the generated map width in pixels.\nIf set to 0, the width will be determined automatically with the height.")
            ("height", po::value<int>()->default_value(0), "Sets the generated map height in pixels.\nIf set to 0, the height will be determined automatically with the width.")
            ("compression-tolerance,c", po::value<double>()->default_value(0.0), "Sets the minimum distance tolerance for the compression algorithm.\nIf set to 0, no compression will be applied.")
            ("filter-tolerance,f", po::value<double>()->default_value(0.0), "Sets the surface area ratio tolerance for filtering boundaries.\nIf set to 0, no filter will be applied.")
            ("verbose", po::bool_switch()->default_value(false), "Enables verbose logging.")
            ("help,h", "Shows this help message.");
        m_positional.add("input", 1);
    }

    /* Override Methods */

    /**
     * Returns the name of this routine as used on the command line.
     */
    const std::string name() const noexcept override
    {
        // Fixed: previously returned "upload", a copy-paste left-over from
        // the upload routine; this routine creates maps.
        return "create";
    }

    /**
     * Validates the parsed command line options and initializes all members.
     */
    void setup() override
    {
        Routine::setup();
        this->set<fs::path>(&m_input, "input", util::validate_file);
        this->set<fs::path>(&m_outdir, "outdir", m_dir, util::validate_dir);
        this->set<level_type>(&m_territory_level, "territory-level");
        this->set<std::vector<level_type>>(&m_bonus_levels, "bonus-levels", std::vector<level_type>{});
        std::sort(m_bonus_levels.begin(), m_bonus_levels.end());
        util::validate_levels(m_territory_level, m_bonus_levels);
        this->set<int>(&m_width, "width");
        this->set<int>(&m_height, "height");
        util::validate_dimensions(m_width, m_height);
        this->set<double>(&m_compression_tolerance, "compression-tolerance", util::validate_epsilon);
        this->set<double>(&m_filter_tolerance, "filter-tolerance", util::validate_epsilon);
        this->set<bool>(&m_verbose, "verbose");

        // Calculate the total number of steps for the routine: ten fixed
        // steps plus one each for the optional compression, filter and bonus
        // assembly steps.
        std::size_t steps = 10 + (m_compression_tolerance > 0.0)
            + (m_filter_tolerance > 0.0)
            + (!m_bonus_levels.empty());
        m_log.set_steps(steps);
    }

private:

    /* Helper methods */

    /**
     * Reads and returns the OSM file header of the specified input file.
     */
    Header read_header(const fs::path& file_path)
    {
        // Prepare the header reader for the input file and retrieve the header
        io::HeaderReader reader{ file_path.string() };
        return reader.read();
    }

    /**
     * Reads the administrative boundaries with a matching admin_level from
     * the specified input file.
     */
    osmium::memory::Buffer read_data(const fs::path& file_path, std::set<level_type> levels)
    {
        // Retrieve the administrative boundaries with an admin_level that
        // matches the prepared level filter from the input file.
        // Fixed: previously read from m_input instead of the file_path
        // parameter.
        io::BoundaryReader reader{ file_path, levels };
        return reader.read();
    }

    /**
     * Compresses the ways in the buffer with the Douglas-Peucker algorithm
     * using the configured tolerance and logs the node reduction.
     */
    void compress(buffer_t& buffer)
    {
        // Count the nodes before the compression
        mapmaker::NodeCounter counter;
        std::size_t before = counter.run(buffer);
        // Compress the extracted ways using the specified compression
        // tolerance
        mapmaker::Compressor compressor{ m_compression_tolerance };
        compressor.run(buffer);
        // Count the nodes after the compression
        std::size_t after = counter.run(buffer);
        m_log.step() << "Compressed " << before << " nodes to " << after << " nodes.\n";
    }

    /**
     * Assembles the boundaries with the specified levels into multipolygon
     * areas; the split flag controls the assembler's splitting strategy.
     */
    void assemble(buffer_t& buffer, std::set<level_type> levels, bool split)
    {
        // Create the assembler depending on the split strategy.
        mapmaker::Assembler assembler{ levels, split };
        assembler.run(buffer);
    }

    /**
     * Builds the neighbor graph of all areas with the specified level.
     */
    graph_t get_neighbors(const buffer_t& buffer, level_type level)
    {
        mapmaker::NeighborInspector inspector{ level };
        return inspector.run(buffer);
    }

    /**
     * Determines the connected components (islands) of the neighbor graph.
     */
    component_t get_components(const graph_t& neighbors)
    {
        mapmaker::ComponentInspector inspector;
        return inspector.run(neighbors);
    }

    /**
     * Filters areas by their relative surface area using the configured
     * tolerance and logs the area reduction.
     */
    void filter(buffer_t& buffer, graph_t& neighbors, component_t& components)
    {
        // Count the areas before the filter process
        mapmaker::AreaCounter counter;
        std::size_t before = counter.run(buffer);
        // Apply the area filter on the area buffer using the specified tolerance
        mapmaker::AreaFilter filter{ m_filter_tolerance };
        filter.run(buffer, neighbors, components);
        // Count the areas after the filter process
        std::size_t after = counter.run(buffer);
        // Fixed: the old message reported "Compressed ... nodes", but this
        // step filters areas.
        m_log.step() << "Filtered " << before << " areas to " << after << " areas.\n";
    }

    /**
     * Applies the transformation to both corners of the bounding box.
     * (Template parameter renamed from T to avoid shadowing the class-level
     * alias.)
     */
    template <typename V>
    void transform(functions::Transformation<V>& transformation, geometry::Rectangle<V>& bounds)
    {
        transformation.transform(bounds.min().x(), bounds.min().y());
        transformation.transform(bounds.max().x(), bounds.max().y());
    }

    /**
     * Projects the node locations onto the pixel canvas and converts the
     * assembled areas into boundary geometries.
     */
    container_t convert(buffer_t& buffer)
    {
        // Prepare the transformations that will be applied on the buffer before
        // the geometry conversion. At first, calculate the bounding box of the
        // nodes in the buffer.
        mapmaker::BoundsCalculator<T> bounds_calculator{};
        geometry::Rectangle<T> bounds = bounds_calculator.run(buffer);
        // The radian transformation converts the nodes, for which the locations
        // are specified in degrees, to radians, for futher usage in the Mercator
        // projection.
        functions::RadianTransformation<T> radian_transformation{};
        // The Mercator projection maps the spherical earth coordinates to two-
        // dimensional planar coordinates.
        functions::MercatorProjection<T> mercator_transformation{};
        // The normalization transformation normalizes and fits the locations
        // within the unit interval. The bounds are transformed alongside so
        // that the normalization interval matches the projected coordinates.
        transform(radian_transformation, bounds);
        transform(mercator_transformation, bounds);
        functions::UnitTransformation<T> normalize_transformation{
            { bounds.min().x(), bounds.max().x() },
            { bounds.min().y(), bounds.max().y() }
        };
        // The mirror transformation mirrors the map coordinates on the horizontal
        // axis, so that they are displayed correctly in the svg coordinate system.
        functions::MirrorTransformation<T> mirror_transformation{ false, true };
        // Check if a dimension is set to auto and calculate its value
        // depending on the transformed map bounds
        if (m_width == 0 || m_height == 0)
        {
            if (m_width == 0)
            {
                m_width = bounds.width() / bounds.height() * m_height;
            }
            else
            {
                m_height = bounds.height() / bounds.width() * m_width;
            }
        }
        // The scaling transformation maps the normalized coordinates to the
        // final pixel dimensions.
        functions::ScaleTransformation<T> scale_transformation{ (double) m_width, (double) m_height };
        // Create the converter, which will apply the specified transformations
        // and convert the areas to multipolygon geometries afterwards.
        mapmaker::BoundaryConverter<T> converter{
            std::make_shared<functions::RadianTransformation<T>>(radian_transformation),
            std::make_shared<functions::MercatorProjection<T>>(mercator_transformation),
            std::make_shared<functions::UnitTransformation<T>>(normalize_transformation),
            // std::make_shared<functions::MirrorTransformation<T>>(mirror_transformation),
            std::make_shared<functions::ScaleTransformation<T>>(scale_transformation)
        };
        return converter.run(buffer);
    }

    /**
     * Calculates the center point for each boundary in place.
     */
    void calculate_centers(container_t& boundaries)
    {
        mapmaker::CenterCalculator<T> calculator;
        calculator.run(boundaries);
    }

    /**
     * Determines the hierarchy (containment) of territories and bonuses.
     */
    hierarchy_t calculate_hierarchy(const container_t& boundaries)
    {
        mapmaker::HierarchyInspector<T> inspector;
        return inspector.run(boundaries);
    }

    /**
     * Builds the final Warzone map from the calculated data.
     */
    warzone::Map<T> build_map(std::string name, container_t& boundaries, const graph_t& neighbors, const hierarchy_t& hierarchy)
    {
        mapmaker::MapBuilder<T> builder{};
        builder.name(name);
        builder.width(m_width);
        builder.height(m_height);
        builder.territory_level(m_territory_level);
        if (!m_bonus_levels.empty())
        {
            builder.bonus_level(m_bonus_levels.at(0));
            if (m_bonus_levels.size() > 1)
            {
                builder.super_bonus_level(m_bonus_levels.at(1));
            }
        }
        builder.neighbors(neighbors);
        builder.hierarchy(hierarchy);
        return builder.run(boundaries);
    }

    /**
     * Writes the map geometry as SVG into the output directory.
     * Takes the map by value so callers can pass either a copy or a move.
     */
    void export_map(warzone::Map<T> map)
    {
        fs::path file_path = m_outdir / fs::path(map.name).replace_extension(".svg");
        io::MapWriter<T> writer{ file_path };
        m_log.step() << "Exporting map to " << file_path << ".\n";
        writer.write(std::move(map));
        m_log.step() << "Map export finished.\n";
    }

    /**
     * Writes the map metadata as JSON into the output directory.
     * Takes the map by value so callers can pass either a copy or a move.
     */
    void export_mapdata(warzone::Map<T> map)
    {
        fs::path file_path = m_outdir / fs::path(map.name).replace_extension(".json");
        io::MapdataWriter<T> writer{ file_path };
        m_log.step() << "Exporting map data to " << file_path << ".\n";
        writer.write(std::move(map));
        // Fixed: the period was misplaced after the newline.
        m_log.step() << "Map data export finished.\n";
    }

public:

    /**
     * Executes the full map creation pipeline.
     */
    void run() override
    {
        // Print the title
        std::cout << util::title() << std::endl;

        // Step 1: Read the file header and determine the territory level
        // automatically if it was not set.
        m_log.start() << "Retrieving headers from file " << m_input << ".\n";
        Header header = read_header(m_input);
        if (m_territory_level == 0)
        {
            // Pick the admin_level with the most boundaries.
            // NOTE(review): assumes the header contains at least one level
            // entry; an empty input would dereference an end iterator.
            auto [l, c] = *std::max_element(header.levels.cbegin(), header.levels.cend(),
                [](const std::pair<short, std::size_t>& e1, const std::pair<short, std::size_t>& e2)
                {
                    return e1.second < e2.second;
                }
            );
            m_territory_level = l;
        }
        // Prepare the level filter with the specified territory and bonus
        // levels
        std::set<level_type> levels{ m_bonus_levels.begin(), m_bonus_levels.end() };
        levels.insert(m_territory_level);
        m_log.finish();

        // Step 2: Prepare the level filter and read the boundaries from
        // the specified input file.
        m_log.start() << "Reading boundaries from file " << m_input << ".\n";
        buffer_t buffer = read_data(m_input, levels);
        m_log.finish();

        // Step 3: Compress the extracted ways using the Douglas-Peucker
        // algorithm if a compression threshold was specified.
        if (m_compression_tolerance > 0)
        {
            m_log.start() << "Compressing ways with tolerance " << m_compression_tolerance << ".\n";
            compress(buffer);
            m_log.finish();
        }

        // Step 4: Assemble the territory boundaries using the built-in
        // multipolygon assembler.
        m_log.start() << "Assembling territories with level " << m_territory_level << ".\n";
        assemble(buffer, { m_territory_level }, true);
        m_log.finish();

        // Step 5: Create the neighbor graph for the assembled territories.
        m_log.start() << "Calculating neighborships for territories.\n";
        graph::UndirectedGraph neighbors = get_neighbors(buffer, m_territory_level);
        m_log.finish();

        // Step 6: Calculate the connected components for the neighbor graph.
        // This yields the islands of the map.
        m_log.start() << "Finding territory islands.\n";
        component_t components = get_components(neighbors);
        m_log.finish();

        // Step 7: Filter connected components by their surface area if a filter
        // threshold was specified.
        if (m_filter_tolerance > 0)
        {
            // Fixed: this step filters boundaries; the old message claimed
            // it was compressing ways.
            m_log.start() << "Filtering boundaries with tolerance " << m_filter_tolerance << ".\n";
            filter(buffer, neighbors, components);
            m_log.finish();
        }

        // Step 8: Assemble the bonus boundaries using the built-in multipolygon
        // assembler if any bonus levels were specified.
        if (!m_bonus_levels.empty())
        {
            m_log.start() << "Assembling bonuses with the levels " << util::join(m_bonus_levels) << ".\n";
            assemble(buffer, std::set<level_type>(m_bonus_levels.begin(), m_bonus_levels.end()), false);
            m_log.finish();
        }

        // Step 9: Create the boundary geometries from the assembled boundaries by
        // applying the map projections and transformations first and converting
        // the osmium objects to geometry objects afterwards.
        m_log.start() << "Building the boundary geometries from the OpenStreetMap objects.\n";
        std::map<object_id_type, Boundary<T>> boundaries = convert(buffer);
        m_log.finish();

        // Step 10: Calculate the center points for each boundary
        m_log.start() << "Calculating the center points for " << boundaries.size() << " boundaries.\n";
        calculate_centers(boundaries);
        m_log.finish();

        // Step 11: Calculate the hierarchy of territories, bonuses and super bonuses
        // if any bonus levels were specified
        hierarchy_t hierarchy = {};
        if (!m_bonus_levels.empty())
        {
            m_log.start() << "Calculating the hierarchy for " << boundaries.size() << " boundaries.\n";
            hierarchy = calculate_hierarchy(boundaries);
            m_log.finish();
        }

        // Step 12: Build the map with the generated data
        m_log.start() << "Building the Warzone map.\n";
        // Create the map name from the input file name
        std::string name = std::regex_replace(
            m_input.filename().string(),
            std::regex("(\\.osm|\\.pbf)"),
            ""
        );
        // Build the map
        warzone::Map map = build_map(name, boundaries, neighbors, hierarchy);
        m_log.finish();

        // Step 13: Export the generated Warzone map and the calculated mapdata
        // to the specified output directory.
        // Fixed: the map used to be moved into both export helpers, leaving
        // the second call with a moved-from object. Export the map data from
        // a copy first, then move the original into the map export.
        m_log.start() << "Exporting the generated map files.\n";
        export_mapdata(map);
        export_map(std::move(map));
        m_log.finish();

        // Routine finished, print the total duration.
        m_log.end();
    }
};
|
/*
* Copyright (c) 2016, The OpenThread Authors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* This file includes definitions for MLE functionality required by the Thread Child, Router, and Leader roles.
*/
#ifndef MLE_CONSTANTS_HPP_
#define MLE_CONSTANTS_HPP_
#include "openthread-core-config.h"
namespace ot {
namespace Mle {

/**
 * @addtogroup core-mle-core
 *
 */

/**
 * Child-related capacity and retry constants.
 *
 */
enum
{
    kMaxChildren               = OPENTHREAD_CONFIG_MAX_CHILDREN,
    kMaxChildKeepAliveAttempts = 4, ///< Maximum keep alive attempts before attempting to reattach to a new Parent
    kFailedChildTransmissions  = OPENTHREAD_CONFIG_FAILED_CHILD_TRANSMISSIONS, ///< FAILED_CHILD_TRANSMISSIONS
};

/**
 * MLE Protocol Constants
 *
 * Timing values are in milliseconds unless the trailing comment says
 * otherwise; the kMinTimeout* values are converted to seconds (note the
 * division by 1000).
 */
enum
{
    kThreadVersion                  = 2,     ///< Thread Version
    kUdpPort                        = 19788, ///< MLE UDP Port
    kParentRequestRouterTimeout     = 750,   ///< Router Parent Request timeout
    kParentRequestReedTimeout       = 1250,  ///< Router and REEDs Parent Request timeout
    kAttachStartJitter              = 50,    ///< Maximum jitter time added to start of attach.
    kAnnounceProcessTimeout         = 250,   ///< Timeout after receiving Announcement before channel/pan-id change
    kAnnounceTimeout                = 1400,  ///< Total timeout used for sending Announcement messages
    kMinAnnounceDelay               = 80,    ///< Minimum delay between Announcement messages
    kParentResponseMaxDelayRouters  = 500,   ///< Maximum delay for response for Parent Request sent to routers only
    kParentResponseMaxDelayAll      = 1000,  ///< Maximum delay for response for Parent Request sent to all devices
    kUnicastRetransmissionDelay     = 1000,  ///< Base delay before retransmitting an MLE unicast.
    kChildUpdateRequestPendingDelay = 100,   ///< Delay (in ms) for aggregating Child Update Request.
    kMaxTransmissionCount           = 3,     ///< Maximum number of times an MLE message may be transmitted.
    kMaxResponseDelay               = 1000,  ///< Maximum delay before responding to a multicast request
    kMaxChildIdRequestTimeout       = 5000,  ///< Maximum delay for receiving a Child ID Request
    kMaxChildUpdateResponseTimeout  = 2000,  ///< Maximum delay for receiving a Child Update Response
    kMaxLinkRequestTimeout          = 2000,  ///< Maximum delay for receiving a Link Accept
    kMinTimeoutKeepAlive            = (((kMaxChildKeepAliveAttempts + 1) * kUnicastRetransmissionDelay) /
                                       1000), ///< Minimum timeout(s) for keep alive
    kMinTimeoutDataPoll             = (OPENTHREAD_CONFIG_MINIMUM_POLL_PERIOD +
                                       OPENTHREAD_CONFIG_FAILED_CHILD_TRANSMISSIONS * OPENTHREAD_CONFIG_RETX_POLL_PERIOD) /
                                      1000, ///< Minimum timeout(s) for data poll
    kMinTimeout                     = (kMinTimeoutKeepAlive >= kMinTimeoutDataPoll ? kMinTimeoutKeepAlive
                                                                                   : kMinTimeoutDataPoll), ///< Minimum timeout(s)
};

/**
 * Child ID and RLOC16 layout constants.
 *
 */
enum
{
    kMinChildId       = 1,   ///< Minimum Child ID
    kMaxChildId       = 511, ///< Maximum Child ID
    kRouterIdOffset   = 10,  ///< Bit offset of Router ID in RLOC16
    kRlocPrefixLength = 14,  ///< Prefix length of RLOC in bytes
};

/**
 * Routing Protocol Constants
 *
 */
enum
{
    kAdvertiseIntervalMin = 1, ///< ADVERTISEMENT_I_MIN (sec)
#if OPENTHREAD_CONFIG_ENABLE_LONG_ROUTES
    kAdvertiseIntervalMax = 5, ///< ADVERTISEMENT_I_MAX (sec) proposal
#else
    kAdvertiseIntervalMax = 32, ///< ADVERTISEMENT_I_MAX (sec)
#endif
    kFailedRouterTransmissions = 4,   ///< FAILED_ROUTER_TRANSMISSIONS
    kRouterIdReuseDelay        = 100, ///< ID_REUSE_DELAY (sec)
    kRouterIdSequencePeriod    = 10,  ///< ID_SEQUENCE_PERIOD (sec)
    kMaxNeighborAge            = 100, ///< MAX_NEIGHBOR_AGE (sec)
#if OPENTHREAD_CONFIG_ENABLE_LONG_ROUTES
    kMaxRouteCost = 127, ///< MAX_ROUTE_COST proposal
#else
    kMaxRouteCost = 16, ///< MAX_ROUTE_COST
#endif
    kMaxRouterId                = 62,                           ///< MAX_ROUTER_ID
    kInvalidRouterId            = kMaxRouterId + 1,             ///< Value indicating incorrect Router Id
    kMaxRouters                 = OPENTHREAD_CONFIG_MAX_ROUTERS, ///< MAX_ROUTERS
    kMinDowngradeNeighbors      = 7,   ///< MIN_DOWNGRADE_NEIGHBORS
    kNetworkIdTimeout           = 120, ///< NETWORK_ID_TIMEOUT (sec)
    kParentRouteToLeaderTimeout = 20,  ///< PARENT_ROUTE_TO_LEADER_TIMEOUT (sec)
    kRouterSelectionJitter      = 120, ///< ROUTER_SELECTION_JITTER (sec)
    kRouterDowngradeThreshold   = 23,  ///< ROUTER_DOWNGRADE_THRESHOLD (routers)
    kRouterUpgradeThreshold     = 16,  ///< ROUTER_UPGRADE_THRESHOLD (routers)
    kMaxLeaderToRouterTimeout   = 90,  ///< INFINITE_COST_TIMEOUT (sec)
    kReedAdvertiseInterval      = 570, ///< REED_ADVERTISEMENT_INTERVAL (sec)
    kReedAdvertiseJitter        = 60,  ///< REED_ADVERTISEMENT_JITTER (sec)
    kLeaderWeight               = 64,  ///< Default leader weight
    kMleEndDeviceTimeout        = OPENTHREAD_CONFIG_DEFAULT_CHILD_TIMEOUT, ///< MLE_END_DEVICE_TIMEOUT (sec)
    kMeshLocalPrefixContextId   = 0,   ///< 0 is reserved for Mesh Local Prefix
};

/**
 * Parent Priority values
 *
 */
enum
{
    kParentPriorityHigh        = 1,  // Parent Priority High
    kParentPriorityMedium      = 0,  // Parent Priority Medium (default)
    kParentPriorityLow         = -1, // Parent Priority Low
    kParentPriorityUnspecified = -2, // Parent Priority Unspecified
};

/**
 * Link Quality to Link Cost mapping.
 *
 */
enum
{
    kLinkQuality3LinkCost = 1,             ///< Link Cost for Link Quality 3
    kLinkQuality2LinkCost = 2,             ///< Link Cost for Link Quality 2
    kLinkQuality1LinkCost = 4,             ///< Link Cost for Link Quality 1
    kLinkQuality0LinkCost = kMaxRouteCost, ///< Link Cost for Link Quality 0
};

/**
 * Multicast Forwarding Constants
 *
 */
enum
{
    kMplChildDataMessageTimerExpirations  = 0, ///< Number of MPL retransmissions for Children.
    kMplRouterDataMessageTimerExpirations = 2, ///< Number of MPL retransmissions for Routers.
};

} // namespace Mle

/**
 * @}
 *
 */

} // namespace ot
#endif // MLE_CONSTANTS_HPP_
|
/*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* CppWriter.cpp
*
* Created on: Jan 29, 2018
* Author: William F Godoy godoywf@ornl.gov
*/
#include <adios2.h>
#include <mpi.h>
#include <iostream>
// Writes a 2D float array, decomposed by MPI rank along the slowest
// dimension, into the file "CppWriter.bp" using the ADIOS2 C++ API.
int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    int rank = 0;
    int size = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Local block dimensions (rows x columns).
    const size_t nx = 4;
    const size_t ny = 3;

    // Fill the local block with globally unique, rank-dependent values
    // using a single flat index over the row-major layout.
    std::vector<float> data(nx * ny);
    const size_t offset = static_cast<size_t>(rank) * nx * ny;
    for (size_t k = 0; k < data.size(); ++k)
    {
        data[k] = static_cast<float>(offset + k);
    }

    adios2::ADIOS adios(MPI_COMM_WORLD);
    adios2::IO io = adios.DeclareIO("CppWriter");

    // Global shape {size*nx, ny}, this rank's start {rank*nx, 0}, local
    // count {nx, ny}; the decomposition never changes (ConstantDims).
    adios2::Variable<float> bpFloats =
        io.DefineVariable<float>("data2D", {size * nx, ny}, {rank * nx, 0},
                                 {nx, ny}, adios2::ConstantDims);

    adios2::Engine engine = io.Open("CppWriter.bp", adios2::Mode::Write);
    engine.Put(bpFloats, data.data());
    engine.Close();

    MPI_Finalize();
    return 0;
}
|
#include "../dialog/Dialog.h"
#include "Tribe.h"
namespace lust {

// A tribe owns its two dialogs and frees them on destruction.
Tribe::Tribe(const Dialog *firstDialog, const Dialog *repeatingDialog)
    : m_firstDialog(firstDialog),
      m_repeatingDialog(repeatingDialog),
      m_wasViewed(false)
{
}

Tribe::~Tribe()
{
    delete m_firstDialog;
    delete m_repeatingDialog;
}

// Returns the first dialog on the initial contact and the repeating dialog
// on every subsequent contact.
//
// Fixed: the condition was inverted (the first dialog was returned only
// AFTER the tribe had been viewed) and m_wasViewed was assigned after a
// return statement, so it was never set and the repeating dialog was
// returned forever.
const Dialog * Tribe::contact()
{
    if (!m_wasViewed)
    {
        m_wasViewed = true;
        return m_firstDialog;
    }
    return m_repeatingDialog;
}

}
|
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <asapo/database/db_error.h>
#include "asapo/unittests/MockIO.h"
#include "asapo/unittests/MockDatabase.h"
#include "asapo/unittests/MockLogger.h"
#include "../../src/receiver_error.h"
#include "../../src/request.h"
#include "../../src/request_handler/request_factory.h"
#include "../../src/request_handler/request_handler.h"
#include "../../src/request_handler/request_handler_db_delete_stream.h"
#include "../../../common/cpp/src/database/mongodb_client.h"
#include "../mock_receiver_config.h"
#include "asapo/common/data_structs.h"
#include "asapo/common/networking.h"
#include "../receiver_mocking.h"
using namespace testing;
using namespace asapo;
namespace {
// Fixture for RequestHandlerDbDeleteStream: wires the handler to a mocked
// database and logger and provides ExpectDelete() to set up all mock
// expectations for a single ProcessRequest() call.
class DbMetaDeleteStreamTests : public Test {
  public:
    RequestHandlerDbDeleteStream handler{asapo::kDBDataCollectionNamePrefix};
    std::unique_ptr<NiceMock<MockRequest>> mock_request;
    NiceMock<MockDatabase> mock_db;
    NiceMock<asapo::MockLogger> mock_logger;
    ReceiverConfig config;
    std::string expected_beamtime_id = "beamtime_id";
    std::string expected_data_source = "source";
    std::string expected_stream = "stream";
    // expected_custom_data[0] carries the delete-stream option flags that
    // DeleteStreamOptions::Decode() interprets.
    CustomRequestData expected_custom_data {0, 0, 0};
    void SetUp() override {
        GenericRequestHeader request_header;
        // Point the handler's db client at the stack-allocated mock;
        // TearDown() releases it so the unique_ptr never deletes the mock.
        handler.db_client__ = std::unique_ptr<asapo::Database> {&mock_db};
        handler.log__ = &mock_logger;
        mock_request.reset(new NiceMock<MockRequest> {request_header, 1, "", nullptr});
        ON_CALL(*mock_request, GetBeamtimeId()).WillByDefault(ReturnRef(expected_beamtime_id));
    }
    void TearDown() override {
        handler.db_client__.release();
    }
    // Sets up the expected mock interactions for a ProcessRequest() call
    // with the given option flags. When the flags do not request meta
    // deletion, only the "skipped" log line is expected and the database is
    // never touched; otherwise Connect + DeleteStream are expected, with
    // DeleteStream returning the given error template (nullptr = success).
    void ExpectDelete(uint64_t flag, const asapo::DBErrorTemplate* errorTemplate) {
        expected_custom_data[0] = flag;
        SetReceiverConfig(config, "none");
        EXPECT_CALL(*mock_request, GetCustomData_t()).WillOnce(Return(expected_custom_data));
        EXPECT_CALL(*mock_request, GetDataSource()).WillOnce(ReturnRef(expected_data_source));
        EXPECT_CALL(*mock_request, GetStream()).WillOnce(Return(expected_stream));
        asapo::DeleteStreamOptions opt;
        opt.Decode(flag);
        if (!opt.delete_meta) {
            EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("skipped deleting stream meta"),
                                                 HasSubstr(config.database_uri),
                                                 HasSubstr(expected_data_source),
                                                 HasSubstr(expected_stream),
                                                 HasSubstr(expected_beamtime_id)
                                                )
                                          )
                       );
            return;
        }
        // Database name is "<beamtime>_<data source>".
        EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_data_source)).
        WillOnce(testing::Return(nullptr));
        EXPECT_CALL(mock_db, DeleteStream_t(expected_stream)).
        WillOnce(testing::Return(errorTemplate == nullptr ? nullptr : errorTemplate->Generate().release()));
        if (errorTemplate == nullptr) {
            EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("deleted stream meta"),
                                                 HasSubstr(config.database_uri),
                                                 HasSubstr(expected_data_source),
                                                 HasSubstr(expected_stream),
                                                 HasSubstr(expected_beamtime_id)
                                                )
                                          )
                       );
        }
    }
};
// Flag 3: delete meta and report an error if the stream is missing
// (flag semantics presumably per DeleteStreamOptions::Decode — confirm
// against its definition). A successful delete yields no error.
TEST_F(DbMetaDeleteStreamTests, CallsDeleteOk) {
    ExpectDelete(3, nullptr);
    auto err = handler.ProcessRequest(mock_request.get());
    ASSERT_THAT(err, Eq(nullptr));
}
TEST_F(DbMetaDeleteStreamTests, CallsDeleteErrorAlreadyExist) {
    // NOTE(review): the test name says "AlreadyExist" but the injected error is
    // kNoRecord (stream missing) — likely copied from an insert test; kept as-is
    // since renaming would break test filters/CI references.
    // Flag 3 requests error propagation, so the DB failure surfaces as a
    // bad-request error.
    ExpectDelete(3, &asapo::DBErrorTemplates::kNoRecord);
    ASSERT_THAT(handler.ProcessRequest(mock_request.get()),
                Eq(asapo::ReceiverErrorTemplates::kBadRequest));
}
TEST_F(DbMetaDeleteStreamTests, CallsDeleteNoErrorAlreadyExist) {
    // Flag 1: delete requested but the error-propagation bit is clear, so a
    // missing stream (kNoRecord) is swallowed and the request still succeeds.
    ExpectDelete(1, &asapo::DBErrorTemplates::kNoRecord);
    ASSERT_THAT(handler.ProcessRequest(mock_request.get()), Eq(nullptr));
}
TEST_F(DbMetaDeleteStreamTests, CallsDeleteNoOp) {
    // Flag 0: no option bits set — the handler skips the delete entirely
    // (ExpectDelete sets up no DB expectations), so the injected error template
    // is never exercised and the request succeeds.
    ExpectDelete(0, &asapo::DBErrorTemplates::kNoRecord);
    ASSERT_THAT(handler.ProcessRequest(mock_request.get()), Eq(nullptr));
}
}
|
// $Id: test_i.cpp 79850 2007-10-25 09:51:28Z johnnyw $
#include "test_i.h"
// Servant constructor: duplicate the ORB reference so the servant keeps the
// ORB alive and can later stop it from shutdown().
test_i::test_i (CORBA::ORB_ptr orb)
: orb_ (CORBA::ORB::_duplicate (orb))
{
}
// Trivial round-trip operation invoked by the test client: logs the call and
// always returns 0.
CORBA::Long
test_i::test_method ()
{
ACE_DEBUG ((LM_DEBUG, "client called us.\n"));
return 0;
}
// Asks the ORB to stop its event loop, ending the server process.
void
test_i::shutdown (void)
{
// Argument 0 (wait_for_completion = false): return immediately instead of
// blocking until pending requests finish.
this->orb_->shutdown (0);
}
|
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "operation/fully_connected.h"
#include "driver/amlogic_npu/converter/converter.h"
#include "utility/debug.h"
#include "utility/logging.h"
#include "utility/utility.h"
namespace nnadapter {
namespace amlogic_npu {
// Lowers an NNAdapter fully-connected operation to an amlnpu FULLCONNECT
// operator. Returns NNADAPTER_NO_ERROR on success.
int ConvertFullyConnected(Converter* converter, core::Operation* operation) {
  FULLY_CONNECTED_OPERATION_EXTRACT_INPUTS_OUTPUTS

  // Map the operands to amlnpu tensors; reuse the input tensor if a previous
  // operation already produced a mapping for it.
  auto fc_input = converter->GetMappedTensor(input_operand);
  if (!fc_input) {
    fc_input = converter->ConvertOperand(input_operand);
  }
  auto fc_weight = converter->ConvertOperand(weight_operand);
  auto fc_bias = converter->ConvertOperand(bias_operand);
  auto fc_output = converter->ConvertOperand(output_operand);

  // Fill the FC attributes; only NONE/RELU activation fusions are supported.
  aml::nn::FCAttr attr;
  attr.weights = num_units;
  switch (fuse_code) {
    case NNADAPTER_FUSED_NONE:
      attr.has_relu = false;
      break;
    case NNADAPTER_FUSED_RELU:
      attr.has_relu = true;
      break;
    default:
      NNADAPTER_LOG(FATAL) << "Unsupported fuse_code(" << fuse_code
                           << ") is found.";
      break;
  }

  std::vector<std::shared_ptr<aml::nn::Tensor>> fc_inputs = {
      fc_input, fc_weight, fc_bias};
  std::vector<std::shared_ptr<aml::nn::Tensor>> fc_outputs = {fc_output};
  converter->AddOperator(
      aml::nn::OperatorType::FULLCONNECT, fc_inputs, fc_outputs, &attr);
  return NNADAPTER_NO_ERROR;
}
} // namespace amlogic_npu
} // namespace nnadapter
|
/*
* Copyright (c) 2013, Hernan Saez
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "OrbitComponent.hpp"
#include "Mathematics/Numeric.hpp"
#include "SceneGraph/Node.hpp"
using namespace crimild;
// Initializes the orbit parameters: ellipse center (x0, y0), semi-axes
// major/minor, angular speed, and rotation angle gamma. Phase _t starts at 0.
OrbitComponent::OrbitComponent( float x0, float y0, float major, float minor, float speed, float gamma )
    : _t( 0.0f ),
      _x0( x0 ),
      _y0( y0 ),
      _major( major ),
      _minor( minor ),
      _speed( speed ),
      _gamma( gamma )
{
}
// Nothing to clean up — make the defaulting explicit instead of an empty body
// (clang-tidy: modernize-use-equals-default).
OrbitComponent::~OrbitComponent( void ) = default;
// Advances the orbit each frame: places the node on a rotated ellipse and
// steps the phase by the elapsed time.
void OrbitComponent::update( const Clock &c )
{
    // Hoist the trig terms: the original evaluated each of cos/sin(_t) and
    // cos/sin(_gamma) twice per frame. Results are numerically identical.
    const float cosT = std::cos( _t );
    const float sinT = std::sin( _t );
    const float cosG = std::cos( _gamma );
    const float sinG = std::sin( _gamma );

    // Parametric ellipse (semi-axes _major/_minor) centered at (_x0, _y0),
    // rotated by _gamma, written into the node's local translation x/y.
    getNode()->local().translate()[0] = _x0 + _major * cosT * cosG - _minor * sinT * sinG;
    getNode()->local().translate()[1] = _y0 + _major * cosT * sinG + _minor * sinT * cosG;

    // Phase advances with angular speed scaled by the frame's delta time.
    _t += _speed * c.getDeltaTime();
}
// Serializes the orbit parameters. The key order here is the wire/persistence
// contract and must stay in sync with decode() below.
void OrbitComponent::encode( coding::Encoder &encoder )
{
NodeComponent::encode( encoder );
encoder.encode( "x0", _x0 );
encoder.encode( "y0", _y0 );
encoder.encode( "major", _major );
encoder.encode( "minor", _minor );
encoder.encode( "speed", _speed );
encoder.encode( "gamma", _gamma );
}
// Restores the orbit parameters; mirror image of encode() — keys and order
// must match it exactly. Note the running phase _t is not persisted, so a
// decoded orbit restarts at phase 0 from the constructor's initialization.
void OrbitComponent::decode( coding::Decoder &decoder )
{
NodeComponent::decode( decoder );
decoder.decode( "x0", _x0 );
decoder.decode( "y0", _y0 );
decoder.decode( "major", _major );
decoder.decode( "minor", _minor );
decoder.decode( "speed", _speed );
decoder.decode( "gamma", _gamma );
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.