text
stringlengths 5
1.04M
|
|---|
/**
*
* ADTF Template Project
*
* @file
* Copyright © Audi Electronics Venture GmbH. All rights reserved
*
* $Author: belkera $
* $Date: 2011-06-30 16:51:21 +0200 (Thu, 30 Jun 2011) $
* $Revision: 26514 $
*
* @remarks
*
*/
#include "stdafx.h"
#include "filter.h"

#include <cmath>   // std::fabs in getDistanceInInterval
#include <vector>  // RAII patch buffer in OnPinEvent

//#define DEBUG_OUTPUT
#ifdef KATANA_ODOMETRY_SIMULATOR_DEBUG
#include <iostream>
#endif
#ifdef KATANA_ODOMETRY_SIMULATOR_PATCH_INPUT_DEBUG
#include <iostream>
#endif
/// Create filter shell
ADTF_FILTER_PLUGIN("AADC Odometry Simulator Filter", OID_ADTF_ODOMETRY_SIMULATOR_FILTER, OdometrySimulatorFilter)
/// Constructor: initializes all simulation state to "standing still" and
/// registers the two configurable ADTF properties (publish interval and a
/// constant steering offset used to simulate sensor drift).
OdometrySimulatorFilter::OdometrySimulatorFilter(const tChar* __info)
: cTimeTriggeredFilter(__info)
, m_current_steering(0)
, m_current_speed(0)
, m_forward(true)
, m_steering_drift(0)
, m_publish_rate(PUBLISH_RATE)
{
//LOG_INFO(adtf_util::cString::Format("counter %d...%d , rpm = %f ",counterValue,lastCounterValue,rpm));
// Expose the defaults as filter properties; the actual values are read back
// in Init(StageNormal).
SetPropertyInt("Publish_rate_ms", PUBLISH_RATE);
SetPropertyInt("Steering_drift", m_steering_drift);
}
/// Destructor: no manually-owned resources; pins and coder descriptions are
/// released by the ADTF base class / cObjectPtr reference counting.
OdometrySimulatorFilter::~OdometrySimulatorFilter()
{
}
/**
* The Filter Init Function.
* eInitStage ... StageFirst ... should be used for creating and registering Pins
* ... StageNormal .. should be used for reading the properies and initalizing
* everything before pin connections are made
* see {@link IFilter#Init IFilter::Init}.
*
*/
tResult OdometrySimulatorFilter::Init(tInitStage eStage, __exception)
{
// never miss calling the parent implementation!!
RETURN_IF_FAILED(cTimeTriggeredFilter::Init(eStage, __exception_ptr))
// in StageFirst you can create and register your static pins.
if (eStage == StageFirst)
{
// Resolve the media description manager so the "tSignalValue" DDL type can
// be used for the two scalar input pins.
cObjectPtr<IMediaDescriptionManager> pDescManager;
RETURN_IF_FAILED(_runtime->GetObject(OID_ADTF_MEDIA_DESCRIPTION_MANAGER,IID_ADTF_MEDIA_DESCRIPTION_MANAGER,(tVoid**)&pDescManager,__exception_ptr));
const tChar* strDescSignalValue;
// get a media type for the input pins
strDescSignalValue = pDescManager->GetMediaDescription("tSignalValue");
RETURN_IF_POINTER_NULL(strDescSignalValue);
cObjectPtr<IMediaType> pTypeSignalValue = new cMediaType(0, 0, 0, "tSignalValue", strDescSignalValue, IMediaDescription::MDF_DDL_DEFAULT_VERSION);
// Keep the coder description as a member; OnPinEvent uses it to decode both
// the steering and the speed samples.
RETURN_IF_FAILED(pTypeSignalValue->GetInterface(IID_ADTF_MEDIA_TYPE_DESCRIPTION, (tVoid**)&m_pCoderDescSignalInput));
// create and register the input pins
RETURN_IF_FAILED(m_ipin_steering.Create("steering_angle", pTypeSignalValue, this));
RETURN_IF_FAILED(RegisterPin(&m_ipin_steering));
RETURN_IF_FAILED(m_ipin_speed_cmd.Create("speed_cmd", pTypeSignalValue, this));
RETURN_IF_FAILED(RegisterPin(&m_ipin_speed_cmd));
// create pose output pin without media description
// (raw structured data: the sPose struct is copied byte-wise in sendPose)
RETURN_IF_FAILED(m_opin_pose_buffer.Create("pose_buffer" , new adtf::cMediaType(MEDIA_TYPE_STRUCTURED_DATA, MEDIA_SUBTYPE_STRUCT_STRUCTURED), static_cast<IPinEventSink*> (this)));
RETURN_IF_FAILED(RegisterPin(&m_opin_pose_buffer));
///////////////// TEST //////////////////
// Debug-only input carrying a status byte followed by an array of sPatch
// structs (see OnPinEvent).
RETURN_IF_FAILED(m_ipin_patches.Create("test_patches", new adtf::cMediaType(MEDIA_TYPE_STRUCTURED_DATA, MEDIA_SUBTYPE_STRUCT_STRUCTURED), this));
RETURN_IF_FAILED(RegisterPin(&m_ipin_patches));
}
else if (eStage == StageNormal)
{
// In this stage you would do further initialisation and/or create your dynamic pins.
// Please take a look at the demo_dynamicpin example for further reference.
// Read back the configured properties and arm the cycle timer.
m_steering_drift = GetPropertyInt("Steering_drift");
m_publish_rate = GetPropertyInt("Publish_rate_ms");
// SetInterval expects microseconds; the property is in milliseconds.
SetInterval(m_publish_rate * 1000);
}
else if (eStage == StageGraphReady)
{
// All pin connections have been established in this stage so you can query your pins
// about their media types and additional meta data.
// Please take a look at the demo_imageproc example for further reference.
}
RETURN_NOERROR;
}
/**
 * Filter shutdown: nothing to clean up manually — pins are handled by the base
 * class and the coder description is released by cObjectPtr.
 */
tResult OdometrySimulatorFilter::Shutdown(tInitStage eStage, __exception)
{
// In each stage clean up everything that you initiaized in the corresponding stage during Init.
// Pins are an exception:
// - The base class takes care of static pins that are members of this class.
// - Dynamic pins have to be cleaned up in the ReleasePins method, please see the demo_dynamicpin
// example for further reference.
if (eStage == StageGraphReady)
{
}
else if (eStage == StageNormal)
{
}
else if (eStage == StageFirst)
{
}
// Call the direct parent implementation. The original code called
// cFilter::Shutdown, which skips cTimeTriggeredFilter's own shutdown handling
// and is inconsistent with Init(), which calls cTimeTriggeredFilter::Init.
return cTimeTriggeredFilter::Shutdown(eStage, __exception_ptr);
}
/**
 * Pin event callback: decodes incoming samples and updates the simulation
 * state (steering command, speed command, debug patch list).
 */
tResult OdometrySimulatorFilter::OnPinEvent(IPin* pSource,
tInt nEventCode,
tInt nParam1,
tInt nParam2,
IMediaSample* pMediaSample)
{
// first check what kind of event it is
if (nEventCode == IPinEventSink::PE_MediaSampleReceived)
{
// Check each pointer individually. The original code passed the boolean
// expression (pMediaSample && m_pCoderDescSignalInput != NULL) to
// RETURN_IF_POINTER_NULL, which validates a temporary bool instead of the
// pointers themselves.
RETURN_IF_POINTER_NULL(pMediaSample);
RETURN_IF_POINTER_NULL(m_pCoderDescSignalInput);
// by comparing it to our member pin variable we can find out which pin received
// the sample
if (pSource == &m_ipin_steering)
{
// read-out the incoming Media Sample
cObjectPtr<IMediaCoder> pCoderInput;
RETURN_IF_FAILED(m_pCoderDescSignalInput->Lock(pMediaSample, &pCoderInput));
// initialize output variables with zero
tUInt32 timeStamp = 0;
tFloat32 value = 0;
// get values from media sample (timestamp is read but currently unused)
pCoderInput->Get("f32Value", (tVoid*)&value);
pCoderInput->Get("ui32ArduinoTimestamp", (tVoid*)&timeStamp);
m_pCoderDescSignalInput->Unlock(pCoderInput);
// Steering commands are stored as a small signed integer sensor value.
m_current_steering = (int8_t)value;
}
else if (pSource == &m_ipin_speed_cmd)
{
// read-out the incoming Media Sample
cObjectPtr<IMediaCoder> pCoderInput;
RETURN_IF_FAILED(m_pCoderDescSignalInput->Lock(pMediaSample, &pCoderInput));
// initialize output variables with zero
tUInt32 timeStamp = 0;
tFloat32 value = 0;
// get values from media sample
pCoderInput->Get("f32Value", (tVoid*)&value);
pCoderInput->Get("ui32ArduinoTimestamp", (tVoid*)&timeStamp);
m_pCoderDescSignalInput->Unlock(pCoderInput);
m_current_speed = value;
#ifdef KATANA_ODOMETRY_SIMULATOR_DEBUG
static u_int32_t counter = 0;
if (counter % 5 == 0)
std::cout <<"Odometry simulator: Speed update: " <<m_current_speed <<std::endl;
counter++;
#endif
}
else if (pSource == &m_ipin_patches)
{
// Sample layout: one status byte followed by an array of sPatch structs.
assert(pMediaSample->GetSize() > 0);
u_int8_t status;
const size_t array_size = pMediaSample->GetSize() - sizeof(u_int8_t);
const size_t num_patches = array_size / sizeof(sPatch);
pMediaSample->CopyBufferTo(&status, sizeof(status), 0, 0);
// RAII buffer instead of the original manual new[]/delete[] pair; the
// buffer is filled byte-wise from the sample, so sPatch is expected to be
// trivially copyable.
std::vector<sPatch> patches(num_patches);
if (num_patches > 0)
{
pMediaSample->CopyBufferTo(patches.data(), array_size, sizeof(status), 0);
}
#ifdef KATANA_ODOMETRY_SIMULATOR_PATCH_INPUT_DEBUG
std::cout <<"OdometrySimulator: New patches" <<std::endl;
std::cout <<"Status: " <<(u_int32_t)status <<std::endl;
for (size_t i = 0 ; i < num_patches; i++)
{
std::cout <<"PATCH NUM: " <<i <<" - " <<patches[i].patch_type <<std::endl;
}
#endif
}
}
RETURN_NOERROR;
}
/**
 * Converts the current speed command into the distance (in map units) the
 * simulated vehicle covers during one publish interval.
 *
 * @param current_speed speed command; sign encodes driving direction.
 * @return distance covered in one interval — always non-negative, the caller
 *         handles the driving direction separately.
 */
float OdometrySimulatorFilter::getDistanceInInterval(float current_speed)
{
// 0-20 = 0 Units/second
// 100 = 60.000 Units/second = 6m/second = 21.6 km/h
// Use the magnitude only; std::fabs replaces the original manual
// sign-flip (backward flag + multiply by -1).
const float speed_magnitude = std::fabs(current_speed);
// speed -> units/second (60000/80 scale), then scale by the interval length
// (m_publish_rate is in milliseconds). Evaluation order kept identical to
// the original to preserve float rounding.
float ret = speed_magnitude*60000.0f/80*m_publish_rate/1000;
// additional slow down
ret /= 2;
// always return positive value regardless of driving direction
return ret;
}
// Advances the simulated vehicle pose by one publish interval using a
// single-track ("Einspurmodell") bicycle model: the distance driven and the
// steering angle determine the yaw change, and the position is moved along
// the average heading of the interval.
void OdometrySimulatorFilter::updatePose()
{
float diff = getDistanceInInterval(m_current_speed);
// Standing still: nothing to integrate.
if (diff == 0.0)
return;
// Is the vehicle moving forward?
m_forward = m_current_speed >= 0.0;
/************************************************
* Calculate the direction in which to move the current vehicle position: current theta + steering
* The angle represents forward and backward movement (diff is always positive)
***********************************************/
// steering, notice driving direction; m_steering_drift simulates a constant
// sensor offset.
double angle = getAngleFromSteering(m_current_steering + m_steering_drift);
angle = diff/WHEELBASE * std::tan(angle); // Einspurmodell, yaw diff of vehicle pose when driving distance diff
// Reversing turns the vehicle the opposite way for the same steering angle.
if (!m_forward)
angle *= -1;
// move pose in direction <dir>
// Using theta + angle/2 integrates along the mid-interval heading
// (midpoint approximation of the arc).
float dir = m_forward ? m_pose.getTheta() + angle/2 : m_pose.getReverseDirection().getTheta() + angle/2;
m_pose.x() += (_position_type)(diff * cos(dir));
m_pose.y() += (_position_type)(diff * sin(dir));
// How much did the vehicle turn? -> update pose
m_pose.setTheta(m_pose.getTheta() + angle);
}
/**
 * Publishes the given pose on the pose_buffer output pin as a raw sPose
 * struct (byte-wise copy, no MediaSampleSerializer).
 *
 * @param p pose to transmit.
 * @return standard ADTF result; any allocation/copy/transmit failure is
 *         propagated to the caller.
 */
tResult OdometrySimulatorFilter::sendPose(const katana::Pose& p)
{
#ifdef KATANA_ODOMETRY_SIMULATOR_DEBUG
static u_int32_t counter = 0;
if (counter % 50 == 0)
std::cout <<"Pose: " <<p.getX() <<" " <<p.getY() <<" " <<p.getTheta() <<std::endl;
counter++;
#endif
// fast without MediaSampleSerializer
sPose sp;
sp.x = p.getX();
sp.y = p.getY();
sp.theta = p.getTheta();
cObjectPtr<IMediaSample> media_sample_buf;
RETURN_IF_FAILED(AllocMediaSample((tVoid**)&media_sample_buf));
RETURN_IF_FAILED(media_sample_buf->AllocBuffer(sizeof(sPose)));
RETURN_IF_FAILED(media_sample_buf->CopyBufferFrom(&sp, sizeof(sPose), 0, 0));
media_sample_buf->SetTime(_clock->GetStreamTime());
// Propagate transmit failures like every other call in this function; the
// original silently discarded the result of Transmit().
RETURN_IF_FAILED(m_opin_pose_buffer.Transmit(media_sample_buf));
RETURN_NOERROR;
}
// Maps a raw steering sensor value to a steering angle via the constant
// linear conversion factor.
double OdometrySimulatorFilter::getAngleFromSteering(double steering) const
{
const double angle = STEERING_SENSOR_TO_ANGLE * steering;
return angle;
}
// Timer callback, invoked every m_publish_rate milliseconds (see
// SetInterval in Init): integrate the pose one step, then publish it.
tResult OdometrySimulatorFilter::Cycle(__exception)
{
updatePose();
return sendPose(m_pose);
}
|
/*
* Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Builders.h"
#include "kernels/Minimum.h"
namespace luci_interpreter
{

/// Builds the interpreter kernel for a CircleMinimum node: resolves the two
/// operand tensors and the output tensor, then constructs kernels::Minimum.
/// Throws if the dispatcher routed a different node type to this builder.
std::unique_ptr<Kernel> build_kernel_CircleMinimum(const luci::CircleNode *circle_node,
                                                   KernelBuilderHelper &helper)
{
  const auto *minimum_node = dynamic_cast<const luci::CircleMinimum *>(circle_node);
  if (minimum_node == nullptr)
    throw std::runtime_error("wrong builder for operation");
  assert(minimum_node->arity() == 2);

  // Elementwise minimum takes exactly two inputs (x, y) and one output.
  const Tensor *x_tensor = helper.getInputTensor(minimum_node->x());
  const Tensor *y_tensor = helper.getInputTensor(minimum_node->y());
  Tensor *output_tensor = helper.getOutputTensor(minimum_node);

  return std::make_unique<kernels::Minimum>(x_tensor, y_tensor, output_tensor);
}

} // namespace luci_interpreter
|
#include "pch.h"
#include "AdaptiveSubmitAction.h"
#include "Util.h"
using namespace Microsoft::WRL;
using namespace ABI::AdaptiveNamespace;
using namespace ABI::Windows::Data::Json;
namespace AdaptiveNamespace
{
// Default WRL activation: wraps a freshly default-constructed shared-model
// SubmitAction by delegating to the overload below.
HRESULT AdaptiveSubmitAction::RuntimeClassInitialize() noexcept try
{
std::shared_ptr<AdaptiveSharedNamespace::SubmitAction> submitAction =
std::make_shared<AdaptiveSharedNamespace::SubmitAction>();
return RuntimeClassInitialize(submitAction);
}
CATCH_RETURN;
// Initializes this projection from an existing shared-model SubmitAction:
// converts its data JSON string into an IJsonValue and initializes the base
// action element properties.
_Use_decl_annotations_ HRESULT AdaptiveSubmitAction::RuntimeClassInitialize(
const std::shared_ptr<AdaptiveSharedNamespace::SubmitAction>& sharedSubmitAction) try
{
if (sharedSubmitAction == nullptr)
{
return E_INVALIDARG;
}
RETURN_IF_FAILED(StringToJsonValue(sharedSubmitAction->GetDataJson(), &m_dataJson));
InitializeBaseElement(std::static_pointer_cast<AdaptiveSharedNamespace::BaseActionElement>(sharedSubmitAction));
return S_OK;
}
CATCH_RETURN;
// Property getter: this projection always represents a Submit action.
_Use_decl_annotations_ HRESULT AdaptiveSubmitAction::get_ActionType(ABI::AdaptiveNamespace::ActionType* actionType)
{
*actionType = ABI::AdaptiveNamespace::ActionType::Submit;
return S_OK;
}
// Property getter: returns a new reference to the cached data JSON value.
_Use_decl_annotations_ HRESULT AdaptiveSubmitAction::get_DataJson(IJsonValue** data)
{
return m_dataJson.CopyTo(data);
}
// Property setter: replaces the cached data JSON value (ComPtr takes a ref).
_Use_decl_annotations_ HRESULT AdaptiveSubmitAction::put_DataJson(IJsonValue* data)
{
m_dataJson = data;
return S_OK;
}
// Converts this projection back into a shared-model SubmitAction: copies the
// base element properties, then serializes the cached IJsonValue back into
// the shared model's data JSON string.
HRESULT AdaptiveSubmitAction::GetSharedModel(std::shared_ptr<AdaptiveSharedNamespace::BaseActionElement>& sharedModel) try
{
std::shared_ptr<AdaptiveSharedNamespace::SubmitAction> submitAction =
std::make_shared<AdaptiveSharedNamespace::SubmitAction>();
RETURN_IF_FAILED(SetSharedElementProperties(std::static_pointer_cast<AdaptiveSharedNamespace::BaseActionElement>(submitAction)));
std::string jsonAsString;
RETURN_IF_FAILED(JsonValueToString(m_dataJson.Get(), jsonAsString));
submitAction->SetDataJson(jsonAsString);
sharedModel = submitAction;
return S_OK;
}
CATCH_RETURN;
}
|
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// TGUI - Texus' Graphical User Interface
// Copyright (C) 2012-2021 Bruno Van de Velde (vdv_b@tgui.eu)
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors be held liable for any damages arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it freely,
// subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented;
// you must not claim that you wrote the original software.
// If you use this software in a product, an acknowledgment
// in the product documentation would be appreciated but is not required.
//
// 2. Altered source versions must be plainly marked as such,
// and must not be misrepresented as being the original software.
//
// 3. This notice may not be removed or altered from any source distribution.
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <TGUI/ToolTip.hpp>
#include <TGUI/Container.hpp>
#include <TGUI/Animation.hpp>
#include <TGUI/Vector2.hpp>
#include <TGUI/GuiBase.hpp>
#include <TGUI/Loading/WidgetFactory.hpp>
#include <TGUI/SignalManager.hpp>
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace tgui
{
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace
{
    // Finishes (and removes) every running animation that would conflict with a
    // newly started animation of the given type. Only one animation of each
    // kind (fade / resize / move) can play at a time; animations of different
    // kinds (e.g. fading and moving) may run simultaneously.
    void finishExistingConflictingAnimations(std::vector<std::shared_ptr<priv::Animation>>& animations, ShowAnimationType type)
    {
        auto animIt = animations.begin();
        while (animIt != animations.end())
        {
            auto& animation = *animIt;
            if (((type == ShowAnimationType::Fade) && (animation->getType() == priv::Animation::Type::Fade))
                || ((type == ShowAnimationType::Scale) && (animation->getType() == priv::Animation::Type::Resize))
                || ((type != ShowAnimationType::Fade) && (animation->getType() == priv::Animation::Type::Move)))
            {
                // Jump the old animation to its end state before discarding it.
                animation->finish();
                animIt = animations.erase(animIt);
            }
            else
                ++animIt;
        }
    }

    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    // Parses a serialized Layout2d such as "(x, y)" or "{x, y}", where x and y
    // may themselves be layout expressions containing commas inside brackets.
    // Throws Exception on an empty string or unbalanced brackets.
    //
    // Note: the redundant 'static' was removed — entities in an anonymous
    // namespace already have internal linkage, and the sibling function above
    // is declared without it.
    Layout2d parseLayout(String str)
    {
        if (str.empty())
            throw Exception{"Failed to parse layout. String was empty."};

        // Remove the brackets around the value
        if (((str.front() == '(') && (str.back() == ')')) || ((str.front() == '{') && (str.back() == '}')))
            str = str.substr(1, str.length() - 2);

        if (str.empty())
            return {0, 0};

        // Find the comma that splits the x and y layouts, taking into account that these layouts may contain commas too.
        // Only a comma at bracket depth 0 separates the two components.
        unsigned int bracketCount = 0;
        auto commaOrBracketPos = str.find_first_of(",()");
        decltype(commaOrBracketPos) commaPos = 0;
        while (commaOrBracketPos != String::npos)
        {
            if (str[commaOrBracketPos] == '(')
                bracketCount++;
            else if (str[commaOrBracketPos] == ')')
            {
                if (bracketCount == 0)
                    throw Exception{"Failed to parse layout '" + str + "'. Brackets didn't match."};
                bracketCount--;
            }
            else // if (str[commaOrBracketPos] == ',')
            {
                if (bracketCount == 0)
                    commaPos = commaOrBracketPos;
            }
            commaOrBracketPos = str.find_first_of(",()", commaOrBracketPos + 1);
        }

        // Remove quotes around the values
        String x = str.substr(0, commaPos).trim();
        if ((x.size() >= 2) && ((x[0] == '"') && (x[x.length()-1] == '"')))
            x = x.substr(1, x.length()-2);

        String y = str.substr(commaPos + 1).trim();
        if ((y.size() >= 2) && ((y[0] == '"') && (y[y.length()-1] == '"')))
            y = y.substr(1, y.length()-2);

        return {x, y};
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Parses a string of the form "x,y" or "(x, y)" into a Vector2f.
// Throws Exception when the string is empty or contains no comma separator.
static Vector2f parseVector2f(String str)
{
    if (str.empty())
        throw Exception{"Failed to parse Vector2f string. String was empty."};

    // Strip one pair of surrounding round brackets, if present
    const bool bracketed = (str.front() == '(') && (str.back() == ')');
    if (bracketed)
        str = str.substr(1, str.length() - 2);

    const auto separatorPos = str.find(',');
    if (separatorPos == String::npos)
        throw Exception{"Failed to parse Vector2f string '" + str + "'. No comma found."};

    const float xValue = str.substr(0, separatorPos).trim().toFloat();
    const float yValue = str.substr(separatorPos + 1).trim().toFloat();
    return {xValue, yValue};
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Constructs a widget of the given type name. When initRenderer is true a
// fresh (unshared) renderer is created and this widget subscribes to its
// change notifications; subclasses that set up their own renderer pass false.
Widget::Widget(const char* typeName, bool initRenderer) :
m_type(typeName)
{
if (initRenderer)
{
m_renderer = aurora::makeCopied<WidgetRenderer>();
m_renderer->subscribe(this, m_rendererChangedCallback);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Destructor: unsubscribes from the renderer, detaches every layout that was
// bound to this widget's position/size, and removes the widget from the
// signal manager.
Widget::~Widget()
{
// The renderer will be null when the widget was moved
if (m_renderer)
m_renderer->unsubscribe(this);
// Layouts referencing this widget must stop doing so before we disappear.
for (auto& layout : m_boundPositionLayouts)
layout->unbindWidget();
for (auto& layout : m_boundSizeLayouts)
layout->unbindWidget();
SignalManager::getSignalManager()->remove(this);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copy constructor. Copies the visual/logical state, but deliberately does
// NOT copy: bound layouts (they reference the original widget), the parent
// pointers (the copy starts unattached), or the signal connections. The tool
// tip is deep-cloned; the renderer is shared with the original.
Widget::Widget(const Widget& other) :
enable_shared_from_this<Widget>{other},
m_type {other.m_type},
m_name {other.m_name},
m_position {other.m_position},
m_size {other.m_size},
m_textSize {other.m_textSize},
m_origin {other.m_origin},
m_rotationOrigin {other.m_rotationOrigin},
m_scaleOrigin {other.m_scaleOrigin},
m_scaleFactors {other.m_scaleFactors},
m_rotationDeg {other.m_rotationDeg},
m_boundPositionLayouts {},
m_boundSizeLayouts {},
m_enabled {other.m_enabled},
m_visible {other.m_visible},
m_parent {nullptr},
m_parentGui {nullptr},
m_draggableWidget {other.m_draggableWidget},
m_containerWidget {other.m_containerWidget},
m_toolTip {other.m_toolTip ? other.m_toolTip->clone() : nullptr},
m_renderer {other.m_renderer},
m_showAnimations {other.m_showAnimations},
m_fontCached {other.m_fontCached},
m_opacityCached {other.m_opacityCached},
m_mouseCursor {other.m_mouseCursor}
{
// Re-bind the copied position/size layouts to THIS widget so that layout
// changes trigger our own setPosition/setSize, not the original's.
m_position.x.connectWidget(this, true, [this]{ setPosition(getPositionLayout()); });
m_position.y.connectWidget(this, false, [this]{ setPosition(getPositionLayout()); });
m_size.x.connectWidget(this, true, [this]{ setSize(getSizeLayout()); });
m_size.y.connectWidget(this, false, [this]{ setSize(getSizeLayout()); });
m_renderer->subscribe(this, m_rendererChangedCallback);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Move constructor. Steals the other widget's state and signal connections,
// transfers the renderer subscription from 'other' to this widget, and
// detaches the moved-from widget from its parent container. The moved-from
// widget is left with a null renderer (checked in the destructor).
Widget::Widget(Widget&& other) :
enable_shared_from_this<Widget>{std::move(other)},
onPositionChange {std::move(other.onPositionChange)},
onSizeChange {std::move(other.onSizeChange)},
onFocus {std::move(other.onFocus)},
onUnfocus {std::move(other.onUnfocus)},
onMouseEnter {std::move(other.onMouseEnter)},
onMouseLeave {std::move(other.onMouseLeave)},
m_type {std::move(other.m_type)},
m_name {std::move(other.m_name)},
m_position {std::move(other.m_position)},
m_size {std::move(other.m_size)},
m_textSize {std::move(other.m_textSize)},
m_origin {std::move(other.m_origin)},
m_rotationOrigin {std::move(other.m_rotationOrigin)},
m_scaleOrigin {std::move(other.m_scaleOrigin)},
m_scaleFactors {std::move(other.m_scaleFactors)},
m_rotationDeg {std::move(other.m_rotationDeg)},
m_boundPositionLayouts {std::move(other.m_boundPositionLayouts)},
m_boundSizeLayouts {std::move(other.m_boundSizeLayouts)},
m_enabled {std::move(other.m_enabled)},
m_visible {std::move(other.m_visible)},
m_parent {nullptr},
m_parentGui {nullptr},
m_mouseHover {std::move(other.m_mouseHover)},
m_mouseDown {std::move(other.m_mouseDown)},
m_focused {std::move(other.m_focused)},
m_animationTimeElapsed {std::move(other.m_animationTimeElapsed)},
m_draggableWidget {std::move(other.m_draggableWidget)},
m_containerWidget {std::move(other.m_containerWidget)},
m_toolTip {std::move(other.m_toolTip)},
m_renderer {other.m_renderer},
m_showAnimations {std::move(other.m_showAnimations)},
m_fontCached {std::move(other.m_fontCached)},
m_opacityCached {std::move(other.m_opacityCached)},
m_mouseCursor {std::move(other.m_mouseCursor)}
{
// Re-bind the layout callbacks to THIS widget (they captured 'other').
m_position.x.connectWidget(this, true, [this]{ setPosition(getPositionLayout()); });
m_position.y.connectWidget(this, false, [this]{ setPosition(getPositionLayout()); });
m_size.x.connectWidget(this, true, [this]{ setSize(getSizeLayout()); });
m_size.y.connectWidget(this, false, [this]{ setSize(getSizeLayout()); });
// Hand over the renderer subscription, then null out the source so its
// destructor skips the unsubscribe.
other.m_renderer->unsubscribe(&other);
m_renderer->subscribe(this, m_rendererChangedCallback);
other.m_renderer = nullptr;
if (other.m_parent)
other.m_parent->remove(other.shared_from_this());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copy assignment. Mirrors the copy constructor: disconnects all existing
// signals and layout bindings, copies the other widget's state (tool tip
// deep-cloned, renderer shared), resets parent/mouse/focus state, and
// re-binds layouts and renderer subscription to this widget.
Widget& Widget::operator=(const Widget& other)
{
if (this != &other)
{
// Drop the subscription to the renderer we are about to replace.
m_renderer->unsubscribe(this);
enable_shared_from_this::operator=(other);
onPositionChange.disconnectAll();
onSizeChange.disconnectAll();
onFocus.disconnectAll();
onUnfocus.disconnectAll();
onMouseEnter.disconnectAll();
onMouseLeave.disconnectAll();
m_type = other.m_type;
m_name = other.m_name;
m_position = other.m_position;
m_size = other.m_size;
m_textSize = other.m_textSize;
m_origin = other.m_origin;
m_rotationOrigin = other.m_rotationOrigin;
m_scaleOrigin = other.m_scaleOrigin;
m_scaleFactors = other.m_scaleFactors;
m_rotationDeg = other.m_rotationDeg;
// Layout bindings reference a specific widget and are not copied.
m_boundPositionLayouts = {};
m_boundSizeLayouts = {};
m_enabled = other.m_enabled;
m_visible = other.m_visible;
// The assigned-to widget starts unattached and in a neutral input state.
m_parent = nullptr;
m_parentGui = nullptr;
m_mouseHover = false;
m_mouseDown = false;
m_focused = false;
m_animationTimeElapsed = {};
m_draggableWidget = other.m_draggableWidget;
m_containerWidget = other.m_containerWidget;
m_toolTip = other.m_toolTip ? other.m_toolTip->clone() : nullptr;
m_renderer = other.m_renderer;
m_showAnimations = {};
m_fontCached = other.m_fontCached;
m_opacityCached = other.m_opacityCached;
m_mouseCursor = other.m_mouseCursor;
// Re-bind layouts and renderer notifications to THIS widget.
m_position.x.connectWidget(this, true, [this]{ setPosition(getPositionLayout()); });
m_position.y.connectWidget(this, false, [this]{ setPosition(getPositionLayout()); });
m_size.x.connectWidget(this, true, [this]{ setSize(getSizeLayout()); });
m_size.y.connectWidget(this, false, [this]{ setSize(getSizeLayout()); });
m_renderer->subscribe(this, m_rendererChangedCallback);
}
return *this;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Move assignment. Mirrors the move constructor: steals the other widget's
// state and signals, transfers the renderer subscription, and leaves the
// moved-from widget with a null renderer.
Widget& Widget::operator=(Widget&& other)
{
if (this != &other)
{
// Both subscriptions must go: ours (renderer being replaced) and the
// source's (it will no longer own the renderer).
m_renderer->unsubscribe(this);
other.m_renderer->unsubscribe(&other);
enable_shared_from_this::operator=(std::move(other));
onPositionChange = std::move(other.onPositionChange);
onSizeChange = std::move(other.onSizeChange);
onFocus = std::move(other.onFocus);
onUnfocus = std::move(other.onUnfocus);
onMouseEnter = std::move(other.onMouseEnter);
onMouseLeave = std::move(other.onMouseLeave);
m_type = std::move(other.m_type);
m_name = std::move(other.m_name);
m_position = std::move(other.m_position);
m_size = std::move(other.m_size);
m_textSize = std::move(other.m_textSize);
m_origin = std::move(other.m_origin);
m_rotationOrigin = std::move(other.m_rotationOrigin);
m_scaleOrigin = std::move(other.m_scaleOrigin);
m_scaleFactors = std::move(other.m_scaleFactors);
m_rotationDeg = std::move(other.m_rotationDeg);
m_boundPositionLayouts = std::move(other.m_boundPositionLayouts);
m_boundSizeLayouts = std::move(other.m_boundSizeLayouts);
m_enabled = std::move(other.m_enabled);
m_visible = std::move(other.m_visible);
m_parent = nullptr;
m_parentGui = nullptr;
m_mouseHover = std::move(other.m_mouseHover);
m_mouseDown = std::move(other.m_mouseDown);
m_focused = std::move(other.m_focused);
m_animationTimeElapsed = std::move(other.m_animationTimeElapsed);
m_draggableWidget = std::move(other.m_draggableWidget);
m_containerWidget = std::move(other.m_containerWidget);
m_toolTip = std::move(other.m_toolTip);
m_renderer = std::move(other.m_renderer);
m_showAnimations = std::move(other.m_showAnimations);
m_fontCached = std::move(other.m_fontCached);
m_opacityCached = std::move(other.m_opacityCached);
m_mouseCursor = std::move(other.m_mouseCursor);
// Re-bind layouts and renderer notifications to THIS widget.
m_position.x.connectWidget(this, true, [this]{ setPosition(getPositionLayout()); });
m_position.y.connectWidget(this, false, [this]{ setPosition(getPositionLayout()); });
m_size.x.connectWidget(this, true, [this]{ setSize(getSizeLayout()); });
m_size.y.connectWidget(this, false, [this]{ setSize(getSizeLayout()); });
m_renderer->subscribe(this, m_rendererChangedCallback);
other.m_renderer = nullptr;
// NOTE(review): unlike the move constructor, this only removes 'other'
// from the SignalManager and does not call other.m_parent->remove(...) —
// confirm this asymmetry is intentional.
if (other.m_parent)
SignalManager::getSignalManager()->remove(&other);
}
return *this;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Replaces the renderer data backing this widget (a null argument installs a
// fresh default RendererData). After swapping the data, every property that
// differs between the old and new data — including properties that only exist
// in one of them — is reported via rendererChanged() so the widget can update
// its cached visuals.
void Widget::setRenderer(std::shared_ptr<RendererData> rendererData)
{
if (rendererData == nullptr)
rendererData = RendererData::create();
std::shared_ptr<RendererData> oldData = m_renderer->getData();
// Update the data
m_renderer->unsubscribe(this);
m_renderer->setData(rendererData);
m_renderer->subscribe(this, m_rendererChangedCallback);
// Data passed in from outside is by definition shared with the caller.
rendererData->shared = true;
// Tell the widget about all the updated properties, both new ones and old ones that were now reset to their default value
// Two-pointer merge over the (sorted) property maps of old and new data.
auto oldIt = oldData->propertyValuePairs.begin();
auto newIt = rendererData->propertyValuePairs.begin();
while (oldIt != oldData->propertyValuePairs.end() && newIt != rendererData->propertyValuePairs.end())
{
if (oldIt->first < newIt->first)
{
// Update values that no longer exist in the new renderer and are now reset to the default value
rendererChanged(oldIt->first);
++oldIt;
}
else
{
// Update changed and new properties
rendererChanged(newIt->first);
if (newIt->first < oldIt->first)
++newIt;
else
{
// Property exists in both maps: advance past it on both sides.
++oldIt;
++newIt;
}
}
}
// Drain whichever map still has entries left.
while (oldIt != oldData->propertyValuePairs.end())
{
rendererChanged(oldIt->first);
++oldIt;
}
while (newIt != rendererData->propertyValuePairs.end())
{
rendererChanged(newIt->first);
++newIt;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the renderer WITHOUT breaking sharing (contrast with getRenderer,
// which performs a copy-on-write detach first).
const WidgetRenderer* Widget::getSharedRenderer() const
{
// You should not be allowed to call setters on the renderer when the widget is const
return m_renderer.get();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Non-const overload: returns the renderer without detaching it from other
// widgets that share the same renderer data.
WidgetRenderer* Widget::getSharedRenderer()
{
return m_renderer.get();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the renderer after a copy-on-write detach: if the renderer data is
// currently shared with other widgets, it is cloned first so that subsequent
// changes only affect this widget.
const WidgetRenderer* Widget::getRenderer() const
{
if (m_renderer->getData()->shared)
{
// Detach: swap in a private clone of the data and mark it unshared.
m_renderer->unsubscribe(this);
m_renderer->setData(m_renderer->clone());
m_renderer->subscribe(this, m_rendererChangedCallback);
m_renderer->getData()->shared = false;
}
// You should not be allowed to call setters on the renderer when the widget is const
return m_renderer.get();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Non-const overload of getRenderer: same copy-on-write detach as the const
// version, returning a renderer whose setters only affect this widget.
WidgetRenderer* Widget::getRenderer()
{
if (m_renderer->getData()->shared)
{
// Detach: swap in a private clone of the data and mark it unshared.
m_renderer->unsubscribe(this);
m_renderer->setData(m_renderer->clone());
m_renderer->subscribe(this, m_rendererChangedCallback);
m_renderer->getData()->shared = false;
}
return m_renderer.get();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the widget's position layout. Re-binds the layout callbacks to this
// widget and, when the evaluated position actually changed, fires the
// onPositionChange signal and refreshes all layouts bound to our position.
void Widget::setPosition(const Layout2d& position)
{
m_position = position;
// The copied layout must call back into THIS widget when it recalculates.
m_position.x.connectWidget(this, true, [this]{ setPosition(getPositionLayout()); });
m_position.y.connectWidget(this, false, [this]{ setPosition(getPositionLayout()); });
// Only notify when the computed position really changed (avoids loops).
if (getPosition() != m_prevPosition)
{
m_prevPosition = getPosition();
onPositionChange.emit(this, getPosition());
for (auto& layout : m_boundPositionLayouts)
layout->recalculateValue();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the widget's size layout. Mirrors setPosition: re-binds the layout
// callbacks and, when the evaluated size actually changed, fires onSizeChange
// and refreshes all layouts bound to our size.
void Widget::setSize(const Layout2d& size)
{
m_size = size;
// The copied layout must call back into THIS widget when it recalculates.
m_size.x.connectWidget(this, true, [this]{ setSize(getSizeLayout()); });
m_size.y.connectWidget(this, false, [this]{ setSize(getSizeLayout()); });
// Only notify when the computed size really changed (avoids loops).
if (getSize() != m_prevSize)
{
m_prevSize = getSize();
onSizeChange.emit(this, getSize());
for (auto& layout : m_boundSizeLayouts)
layout->recalculateValue();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Base implementation: the full size (including any decoration a subclass
// might add) equals the plain size.
Vector2f Widget::getFullSize() const
{
return getSize();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Computes the widget's position in absolute (top-level) coordinates by
// accumulating parent offsets and then compensating for origin, rotation and
// scale transforms when they are non-default.
Vector2f Widget::getAbsolutePosition() const
{
Vector2f pos = getPosition();
if (m_parent)
pos += m_parent->getAbsolutePosition() + m_parent->getChildWidgetsOffset();
const bool defaultOrigin = (getOrigin().x == 0) && (getOrigin().y == 0);
const bool scaledOrRotated = (getScale().x != 1) || (getScale().y != 1) || (getRotation() != 0);
// Fast path: no transform to apply.
if (defaultOrigin && !scaledOrRotated)
return pos;
// Origin is stored as a fraction of the size; convert to pixels.
const Vector2f origin{getOrigin().x * getSize().x, getOrigin().y * getSize().y};
if (!scaledOrRotated)
return pos - origin;
// Full case: apply origin shift, rotation and scaling to the top-left
// corner and offset the result by the accumulated position.
const Vector2f rotOrigin{getRotationOrigin().x * getSize().x, getRotationOrigin().y * getSize().y};
const Vector2f scaleOrigin{getScaleOrigin().x * getSize().x, getScaleOrigin().y * getSize().y};
Transform transform;
transform.translate(-origin);
transform.rotate(getRotation(), rotOrigin);
transform.scale(getScale(), scaleOrigin);
return Vector2f(transform.transformPoint({0, 0})) + pos;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Base implementation: a plain widget has no internal drawing offset.
Vector2f Widget::getWidgetOffset() const
{
    const Vector2f noOffset{0, 0};
    return noOffset;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the origin, expressed as a fraction of the widget size (see the
// origin-to-pixels conversion in getAbsolutePosition).
void Widget::setOrigin(Vector2f origin)
{
m_origin = origin;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the scale factors and clears any scale-specific origin, so scaling
// falls back to the widget's general origin (see getScaleOrigin).
void Widget::setScale(Vector2f scaleFactors)
{
m_scaleFactors = scaleFactors;
m_scaleOrigin.reset();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the scale factors together with a dedicated origin around which the
// scaling is performed (overrides the widget's general origin).
void Widget::setScale(Vector2f scaleFactors, Vector2f origin)
{
m_scaleFactors = scaleFactors;
m_scaleOrigin = origin;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the origin used for scaling: the dedicated scale origin when one
// was set via setScale(factors, origin), otherwise the widget's general
// origin.
Vector2f Widget::getScaleOrigin() const
{
    return m_scaleOrigin ? *m_scaleOrigin : m_origin;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the rotation (in degrees) and clears any rotation-specific origin, so
// rotation falls back to the widget's general origin (see getRotationOrigin).
void Widget::setRotation(float angle)
{
m_rotationDeg = angle;
m_rotationOrigin.reset();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the rotation (in degrees) together with a dedicated origin around
// which to rotate (overrides the widget's general origin).
void Widget::setRotation(float angle, Vector2f origin)
{
m_rotationDeg = angle;
m_rotationOrigin = origin;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the origin around which rotation is applied. Falls back to the
// position origin when no explicit rotation origin was set.
Vector2f Widget::getRotationOrigin() const
{
    return m_rotationOrigin ? *m_rotationOrigin : m_origin;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Makes the widget visible and plays an intro animation of the given type.
// @param type      Which animation to play (fade, scale or one of the slides)
// @param duration  How long the animation should take
// Emits onAnimationFinish(type, true) when the animation completes.
void Widget::showWithEffect(ShowAnimationType type, Duration duration)
{
    setVisible(true);

    // We store the state the widget is currently in. In the event another animation was already playing, we should try to
    // use the current state to start our animation at, but this is not the state that the widget should end at. We must
    // get this state BEFORE finishing the previous animation which is done by finishExistingConflictingAnimations.
    const float startOpacity = getInheritedOpacity();
    //const Vector2f startPosition = getPosition();
    //const Vector2f startSize = getSize();
    finishExistingConflictingAnimations(m_showAnimations, type);

    switch (type)
    {
        case ShowAnimationType::Fade:
        {
            // Start from fully transparent, unless a fade animation was already playing
            float animStartOpacity = startOpacity;
            const float endOpacity = getInheritedOpacity();
            if (startOpacity == endOpacity)
            {
                setInheritedOpacity(0);
                animStartOpacity = 0;
            }
            else // If fading was already in progress then adapt the duration to finish the animation sooner
                duration *= (startOpacity / endOpacity);

            m_showAnimations.push_back(std::make_shared<priv::FadeAnimation>(shared_from_this(), animStartOpacity, endOpacity, duration,
                TGUI_LAMBDA_CAPTURE_EQ_THIS{onAnimationFinish.emit(this, type, true); }));
            break;
        }
        case ShowAnimationType::Scale:
        {
            // TODO: Use setScale instead of setSize
            // Grow from a point at the widget's center: move + resize run simultaneously
            m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), getPosition() + (getSize() / 2.f), getPosition(), duration));
            m_showAnimations.push_back(std::make_shared<priv::ResizeAnimation>(shared_from_this(), Vector2f{0, 0}, getSize(), duration,
                TGUI_LAMBDA_CAPTURE_EQ_THIS{onAnimationFinish.emit(this, type, true); }));
            setPosition(getPosition() + (getSize() / 2.f));
            setSize(0, 0);
            break;
        }
        case ShowAnimationType::SlideFromLeft:
        {
            // Start just outside the left edge and slide to the current position
            m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), Vector2f{-getFullSize().x, getPosition().y}, getPosition(), duration,
                TGUI_LAMBDA_CAPTURE_EQ_THIS{onAnimationFinish.emit(this, type, true); }));
            setPosition({-getFullSize().x, getPosition().y});
            break;
        }
        case ShowAnimationType::SlideFromRight:
        {
            // Needs the parent size to know where "outside the right edge" is
            if (getParent())
            {
                m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), Vector2f{getParent()->getSize().x + getWidgetOffset().x, getPosition().y}, getPosition(), duration,
                    TGUI_LAMBDA_CAPTURE_EQ_THIS{onAnimationFinish.emit(this, type, true); }));
                setPosition({getParent()->getSize().x + getWidgetOffset().x, getPosition().y});
            }
            else
            {
                TGUI_PRINT_WARNING("showWithEffect(SlideFromRight) does not work before widget has a parent.");
            }
            break;
        }
        case ShowAnimationType::SlideFromTop:
        {
            // Start just above the top edge and slide to the current position
            m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), Vector2f{getPosition().x, -getFullSize().y}, getPosition(), duration,
                TGUI_LAMBDA_CAPTURE_EQ_THIS{onAnimationFinish.emit(this, type, true); }));
            setPosition({getPosition().x, -getFullSize().y});
            break;
        }
        case ShowAnimationType::SlideFromBottom:
        {
            // Needs the parent size to know where "below the bottom edge" is
            if (getParent())
            {
                m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), Vector2f{getPosition().x, getParent()->getSize().y + getWidgetOffset().y}, getPosition(), duration,
                    TGUI_LAMBDA_CAPTURE_EQ_THIS{onAnimationFinish.emit(this, type, true); }));
                setPosition({getPosition().x, getParent()->getSize().y + getWidgetOffset().y});
            }
            else
            {
                TGUI_PRINT_WARNING("showWithEffect(SlideFromBottom) does not work before widget has a parent.");
            }
            break;
        }
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Plays an outro animation of the given type and hides the widget afterwards.
// @param type      Which animation to play (fade, scale or one of the slides)
// @param duration  How long the animation should take
// The pre-animation position/size/opacity are restored once the widget is
// hidden, and onAnimationFinish(type, false) is emitted on completion.
void Widget::hideWithEffect(ShowAnimationType type, Duration duration)
{
    // We store the state the widget is currently in. In the event another animation was already playing, we should try to
    // use the current state to start our animation at, but this is not the state that the widget should end at. We must
    // get this state BEFORE finishing the previous animation which is done by finishExistingConflictingAnimations.
    const float startOpacity = getInheritedOpacity();
    //const Vector2f startPosition = getPosition();
    //const Vector2f startSize = getSize();
    finishExistingConflictingAnimations(m_showAnimations, type);

    const auto position = getPosition();

    switch (type)
    {
        case ShowAnimationType::Fade:
        {
            const float endOpacity = getInheritedOpacity(); // Value to reset to after widget is hidden

            // If fading was already in progress then adapt the duration to finish the animation sooner
            if (startOpacity != endOpacity)
                duration *= (startOpacity / endOpacity);

            m_showAnimations.push_back(std::make_shared<priv::FadeAnimation>(shared_from_this(), startOpacity, 0.f, duration,
                TGUI_LAMBDA_CAPTURE_EQ_THIS{ setVisible(false); setInheritedOpacity(endOpacity); onAnimationFinish.emit(this, type, false); }));
            break;
        }
        case ShowAnimationType::Scale:
        {
            // TODO: Use setScale instead of setSize
            // Shrink towards the widget's center; restore position and size when done
            const auto size = getSize();
            m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), position, position + (size / 2.f), duration));
            m_showAnimations.push_back(std::make_shared<priv::ResizeAnimation>(shared_from_this(), size, Vector2f{0, 0}, duration,
                TGUI_LAMBDA_CAPTURE_EQ_THIS{ setVisible(false); setPosition(position); setSize(size); onAnimationFinish.emit(this, type, false); }));
            break;
        }
        case ShowAnimationType::SlideToRight:
        {
            // Needs the parent size to know where "outside the right edge" is
            if (getParent())
            {
                m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), position, Vector2f{getParent()->getSize().x + getWidgetOffset().x, position.y}, duration,
                    TGUI_LAMBDA_CAPTURE_EQ_THIS{ setVisible(false); setPosition(position); onAnimationFinish.emit(this, type, false); }));
            }
            else
            {
                TGUI_PRINT_WARNING("hideWithEffect(SlideToRight) does not work before widget has a parent.");
            }
            break;
        }
        case ShowAnimationType::SlideToLeft:
        {
            // Slide out past the left edge
            m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), position, Vector2f{-getFullSize().x, position.y}, duration,
                TGUI_LAMBDA_CAPTURE_EQ_THIS{ setVisible(false); setPosition(position); onAnimationFinish.emit(this, type, false); }));
            break;
        }
        case ShowAnimationType::SlideToBottom:
        {
            // Needs the parent size to know where "below the bottom edge" is
            if (getParent())
            {
                m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), position, Vector2f{position.x, getParent()->getSize().y + getWidgetOffset().y}, duration,
                    TGUI_LAMBDA_CAPTURE_EQ_THIS{ setVisible(false); setPosition(position); onAnimationFinish.emit(this, type, false); }));
            }
            else
            {
                TGUI_PRINT_WARNING("hideWithEffect(SlideToBottom) does not work before widget has a parent.");
            }
            break;
        }
        case ShowAnimationType::SlideToTop:
        {
            // Slide out past the top edge
            m_showAnimations.push_back(std::make_shared<priv::MoveAnimation>(shared_from_this(), position, Vector2f{position.x, -getFullSize().y}, duration,
                TGUI_LAMBDA_CAPTURE_EQ_THIS{ setVisible(false); setPosition(position); onAnimationFinish.emit(this, type, false); }));
            break;
        }
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Shows or hides the widget.
void Widget::setVisible(bool visible)
{
    m_visible = visible;

    // If the widget is hidden while still focused then it must be unfocused
    if (!visible)
        setFocused(false);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Enables or disables the widget. A disabled widget loses its hover, pressed
// and focused states.
void Widget::setEnabled(bool enabled)
{
    m_enabled = enabled;
    if (!enabled)
    {
        m_mouseHover = false;
        m_mouseDown = false;
        setFocused(false);
    }

    // Refresh widget opacity if there is a different value set for enabled and disabled widgets
    // (-1 means no separate disabled opacity is configured)
    if (getSharedRenderer()->getOpacityDisabled() != -1)
        rendererChanged("OpacityDisabled");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Focuses or unfocuses the widget. Focus is only granted when canGainFocus()
// allows it (enabled, visible and focusable). Emits onFocus/onUnfocus.
void Widget::setFocused(bool focused)
{
    if (m_focused == focused)
        return;

    if (focused)
    {
        if (canGainFocus())
        {
            m_focused = true;

            // Notify the parent before emitting, presumably so it can update
            // sibling focus state first — TODO confirm against Container
            if (m_parent)
                m_parent->childWidgetFocused(shared_from_this());

            onFocus.emit(this);
        }
    }
    else // Unfocusing widget
    {
        m_focused = false;
        onUnfocus.emit(this);
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the type name of the widget (e.g. as used when saving/loading).
const String& Widget::getWidgetType() const
{
    return m_type;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns whether any show/hide animation is currently in progress.
bool Widget::isAnimationPlaying() const
{
    return !m_showAnimations.empty();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Moves the widget in front of its siblings. No-op without a parent.
void Widget::moveToFront()
{
    if (m_parent)
        m_parent->moveWidgetToFront(shared_from_this());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Moves the widget behind its siblings. No-op without a parent.
void Widget::moveToBack()
{
    if (m_parent)
        m_parent->moveWidgetToBack(shared_from_this());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the font passed down from the parent and re-resolves the cached font
// (see the "Font" branch in rendererChanged for the resolution order).
void Widget::setInheritedFont(const Font& font)
{
    m_inheritedFont = font;
    rendererChanged("Font");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the font that was passed down from the parent.
const Font& Widget::getInheritedFont() const
{
    return m_inheritedFont;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the opacity factor passed down from the parent and refreshes the
// cached combined opacity (see the "Opacity" branch in rendererChanged).
void Widget::setInheritedOpacity(float opacity)
{
    m_inheritedOpacity = opacity;
    rendererChanged("Opacity");
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the opacity factor that was passed down from the parent.
float Widget::getInheritedOpacity() const
{
    return m_inheritedOpacity;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the character size of any text in the widget.
void Widget::setTextSize(unsigned int size)
{
    m_textSize = size;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the character size of any text in the widget.
unsigned int Widget::getTextSize() const
{
    return m_textSize;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the widget that is shown as tool tip when hovering over this widget.
// @param toolTip  Tool tip widget, or nullptr to remove the tool tip.
void Widget::setToolTip(Widget::Ptr toolTip)
{
    // The parameter is a sink: move the caller's reference into place instead
    // of copying it (a shared_ptr copy costs an atomic ref-count update).
    m_toolTip = std::move(toolTip);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the tool tip widget, or nullptr when none was set.
Widget::Ptr Widget::getToolTip() const
{
    return m_toolTip;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Renames the widget. When attached to a parent, the widget is re-registered
// with the signal manager so lookups by widget name keep working.
void Widget::setWidgetName(const String& name)
{
    if (m_name == name)
        return;

    m_name = name;

    if (m_parent)
    {
        SignalManager::getSignalManager()->remove(this);
        SignalManager::getSignalManager()->add(shared_from_this());
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the name of the widget (empty when never set).
String Widget::getWidgetName() const
{
    return m_name;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets the mouse cursor that is shown while the mouse is on top of the widget.
// @param cursor  Cursor to display on hover.
void Widget::setMouseCursor(Cursor::Type cursor)
{
    m_mouseCursor = cursor;

    // If the mouse is already on the widget, update the displayed cursor now.
    // Guard against a null gui pointer (consistent with mouseEnteredWidget),
    // which would otherwise crash when the widget is not attached to a gui.
    if (m_mouseHover && m_parentGui)
        m_parentGui->requestMouseCursor(cursor);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the cursor that is shown while the mouse is on top of the widget.
Cursor::Type Widget::getMouseCursor() const
{
    return m_mouseCursor;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sets whether the widget may gain focus (see canGainFocus).
void Widget::setFocusable(bool focusable)
{
    m_focusable = focusable;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns whether the widget may gain focus.
bool Widget::isFocusable() const
{
    return m_focusable;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns whether the widget can currently be focused: it must be enabled,
// visible and marked focusable.
bool Widget::canGainFocus() const
{
    return m_enabled && m_visible && m_focusable;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns whether the widget is a container that can hold child widgets.
bool Widget::isContainer() const
{
    return m_containerWidget;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns whether the widget can be dragged with the mouse.
bool Widget::isDraggableWidget() const
{
    return m_draggableWidget;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns whether the left mouse button is being held down on the widget.
bool Widget::isMouseDown() const
{
    return m_mouseDown;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Attaches the widget to a (new) parent container, or detaches it with
// nullptr. Keeps the signal-manager registration in sync and re-binds the
// position/size layouts so they can resolve widget references again.
void Widget::setParent(Container* parent)
{
    // The gui pointer is refreshed unconditionally: even with the same parent
    // the parent's gui may have changed
    m_parentGui = parent ? parent->getParentGui() : nullptr;
    if (m_parent == parent)
        return;

    // Register/unregister exactly on attach/detach transitions
    if (!parent)
        SignalManager::getSignalManager()->remove(this);
    else if (!m_parent)
        SignalManager::getSignalManager()->add(shared_from_this());

    m_parent = parent;

    // Give the layouts another chance to find widgets to which it refers
    if (parent)
    {
        m_position.x.connectWidget(this, true, [this]{ setPosition(getPositionLayout()); });
        m_position.y.connectWidget(this, false, [this]{ setPosition(getPositionLayout()); });
        m_size.x.connectWidget(this, true, [this]{ setSize(getSizeLayout()); });
        m_size.y.connectWidget(this, false, [this]{ setSize(getSizeLayout()); });
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Advances the widget's timers and animations by the elapsed time.
// @param elapsedTime  Time passed since the previous call
// @return true when the screen needs to be redrawn (an animation was active)
bool Widget::updateTime(Duration elapsedTime)
{
    m_animationTimeElapsed += elapsedTime;

    // A redraw is needed while any animation is in progress
    const bool redrawNeeded = !m_showAnimations.empty();

    // Step every animation; erase the ones that report completion
    auto animIt = m_showAnimations.begin();
    while (animIt != m_showAnimations.end())
    {
        if ((*animIt)->update(elapsedTime))
            animIt = m_showAnimations.erase(animIt);
        else
            ++animIt;
    }

    return redrawNeeded;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the left mouse button is pressed on the widget.
void Widget::leftMousePressed(Vector2f)
{
    m_mouseDown = true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the left mouse button is released on the widget.
void Widget::leftMouseReleased(Vector2f)
{
    m_mouseDown = false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the right mouse button is pressed on the widget.
// Intentionally empty; derived widgets override this when needed.
void Widget::rightMousePressed(Vector2f)
{
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the right mouse button is released on the widget.
// Intentionally empty; derived widgets override this when needed.
void Widget::rightMouseReleased(Vector2f)
{
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Dispatches a mouse-press event to the handler for the pressed button.
// Buttons other than left/right are ignored.
void Widget::mousePressed(Event::MouseButton button, Vector2f pos)
{
    switch (button)
    {
        case Event::MouseButton::Left:
            leftMousePressed(pos);
            break;
        case Event::MouseButton::Right:
            rightMousePressed(pos);
            break;
        default:
            break;
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Dispatches a mouse-release event to the handler for the released button.
// Buttons other than left/right are ignored.
void Widget::mouseReleased(Event::MouseButton button, Vector2f pos)
{
    switch (button)
    {
        case Event::MouseButton::Left:
            leftMouseReleased(pos);
            break;
        case Event::MouseButton::Right:
            rightMouseReleased(pos);
            break;
        default:
            break;
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the mouse moves while on top of the widget; triggers the
// enter notification on the first movement over the widget.
void Widget::mouseMoved(Vector2f)
{
    if (!m_mouseHover)
        mouseEnteredWidget();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when a key is pressed while the widget is focused.
// Intentionally empty; derived widgets override this when needed.
void Widget::keyPressed(const Event::KeyEvent&)
{
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when a character is typed while the widget is focused.
// Intentionally empty; derived widgets override this when needed.
void Widget::textEntered(char32_t)
{
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the mouse wheel scrolls on top of the widget.
// @return true when the event was handled; the base widget never handles it.
bool Widget::mouseWheelScrolled(float, Vector2f)
{
    return false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the mouse is no longer on top of the widget; triggers the
// leave notification if the widget was being hovered.
void Widget::mouseNoLongerOnWidget()
{
    if (m_mouseHover)
        mouseLeftWidget();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the left mouse button is released anywhere (not necessarily on
// the widget) after having been pressed on the widget.
void Widget::leftMouseButtonNoLongerDown()
{
    m_mouseDown = false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when the right mouse button is released anywhere after having been
// pressed on the widget. Intentionally empty; derived widgets override it.
void Widget::rightMouseButtonNoLongerDown()
{
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the widget's tool tip when the mouse is on top of this widget,
// or nullptr when there is no tool tip or the mouse is elsewhere.
Widget::Ptr Widget::askToolTip(Vector2f mousePos)
{
    if (!m_toolTip || !isMouseOnWidget(mousePos))
        return nullptr;

    return getToolTip();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Registers a layout that must be notified when this widget's position changes.
void Widget::bindPositionLayout(Layout* layout)
{
    m_boundPositionLayouts.insert(layout);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Unregisters a layout that was bound with bindPositionLayout.
void Widget::unbindPositionLayout(Layout* layout)
{
    m_boundPositionLayouts.erase(layout);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Registers a layout that must be notified when this widget's size changes.
void Widget::bindSizeLayout(Layout* layout)
{
    m_boundSizeLayouts.insert(layout);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Unregisters a layout that was bound with bindSizeLayout.
void Widget::unbindSizeLayout(Layout* layout)
{
    m_boundSizeLayouts.erase(layout);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Looks up one of the widget's signals by name.
// @param signalName  Name of the signal (e.g. "PositionChanged")
// @return reference to the matching signal member
// @throws Exception when no signal with that name exists on this widget
Signal& Widget::getSignal(String signalName)
{
    if (signalName == onPositionChange.getName())
        return onPositionChange;
    else if (signalName == onSizeChange.getName())
        return onSizeChange;
    else if (signalName == onFocus.getName())
        return onFocus;
    else if (signalName == onUnfocus.getName())
        return onUnfocus;
    else if (signalName == onMouseEnter.getName())
        return onMouseEnter;
    else if (signalName == onMouseLeave.getName())
        return onMouseLeave;
    else if (signalName == onAnimationFinish.getName())
        return onAnimationFinish;

    throw Exception{"No signal exists with name '" + std::move(signalName) + "'."};
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Called when a property of the renderer changes; refreshes the cached value
// for that property.
// @param property  Name of the renderer property that changed
// @throws Exception when the widget has no property with that name
void Widget::rendererChanged(const String& property)
{
    if ((property == "Opacity") || (property == "OpacityDisabled"))
    {
        // -1 means no separate disabled opacity is configured
        if (!m_enabled && (getSharedRenderer()->getOpacityDisabled() != -1))
            m_opacityCached = getSharedRenderer()->getOpacityDisabled() * m_inheritedOpacity;
        else
            m_opacityCached = getSharedRenderer()->getOpacity() * m_inheritedOpacity;
    }
    else if (property == "Font")
    {
        // Resolution order: renderer font, then inherited font, then global font
        if (getSharedRenderer()->getFont())
            m_fontCached = getSharedRenderer()->getFont();
        else if (m_inheritedFont)
            m_fontCached = m_inheritedFont;
        else
            m_fontCached = Font::getGlobalFont();
    }
    else if (property == "TransparentTexture")
    {
        m_transparentTextureCached = getSharedRenderer()->getTransparentTexture();
    }
    else
    {
        // Fixed grammar in the error message ("does not has" -> "does not have")
        throw Exception{"Could not set property '" + property + "', widget of type '" + getWidgetType() + "' does not have this property."};
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Serializes the widget into a DataIO tree node.
// @param renderers  Map from widget to a (renderer node, renderer name) pair
//                   shared across the whole save; when the node part is null
//                   the renderer is written as a "&name" reference instead
// @return node describing this widget's properties (defaults are omitted)
std::unique_ptr<DataIO::Node> Widget::save(SavingRenderersMap& renderers) const
{
    auto node = std::make_unique<DataIO::Node>();
    if (m_name.empty())
        node->name = getWidgetType();
    else
        node->name = getWidgetType() + "." + Serializer::serialize(m_name);

    // Only write properties that differ from their default value
    if (!isVisible())
        node->propertyValuePairs["Visible"] = std::make_unique<DataIO::ValueNode>("false");
    if (!isEnabled())
        node->propertyValuePairs["Enabled"] = std::make_unique<DataIO::ValueNode>("false");
    if (getPosition() != Vector2f{})
        node->propertyValuePairs["Position"] = std::make_unique<DataIO::ValueNode>(m_position.toString());
    if (getSize() != Vector2f{})
        node->propertyValuePairs["Size"] = std::make_unique<DataIO::ValueNode>(m_size.toString());
    if (getOrigin() != Vector2f{})
        node->propertyValuePairs["Origin"] = std::make_unique<DataIO::ValueNode>("(" + String::fromNumber(m_origin.x) + "," + String::fromNumber(m_origin.y) + ")");

    if (getScale() != Vector2f{1, 1})
    {
        node->propertyValuePairs["Scale"] = std::make_unique<DataIO::ValueNode>("(" + String::fromNumber(m_scaleFactors.x) + "," + String::fromNumber(m_scaleFactors.y) + ")");

        // The scale origin is only stored when explicitly set
        if (m_scaleOrigin)
            node->propertyValuePairs["ScaleOrigin"] = std::make_unique<DataIO::ValueNode>("(" + String::fromNumber(m_scaleOrigin->x) + "," + String::fromNumber(m_scaleOrigin->y) + ")");
    }

    if (getRotation() != 0)
    {
        node->propertyValuePairs["Rotation"] = std::make_unique<DataIO::ValueNode>(String::fromNumber(m_rotationDeg));

        // The rotation origin is only stored when explicitly set
        if (m_rotationOrigin)
            node->propertyValuePairs["RotationOrigin"] = std::make_unique<DataIO::ValueNode>("(" + String::fromNumber(m_rotationOrigin->x) + "," + String::fromNumber(m_rotationOrigin->y) + ")");
    }

    // User data can only be saved when it holds a string-like type
#if TGUI_COMPILED_WITH_CPP_VER >= 17
    if (m_userData.has_value())
    {
        if (m_userData.type() == typeid(String))
        {
            const String string = std::any_cast<String>(m_userData);
            node->propertyValuePairs["UserData"] = std::make_unique<DataIO::ValueNode>(Serializer::serialize(string));
        }
        else if (m_userData.type() == typeid(std::string))
        {
            const String string = std::any_cast<std::string>(m_userData);
            node->propertyValuePairs["UserData"] = std::make_unique<DataIO::ValueNode>(Serializer::serialize(string));
        }
        else if (m_userData.type() == typeid(const char*))
        {
            const String string = std::any_cast<const char*>(m_userData);
            node->propertyValuePairs["UserData"] = std::make_unique<DataIO::ValueNode>(Serializer::serialize(string));
        }
    }
#else
    if (m_userData.not_null())
    {
        if (m_userData.is<String>())
        {
            node->propertyValuePairs["UserData"] = std::make_unique<DataIO::ValueNode>(Serializer::serialize(m_userData.as<String>()));
        }
        else if (m_userData.is<std::string>())
        {
            node->propertyValuePairs["UserData"] = std::make_unique<DataIO::ValueNode>(Serializer::serialize(String(m_userData.as<std::string>()))) ;
        }
        else if (m_userData.is<const char*>())
        {
            node->propertyValuePairs["UserData"] = std::make_unique<DataIO::ValueNode>(Serializer::serialize(m_userData.as<const char*>()));
        }
    }
#endif

    String mouseCursorStr;
    switch (m_mouseCursor)
    {
    case Cursor::Type::Text:            mouseCursorStr = "Text"; break;
    case Cursor::Type::Hand:            mouseCursorStr = "Hand"; break;
    case Cursor::Type::SizeLeft:        mouseCursorStr = "SizeLeft"; break;
    case Cursor::Type::SizeRight:       mouseCursorStr = "SizeRight"; break;
    case Cursor::Type::SizeTop:         mouseCursorStr = "SizeTop"; break;
    case Cursor::Type::SizeBottom:      mouseCursorStr = "SizeBottom"; break;
    case Cursor::Type::SizeBottomRight: mouseCursorStr = "SizeBottomRight"; break;
    case Cursor::Type::SizeTopLeft:     mouseCursorStr = "SizeTopLeft"; break;
    case Cursor::Type::SizeBottomLeft:  mouseCursorStr = "SizeBottomLeft"; break;
    case Cursor::Type::SizeTopRight:    mouseCursorStr = "SizeTopRight"; break;
    case Cursor::Type::Crosshair:       mouseCursorStr = "Crosshair"; break;
    case Cursor::Type::Help:            mouseCursorStr = "Help"; break;
    case Cursor::Type::NotAllowed:      mouseCursorStr = "NotAllowed"; break;
    case Cursor::Type::Arrow: break; // We don't save the cursor if it has the default value
    }
    if (!mouseCursorStr.empty())
        node->propertyValuePairs["MouseCursor"] = std::make_unique<DataIO::ValueNode>(mouseCursorStr);

    // The tool tip is stored as a child section wrapping the tool tip widget
    if (getToolTip() != nullptr)
    {
        auto toolTipWidgetNode = getToolTip()->save(renderers);

        auto toolTipNode = std::make_unique<DataIO::Node>();
        toolTipNode->name = "ToolTip";
        toolTipNode->children.emplace_back(std::move(toolTipWidgetNode));
        toolTipNode->propertyValuePairs["InitialDelay"] = std::make_unique<DataIO::ValueNode>(String::fromNumber(ToolTip::getInitialDelay().asSeconds()));
        toolTipNode->propertyValuePairs["DistanceToMouse"] = std::make_unique<DataIO::ValueNode>("("
            + String::fromNumber(ToolTip::getDistanceToMouse().x) + "," + String::fromNumber(ToolTip::getDistanceToMouse().y) + ")");

        node->children.emplace_back(std::move(toolTipNode));
    }

    // Either embed the renderer node or reference a shared renderer by name
    if (renderers.at(this).first)
        node->children.emplace_back(std::move(renderers.at(this).first));
    else
        node->propertyValuePairs["Renderer"] = std::make_unique<DataIO::ValueNode>("&" + renderers.at(this).second);

    return node;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Restores the widget from a DataIO tree node (inverse of save).
// @param node       Node containing the widget's properties and children;
//                   the "ToolTip" and "Renderer" children are consumed and
//                   removed from the node at the end
// @param renderers  Map from renderer name to shared renderer data, used to
//                   resolve "&name" renderer references
// @throws Exception on an unknown MouseCursor value, a malformed or missing
//                   renderer reference, or a ToolTip section with several
//                   children
void Widget::load(const std::unique_ptr<DataIO::Node>& node, const LoadingRenderersMap& renderers)
{
    if (node->propertyValuePairs["Visible"])
        setVisible(Deserializer::deserialize(ObjectConverter::Type::Bool, node->propertyValuePairs["Visible"]->value).getBool());
    if (node->propertyValuePairs["Enabled"])
        setEnabled(Deserializer::deserialize(ObjectConverter::Type::Bool, node->propertyValuePairs["Enabled"]->value).getBool());
    if (node->propertyValuePairs["Position"])
        setPosition(parseLayout(node->propertyValuePairs["Position"]->value));
    if (node->propertyValuePairs["Size"])
        setSize(parseLayout(node->propertyValuePairs["Size"]->value));
    if (node->propertyValuePairs["Origin"])
        setOrigin(parseVector2f(node->propertyValuePairs["Origin"]->value));

    // Scale/rotation use the two-argument setter only when an explicit origin was saved
    if (node->propertyValuePairs["Scale"])
    {
        if (node->propertyValuePairs["ScaleOrigin"])
            setScale(parseVector2f(node->propertyValuePairs["Scale"]->value), parseVector2f(node->propertyValuePairs["ScaleOrigin"]->value));
        else
            setScale(parseVector2f(node->propertyValuePairs["Scale"]->value));
    }
    if (node->propertyValuePairs["Rotation"])
    {
        if (node->propertyValuePairs["RotationOrigin"])
            setRotation(node->propertyValuePairs["Rotation"]->value.toFloat(), parseVector2f(node->propertyValuePairs["RotationOrigin"]->value));
        else
            setRotation(node->propertyValuePairs["Rotation"]->value.toFloat());
    }

    // User data is always restored as a string (only string-like data is saved)
    if (node->propertyValuePairs["UserData"])
    {
#if TGUI_COMPILED_WITH_CPP_VER >= 17
        m_userData = std::make_any<String>(Deserializer::deserialize(ObjectConverter::Type::String, node->propertyValuePairs["UserData"]->value).getString());
#else
        m_userData = tgui::Any(Deserializer::deserialize(ObjectConverter::Type::String, node->propertyValuePairs["UserData"]->value).getString());
#endif
    }

    if (node->propertyValuePairs["MouseCursor"])
    {
        String cursorStr = node->propertyValuePairs["MouseCursor"]->value.trim();
        if (cursorStr == "Text")
            m_mouseCursor = Cursor::Type::Text;
        else if (cursorStr == "Hand")
            m_mouseCursor = Cursor::Type::Hand;
        else if (cursorStr == "SizeLeft")
            m_mouseCursor = Cursor::Type::SizeLeft;
        else if (cursorStr == "SizeRight")
            m_mouseCursor = Cursor::Type::SizeRight;
        else if (cursorStr == "SizeTop")
            m_mouseCursor = Cursor::Type::SizeTop;
        else if (cursorStr == "SizeBottom")
            m_mouseCursor = Cursor::Type::SizeBottom;
        else if (cursorStr == "SizeBottomRight")
            m_mouseCursor = Cursor::Type::SizeBottomRight;
        else if (cursorStr == "SizeTopLeft")
            m_mouseCursor = Cursor::Type::SizeTopLeft;
        else if (cursorStr == "SizeBottomLeft")
            m_mouseCursor = Cursor::Type::SizeBottomLeft;
        else if (cursorStr == "SizeTopRight")
            m_mouseCursor = Cursor::Type::SizeTopRight;
        else if (cursorStr == "Crosshair")
            m_mouseCursor = Cursor::Type::Crosshair;
        else if (cursorStr == "Help")
            m_mouseCursor = Cursor::Type::Help;
        else if (cursorStr == "NotAllowed")
            m_mouseCursor = Cursor::Type::NotAllowed;
        else if (cursorStr == "Arrow")
            m_mouseCursor = Cursor::Type::Arrow;
        else
            throw Exception{"Failed to parse 'MouseCursor' property. Invalid cursor '" + cursorStr + "'."};
    }

    // A "Renderer" property is a "&name" reference into the shared renderers map
    if (node->propertyValuePairs["Renderer"])
    {
        const String value = node->propertyValuePairs["Renderer"]->value;
        if (value.empty() || (value[0] != '&'))
            throw Exception{"Expected reference to renderer, did not find '&' character"};

        const auto it = renderers.find(value.substr(1));
        if (it == renderers.end())
            throw Exception{"Widget refers to renderer with name '" + value.substr(1) + "', but no such renderer was found"};

        setRenderer(it->second);
    }

    for (const auto& childNode : node->children)
    {
        if (childNode->name == "ToolTip")
        {
            // Global tool tip settings
            for (const auto& pair : childNode->propertyValuePairs)
            {
                if (pair.first == "InitialDelay")
                    ToolTip::setInitialDelay(std::chrono::duration<float>(pair.second->value.toFloat()));
                else if (pair.first == "DistanceToMouse")
                    ToolTip::setDistanceToMouse(Vector2f{pair.second->value});
            }

            if (!childNode->children.empty())
            {
                // There can only be one child in the tool tip section
                if (childNode->children.size() > 1)
                    throw Exception{"ToolTip section contained multiple children."};

                const auto& toolTipWidgetNode = childNode->children[0];
                const auto& constructor = WidgetFactory::getConstructFunction(toolTipWidgetNode->name);
                if (constructor)
                {
                    Widget::Ptr toolTip = constructor();
                    toolTip->load(toolTipWidgetNode, renderers);
                    setToolTip(toolTip);
                }
                else
                    throw Exception{"No construct function exists for widget type '" + toolTipWidgetNode->name + "'."};
            }
        }
        else if (childNode->name == "Renderer")
            setRenderer(RendererData::createFromDataIONode(childNode.get()));

        /// TODO: Signals?
    }

    // The consumed sections are removed so derived classes only see their own children
    node->children.erase(std::remove_if(node->children.begin(), node->children.end(), [](const std::unique_ptr<DataIO::Node>& child){
        return (child->name == "ToolTip") || (child->name == "Renderer");
    }), node->children.end());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Marks the widget as hovered, requests its custom cursor (if any) and
// emits onMouseEnter.
void Widget::mouseEnteredWidget()
{
    // Only request a cursor when one was customized and a gui is attached
    if (m_parentGui && (m_mouseCursor != Cursor::Type::Arrow))
        m_parentGui->requestMouseCursor(m_mouseCursor);

    m_mouseHover = true;
    onMouseEnter.emit(this);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Clears the hover state, restores the parent's cursor when it differs from
// ours, and emits onMouseLeave.
void Widget::mouseLeftWidget()
{
    if (m_parentGui && m_parent && (m_parent->getMouseCursor() != m_mouseCursor))
        m_parentGui->requestMouseCursor(m_parent->getMouseCursor());

    m_mouseHover = false;
    onMouseLeave.emit(this);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Forwarder registered with the renderer; simply delegates to rendererChanged.
void Widget::rendererChangedCallback(const String& property)
{
    rendererChanged(property);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
#include<iostream>
#include<cstring>
// Prime modulus for all arithmetic below (1e9+7)
long long mod=1e9+7;
using namespace std;
// mmi_1[i]: modular inverse of i (filled in main via Fermat's little theorem)
long long mmi_1[(long long)(1e5+7)];
long long mmi_2[(long long)(1e5+7)];//inverse factorial
// fac[i]: i! modulo `mod`
long long fac[(long long )(1e5+7)];
// Fast modular exponentiation by recursive squaring: returns n^pow (mod `mod`).
// Called with pow = mod-2 this yields the modular multiplicative inverse of n
// (Fermat's little theorem, since `mod` is prime).
long long mmi(long long n,long long pow){
    if(pow==0)
        return 1;
    if(pow==1)
        return n;

    // Square the half-power result, with one extra factor of n for odd exponents
    long long half=mmi(n,pow/2)%mod;
    long long res=(half*half)%mod;
    if(pow%2!=0)
        res=(res*n)%mod;
    return res;
}
// Reads t words and, for each, prints (all * in) mod 1e9+7, where `all` is the
// number of distinct permutations of the word and `in` subtracts several
// inclusion/exclusion terms built from the letter-frequency histogram.
// Fix: the original file ended main() with the invalid statement `endl 0;`,
// which does not compile; it is replaced by `return 0;`.
int main(){
    int t;
    cin>>t;

    long long i;

    // Precompute modular inverses, inverse factorials and factorials up to 1e5+5
    for(i=0;i<=(long long)(1e5+5);i++){
        mmi_1[i]=mmi(i,mod-2);
    }
    mmi_2[0]=1;
    for(i=1;i<=(long long)(1e5+5);i++){
        mmi_2[i]=(mmi_1[i]*mmi_2[i-1])%mod;
    }
    fac[0]=1;
    for(i=1;i<=(long long)(1e5+5);i++){
        fac[i]=i*fac[i-1];
        fac[i]%=mod;
    }

    while(t--){
        string str;
        cin>>str;

        // Letter-frequency histogram of the word (lowercase letters only)
        long long alp[27];
        for(i=0;i<27;i++){
            alp[i]=0;
        }
        // Cast avoids a signed/unsigned comparison warning
        for(i=0;i<(long long)str.size();i++){
            alp[str[i]-'a']++;
        }

        // Compact the non-zero counts into a[0..p-1]
        long long a[27];
        int j,l,k=0;
        for(i=0;i<=26;i++){
            if(alp[i]>0){
                a[k]=alp[i];
                k++;
            }
        }
        int p=k;
        ///p is size

        // all = |str|! * prod(1/a[i]!) = number of distinct permutations
        long long all=fac[str.size()];
        for(i=0;i<p;i++){
            all=(all*mmi_2[a[i]])%mod;
        }

        // ij = sum over letter pairs i<j of a[i]*a[j]
        long long ij=0;
        for(i=0;i<p;i++){
            long long tmp=0;
            for(j=i+1;j<p;j++){
                tmp+=a[j];
                tmp%=mod;
            }
            ij+=(a[i]*tmp)%mod;
            ij%=mod;
        }
        ij%=mod;

        // ij_ = sum over pairs i<j of C(a[i],2)*C(a[j],2)
        long long ij_=0;
        for(i=0;i<p;i++){
            long long tmp=0;
            for(j=i+1;j<p;j++){
                if(a[j]>1){
                    tmp+=((a[j]*(a[j]-1))/2)%mod;
                    tmp%=mod;
                }
            }
            if(a[i]>1){
                long long ta=((a[i]*(a[i]-1))/2)%mod;
                ij_+=(ta*tmp)%mod;
                ij_%=mod;
            }
        }
        ij_%=mod;

        // ijk = 2 * sum over triples i<j<k of a[i]*a[j]*a[k]
        long long ijk=0;
        for(i=0;i<p;i++){
            long long tmp=0;
            for(j=i+1;j<p;j++){
                long long tmp1=0;
                for(k=j+1;k<p;k++){
                    tmp1+=(a[k]);
                    tmp1%=mod;
                }
                tmp+=(a[j]*tmp1)%mod;
                tmp%=mod;
            }
            ijk+=(tmp*a[i])%mod;
            ijk%=mod;
        }
        ijk=(ijk*2LL)%mod;
        ijk%=mod;

        // ijk_ = sum over triples i<j<k of a[i]*a[j]*a[k]*(one factor replaced
        // by x*(x-1)), accumulated once per choice of the "doubled" position
        long long ijk_=0;
        for(i=0;i<p;i++){
            long long tmp=0;
            for(j=i+1;j<p;j++){
                long long tmp1=0;
                for(k=j+1;k<p;k++){
                    tmp1+=(a[k]);
                    tmp1%=mod;
                }
                tmp+=(a[j]*tmp1)%mod;
                tmp%=mod;
            }
            if(a[i]>1){
                long long tl=((a[i]*(a[i]-1)))%mod;
                ijk_+=(tmp*tl)%mod;
                ijk_%=mod;
            }
        }
        ijk_%=mod;
        for(i=0;i<p;i++){
            long long tmp=0;
            for(j=i+1;j<p;j++){
                long long tmp1=0;
                for(k=j+1;k<p;k++){
                    tmp1+=(a[k]);
                    tmp1%=mod;
                }
                if(a[j]>1){
                    long long tl=((a[j]*(a[j]-1)))%mod;
                    tmp+=(tl*tmp1)%mod;
                    tmp%=mod;
                }
            }
            ijk_+=(tmp*a[i])%mod;
            ijk_%=mod;
        }
        ijk_%=mod;
        for(i=0;i<p;i++){
            long long tmp=0;
            for(j=i+1;j<p;j++){
                long long tmp1=0;
                for(k=j+1;k<p;k++){
                    if(a[k]>1){
                        long long tl=((a[k]*(a[k]-1)))%mod;
                        tmp1+=(tl);
                        tmp1%=mod;
                    }
                }
                tmp+=(a[j]*tmp1)%mod;
                tmp%=mod;
            }
            ijk_+=(tmp*a[i])%mod;
            ijk_%=mod;
        }
        ijk_%=mod;

        // ijkl = 3 * sum over quadruples i<j<k<l of a[i]*a[j]*a[k]*a[l]
        long long ijkl=0;
        for(i=0;i<p;i++){
            long long tmp=0;
            for(j=i+1;j<p;j++){
                long long tmp1=0;
                for(k=j+1;k<p;k++){
                    long long tmp2=0;
                    for(l=k+1;l<p;l++){
                        tmp2+=a[l];
                        tmp2%=mod;
                    }
                    tmp1+=(a[k]*tmp2)%mod;
                    tmp1%=mod;
                }
                tmp+=(a[j]*tmp1)%mod;
                tmp%=mod;
            }
            ijkl+=(tmp*a[i])%mod;
            ijkl%=mod;
        }
        ijkl=(ijkl*3LL)%mod;
        ijkl%=mod;

        // Combine the terms; +7*mod keeps the subtraction non-negative before %
        long long in=(all-ij-ijk-ijkl-ij_-ijk_-1LL+7LL*mod)%mod;
        long long net=(all*in)%mod;
        net%=mod;
        cout<<net<<endl;
    }

    return 0;
}
|
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
// LY Editor Crashpad Upload Handler Extension
#include <handler/handler_main.h>
#include <tools/tool_support.h>
#include <Uploader/CrashUploader.h>
#include <windows.h>
namespace
{
    // Entry point handed to crashpad: registers the O3DE-specific uploader
    // before delegating to the stock crashpad handler.
    int HandlerMain(int argc, char* argv[])
    {
        O3de::InstallCrashUploader(argc, argv);
        LOG(ERROR) << "Initializing windows crash uploader";
        return crashpad::HandlerMain(
            argc, argv, O3de::CrashUploader::GetCrashUploader()->GetUserStreamSources());
    }
}
int APIENTRY wWinMain(HINSTANCE, HINSTANCE, [[maybe_unused]] wchar_t* lpCmdLine, int)
{
    // Crashpad re-parses the process command line itself via __argc/__wargv.
    const int exitCode = crashpad::ToolSupport::Wmain(__argc, __wargv, HandlerMain);
    return exitCode;
}
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_test_runner.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/tests/unit_tests.h"
namespace sandbox {
// NOTE(review): ownership of |bpf_tester_delegate| is not visible from this
// file — presumably non-owning; confirm against the header.
SandboxBPFTestRunner::SandboxBPFTestRunner(
    BPFTesterDelegate* bpf_tester_delegate)
    : bpf_tester_delegate_(bpf_tester_delegate) {
}

SandboxBPFTestRunner::~SandboxBPFTestRunner() {
}
// Runs the delegate's test inside a seccomp-BPF sandbox when the kernel
// supports it; otherwise only assembles and verifies the policy's filter
// program (and marks the test ignored).
void SandboxBPFTestRunner::Run() {
  DCHECK(bpf_tester_delegate_);
  sandbox::Die::EnableSimpleExit();
  scoped_ptr<SandboxBPFPolicy> policy =
      bpf_tester_delegate_->GetSandboxBPFPolicy();
  if (sandbox::SandboxBPF::SupportsSeccompSandbox(-1) ==
      sandbox::SandboxBPF::STATUS_AVAILABLE) {
    // Ensure the sandbox is actually available at this time
    int proc_fd;
    SANDBOX_ASSERT((proc_fd = open("/proc", O_RDONLY | O_DIRECTORY)) >= 0);
    SANDBOX_ASSERT(sandbox::SandboxBPF::SupportsSeccompSandbox(proc_fd) ==
                   sandbox::SandboxBPF::STATUS_AVAILABLE);
    // Initialize and then start the sandbox with our custom policy
    sandbox::SandboxBPF sandbox;
    sandbox.set_proc_fd(proc_fd);
    sandbox.SetSandboxPolicy(policy.release());
    SANDBOX_ASSERT(
        sandbox.StartSandbox(sandbox::SandboxBPF::PROCESS_SINGLE_THREADED));
    // Run the actual test.
    bpf_tester_delegate_->RunTestFunction();
  } else {
    printf("This BPF test is not fully running in this configuration!\n");
    // Android and Valgrind are the only configurations where we accept not
    // having kernel BPF support.
    if (!IsAndroid() && !IsRunningOnValgrind()) {
      const bool seccomp_bpf_is_supported = false;
      SANDBOX_ASSERT(seccomp_bpf_is_supported);
    }
    // Call the compiler and verify the policy. That's the least we can do,
    // if we don't have kernel support.
    sandbox::SandboxBPF sandbox;
    sandbox.SetSandboxPolicy(policy.release());
    sandbox::SandboxBPF::Program* program =
        sandbox.AssembleFilter(true /* force_verification */);
    delete program;
    sandbox::UnitTests::IgnoreThisTest();
  }
}
// Leak checking is disabled for sandboxed tests:
// LSAN requires being able to use ptrace() and other system calls that could
// be denied by the BPF policy under test.
bool SandboxBPFTestRunner::ShouldCheckForLeaks() const {
  return false;
}
} // namespace sandbox
|
// MIT License
//
// MEL - Mechatronics Engine & Library
// Copyright (c) 2019 Mechatronics and Haptic Interfaces Lab - Rice University
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// Author(s): Evan Pezent (epezent@rice.edu)
// Craig McDonald (craig.g.mcdonald@gmail.com)
#pragma once
#include <MEL/Core/Device.hpp>
namespace mel {
//==============================================================================
// CLASS DECLARATION
//==============================================================================
// Abstract base for devices that measure a velocity. Concrete sensors
// implement get_velocity(); units are not specified here — defined by the
// implementing sensor.
class VelocitySensor {
public:
    /// Constructor
    VelocitySensor();
    /// Destructor (virtual: instances are deleted through base pointers)
    virtual ~VelocitySensor();
    /// This function should return the velocity of the VelocitySensor
    virtual double get_velocity() = 0;
protected:
    double velocity_; ///< stores the VelocitySensor velocity since the last
                      ///< call to get_velocity()
};
} // namespace mel
|
#include "ByteReader.h"
#include <byteswap.h>
// Reads one byte at the cursor position and advances the cursor.
uint8_t readUInt8(const uint8_t* addr, std::size_t& pos) {
    return addr[pos++];
}
// Reads a big-endian 32-bit integer at the cursor and advances it by 4.
// Assembled byte-by-byte: this avoids the unaligned/strict-aliasing UB of
// dereferencing addr through an int32_t*, and drops the glibc-only
// <byteswap.h> dependency. Matches the previous native-load + __bswap_32
// result on little-endian hosts (assumed target — confirm if this code
// must run big-endian).
int32_t readInt32(const uint8_t* addr, std::size_t& pos) {
    const uint32_t val = (uint32_t(addr[pos]) << 24)
                       | (uint32_t(addr[pos + 1]) << 16)
                       | (uint32_t(addr[pos + 2]) << 8)
                       |  uint32_t(addr[pos + 3]);
    pos += 4;
    return static_cast<int32_t>(val);
}
|
// Copyright Nathaniel Christen 2019.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include "phr-minimal-evaluator.h"
#include "kop-base.h"
#include "kops/add.h"
#include "kops/subtract.h"
#include "channel/phr-channel-group.h"
#include "channel/phr-channel.h"
#include "channel/phr-carrier.h"
#include "phaon-ir.h"
#include "kops/less-than.h"
#include "kops/equal.h"
#include "kops/bool.h"
#include <QDebug>
USING_KANS(Phaon)
// Constructs the evaluator by forwarding the IR and channel group to the
// PHR_Channel_Group_Evaluator base; no additional state is initialized here.
PHR_Minimal_Evaluator::PHR_Minimal_Evaluator(PhaonIR& phr,
  PHR_Channel_Group& channel_group)
  : PHR_Channel_Group_Evaluator(phr, channel_group)
{
}
// Logs the evaluation result: the numeric value when one was produced,
// otherwise its string form.
void PHR_Minimal_Evaluator::debug_report()
{
 if(!rh_.raw_value)
 {
   qDebug() << rh_.raw_value_string;
   return;
 }
 qDebug() << *(qint32*)rh_.raw_value;
}
// Maps an operator token to its Kernal_Operators value; unknown tokens
// yield N_A.
PHR_Minimal_Evaluator::Kernal_Operators PHR_Minimal_Evaluator::parse_kernel_operator(QString fn)
{
 if(fn == "#+")
   return Kernal_Operators::Add;
 if(fn == "#-")
   return Kernal_Operators::Subtract;
 if(fn == "#<")
   return Kernal_Operators::Less_Than;
 if(fn == "#=?")
   return Kernal_Operators::Equal;
 if(fn == "#?\\")
   return Kernal_Operators::Bool;
 return Kernal_Operators::N_A;
}
// Collects the integer arguments from the "lambda" channel — literal carriers
// are parsed from their string form, named carriers are resolved through the
// symbol table — then delegates to run_eval(args).
void PHR_Minimal_Evaluator::run_eval()
{
 PHR_Channel* lc = get_channel_by_sp_name("lambda", channel_group_);
 if(!lc)
   return; // nothing to evaluate without a lambda channel
 int sz = lc->size();
 QVector<qint32> args;
 args.resize(sz);
 for(int i = 0; i < sz; ++i)
 {
   PHR_Carrier* pcr = lc->at(i);
   if(pcr->symbol_name().isEmpty())
   {
     // Literal argument: parse its textual value.
     args[i] = pcr->raw_value_string().toInt();
   }
   else
   {
     // Named argument: look up its current symbol value.
     args[i] = phr_get_s4_symbol_value(pcr->symbol_name());
   }
 }
 run_eval(args);
}
// Returns the raw result pointer (a heap-allocated qint32 set by
// run_eval(args)); may be null if no evaluation has produced a value.
void* PHR_Minimal_Evaluator::get_result_value()
{
 return rh_.raw_value;
}

// Returns the string form of the result holder.
QString PHR_Minimal_Evaluator::get_result_string()
{
 return rh_.raw_value_string;
}
// Dispatches the parsed kernel operator to its KOP implementation and stores
// the qint32 result in the result holder. The KOP instances live in a static
// map and are deliberately never freed (process-lifetime singletons).
void PHR_Minimal_Evaluator::run_eval(QVector<qint32>& args)
{
 // NOTE(review): N_A maps to PHR_KOP_Add, so an unrecognized operator
 // silently evaluates as addition — confirm this fallback is intended.
 static QMap<Kernal_Operators, PHR_KOP_Base<qint32>*> static_map {{
   { Kernal_Operators::N_A, new PHR_KOP_Add },
   { Kernal_Operators::Add, new PHR_KOP_Add },
   { Kernal_Operators::Subtract, new PHR_KOP_Subtract },
   { Kernal_Operators::Less_Than, new PHR_KOP_Less_Than },
   { Kernal_Operators::Equal, new PHR_KOP_Equal },
   { Kernal_Operators::Bool, new PHR_KOP_Bool },
 }};
 // Result is heap-allocated; ownership passes to the result holder.
 qint32* pres = new qint32();
 *pres = 0;
 static_map[kernel_operator_]->run_eval(args, *pres);
 rh_.raw_value = pres;
}
|
#ifndef KFTG_ARRAY
#define KFTG_ARRAY
#include "types.hpp"
#include "Memory/MemoryManager.hpp"
#include <cstring>
#define ARRAY_BLOCK_SIZE 50
namespace KFTG
{
// dynamic array (append only)
// Append-only dynamic array backed by MemoryManager asset allocations.
// Elements are copied with memcpy on growth/assignment, so T must be
// trivially copyable.
template <typename T>
class array
{
public:
	// Index-based iterator tied to its owning array.
	class iterator
	{
	public:
		iterator (u32 ind, array<T> *a) : index (ind), arr (a) {}
		T& operator * () { return arr->arr[index]; }
		const T& operator * () const { return arr->arr[index]; }
		iterator& operator + (u32 n) { index += n; return *this; }
		iterator& operator - (u32 n) { index -= n; return *this; }
		iterator& operator ++ () { ++index; return *this; }
		iterator& operator -- () { --index; return *this; }
		bool operator == (iterator &it) { return arr == it.arr && index == it.index; }
		// Proper negation of == (previously iterators over different arrays
		// compared neither equal nor unequal, breaking loop termination).
		bool operator != (iterator &it) { return arr != it.arr || index != it.index; }
		bool operator > (iterator &it) { return arr == it.arr && index > it.index; }
		bool operator >= (iterator &it) { return arr == it.arr && index >= it.index; }
		bool operator < (iterator &it) { return arr == it.arr && index < it.index; }
		bool operator <= (iterator &it) { return arr == it.arr && index <= it.index; }
	private:
		u32 index;
		array<T> *arr;
	};

	// A new array is empty; one block of capacity is pre-allocated.
	// (Previously size started at ARRAY_BLOCK_SIZE, so len() reported 50
	// uninitialized elements on a fresh array.)
	array ()
		: size (0), capability (ARRAY_BLOCK_SIZE)
	{
		arr = (T*) MemoryManager::instance ()
			->allocAsset (ARRAY_BLOCK_SIZE * sizeof (T));
	}

	~array ()
	{
		MemoryManager::instance ()->freeAsset (arr);
	}

	// Number of elements currently stored.
	u32 len () { return size; }

	iterator begin () { return {0, const_cast<array*> (this)}; }
	iterator begin () const
	{
		return {0, const_cast<array*> (this)};
	}
	iterator end () { return {size, const_cast<array*> (this)}; }
	iterator end () const
	{
		return {size, const_cast<array*> (this)};
	}

	// Appends a copy of e, growing by ARRAY_BLOCK_SIZE elements when full,
	// and returns a reference to the stored element.
	T& append (T &e)
	{
		if (size >= capability)
		{
			T* tmp = (T*) MemoryManager::instance ()
				->allocAsset ((capability + ARRAY_BLOCK_SIZE) * sizeof (T));
			// Copy the full payload in bytes (was `capability`, which copied
			// only `capability` bytes and truncated when sizeof(T) > 1).
			std::memcpy (tmp, arr, capability * sizeof (T));
			MemoryManager::instance ()->freeAsset (arr);
			arr = tmp;
			capability += ARRAY_BLOCK_SIZE;
		}
		arr[size] = e;
		// The declared T& was previously never returned (undefined behavior).
		return arr[size++];
	}

	// Drops the last element (no destructor is run).
	void shrink () { --size; }

	array<T>& operator = (const array<T> &other)
	{
		// Self-assignment guard: without it the buffer is freed before being
		// copied from, reading freed memory on `a = a`.
		if (this == &other)
			return *this;
		size = other.size;
		capability = other.capability;
		MemoryManager::instance ()->freeAsset (arr);
		arr = (T*) MemoryManager::instance ()
			->allocAsset (capability * sizeof (T));
		memcpy (arr, other.arr, capability * sizeof (T));
		return *this;
	}

	T& operator [] (u32 index) { return arr[index]; }
	const T& operator [] (u32 index) const { return arr[index]; }
private:
	u32 size;       // number of elements in use
	u32 capability; // allocated capacity, in elements
	T *arr;         // owned buffer from MemoryManager
};
}
#endif // ARRAY
|
/*
* Copyright (C) 2005-2019 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef otbSarConcatenateBurstsImageFilter_hxx
#define otbSarConcatenateBurstsImageFilter_hxx
#include "otbSarConcatenateBurstsImageFilter.h"
#include "itkImageRegionIterator.h"
#include "otbSarSensorModelAdapter.h"
namespace otb
{
template <class TImage>
// Stores the SLC image keyword list later used to build the SAR sensor
// model in getDeburstLinesAndSamples(). (The "KeyWorList" spelling is part
// of the public interface and must be preserved.)
void SarConcatenateBurstsImageFilter<TImage>::SetSLCImageKeyWorList(ImageKeywordlist sarImageKWL)
{
  m_SLCImageKWL = sarImageKWL;
}
template <class TImage>
// Builds a SAR sensor model from the stored SLC keyword list, computes the
// deburst line/sample records and origin offset, and exports the resulting
// deburst keyword list into m_DeburstSLCImageKWL. Returns true on success;
// throws (itkExceptionMacro) on any failure.
bool SarConcatenateBurstsImageFilter<TImage>::getDeburstLinesAndSamples(LinesRecordVectorType& linesRecord, LinesRecordVectorType& samplesRecord,
                                                                        unsigned int first_burstInd, bool inputWithInvalidPixels)
{
  // Try to create a SarSensorModelAdapter
  SarSensorModelAdapter::Pointer sarSensorModel = SarSensorModelAdapter::New();
  bool loadOk = sarSensorModel->LoadState(m_SLCImageKWL);
  if (!loadOk || !sarSensorModel->IsValidSensorModel())
    itkExceptionMacro(<< "Input image does not contain a valid SAR sensor model.");
  // Try to call the deburstAndConcatenate function
  // (the unused local `lines` that used to sit here was removed)
  bool deburstAndConcatenateOk = sarSensorModel->DeburstAndConcatenate(linesRecord, samplesRecord, m_Offset_OriginL, first_burstInd, inputWithInvalidPixels);
  if (!deburstAndConcatenateOk)
    itkExceptionMacro(<< "Could not deburst or concatenate from input bursts");
  // Export the new keywordlist
  bool saveOk = sarSensorModel->SaveState(m_DeburstSLCImageKWL);
  if (!saveOk)
    itkExceptionMacro(<< "Could not export deburst SAR sensor model to keyword list");
  return true;
}
template <class TImage>
// Adjusts the output image origin for the deburst offset and attaches the
// updated keyword list (with refreshed line/sample counts) to the output.
void SarConcatenateBurstsImageFilter<TImage>::GenerateOutputInformation()
{
  // First, call superclass implementation
  Superclass::GenerateOutputInformation();
  ImageType* outputPtr = this->GetOutput();
  // Origin to (0.5, 0.5) : Metadata are already adjusted
  PointType origin;
  origin[0] = 0.5;
  origin[1] = 0.5 + m_Offset_OriginL;
  outputPtr->SetOrigin(origin);
  // Output KeywordList: record the concatenated size under both the
  // support_data-prefixed and bare key names.
  m_DeburstSLCImageKWL.AddKey("support_data.number_samples", std::to_string(this->GetOutput()->GetLargestPossibleRegion().GetSize()[0]));
  m_DeburstSLCImageKWL.AddKey("support_data.number_lines", std::to_string(this->GetOutput()->GetLargestPossibleRegion().GetSize()[1]));
  m_DeburstSLCImageKWL.AddKey("number_samples", std::to_string(this->GetOutput()->GetLargestPossibleRegion().GetSize()[0]));
  m_DeburstSLCImageKWL.AddKey("number_lines", std::to_string(this->GetOutput()->GetLargestPossibleRegion().GetSize()[1]));
  // Set new keyword list to output image
  outputPtr->SetImageKeywordList(m_DeburstSLCImageKWL);
}
} // end namespace otb
#endif
|
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/webui/settings/hats_handler.h"
#include <memory>
#include <string>
#include "base/values.h"
#include "chrome/browser/ui/hats/hats_service.h"
#include "chrome/browser/ui/hats/hats_service_factory.h"
#include "chrome/browser/ui/hats/mock_hats_service.h"
#include "chrome/browser/ui/hats/mock_trust_safety_sentiment_service.h"
#include "chrome/browser/ui/hats/trust_safety_sentiment_service_factory.h"
#include "chrome/common/chrome_features.h"
#include "chrome/test/base/chrome_render_view_host_test_harness.h"
#include "components/content_settings/core/browser/cookie_settings.h"
#include "components/content_settings/core/common/pref_names.h"
#include "components/prefs/pref_service.h"
#include "components/privacy_sandbox/privacy_sandbox_prefs.h"
#include "content/public/test/test_web_ui.h"
#include "testing/gmock/include/gmock/gmock.h"
using ::testing::_;
class Profile;
namespace settings {
// Fixture wiring a HatsHandler to a TestWebUI plus mock HaTS and
// Trust & Safety sentiment services so survey-launch calls can be asserted.
class HatsHandlerTest : public ChromeRenderViewHostTestHarness {
 public:
  HatsHandlerTest() {
    // Enable the HaTS features with explicit timing params so the delays
    // asserted in the tests (15s / 10s / 15s) are deterministic.
    base::test::ScopedFeatureList::FeatureAndParams settings_privacy{
        features::kHappinessTrackingSurveysForDesktopSettingsPrivacy,
        {{"settings-time", "15s"}}};
    base::test::ScopedFeatureList::FeatureAndParams privacy_sandbox{
        features::kHappinessTrackingSurveysForDesktopPrivacySandbox,
        {{"settings-time", "10s"}}};
    base::test::ScopedFeatureList::FeatureAndParams privacy_review{
        features::kHappinessTrackingSurveysForDesktopPrivacyReview,
        {{"settings-time", "15s"}}};
    scoped_feature_list_.InitWithFeaturesAndParameters(
        {settings_privacy, privacy_sandbox, privacy_review}, {});
  }
  void SetUp() override {
    ChromeRenderViewHostTestHarness::SetUp();
    web_ui_ = std::make_unique<content::TestWebUI>();
    web_ui_->set_web_contents(web_contents());
    handler_ = std::make_unique<HatsHandler>();
    handler_->set_web_ui(web_ui());
    handler_->AllowJavascript();
    web_ui_->ClearTrackedCalls();
    // Replace the real services with mocks; surveys are allowed by default.
    mock_hats_service_ = static_cast<MockHatsService*>(
        HatsServiceFactory::GetInstance()->SetTestingFactoryAndUse(
            profile(), base::BindRepeating(&BuildMockHatsService)));
    EXPECT_CALL(*mock_hats_service_, CanShowAnySurvey(_))
        .WillRepeatedly(testing::Return(true));
    mock_sentiment_service_ = static_cast<MockTrustSafetySentimentService*>(
        TrustSafetySentimentServiceFactory::GetInstance()
            ->SetTestingFactoryAndUse(
                profile(),
                base::BindRepeating(&BuildMockTrustSafetySentimentService)));
  }
  void TearDown() override {
    handler_->set_web_ui(nullptr);
    handler_.reset();
    web_ui_.reset();
    ChromeRenderViewHostTestHarness::TearDown();
  }
  content::TestWebUI* web_ui() { return web_ui_.get(); }
  HatsHandler* handler() { return handler_.get(); }
  // Raw pointers owned by the testing factories above.
  MockHatsService* mock_hats_service_;
  MockTrustSafetySentimentService* mock_sentiment_service_;

 protected:
  // This should only be accessed in the test constructor, to avoid race
  // conditions with other threads.
  base::test::ScopedFeatureList scoped_feature_list_;

 private:
  std::unique_ptr<content::TestWebUI> web_ui_;
  std::unique_ptr<HatsHandler> handler_;
};
TEST_F(HatsHandlerTest, PrivacySettingsHats) {
  // Arrange prefs so the product-specific bits below are well defined.
  profile()->GetPrefs()->SetBoolean(prefs::kPrivacySandboxApisEnabled, false);
  profile()->GetPrefs()->SetInteger(
      prefs::kCookieControlsMode,
      static_cast<int>(content_settings::CookieControlsMode::kBlockThirdParty));
  SurveyBitsData expected_product_specific_data = {
      {"3P cookies blocked", true}, {"Privacy Sandbox enabled", false}};
  // Check that both interacting with the privacy card, and running Safety Check
  // result in a survey request with the appropriate product specific data.
  // The 15000 ms delay matches the "settings-time" param set in the fixture.
  EXPECT_CALL(*mock_hats_service_,
              LaunchDelayedSurveyForWebContents(
                  kHatsSurveyTriggerSettingsPrivacy, web_contents(), 15000,
                  expected_product_specific_data, _, true))
      .Times(2);
  base::Value args(base::Value::Type::LIST);
  args.Append(
      static_cast<int>(HatsHandler::TrustSafetyInteraction::USED_PRIVACY_CARD));
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
  task_environment()->RunUntilIdle();
  args.GetList()[0] = base::Value(
      static_cast<int>(HatsHandler::TrustSafetyInteraction::RAN_SAFETY_CHECK));
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
  task_environment()->RunUntilIdle();
}
TEST_F(HatsHandlerTest, PrivacyReviewHats) {
  // Check that completing a privacy review triggers a privacy review hats.
  // The 15000 ms delay matches the fixture's "settings-time" param.
  EXPECT_CALL(*mock_hats_service_, LaunchDelayedSurveyForWebContents(
                                       kHatsSurveyTriggerPrivacyReview,
                                       web_contents(), 15000, _, _, true))
      .Times(1);
  base::Value args(base::Value::Type::LIST);
  args.Append(static_cast<int>(
      HatsHandler::TrustSafetyInteraction::COMPLETED_PRIVACY_GUIDE));
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
  task_environment()->RunUntilIdle();
}
// Variant fixture that re-initializes the feature list with the
// "no-sandbox" targeting param, used to verify surveys are suppressed.
class HatsHandlerNoSandboxTest : public HatsHandlerTest {
 public:
  HatsHandlerNoSandboxTest() {
    // Reset the base fixture's features before applying the override.
    scoped_feature_list_.Reset();
    base::test::ScopedFeatureList::FeatureAndParams settings_privacy{
        features::kHappinessTrackingSurveysForDesktopSettingsPrivacy,
        {{"no-sandbox", "true"}}};
    scoped_feature_list_.InitWithFeaturesAndParameters({settings_privacy}, {});
  }
};
TEST_F(HatsHandlerNoSandboxTest, PrivacySettings) {
  profile()->GetPrefs()->SetBoolean(prefs::kPrivacySandboxApisEnabled, false);
  profile()->GetPrefs()->SetInteger(
      prefs::kCookieControlsMode,
      static_cast<int>(content_settings::CookieControlsMode::kBlockThirdParty));
  SurveyBitsData expected_product_specific_data = {
      {"3P cookies blocked", true}, {"Privacy Sandbox enabled", false}};
  // Enable targeting for users who have not seen the Privacy Sandbox page and
  // ensure the handler does not attempt to launch the survey.
  EXPECT_CALL(*mock_hats_service_,
              LaunchDelayedSurveyForWebContents(_, _, _, _, _, _))
      .Times(0);
  profile()->GetPrefs()->SetBoolean(prefs::kPrivacySandboxPageViewed, true);
  base::Value args(base::Value::Type::LIST);
  args.Append(
      static_cast<int>(HatsHandler::TrustSafetyInteraction::USED_PRIVACY_CARD));
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
  task_environment()->RunUntilIdle();
}
TEST_F(HatsHandlerTest, PrivacySandboxHats) {
  // Check that the handler correctly forwards the survey request to the
  // HaTS service and also includes the appropriate product specific data.
  profile()->GetPrefs()->SetBoolean(prefs::kPrivacySandboxApisEnabled, false);
  profile()->GetPrefs()->SetInteger(
      prefs::kCookieControlsMode,
      static_cast<int>(content_settings::CookieControlsMode::kBlockThirdParty));
  SurveyBitsData expected_product_specific_data = {
      {"3P cookies blocked", true}, {"Privacy Sandbox enabled", false}};
  // 10000 ms matches the privacy-sandbox "settings-time" fixture param.
  EXPECT_CALL(*mock_hats_service_,
              LaunchDelayedSurveyForWebContents(
                  kHatsSurveyTriggerPrivacySandbox, web_contents(), 10000,
                  expected_product_specific_data, _, true));
  base::Value args(base::Value::Type::LIST);
  args.Append(static_cast<int>(
      HatsHandler::TrustSafetyInteraction::OPENED_PRIVACY_SANDBOX));
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
  task_environment()->RunUntilIdle();
}
TEST_F(HatsHandlerTest, TrustSafetySentimentInteractions) {
  // Check that interactions relevant to the T&S sentiment service are
  // correctly reported.
  EXPECT_CALL(*mock_sentiment_service_,
              InteractedWithPrivacySettings(web_contents()))
      .Times(1);
  base::Value args(base::Value::Type::LIST);
  args.Append(
      static_cast<int>(HatsHandler::TrustSafetyInteraction::USED_PRIVACY_CARD));
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
  // Reuse the same args list for the Safety Check interaction.
  EXPECT_CALL(*mock_sentiment_service_, RanSafetyCheck()).Times(1);
  args.GetList()[0] = base::Value(
      static_cast<int>(HatsHandler::TrustSafetyInteraction::RAN_SAFETY_CHECK));
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
}
TEST_F(HatsHandlerNoSandboxTest, TrustSafetySentimentInteractions) {
  // A profile & feature state that would exclude the user from receiving the
  // Privacy Settings HaTS survey should not stop the sentiment service being
  // informed that the interaction occurred.
  // Check that interactions relevant to the T&S sentiment service are
  // correctly reported.
  EXPECT_CALL(*mock_sentiment_service_, RanSafetyCheck()).Times(1);
  base::Value args(base::Value::Type::LIST);
  args.Append(
      static_cast<int>(HatsHandler::TrustSafetyInteraction::RAN_SAFETY_CHECK));
  profile()->GetPrefs()->SetBoolean(prefs::kPrivacySandboxPageViewed, true);
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
  EXPECT_CALL(*mock_sentiment_service_, OpenedPasswordManager(web_contents()));
  args.GetList()[0] = base::Value(static_cast<int>(
      HatsHandler::TrustSafetyInteraction::OPENED_PASSWORD_MANAGER));
  handler()->HandleTrustSafetyInteractionOccurred(
      &base::Value::AsListValue(args));
}
} // namespace settings
|
//----------------------------------*-C++-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file SeltzerBergerInteractor.i.hh
//---------------------------------------------------------------------------//
#include "base/ArrayUtils.hh"
#include "base/Constants.hh"
#include "random/distributions/UniformRealDistribution.hh"
#include "SBEnergyDistHelper.hh"
#include "SBEnergyDistribution.hh"
#include "SBPositronXsCorrector.hh"
#include "TsaiUrbanDistribution.hh"
namespace celeritas
{
namespace detail
{
//---------------------------------------------------------------------------//
/*!
 * Construct with shared/device and state data.
 *
 * The incident particle must be within the model's valid energy range. This
 * must be handled in code *before* the interactor is constructed.
 */
SeltzerBergerInteractor::SeltzerBergerInteractor(
    const SeltzerBergerNativeRef& shared,
    const ParticleTrackView& particle,
    const Real3& inc_direction,
    const CutoffView& cutoffs,
    StackAllocator<Secondary>& allocate,
    const MaterialView& material,
    const ElementComponentId& elcomp_id)
    : shared_(shared)
    , inc_energy_(particle.energy())
    , inc_momentum_(particle.momentum())
    , inc_direction_(inc_direction)
    , inc_particle_is_electron_(particle.particle_id() == shared_.ids.electron)
    , gamma_cutoff_(cutoffs.energy(shared.ids.gamma))
    , material_(material)
    , elcomp_id_(elcomp_id)
    , allocate_(allocate)
{
    // Only electrons and positrons are valid projectiles for this model.
    CELER_EXPECT(particle.particle_id() == shared_.ids.electron
                 || particle.particle_id() == shared_.ids.positron);
}
//---------------------------------------------------------------------------//
/*!
 * Sample bremsstrahlung photon emission using the Seltzer-Berger model
 * (the code below emits a brems photon, not an e+/e- pair).
 *
 * See section 10.2.1 of the Geant physics reference 10.6.
 */
// Sample one interaction: emit a bremsstrahlung photon as a secondary and
// update the parent lepton's energy and direction accordingly.
template<class Engine>
CELER_FUNCTION Interaction SeltzerBergerInteractor::operator()(Engine& rng)
{
    // Check if secondary can be produced. If not, this interaction cannot
    // happen and the incident particle must undergo an energy loss process
    // instead.
    if (gamma_cutoff_ > inc_energy_)
    {
        return Interaction::from_unchanged(inc_energy_, inc_direction_);
    }
    // Allocate space for the brems photon
    Secondary* secondaries = this->allocate_(1);
    if (secondaries == nullptr)
    {
        // Failed to allocate space for the secondary
        return Interaction::from_failure();
    }
    // Density correction (Migdal suppression factor scaled by the material's
    // electron density and the squared total energy)
    constexpr auto migdal = 4 * constants::pi * constants::r_electron
                            * ipow<2>(constants::lambdabar_electron);
    real_type density_factor = material_.electron_density() * migdal;
    real_type total_energy_val = inc_energy_.value()
                                 + shared_.electron_mass.value();
    real_type density_correction = density_factor * ipow<2>(total_energy_val);
    // Outgoing photon secondary energy sampler
    Energy gamma_exit_energy;
    {
        // Helper class preprocesses cross section bounds and calculates
        // distribution
        SBEnergyDistHelper sb_helper(
            shared_,
            inc_energy_,
            material_.element_id(elcomp_id_),
            SBEnergyDistHelper::EnergySq{density_correction},
            gamma_cutoff_);
        if (inc_particle_is_electron_)
        {
            // Rejection sample without modifying cross section
            SBEnergyDistribution<SBElectronXsCorrector> sample_gamma_energy(
                sb_helper, {});
            gamma_exit_energy = sample_gamma_energy(rng);
        }
        else
        {
            // Positrons get an additional cross-section correction
            SBEnergyDistribution<SBPositronXsCorrector> sample_gamma_energy(
                sb_helper,
                {shared_.electron_mass,
                 material_.element_view(elcomp_id_),
                 gamma_cutoff_,
                 inc_energy_});
            gamma_exit_energy = sample_gamma_energy(rng);
        }
    }
    // Construct interaction for change to parent (incoming) particle
    Interaction result;
    result.action = Action::spawned;
    result.energy
        = units::MevEnergy{inc_energy_.value() - gamma_exit_energy.value()};
    result.direction = inc_direction_;
    result.secondaries = {secondaries, 1};
    secondaries[0].particle_id = shared_.ids.gamma;
    secondaries[0].energy = gamma_exit_energy;
    // Generate exiting gamma direction from isotropic azimuthal
    // angle and TsaiUrbanDistribution for polar angle
    UniformRealDistribution<real_type> sample_phi(0, 2 * constants::pi);
    TsaiUrbanDistribution sample_gamma_angle(secondaries[0].energy,
                                             shared_.electron_mass);
    real_type cost = sample_gamma_angle(rng);
    secondaries[0].direction
        = rotate(from_spherical(cost, sample_phi(rng)), inc_direction_);
    // Update parent particle direction: momentum conservation per component,
    // then renormalize (NOTE: photon energy is used as its momentum here).
    for (unsigned int i : range(3))
    {
        real_type inc_momentum_i = inc_momentum_.value() * inc_direction_[i];
        real_type gamma_momentum_i = result.secondaries[0].energy.value()
                                     * result.secondaries[0].direction[i];
        result.direction[i] = inc_momentum_i - gamma_momentum_i;
    }
    normalize_direction(&result.direction);
    return result;
}
//---------------------------------------------------------------------------//
} // namespace detail
} // namespace celeritas
|
#include <chrono>
#include <memory>
#include "rclcpp/rclcpp.hpp"
#include "tutorial_interfaces/msg/num.hpp" // CHANGE
using namespace std::chrono_literals;
// Publishes an incrementing tutorial_interfaces::msg::Num on "topic"
// every 500 ms.
class MinimalPublisher : public rclcpp::Node
{
public:
  MinimalPublisher()
  : Node("minimal_publisher"), count_(0)
  {
    publisher_ = this->create_publisher<tutorial_interfaces::msg::Num>("topic", 10);
    timer_ = this->create_wall_timer(
      500ms, std::bind(&MinimalPublisher::timer_callback, this));
  }

private:
  // Timer callback: publish the next counter value.
  void timer_callback()
  {
    auto message = tutorial_interfaces::msg::Num();
    message.num = this->count_++;
    // Num.num is an int64 field in tutorial_interfaces, so "%ld" is the
    // correct specifier; "%d" with a 64-bit argument is undefined behavior.
    // (TODO confirm the field type against Num.msg.)
    RCLCPP_INFO(this->get_logger(), "Publishing: '%ld'", message.num);
    publisher_->publish(message);
  }
  rclcpp::TimerBase::SharedPtr timer_;
  rclcpp::Publisher<tutorial_interfaces::msg::Num>::SharedPtr publisher_;
  size_t count_;  // next value to publish
};
int main(int argc, char * argv[])
{
  // Initialize ROS, spin the publisher node until shutdown is requested,
  // then tear the context down.
  rclcpp::init(argc, argv);
  auto node = std::make_shared<MinimalPublisher>();
  rclcpp::spin(node);
  rclcpp::shutdown();
  return 0;
}
|
/*******************************************************************************
* Copyright 2018-2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <assert.h>
#include "dnnl.h"
#include "c_types_map.hpp"
#include "concat_pd.hpp"
#include "engine.hpp"
#include "type_helpers.hpp"
#include "utils.hpp"
using namespace dnnl::impl;
using namespace dnnl::impl::utils;
using namespace dnnl::impl::status;
// Creates a concat primitive descriptor: validates that all n sources agree
// in rank, data type, and every dimension except concat_dim, checks (or
// synthesizes) the destination descriptor, then asks the engine's concat
// implementations to build the descriptor.
status_t dnnl_concat_primitive_desc_create(
        primitive_desc_iface_t **concat_pd_iface, const memory_desc_t *dst_md,
        int n, int concat_dim, const memory_desc_t *src_mds,
        const primitive_attr_t *attr, engine_t *engine) {
    bool args_ok = !any_null(concat_pd_iface, src_mds) && n > 0;
    if (!args_ok) return invalid_arguments;
    if (attr == NULL) attr = &default_attr();
    // The first source sets the reference rank/dims/type.
    const int ndims = src_mds[0].ndims;
    const dims_t &dims = src_mds[0].dims;
    const data_type_t dt = src_mds[0].data_type;
    // Runtime (deferred) dims/strides are not supported by concat.
    if (memory_desc_wrapper(src_mds[0]).has_runtime_dims_or_strides())
        return unimplemented;
    // Accumulate the concatenated extent along concat_dim while validating
    // each remaining source against the reference.
    int concat_dim_sz = dims[concat_dim];
    for (int i = 1; i < n; ++i) {
        if (src_mds[i].ndims != ndims) return invalid_arguments;
        if (memory_desc_wrapper(src_mds[i]).has_runtime_dims_or_strides())
            return unimplemented;
        for (int d = 0; d < ndims; ++d) {
            if (d == concat_dim) continue;
            if (src_mds[i].dims[d] != dims[d]) return invalid_arguments;
        }
        if (src_mds[i].data_type != dt) return invalid_arguments;
        concat_dim_sz += src_mds[i].dims[concat_dim];
    }
    memory_desc_t dummy_dst_md;
    if (dst_md) {
        // Caller-provided destination must match the computed output shape.
        if (dst_md->ndims != ndims) return invalid_arguments;
        if (memory_desc_wrapper(dst_md).has_runtime_dims_or_strides())
            return unimplemented;
        for (int d = 0; d < ndims; ++d) {
            if (dst_md->dims[d] != (d == concat_dim ? concat_dim_sz : dims[d]))
                return invalid_arguments;
        }
    } else {
        // No destination given: derive one from the first source with the
        // concatenated extent and let the implementation pick the format.
        dummy_dst_md = src_mds[0];
        dummy_dst_md.dims[concat_dim] = concat_dim_sz;
        dummy_dst_md.format_kind = format_kind::any;
        dst_md = &dummy_dst_md;
    }
    // First implementation in the engine's list that accepts the descriptors
    // wins.
    concat_pd_t *concat_pd = nullptr;
    for (auto c = engine->get_concat_implementation_list(); *c; ++c) {
        if ((*c)(&concat_pd, engine, attr, dst_md, n, concat_dim, src_mds)
                == success) {
            return safe_ptr_assign(*concat_pd_iface,
                    new primitive_desc_iface_t(concat_pd, engine));
        }
    }
    return unimplemented;
}
|
/*
===========================================================================
GWEN
Copyright (c) 2010 Facepunch Studios
Copyright (c) 2017-2018 Cristiano Beato
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
===========================================================================
*/
#include "precompiled.h"
#pragma hdrstop
#include <SDL_events.h>
#ifdef _WIN32
#define UCS_STRING "UCS-2"
#else
#define UCS_STRING "UCS-4"
#endif
using namespace Gwen;
// Constructs the SDL2 input translator with no canvas attached; all
// Process*Events calls are no-ops until Initialize() is called.
Input::SDL2::SDL2()
{
m_Canvas = NULL;
}
// Attaches the GWEN canvas that will receive translated SDL input events.
void Gwen::Input::SDL2::Initialize(Gwen::Controls::Canvas * c)
{
m_Canvas = c;
}
// Translates SDL mouse events addressed to mWindowID into GWEN canvas input.
// Returns true when the canvas consumed the event, false otherwise.
// Fix: the original fell off the end of this non-void function (undefined
// behavior) whenever the event belonged to a different window.
bool Gwen::Input::SDL2::ProcessMouseEvents(SDL_Event Event, Uint32 mWindowID)
{
    if (!m_Canvas)
        return false;

    // Ignore events addressed to other windows.
    if (Event.window.windowID != mWindowID)
        return false;

    switch (Event.type)
    {
        case SDL_MOUSEMOTION:
        {
            const SDL_MouseMotionEvent& moEvent = Event.motion;
            return m_Canvas->InputMouseMoved(moEvent.x, moEvent.y,
                moEvent.xrel, moEvent.yrel);
        }
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEBUTTONUP:
        {
            const SDL_MouseButtonEvent& bmEvent = Event.button;
            // Map SDL button ids onto GWEN's (0 = left, 1 = right, 2 = middle).
            int Button = -1;
            switch (bmEvent.button)
            {
                case SDL_BUTTON_LEFT:   Button = 0; break;
                case SDL_BUTTON_MIDDLE: Button = 2; break;
                case SDL_BUTTON_RIGHT:  Button = 1; break;
                default:
                    return false; // extra buttons are not forwarded
            }
            return m_Canvas->InputMouseButton(Button, bmEvent.state);
        }
        case SDL_MOUSEWHEEL:
            return m_Canvas->InputMouseWheel(Event.wheel.y);
        default:
            return false;
    }
}
// Translates SDL keyboard and text-input events addressed to mWindowID into
// GWEN canvas input. Returns true when the canvas consumed the event.
// Fixes: the SDL_TEXTINPUT branch computed `ret` but never returned it, the
// switch had no terminating return, and the function fell off the end
// (undefined behavior) for foreign-window events. A dead `#if 0` SDL_iconv
// block that referenced undeclared variables has been removed.
bool Gwen::Input::SDL2::ProcessKeyboardEvents(SDL_Event Event, Uint32 mWindowID)
{
    if (!m_Canvas)
        return false;

    // Ignore events addressed to other windows.
    if (Event.window.windowID != mWindowID)
        return false;

    switch (Event.type)
    {
        case SDL_KEYUP:
        case SDL_KEYDOWN:
        {
            SDL_KeyboardEvent keyEvent = Event.key;
            // Map SDL scancodes to GWEN key codes; unmapped keys are ignored.
            int iKey = -1;
            switch (keyEvent.keysym.scancode)
            {
                case SDL_SCANCODE_RETURN:    iKey = Gwen::Key::Return;    break;
                case SDL_SCANCODE_BACKSPACE: iKey = Gwen::Key::Backspace; break;
                case SDL_SCANCODE_DELETE:    iKey = Gwen::Key::Delete;    break;
                case SDL_SCANCODE_LEFT:      iKey = Gwen::Key::Left;      break;
                case SDL_SCANCODE_RIGHT:     iKey = Gwen::Key::Right;     break;
                case SDL_SCANCODE_LSHIFT:
                case SDL_SCANCODE_RSHIFT:    iKey = Gwen::Key::Shift;     break;
                case SDL_SCANCODE_TAB:       iKey = Gwen::Key::Tab;       break;
                case SDL_SCANCODE_SPACE:     iKey = Gwen::Key::Space;     break;
                case SDL_SCANCODE_HOME:      iKey = Gwen::Key::Home;      break;
                case SDL_SCANCODE_END:       iKey = Gwen::Key::End;       break;
                case SDL_SCANCODE_LCTRL:
                case SDL_SCANCODE_RCTRL:     iKey = Gwen::Key::Control;   break;
                case SDL_SCANCODE_UP:        iKey = Gwen::Key::Up;        break;
                case SDL_SCANCODE_DOWN:      iKey = Gwen::Key::Down;      break;
                case SDL_SCANCODE_ESCAPE:    iKey = Gwen::Key::Escape;    break;
                case SDL_SCANCODE_LALT:
                case SDL_SCANCODE_RALT:      iKey = Gwen::Key::Alt;       break;
                default:
                    return false;
            }
            return m_Canvas->InputKey(iKey, keyEvent.state);
        }
        case SDL_TEXTINPUT:
        {
            SDL_TextInputEvent txEvent = Event.text;
            if (txEvent.text[0] == '\0')
                return false;
            // NOTE(review): converts UTF-8 to UTF-32LE but stores into
            // wchar_t, which is 16-bit on Windows; the UCS_STRING macro
            // defined above looks intended for this — confirm before
            // changing, since iconv's "UCS-4" byte order differs per libc.
            const char* toFormat = "UTF-32LE";
            wchar_t* widechar = (wchar_t*)SDL_iconv_string(toFormat, "UTF-8",
                txEvent.text, SDL_strlen(txEvent.text) + 1);
            if (!widechar)
                return false;
            bool ret = m_Canvas->InputCharacter(*widechar);
            SDL_free(widechar);
            return ret;
        }
        default:
            return false;
    }
}
// Dispatches SDL window events addressed to mWindowID to the GWEN canvas.
// Returns true when the canvas handled the event.
// Fixes: SHOWN and MINIMIZED broke out of the switch without returning and
// the function fell off the end (undefined behavior) for unmatched events.
bool Gwen::Input::SDL2::ProcessWindowEvents(SDL_Event Event, Uint32 mWindowID)
{
    if (!m_Canvas)
        return false;

    // Only window events for this window are of interest.
    if (Event.type != SDL_WINDOWEVENT || Event.window.windowID != mWindowID)
        return false;

    switch (Event.window.event)
    {
        //Window appeared
        case SDL_WINDOWEVENT_SHOWN:
            m_Canvas->Show();
            return true;
        //Window disappeared
        case SDL_WINDOWEVENT_HIDDEN:
            m_Canvas->SetHidden(true);
            return true;
        //Get new dimensions and repaint
        case SDL_WINDOWEVENT_SIZE_CHANGED:
            return m_Canvas->SetSize(
                Gwen::Point(Event.window.data1, Event.window.data2));
        //Repaint on expose
        case SDL_WINDOWEVENT_EXPOSED:
            m_Canvas->Redraw();
            return true;
        //Mouse enter
        case SDL_WINDOWEVENT_ENTER:
            m_Canvas->SetMouseInputEnabled(true);
            return true;
        //Mouse exit
        case SDL_WINDOWEVENT_LEAVE:
            m_Canvas->SetMouseInputEnabled(false);
            return true;
        //Keyboard focus gained
        case SDL_WINDOWEVENT_FOCUS_GAINED:
            m_Canvas->SetKeyboardInputEnabled(true);
            return true;
        //Keyboard focus lost — preserved from original: reports unhandled
        case SDL_WINDOWEVENT_FOCUS_LOST:
            m_Canvas->SetKeyboardInputEnabled(false);
            return false;
        //Window minimized
        case SDL_WINDOWEVENT_MINIMIZED:
            m_Canvas->SetHidden(true);
            return true;
        //Window maximized or restored: canvas becomes visible again
        case SDL_WINDOWEVENT_MAXIMIZED:
        case SDL_WINDOWEVENT_RESTORED:
            m_Canvas->SetHidden(false);
            return true;
        //Hide on close
        case SDL_WINDOWEVENT_CLOSE:
            return m_Canvas->InputQuit();
        default:
            return false;
    }
}
// Handles application-level quit events for mWindowID's canvas.
// Fixes: the original discarded InputQuit()'s result and fell through to the
// default `return false`, and fell off the end of the function (undefined
// behavior) for foreign-window events.
bool Gwen::Input::SDL2::ProcessEvents(SDL_Event Event, Uint32 mWindowID)
{
    if (!m_Canvas)
        return false;

    // NOTE(review): SDL_QUIT carries no window payload; keeping the original
    // windowID filter for compatibility — confirm it is intended.
    if (Event.window.windowID != mWindowID)
        return false;

    switch (Event.type)
    {
        case SDL_QUIT:
        case SDL_APP_TERMINATING:
            return m_Canvas->InputQuit();
        default:
            return false;
    }
}
|
/*-------------------------------------------------------------------------
* Vulkan Conformance Tests
* ------------------------
*
* Copyright (c) 2018 Advanced Micro Devices, Inc.
* Copyright (c) 2018 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief VK_KHR_driver_properties tests
*//*--------------------------------------------------------------------*/
#include "vktApiDriverPropertiesTests.hpp"
#include "vktTestGroupUtil.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkKnownDriverIds.inl"
using namespace vk;
namespace vkt
{
namespace api
{
namespace
{
// Identifies which property of VkPhysicalDeviceDriverPropertiesKHR a test
// case verifies; dispatched on in testQueryProperties().
enum TestType
{
TEST_TYPE_DRIVER_ID_MATCH = 0,
TEST_TYPE_NAME_IS_NOT_EMPTY,
TEST_TYPE_NAME_ZERO_TERMINATED,
TEST_TYPE_INFO_ZERO_TERMINATED,
TEST_TYPE_VERSION,
};
// Conformance versions recognized by this CTS release, newest first. The
// conformanceVersion reported by a driver must match one of these exactly.
static const VkConformanceVersionKHR knownConformanceVersions[] =
{
makeConformanceVersion(1, 3, 1, 1),
makeConformanceVersion(1, 3, 1, 0),
makeConformanceVersion(1, 3, 0, 0),
makeConformanceVersion(1, 2, 8, 0),
makeConformanceVersion(1, 2, 7, 2),
makeConformanceVersion(1, 2, 7, 1),
makeConformanceVersion(1, 2, 7, 0),
makeConformanceVersion(1, 2, 6, 2),
makeConformanceVersion(1, 2, 6, 1),
makeConformanceVersion(1, 2, 6, 0),
makeConformanceVersion(1, 2, 5, 2),
makeConformanceVersion(1, 2, 5, 1),
makeConformanceVersion(1, 2, 5, 0),
makeConformanceVersion(1, 2, 4, 1),
makeConformanceVersion(1, 2, 4, 0),
makeConformanceVersion(1, 2, 3, 3),
makeConformanceVersion(1, 2, 3, 2),
makeConformanceVersion(1, 2, 3, 1),
makeConformanceVersion(1, 2, 3, 0),
makeConformanceVersion(1, 2, 2, 2),
makeConformanceVersion(1, 2, 2, 1),
makeConformanceVersion(1, 2, 2, 0),
makeConformanceVersion(1, 2, 1, 2),
makeConformanceVersion(1, 2, 1, 1),
makeConformanceVersion(1, 2, 1, 0),
makeConformanceVersion(1, 2, 0, 2),
makeConformanceVersion(1, 2, 0, 1),
makeConformanceVersion(1, 2, 0, 0),
makeConformanceVersion(1, 1, 6, 3),
makeConformanceVersion(1, 1, 6, 2),
makeConformanceVersion(1, 1, 6, 1),
makeConformanceVersion(1, 1, 6, 0),
makeConformanceVersion(1, 1, 5, 2),
makeConformanceVersion(1, 1, 5, 1),
makeConformanceVersion(1, 1, 5, 0),
makeConformanceVersion(1, 1, 4, 3),
makeConformanceVersion(1, 1, 4, 2),
makeConformanceVersion(1, 1, 4, 1),
makeConformanceVersion(1, 1, 4, 0),
makeConformanceVersion(1, 1, 3, 3),
makeConformanceVersion(1, 1, 3, 2),
makeConformanceVersion(1, 1, 3, 1),
makeConformanceVersion(1, 1, 3, 0),
};
// A buffer holds a null-terminated string when the terminator appears
// strictly before maxSize characters have been scanned.
DE_INLINE bool isNullTerminated(const char* str, const deUint32 maxSize)
{
    const auto measuredLength = deStrnlen(str, maxSize);
    return measuredLength < maxSize;
}
// Field-wise equality of two conformance versions.
DE_INLINE bool operator==(const VkConformanceVersion& a, const VkConformanceVersion& b)
{
    if (a.major != b.major)
        return false;
    if (a.minor != b.minor)
        return false;
    if (a.subminor != b.subminor)
        return false;
    return a.patch == b.patch;
}
// Skips the test when VK_KHR_driver_properties is not available.
void checkSupport (Context& context, const TestType config)
{
DE_UNREF(config);
context.requireDeviceFunctionality("VK_KHR_driver_properties");
}
// Fails unless driverID is one of the registered ids in vkKnownDriverIds.inl.
void testDriverMatch (const VkPhysicalDeviceDriverPropertiesKHR& deviceDriverProperties)
{
for (deUint32 driverNdx = 0; driverNdx < DE_LENGTH_OF_ARRAY(driverIds); driverNdx++)
{
if (deviceDriverProperties.driverID == driverIds[driverNdx].id)
return;
}
TCU_FAIL("Driver ID did not match any known driver");
}
// Fails when driverName starts with a terminator (empty string).
void testNameIsNotEmpty (const VkPhysicalDeviceDriverPropertiesKHR& deviceDriverProperties)
{
if (deviceDriverProperties.driverName[0] == 0)
TCU_FAIL("Driver name is empty");
}
// Fails when driverName has no terminator within its fixed-size array.
void testNameZeroTerminated (const VkPhysicalDeviceDriverPropertiesKHR& deviceDriverProperties)
{
if (!isNullTerminated(deviceDriverProperties.driverName, VK_MAX_DRIVER_NAME_SIZE_KHR))
TCU_FAIL("Driver name is not a null-terminated string");
}
// Fails when driverInfo has no terminator within its fixed-size array.
void testInfoZeroTerminated (const VkPhysicalDeviceDriverPropertiesKHR& deviceDriverProperties)
{
if (!isNullTerminated(deviceDriverProperties.driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR))
TCU_FAIL("Driver info is not a null-terminated string");
}
// Fails when conformanceVersion is older than the API version in use, or is
// not one of the versions listed in knownConformanceVersions.
void testVersion (const VkPhysicalDeviceDriverPropertiesKHR& deviceDriverProperties, deUint32 usedApiVersion)
{
const deUint32 apiMajorVersion = VK_API_VERSION_MAJOR(usedApiVersion);
const deUint32 apiMinorVersion = VK_API_VERSION_MINOR(usedApiVersion);
if (deviceDriverProperties.conformanceVersion.major < apiMajorVersion ||
(deviceDriverProperties.conformanceVersion.major == apiMajorVersion &&
deviceDriverProperties.conformanceVersion.minor < apiMinorVersion))
{
TCU_FAIL("Wrong driver conformance version (older than used API version)");
}
for (const VkConformanceVersionKHR* pConformanceVersion = knownConformanceVersions;
pConformanceVersion != DE_ARRAY_END(knownConformanceVersions);
++pConformanceVersion)
{
if (deviceDriverProperties.conformanceVersion == *pConformanceVersion)
return;
}
TCU_FAIL("Wrong driver conformance version (not known)");
}
// Queries VkPhysicalDeviceDriverProperties via the pNext chain of
// vkGetPhysicalDeviceProperties2 and runs the check selected by testType.
// Both structs are pre-filled with 0xaa so stale fields are detectable.
tcu::TestStatus testQueryProperties (Context& context, const TestType testType)
{
// Query the driver properties
const VkPhysicalDevice physDevice = context.getPhysicalDevice();
const int memsetPattern = 0xaa;
VkPhysicalDeviceProperties2 deviceProperties2;
VkPhysicalDeviceDriverProperties deviceDriverProperties;
deMemset(&deviceDriverProperties, memsetPattern, sizeof(deviceDriverProperties));
deviceDriverProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR;
deviceDriverProperties.pNext = DE_NULL;
deMemset(&deviceProperties2, memsetPattern, sizeof(deviceProperties2));
deviceProperties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
deviceProperties2.pNext = &deviceDriverProperties;
context.getInstanceInterface().getPhysicalDeviceProperties2(physDevice, &deviceProperties2);
// Verify the returned values
switch (testType)
{
case TEST_TYPE_DRIVER_ID_MATCH: testDriverMatch (deviceDriverProperties); break;
case TEST_TYPE_NAME_IS_NOT_EMPTY: testNameIsNotEmpty (deviceDriverProperties); break;
case TEST_TYPE_NAME_ZERO_TERMINATED: testNameZeroTerminated (deviceDriverProperties); break;
case TEST_TYPE_INFO_ZERO_TERMINATED: testInfoZeroTerminated (deviceDriverProperties); break;
case TEST_TYPE_VERSION: testVersion (deviceDriverProperties, context.getUsedApiVersion()); break;
default: TCU_THROW(InternalError, "Unknown test type specified");
}
return tcu::TestStatus::pass("Pass");
}
// Registers one test case per TestType in the given group.
void createTestCases (tcu::TestCaseGroup* group)
{
addFunctionCase(group, "driver_id_match", "Check driverID is supported", checkSupport, testQueryProperties, TEST_TYPE_DRIVER_ID_MATCH);
addFunctionCase(group, "name_is_not_empty", "Check name field is not empty", checkSupport, testQueryProperties, TEST_TYPE_NAME_IS_NOT_EMPTY);
addFunctionCase(group, "name_zero_terminated", "Check name field is zero-terminated", checkSupport, testQueryProperties, TEST_TYPE_NAME_ZERO_TERMINATED);
addFunctionCase(group, "info_zero_terminated", "Check info field is zero-terminated", checkSupport, testQueryProperties, TEST_TYPE_INFO_ZERO_TERMINATED);
addFunctionCase(group, "conformance_version", "Check conformanceVersion reported by driver", checkSupport, testQueryProperties, TEST_TYPE_VERSION);
}
} // anonymous
// Public entry point: builds the "driver_properties" test group covering
// VK_KHR_driver_properties.
tcu::TestCaseGroup* createDriverPropertiesTests(tcu::TestContext& testCtx)
{
return createTestGroup(testCtx, "driver_properties", "VK_KHR_driver_properties tests", createTestCases);
}
} // api
} // vkt
|
#include "stdafx.h"
#include "UIComboBox.h"
namespace UiLib
{
// Combo box control: a CComboUI that additionally paints a custom drop-down
// arrow from the "arrowimage" attribute. m_nArrowWidth caches one fifth of
// the arrow strip's width (the strip holds 5 state frames; see
// PaintStatusImage) and is 0 until the image has been parsed.
CComboBoxUI::CComboBoxUI()
{
m_nArrowWidth = 0;
}
// Class name used by the XML layout engine / FindControl lookups.
LPCTSTR CComboBoxUI::GetClass() const
{
return _T("ComboBoxUI");
}
// Intercepts the "arrowimage" attribute this subclass adds; every other
// attribute is forwarded to the base combo implementation.
void CComboBoxUI::SetAttribute(LPCTSTR pstrName, LPCTSTR pstrValue)
{
    if (_tcscmp(pstrName, _T("arrowimage")) != 0)
    {
        CComboUI::SetAttribute(pstrName, pstrValue);
        return;
    }
    m_sArrowImage = pstrValue;
}
// Paints the drop-down arrow from m_sArrowImage when set, otherwise defers
// to the base class. The arrow image is a horizontal strip of 5 equal
// frames (normal/hot/pushed/focused/disabled); the source='l,t,r,b' quad in
// the attribute string is rewritten to select the frame for the current
// state, and the dest rect is anchored to the control's right edge.
// Fix: on DrawImage failure the original cleared the unrelated inherited
// m_sNormalImage (copy-paste slip from CButtonUI), so a broken arrowimage
// attribute was retried on every paint; clear m_sArrowImage instead.
void CComboBoxUI::PaintStatusImage(HDC hDC)
{
    if (m_sArrowImage.IsEmpty())
        CComboUI::PaintStatusImage(hDC);
    else
    {
        // Refresh the focus/disabled state bits, then pick the frame index.
        if( IsFocused() ) m_uButtonState |= UISTATE_FOCUSED;
        else m_uButtonState &= ~ UISTATE_FOCUSED;
        if( !IsEnabled() ) m_uButtonState |= UISTATE_DISABLED;
        else m_uButtonState &= ~ UISTATE_DISABLED;
        int nIndex = 0;
        if ((m_uButtonState & UISTATE_DISABLED) != 0)
            nIndex = 4;
        else if ((m_uButtonState & UISTATE_PUSHED) != 0)
            nIndex = 2;
        else if ((m_uButtonState & UISTATE_HOT) != 0)
            nIndex = 1;
        else if ((m_uButtonState & UISTATE_FOCUSED) != 0)
            nIndex = 3;
        // Locate the quoted source rect inside the attribute string.
        CDuiString sModify = m_sArrowImage;
        int nPos1 = sModify.Find(_T("source"));
        int nPos2 = sModify.Find(_T("'"), nPos1 + 7);
        if (nPos2 == -1) return; //first
        int nPos3 = sModify.Find(_T("'"), nPos2 + 1);
        if (nPos3 == -1) return; //second
        // Parse "l,t,r,b" into rcBmpPart.
        CDuiRect rcBmpPart;
        LPTSTR lpszValue = NULL;
        rcBmpPart.left = _tcstol(sModify.GetData() + nPos2 + 1, &lpszValue, 10); ASSERT(lpszValue);
        rcBmpPart.top = _tcstol(lpszValue + 1, &lpszValue, 10); ASSERT(lpszValue);
        rcBmpPart.right = _tcstol(lpszValue + 1, &lpszValue, 10); ASSERT(lpszValue);
        rcBmpPart.bottom = _tcstol(lpszValue + 1, &lpszValue, 10); ASSERT(lpszValue);
        // One frame is a fifth of the strip; shift to the selected frame.
        m_nArrowWidth = rcBmpPart.GetWidth() / 5;
        rcBmpPart.left += nIndex * m_nArrowWidth;
        rcBmpPart.right = rcBmpPart.left + m_nArrowWidth;
        // Dest rect: right-aligned inside the control's borders.
        CDuiRect rcDest(0, 0, m_rcItem.right - m_rcItem.left, m_rcItem.bottom - m_rcItem.top);
        rcDest.Deflate(GetBorderSize(), GetBorderSize());
        rcDest.left = rcDest.right - m_nArrowWidth;
        // Substitute the adjusted source/dest quads back into the string.
        CDuiString sSource = sModify.Mid(nPos1, nPos3 + 1 - nPos1);
        CDuiString sReplace;
        sReplace.SmallFormat(_T("source='%d,%d,%d,%d' dest='%d,%d,%d,%d'"),
            rcBmpPart.left, rcBmpPart.top, rcBmpPart.right, rcBmpPart.bottom,
            rcDest.left, rcDest.top, rcDest.right, rcDest.bottom);
        sModify.Replace(sSource, sReplace);
        // Draw; on failure drop the arrow image so it is not retried.
        if (!DrawImage(hDC, m_sArrowImage, sModify))
            m_sArrowImage.Empty();
    }
}
// Paints the text of the currently selected item inside the padded client
// rect, shrunk on the right so text never overlaps the drop-down arrow.
void CComboBoxUI::PaintText(HDC hDC)
{
RECT rcText = m_rcItem;
rcText.left += m_rcTextPadding.left;
rcText.right -= m_rcTextPadding.right;
rcText.top += m_rcTextPadding.top;
rcText.bottom -= m_rcTextPadding.bottom;
rcText.right -= m_nArrowWidth; // add this line than CComboUI::PaintText(HDC hDC)
if( m_iCurSel >= 0 ) {
CControlUI* pControl = static_cast<CControlUI*>(m_items[m_iCurSel]);
IListItemUI* pElement = static_cast<IListItemUI*>(pControl->GetInterface(_T("ListItem")));
if( pElement != NULL ) {
// List items know how to render their own text.
pElement->DrawItemText(hDC, rcText);
}
else {
// Fallback: temporarily reposition the item control and paint it whole.
RECT rcOldPos = pControl->GetPos();
pControl->SetPos(rcText);
pControl->DoPaint(hDC, rcText);
pControl->SetPos(rcOldPos);
}
}
}
}
|
#include <vector>
#include "wayfire/debug.hpp"
#include <string>
#include <wayfire/config/file.hpp>
#include <wayfire/config-backend.hpp>
#include <wayfire/plugin.hpp>
#include <wayfire/core.hpp>
#include <sys/inotify.h>
#include <unistd.h>
/* Buffer large enough for a batch of inotify events; the events are only
 * drained, never parsed. */
#define INOT_BUF_SIZE (1024 * sizeof(inotify_event))
static char buf[INOT_BUF_SIZE];
/* Directory and full path of the active configuration file. */
static std::string config_dir, config_file;
/* Points at the manager passed to dynamic_ini_config_t::init(). */
wf::config::config_manager_t *cfg_manager;
/* Reloads options from disk and (re)arms the inotify watches: the directory
 * is watched for the file being re-created, the file for modification. */
static void reload_config(int fd)
{
wf::config::load_configuration_options_from_file(*cfg_manager, config_file);
inotify_add_watch(fd, config_dir.c_str(), IN_CREATE);
inotify_add_watch(fd, config_file.c_str(), IN_MODIFY);
}
static int handle_config_updated(int fd, uint32_t mask, void *data)
{
LOGD("Reloading configuration file");
/* read, but don't use */
read(fd, buf, INOT_BUF_SIZE);
reload_config(fd);
wf::get_core().emit_signal("reload-config", nullptr);
return 0;
}
static const char *CONFIG_FILE_ENV = "WAYFIRE_CONFIG_FILE";
namespace wf
{
/* Config backend that loads an ini configuration file and transparently
 * reloads it whenever the file (or its directory) changes, via inotify
 * hooked into the compositor's wl_event_loop. */
class dynamic_ini_config_t : public wf::config_backend_t
{
public:
// Loads the configuration, exports its path through WAYFIRE_CONFIG_FILE
// and arms the inotify-based auto-reload.
void init(wl_display *display, config::config_manager_t& config,
const std::string& cfg_file) override
{
cfg_manager = &config;
config_file = choose_cfg_file(cfg_file);
LOGI("Using config file: ", config_file.c_str());
setenv(CONFIG_FILE_ENV, config_file.c_str(), 1);
config = wf::config::build_configuration(
get_xml_dirs(), SYSCONFDIR "/wayfire/defaults.ini", config_file);
int inotify_fd = inotify_init1(IN_CLOEXEC);
reload_config(inotify_fd);
wl_event_loop_add_fd(wl_display_get_event_loop(display),
inotify_fd, WL_EVENT_READABLE, handle_config_updated, NULL);
}
// Resolves the config path with precedence: command line argument, then
// the WAYFIRE_CONFIG_FILE environment variable, then
// $XDG_CONFIG_HOME/wayfire.ini (falling back to ~/.config).
std::string choose_cfg_file(const std::string& cmdline_cfg_file)
{
std::string env_cfg_file = nonull(getenv(CONFIG_FILE_ENV));
if (!cmdline_cfg_file.empty())
{
if ((env_cfg_file != nonull(NULL)) &&
(cmdline_cfg_file != env_cfg_file))
{
LOGW("Wayfire config file specified in the environment is ",
"overridden by the command line arguments!");
}
return cmdline_cfg_file;
}
if (env_cfg_file != nonull(NULL))
{
return env_cfg_file;
}
// Fallback, default config file
config_dir = nonull(getenv("XDG_CONFIG_HOME"));
if (!config_dir.compare("nil"))
{
config_dir = std::string(nonull(getenv("HOME"))) + "/.config";
}
return config_dir + "/wayfire.ini";
}
};
}
DECLARE_WAYFIRE_CONFIG_BACKEND(wf::dynamic_ini_config_t);
|
#include <Framework/Controls/UserControl.h>
namespace suic
{
// Registers UserControl in the RTTI hierarchy under ContentControl.
ImplementRTTIOfClass(UserControl, suic::ContentControl)
// One-time static initialization hook; nothing to set up currently.
bool UserControl::StaticInit()
{
return true;
}
// UserControl is a plain ContentControl specialization; constructor and
// destructor have no additional state to manage.
UserControl::UserControl()
{
}
UserControl::~UserControl()
{
}
}
|
#ifndef BOOST_ARCHIVE_BASIC_STREAMBUF_LOCALE_SAVER_HPP
#define BOOST_ARCHIVE_BASIC_STREAMBUF_LOCALE_SAVER_HPP
// MS compatible compilers support #pragma once
#if defined(_MSC_VER)
# pragma once
#endif
/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
// basic_streambuf_local_saver.hpp
// (C) Copyright 2005 Robert Ramey - http://www.rrsd.com
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org for updates, documentation, and revision history.
// note derived from boost/io/ios_state.hpp
// Copyright 2002, 2005 Daryle Walker. Use, modification, and distribution
// are subject to the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or a copy at <http://www.boost.org/LICENSE_1_0.txt>.)
// See <http://www.boost.org/libs/io/> for the library's home page.
#ifndef BOOST_NO_STD_LOCALE
#include <locale> // for std::locale
#include <streambuf> // for std::basic_streambuf
#include <boost/config.hpp>
#include <boost/noncopyable.hpp>
#ifdef BOOST_MSVC
# pragma warning(push)
# pragma warning(disable : 4511 4512)
#endif
namespace boost{
namespace archive{
// RAII guard that saves a streambuf's locale and restores it on destruction
// (or earlier via restore()). The two-argument constructor additionally
// imbues a new locale immediately, saving whatever pubimbue() returns as
// the previous aspect. Non-copyable: the guard is bound to one streambuf,
// which must outlive it.
template < typename Ch, class Tr >
class basic_streambuf_locale_saver :
private boost::noncopyable
{
public:
typedef ::std::basic_streambuf<Ch, Tr> state_type;
typedef ::std::locale aspect_type;
// Save the current locale of s without changing it.
explicit basic_streambuf_locale_saver( state_type &s )
: s_save_( s ), a_save_( s.getloc() )
{}
// Imbue a, remembering the locale that was active before.
explicit basic_streambuf_locale_saver( state_type &s, aspect_type const &a )
: s_save_( s ), a_save_( s.pubimbue(a) )
{}
~basic_streambuf_locale_saver()
{ this->restore(); }
// Re-imbue the saved locale; safe to call more than once.
void restore()
{ s_save_.pubimbue( a_save_ ); }
private:
state_type & s_save_;
aspect_type const a_save_;
};
} // archive
} // boost
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif
#endif // BOOST_NO_STD_LOCALE
#endif // BOOST_ARCHIVE_BASIC_STREAMBUF_LOCALE_SAVER_HPP
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/grappler/costs/utils.h"
#include <stddef.h>
#include <utility>
#include "third_party/eigen3/Eigen/Core"
#if GOOGLE_CUDA
#include "cuda/include/cuda.h"
#include "cuda/include/cuda_runtime_api.h"
#include "cuda/include/cudnn.h"
#endif
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
// Placeholder tensor properties for an input whose dtype and shape cannot
// be determined: DT_INVALID with an unknown-rank shape.
static OpInfo::TensorProperties UnknownInput() {
  OpInfo::TensorProperties unknown;
  unknown.set_dtype(DataType::DT_INVALID);
  unknown.mutable_shape()->set_unknown_rank(true);
  return unknown;
}
static std::vector<TensorProto> ExtractTensors(const AttrValue& attr_value) {
std::vector<TensorProto> tensors;
switch (attr_value.value_case()) {
case AttrValue::kTensor: {
tensors.push_back(attr_value.tensor());
break;
}
case AttrValue::kList: {
for (const auto& tensor_proto : attr_value.list().tensor()) {
tensors.push_back(tensor_proto);
}
break;
}
default: {}
}
return tensors;
}
// Annotate the op_info inputs with extra information when possible (e.g. the
// input value if it's known statically).
// Annotate the op_info inputs with extra information when possible (e.g. the
// input value if it's known statically).
// For each non-control input of `node`: copies the value of a Const
// producer into op_info->inputs(i), attaches the on-disk size of inputs
// whose arg name contains "filename", and records the producing op of
// inputs whose arg name contains "handle". Lookup failures are skipped
// silently; a mismatch between op_info->inputs and the graph is logged.
static void ExtractExtraProperties(
const NodeDef& node,
const std::unordered_map<string, const NodeDef*>& name_to_node,
OpInfo* op_info) {
OpRegistry* op_registry = OpRegistry::Global();
const OpDef* op_def = nullptr;
auto s = op_registry->LookUpOpDef(node.op(), &op_def);
if (!s.ok()) {
// Unknown op: arg-name-based annotations below are skipped.
op_def = nullptr;
}
for (int i = 0; i < node.input_size(); ++i) {
const string input_name = node.input(i);
CHECK(!input_name.empty());
if (IsControlInput(input_name)) {
continue;
}
TensorId input_tensor_id = ParseTensorName(input_name);
const string input_node_name = input_tensor_id.first.ToString();
auto iter = name_to_node.find(input_node_name);
if (iter == name_to_node.end()) continue;
const NodeDef* input_node = iter->second;
if (i >= op_info->inputs_size()) {
LOG(ERROR) << "OpInfo's inputs doesn't match the graph! OpInfo: "
<< op_info->DebugString()
<< "\nCurrent node: " << node.DebugString()
<< "\nInput node: " << input_node->DebugString();
}
// The value attribute in Const input is useful for cost prediction.
if (input_node->op() == "Const" && i < op_info->inputs_size()) {
auto it = input_node->attr().find("value");
if (it == input_node->attr().end()) continue;
const AttrValue& attr_value = it->second;
std::vector<TensorProto> tensors = ExtractTensors(attr_value);
if (tensors.empty()) continue;
const TensorProto& t = tensors[0];
OpInfo::TensorProperties* input = op_info->mutable_inputs(i);
*(input->mutable_value()) = t;
// For filename input, the file size can also be useful.
if (op_def && i < op_def->input_arg_size() &&
op_def->input_arg(i).name().find("filename") != std::string::npos) {
Tensor tensor;
if (!tensor.FromProto(t)) {
continue;
}
if (tensor.NumElements() != 1) {
continue;
}
const string filename = tensor.scalar<string>()();
Env* env = Env::Default();
FileStatistics stat;
Status s = env->Stat(filename, &stat);
if (!s.ok()) {
// File may not exist at cost-estimation time; skip the annotation.
continue;
}
AttrValue attr;
attr.set_i(stat.length);
string attr_key = strings::StrCat("input_", i, "_filesize");
(*op_info->mutable_attr())[attr_key] = attr;
}
}
// When the input is a handle (e.g. look up table handle), the information
// in the op itself is not sufficient to predict the op memory.
if (op_def && i < op_def->input_arg_size() &&
op_def->input_arg(i).name().find("handle") != std::string::npos) {
string new_key = strings::StrCat("parent_", i, "_op");
AttrValue attr;
attr.set_s(input_node->op());
(*op_info->mutable_attr())[new_key] = attr;
// TODO(yuefengz): Only parent node's op name is copied. Copy inputs
// and attributes when necessary.
}
}
}
// Builds the per-input tensor properties (dtype/shape) of `node` from the
// cost graph, skipping control inputs. Inputs whose producer is missing
// from name_to_cost, or whose output info is unavailable, are reported as
// UnknownInput().
std::vector<OpInfo::TensorProperties> FindInputFeatures(
const NodeDef& node,
const std::unordered_map<string, const CostGraphDef::Node*>& name_to_cost,
const std::unordered_map<string, const NodeDef*>& name_to_node) {
std::vector<OpInfo::TensorProperties> inputs;
for (const auto& input_name : node.input()) {
CHECK(!input_name.empty());
TensorId input_tensor_id = ParseTensorName(input_name);
const string input_node_name = input_tensor_id.first.ToString();
const int output_index = input_tensor_id.second;
// Skip control inputs.
if (output_index == Graph::kControlSlot) {
continue;
}
auto it = name_to_cost.find(input_node_name);
if (it == name_to_cost.end() || output_index < 0) {
inputs.push_back(UnknownInput());
} else {
const CostGraphDef::Node* input_cost = it->second;
if (input_cost->output_info_size() == 0) {
inputs.push_back(UnknownInput());
} else {
// Copy dtype and shape from the producer's recorded output info.
const CostGraphDef::Node::OutputInfo& output =
input_cost->output_info(output_index);
OpInfo::TensorProperties input;
input.set_dtype(output.dtype());
*input.mutable_shape() = output.shape();
inputs.push_back(input);
}
}
}
return inputs;
}
// Resolves a (possibly partial) device name string into device properties.
// Strings that cannot be parsed, or whose type is neither GPU nor CPU,
// yield properties with type "UNKNOWN".
DeviceProperties GetDeviceInfo(const string& device_str) {
  DeviceNameUtils::ParsedName parsed;
  const bool parsed_ok = DeviceNameUtils::ParseFullName(device_str, &parsed);
  if (parsed_ok && parsed.type == "GPU") {
    return GetLocalGPUInfo(parsed.id);
  }
  if (parsed_ok && parsed.type == "CPU") {
    return GetLocalCPUInfo();
  }
  DeviceProperties unknown_device;
  unknown_device.set_type("UNKNOWN");
  return unknown_device;
}
// Convenience overload: resolves the device string stored in a cost-graph
// node.
DeviceProperties GetDeviceInfo(const CostGraphDef::Node& node) {
return GetDeviceInfo(node.device());
}
// Assembles an OpInfo (op name, attributes, input properties, plus the
// extra annotations from ExtractExtraProperties) for `node`. The device
// field is intentionally left unset; callers fill it in separately.
OpInfo BuildOpInfoWithoutDevice(
const NodeDef& node,
const std::unordered_map<string, const NodeDef*>& name_to_node,
const std::vector<OpInfo::TensorProperties>& inputs) {
OpInfo op_info;
op_info.set_op(node.op());
*op_info.mutable_attr() = node.attr();
for (auto& input : inputs) {
*op_info.add_inputs() = input;
}
ExtractExtraProperties(node, name_to_node, &op_info);
return op_info;
}
// Builds a short human-readable summary of op_info, e.g.
// "[Op=MatMul, input_shapes=[[2,3], [3,4]]]".
// Fixes: the original opened two brackets but closed only one, and
// concatenated the input shapes with no separator between them.
string GetOpDescription(const OpInfo& op_info) {
  string description = "[";
  description += "Op=" + op_info.op() + ", ";
  description += "input_shapes=[";
  bool first = true;
  for (auto const& input : op_info.inputs()) {
    if (!first) description += ", ";
    first = false;
    description += PartialTensorShape::DebugString(input.shape());
  }
  // Close both the shape list and the outer bracket.
  description += "]]";
  return description;
}
// Joins a graph with its cost graph into an OpPerformanceList: one entry per
// graph node that also appears in the cost graph, carrying op info, device,
// timing (converted from microseconds to nanoseconds) and memory figures.
OpPerformanceList CostGraphToOpPerformanceData(const CostGraphDef& cost_graph,
const GraphDef& graph) {
OpPerformanceList ret;
// Index both graphs by node name for the join below.
std::unordered_map<string, const CostGraphDef::Node*> name_to_cost;
std::unordered_map<string, const NodeDef*> name_to_node;
for (auto& node : cost_graph.node()) {
name_to_cost[node.name()] = &node;
}
for (auto& node : graph.node()) {
name_to_node[node.name()] = &node;
}
for (const auto& node : graph.node()) {
// Skip the nodes that are not in the cost graph: these are nodes that
// aren't run, because they aren't in the intersection of transitive
// fan-in of a fetch node and the transitive fan-out of an input, or nodes
// that were optimized away by the optimizer. Since they don't contribute
// to the execution time we simply discard them.
auto it = name_to_cost.find(node.name());
if (it == name_to_cost.end()) {
continue;
}
const CostGraphDef::Node* cost_node = it->second;
OpPerformance* perf = ret.add_op_performance();
perf->set_node(node.name());
std::vector<OpInfo::TensorProperties> inputs =
FindInputFeatures(node, name_to_cost, name_to_node);
*perf->mutable_op() = BuildOpInfoWithoutDevice(node, name_to_node, inputs);
*perf->mutable_op()->mutable_device() = GetDeviceInfo(cost_node->device());
perf->set_temporary_memory_size(cost_node->temporary_memory_size());
// Note that CostGraphDef::Node::compute_cost is microseconds, while
// OpPerformance.compute_cost is nanoseconds.
perf->set_compute_cost(cost_node->compute_cost() * 1000);
perf->set_compute_time(cost_node->compute_time() * 1000);
perf->set_memory_time(cost_node->memory_time() * 1000);
// Copy per-output and per-category memory figures verbatim.
for (const auto& output_info : cost_node->output_info()) {
perf->mutable_op_memory()->add_output_memory(output_info.size());
}
perf->mutable_op_memory()->set_host_temp_memory(
cost_node->host_temp_memory_size());
perf->mutable_op_memory()->set_device_temp_memory(
cost_node->device_temp_memory_size());
perf->mutable_op_memory()->set_host_persistent_memory(
cost_node->host_persistent_memory_size());
perf->mutable_op_memory()->set_device_persistent_memory(
cost_node->device_persistent_memory_size());
}
return ret;
}
} // end namespace grappler
} // end namespace tensorflow
|
// tagged pointer, for aba prevention
//
// Copyright (C) 2008 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Disclaimer: Not a Boost library.
#ifndef BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
#include <boost/lockfree/detail/prefix.hpp>
#ifndef BOOST_LOCKFREE_PTR_COMPRESSION
#include <boost/lockfree/detail/tagged_ptr_dcas.hpp>
#else
#include <boost/lockfree/detail/tagged_ptr_ptrcompression.hpp>
#endif
#endif /* BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED */
|
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "test/call_test.h"
#include <algorithm>
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "api/audio_codecs/builtin_audio_encoder_factory.h"
#include "call/rtp_transport_controller_send.h"
#include "call/video_config.h"
#include "modules/audio_mixer/audio_mixer_impl.h"
#include "rtc_base/checks.h"
#include "rtc_base/event.h"
#include "rtc_base/ptr_util.h"
#include "test/testsupport/fileutils.h"
namespace webrtc {
namespace test {
namespace {
// RTP header extension id used for the video-rotation extension; appended to
// every video send config (see CreateVideoSendConfig) and mirrored into the
// matching receive configs.
const int kVideoRotationRtpExtensionId = 4;
}
// Establishes the test defaults: real-time clock, null event log, one video
// stream, no audio/FlexFEC streams, built-in audio codec factories, and a
// dedicated task queue on which all setup/teardown work runs.
CallTest::CallTest()
    : clock_(Clock::GetRealTimeClock()),
      event_log_(RtcEventLog::CreateNull()),
      sender_call_transport_controller_(nullptr),
      video_send_config_(nullptr),
      video_send_stream_(nullptr),
      audio_send_config_(nullptr),
      audio_send_stream_(nullptr),
      fake_encoder_(clock_),
      num_video_streams_(1),
      num_audio_streams_(0),
      num_flexfec_streams_(0),
      audio_decoder_factory_(CreateBuiltinAudioDecoderFactory()),
      audio_encoder_factory_(CreateBuiltinAudioEncoderFactory()),
      task_queue_("CallTestTaskQueue") {}
CallTest::~CallTest() {
  // The fake audio devices and the capturer are used on |task_queue_|, so
  // destroy them there; SendTask blocks until the teardown has executed.
  task_queue_.SendTask([this]() {
    fake_send_audio_device_ = nullptr;
    fake_recv_audio_device_ = nullptr;
    frame_generator_capturer_.reset();
  });
}
// Drives a complete BaseTest lifecycle: builds calls, transports, configs and
// streams on |task_queue_|, runs the test body on the caller's thread, then
// tears everything down on |task_queue_| again. The setup order below is
// significant (calls before transports before configs before streams).
void CallTest::RunBaseTest(BaseTest* test) {
  task_queue_.SendTask([this, test]() {
    num_video_streams_ = test->GetNumVideoStreams();
    num_audio_streams_ = test->GetNumAudioStreams();
    num_flexfec_streams_ = test->GetNumFlexfecStreams();
    RTC_DCHECK(num_video_streams_ > 0 || num_audio_streams_ > 0);
    Call::Config send_config(test->GetSenderCallConfig());
    // Audio path: create the fake devices and wire the sender-side
    // AudioState (mixer, APM, device module) into the send config.
    if (num_audio_streams_ > 0) {
      CreateFakeAudioDevices(test->CreateCapturer(), test->CreateRenderer());
      test->OnFakeAudioDevicesCreated(fake_send_audio_device_.get(),
                                      fake_recv_audio_device_.get());
      apm_send_ = AudioProcessingBuilder().Create();
      apm_recv_ = AudioProcessingBuilder().Create();
      EXPECT_EQ(0, fake_send_audio_device_->Init());
      EXPECT_EQ(0, fake_recv_audio_device_->Init());
      AudioState::Config audio_state_config;
      audio_state_config.audio_mixer = AudioMixerImpl::Create();
      audio_state_config.audio_processing = apm_send_;
      audio_state_config.audio_device_module = fake_send_audio_device_;
      send_config.audio_state = AudioState::Create(audio_state_config);
      fake_send_audio_device_->RegisterAudioCallback(
          send_config.audio_state->audio_transport());
    }
    CreateSenderCall(send_config);
    if (sender_call_transport_controller_ != nullptr) {
      test->OnRtpTransportControllerSendCreated(
          sender_call_transport_controller_);
    }
    // Receiver call gets its own AudioState built around the receive-side
    // fake device and APM.
    if (test->ShouldCreateReceivers()) {
      Call::Config recv_config(test->GetReceiverCallConfig());
      if (num_audio_streams_ > 0) {
        AudioState::Config audio_state_config;
        audio_state_config.audio_mixer = AudioMixerImpl::Create();
        audio_state_config.audio_processing = apm_recv_;
        audio_state_config.audio_device_module = fake_recv_audio_device_;
        recv_config.audio_state = AudioState::Create(audio_state_config);
        fake_recv_audio_device_->RegisterAudioCallback(
            recv_config.audio_state->audio_transport()); }
      CreateReceiverCall(recv_config);
    }
    test->OnCallsCreated(sender_call_.get(), receiver_call_.get());
    receive_transport_.reset(test->CreateReceiveTransport(&task_queue_));
    send_transport_.reset(
        test->CreateSendTransport(&task_queue_, sender_call_.get()));
    if (test->ShouldCreateReceivers()) {
      send_transport_->SetReceiver(receiver_call_->Receiver());
      receive_transport_->SetReceiver(sender_call_->Receiver());
      if (num_video_streams_ > 0)
        receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
      if (num_audio_streams_ > 0)
        receiver_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp);
    } else {
      // Sender-only call delivers to itself.
      send_transport_->SetReceiver(sender_call_->Receiver());
      receive_transport_->SetReceiver(nullptr);
    }
    CreateSendConfig(num_video_streams_, num_audio_streams_,
                     num_flexfec_streams_, send_transport_.get());
    if (test->ShouldCreateReceivers()) {
      CreateMatchingReceiveConfigs(receive_transport_.get());
    }
    // Give the test a chance to mutate the default configs before the
    // streams are instantiated from them.
    if (num_video_streams_ > 0) {
      test->ModifyVideoConfigs(&video_send_config_, &video_receive_configs_,
                               &video_encoder_config_);
    }
    if (num_audio_streams_ > 0) {
      test->ModifyAudioConfigs(&audio_send_config_, &audio_receive_configs_);
    }
    if (num_flexfec_streams_ > 0) {
      test->ModifyFlexfecConfigs(&flexfec_receive_configs_);
    }
    if (num_flexfec_streams_ > 0) {
      CreateFlexfecStreams();
      test->OnFlexfecStreamsCreated(flexfec_receive_streams_);
    }
    if (num_video_streams_ > 0) {
      CreateVideoStreams();
      test->OnVideoStreamsCreated(video_send_stream_, video_receive_streams_);
    }
    if (num_audio_streams_ > 0) {
      CreateAudioStreams();
      test->OnAudioStreamsCreated(audio_send_stream_, audio_receive_streams_);
    }
    if (num_video_streams_ > 0) {
      int width = kDefaultWidth;
      int height = kDefaultHeight;
      int frame_rate = kDefaultFramerate;
      test->ModifyVideoCaptureStartResolution(&width, &height, &frame_rate);
      CreateFrameGeneratorCapturer(frame_rate, width, height);
      test->OnFrameGeneratorCapturerCreated(frame_generator_capturer_.get());
    }
    Start();
  });
  // The actual test logic runs on the caller's thread while the streams are
  // live on |task_queue_|.
  test->PerformTest();
  task_queue_.SendTask([this, test]() {
    Stop();
    test->OnStreamsStopped();
    DestroyStreams();
    send_transport_.reset();
    receive_transport_.reset();
    DestroyCalls();
  });
}
// Convenience helper: creates the sender call then the receiver call.
void CallTest::CreateCalls(const Call::Config& sender_config,
                           const Call::Config& receiver_config) {
  CreateSenderCall(sender_config);
  CreateReceiverCall(receiver_config);
}
// Creates the sender Call with an explicit RtpTransportControllerSend so the
// test can observe/inject on it; the Call takes ownership, the raw pointer is
// kept in |sender_call_transport_controller_| for later hooks.
void CallTest::CreateSenderCall(const Call::Config& config) {
  std::unique_ptr<RtpTransportControllerSend> controller_send =
      rtc::MakeUnique<RtpTransportControllerSend>(
          Clock::GetRealTimeClock(), config.event_log, config.bitrate_config);
  sender_call_transport_controller_ = controller_send.get();
  sender_call_.reset(Call::Create(config, std::move(controller_send)));
}
void CallTest::CreateReceiverCall(const Call::Config& config) {
  receiver_call_.reset(Call::Create(config));
}
// Destroys both calls; streams must already have been destroyed.
void CallTest::DestroyCalls() {
  sender_call_.reset();
  receiver_call_.reset();
}
// Fills |video_config| with the default fake-encoder send configuration:
// payload type/name, header extensions, and one SSRC per stream starting at
// kVideoSendSsrcs[num_used_ssrcs]. Also (re)builds |video_encoder_config_|.
void CallTest::CreateVideoSendConfig(VideoSendStream::Config* video_config,
                                     size_t num_video_streams,
                                     size_t num_used_ssrcs,
                                     Transport* send_transport) {
  RTC_DCHECK_LE(num_video_streams + num_used_ssrcs, kNumSsrcs);
  *video_config = VideoSendStream::Config(send_transport);
  video_config->encoder_settings.encoder = &fake_encoder_;
  video_config->encoder_settings.payload_name = "FAKE";
  video_config->encoder_settings.payload_type = kFakeVideoSendPayloadType;
  video_config->rtp.extensions.push_back(
      RtpExtension(RtpExtension::kTransportSequenceNumberUri,
                   kTransportSequenceNumberExtensionId));
  video_config->rtp.extensions.push_back(RtpExtension(
      RtpExtension::kVideoContentTypeUri, kVideoContentTypeExtensionId));
  FillEncoderConfiguration(num_video_streams, &video_encoder_config_);
  for (size_t i = 0; i < num_video_streams; ++i)
    video_config->rtp.ssrcs.push_back(kVideoSendSsrcs[num_used_ssrcs + i]);
  video_config->rtp.extensions.push_back(RtpExtension(
      RtpExtension::kVideoRotationUri, kVideoRotationRtpExtensionId));
}
// Builds the (at most one) audio send config and, if requested, enables
// FlexFEC protection of the first video SSRC on the video send config.
void CallTest::CreateAudioAndFecSendConfigs(size_t num_audio_streams,
                                            size_t num_flexfec_streams,
                                            Transport* send_transport) {
  RTC_DCHECK_LE(num_audio_streams, 1);
  RTC_DCHECK_LE(num_flexfec_streams, 1);
  if (num_audio_streams > 0) {
    audio_send_config_ = AudioSendStream::Config(send_transport);
    audio_send_config_.rtp.ssrc = kAudioSendSsrc;
    // Stereo Opus at 48 kHz is the fixed test codec.
    audio_send_config_.send_codec_spec = AudioSendStream::Config::SendCodecSpec(
        kAudioSendPayloadType, {"opus", 48000, 2, {{"stereo", "1"}}});
    audio_send_config_.encoder_factory = audio_encoder_factory_;
  }
  // TODO(brandtr): Update this when we support multistream protection.
  if (num_flexfec_streams > 0) {
    video_send_config_.rtp.flexfec.payload_type = kFlexfecPayloadType;
    video_send_config_.rtp.flexfec.ssrc = kFlexfecSendSsrc;
    video_send_config_.rtp.flexfec.protected_media_ssrcs = {kVideoSendSsrcs[0]};
  }
}
// Builds all send-side configs (video first, then audio/FlexFEC) on the
// member config fields, all sharing |send_transport|.
void CallTest::CreateSendConfig(size_t num_video_streams,
                                size_t num_audio_streams,
                                size_t num_flexfec_streams,
                                Transport* send_transport) {
  if (num_video_streams > 0) {
    CreateVideoSendConfig(&video_send_config_, num_video_streams, 0,
                          send_transport);
  }
  CreateAudioAndFecSendConfigs(num_audio_streams, num_flexfec_streams,
                               send_transport);
}
// Produces one receive config per send SSRC, mirroring the send-side header
// extensions, with a matching decoder for the fake encoder. Decoders are
// retained in |allocated_decoders_| so they outlive the receive streams.
std::vector<VideoReceiveStream::Config>
CallTest::CreateMatchingVideoReceiveConfigs(
    const VideoSendStream::Config& video_send_config,
    Transport* rtcp_send_transport) {
  std::vector<VideoReceiveStream::Config> result;
  RTC_DCHECK(!video_send_config.rtp.ssrcs.empty());
  VideoReceiveStream::Config video_config(rtcp_send_transport);
  video_config.rtp.remb = false;
  video_config.rtp.transport_cc = true;
  video_config.rtp.local_ssrc = kReceiverLocalVideoSsrc;
  for (const RtpExtension& extension : video_send_config.rtp.extensions)
    video_config.rtp.extensions.push_back(extension);
  video_config.renderer = &fake_renderer_;
  for (size_t i = 0; i < video_send_config.rtp.ssrcs.size(); ++i) {
    VideoReceiveStream::Decoder decoder =
        test::CreateMatchingDecoder(video_send_config.encoder_settings);
    allocated_decoders_.push_back(
        std::unique_ptr<VideoDecoder>(decoder.decoder));
    // Reuse the template config, swapping in this stream's decoder and SSRC.
    video_config.decoders.clear();
    video_config.decoders.push_back(decoder);
    video_config.rtp.remote_ssrc = video_send_config.rtp.ssrcs[i];
    result.push_back(video_config.Copy());
  }
  // Only the first stream is FlexFEC-protected (single-stream protection).
  result[0].rtp.protected_by_flexfec = (num_flexfec_streams_ == 1);
  return result;
}
// Builds receive configs that match the audio and FlexFEC send configs.
// Consistency fix: use RTC_DCHECK_LE (as CreateAudioAndFecSendConfigs does)
// instead of RTC_DCHECK_GE with reversed operands / a plain RTC_DCHECK, so
// failure messages print the operands.
void CallTest::CreateMatchingAudioAndFecConfigs(
    Transport* rtcp_send_transport) {
  RTC_DCHECK_LE(num_audio_streams_, 1);
  if (num_audio_streams_ == 1) {
    AudioReceiveStream::Config audio_config;
    audio_config.rtp.local_ssrc = kReceiverLocalAudioSsrc;
    audio_config.rtcp_send_transport = rtcp_send_transport;
    audio_config.rtp.remote_ssrc = audio_send_config_.rtp.ssrc;
    audio_config.decoder_factory = audio_decoder_factory_;
    audio_config.decoder_map = {{kAudioSendPayloadType, {"opus", 48000, 2}}};
    audio_receive_configs_.push_back(audio_config);
  }
  // TODO(brandtr): Update this when we support multistream protection.
  RTC_DCHECK_LE(num_flexfec_streams_, 1);
  if (num_flexfec_streams_ == 1) {
    FlexfecReceiveStream::Config config(rtcp_send_transport);
    config.payload_type = kFlexfecPayloadType;
    config.remote_ssrc = kFlexfecSendSsrc;
    config.protected_media_ssrcs = {kVideoSendSsrcs[0]};
    config.local_ssrc = kReceiverLocalVideoSsrc;
    // Mirror the send-side RTP header extensions so FlexFEC packets parse
    // with the same extension map.
    for (const RtpExtension& extension : video_send_config_.rtp.extensions)
      config.rtp_header_extensions.push_back(extension);
    flexfec_receive_configs_.push_back(config);
  }
}
// Rebuilds all receive configs (video, audio, FlexFEC) from the current send
// configs; clears any previously created configs/decoders first.
void CallTest::CreateMatchingReceiveConfigs(Transport* rtcp_send_transport) {
  video_receive_configs_.clear();
  allocated_decoders_.clear();
  if (num_video_streams_ > 0) {
    std::vector<VideoReceiveStream::Config> new_configs =
        CreateMatchingVideoReceiveConfigs(video_send_config_,
                                          rtcp_send_transport);
    for (VideoReceiveStream::Config& config : new_configs) {
      video_receive_configs_.push_back(config.Copy());
    }
  }
  CreateMatchingAudioAndFecConfigs(rtcp_send_transport);
}
// Creates a frame-generator capturer whose effective frame rate is scaled by
// |speed| against the supplied |clock| (used to simulate clock drift), and
// attaches it as the video send stream's source.
void CallTest::CreateFrameGeneratorCapturerWithDrift(Clock* clock,
                                                     float speed,
                                                     int framerate,
                                                     int width,
                                                     int height) {
  frame_generator_capturer_.reset(test::FrameGeneratorCapturer::Create(
      width, height, rtc::nullopt, rtc::nullopt, framerate * speed, clock));
  video_send_stream_->SetSource(
      frame_generator_capturer_.get(),
      VideoSendStream::DegradationPreference::kMaintainFramerate);
}
// Same as above but driven by the test's own clock with no drift.
void CallTest::CreateFrameGeneratorCapturer(int framerate,
                                            int width,
                                            int height) {
  frame_generator_capturer_.reset(test::FrameGeneratorCapturer::Create(
      width, height, rtc::nullopt, rtc::nullopt, framerate, clock_));
  video_send_stream_->SetSource(
      frame_generator_capturer_.get(),
      VideoSendStream::DegradationPreference::kMaintainFramerate);
}
// Creates a capture-only device for the send side and a render-only device
// for the receive side (unused direction passed as nullptr; speed factor 1).
void CallTest::CreateFakeAudioDevices(
    std::unique_ptr<TestAudioDeviceModule::Capturer> capturer,
    std::unique_ptr<TestAudioDeviceModule::Renderer> renderer) {
  fake_send_audio_device_ = TestAudioDeviceModule::CreateTestAudioDeviceModule(
      std::move(capturer), nullptr, 1.f);
  fake_recv_audio_device_ = TestAudioDeviceModule::CreateTestAudioDeviceModule(
      nullptr, std::move(renderer), 1.f);
}
// Instantiates the video send stream on the sender call and one receive
// stream per receive config on the receiver call, then (re)associates any
// existing FlexFEC streams with them.
void CallTest::CreateVideoStreams() {
  RTC_DCHECK(video_send_stream_ == nullptr);
  RTC_DCHECK(video_receive_streams_.empty());
  video_send_stream_ = sender_call_->CreateVideoSendStream(
      video_send_config_.Copy(), video_encoder_config_.Copy());
  for (size_t i = 0; i < video_receive_configs_.size(); ++i) {
    video_receive_streams_.push_back(receiver_call_->CreateVideoReceiveStream(
        video_receive_configs_[i].Copy()));
  }
  AssociateFlexfecStreamsWithVideoStreams();
}
// Instantiates the audio send/receive streams from the current configs.
void CallTest::CreateAudioStreams() {
  RTC_DCHECK(audio_send_stream_ == nullptr);
  RTC_DCHECK(audio_receive_streams_.empty());
  audio_send_stream_ = sender_call_->CreateAudioSendStream(audio_send_config_);
  for (size_t i = 0; i < audio_receive_configs_.size(); ++i) {
    audio_receive_streams_.push_back(
        receiver_call_->CreateAudioReceiveStream(audio_receive_configs_[i]));
  }
}
// Instantiates FlexFEC receive streams from the current configs and attaches
// them to any already-created video receive streams.
void CallTest::CreateFlexfecStreams() {
  for (size_t i = 0; i < flexfec_receive_configs_.size(); ++i) {
    flexfec_receive_streams_.push_back(
        receiver_call_->CreateFlexfecReceiveStream(
            flexfec_receive_configs_[i]));
  }
  AssociateFlexfecStreamsWithVideoStreams();
}
void CallTest::AssociateFlexfecStreamsWithVideoStreams() {
  // All FlexFEC streams protect all of the video streams.
  for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_) {
    for (VideoReceiveStream* video_recv_stream : video_receive_streams_) {
      video_recv_stream->AddSecondarySink(flexfec_recv_stream);
    }
  }
}
// Inverse of the above; must run before either side is destroyed.
void CallTest::DissociateFlexfecStreamsFromVideoStreams() {
  for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_) {
    for (VideoReceiveStream* video_recv_stream : video_receive_streams_) {
      video_recv_stream->RemoveSecondarySink(flexfec_recv_stream);
    }
  }
}
// Starts all created streams and finally the capturer (so no frames arrive
// before the streams are running).
void CallTest::Start() {
  if (video_send_stream_)
    video_send_stream_->Start();
  for (VideoReceiveStream* video_recv_stream : video_receive_streams_)
    video_recv_stream->Start();
  if (audio_send_stream_) {
    audio_send_stream_->Start();
  }
  for (AudioReceiveStream* audio_recv_stream : audio_receive_streams_)
    audio_recv_stream->Start();
  if (frame_generator_capturer_.get() != NULL)
    frame_generator_capturer_->Start();
}
// Stops everything in the exact reverse order of Start().
void CallTest::Stop() {
  if (frame_generator_capturer_.get() != NULL)
    frame_generator_capturer_->Stop();
  for (AudioReceiveStream* audio_recv_stream : audio_receive_streams_)
    audio_recv_stream->Stop();
  if (audio_send_stream_) {
    audio_send_stream_->Stop();
  }
  for (VideoReceiveStream* video_recv_stream : video_receive_streams_)
    video_recv_stream->Stop();
  if (video_send_stream_)
    video_send_stream_->Stop();
}
void CallTest::DestroyStreams() {
DissociateFlexfecStreamsFromVideoStreams();
if (audio_send_stream_)
sender_call_->DestroyAudioSendStream(audio_send_stream_);
audio_send_stream_ = nullptr;
for (AudioReceiveStream* audio_recv_stream : audio_receive_streams_)
receiver_call_->DestroyAudioReceiveStream(audio_recv_stream);
if (video_send_stream_)
sender_call_->DestroyVideoSendStream(video_send_stream_);
video_send_stream_ = nullptr;
for (VideoReceiveStream* video_recv_stream : video_receive_streams_)
receiver_call_->DestroyVideoReceiveStream(video_recv_stream);
for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_)
receiver_call_->DestroyFlexfecReceiveStream(flexfec_recv_stream);
video_receive_streams_.clear();
allocated_decoders_.clear();
}
// Forwards a fake rotation to the capturer; requires that
// CreateFrameGeneratorCapturer*() has already been called.
void CallTest::SetFakeVideoCaptureRotation(VideoRotation rotation) {
  frame_generator_capturer_->SetFakeRotation(rotation);
}
// Out-of-line definitions for the class-scope constants (required when the
// constants are odr-used, e.g. taken by reference).
constexpr size_t CallTest::kNumSsrcs;
const int CallTest::kDefaultWidth;
const int CallTest::kDefaultHeight;
const int CallTest::kDefaultFramerate;
const int CallTest::kDefaultTimeoutMs = 30 * 1000;
const int CallTest::kLongTimeoutMs = 120 * 1000;
// Fixed SSRC pools used by the tests; RTX SSRCs are paired with the video
// SSRCs by index.
const uint32_t CallTest::kSendRtxSsrcs[kNumSsrcs] = {
    0xBADCAFD, 0xBADCAFE, 0xBADCAFF, 0xBADCB00, 0xBADCB01, 0xBADCB02};
const uint32_t CallTest::kVideoSendSsrcs[kNumSsrcs] = {
    0xC0FFED, 0xC0FFEE, 0xC0FFEF, 0xC0FFF0, 0xC0FFF1, 0xC0FFF2};
const uint32_t CallTest::kAudioSendSsrc = 0xDEADBEEF;
const uint32_t CallTest::kFlexfecSendSsrc = 0xBADBEEF;
const uint32_t CallTest::kReceiverLocalVideoSsrc = 0x123456;
const uint32_t CallTest::kReceiverLocalAudioSsrc = 0x1234567;
const int CallTest::kNackRtpHistoryMs = 1000;
const uint8_t CallTest::kDefaultKeepalivePayloadType =
    RtpKeepAliveConfig().payload_type;
// Maps every payload type the tests use to its media type, for routing in
// the fake transports.
const std::map<uint8_t, MediaType> CallTest::payload_type_map_ = {
    {CallTest::kVideoSendPayloadType, MediaType::VIDEO},
    {CallTest::kFakeVideoSendPayloadType, MediaType::VIDEO},
    {CallTest::kSendRtxPayloadType, MediaType::VIDEO},
    {CallTest::kRedPayloadType, MediaType::VIDEO},
    {CallTest::kRtxRedPayloadType, MediaType::VIDEO},
    {CallTest::kUlpfecPayloadType, MediaType::VIDEO},
    {CallTest::kFlexfecPayloadType, MediaType::VIDEO},
    {CallTest::kAudioSendPayloadType, MediaType::AUDIO},
    {CallTest::kDefaultKeepalivePayloadType, MediaType::ANY}};
// BaseTest without a timeout (observer never times out) vs. with one.
BaseTest::BaseTest() : event_log_(RtcEventLog::CreateNull()) {}
BaseTest::BaseTest(unsigned int timeout_ms)
    : RtpRtcpObserver(timeout_ms), event_log_(RtcEventLog::CreateNull()) {}
BaseTest::~BaseTest() {
}
// Default audio fixtures: pulsed-noise capture and discard rendering,
// both at 48 kHz.
std::unique_ptr<TestAudioDeviceModule::Capturer> BaseTest::CreateCapturer() {
  return TestAudioDeviceModule::CreatePulsedNoiseCapturer(256, 48000);
}
std::unique_ptr<TestAudioDeviceModule::Renderer> BaseTest::CreateRenderer() {
  return TestAudioDeviceModule::CreateDiscardRenderer(48000);
}
// Default no-op / default-config hooks; subclasses override as needed.
void BaseTest::OnFakeAudioDevicesCreated(
    TestAudioDeviceModule* send_audio_device,
    TestAudioDeviceModule* recv_audio_device) {}
Call::Config BaseTest::GetSenderCallConfig() {
  return Call::Config(event_log_.get());
}
Call::Config BaseTest::GetReceiverCallConfig() {
  return Call::Config(event_log_.get());
}
void BaseTest::OnRtpTransportControllerSendCreated(
    RtpTransportControllerSend* controller) {}
void BaseTest::OnCallsCreated(Call* sender_call, Call* receiver_call) {
}
// Default transports: PacketTransport with no simulated network impairment;
// the test itself acts as the RTP/RTCP observer. Caller takes ownership.
test::PacketTransport* BaseTest::CreateSendTransport(
    SingleThreadedTaskQueueForTesting* task_queue,
    Call* sender_call) {
  return new PacketTransport(
      task_queue, sender_call, this, test::PacketTransport::kSender,
      CallTest::payload_type_map_, FakeNetworkPipe::Config());
}
test::PacketTransport* BaseTest::CreateReceiveTransport(
    SingleThreadedTaskQueueForTesting* task_queue) {
  return new PacketTransport(
      task_queue, nullptr, this, test::PacketTransport::kReceiver,
      CallTest::payload_type_map_, FakeNetworkPipe::Config());
}
// Default stream counts: one video stream, no audio, no FlexFEC.
size_t BaseTest::GetNumVideoStreams() const {
  return 1;
}
size_t BaseTest::GetNumAudioStreams() const {
  return 0;
}
size_t BaseTest::GetNumFlexfecStreams() const {
  return 0;
}
// No-op hook; subclasses mutate the configs in place before streams are built.
void BaseTest::ModifyVideoConfigs(
    VideoSendStream::Config* send_config,
    std::vector<VideoReceiveStream::Config>* receive_configs,
    VideoEncoderConfig* encoder_config) {}
// No-op hook; subclasses override to change the capture start resolution
// and/or frame rate in place. (Fixed misspelled parameter name |heigt|.)
void BaseTest::ModifyVideoCaptureStartResolution(int* width,
                                                 int* height,
                                                 int* frame_rate) {}
// Remaining default no-op hooks, invoked at the corresponding points of
// RunBaseTest(); subclasses override the ones they care about.
void BaseTest::OnVideoStreamsCreated(
    VideoSendStream* send_stream,
    const std::vector<VideoReceiveStream*>& receive_streams) {}
void BaseTest::ModifyAudioConfigs(
    AudioSendStream::Config* send_config,
    std::vector<AudioReceiveStream::Config>* receive_configs) {}
void BaseTest::OnAudioStreamsCreated(
    AudioSendStream* send_stream,
    const std::vector<AudioReceiveStream*>& receive_streams) {}
void BaseTest::ModifyFlexfecConfigs(
    std::vector<FlexfecReceiveStream::Config>* receive_configs) {}
void BaseTest::OnFlexfecStreamsCreated(
    const std::vector<FlexfecReceiveStream*>& receive_streams) {}
void BaseTest::OnFrameGeneratorCapturerCreated(
    FrameGeneratorCapturer* frame_generator_capturer) {
}
void BaseTest::OnStreamsStopped() {
}
// SendTest: sender-only scenarios (no receiver call/streams are created).
SendTest::SendTest(unsigned int timeout_ms) : BaseTest(timeout_ms) {
}
bool SendTest::ShouldCreateReceivers() const {
  return false;
}
// EndToEndTest: full sender+receiver scenarios.
EndToEndTest::EndToEndTest() {}
EndToEndTest::EndToEndTest(unsigned int timeout_ms) : BaseTest(timeout_ms) {
}
bool EndToEndTest::ShouldCreateReceivers() const {
  return true;
}
} // namespace test
} // namespace webrtc
|
// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#include "include/pika_server.h"
#include <ctime>
#include <fstream>
#include <iterator>
#include <algorithm>
#include <ifaddrs.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/resource.h>
#include "slash/include/env.h"
#include "slash/include/rsync.h"
#include "pink/include/pink_cli.h"
#include "pink/include/redis_cli.h"
#include "pink/include/bg_thread.h"
#include "include/pika_rm.h"
#include "include/pika_server.h"
#include "include/pika_dispatch_thread.h"
#include "include/pika_cmd_table_manager.h"
extern PikaServer* g_pika_server;
extern PikaReplicaManager* g_pika_rm;
extern PikaCmdTableManager* g_pika_cmd_table_manager;
// Background-thread task: recursively deletes the directory named by |arg|
// (a heap-allocated std::string owned and freed by this task).
void DoPurgeDir(void* arg) {
  std::string path = *(static_cast<std::string*>(arg));
  LOG(INFO) << "Delete dir: " << path << " start";
  slash::DeleteDir(path);
  LOG(INFO) << "Delete dir: " << path << " done";
  delete static_cast<std::string*>(arg);
}
// Background-thread task: streams a DB-sync snapshot to a slave; |arg| is a
// heap-allocated DBSyncArg owned and freed by this task.
void DoDBSync(void* arg) {
  DBSyncArg* dbsa = reinterpret_cast<DBSyncArg*>(arg);
  PikaServer* const ps = dbsa->p;
  ps->DbSyncSendFile(dbsa->ip, dbsa->port,
          dbsa->table_name, dbsa->partition_id);
  delete dbsa;
}
// Constructs the server: resolves host/port, initializes the rwlocks and
// blackwidow options, and creates (but does not start) all worker threads.
// Thread start happens later in PikaServer::Start().
PikaServer::PikaServer() :
        exit_(false),
        slot_state_(INFREE),
        have_scheduled_crontask_(false),
        last_check_compact_time_({0, 0}),
        master_ip_(""),
        master_port_(0),
        repl_state_(PIKA_REPL_NO_CONNECT),
        role_(PIKA_ROLE_SINGLE),
        last_meta_sync_timestamp_(0),
        first_meta_sync_(false),
        loop_partition_state_machine_(false),
        force_full_sync_(false),
        slowlog_entry_id_(0) {
  //Init server ip host
  if (!ServerInit()) {
    LOG(FATAL) << "ServerInit iotcl error";
  }
  // Writer-preferring rwlocks so config/table writers are not starved by
  // the many reader threads.
  pthread_rwlockattr_t bw_options_rw_attr;
  pthread_rwlockattr_init(&bw_options_rw_attr);
  pthread_rwlockattr_setkind_np(&bw_options_rw_attr,
                                PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  pthread_rwlock_init(&bw_options_rw_, &bw_options_rw_attr);
  InitBlackwidowOptions();
  pthread_rwlockattr_t tables_rw_attr;
  pthread_rwlockattr_init(&tables_rw_attr);
  pthread_rwlockattr_setkind_np(&tables_rw_attr,
                                PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  pthread_rwlock_init(&tables_rw_, &tables_rw_attr);
  // Create thread
  worker_num_ = std::min(g_pika_conf->thread_num(),
                         PIKA_MAX_WORKER_THREAD_NUM);
  // Bind addresses: wildcard when no interface configured, otherwise
  // loopback plus the resolved interface address.
  std::set<std::string> ips;
  if (g_pika_conf->network_interface().empty()) {
    ips.insert("0.0.0.0");
  } else {
    ips.insert("127.0.0.1");
    ips.insert(host_);
  }
  // We estimate the queue size
  int worker_queue_limit = g_pika_conf->maxclients() / worker_num_ + 100;
  LOG(INFO) << "Worker queue limit is " << worker_queue_limit;
  pika_dispatch_thread_ = new PikaDispatchThread(ips, port_, worker_num_, 3000,
                                                 worker_queue_limit, g_pika_conf->max_conn_rbuf_size());
  pika_monitor_thread_ = new PikaMonitorThread();
  // Rsync listens on a shifted port for full-sync data transfer.
  pika_rsync_service_ = new PikaRsyncService(g_pika_conf->db_sync_path(),
                                             g_pika_conf->port() + kPortShiftRSync);
  pika_pubsub_thread_ = new pink::PubSubThread();
  pika_auxiliary_thread_ = new PikaAuxiliaryThread();
  pika_client_processor_ = new PikaClientProcessor(g_pika_conf->thread_pool_size(), 100000);
  pthread_rwlock_init(&state_protector_, NULL);
  pthread_rwlock_init(&slowlog_protector_, NULL);
}
// Tears down all threads and state. Destruction order matters: the dispatch
// thread is deleted before the worker-side processor it feeds.
PikaServer::~PikaServer() {
  // DispatchThread will use queue of worker thread,
  // so we need to delete dispatch before worker.
  pika_client_processor_->Stop();
  delete pika_dispatch_thread_;
  {
    // Drop all registered slaves under the slave mutex.
    slash::MutexLock l(&slave_mutex_);
    std::vector<SlaveItem>::iterator iter = slaves_.begin();
    while (iter != slaves_.end()) {
      iter = slaves_.erase(iter);
      LOG(INFO) << "Delete slave success";
    }
  }
  delete pika_pubsub_thread_;
  delete pika_auxiliary_thread_;
  delete pika_rsync_service_;
  delete pika_client_processor_;
  delete pika_monitor_thread_;
  bgsave_thread_.StopThread();
  key_scan_thread_.StopThread();
  tables_.clear();
  pthread_rwlock_destroy(&tables_rw_);
  pthread_rwlock_destroy(&state_protector_);
  pthread_rwlock_destroy(&slowlog_protector_);
  LOG(INFO) << "PikaServer " << pthread_self() << " exit!!!";
}
// Resolves the network interface and its address into |host_| and the
// configured port into |port_|. Returns false only when /proc/net/route
// cannot be read; all other failures are fatal.
bool PikaServer::ServerInit() {
  std::string network_interface = g_pika_conf->network_interface();
  // No interface configured: pick the interface of the default route
  // (the /proc/net/route row whose Destination field is "00000000").
  if (network_interface.empty()) {
    std::ifstream routeFile("/proc/net/route", std::ios_base::in);
    if (!routeFile.good()) {
      return false;
    }
    std::string line;
    std::vector<std::string> tokens;
    while (std::getline(routeFile, line)) {
      std::istringstream stream(line);
      std::copy(std::istream_iterator<std::string>(stream),
                std::istream_iterator<std::string>(),
                std::back_inserter<std::vector<std::string> >(tokens));
      // the default interface is the one having the second
      // field, Destination, set to "00000000"
      if ((tokens.size() >= 2) && (tokens[1] == std::string("00000000"))) {
        network_interface = tokens[0];
        break;
      }
      tokens.clear();
    }
    routeFile.close();
  }
  LOG(INFO) << "Using Networker Interface: " << network_interface;
  // Walk the system interface list and take the first IPv4 (or IPv6)
  // address whose interface name matches.
  struct ifaddrs* ifAddrStruct = NULL;
  struct ifaddrs* ifa = NULL;
  void* tmpAddrPtr = NULL;
  if (getifaddrs(&ifAddrStruct) == -1) {
    LOG(FATAL) << "getifaddrs failed: " << strerror(errno);
  }
  for (ifa = ifAddrStruct; ifa != NULL; ifa = ifa->ifa_next) {
    if (ifa->ifa_addr == NULL) {
      continue;
    }
    if (ifa->ifa_addr->sa_family == AF_INET) {  // Check it is a valid IPv4 address
      tmpAddrPtr = &((struct sockaddr_in*)ifa->ifa_addr)->sin_addr;
      char addressBuffer[INET_ADDRSTRLEN];
      inet_ntop(AF_INET, tmpAddrPtr, addressBuffer, INET_ADDRSTRLEN);
      if (std::string(ifa->ifa_name) == network_interface) {
        host_ = addressBuffer;
        break;
      }
    } else if (ifa->ifa_addr->sa_family == AF_INET6) {  // Check it is a valid IPv6 address
      tmpAddrPtr = &((struct sockaddr_in6*)ifa->ifa_addr)->sin6_addr;
      char addressBuffer[INET6_ADDRSTRLEN];
      inet_ntop(AF_INET6, tmpAddrPtr, addressBuffer, INET6_ADDRSTRLEN);
      if (std::string(ifa->ifa_name) == network_interface) {
        host_ = addressBuffer;
        break;
      }
    }
  }
  if (ifAddrStruct != NULL) {
    freeifaddrs(ifAddrStruct);
  }
  // |ifa| is only compared against NULL after freeifaddrs (never
  // dereferenced); NULL here means no interface matched.
  if (ifa == NULL) {
    LOG(FATAL) << "error network interface: " << network_interface << ", please check!";
  }
  port_ = g_pika_conf->port();
  LOG(INFO) << "host: " << host_ << " port: " << port_;
  return true;
}
void PikaServer::Start() {
int ret = 0;
// start rsync first, rocksdb opened fd will not appear in this fork
ret = pika_rsync_service_->StartRsync();
if (0 != ret) {
tables_.clear();
LOG(FATAL) << "Start Rsync Error: bind port " +std::to_string(pika_rsync_service_->ListenPort()) + " failed"
<< ", Listen on this port to receive Master FullSync Data";
}
// We Init Table Struct Before Start The following thread
InitTableStruct();
ret = pika_client_processor_->Start();
if (ret != pink::kSuccess) {
tables_.clear();
LOG(FATAL) << "Start PikaClientProcessor Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error");
}
ret = pika_dispatch_thread_->StartThread();
if (ret != pink::kSuccess) {
tables_.clear();
LOG(FATAL) << "Start Dispatch Error: " << ret << (ret == pink::kBindError ? ": bind port " + std::to_string(port_) + " conflict"
: ": other error") << ", Listen on this port to handle the connected redis client";
}
ret = pika_pubsub_thread_->StartThread();
if (ret != pink::kSuccess) {
tables_.clear();
LOG(FATAL) << "Start Pubsub Error: " << ret << (ret == pink::kBindError ? ": bind port conflict" : ": other error");
}
ret = pika_auxiliary_thread_->StartThread();
if (ret != pink::kSuccess) {
tables_.clear();
LOG(FATAL) << "Start Auxiliary Thread Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error");
}
time(&start_time_s_);
std::string slaveof = g_pika_conf->slaveof();
if (!slaveof.empty()) {
int32_t sep = slaveof.find(":");
std::string master_ip = slaveof.substr(0, sep);
int32_t master_port = std::stoi(slaveof.substr(sep+1));
if ((master_ip == "127.0.0.1" || master_ip == host_) && master_port == port_) {
LOG(FATAL) << "you will slaveof yourself as the config file, please check";
} else {
SetMaster(master_ip, master_port);
}
}
LOG(INFO) << "Pika Server going to start";
while (!exit_) {
DoTimingTask();
// wake up every 10 second
int try_num = 0;
while (!exit_ && try_num++ < 5) {
sleep(1);
}
}
LOG(INFO) << "Goodbye...";
}
// Requests cron-loop shutdown; Start() observes this flag within ~1 second.
void PikaServer::Exit() {
  exit_ = true;
}
// host_/port_/start_time_s_ are set once during init/Start and then only
// read, so these accessors take no lock.
std::string PikaServer::host() {
  return host_;
}
int PikaServer::port() {
  return port_;
}
time_t PikaServer::start_time_s() {
  return start_time_s_;
}
// Replication-state accessors, reading under a shared lock.
// Bug fix: the previous "slash::RWLock(&state_protector_, false);" created
// an unnamed temporary whose destructor released the lock at the end of the
// statement, leaving the subsequent read unprotected. Binding the guard to
// a named local keeps the lock held for the whole function (matching the
// named-guard usage elsewhere in this file, e.g. bw_options()).
std::string PikaServer::master_ip() {
  slash::RWLock l(&state_protector_, false);
  return master_ip_;
}
int PikaServer::master_port() {
  slash::RWLock l(&state_protector_, false);
  return master_port_;
}
int PikaServer::role() {
  slash::RWLock l(&state_protector_, false);
  return role_;
}
// Returns true when writes to |key| in |table_name| must be rejected:
// either this server is a read-only slave, or (in sharding mode) the
// partition that owns the key has the slave role.
// Bug fix: the unnamed "slash::RWLock(...)" temporary released the lock
// immediately; name the guard so |role_| is read under the lock.
bool PikaServer::readonly(const std::string& table_name, const std::string& key) {
  slash::RWLock l(&state_protector_, false);
  if ((role_ & PIKA_ROLE_SLAVE)
    && g_pika_conf->slave_read_only()) {
    return true;
  }
  if (!g_pika_conf->classic_mode()) {
    std::shared_ptr<Table> table = GetTable(table_name);
    if (table == nullptr) {
      // swallow this error will process later
      return false;
    }
    uint32_t index = g_pika_cmd_table_manager->DistributeKey(
        key, table->PartitionNum());
    int role = 0;
    Status s = g_pika_rm->CheckPartitionRole(table_name, index, &role);
    if (!s.ok()) {
      // swallow this error will process later
      return false;
    }
    if (role & PIKA_ROLE_SLAVE) {
      return true;
    }
  }
  return false;
}
// When consensus replication is enabled, verifies that the master partition
// owning |key| passes its consensus sanity check; always true when the
// consensus level is 0. Returns false for unknown tables/partitions.
bool PikaServer::ConsensusCheck(const std::string& table_name, const std::string& key) {
  if (g_pika_conf->consensus_level() != 0) {
    std::shared_ptr<Table> table = GetTable(table_name);
    if (table == nullptr) {
      return false;
    }
    uint32_t index = g_pika_cmd_table_manager->DistributeKey(
        key, table->PartitionNum());
    std::shared_ptr<SyncMasterPartition> master_partition =
        g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(table_name, index));
    if (!master_partition) {
      LOG(WARNING) << "Sync Master Partition: " << table_name << ":" << index
                   << ", NotFound";
      return false;
    }
    Status s = master_partition->ConsensusSanityCheck();
    if (!s.ok()) {
      return false;
    } else {
      return true;
    }
  }
  return true;
}
// Replication-state accessors.
// Bug fix: the unnamed "slash::RWLock(...)" temporary released the lock
// immediately after construction; a named guard holds it for the whole read.
int PikaServer::repl_state() {
  slash::RWLock l(&state_protector_, false);
  return repl_state_;
}
// Human-readable form of |repl_state_| for INFO output; unknown states map
// to an empty string.
std::string PikaServer::repl_state_str() {
  slash::RWLock l(&state_protector_, false);
  switch (repl_state_) {
    case PIKA_REPL_NO_CONNECT:
      return "no connect";
    case PIKA_REPL_SHOULD_META_SYNC:
      return "should meta sync";
    case PIKA_REPL_META_SYNC_DONE:
      return "meta sync done";
    case PIKA_REPL_ERROR:
      return "error";
    default:
      return "";
  }
}
// Flag controlling whether the next sync with the master must be a full
// sync; read/written without a lock.
bool PikaServer::force_full_sync() {
  return force_full_sync_;
}
void PikaServer::SetForceFullSync(bool v) {
  force_full_sync_ = v;
}
// Applies |queue_limit| to the dispatch thread, first raising RLIMIT_NOFILE
// to cover maxclients plus reserved fds when the current soft limit is too
// low (a failed setrlimit is logged as fatal).
void PikaServer::SetDispatchQueueLimit(int queue_limit) {
  rlimit limit;
  rlim_t maxfiles = g_pika_conf->maxclients() + PIKA_MIN_RESERVED_FDS;
  if (getrlimit(RLIMIT_NOFILE, &limit) == -1) {
    LOG(WARNING) << "getrlimit error: " << strerror(errno);
  } else if (limit.rlim_cur < maxfiles) {
    rlim_t old_limit = limit.rlim_cur;
    limit.rlim_cur = maxfiles;
    limit.rlim_max = maxfiles;
    if (setrlimit(RLIMIT_NOFILE, &limit) != -1) {
      LOG(WARNING) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur;
    } else {
      LOG(FATAL) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika can not reconfig it(" << strerror(errno) << "), do it by yourself";
    }
  }
  pika_dispatch_thread_->SetQueueLimit(queue_limit);
}
// Returns a copy of the storage options, read under the shared lock.
blackwidow::BlackwidowOptions PikaServer::bw_options() {
  slash::RWLock rwl(&bw_options_rw_, false);
  return bw_options_;
}
// Builds the in-memory Table objects (and their partitions) from the
// table layout declared in the configuration. Called at startup; takes
// the tables_ write lock for the whole construction.
void PikaServer::InitTableStruct() {
  std::string db_path = g_pika_conf->db_path();
  std::string log_path = g_pika_conf->log_path();
  std::vector<TableStruct> table_structs = g_pika_conf->table_structs();
  slash::RWLock rwl(&tables_rw_, true);
  for (const auto& table : table_structs) {
    std::string name = table.table_name;
    uint32_t num = table.partition_num;
    std::shared_ptr<Table> table_ptr = std::make_shared<Table>(
        name, num, db_path, log_path);
    table_ptr->AddPartitions(table.partition_ids);
    tables_.emplace(name, table_ptr);
  }
}
// Creates a new (empty) table with `num` partitions.
// Returns Corruption if a table of that name already exists.
// NOTE(review): the existence check via GetTable() and the emplace below
// take the lock separately -- a concurrent AddTableStruct for the same
// name could race in between (emplace would then silently no-op); confirm
// callers are serialized.
Status PikaServer::AddTableStruct(std::string table_name, uint32_t num) {
  std::shared_ptr<Table> table = g_pika_server->GetTable(table_name);
  if (table) {
    return Status::Corruption("table already exist");
  }
  std::string db_path = g_pika_conf->db_path();
  std::string log_path = g_pika_conf->log_path();
  std::shared_ptr<Table> table_ptr = std::make_shared<Table>(
      table_name, num, db_path, log_path);
  slash::RWLock rwl(&tables_rw_, true);
  tables_.emplace(table_name, table_ptr);
  return Status::OK();
}
// Removes an existing, empty table from the server.
// Bug fix: the original erased from tables_ without holding tables_rw_,
// racing with concurrent readers such as GetTable(). The lookup is now
// done directly under the write lock (GetTable() cannot be called here
// because it takes the same lock and slash::RWLock is not re-entrant).
Status PikaServer::DelTableStruct(std::string table_name) {
  slash::RWLock rwl(&tables_rw_, true);
  auto iter = tables_.find(table_name);
  if (iter == tables_.end()) {
    return Status::Corruption("table not found");
  }
  std::shared_ptr<Table> table = iter->second;
  if (!table->TableIsEmpty()) {
    return Status::Corruption("table have partitions");
  }
  Status s = table->Leave();
  if (!s.ok()) {
    return s;
  }
  tables_.erase(iter);
  return Status::OK();
}
std::shared_ptr<Table> PikaServer::GetTable(const std::string &table_name) {
slash::RWLock l(&tables_rw_, false);
auto iter = tables_.find(table_name);
return (iter == tables_.end()) ? NULL : iter->second;
}
// Returns the set of partition ids owned by `table_name`, or an empty
// set when the table does not exist.
std::set<uint32_t> PikaServer::GetTablePartitionIds(const std::string& table_name) {
  slash::RWLock guard(&tables_rw_, false);
  auto it = tables_.find(table_name);
  if (it == tables_.end()) {
    return std::set<uint32_t>();
  }
  return it->second->GetPartitionIds();
}
// True if any partition of any table currently has a bgsave running.
// Holds the tables_ read lock, plus each table's partition read lock
// while scanning that table.
bool PikaServer::IsBgSaving() {
  slash::RWLock table_rwl(&tables_rw_, false);
  for (const auto& table_item : tables_) {
    slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false);
    // NOTE(review): "patition" is a pre-existing typo in the local name.
    for (const auto& patition_item : table_item.second->partitions_) {
      if (patition_item.second->IsBgSaving()) {
        return true;
      }
    }
  }
  return false;
}
// True if any table is currently running a key-scan task.
bool PikaServer::IsKeyScaning() {
  slash::RWLock guard(&tables_rw_, false);
  for (const auto& entry : tables_) {
    if (entry.second->IsKeyScaning()) {
      return true;
    }
  }
  return false;
}
// True if any partition's storage engine reports a running background
// task. strcasecmp() returns non-zero when the reported task type is
// anything other than "no", i.e. some compaction-like task is active.
bool PikaServer::IsCompacting() {
  slash::RWLock table_rwl(&tables_rw_, false);
  for (const auto& table_item : tables_) {
    slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false);
    for (const auto& partition_item : table_item.second->partitions_) {
      // The db handle is guarded by the partition's own rwlock.
      partition_item.second->DbRWLockReader();
      std::string task_type = partition_item.second->db()->GetCurrentTaskType();
      partition_item.second->DbRWUnLock();
      if (strcasecmp(task_type.data(), "no")) {
        return true;
      }
    }
  }
  return false;
}
// True when a table with this name exists.
bool PikaServer::IsTableExist(const std::string& table_name) {
  return GetTable(table_name) != nullptr;
}
// True when `table_name` exists and owns a partition with `partition_id`.
bool PikaServer::IsTablePartitionExist(const std::string& table_name,
                                       uint32_t partition_id) {
  std::shared_ptr<Table> table_ptr = GetTable(table_name);
  if (!table_ptr) {
    return false;
  }
  return table_ptr->GetPartitionById(partition_id) != nullptr;
}
// Whether `command` may be executed under the current deployment mode.
// With a non-zero consensus level, multi-key commands are rejected
// (reusing the sharding-mode deny list); in sharding mode the command is
// additionally checked against ShardingModeNotSupportCommands.
// NOTE(review): the consensus check looks up `command` as passed while the
// sharding check lowercases first -- presumably callers pass a normalized
// name; confirm casing at the call sites.
bool PikaServer::IsCommandSupport(const std::string& command) {
  if (g_pika_conf->consensus_level() != 0) {
    // dont support multi key command
    // used the same list as sharding mode use
    bool res = !ConsensusNotSupportCommands.count(command);
    if (!res) {
      return res;
    }
  }
  if (g_pika_conf->classic_mode()) {
    return true;
  } else {
    std::string cmd = command;
    slash::StringToLower(cmd);
    return !ShardingModeNotSupportCommands.count(cmd);
  }
}
// Reports the table's binlog IO-error state; a missing table is treated
// as an error (returns true).
bool PikaServer::IsTableBinlogIoError(const std::string& table_name) {
  std::shared_ptr<Table> table = GetTable(table_name);
  if (!table) {
    return true;
  }
  return table->IsBinlogIoError();
}
// If no collection of specified tables is given, we execute task in all tables
// Runs the requested maintenance task (compaction variant, key scan
// start/stop, or bgsave) on every table, or only on the tables named in
// `tables` when that set is non-empty. Always returns OK.
Status PikaServer::DoSameThingSpecificTable(const TaskType& type, const std::set<std::string>& tables) {
  slash::RWLock rwl(&tables_rw_, false);
  for (const auto& table_item : tables_) {
    if (!tables.empty()
      && tables.find(table_item.first) == tables.end()) {
      continue;
    } else {
      // Dispatch the task on this table.
      switch (type) {
        case TaskType::kCompactAll:
          table_item.second->Compact(blackwidow::DataType::kAll);
          break;
        case TaskType::kCompactStrings:
          table_item.second->Compact(blackwidow::DataType::kStrings);
          break;
        case TaskType::kCompactHashes:
          table_item.second->Compact(blackwidow::DataType::kHashes);
          break;
        case TaskType::kCompactSets:
          table_item.second->Compact(blackwidow::DataType::kSets);
          break;
        case TaskType::kCompactZSets:
          table_item.second->Compact(blackwidow::DataType::kZSets);
          break;
        case TaskType::kCompactList:
          table_item.second->Compact(blackwidow::DataType::kLists);
          break;
        case TaskType::kStartKeyScan:
          table_item.second->KeyScan();
          break;
        case TaskType::kStopKeyScan:
          table_item.second->StopKeyScan();
          break;
        case TaskType::kBgSave:
          table_item.second->BgSaveTable();
          break;
        default:
          break;
      }
    }
  }
  return Status::OK();
}
// Activates the sync-slave state machine of every partition so the
// auxiliary thread starts a TrySync against the master -- or a TryDBSync
// (full resync) when force_full_sync_ was set.
void PikaServer::PreparePartitionTrySync() {
  slash::RWLock rwl(&tables_rw_, false);
  ReplState state = force_full_sync_ ?
    ReplState::kTryDBSync : ReplState::kTryConnect;
  for (const auto& table_item : tables_) {
    for (const auto& partition_item : table_item.second->partitions_) {
      Status s = g_pika_rm->ActivateSyncSlavePartition(
          RmNode(g_pika_server->master_ip(),
            g_pika_server->master_port(),
            table_item.second->GetTableName(),
            partition_item.second->GetPartitionId()), state);
      if (!s.ok()) {
        LOG(WARNING) << s.ToString();
      }
    }
  }
  // Hand control to the auxiliary thread's per-partition state machines.
  force_full_sync_ = false;
  loop_partition_state_machine_ = true;
  LOG(INFO) << "Mark try connect finish";
}
// Propagates a new max-cache-statistic-keys value to every partition's
// storage engine (used by CONFIG SET at runtime).
void PikaServer::PartitionSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) {
  slash::RWLock rwl(&tables_rw_, false);
  for (const auto& table_item : tables_) {
    for (const auto& partition_item : table_item.second->partitions_) {
      partition_item.second->DbRWLockReader();
      partition_item.second->db()->SetMaxCacheStatisticKeys(max_cache_statistic_keys);
      partition_item.second->DbRWUnLock();
    }
  }
}
// Propagates a new small-compaction threshold to every partition's
// storage engine (used by CONFIG SET at runtime).
void PikaServer::PartitionSetSmallCompactionThreshold(uint32_t small_compaction_threshold) {
  slash::RWLock rwl(&tables_rw_, false);
  for (const auto& table_item : tables_) {
    for (const auto& partition_item : table_item.second->partitions_) {
      partition_item.second->DbRWLockReader();
      partition_item.second->db()->SetSmallCompactionThreshold(small_compaction_threshold);
      partition_item.second->DbRWUnLock();
    }
  }
}
// Fills *boffset with the binlog producer position (filenum/offset) of
// the given sync-master partition. Returns false when the partition is
// unknown or its logger cannot report a position.
bool PikaServer::GetTablePartitionBinlogOffset(const std::string& table_name,
                                               uint32_t partition_id,
                                               BinlogOffset* const boffset) {
  std::shared_ptr<SyncMasterPartition> partition =
      g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id));
  if (!partition) {
    return false;
  }
  Status s = partition->Logger()->GetProducerStatus(&(boffset->filenum), &(boffset->offset));
  return s.ok();
}
// Only use in classic mode
std::shared_ptr<Partition> PikaServer::GetPartitionByDbName(const std::string& db_name) {
std::shared_ptr<Table> table = GetTable(db_name);
return table ? table->GetPartitionById(0) : NULL;
}
std::shared_ptr<Partition> PikaServer::GetTablePartitionById(
const std::string& table_name,
uint32_t partition_id) {
std::shared_ptr<Table> table = GetTable(table_name);
return table ? table->GetPartitionById(partition_id) : NULL;
}
std::shared_ptr<Partition> PikaServer::GetTablePartitionByKey(
const std::string& table_name,
const std::string& key) {
std::shared_ptr<Table> table = GetTable(table_name);
return table ? table->GetPartitionByKey(key) : NULL;
}
// Runs the requested task (reset repl state, purge binlogs, or full
// compaction) on every partition of every table. Always returns OK.
// Bug fix: in the kResetReplState case the original only *logged* when
// the slave partition was not found and then dereferenced the null
// pointer via SetReplState(); it now breaks out of the case like the
// kPurgeLog case does.
Status PikaServer::DoSameThingEveryPartition(const TaskType& type) {
  slash::RWLock rwl(&tables_rw_, false);
  std::shared_ptr<SyncSlavePartition> slave_partition = nullptr;
  for (const auto& table_item : tables_) {
    for (const auto& partition_item : table_item.second->partitions_) {
      switch (type) {
        case TaskType::kResetReplState:
          {
            slave_partition = g_pika_rm->GetSyncSlavePartitionByName(
                PartitionInfo(table_item.second->GetTableName(),
                  partition_item.second->GetPartitionId()));
            if (slave_partition == nullptr) {
              LOG(WARNING) << "Slave Partition: " <<
                table_item.second->GetTableName() << ":" <<
                partition_item.second->GetPartitionId() << " Not Found";
              break;
            }
            slave_partition->SetReplState(ReplState::kNoConnect);
            break;
          }
        case TaskType::kPurgeLog:
          {
            std::shared_ptr<SyncMasterPartition> partition =
              g_pika_rm->GetSyncMasterPartitionByName(
                  PartitionInfo(table_item.second->GetTableName(),
                    partition_item.second->GetPartitionId()));
            if (!partition) {
              LOG(WARNING) << table_item.second->GetTableName()
                << partition_item.second->GetPartitionId() << " Not Found.";
              break;
            }
            partition->StableLogger()->PurgeStableLogs();
            break;
          }
        case TaskType::kCompactAll:
          partition_item.second->Compact(blackwidow::kAll);
          break;
        default:
          break;
      }
    }
  }
  return Status::OK();
}
// Adds the master role bit under the state write lock.
void PikaServer::BecomeMaster() {
  slash::RWLock guard(&state_protector_, true);
  role_ |= PIKA_ROLE_MASTER;
}
// Removes the slave registered under connection `fd`, notifies the
// replication manager, and drops the master role bit once the last
// slave is gone.
void PikaServer::DeleteSlave(int fd) {
  std::string ip;
  int port = -1;
  bool is_find = false;
  int slave_num = -1;
  {
    slash::MutexLock l(&slave_mutex_);
    std::vector<SlaveItem>::iterator iter = slaves_.begin();
    while (iter != slaves_.end()) {
      if (iter->conn_fd == fd) {
        ip = iter->ip;
        port = iter->port;
        is_find = true;
        LOG(INFO) << "Delete Slave Success, ip_port: " << iter->ip << ":" << iter->port;
        slaves_.erase(iter);
        break;
      }
      iter++;
    }
    slave_num = slaves_.size();
  }
  // Replication-manager calls happen outside slave_mutex_ on purpose.
  if (is_find) {
    g_pika_rm->LostConnection(ip, port);
    g_pika_rm->DropItemInWriteQueue(ip, port);
  }
  if (slave_num == 0) {
    slash::RWLock l(&state_protector_, true);
    role_ &= ~PIKA_ROLE_MASTER;
  }
}
// Number of slaves with a full DB sync currently in flight.
int32_t PikaServer::CountSyncSlaves() {
  slash::MutexLock guard(&db_sync_protector_);
  return db_sync_slaves_.size();
}
// Builds the "slaveN:ip=...,port=..." info lines for sharding mode and
// returns the number of replicas listed.
// Fixes: iterate by const reference (the original copied every replica
// string per iteration) and initialize `port` before the parse call.
int32_t PikaServer::GetShardingSlaveListString(std::string& slave_list_str) {
  std::vector<std::string> complete_replica;
  g_pika_rm->FindCompleteReplica(&complete_replica);
  std::stringstream tmp_stream;
  size_t index = 0;
  for (const auto& replica : complete_replica) {
    std::string ip;
    int port = 0;
    // Entries that do not parse as "ip:port" are skipped silently.
    if (!slash::ParseIpPortString(replica, ip, port)) {
      continue;
    }
    tmp_stream << "slave" << index++ << ":ip=" << ip << ",port=" << port << "\r\n";
  }
  slave_list_str.assign(tmp_stream.str());
  return index;
}
int32_t PikaServer::GetSlaveListString(std::string& slave_list_str) {
size_t index = 0;
SlaveState slave_state;
BinlogOffset master_boffset;
BinlogOffset sent_slave_boffset;
BinlogOffset acked_slave_boffset;
std::stringstream tmp_stream;
slash::MutexLock l(&slave_mutex_);
std::shared_ptr<SyncMasterPartition> master_partition = nullptr;
for (const auto& slave : slaves_) {
tmp_stream << "slave" << index++ << ":ip=" << slave.ip << ",port=" << slave.port << ",conn_fd=" << slave.conn_fd << ",lag=";
for (const auto& ts : slave.table_structs) {
for (size_t idx = 0; idx < ts.partition_num; ++idx) {
std::shared_ptr<SyncMasterPartition> partition =
g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(ts.table_name, idx));
if (!partition) {
LOG(WARNING) << "Sync Master Partition: " << ts.table_name << ":" << idx
<< ", NotFound";
continue;
}
Status s = partition->GetSlaveState(slave.ip, slave.port, &slave_state);
if (s.ok()
&& slave_state == SlaveState::kSlaveBinlogSync
&& partition->GetSlaveSyncBinlogInfo(slave.ip, slave.port, &sent_slave_boffset, &acked_slave_boffset).ok()) {
Status s = partition->Logger()->GetProducerStatus(&(master_boffset.filenum), &(master_boffset.offset));
if (!s.ok()) {
continue;
} else {
uint64_t lag =
(uint64_t)(master_boffset.filenum - sent_slave_boffset.filenum) * g_pika_conf->binlog_file_size()
+ master_boffset.offset - sent_slave_boffset.offset;
tmp_stream << "(" << partition->PartitionName() << ":" << lag << ")";
}
} else {
tmp_stream << "(" << partition->PartitionName() << ":not syncing)";
}
}
}
tmp_stream << "\r\n";
}
slave_list_str.assign(tmp_stream.str());
return index;
}
// Try add Slave, return true if success,
// return false when slave already exist
// Registers a newly connected slave (identified by ip:port) together
// with the table layout it reported; the caller holds no locks.
bool PikaServer::TryAddSlave(const std::string& ip, int64_t port, int fd,
                             const std::vector<TableStruct>& table_structs) {
  std::string ip_port = slash::IpPortString(ip, port);
  slash::MutexLock l(&slave_mutex_);
  std::vector<SlaveItem>::iterator iter = slaves_.begin();
  while (iter != slaves_.end()) {
    if (iter->ip_port == ip_port) {
      LOG(WARNING) << "Slave Already Exist, ip_port: " << ip << ":" << port;
      return false;
    }
    iter++;
  }
  // Not exist, so add new
  LOG(INFO) << "Add New Slave, " << ip << ":" << port;
  SlaveItem s;
  s.ip_port = ip_port;
  s.ip = ip;
  s.port = port;
  s.conn_fd = fd;
  s.stage = SLAVE_ITEM_STAGE_ONE;
  s.table_structs = table_structs;
  gettimeofday(&s.create_time, NULL);
  slaves_.push_back(s);
  return true;
}
// Marks replication as failed; the state machine stays in ERROR until a
// new slaveof/meta-sync resets it.
void PikaServer::SyncError() {
  slash::RWLock guard(&state_protector_, true);
  repl_state_ = PIKA_REPL_ERROR;
  LOG(WARNING) << "Sync error, set repl_state to PIKA_REPL_ERROR";
}
// Detaches this server from its master (slaveof no one): closes the repl
// client connection, clears the slave role bit and master address, and
// resets every partition's slave state machine.
void PikaServer::RemoveMaster() {
  {
    slash::RWLock l(&state_protector_, true);
    repl_state_ = PIKA_REPL_NO_CONNECT;
    role_ &= ~PIKA_ROLE_SLAVE;
    if (master_ip_ != "" && master_port_ != -1) {
      // kPortShiftReplServer maps the master's client port to its
      // replication-server port.
      g_pika_rm->CloseReplClientConn(master_ip_, master_port_ + kPortShiftReplServer);
      g_pika_rm->LostConnection(master_ip_, master_port_);
      loop_partition_state_machine_ = false;
      UpdateMetaSyncTimestamp();
      LOG(INFO) << "Remove Master Success, ip_port: " << master_ip_ << ":" << master_port_;
    }
    master_ip_ = "";
    master_port_ = -1;
    // NOTE(review): called while still holding state_protector_; it takes
    // tables_rw_ internally -- confirm the lock ordering is safe elsewhere.
    DoSameThingEveryPartition(TaskType::kResetReplState);
  }
}
// Configures this server as a slave of master_ip:master_port and moves
// the repl state machine to SHOULD_META_SYNC. Returns false when the
// role/state does not allow it. "127.0.0.1" is rewritten to this host's
// address so the master sees a routable IP.
// NOTE(review): `role_ ^ PIKA_ROLE_SLAVE` is non-zero whenever role_ is
// anything other than exactly PIKA_ROLE_SLAVE (including slave+master) --
// presumably meant as "not already a pure slave"; confirm before changing.
bool PikaServer::SetMaster(std::string& master_ip, int master_port) {
  if (master_ip == "127.0.0.1") {
    master_ip = host_;
  }
  slash::RWLock l(&state_protector_, true);
  if ((role_ ^ PIKA_ROLE_SLAVE) && repl_state_ == PIKA_REPL_NO_CONNECT) {
    master_ip_ = master_ip;
    master_port_ = master_port;
    role_ |= PIKA_ROLE_SLAVE;
    repl_state_ = PIKA_REPL_SHOULD_META_SYNC;
    return true;
  }
  return false;
}
// True while the state machine is waiting to start a meta sync.
bool PikaServer::ShouldMetaSync() {
  slash::RWLock guard(&state_protector_, false);
  return repl_state_ == PIKA_REPL_SHOULD_META_SYNC;
}
// Advances SHOULD_META_SYNC -> META_SYNC_DONE after a successful meta sync.
void PikaServer::FinishMetaSync() {
  slash::RWLock guard(&state_protector_, true);
  assert(repl_state_ == PIKA_REPL_SHOULD_META_SYNC);
  repl_state_ = PIKA_REPL_META_SYNC_DONE;
}
// True once the meta sync with the master has completed.
bool PikaServer::MetaSyncDone() {
  slash::RWLock guard(&state_protector_, false);
  return repl_state_ == PIKA_REPL_META_SYNC_DONE;
}
// Called when the replication link to the master is lost: if we are
// still a slave, rewind the state machine to SHOULD_META_SYNC so the
// auxiliary thread reconnects, and reset every partition's slave state.
void PikaServer::ResetMetaSyncStatus() {
  slash::RWLock sp_l(&state_protector_, true);
  if (role_ & PIKA_ROLE_SLAVE) {
    // not change by slaveof no one, so set repl_state = PIKA_REPL_SHOULD_META_SYNC,
    // continue to connect master
    repl_state_ = PIKA_REPL_SHOULD_META_SYNC;
    loop_partition_state_machine_ = false;
    DoSameThingEveryPartition(TaskType::kResetReplState);
  }
}
bool PikaServer::AllPartitionConnectSuccess() {
bool all_partition_connect_success = true;
slash::RWLock rwl(&tables_rw_, false);
std::shared_ptr<SyncSlavePartition> slave_partition = nullptr;
for (const auto& table_item : tables_) {
for (const auto& partition_item : table_item.second->partitions_) {
slave_partition = g_pika_rm->GetSyncSlavePartitionByName(
PartitionInfo(table_item.second->GetTableName(),
partition_item.second->GetPartitionId()));
if (slave_partition == nullptr) {
LOG(WARNING) << "Slave Partition: " <<
table_item.second->GetTableName() << ":" <<
partition_item.second->GetPartitionId() <<
", NotFound";
return false;
}
ReplState repl_state = slave_partition->State();
if (repl_state != ReplState::kConnected) {
all_partition_connect_success = false;
break;
}
}
}
return all_partition_connect_success;
}
// Whether the auxiliary thread should keep driving the per-partition
// slave state machines.
bool PikaServer::LoopPartitionStateMachine() {
  slash::RWLock guard(&state_protector_, false);
  return loop_partition_state_machine_;
}
// Enables/disables the per-partition state-machine loop; only legal
// once the meta sync has completed.
void PikaServer::SetLoopPartitionStateMachine(bool need_loop) {
  slash::RWLock guard(&state_protector_, true);
  assert(repl_state_ == PIKA_REPL_META_SYNC_DONE);
  loop_partition_state_machine_ = need_loop;
}
// Unix timestamp of the most recent meta-sync attempt.
int PikaServer::GetMetaSyncTimestamp() {
  slash::RWLock guard(&state_protector_, false);
  return last_meta_sync_timestamp_;
}
// Stamps the meta-sync timestamp with the current time.
void PikaServer::UpdateMetaSyncTimestamp() {
  slash::RWLock guard(&state_protector_, true);
  last_meta_sync_timestamp_ = time(NULL);
}
// Whether the upcoming meta sync is the first one since slaveof.
// Fix: this is a pure read, but the original acquired state_protector_
// as a *write* lock; use a read lock like the sibling getters
// (ShouldMetaSync, GetMetaSyncTimestamp) to avoid blocking readers.
bool PikaServer::IsFirstMetaSync() {
  slash::RWLock sp_l(&state_protector_, false);
  return first_meta_sync_;
}
// Sets the first-meta-sync flag under the state write lock.
void PikaServer::SetFirstMetaSync(bool v) {
  slash::RWLock guard(&state_protector_, true);
  first_meta_sync_ = v;
}
// Schedules a task on the client-processor thread pool.
void PikaServer::ScheduleClientPool(pink::TaskFunc func, void* arg) {
  pika_client_processor_->SchedulePool(func, arg);
}
// Schedules a task on the client bg threads, hashed by `hash_str` so
// tasks with the same key run on the same thread (ordered).
void PikaServer::ScheduleClientBgThreads(
    pink::TaskFunc func, void* arg, const std::string& hash_str) {
  pika_client_processor_->ScheduleBgThreads(func, arg, hash_str);
}
// Current queue depth of the client-processor pool; 0 when the
// processor has not been created.
size_t PikaServer::ClientProcessorThreadPoolCurQueueSize() {
  if (!pika_client_processor_) {
    return 0;
  }
  return pika_client_processor_->ThreadPoolCurQueueSize();
}
// Runs a bgsave task on the (lazily started) bgsave thread.
void PikaServer::BGSaveTaskSchedule(pink::TaskFunc func, void* arg) {
  bgsave_thread_.StartThread();
  bgsave_thread_.Schedule(func, arg);
}
// Runs a binlog-purge task on the (lazily started) purge thread.
void PikaServer::PurgelogsTaskSchedule(pink::TaskFunc func, void* arg) {
  purge_thread_.StartThread();
  purge_thread_.Schedule(func, arg);
}
// Asynchronously deletes a directory tree. The heap-allocated copy of
// `path` is handed to DoPurgeDir, which is responsible for freeing it.
void PikaServer::PurgeDir(const std::string& path) {
  std::string* dir_path = new std::string(path);
  PurgeDirTaskSchedule(&DoPurgeDir, static_cast<void*>(dir_path));
}
// Schedules an arbitrary function on the purge thread.
void PikaServer::PurgeDirTaskSchedule(void (*function)(void*), void* arg) {
  purge_thread_.StartThread();
  purge_thread_.Schedule(function, arg);
}
// Kicks off an asynchronous full DB sync of one partition to the slave
// at ip:port. De-duplicates via db_sync_slaves_ so only one sync per
// (slave, partition) is in flight; the entry is removed when
// DbSyncSendFile finishes.
void PikaServer::DBSync(const std::string& ip, int port,
                        const std::string& table_name,
                        uint32_t partition_id) {
  {
    std::string task_index =
      DbSyncTaskIndex(ip, port, table_name, partition_id);
    slash::MutexLock ml(&db_sync_protector_);
    if (db_sync_slaves_.find(task_index) != db_sync_slaves_.end()) {
      return;
    }
    db_sync_slaves_.insert(task_index);
  }
  // Reuse the bgsave_thread_
  // Since we expect BgSave and DBSync execute serially
  bgsave_thread_.StartThread();
  // DBSyncArg ownership passes to DoDBSync, which frees it.
  DBSyncArg* arg = new DBSyncArg(this, ip, port, table_name, partition_id);
  bgsave_thread_.Schedule(&DoDBSync, reinterpret_cast<void*>(arg));
}
// Decides whether a fresh bgsave is needed before serving a full DB sync
// to the slave at ip:port, then schedules the sync. `top` is the
// master's current binlog file number.
// Bug fix: both "Not Found" branches logged through the very pointer
// they had just found to be null (partition->GetPartitionName() /
// sync_partition->SyncPartitionInfo()); they now log the name from the
// arguments instead.
void PikaServer::TryDBSync(const std::string& ip, int port,
                           const std::string& table_name,
                           uint32_t partition_id, int32_t top) {
  std::shared_ptr<Partition> partition =
    GetTablePartitionById(table_name, partition_id);
  if (!partition) {
    LOG(WARNING) << "Partition: " << table_name << ":" << partition_id
                 << " Not Found, TryDBSync Failed";
    return;
  }
  std::shared_ptr<SyncMasterPartition> sync_partition
    = g_pika_rm->GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id));
  if (!sync_partition) {
    LOG(WARNING) << "Partition: " << table_name << ":" << partition_id
                 << " Not Found, TryDBSync Failed";
    return;
  }
  BgSaveInfo bgsave_info = partition->bgsave_info();
  std::string logger_filename = sync_partition->Logger()->filename();
  // A new bgsave is required when no dump directory exists, when the
  // binlog file recorded by the last dump has been purged, or when the
  // slave is more than kDBSyncMaxGap binlog files behind.
  if (slash::IsDir(bgsave_info.path) != 0
    || !slash::FileExists(NewFileName(logger_filename, bgsave_info.offset.b_offset.filenum))
    || top - bgsave_info.offset.b_offset.filenum > kDBSyncMaxGap) {
    // Need Bgsave first
    partition->BgSavePartition();
  }
  DBSync(ip, port, table_name, partition_id);
}
void PikaServer::DbSyncSendFile(const std::string& ip, int port,
const std::string& table_name,
uint32_t partition_id) {
std::shared_ptr<Partition> partition = GetTablePartitionById(table_name, partition_id);
if (!partition) {
LOG(WARNING) << "Partition: " << partition->GetPartitionName()
<< " Not Found, DbSync send file Failed";
return;
}
BgSaveInfo bgsave_info = partition->bgsave_info();
std::string bg_path = bgsave_info.path;
uint32_t binlog_filenum = bgsave_info.offset.b_offset.filenum;
uint64_t binlog_offset = bgsave_info.offset.b_offset.offset;
uint32_t term = bgsave_info.offset.l_offset.term;
uint64_t index = bgsave_info.offset.l_offset.index;
// Get all files need to send
std::vector<std::string> descendant;
int ret = 0;
LOG(INFO) << "Partition: " << partition->GetPartitionName()
<< " Start Send files in " << bg_path << " to " << ip;
ret = slash::GetChildren(bg_path, descendant);
if (ret != 0) {
std::string ip_port = slash::IpPortString(ip, port);
slash::MutexLock ldb(&db_sync_protector_);
db_sync_slaves_.erase(ip_port);
LOG(WARNING) << "Partition: " << partition->GetPartitionName()
<< " Get child directory when try to do sync failed, error: " << strerror(ret);
return;
}
std::string local_path, target_path;
std::string remote_path = g_pika_conf->classic_mode() ? table_name : table_name + "/" + std::to_string(partition_id);
std::vector<std::string>::const_iterator iter = descendant.begin();
slash::RsyncRemote remote(ip, port, kDBSyncModule, g_pika_conf->db_sync_speed() * 1024);
std::string secret_file_path = g_pika_conf->db_sync_path();
if (g_pika_conf->db_sync_path().back() != '/') {
secret_file_path += "/";
}
secret_file_path += slash::kRsyncSubDir + "/" + kPikaSecretFile;
for (; iter != descendant.end(); ++iter) {
local_path = bg_path + "/" + *iter;
target_path = remote_path + "/" + *iter;
if (*iter == kBgsaveInfoFile) {
continue;
}
if (slash::IsDir(local_path) == 0 &&
local_path.back() != '/') {
local_path.push_back('/');
target_path.push_back('/');
}
// We need specify the speed limit for every single file
ret = slash::RsyncSendFile(local_path, target_path, secret_file_path, remote);
if (0 != ret) {
LOG(WARNING) << "Partition: " << partition->GetPartitionName()
<< " RSync send file failed! From: " << *iter
<< ", To: " << target_path
<< ", At: " << ip << ":" << port
<< ", Error: " << ret;
break;
}
}
// Clear target path
slash::RsyncSendClearTarget(bg_path + "/strings", remote_path + "/strings", secret_file_path, remote);
slash::RsyncSendClearTarget(bg_path + "/hashes", remote_path + "/hashes", secret_file_path, remote);
slash::RsyncSendClearTarget(bg_path + "/lists", remote_path + "/lists", secret_file_path, remote);
slash::RsyncSendClearTarget(bg_path + "/sets", remote_path + "/sets", secret_file_path, remote);
slash::RsyncSendClearTarget(bg_path + "/zsets", remote_path + "/zsets", secret_file_path, remote);
pink::PinkCli* cli = pink::NewRedisCli();
std::string lip(host_);
if (cli->Connect(ip, port, "").ok()) {
struct sockaddr_in laddr;
socklen_t llen = sizeof(laddr);
getsockname(cli->fd(), (struct sockaddr*) &laddr, &llen);
lip = inet_ntoa(laddr.sin_addr);
cli->Close();
delete cli;
} else {
LOG(WARNING) << "Rsync try connect slave rsync service error"
<< ", slave rsync service(" << ip << ":" << port << ")";
delete cli;
}
// Send info file at last
if (0 == ret) {
// need to modify the IP addr in the info file
if (lip.compare(host_)) {
std::ofstream fix;
std::string fn = bg_path + "/" + kBgsaveInfoFile + "." + std::to_string(time(NULL));
fix.open(fn, std::ios::in | std::ios::trunc);
if (fix.is_open()) {
fix << "0s\n" << lip << "\n" << port_ << "\n" << binlog_filenum << "\n" << binlog_offset << "\n";
if (g_pika_conf->consensus_level() != 0) {
fix << term << "\n" << index << "\n";
}
fix.close();
}
ret = slash::RsyncSendFile(fn, remote_path + "/" + kBgsaveInfoFile, secret_file_path, remote);
slash::DeleteFile(fn);
if (ret != 0) {
LOG(WARNING) << "Partition: " << partition->GetPartitionName() << " Send Modified Info File Failed";
}
} else if (0 != (ret = slash::RsyncSendFile(bg_path + "/" + kBgsaveInfoFile, remote_path + "/" + kBgsaveInfoFile, secret_file_path, remote))) {
LOG(WARNING) << "Partition: " << partition->GetPartitionName() << " Send Info File Failed";
}
}
// remove slave
{
std::string task_index =
DbSyncTaskIndex(ip, port, table_name, partition_id);
slash::MutexLock ml(&db_sync_protector_);
db_sync_slaves_.erase(task_index);
}
if (0 == ret) {
LOG(INFO) << "Partition: " << partition->GetPartitionName() << " RSync Send Files Success";
}
}
// Builds the db_sync_slaves_ key: "ip:port_table:partition".
// Bug fix: partition_id is uint32_t but was formatted with %d (signed);
// use %u to avoid misformatting large ids.
std::string PikaServer::DbSyncTaskIndex(const std::string& ip,
                                        int port,
                                        const std::string& table_name,
                                        uint32_t partition_id) {
  char buf[256];
  snprintf(buf, sizeof(buf), "%s:%d_%s:%u",
           ip.data(), port, table_name.data(), partition_id);
  return buf;
}
// Runs a key-scan task on the (lazily started) key-scan thread.
void PikaServer::KeyScanTaskSchedule(pink::TaskFunc func, void* arg) {
  key_scan_thread_.StartThread();
  key_scan_thread_.Schedule(func, arg);
}
// Kills every client connection, including monitor clients.
void PikaServer::ClientKillAll() {
  pika_dispatch_thread_->ClientKillAll();
  pika_monitor_thread_->ThreadClientKill();
}
// Kills the client identified by "ip:port"; returns 1 when a connection
// was found (either a normal or a monitor client), 0 otherwise.
int PikaServer::ClientKill(const std::string &ip_port) {
  if (pika_dispatch_thread_->ClientKill(ip_port)
    || pika_monitor_thread_->ThreadClientKill(ip_port)) {
    return 1;
  }
  return 0;
}
// Appends all connected clients (normal + monitor) to *clients and
// returns the total count.
int64_t PikaServer::ClientList(std::vector<ClientInfo> *clients) {
  int64_t clients_num = 0;
  clients_num += pika_dispatch_thread_->ThreadClientList(clients);
  clients_num += pika_monitor_thread_->ThreadClientList(clients);
  return clients_num;
}
// True when at least one MONITOR client is attached.
bool PikaServer::HasMonitorClients() {
  return pika_monitor_thread_->HasMonitorClients();
}
// Feeds one formatted command line to all MONITOR clients.
void PikaServer::AddMonitorMessage(const std::string& monitor_message) {
  pika_monitor_thread_->AddMonitorMessage(monitor_message);
}
// Registers a connection as a MONITOR client.
void PikaServer::AddMonitorClient(std::shared_ptr<PikaClientConn> client_ptr) {
  pika_monitor_thread_->AddMonitorClient(client_ptr);
}
// Drops entries from the tail until the list fits slowlog-max-len.
void PikaServer::SlowlogTrim() {
  pthread_rwlock_wrlock(&slowlog_protector_);
  while (slowlog_list_.size() > static_cast<uint32_t>(g_pika_conf->slowlog_max_len())) {
    slowlog_list_.pop_back();
  }
  pthread_rwlock_unlock(&slowlog_protector_);
}
// Clears the in-memory slowlog (SLOWLOG RESET).
void PikaServer::SlowlogReset() {
  pthread_rwlock_wrlock(&slowlog_protector_);
  slowlog_list_.clear();
  pthread_rwlock_unlock(&slowlog_protector_);
}
// Number of stored slowlog entries (SLOWLOG LEN).
// NOTE(review): uses the unqualified RAII `RWLock` while the sibling
// functions use raw pthread calls -- presumably slash::RWLock via a
// using-declaration; confirm.
uint32_t PikaServer::SlowlogLen() {
  RWLock l(&slowlog_protector_, false);
  return slowlog_list_.size();
}
// Copies up to `number` most-recent entries into *slowlogs
// (SLOWLOG GET); the list is ordered newest-first.
void PikaServer::SlowlogObtain(int64_t number, std::vector<SlowlogEntry>* slowlogs) {
  pthread_rwlock_rdlock(&slowlog_protector_);
  slowlogs->clear();
  std::list<SlowlogEntry>::const_iterator iter = slowlog_list_.begin();
  while (number-- && iter != slowlog_list_.end()) {
    slowlogs->push_back(*iter);
    iter++;
  }
  pthread_rwlock_unlock(&slowlog_protector_);
}
// Records a slowlog entry for `argv`, truncating both the argument count
// (SLOWLOG_ENTRY_MAX_ARGC) and each argument's length
// (SLOWLOG_ENTRY_MAX_STRING). `time` is the command start time in
// seconds, `duration` the execution time.
// Fix: replaced unbounded sprintf with snprintf, and %lu with %zu for
// the size_t operands (the %lu form is only correct on LP64).
void PikaServer::SlowlogPushEntry(const PikaCmdArgsType& argv, int32_t time, int64_t duration) {
  SlowlogEntry entry;
  uint32_t slargc = (argv.size() < SLOWLOG_ENTRY_MAX_ARGC)
      ? argv.size() : SLOWLOG_ENTRY_MAX_ARGC;
  for (uint32_t idx = 0; idx < slargc; ++idx) {
    if (slargc != argv.size() && idx == slargc - 1) {
      // Last slot summarizes how many arguments were dropped.
      char buffer[32];
      snprintf(buffer, sizeof(buffer), "... (%zu more arguments)", argv.size() - slargc + 1);
      entry.argv.push_back(std::string(buffer));
    } else {
      if (argv[idx].size() > SLOWLOG_ENTRY_MAX_STRING) {
        // Over-long arguments are kept as a prefix plus a byte count.
        char buffer[32];
        snprintf(buffer, sizeof(buffer), "... (%zu more bytes)", argv[idx].size() - SLOWLOG_ENTRY_MAX_STRING);
        std::string suffix(buffer);
        std::string brief = argv[idx].substr(0, SLOWLOG_ENTRY_MAX_STRING);
        entry.argv.push_back(brief + suffix);
      } else {
        entry.argv.push_back(argv[idx]);
      }
    }
  }
  pthread_rwlock_wrlock(&slowlog_protector_);
  entry.id = slowlog_entry_id_++;
  entry.start_time = time;
  entry.duration = duration;
  slowlog_list_.push_front(entry);
  pthread_rwlock_unlock(&slowlog_protector_);
  // Trimming takes the write lock itself, so it runs after the unlock.
  SlowlogTrim();
}
// Zeroes the server-wide connection and QPS counters (CONFIG RESETSTAT).
void PikaServer::ResetStat() {
  statistic_.server_stat.accumulative_connections.store(0);
  statistic_.server_stat.qps.querynum.store(0);
  statistic_.server_stat.qps.last_querynum.store(0);
}
// Total number of queries processed since start/reset.
uint64_t PikaServer::ServerQueryNum() {
  return statistic_.server_stat.qps.querynum.load();
}
// Queries processed during the last completed one-second window.
uint64_t PikaServer::ServerCurrentQps() {
  return statistic_.server_stat.qps.last_sec_querynum.load();
}
// Total number of connections ever accepted.
uint64_t PikaServer::accumulative_connections() {
  return statistic_.server_stat.accumulative_connections.load();
}
// Atomically bumps the accepted-connection counter.
void PikaServer::incr_accumulative_connections() {
  ++(statistic_.server_stat.accumulative_connections);
}
// only one thread invoke this right now
// Rolls the per-second QPS windows for the server and for every table.
void PikaServer::ResetLastSecQuerynum() {
  statistic_.server_stat.qps.ResetLastSecQuerynum();
  statistic_.ResetTableLastSecQuerynum();
}
// Accounts one executed command: bumps the global query counter, the
// per-command execution counter (keyed by upper-cased name), and the
// per-table QPS statistics.
void PikaServer::UpdateQueryNumAndExecCountTable(const std::string& table_name,
    const std::string& command, bool is_write) {
  std::string cmd(command);
  statistic_.server_stat.qps.querynum++;
  statistic_.server_stat.exec_count_table[slash::StringToUpper(cmd)]++;
  statistic_.UpdateTableQps(table_name, command, is_write);
}
// Snapshot of the per-command execution counters as plain integers.
std::unordered_map<std::string, uint64_t> PikaServer::ServerExecCountTable() {
  std::unordered_map<std::string, uint64_t> res;
  for (auto& cmd : statistic_.server_stat.exec_count_table) {
    res[cmd.first] = cmd.second.load();
  }
  return res;
}
// QPS statistics of a single table.
QpsStatistic PikaServer::ServerTableStat(const std::string& table_name) {
  return statistic_.TableStat(table_name);
}
// QPS statistics of every table.
std::unordered_map<std::string, QpsStatistic> PikaServer::ServerAllTableStat() {
  return statistic_.AllTableStat();
}
// Drains the replication write queue; returns how many items were sent.
int PikaServer::SendToPeer() {
  return g_pika_rm->ConsumeWriteQueue();
}
// Wakes the auxiliary thread from its condvar wait.
void PikaServer::SignalAuxiliary() {
  pika_auxiliary_thread_->mu_.Lock();
  pika_auxiliary_thread_->cv_.Signal();
  pika_auxiliary_thread_->mu_.Unlock();
}
// Asks the replication manager to push pending binlog to slaves.
Status PikaServer::TriggerSendBinlogSync() {
  return g_pika_rm->WakeUpBinlogSync();
}
// Number of active pattern subscriptions (PUBSUB NUMPAT).
int PikaServer::PubSubNumPat() {
  return pika_pubsub_thread_->PubSubNumPat();
}
// Publishes `msg` on `channel`; returns the receiver count.
int PikaServer::Publish(const std::string& channel, const std::string& msg) {
  int receivers = pika_pubsub_thread_->Publish(channel, msg);
  return receivers;
}
// Marks a subscriber connection ready to receive published messages.
void PikaServer::EnablePublish(int fd) {
  pika_pubsub_thread_->UpdateConnReadyState(fd, pink::PubSubThread::ReadyState::kReady);
}
// Unsubscribes `conn` from channels (or patterns); per-channel results
// are returned through *result. Returns the remaining subscription count.
int PikaServer::UnSubscribe(std::shared_ptr<pink::PinkConn> conn,
                            const std::vector<std::string>& channels,
                            bool pattern,
                            std::vector<std::pair<std::string, int>>* result) {
  int subscribed = pika_pubsub_thread_->UnSubscribe(conn, channels, pattern, result);
  return subscribed;
}
// Subscribes `conn` to channels (or patterns); per-channel results are
// returned through *result.
void PikaServer::Subscribe(std::shared_ptr<pink::PinkConn> conn,
                           const std::vector<std::string>& channels,
                           bool pattern,
                           std::vector<std::pair<std::string, int>>* result) {
  pika_pubsub_thread_->Subscribe(conn, channels, pattern, result);
}
// Lists active channels matching `pattern` (PUBSUB CHANNELS).
void PikaServer::PubSubChannels(const std::string& pattern,
                      std::vector<std::string >* result) {
  pika_pubsub_thread_->PubSubChannels(pattern, result);
}
// Reports subscriber counts for the given channels (PUBSUB NUMSUB).
void PikaServer::PubSubNumSub(const std::vector<std::string>& channels,
                    std::vector<std::pair<std::string, int>>* result) {
  pika_pubsub_thread_->PubSubNumSub(channels, result);
}
/******************************* PRIVATE *******************************/
// Periodic housekeeping, invoked from the server's timer loop.
void PikaServer::DoTimingTask() {
  // Maybe schedule compactrange
  AutoCompactRange();
  // Purge log
  AutoPurge();
  // Delete expired dump
  AutoDeleteExpiredDump();
  // Check Rsync Status
  AutoKeepAliveRSync();
  // Reset server qps
  ResetLastSecQuerynum();
}
// Schedules a full compaction according to either compact-interval
// ("hours/usage%") or compact-cron ("[week/]start-end/usage%"), provided
// the free-disk percentage is at or above the configured usage threshold.
void PikaServer::AutoCompactRange() {
  struct statfs disk_info;
  int ret = statfs(g_pika_conf->db_path().c_str(), &disk_info);
  if (ret == -1) {
    LOG(WARNING) << "statfs error: " << strerror(errno);
    return;
  }
  uint64_t total_size = disk_info.f_bsize * disk_info.f_blocks;
  uint64_t free_size = disk_info.f_bsize * disk_info.f_bfree;
  std::string ci = g_pika_conf->compact_interval();
  std::string cc = g_pika_conf->compact_cron();
  // compact-interval takes precedence over compact-cron.
  if (ci != "") {
    // Format: "<interval-hours>/<min-free-percent>".
    std::string::size_type slash = ci.find("/");
    int interval = std::atoi(ci.substr(0, slash).c_str());
    int usage = std::atoi(ci.substr(slash+1).c_str());
    struct timeval now;
    gettimeofday(&now, NULL);
    if (last_check_compact_time_.tv_sec == 0 ||
      now.tv_sec - last_check_compact_time_.tv_sec >= interval * 3600) {
      gettimeofday(&last_check_compact_time_, NULL);
      if (((double)free_size / total_size) * 100 >= usage) {
        // NOTE(review): called with one argument although the definition
        // above takes (type, tables) -- presumably the header declares a
        // default for `tables`; confirm.
        Status s = DoSameThingSpecificTable(TaskType::kCompactAll);
        if (s.ok()) {
          LOG(INFO) << "[Interval]schedule compactRange, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB";
        } else {
          LOG(INFO) << "[Interval]schedule compactRange Failed, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576
            << "MB, error: " << s.ToString();
        }
      } else {
        LOG(WARNING) << "compact-interval failed, because there is not enough disk space left, freesize"
          << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB";
      }
    }
    return;
  }
  if (cc != "") {
    bool have_week = false;
    std::string compact_cron, week_str;
    // Two slashes means a leading "<weekday>/" component is present.
    int slash_num = count(cc.begin(), cc.end(), '/');
    if (slash_num == 2) {
      have_week = true;
      std::string::size_type first_slash = cc.find("/");
      week_str = cc.substr(0, first_slash);
      compact_cron = cc.substr(first_slash + 1);
    } else {
      compact_cron = cc;
    }
    // NOTE(review): `colon` actually holds the position of '-' (the
    // start-end separator); the name is a pre-existing misnomer.
    std::string::size_type colon = compact_cron.find("-");
    std::string::size_type underline = compact_cron.find("/");
    int week = have_week ? (std::atoi(week_str.c_str()) % 7) : 0;
    int start = std::atoi(compact_cron.substr(0, colon).c_str());
    int end = std::atoi(compact_cron.substr(colon+1, underline).c_str());
    int usage = std::atoi(compact_cron.substr(underline+1).c_str());
    std::time_t t = std::time(nullptr);
    std::tm* t_m = std::localtime(&t);
    bool in_window = false;
    if (start < end && (t_m->tm_hour >= start && t_m->tm_hour < end)) {
      // Same-day window; optionally restricted to one weekday.
      in_window = have_week ? (week == t_m->tm_wday) : true;
    } else if (start > end && ((t_m->tm_hour >= start && t_m->tm_hour < 24) ||
          (t_m->tm_hour >= 0 && t_m->tm_hour < end))) {
      // Window wrapping past midnight; weekday restriction unsupported here.
      in_window = have_week ? false : true;
    } else {
      // Outside any window: re-arm so the next window triggers once.
      have_scheduled_crontask_ = false;
    }
    if (!have_scheduled_crontask_ && in_window) {
      if (((double)free_size / total_size) * 100 >= usage) {
        // NOTE(review): the cron branch compacts via
        // DoSameThingEveryPartition while the interval branch uses
        // DoSameThingSpecificTable -- confirm the asymmetry is intended.
        Status s = DoSameThingEveryPartition(TaskType::kCompactAll);
        if (s.ok()) {
          LOG(INFO) << "[Cron]schedule compactRange, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB";
        } else {
          LOG(INFO) << "[Cron]schedule compactRange Failed, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576
            << "MB, error: " << s.ToString();
        }
        have_scheduled_crontask_ = true;
      } else {
        LOG(WARNING) << "compact-cron failed, because there is not enough disk space left, freesize"
          << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB";
      }
    }
  }
}
// Periodically invoked task: asks every partition to purge its obsolete
// binlog files. The returned Status is ignored (best-effort background job).
void PikaServer::AutoPurge() {
  DoSameThingEveryPartition(TaskType::kPurgeLog);
}
// Periodically invoked task: deletes dump directories whose name has the
// exact form "<bgsave_prefix>YYYYMMDD" and whose date is at least
// expire-dump-days old — unless a slave is currently syncing from us.
//
// Fix vs. previous version: the two `struct tm` values handed to mktime()
// were left uninitialized (indeterminate tm_isdst/tm_hour/... fields make the
// mktime() result unpredictable) and were filled with the raw calendar year
// and 1-based month, while mktime() requires years since 1900 and months in
// [0, 11]. Both are corrected below.
void PikaServer::AutoDeleteExpiredDump() {
  std::string db_sync_prefix = g_pika_conf->bgsave_prefix();
  std::string db_sync_path = g_pika_conf->bgsave_path();
  int expiry_days = g_pika_conf->expire_dump_days();
  std::vector<std::string> dump_dir;
  // Never expire
  if (expiry_days <= 0) {
    return;
  }
  // Dump is not exist
  if (!slash::FileExists(db_sync_path)) {
    return;
  }
  // Directory traversal
  if (slash::GetChildren(db_sync_path, dump_dir) != 0) {
    return;
  }
  // Handle dump directory
  for (size_t i = 0; i < dump_dir.size(); i++) {
    // Only entries of the exact form "<prefix>YYYYMMDD" are candidates.
    if (dump_dir[i].substr(0, db_sync_prefix.size()) != db_sync_prefix ||
        dump_dir[i].size() != (db_sync_prefix.size() + 8)) {
      continue;
    }

    std::string str_date = dump_dir[i].substr(db_sync_prefix.size());
    char* end = NULL;
    std::strtol(str_date.c_str(), &end, 10);
    if (*end != 0) {
      // Suffix contains a non-digit character: not a date, skip it.
      continue;
    }

    // Parse filename
    int dump_year = std::atoi(str_date.substr(0, 4).c_str());
    int dump_month = std::atoi(str_date.substr(4, 2).c_str());
    int dump_day = std::atoi(str_date.substr(6, 2).c_str());

    time_t t = time(NULL);
    struct tm* now = localtime(&t);
    int now_year = now->tm_year + 1900;
    int now_month = now->tm_mon + 1;
    int now_day = now->tm_mday;

    // Build both timestamps at midnight. Zero-initialization guarantees
    // tm_isdst == 0 for both values, so the difference below is an exact
    // multiple of 86400 regardless of DST transitions.
    struct tm dump_time = {};
    dump_time.tm_year = dump_year - 1900;  // tm_year counts from 1900
    dump_time.tm_mon = dump_month - 1;     // tm_mon is 0-based
    dump_time.tm_mday = dump_day;

    struct tm now_time = {};
    now_time.tm_year = now_year - 1900;
    now_time.tm_mon = now_month - 1;
    now_time.tm_mday = now_day;

    long dump_timestamp = mktime(&dump_time);
    long now_timestamp = mktime(&now_time);
    // How many days, 1 day = 86400s
    int interval_days = (now_timestamp - dump_timestamp) / 86400;

    if (interval_days >= expiry_days) {
      std::string dump_file = db_sync_path + dump_dir[i];
      if (CountSyncSlaves() == 0) {
        LOG(INFO) << "Not syncing, delete dump file: " << dump_file;
        slash::DeleteDirIfExist(dump_file);
      } else {
        LOG(INFO) << "Syncing, can not delete " << dump_file << " dump file";
      }
    }
  }
}
// Watchdog for the rsync daemon: whenever the liveness probe fails, log a
// warning and restart the service.
void PikaServer::AutoKeepAliveRSync() {
  if (pika_rsync_service_->CheckRsyncAlive()) {
    return;  // daemon is healthy, nothing to do
  }
  LOG(WARNING) << "The Rsync service is down, Try to restart";
  pika_rsync_service_->StartRsync();
}
// Populates bw_options_ from the current configuration, under the
// bw_options_rw_ write lock: plain rocksdb::Options, the compression type,
// the BlockBasedTableOptions / block-cache setup, and blackwidow's
// small-compaction parameters.
void PikaServer::InitBlackwidowOptions() {
  slash::RWLock rwl(&bw_options_rw_, true);

  // For rocksdb::Options
  bw_options_.options.create_if_missing = true;
  bw_options_.options.keep_log_file_num = 10;
  bw_options_.options.max_manifest_file_size = 64 * 1024 * 1024;
  bw_options_.options.max_log_file_size = 512 * 1024 * 1024;

  bw_options_.options.write_buffer_size =
      g_pika_conf->write_buffer_size();
  bw_options_.options.arena_block_size =
      g_pika_conf->arena_block_size();
  // One WriteBufferManager caps memtable memory across all instances.
  bw_options_.options.write_buffer_manager.reset(
      new rocksdb::WriteBufferManager(g_pika_conf->max_write_buffer_size()));
  bw_options_.options.max_write_buffer_number =
      g_pika_conf->max_write_buffer_number();
  bw_options_.options.target_file_size_base =
      g_pika_conf->target_file_size_base();
  bw_options_.options.max_background_flushes =
      g_pika_conf->max_background_flushes();
  bw_options_.options.max_background_compactions =
      g_pika_conf->max_background_compactions();
  bw_options_.options.max_open_files =
      g_pika_conf->max_cache_files();
  bw_options_.options.max_bytes_for_level_multiplier =
      g_pika_conf->max_bytes_for_level_multiplier();
  bw_options_.options.optimize_filters_for_hits =
      g_pika_conf->optimize_filters_for_hits();
  bw_options_.options.level_compaction_dynamic_level_bytes =
      g_pika_conf->level_compaction_dynamic_level_bytes();

  // Map the configured compression name onto rocksdb's enum; an unrecognized
  // value leaves rocksdb's default compression untouched.
  if (g_pika_conf->compression() == "none") {
    bw_options_.options.compression =
        rocksdb::CompressionType::kNoCompression;
  } else if (g_pika_conf->compression() == "snappy") {
    bw_options_.options.compression =
        rocksdb::CompressionType::kSnappyCompression;
  } else if (g_pika_conf->compression() == "zlib") {
    bw_options_.options.compression =
        rocksdb::CompressionType::kZlibCompression;
  } else if (g_pika_conf->compression() == "lz4") {
    bw_options_.options.compression =
        rocksdb::CompressionType::kLZ4Compression;
  } else if (g_pika_conf->compression() == "zstd") {
    bw_options_.options.compression =
        rocksdb::CompressionType::kZSTD;
  }

  // For rocksdb::BlockBasedTableOptions
  bw_options_.table_options.block_size = g_pika_conf->block_size();
  bw_options_.table_options.cache_index_and_filter_blocks =
      g_pika_conf->cache_index_and_filter_blocks();
  bw_options_.block_cache_size = g_pika_conf->block_cache();
  bw_options_.share_block_cache = g_pika_conf->share_block_cache();

  // block-cache size 0 disables the block cache entirely; a shared LRU cache
  // is created here only when sharing is requested. NOTE(review): presumably
  // per-instance caches are created elsewhere when not shared — confirm.
  if (bw_options_.block_cache_size == 0) {
    bw_options_.table_options.no_block_cache = true;
  } else if (bw_options_.share_block_cache) {
    bw_options_.table_options.block_cache =
        rocksdb::NewLRUCache(bw_options_.block_cache_size);
  }

  // For Blackwidow small compaction
  bw_options_.statistics_max_size = g_pika_conf->max_cache_statistic_keys();
  bw_options_.small_compaction_threshold =
      g_pika_conf->small_compaction_threshold();
}
// Applies options_map (restricted to option_type) to the rocksdb instance of
// every partition of every table, each under that partition's DB write lock.
// Stops and returns on the first failure; only when all partitions succeed is
// the cached bw_options_ rewritten (under bw_options_rw_) so that partitions
// created later pick up the new values.
// NOTE(review): tables_ is iterated without an explicit lock here — confirm
// that callers serialize against table creation/removal.
blackwidow::Status PikaServer::RewriteBlackwidowOptions(const blackwidow::OptionType& option_type,
    const std::unordered_map<std::string, std::string>& options_map) {
  blackwidow::Status s;
  for (const auto& table_item : tables_) {
    slash::RWLock partition_rwl(&table_item.second->partitions_rw_, true);
    for (const auto& partition_item: table_item.second->partitions_) {
      partition_item.second->DbRWLockWriter();
      s = partition_item.second->db()->SetOptions(option_type, blackwidow::ALL_DB, options_map);
      partition_item.second->DbRWUnLock();
      if (!s.ok()) return s;
    }
  }
  slash::RWLock rwl(&bw_options_rw_, true);
  s = bw_options_.ResetOptions(option_type, options_map);
  return s;
}
// Appends a human-readable status line (current queue size of the client
// processor thread pool) to *info.
void PikaServer::ServerStatus(std::string* info) {
  std::stringstream tmp_stream;
  tmp_stream << "Client Processor thread-pool queue size: "
             << ClientProcessorThreadPoolCurQueueSize() << "\r\n";
  info->append(tmp_stream.str());
}
|
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <limits>
#include <numeric>
#include <queue>
#include <random>
#include <string>
#include <vector>

#include <seqan3/std/filesystem>

#include <omp.h>

#include <robin_hood.h>

#include <chopper/sketch/hyperloglog.hpp>
namespace chopper::sketch
{
/*!\brief A sequence of user bins carrying filenames, k-mer counts and (optionally) HyperLogLog
 *        sketches; supports sorting by cardinality, estimating union cardinalities and a
 *        similarity-based rearrangement via agglomerative clustering on the sketches.
 *
 * Fixes vs. previous version: random_shuffle() used `i < n - 1` with an unsigned `n`, which
 * wraps around for an empty distance matrix; sort_by_cardinalities() dereferenced
 * user_bin_kmer_counts before asserting it non-null.
 */
class user_bin_sequence
{
protected:
    //!\brief type for a node in the clustering tree when for the rearrangement
    struct clustering_node
    {
        // children in the tree
        size_t left;
        size_t right;
        // hll sketch of the union if the node is still a root
        hyperloglog hll;
    };

    //!\brief element of the second priority queue layer of the distance matrix
    struct neighbor
    {
        size_t id;
        double dist;

        bool operator>(neighbor const & other) const
        {
            return dist > other.dist;
        }
    };

    //!\brief type of a min heap based priority queue
    using prio_queue = std::priority_queue<neighbor, std::vector<neighbor>, std::greater<neighbor>>;

    //!\brief entry of the distance matrix that has the id of a cluster with its neighbors in a prio queue
    struct entry
    {
        size_t id;
        prio_queue pq;
    };

    //!\brief type of the distance matrix for the clustering for the rearrangement
    using distance_matrix = std::vector<entry>;

    //!\brief A pointer to the filenames of the user input sequences.
    std::vector<std::string> * const filenames{nullptr};

    //!\brief A pointer to kmer counts associated with the above files used to layout user bin into technical bins.
    std::vector<size_t> * const user_bin_kmer_counts{nullptr};

    //!\brief HyperLogLog sketches on the k-mer sets of the sequences from the files of filenames.
    std::vector<hyperloglog> sketches;

public:
    user_bin_sequence() = default; //!< Defaulted.
    user_bin_sequence(user_bin_sequence const &) = default; //!< Defaulted.
    user_bin_sequence & operator=(user_bin_sequence const &) = default; //!< Defaulted.
    user_bin_sequence(user_bin_sequence &&) = default; //!< Defaulted.
    user_bin_sequence & operator=(user_bin_sequence &&) = default; //!< Defaulted.
    ~user_bin_sequence() = default; //!< Defaulted.

    /*!\brief A sequence of user bins for which filenames and counts are given.
     * \param[in] filenames_ filenames of the sequence files for the user bins
     * \param[in] user_bin_kmer_counts_ counts of the k-mer sets of the bins corresponding to filenames
     */
    user_bin_sequence(std::vector<std::string> & filenames_,
                      std::vector<size_t> & user_bin_kmer_counts_) :
        filenames{std::addressof(filenames_)},
        user_bin_kmer_counts{std::addressof(user_bin_kmer_counts_)}
    {}

    //!\brief Sorts filenames and cardinalities by looking only at the cardinalities.
    void sort_by_cardinalities()
    {
        // check the pointers before the first dereference below
        assert(user_bin_kmer_counts != nullptr);
        assert(filenames != nullptr);

        // generate permutation of indices sorted in descending order by cardinalities
        std::vector<size_t> permutation(user_bin_kmer_counts->size());
        std::iota(permutation.begin(), permutation.end(), size_t{0});

        assert(permutation.size() == user_bin_kmer_counts->size());

        auto cardinality_compare = [this] (size_t const index1, size_t const index2)
        {
            return (*user_bin_kmer_counts)[index1] > (*user_bin_kmer_counts)[index2];
        };

        std::sort(permutation.begin(), permutation.end(), cardinality_compare);

        apply_permutation(permutation);
    }

    /*!\brief Restore the HLL sketches from the files in hll_dir
     * \param[in] hll_dir path to the directory where hll caches will be found
     */
    void read_hll_files(std::filesystem::path const & hll_dir)
    {
        assert(filenames != nullptr);
        assert(std::filesystem::exists(hll_dir) && !std::filesystem::is_empty(hll_dir)); // checked in chopper_layout

        sketches.reserve(filenames->size());

        try
        {
            for (auto const & filename : *filenames)
            {
                std::filesystem::path path = hll_dir / std::filesystem::path(filename).stem();
                path += ".hll";
                std::ifstream hll_fin(path, std::ios::binary);

                // the sketch bits will be automatically read from the files
                sketches.emplace_back().restore(hll_fin);
            }
        }
        catch (std::runtime_error const & err)
        {
            std::string const chopper_msg{"[CHOPPER LAYOUT ERROR] Something went wrong trying to read the HyperLogLog"
                                          " sketches from files:\n"};
            throw std::runtime_error{chopper_msg + err.what()};
        }
    }

    /*!\brief For all intervals of filenames: estimate the cardinality of the union
     * of k-mer sets of all sequences in the files of the interval.
     * estimates[i][j] will be the union cardinality estimate of the interval j, ..., i.
     * This unintuitive convention is chosen for cache efficiency in the hierarchical binning.
     * \param[in] num_threads the number of threads to use
     * \param[out] estimates output table
     */
    void estimate_interval_unions(std::vector<std::vector<uint64_t>> & estimates, size_t const num_threads) const
    {
        assert(user_bin_kmer_counts != nullptr);
        assert(filenames != nullptr);

        if (filenames->size() > sketches.size())
            throw std::runtime_error{"You need to compute or load sketches before you can estimate intervals."};

        estimates.clear();
        size_t const n = filenames->size();
        estimates.resize(n);

        size_t const chunk_size = std::floor(std::sqrt(n));

        #pragma omp parallel num_threads(num_threads)
        {
            // initialize estimates
            #pragma omp for
            for (size_t i = 0; i < n; ++i)
            {
                estimates[i].resize(i + 1);
            }

            // fill estimates
            #pragma omp for schedule(nonmonotonic: dynamic, chunk_size)
            for (size_t i = 0; i < n; ++i)
            {
                hyperloglog temp_hll = sketches[i];
                estimates[i][i] = (*user_bin_kmer_counts)[i];

                for (size_t j = i + 1; j < n; ++j)
                {
                    estimates[j][i] = static_cast<uint64_t>(temp_hll.merge_and_estimate_SIMD(sketches[j]));
                }
            }
        }
    }

    /*!\brief Rearrange filenames, sketches and counts such that similar bins are close to each other
     * \param[in] max_ratio the maximal cardinality ratio in the clustering intervals (must be <= 1 and >= 0)
     * \param[in] num_threads the number of threads to use
     */
    void rearrange_bins(double const max_ratio, size_t const num_threads)
    {
        assert(user_bin_kmer_counts != nullptr);
        assert(filenames != nullptr);

        std::vector<size_t> permutation;

        size_t first = 0;
        size_t last = 1;

        while (first < filenames->size())
        {
            // size difference is too large or sequence is over -> do the clustering
            if (last == filenames->size() || (*user_bin_kmer_counts)[first] * max_ratio > (*user_bin_kmer_counts)[last])
            {
                // if this is not the first group, we want one bin overlap
                cluster_bins(permutation, first, last, num_threads);
                first = last;
            }
            ++last;
        }

        apply_permutation(permutation);
    }

protected:
    /*!\brief Perform an agglomerative clustering variant on the index range [first:last)
     * \param[in] first id of the first cluster of the interval
     * \param[in] last id of the last cluster of the interval plus one
     * \param[in] num_threads the number of threads to use
     * \param[out] permutation append the new order to this
     */
    void cluster_bins(std::vector<size_t> & permutation,
                      size_t const first,
                      size_t const last,
                      size_t const num_threads)
    {
        assert(num_threads >= 1);
        assert(filenames != nullptr);
        assert(sketches.size() == filenames->size());
        assert((first == 0) == permutation.empty());

        size_t const n = filenames->size();
        size_t const chunk_size = std::floor(std::sqrt(n));
        size_t const prune_steps = chunk_size;
        size_t steps_without_prune = 0;

        size_t const none = std::numeric_limits<size_t>::max();

        /* internal map that stores the distances
         *
         * The first layer is a hash map with the ids of active clusters as keys.
         * The values (second layer) are priority queues with neighbors of the cluster
         * with the respective key in the first layer.
         * These neighbors are themselves clusters with an id and store a distance to the
         * cluster of the first layer.
         */
        distance_matrix dist;
        dist.reserve(n + 1);

        // map that indicates which ids of clusters are still in the distance matrix
        // the values are the indices where the priority queue for the given id as key can be found in dist
        robin_hood::unordered_flat_map<size_t, size_t> remaining_ids;

        // clustering tree stored implicitly in a vector
        std::vector<clustering_node> clustering;
        clustering.reserve(2 * n);

        // cache for hll cardinality estimates
        std::vector<double> estimates;
        estimates.reserve(2 * n);

        // every thread will write its observed id with minimal distance to some other here
        // id == none means that the thread observed only empty or no priority queues
        std::vector<size_t> min_ids(num_threads, none);

        // these will be the new ids for new clusters
        // the first one is invalid, but it will be incremented before it is used for the first time
        size_t new_id = last - 1;

        // initialize clustering and estimates
        for (size_t id = first; id < last; ++id)
        {
            // id i is at the index i - first
            clustering.push_back({none, none, sketches[id]});
            estimates.emplace_back(sketches[id].estimate());
        }

        // if this is not the first group, we want to have one overlapping bin
        size_t previous_rightmost = none;
        if (first != 0)
        {
            // For all other clusters, their id is also their index in filenames, sketches etc. .
            // This is important, because their id is then inserted into the clustering.
            // This does not work for previous rightmost, because its index does not necessarily lie on
            // the continuous spectrum from first to last. We run into a problem, because the entries are
            // stored in vectors. Therefore we give previous_rightmost a different id (==last). This is
            // fine, because we only need the HLL sketch of the actual index. previous_rightmost will be ignored
            // in the traceback anyway and won't be added to the permutation in this step.
            size_t actual_previous_rightmost = permutation.back();
            ++new_id;
            previous_rightmost = new_id;

            clustering.push_back({none, none, sketches[actual_previous_rightmost]});
            estimates.emplace_back(sketches[actual_previous_rightmost].estimate());
        }

        // initialize priority queues in the distance matrix (sequentially)
        for (size_t id = first; id < first + clustering.size(); ++id)
        {
            // empty priority queue for every item in clustering
            dist.push_back({id, prio_queue{}});
            remaining_ids[id] = id - first;
        }

        #pragma omp parallel num_threads(num_threads)
        {
            double min_dist = std::numeric_limits<double>::max();
            // minimum distance exclusively for this thread

            // initialize all the priority queues of the distance matrix
            // while doing that, compute the first min_id
            #pragma omp for schedule(nonmonotonic: dynamic, chunk_size)
            for (size_t i = 0; i < clustering.size(); ++i)
            {
                for (size_t j = 0; j < clustering.size(); ++j)
                {
                    // we only want one diagonal of the distance matrix
                    if (i < j)
                    {
                        // this must be a copy, because merging changes the hll sketch
                        hyperloglog temp_hll = clustering[i].hll;
                        double const estimate_ij = temp_hll.merge_and_estimate_SIMD(clustering[j].hll);
                        // Jaccard distance estimate
                        double const distance = 2 - (estimates[i] + estimates[j]) / estimate_ij;
                        dist[i].pq.push({j + first, distance});
                    }
                }
                if (dist[i].pq.empty()) continue;

                // check if the just initialized priority queue contains the minimum value for this thread
                neighbor const & curr = dist[i].pq.top();
                if (curr.dist < min_dist)
                {
                    min_dist = curr.dist;
                    min_ids[omp_get_thread_num()] = dist[i].id;
                }
            } // implicit barrier

            // a single thread shuffles dist to approximately balance loads in static scheduling
            #pragma omp single
            random_shuffle(dist, remaining_ids);

            // main loop of the clustering
            // keep merging nodes until we have a complete tree
            while (remaining_ids.size() > 1)
            {
                // Wait for all threads to have evaluated remaining_ids.size() as remaining_ids
                // may be modified by the following pragma omp single.
                #pragma omp barrier

                #pragma omp single
                {
                    // perform critical update
                    // increment id for the new cluster (must be done at the beginning)
                    ++new_id;

                    // compute the final min_id from the min_ids of the worker threads
                    size_t min_id = min_ids[0];
                    double min_dist = std::numeric_limits<double>::max();
                    for (auto candidate_id : min_ids)
                    {
                        // check if the thread saw any id
                        if (candidate_id == none) continue;

                        size_t const dist_index = remaining_ids.at(candidate_id);
                        neighbor const & curr = dist[dist_index].pq.top();
                        if (curr.dist < min_dist)
                        {
                            min_dist = curr.dist;
                            min_id = candidate_id;
                        }
                    }

                    size_t const min_index = remaining_ids.at(min_id); // how can min_id be none?
                    size_t const neighbor_id = dist[min_index].pq.top().id;

                    // merge the two nodes with minimal distance together insert the new node into the clustering
                    clustering.push_back({min_id, neighbor_id, std::move(clustering[min_id - first].hll)});
                    estimates.emplace_back(clustering.back().hll
                                           .merge_and_estimate_SIMD(clustering[neighbor_id - first].hll));

                    // remove old ids
                    remaining_ids.erase(min_id);
                    remaining_ids.erase(neighbor_id);

                    // overwrite one of the removed entries with the new one
                    remaining_ids[new_id] = min_index;
                    dist[min_index] = {new_id, prio_queue{}};

                    // prune the distance matrix to reduce overhead due to inactive entries
                    ++steps_without_prune;
                    if (steps_without_prune > prune_steps)
                    {
                        prune(dist, remaining_ids);
                        steps_without_prune = 0;
                    }
                } // implicit barrier

                // reset values for the computation of the new minimum
                min_ids[omp_get_thread_num()] = none;
                min_dist = std::numeric_limits<double>::max();

                hyperloglog const new_hll = clustering.back().hll;

                // update distances in dist
                // while doing that, compute the new min_id
                #pragma omp for schedule(static)
                for (size_t i = 0; i < dist.size(); ++i)
                {
                    size_t other_id = dist[i].id;
                    if (other_id == new_id || !remaining_ids.contains(other_id)) continue;

                    // this must be a copy, because merge_and_estimate_SIMD() changes the hll
                    hyperloglog temp_hll = new_hll;
                    double const estimate_ij = temp_hll.merge_and_estimate_SIMD(clustering[other_id - first].hll);
                    // Jaccard distance estimate
                    double const distance = 2 - (estimates[other_id - first] + estimates.back()) / estimate_ij;
                    dist[i].pq.push({new_id, distance});

                    // make sure the closest neighbor is not yet deleted (this is a lazy update)
                    while (!remaining_ids.contains(dist[i].pq.top().id))
                    {
                        dist[i].pq.pop();
                    }

                    // check if the just updated priority queue contains the minimum value for this thread
                    neighbor const & curr = dist[i].pq.top();
                    if (curr.dist < min_dist)
                    {
                        min_dist = curr.dist;
                        min_ids[omp_get_thread_num()] = other_id;
                    }
                } // implicit barrier
            }
        } // end of the parallel region

        size_t final_root_index = remaining_ids.begin()->second;
        size_t final_root_id = dist[final_root_index].id;

        // rotate the previous rightmost to the left so that it has the correct place in the permutation
        if (first != 0)
        {
            rotate(clustering, previous_rightmost, first, final_root_id);
        }

        // traceback into permutation and ignore the previous rightmost
        trace(clustering, permutation, previous_rightmost, first, final_root_id);
    }

    /*!\brief Randomly swap entries in dist while keeping track of the changes of indices.
     * \param[in] dist the distance matrix (vector of priority queues) to shuffle
     * \param[in] remaining_ids the map with information about which ids remain at which index
     */
    void random_shuffle(distance_matrix & dist, robin_hood::unordered_flat_map<size_t, size_t> & remaining_ids)
    {
        size_t const n = dist.size();
        std::mt19937_64 gen(0x7E1E5665D46800E5ULL);

        // `i + 1 < n` instead of `i < n - 1`: n is unsigned, so `n - 1` would
        // wrap around for an empty matrix and the loop would index out of bounds.
        for (size_t i = 0; i + 1 < n; ++i)
        {
            std::uniform_int_distribution<size_t> distrib(i, n - 1);
            size_t const swap_i = distrib(gen);
            size_t const id = dist[i].id;
            size_t const swap_id = dist[swap_i].id;

            // swap entries and update the remaining ids, because the indices in dist changed
            std::swap(dist[i], dist[swap_i]);
            std::swap(remaining_ids[id], remaining_ids[swap_id]);
        }
    }

    /*!\brief Delete inactive entries out of dist and shrink to fit its size while keeping track of the changes of indices
     * \param[in] dist the distance matrix (vector of priority queues) to prune
     * \param[in] remaining_ids the map with information about which ids remain at which index
     */
    void prune(distance_matrix & dist, robin_hood::unordered_flat_map<size_t, size_t> & remaining_ids)
    {
        if (dist.empty()) return;

        // index of the first entry after the valid range
        size_t valid_range_end = 0;
        // index of the first entry before the invalid range
        size_t invalid_range_start = dist.size() - 1;

        while (valid_range_end != invalid_range_start)
        {
            if (remaining_ids.contains(dist[valid_range_end].id))
            {
                ++valid_range_end;
            }
            else if (!remaining_ids.contains(dist[invalid_range_start].id))
            {
                --invalid_range_start;
            }
            else
            {
                // If we arrive here, then valid_range_end has an invalid id
                // and invalid_range_start has a valid id. The corresponding entries should be swapped
                std::swap(dist[valid_range_end], dist[invalid_range_start]);

                // update the index of the valid entry
                remaining_ids.at(dist[valid_range_end].id) = valid_range_end;
            }
        }

        // check the last element between the valid and invalid range
        if (remaining_ids.contains(dist[valid_range_end].id))
        {
            ++valid_range_end;
        }

        // cut off invalid values
        dist.resize(valid_range_end);
    }

    /*!\brief Rotate the previous rightmost bin to the left of the clustering tree
     * \param[in, out] clustering the tree to do the rotation on
     * \param[in] previous_rightmost the id of the node to be rotated to the left
     * \param[in] first the id of the first node in the interval to shift the index
     * \param[in] id the id of the current node
     *
     * If called with the root of the tree, this function recursively calls itself while rotating
     * several subtrees until previous_rightmost is at the very left end of the whole clustering tree.
     *
     * \return whether previous rightmost was in the subtree rooted at id
     */
    bool rotate(std::vector<clustering_node> & clustering,
                size_t const previous_rightmost,
                size_t const first,
                size_t const id)
    {
        if (id == previous_rightmost) // we are at the leaf that is previous_rightmost (Anchor of the recursion)
            return true;

        clustering_node & curr = clustering[id - first];
        if (curr.left == std::numeric_limits<size_t>::max()) // we are at a leaf that is not previous_rightmost
        {
            return false;
        }
        // nothing to do if previous_rightmost is in the left subtree
        else if(rotate(clustering, previous_rightmost, first, curr.left))
        {
            return true;
        }
        // rotate if previous_rightmost is in the right subtree
        else if(rotate(clustering, previous_rightmost, first, curr.right))
        {
            std::swap(curr.left, curr.right);
            return true;
        }

        // else: previous_rightmost is not in this subtree
        return false;
    }

    /*!\brief Do a recursive traceback to find the order of leaves in the clustering tree
     * \param[in] clustering the tree to do the traceback on
     * \param[out] permutation append the new order to this
     * \param[in] previous_rightmost the id of the node on the left which should be ignored
     * \param[in] first the id of the first node in the interval to shift the index
     * \param[in] id the id of the current node
     *
     * This function traverses the tree in depth-first-search accessing the leaves from left to right.
     * 'Left to right' refers to the order of nodes in `clustering`.
     */
    void trace(std::vector<clustering_node> const & clustering,
               std::vector<size_t> & permutation,
               size_t const previous_rightmost,
               size_t const first,
               size_t const id)
    {
        clustering_node const & curr = clustering[id - first];

        if (curr.left == std::numeric_limits<size_t>::max()) // I am at a leaf
        {
            if (id != previous_rightmost)
                permutation.push_back(id);
            return;
        }

        trace(clustering, permutation, previous_rightmost, first, curr.left);
        trace(clustering, permutation, previous_rightmost, first, curr.right);
    }

    /*!\brief Apply a given permutation to filenames, user_bin_kmer_counts and sketches
     * \param[in] permutation the permutation to apply
     */
    void apply_permutation(std::vector<size_t> const & permutation)
    {
        assert(user_bin_kmer_counts != nullptr);
        assert(filenames != nullptr);

        // sketches may legitimately be empty (not computed/loaded); skip them then
        bool const swap_sketches{!sketches.empty()};

        for (size_t i{0}; i < permutation.size(); ++i)
        {
            // follow the cycle until we find the element that belongs at position i
            size_t swap_index = permutation[i];
            while (swap_index < i)
                swap_index = permutation[swap_index];

            std::swap((*filenames)[i], (*filenames)[swap_index]);
            std::swap((*user_bin_kmer_counts)[i], (*user_bin_kmer_counts)[swap_index]);
            if (swap_sketches)
                std::swap(sketches[i], sketches[swap_index]);
        }
    }
};
} // namespace chopper::sketch
|
//
// bind/mem_fn_template.hpp
//
// Do not include this header directly
//
// Copyright (c) 2001 Peter Dimov and Multi Media Ltd.
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/bind/mem_fn.html for documentation.
//
#if !defined(BOOST_NO_FUNCTION_TEMPLATE_ORDERING)
# define BOOST_MEM_FN_ENABLE_CONST_OVERLOADS
#endif
// mf0: function object adapting a pointer to a nullary non-const member
// function, R (T::*)(). Callable with T*, T&, or any smart pointer / wrapper
// U for which an ADL-found get_pointer(u) yields a usable pointer; dispatch
// between the two cases happens via the private call() overloads.
template<class R, class T BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf0)
{
public:

    typedef R result_type;
    typedef T * argument_type;

private:

    BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) ())
    F f_;

    // u is a T (or derived from T): invoke directly through the member pointer.
    template<class U> R call(U & u, T const *) const
    {
        BOOST_MEM_FN_RETURN (u.*f_)();
    }

    // u is a smart pointer / wrapper: obtain the raw pointer via get_pointer.
    template<class U> R call(U & u, void const *) const
    {
        BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)();
    }

public:

    explicit BOOST_MEM_FN_NAME(mf0)(F f): f_(f) {}

    R operator()(T * p) const
    {
        BOOST_MEM_FN_RETURN (p->*f_)();
    }

    template<class U> R operator()(U & u) const
    {
        U const * p = 0;
        BOOST_MEM_FN_RETURN call(u, p);
    }

#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS

    // Extra const overload; only enabled when the compiler supports function
    // template partial ordering (see the guard at the top of this header).
    template<class U> R operator()(U const & u) const
    {
        U const * p = 0;
        BOOST_MEM_FN_RETURN call(u, p);
    }

#endif

    R operator()(T & t) const
    {
        BOOST_MEM_FN_RETURN (t.*f_)();
    }

    // Equality compares the stored member function pointers.
    bool operator==(BOOST_MEM_FN_NAME(mf0) const & rhs) const
    {
        return f_ == rhs.f_;
    }

    bool operator!=(BOOST_MEM_FN_NAME(mf0) const & rhs) const
    {
        return f_ != rhs.f_;
    }
};
// cmf0: like mf0, but wraps a nullary *const* member function pointer
// R (T::*)() const, so it is also callable with const objects and pointers.
template<class R, class T BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf0)
{
public:

    typedef R result_type;
    typedef T const * argument_type;

private:

    BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) () const)
    F f_;

    // u is a T (or derived from T): invoke directly through the member pointer.
    template<class U> R call(U & u, T const *) const
    {
        BOOST_MEM_FN_RETURN (u.*f_)();
    }

    // u is a smart pointer / wrapper: obtain the raw pointer via get_pointer.
    template<class U> R call(U & u, void const *) const
    {
        BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)();
    }

public:

    explicit BOOST_MEM_FN_NAME(cmf0)(F f): f_(f) {}

    template<class U> R operator()(U const & u) const
    {
        U const * p = 0;
        BOOST_MEM_FN_RETURN call(u, p);
    }

    R operator()(T const & t) const
    {
        BOOST_MEM_FN_RETURN (t.*f_)();
    }

    // Equality compares the stored member function pointers.
    bool operator==(BOOST_MEM_FN_NAME(cmf0) const & rhs) const
    {
        return f_ == rhs.f_;
    }

    bool operator!=(BOOST_MEM_FN_NAME(cmf0) const & rhs) const
    {
        return f_ != rhs.f_;
    }
};
// mf1: function object adapting a pointer to a unary non-const member
// function, R (T::*)(A1). Same dispatch scheme as mf0, with one bound-at-call
// argument forwarded through the private call() overloads.
template<class R, class T, class A1 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf1)
{
public:

    typedef R result_type;
    typedef T * first_argument_type;
    typedef A1 second_argument_type;

private:

    BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1))
    F f_;

    // u is a T (or derived from T): invoke directly through the member pointer.
    template<class U, class B1> R call(U & u, T const *, B1 & b1) const
    {
        BOOST_MEM_FN_RETURN (u.*f_)(b1);
    }

    // u is a smart pointer / wrapper: obtain the raw pointer via get_pointer.
    template<class U, class B1> R call(U & u, void const *, B1 & b1) const
    {
        BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1);
    }

public:

    explicit BOOST_MEM_FN_NAME(mf1)(F f): f_(f) {}

    R operator()(T * p, A1 a1) const
    {
        BOOST_MEM_FN_RETURN (p->*f_)(a1);
    }

    template<class U> R operator()(U & u, A1 a1) const
    {
        U const * p = 0;
        BOOST_MEM_FN_RETURN call(u, p, a1);
    }

#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS

    // Extra const overload; only enabled when the compiler supports function
    // template partial ordering (see the guard at the top of this header).
    template<class U> R operator()(U const & u, A1 a1) const
    {
        U const * p = 0;
        BOOST_MEM_FN_RETURN call(u, p, a1);
    }

#endif

    R operator()(T & t, A1 a1) const
    {
        BOOST_MEM_FN_RETURN (t.*f_)(a1);
    }

    // Equality compares the stored member function pointers.
    bool operator==(BOOST_MEM_FN_NAME(mf1) const & rhs) const
    {
        return f_ == rhs.f_;
    }

    bool operator!=(BOOST_MEM_FN_NAME(mf1) const & rhs) const
    {
        return f_ != rhs.f_;
    }
};
// cmf1: like mf1, but wraps a unary *const* member function pointer
// R (T::*)(A1) const, so it is also callable with const objects and pointers.
template<class R, class T, class A1 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf1)
{
public:

    typedef R result_type;
    typedef T const * first_argument_type;
    typedef A1 second_argument_type;

private:

    BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1) const)
    F f_;

    // u is a T (or derived from T): invoke directly through the member pointer.
    template<class U, class B1> R call(U & u, T const *, B1 & b1) const
    {
        BOOST_MEM_FN_RETURN (u.*f_)(b1);
    }

    // u is a smart pointer / wrapper: obtain the raw pointer via get_pointer.
    template<class U, class B1> R call(U & u, void const *, B1 & b1) const
    {
        BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1);
    }

public:

    explicit BOOST_MEM_FN_NAME(cmf1)(F f): f_(f) {}

    template<class U> R operator()(U const & u, A1 a1) const
    {
        U const * p = 0;
        BOOST_MEM_FN_RETURN call(u, p, a1);
    }

    R operator()(T const & t, A1 a1) const
    {
        BOOST_MEM_FN_RETURN (t.*f_)(a1);
    }

    // Equality compares the stored member function pointers.
    bool operator==(BOOST_MEM_FN_NAME(cmf1) const & rhs) const
    {
        return f_ == rhs.f_;
    }

    bool operator!=(BOOST_MEM_FN_NAME(cmf1) const & rhs) const
    {
        return f_ != rhs.f_;
    }
};
// mf2: function object adapting a pointer to a binary non-const member
// function, R (T::*)(A1, A2). Same dispatch scheme as mf0/mf1.
template<class R, class T, class A1, class A2 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf2)
{
public:

    typedef R result_type;

private:

    BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2))
    F f_;

    // u is a T (or derived from T): invoke directly through the member pointer.
    template<class U, class B1, class B2> R call(U & u, T const *, B1 & b1, B2 & b2) const
    {
        BOOST_MEM_FN_RETURN (u.*f_)(b1, b2);
    }

    // u is a smart pointer / wrapper: obtain the raw pointer via get_pointer.
    template<class U, class B1, class B2> R call(U & u, void const *, B1 & b1, B2 & b2) const
    {
        BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2);
    }

public:

    explicit BOOST_MEM_FN_NAME(mf2)(F f): f_(f) {}

    R operator()(T * p, A1 a1, A2 a2) const
    {
        BOOST_MEM_FN_RETURN (p->*f_)(a1, a2);
    }

    template<class U> R operator()(U & u, A1 a1, A2 a2) const
    {
        U const * p = 0;
        BOOST_MEM_FN_RETURN call(u, p, a1, a2);
    }

#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS

    // Extra const overload; only enabled when the compiler supports function
    // template partial ordering (see the guard at the top of this header).
    template<class U> R operator()(U const & u, A1 a1, A2 a2) const
    {
        U const * p = 0;
        BOOST_MEM_FN_RETURN call(u, p, a1, a2);
    }

#endif

    R operator()(T & t, A1 a1, A2 a2) const
    {
        BOOST_MEM_FN_RETURN (t.*f_)(a1, a2);
    }

    // Equality compares the stored member function pointers.
    bool operator==(BOOST_MEM_FN_NAME(mf2) const & rhs) const
    {
        return f_ == rhs.f_;
    }

    bool operator!=(BOOST_MEM_FN_NAME(mf2) const & rhs) const
    {
        return f_ != rhs.f_;
    }
};
// cmf2
template<class R, class T, class A1, class A2 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf2)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2) const)
F f_;
template<class U, class B1, class B2> R call(U & u, T const *, B1 & b1, B2 & b2) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2);
}
template<class U, class B1, class B2> R call(U & u, void const *, B1 & b1, B2 & b2) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2);
}
public:
explicit BOOST_MEM_FN_NAME(cmf2)(F f): f_(f) {}
template<class U> R operator()(U const & u, A1 a1, A2 a2) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2);
}
R operator()(T const & t, A1 a1, A2 a2) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2);
}
bool operator==(BOOST_MEM_FN_NAME(cmf2) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(cmf2) const & rhs) const
{
return f_ != rhs.f_;
}
};
// mf3
template<class R, class T, class A1, class A2, class A3 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf3)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3))
F f_;
template<class U, class B1, class B2, class B3> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3);
}
template<class U, class B1, class B2, class B3> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3);
}
public:
explicit BOOST_MEM_FN_NAME(mf3)(F f): f_(f) {}
R operator()(T * p, A1 a1, A2 a2, A3 a3) const
{
BOOST_MEM_FN_RETURN (p->*f_)(a1, a2, a3);
}
template<class U> R operator()(U & u, A1 a1, A2 a2, A3 a3) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3);
}
#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3);
}
#endif
R operator()(T & t, A1 a1, A2 a2, A3 a3) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3);
}
bool operator==(BOOST_MEM_FN_NAME(mf3) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(mf3) const & rhs) const
{
return f_ != rhs.f_;
}
};
// cmf3
template<class R, class T, class A1, class A2, class A3 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf3)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3) const)
F f_;
template<class U, class B1, class B2, class B3> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3);
}
template<class U, class B1, class B2, class B3> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3);
}
public:
explicit BOOST_MEM_FN_NAME(cmf3)(F f): f_(f) {}
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3);
}
R operator()(T const & t, A1 a1, A2 a2, A3 a3) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3);
}
bool operator==(BOOST_MEM_FN_NAME(cmf3) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(cmf3) const & rhs) const
{
return f_ != rhs.f_;
}
};
// mf4
template<class R, class T, class A1, class A2, class A3, class A4 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf4)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4))
F f_;
template<class U, class B1, class B2, class B3, class B4> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4);
}
template<class U, class B1, class B2, class B3, class B4> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4);
}
public:
explicit BOOST_MEM_FN_NAME(mf4)(F f): f_(f) {}
R operator()(T * p, A1 a1, A2 a2, A3 a3, A4 a4) const
{
BOOST_MEM_FN_RETURN (p->*f_)(a1, a2, a3, a4);
}
template<class U> R operator()(U & u, A1 a1, A2 a2, A3 a3, A4 a4) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4);
}
#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4);
}
#endif
R operator()(T & t, A1 a1, A2 a2, A3 a3, A4 a4) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4);
}
bool operator==(BOOST_MEM_FN_NAME(mf4) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(mf4) const & rhs) const
{
return f_ != rhs.f_;
}
};
// cmf4
template<class R, class T, class A1, class A2, class A3, class A4 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf4)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4) const)
F f_;
template<class U, class B1, class B2, class B3, class B4> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4);
}
template<class U, class B1, class B2, class B3, class B4> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4);
}
public:
explicit BOOST_MEM_FN_NAME(cmf4)(F f): f_(f) {}
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4);
}
R operator()(T const & t, A1 a1, A2 a2, A3 a3, A4 a4) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4);
}
bool operator==(BOOST_MEM_FN_NAME(cmf4) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(cmf4) const & rhs) const
{
return f_ != rhs.f_;
}
};
// mf5
template<class R, class T, class A1, class A2, class A3, class A4, class A5 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf5)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4, A5))
F f_;
template<class U, class B1, class B2, class B3, class B4, class B5> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4, b5);
}
template<class U, class B1, class B2, class B3, class B4, class B5> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4, b5);
}
public:
explicit BOOST_MEM_FN_NAME(mf5)(F f): f_(f) {}
R operator()(T * p, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) const
{
BOOST_MEM_FN_RETURN (p->*f_)(a1, a2, a3, a4, a5);
}
template<class U> R operator()(U & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5);
}
#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5);
}
#endif
R operator()(T & t, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4, a5);
}
bool operator==(BOOST_MEM_FN_NAME(mf5) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(mf5) const & rhs) const
{
return f_ != rhs.f_;
}
};
// cmf5
template<class R, class T, class A1, class A2, class A3, class A4, class A5 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf5)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4, A5) const)
F f_;
template<class U, class B1, class B2, class B3, class B4, class B5> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4, b5);
}
template<class U, class B1, class B2, class B3, class B4, class B5> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4, b5);
}
public:
explicit BOOST_MEM_FN_NAME(cmf5)(F f): f_(f) {}
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5);
}
R operator()(T const & t, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4, a5);
}
bool operator==(BOOST_MEM_FN_NAME(cmf5) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(cmf5) const & rhs) const
{
return f_ != rhs.f_;
}
};
// mf6
template<class R, class T, class A1, class A2, class A3, class A4, class A5, class A6 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf6)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4, A5, A6))
F f_;
template<class U, class B1, class B2, class B3, class B4, class B5, class B6> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4, b5, b6);
}
template<class U, class B1, class B2, class B3, class B4, class B5, class B6> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4, b5, b6);
}
public:
explicit BOOST_MEM_FN_NAME(mf6)(F f): f_(f) {}
R operator()(T * p, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) const
{
BOOST_MEM_FN_RETURN (p->*f_)(a1, a2, a3, a4, a5, a6);
}
template<class U> R operator()(U & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6);
}
#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6);
}
#endif
R operator()(T & t, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4, a5, a6);
}
bool operator==(BOOST_MEM_FN_NAME(mf6) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(mf6) const & rhs) const
{
return f_ != rhs.f_;
}
};
// cmf6
template<class R, class T, class A1, class A2, class A3, class A4, class A5, class A6 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf6)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4, A5, A6) const)
F f_;
template<class U, class B1, class B2, class B3, class B4, class B5, class B6> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4, b5, b6);
}
template<class U, class B1, class B2, class B3, class B4, class B5, class B6> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4, b5, b6);
}
public:
explicit BOOST_MEM_FN_NAME(cmf6)(F f): f_(f) {}
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6);
}
R operator()(T const & t, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4, a5, a6);
}
bool operator==(BOOST_MEM_FN_NAME(cmf6) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(cmf6) const & rhs) const
{
return f_ != rhs.f_;
}
};
// mf7
template<class R, class T, class A1, class A2, class A3, class A4, class A5, class A6, class A7 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf7)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4, A5, A6, A7))
F f_;
template<class U, class B1, class B2, class B3, class B4, class B5, class B6, class B7> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6, B7 & b7) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4, b5, b6, b7);
}
template<class U, class B1, class B2, class B3, class B4, class B5, class B6, class B7> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6, B7 & b7) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4, b5, b6, b7);
}
public:
explicit BOOST_MEM_FN_NAME(mf7)(F f): f_(f) {}
R operator()(T * p, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) const
{
BOOST_MEM_FN_RETURN (p->*f_)(a1, a2, a3, a4, a5, a6, a7);
}
template<class U> R operator()(U & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6, a7);
}
#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6, a7);
}
#endif
R operator()(T & t, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4, a5, a6, a7);
}
bool operator==(BOOST_MEM_FN_NAME(mf7) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(mf7) const & rhs) const
{
return f_ != rhs.f_;
}
};
// cmf7
template<class R, class T, class A1, class A2, class A3, class A4, class A5, class A6, class A7 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf7)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4, A5, A6, A7) const)
F f_;
template<class U, class B1, class B2, class B3, class B4, class B5, class B6, class B7> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6, B7 & b7) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4, b5, b6, b7);
}
template<class U, class B1, class B2, class B3, class B4, class B5, class B6, class B7> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6, B7 & b7) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4, b5, b6, b7);
}
public:
explicit BOOST_MEM_FN_NAME(cmf7)(F f): f_(f) {}
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6, a7);
}
R operator()(T const & t, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4, a5, a6, a7);
}
bool operator==(BOOST_MEM_FN_NAME(cmf7) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(cmf7) const & rhs) const
{
return f_ != rhs.f_;
}
};
// mf8
template<class R, class T, class A1, class A2, class A3, class A4, class A5, class A6, class A7, class A8 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(mf8)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4, A5, A6, A7, A8))
F f_;
template<class U, class B1, class B2, class B3, class B4, class B5, class B6, class B7, class B8> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6, B7 & b7, B8 & b8) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4, b5, b6, b7, b8);
}
template<class U, class B1, class B2, class B3, class B4, class B5, class B6, class B7, class B8> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6, B7 & b7, B8 & b8) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4, b5, b6, b7, b8);
}
public:
explicit BOOST_MEM_FN_NAME(mf8)(F f): f_(f) {}
R operator()(T * p, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) const
{
BOOST_MEM_FN_RETURN (p->*f_)(a1, a2, a3, a4, a5, a6, a7, a8);
}
template<class U> R operator()(U & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6, a7, a8);
}
#ifdef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6, a7, a8);
}
#endif
R operator()(T & t, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4, a5, a6, a7, a8);
}
bool operator==(BOOST_MEM_FN_NAME(mf8) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(mf8) const & rhs) const
{
return f_ != rhs.f_;
}
};
// cmf8
template<class R, class T, class A1, class A2, class A3, class A4, class A5, class A6, class A7, class A8 BOOST_MEM_FN_CLASS_F> class BOOST_MEM_FN_NAME(cmf8)
{
public:
typedef R result_type;
private:
BOOST_MEM_FN_TYPEDEF(R (BOOST_MEM_FN_CC T::*F) (A1, A2, A3, A4, A5, A6, A7, A8) const)
F f_;
template<class U, class B1, class B2, class B3, class B4, class B5, class B6, class B7, class B8> R call(U & u, T const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6, B7 & b7, B8 & b8) const
{
BOOST_MEM_FN_RETURN (u.*f_)(b1, b2, b3, b4, b5, b6, b7, b8);
}
template<class U, class B1, class B2, class B3, class B4, class B5, class B6, class B7, class B8> R call(U & u, void const *, B1 & b1, B2 & b2, B3 & b3, B4 & b4, B5 & b5, B6 & b6, B7 & b7, B8 & b8) const
{
BOOST_MEM_FN_RETURN (get_pointer(u)->*f_)(b1, b2, b3, b4, b5, b6, b7, b8);
}
public:
explicit BOOST_MEM_FN_NAME(cmf8)(F f): f_(f) {}
R operator()(T const * p, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) const
{
BOOST_MEM_FN_RETURN (p->*f_)(a1, a2, a3, a4, a5, a6, a7, a8);
}
template<class U> R operator()(U const & u, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) const
{
U const * p = 0;
BOOST_MEM_FN_RETURN call(u, p, a1, a2, a3, a4, a5, a6, a7, a8);
}
R operator()(T const & t, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) const
{
BOOST_MEM_FN_RETURN (t.*f_)(a1, a2, a3, a4, a5, a6, a7, a8);
}
bool operator==(BOOST_MEM_FN_NAME(cmf8) const & rhs) const
{
return f_ == rhs.f_;
}
bool operator!=(BOOST_MEM_FN_NAME(cmf8) const & rhs) const
{
return f_ != rhs.f_;
}
};
#undef BOOST_MEM_FN_ENABLE_CONST_OVERLOADS
|
/*
Copyright 2015-2022 Igor Petrovic
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "board/Board.h"
#include "board/Internal.h"
#include "board/arch/arm/st/Internal.h"
#include "board/common/io/Helpers.h"
#include "core/src/general/IO.h"
#include "core/src/general/Atomic.h"
#include "core/src/general/ADC.h"
#include "core/src/general/Timing.h"
#include "core/src/general/Helpers.h"
#include <Target.h>
namespace
{
// Peripheral descriptors for this STM32F4 target.  Each descriptor bundles
// everything Board::detail::st needs to bring a peripheral up: the GPIO pin
// configuration, the HAL instance pointer, the IRQ number and the RCC clock
// gating calls.  Instances are file-local and exported only through the
// _uartDescriptor / _i2cDescriptor arrays at the bottom of this namespace.
//
// UART channel 0 -> USART1 on PA10 (RX) / PA9 (TX), alternate function 7.
class UARTdescriptor0 : public Board::detail::st::Peripheral
{
public:
UARTdescriptor0() = default;
std::vector<core::io::mcuPin_t> pins() override
{
return _pins;
}
// Raw HAL instance; callers cast back to USART_TypeDef*.
void* interface() override
{
return USART1;
}
IRQn_Type irqn() override
{
return _irqn;
}
void enableClock() override
{
__HAL_RCC_USART1_CLK_ENABLE();
}
void disableClock() override
{
__HAL_RCC_USART1_CLK_DISABLE();
}
private:
std::vector<core::io::mcuPin_t> _pins = {
// rx
{
.port = GPIOA,
.index = GPIO_PIN_10,
.mode = core::io::pinMode_t::alternatePP,
.pull = core::io::pullMode_t::none,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF7_USART1,
},
// tx
{
.port = GPIOA,
.index = GPIO_PIN_9,
.mode = core::io::pinMode_t::alternatePP,
.pull = core::io::pullMode_t::none,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF7_USART1,
},
};
const IRQn_Type _irqn = USART1_IRQn;
} _uartDescriptor0;
// UART channel 1 -> USART2 on PA3 (RX) / PA2 (TX), alternate function 7.
class UARTdescriptor1 : public Board::detail::st::Peripheral
{
public:
UARTdescriptor1() = default;
std::vector<core::io::mcuPin_t> pins() override
{
return _pins;
}
void* interface() override
{
return USART2;
}
IRQn_Type irqn() override
{
return _irqn;
}
void enableClock() override
{
__HAL_RCC_USART2_CLK_ENABLE();
}
void disableClock() override
{
__HAL_RCC_USART2_CLK_DISABLE();
}
private:
std::vector<core::io::mcuPin_t> _pins = {
// rx
{
.port = GPIOA,
.index = GPIO_PIN_3,
.mode = core::io::pinMode_t::alternatePP,
.pull = core::io::pullMode_t::none,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF7_USART2,
},
// tx
{
.port = GPIOA,
.index = GPIO_PIN_2,
.mode = core::io::pinMode_t::alternatePP,
.pull = core::io::pullMode_t::none,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF7_USART2,
},
};
const IRQn_Type _irqn = USART2_IRQn;
} _uartDescriptor1;
// UART channel 2 -> USART6 on PA12 (RX) / PA11 (TX), alternate function 8.
class UARTdescriptor2 : public Board::detail::st::Peripheral
{
public:
UARTdescriptor2() = default;
std::vector<core::io::mcuPin_t> pins() override
{
return _pins;
}
void* interface() override
{
return USART6;
}
IRQn_Type irqn() override
{
return _irqn;
}
void enableClock() override
{
__HAL_RCC_USART6_CLK_ENABLE();
}
void disableClock() override
{
__HAL_RCC_USART6_CLK_DISABLE();
}
private:
std::vector<core::io::mcuPin_t> _pins = {
// rx
{
.port = GPIOA,
.index = GPIO_PIN_12,
.mode = core::io::pinMode_t::alternatePP,
.pull = core::io::pullMode_t::none,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF8_USART6,
},
// tx
{
.port = GPIOA,
.index = GPIO_PIN_11,
.mode = core::io::pinMode_t::alternatePP,
.pull = core::io::pullMode_t::none,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF8_USART6,
},
};
const IRQn_Type _irqn = USART6_IRQn;
} _uartDescriptor2;
// I2C channel 0 -> I2C1 on PB7 (SDA) / PB6 (SCL), open-drain with pull-ups.
class I2Cdescriptor0 : public Board::detail::st::Peripheral
{
public:
I2Cdescriptor0() = default;
std::vector<core::io::mcuPin_t> pins() override
{
return _pins;
}
void* interface() override
{
return I2C1;
}
// NOTE(review): returns IRQn 0 (WWDG_IRQn on STM32F4) — presumably I2C is
// driven without interrupts here; confirm callers never enable this IRQ.
IRQn_Type irqn() override
{
return _irqn;
}
void enableClock() override
{
__HAL_RCC_I2C1_CLK_ENABLE();
}
void disableClock() override
{
__HAL_RCC_I2C1_CLK_DISABLE();
}
private:
std::vector<core::io::mcuPin_t> _pins = {
// sda
{
.port = GPIOB,
.index = GPIO_PIN_7,
.mode = core::io::pinMode_t::alternateOD,
.pull = core::io::pullMode_t::up,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF4_I2C1,
},
// scl
{
.port = GPIOB,
.index = GPIO_PIN_6,
.mode = core::io::pinMode_t::alternateOD,
.pull = core::io::pullMode_t::up,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF4_I2C1,
},
};
const IRQn_Type _irqn = static_cast<IRQn_Type>(0);
} _i2cDescriptor0;
// I2C channel 1 -> I2C2 on PB3 (SDA) / PB10 (SCL), open-drain with pull-ups.
class I2Cdescriptor1 : public Board::detail::st::Peripheral
{
public:
I2Cdescriptor1() = default;
std::vector<core::io::mcuPin_t> pins() override
{
return _pins;
}
void* interface() override
{
return I2C2;
}
// NOTE(review): IRQn 0 placeholder — see I2Cdescriptor0.
IRQn_Type irqn() override
{
return _irqn;
}
void enableClock() override
{
__HAL_RCC_I2C2_CLK_ENABLE();
}
void disableClock() override
{
__HAL_RCC_I2C2_CLK_DISABLE();
}
private:
std::vector<core::io::mcuPin_t> _pins = {
// sda
{
.port = GPIOB,
.index = GPIO_PIN_3,
.mode = core::io::pinMode_t::alternateOD,
.pull = core::io::pullMode_t::up,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF4_I2C2,
},
// scl
{
.port = GPIOB,
.index = GPIO_PIN_10,
.mode = core::io::pinMode_t::alternateOD,
.pull = core::io::pullMode_t::up,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF4_I2C2,
},
};
const IRQn_Type _irqn = static_cast<IRQn_Type>(0);
} _i2cDescriptor1;
// I2C channel 2 -> I2C3 on PB4 (SDA) / PA8 (SCL), open-drain with pull-ups.
class I2Cdescriptor2 : public Board::detail::st::Peripheral
{
public:
I2Cdescriptor2() = default;
std::vector<core::io::mcuPin_t> pins() override
{
return _pins;
}
void* interface() override
{
return I2C3;
}
// NOTE(review): IRQn 0 placeholder — see I2Cdescriptor0.
IRQn_Type irqn() override
{
return _irqn;
}
void enableClock() override
{
__HAL_RCC_I2C3_CLK_ENABLE();
}
void disableClock() override
{
__HAL_RCC_I2C3_CLK_DISABLE();
}
private:
std::vector<core::io::mcuPin_t> _pins = {
// sda
{
.port = GPIOB,
.index = GPIO_PIN_4,
.mode = core::io::pinMode_t::alternateOD,
.pull = core::io::pullMode_t::up,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF4_I2C3,
},
// scl
{
.port = GPIOA,
.index = GPIO_PIN_8,
.mode = core::io::pinMode_t::alternateOD,
.pull = core::io::pullMode_t::up,
.speed = core::io::gpioSpeed_t::veryHigh,
.alternate = GPIO_AF4_I2C3,
},
};
const IRQn_Type _irqn = static_cast<IRQn_Type>(0);
} _i2cDescriptor2;
// Lookup tables consumed by the board layer; index == logical channel number.
Board::detail::st::Peripheral* _uartDescriptor[MAX_UART_INTERFACES] = {
&_uartDescriptor0,
&_uartDescriptor1,
&_uartDescriptor2,
};
Board::detail::st::Peripheral* _i2cDescriptor[MAX_I2C_INTERFACES] = {
&_i2cDescriptor0,
&_i2cDescriptor1,
&_i2cDescriptor2
};
} // namespace
#include "STM32F4Descriptors.cpp.include"
|
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/phase-config.h"
#endif
#include <cstring>
#if HAVE_DECL_STRNLEN == 0
// Fallback implementation of strnlen() for C libraries that lack it (only
// compiled when HAVE_DECL_STRNLEN == 0): returns the length of the string at
// `start`, scanning at most `max_len` bytes.
size_t strnlen( const char *start, size_t max_len)
{
// memchr stops at the first NUL within max_len; if none is found the string
// is at least max_len bytes long and max_len is returned, per POSIX.
const char *end = (const char *)memchr(start, '\0', max_len);
return end ? (size_t)(end - start) : max_len;
}
#endif // HAVE_DECL_STRNLEN
|
#include <fc/rpc/websocket_api.hpp>
namespace fc { namespace rpc {
// Out-of-line destructor; no explicit cleanup is required — members release
// their own resources.
websocket_api_connection::~websocket_api_connection()
{
}
// Wires the JSON-RPC method table to the websocket transport.  Registers the
// three protocol methods ("call", "notice", "callback"), a fallback for
// unknown method names, and the connection's message/HTTP/close handlers.
websocket_api_connection::websocket_api_connection( fc::http::websocket_connection& c )
: _connection(c)
{
// "call": [api, method, params].  The api may be given by numeric id or by
// name; a string is first resolved via api 1's "get_api_by_name".
_rpc_state.add_method( "call", [this]( const variants& args ) -> variant
{
FC_ASSERT( args.size() == 3 && args[2].is_array() );
api_id_type api_id;
if( args[0].is_string() )
{
variants subargs;
subargs.push_back( args[0] );
variant subresult = this->receive_call( 1, "get_api_by_name", subargs );
api_id = subresult.as_uint64();
}
else
api_id = args[0].as_uint64();
return this->receive_call(
api_id,
args[1].as_string(),
args[2].get_array() );
} );
// "notice": [callback_id, params] — one-way notification, returns nothing.
_rpc_state.add_method( "notice", [this]( const variants& args ) -> variant
{
FC_ASSERT( args.size() == 2 && args[1].is_array() );
this->receive_notice( args[0].as_uint64(), args[1].get_array() );
return variant();
} );
// "callback": [callback_id, params] — invokes a previously registered callback.
_rpc_state.add_method( "callback", [this]( const variants& args ) -> variant
{
FC_ASSERT( args.size() == 2 && args[1].is_array() );
this->receive_callback( args[0].as_uint64(), args[1].get_array() );
return variant();
} );
// Any unrecognized method name is treated as a call on api 0.
_rpc_state.on_unhandled( [&]( const std::string& method_name, const variants& args )
{
return this->receive_call( 0, method_name, args );
} );
// NOTE(review): the [&] captures below effectively capture `this`; they
// assume this object outlives the connection's handler registrations —
// confirm the ownership model at the call sites.
_connection.on_message_handler( [&]( const std::string& msg ){ on_message(msg,true); } );
_connection.on_http_handler( [&]( const std::string& msg ){ return on_message(msg,false); } );
_connection.closed.connect( [this](){ closed(); } );
}
// Issues a remote "call" request over the websocket and blocks until the
// matching response (keyed by the request id) arrives.
variant websocket_api_connection::send_call(
api_id_type api_id,
string method_name,
variants args /* = variants() */ )
{
auto pending = _rpc_state.start_remote_call( "call", {api_id, std::move(method_name), std::move(args) } );
auto payload = fc::json::to_string( pending );
_connection.send_message( payload );
return _rpc_state.wait_for_response( *pending.id );
}
// Invokes a remote callback by id and blocks until its response arrives.
variant websocket_api_connection::send_callback(
uint64_t callback_id,
variants args /* = variants() */ )
{
auto pending = _rpc_state.start_remote_call( "callback", {callback_id, std::move(args) } );
auto payload = fc::json::to_string( pending );
_connection.send_message( payload );
return _rpc_state.wait_for_response( *pending.id );
}
// Sends a one-way "notice" to the peer.  Notices carry no request id, so no
// response is expected or awaited.
void websocket_api_connection::send_notice(
uint64_t callback_id,
variants args /* = variants() */ )
{
fc::rpc::request notice_req{ optional<uint64_t>(), "notice", {callback_id, std::move(args)}};
auto payload = fc::json::to_string( notice_req );
_connection.send_message( payload );
}
// Handles one inbound websocket/HTTP message.  A JSON object containing a
// "method" key is dispatched as a request through _rpc_state; any other
// object is treated as a response to one of our own remote calls.  Returns
// the serialized reply (also sent over the socket when send_message is true)
// or an empty string when no reply is produced.
std::string websocket_api_connection::on_message(
const std::string& message,
bool send_message /* = true */ )
{
wdump((message));
try
{
auto var = fc::json::from_string(message);
const auto& var_obj = var.get_object();
if( var_obj.contains( "method" ) )
{
auto call = var.as<fc::rpc::request>();
exception_ptr optexcept;
try
{
auto result = _rpc_state.local_call( call.method, call.params );
// Only requests carrying an id expect a response; id-less requests are
// notifications and produce no reply.
if( call.id )
{
auto reply = fc::json::to_string( response( *call.id, result ) );
if( send_message )
_connection.send_message( reply );
return reply;
}
}
catch ( const fc::exception& e )
{
if( call.id )
{
// The exception is copied and the error reply is built below, outside
// the catch block — presumably to keep serialization out of the
// unwinding path; confirm before restructuring.
optexcept = e.dynamic_copy_exception();
}
}
if( optexcept ) {
auto reply = fc::json::to_string( response( *call.id, error_object{ 1, optexcept->to_detail_string(), fc::variant(*optexcept)} ) );
if( send_message )
_connection.send_message( reply );
return reply;
}
}
else
{
// No "method" key: this is a reply to a call we initiated earlier.
auto reply = var.as<fc::rpc::response>();
_rpc_state.handle_reply( reply );
}
}
catch ( const fc::exception& e )
{
// Malformed JSON or dispatch failure: log and return the details to the
// (HTTP) caller instead of tearing the connection down.
wdump((e.to_detail_string()));
return e.to_detail_string();
}
return string();
}
} } // namespace fc::rpc
|
#include "caffe2/operators/h_softmax_op.h"
#include <queue>
#include <stack>
namespace caffe2 {
// Evaluates one node of the hierarchical softmax for a single sample:
// computes y = softmax(W * x + b), writes both the FC output and the softmax
// output into int_output (advancing int_output_offset past each, so the
// backward pass can replay them), and returns the cross-entropy loss
// -log(y[target]).  A negative target means "no loss requested"; -1 is
// returned in that case.  dim_in is the input feature size, dim_out the
// node's output size; bias_multiplier points at a buffer of ones.
template <>
float HSoftmaxOp<float, CPUContext>::RunForwardSingle(const float* X,
const float* W, const float* b, int target, float* int_output,
const float* bias_multiplier, int dim_out, int dim_in,
int& int_output_offset) {
// W * x
float* fc_output_data = int_output + int_output_offset;
math::Gemm<float, CPUContext>(CblasNoTrans, CblasTrans, 1, dim_out, dim_in, 1,
X, W, 0, fc_output_data, &context_);
// + b (Gemv against the ones vector adds the bias in place)
math::Gemv<float, CPUContext>(CblasNoTrans, dim_out, 1, 1,
b, bias_multiplier, 1, fc_output_data, &context_);
int_output_offset += dim_out;
//Softmax
float* softmax_output_data = int_output + int_output_offset;
// Lazily (re)size the scratch buffers shared across calls.
if (scale_.size() != 1) {
scale_.Resize(1);
}
if (sum_multiplier_.size() != dim_out) {
sum_multiplier_.Resize(dim_out);
math::Set<float, CPUContext>(dim_out, 1.f,
sum_multiplier_.mutable_data<float>(), &context_);
}
// Max-subtraction for numerical stability before exponentiation.
math::RowwiseMax<float, CPUContext>(1, dim_out, fc_output_data,
scale_.mutable_data<float>(), &context_);
// Put the intermediate result X - max(X) into Y
context_.template CopyFromCPU<float>(
dim_out, fc_output_data, softmax_output_data);
// Subtract the scale
math::Gemv<float, CPUContext>(CblasNoTrans, dim_out, 1, -1,
sum_multiplier_.data<float>(), scale_.data<float>(), 1, softmax_output_data,
&context_);
// Exponentiation
math::Exp<float, CPUContext>(dim_out, softmax_output_data,
softmax_output_data, &context_);
// Sum of exponentials (dot with the ones vector) lands in scale_.
math::Gemv<float, CPUContext>(CblasNoTrans, 1, dim_out, 1,
softmax_output_data, sum_multiplier_.data<float>(), 0,
scale_.mutable_data<float>(), &context_);
// Do division
const float scale = *scale_.data<float>();
for (int j = 0; j < dim_out; ++j) {
softmax_output_data[j] /= scale;
}
int_output_offset += dim_out;
if (target < 0) {
return -1;
}
//Return cross entropy loss
// kLOG_THRESHOLD() clamps the probability away from zero so log() is finite.
return -log(std::max(softmax_output_data[target], kLOG_THRESHOLD()));
}
// Implementation for the CPU context.
// CPU forward pass for hierarchical softmax.  For each sample, walks the
// tree path assigned to its label and accumulates the per-node cross-entropy
// losses (RunForwardSingle) into Y[sample].  All per-node FC and softmax
// activations are packed sequentially into Output(1) for reuse by the
// gradient op.
template <>
bool HSoftmaxOp<float, CPUContext>::RunOnDevice() {
auto& X = Input(0);
const auto& W = Input(1);
const auto& b = Input(2);
auto& label = Input(3);
auto* Y = Output(0);
auto* intermediate_output = Output(1);
// Batch size
int M = X.ndim() > 1 ? X.dim32(0) : 1;
// Input feature dimension
int K = X.size() / M;
CAFFE_ENFORCE_GE(W.ndim(), 2); // N*K
CAFFE_ENFORCE_EQ(b.ndim(), 1); // N
CAFFE_ENFORCE_EQ(K, W.size() / (W.dim32(0)));
// Sum of output dimensions of all hierarchy nodes
int N = W.dim32(0);
CAFFE_ENFORCE_EQ(N, b.dim32(0));
Y->Resize(M);
auto* Ydata = Y->template mutable_data<float>();
math::Set<float, CPUContext>(M, 0.f, Ydata, &context_);
const auto* labeldata = label.data<int>();
// Resolve each label to its root-to-leaf path, then size the scratch output
// to hold every node activation along every sample's path.
auto hierarchy = getHierarchyForLabels(M, labeldata, hierarchy_all_map_);
int int_output_size = getIntermediateOutputSize(labeldata, M, hierarchy);
intermediate_output->Resize(int_output_size);
float* int_output_data = intermediate_output->template mutable_data<float>();
int int_output_offset = 0;
// Ones vector used by RunForwardSingle to add the bias via Gemv.
if (bias_multiplier_.size() != M) {
bias_multiplier_.Resize(M);
math::Set<float, CPUContext>(M, static_cast<float>(1),
bias_multiplier_.mutable_data<float>(), &context_);
}
for (int sample = 0; sample < M; ++sample) {
int word_id = labeldata[sample];
const PathProto& path = hierarchy[word_id];
for (const PathNodeProto& node : path.path_nodes()) {
//Offset of node's weight matrix in W
int w_offset = node.index();
//Number of output dimensions in node's weight matrix
int w_length = node.length();
int target = node.target();
//Adding log probabilities
Ydata[sample] += RunForwardSingle(X.data<float>() + sample*K,
W.data<float>() + w_offset*K, b.data<float>() + w_offset, target,
int_output_data, bias_multiplier_.data<float>()+sample, w_length, K,
int_output_offset);
}
}
return true;
}
// Backward pass for one hierarchy node: propagates the gradient through the
// node's cross-entropy -> softmax -> FC stack. The activations stored by the
// forward pass are read back in reverse order, so int_output_offset (passed
// by reference) is decremented twice by dim_out. dX, dW and db are
// accumulated into (GEMM/GEMV beta == 1), not overwritten.
template <>
void HSoftmaxGradientOp<float, CPUContext>::RunBackwardSingle(const float* X,
    const float* dY, const float* W, int target,
    const float* int_output, float* dX, float* dW, float* db, float* dint_output,
    int dim_in, int dim_out, int& int_output_offset) {
  //Cross entropy
  // dX_entropy is the dX for the cross entropy layer
  float* dX_entropy = dint_output + int_output_offset - dim_out;
  // X_entropy is the X for the cross entropy layer and Y for the softmax layer
  const float* X_entropy = int_output + int_output_offset - dim_out;
  math::Set<float, CPUContext>(dim_out, 0.f, dX_entropy, &context_);
  // Only the target class gets gradient; the clamp guards the division
  // against near-zero probabilities.
  dX_entropy[target] = - (*dY) / std::max(X_entropy[target], kLOG_THRESHOLD());
  int_output_offset -= dim_out;
  //Softmax
  if (scale_.size() != 1) {
    scale_.Resize(1);
  }
  float* scaledata = scale_.mutable_data<float>();
  if (sum_multiplier_.size() != dim_out) {
    sum_multiplier_.Resize(dim_out);
    math::Set<float, CPUContext>(dim_out, 1.f,
                                 sum_multiplier_.mutable_data<float>(),
                                 &context_);
  }
  float* dX_softmax = dint_output + int_output_offset - dim_out;
  context_.CopyFromCPU<float>(dim_out, dX_entropy, dX_softmax);
  // dX_softmax = (dX_entropy - dot(X_entropy, dX_entropy)) * X_entropy,
  // computed as: dot, then rank-1 subtraction via Gemv, then elementwise Mul.
  math::Dot<float, CPUContext>(dim_out, X_entropy, dX_entropy, scaledata,
                               &context_);
  math::Gemv<float, CPUContext>(CblasTrans, 1, dim_out, -1,
      sum_multiplier_.data<float>(), scaledata , 1, dX_softmax, &context_);
  math::Mul<float, CPUContext>(dim_out, dX_softmax, X_entropy, dX_softmax,
                               &context_);
  int_output_offset -= dim_out;
  //FC
  if (bias_multiplier_.size() != 1) {
    // If the helper bias multiplier has not been created, reshape and fill
    // it with 1
    bias_multiplier_.Resize(1);
    math::Set<float, CPUContext>(1, static_cast<float>(1),
        bias_multiplier_.template mutable_data<float>(), &context_);
  }
  // Compute dW and add incrementally
  // dW = dW + dX_softmax'*X
  math::Gemm<float, CPUContext>(CblasTrans, CblasNoTrans, dim_out, dim_in, 1, 1,
      dX_softmax, X, 1, dW, &context_);
  // Compute dB and add incrementally
  // db = db + dX_softmax*bias_multiplier_
  math::Gemv<float, CPUContext>(CblasTrans, 1, dim_out, 1, dX_softmax,
      bias_multiplier_.template data<float>(), 1, db, &context_);
  // Compute dX and add incrementally
  // dX = dX + W'dX_softmax
  math::Gemv<float, CPUContext>(CblasTrans, dim_out, dim_in,
      1, W, dX_softmax, 1, dX, &context_);
}
// Implementation for the CPU context.
// Backward pass of hierarchical softmax: zero-initializes all gradient
// outputs, then walks every sample's hierarchy path in reverse (samples from
// last to first, nodes from leaf to root) so the intermediate activations
// saved by the forward pass are consumed sequentially from the end.
template <>
bool HSoftmaxGradientOp<float, CPUContext>::RunOnDevice() {
  auto& X = Input(0);
  const auto& W = Input(1);
  const auto& b = Input(2);
  auto& label = Input(3);
  auto& intermediate_output = Input(4);
  auto& dY = Input(5);
  auto* dX = Output(0);
  auto* dW = Output(1);
  auto* db = Output(2);
  auto* dX_intermediate_output = Output(3);
  dX->ResizeLike(X);
  dW->ResizeLike(W);
  db->ResizeLike(b);
  dX_intermediate_output->ResizeLike(intermediate_output);
  float* dX_data = dX->template mutable_data<float>();
  float* dW_data = dW->template mutable_data<float>();
  float* db_data = db->template mutable_data<float>();
  float* dOutput_data = dX_intermediate_output->template mutable_data<float>();
  // Gradients are accumulated across samples/nodes, so start from zero.
  math::Set<float, CPUContext>(X.size(), 0.f, dX_data, &context_);
  math::Set<float, CPUContext>(W.size(), 0.f, dW_data, &context_);
  math::Set<float, CPUContext>(b.size(), 0.f, db_data, &context_);
  math::Set<float, CPUContext>(intermediate_output.size(), 0.f, dOutput_data,
                               &context_);
  // Batch size
  int M = X.ndim() > 1 ? X.dim32(0) : 1;
  // Input feature dimension
  int K = X.size() / M;
  const auto* labeldata = label.data<int>();
  auto hierarchy = getHierarchyForLabels(M, labeldata, hierarchy_all_map_);
  // Start one past the end; RunBackwardSingle decrements the offset as it
  // reads activations back.
  int output_offset = getIntermediateOutputSize(labeldata, M, hierarchy);
  // Traverse backward to access intermediate_output generated by HSoftmaxOp
  // sequentially in reverse order
  for (int sample = M-1; sample >= 0; sample--) {
    int word_id = labeldata[sample];
    // Bind a reference instead of deep-copying the PathProto per sample
    // (the forward pass already binds a const reference here).
    const PathProto& path = hierarchy[word_id];
    for (auto node = path.path_nodes().rbegin();
         node != path.path_nodes().rend(); node++) {
      int w_offset = node->index();
      int w_length = node->length();
      int target = node->target();
      RunBackwardSingle(X.data<float>() + sample*K, dY.data<float>() + sample,
          W.data<float>() + w_offset*K, target, intermediate_output.data<float>(),
          dX_data + sample*K, dW_data + w_offset*K, db_data + w_offset,
          dOutput_data, K, w_length, output_offset);
    }
  }
  return true;
}
// Implementation for the CPU context.
// Expands one hierarchy node during beam search: runs the node's FC+softmax
// over all of its children and word ids, converts the probabilities to
// cumulative negative log scores (lower is better), keeps only candidates
// whose score stays below parent_score + beam, and recurses into surviving
// child nodes, mirroring the kept structure into dst_node.
// Fixes the misspelled "NodeProte" in the enforce messages ("NodeProto").
template <>
bool HSoftmaxSearchOp<float, CPUContext>::pruning(
    const float* X,
    int sample,
    int K,
    const float* W,
    const float* b,
    const NodeProto& src_node,
    NodeProto& dst_node,
    float parent_score,
    float beam) {
  int w_length = src_node.children_size() + src_node.word_ids_size();
  Tensor intermediate_data{CPU};
  // RunForwardSingle stores both the FC output and the softmax output,
  // hence twice the node width.
  intermediate_data.Resize(2 * w_length);
  float* int_output_data = intermediate_data.template mutable_data<float>();
  int int_output_offset = 0;
  int w_offset = src_node.offset();
  // target == -1: no label here, we only need the softmax probabilities.
  RunForwardSingle(
      X + K * sample,
      W + w_offset * K,
      b + w_offset,
      -1,
      int_output_data,
      bias_multiplier_.template data<float>() + sample,
      w_length,
      K,
      int_output_offset);
  float* softmax_output_data = int_output_data + w_length;
  // Convert probabilities to cumulative negative log probabilities.
  for (int i = 0; i < w_length; i++) {
    softmax_output_data[i] =
        -log(std::max(softmax_output_data[i], kLOG_THRESHOLD())) + parent_score;
  }
  // The first children_size() entries score the child nodes...
  for (int i = 0; i < src_node.children_size(); i++) {
    if (softmax_output_data[i] < parent_score + beam) {
      dst_node.add_children();
      int idx = dst_node.children_size() - 1;
      CAFFE_ENFORCE(
          src_node.children(i).has_offset(),
          "HSM Search require the field offset in NodeProto");
      dst_node.mutable_children(idx)->set_offset(src_node.children(i).offset());
      CAFFE_ENFORCE(
          src_node.children(i).has_name(),
          "HSM Search require the field name in NodeProto");
      dst_node.mutable_children(idx)->set_name(src_node.children(i).name());
      dst_node.add_scores(softmax_output_data[i]);
      pruning(
          X,
          sample,
          K,
          W,
          b,
          src_node.children(i),
          *dst_node.mutable_children(idx),
          softmax_output_data[i],
          beam);
    }
  }
  // ...the remaining entries score the leaf word ids.
  for (int i = src_node.children_size(); i < w_length; i++) {
    if (softmax_output_data[i] < parent_score + beam) {
      dst_node.add_word_ids(src_node.word_ids(i - src_node.children_size()));
      dst_node.add_scores(softmax_output_data[i]);
    }
  }
  return true;
}
// Flattens a pruned search tree into (name, score) pairs, depth-first.
// Within a node, scores(0..) first cover its children, then its word ids —
// the same order in which pruning() appended them.
template <>
bool HSoftmaxSearchOp<float, CPUContext>::extractNodes(
    const NodeProto& node,
    std::vector<std::pair<string, float>>& info) {
  int score_idx = 0;
  // Child nodes: identified by their tree names.
  for (const auto& child : node.children()) {
    info.emplace_back(child.name(), node.scores(score_idx));
    ++score_idx;
  }
  // Leaf words: identified by their numeric word ids.
  for (const int word_id : node.word_ids()) {
    info.emplace_back(caffe2::to_string(word_id), node.scores(score_idx));
    ++score_idx;
  }
  // Recurse after this node's own entries so parents precede descendants.
  for (const auto& child : node.children()) {
    extractNodes(child, info);
  }
  return true;
}
// Implementation for the CPU context.
// Beam-searches the trained hierarchy for every sample and emits the top_n_
// best candidates (names and scores; scores are cumulative negative log
// probabilities, so lower is better) per sample.
template <>
bool HSoftmaxSearchOp<float, CPUContext>::RunOnDevice() {
  auto& X = Input(0);
  const auto& W = Input(1);
  const auto& b = Input(2);
  auto* Y_names = Output(0);
  auto* Y_scores = Output(1);
  // Batch size
  int M = X.ndim() > 1 ? X.dim32(0) : 1;
  // Input feature dimension
  int K = X.size() / M;
  CAFFE_ENFORCE(W.ndim() == 2, "Weight must be a matrix."); // N*K
  CAFFE_ENFORCE(b.ndim() == 1, "Bias must be a vector."); // N
  CAFFE_ENFORCE(K == W.size() / (W.dim32(0)), "feature dimension mismatch.");
  // Sum of output dimensions of all hierarchy nodes
  int N = W.dim32(0);
  CAFFE_ENFORCE(N == b.dim32(0), "mismatch between Weight and Bias.");
  Y_names->Resize(M, top_n_);
  Y_scores->Resize(M, top_n_);
  if (bias_multiplier_.size() != M) {
    bias_multiplier_.Resize(M);
    math::Set<float, CPUContext>(
        M,
        static_cast<float>(1),
        bias_multiplier_.mutable_data<float>(),
        &context_);
  }
  for (int sample = 0; sample < M; ++sample) {
    CAFFE_ENFORCE(
        tree_.root_node().has_offset(),
        "HSM Search require the field offset in NodeProto");
    CAFFE_ENFORCE(
        tree_.root_node().has_name(),
        "HSM Search require the field name in NodeProto");
    NodeProto dst_node;
    dst_node.set_offset(tree_.root_node().offset());
    dst_node.set_name(tree_.root_node().name());
    // Prune the tree with beam search, collecting surviving nodes/leaves.
    pruning(
        X.data<float>(),
        sample,
        K,
        W.data<float>(),
        b.data<float>(),
        tree_.root_node(),
        dst_node,
        0,
        beam_);
    std::vector<std::pair<string, float>> info;
    extractNodes(dst_node, info);
    // Move the best min(top_n_, info.size()) candidates, sorted by ascending
    // score, to the front. Fix: the previous middle bound of
    // `info.size() - 1` left the last element unsorted whenever
    // top_n_ >= info.size().
    const size_t sort_count = std::min<size_t>(top_n_, info.size());
    std::partial_sort(
        info.begin(),
        info.begin() + sort_count,
        info.end(),
        [](const std::pair<string, float>& a,
           const std::pair<string, float>& b) { return a.second < b.second; });
    auto* y_name_data =
        Y_names->template mutable_data<string>() + sample * top_n_;
    auto* y_score_data =
        Y_scores->template mutable_data<float>() + sample * top_n_;
    // Pad with score 0 (names stay default-constructed/empty) when fewer
    // than top_n_ candidates survived the pruning.
    for (int i = 0; i < top_n_; i++) {
      if (i < static_cast<int>(info.size())) {
        y_name_data[i] = info[i].first;
        y_score_data[i] = info[i].second;
      } else {
        y_score_data[i] = 0;
      }
    }
  }
  return true;
}
// Builds a Huffman-coding hierarchy over the labels observed in the input
// vector: counts label frequencies, constructs a Huffman tree bottom-up with
// a priority queue, then serializes the result as a TreeProto string into
// the single-element output blob.
template <typename T, class Context>
bool HuffmanTreeHierarchyOp<T, Context>::RunOnDevice() {
  const auto& Y = Input(0);
  auto treeOutput = Output(0);
  CAFFE_ENFORCE_EQ(Y.ndim(), 1, "Input labels must be a vector.");
  const auto y_data = Y.template data<T>();
  treeOutput->Resize(1);
  // Histogram of label occurrences; these counts are the Huffman weights.
  std::vector<int> labelCounts;
  labelCounts.resize(num_classes_, 0);
  for (int i = 0; i < Y.dim32(0); ++i) {
    // Labels are in range [0, num_classes]
    const int label_index = y_data[i];
    CAFFE_ENFORCE_LT(
        label_index,
        num_classes_,
        "Found an input label ",
        label_index,
        " not in range [",
        0,
        ",",
        num_classes_,
        "]");
    labelCounts[label_index]++;
  }
  // Queue ordered by NodeComparator (defined elsewhere; presumably smallest
  // count first, as standard Huffman construction requires — confirm there).
  std::priority_queue<Node, std::vector<Node>, NodeComparator> nodes;
  // Flat array of all tree nodes; children reference parents' entries by
  // index into this array.
  std::vector<Node> huffmanTree;
  // labelIndices[label] = index of that label's leaf within huffmanTree.
  std::vector<int> labelIndices;
  labelIndices.resize(num_classes_);
  int current_node_index = 0;  // (unused)
  // Seed the queue with one leaf per class.
  for (int i = 0; i < num_classes_; ++i) {
    Node node(i, labelCounts[i]);
    nodes.push(node);
  }
  // Extract node with minimum count and insert it in the tree array.
  auto get_next_node = [&nodes, &huffmanTree, &labelIndices]() {
    auto node = nodes.top();
    int node_index = huffmanTree.size();
    if (node.label != -1) {
      labelIndices[node.label] = node_index;
    }
    nodes.pop();
    huffmanTree.push_back(node);
    return std::pair<int, Node>(node_index, node);
  };
  // Merge two nodes and insert the results in the queue.
  // label -1 marks the merged node as internal (no class of its own).
  auto merge_nodes = [&nodes](
      const std::pair<int, Node>& node_l, const std::pair<int, Node>& node_r) {
    Node node(-1, node_l.second.count + node_r.second.count);
    node.left_ch_index = node_l.first;
    node.right_ch_index = node_r.first;
    nodes.push(node);
  };
  // Main loop for bottom-up huffman tree construction. The final merged
  // node drains the queue, so the root ends up last in huffmanTree.
  while (!nodes.empty()) {
    auto lNode = get_next_node();
    if (!nodes.empty()) {
      auto rNode = get_next_node();
      merge_nodes(lNode, rNode);
    }
  }
  // A node is a leaf iff it has no children.
  auto is_leaf_node = [&huffmanTree](const int node_index) {
    return huffmanTree[node_index].left_ch_index == -1 &&
        huffmanTree[node_index].right_ch_index == -1;
  };
  auto get_node_label = [&huffmanTree](const int node_index) {
    return huffmanTree[node_index].label;
  };
  // Build huffman tree.
  // current_offset assigns each internal node a slot in the stacked weight
  // matrix consumed by HSoftmaxOp; internal children reserve two rows, a
  // right leaf advances the offset by one.
  // NOTE(review): verify this offset layout against HSoftmaxOp's expectations.
  int current_offset = 0;
  std::function<void(int, NodeProto*)> build_tree = [&](
      const int node_index, NodeProto* node) {
    if (is_leaf_node(node_index) || node_index == -1) {
      return;
    }
    const int left_ch_index = huffmanTree[node_index].left_ch_index;
    const int right_ch_index = huffmanTree[node_index].right_ch_index;
    if (left_ch_index != -1) {
      if (is_leaf_node(left_ch_index)) {
        node->add_word_ids(get_node_label(left_ch_index));
      } else {
        auto* ch_node = node->add_children();
        ch_node->set_offset(current_offset);
        current_offset += 2;
        build_tree(left_ch_index, ch_node);
      }
    }
    if (right_ch_index != -1) {
      if (is_leaf_node(right_ch_index)) {
        node->add_word_ids(get_node_label(right_ch_index));
        current_offset++;
      } else {
        auto* ch_node = node->add_children();
        ch_node->set_offset(current_offset);
        current_offset += 2;
        build_tree(right_ch_index, ch_node);
      }
    }
  };
  // The last element inserted in the tree is the root.
  const int rootNodeIndex = huffmanTree.size() - 1;
  NodeProto rootNode;
  rootNode.set_offset(current_offset);
  current_offset += 2;
  build_tree(rootNodeIndex, &rootNode);
  TreeProto treeProto;
  *treeProto.mutable_root_node() = rootNode;
  // Serialize into the output blob's single string element.
  treeProto.SerializeToString(treeOutput->template mutable_data<string>());
  return true;
}
namespace {
// CPU registrations for the hierarchical-softmax family of operators.
REGISTER_CPU_OPERATOR(HSoftmax, HSoftmaxOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(HSoftmaxGradient,
                      HSoftmaxGradientOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(HSoftmaxSearch, HSoftmaxSearchOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
    HuffmanTreeHierarchy,
    HuffmanTreeHierarchyOp<int64_t, CPUContext>);
OPERATOR_SCHEMA(HSoftmax)
    .NumInputs(4)
    .NumOutputs(2)
    .SetDoc(R"DOC(
Hierarchical softmax is an operator which approximates the softmax operator
while giving significant training speed gains and reasonably comparable
performance. In this operator, instead of calculating the probabilities of all
the classes, we calculate the probability of each step in the path from root to
the target word in the hierarchy.
The operator takes a 2-D tensor (Tensor) containing a batch of layers, a
set of parameters represented by the weight matrix and bias terms, and a 1-D
tensor (Tensor) holding labels, or the indices of the target class. The
hierarchy has to be specified as an argument to the operator.
The operator returns a 1-D tensor holding the computed log probability of the
target class and a 2-D tensor of intermediate outputs (from the weight matrix
and softmax from each step in the path from root to target class) which will be
used by the gradient operator to compute gradients for all samples in the batch.
)DOC")
    .Arg(
        "hierarchy",
        "Serialized HierarchyProto string containing list of "
        "vocabulary words and their paths from root of hierarchy to the leaf")
    .Input(0, "X", "Input data from previous layer")
    .Input(
        1,
        "W",
        "2D blob containing 'stacked' fully connected weight "
        "matrices. Each node in the hierarchy contributes one FC weight matrix if "
        "it has children nodes. Dimension is N*D, D is input dimension of data (X), "
        "N is sum of all output dimensions, or total number of nodes (excl root)")
    .Input(2, "b", "1D blob with N parameters")
    .Input(3, "labels", "int word_id of the target word")
    .Output(0, "Y", "1-D of log probability outputs, one per sample")
    .Output(
        1,
        "intermediate_output",
        "Extra blob to store the intermediate "
        "FC and softmax outputs for each node in the hierarchical path of a word. "
        "The outputs from samples are stored in consecutive blocks in the forward "
        "pass and are used in reverse order in the backward gradientOp pass");
OPERATOR_SCHEMA(HSoftmaxGradient).NumInputs(6).NumOutputs(4);
// Gradient wiring: the backward op re-reads the forward inputs plus the
// saved intermediate activations and dY.
class GetHSoftmaxGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "HSoftmaxGradient", "",
        //X, W, b, label, intermediate output, dY
        vector<string>{I(0), I(1), I(2), I(3), O(1), GO(0)},
        //dX, dW, db, dintermediate_output
        vector<string>{GI(0), GI(1), GI(2), GO(1)});
  }
};
REGISTER_GRADIENT(HSoftmax, GetHSoftmaxGradient);
OPERATOR_SCHEMA(HSoftmaxSearch)
    .NumInputs(3)
    .NumOutputs(2)
    .SetDoc(R"DOC(
HSoftmaxSearch is an operator to generate the most possible paths given a
well-trained model and input vector. Greedy algorithm is used for pruning the
search tree.
)DOC")
    .Arg(
        "tree",
        // Typo fixes in the user-facing docs below: "intermidate" -> "intermediate",
        // "puls" -> "plus", "traiend" -> "trained".
        "Serialized TreeProto string containing a tree "
        "including all intermediate nodes and leafs. All nodes must have names "
        "for correct outputs")
    .Arg(
        "beam",
        "beam used for pruning tree. The pruning algorithm is that "
        "only children, whose score is smaller than parent's score plus beam, "
        "will be propagated. ")
    .Arg("topN", "Number of nodes in outputs")
    .Input(0, "X", "Input data from previous layer")
    .Input(1, "W", "The matrix trained from Softmax Ops")
    .Input(2, "b", "The bias trained from Softmax Ops")
    .Output(
        0,
        "Y_names",
        "The name of selected nodes and leafs. "
        "For nodes, it will be the name defined in the tree. "
        "For leafs, it will be the index of the word in the tree.")
    .Output(1, "Y_scores", "The corresponding scores of Y_names");
SHOULD_NOT_DO_GRADIENT(HSoftmaxSearch);
OPERATOR_SCHEMA(HuffmanTreeHierarchy)
    .NumInputs(1)
    .NumOutputs(1)
    .SetDoc(R"DOC(
HuffmanTreeHierarchy is an operator to generate huffman tree hierarchy given
the input labels. It returns the tree as serialized HierarchyProto
)DOC")
    .Arg("num_classes", "The number of classes used to build the hierarchy.")
    .Input(0, "Labels", "The labels vector")
    .Output(0, "Hierarch", "Huffman coding hierarchy of the labels");
SHOULD_NOT_DO_GRADIENT(HuffmanTreeHierarchyOp);
} // namespace
} // namespace caffe2
|
/* file: svm_train_v1.cpp */
/*******************************************************************************
* Copyright 2014-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*
//++
// Implementation of svm algorithm and types methods.
//--
*/
#include "algorithms/svm/svm_train_types.h"
#include "src/services/serialization_utils.h"
#include "src/services/daal_strings.h"
using namespace daal::data_management;
using namespace daal::services;
namespace daal
{
namespace algorithms
{
namespace svm
{
namespace interface1
{
/**
 * Validates the SVM training parameters.
 * Runs the base classifier parameter checks first, then verifies each
 * SVM-specific parameter in turn; the first violation found is returned
 * and checking stops.
 */
services::Status Parameter::check() const
{
    services::Status status;
    DAAL_CHECK_STATUS(status, classifier::interface1::Parameter::check());

    /* Penalty bound C must be positive */
    if (C <= 0)
    {
        return services::Status(services::Error::create(services::ErrorIncorrectParameter, services::ParameterName, cBoundStr()));
    }
    /* Accuracy threshold must lie strictly inside (0, 1) */
    if (accuracyThreshold <= 0 || accuracyThreshold >= 1)
    {
        return services::Status(services::Error::create(services::ErrorIncorrectParameter, services::ParameterName, accuracyThresholdStr()));
    }
    /* Tau must be positive */
    if (tau <= 0)
    {
        return services::Status(services::Error::create(services::ErrorIncorrectParameter, services::ParameterName, tauStr()));
    }
    /* At least one iteration is required */
    if (maxIterations == 0)
    {
        return services::Status(services::Error::create(services::ErrorIncorrectParameter, services::ParameterName, maxIterationsStr()));
    }
    /* A kernel function instance must be supplied */
    if (!kernel.get())
    {
        return services::Status(services::Error::create(services::ErrorNullAuxiliaryAlgorithm, services::ParameterName, kernelFunctionStr()));
    }
    /* Shrinking step must be non-zero */
    if (shrinkingStep == 0)
    {
        return services::Status(services::Error::create(services::ErrorIncorrectParameter, services::ParameterName, shrinkingStepStr()));
    }
    return status;
}
} // namespace interface1
} // namespace svm
} // namespace algorithms
} // namespace daal
|
/* Copyright 2015-2017 Philippe Tillet
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <map>
#include <algorithm>
#include <sstream>
#include <cstring>
#include <memory>
#include "triton/driver/device.h"
#include "triton/driver/context.h"
#include "triton/codegen/target.h"
namespace triton
{
namespace driver
{
/* ------------------------ */
// Host //
/* ------------------------ */
// The host backend lowers through the generic CPU code-generation target.
std::unique_ptr<codegen::target> host_device::make_target() const {
  return std::unique_ptr<codegen::target>(new codegen::cpu_target());
}
/* ------------------------ */
// CUDA //
/* ------------------------ */
// information query
// Fetches a single integer device attribute through the CUDA driver API.
template<CUdevice_attribute attr>
int cu_device::cuGetInfo() const{
  int value;
  dispatch::cuDeviceGetAttribute(&value, attr, *cu_);
  return value;
}
// convert to nvml
// Returns the NVML handle for this device, caching it by PCI bus id.
// Fix: the cache map must be `static` — previously it was an ordinary local,
// recreated empty on every call, so the lookup never hit and a new NVML
// handle was requested each time.
// NOTE(review): the static map is not synchronized — confirm callers are
// single-threaded or add a mutex.
nvmlDevice_t cu_device::nvml_device() const{
  static std::map<std::string, nvmlDevice_t> cache;
  const std::string key = pci_bus_id();
  auto it = cache.find(key);
  if(it == cache.end()){
    nvmlDevice_t device;
    dispatch::nvmlDeviceGetHandleByPciBusId_v2(key.c_str(), &device);
    it = cache.insert(std::make_pair(key, device)).first;
  }
  return it->second;
}
// number of address bits
size_t cu_device::address_bits() const{
  return 8u * sizeof(size_t);
}
// name
std::string cu_device::name() const {
  char buffer[128];
  dispatch::cuDeviceGetName(buffer, sizeof(buffer), *cu_);
  return std::string(buffer);
}
// PCI bus ID
std::string cu_device::pci_bus_id() const{
  char buffer[128];
  dispatch::cuDeviceGetPCIBusId(buffer, sizeof(buffer), *cu_);
  return std::string(buffer);
}
// force the device to be interpreted as a particular cc
void cu_device::interpret_as(int cc){
  interpreted_as_.reset(new int(cc));
}
// compute capability, encoded as major*10 + minor (e.g. 75 for sm_75)
int cu_device::compute_capability() const {
  // A manual override installed via interpret_as() wins over the HW query.
  if(interpreted_as_)
    return *interpreted_as_;
  const int cc_major = cuGetInfo<CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR>();
  const int cc_minor = cuGetInfo<CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR>();
  return cc_major * 10 + cc_minor;
}
// maximum number of threads per block
size_t cu_device::max_threads_per_block() const {
  return cuGetInfo<CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK>();
}
// maximum amount of shared memory per block
size_t cu_device::max_shared_memory() const {
  return cuGetInfo<CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK>();
}
// warp size
size_t cu_device::warp_size() const {
  return cuGetInfo<CU_DEVICE_ATTRIBUTE_WARP_SIZE>();
}
// maximum block dimensions (x, y, z)
std::vector<size_t> cu_device::max_block_dim() const {
  std::vector<size_t> dims(3);
  dims[0] = cuGetInfo<CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X>();
  dims[1] = cuGetInfo<CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y>();
  dims[2] = cuGetInfo<CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z>();
  return dims;
}
// current SM clock
size_t cu_device::current_sm_clock() const{
  unsigned int clock;
  dispatch::nvmlDeviceGetClockInfo(nvml_device(), NVML_CLOCK_SM, &clock);
  return clock;
}
// max SM clock
size_t cu_device::max_sm_clock() const{
  unsigned int clock;
  dispatch::nvmlDeviceGetMaxClockInfo(nvml_device(), NVML_CLOCK_SM, &clock);
  return clock;
}
// current memory clock
size_t cu_device::current_mem_clock() const{
  unsigned int clock;
  dispatch::nvmlDeviceGetClockInfo(nvml_device(), NVML_CLOCK_MEM, &clock);
  return clock;
}
// max memory clock
size_t cu_device::max_mem_clock() const{
  unsigned int clock;
  dispatch::nvmlDeviceGetMaxClockInfo(nvml_device(), NVML_CLOCK_MEM, &clock);
  return clock;
}
// pin both application clocks (memory and SM) to their maximum values
void cu_device::set_max_clock() {
  dispatch::nvmlDeviceSetApplicationsClocks(nvml_device(), max_mem_clock(), max_sm_clock());
}
// print infos
std::string cu_device::infos() const{
std::ostringstream oss;
std::vector<size_t> max_wi_sizes = max_block_dim();
oss << "Platform: CUDA" << std::endl;
oss << "Name: " << name() << std::endl;
oss << "Maximum total work-group size: " << max_threads_per_block() << std::endl;
oss << "Maximum individual work-group sizes: " << max_wi_sizes[0] << ", " << max_wi_sizes[1] << ", " << max_wi_sizes[2] << std::endl;
oss << "Local memory size: " << max_shared_memory() << std::endl;
return oss.str();
}
// target
std::unique_ptr<codegen::target> cu_device::make_target() const {
return std::unique_ptr<codegen::nvidia_cu_target>(new codegen::nvidia_cu_target(compute_capability()));
}
}
}
|
//@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2014) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Kokkos is licensed under 3-clause BSD terms of use:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
//
// ************************************************************************
//@HEADER
// Explicit template instantiation (ETI) unit: pre-instantiates ViewCopy and
// ViewFill for rank-8 int64_t views on the HPX backend, so these heavy
// templates are compiled once here instead of in every client TU.
#define KOKKOS_IMPL_COMPILING_LIBRARY true
#include <Kokkos_Core.hpp>
namespace Kokkos {
namespace Impl {
// View copies across the LayoutRight/LayoutLeft/LayoutStride combinations.
KOKKOS_IMPL_VIEWCOPY_ETI_INST(int64_t********, LayoutRight, LayoutRight,
                              Experimental::HPX, int)
KOKKOS_IMPL_VIEWCOPY_ETI_INST(int64_t********, LayoutRight, LayoutLeft,
                              Experimental::HPX, int)
KOKKOS_IMPL_VIEWCOPY_ETI_INST(int64_t********, LayoutRight, LayoutStride,
                              Experimental::HPX, int)
// Scalar fill for LayoutRight views.
KOKKOS_IMPL_VIEWFILL_ETI_INST(int64_t********, LayoutRight, Experimental::HPX,
                              int)
} // namespace Impl
} // namespace Kokkos
|
// utility.cc
// Debugging routines. Allows users to control whether to
// print DEBUG statements, based on a command line argument.
//
// Copyright (c) 1992-1993 The Regents of the University of California.
// All rights reserved. See copyright.h for copyright notice and limitation
// of liability and disclaimer of warranty provisions.
#include "copyright.h"
#include "utility.h"
// this seems to be dependent on how the compiler is configured.
// if you have problems with va_start, try both of these alternatives
#ifdef HOST_SNAKE
#include <stdarg.h>
#else
#ifdef HOST_SPARC
#include <stdarg.h>
#else
#include "stdarg.h"
#endif
#endif
static char *enableFlags = NULL; // controls which DEBUG messages are printed
//----------------------------------------------------------------------
// DebugInit
// Initialize so that only DEBUG messages with a flag in flagList
// will be printed.
//
// If the flag is "+", we enable all DEBUG messages.
//
// "flagList" is a string of characters for whose DEBUG messages are
// to be enabled.
//----------------------------------------------------------------------
void
DebugInit(char *flagList)
{
    // Remember which debug flags are enabled. Only the pointer is stored,
    // so the caller's string must stay alive for as long as DEBUG is used.
    enableFlags = flagList;
}
//----------------------------------------------------------------------
// DebugIsEnabled
// Return TRUE if DEBUG messages with "flag" are to be printed.
//----------------------------------------------------------------------
bool
DebugIsEnabled(char flag)
{
    // No flags registered at all: debugging is completely off.
    if (enableFlags == NULL)
        return FALSE;
    // A flag is enabled if it was listed explicitly, or if the wildcard
    // '+' (enable everything) was given.
    return (strchr(enableFlags, flag) != 0)
        || (strchr(enableFlags, '+') != 0);
}
//----------------------------------------------------------------------
// DEBUG
// Print a debug message, if flag is enabled. Like printf,
// only with an extra argument on the front.
//----------------------------------------------------------------------
void
DEBUG(char flag, char *format, ...)
{
    // Bail out early when this flag's messages are suppressed.
    if (!DebugIsEnabled(flag))
        return;

    va_list args;
    // You will get an unused variable message here -- ignore it.
    va_start(args, format);
    vfprintf(stdout, format, args);
    va_end(args);
    fflush(stdout);     // flush so debug output appears immediately
}
// Print a colored message to stdout, printf-style.
// "color" selects an ANSI escape prefix: 1 = red, 2 = blue, 3 = green,
// 0 or anything else = yellow. The color is reset after the message.
// Fix: the original strcat'ed into an uninitialized pointer
// (undefined behavior / crash); the string is now built in a bounded
// local buffer.
void echo(int color, char *format, ...)
{
    const char *prefix;
    switch (color)
    {
      case 1:
        prefix = "\033[91m";
        break;
      case 2:
        prefix = "\033[01;34m";
        break;
      case 3:
        prefix = "\033[1;92m";
        break;
      case 0:
      default:
        prefix = "\033[93m";
        break;
    }

    // Assemble "<prefix><format><reset>" safely; very long format strings
    // are truncated rather than overflowing the buffer.
    char format_next[1024];
    format_next[0] = '\0';
    strncat(format_next, prefix, sizeof(format_next) - 1);
    strncat(format_next, format, sizeof(format_next) - strlen(format_next) - 1);
    strncat(format_next, "\033[0m", sizeof(format_next) - strlen(format_next) - 1);

    va_list ap;
    // You will get an unused variable message here -- ignore it.
    va_start(ap, format);
    vfprintf(stdout, format_next, ap);
    va_end(ap);
    fflush(stdout);
}
|
// Copyright 2014 BVLC and contributors.
#include <stdint.h>
#include <fcntl.h>
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/io/coded_stream.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <algorithm>
#include <string>
#include <vector>
#include <fstream> // NOLINT(readability/streams)
#include "caffe/common.hpp"
#include "caffe/util/io.hpp"
#include "caffe/proto/caffe.pb.h"
using std::fstream;
using std::ios;
using std::max;
using std::string;
using google::protobuf::io::FileInputStream;
using google::protobuf::io::FileOutputStream;
using google::protobuf::io::ZeroCopyInputStream;
using google::protobuf::io::CodedInputStream;
using google::protobuf::io::ZeroCopyOutputStream;
using google::protobuf::io::CodedOutputStream;
using google::protobuf::Message;
namespace caffe {
bool ReadProtoFromTextFile(const char* filename, Message* proto) {
int fd = open(filename, O_RDONLY);
CHECK_NE(fd, -1) << "File not found: " << filename;
FileInputStream* input = new FileInputStream(fd);
bool success = google::protobuf::TextFormat::Parse(input, proto);
delete input;
close(fd);
return success;
}
void WriteProtoToTextFile(const Message& proto, const char* filename) {
int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644);
FileOutputStream* output = new FileOutputStream(fd);
CHECK(google::protobuf::TextFormat::Print(proto, output));
delete output;
close(fd);
}
bool ReadProtoFromBinaryFile(const char* filename, Message* proto) {
int fd = open(filename, O_RDONLY);
CHECK_NE(fd, -1) << "File not found: " << filename;
ZeroCopyInputStream* raw_input = new FileInputStream(fd);
CodedInputStream* coded_input = new CodedInputStream(raw_input);
coded_input->SetTotalBytesLimit(1073741824, 536870912);
bool success = proto->ParseFromCodedStream(coded_input);
delete coded_input;
delete raw_input;
close(fd);
return success;
}
void WriteProtoToBinaryFile(const Message& proto, const char* filename) {
fstream output(filename, ios::out | ios::trunc | ios::binary);
CHECK(proto.SerializeToOstream(&output));
}
// Loads an image from `filename` into `datum` (channels-first byte data),
// optionally resizing it to height x width and converting to grayscale.
// Returns false (with a logged error) if the image cannot be read.
// Fix: cv::Size takes (width, height); the old call passed (height, width),
// transposing the target dimensions for non-square sizes. Also checks the
// image loaded successfully before resizing (cv::resize on an empty Mat
// throws before the old `!cv_img.data` check was reached).
bool ReadImageToDatum(const string& filename, const int label,
    const int height, const int width, const bool is_color, Datum* datum) {
  cv::Mat cv_img;
  int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR :
                      CV_LOAD_IMAGE_GRAYSCALE);
  if (height > 0 && width > 0) {
    cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag);
    if (!cv_img_origin.data) {
      LOG(ERROR) << "Could not open or find file " << filename;
      return false;
    }
    cv::resize(cv_img_origin, cv_img, cv::Size(width, height));
  } else {
    cv_img = cv::imread(filename, cv_read_flag);
  }
  if (!cv_img.data) {
    LOG(ERROR) << "Could not open or find file " << filename;
    return false;
  }
  int num_channels = (is_color ? 3 : 1);
  datum->set_channels(num_channels);
  datum->set_height(cv_img.rows);
  datum->set_width(cv_img.cols);
  datum->set_label(label);
  datum->clear_data();
  datum->clear_float_data();
  string* datum_string = datum->mutable_data();
  if (is_color) {
    // Store channel-major (c, h, w) as Caffe expects.
    for (int c = 0; c < num_channels; ++c) {
      for (int h = 0; h < cv_img.rows; ++h) {
        for (int w = 0; w < cv_img.cols; ++w) {
          datum_string->push_back(
              static_cast<char>(cv_img.at<cv::Vec3b>(h, w)[c]));
        }
      }
    }
  } else {  // Faster than repeatedly testing is_color for each pixel w/i loop
    for (int h = 0; h < cv_img.rows; ++h) {
      for (int w = 0; w < cv_img.cols; ++w) {
        datum_string->push_back(
            static_cast<char>(cv_img.at<uchar>(h, w)));
      }
    }
  }
  return true;
}
// Verifies format of data stored in HDF5 file and reshapes blob accordingly.
template <typename Dtype>
void hdf5_load_nd_dataset_helper(
    hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
    Blob<Dtype>* blob) {
  // Check that the dataset's rank lies within [min_dim, max_dim].
  int ndims;
  const herr_t ndims_status =
      H5LTget_dataset_ndims(file_id, dataset_name_, &ndims);
  CHECK_GE(ndims_status, 0) << "Failed to get dataset ndims for " << dataset_name_;
  CHECK_GE(ndims, min_dim);
  CHECK_LE(ndims, max_dim);
  // Check that the element type is floating point (float or double).
  std::vector<hsize_t> dims(ndims);
  H5T_class_t class_;
  const herr_t info_status = H5LTget_dataset_info(
      file_id, dataset_name_, dims.data(), &class_, NULL);
  CHECK_GE(info_status, 0) << "Failed to get dataset info for " << dataset_name_;
  CHECK_EQ(class_, H5T_FLOAT) << "Expected float or double data";
  // Missing trailing dimensions are treated as singleton axes.
  const hsize_t num = dims[0];
  const hsize_t channels = (dims.size() > 1) ? dims[1] : 1;
  const hsize_t height = (dims.size() > 2) ? dims[2] : 1;
  const hsize_t width = (dims.size() > 3) ? dims[3] : 1;
  blob->Reshape(num, channels, height, width);
}
template <>
void hdf5_load_nd_dataset<float>(hid_t file_id, const char* dataset_name_,
    int min_dim, int max_dim, Blob<float>* blob) {
  // Validate dimensions and reshape the blob, then read the raw float data.
  hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob);
  const herr_t read_status = H5LTread_dataset_float(
      file_id, dataset_name_, blob->mutable_cpu_data());
  CHECK_GE(read_status, 0) << "Failed to read float dataset " << dataset_name_;
}
template <>
void hdf5_load_nd_dataset<double>(hid_t file_id, const char* dataset_name_,
    int min_dim, int max_dim, Blob<double>* blob) {
  // Validate dimensions and reshape the blob, then read the raw double data.
  hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob);
  const herr_t read_status = H5LTread_dataset_double(
      file_id, dataset_name_, blob->mutable_cpu_data());
  CHECK_GE(read_status, 0) << "Failed to read double dataset " << dataset_name_;
}
template <>
void hdf5_save_nd_dataset<float>(
    const hid_t file_id, const string dataset_name, const Blob<float>& blob) {
  // Always writes the blob's fixed 4-D (num, channels, height, width) shape.
  const hsize_t dims[HDF5_NUM_DIMS] = {
      static_cast<hsize_t>(blob.num()),
      static_cast<hsize_t>(blob.channels()),
      static_cast<hsize_t>(blob.height()),
      static_cast<hsize_t>(blob.width())};
  const herr_t status = H5LTmake_dataset_float(
      file_id, dataset_name.c_str(), HDF5_NUM_DIMS, dims, blob.cpu_data());
  CHECK_GE(status, 0) << "Failed to make float dataset " << dataset_name;
}
template <>
void hdf5_save_nd_dataset<double>(
    const hid_t file_id, const string dataset_name, const Blob<double>& blob) {
  // Always writes the blob's fixed 4-D (num, channels, height, width) shape.
  const hsize_t dims[HDF5_NUM_DIMS] = {
      static_cast<hsize_t>(blob.num()),
      static_cast<hsize_t>(blob.channels()),
      static_cast<hsize_t>(blob.height()),
      static_cast<hsize_t>(blob.width())};
  const herr_t status = H5LTmake_dataset_double(
      file_id, dataset_name.c_str(), HDF5_NUM_DIMS, dims, blob.cpu_data());
  CHECK_GE(status, 0) << "Failed to make double dataset " << dataset_name;
}
} // namespace caffe
|
/*
* Copyright 2021 OmniSci, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file ResultSetReduction.cpp
* @author Alex Suhan <alex@mapd.com>
* @brief Reduction part of the row set interface.
*
* Copyright (c) 2014 MapD Technologies, Inc. All rights reserved.
*/
#include "DynamicWatchdog.h"
#include "Execute.h"
#include "ResultSet.h"
#include "ResultSetReductionInterpreter.h"
#include "ResultSetReductionJIT.h"
#include "RuntimeFunctions.h"
#include "Shared/SqlTypesLayout.h"
#include "Shared/likely.h"
#include "Shared/thread_count.h"
#include "Shared/threading.h"
#include <llvm/ExecutionEngine/GenericValue.h>
#include <algorithm>
#include <future>
#include <numeric>
extern bool g_enable_dynamic_watchdog;
extern bool g_enable_non_kernel_time_query_interrupt;
namespace {
// Heuristic: only parallelize the reduction for large-enough buffers, where
// the per-thread overhead is amortized.
bool use_multithreaded_reduction(const size_t entry_count) {
  constexpr size_t kMultithreadingThreshold = 100000;
  return entry_count > kMultithreadingThreshold;
}
size_t get_row_qw_count(const QueryMemoryDescriptor& query_mem_desc) {
const auto row_bytes = get_row_bytes(query_mem_desc);
CHECK_EQ(size_t(0), row_bytes % 8);
return row_bytes / 8;
}
// Gathers the group-by key of one entry from a columnar buffer: key
// component i lives at buff[i * entry_count].
std::vector<int64_t> make_key(const int64_t* buff,
                              const size_t entry_count,
                              const size_t key_count) {
  std::vector<int64_t> key;
  key.reserve(key_count);
  for (size_t comp = 0; comp < key_count; ++comp) {
    key.push_back(buff[comp * entry_count]);
  }
  return key;
}
// Copies every aggregate slot of entry `src_entry_idx` in `src_buff` into
// `dst_entry`, honoring the buffer layout described by `query_mem_desc`.
// `dst_entry_count`/`src_entry_count` give the per-column stride (in
// entries) of the destination and source buffers in the columnar layout.
void fill_slots(int64_t* dst_entry,
                const size_t dst_entry_count,
                const int64_t* src_buff,
                const size_t src_entry_idx,
                const size_t src_entry_count,
                const QueryMemoryDescriptor& query_mem_desc) {
  const auto slot_count = query_mem_desc.getBufferColSlotCount();
  const auto key_count = query_mem_desc.getGroupbyColCount();
  if (query_mem_desc.didOutputColumnar()) {
    // Columnar: destination slot i is i * dst_entry_count quadwords in; the
    // source offset is computed by the columnar helper, past the key columns.
    for (size_t i = 0, dst_slot_off = 0; i < slot_count;
         ++i, dst_slot_off += dst_entry_count) {
      dst_entry[dst_slot_off] =
          src_buff[slot_offset_colwise(src_entry_idx, i, key_count, src_entry_count)];
    }
  } else {
    // Row-wise: the slots are contiguous after the key section of the row.
    const auto row_ptr = src_buff + get_row_qw_count(query_mem_desc) * src_entry_idx;
    const auto slot_off_quad = get_slot_off_quad(query_mem_desc);
    for (size_t i = 0; i < slot_count; ++i) {
      dst_entry[i] = row_ptr[slot_off_quad + i];
    }
  }
}
ALWAYS_INLINE
// Marks every 32-bit key component of an entry as empty.
void fill_empty_key_32(int32_t* key_ptr_i32, const size_t key_count) {
  std::fill(key_ptr_i32, key_ptr_i32 + key_count, EMPTY_KEY_32);
}
ALWAYS_INLINE
// Marks every 64-bit key component of an entry as empty.
void fill_empty_key_64(int64_t* key_ptr_i64, const size_t key_count) {
  std::fill(key_ptr_i64, key_ptr_i64 + key_count, EMPTY_KEY_64);
}
// Reads a single signed integer component of `comp_sz` bytes from
// `group_by_buffer` at `index`, sign-extending it to 64 bits.
// Aborts via CHECK on an unsupported width.
inline int64_t get_component(const int8_t* group_by_buffer,
                             const size_t comp_sz,
                             const size_t index = 0) {
  switch (comp_sz) {
    case 1:
      return group_by_buffer[index];
    case 2:
      return reinterpret_cast<const int16_t*>(group_by_buffer)[index];
    case 4:
      return reinterpret_cast<const int32_t*>(group_by_buffer)[index];
    case 8:
      return reinterpret_cast<const int64_t*>(group_by_buffer)[index];
    default:
      CHECK(false);
  }
  return std::numeric_limits<int64_t>::min();
}
// Executes the reduction described by `reduction_code` for entries
// [start_entry_index, end_entry_index) of `that_buff` into `this_buff`,
// preferring the JIT-compiled function pointer and falling back to the
// reduction-IR interpreter when no native code is available.
// Non-zero error codes are translated into exceptions.
void run_reduction_code(const size_t executor_id,
                        const ReductionCode& reduction_code,
                        int8_t* this_buff,
                        const int8_t* that_buff,
                        const int32_t start_entry_index,
                        const int32_t end_entry_index,
                        const int32_t that_entry_count,
                        const void* this_qmd,
                        const void* that_qmd,
                        const void* serialized_varlen_buffer) {
  int err = 0;
  if (reduction_code.func_ptr) {
    // Fast path: native (JIT-compiled) reduction function.
    err = reduction_code.func_ptr(this_buff,
                                  that_buff,
                                  start_entry_index,
                                  end_entry_index,
                                  that_entry_count,
                                  this_qmd,
                                  that_qmd,
                                  serialized_varlen_buffer);
  } else {
    // Fallback: interpret the reduction loop IR with the same arguments.
    auto ret = ReductionInterpreter::run(
        executor_id,
        reduction_code.ir_reduce_loop.get(),
        {ReductionInterpreter::MakeEvalValue(this_buff),
         ReductionInterpreter::MakeEvalValue(that_buff),
         ReductionInterpreter::MakeEvalValue(start_entry_index),
         ReductionInterpreter::MakeEvalValue(end_entry_index),
         ReductionInterpreter::MakeEvalValue(that_entry_count),
         ReductionInterpreter::MakeEvalValue(this_qmd),
         ReductionInterpreter::MakeEvalValue(that_qmd),
         ReductionInterpreter::MakeEvalValue(serialized_varlen_buffer)});
    err = ret.int_val;
  }
  if (err) {
    // Map well-known executor error codes to specific messages; anything
    // else is reported as a watchdog timeout/interrupt.
    if (err == Executor::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES) {
      throw std::runtime_error("Multiple distinct values encountered");
    }
    if (err == Executor::ERR_INTERRUPTED) {
      throw std::runtime_error(
          "Query execution has interrupted during result set reduction");
    }
    throw std::runtime_error(
        "Query execution has exceeded the time limit or was interrupted during result "
        "set reduction");
  }
}
} // namespace
// Marks an entry's key components as empty, dispatching on the
// per-component key width (only 4- and 8-byte keys exist).
void result_set::fill_empty_key(void* key_ptr,
                                const size_t key_count,
                                const size_t key_width) {
  if (key_width == 4) {
    fill_empty_key_32(reinterpret_cast<int32_t*>(key_ptr), key_count);
  } else if (key_width == 8) {
    fill_empty_key_64(reinterpret_cast<int64_t*>(key_ptr), key_count);
  } else {
    CHECK(false);
  }
}
// Driver method for various buffer layouts, actual work is done by reduceOne* methods.
// Reduces the entries of `that` into the buffer of this ResultSetStorage object.
// Dispatch is on the query description type: baseline-hash layouts may have
// been grown (this buffer can hold more entries than `that`) and are reduced
// entry by entry through the hash table, while the remaining layouts are
// positional (entry counts must match). Large inputs are reduced in parallel.
void ResultSetStorage::reduce(const ResultSetStorage& that,
                              const std::vector<std::string>& serialized_varlen_buffer,
                              const ReductionCode& reduction_code,
                              const size_t executor_id) const {
  auto entry_count = query_mem_desc_.getEntryCount();
  CHECK_GT(entry_count, size_t(0));
  // Columnar output is only produced for these layouts.
  if (query_mem_desc_.didOutputColumnar()) {
    CHECK(query_mem_desc_.getQueryDescriptionType() ==
              QueryDescriptionType::GroupByPerfectHash ||
          query_mem_desc_.getQueryDescriptionType() ==
              QueryDescriptionType::GroupByBaselineHash ||
          query_mem_desc_.getQueryDescriptionType() ==
              QueryDescriptionType::NonGroupedAggregate);
  }
  const auto that_entry_count = that.query_mem_desc_.getEntryCount();
  // Baseline hash: this buffer may exceed the source; all other layouts are
  // positional and the counts must match exactly.
  switch (query_mem_desc_.getQueryDescriptionType()) {
    case QueryDescriptionType::GroupByBaselineHash:
      CHECK_GE(entry_count, that_entry_count);
      break;
    default:
      CHECK_EQ(entry_count, that_entry_count);
  }
  auto this_buff = buff_;
  CHECK(this_buff);
  auto that_buff = that.buff_;
  CHECK(that_buff);
  if (query_mem_desc_.getQueryDescriptionType() ==
      QueryDescriptionType::GroupByBaselineHash) {
    if (!serialized_varlen_buffer.empty()) {
      throw std::runtime_error(
          "Projection of variable length targets with baseline hash group by is not yet "
          "supported in Distributed mode");
    }
    if (use_multithreaded_reduction(that_entry_count)) {
      // Parallel baseline reduction over ranges of `that`'s entries; uses
      // generated reduction code when available, else the C++ fallback.
      if (reduction_code.ir_reduce_loop) {
        threading::parallel_for(threading::blocked_range<size_t>(0, that_entry_count),
                                [this,
                                 this_buff,
                                 that_buff,
                                 that_entry_count,
                                 executor_id,
                                 &reduction_code,
                                 &that](auto r) {
                                  run_reduction_code(executor_id,
                                                     reduction_code,
                                                     this_buff,
                                                     that_buff,
                                                     r.begin(),
                                                     r.end(),
                                                     that_entry_count,
                                                     &query_mem_desc_,
                                                     &that.query_mem_desc_,
                                                     nullptr);
                                });
      } else {
        threading::parallel_for(
            threading::blocked_range<size_t>(0, that_entry_count),
            [this, this_buff, that_buff, that_entry_count, &that](auto r) {
              for (size_t entry_idx = r.begin(); entry_idx < r.end(); ++entry_idx) {
                reduceOneEntryBaseline(
                    this_buff, that_buff, entry_idx, that_entry_count, that);
              }
            });
      }
    } else {
      // Single-threaded baseline reduction over the full source range.
      if (reduction_code.ir_reduce_loop) {
        run_reduction_code(executor_id,
                           reduction_code,
                           this_buff,
                           that_buff,
                           0,
                           that_entry_count,
                           that_entry_count,
                           &query_mem_desc_,
                           &that.query_mem_desc_,
                           nullptr);
      } else {
        for (size_t i = 0; i < that_entry_count; ++i) {
          reduceOneEntryBaseline(this_buff, that_buff, i, that_entry_count, that);
        }
      }
    }
    return;
  }
  // Positional layouts (perfect hash / non-grouped aggregate) from here on.
  auto executor = Executor::getExecutorFromMap(executor_id);
  CHECK(executor);
  if (use_multithreaded_reduction(entry_count)) {
    if (query_mem_desc_.didOutputColumnar()) {
      threading::parallel_for(
          threading::blocked_range<size_t>(0, entry_count),
          [this, this_buff, that_buff, &that, &serialized_varlen_buffer, &executor](
              auto r) {
            reduceEntriesNoCollisionsColWise(this_buff,
                                             that_buff,
                                             that,
                                             r.begin(),
                                             r.end(),
                                             serialized_varlen_buffer,
                                             executor.get());
          });
    } else {
      // Row-wise positional reduction requires generated reduction code.
      CHECK(reduction_code.ir_reduce_loop);
      threading::parallel_for(threading::blocked_range<size_t>(0, entry_count),
                              [this,
                               this_buff,
                               that_buff,
                               that_entry_count,
                               executor_id,
                               &reduction_code,
                               &that,
                               &serialized_varlen_buffer](auto r) {
                                run_reduction_code(executor_id,
                                                   reduction_code,
                                                   this_buff,
                                                   that_buff,
                                                   r.begin(),
                                                   r.end(),
                                                   that_entry_count,
                                                   &query_mem_desc_,
                                                   &that.query_mem_desc_,
                                                   &serialized_varlen_buffer);
                              });
    }
  } else {
    if (query_mem_desc_.didOutputColumnar()) {
      reduceEntriesNoCollisionsColWise(this_buff,
                                       that_buff,
                                       that,
                                       0,
                                       query_mem_desc_.getEntryCount(),
                                       serialized_varlen_buffer,
                                       executor.get());
    } else {
      CHECK(reduction_code.ir_reduce_loop);
      run_reduction_code(executor_id,
                         reduction_code,
                         this_buff,
                         that_buff,
                         0,
                         entry_count,
                         that_entry_count,
                         &query_mem_desc_,
                         &that.query_mem_desc_,
                         &serialized_varlen_buffer);
    }
  }
}
namespace {
// Aborts the reduction with an exception if the dynamic watchdog tripped.
ALWAYS_INLINE void check_watchdog() {
  if (!UNLIKELY(dynamic_watchdog())) {
    return;
  }
  // TODO(alex): distinguish between the deadline and interrupt
  throw std::runtime_error(
      "Query execution has exceeded the time limit or was interrupted during result "
      "set reduction");
}
// Sampled watchdog check: only probes the watchdog on every 64th seed value
// to keep the per-entry overhead negligible.
ALWAYS_INLINE void check_watchdog_with_seed(const size_t sample_seed) {
  if ((sample_seed & 0x3F) != 0) {
    return;
  }
  if (UNLIKELY(dynamic_watchdog())) {
    // TODO(alex): distinguish between the deadline and interrupt
    throw std::runtime_error(
        "Query execution has exceeded the time limit or was interrupted during result "
        "set reduction");
  }
}
} // namespace
// Reduces entries [start_index, end_index) of `that_buff` into `this_buff`
// for columnar, collision-free (positional) layouts. Walks the targets
// column by column, copying keys from the right-hand side and reducing each
// occupied entry's slots.
void ResultSetStorage::reduceEntriesNoCollisionsColWise(
    int8_t* this_buff,
    const int8_t* that_buff,
    const ResultSetStorage& that,
    const size_t start_index,
    const size_t end_index,
    const std::vector<std::string>& serialized_varlen_buffer,
    const Executor* executor) const {
  // TODO(adb / saman): Support column wise output when serializing distributed agg
  // functions
  CHECK(serialized_varlen_buffer.empty());
  const auto& col_slot_context = query_mem_desc_.getColSlotContext();
  auto this_crt_col_ptr = get_cols_ptr(this_buff, query_mem_desc_);
  auto that_crt_col_ptr = get_cols_ptr(that_buff, query_mem_desc_);
  for (size_t target_idx = 0; target_idx < targets_.size(); ++target_idx) {
    const auto& agg_info = targets_[target_idx];
    const auto& slots_for_col = col_slot_context.getSlotsForCol(target_idx);
    // AVG and varlen SAMPLE targets occupy two physical slots per value.
    bool two_slot_target{false};
    if (agg_info.is_agg &&
        (agg_info.agg_kind == kAVG ||
         (agg_info.agg_kind == kSAMPLE && agg_info.sql_type.is_varlen()))) {
      // Note that this assumes if one of the slot pairs in a given target is an array,
      // all slot pairs are arrays.
      two_slot_target = true;
    }
    // Honor pending interrupt / watchdog requests between target columns.
    if (UNLIKELY(g_enable_non_kernel_time_query_interrupt && executor &&
                 executor->checkNonKernelTimeInterrupted())) {
      throw std::runtime_error(
          "Query execution was interrupted during result set reduction");
    }
    if (g_enable_dynamic_watchdog) {
      check_watchdog();
    }
    // Step by 2 so that two-slot targets consume both of their slots per
    // iteration.
    for (size_t target_slot_idx = slots_for_col.front();
         target_slot_idx < slots_for_col.back() + 1;
         target_slot_idx += 2) {
      const auto this_next_col_ptr = advance_to_next_columnar_target_buff(
          this_crt_col_ptr, query_mem_desc_, target_slot_idx);
      const auto that_next_col_ptr = advance_to_next_columnar_target_buff(
          that_crt_col_ptr, query_mem_desc_, target_slot_idx);
      for (size_t entry_idx = start_index; entry_idx < end_index; ++entry_idx) {
        // Skip entries the right-hand side never populated.
        if (isEmptyEntryColumnar(entry_idx, that_buff)) {
          continue;
        }
        if (LIKELY(!query_mem_desc_.hasKeylessHash())) {
          // copy the key from right hand side
          copyKeyColWise(entry_idx, this_buff, that_buff);
        }
        auto this_ptr1 =
            this_crt_col_ptr +
            entry_idx * query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx);
        auto that_ptr1 =
            that_crt_col_ptr +
            entry_idx * query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx);
        int8_t* this_ptr2{nullptr};
        const int8_t* that_ptr2{nullptr};
        if (UNLIKELY(two_slot_target)) {
          // Second physical slot (e.g. the count component of AVG).
          this_ptr2 =
              this_next_col_ptr +
              entry_idx * query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx + 1);
          that_ptr2 =
              that_next_col_ptr +
              entry_idx * query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx + 1);
        }
        reduceOneSlot(this_ptr1,
                      this_ptr2,
                      that_ptr1,
                      that_ptr2,
                      agg_info,
                      target_idx,
                      target_slot_idx,
                      target_slot_idx,
                      that,
                      slots_for_col.front(),
                      serialized_varlen_buffer);
      }
      // Advance the column cursors past the slot(s) just reduced.
      this_crt_col_ptr = this_next_col_ptr;
      that_crt_col_ptr = that_next_col_ptr;
      if (UNLIKELY(two_slot_target)) {
        this_crt_col_ptr = advance_to_next_columnar_target_buff(
            this_crt_col_ptr, query_mem_desc_, target_slot_idx + 1);
        that_crt_col_ptr = advance_to_next_columnar_target_buff(
            that_crt_col_ptr, query_mem_desc_, target_slot_idx + 1);
      }
    }
  }
}
/*
 * copy all keys from the columnar prepended group buffer of "that_buff" into
 * "this_buff"
 */
void ResultSetStorage::copyKeyColWise(const size_t entry_idx,
                                      int8_t* this_buff,
                                      const int8_t* that_buff) const {
  CHECK(query_mem_desc_.didOutputColumnar());
  const size_t group_col_count = query_mem_desc_.getGroupbyColCount();
  for (size_t group_idx = 0; group_idx < group_col_count; group_idx++) {
    // Locate this group-key column inside the prepended key section; the
    // offset is identical in both buffers.
    const auto column_offset_bytes =
        query_mem_desc_.getPrependedGroupColOffInBytes(group_idx);
    auto dst_key_ptr = this_buff + column_offset_bytes;
    auto src_key_ptr = that_buff + column_offset_bytes;
    // Copy one key component with the column's declared width.
    switch (query_mem_desc_.groupColWidth(group_idx)) {
      case 1:
        dst_key_ptr[entry_idx] = src_key_ptr[entry_idx];
        break;
      case 2:
        reinterpret_cast<int16_t*>(dst_key_ptr)[entry_idx] =
            reinterpret_cast<const int16_t*>(src_key_ptr)[entry_idx];
        break;
      case 4:
        reinterpret_cast<int32_t*>(dst_key_ptr)[entry_idx] =
            reinterpret_cast<const int32_t*>(src_key_ptr)[entry_idx];
        break;
      case 8:
        reinterpret_cast<int64_t*>(dst_key_ptr)[entry_idx] =
            reinterpret_cast<const int64_t*>(src_key_ptr)[entry_idx];
        break;
      default:
        CHECK(false);
        break;
    }
  }
}
// Rewrites the entries of this ResultSetStorage object to point directly into the
// serialized_varlen_buffer rather than using offsets.
// Row-wise buffers only: varlen SAMPLE targets store an index into
// `serialized_varlen_buffer` in their first slot; it is replaced here by the
// payload pointer (first slot) and its element count (second slot).
void ResultSetStorage::rewriteAggregateBufferOffsets(
    const std::vector<std::string>& serialized_varlen_buffer) const {
  if (serialized_varlen_buffer.empty()) {
    return;
  }
  CHECK(!query_mem_desc_.didOutputColumnar());
  auto entry_count = query_mem_desc_.getEntryCount();
  CHECK_GT(entry_count, size_t(0));
  CHECK(buff_);
  // Row-wise iteration, consider moving to separate function
  for (size_t i = 0; i < entry_count; ++i) {
    // Skip entries that never received a row.
    if (isEmptyEntry(i, buff_)) {
      continue;
    }
    const auto key_bytes = get_key_bytes_rowwise(query_mem_desc_);
    const auto key_bytes_with_padding = align_to_int64(key_bytes);
    auto rowwise_targets_ptr =
        row_ptr_rowwise(buff_, query_mem_desc_, i) + key_bytes_with_padding;
    size_t target_slot_idx = 0;
    for (size_t target_logical_idx = 0; target_logical_idx < targets_.size();
         ++target_logical_idx) {
      const auto& target_info = targets_[target_logical_idx];
      if (target_info.sql_type.is_varlen() && target_info.is_agg) {
        CHECK(target_info.agg_kind == kSAMPLE);
        auto ptr1 = rowwise_targets_ptr;
        auto slot_idx = target_slot_idx;
        auto ptr2 = ptr1 + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
        // First slot currently holds an index into the varlen buffer.
        auto offset = *reinterpret_cast<const int64_t*>(ptr1);
        const auto& elem_ti = target_info.sql_type.get_elem_type();
        // Strings count bytes; arrays count elements of the element type.
        size_t length_to_elems =
            target_info.sql_type.is_string() ? 1 : elem_ti.get_size();
        CHECK_LT(static_cast<size_t>(offset), serialized_varlen_buffer.size());
        const auto& varlen_bytes_str = serialized_varlen_buffer[offset];
        const auto str_ptr = reinterpret_cast<const int8_t*>(varlen_bytes_str.c_str());
        CHECK(ptr1);
        // NOTE(review): cast target is `const int64_t` rather than `int64_t`;
        // presumably a plain pointer-to-integer conversion was intended —
        // confirm and simplify.
        *reinterpret_cast<int64_t*>(ptr1) = reinterpret_cast<const int64_t>(str_ptr);
        CHECK(ptr2);
        // Second slot becomes the element count of the varlen payload.
        *reinterpret_cast<int64_t*>(ptr2) =
            static_cast<int64_t>(varlen_bytes_str.size() / length_to_elems);
      }
      rowwise_targets_ptr = advance_target_ptr_row_wise(
          rowwise_targets_ptr, target_info, target_slot_idx, query_mem_desc_, false);
      target_slot_idx = advance_slot(target_slot_idx, target_info, false);
    }
  }
  return;
}
namespace {
#ifdef _MSC_VER
// Portable 64-bit compare-and-swap used to atomically claim the first key
// component of an empty entry.
#define mapd_cas(address, compare, val) \
  InterlockedCompareExchange(reinterpret_cast<volatile long*>(address), \
                             static_cast<long>(val), \
                             static_cast<long>(compare))
#else
#define mapd_cas(address, compare, val) __sync_val_compare_and_swap(address, compare, val)
#endif
// Finds (or claims) the columnar baseline-hash entry at bucket `h` for
// `key`. Returns {slots pointer, true} if the entry was empty and has just
// been claimed (caller must initialize it), {slots pointer, false} if the
// stored key matches, and {nullptr, true} on a collision with another key.
GroupValueInfo get_matching_group_value_columnar_reduction(int64_t* groups_buffer,
                                                           const uint32_t h,
                                                           const int64_t* key,
                                                           const uint32_t key_qw_count,
                                                           const size_t entry_count) {
  auto off = h;
  // Atomically claim the entry via its first key component.
  const auto old_key = mapd_cas(&groups_buffer[off], EMPTY_KEY_64, *key);
  if (old_key == EMPTY_KEY_64) {
    // We won the slot: write all key components, each strided by
    // entry_count (columnar layout), then return the slots pointer.
    for (size_t i = 0; i < key_qw_count; ++i) {
      groups_buffer[off] = key[i];
      off += entry_count;
    }
    return {&groups_buffer[off], true};
  }
  // Entry already occupied: compare the full key component by component.
  off = h;
  for (size_t i = 0; i < key_qw_count; ++i) {
    if (groups_buffer[off] != key[i]) {
      return {nullptr, true};
    }
    off += entry_count;
  }
  return {&groups_buffer[off], false};
}
#undef mapd_cas
// TODO(alex): fix synchronization when we enable it
// Locates (or claims) the columnar baseline-hash entry for `key` by linear
// probing, starting at the key's hash bucket and wrapping around at most
// once. Returns {nullptr, true} when the table is full.
GroupValueInfo get_group_value_columnar_reduction(
    int64_t* groups_buffer,
    const uint32_t groups_buffer_entry_count,
    const int64_t* key,
    const uint32_t key_qw_count) {
  const uint32_t start_slot =
      key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  uint32_t slot = start_slot;
  do {
    const auto gvi = get_matching_group_value_columnar_reduction(
        groups_buffer, slot, key, key_qw_count, groups_buffer_entry_count);
    if (gvi.first) {
      return gvi;
    }
    slot = (slot + 1) % groups_buffer_entry_count;
  } while (slot != start_slot);
  return {nullptr, true};
}
#ifdef _MSC_VER
// Sequentially-consistent CAS / store / load used to synchronize concurrent
// claiming of row-wise baseline-hash entries across reduction threads.
#define cas_cst(ptr, expected, desired) \
  (InterlockedCompareExchangePointer(reinterpret_cast<void* volatile*>(ptr), \
                                     reinterpret_cast<void*>(&desired), \
                                     expected) == expected)
#define store_cst(ptr, val) \
  InterlockedExchangePointer(reinterpret_cast<void* volatile*>(ptr), \
                             reinterpret_cast<void*>(val))
#define load_cst(ptr) \
  InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), 0, 0)
#else
#define cas_cst(ptr, expected, desired) \
  __atomic_compare_exchange_n( \
      ptr, expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#define store_cst(ptr, val) __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST)
#define load_cst(ptr) __atomic_load_n(ptr, __ATOMIC_SEQ_CST)
#endif
// Finds (or claims) the row-wise baseline-hash entry at bucket `h` for
// `key`, initializing a claimed entry's slots from `that_buff_i64`.
// Protocol: the first key component doubles as a lock word — empty key
// means free, (empty key - 1) means "claimed, initialization in progress";
// the real first key component is published last via store_cst.
// Returns {slots pointer, true} when this call claimed the entry,
// {slots pointer, false} when the stored key matches, {nullptr, true} on a
// collision.
template <typename T = int64_t>
GroupValueInfo get_matching_group_value_reduction(
    int64_t* groups_buffer,
    const uint32_t h,
    const T* key,
    const uint32_t key_count,
    const QueryMemoryDescriptor& query_mem_desc,
    const int64_t* that_buff_i64,
    const size_t that_entry_idx,
    const size_t that_entry_count,
    const uint32_t row_size_quad) {
  auto off = h * row_size_quad;
  T empty_key = get_empty_key<T>();
  T write_pending = get_empty_key<T>() - 1;
  auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
  const auto slot_off_quad = get_slot_off_quad(query_mem_desc);
  const bool success = cas_cst(row_ptr, &empty_key, write_pending);
  if (success) {
    // We claimed the entry: copy the slots and the remaining key
    // components, then publish the first key component to release waiters.
    fill_slots(groups_buffer + off + slot_off_quad,
               query_mem_desc.getEntryCount(),
               that_buff_i64,
               that_entry_idx,
               that_entry_count,
               query_mem_desc);
    if (key_count > 1) {
      memcpy(row_ptr + 1, key + 1, (key_count - 1) * sizeof(T));
    }
    store_cst(row_ptr, *key);
    return {groups_buffer + off + slot_off_quad, true};
  }
  while (load_cst(row_ptr) == write_pending) {
    // spin until the winning thread has finished writing the entire key and the init
    // value
  }
  // Compare the stored key against ours, component by component.
  for (size_t i = 0; i < key_count; ++i) {
    if (load_cst(row_ptr + i) != key[i]) {
      return {nullptr, true};
    }
  }
  return {groups_buffer + off + slot_off_quad, false};
}
#undef load_cst
#undef store_cst
#undef cas_cst
// Width-dispatching wrapper: forwards to the typed implementation matching
// the effective key width (4 or 8 bytes).
inline GroupValueInfo get_matching_group_value_reduction(
    int64_t* groups_buffer,
    const uint32_t h,
    const int64_t* key,
    const uint32_t key_count,
    const size_t key_width,
    const QueryMemoryDescriptor& query_mem_desc,
    const int64_t* that_buff_i64,
    const size_t that_entry_idx,
    const size_t that_entry_count,
    const uint32_t row_size_quad) {
  if (key_width == 4) {
    return get_matching_group_value_reduction(groups_buffer,
                                              h,
                                              reinterpret_cast<const int32_t*>(key),
                                              key_count,
                                              query_mem_desc,
                                              that_buff_i64,
                                              that_entry_idx,
                                              that_entry_count,
                                              row_size_quad);
  }
  if (key_width == 8) {
    return get_matching_group_value_reduction(groups_buffer,
                                              h,
                                              key,
                                              key_count,
                                              query_mem_desc,
                                              that_buff_i64,
                                              that_entry_idx,
                                              that_entry_count,
                                              row_size_quad);
  }
  CHECK(false);
  return {nullptr, true};
}
} // namespace
// Locates (or claims) the row-wise baseline-hash entry for `key` by linear
// probing, starting at the key's hash bucket and wrapping around at most
// once. Returns {nullptr, true} when the table is full.
GroupValueInfo result_set::get_group_value_reduction(
    int64_t* groups_buffer,
    const uint32_t groups_buffer_entry_count,
    const int64_t* key,
    const uint32_t key_count,
    const size_t key_width,
    const QueryMemoryDescriptor& query_mem_desc,
    const int64_t* that_buff_i64,
    const size_t that_entry_idx,
    const size_t that_entry_count,
    const uint32_t row_size_quad) {
  const uint32_t start_slot =
      key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  uint32_t slot = start_slot;
  do {
    const auto gvi = get_matching_group_value_reduction(groups_buffer,
                                                        slot,
                                                        key,
                                                        key_count,
                                                        key_width,
                                                        query_mem_desc,
                                                        that_buff_i64,
                                                        that_entry_idx,
                                                        that_entry_count,
                                                        row_size_quad);
    if (gvi.first) {
      return gvi;
    }
    slot = (slot + 1) % groups_buffer_entry_count;
  } while (slot != start_slot);
  return {nullptr, true};
}
// Reduces entry at position that_entry_idx in that_buff into this_buff. This is
// the baseline layout, so the position in this_buff isn't known to be that_entry_idx.
// The destination entry is located (or created) through the columnar
// baseline hash table; freshly created entries are initialized by copying
// the source slots, existing ones are combined slot by slot.
void ResultSetStorage::reduceOneEntryBaseline(int8_t* this_buff,
                                              const int8_t* that_buff,
                                              const size_t that_entry_idx,
                                              const size_t that_entry_count,
                                              const ResultSetStorage& that) const {
  if (g_enable_dynamic_watchdog) {
    check_watchdog_with_seed(that_entry_idx);
  }
  const auto key_count = query_mem_desc_.getGroupbyColCount();
  // Only columnar baseline-hash layouts with explicit keys reach this path.
  CHECK(query_mem_desc_.getQueryDescriptionType() ==
        QueryDescriptionType::GroupByBaselineHash);
  CHECK(!query_mem_desc_.hasKeylessHash());
  CHECK(query_mem_desc_.didOutputColumnar());
  // NOTE(review): the third argument is didOutputColumnar() (a bool) where
  // an entry count appears expected; for key index 0 the result is
  // that_entry_idx either way — confirm intent.
  const auto key_off =
      key_offset_colwise(that_entry_idx, 0, query_mem_desc_.didOutputColumnar());
  if (isEmptyEntry(that_entry_idx, that_buff)) {
    return;
  }
  auto this_buff_i64 = reinterpret_cast<int64_t*>(this_buff);
  auto that_buff_i64 = reinterpret_cast<const int64_t*>(that_buff);
  // Gather the source key and find/claim the matching destination entry.
  const auto key = make_key(&that_buff_i64[key_off], that_entry_count, key_count);
  auto [this_entry_slots, empty_entry] = get_group_value_columnar_reduction(
      this_buff_i64, query_mem_desc_.getEntryCount(), &key[0], key_count);
  CHECK(this_entry_slots);
  if (empty_entry) {
    // First time this key is seen: initialize the slots from the source.
    fill_slots(this_entry_slots,
               query_mem_desc_.getEntryCount(),
               that_buff_i64,
               that_entry_idx,
               that_entry_count,
               query_mem_desc_);
    return;
  }
  reduceOneEntrySlotsBaseline(
      this_entry_slots, that_buff_i64, that_entry_idx, that_entry_count, that);
}
// Combines all target slots of source entry `that_entry_idx` into the
// already located destination entry `this_entry_slots` (columnar baseline
// layout).
void ResultSetStorage::reduceOneEntrySlotsBaseline(int64_t* this_entry_slots,
                                                   const int64_t* that_buff,
                                                   const size_t that_entry_idx,
                                                   const size_t that_entry_count,
                                                   const ResultSetStorage& that) const {
  CHECK(query_mem_desc_.didOutputColumnar());
  const auto key_count = query_mem_desc_.getGroupbyColCount();
  size_t j = 0;
  size_t init_agg_val_idx = 0;
  for (size_t target_logical_idx = 0; target_logical_idx < targets_.size();
       ++target_logical_idx) {
    const auto& target_info = targets_[target_logical_idx];
    // Columnar offsets: the source slot is strided by the source entry
    // count, the destination by this buffer's entry count.
    const auto that_slot_off = slot_offset_colwise(
        that_entry_idx, init_agg_val_idx, key_count, that_entry_count);
    const auto this_slot_off = init_agg_val_idx * query_mem_desc_.getEntryCount();
    reduceOneSlotBaseline(this_entry_slots,
                          this_slot_off,
                          that_buff,
                          that_entry_count,
                          that_slot_off,
                          target_info,
                          target_logical_idx,
                          j,
                          init_agg_val_idx,
                          that);
    // Advance the slot cursor; targets mapped onto group-by key columns do
    // not consume an aggregate slot.
    if (query_mem_desc_.targetGroupbyIndicesSize() == 0) {
      init_agg_val_idx = advance_slot(init_agg_val_idx, target_info, false);
    } else {
      if (query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
        init_agg_val_idx = advance_slot(init_agg_val_idx, target_info, false);
      }
    }
    j = advance_slot(j, target_info, false);
  }
}
// Reduces a single (possibly two-slot) target value from `that_buff` into
// `this_buff` for the columnar baseline layout, computing the companion
// slot pointers (e.g. AVG counts, varlen SAMPLE lengths) before delegating
// to reduceOneSlot.
void ResultSetStorage::reduceOneSlotBaseline(int64_t* this_buff,
                                             const size_t this_slot,
                                             const int64_t* that_buff,
                                             const size_t that_entry_count,
                                             const size_t that_slot,
                                             const TargetInfo& target_info,
                                             const size_t target_logical_idx,
                                             const size_t target_slot_idx,
                                             const size_t init_agg_val_idx,
                                             const ResultSetStorage& that) const {
  CHECK(query_mem_desc_.didOutputColumnar());
  int8_t* this_ptr2{nullptr};
  const int8_t* that_ptr2{nullptr};
  if (target_info.is_agg &&
      (target_info.agg_kind == kAVG ||
       (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()))) {
    // The second physical slot lives exactly one column (= entry count)
    // past the first in the columnar layout.
    const auto this_count_off = query_mem_desc_.getEntryCount();
    const auto that_count_off = that_entry_count;
    this_ptr2 = reinterpret_cast<int8_t*>(&this_buff[this_slot + this_count_off]);
    that_ptr2 = reinterpret_cast<const int8_t*>(&that_buff[that_slot + that_count_off]);
  }
  reduceOneSlot(reinterpret_cast<int8_t*>(&this_buff[this_slot]),
                this_ptr2,
                reinterpret_cast<const int8_t*>(&that_buff[that_slot]),
                that_ptr2,
                target_info,
                target_logical_idx,
                target_slot_idx,
                init_agg_val_idx,
                that,
                target_slot_idx,  // dummy, for now
                {});
}
// During the reduction of two result sets using the baseline strategy, we first create a
// big enough buffer to hold the entries for both and we move the entries from the first
// into it before doing the reduction as usual (into the first buffer).
// Every non-empty entry is re-hashed into the new, larger table; large
// inputs are split across cpu_threads() workers over contiguous ranges.
template <class KeyType>
void ResultSetStorage::moveEntriesToBuffer(int8_t* new_buff,
                                           const size_t new_entry_count) const {
  CHECK(!query_mem_desc_.hasKeylessHash());
  CHECK_GT(new_entry_count, query_mem_desc_.getEntryCount());
  auto new_buff_i64 = reinterpret_cast<int64_t*>(new_buff);
  const auto key_count = query_mem_desc_.getGroupbyColCount();
  CHECK(QueryDescriptionType::GroupByBaselineHash ==
        query_mem_desc_.getQueryDescriptionType());
  const auto src_buff = reinterpret_cast<const int64_t*>(buff_);
  const auto row_qw_count = get_row_qw_count(query_mem_desc_);
  const auto key_byte_width = query_mem_desc_.getEffectiveKeyWidth();
  if (use_multithreaded_reduction(query_mem_desc_.getEntryCount())) {
    const size_t thread_count = cpu_threads();
    std::vector<std::future<void>> move_threads;
    for (size_t thread_idx = 0; thread_idx < thread_count; ++thread_idx) {
      // Static partition of the source entries across the workers.
      const auto thread_entry_count =
          (query_mem_desc_.getEntryCount() + thread_count - 1) / thread_count;
      const auto start_index = thread_idx * thread_entry_count;
      const auto end_index =
          std::min(start_index + thread_entry_count, query_mem_desc_.getEntryCount());
      move_threads.emplace_back(std::async(
          std::launch::async,
          [this,
           src_buff,
           new_buff_i64,
           new_entry_count,
           start_index,
           end_index,
           key_count,
           row_qw_count,
           key_byte_width] {
            for (size_t entry_idx = start_index; entry_idx < end_index; ++entry_idx) {
              moveOneEntryToBuffer<KeyType>(entry_idx,
                                            new_buff_i64,
                                            new_entry_count,
                                            key_count,
                                            row_qw_count,
                                            src_buff,
                                            key_byte_width);
            }
          }));
    }
    // Wait for every worker to finish first, then get() each future so any
    // stored exception is rethrown.
    for (auto& move_thread : move_threads) {
      move_thread.wait();
    }
    for (auto& move_thread : move_threads) {
      move_thread.get();
    }
  } else {
    for (size_t entry_idx = 0; entry_idx < query_mem_desc_.getEntryCount(); ++entry_idx) {
      moveOneEntryToBuffer<KeyType>(entry_idx,
                                    new_buff_i64,
                                    new_entry_count,
                                    key_count,
                                    row_qw_count,
                                    src_buff,
                                    key_byte_width);
    }
  }
}
// Moves a single source entry into the grown buffer: skips empty entries,
// re-hashes the key into the new table (columnar or row-wise layout), then
// copies the entry's aggregate slots over.
template <class KeyType>
void ResultSetStorage::moveOneEntryToBuffer(const size_t entry_index,
                                            int64_t* new_buff_i64,
                                            const size_t new_entry_count,
                                            const size_t key_count,
                                            const size_t row_qw_count,
                                            const int64_t* src_buff,
                                            const size_t key_byte_width) const {
  const auto key_off =
      query_mem_desc_.didOutputColumnar()
          ? key_offset_colwise(entry_index, 0, query_mem_desc_.getEntryCount())
          : row_qw_count * entry_index;
  const auto key_ptr = reinterpret_cast<const KeyType*>(&src_buff[key_off]);
  // An empty first key component marks an unused entry.
  if (*key_ptr == get_empty_key<KeyType>()) {
    return;
  }
  int64_t* new_entries_ptr{nullptr};
  if (query_mem_desc_.didOutputColumnar()) {
    const auto key =
        make_key(&src_buff[key_off], query_mem_desc_.getEntryCount(), key_count);
    new_entries_ptr =
        get_group_value_columnar(new_buff_i64, new_entry_count, &key[0], key_count);
  } else {
    new_entries_ptr = get_group_value(new_buff_i64,
                                      new_entry_count,
                                      &src_buff[key_off],
                                      key_count,
                                      key_byte_width,
                                      row_qw_count);
  }
  CHECK(new_entries_ptr);
  // Copy all aggregate slots into the freshly claimed entry.
  fill_slots(new_entries_ptr,
             new_entry_count,
             src_buff,
             entry_index,
             query_mem_desc_.getEntryCount(),
             query_mem_desc_);
}
// Initializes the underlying storage buffer, dispatching on the output layout.
void ResultSet::initializeStorage() const {
  if (!query_mem_desc_.didOutputColumnar()) {
    storage_->initializeRowWise();
    return;
  }
  storage_->initializeColWise();
}
// Driver for reductions. Needed because the result of a reduction on the baseline
// layout, which can have collisions, cannot be done in place and something needs
// to take the ownership of the new result set with the bigger underlying buffer.
//
// Returns the result set that accumulated the reduction: either the first
// input, or (for baseline-hash layouts) the manager-owned result set `rs_`.
ResultSet* ResultSetManager::reduce(std::vector<ResultSet*>& result_sets,
                                    const size_t executor_id) {
  CHECK(!result_sets.empty());
  auto result_rs = result_sets.front();
  CHECK(result_rs->storage_);
  auto& first_result = *result_rs->storage_;
  auto result = &first_result;
  const auto row_set_mem_owner = result_rs->row_set_mem_owner_;
  // All inputs must share the same memory owner so their buffers stay alive
  // together for the duration of the reduction.
  for (const auto result_set : result_sets) {
    CHECK_EQ(row_set_mem_owner, result_set->row_set_mem_owner_);
  }
  // Baseline hash layouts can collide, so the reduction cannot happen in
  // place: allocate a new result set sized for the sum of all input entry
  // counts and move the first input's entries into it.
  if (first_result.query_mem_desc_.getQueryDescriptionType() ==
      QueryDescriptionType::GroupByBaselineHash) {
    const auto total_entry_count =
        std::accumulate(result_sets.begin(),
                        result_sets.end(),
                        size_t(0),
                        [](const size_t init, const ResultSet* rs) {
                          return init + rs->query_mem_desc_.getEntryCount();
                        });
    CHECK(total_entry_count);
    auto query_mem_desc = first_result.query_mem_desc_;
    query_mem_desc.setEntryCount(total_entry_count);
    rs_.reset(new ResultSet(first_result.targets_,
                            ExecutorDeviceType::CPU,
                            query_mem_desc,
                            row_set_mem_owner,
                            result_rs->data_mgr_,
                            result_rs->buffer_provider_,
                            0,
                            0));
    auto result_storage = rs_->allocateStorage(first_result.target_init_vals_);
    rs_->initializeStorage();
    // The effective key width selects the template instantiation for the move.
    switch (query_mem_desc.getEffectiveKeyWidth()) {
      case 4:
        first_result.moveEntriesToBuffer<int32_t>(result_storage->getUnderlyingBuffer(),
                                                  query_mem_desc.getEntryCount());
        break;
      case 8:
        first_result.moveEntriesToBuffer<int64_t>(result_storage->getUnderlyingBuffer(),
                                                  query_mem_desc.getEntryCount());
        break;
      default:
        CHECK(false);
    }
    // From here on, the manager-owned wide result set is the accumulator.
    result = rs_->storage_.get();
    result_rs = rs_.get();
  }
  // Gather serialized varlen buffers from all inputs so varlen SAMPLE targets
  // can be rewritten against one combined buffer (indexed by `ctr` below).
  auto& serialized_varlen_buffer = result_sets.front()->serialized_varlen_buffer_;
  if (!serialized_varlen_buffer.empty()) {
    result->rewriteAggregateBufferOffsets(serialized_varlen_buffer.front());
    for (auto result_it = result_sets.begin() + 1; result_it != result_sets.end();
         ++result_it) {
      auto& result_serialized_varlen_buffer = (*result_it)->serialized_varlen_buffer_;
      CHECK_EQ(result_serialized_varlen_buffer.size(), size_t(1));
      serialized_varlen_buffer.emplace_back(
          std::move(result_serialized_varlen_buffer.front()));
    }
  }
  // JIT the reduction loop once and reuse it for every remaining input.
  ResultSetReductionJIT reduction_jit(result_rs->getQueryMemDesc(),
                                      result_rs->getTargetInfos(),
                                      result_rs->getTargetInitVals(),
                                      executor_id);
  auto reduction_code = reduction_jit.codegen();
  size_t ctr = 1;
  for (auto result_it = result_sets.begin() + 1; result_it != result_sets.end();
       ++result_it) {
    if (!serialized_varlen_buffer.empty()) {
      result->reduce(*((*result_it)->storage_),
                     serialized_varlen_buffer[ctr++],
                     reduction_code,
                     executor_id);
    } else {
      result->reduce(*((*result_it)->storage_), {}, reduction_code, executor_id);
    }
  }
  return result_rs;
}
// Returns the result set owned by this manager; non-null only after reduce()
// had to allocate a wider buffer for a baseline-hash reduction.
std::shared_ptr<ResultSet> ResultSetManager::getOwnResultSet() {
  return rs_;
}
// Rewrites the aggregate buffer offsets of a result set's storage against its
// first serialized varlen buffer.
//
// Fix: the original called front() on serialized_varlen_buffer_ without
// checking it was non-empty (front() on an empty container is UB) and did not
// validate the pointers it dereferenced; fail loudly instead.
void ResultSetManager::rewriteVarlenAggregates(ResultSet* result_rs) {
  CHECK(result_rs);
  auto& result_storage = result_rs->storage_;
  CHECK(result_storage);
  CHECK(!result_rs->serialized_varlen_buffer_.empty());
  result_storage->rewriteAggregateBufferOffsets(
      result_rs->serialized_varlen_buffer_.front());
}
void ResultSetStorage::fillOneEntryRowWise(const std::vector<int64_t>& entry) {
const auto slot_count = query_mem_desc_.getBufferColSlotCount();
const auto key_count = query_mem_desc_.getGroupbyColCount();
CHECK_EQ(slot_count + key_count, entry.size());
auto this_buff = reinterpret_cast<int64_t*>(buff_);
CHECK(!query_mem_desc_.didOutputColumnar());
CHECK_EQ(size_t(1), query_mem_desc_.getEntryCount());
const auto key_off = key_offset_rowwise(0, key_count, slot_count);
CHECK_EQ(query_mem_desc_.getEffectiveKeyWidth(), sizeof(int64_t));
for (size_t i = 0; i < key_count; ++i) {
this_buff[key_off + i] = entry[i];
}
const auto first_slot_off = slot_offset_rowwise(0, 0, key_count, slot_count);
for (size_t i = 0; i < target_init_vals_.size(); ++i) {
this_buff[first_slot_off + i] = entry[key_count + i];
}
}
void ResultSetStorage::initializeRowWise() const {
const auto key_count = query_mem_desc_.getGroupbyColCount();
const auto row_size = get_row_bytes(query_mem_desc_);
CHECK_EQ(row_size % 8, 0u);
const auto key_bytes_with_padding =
align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
CHECK(!query_mem_desc_.hasKeylessHash());
switch (query_mem_desc_.getEffectiveKeyWidth()) {
case 4: {
for (size_t i = 0; i < query_mem_desc_.getEntryCount(); ++i) {
auto row_ptr = buff_ + i * row_size;
fill_empty_key_32(reinterpret_cast<int32_t*>(row_ptr), key_count);
auto slot_ptr = reinterpret_cast<int64_t*>(row_ptr + key_bytes_with_padding);
for (size_t j = 0; j < target_init_vals_.size(); ++j) {
slot_ptr[j] = target_init_vals_[j];
}
}
break;
}
case 8: {
for (size_t i = 0; i < query_mem_desc_.getEntryCount(); ++i) {
auto row_ptr = buff_ + i * row_size;
fill_empty_key_64(reinterpret_cast<int64_t*>(row_ptr), key_count);
auto slot_ptr = reinterpret_cast<int64_t*>(row_ptr + key_bytes_with_padding);
for (size_t j = 0; j < target_init_vals_.size(); ++j) {
slot_ptr[j] = target_init_vals_[j];
}
}
break;
}
default:
CHECK(false);
}
}
void ResultSetStorage::fillOneEntryColWise(const std::vector<int64_t>& entry) {
CHECK(query_mem_desc_.didOutputColumnar());
CHECK_EQ(size_t(1), query_mem_desc_.getEntryCount());
const auto slot_count = query_mem_desc_.getBufferColSlotCount();
const auto key_count = query_mem_desc_.getGroupbyColCount();
CHECK_EQ(slot_count + key_count, entry.size());
auto this_buff = reinterpret_cast<int64_t*>(buff_);
for (size_t i = 0; i < key_count; i++) {
const auto key_offset = key_offset_colwise(0, i, 1);
this_buff[key_offset] = entry[i];
}
for (size_t i = 0; i < target_init_vals_.size(); i++) {
const auto slot_offset = slot_offset_colwise(0, i, key_count, 1);
this_buff[slot_offset] = entry[key_count + i];
}
}
void ResultSetStorage::initializeColWise() const {
const auto key_count = query_mem_desc_.getGroupbyColCount();
auto this_buff = reinterpret_cast<int64_t*>(buff_);
CHECK(!query_mem_desc_.hasKeylessHash());
for (size_t key_idx = 0; key_idx < key_count; ++key_idx) {
const auto first_key_off =
key_offset_colwise(0, key_idx, query_mem_desc_.getEntryCount());
for (size_t i = 0; i < query_mem_desc_.getEntryCount(); ++i) {
this_buff[first_key_off + i] = EMPTY_KEY_64;
}
}
for (size_t target_idx = 0; target_idx < target_init_vals_.size(); ++target_idx) {
const auto first_val_off =
slot_offset_colwise(0, target_idx, key_count, query_mem_desc_.getEntryCount());
for (size_t i = 0; i < query_mem_desc_.getEntryCount(); ++i) {
this_buff[first_val_off + i] = target_init_vals_[target_idx];
}
}
}
// Seeds the value slots of one baseline-hash entry with the initial aggregate
// values. Columnar layouts stride consecutive slots by the entry count;
// row-wise layouts are dense (stride 1).
void ResultSetStorage::initializeBaselineValueSlots(int64_t* entry_slots) const {
  CHECK(entry_slots);
  const size_t stride =
      query_mem_desc_.didOutputColumnar() ? query_mem_desc_.getEntryCount() : 1;
  size_t slot_off = 0;
  for (size_t j = 0; j < target_init_vals_.size(); ++j) {
    entry_slots[slot_off] = target_init_vals_[j];
    slot_off += stride;
  }
}
// Applies aggregate `agg_kind__` (e.g. sum/min/max) to the slot at val_ptr__
// using the value at other_ptr__. Dispatches on the compact SQL type of
// agg_info__ (float vs. double) and, for integers, on the chosen slot width
// (32 vs. 64 bits). A macro (not a function) so the agg_##agg_kind__ token
// pasting can select the concrete runtime aggregate function.
#define AGGREGATE_ONE_VALUE(                                                  \
    agg_kind__, val_ptr__, other_ptr__, chosen_bytes__, agg_info__)           \
  do {                                                                        \
    const auto sql_type = get_compact_type(agg_info__);                       \
    if (sql_type.is_fp()) {                                                   \
      if (chosen_bytes__ == sizeof(float)) {                                  \
        agg_##agg_kind__##_float(reinterpret_cast<int32_t*>(val_ptr__),       \
                                 *reinterpret_cast<const float*>(other_ptr__)); \
      } else {                                                                \
        agg_##agg_kind__##_double(reinterpret_cast<int64_t*>(val_ptr__),      \
                                  *reinterpret_cast<const double*>(other_ptr__)); \
      }                                                                       \
    } else {                                                                  \
      if (chosen_bytes__ == sizeof(int32_t)) {                                \
        auto val_ptr = reinterpret_cast<int32_t*>(val_ptr__);                 \
        auto other_ptr = reinterpret_cast<const int32_t*>(other_ptr__);       \
        agg_##agg_kind__##_int32(val_ptr, *other_ptr);                        \
      } else {                                                                \
        auto val_ptr = reinterpret_cast<int64_t*>(val_ptr__);                 \
        auto other_ptr = reinterpret_cast<const int64_t*>(other_ptr__);       \
        agg_##agg_kind__(val_ptr, *other_ptr);                                \
      }                                                                       \
    }                                                                         \
  } while (0)
// Nullable variant of AGGREGATE_ONE_VALUE: when agg_info__.skip_null_val is
// set, the type-appropriate *_skip_val runtime function is used so values
// equal to the NULL sentinel (init_val__) do not contribute; otherwise it
// falls through to the plain AGGREGATE_ONE_VALUE.
#define AGGREGATE_ONE_NULLABLE_VALUE(                                        \
    agg_kind__, val_ptr__, other_ptr__, init_val__, chosen_bytes__, agg_info__) \
  do {                                                                        \
    if (agg_info__.skip_null_val) {                                           \
      const auto sql_type = get_compact_type(agg_info__);                     \
      if (sql_type.is_fp()) {                                                 \
        if (chosen_bytes__ == sizeof(float)) {                                \
          agg_##agg_kind__##_float_skip_val(                                  \
              reinterpret_cast<int32_t*>(val_ptr__),                          \
              *reinterpret_cast<const float*>(other_ptr__),                   \
              *reinterpret_cast<const float*>(may_alias_ptr(&init_val__)));   \
        } else {                                                              \
          agg_##agg_kind__##_double_skip_val(                                 \
              reinterpret_cast<int64_t*>(val_ptr__),                          \
              *reinterpret_cast<const double*>(other_ptr__),                  \
              *reinterpret_cast<const double*>(may_alias_ptr(&init_val__)));  \
        }                                                                     \
      } else {                                                                \
        if (chosen_bytes__ == sizeof(int32_t)) {                              \
          int32_t* val_ptr = reinterpret_cast<int32_t*>(val_ptr__);           \
          const int32_t* other_ptr = reinterpret_cast<const int32_t*>(other_ptr__); \
          const auto null_val = static_cast<int32_t>(init_val__);             \
          agg_##agg_kind__##_int32_skip_val(val_ptr, *other_ptr, null_val);   \
        } else {                                                              \
          int64_t* val_ptr = reinterpret_cast<int64_t*>(val_ptr__);           \
          const int64_t* other_ptr = reinterpret_cast<const int64_t*>(other_ptr__); \
          const auto null_val = static_cast<int64_t>(init_val__);             \
          agg_##agg_kind__##_skip_val(val_ptr, *other_ptr, null_val);         \
        }                                                                     \
      }                                                                       \
    } else {                                                                  \
      AGGREGATE_ONE_VALUE(                                                    \
          agg_kind__, val_ptr__, other_ptr__, chosen_bytes__, agg_info__);    \
    }                                                                         \
  } while (0)
// Merges a COUNT slot: partial counts are combined by summation, hence the
// agg_sum* calls. Dispatches on the chosen slot width (32 vs. 64 bits).
#define AGGREGATE_ONE_COUNT(val_ptr__, other_ptr__, chosen_bytes__)          \
  do {                                                                       \
    if (chosen_bytes__ == sizeof(int32_t)) {                                 \
      auto val_ptr = reinterpret_cast<int32_t*>(val_ptr__);                  \
      auto other_ptr = reinterpret_cast<const int32_t*>(other_ptr__);        \
      agg_sum_int32(val_ptr, *other_ptr);                                    \
    } else {                                                                 \
      auto val_ptr = reinterpret_cast<int64_t*>(val_ptr__);                  \
      auto other_ptr = reinterpret_cast<const int64_t*>(other_ptr__);        \
      agg_sum(val_ptr, *other_ptr);                                          \
    }                                                                        \
  } while (0)
// Nullable variant of AGGREGATE_ONE_COUNT: when agg_info__.skip_null_val is
// set, the type-appropriate agg_sum*_skip_val runtime function excludes the
// NULL sentinel (init_val__) from the merged count; otherwise it falls
// through to the plain AGGREGATE_ONE_COUNT.
//
// Fix: the body was a bare compound statement `{ ... }` instead of the
// do { } while (0) wrapper used by every sibling macro, which breaks usage
// as a single statement in unbraced if/else (dangling-else hazard).
#define AGGREGATE_ONE_NULLABLE_COUNT(                                        \
    val_ptr__, other_ptr__, init_val__, chosen_bytes__, agg_info__)          \
  do {                                                                       \
    if (agg_info__.skip_null_val) {                                          \
      const auto sql_type = get_compact_type(agg_info__);                    \
      if (sql_type.is_fp()) {                                                \
        if (chosen_bytes__ == sizeof(float)) {                               \
          agg_sum_float_skip_val(                                            \
              reinterpret_cast<int32_t*>(val_ptr__),                         \
              *reinterpret_cast<const float*>(other_ptr__),                  \
              *reinterpret_cast<const float*>(may_alias_ptr(&init_val__)));  \
        } else {                                                             \
          agg_sum_double_skip_val(                                           \
              reinterpret_cast<int64_t*>(val_ptr__),                         \
              *reinterpret_cast<const double*>(other_ptr__),                 \
              *reinterpret_cast<const double*>(may_alias_ptr(&init_val__))); \
        }                                                                    \
      } else {                                                               \
        if (chosen_bytes__ == sizeof(int32_t)) {                             \
          auto val_ptr = reinterpret_cast<int32_t*>(val_ptr__);              \
          auto other_ptr = reinterpret_cast<const int32_t*>(other_ptr__);    \
          const auto null_val = static_cast<int32_t>(init_val__);            \
          agg_sum_int32_skip_val(val_ptr, *other_ptr, null_val);             \
        } else {                                                             \
          auto val_ptr = reinterpret_cast<int64_t*>(val_ptr__);              \
          auto other_ptr = reinterpret_cast<const int64_t*>(other_ptr__);    \
          const auto null_val = static_cast<int64_t>(init_val__);            \
          agg_sum_skip_val(val_ptr, *other_ptr, null_val);                   \
        }                                                                    \
      }                                                                      \
    } else {                                                                 \
      AGGREGATE_ONE_COUNT(val_ptr__, other_ptr__, chosen_bytes__);           \
    }                                                                        \
  } while (0)
// to be used for 8/16-bit kMIN and kMAX only
// Applies aggregate `agg_kind__` to a sub-32-bit slot, dispatching on the
// chosen width (16 vs. 8 bits). Any other width is a caller bug and trips
// UNREACHABLE().
#define AGGREGATE_ONE_VALUE_SMALL(                                           \
    agg_kind__, val_ptr__, other_ptr__, chosen_bytes__, agg_info__)          \
  do {                                                                       \
    if (chosen_bytes__ == sizeof(int16_t)) {                                 \
      auto val_ptr = reinterpret_cast<int16_t*>(val_ptr__);                  \
      auto other_ptr = reinterpret_cast<const int16_t*>(other_ptr__);        \
      agg_##agg_kind__##_int16(val_ptr, *other_ptr);                         \
    } else if (chosen_bytes__ == sizeof(int8_t)) {                           \
      auto val_ptr = reinterpret_cast<int8_t*>(val_ptr__);                   \
      auto other_ptr = reinterpret_cast<const int8_t*>(other_ptr__);         \
      agg_##agg_kind__##_int8(val_ptr, *other_ptr);                          \
    } else {                                                                 \
      UNREACHABLE();                                                         \
    }                                                                        \
  } while (0)
// to be used for 8/16-bit kMIN and kMAX only
// Nullable variant of AGGREGATE_ONE_VALUE_SMALL: when skip_null_val is set,
// the NULL sentinel (init_val__) is excluded via the *_skip_val functions.
//
// Fixes: the 8-bit branch tested `chosen_bytes` — a name from the *caller's*
// scope — instead of the macro parameter `chosen_bytes__`; it only compiled
// because every call site happens to have a local named chosen_bytes. Also,
// unexpected widths now trip UNREACHABLE(), mirroring the non-nullable macro.
#define AGGREGATE_ONE_NULLABLE_VALUE_SMALL(                                   \
    agg_kind__, val_ptr__, other_ptr__, init_val__, chosen_bytes__, agg_info__) \
  do {                                                                        \
    if (agg_info__.skip_null_val) {                                           \
      if (chosen_bytes__ == sizeof(int16_t)) {                                \
        int16_t* val_ptr = reinterpret_cast<int16_t*>(val_ptr__);             \
        const int16_t* other_ptr = reinterpret_cast<const int16_t*>(other_ptr__); \
        const auto null_val = static_cast<int16_t>(init_val__);               \
        agg_##agg_kind__##_int16_skip_val(val_ptr, *other_ptr, null_val);     \
      } else if (chosen_bytes__ == sizeof(int8_t)) {                          \
        int8_t* val_ptr = reinterpret_cast<int8_t*>(val_ptr__);               \
        const int8_t* other_ptr = reinterpret_cast<const int8_t*>(other_ptr__); \
        const auto null_val = static_cast<int8_t>(init_val__);                \
        agg_##agg_kind__##_int8_skip_val(val_ptr, *other_ptr, null_val);      \
      } else {                                                                \
        UNREACHABLE();                                                        \
      }                                                                       \
    } else {                                                                  \
      AGGREGATE_ONE_VALUE_SMALL(                                              \
          agg_kind__, val_ptr__, other_ptr__, chosen_bytes__, agg_info__);    \
    }                                                                         \
  } while (0)
// Width in bytes of a target slot. Float arguments are always compacted to
// sizeof(float) regardless of the descriptor's padded slot width.
int8_t result_set::get_width_for_slot(const size_t target_slot_idx,
                                      const bool float_argument_input,
                                      const QueryMemoryDescriptor& query_mem_desc) {
  return float_argument_input
             ? static_cast<int8_t>(sizeof(float))
             : query_mem_desc.getPaddedSlotWidthBytes(target_slot_idx);
}
// Reduces one SINGLE_VALUE slot: the rhs value is copied into the lhs slot if
// the lhs still holds the init (sentinel) value; if both sides hold distinct
// non-init values, the SINGLE_VALUE contract is violated and we throw.
// NOTE(review): `init_val` is the raw 64-bit init value and is compared
// against 1/2/4-byte slot values after integer promotion — assumes the
// narrow sentinels round-trip through the promotion; confirm for 1/2-byte
// slots.
void ResultSetStorage::reduceOneSlotSingleValue(int8_t* this_ptr1,
                                                const TargetInfo& target_info,
                                                const size_t target_slot_idx,
                                                const size_t init_agg_val_idx,
                                                const int8_t* that_ptr1) const {
  const bool float_argument_input = takes_float_argument(target_info);
  const auto chosen_bytes = result_set::get_width_for_slot(
      target_slot_idx, float_argument_input, query_mem_desc_);
  auto init_val = target_init_vals_[init_agg_val_idx];
  // Width-generic reduction body; `size_tag` only selects the cast type.
  auto reduce = [&](auto const& size_tag) {
    using CastTarget = std::decay_t<decltype(size_tag)>;
    const auto lhs_proj_col = *reinterpret_cast<const CastTarget*>(this_ptr1);
    const auto rhs_proj_col = *reinterpret_cast<const CastTarget*>(that_ptr1);
    if (rhs_proj_col == init_val) {
      // ignore
    } else if (lhs_proj_col == init_val) {
      *reinterpret_cast<CastTarget*>(this_ptr1) = rhs_proj_col;
    } else if (lhs_proj_col != rhs_proj_col) {
      throw std::runtime_error("Multiple distinct values encountered");
    }
  };
  switch (chosen_bytes) {
    case 1: {
      // Sub-32-bit slots only exist when logical sized columns are allowed.
      CHECK(query_mem_desc_.isLogicalSizedColumnsAllowed());
      reduce(int8_t());
      break;
    }
    case 2: {
      CHECK(query_mem_desc_.isLogicalSizedColumnsAllowed());
      reduce(int16_t());
      break;
    }
    case 4: {
      reduce(int32_t());
      break;
    }
    case 8: {
      CHECK(!target_info.sql_type.is_varlen());
      reduce(int64_t());
      break;
    }
    default:
      LOG(FATAL) << "Invalid slot width: " << chosen_bytes;
  }
}
// Reduces one logical target's slot(s) of `that` into this storage.
// this_ptr1/that_ptr1 point at the primary physical slot; this_ptr2/that_ptr2
// at the secondary slot used by kAVG (count component) and varlen kSAMPLE
// (length component). `serialized_varlen_buffer`, when non-empty, maps varlen
// SAMPLE indices to serialized payloads gathered by the reduction driver.
void ResultSetStorage::reduceOneSlot(
    int8_t* this_ptr1,
    int8_t* this_ptr2,
    const int8_t* that_ptr1,
    const int8_t* that_ptr2,
    const TargetInfo& target_info,
    const size_t target_logical_idx,
    const size_t target_slot_idx,
    const size_t init_agg_val_idx,
    const ResultSetStorage& that,
    const size_t first_slot_idx_for_target,
    const std::vector<std::string>& serialized_varlen_buffer) const {
  // Targets materialized directly from a group-by key need no reduction.
  if (query_mem_desc_.targetGroupbyIndicesSize() > 0) {
    if (query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
      return;
    }
  }
  CHECK_LT(init_agg_val_idx, target_init_vals_.size());
  const bool float_argument_input = takes_float_argument(target_info);
  const auto chosen_bytes = result_set::get_width_for_slot(
      target_slot_idx, float_argument_input, query_mem_desc_);
  int64_t init_val = target_init_vals_[init_agg_val_idx];  // skip_val for nullable types
  if (target_info.is_agg && target_info.agg_kind == kSINGLE_VALUE) {
    // NOTE(review): the callee's third parameter is named target_slot_idx but
    // target_logical_idx is passed here — confirm which index is intended.
    reduceOneSlotSingleValue(
        this_ptr1, target_info, target_logical_idx, init_agg_val_idx, that_ptr1);
  } else if (target_info.is_agg && target_info.agg_kind != kSAMPLE) {
    switch (target_info.agg_kind) {
      case kCOUNT:
      case kAPPROX_COUNT_DISTINCT: {
        // Distinct counts merge bitmap/set handles, not plain integers.
        if (is_distinct_target(target_info)) {
          CHECK_EQ(static_cast<size_t>(chosen_bytes), sizeof(int64_t));
          reduceOneCountDistinctSlot(this_ptr1, that_ptr1, target_logical_idx, that);
          break;
        }
        CHECK_EQ(int64_t(0), init_val);
        AGGREGATE_ONE_COUNT(this_ptr1, that_ptr1, chosen_bytes);
        break;
      }
      case kAVG: {
        // Ignore float argument compaction for count component for fear of its overflow
        AGGREGATE_ONE_COUNT(this_ptr2,
                            that_ptr2,
                            query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx));
      }
      // fall thru
      case kSUM: {
        AGGREGATE_ONE_NULLABLE_VALUE(
            sum, this_ptr1, that_ptr1, init_val, chosen_bytes, target_info);
        break;
      }
      case kMIN: {
        // Sub-32-bit slots use the *_SMALL macro family.
        if (static_cast<size_t>(chosen_bytes) <= sizeof(int16_t)) {
          AGGREGATE_ONE_NULLABLE_VALUE_SMALL(
              min, this_ptr1, that_ptr1, init_val, chosen_bytes, target_info);
        } else {
          AGGREGATE_ONE_NULLABLE_VALUE(
              min, this_ptr1, that_ptr1, init_val, chosen_bytes, target_info);
        }
        break;
      }
      case kMAX: {
        if (static_cast<size_t>(chosen_bytes) <= sizeof(int16_t)) {
          AGGREGATE_ONE_NULLABLE_VALUE_SMALL(
              max, this_ptr1, that_ptr1, init_val, chosen_bytes, target_info);
        } else {
          AGGREGATE_ONE_NULLABLE_VALUE(
              max, this_ptr1, that_ptr1, init_val, chosen_bytes, target_info);
        }
        break;
      }
      case kAPPROX_QUANTILE:
        CHECK_EQ(static_cast<int8_t>(sizeof(int64_t)), chosen_bytes);
        reduceOneApproxQuantileSlot(this_ptr1, that_ptr1, target_logical_idx, that);
        break;
      default:
        UNREACHABLE() << toString(target_info.agg_kind);
    }
  } else {
    // Projected columns and kSAMPLE: copy rhs over lhs when rhs is non-init.
    switch (chosen_bytes) {
      case 1: {
        CHECK(query_mem_desc_.isLogicalSizedColumnsAllowed());
        const auto rhs_proj_col = *reinterpret_cast<const int8_t*>(that_ptr1);
        if (rhs_proj_col != init_val) {
          *reinterpret_cast<int8_t*>(this_ptr1) = rhs_proj_col;
        }
        break;
      }
      case 2: {
        CHECK(query_mem_desc_.isLogicalSizedColumnsAllowed());
        const auto rhs_proj_col = *reinterpret_cast<const int16_t*>(that_ptr1);
        if (rhs_proj_col != init_val) {
          *reinterpret_cast<int16_t*>(this_ptr1) = rhs_proj_col;
        }
        break;
      }
      case 4: {
        CHECK(target_info.agg_kind != kSAMPLE ||
              query_mem_desc_.isLogicalSizedColumnsAllowed());
        const auto rhs_proj_col = *reinterpret_cast<const int32_t*>(that_ptr1);
        if (rhs_proj_col != init_val) {
          *reinterpret_cast<int32_t*>(this_ptr1) = rhs_proj_col;
        }
        break;
      }
      case 8: {
        auto rhs_proj_col = *reinterpret_cast<const int64_t*>(that_ptr1);
        if ((target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()) &&
            !serialized_varlen_buffer.empty()) {
          // Varlen SAMPLE with serialized payloads: rhs holds an index into
          // serialized_varlen_buffer; rewrite slot 1 to the payload pointer
          // and slot 2 to the element count.
          size_t length_to_elems{0};
          const auto& elem_ti = target_info.sql_type.get_elem_type();
          length_to_elems = target_info.sql_type.is_string() ? 1 : elem_ti.get_size();
          CHECK_LT(static_cast<size_t>(rhs_proj_col), serialized_varlen_buffer.size());
          const auto& varlen_bytes_str = serialized_varlen_buffer[rhs_proj_col];
          const auto str_ptr = reinterpret_cast<const int8_t*>(varlen_bytes_str.c_str());
          // NOTE(review): casting a pointer to `const int64_t` — the const on
          // an integral prvalue is meaningless; plain int64_t was likely meant.
          *reinterpret_cast<int64_t*>(this_ptr1) =
              reinterpret_cast<const int64_t>(str_ptr);
          *reinterpret_cast<int64_t*>(this_ptr2) =
              static_cast<int64_t>(varlen_bytes_str.size() / length_to_elems);
        } else {
          if (rhs_proj_col != init_val) {
            *reinterpret_cast<int64_t*>(this_ptr1) = rhs_proj_col;
          }
          if ((target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen())) {
            CHECK(this_ptr2 && that_ptr2);
            *reinterpret_cast<int64_t*>(this_ptr2) =
                *reinterpret_cast<const int64_t*>(that_ptr2);
          }
        }
        break;
      }
      default:
        LOG(FATAL) << "Invalid slot width: " << chosen_bytes;
    }
  }
}
// Merges the incoming t-digest (from `that`) into this storage's accumulator
// digest for an APPROX_QUANTILE target. Both slots store TDigest pointers,
// hence the static_assert on pointer width.
void ResultSetStorage::reduceOneApproxQuantileSlot(int8_t* this_ptr1,
                                                   const int8_t* that_ptr1,
                                                   const size_t target_logical_idx,
                                                   const ResultSetStorage& that) const {
  CHECK_LT(target_logical_idx, query_mem_desc_.getCountDistinctDescriptorsSize());
  static_assert(sizeof(int64_t) == sizeof(quantile::TDigest*));
  auto* incoming = *reinterpret_cast<quantile::TDigest* const*>(that_ptr1);
  CHECK(incoming) << "this_ptr1=" << (void*)this_ptr1
                  << ", that_ptr1=" << (void const*)that_ptr1
                  << ", target_logical_idx=" << target_logical_idx;
  // A digest with zero centroid capacity has accumulated no rows; skip it.
  if (incoming->centroids().capacity()) {
    auto* accumulator = *reinterpret_cast<quantile::TDigest**>(this_ptr1);
    CHECK(accumulator) << "this_ptr1=" << (void*)this_ptr1
                       << ", that_ptr1=" << (void const*)that_ptr1
                       << ", target_logical_idx=" << target_logical_idx;
    // Ensure the accumulator's buffers exist before merging into it.
    accumulator->allocate();
    accumulator->mergeTDigest(*incoming);
  }
}
// Merges a COUNT DISTINCT / APPROX_COUNT_DISTINCT slot by unioning the
// incoming distinct set (from `that`) into this storage's set. Both slots
// hold 64-bit set handles described by the count-distinct descriptors.
void ResultSetStorage::reduceOneCountDistinctSlot(int8_t* this_ptr1,
                                                  const int8_t* that_ptr1,
                                                  const size_t target_logical_idx,
                                                  const ResultSetStorage& that) const {
  CHECK_LT(target_logical_idx, query_mem_desc_.getCountDistinctDescriptorsSize());
  CHECK(this_ptr1 && that_ptr1);
  const auto& lhs_desc =
      query_mem_desc_.getCountDistinctDescriptor(target_logical_idx);
  CHECK(lhs_desc.impl_type_ != CountDistinctImplType::Invalid);
  const auto& rhs_desc =
      that.query_mem_desc_.getCountDistinctDescriptor(target_logical_idx);
  // Both sides must use the same set implementation to be union-able.
  CHECK(lhs_desc.impl_type_ == rhs_desc.impl_type_);
  const auto lhs_handle = *reinterpret_cast<const int64_t*>(this_ptr1);
  const auto rhs_handle = *reinterpret_cast<const int64_t*>(that_ptr1);
  count_distinct_set_union(rhs_handle, lhs_handle, rhs_desc, lhs_desc);
}
// Reduces the per-warp partial results of one keyless-hash row into agg_vals.
// Runs in two passes per warp: first read the warp's partial slot values and
// decide (via the key target slot) whether the warp produced a result at all;
// then, for non-discarded warps, merge the partials into agg_vals with the
// AGGREGATE_ONE_* macros. Returns true when every warp's result was discarded
// (i.e. the row is empty).
bool ResultSetStorage::reduceSingleRow(const int8_t* row_ptr,
                                       const int8_t warp_count,
                                       const bool is_columnar,
                                       const bool replace_bitmap_ptr_with_bitmap_sz,
                                       std::vector<int64_t>& agg_vals,
                                       const QueryMemoryDescriptor& query_mem_desc,
                                       const std::vector<TargetInfo>& targets,
                                       const std::vector<int64_t>& agg_init_vals) {
  const size_t agg_col_count{agg_vals.size()};
  const auto row_size = query_mem_desc.getRowSize();
  CHECK_EQ(agg_col_count, query_mem_desc.getSlotCount());
  CHECK_GE(agg_col_count, targets.size());
  CHECK_EQ(is_columnar, query_mem_desc.didOutputColumnar());
  // This path only applies to keyless-hash layouts.
  CHECK(query_mem_desc.hasKeylessHash());
  std::vector<int64_t> partial_agg_vals(agg_col_count, 0);
  bool discard_row = true;
  for (int8_t warp_idx = 0; warp_idx < warp_count; ++warp_idx) {
    bool discard_partial_result = true;
    // Pass 1: load this warp's partial values and check for emptiness.
    for (size_t target_idx = 0, agg_col_idx = 0;
         target_idx < targets.size() && agg_col_idx < agg_col_count;
         ++target_idx, ++agg_col_idx) {
      const auto& agg_info = targets[target_idx];
      const bool float_argument_input = takes_float_argument(agg_info);
      const auto chosen_bytes = float_argument_input
                                    ? sizeof(float)
                                    : query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx);
      auto partial_bin_val = get_component(
          row_ptr + query_mem_desc.getColOnlyOffInBytes(agg_col_idx), chosen_bytes);
      partial_agg_vals[agg_col_idx] = partial_bin_val;
      if (is_distinct_target(agg_info)) {
        // Distinct slots hold set handles; interleaved warps are unsupported.
        CHECK_EQ(int8_t(1), warp_count);
        CHECK(agg_info.is_agg && (agg_info.agg_kind == kCOUNT ||
                                  agg_info.agg_kind == kAPPROX_COUNT_DISTINCT));
        partial_bin_val = count_distinct_set_size(
            partial_bin_val, query_mem_desc.getCountDistinctDescriptor(target_idx));
        if (replace_bitmap_ptr_with_bitmap_sz) {
          partial_agg_vals[agg_col_idx] = partial_bin_val;
        }
      }
      if (kAVG == agg_info.agg_kind) {
        // kAVG consumes a second slot for its count component.
        CHECK(agg_info.is_agg && !agg_info.is_distinct);
        ++agg_col_idx;
        partial_bin_val = partial_agg_vals[agg_col_idx] =
            get_component(row_ptr + query_mem_desc.getColOnlyOffInBytes(agg_col_idx),
                          query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx));
      }
      // The key target slot deviating from its init value means this warp
      // actually produced a result.
      if (agg_col_idx == static_cast<size_t>(query_mem_desc.getTargetIdxForKey()) &&
          partial_bin_val != agg_init_vals[query_mem_desc.getTargetIdxForKey()]) {
        CHECK(agg_info.is_agg);
        discard_partial_result = false;
      }
    }
    row_ptr += row_size;
    if (discard_partial_result) {
      continue;
    }
    discard_row = false;
    // Pass 2: merge this warp's partials into the accumulated agg_vals.
    for (size_t target_idx = 0, agg_col_idx = 0;
         target_idx < targets.size() && agg_col_idx < agg_col_count;
         ++target_idx, ++agg_col_idx) {
      auto partial_bin_val = partial_agg_vals[agg_col_idx];
      const auto& agg_info = targets[target_idx];
      const bool float_argument_input = takes_float_argument(agg_info);
      const auto chosen_bytes = float_argument_input
                                    ? sizeof(float)
                                    : query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx);
      const auto& chosen_type = get_compact_type(agg_info);
      if (agg_info.is_agg && agg_info.agg_kind != kSAMPLE) {
        try {
          switch (agg_info.agg_kind) {
            case kCOUNT:
            case kAPPROX_COUNT_DISTINCT:
              AGGREGATE_ONE_NULLABLE_COUNT(
                  reinterpret_cast<int8_t*>(&agg_vals[agg_col_idx]),
                  reinterpret_cast<int8_t*>(&partial_agg_vals[agg_col_idx]),
                  agg_init_vals[agg_col_idx],
                  chosen_bytes,
                  agg_info);
              break;
            case kAVG:
              // Ignore float argument compaction for count component for fear of its
              // overflow
              AGGREGATE_ONE_COUNT(
                  reinterpret_cast<int8_t*>(&agg_vals[agg_col_idx + 1]),
                  reinterpret_cast<int8_t*>(&partial_agg_vals[agg_col_idx + 1]),
                  query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx));
            // fall thru
            case kSUM:
              AGGREGATE_ONE_NULLABLE_VALUE(
                  sum,
                  reinterpret_cast<int8_t*>(&agg_vals[agg_col_idx]),
                  reinterpret_cast<int8_t*>(&partial_agg_vals[agg_col_idx]),
                  agg_init_vals[agg_col_idx],
                  chosen_bytes,
                  agg_info);
              break;
            case kMIN:
              // Sub-32-bit slots take the *_SMALL macro family.
              if (static_cast<size_t>(chosen_bytes) <= sizeof(int16_t)) {
                AGGREGATE_ONE_NULLABLE_VALUE_SMALL(
                    min,
                    reinterpret_cast<int8_t*>(&agg_vals[agg_col_idx]),
                    reinterpret_cast<int8_t*>(&partial_agg_vals[agg_col_idx]),
                    agg_init_vals[agg_col_idx],
                    chosen_bytes,
                    agg_info);
              } else {
                AGGREGATE_ONE_NULLABLE_VALUE(
                    min,
                    reinterpret_cast<int8_t*>(&agg_vals[agg_col_idx]),
                    reinterpret_cast<int8_t*>(&partial_agg_vals[agg_col_idx]),
                    agg_init_vals[agg_col_idx],
                    chosen_bytes,
                    agg_info);
              }
              break;
            case kMAX:
              if (static_cast<size_t>(chosen_bytes) <= sizeof(int16_t)) {
                AGGREGATE_ONE_NULLABLE_VALUE_SMALL(
                    max,
                    reinterpret_cast<int8_t*>(&agg_vals[agg_col_idx]),
                    reinterpret_cast<int8_t*>(&partial_agg_vals[agg_col_idx]),
                    agg_init_vals[agg_col_idx],
                    chosen_bytes,
                    agg_info);
              } else {
                AGGREGATE_ONE_NULLABLE_VALUE(
                    max,
                    reinterpret_cast<int8_t*>(&agg_vals[agg_col_idx]),
                    reinterpret_cast<int8_t*>(&partial_agg_vals[agg_col_idx]),
                    agg_init_vals[agg_col_idx],
                    chosen_bytes,
                    agg_info);
              }
              break;
            default:
              CHECK(false);
              break;
          }
        } catch (std::runtime_error& e) {
          // TODO(miyu): handle the case where chosen_bytes < 8
          LOG(ERROR) << e.what();
        }
        if (chosen_type.is_integer() || chosen_type.is_decimal()) {
          // Sign-extend 32-bit results back to 64 bits, except for COUNT
          // values that already deviated from their init value.
          switch (chosen_bytes) {
            case 8:
              break;
            case 4: {
              int32_t ret = *reinterpret_cast<const int32_t*>(&agg_vals[agg_col_idx]);
              if (!(agg_info.agg_kind == kCOUNT && ret != agg_init_vals[agg_col_idx])) {
                agg_vals[agg_col_idx] = static_cast<int64_t>(ret);
              }
              break;
            }
            default:
              CHECK(false);
          }
        }
        if (kAVG == agg_info.agg_kind) {
          ++agg_col_idx;
        }
      } else {
        if (agg_info.agg_kind == kSAMPLE) {
          CHECK(!agg_info.sql_type.is_varlen())
              << "Interleaved bins reduction not supported for variable length "
                 "arguments "
                 "to SAMPLE";
        }
        // Projected value: fill once; later warps must agree with it.
        if (agg_vals[agg_col_idx]) {
          if (agg_info.agg_kind == kSAMPLE) {
            continue;
          }
          CHECK_EQ(agg_vals[agg_col_idx], partial_bin_val);
        } else {
          agg_vals[agg_col_idx] = partial_bin_val;
        }
      }
    }
  }
  return discard_row;
}
|
// Copyright (c) 2018 Douglas Lassance. All rights reserved.
#include "ShakerComponent.h"
#include "Shaker.h"
#include "ShakerShake.h"
UShakerComponent::UShakerComponent(const FObjectInitializer& ObjectInitializer)
	: Super(ObjectInitializer)
{
	// Tick after physics so the shake offset is layered on the final simulated pose.
	PrimaryComponentTick.bCanEverTick = true;
	PrimaryComponentTick.TickGroup = TG_PostPhysics;
	bAutoActivate = true;
	// Tick in the editor too, so shakes can be previewed without PIE.
	bTickInEditor = true;
}
// Starts (or restarts) a shake of the given class at the given scale and
// returns the instance driving it, or nullptr if Shake is null or the
// instance could not be created.
//
// Fix: the null-hole reuse loop lacked a break, so one new instance could be
// written into EVERY null slot of ActiveShakes, making it tick multiple times
// per frame; it now registers the instance exactly once.
UShakerShake* UShakerComponent::PlayShake(TSubclassOf<class UShakerShake> Shake, float Scale)
{
	if (Shake == nullptr)
	{
		return nullptr;
	}
	UShakerShake const* const ShakeCDO = GetDefault<UShakerShake>(Shake);
	if (ShakeCDO && ShakeCDO->bSingleInstance)
	{
		// Single-instance shakes restart the existing instance instead of stacking.
		for (UShakerShake* ShakeInstance : ActiveShakes)
		{
			if (ShakeInstance && (Shake == ShakeInstance->GetClass()))
			{
				ShakeInstance->Play(this, Scale);
				return ShakeInstance;
			}
		}
	}
	UShakerShake* const NewInstance = NewObject<UShakerShake>(this, Shake);
	if (NewInstance)
	{
		// Initialize the new shake before registering it.
		NewInstance->Play(this, Scale);
		// Reuse the first null hole to keep the array compact; break so the
		// instance is registered only once.
		bool bReplacedNull = false;
		for (int32 Idx = 0; Idx < ActiveShakes.Num(); ++Idx)
		{
			if (ActiveShakes[Idx] == nullptr)
			{
				ActiveShakes[Idx] = NewInstance;
				bReplacedNull = true;
				break;
			}
		}
		// No null holes: extend the array.
		if (!bReplacedNull)
		{
			ActiveShakes.Emplace(NewInstance);
		}
	}
	return NewInstance;
}
// Stops the given shake instance; when bImmediately is set, the instance is
// also removed from the active list (otherwise it fades out and is culled in
// TickComponent once finished).
//
// Fix: a null Shake argument would match a null hole in ActiveShakes and then
// crash on Shake->Stop(); bail out early instead.
void UShakerComponent::StopShake(UShakerShake* Shake, bool bImmediately)
{
	if (Shake == nullptr)
	{
		return;
	}
	for (int32 i = 0; i < ActiveShakes.Num(); ++i)
	{
		if (ActiveShakes[i] == Shake)
		{
			Shake->Stop(bImmediately);
			if (bImmediately)
			{
				ActiveShakes.RemoveAt(i, 1);
			}
			break;
		}
	}
}
// Stops every active shake whose class derives from the given class; when
// bImmediately is set, matching instances are also removed from the list.
void UShakerComponent::StopAllInstancesOfShake(TSubclassOf<class UShakerShake> Shake, bool bImmediately)
{
	// Walk backwards so RemoveAt() does not shift unvisited indices.
	for (int32 Idx = ActiveShakes.Num() - 1; Idx >= 0; --Idx)
	{
		UShakerShake* const Instance = ActiveShakes[Idx];
		if (Instance == nullptr || !Instance->GetClass()->IsChildOf(Shake))
		{
			continue;
		}
		Instance->Stop(bImmediately);
		if (bImmediately)
		{
			ActiveShakes.RemoveAt(Idx, 1);
		}
	}
}
// Stops every active shake; when bImmediately is set, the active list is
// cleared as well.
//
// Fix: the loop dereferenced each entry unconditionally, but ActiveShakes can
// hold null holes (PlayShake reuses them and TickComponent culls them), so a
// null entry would crash here; guard before calling Stop().
void UShakerComponent::StopAllShakes(bool bImmediately)
{
	for (UShakerShake* Instance : ActiveShakes)
	{
		if (Instance)
		{
			Instance->Stop(bImmediately);
		}
	}
	if (bImmediately)
	{
		ActiveShakes.Empty();
	}
}
// Target blend weight: 0 while fading out toward a pending disable, else 1.
float UShakerComponent::GetTargetAlpha()
{
	if (bPendingDisable)
	{
		return 0.0f;
	}
	return 1.0f;
}
void UShakerComponent::UpdateAlpha(float DeltaTime)
{
float const BlendTime = (1.0f == 0.f) ? AlphaOutTime : AlphaInTime;
// Interpolate!
if (BlendTime <= 0.f)
{
// No blend time means no blending, just go directly to target alpha.
Alpha = 1.0f;
}
else if (Alpha > 1.0)
{
// Interpolate downward to target, while protecting against overshooting.
Alpha = FMath::Max<float>(Alpha - DeltaTime / BlendTime, 1.f);
}
else
{
// Interpolate upward to target, while protecting against overshooting.
Alpha = FMath::Min<float>(Alpha + DeltaTime / BlendTime, 1.f);
}
}
// Per-frame update: advances the alpha blend, commits a pending disable once
// fully faded out, ticks all active shakes into a combined transform, culls
// finished/null shakes and applies the result to this component.
//
// Fix: the shake tick loop dereferenced ShakeInstance without a null check,
// although the array can contain null holes (the cleanup loop two statements
// later explicitly handles them); guard before ticking.
void UShakerComponent::TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction)
{
	Super::TickComponent(DeltaTime, TickType, ThisTickFunction);
	// Update the alpha
	UpdateAlpha(DeltaTime);
	// If pending disable and fully alpha-ed out, truly disable this modifier
	if (bPendingDisable && (Alpha <= 0.f))
	{
		Disable(true);
	}
	// If no alpha, exit early.
	if (Alpha <= 0.f)
	{
		return;
	}
	// Update and apply active shakes.
	if (ActiveShakes.Num() > 0)
	{
		// Accumulate every active shake's offset into one transform.
		FTransform ShakeTransform;
		for (UShakerShake* ShakeInstance : ActiveShakes)
		{
			if (ShakeInstance)
			{
				ShakeInstance->Tick(DeltaTime, Alpha, ShakeTransform);
			}
		}
		// Delete any obsolete shakes (finished or garbage-collected to null).
		for (int32 i = ActiveShakes.Num() - 1; i >= 0; i--)
		{
			UShakerShake* const ShakeInstance = ActiveShakes[i];
			if ((ShakeInstance == nullptr) || ShakeInstance->IsFinished())
			{
				ActiveShakes.RemoveAt(i, 1);
			}
		}
		// Applying the transforms.
		SetRelativeLocationAndRotation(ShakeTransform.GetLocation(), ShakeTransform.GetRotation());
	}
}
// Disables the component: immediately (hard stop, clearing any pending fade),
// or softly by flagging a pending disable that TickComponent commits once the
// alpha has faded out.
void UShakerComponent::Disable(bool bImmediate)
{
	if (bImmediate)
	{
		bDisabled = true;
		bPendingDisable = false;
		return;
	}
	if (!bDisabled)
	{
		bPendingDisable = true;
	}
}
// Returns whether the component has been fully disabled (a pending,
// not-yet-committed disable still reports false).
bool UShakerComponent::IsDisabled() const
{
	return bDisabled;
}
|
/*
* Copyright (C) 2004, 2005, 2006, 2007 Nikolas Zimmermann <zimmermann@kde.org>
* Copyright (C) 2004, 2005 Rob Buis <buis@kde.org>
* Copyright (C) 2005 Eric Seidel <eric@webkit.org>
* Copyright (C) 2009 Dirk Schulze <krit@webkit.org>
* Copyright (C) 2013 Google Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "platform/graphics/filters/FEFlood.h"
#include "SkColorFilter.h"
#include "SkColorFilterImageFilter.h"
#include "platform/graphics/filters/SkiaImageFilterBuilder.h"
#include "platform/text/TextStream.h"
namespace blink {
FEFlood::FEFlood(Filter* filter, const Color& floodColor, float floodOpacity)
    : FilterEffect(filter)
    , m_floodColor(floodColor)
    , m_floodOpacity(floodOpacity)
{
    // feFlood generates a plain color fill, so it operates in device RGB
    // rather than the default (linear) filter color space.
    FilterEffect::setOperatingColorSpace(ColorSpaceDeviceRGB);
}
// Factory for FEFlood; returns ownership via the project's transition
// ref-pointer type.
PassRefPtrWillBeRawPtr<FEFlood> FEFlood::create(Filter* filter, const Color& floodColor, float floodOpacity)
{
    return adoptRefWillBeNoop(new FEFlood(filter, floodColor, floodOpacity));
}
// The flood color (without the flood-opacity applied; see createImageFilter).
Color FEFlood::floodColor() const
{
    return m_floodColor;
}
// Updates the flood color; returns true only when the value actually changed
// (so callers know whether the filter result must be invalidated).
bool FEFlood::setFloodColor(const Color& color)
{
    const bool unchanged = (m_floodColor == color);
    if (!unchanged)
        m_floodColor = color;
    return !unchanged;
}
float FEFlood::floodOpacity() const
{
return m_floodOpacity;
}
bool FEFlood::setFloodOpacity(float floodOpacity)
{
if (m_floodOpacity == floodOpacity)
return false;
m_floodOpacity = floodOpacity;
return true;
}
PassRefPtr<SkImageFilter> FEFlood::createImageFilter(SkiaImageFilterBuilder* builder)
{
Color color = floodColor().combineWithAlpha(floodOpacity());
SkImageFilter::CropRect rect = getCropRect(builder->cropOffset());
SkAutoTUnref<SkColorFilter> cf(SkColorFilter::CreateModeFilter(color.rgb(), SkXfermode::kSrc_Mode));
return adoptRef(SkColorFilterImageFilter::Create(cf, 0, &rect));
}
TextStream& FEFlood::externalRepresentation(TextStream& ts, int indent) const
{
writeIndent(ts, indent);
ts << "[feFlood";
FilterEffect::externalRepresentation(ts);
ts << " flood-color=\"" << floodColor().nameForLayoutTreeAsText() << "\" "
<< "flood-opacity=\"" << floodOpacity() << "\"]\n";
return ts;
}
} // namespace blink
|
#include "bullet.hpp"
#include "stepfunc.hpp"
#include "gameengine.hpp"
// Step-function scripts for bullets: each table is a NULL-terminated sequence
// of BulletStepFunc callbacks that a bullet advances through as it updates.

// Empty script: no behavior. (Two NULLs so both a "current" and "next" read
// see a terminator — TODO confirm against the bullet stepper's read pattern.)
const BulletStepFunc StepFunc::nullStepFuncList[] = {
    NULL,
    NULL,
};

// Play the hit effect, then drop the bullet from the active list.
const BulletStepFunc StepFunc::effectHitStepFuncList[] = {
    StepFunc::effectHit,
    ListBullets::stepFuncDrop,
    NULL,
};

// Play the bonus-pickup effect, then drop the bullet.
const BulletStepFunc StepFunc::effectBonusStepFuncList[] = {
    StepFunc::effectBonus,
    ListBullets::stepFuncDrop,
    NULL,
};

// Explode, hide for 10 ticks, re-fire aimed at the original target, wait 20
// ticks, then resume at speed 2.
const BulletStepFunc StepFunc::explodeWithTargetSelfStepFuncList[] = {
    StepFunc::stop,
    StepFunc::explode,
    StepFunc::hidden,
    StepFunc::wait<10>,
    StepFunc::fireTargetSelf,
    StepFunc::wait<20>,
    StepFunc::setSpeed<2>,
    NULL,
};

// Same as above but re-fires along the bullet's original direction.
const BulletStepFunc StepFunc::explodeWithOriginalDirectionStepFuncList[] = {
    StepFunc::stop,
    StepFunc::explode,
    StepFunc::hidden,
    StepFunc::wait<10>,
    StepFunc::fireOriginalDirection,
    StepFunc::wait<20>,
    StepFunc::setSpeed<2>,
    NULL,
};
|
#include "ziggurat.hpp"
#include <fstream>
#include <string>
/// Loads the "<path> <timestamp>" pairs persisted by ~StampList.
/// If the stamp file is missing, unreadable, or was written by an older
/// ziggurat version, all stamps are reset to just the version marker.
StampList::StampList(const std::filesystem::path& stampFilePath)
    : dataPath(stampFilePath)
{
    std::ifstream stampFile(dataPath, std::ios::in);
    if (stampFile.good())
    {
        std::string filePath, timestamp;
        while (stampFile >> filePath >> timestamp)
        {
            // BUG FIX: std::stoll throws std::invalid_argument /
            // std::out_of_range on a corrupt stamp file, which previously
            // crashed construction. Skip unparsable entries instead; the
            // affected files will simply be treated as out of date.
            try
            {
                stamps[filePath] = std::stoll(timestamp);
            }
            catch (const std::exception&)
            {
                // Malformed entry — ignore it.
            }
        }
    }
    // Version bump (or missing marker) invalidates every recorded stamp.
    if (!stamps.count("ziggurat") || stamps["ziggurat"] < ZIGGURAT_VERSION)
    {
        stamps = { { "ziggurat", ZIGGURAT_VERSION } };
    }
}
/// Persists all stamps on destruction, replacing the previous file contents
/// with one "<path> <timestamp>" pair per line.
StampList::~StampList()
{
    std::ofstream stampFile(dataPath, std::ios::out | std::ios::trunc);
    for (const auto &entry : stamps)
    {
        stampFile << entry.first << " " << entry.second << "\n";
    }
}
/// Returns true (and records the new stamp) if `path` has been modified since
/// it was last stamped, or has never been stamped.
bool StampList::isOutOfDate(const std::filesystem::path& path)
{
    // Stamps are keyed by absolute path so relative lookups from different
    // working directories agree.
    std::string absolutePath = std::filesystem::absolute(path).string();
    // NOTE(review): file_time_type's epoch and tick rate are
    // implementation-defined before C++20's portable file_clock, so these
    // raw counts are only comparable against stamps written on the same
    // platform/toolchain — confirm that stamp files are not shared across
    // builds.
    long long current = std::filesystem::last_write_time(path).time_since_epoch().count();
    if (!stamps.count(absolutePath) || stamps.at(absolutePath) < current)
    {
        stamps[absolutePath] = current;
        return true;
    }
    return false;
}
|
#include "zrandom.h"
#include <QThreadStorage>
#include "QsLog.h"
#include <algorithm>
#if defined(_WIN32) || defined(_WIN64)
#include <intrin.h>
#define rdtsc __rdtsc
#else
// For everything else
// Reads the CPU time-stamp counter via the RDTSC instruction (EDX:EAX).
// NOTE(review): this inline asm is x86/x86_64-only; the "everything else"
// comment above is misleading — building for ARM etc. will fail here.
static unsigned long long rdtsc() {
    unsigned int lo,hi;
    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
    return ((unsigned long long)hi << 32) | lo;
}
#endif
/// Seeds the engine from the CPU time-stamp counter. m_size/m_idx start as
/// sentinel values (-2/-1) so uniqueRandNext() cannot accidentally report a
/// valid sequence before uniqueRandInit() has run.
ZRandom::ZRandom()
    :m_size(-2), m_idx(-1)
{
    m_eng.seed(rdtsc());
}

/// Returns a per-thread instance (QThreadStorage) on Qt >= 4.8; otherwise a
/// single shared instance, which is then NOT thread-safe.
ZRandom &ZRandom::getInstance()
{
#if (QT_VERSION >= QT_VERSION_CHECK(4, 8, 0))
    // should be thread local,
    // use qt or use boost thread_specific_ptr or wait for c++11 thread_local keyword
    static QThreadStorage<ZRandom> globalZRandom;
    return globalZRandom.localData();
#else
    static ZRandom random;
    return random;
#endif
}

/// Prepares a shuffled sequence of every integer in [minValue, maxValue] so
/// uniqueRandNext() can hand them out one at a time without repeats.
void ZRandom::uniqueRandInit(int maxValue, int minValue)
{
    assert(maxValue >= minValue);
    // Per-thread shuffle generator; QThreadStorage<T*> deletes it on thread exit.
    static QThreadStorage<RandomGeneratorForShuffle*> rg; // in c++11, we could use m_eng directly
    if (!rg.hasLocalData())
        rg.setLocalData(new RandomGeneratorForShuffle(*this));
    m_uniqueValues.resize(maxValue-minValue+1);
    m_size = maxValue-minValue+1;
    for (int i = 0; i < m_size; i++)
        m_uniqueValues[i] = i + minValue;
    // NOTE(review): std::random_shuffle was deprecated in C++14 and removed in
    // C++17 — migrate to std::shuffle if this code is ever built with a newer
    // standard (RandomGeneratorForShuffle would need the URBG interface).
    std::random_shuffle(m_uniqueValues.begin(), m_uniqueValues.end(), *rg.localData());
    m_idx = 0;
}

/// Returns the next unused value from the shuffled sequence, or INT_MIN (with
/// an error log) once the sequence is exhausted.
// must call uniqueRandInit first!!
int ZRandom::uniqueRandNext()
{
    if (m_idx == m_size) {
        LERROR() << "No more number.";
        return std::numeric_limits<int>::min();
    }
    else
        return m_uniqueValues[m_idx++];
}
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/renderer/extensions/file_browser_private_custom_bindings.h"
#include <string>
#include "base/basictypes.h"
#include "base/logging.h"
#include "chrome/renderer/extensions/chrome_v8_context.h"
#include "extensions/renderer/script_context.h"
#include "third_party/WebKit/public/platform/WebString.h"
#include "third_party/WebKit/public/web/WebDOMFileSystem.h"
#include "third_party/WebKit/public/web/WebLocalFrame.h"
namespace extensions {
/// Registers the native "GetFileSystem" function for the fileBrowserPrivate
/// extension API's JavaScript bindings.
FileBrowserPrivateCustomBindings::FileBrowserPrivateCustomBindings(
    ScriptContext* context)
    : ObjectBackedNativeHandler(context) {
  RouteFunction(
      "GetFileSystem",
      base::Bind(&FileBrowserPrivateCustomBindings::GetFileSystem,
                 base::Unretained(this)));
}

/// JS signature: GetFileSystem(name, rootUrl) -> DOMFileSystem.
/// Creates an external-type WebDOMFileSystem for the calling frame and returns
/// its V8 wrapper.
void FileBrowserPrivateCustomBindings::GetFileSystem(
    const v8::FunctionCallbackInfo<v8::Value>& args) {
  DCHECK(args.Length() == 2);
  DCHECK(args[0]->IsString());
  DCHECK(args[1]->IsString());
  // NOTE(review): Utf8Value's operator* returns NULL if conversion fails;
  // the DCHECKs above are compiled out in release builds, so a non-string
  // argument would construct std::string from NULL (UB) — confirm callers
  // are trusted bindings only.
  std::string name(*v8::String::Utf8Value(args[0]));
  std::string root_url(*v8::String::Utf8Value(args[1]));
  blink::WebLocalFrame* webframe =
      blink::WebLocalFrame::frameForContext(context()->v8_context());
  DCHECK(webframe);
  args.GetReturnValue().Set(
      blink::WebDOMFileSystem::create(webframe,
                                      blink::WebFileSystemTypeExternal,
                                      blink::WebString::fromUTF8(name),
                                      GURL(root_url))
          .toV8Value(args.Holder(), args.GetIsolate()));
}
} // namespace extensions
|
//Insertoion sort presumes the the array till curr index is sorted and places the curr element in the sort part at its correct place
#include<iostream>
#include<vector>
using namespace std;
/// Sorts arr[0..len) in place, ascending, using insertion sort.
/// Invariant: before each outer iteration, arr[0..i) is already sorted; the
/// element at i is then inserted into its correct position in that prefix.
void insertionsort(int arr[], int len) {
    for (int i = 1; i < len; i++) {
        int value = arr[i];
        int slot = i;
        // Shift every larger element of the sorted prefix one step right.
        while (slot > 0 && arr[slot - 1] > value) {
            arr[slot] = arr[slot - 1];
            slot--;
        }
        arr[slot] = value;  // slot is now the first position whose left neighbor is <= value
    }
}
/// Reads a count and that many integers from stdin, sorts them with
/// insertionsort, and prints the result.
int main(){
    int len;
    cout<<"Enter the number of elements to sort: ";
    cin>>len;
    // BUG FIX: the original used `int arr[len]`, a variable-length array,
    // which is not standard C++ (and was unchecked for len <= 0 or failed
    // input). Use std::vector and validate the count instead.
    if (!cin || len <= 0) {
        return 0;
    }
    std::vector<int> arr(len, 0);
    cout<<"Enter the elements to sort: \n";
    for (int idx = 0; idx < len; idx++) {
        cin >> arr[idx];
    }
    insertionsort(arr.data(), len);
    cout<<"Sorted array: ";
    for (int idx = 0; idx < len; idx++) {
        cout << arr[idx] << " ";
    }
    return 0;
}
|
/* Author: Zachary Kingston */
#include <boost/format.hpp>
#include <dart/dynamics/InverseKinematics.hpp>
#include <dart/dynamics/SimpleFrame.hpp>
#include <robowflex_library/constants.h>
#include <robowflex_library/log.h>
#include <robowflex_library/tf.h>
#include <robowflex_dart/robot.h>
#include <robowflex_dart/space.h>
#include <robowflex_dart/structure.h>
#include <robowflex_dart/tsr.h>
#include <robowflex_dart/world.h>
namespace constants = robowflex::constants;
using namespace robowflex::darts;
///
/// TSR::Specification
///
/// Construct a specification for `target_frame` on `structure`, posed at
/// `position`/`rotation`. The base defaults to the world root frame.
TSR::Specification::Specification(const std::string &structure, const std::string &target_frame,
                                  const Eigen::Ref<const Eigen::Vector3d> &position,
                                  const Eigen::Quaterniond &rotation)
{
    setTarget(structure, target_frame);
    setPose(position, rotation);
}

/// Set the target frame. The base structure defaults to the target's
/// structure if it has not been set yet.
void TSR::Specification::setTarget(const std::string &structure, const std::string &frame)
{
    target.structure = structure;
    target.frame = frame;
    if (base.structure.empty())
        base.structure = structure;
}

/// Set the base (reference) frame explicitly.
void TSR::Specification::setBase(const std::string &structure, const std::string &frame)
{
    base.structure = structure;
    base.frame = frame;
}

/// Set both target and base frames on the same structure.
void TSR::Specification::setFrame(const std::string &structure, const std::string &target_frame,
                                  const std::string &base_frame)
{
    target.structure = structure;
    base.structure = structure;
    target.frame = target_frame;
    base.frame = base_frame;
}

/// Append a suffix to both structure names (e.g., for cloned robots).
void TSR::Specification::addSuffix(const std::string &suffix)
{
    target.structure = target.structure + suffix;
    base.structure = base.structure + suffix;
}

/// Set only the translational part of the target pose.
void TSR::Specification::setPosition(const Eigen::Ref<const Eigen::Vector3d> &position)
{
    pose.translation() = position;
}

void TSR::Specification::setPosition(double x, double y, double z)
{
    setPosition(Eigen::Vector3d(x, y, z));
}

/// Set only the rotational part of the target pose.
void TSR::Specification::setRotation(const Eigen::Quaterniond &orientation)
{
    pose.linear() = orientation.toRotationMatrix();
}

void TSR::Specification::setRotation(double w, double x, double y, double z)
{
    setRotation(Eigen::Quaterniond(w, x, y, z));
}

/// Set the rotation from XYZ Euler angles (intrinsic X, then Y, then Z).
void TSR::Specification::setRotation(double x, double y, double z)
{
    auto n = Eigen::AngleAxisd(x, Eigen::Vector3d::UnitX()) *  //
             Eigen::AngleAxisd(y, Eigen::Vector3d::UnitY()) *  //
             Eigen::AngleAxisd(z, Eigen::Vector3d::UnitZ());
    setRotation(n);
}

/// Set the full target pose from an existing transform.
void TSR::Specification::setPose(const RobotPose &other)
{
    pose = other;
}

void TSR::Specification::setPose(const Eigen::Ref<const Eigen::Vector3d> &position,
                                 const Eigen::Quaterniond &rotation)
{
    pose = TF::createPoseQ(position, rotation);
}

/// Set the pose from position (xp, yp, zp) and quaternion (wr, xr, yr, zr).
void TSR::Specification::setPose(double xp, double yp, double zp, double wr, double xr, double yr, double zr)
{
    setPosition(xp, yp, zp);
    setRotation(wr, xr, yr, zr);
}

/// Capture the target frame's current pose from the simulated world, expressed
/// relative to the base frame (or the world root if no base frame is set).
void TSR::Specification::setPoseFromWorld(const WorldPtr &world)
{
    const auto &sim = world->getSim();
    const auto &tskl = sim->getSkeleton(target.structure);
    const auto &tbn = tskl->getBodyNode(target.frame);
    if (base.frame != magic::ROOT_FRAME)
    {
        const auto &bskl = sim->getSkeleton(base.structure);
        const auto &bbn = bskl->getBodyNode(base.frame);
        pose = tbn->getTransform(bbn);
    }
    else
        pose = tbn->getTransform();
}

// Symmetric position-tolerance conveniences: bound b expands to [-b, b].
void TSR::Specification::setXPosTolerance(double bound)
{
    setXPosTolerance(-bound, bound);
}

void TSR::Specification::setYPosTolerance(double bound)
{
    setYPosTolerance(-bound, bound);
}

void TSR::Specification::setZPosTolerance(double bound)
{
    setZPosTolerance(-bound, bound);
}

// Asymmetric position tolerances. indices[3..5] track which position axes are
// constrained; `dimension` caches the total constrained-axis count.
void TSR::Specification::setXPosTolerance(double lower, double upper)
{
    position.lower[0] = lower;
    position.upper[0] = upper;
    fixBounds();
    indices[3] = isPosConstrained(lower, upper);
    dimension = getDimension();
}

void TSR::Specification::setYPosTolerance(double lower, double upper)
{
    position.lower[1] = lower;
    position.upper[1] = upper;
    fixBounds();
    indices[4] = isPosConstrained(lower, upper);
    dimension = getDimension();
}

void TSR::Specification::setZPosTolerance(double lower, double upper)
{
    position.lower[2] = lower;
    position.upper[2] = upper;
    fixBounds();
    indices[5] = isPosConstrained(lower, upper);
    dimension = getDimension();
}

// Symmetric orientation-tolerance conveniences: bound b expands to [-b, b].
void TSR::Specification::setXRotTolerance(double bound)
{
    setXRotTolerance(-bound, bound);
}

void TSR::Specification::setYRotTolerance(double bound)
{
    setYRotTolerance(-bound, bound);
}

void TSR::Specification::setZRotTolerance(double bound)
{
    setZRotTolerance(-bound, bound);
}

// Asymmetric orientation tolerances. indices[0..2] track which rotation axes
// are constrained.
void TSR::Specification::setXRotTolerance(double lower, double upper)
{
    orientation.lower[0] = lower;
    orientation.upper[0] = upper;
    fixBounds();
    indices[0] = isRotConstrained(lower, upper);
    dimension = getDimension();
}

void TSR::Specification::setYRotTolerance(double lower, double upper)
{
    orientation.lower[1] = lower;
    orientation.upper[1] = upper;
    fixBounds();
    indices[1] = isRotConstrained(lower, upper);
    dimension = getDimension();
}

void TSR::Specification::setZRotTolerance(double lower, double upper)
{
    orientation.lower[2] = lower;
    orientation.upper[2] = upper;
    fixBounds();
    indices[2] = isRotConstrained(lower, upper);
    dimension = getDimension();
}

// "No tolerance" = unconstrained. Position uses infinity (isPosConstrained
// checks finiteness); rotation uses +/- pi (isRotConstrained checks whether
// the interval spans less than a full circle).
void TSR::Specification::setNoXPosTolerance()
{
    setXPosTolerance(std::numeric_limits<double>::infinity());
}

void TSR::Specification::setNoYPosTolerance()
{
    setYPosTolerance(std::numeric_limits<double>::infinity());
}

void TSR::Specification::setNoZPosTolerance()
{
    setZPosTolerance(std::numeric_limits<double>::infinity());
}

void TSR::Specification::setNoPosTolerance()
{
    setNoXPosTolerance();
    setNoYPosTolerance();
    setNoZPosTolerance();
}

void TSR::Specification::setNoXRotTolerance()
{
    setXRotTolerance(constants::pi);
}

void TSR::Specification::setNoYRotTolerance()
{
    setYRotTolerance(constants::pi);
}

void TSR::Specification::setNoZRotTolerance()
{
    setZRotTolerance(constants::pi);
}

void TSR::Specification::setNoRotTolerance()
{
    setNoXRotTolerance();
    setNoYRotTolerance();
    setNoZRotTolerance();
}
/// Normalizes the tolerance bounds: ensures lower <= upper on every axis, and
/// clamps orientation bounds into [-pi, pi] (position bounds may be infinite
/// to mean "unconstrained", so they are only reordered, never clamped).
void TSR::Specification::fixBounds()
{
    {
        Eigen::Vector3d u, l;
        for (std::size_t i = 0; i < 3; ++i)
        {
            u[i] = std::max(position.lower[i], position.upper[i]);
            l[i] = std::min(position.lower[i], position.upper[i]);
        }
        position.lower = l;
        position.upper = u;
    }
    {
        Eigen::Vector3d u, l;
        for (std::size_t i = 0; i < 3; ++i)
        {
            // BUG FIX: the previous code took max{lower, upper, -pi} /
            // min{lower, upper, +pi}, which only guarantees upper >= -pi and
            // lower <= +pi — a bound outside [-pi, pi] (e.g. 2*pi) was left
            // unclamped. Reorder first, then clamp each side into the
            // circle's principal range.
            u[i] = std::min(std::max(orientation.lower[i], orientation.upper[i]), constants::pi);
            l[i] = std::max(std::min(orientation.lower[i], orientation.upper[i]), -constants::pi);
        }
        orientation.lower = l;
        orientation.upper = u;
    }
}
/// Number of constrained task-space dimensions (count of set flags in
/// `indices`: 3 rotation axes + 3 position axes).
std::size_t TSR::Specification::getDimension() const
{
    std::size_t k = 0;
    for (const auto &idx : indices)
    {
        if (idx)
            k++;
    }
    return k;
}

/// A position axis is constrained unless both bounds are infinite.
bool TSR::Specification::isPosConstrained(double lower, double upper) const
{
    return std::isfinite(lower) or std::isfinite(upper);
}

/// A rotation axis is constrained unless its interval spans the full circle.
bool TSR::Specification::isRotConstrained(double lower, double upper) const
{
    return std::abs(upper - lower) < constants::two_pi;
}

Eigen::Vector3d TSR::Specification::getPosition() const
{
    return pose.translation();
}

Eigen::Quaterniond TSR::Specification::getRotation() const
{
    return TF::getPoseRotation(pose);
}

/// Rotation as XYZ Euler angles (matches the setRotation(x, y, z) overload).
Eigen::Vector3d TSR::Specification::getEulerRotation() const
{
    return getRotation().toRotationMatrix().eulerAngles(0, 1, 2);
}

/// Attempts to merge `other` into this specification, shrinking the bounds to
/// the intersection of the two regions. Returns false (leaving this
/// unchanged) if the frames differ or the position regions do not overlap.
bool TSR::Specification::intersect(const Specification &other)
{
    // must be same reference frame
    if (target.structure != other.target.structure or target.frame != other.target.frame)
        return false;
    if (base.structure != other.base.structure or base.frame != other.base.frame)
        return false;
    // TODO: Check if rotations overlap
    // if (getRotation().angularDistance(other.getRotation()) > magic::DEFAULT_IK_TOLERANCE)
    //     return false;
    Eigen::Vector3d p = getPosition();
    Eigen::Vector3d op = other.getPosition();
    // check if positions overlap
    for (std::size_t i = 0; i < 3; ++i)
    {
        double pi = p[i], piu = position.upper[i], pil = position.lower[i];
        double opi = op[i], opiu = other.position.upper[i], opil = other.position.lower[i];
        //  pi---------> piu
        //  --|------|------|------|-- axis
        //       opil <---------opi
        // Check if overlap
        if (pi < opi)
            if ((pi + piu) < (opi + opil))
                return false;
        //     opi--------> opiu
        //  --|------|------|------|-- axis
        //  pil <----------pi
        // Check if overlap
        if (pi > opi)
            if ((pi + pil) > (opi + opiu))
                return false;
    }
    Eigen::Vector3d np;
    // If this TSR had no rotation constraint, adopt the other's rotation.
    if (not isRotationConstrained())
        setRotation(other.getRotation());
    // enforce new bounds
    for (std::size_t i = 0; i < 3; ++i)
    {
        // Orientation: take the tighter of each bound.
        if (other.orientation.lower[i] > orientation.lower[i])
            orientation.lower[i] = other.orientation.lower[i];
        if (other.orientation.upper[i] < orientation.upper[i])
            orientation.upper[i] = other.orientation.upper[i];
        // Position: recenter on the midpoint of the overlapping interval and
        // make the bounds symmetric around it.
        double pi = p[i], piu = position.upper[i], pil = position.lower[i];
        double opi = op[i], opiu = other.position.upper[i], opil = other.position.lower[i];
        double low = std::max(pi + pil, opi + opil);
        double high = std::min(pi + piu, opi + opiu);
        np[i] = (high + low) / 2.;
        double v = std::fabs(high - np[i]);
        position.upper[i] = v;
        position.lower[i] = -v;
        indices[i] = isRotConstrained(orientation.lower[i], orientation.upper[i]);
        indices[3 + i] = isPosConstrained(-v, v);
    }
    dimension = getDimension();
    setPosition(np);
    return true;
}

/// True if the base frame is not the world root (i.e., the TSR is expressed
/// relative to another body).
bool TSR::Specification::isRelative() const
{
    return base.frame != magic::ROOT_FRAME;
}

/// True if any position axis is constrained.
bool TSR::Specification::isPositionConstrained() const
{
    bool value = false;
    for (std::size_t i = 0; i < 3; ++i)
        value |= isPosConstrained(position.lower[i], position.upper[i]);
    return value;
}

/// True if any rotation axis is constrained.
bool TSR::Specification::isRotationConstrained() const
{
    bool value = false;
    for (std::size_t i = 0; i < 3; ++i)
        value |= isRotConstrained(orientation.lower[i], orientation.upper[i]);
    return value;
}

/// Pretty-prints the frames, pose, bounds, and constrained-axis flags.
void TSR::Specification::print(std::ostream &out) const
{
    if (isRelative())
        out << boost::format("B:%1%:%2% => T:%3%:%4%")  //
                   % base.frame % base.structure % target.frame % target.structure;
    else
        out << boost::format("T:%1%:%2%")  //
                   % target.frame % target.structure;
    out << std::endl;
    out << "Position                          Orientation" << std::endl;
    auto p = getPosition();
    auto o = getRotation();
    out << boost::format(                                                                        //
               " x:%1$+07.4f (%2$+07.4f, %3$+07.4f) [%7%]  x:%4$+07.4f (%5$+07.4f, %6$+07.4f) [%8%]")  //
               % p[0] % position.lower[0] % position.upper[0]                                    //
               % o.x() % orientation.lower[0] % orientation.upper[0]                             //
               % indices[3] % indices[0];
    out << std::endl;
    out << boost::format(                                                                        //
               " y:%1$+07.4f (%2$+07.4f, %3$+07.4f) [%7%]  y:%4$+07.4f (%5$+07.4f, %6$+07.4f) [%8%]")  //
               % p[1] % position.lower[1] % position.upper[1]                                    //
               % o.y() % orientation.lower[1] % orientation.upper[1]                             //
               % indices[4] % indices[1];
    out << std::endl;
    out << boost::format(                                                                        //
               " z:%1$+07.4f (%2$+07.4f, %3$+07.4f) [%7%]  z:%4$+07.4f (%5$+07.4f, %6$+07.4f) [%8%]")  //
               % p[2] % position.lower[2] % position.upper[2]                                    //
               % o.z() % orientation.lower[2] % orientation.upper[2]                             //
               % indices[5] % indices[2];
    out << std::endl;
    out << boost::format(" w:%1$+07.4f") % o.w();
    out << std::endl;
}

///
/// TSR
///

/// A TSR instance is lazily bound to DART structures via initialize().
TSR::TSR(const WorldPtr &world, const Specification &spec) : world_(world), spec_(spec)
{
}

TSR::~TSR()
{
    clear();
}

/// Releases all DART resources (IK module, frames, error method).
void TSR::clear()
{
    if (tnd_ and ik_)
        tnd_->clearIK();
    frame_ = nullptr;
    ik_ = nullptr;
    tnd_ = nullptr;
    tsr_ = nullptr;
}

/// Rebinds this TSR to a different world (e.g., a cloned simulation).
void TSR::setWorld(const WorldPtr &world)
{
    clear();
    world_ = world;
    initialize();
}
/// Restricts the TSR's IK to the DoF indices of the named planning group on
/// the target robot.
/// @throws std::runtime_error if the target structure is not a robot in the
///         world.
void TSR::useGroup(const std::string &name)
{
    auto robot = world_->getRobot(spec_.target.structure);
    if (not robot)
        // BUG FIX: message previously read "does exist" — missing "not".
        throw std::runtime_error("Target robot does not exist in world!");
    useIndices(robot->getGroupIndices(name));
}
/// Use an explicit set of DoF indices. If the IK module already exists the
/// set is pushed into it (and read back, as DART may normalize it), then the
/// world<->state bijection is refreshed.
void TSR::useIndices(const std::vector<std::size_t> &indices)
{
    indices_ = indices;
    if (ik_)
    {
        ik_->setDofs(indices_);
        indices_ = ik_->getDofs();
    }
    computeBijection();
}

/// Filter (skeleton, dof) world indices down to this TSR's skeleton and use
/// the resulting DoF indices.
void TSR::useWorldIndices(const std::vector<std::pair<std::size_t, std::size_t>> &indices)
{
    std::vector<std::size_t> use;
    for (const auto &index : indices)
    {
        if (index.first == getSkeletonIndex())
            use.emplace_back(index.second);
    }
    useIndices(use);
}

/// Record the world-level (skeleton, dof) index layout used by world-state
/// vectors, and recompute the bijection into this TSR's local state.
void TSR::setWorldIndices(const std::vector<std::pair<std::size_t, std::size_t>> &indices)
{
    world_indices_ = indices;
    computeBijection();
}

/// Index of the target skeleton in the world (initializes lazily).
std::size_t TSR::getSkeletonIndex()
{
    if (not tnd_)
        initialize();
    return skel_index_;
}

const std::vector<std::size_t> &TSR::getIndices() const
{
    return indices_;
}

/// Expand local DoF indices into (skeleton, dof) world pairs.
std::vector<std::pair<std::size_t, std::size_t>> TSR::computeWorldIndices() const
{
    std::vector<std::pair<std::size_t, std::size_t>> wi;
    for (const auto &index : indices_)
        wi.emplace_back(skel_index_, index);
    return wi;
}

const std::vector<std::pair<std::size_t, std::size_t>> &TSR::getWorldIndices() const
{
    return world_indices_;
}

/// Number of constrained task-space dimensions (from the specification).
std::size_t TSR::getDimension() const
{
    return spec_.dimension;
}

/// Number of DoFs this TSR's IK operates over.
std::size_t TSR::getNumDofs() const
{
    return indices_.size();
}

std::size_t TSR::getNumWorldDofs() const
{
    return world_indices_.size();
}

/// Transform of the target body node expressed in the base frame.
robowflex::RobotPose TSR::getTransformToFrame() const
{
    const auto &sim = world_->getSim();
    const auto &bskl = sim->getSkeleton(spec_.base.structure);
    auto *bnd = bskl->getBodyNode(spec_.base.frame);
    if (not tnd_)
        throw std::runtime_error("Target body node is not initialized");
    return tnd_->getTransform(bnd);
}

/// Full 6-vector task-space error at the world's current configuration
/// (no filtering by constrained dimensions).
void TSR::getErrorWorldRaw(Eigen::Ref<Eigen::VectorXd> error) const
{
    world_->lock();
    error = tsr_->computeError();
    world_->unlock();
}

/// Task-space error at the world's current configuration, compacted to only
/// the constrained dimensions (error must have getDimension() entries).
void TSR::getErrorWorld(Eigen::Ref<Eigen::VectorXd> error) const
{
    world_->lock();
    auto tsr_error = tsr_->computeError();
    std::size_t j = 0;
    for (std::size_t i = 0; i < 6; ++i)
    {
        if (spec_.indices[i])
            error[j++] = tsr_error[i];
    }
    world_->unlock();
}

/// Error for a world-indexed configuration vector; maps through the bijection
/// into this TSR's local DoF state when one exists.
void TSR::getErrorWorldState(const Eigen::Ref<const Eigen::VectorXd> &world,
                             Eigen::Ref<Eigen::VectorXd> error) const
{
    if (bijection_.empty())
        getError(world, error);
    else
    {
        Eigen::VectorXd state(getNumDofs());
        fromBijection(state, world);
        getError(state, error);
    }
}

/// Error for a local DoF state: applies the state, then evaluates.
/// Note: mutates the world's configuration as a side effect.
void TSR::getError(const Eigen::Ref<const Eigen::VectorXd> &state, Eigen::Ref<Eigen::VectorXd> error) const
{
    setPositions(state);
    getErrorWorld(error);
}

/// Task Jacobian at the current configuration, compacted to the constrained
/// rows (jacobian must be getDimension() x getNumDofs()).
void TSR::getJacobianWorld(Eigen::Ref<Eigen::MatrixXd> jacobian) const
{
    world_->lock();
    auto tjac = ik_->computeJacobian();
    std::size_t j = 0;
    for (std::size_t i = 0; i < 6; ++i)
    {
        if (spec_.indices[i])
            jacobian.row(j++) = tjac.row(i);
    }
    world_->unlock();
}
/// Jacobian for a world-indexed configuration: maps the world vector through
/// the bijection into local DoFs, evaluates, then scatters the columns back
/// into world column order (jacobian must be getDimension() x world size).
void TSR::getJacobianWorldState(const Eigen::Ref<const Eigen::VectorXd> &world,
                                Eigen::Ref<Eigen::MatrixXd> jacobian) const
{
    if (bijection_.empty())
        getJacobian(world, jacobian);
    else
    {
        Eigen::VectorXd state(getNumDofs());
        fromBijection(state, world);
        // BUG FIX: the scratch Jacobian was declared Eigen::VectorXd with
        // (rows, cols) arguments; VectorXd's two-argument constructor asserts
        // cols == 1, so this failed whenever getNumDofs() > 1. It is a
        // dimension x numdofs matrix and must be Eigen::MatrixXd.
        Eigen::MatrixXd tjac(getDimension(), getNumDofs());
        getJacobian(state, tjac);
        for (std::size_t i = 0; i < bijection_.size(); ++i)
            if (bijection_[i] < world_indices_.size())
                jacobian.col(bijection_[i]) = tjac.col(i);
    }
}
/// Jacobian for a local DoF state: applies the state, then evaluates.
void TSR::getJacobian(const Eigen::Ref<const Eigen::VectorXd> &state,
                      Eigen::Ref<Eigen::MatrixXd> jacobian) const
{
    world_->lock();
    setPositions(state);
    getJacobianWorld(jacobian);
    world_->unlock();
}

/// Euclidean norm of the compacted error at the current world configuration.
double TSR::distanceWorld() const
{
    Eigen::VectorXd x(getDimension());
    getErrorWorld(x);
    return x.norm();
}

double TSR::distanceWorldState(const Eigen::Ref<const Eigen::VectorXd> &world) const
{
    Eigen::VectorXd x(getDimension());
    getErrorWorldState(world, x);
    return x.norm();
}

double TSR::distance(const Eigen::Ref<const Eigen::VectorXd> &state) const
{
    Eigen::VectorXd x(getDimension());
    getError(state, x);
    return x.norm();
}

/// Solve IK with DART's solver at the current world configuration, applying
/// the result in place. Returns success.
bool TSR::solveWorld()
{
    if (not ik_)
    {
        RBX_ERROR("TSR: Solve called before initialize!");
        return false;
    }
    world_->lock();
    bool r = ik_->solveAndApply();
    world_->unlock();
    return r;
}

/// Solve starting from a world-indexed configuration, writing the solution
/// back into `world` through the bijection.
bool TSR::solveWorldState(Eigen::Ref<Eigen::VectorXd> world)
{
    if (bijection_.empty())
        return solve(world);
    Eigen::VectorXd state(getNumDofs());
    fromBijection(state, world);
    bool r = solve(state);
    toBijection(world, state);
    return r;
}

/// Solve starting from a local DoF state, writing the solution back into
/// `state`.
bool TSR::solve(Eigen::Ref<Eigen::VectorXd> state)
{
    world_->lock();
    setPositions(state);
    bool r = solveWorld();
    getPositions(state);
    world_->unlock();
    return r;
}

/// Gradient-descent solve at the current world configuration, applying the
/// result in place.
bool TSR::solveGradientWorld()
{
    world_->lock();
    Eigen::VectorXd state = ik_->getPositions();
    bool r = solveGradient(state);
    setPositions(state);
    world_->unlock();
    return r;
}

bool TSR::solveGradientWorldState(Eigen::Ref<Eigen::VectorXd> world)
{
    if (bijection_.empty())
        return solveGradient(world);
    Eigen::VectorXd state(getNumDofs());
    fromBijection(state, world);
    bool r = solveGradient(state);
    toBijection(world, state);
    return r;
}

/// Newton-style gradient descent on the task-space error using the SVD
/// pseudo-inverse of the Jacobian. Iterates until convergence or maxIter.
bool TSR::solveGradient(Eigen::Ref<Eigen::VectorXd> state)
{
    world_->lock();
    unsigned int iter = 0;
    double norm = 0;
    Eigen::VectorXd f(getDimension());
    Eigen::MatrixXd j(getDimension(), getNumDofs());
    // NOTE(review): `norm` is the (unsquared) error norm but is compared
    // against tolerance^2 below — for tolerance < 1 this is stricter than
    // the nominal tolerance. Confirm whether norm should be squaredNorm(),
    // or the threshold should be spec_.tolerance.
    const double squared_tolerance = spec_.tolerance * spec_.tolerance;
    setPositions(state);
    getErrorWorld(f);
    while ((norm = f.norm()) > squared_tolerance && iter++ < spec_.maxIter)
    {
        getJacobianWorld(j);
        state -= j.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(f);
        setPositions(state);
        getErrorWorld(f);
    }
    world_->unlock();
    return norm < squared_tolerance;
}

/// Apply a world-indexed configuration to the simulation (through bijection).
void TSR::setPositionsWorldState(const Eigen::Ref<const Eigen::VectorXd> &world) const
{
    Eigen::VectorXd state(getNumDofs());
    fromBijection(state, world);
    setPositions(state);
}

/// Apply a local DoF state to the simulation.
void TSR::setPositions(const Eigen::Ref<const Eigen::VectorXd> &state) const
{
    world_->lock();
    ik_->setPositions(state);
    world_->unlock();
}

/// Read the current configuration into a world-indexed vector.
void TSR::getPositionsWorldState(Eigen::Ref<Eigen::VectorXd> world) const
{
    Eigen::VectorXd state(getNumDofs());
    getPositions(state);
    toBijection(world, state);
}

/// Read the current configuration into a local DoF state.
void TSR::getPositions(Eigen::Ref<Eigen::VectorXd> state) const
{
    world_->lock();
    state = ik_->getPositions();
    world_->unlock();
}

TSR::Specification &TSR::getSpecification()
{
    return spec_;
}

/// Push the specification's pose into the DART target frame.
void TSR::updatePose()
{
    if (frame_)
        frame_->setRelativeTransform(spec_.pose);
}

/// Push the specification's tolerance bounds into the DART error method.
void TSR::updateBounds()
{
    if (tsr_)
    {
        tsr_->setLinearBounds(spec_.position.lower, spec_.position.upper);
        tsr_->setAngularBounds(spec_.orientation.lower, spec_.orientation.upper);
    }
}

/// Push the specification's tolerance/iteration limits into the DART solver.
void TSR::updateSolver()
{
    if (ik_)
    {
        ik_->getSolver()->setTolerance(spec_.tolerance);
        ik_->getSolver()->setNumMaxIterations(spec_.maxIter);
    }
}

/// Bind this TSR to DART structures: resolves skeletons and body nodes,
/// creates the IK module and target frame (re-parented onto the base frame
/// when the TSR is relative), and installs the TaskSpaceRegion error method.
void TSR::initialize()
{
    const auto &sim = world_->getSim();
    const auto &tskl = sim->getSkeleton(spec_.target.structure);
    if (not tskl)
        throw std::runtime_error("Target skeleton " + spec_.target.structure + " in TSR does not exist!");
    skel_index_ = world_->getSkeletonIndex(tskl);
    const auto &bskl = sim->getSkeleton(spec_.base.structure);
    if (not bskl)
        throw std::runtime_error("Base skeleton " + spec_.base.structure + " in TSR does not exist!");
    tnd_ = tskl->getBodyNode(spec_.target.frame);
    ik_ = tnd_->getIK(true);
    frame_ = ik_->getTarget();
    if (spec_.base.frame != magic::ROOT_FRAME)
    {
        auto *bnd = bskl->getBodyNode(spec_.base.frame);
        frame_ = frame_->clone(bnd);
    }
    ik_->setTarget(frame_);
    tsr_ = &ik_->setErrorMethod<dart::dynamics::InverseKinematics::TaskSpaceRegion>();
    tsr_->setComputeFromCenter(false);
    updatePose();
    updateBounds();
    updateSolver();
    if (indices_.empty())
        indices_ = ik_->getDofs();
    else
        ik_->setDofs(indices_);
}

/// Scatter a local state into a world vector using the bijection
/// (bijection_[i] == world_indices_.size() marks an unmapped local DoF).
void TSR::toBijection(Eigen::Ref<Eigen::VectorXd> world, const Eigen::Ref<const Eigen::VectorXd> &state) const
{
    if (bijection_.empty())
        world = state;
    else
        for (std::size_t i = 0; i < bijection_.size(); ++i)
            if (bijection_[i] < world_indices_.size())
                world[bijection_[i]] = state[i];
}

/// Gather a local state from a world vector using the bijection.
void TSR::fromBijection(Eigen::Ref<Eigen::VectorXd> state,
                        const Eigen::Ref<const Eigen::VectorXd> &world) const
{
    if (bijection_.empty())
        state = world;
    else
        for (std::size_t i = 0; i < bijection_.size(); ++i)
            if (bijection_[i] < world_indices_.size())
                state[i] = world[bijection_[i]];
}

/// Build the local-to-world index mapping. If the mapping turns out to be the
/// identity, bijection_ is cleared so the fast path (direct copy) is used.
void TSR::computeBijection()
{
    if (world_indices_.empty())
        return;
    bijection_.resize(indices_.size());
    bool same = world_indices_.size() == indices_.size();
    for (std::size_t i = 0; i < indices_.size(); ++i)
    {
        // Sentinel: world_indices_.size() means "no world slot for this DoF".
        bijection_[i] = world_indices_.size();
        for (std::size_t j = 0; j < world_indices_.size(); ++j)
        {
            auto entry = world_indices_[j];
            if (entry.first == getSkeletonIndex() and entry.second == indices_[i])
            {
                bijection_[i] = j;
                same &= i == j;
                break;
            }
        }
    }
    if (same)
        bijection_.clear();
}

///
/// TSRSet
///

TSRSet::TSRSet(const WorldPtr &world, const TSRPtr &tsr) : world_(world)
{
    addTSR(tsr);
}

TSRSet::TSRSet(const WorldPtr &world, const std::vector<TSRPtr> &tsrs, bool intersect) : world_(world)
{
    for (const auto &tsr : tsrs)
        addTSR(tsr, intersect);
}
/// Adds a TSR to the set. When `intersect` is true, first tries to merge its
/// specification into an already-added TSR for the same frames; if that
/// succeeds the new TSR is absorbed and only the set's total dimension is
/// recomputed. Relative TSRs added with the default weight are down-weighted.
void TSRSet::addTSR(const TSRPtr &tsr, bool intersect, double weight)
{
    TSRPtr ntsr = tsr;
    TSR::Specification spec = tsr->getSpecification();
    if (intersect)
    {
        // BUG FIX: both inner loops used `tsr` as the loop variable, shadowing
        // the parameter of the same name — legal but error-prone; renamed.
        for (auto &existing : tsrs_)
            // Don't need this entire TSR if we can intersect
            if (existing->getSpecification().intersect(spec))
            {
                dimension_ = 0;
                for (const auto &element : tsrs_)
                    dimension_ += element->getDimension();
                return;
            }
        // copy for intersections later
        ntsr = std::make_shared<TSR>(world_, spec);
        ntsr->useIndices(tsr->getIndices());
        ntsr->setWorldIndices(tsr->getWorldIndices());
        // weight relative frames less, but only when the caller left the
        // default weight of 1.
        // BUG FIX: the test was `(weight - 1.) < eps`, which is true for ANY
        // weight below 1 and silently clobbered explicit caller weights
        // (e.g. 0.5 -> 0.1). Compare the distance from 1 instead.
        if (std::fabs(weight - 1.) < constants::eps)
            if (spec.isRelative())
                weight = 0.1;
    }
    tsrs_.emplace_back(ntsr);
    weights_.emplace_back(weight);
    dimension_ += ntsr->getDimension();
    tolerance_ = std::min(tolerance_, spec.tolerance);
}
std::size_t TSRSet::numTSRs() const
{
return tsrs_.size();
}
const std::vector<TSRPtr> &TSRSet::getTSRs() const
{
return tsrs_;
}
void TSRSet::setWorld(const WorldPtr &world)
{
for (auto &tsr : tsrs_)
tsr->setWorld(world);
world_ = world;
initialize();
}
void TSRSet::addSuffix(const std::string &suffix)
{
for (auto &tsr : tsrs_)
tsr->getSpecification().addSuffix(suffix);
}
void TSRSet::useGroup(const std::string &name)
{
for (auto &tsr : tsrs_)
tsr->useGroup(name);
}
void TSRSet::useIndices(const std::vector<std::size_t> &indices)
{
for (auto &tsr : tsrs_)
tsr->useIndices(indices);
}
void TSRSet::useWorldIndices(const std::vector<std::pair<std::size_t, std::size_t>> &indices)
{
for (auto &tsr : tsrs_)
tsr->useWorldIndices(indices);
}
void TSRSet::setWorldIndices(const std::vector<std::pair<std::size_t, std::size_t>> &indices)
{
for (auto &tsr : tsrs_)
tsr->setWorldIndices(indices);
}
std::size_t TSRSet::getDimension() const
{
return dimension_;
}
void TSRSet::getErrorWorld(Eigen::Ref<Eigen::VectorXd> error) const
{
world_->lock();
std::size_t i = 0;
for (std::size_t j = 0; j < tsrs_.size(); ++j)
{
const auto &tsr = tsrs_[j];
tsr->getErrorWorld(error.segment(i, tsr->getDimension()));
error.segment(i, tsr->getDimension()) *= weights_[j];
i += tsr->getDimension();
}
world_->unlock();
}
void TSRSet::getErrorWorldState(const Eigen::Ref<const Eigen::VectorXd> &world,
Eigen::Ref<Eigen::VectorXd> error) const
{
world_->lock();
std::size_t i = 0;
for (std::size_t j = 0; j < tsrs_.size(); ++j)
{
const auto &tsr = tsrs_[j];
tsr->getErrorWorldState(world, error.segment(i, tsr->getDimension()));
error.segment(i, tsr->getDimension()) *= weights_[j];
i += tsr->getDimension();
}
world_->unlock();
}
void TSRSet::getJacobianWorldState(const Eigen::Ref<const Eigen::VectorXd> &world,
Eigen::Ref<Eigen::MatrixXd> jacobian) const
{
world_->lock();
unsigned int i = 0;
std::size_t n = world.size();
for (std::size_t j = 0; j < tsrs_.size(); ++j)
{
const auto &tsr = tsrs_[j];
tsr->getJacobianWorldState(world, jacobian.block(i, 0, tsr->getDimension(), n));
jacobian.block(i, 0, tsr->getDimension(), n) *= weights_[j];
i += tsr->getDimension();
}
world_->unlock();
}
double TSRSet::distanceWorld() const
{
Eigen::VectorXd x(getDimension());
getErrorWorld(x);
return x.norm();
}
double TSRSet::distanceWorldState(const Eigen::Ref<const Eigen::VectorXd> &world) const
{
Eigen::VectorXd x(getDimension());
getErrorWorldState(world, x);
return x.norm();
}
bool TSRSet::solveWorld()
{
world_->lock();
if (tsrs_.size() == 1)
{
bool r = tsrs_[0]->solveWorld();
world_->unlock();
return r;
}
auto sim = world_->getSim();
bool r = true;
for (const auto &skidx : skel_indices_)
{
auto skel = sim->getSkeleton(skidx);
auto ik = skel->getIK(true);
r &= ik->solveAndApply(true);
}
world_->unlock();
return r;
}
bool TSRSet::solveWorldState(Eigen::Ref<Eigen::VectorXd> world)
{
world_->lock();
for (auto &tsr : tsrs_)
tsr->setPositionsWorldState(world);
bool r = solveWorld();
for (auto &tsr : tsrs_)
tsr->getPositionsWorldState(world);
world_->unlock();
return r;
}
bool TSRSet::solveGradientWorldState(Eigen::Ref<Eigen::VectorXd> world)
{
unsigned int iter = 0;
double norm = 0;
Eigen::VectorXd f(getDimension());
Eigen::MatrixXd j(getDimension(), world.size());
const double squared_tolerance = tolerance_ * tolerance_;
const Eigen::VectorXd limit = Eigen::VectorXd::Constant(world.size(), limit_);
world_->lock();
getErrorWorldState(world, f);
while ((norm = f.norm()) > squared_tolerance and iter++ < maxIter_)
{
getJacobianWorldState(world, j);
if (qr_)
world -= (step_ * j.colPivHouseholderQr().solve(f)).cwiseMin(limit).cwiseMax(-limit);
else
{
if (damped_)
{
auto svd = j.jacobiSvd(Eigen::ComputeFullU | Eigen::ComputeFullV);
auto lr = svd.rank();
const auto &u = svd.matrixU().leftCols(lr);
const auto &v = svd.matrixV().leftCols(lr);
const auto &s = svd.singularValues().head(lr);
const auto &d = Eigen::VectorXd::Constant(lr, damping_);
const auto &damped = s.cwiseQuotient(s.cwiseProduct(s) + d.cwiseProduct(d));
Eigen::MatrixXd tmp;
tmp.noalias() = u.adjoint() * f;
tmp = damped.asDiagonal().inverse() * tmp;
auto step = v * tmp;
world -= (step_ * step).cwiseMin(limit).cwiseMax(-limit);
}
else
world -= (step_ * j.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(f))
.cwiseMin(limit)
.cwiseMax(-limit);
}
enforceBoundsWorld(world);
getErrorWorldState(world, f);
}
world_->forceUpdate();
world_->unlock();
return norm < squared_tolerance;
}
/// Push the set-wide iteration and tolerance settings down to the IK solver
/// of every skeleton this set touches.
void TSRSet::updateSolver()
{
    auto sim = world_->getSim();
    for (const auto &index : skel_indices_)
    {
        auto solver = sim->getSkeleton(index)->getIK(true)->getSolver();
        solver->setNumMaxIterations(maxIter_);
        solver->setTolerance(tolerance_);
    }
}
/// Get the maximum number of solver iterations (see updateSolver()).
std::size_t TSRSet::getMaxIterations() const
{
return maxIter_;
}
/// Get the convergence tolerance used by the solvers.
double TSRSet::getTolerance() const
{
return tolerance_;
}
/// Initialize every TSR in the set and rebuild the index set of skeletons
/// the TSRs operate on, then propagate solver settings to those skeletons.
void TSRSet::initialize()
{
    skel_indices_.clear();
    for (const auto &tsr : tsrs_)
    {
        tsr->initialize();
        skel_indices_.emplace(tsr->getSkeletonIndex());
    }

    // Make the per-skeleton IK solvers match this set's iteration/tolerance settings.
    updateSolver();
    RBX_INFO("TSRSet: Initialized %d TSRs!", tsrs_.size());
}
/// Return the (skeleton, DOF) index pairs this set operates over.
/// NOTE(review): delegates to tsrs_[0] — assumes the set is non-empty and
/// that all TSRs share the same world indices; confirm callers guarantee this.
const std::vector<std::pair<std::size_t, std::size_t>> &TSRSet::getWorldIndices() const
{
return tsrs_[0]->getWorldIndices();
}
/// Read the current simulation positions of this set's world DOFs into
/// \a world, in the order given by getWorldIndices().
void TSRSet::getPositionsWorldState(Eigen::Ref<Eigen::VectorXd> world) const
{
    auto sim = world_->getSim();
    const auto &indices = getWorldIndices();

    std::size_t i = 0;
    for (const auto &[skel_index, dof_index] : indices)
        world[i++] = sim->getSkeleton(skel_index)->getDof(dof_index)->getPosition();
}
void TSRSet::computeLimits()
{
const auto &wi = getWorldIndices();
std::size_t n = wi.size();
upper_ = Eigen::VectorXd::Zero(n);
lower_ = Eigen::VectorXd::Zero(n);
for (std::size_t i = 0; i < n; ++i)
{
const auto &wii = wi[i];
const auto &dof = world_->getSim()->getSkeleton(wii.first)->getDof(wii.second);
// if (dof->isCyclic())
// {
// lower_[i] = -constants::pi;
// upper_[i] = constants::pi;
// }
// else
// {
auto limits = dof->getPositionLimits();
lower_[i] = limits.first;
upper_[i] = limits.second;
// }
}
}
/// Set the maximum solver iterations. Stored here; pushed to the per-skeleton
/// IK solvers by updateSolver().
void TSRSet::setMaxIterations(std::size_t iterations)
{
maxIter_ = iterations;
}
/// Set the convergence tolerance. Stored here; pushed to the per-skeleton
/// IK solvers by updateSolver().
void TSRSet::setTolerance(double tolerance)
{
tolerance_ = tolerance;
}
/// Set the per-dimension upper bounds used by enforceBoundsWorld().
void TSRSet::setWorldUpperLimits(const Eigen::Ref<const Eigen::VectorXd> &upper)
{
upper_ = upper;
}
/// Set the per-dimension lower bounds used by enforceBoundsWorld().
void TSRSet::setWorldLowerLimits(const Eigen::Ref<const Eigen::VectorXd> &lower)
{
lower_ = lower;
}
/// Clamp \a world into the cached joint limits. Either bound vector may be
/// empty (unset), in which case that side is left unconstrained.
void TSRSet::enforceBoundsWorld(Eigen::Ref<Eigen::VectorXd> world) const
{
    if (upper_.size() > 0)
        world = world.cwiseMin(upper_);

    if (lower_.size() > 0)
        world = world.cwiseMax(lower_);
}
/// Print a human-readable summary of every TSR specification in the set.
void TSRSet::print(std::ostream &out) const
{
out << "TSRSet --------------------" << std::endl;
for (const auto &tsr : tsrs_)
tsr->getSpecification().print(out);
out << "---------------------------" << std::endl;
}
/// Set the scaling factor applied to each gradient-descent step.
void TSRSet::setStep(double step)
{
step_ = step;
}
/// Get the gradient-descent step scaling factor.
double TSRSet::getStep() const
{
return step_;
}
/// Use SVD (instead of QR) for steps in solveGradientWorldState().
void TSRSet::useSVD()
{
qr_ = false;
}
/// Use column-pivoting QR for steps in solveGradientWorldState().
void TSRSet::useQR()
{
qr_ = true;
}
/// Set the symmetric per-dimension clamp applied to each gradient step.
void TSRSet::setLimit(double limit)
{
limit_ = limit;
}
/// Get the per-dimension gradient step clamp.
double TSRSet::getLimit() const
{
return limit_;
}
/// Set the damping constant used by the damped least-squares SVD step.
void TSRSet::setDamping(double damping)
{
damping_ = damping;
}
/// Get the damping constant used by the damped least-squares SVD step.
double TSRSet::getDamping() const
{
return damping_;
}
/// Enable or disable damped least-squares for the SVD gradient step.
void TSRSet::useDamping(bool damping)
{
damped_ = damping;
}
///
/// TSRConstraint
///
/// Construct a constraint from a single TSR (wrapped into a TSRSet).
TSRConstraint::TSRConstraint(const StateSpacePtr &space, const TSRPtr &tsr)
: TSRConstraint(space, std::make_shared<TSRSet>(space->getWorld(), tsr))
{
}
/// Construct a constraint from several TSRs (wrapped into a TSRSet).
TSRConstraint::TSRConstraint(const StateSpacePtr &space, const std::vector<TSRPtr> &tsrs)
: TSRConstraint(space, std::make_shared<TSRSet>(space->getWorld(), tsrs))
{
}
/// Construct an OMPL constraint over \a space from a TSR set. The ambient
/// dimension is the space's dimension; the constraint codimension and
/// tolerance come from the set itself.
TSRConstraint::TSRConstraint(const StateSpacePtr &space, const TSRSetPtr &tsr)
: ompl::base::Constraint(space->getDimension(), tsr->getDimension(), tsr->getTolerance())
, space_(space)
, tsr_(tsr)
{
// NOTE(review): both useWorldIndices() and setWorldIndices() are called with
// the same index list — confirm both are required (one may subsume the other).
tsr_->useWorldIndices(space->getIndices());
tsr_->setWorldIndices(space->getIndices());
// Bind the set to the space's world and joint bounds before initializing.
tsr_->setWorld(space->getWorld());
tsr_->setWorldLowerLimits(space->getLowerBound());
tsr_->setWorldUpperLimits(space->getUpperBound());
tsr_->initialize();
}
/// OMPL constraint callback: evaluate the TSR error at configuration \a x.
void TSRConstraint::function(const Eigen::Ref<const Eigen::VectorXd> &x,
Eigen::Ref<Eigen::VectorXd> out) const
{
tsr_->getErrorWorldState(x, out);
}
/// OMPL constraint callback: evaluate the TSR Jacobian at configuration \a x.
void TSRConstraint::jacobian(const Eigen::Ref<const Eigen::VectorXd> &x,
Eigen::Ref<Eigen::MatrixXd> out) const
{
tsr_->getJacobianWorldState(x, out);
}
/// Project configuration \a x onto the constraint manifold.
/// A single TSR (or gradient descent disabled) is handled by the IK-based
/// solver; otherwise the gradient-descent path is used.
bool TSRConstraint::project(Eigen::Ref<Eigen::VectorXd> x) const
{
    space_->setWorldState(space_->getWorld(), x);

    if (tsr_->numTSRs() == 1 or not options.use_gradient)
        return tsr_->solveWorldState(x);

    // Reaching here implies options.use_gradient is true.
    return tsr_->solveGradientWorldState(x);
}
/// Access the underlying TSR set backing this constraint.
TSRSetPtr TSRConstraint::getSet()
{
return tsr_;
}
|
#include "DazToUnrealMaterials.h"
#include "DazToUnrealSettings.h"
#include "DazToUnrealTextures.h"
#include "Materials/MaterialInstanceConstant.h"
#include "Factories/MaterialInstanceConstantFactoryNew.h"
#include "Factories/SubsurfaceProfileFactory.h"
#include "Engine/SubsurfaceProfile.h"
#include "AssetRegistryModule.h"
#include "AssetToolsModule.h"
/** Returns the base material configured for the given shader name, falling back
 * to the project-wide default base material when no per-shader mapping exists.
 */
FSoftObjectPath FDazToUnrealMaterials::GetBaseMaterialForShader(FString ShaderName)
{
	const UDazToUnrealSettings* CachedSettings = GetDefault<UDazToUnrealSettings>();
	// Single map lookup instead of Contains() followed by operator[].
	if (const FSoftObjectPath* ShaderMaterial = CachedSettings->BaseShaderMaterials.Find(ShaderName))
	{
		return *ShaderMaterial;
	}
	return CachedSettings->BaseMaterial;
}
/** Returns the skin material configured for the given shader name, falling back
 * to the default base skin material when no per-shader mapping exists.
 */
FSoftObjectPath FDazToUnrealMaterials::GetSkinMaterialForShader(FString ShaderName)
{
	const UDazToUnrealSettings* CachedSettings = GetDefault<UDazToUnrealSettings>();
	// Single map lookup instead of Contains() followed by operator[].
	if (const FSoftObjectPath* SkinMaterial = CachedSettings->SkinShaderMaterials.Find(ShaderName))
	{
		return *SkinMaterial;
	}
	return CachedSettings->BaseSkinMaterial;
}
/** Selects the base material asset for a Daz material based on its asset type,
 * shader, and surface-name conventions (skin, hair, eyes, alpha-cutout, NoDraw).
 *
 * @param MaterialName        Full material name, e.g. "Figure_Torso".
 * @param MaterialProperties  Property list of that material ("Asset Type" drives the choice).
 * @return Soft path to the base material the instance should be parented to.
 */
FSoftObjectPath FDazToUnrealMaterials::GetBaseMaterial(FString MaterialName, TArray<FDUFTextureProperty > MaterialProperties)
{
	const UDazToUnrealSettings* CachedSettings = GetDefault<UDazToUnrealSettings>();

	// Find the proper Base Material
	FSoftObjectPath BaseMaterialAssetPath = CachedSettings->BaseMaterial;

	// Separator between asset and surface names; empty when original names are kept.
	FString Seperator;
	if (CachedSettings->UseOriginalMaterialName)
	{
		Seperator = "";
	}
	else
	{
		Seperator = "_";
	}

	// Pull the asset type and shader name out of the property list.
	// Iterate by const reference: copying each FDUFTextureProperty (several
	// FStrings) per iteration was wasteful, as was the up-front array copy.
	FString AssetType = "";
	FString ShaderName = "";
	for (const FDUFTextureProperty& Property : MaterialProperties)
	{
		if (Property.Name == TEXT("Asset Type"))
		{
			AssetType = Property.Value;
			ShaderName = Property.ShaderName;
		}
	}

	// Shader-specific override for the default material type (single lookup).
	if (const FSoftObjectPath* ShaderMaterial = CachedSettings->BaseShaderMaterials.Find(ShaderName))
	{
		BaseMaterialAssetPath = *ShaderMaterial;
	}

	if (AssetType == TEXT("Follower/Hair"))
	{
		BaseMaterialAssetPath = CachedSettings->BaseHairMaterial;
		if (MaterialName.EndsWith(Seperator + TEXT("scalp")))
		{
			BaseMaterialAssetPath = CachedSettings->BaseScalpMaterial;
		}
	}
	else if (AssetType == TEXT("Follower/Attachment/Head/Face/Eyelashes"))
	{
		if (MaterialName.Contains(Seperator + TEXT("EyeMoisture")))
		{
			BaseMaterialAssetPath = CachedSettings->BaseEyeMoistureMaterial;
		}
		else
		{
			BaseMaterialAssetPath = CachedSettings->BaseAlphaMaterial;
		}
	}
	else if (AssetType == TEXT("Follower/Attachment/Lower-Body/Hip/Front") &&
		MaterialName.Contains(Seperator + TEXT("Genitalia")))
	{
		BaseMaterialAssetPath = GetSkinMaterialForShader(ShaderName);
	}
	else if (AssetType == TEXT("Actor/Character"))
	{
		// Check for skin materials
		if (MaterialName.EndsWith(Seperator + TEXT("Face")) ||
			MaterialName.EndsWith(Seperator + TEXT("Head")) ||
			MaterialName.EndsWith(Seperator + TEXT("Lips")) ||
			MaterialName.EndsWith(Seperator + TEXT("Legs")) ||
			MaterialName.EndsWith(Seperator + TEXT("Hips")) ||
			MaterialName.EndsWith(Seperator + TEXT("Feet")) ||
			MaterialName.EndsWith(Seperator + TEXT("Torso")) ||
			MaterialName.EndsWith(Seperator + TEXT("Body")) ||
			MaterialName.EndsWith(Seperator + TEXT("Neck")) ||
			MaterialName.EndsWith(Seperator + TEXT("Shoulders")) ||
			MaterialName.EndsWith(Seperator + TEXT("Arms")) ||
			MaterialName.EndsWith(Seperator + TEXT("Forearms")) ||
			MaterialName.EndsWith(Seperator + TEXT("Hands")) ||
			MaterialName.EndsWith(Seperator + TEXT("EyeSocket")) ||
			MaterialName.EndsWith(Seperator + TEXT("Ears")) ||
			MaterialName.EndsWith(Seperator + TEXT("Fingernails")) ||
			MaterialName.EndsWith(Seperator + TEXT("Toenails")) ||
			MaterialName.EndsWith(Seperator + TEXT("Nipples")) ||
			MaterialName.EndsWith(Seperator + TEXT("Genitalia")))
		{
			BaseMaterialAssetPath = GetSkinMaterialForShader(ShaderName);
		}
		// EyeMoisture, EyeReflection and Tear surfaces all map to the same
		// material; the three former branches are merged here.
		else if (MaterialName.Contains(Seperator + TEXT("EyeMoisture")) ||
			MaterialName.Contains(Seperator + TEXT("EyeReflection")) ||
			MaterialName.Contains(Seperator + TEXT("Tear")))
		{
			BaseMaterialAssetPath = CachedSettings->BaseEyeMoistureMaterial;
		}
		// Likewise the eyelash surface-name variants all use the alpha material.
		else if (MaterialName.EndsWith(Seperator + TEXT("EyeLashes")) ||
			MaterialName.EndsWith(Seperator + TEXT("Eyelashes")) ||
			MaterialName.EndsWith(Seperator + TEXT("Eyelash")))
		{
			BaseMaterialAssetPath = CachedSettings->BaseAlphaMaterial;
		}
		else if (MaterialName.EndsWith(Seperator + TEXT("cornea")))
		{
			BaseMaterialAssetPath = CachedSettings->BaseCorneaMaterial;
		}
		else
		{
			// Fall back to the alpha material when the surface has a cutout mask.
			for (const FDUFTextureProperty& Property : MaterialProperties)
			{
				if (Property.Name == TEXT("Cutout Opacity Texture"))
				{
					BaseMaterialAssetPath = CachedSettings->BaseAlphaMaterial;
				}
			}
		}
	}
	else if (MaterialName.Contains(Seperator + TEXT("EyeMoisture")))
	{
		BaseMaterialAssetPath = CachedSettings->BaseEyeMoistureMaterial;
	}
	else
	{
		// Unknown asset type: switch to the alpha material when the surface has
		// a cutout mask or a non-unity opacity strength.
		for (const FDUFTextureProperty& Property : MaterialProperties)
		{
			if (Property.Name == TEXT("Cutout Opacity Texture"))
			{
				BaseMaterialAssetPath = CachedSettings->BaseAlphaMaterial;
			}
			if (Property.Name == TEXT("Opacity Strength") && Property.Value != TEXT("1"))
			{
				BaseMaterialAssetPath = CachedSettings->BaseAlphaMaterial;
			}
		}
	}

	// NoDraw surfaces always win over any choice made above.
	if (MaterialName.EndsWith(Seperator + TEXT("NoDraw")))
	{
		BaseMaterialAssetPath = CachedSettings->NoDrawMaterial;
	}

	return BaseMaterialAssetPath;
}
/** Creates a UMaterialInstanceConstant asset for a Daz material.
 *
 * Picks the base material, adjusts properties for known asset/surface types
 * (eyes, tears, eyelashes, skin), creates the instance package, wires up the
 * parent material and subsurface profile, and applies all texture / scalar /
 * vector / static-switch parameters.
 *
 * @param CharacterMaterialFolder  Content folder the instance is created in.
 * @param CharacterTexturesFolder  Content folder searched for texture assets.
 * @param MaterialName             Name of the material (also the asset name).
 * @param MaterialProperties       Per-material property lists; mutated via SetMaterialProperty.
 * @param CharacterType            Character type used to pick subsurface alpha textures.
 * @param ParentMaterial           Optional existing instance to parent to when compatible.
 * @param SubsurfaceProfile        Optional profile to assign/override on the instance.
 * @return The created material instance, or nullptr if the factory failed.
 */
UMaterialInstanceConstant* FDazToUnrealMaterials::CreateMaterial(const FString CharacterMaterialFolder, const FString CharacterTexturesFolder, FString& MaterialName, TMap<FString, TArray<FDUFTextureProperty>> MaterialProperties, const DazCharacterType CharacterType, UMaterialInstanceConstant* ParentMaterial, USubsurfaceProfile* SubsurfaceProfile)
{
	const UDazToUnrealSettings* CachedSettings = GetDefault<UDazToUnrealSettings>();

	// Resolve the base material for this material's shader/asset type.
	FSoftObjectPath BaseMaterialAssetPath = CachedSettings->BaseMaterial;
	if (MaterialProperties.Contains(MaterialName))
	{
		BaseMaterialAssetPath = GetBaseMaterial(MaterialName, MaterialProperties[MaterialName]);
	}

	// Pull the asset type and shader name out of the property list.
	FString ShaderName = "";
	FString AssetType = "";
	if (MaterialProperties.Contains(MaterialName))
	{
		for (const FDUFTextureProperty& Property : MaterialProperties[MaterialName])
		{
			if (Property.Name == TEXT("Asset Type"))
			{
				AssetType = Property.Value;
				ShaderName = Property.ShaderName;
			}
		}
	}

	// Separator between asset and surface names; empty when original names are kept.
	FString Seperator;
	if (CachedSettings->UseOriginalMaterialName)
	{
		Seperator = "";
	}
	else
	{
		Seperator = "_";
	}

	// Adjust properties for known asset/surface types before creating the instance.
	if (AssetType == TEXT("Follower/Attachment/Head/Face/Eyelashes") ||
		AssetType == TEXT("Follower/Attachment/Head/Face/Eyes") ||
		AssetType == TEXT("Follower/Attachment/Head/Face/Eyes/Tear") ||
		AssetType == TEXT("Follower/Attachment/Head/Face/Tears"))
	{
		if (MaterialName.Contains(Seperator + TEXT("EyeMoisture")) || MaterialName.EndsWith(Seperator + TEXT("EyeReflection")))
		{
			// Eye moisture/reflection: metallic, semi-transparent, neutral tint.
			SetMaterialProperty(MaterialName, TEXT("Metallic Weight"), TEXT("Double"), TEXT("1"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Opacity Strength"), TEXT("Double"), FString::SanitizeFloat(CachedSettings->DefaultEyeMoistureOpacity), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Diffuse Color"), TEXT("Color"), TEXT("#bababa"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Index of Refraction"), TEXT("Double"), TEXT("1.0"), MaterialProperties);
		}
		else if (MaterialName.EndsWith(Seperator + TEXT("Tear")) || MaterialName.EndsWith(Seperator + TEXT("Tears")))
		{
			SetMaterialProperty(MaterialName, TEXT("Metallic Weight"), TEXT("Double"), TEXT("1"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Opacity Strength"), TEXT("Double"), FString::SanitizeFloat(CachedSettings->DefaultEyeMoistureOpacity), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Index of Refraction"), TEXT("Double"), TEXT("1.0"), MaterialProperties);
		}
		else
		{
			// Eyelashes: non-metallic, no gloss.
			SetMaterialProperty(MaterialName, TEXT("Metallic Weight"), TEXT("Double"), TEXT("0"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Glossy Layered Weight"), TEXT("Double"), TEXT("0"), MaterialProperties);
		}
	}
	else if (AssetType == TEXT("Follower/Attachment/Lower-Body/Hip/Front") &&
		MaterialName.Contains(Seperator + TEXT("Genitalia")))
	{
		SetMaterialProperty(MaterialName, TEXT("Subsurface Alpha Texture"), TEXT("Texture"), FDazToUnrealTextures::GetSubSurfaceAlphaTexture(CharacterType, MaterialName), MaterialProperties);
	}
	else if (AssetType == TEXT("Actor/Character"))
	{
		// Check for skin materials
		if (MaterialName.EndsWith(Seperator + TEXT("Face")) ||
			MaterialName.EndsWith(Seperator + TEXT("Head")) ||
			MaterialName.EndsWith(Seperator + TEXT("Lips")) ||
			MaterialName.EndsWith(Seperator + TEXT("Legs")) ||
			MaterialName.EndsWith(Seperator + TEXT("Torso")) ||
			MaterialName.EndsWith(Seperator + TEXT("Body")) ||
			MaterialName.EndsWith(Seperator + TEXT("Arms")) ||
			MaterialName.EndsWith(Seperator + TEXT("EyeSocket")) ||
			MaterialName.EndsWith(Seperator + TEXT("Ears")) ||
			MaterialName.EndsWith(Seperator + TEXT("Fingernails")) ||
			MaterialName.EndsWith(Seperator + TEXT("Toenails")) ||
			MaterialName.EndsWith(Seperator + TEXT("Genitalia")))
		{
			SetMaterialProperty(MaterialName, TEXT("Diffuse Subsurface Color Weight"), TEXT("Double"), FString::SanitizeFloat(CachedSettings->DefaultSkinDiffuseSubsurfaceColorWeight), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Subsurface Alpha Texture"), TEXT("Texture"), FDazToUnrealTextures::GetSubSurfaceAlphaTexture(CharacterType, MaterialName), MaterialProperties);
		}
		else if (MaterialName.Contains(Seperator + TEXT("EyeMoisture")))
		{
			SetMaterialProperty(MaterialName, TEXT("Metallic Weight"), TEXT("Double"), TEXT("1"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Opacity Strength"), TEXT("Double"), FString::SanitizeFloat(CachedSettings->DefaultEyeMoistureOpacity), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Index of Refraction"), TEXT("Double"), TEXT("1.0"), MaterialProperties);
		}
		else if (MaterialName.EndsWith(Seperator + TEXT("EyeReflection")) || MaterialName.EndsWith(Seperator + TEXT("Tear")) || MaterialName.EndsWith(Seperator + TEXT("Tears")))
		{
			SetMaterialProperty(MaterialName, TEXT("Metallic Weight"), TEXT("Double"), TEXT("1"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Opacity Strength"), TEXT("Double"), FString::SanitizeFloat(CachedSettings->DefaultEyeMoistureOpacity), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Index of Refraction"), TEXT("Double"), TEXT("1.0"), MaterialProperties);
		}
		else if (MaterialName.EndsWith(Seperator + TEXT("EyeLashes")) || MaterialName.EndsWith(Seperator + TEXT("Eyelashes")))
		{
			SetMaterialProperty(MaterialName, TEXT("Metallic Weight"), TEXT("Double"), TEXT("0"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Glossy Layered Weight"), TEXT("Double"), TEXT("0"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Index of Refraction"), TEXT("Double"), TEXT("1.0"), MaterialProperties);
		}
		else if (MaterialName.EndsWith(Seperator + TEXT("cornea")))
		{
			SetMaterialProperty(MaterialName, TEXT("Metallic Weight"), TEXT("Double"), TEXT("1"), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Opacity Strength"), TEXT("Double"), FString::SanitizeFloat(CachedSettings->DefaultEyeMoistureOpacity), MaterialProperties);
			SetMaterialProperty(MaterialName, TEXT("Index of Refraction"), TEXT("Double"), TEXT("1.0"), MaterialProperties);
		}
		else if (MaterialName.EndsWith(Seperator + TEXT("sclera")))
		{
			// Sclera keeps its default properties.
		}
		else if (MaterialName.EndsWith(Seperator + TEXT("irises")))
		{
			// Pull the iris slightly toward the camera to avoid z-fighting with the cornea.
			SetMaterialProperty(MaterialName, TEXT("Pixel Depth Offset"), TEXT("Double"), TEXT("0.1"), MaterialProperties);
		}
		else if (MaterialName.EndsWith(Seperator + TEXT("pupils")))
		{
			SetMaterialProperty(MaterialName, TEXT("Pixel Depth Offset"), TEXT("Double"), TEXT("0.1"), MaterialProperties);
		}
		// Remaining character surfaces keep their properties unchanged. (The
		// original code looped over the properties here without doing anything;
		// that dead loop has been removed.)
	}
	else if (MaterialName.Contains(Seperator + TEXT("EyeMoisture")))
	{
		SetMaterialProperty(MaterialName, TEXT("Metallic Weight"), TEXT("Double"), TEXT("1"), MaterialProperties);
		SetMaterialProperty(MaterialName, TEXT("Opacity Strength"), TEXT("Double"), FString::SanitizeFloat(CachedSettings->DefaultEyeMoistureOpacity), MaterialProperties);
		SetMaterialProperty(MaterialName, TEXT("Index of Refraction"), TEXT("Double"), TEXT("1.0"), MaterialProperties);
	}
	// Other asset types and NoDraw surfaces need no property adjustments here;
	// their base material was already chosen by GetBaseMaterial().

	// Create the Material Instance asset.
	auto MaterialInstanceFactory = NewObject<UMaterialInstanceConstantFactoryNew>();
	UPackage* Package = CreatePackage(nullptr, *(CharacterMaterialFolder / MaterialName));
	UMaterialInstanceConstant* UnrealMaterialConstant = (UMaterialInstanceConstant*)MaterialInstanceFactory->FactoryCreateNew(UMaterialInstanceConstant::StaticClass(), Package, *MaterialName, RF_Standalone | RF_Public, NULL, GWarn);
	if (UnrealMaterialConstant != NULL)
	{
		// Notify the asset registry
		FAssetRegistryModule::AssetCreated(UnrealMaterialConstant);

		// Set the dirty flag so this package will get saved later
		Package->SetDirtyFlag(true);

		// Reuse the supplied parent when it already derives from the right base material.
		UObject* BaseMaterial = BaseMaterialAssetPath.TryLoad();
		if (ParentMaterial && ParentMaterial->Parent == BaseMaterial)
		{
			UnrealMaterialConstant->SetParentEditorOnly((UMaterial*)ParentMaterial);
		}
		else
		{
			UnrealMaterialConstant->SetParentEditorOnly((UMaterial*)BaseMaterial);
		}

		// Only override the subsurface profile when it differs from the parent's.
		if (SubsurfaceProfile)
		{
			if (!ParentMaterial || ParentMaterial->SubsurfaceProfile != SubsurfaceProfile)
			{
				UnrealMaterialConstant->bOverrideSubsurfaceProfile = 1;
				UnrealMaterialConstant->SubsurfaceProfile = SubsurfaceProfile;
			}
			else
			{
				UnrealMaterialConstant->bOverrideSubsurfaceProfile = 0;
			}
		}

		// Set the MaterialInstance properties
		if (MaterialProperties.Contains(MaterialName))
		{
			// Rename properties based on the settings. The loop variable is a
			// deliberate copy because the name may be rewritten before adding.
			TArray<FDUFTextureProperty> MaterialInstanceProperties;
			for (FDUFTextureProperty MaterialProperty : MaterialProperties[MaterialName])
			{
				if (CachedSettings->MaterialPropertyMapping.Contains(MaterialProperty.Name))
				{
					MaterialProperty.Name = CachedSettings->MaterialPropertyMapping[MaterialProperty.Name];
				}
				MaterialInstanceProperties.Add(MaterialProperty);
			}

			// Apply the properties
			for (const FDUFTextureProperty& MaterialProperty : MaterialInstanceProperties)
			{
				if (MaterialProperty.Type == TEXT("Texture"))
				{
					// Try the character's texture folder first, then the raw value as a full path.
					FStringAssetReference TextureAssetPath(CharacterTexturesFolder / MaterialProperty.Value);
					UObject* TextureAsset = TextureAssetPath.TryLoad();
					if (TextureAsset == nullptr)
					{
						FStringAssetReference TextureAssetPathFull(MaterialProperty.Value);
						TextureAsset = TextureAssetPathFull.TryLoad();
					}
					FMaterialParameterInfo ParameterInfo(*MaterialProperty.Name);
					UnrealMaterialConstant->SetTextureParameterValueEditorOnly(ParameterInfo, (UTexture*)TextureAsset);
				}
				if (MaterialProperty.Type == TEXT("Double"))
				{
					float Value = FCString::Atof(*MaterialProperty.Value);
					FMaterialParameterInfo ParameterInfo(*MaterialProperty.Name);
					UnrealMaterialConstant->SetScalarParameterValueEditorOnly(ParameterInfo, Value);
				}
				if (MaterialProperty.Type == TEXT("Color"))
				{
					FColor Color = FColor::FromHex(MaterialProperty.Value);
					FMaterialParameterInfo ParameterInfo(*MaterialProperty.Name);
					UnrealMaterialConstant->SetVectorParameterValueEditorOnly(ParameterInfo, Color);
				}
				if (MaterialProperty.Type == TEXT("Switch"))
				{
					// BUGFIX: the original wrapped this scan in a second, outer loop
					// over the same StaticSwitchParameters array (with its index
					// unused), re-applying every switch N times. One pass suffices.
					FStaticParameterSet StaticParameters;
					UnrealMaterialConstant->GetStaticParameterValues(StaticParameters);
					for (int32 SwitchParamIdx = 0; SwitchParamIdx < StaticParameters.StaticSwitchParameters.Num(); SwitchParamIdx++)
					{
						FStaticSwitchParameter& StaticSwitchParam = StaticParameters.StaticSwitchParameters[SwitchParamIdx];
						if (StaticSwitchParam.ParameterInfo.Name.ToString() == MaterialProperty.Name)
						{
							StaticSwitchParam.bOverride = true;
							StaticSwitchParam.Value = MaterialProperty.Value.ToLower() == TEXT("true");
						}
					}
					UnrealMaterialConstant->UpdateStaticPermutation(StaticParameters);
				}
			}
		}
	}

	return UnrealMaterialConstant;
}
void FDazToUnrealMaterials::SetMaterialProperty(const FString& MaterialName, const FString& PropertyName, const FString& PropertyType, const FString& PropertyValue, TMap<FString, TArray<FDUFTextureProperty>>& MaterialProperties)
{
if (!MaterialProperties.Contains(MaterialName))
{
MaterialProperties.Add(MaterialName, TArray<FDUFTextureProperty>());
}
TArray<FDUFTextureProperty>& Properties = MaterialProperties[MaterialName];
for (FDUFTextureProperty& Property : Properties)
{
if (Property.Name == PropertyName)
{
Property.Value = PropertyValue;
return;
}
}
FDUFTextureProperty TextureProperty;
TextureProperty.Name = PropertyName;
TextureProperty.Type = PropertyType;
TextureProperty.Value = PropertyValue;
MaterialProperties[MaterialName].Add(TextureProperty);
}
/** Returns the base material used by the largest number of the given materials
 * (first-seen wins ties). Used to pick a shared parent for a character's materials.
 */
FSoftObjectPath FDazToUnrealMaterials::GetMostCommonBaseMaterial(TArray<FString> MaterialNames, TMap<FString, TArray<FDUFTextureProperty>> MaterialProperties)
{
	// Resolve the base material for each material name.
	// Loop by const reference — the original copied each FString and each
	// FSoftObjectPath per iteration — and pre-size the result array.
	TArray<FSoftObjectPath> BaseMaterials;
	BaseMaterials.Reserve(MaterialNames.Num());
	for (const FString& MaterialName : MaterialNames)
	{
		BaseMaterials.Add(GetBaseMaterial(MaterialName, MaterialProperties[MaterialName]));
	}

	// Count occurrences; keep the path with the highest count.
	FSoftObjectPath MostCommonPath;
	int32 MostCommonCount = 0;
	for (const FSoftObjectPath& BaseMaterial : BaseMaterials)
	{
		int32 Count = 0;
		for (const FSoftObjectPath& BaseMaterialMatch : BaseMaterials)
		{
			if (BaseMaterialMatch == BaseMaterial)
			{
				Count++;
			}
		}
		if (Count > MostCommonCount)
		{
			MostCommonCount = Count;
			MostCommonPath = BaseMaterial;
		}
	}
	return MostCommonPath;
}
/** Returns, for every property name appearing in the given materials (except the
 * "Asset Type" marker), the most common value across those materials — but only
 * when GetMostCommonProperty() decides the property is shared (see its rules).
 */
TArray<FDUFTextureProperty> FDazToUnrealMaterials::GetMostCommonProperties(TArray<FString> MaterialNames, TMap<FString, TArray<FDUFTextureProperty>> MaterialProperties)
{
	// Get a list of property names. Iterate by const reference — the original
	// copied every FString and FDUFTextureProperty per iteration.
	TArray<FString> PossibleProperties;
	for (const FString& MaterialName : MaterialNames)
	{
		for (const FDUFTextureProperty& Property : MaterialProperties[MaterialName])
		{
			if (Property.Name != TEXT("Asset Type"))
			{
				PossibleProperties.AddUnique(Property.Name);
			}
		}
	}

	// Keep, for each candidate name, the most common property value.
	TArray<FDUFTextureProperty> MostCommonProperties;
	for (const FString& PossibleProperty : PossibleProperties)
	{
		FDUFTextureProperty MostCommonProperty = GetMostCommonProperty(PossibleProperty, MaterialNames, MaterialProperties);
		// An empty name signals "not common enough" / "not on every material".
		if (MostCommonProperty.Name != TEXT(""))
		{
			MostCommonProperties.Add(MostCommonProperty);
		}
	}
	return MostCommonProperties;
}
/** Returns the most common value of the named property across the given
 * materials. Yields a property with an empty Name when the property is not
 * present on every material, or when no value occurs more than once.
 */
FDUFTextureProperty FDazToUnrealMaterials::GetMostCommonProperty(FString PropertyName, TArray<FString> MaterialNames, TMap<FString, TArray<FDUFTextureProperty>> MaterialProperties)
{
	// Gather all occurrences of the property. Iterate by const reference —
	// the original copied every FString and FDUFTextureProperty per iteration.
	TArray<FDUFTextureProperty> PossibleProperties;
	int32 PropertyCount = 0;
	for (const FString& MaterialName : MaterialNames)
	{
		for (const FDUFTextureProperty& Property : MaterialProperties[MaterialName])
		{
			if (Property.Name == PropertyName)
			{
				PropertyCount++;
				PossibleProperties.Add(Property);
			}
		}
	}

	FDUFTextureProperty MostCommonProperty;
	int32 MostCommonCount = 0;
	// Only consider properties that exist on all the child materials.
	if (PropertyCount == MaterialNames.Num())
	{
		for (const FDUFTextureProperty& PropertyToCount : PossibleProperties)
		{
			int32 Count = 0;
			for (const FDUFTextureProperty& PropertyToMatch : PossibleProperties)
			{
				if (PropertyToMatch == PropertyToCount)
				{
					Count++;
				}
			}
			if (Count > MostCommonCount)
			{
				MostCommonCount = Count;
				MostCommonProperty = PropertyToCount;
			}
		}
	}

	// A unique (or absent) value is not "common" — signal that with an empty name.
	if (MostCommonCount <= 1)
	{
		MostCommonProperty.Name = TEXT("");
	}
	return MostCommonProperty;
}
bool FDazToUnrealMaterials::HasMaterialProperty(const FString& PropertyName, const TArray<FDUFTextureProperty>& MaterialProperties)
{
for (FDUFTextureProperty MaterialProperty : MaterialProperties)
{
if (MaterialProperty.Name == PropertyName)
{
return true;
}
}
return false;
}
/** Returns the value of the first property with the given name, or an empty
 * string when the property is absent (use HasMaterialProperty to distinguish).
 */
FString FDazToUnrealMaterials::GetMaterialProperty(const FString& PropertyName, const TArray<FDUFTextureProperty>& MaterialProperties)
{
	// Scan by const reference: the original by-value loop copied every
	// property (several FStrings each) just to compare a name.
	for (const FDUFTextureProperty& MaterialProperty : MaterialProperties)
	{
		if (MaterialProperty.Name == PropertyName)
		{
			return MaterialProperty.Value;
		}
	}
	return FString();
}
/** Creates the character-wide subsurface profile from the torso (or body)
 * material of an "Actor/Character" asset. Returns nullptr when no such
 * material exists (or the torso material doesn't qualify for a profile).
 */
USubsurfaceProfile* FDazToUnrealMaterials::CreateSubsurfaceBaseProfileForCharacter(const FString CharacterMaterialFolder, TMap<FString, TArray<FDUFTextureProperty>>& MaterialProperties)
{
	const UDazToUnrealSettings* CachedSettings = GetDefault<UDazToUnrealSettings>();

	// Separator between asset and surface names; empty when original names are kept.
	FString Seperator;
	if ( CachedSettings->UseOriginalMaterialName)
	{
		Seperator = "";
	}
	else
	{
		Seperator = "_";
	}

	// Find the torso material.
	// Iterate by const reference — the original copied each map pair,
	// including its entire TArray of properties, per iteration.
	for (const TPair<FString, TArray<FDUFTextureProperty>>& Pair : MaterialProperties)
	{
		FString AssetType;
		for (const FDUFTextureProperty& Property : Pair.Value)
		{
			if (Property.Name == TEXT("Asset Type"))
			{
				AssetType = Property.Value;
			}
		}
		if (AssetType == TEXT("Actor/Character"))
		{
			if (Pair.Key.EndsWith(Seperator + TEXT("Torso")) || Pair.Key.EndsWith(Seperator + TEXT("Body")))
			{
				return CreateSubsurfaceProfileForMaterial(Pair.Key, CharacterMaterialFolder, Pair.Value);
			}
		}
	}
	return nullptr;
}
/** Creates a USubsurfaceProfile asset next to the material and fills it from
 * the material's dual-lobe specular / SSS / transmission properties.
 * Only materials using the "PBRSkin" shader get a profile; returns nullptr
 * otherwise, or when asset creation fails.
 */
USubsurfaceProfile* FDazToUnrealMaterials::CreateSubsurfaceProfileForMaterial(const FString MaterialName, const FString CharacterMaterialFolder, const TArray<FDUFTextureProperty > MaterialProperties)
{
	// Only create a profile for the PBRSkin base shader.
	FString ShaderName;
	for (const FDUFTextureProperty& Property : MaterialProperties)
	{
		if (Property.Name == TEXT("Asset Type"))
		{
			ShaderName = Property.ShaderName;
		}
	}
	if (ShaderName != TEXT("PBRSkin"))
	{
		return nullptr;
	}

	// Create the profile asset via AssetTools. (The original also called
	// CreatePackage() here, but the package was never used — removed.)
	FString SubsurfaceProfileName = MaterialName + TEXT("_Profile");
	FAssetToolsModule& AssetToolsModule = FModuleManager::GetModuleChecked<FAssetToolsModule>("AssetTools");
	USubsurfaceProfile* SubsurfaceProfile = Cast<USubsurfaceProfile>(AssetToolsModule.Get().CreateAsset(SubsurfaceProfileName, FPackageName::GetLongPackagePath(*(CharacterMaterialFolder / MaterialName)), USubsurfaceProfile::StaticClass(), NULL));
	if (SubsurfaceProfile == nullptr)
	{
		// Asset creation can fail (e.g. name collision or cancelled dialog);
		// the original dereferenced the result unchecked and would crash.
		return nullptr;
	}

	if (HasMaterialProperty(TEXT("Specular Lobe 1 Roughness"), MaterialProperties))
	{
		SubsurfaceProfile->Settings.Roughness0 = FCString::Atof(*GetMaterialProperty(TEXT("Specular Lobe 1 Roughness"), MaterialProperties));
	}
	if (HasMaterialProperty(TEXT("Specular Lobe 2 Roughness Mult"), MaterialProperties))
	{
		SubsurfaceProfile->Settings.Roughness1 = FCString::Atof(*GetMaterialProperty(TEXT("Specular Lobe 2 Roughness Mult"), MaterialProperties));
	}
	if (HasMaterialProperty(TEXT("Dual Lobe Specular Ratio"), MaterialProperties))
	{
		SubsurfaceProfile->Settings.LobeMix = FCString::Atof(*GetMaterialProperty(TEXT("Dual Lobe Specular Ratio"), MaterialProperties));
	}
	// (The original contained this SSS Color block twice; the duplicate was removed.)
	if (HasMaterialProperty(TEXT("SSS Color"), MaterialProperties))
	{
		SubsurfaceProfile->Settings.SubsurfaceColor = FColor::FromHex(*GetMaterialProperty(TEXT("SSS Color"), MaterialProperties));
	}
	if (HasMaterialProperty(TEXT("Transmitted Color"), MaterialProperties))
	{
		SubsurfaceProfile->Settings.FalloffColor = FColor::FromHex(*GetMaterialProperty(TEXT("Transmitted Color"), MaterialProperties));
	}
	return SubsurfaceProfile;
}
/** Returns true when two subsurface profiles hold identical tracked settings
 * (roughness lobes, lobe mix, subsurface and falloff colors). Either profile
 * being null counts as "not identical".
 */
bool FDazToUnrealMaterials::SubsurfaceProfilesAreIdentical(USubsurfaceProfile* A, USubsurfaceProfile* B)
{
	if (A == nullptr || B == nullptr)
	{
		return false;
	}

	const auto& SettingsA = A->Settings;
	const auto& SettingsB = B->Settings;
	return SettingsA.Roughness0 == SettingsB.Roughness0
		&& SettingsA.Roughness1 == SettingsB.Roughness1
		&& SettingsA.LobeMix == SettingsB.LobeMix
		&& SettingsA.SubsurfaceColor == SettingsB.SubsurfaceColor
		&& SettingsA.FalloffColor == SettingsB.FalloffColor;
}
/** Returns true when an existing profile already matches the settings the
 * given material properties would produce, so a new profile isn't needed.
 * Missing properties compare via Atof("") == 0 / FromHex("") — the same values
 * CreateSubsurfaceProfileForMaterial would leave untouched defaults near.
 */
bool FDazToUnrealMaterials::SubsurfaceProfilesWouldBeIdentical(USubsurfaceProfile* ExistingSubsurfaceProfile, const TArray<FDUFTextureProperty > MaterialProperties)
{
	if (ExistingSubsurfaceProfile == nullptr)
	{
		return false;
	}

	const auto& Settings = ExistingSubsurfaceProfile->Settings;
	if (Settings.Roughness0 != FCString::Atof(*GetMaterialProperty(TEXT("Specular Lobe 1 Roughness"), MaterialProperties)))
	{
		return false;
	}
	if (Settings.Roughness1 != FCString::Atof(*GetMaterialProperty(TEXT("Specular Lobe 2 Roughness Mult"), MaterialProperties)))
	{
		return false;
	}
	if (Settings.LobeMix != FCString::Atof(*GetMaterialProperty(TEXT("Dual Lobe Specular Ratio"), MaterialProperties)))
	{
		return false;
	}
	if (Settings.SubsurfaceColor != FColor::FromHex(*GetMaterialProperty(TEXT("SSS Color"), MaterialProperties)))
	{
		return false;
	}
	if (Settings.FalloffColor != FColor::FromHex(*GetMaterialProperty(TEXT("Transmitted Color"), MaterialProperties)))
	{
		return false;
	}
	return true;
}
|
/*
* Copyright (C) 2018-2022 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
*/
#include "opencl/test/unit_test/program/program_tests.h"
#include "shared/source/command_stream/command_stream_receiver_hw.h"
#include "shared/source/compiler_interface/compiler_warnings/compiler_warnings.h"
#include "shared/source/compiler_interface/intermediate_representations.h"
#include "shared/source/device_binary_format/elf/elf_decoder.h"
#include "shared/source/device_binary_format/elf/ocl_elf.h"
#include "shared/source/device_binary_format/patchtokens_decoder.h"
#include "shared/source/gmm_helper/gmm_helper.h"
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/hash.h"
#include "shared/source/helpers/hw_helper.h"
#include "shared/source/helpers/ptr_math.h"
#include "shared/source/helpers/string.h"
#include "shared/source/memory_manager/allocations_list.h"
#include "shared/source/memory_manager/graphics_allocation.h"
#include "shared/source/memory_manager/surface.h"
#include "shared/source/os_interface/os_context.h"
#include "shared/test/common/helpers/debug_manager_state_restore.h"
#include "shared/test/common/helpers/kernel_binary_helper.h"
#include "shared/test/common/libult/global_environment.h"
#include "shared/test/common/libult/ult_command_stream_receiver.h"
#include "shared/test/common/mocks/mock_allocation_properties.h"
#include "shared/test/common/mocks/mock_compiler_interface.h"
#include "shared/test/common/mocks/mock_graphics_allocation.h"
#include "shared/test/common/test_macros/test.h"
#include "shared/test/unit_test/device_binary_format/patchtokens_tests.h"
#include "shared/test/unit_test/device_binary_format/zebin_tests.h"
#include "shared/test/unit_test/utilities/base_object_utils.h"
#include "opencl/source/gtpin/gtpin_notify.h"
#include "opencl/source/helpers/hardware_commands_helper.h"
#include "opencl/source/kernel/kernel.h"
#include "opencl/source/program/create.inl"
#include "opencl/test/unit_test/fixtures/cl_device_fixture.h"
#include "opencl/test/unit_test/fixtures/multi_root_device_fixture.h"
#include "opencl/test/unit_test/mocks/mock_kernel.h"
#include "opencl/test/unit_test/mocks/mock_platform.h"
#include "opencl/test/unit_test/mocks/mock_program.h"
#include "opencl/test/unit_test/program/program_from_binary.h"
#include "opencl/test/unit_test/program/program_with_source.h"
#include "opencl/test/unit_test/test_macros/test_checks_ocl.h"
#include "compiler_options.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <map>
#include <memory>
#include <string>
#include <vector>
using namespace NEO;
void ProgramTests::SetUp() {
ClDeviceFixture::SetUp();
cl_device_id device = pClDevice;
ContextFixture::SetUp(1, &device);
}
// Per-test teardown: release in reverse order of SetUp (context before device).
void ProgramTests::TearDown() {
    ContextFixture::TearDown();
    ClDeviceFixture::TearDown();
}
// Root-device environment that reports no compiler interface, used to exercise the
// error path where a build cannot obtain a compiler (expects CL_OUT_OF_HOST_MEMORY).
class NoCompilerInterfaceRootDeviceEnvironment : public RootDeviceEnvironment {
  public:
    NoCompilerInterfaceRootDeviceEnvironment(ExecutionEnvironment &executionEnvironment) : RootDeviceEnvironment(executionEnvironment) {
        *hwInfo = *defaultHwInfo;
    }

    // Simulate a missing compiler by always returning null.
    CompilerInterface *getCompilerInterface() override {
        return nullptr;
    }

    // AIL configuration is irrelevant for these tests; report success.
    bool initAilConfiguration() override {
        return true;
    }
};
// Program whose gen-binary post-processing always fails, to test the
// CL_INVALID_BINARY path after an otherwise successful compile/link.
class FailingGenBinaryProgram : public MockProgram {
  public:
    using MockProgram::MockProgram;
    cl_int processGenBinary(const ClDevice &clDevice) override { return CL_INVALID_BINARY; }
};
// Program whose gen-binary post-processing always succeeds, so tests can focus
// on the compile front-end (e.g. captured internal build options).
class SucceedingGenBinaryProgram : public MockProgram {
  public:
    using MockProgram::MockProgram;
    cl_int processGenBinary(const ClDevice &clDevice) override { return CL_SUCCESS; }
};
using ProgramFromBinaryTest = ProgramFromBinaryFixture;
// Smoke test: building a program created from a known-good binary must succeed.
TEST_F(ProgramFromBinaryTest, WhenBuildingProgramThenSuccessIsReturned) {
    retVal = pProgram->build(
        pProgram->getDevices(),
        nullptr,
        false);
    EXPECT_EQ(CL_SUCCESS, retVal);
}

// CL_PROGRAM_CONTEXT must return the context the program was created on and
// report sizeof(cl_context) as the written size.
TEST_F(ProgramFromBinaryTest, WhenGettingProgramContextInfoThenCorrectContextIsReturned) {
    cl_context contextRet = reinterpret_cast<cl_context>(static_cast<uintptr_t>(0xdeaddead)); // poison value, must be overwritten
    size_t paramValueSizeRet = 0;

    retVal = pProgram->getInfo(
        CL_PROGRAM_CONTEXT,
        sizeof(cl_context),
        &contextRet,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(pContext, contextRet);
    EXPECT_EQ(sizeof(cl_context), paramValueSizeRet);
}
// CL_PROGRAM_BINARIES with a valid destination buffer must copy out the binary
// the program was created from.
TEST_F(ProgramFromBinaryTest, GivenNonNullParamValueWhenGettingProgramBinaryInfoThenCorrectBinaryIsReturned) {
    size_t paramValueSize = sizeof(unsigned char **);
    size_t paramValueSizeRet = 0;
    auto testBinary = std::make_unique<char[]>(knownSourceSize);

    retVal = pProgram->getInfo(
        CL_PROGRAM_BINARIES,
        paramValueSize,
        &testBinary,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(paramValueSize, paramValueSizeRet);
    EXPECT_STREQ((const char *)knownSource.get(), (const char *)testBinary.get());
}

// Size-only query (null destination) is a valid way to ask for the required size.
TEST_F(ProgramFromBinaryTest, GivenNullParamValueWhenGettingProgramBinaryInfoThenSuccessIsReturned) {
    size_t paramValueSize = sizeof(unsigned char **);
    size_t paramValueSizeRet = 0;

    retVal = pProgram->getInfo(
        CL_PROGRAM_BINARIES,
        0,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(paramValueSize, paramValueSizeRet);
}

// A non-null destination combined with a zero size is a contradiction and must fail.
TEST_F(ProgramFromBinaryTest, GivenNonNullParamValueAndParamValueSizeZeroWhenGettingProgramBinaryInfoThenInvalidValueErrorIsReturned) {
    size_t paramValueSizeRet = 0;
    auto testBinary = std::make_unique<char[]>(knownSourceSize);

    retVal = pProgram->getInfo(
        CL_PROGRAM_BINARIES,
        0,
        &testBinary,
        &paramValueSizeRet);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
}

// On failure, getInfo must not touch the caller's size-return variable.
TEST_F(ProgramFromBinaryTest, GivenInvalidParametersWhenGettingProgramInfoThenValueSizeRetIsNotUpdated) {
    size_t paramValueSizeRet = 0x1234; // sentinel, must survive the failed call
    auto testBinary = std::make_unique<char[]>(knownSourceSize);

    retVal = pProgram->getInfo(
        CL_PROGRAM_BINARIES,
        0,
        &testBinary,
        &paramValueSizeRet);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
    EXPECT_EQ(0x1234u, paramValueSizeRet);
}
// CL_PROGRAM_BUILD_STATUS is a getBuildInfo token, not a getInfo token,
// so getInfo must reject it.
TEST_F(ProgramFromBinaryTest, GivenInvalidParamWhenGettingProgramBinaryInfoThenInvalidValueErrorIsReturned) {
    size_t paramValueSizeRet = 0;
    auto testBinary = std::make_unique<char[]>(knownSourceSize);

    retVal = pProgram->getInfo(
        CL_PROGRAM_BUILD_STATUS,
        0,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
}

// CL_PROGRAM_BINARY_SIZES must report the size of the binary the fixture loaded.
TEST_F(ProgramFromBinaryTest, WhenGettingBinarySizesThenCorrectSizesAreReturned) {
    size_t paramValueSize = sizeof(size_t *);
    size_t paramValue[1];
    size_t paramValueSizeRet = 0;

    retVal = pProgram->getInfo(
        CL_PROGRAM_BINARY_SIZES,
        paramValueSize,
        paramValue,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(knownSourceSize, paramValue[0]);
    EXPECT_EQ(paramValueSize, paramValueSizeRet);
}
// After a successful build, the fixture's program exposes exactly one kernel
// via CL_PROGRAM_NUM_KERNELS.
TEST_F(ProgramFromBinaryTest, GivenProgramWithOneKernelWhenGettingNumKernelsThenOneIsReturned) {
    size_t paramValue = 0;
    size_t paramValueSize = sizeof(paramValue);
    size_t paramValueSizeRet = 0;

    retVal = pProgram->build(
        pProgram->getDevices(),
        nullptr,
        false);
    ASSERT_EQ(CL_SUCCESS, retVal);

    retVal = pProgram->getInfo(
        CL_PROGRAM_NUM_KERNELS,
        paramValueSize,
        &paramValue,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(1u, paramValue);
    EXPECT_EQ(paramValueSize, paramValueSizeRet);
}

// Querying kernel count on an unbuilt program must report
// CL_INVALID_PROGRAM_EXECUTABLE.
TEST_F(ProgramFromBinaryTest, GivenProgramWithNoExecutableCodeWhenGettingNumKernelsThenInvalidProgramExecutableErrorIsReturned) {
    size_t paramValue = 0;
    size_t paramValueSize = sizeof(paramValue);
    size_t paramValueSizeRet = 0;

    // Recreate the program and force it into the "never built" state.
    CreateProgramFromBinary(pContext, pContext->getDevices(), binaryFileName);
    MockProgram *p = pProgram;
    p->setBuildStatus(CL_BUILD_NONE);

    retVal = pProgram->getInfo(
        CL_PROGRAM_NUM_KERNELS,
        paramValueSize,
        &paramValue,
        &paramValueSizeRet);
    EXPECT_EQ(CL_INVALID_PROGRAM_EXECUTABLE, retVal);
}
// CL_PROGRAM_KERNEL_NAMES: size-only query first, then fetch the
// NUL-terminated semicolon-separated name list into a right-sized buffer.
TEST_F(ProgramFromBinaryTest, WhenGettingKernelNamesThenCorrectNameIsReturned) {
    size_t paramValueSize = sizeof(size_t *);
    size_t paramValueSizeRet = 0;

    retVal = pProgram->build(
        pProgram->getDevices(),
        nullptr,
        false);
    ASSERT_EQ(CL_SUCCESS, retVal);

    // get info successfully about required sizes for kernel names
    retVal = pProgram->getInfo(
        CL_PROGRAM_KERNEL_NAMES,
        0,
        nullptr,
        &paramValueSizeRet);
    ASSERT_EQ(CL_SUCCESS, retVal);
    ASSERT_NE(0u, paramValueSizeRet);

    // get info successfully about kernel names
    auto paramValue = std::make_unique<char[]>(paramValueSizeRet);
    paramValueSize = paramValueSizeRet;
    ASSERT_NE(paramValue, nullptr);

    size_t expectedKernelsStringSize = strlen(kernelName) + 1; // +1 for the terminating NUL

    retVal = pProgram->getInfo(
        CL_PROGRAM_KERNEL_NAMES,
        paramValueSize,
        paramValue.get(),
        &paramValueSizeRet);

    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_STREQ(kernelName, (char *)paramValue.get());
    EXPECT_EQ(expectedKernelsStringSize, paramValueSizeRet);
}

// Kernel names are unavailable on an unbuilt program:
// expect CL_INVALID_PROGRAM_EXECUTABLE.
TEST_F(ProgramFromBinaryTest, GivenProgramWithNoExecutableCodeWhenGettingKernelNamesThenInvalidProgramExecutableErrorIsReturned) {
    size_t paramValueSize = sizeof(size_t *);
    size_t paramValueSizeRet = 0;

    CreateProgramFromBinary(pContext, pContext->getDevices(), binaryFileName);
    MockProgram *p = pProgram;
    p->setBuildStatus(CL_BUILD_NONE);

    retVal = pProgram->getInfo(
        CL_PROGRAM_KERNEL_NAMES,
        paramValueSize,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_INVALID_PROGRAM_EXECUTABLE, retVal);
}
// The fixture binary has no program-scope constructors/destructors, so both
// presence queries must report CL_FALSE.
TEST_F(ProgramFromBinaryTest, WhenGettingProgramScopeGlobalCtorsAndDtorsPresentInfoThenCorrectValueIsReturned) {
    cl_uint paramRet = 0;
    cl_uint expectedParam = CL_FALSE;
    size_t paramSizeRet = 0;

    retVal = pProgram->getInfo(
        CL_PROGRAM_SCOPE_GLOBAL_CTORS_PRESENT,
        sizeof(cl_uint),
        &paramRet,
        &paramSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(sizeof(cl_uint), paramSizeRet);
    EXPECT_EQ(expectedParam, paramRet);

    retVal = pProgram->getInfo(
        CL_PROGRAM_SCOPE_GLOBAL_DTORS_PRESENT,
        sizeof(cl_uint),
        &paramRet,
        &paramSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(sizeof(cl_uint), paramSizeRet);
    EXPECT_EQ(expectedParam, paramRet);
}
// A never-built program reports CL_BUILD_NONE for CL_PROGRAM_BUILD_STATUS.
TEST_F(ProgramFromBinaryTest, GivenNullDeviceWhenGettingBuildStatusThenBuildNoneIsReturned) {
    cl_device_id device = pClDevice;
    cl_build_status buildStatus = 0;
    size_t paramValueSize = sizeof(buildStatus);
    size_t paramValueSizeRet = 0;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_STATUS,
        paramValueSize,
        &buildStatus,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(paramValueSize, paramValueSizeRet);
    EXPECT_EQ(CL_BUILD_NONE, buildStatus);
}

// An invalid param name (0) must fail and leave the caller's size-return
// variable untouched.
TEST_F(ProgramFromBinaryTest, GivenInvalidParametersWhenGettingBuildInfoThenValueSizeRetIsNotUpdated) {
    cl_device_id device = pClDevice;
    cl_build_status buildStatus = 0;
    size_t paramValueSize = sizeof(buildStatus);
    size_t paramValueSizeRet = 0x1234; // sentinel, must survive the failed call

    retVal = pProgram->getBuildInfo(
        device,
        0,
        paramValueSize,
        &buildStatus,
        &paramValueSizeRet);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
    EXPECT_EQ(0x1234u, paramValueSizeRet);
}
// A program built with no user options reports an empty (but NUL-terminated,
// hence non-zero-sized) CL_PROGRAM_BUILD_OPTIONS string.
TEST_F(ProgramFromBinaryTest, GivenDefaultDeviceWhenGettingBuildOptionsThenBuildOptionsAreEmpty) {
    cl_device_id device = pClDevice;
    size_t paramValueSizeRet = 0u;
    size_t paramValueSize = 0u;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_OPTIONS,
        0,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(paramValueSizeRet, 0u);

    auto paramValue = std::make_unique<char[]>(paramValueSizeRet);
    paramValueSize = paramValueSizeRet;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_OPTIONS,
        paramValueSize,
        paramValue.get(),
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_STREQ("", (char *)paramValue.get());
}

// Likewise, CL_PROGRAM_BUILD_LOG starts out as an empty string.
TEST_F(ProgramFromBinaryTest, GivenDefaultDeviceWhenGettingLogThenLogEmpty) {
    cl_device_id device = pClDevice;
    size_t paramValueSizeRet = 0u;
    size_t paramValueSize = 0u;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_LOG,
        0,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(paramValueSizeRet, 0u);

    auto paramValue = std::make_unique<char[]>(paramValueSizeRet);
    paramValueSize = paramValueSizeRet;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_LOG,
        paramValueSize,
        paramValue.get(),
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_STREQ("", (char *)paramValue.get());
}
// updateBuildLog calls must append to (not replace) the per-device build log;
// both entries must be retrievable afterwards, in order.
TEST_F(ProgramFromBinaryTest, GivenLogEntriesWhenGetBuildLogThenLogIsApended) {
    cl_device_id device = pClDevice;
    size_t paramValueSizeRet = 0u;
    size_t paramValueSize = 0u;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_LOG,
        0,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(paramValueSizeRet, 0u);

    auto paramValue = std::make_unique<char[]>(paramValueSizeRet);
    paramValueSize = paramValueSizeRet;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_LOG,
        paramValueSize,
        paramValue.get(),
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_STREQ("", (char *)paramValue.get());

    // Add more text to the log
    pProgram->updateBuildLog(pClDevice->getRootDeviceIndex(), "testing", 8); // 8 = strlen("testing") + NUL
    pProgram->updateBuildLog(pClDevice->getRootDeviceIndex(), "several", 8);

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_LOG,
        0,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_GE(paramValueSizeRet, 16u); // at least both entries plus separators

    paramValue = std::make_unique<char[]>(paramValueSizeRet);
    paramValueSize = paramValueSizeRet;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_LOG,
        paramValueSize,
        paramValue.get(),
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // "testing" must appear first, and "several" somewhere after it.
    EXPECT_NE(nullptr, strstr(paramValue.get(), "testing"));

    const char *paramValueContinued = strstr(paramValue.get(), "testing") + 7;
    ASSERT_NE(nullptr, strstr(paramValueContinued, "several"));
}
// Size-only query for CL_PROGRAM_BINARY_TYPE must succeed and report a
// non-zero required size.
TEST_F(ProgramFromBinaryTest, GivenNullParamValueWhenGettingProgramBinaryTypeThenParamValueSizeIsReturned) {
    cl_device_id device = pClDevice;
    size_t paramValueSizeRet = 0u;
    size_t paramValueSize = 0u;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BINARY_TYPE,
        paramValueSize,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(paramValueSizeRet, 0u);
}

// A program created from a full device binary reports
// CL_PROGRAM_BINARY_TYPE_EXECUTABLE.
TEST_F(ProgramFromBinaryTest, WhenGettingProgramBinaryTypeThenCorrectProgramTypeIsReturned) {
    cl_device_id device = pClDevice;
    cl_program_binary_type programType = 0;
    char *paramValue = (char *)&programType;
    size_t paramValueSizeRet = 0u;
    size_t paramValueSize = 0u;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BINARY_TYPE,
        paramValueSize,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(paramValueSizeRet, 0u);

    paramValueSize = paramValueSizeRet;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BINARY_TYPE,
        paramValueSize,
        paramValue,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ((cl_program_binary_type)CL_PROGRAM_BINARY_TYPE_EXECUTABLE, programType);
}
// CL_PROGRAM_KERNEL_NAMES is a getInfo token, not a getBuildInfo token,
// so getBuildInfo must reject it with CL_INVALID_VALUE.
TEST_F(ProgramFromBinaryTest, GivenInvalidParamWhenGettingBuildInfoThenInvalidValueErrorIsReturned) {
    cl_device_id device = pClDevice;
    size_t paramValueSizeRet = 0u;

    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_KERNEL_NAMES,
        0,
        nullptr,
        &paramValueSizeRet);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
}
// CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE: 0 before any program-scope
// globals are processed; after feeding 1024 bytes of global-variable data the
// query reports 1024 on devices with OpenCL 2.1+ features, otherwise stays 0.
TEST_F(ProgramFromBinaryTest, GivenGlobalVariableTotalSizeSetWhenGettingBuildGlobalVariableTotalSizeThenCorrectSizeIsReturned) {
    cl_device_id device = pClDevice;
    size_t globalVarSize = 22; // arbitrary non-zero start value, must be overwritten with 0
    size_t paramValueSize = sizeof(globalVarSize);
    size_t paramValueSizeRet = 0;
    char *paramValue = (char *)&globalVarSize;

    // get build info as is
    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE,
        paramValueSize,
        paramValue,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(paramValueSizeRet, sizeof(globalVarSize));
    EXPECT_EQ(globalVarSize, 0u);

    // Set GlobalVariableTotalSize as 1024
    CreateProgramFromBinary(pContext, pContext->getDevices(), binaryFileName);
    MockProgram *p = pProgram;
    ProgramInfo programInfo;

    char constantData[1024] = {};
    programInfo.globalVariables.initData = constantData;
    programInfo.globalVariables.size = sizeof(constantData);
    p->processProgramInfo(programInfo, *pClDevice);

    // get build info once again
    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE,
        paramValueSize,
        paramValue,
        &paramValueSizeRet);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(paramValueSizeRet, sizeof(globalVarSize));
    if (castToObject<ClDevice>(pClDevice)->areOcl21FeaturesEnabled()) {
        EXPECT_EQ(globalVarSize, 1024u);
    } else {
        EXPECT_EQ(globalVarSize, 0u);
    }
}
// After a build, each KernelInfo owns a 32-bit ISA graphics allocation sized
// kernel-heap + HW-specific padding, holding a copy of (not a pointer into)
// the kernel heap, based in the internal heap of the memory manager.
TEST_F(ProgramFromBinaryTest, givenProgramWhenItIsBeingBuildThenItContainsGraphicsAllocationInKernelInfo) {
    pProgram->build(pProgram->getDevices(), nullptr, true);
    auto kernelInfo = pProgram->getKernelInfo(size_t(0), rootDeviceIndex);

    auto graphicsAllocation = kernelInfo->getGraphicsAllocation();
    ASSERT_NE(nullptr, graphicsAllocation);
    EXPECT_TRUE(graphicsAllocation->is32BitAllocation());
    auto &hwHelper = NEO::HwHelper::get(defaultHwInfo->platform.eRenderCoreFamily);
    size_t isaPadding = hwHelper.getPaddingForISAAllocation();
    EXPECT_EQ(graphicsAllocation->getUnderlyingBufferSize(), kernelInfo->heapInfo.KernelHeapSize + isaPadding);

    auto kernelIsa = graphicsAllocation->getUnderlyingBuffer();
    EXPECT_NE(kernelInfo->heapInfo.pKernelHeap, kernelIsa); // ISA is copied, not aliased
    EXPECT_EQ(0, memcmp(kernelIsa, kernelInfo->heapInfo.pKernelHeap, kernelInfo->heapInfo.KernelHeapSize));
    auto rootDeviceIndex = graphicsAllocation->getRootDeviceIndex();
    EXPECT_EQ(GmmHelper::decanonize(graphicsAllocation->getGpuBaseAddress()), pDevice->getMemoryManager()->getInternalHeapBaseAddress(rootDeviceIndex, graphicsAllocation->isAllocatedInLocalMemoryPool()));
}
// Re-processing the gen binary must free any constant/global surfaces left
// over from a previous build instead of leaking them.
TEST_F(ProgramFromBinaryTest, whenProgramIsBeingRebuildThenOutdatedGlobalBuffersAreFreed) {
    pProgram->build(pProgram->getDevices(), nullptr, true);
    EXPECT_EQ(nullptr, pProgram->buildInfos[pClDevice->getRootDeviceIndex()].constantSurface);
    EXPECT_EQ(nullptr, pProgram->buildInfos[pClDevice->getRootDeviceIndex()].globalSurface);

    // Inject a stale constant surface; rebuild must clear it.
    pProgram->buildInfos[pClDevice->getRootDeviceIndex()].constantSurface = new MockGraphicsAllocation();
    pProgram->processGenBinary(*pClDevice);
    EXPECT_EQ(nullptr, pProgram->buildInfos[pClDevice->getRootDeviceIndex()].constantSurface);
    EXPECT_EQ(nullptr, pProgram->buildInfos[pClDevice->getRootDeviceIndex()].globalSurface);

    // Same for a stale global surface.
    pProgram->buildInfos[pClDevice->getRootDeviceIndex()].globalSurface = new MockGraphicsAllocation();
    pProgram->processGenBinary(*pClDevice);
    EXPECT_EQ(nullptr, pProgram->buildInfos[pClDevice->getRootDeviceIndex()].constantSurface);
    EXPECT_EQ(nullptr, pProgram->buildInfos[pClDevice->getRootDeviceIndex()].globalSurface);
}
// cleanCurrentKernelInfo must drop kernel infos (and their allocations) for
// every root device index, leaving the program with zero kernels.
TEST_F(ProgramFromBinaryTest, givenProgramWhenCleanKernelInfoIsCalledThenKernelAllocationIsFreed) {
    pProgram->build(pProgram->getDevices(), nullptr, true);
    EXPECT_EQ(1u, pProgram->getNumKernels());
    for (auto i = 0u; i < pProgram->buildInfos.size(); i++) {
        pProgram->cleanCurrentKernelInfo(i);
    }
    EXPECT_EQ(0u, pProgram->getNumKernels());
}
// If the GPU has not yet consumed the kernel ISA (task count ahead of the tag),
// cleaning kernel info must defer the allocation to the CSR's temporary list
// and flag an instruction-cache flush, rather than freeing immediately.
HWTEST_F(ProgramFromBinaryTest, givenProgramWhenCleanCurrentKernelInfoIsCalledButGpuIsNotYetDoneThenKernelAllocationIsPutOnDeferredFreeListAndCsrRegistersCacheFlush) {
    auto &csr = pDevice->getGpgpuCommandStreamReceiver();
    EXPECT_TRUE(csr.getTemporaryAllocations().peekIsEmpty());
    pProgram->build(pProgram->getDevices(), nullptr, true);
    auto kernelAllocation = pProgram->getKernelInfo(static_cast<size_t>(0u), rootDeviceIndex)->getGraphicsAllocation();
    // Pretend the allocation is still in flight: used at task count 100, GPU tag at 0.
    kernelAllocation->updateTaskCount(100, csr.getOsContext().getContextId());
    *csr.getTagAddress() = 0;
    pProgram->cleanCurrentKernelInfo(rootDeviceIndex);
    EXPECT_FALSE(csr.getTemporaryAllocations().peekIsEmpty());
    EXPECT_EQ(csr.getTemporaryAllocations().peekHead(), kernelAllocation);
    EXPECT_TRUE(this->pDevice->getUltCommandStreamReceiver<FamilyType>().requiresInstructionCacheFlush);
}
// Deleting an ISA allocation that was resident on two CSRs must register an
// instruction-cache flush on both of them.
HWTEST_F(ProgramFromBinaryTest, givenIsaAllocationUsedByMultipleCsrsWhenItIsDeletedThenItRegistersCacheFlushInEveryCsrThatUsedIt) {
    auto &csr0 = this->pDevice->getUltCommandStreamReceiverFromIndex<FamilyType>(0u);
    auto &csr1 = this->pDevice->getUltCommandStreamReceiverFromIndex<FamilyType>(1u);

    pProgram->build(pProgram->getDevices(), nullptr, true);

    auto kernelAllocation = pProgram->getKernelInfo(static_cast<size_t>(0u), rootDeviceIndex)->getGraphicsAllocation();

    // Make the allocation known to both CSRs, then release residency again.
    csr0.makeResident(*kernelAllocation);
    csr1.makeResident(*kernelAllocation);

    csr0.processResidency(csr0.getResidencyAllocations(), 0u);
    csr1.processResidency(csr1.getResidencyAllocations(), 0u);

    csr0.makeNonResident(*kernelAllocation);
    csr1.makeNonResident(*kernelAllocation);

    EXPECT_FALSE(csr0.requiresInstructionCacheFlush);
    EXPECT_FALSE(csr1.requiresInstructionCacheFlush);

    pProgram->cleanCurrentKernelInfo(rootDeviceIndex);
    EXPECT_TRUE(csr0.requiresInstructionCacheFlush);
    EXPECT_TRUE(csr1.requiresInstructionCacheFlush);
}
// Sequence of order-dependent build microtests covering: build-in-progress,
// missing compiler interface, bad option, corrupt gen binary, cache write,
// build-log retrieval, cache hit, and missing source. Each microtest relies on
// the program state left by the previous one.
TEST_F(ProgramFromSourceTest, GivenSpecificParamatersWhenBuildingProgramThenSuccessOrCorrectErrorCodeIsReturned) {
    KernelBinaryHelper kbHelper(binaryFileName, true);
    auto device = pPlatform->getClDevice(0);

    CreateProgramWithSource(
        pContext,
        sourceFileName);

    // Order of following microtests is important - do not change.
    // Add new microtests at end.

    auto pMockProgram = pProgram;

    // fail build - another build is already in progress
    pMockProgram->setBuildStatus(CL_BUILD_IN_PROGRESS);
    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    EXPECT_EQ(CL_INVALID_OPERATION, retVal);
    pMockProgram->setBuildStatus(CL_BUILD_NONE);

    // fail build - CompilerInterface cannot be obtained
    // (temporarily swap in an environment whose getCompilerInterface() returns null)
    auto executionEnvironment = device->getExecutionEnvironment();
    std::unique_ptr<RootDeviceEnvironment> rootDeviceEnvironment = std::make_unique<NoCompilerInterfaceRootDeviceEnvironment>(*executionEnvironment);
    std::swap(rootDeviceEnvironment, executionEnvironment->rootDeviceEnvironments[device->getRootDeviceIndex()]);
    auto p2 = std::make_unique<MockProgram>(toClDeviceVector(*device));
    retVal = p2->build(p2->getDevices(), nullptr, false);
    EXPECT_EQ(CL_OUT_OF_HOST_MEMORY, retVal);
    p2.reset(nullptr);
    std::swap(rootDeviceEnvironment, executionEnvironment->rootDeviceEnvironments[device->getRootDeviceIndex()]);

    // fail build - any build error (here caused by specifying unrecognized option)
    retVal = pProgram->build(pProgram->getDevices(), "-invalid-option", false);
    EXPECT_EQ(CL_BUILD_PROGRAM_FAILURE, retVal);

    // fail build - linked code is corrupted and cannot be postprocessed
    auto p3 = std::make_unique<FailingGenBinaryProgram>(toClDeviceVector(*device));
    std::string testFile;
    size_t sourceSize;
    testFile.append(clFiles);
    testFile.append("CopyBuffer_simd16.cl"); // source file
    auto pSourceBuffer = loadDataFromFile(testFile.c_str(), sourceSize);
    EXPECT_NE(0u, sourceSize);
    EXPECT_NE(nullptr, pSourceBuffer);
    p3->sourceCode = pSourceBuffer.get();
    p3->createdFrom = Program::CreatedFrom::SOURCE;
    retVal = p3->build(p3->getDevices(), nullptr, false);
    EXPECT_EQ(CL_INVALID_BINARY, retVal);
    p3.reset(nullptr);

    // build successfully - build kernel and write it to Kernel Cache
    pMockProgram->clearOptions();
    std::string receivedInternalOptions;

    auto debugVars = NEO::getFclDebugVars();
    debugVars.receivedInternalOptionsOutput = &receivedInternalOptions;
    gEnvironment->fclPushDebugVars(debugVars);
    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_TRUE(CompilerOptions::contains(receivedInternalOptions, pPlatform->getClDevice(0)->peekCompilerExtensions())) << receivedInternalOptions;
    gEnvironment->fclPopDebugVars();

    // get build log
    size_t param_value_size_ret = 0u;
    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_LOG,
        0,
        nullptr,
        &param_value_size_ret);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(param_value_size_ret, 0u);

    // get build log when the log does not exist
    pMockProgram->clearLog(device->getRootDeviceIndex());
    retVal = pProgram->getBuildInfo(
        device,
        CL_PROGRAM_BUILD_LOG,
        0,
        nullptr,
        &param_value_size_ret);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(param_value_size_ret, 0u);

    // build successfully - build kernel but do not write it to Kernel Cache (kernel is already in the Cache)
    pMockProgram->setBuildStatus(CL_BUILD_NONE);
    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // build successfully - kernel is already in Kernel Cache, do not build and take it from Cache
    retVal = pProgram->build(pProgram->getDevices(), nullptr, true);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // fail build - code to be build does not exist
    pMockProgram->sourceCode = ""; // set source code as non-existent (invalid)
    pMockProgram->createdFrom = Program::CreatedFrom::SOURCE;
    pMockProgram->setBuildStatus(CL_BUILD_NONE);
    pMockProgram->setCreatedFromBinary(false);
    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    EXPECT_EQ(CL_INVALID_PROGRAM, retVal);
}
// Rebuilding repeatedly with the same or different compiler options (including
// repeating an option) must always succeed.
TEST_F(ProgramFromSourceTest, GivenDuplicateOptionsWhenCreatingWithSourceThenBuildSucceeds) {
    KernelBinaryHelper kbHelper(binaryFileName, false);

    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    retVal = pProgram->build(pProgram->getDevices(), CompilerOptions::fastRelaxedMath.data(), false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    retVal = pProgram->build(pProgram->getDevices(), CompilerOptions::fastRelaxedMath.data(), false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    retVal = pProgram->build(pProgram->getDevices(), CompilerOptions::finiteMathOnly.data(), false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
// Building without -cl-std must pass plain compiler extensions to the FCL, but
// neither the features-augmented extension string nor cl_khr_3d_image_writes.
TEST_F(ProgramFromSourceTest, WhenBuildingProgramThenFeaturesAndExtraExtensionsAreNotAdded) {
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pClDevice = pContext->getDevice(0);
    pClDevice->getExecutionEnvironment()->rootDeviceEnvironments[pClDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto extensionsOption = static_cast<ClDevice *>(devices[0])->peekCompilerExtensions();
    auto extensionsWithFeaturesOption = static_cast<ClDevice *>(devices[0])->peekCompilerExtensionsWithFeatures();
    // Before the build nothing has been captured yet.
    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsOption)));
    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsWithFeaturesOption)));
    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(std::string{"+cl_khr_3d_image_writes "})));

    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);

    EXPECT_THAT(cip->buildInternalOptions, testing::HasSubstr(extensionsOption));
    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsWithFeaturesOption)));
    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(std::string{"+cl_khr_3d_image_writes "})));
}
// Building with -cl-std=CL2.0 must add the cl_khr_3d_image_writes extension to
// the internal options, and getInternalOptions must run exactly once.
TEST_F(ProgramFromSourceTest, WhenBuildingProgramWithOpenClC20ThenExtraExtensionsAreAdded) {
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pClDevice = pContext->getDevice(0);
    pClDevice->getExecutionEnvironment()->rootDeviceEnvironments[pClDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto pProgram = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pClDevice)); // shadows the fixture program; gen-binary step always succeeds
    pProgram->sourceCode = "__kernel mock() {}";
    pProgram->createdFrom = Program::CreatedFrom::SOURCE;

    MockProgram::getInternalOptionsCalled = 0;

    auto extensionsOption = static_cast<ClDevice *>(devices[0])->peekCompilerExtensions();
    auto extensionsWithFeaturesOption = static_cast<ClDevice *>(devices[0])->peekCompilerExtensionsWithFeatures();
    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(std::string{"+cl_khr_3d_image_writes "})));

    retVal = pProgram->build(pProgram->getDevices(), "-cl-std=CL2.0", false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    EXPECT_THAT(cip->buildInternalOptions, testing::HasSubstr(std::string{"+cl_khr_3d_image_writes "}));
    EXPECT_EQ(1, MockProgram::getInternalOptionsCalled);
}
// Building with -cl-std=CL3.0 must switch from the plain extensions string to
// the features-augmented one, and getInternalOptions must run exactly once.
TEST_F(ProgramFromSourceTest, WhenBuildingProgramWithOpenClC30ThenFeaturesAreAdded) {
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pClDevice = pContext->getDevice(0);
    pClDevice->getExecutionEnvironment()->rootDeviceEnvironments[pClDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto pProgram = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pClDevice)); // shadows the fixture program
    pProgram->sourceCode = "__kernel mock() {}";
    pProgram->createdFrom = Program::CreatedFrom::SOURCE;

    MockProgram::getInternalOptionsCalled = 0;

    auto extensionsOption = static_cast<ClDevice *>(devices[0])->peekCompilerExtensions();
    auto extensionsWithFeaturesOption = static_cast<ClDevice *>(devices[0])->peekCompilerExtensionsWithFeatures();
    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsOption)));
    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsWithFeaturesOption)));

    retVal = pProgram->build(pProgram->getDevices(), "-cl-std=CL3.0", false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    EXPECT_THAT(cip->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsOption)));
    EXPECT_THAT(cip->buildInternalOptions, testing::HasSubstr(extensionsWithFeaturesOption));
    EXPECT_EQ(1, MockProgram::getInternalOptionsCalled);
}
// Building twice with -cl-std=CL3.0 must not duplicate the features-augmented
// extension string in the captured internal options.
TEST_F(ProgramFromSourceTest, WhenBuildingProgramWithOpenClC30ThenFeaturesAreAddedOnlyOnce) {
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pClDevice = pContext->getDevice(0);
    pClDevice->getExecutionEnvironment()->rootDeviceEnvironments[pClDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto pProgram = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pClDevice)); // shadows the fixture program
    pProgram->sourceCode = "__kernel mock() {}";
    pProgram->createdFrom = Program::CreatedFrom::SOURCE;

    retVal = pProgram->build(pProgram->getDevices(), "-cl-std=CL3.0", false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    retVal = pProgram->build(pProgram->getDevices(), "-cl-std=CL3.0", false);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // The option string must contain exactly one occurrence.
    auto extensionsWithFeaturesOption = pClDevice->peekCompilerExtensionsWithFeatures();
    auto &internalOptions = cip->buildInternalOptions;
    auto pos = internalOptions.find(extensionsWithFeaturesOption);
    EXPECT_NE(std::string::npos, pos);

    pos = internalOptions.find(extensionsWithFeaturesOption, pos + 1);
    EXPECT_EQ(std::string::npos, pos);
}
// compile() without -cl-std behaves like build(): plain extensions appear in
// internal options, features/cl_khr_3d_image_writes do not; getInternalOptions
// runs exactly once.
TEST_F(ProgramFromSourceTest, WhenCompilingProgramThenFeaturesAndExtraExtensionsAreNotAdded) {
    auto pCompilerInterface = new MockCompilerInterfaceCaptureBuildOptions();
    auto pClDevice = static_cast<ClDevice *>(devices[0]);
    pClDevice->getExecutionEnvironment()->rootDeviceEnvironments[pClDevice->getRootDeviceIndex()]->compilerInterface.reset(pCompilerInterface);
    auto extensionsOption = pClDevice->peekCompilerExtensions();
    auto extensionsWithFeaturesOption = pClDevice->peekCompilerExtensionsWithFeatures();
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsOption)));
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsWithFeaturesOption)));
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(std::string{"+cl_khr_3d_image_writes "})));
    MockProgram::getInternalOptionsCalled = 0;

    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);

    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::HasSubstr(extensionsOption));
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsWithFeaturesOption)));
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(std::string{"+cl_khr_3d_image_writes "})));
    EXPECT_EQ(1, MockProgram::getInternalOptionsCalled);
}
// compile() with -cl-std=CL2.0 must add cl_khr_3d_image_writes to internal
// options; getInternalOptions runs exactly once.
TEST_F(ProgramFromSourceTest, WhenCompilingProgramWithOpenClC20ThenExtraExtensionsAreAdded) {
    auto pCompilerInterface = new MockCompilerInterfaceCaptureBuildOptions();
    auto pClDevice = static_cast<ClDevice *>(devices[0]);
    pClDevice->getExecutionEnvironment()->rootDeviceEnvironments[pClDevice->getRootDeviceIndex()]->compilerInterface.reset(pCompilerInterface);
    auto extensionsOption = pClDevice->peekCompilerExtensions();
    auto extensionsWithFeaturesOption = pClDevice->peekCompilerExtensionsWithFeatures();
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(std::string{"+cl_khr_3d_image_writes "})));
    MockProgram::getInternalOptionsCalled = 0;

    retVal = pProgram->compile(pProgram->getDevices(), "-cl-std=CL2.0", 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);

    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::HasSubstr(std::string{"+cl_khr_3d_image_writes "}));
    EXPECT_EQ(1, MockProgram::getInternalOptionsCalled);
}
// compile() with -cl-std=CL3.0 must use the features-augmented extension
// string instead of the plain one.
TEST_F(ProgramFromSourceTest, WhenCompilingProgramWithOpenClC30ThenFeaturesAreAdded) {
    auto pCompilerInterface = new MockCompilerInterfaceCaptureBuildOptions();
    auto pClDevice = pContext->getDevice(0);
    pClDevice->getExecutionEnvironment()->rootDeviceEnvironments[pClDevice->getRootDeviceIndex()]->compilerInterface.reset(pCompilerInterface);
    auto pProgram = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pClDevice)); // shadows the fixture program
    pProgram->sourceCode = "__kernel mock() {}";
    pProgram->createdFrom = Program::CreatedFrom::SOURCE;
    auto extensionsOption = pClDevice->peekCompilerExtensions();
    auto extensionsWithFeaturesOption = pClDevice->peekCompilerExtensionsWithFeatures();
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsOption)));
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsWithFeaturesOption)));

    retVal = pProgram->compile(pProgram->getDevices(), "-cl-std=CL3.0", 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);

    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::Not(testing::HasSubstr(extensionsOption)));
    EXPECT_THAT(pCompilerInterface->buildInternalOptions, testing::HasSubstr(extensionsWithFeaturesOption));
}
// Test helper that hooks MemoryManagement::deleteCallback to detect whether watched
// heap pointers were freed. Used below to verify that rebuilding a program releases
// the previous build's KernelInfo objects.
class Callback {
  public:
    Callback() {
        // Save and replace the global delete hook; restored in the destructor (RAII).
        this->oldCallback = MemoryManagement::deleteCallback;
        MemoryManagement::deleteCallback = thisCallback;
    }
    ~Callback() {
        MemoryManagement::deleteCallback = this->oldCallback;
    }
    // Begin tracking p with a zero delete-count.
    static void watch(const void *p) {
        watchList[p] = 0u;
    }
    // The watched pointer must have been deleted at least once while tracked.
    static void unwatch(const void *p) {
        EXPECT_GT(watchList[p], 0u);
        watchList.erase(p);
    }

  private:
    void (*oldCallback)(void *);
    static void thisCallback(void *p) {
        // NOTE(review): the previously installed callback is saved but never chained
        // here - presumably fine for these tests; confirm before nesting Callbacks.
        if (watchList.find(p) != watchList.end())
            watchList[p]++;
    }
    // Maps each watched pointer to the number of times it was passed to delete.
    static std::map<const void *, uint32_t> watchList;
};

std::map<const void *, uint32_t> Callback::watchList;
// Rebuilding the same source with different compiler options must produce different
// cache-file hashes, and each rebuild must free the previous build's KernelInfo
// (verified via the Callback delete-hook helper above).
TEST_F(ProgramFromSourceTest, GivenDifferentCommpilerOptionsWhenBuildingProgramThenKernelHashesAreDifferent) {
    KernelBinaryHelper kbHelper(binaryFileName, true);

    auto rootDeviceIndex = pContext->getDevice(0)->getRootDeviceIndex();

    CreateProgramWithSource(
        pContext,
        sourceFileName);

    Callback callback;

    // Build 1: no options.
    retVal = pProgram->build(pProgram->getDevices(), nullptr, true);
    EXPECT_EQ(CL_SUCCESS, retVal);
    auto hash1 = pProgram->getCachedFileName();
    auto kernel1 = pProgram->getKernelInfo("CopyBuffer", rootDeviceIndex);
    Callback::watch(kernel1);
    EXPECT_NE(nullptr, kernel1);

    // Build 2: different options => different hash, kernel1 must be deleted.
    retVal = pProgram->build(pProgram->getDevices(), CompilerOptions::fastRelaxedMath.data(), true);
    EXPECT_EQ(CL_SUCCESS, retVal);
    auto hash2 = pProgram->getCachedFileName();
    auto kernel2 = pProgram->getKernelInfo("CopyBuffer", rootDeviceIndex);
    EXPECT_NE(nullptr, kernel2);
    EXPECT_NE(hash1, hash2);
    Callback::unwatch(kernel1);
    Callback::watch(kernel2);

    // Build 3: yet another option set => third distinct hash.
    retVal = pProgram->build(pProgram->getDevices(), CompilerOptions::finiteMathOnly.data(), true);
    EXPECT_EQ(CL_SUCCESS, retVal);
    auto hash3 = pProgram->getCachedFileName();
    auto kernel3 = pProgram->getKernelInfo("CopyBuffer", rootDeviceIndex);
    EXPECT_NE(nullptr, kernel3);
    EXPECT_NE(hash1, hash3);
    EXPECT_NE(hash2, hash3);
    Callback::unwatch(kernel2);
    Callback::watch(kernel3);

    // Build 4: same (no) options as build 3 would hash with a BINARY origin; hash
    // must match build 3. setIrBinary presumably takes ownership of the new char[16]
    // (TODO confirm) - the buffer stands in for IR content during hashing.
    pProgram->createdFrom = NEO::Program::CreatedFrom::BINARY;
    pProgram->setIrBinary(new char[16], true);
    pProgram->setIrBinarySize(16, true);
    retVal = pProgram->build(pProgram->getDevices(), nullptr, true);
    EXPECT_EQ(CL_SUCCESS, retVal);
    auto hash4 = pProgram->getCachedFileName();
    auto kernel4 = pProgram->getKernelInfo("CopyBuffer", rootDeviceIndex);
    EXPECT_NE(nullptr, kernel4);
    EXPECT_EQ(hash3, hash4);
    Callback::unwatch(kernel3);
    Callback::watch(kernel4);

    // Build 5: back to SOURCE origin with no options => hash matches build 1.
    pProgram->createdFrom = NEO::Program::CreatedFrom::SOURCE;
    retVal = pProgram->build(pProgram->getDevices(), nullptr, true);
    EXPECT_EQ(CL_SUCCESS, retVal);
    auto hash5 = pProgram->getCachedFileName();
    auto kernel5 = pProgram->getKernelInfo("CopyBuffer", rootDeviceIndex);
    EXPECT_NE(nullptr, kernel5);
    EXPECT_EQ(hash1, hash5);
    Callback::unwatch(kernel4);
}
// Creating a program from zero source strings must fail with CL_INVALID_VALUE
// and produce no program object.
TEST_F(ProgramFromSourceTest, GivenEmptyProgramWhenCreatingProgramThenInvalidValueErrorIsReturned) {
    std::unique_ptr<Program> emptyProgram{Program::create(pContext, 0, nullptr, nullptr, retVal)};
    EXPECT_EQ(nullptr, emptyProgram.get());
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
}
// Walks Program::compile through its parameter-validation, busy-state, header,
// missing-compiler-interface, compile-error and success paths in a fixed sequence.
TEST_F(ProgramFromSourceTest, GivenSpecificParamatersWhenCompilingProgramThenSuccessOrCorrectErrorCodeIsReturned) {
    CreateProgramWithSource(
        pContext,
        sourceFileName);

    cl_program inputHeaders;                 // intentionally uninitialized - only its address is used below
    const char *headerIncludeNames = "";
    cl_program nullprogram = nullptr;
    cl_program invprogram = (cl_program)pContext; // a valid cl object that is NOT a program

    // Order of following microtests is important - do not change.
    // Add new microtests at end.

    // invalid compile parameters: combinations of numInputHeaders==0 & inputHeaders & headerIncludeNames
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, &inputHeaders, nullptr);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, &headerIncludeNames);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);

    // invalid compile parameters: combinations of numInputHeaders!=0 & inputHeaders & headerIncludeNames
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 1, &inputHeaders, nullptr);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 1, nullptr, &headerIncludeNames);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);

    // fail compilation - another compilation is already in progress
    pProgram->setBuildStatus(CL_BUILD_IN_PROGRESS);
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_INVALID_OPERATION, retVal);
    pProgram->setBuildStatus(CL_BUILD_NONE);

    // invalid compile parameters: invalid header Program object==nullptr
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 1, &nullprogram, &headerIncludeNames);
    EXPECT_EQ(CL_INVALID_PROGRAM, retVal);

    // invalid compile parameters: invalid header Program object==non Program object
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 1, &invprogram, &headerIncludeNames);
    EXPECT_EQ(CL_INVALID_PROGRAM, retVal);

    // compile successfully kernel with header
    std::string testFile;
    size_t sourceSize;
    MockProgram *p3; // header Program object
    testFile.append(clFiles);
    testFile.append("CopyBuffer_simd16.cl"); // header source file
    auto pSourceBuffer = loadDataFromFile(testFile.c_str(), sourceSize);
    EXPECT_NE(0u, sourceSize);
    EXPECT_NE(nullptr, pSourceBuffer);
    const char *sources[1] = {pSourceBuffer.get()};
    p3 = Program::create<MockProgram>(pContext, 1, sources, &sourceSize, retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, p3);
    inputHeaders = p3;
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 1, &inputHeaders, &headerIncludeNames);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // fail compilation of kernel with header - header is invalid
    p3->sourceCode = ""; // set header source code as non-existent (invalid)
    retVal = p3->compile(p3->getDevices(), nullptr, 1, &inputHeaders, &headerIncludeNames);
    EXPECT_EQ(CL_INVALID_PROGRAM, retVal);
    delete p3;

    // fail compilation - CompilerInterface cannot be obtained
    // Temporarily swap in an environment without a compiler interface, then restore it.
    auto device = pContext->getDevice(0);
    auto executionEnvironment = device->getExecutionEnvironment();
    std::unique_ptr<RootDeviceEnvironment> rootDeviceEnvironment = std::make_unique<NoCompilerInterfaceRootDeviceEnvironment>(*executionEnvironment);
    std::swap(rootDeviceEnvironment, executionEnvironment->rootDeviceEnvironments[device->getRootDeviceIndex()]);
    auto p2 = std::make_unique<MockProgram>(toClDeviceVector(*device));
    retVal = p2->compile(p2->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_OUT_OF_HOST_MEMORY, retVal);
    p2.reset(nullptr);
    std::swap(rootDeviceEnvironment, executionEnvironment->rootDeviceEnvironments[device->getRootDeviceIndex()]);

    // fail compilation - any compilation error (here caused by specifying unrecognized option)
    retVal = pProgram->compile(pProgram->getDevices(), "-invalid-option", 0, nullptr, nullptr);
    EXPECT_EQ(CL_COMPILE_PROGRAM_FAILURE, retVal);

    // compile successfully
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
// Verifies that user build options and the gtpinRera / greaterThan4gbBuffersRequired
// flags are forwarded to (or kept out of) the compiler's option strings as requested.
TEST_F(ProgramFromSourceTest, GivenFlagsWhenCompilingProgramThenBuildOptionsHaveBeenApplied) {
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pDevice = pContext->getDevice(0);
    // Ownership of the mock compiler interface transfers to the root device environment.
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto program = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pDevice));
    program->sourceCode = "__kernel mock() {}";

    // Ask to build created program without NEO::CompilerOptions::gtpinRera and NEO::CompilerOptions::greaterThan4gbBuffersRequired flags.
    // NOTE(review): compiles the local 'program' but passes the fixture pProgram's
    // device list - presumably identical (both resolve to device 0); confirm intent.
    cl_int retVal = program->compile(pProgram->getDevices(), CompilerOptions::fastRelaxedMath.data(), 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // Check build options that were applied
    EXPECT_TRUE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::fastRelaxedMath)) << cip->buildOptions;
    EXPECT_FALSE(CompilerOptions::contains(cip->buildInternalOptions, CompilerOptions::gtpinRera)) << cip->buildInternalOptions;
    if (!pDevice->areSharedSystemAllocationsAllowed()) {
        // Without shared-system allocations the >4GB flag must not be added implicitly.
        EXPECT_FALSE(CompilerOptions::contains(cip->buildInternalOptions, CompilerOptions::greaterThan4gbBuffersRequired)) << cip->buildInternalOptions;
    }
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, pPlatform->getClDevice(0)->peekCompilerExtensions())) << cip->buildInternalOptions;

    // Ask to build created program with NEO::CompilerOptions::gtpinRera and NEO::CompilerOptions::greaterThan4gbBuffersRequired flags.
    cip->buildOptions.clear();
    cip->buildInternalOptions.clear();
    auto options = CompilerOptions::concatenate(CompilerOptions::greaterThan4gbBuffersRequired, CompilerOptions::gtpinRera, CompilerOptions::finiteMathOnly);
    retVal = program->compile(pProgram->getDevices(), options.c_str(),
                              0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // Check build options that were applied
    EXPECT_FALSE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::fastRelaxedMath)) << cip->buildOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::finiteMathOnly)) << cip->buildOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, CompilerOptions::gtpinRera)) << cip->buildInternalOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, CompilerOptions::greaterThan4gbBuffersRequired)) << cip->buildInternalOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, pPlatform->getClDevice(0)->peekCompilerExtensions())) << cip->buildInternalOptions;
}
// Verifies that Program::link forwards user options and internal flags to the
// compiler interface, and that getInternalOptions is queried once per phase.
TEST_F(ProgramTests, GivenFlagsWhenLinkingProgramThenBuildOptionsHaveBeenApplied) {
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    // Local program deliberately shadows any fixture member of the same name.
    auto pProgram = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pClDevice));
    pProgram->sourceCode = "__kernel mock() {}";
    pProgram->createdFrom = Program::CreatedFrom::SOURCE;
    MockProgram::getInternalOptionsCalled = 0;
    cl_program program = pProgram.get();

    // compile successfully a kernel to be linked later
    cl_int retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(1, MockProgram::getInternalOptionsCalled);

    // Ask to link created program with NEO::CompilerOptions::gtpinRera and NEO::CompilerOptions::greaterThan4gbBuffersRequired flags.
    auto options = CompilerOptions::concatenate(CompilerOptions::greaterThan4gbBuffersRequired, CompilerOptions::gtpinRera, CompilerOptions::finiteMathOnly);
    // The mock is installed only for the link phase; ownership transfers to the environment.
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    retVal = pProgram->link(pProgram->getDevices(), options.c_str(), 1, &program);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(2, MockProgram::getInternalOptionsCalled);

    // Check build options that were applied
    EXPECT_FALSE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::fastRelaxedMath)) << cip->buildOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::finiteMathOnly)) << cip->buildOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, CompilerOptions::gtpinRera)) << cip->buildInternalOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, CompilerOptions::greaterThan4gbBuffersRequired)) << cip->buildInternalOptions;
}
// Exercises the clCreateProgramWithSource length-handling corner cases: null
// lengths array, zero-length entries, and an array of many source strings.
TEST_F(ProgramFromSourceTest, GivenAdvancedOptionsWhenCreatingProgramThenSuccessIsReturned) {
    std::string testFile;
    size_t sourceSize = 0;
    Program *p;

    testFile.append(clFiles);
    testFile.append("CopyBuffer_simd16.cl");
    auto pSourceBuffer = loadDataFromFile(testFile.c_str(), sourceSize);
    const char *sources[1] = {pSourceBuffer.get()};
    EXPECT_NE(nullptr, pSourceBuffer);

    //According to spec: If lengths is NULL, all strings in the strings argument are considered null-terminated.
    p = Program::create(pContext, 1, sources, nullptr, retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, p);
    delete p;

    //According to spec: If an element in lengths is zero, its accompanying string is null-terminated.
    p = Program::create(pContext, 1, sources, &sourceSize, retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, p);
    delete p;

    // Split the source file into individually heap-allocated, null-terminated lines
    // so it can be passed as an array of many strings. Freed via delete[] at the end.
    std::stringstream dataStream(pSourceBuffer.get());
    std::string line;
    std::vector<const char *> lines;
    while (std::getline(dataStream, line, '\n')) {
        char *ptr = new char[line.length() + 1]();
        strcpy_s(ptr, line.length() + 1, line.c_str());
        lines.push_back(ptr);
    }

    // Work on array of strings
    p = Program::create(pContext, 1, &lines[0], nullptr, retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, p);
    delete p;

    // A zero length in the middle forces that entry onto the null-terminated path.
    std::vector<size_t> sizes;
    for (auto ptr : lines)
        sizes.push_back(strlen(ptr));
    sizes[sizes.size() / 2] = 0;

    p = Program::create(pContext, (cl_uint)sizes.size(), &lines[0], &sizes[0], retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, p);
    delete p;

    for (auto ptr : lines)
        delete[] ptr;
}
// Walks Program::link through its parameter-validation, busy-state, missing-IR,
// link-error, post-processing-error and success paths in a fixed sequence.
TEST_F(ProgramFromSourceTest, GivenSpecificParamatersWhenLinkingProgramThenSuccessOrCorrectErrorCodeIsReturned) {
    CreateProgramWithSource(
        pContext,
        sourceFileName);

    cl_program program = pProgram;
    cl_program nullprogram = nullptr;
    cl_program invprogram = (cl_program)pContext; // a valid cl object that is NOT a program

    // Order of following microtests is important - do not change.
    // Add new microtests at end.

    // invalid link parameters: combinations of numInputPrograms & inputPrograms
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 0, &program);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, nullptr);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);

    // fail linking - another linking is already in progress
    pProgram->setBuildStatus(CL_BUILD_IN_PROGRESS);
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, &program);
    EXPECT_EQ(CL_INVALID_OPERATION, retVal);
    pProgram->setBuildStatus(CL_BUILD_NONE);

    // invalid link parameters: invalid Program object==nullptr
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, &nullprogram);
    EXPECT_EQ(CL_INVALID_PROGRAM, retVal);

    // invalid link parameters: invalid Program object==non Program object
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, &invprogram);
    EXPECT_EQ(CL_INVALID_PROGRAM, retVal);

    // compile successfully a kernel to be linked later
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // fail linking - code to be linked does not exist
    // Temporarily steal the IR binary (release first so setIrBinary(nullptr) does not
    // free it), then restore it after the negative check.
    bool isSpirvTmp = pProgram->getIsSpirV();
    char *pIrBin = pProgram->irBinary.get();
    pProgram->irBinary.release();
    size_t irBinSize = pProgram->irBinarySize;
    pProgram->setIrBinary(nullptr, false);
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, &program);
    EXPECT_EQ(CL_INVALID_PROGRAM, retVal);
    pProgram->setIrBinary(pIrBin, isSpirvTmp);

    // fail linking - size of code to be linked is == 0
    pProgram->setIrBinarySize(0, isSpirvTmp);
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, &program);
    EXPECT_EQ(CL_INVALID_PROGRAM, retVal);
    pProgram->setIrBinarySize(irBinSize, isSpirvTmp);

    // fail linking - any link error (here caused by specifying unrecognized option)
    retVal = pProgram->link(pProgram->getDevices(), "-invalid-option", 1, &program);
    EXPECT_EQ(CL_LINK_PROGRAM_FAILURE, retVal);

    // fail linking - linked code is corrupted and cannot be postprocessed
    auto p2 = std::make_unique<FailingGenBinaryProgram>(pProgram->getDevices());
    retVal = p2->link(p2->getDevices(), nullptr, 1, &program);
    EXPECT_EQ(CL_INVALID_BINARY, retVal);
    p2.reset(nullptr);

    // link successfully
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, &program);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
// Exercises the create-library variant of Program::link: success, a link error
// from a bad option, and failure when no CompilerInterface is available.
TEST_F(ProgramFromSourceTest, GivenInvalidOptionsWhenCreatingLibraryThenCorrectErrorIsReturned) {
    cl_program program = pProgram;

    // Order of following microtests is important - do not change.
    // Add new microtests at end.

    // compile successfully a kernel to be later used to create library
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // create library successfully
    retVal = pProgram->link(pProgram->getDevices(), CompilerOptions::createLibrary.data(), 1, &program);
    EXPECT_EQ(CL_SUCCESS, retVal);

    // fail library creation - any link error (here caused by specifying unrecognized option)
    retVal = pProgram->link(pProgram->getDevices(), CompilerOptions::concatenate(CompilerOptions::createLibrary, "-invalid-option").c_str(), 1, &program);
    EXPECT_EQ(CL_LINK_PROGRAM_FAILURE, retVal);

    // Temporarily swap in an environment without a compiler interface, then restore it.
    auto device = pContext->getDevice(0);
    auto executionEnvironment = device->getExecutionEnvironment();
    std::unique_ptr<RootDeviceEnvironment> rootDeviceEnvironment = std::make_unique<NoCompilerInterfaceRootDeviceEnvironment>(*executionEnvironment);
    std::swap(rootDeviceEnvironment, executionEnvironment->rootDeviceEnvironments[device->getRootDeviceIndex()]);
    auto failingProgram = std::make_unique<MockProgram>(toClDeviceVector(*device));

    // fail library creation - CompilerInterface cannot be obtained
    retVal = failingProgram->link(failingProgram->getDevices(), CompilerOptions::createLibrary.data(), 1, &program);
    EXPECT_EQ(CL_OUT_OF_HOST_MEMORY, retVal);
    std::swap(rootDeviceEnvironment, executionEnvironment->rootDeviceEnvironments[device->getRootDeviceIndex()]);
}
// Fixture for patch-token tests; delegates setup/teardown to ProgramSimpleFixture.
// Kept as a distinct type so these tests get their own test-suite name.
class PatchTokenFromBinaryTest : public ProgramSimpleFixture {
  public:
    void SetUp() override {
        ProgramSimpleFixture::SetUp();
    }
    void TearDown() override {
        ProgramSimpleFixture::TearDown();
    }
};
using PatchTokenTests = Test<PatchTokenFromBinaryTest>;
// Command stream receiver mock that records which allocations are currently
// resident, keyed by their underlying CPU buffer, so tests can inspect residency.
template <typename FamilyType>
class CommandStreamReceiverMock : public UltCommandStreamReceiver<FamilyType> {
    using BaseClass = UltCommandStreamReceiver<FamilyType>;
    using BaseClass::BaseClass;

  public:
    void makeResident(GraphicsAllocation &graphicsAllocation) override {
        // Record the allocation, then defer to the real implementation.
        residency[graphicsAllocation.getUnderlyingBuffer()] = graphicsAllocation.getUnderlyingBufferSize();
        CommandStreamReceiver::makeResident(graphicsAllocation);
    }

    void makeNonResident(GraphicsAllocation &graphicsAllocation) override {
        residency.erase(graphicsAllocation.getUnderlyingBuffer());
        CommandStreamReceiver::makeNonResident(graphicsAllocation);
    }

    // Maps underlying buffer pointer -> buffer size for every resident allocation.
    std::map<const void *, size_t> residency;
};
// Builds a binary with a constant-memory surface and verifies that makeResident
// brings both the kernel ISA and the constant allocation into residency, that the
// constant surface's GPU address is patched into cross-thread data, and that the
// kernel reports exactly two resident surfaces.
HWTEST_F(PatchTokenTests, givenKernelRequiringConstantAllocationWhenMakeResidentIsCalledThenConstantAllocationIsMadeResident) {
    CreateProgramFromBinary(pContext, pContext->getDevices(), "test_constant_memory");

    ASSERT_NE(nullptr, pProgram);
    retVal = pProgram->build(
        pProgram->getDevices(),
        nullptr,
        false);

    ASSERT_EQ(CL_SUCCESS, retVal);

    auto pKernelInfo = pProgram->getKernelInfo("test", rootDeviceIndex);

    ASSERT_NE(nullptr, pProgram->getConstantSurface(pClDevice->getRootDeviceIndex()));
    // Expected constants baked into the test_constant_memory binary.
    uint32_t expected_values[] = {0xabcd5432u, 0xaabb5533u};
    uint32_t *constBuff = reinterpret_cast<uint32_t *>(pProgram->getConstantSurface(pClDevice->getRootDeviceIndex())->getUnderlyingBuffer());
    EXPECT_EQ(expected_values[0], constBuff[0]);
    EXPECT_EQ(expected_values[1], constBuff[1]);

    std::unique_ptr<Kernel> pKernel(Kernel::create(pProgram, *pKernelInfo, *pClDevice, &retVal));

    ASSERT_EQ(CL_SUCCESS, retVal);
    ASSERT_NE(nullptr, pKernel);

    // The device takes ownership of the mock CSR via resetCommandStreamReceiver.
    auto pCommandStreamReceiver = new CommandStreamReceiverMock<FamilyType>(*pDevice->executionEnvironment, pDevice->getRootDeviceIndex(), pDevice->getDeviceBitfield());
    ASSERT_NE(nullptr, pCommandStreamReceiver);

    pDevice->resetCommandStreamReceiver(pCommandStreamReceiver);
    pCommandStreamReceiver->residency.clear();

    pKernel->makeResident(*pCommandStreamReceiver);
    EXPECT_EQ(2u, pCommandStreamReceiver->residency.size());

    auto &residencyVector = pCommandStreamReceiver->getResidencyAllocations();

    //we expect kernel ISA here and constant allocation
    auto kernelIsa = pKernel->getKernelInfo().getGraphicsAllocation();
    auto constantAllocation = pProgram->getConstantSurface(pDevice->getRootDeviceIndex());

    auto element = std::find(residencyVector.begin(), residencyVector.end(), kernelIsa);
    EXPECT_NE(residencyVector.end(), element);
    element = std::find(residencyVector.begin(), residencyVector.end(), constantAllocation);
    EXPECT_NE(residencyVector.end(), element);

    // The stateless constants-surface pointer in cross-thread data must equal the
    // constant surface's patchable GPU address.
    auto crossThreadData = pKernel->getCrossThreadData();
    uint32_t *constBuffGpuAddr = reinterpret_cast<uint32_t *>(pProgram->getConstantSurface(pContext->getDevice(0)->getRootDeviceIndex())->getGpuAddressToPatch());
    uintptr_t *pDst = reinterpret_cast<uintptr_t *>(crossThreadData + pKernelInfo->kernelDescriptor.payloadMappings.implicitArgs.globalConstantsSurfaceAddress.stateless);
    EXPECT_EQ(*pDst, reinterpret_cast<uintptr_t>(constBuffGpuAddr));

    pCommandStreamReceiver->makeSurfacePackNonResident(pCommandStreamReceiver->getResidencyAllocations());
    EXPECT_EQ(0u, pCommandStreamReceiver->residency.size());

    std::vector<Surface *> surfaces;
    pKernel->getResidency(surfaces);
    EXPECT_EQ(2u, surfaces.size());

    // getResidency hands out owning pointers; free them to avoid leaking.
    for (Surface *surface : surfaces) {
        delete surface;
    }
}
// Builds the kernel_data_param binary and verifies the GWS (global work size)
// cross-thread offsets were patched for all three dimensions.
TEST_F(PatchTokenTests, WhenBuildingProgramThenGwsIsSet) {
    CreateProgramFromBinary(pContext, pContext->getDevices(), "kernel_data_param");
    ASSERT_NE(nullptr, pProgram);

    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    ASSERT_EQ(CL_SUCCESS, retVal);

    auto pKernelInfo = pProgram->getKernelInfo("test", rootDeviceIndex);
    // An unpatched offset stays at uint32_t(-1); every dimension must differ from it.
    for (int dim = 0; dim < 3; ++dim) {
        ASSERT_NE(static_cast<uint32_t>(-1), pKernelInfo->kernelDescriptor.payloadMappings.dispatchTraits.globalWorkSize[dim]);
    }
}
// Builds the kernel_data_param binary and verifies the LWS (local work size)
// cross-thread offsets were patched for every dimension, for both kernels.
TEST_F(PatchTokenTests, WhenBuildingProgramThenLwsIsSet) {
    CreateProgramFromBinary(pContext, pContext->getDevices(), "kernel_data_param");
    ASSERT_NE(nullptr, pProgram);

    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    ASSERT_EQ(CL_SUCCESS, retVal);

    // An unpatched offset stays at uint32_t(-1).
    const auto unpatched = static_cast<uint32_t>(-1);

    auto pKernelInfo = pProgram->getKernelInfo("test", rootDeviceIndex);
    for (int dim = 0; dim < 3; ++dim) {
        ASSERT_NE(unpatched, pKernelInfo->kernelDescriptor.payloadMappings.dispatchTraits.localWorkSize[dim]);
    }

    // The second kernel reads the local size, so both LWS slots must be patched.
    pKernelInfo = pProgram->getKernelInfo("test_get_local_size", rootDeviceIndex);
    for (int dim = 0; dim < 3; ++dim) {
        ASSERT_NE(unpatched, pKernelInfo->kernelDescriptor.payloadMappings.dispatchTraits.localWorkSize[dim]);
    }
    for (int dim = 0; dim < 3; ++dim) {
        ASSERT_NE(unpatched, pKernelInfo->kernelDescriptor.payloadMappings.dispatchTraits.localWorkSize2[dim]);
    }
}
// Builds a binary using stateless constant-memory kernel arguments and checks the
// decoded arg count and pointer sizes.
TEST_F(PatchTokenTests, WhenBuildingProgramThenConstantKernelArgsAreAvailable) {
    // PATCH_TOKEN_STATELESS_CONSTANT_MEMORY_OBJECT_KERNEL_ARGUMENT
    CreateProgramFromBinary(pContext, pContext->getDevices(), "test_basic_constant");

    ASSERT_NE(nullptr, pProgram);
    retVal = pProgram->build(
        pProgram->getDevices(),
        nullptr,
        false);

    EXPECT_EQ(CL_SUCCESS, retVal);

    auto pKernelInfo = pProgram->getKernelInfo("constant_kernel", rootDeviceIndex);
    ASSERT_NE(nullptr, pKernelInfo);

    auto pKernel = Kernel::create(
        pProgram,
        *pKernelInfo,
        *pClDevice,
        &retVal);

    ASSERT_EQ(CL_SUCCESS, retVal);
    ASSERT_NE(nullptr, pKernel);

    uint32_t numArgs;
    retVal = pKernel->getInfo(CL_KERNEL_NUM_ARGS, sizeof(numArgs), &numArgs, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(3u, numArgs);

    // Pointer args must be decoded with the native pointer width.
    uint32_t sizeOfPtr = sizeof(void *);
    EXPECT_EQ(pKernelInfo->getArgDescriptorAt(0).as<ArgDescPointer>().pointerSize, sizeOfPtr);
    EXPECT_EQ(pKernelInfo->getArgDescriptorAt(1).as<ArgDescPointer>().pointerSize, sizeOfPtr);

    delete pKernel;
}
// Builds the VME kernels binary (when the device supports VME) and verifies the
// kernel is flagged as using VME and can be instantiated.
TEST_F(PatchTokenTests, GivenVmeKernelWhenBuildingKernelThenArgAvailable) {
    // Skip on hardware without VME support - the binary cannot be built there.
    if (!pDevice->getHardwareInfo().capabilityTable.supportsVme) {
        GTEST_SKIP();
    }
    // PATCH_TOKEN_INLINE_VME_SAMPLER_INFO token indicates a VME kernel.
    CreateProgramFromBinary(pContext, pContext->getDevices(), "vme_kernels");

    ASSERT_NE(nullptr, pProgram);
    retVal = pProgram->build(
        pProgram->getDevices(),
        nullptr,
        false);

    EXPECT_EQ(CL_SUCCESS, retVal);

    auto pKernelInfo = pProgram->getKernelInfo("device_side_block_motion_estimate_intel", rootDeviceIndex);
    ASSERT_NE(nullptr, pKernelInfo);
    EXPECT_EQ(true, pKernelInfo->kernelDescriptor.kernelAttributes.flags.usesVme);

    auto pKernel = Kernel::create(
        pProgram,
        *pKernelInfo,
        *pClDevice,
        &retVal);

    ASSERT_NE(nullptr, pKernel);
    delete pKernel;
}
// Fixture for program-level patch-token tests; delegates setup/teardown to
// ProgramSimpleFixture. Kept as a distinct type for its own test-suite name.
class ProgramPatchTokenFromBinaryTest : public ProgramSimpleFixture {
  public:
    void SetUp() override {
        ProgramSimpleFixture::SetUp();
    }
    void TearDown() override {
        ProgramSimpleFixture::TearDown();
    }
};
// 'using' alias instead of typedef, consistent with the other test-suite aliases in this file.
using ProgramPatchTokenTests = Test<ProgramPatchTokenFromBinaryTest>;
// A program binary with an outdated ICBE version must be rejected with
// CL_INVALID_BINARY by both Program::create and processGenBinary.
TEST(ProgramFromBinaryTests, givenBinaryWithInvalidICBEThenErrorIsReturned) {
    cl_int retVal = CL_INVALID_BINARY;

    SProgramBinaryHeader binHeader;
    memset(&binHeader, 0, sizeof(binHeader));
    binHeader.Magic = iOpenCL::MAGIC_CL;
    // Deliberately stale ICBE version - this is what makes the binary invalid.
    binHeader.Version = iOpenCL::CURRENT_ICBE_VERSION - 3;
    binHeader.Device = defaultHwInfo->platform.eRenderCoreFamily;
    binHeader.GPUPointerSizeInBytes = 8;
    binHeader.NumberOfKernels = 0;
    binHeader.SteppingId = 0;
    binHeader.PatchListSize = 0;
    size_t binSize = sizeof(SProgramBinaryHeader);

    {
        // Public create path: rejects the binary outright.
        const unsigned char *binaries[1] = {reinterpret_cast<const unsigned char *>(&binHeader)};

        MockContext context;

        std::unique_ptr<Program> pProgram(Program::create<Program>(&context, context.getDevices(), &binSize, binaries, nullptr, retVal));
        EXPECT_EQ(nullptr, pProgram.get());
        EXPECT_EQ(CL_INVALID_BINARY, retVal);
    }

    {
        // whatever method we choose CL_INVALID_BINARY is always returned
        // Built-in path: creation succeeds, but processing the gen binary fails.
        auto device = std::make_unique<MockClDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(nullptr, mockRootDeviceIndex));
        std::unique_ptr<Program> pProgram(Program::createBuiltInFromGenBinary(nullptr, toClDeviceVector(*device), &binHeader, binSize, &retVal));
        ASSERT_NE(nullptr, pProgram.get());
        EXPECT_EQ(CL_SUCCESS, retVal);

        retVal = pProgram->processGenBinary(*device);
        EXPECT_EQ(CL_INVALID_BINARY, retVal);
    }
}
// A program whose unpacked device binary has been cleared must fail
// processGenBinary with CL_INVALID_BINARY even if its header is valid.
TEST(ProgramFromBinaryTests, givenEmptyProgramThenErrorIsReturned) {
    cl_int retVal = CL_INVALID_BINARY;

    // Valid header (current ICBE version) - only the binary payload is missing.
    SProgramBinaryHeader binHeader;
    memset(&binHeader, 0, sizeof(binHeader));
    binHeader.Magic = iOpenCL::MAGIC_CL;
    binHeader.Version = iOpenCL::CURRENT_ICBE_VERSION;
    binHeader.Device = defaultHwInfo->platform.eRenderCoreFamily;
    binHeader.GPUPointerSizeInBytes = 8;
    binHeader.NumberOfKernels = 0;
    binHeader.SteppingId = 0;
    binHeader.PatchListSize = 0;
    size_t binSize = sizeof(SProgramBinaryHeader);

    auto device = std::make_unique<MockClDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(nullptr, mockRootDeviceIndex));
    std::unique_ptr<MockProgram> pProgram(MockProgram::createBuiltInFromGenBinary<MockProgram>(nullptr, toClDeviceVector(*device), &binHeader, binSize, &retVal));
    ASSERT_NE(nullptr, pProgram.get());
    EXPECT_EQ(CL_SUCCESS, retVal);

    // Drop the unpacked binary so processing has nothing to work with.
    auto rootDeviceIndex = mockRootDeviceIndex;
    pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinary.reset(nullptr);
    retVal = pProgram->processGenBinary(*device);
    EXPECT_EQ(CL_INVALID_BINARY, retVal);
}
using ProgramWithDebugSymbolsTests = Test<ProgramSimpleFixture>;

// Builds a program with "-g" and verifies the packed device binary returned via
// CL_PROGRAM_BINARIES carries a non-empty debug-data section.
TEST_F(ProgramWithDebugSymbolsTests, GivenProgramCreatedWithDashGOptionWhenGettingProgramBinariesThenDebugDataIsIncluded) {
    CreateProgramFromBinary(pContext, pContext->getDevices(), "CopyBuffer_simd16", "-g");

    ASSERT_NE(nullptr, pProgram);

    retVal = pProgram->build(
        pProgram->getDevices(),
        "-g",
        false);

    EXPECT_EQ(CL_SUCCESS, retVal);

    size_t paramValueSize = sizeof(size_t);
    size_t paramValueSizeRet = 0;
    size_t size = 0;

    // Drop the cached packed binary so the queries below force a fresh repack,
    // exercising the debug-data packaging path.
    pProgram->buildInfos[rootDeviceIndex].packedDeviceBinary.reset();
    pProgram->buildInfos[rootDeviceIndex].packedDeviceBinarySize = 0U;

    retVal = pProgram->packDeviceBinary(*pClDevice);
    // Previously unchecked: packing must succeed for the binary queries to be meaningful.
    EXPECT_EQ(CL_SUCCESS, retVal);

    retVal = pProgram->getInfo(
        CL_PROGRAM_BINARY_SIZES,
        paramValueSize,
        &size,
        nullptr);

    EXPECT_EQ(CL_SUCCESS, retVal);

    auto testBinary = std::make_unique<char[]>(size);

    retVal = pProgram->getInfo(
        CL_PROGRAM_BINARIES,
        paramValueSize,
        &testBinary,
        &paramValueSizeRet);

    EXPECT_EQ(CL_SUCCESS, retVal);

    // Unpack the returned archive for this product/device and check that the
    // debug data survived packing.
    ArrayRef<const uint8_t> archive(reinterpret_cast<const uint8_t *>(testBinary.get()), size);
    auto productAbbreviation = hardwarePrefix[pDevice->getHardwareInfo().platform.eProductFamily];

    TargetDevice targetDevice = NEO::targetDeviceFromHwInfo(pDevice->getHardwareInfo());

    std::string decodeErrors;
    std::string decodeWarnings;
    auto singleDeviceBinary = unpackSingleDeviceBinary(archive, ConstStringRef(productAbbreviation, strlen(productAbbreviation)), targetDevice,
                                                       decodeErrors, decodeWarnings);

    EXPECT_FALSE(singleDeviceBinary.debugData.empty());
}
// The -ocl-version internal option must match the device's enabled OpenCL C version.
TEST_F(ProgramTests, WhenProgramIsCreatedThenCorrectOclVersionIsInOptions) {
    DebugManagerStateRestore restorer;
    DebugManager.flags.DisableStatelessToStatefulOptimization.set(false);

    MockProgram program(pContext, false, toClDeviceVector(*pClDevice));
    auto internalOptions = program.getInternalOptions();

    switch (pClDevice->getEnabledClVersion()) {
    case 30:
        EXPECT_TRUE(CompilerOptions::contains(internalOptions, "-ocl-version=300")) << internalOptions;
        break;
    case 21:
        EXPECT_TRUE(CompilerOptions::contains(internalOptions, "-ocl-version=210")) << internalOptions;
        break;
    default:
        // Anything else falls back to OpenCL C 1.2.
        EXPECT_TRUE(CompilerOptions::contains(internalOptions, "-ocl-version=120")) << internalOptions;
        break;
    }
}
// Force each supported CL version on the device and verify the matching
// -ocl-version option appears in the program's internal build options.
TEST_F(ProgramTests, GivenForcedClVersionWhenProgramIsCreatedThenCorrectOclOptionIsPresent) {
    const std::pair<unsigned int, std::string> expectations[] = {
        {0, "-ocl-version=120"},
        {12, "-ocl-version=120"},
        {21, "-ocl-version=210"},
        {30, "-ocl-version=300"}};

    for (const auto &[clVersion, expectedOption] : expectations) {
        pClDevice->enabledClVersion = clVersion;
        MockProgram program{pContext, false, toClDeviceVector(*pClDevice)};
        auto internalOptions = program.getInternalOptions();
        EXPECT_TRUE(CompilerOptions::contains(internalOptions, expectedOption));
    }
}
// Disabling the stateless-to-stateful optimization must force the
// greaterThan4gbBuffersRequired internal option.
TEST_F(ProgramTests, GivenStatelessToStatefulIsDisabledWhenProgramIsCreatedThenGreaterThan4gbBuffersRequiredOptionIsSet) {
    DebugManagerStateRestore restorer;
    DebugManager.flags.DisableStatelessToStatefulOptimization.set(true);
    MockProgram program(pContext, false, toClDeviceVector(*pClDevice));
    auto internalOptions = program.getInternalOptions();
    EXPECT_TRUE(CompilerOptions::contains(internalOptions, NEO::CompilerOptions::greaterThan4gbBuffersRequired));
}
// The bindless-mode internal option must track the UseBindlessMode debug flag.
TEST_F(ProgramTests, WhenCreatingProgramThenBindlessIsEnabledOnlyIfDebugFlagIsEnabled) {
    using namespace testing;
    DebugManagerStateRestore restorer;

    {
        // Flag off: option absent.
        DebugManager.flags.UseBindlessMode.set(0);
        MockProgram programNoBindless(pContext, false, toClDeviceVector(*pClDevice));
        auto internalOptionsNoBindless = programNoBindless.getInternalOptions();
        EXPECT_FALSE(CompilerOptions::contains(internalOptionsNoBindless, CompilerOptions::bindlessMode)) << internalOptionsNoBindless;
    }

    {
        // Flag on: option present.
        DebugManager.flags.UseBindlessMode.set(1);
        MockProgram programBindless(pContext, false, toClDeviceVector(*pClDevice));
        auto internalOptionsBindless = programBindless.getInternalOptions();
        EXPECT_TRUE(CompilerOptions::contains(internalOptionsBindless, CompilerOptions::bindlessMode)) << internalOptionsBindless;
    }
}
// A device advertising full shared-system USM capabilities must force the
// stateless compilation path (greaterThan4gbBuffersRequired).
TEST_F(ProgramTests, givenDeviceThatSupportsSharedSystemMemoryAllocationWhenProgramIsCompiledThenItForcesStatelessCompilation) {
    pClDevice->deviceInfo.sharedSystemMemCapabilities = CL_UNIFIED_SHARED_MEMORY_ACCESS_INTEL | CL_UNIFIED_SHARED_MEMORY_ATOMIC_ACCESS_INTEL | CL_UNIFIED_SHARED_MEMORY_CONCURRENT_ACCESS_INTEL | CL_UNIFIED_SHARED_MEMORY_CONCURRENT_ATOMIC_ACCESS_INTEL;
    pClDevice->getRootDeviceEnvironment().getMutableHardwareInfo()->capabilityTable.sharedSystemMemCapabilities = 1;
    MockProgram program(pContext, false, toClDeviceVector(*pClDevice));
    auto internalOptions = program.getInternalOptions();
    // Pass the std::string directly (was internalOptions.c_str()) - consistent with
    // every other CompilerOptions::contains call in this file and avoids a pointless
    // re-scan of the string for its terminator.
    EXPECT_TRUE(CompilerOptions::contains(internalOptions, CompilerOptions::greaterThan4gbBuffersRequired)) << internalOptions;
}
TEST_F(ProgramTests, GivenForce32BitAddressessWhenProgramIsCreatedThenGreaterThan4gbBuffersRequiredIsCorrectlySet) {
    // With stateless-to-stateful enabled and forced 32-bit addressing, the
    // greater-than-4GB option should appear only when the device allows
    // shared system allocations.
    DebugManagerStateRestore dbgRestorer;
    cl_int retVal = CL_DEVICE_NOT_FOUND;
    DebugManager.flags.DisableStatelessToStatefulOptimization.set(false);
    if (pDevice) {
        const_cast<DeviceInfo *>(&pDevice->getDeviceInfo())->force32BitAddressess = true;
        MockProgram program(pContext, false, toClDeviceVector(*pClDevice));
        auto internalOptions = program.getInternalOptions();
        if (pDevice->areSharedSystemAllocationsAllowed()) {
            EXPECT_TRUE(CompilerOptions::contains(internalOptions, CompilerOptions::greaterThan4gbBuffersRequired)) << internalOptions;
        } else {
            EXPECT_FALSE(CompilerOptions::contains(internalOptions, NEO::CompilerOptions::greaterThan4gbBuffersRequired)) << internalOptions;
        }
    } else {
        // retVal is never assigned, so this deliberately fails the test when
        // no device is available.
        EXPECT_NE(CL_DEVICE_NOT_FOUND, retVal);
    }
}
TEST_F(ProgramTests, Given32bitSupportWhenProgramIsCreatedThenGreaterThan4gbBuffersRequiredIsCorrectlySet) {
    // With stateless-to-stateful enabled, the greater-than-4GB option is
    // expected unless we are on a 64-bit build of a device that disallows
    // shared system allocations.
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.DisableStatelessToStatefulOptimization.set(false);
    std::unique_ptr<MockProgram> program{Program::createBuiltInFromSource<MockProgram>("", pContext, pContext->getDevices(), nullptr)};
    auto internalOptions = program->getInternalOptions();
    if ((false == pDevice->areSharedSystemAllocationsAllowed()) && (false == is32bit)) {
        EXPECT_FALSE(CompilerOptions::contains(internalOptions, NEO::CompilerOptions::greaterThan4gbBuffersRequired)) << internalOptions;
    } else {
        EXPECT_TRUE(CompilerOptions::contains(internalOptions, NEO::CompilerOptions::greaterThan4gbBuffersRequired)) << internalOptions;
    }
}
TEST_F(ProgramTests, GivenStatelessToStatefulIsDisabledWhenProgramIsCreatedThenGreaterThan4gbBuffersRequiredIsCorrectlySet) {
    // Built-in-from-source programs must also get the greater-than-4GB option
    // once the stateless-to-stateful optimization is disabled.
    DebugManagerStateRestore stateRestore;
    DebugManager.flags.DisableStatelessToStatefulOptimization.set(true);
    std::unique_ptr<MockProgram> builtIn{Program::createBuiltInFromSource<MockProgram>("", pContext, pContext->getDevices(), nullptr)};
    const auto options = builtIn->getInternalOptions();
    EXPECT_TRUE(CompilerOptions::contains(options, NEO::CompilerOptions::greaterThan4gbBuffersRequired)) << options;
}
TEST_F(ProgramTests, givenProgramWhenItIsCompiledThenItAlwaysHavePreserveVec3TypeInternalOptionSet) {
    // Every program must unconditionally carry the preserve-vec3-type option.
    std::unique_ptr<MockProgram> builtIn(Program::createBuiltInFromSource<MockProgram>("", pContext, pContext->getDevices(), nullptr));
    const auto options = builtIn->getInternalOptions();
    EXPECT_TRUE(CompilerOptions::contains(options, CompilerOptions::preserveVec3Type)) << options;
}
TEST_F(ProgramTests, Force32BitAddressessWhenProgramIsCreatedThenGreaterThan4gbBuffersRequiredIsCorrectlySet) {
    // Forced 32-bit addressing on a built-in-from-source program: on 32-bit
    // builds the greater-than-4GB option is always set; on 64-bit builds it
    // depends on whether shared system allocations are allowed.
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.DisableStatelessToStatefulOptimization.set(false);
    const_cast<DeviceInfo *>(&pDevice->getDeviceInfo())->force32BitAddressess = true;
    std::unique_ptr<MockProgram> program{Program::createBuiltInFromSource<MockProgram>("", pContext, pContext->getDevices(), nullptr)};
    auto internalOptions = program->getInternalOptions();
    if (is32bit) {
        EXPECT_TRUE(CompilerOptions::contains(internalOptions, CompilerOptions::greaterThan4gbBuffersRequired)) << internalOptions;
    } else {
        if (false == pDevice->areSharedSystemAllocationsAllowed()) {
            EXPECT_FALSE(CompilerOptions::contains(internalOptions, NEO::CompilerOptions::greaterThan4gbBuffersRequired)) << internalOptions;
        } else {
            EXPECT_TRUE(CompilerOptions::contains(internalOptions, NEO::CompilerOptions::greaterThan4gbBuffersRequired)) << internalOptions;
        }
    }
}
TEST_F(ProgramTests, GivenStatelessToStatefulBufferOffsetOptimizationWhenProgramIsCreatedThenBufferOffsetArgIsSet) {
    // Enabling the buffer-offset optimization flag must add the
    // has-buffer-offset-arg internal option to a source program.
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.EnableStatelessToStatefulBufferOffsetOpt.set(1);
    cl_int errorCode = CL_SUCCESS;
    const char programSource[] = "program";
    const char *programPointer = programSource;
    // Taking the address already yields `const char **`; the previous
    // reinterpret_cast was redundant.
    const char **programSources = &programPointer;
    size_t length = sizeof(programSource);
    std::unique_ptr<MockProgram> program(Program::create<MockProgram>(pContext, 1u, programSources, &length, errorCode));
    auto internalOptions = program->getInternalOptions();
    EXPECT_TRUE(CompilerOptions::contains(internalOptions, CompilerOptions::hasBufferOffsetArg)) << internalOptions;
}
TEST_F(ProgramTests, givenStatelessToStatefullOptimizationOffWHenProgramIsCreatedThenOptimizationStringIsNotPresent) {
    // With the buffer-offset optimization flag disabled, the
    // has-buffer-offset-arg internal option must not appear.
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.EnableStatelessToStatefulBufferOffsetOpt.set(0);
    cl_int errorCode = CL_SUCCESS;
    const char programSource[] = "program";
    const char *programPointer = programSource;
    // Taking the address already yields `const char **`; the previous
    // reinterpret_cast was redundant.
    const char **programSources = &programPointer;
    size_t length = sizeof(programSource);
    std::unique_ptr<MockProgram> program(Program::create<MockProgram>(pContext, 1u, programSources, &length, errorCode));
    auto internalOptions = program->getInternalOptions();
    EXPECT_FALSE(CompilerOptions::contains(internalOptions, CompilerOptions::hasBufferOffsetArg)) << internalOptions;
}
TEST_F(ProgramTests, GivenContextWhenCreateProgramThenIncrementContextRefCount) {
    // A non-built-in program must bump only the context's *internal* refcount
    // (not the API one), and releasing the program must restore it.
    auto initialApiRefCount = pContext->getReference();
    auto initialInternalRefCount = pContext->getRefInternalCount();
    MockProgram *program = new MockProgram(pContext, false, pContext->getDevices());
    EXPECT_EQ(pContext->getReference(), initialApiRefCount);
    EXPECT_EQ(pContext->getRefInternalCount(), initialInternalRefCount + 1);
    program->release();
    EXPECT_EQ(pContext->getReference(), initialApiRefCount);
    EXPECT_EQ(pContext->getRefInternalCount(), initialInternalRefCount);
}
TEST_F(ProgramTests, GivenContextWhenCreateProgramFromSourceThenIncrementContextRefCount) {
    // Only a program constructed *with* a context may alter the context's
    // internal refcount; a context-less program must leave it untouched.
    auto initialApiRefCount = pContext->getReference();
    auto initialInternalRefCount = pContext->getRefInternalCount();
    // Context-less, non-built-in program: no effect on the context counts.
    auto tempProgram = new Program(nullptr, false, pContext->getDevices());
    EXPECT_FALSE(tempProgram->getIsBuiltIn());
    auto program = new Program(pContext, false, pContext->getDevices());
    EXPECT_FALSE(program->getIsBuiltIn());
    EXPECT_EQ(pContext->getReference(), initialApiRefCount);
    EXPECT_EQ(pContext->getRefInternalCount(), initialInternalRefCount + 1);
    program->release();
    EXPECT_EQ(pContext->getReference(), initialApiRefCount);
    EXPECT_EQ(pContext->getRefInternalCount(), initialInternalRefCount);
    tempProgram->release();
    EXPECT_EQ(pContext->getReference(), initialApiRefCount);
    EXPECT_EQ(pContext->getRefInternalCount(), initialInternalRefCount);
}
TEST_F(ProgramTests, GivenContextWhenCreateBuiltInProgramFromSourceThenDontIncrementContextRefCount) {
    // Built-in programs must never change the context's refcounts, with or
    // without a context passed to the constructor.
    auto initialApiRefCount = pContext->getReference();
    auto initialInternalRefCount = pContext->getRefInternalCount();
    auto tempProgram = new Program(nullptr, true, pContext->getDevices());
    EXPECT_TRUE(tempProgram->getIsBuiltIn());
    auto program = new Program(pContext, true, pContext->getDevices());
    EXPECT_TRUE(program->getIsBuiltIn());
    EXPECT_EQ(pContext->getReference(), initialApiRefCount);
    EXPECT_EQ(pContext->getRefInternalCount(), initialInternalRefCount);
    program->release();
    EXPECT_EQ(pContext->getReference(), initialApiRefCount);
    EXPECT_EQ(pContext->getRefInternalCount(), initialInternalRefCount);
    tempProgram->release();
    EXPECT_EQ(pContext->getReference(), initialApiRefCount);
    EXPECT_EQ(pContext->getRefInternalCount(), initialInternalRefCount);
}
TEST_F(ProgramTests, WhenBuildingProgramThenPointerToProgramIsReturned) {
    // createBuiltInFromSource must return a valid program both with and
    // without an error-code out-parameter.
    cl_int retVal = CL_DEVICE_NOT_FOUND;
    Program *program = Program::createBuiltInFromSource("", pContext, pContext->getDevices(), &retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, program);
    delete program;
    program = Program::createBuiltInFromSource("", pContext, pContext->getDevices(), nullptr);
    EXPECT_NE(nullptr, program);
    delete program;
}
TEST_F(ProgramTests, GivenNullBinaryWhenCreatingProgramFromGenBinaryThenInvalidValueErrorIsReturned) {
    // A null binary pointer must yield an error and no program object.
    cl_int errCode = CL_SUCCESS;
    Program *program = Program::createBuiltInFromGenBinary(pContext, pContext->getDevices(), nullptr, 0, &errCode);
    EXPECT_NE(CL_SUCCESS, errCode);
    EXPECT_EQ(nullptr, program);
}
TEST_F(ProgramTests, WhenCreatingProgramFromGenBinaryThenSuccessIsReturned) {
    // A built-in program created from a GEN binary must report an executable
    // binary type, be flagged as built-in, and show a successful build status.
    cl_int retVal = CL_INVALID_BINARY;
    char binary[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, '\0'};
    size_t size = 10;
    Program *pProgram = Program::createBuiltInFromGenBinary(pContext, pContext->getDevices(), binary, size, &retVal);
    EXPECT_NE(nullptr, pProgram);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Named casts instead of C-style casts.
    EXPECT_EQ(static_cast<uint32_t>(CL_PROGRAM_BINARY_TYPE_EXECUTABLE), static_cast<uint32_t>(pProgram->getProgramBinaryType(pClDevice)));
    EXPECT_TRUE(pProgram->getIsBuiltIn());
    cl_device_id deviceId = pContext->getDevice(0);
    cl_build_status status = 0;
    pProgram->getBuildInfo(deviceId, CL_PROGRAM_BUILD_STATUS,
                           sizeof(cl_build_status), &status, nullptr);
    EXPECT_EQ(CL_BUILD_SUCCESS, status);
    delete pProgram;
}
TEST_F(ProgramTests, GivenRetValNullPointerWhenCreatingProgramFromGenBinaryThenSuccessIsReturned) {
    // Passing nullptr for the error-code out-parameter must still produce a
    // valid executable program with a successful build status.
    char binary[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, '\0'};
    size_t size = 10;
    Program *pProgram = Program::createBuiltInFromGenBinary(pContext, pContext->getDevices(), binary, size, nullptr);
    EXPECT_NE(nullptr, pProgram);
    // Named casts instead of C-style casts.
    EXPECT_EQ(static_cast<uint32_t>(CL_PROGRAM_BINARY_TYPE_EXECUTABLE), static_cast<uint32_t>(pProgram->getProgramBinaryType(pClDevice)));
    cl_device_id deviceId = pContext->getDevice(0);
    cl_build_status status = 0;
    pProgram->getBuildInfo(deviceId, CL_PROGRAM_BUILD_STATUS,
                           sizeof(cl_build_status), &status, nullptr);
    EXPECT_EQ(CL_BUILD_SUCCESS, status);
    delete pProgram;
}
TEST_F(ProgramTests, GivenNullContextWhenCreatingProgramFromGenBinaryThenSuccessIsReturned) {
    // A context-less built-in program from a GEN binary must still succeed
    // and report an executable binary type and successful build status.
    cl_int retVal = CL_INVALID_BINARY;
    char binary[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, '\0'};
    size_t size = 10;
    Program *pProgram = Program::createBuiltInFromGenBinary(nullptr, toClDeviceVector(*pClDevice), binary, size, &retVal);
    EXPECT_NE(nullptr, pProgram);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Named casts instead of C-style casts.
    EXPECT_EQ(static_cast<uint32_t>(CL_PROGRAM_BINARY_TYPE_EXECUTABLE), static_cast<uint32_t>(pProgram->getProgramBinaryType(pClDevice)));
    cl_build_status status = 0;
    pProgram->getBuildInfo(pClDevice, CL_PROGRAM_BUILD_STATUS,
                           sizeof(cl_build_status), &status, nullptr);
    EXPECT_EQ(CL_BUILD_SUCCESS, status);
    delete pProgram;
}
TEST_F(ProgramTests, givenValidZebinPrepareLinkerInput) {
    // processGenBinary on a valid zebin must populate the linker input, both
    // for an empty program and for one whose kernel is described in zeInfo.
    ZebinTestData::ValidEmptyProgram zebin;
    // NOTE(review): the zeInfo YAML below relies on exact leading whitespace
    // for nesting; confirm against the upstream source if this file was
    // whitespace-mangled.
    const std::string validZeInfo = std::string("version :\'") + toString(zeInfoDecoderVersion) + R"===('
kernels:
    - name : some_kernel
      execution_env :
        simd_size : 8
)===";
    auto device = std::make_unique<MockClDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(nullptr, mockRootDeviceIndex));
    {
        // Empty zebin: linker input is still prepared.
        auto program = std::make_unique<MockProgram>(nullptr, false, toClDeviceVector(*pClDevice));
        program->buildInfos[rootDeviceIndex].unpackedDeviceBinary = makeCopy(zebin.storage.data(), zebin.storage.size());
        program->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize = zebin.storage.size();
        auto retVal = program->processGenBinary(*pClDevice);
        EXPECT_EQ(CL_SUCCESS, retVal);
        EXPECT_NE(nullptr, program->buildInfos[rootDeviceIndex].linkerInput.get());
    }
    {
        // Replace the zeInfo section with one declaring a kernel, plus a
        // matching (empty) text section for that kernel.
        zebin.removeSection(NEO::Elf::SHT_ZEBIN::SHT_ZEBIN_ZEINFO, NEO::Elf::SectionsNamesZebin::zeInfo);
        zebin.appendSection(NEO::Elf::SHT_ZEBIN::SHT_ZEBIN_ZEINFO, NEO::Elf::SectionsNamesZebin::zeInfo, ArrayRef<const uint8_t>::fromAny(validZeInfo.data(), validZeInfo.size()));
        zebin.appendSection(NEO::Elf::SHT_PROGBITS, NEO::Elf::SectionsNamesZebin::textPrefix.str() + "some_kernel", {});
        auto program = std::make_unique<MockProgram>(nullptr, false, toClDeviceVector(*pClDevice));
        program->buildInfos[rootDeviceIndex].unpackedDeviceBinary = makeCopy(zebin.storage.data(), zebin.storage.size());
        program->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize = zebin.storage.size();
        auto retVal = program->processGenBinary(*pClDevice);
        EXPECT_EQ(CL_SUCCESS, retVal);
        EXPECT_NE(nullptr, program->buildInfos[rootDeviceIndex].linkerInput.get());
    }
}
TEST_F(ProgramTests, givenProgramFromGenBinaryWhenSLMSizeIsBiggerThenDeviceLimitThenReturnError) {
    // A kernel requesting more SLM than the device exposes (2x localMemSize)
    // must make processGenBinary fail with CL_OUT_OF_RESOURCES.
    PatchTokensTestData::ValidProgramWithKernelUsingSlm patchtokensProgram;
    patchtokensProgram.slmMutable->TotalInlineLocalMemorySize = static_cast<uint32_t>(pDevice->getDeviceInfo().localMemSize * 2);
    patchtokensProgram.recalcTokPtr();
    auto program = std::make_unique<MockProgram>(nullptr, false, toClDeviceVector(*pClDevice));
    program->buildInfos[rootDeviceIndex].unpackedDeviceBinary = makeCopy(patchtokensProgram.storage.data(), patchtokensProgram.storage.size());
    program->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize = patchtokensProgram.storage.size();
    auto retVal = program->processGenBinary(*pClDevice);
    EXPECT_EQ(CL_OUT_OF_RESOURCES, retVal);
}
TEST_F(ProgramTests, givenExistingConstantSurfacesWhenProcessGenBinaryThenCleanupTheSurfaceOnlyForSpecificDevice) {
    // processGenBinary must release only the constant surface belonging to
    // the processed device's buildInfo slot; the other slot stays untouched.
    // (Assumes rootDeviceIndex == 0 here so slot 0 is the processed device -
    // the assertions below encode that.)
    PatchTokensTestData::ValidProgramWithKernelUsingSlm patchtokensProgram;
    auto program = std::make_unique<MockProgram>(nullptr, false, toClDeviceVector(*pClDevice));
    program->buildInfos.resize(2);
    program->buildInfos[0].constantSurface = pDevice->getMemoryManager()->allocateGraphicsMemoryWithProperties({rootDeviceIndex, MemoryConstants::cacheLineSize,
                                                                                                               AllocationType::CONSTANT_SURFACE, pDevice->getDeviceBitfield()});
    program->buildInfos[1].constantSurface = pDevice->getMemoryManager()->allocateGraphicsMemoryWithProperties({rootDeviceIndex, MemoryConstants::cacheLineSize,
                                                                                                               AllocationType::CONSTANT_SURFACE, pDevice->getDeviceBitfield()});
    program->buildInfos[rootDeviceIndex].unpackedDeviceBinary = makeCopy(patchtokensProgram.storage.data(), patchtokensProgram.storage.size());
    program->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize = patchtokensProgram.storage.size();
    auto constantSurface0 = program->buildInfos[0].constantSurface;
    EXPECT_NE(nullptr, constantSurface0);
    auto constantSurface1 = program->buildInfos[1].constantSurface;
    EXPECT_NE(nullptr, constantSurface1);
    auto retVal = program->processGenBinary(*pClDevice);
    // Slot 0 was cleaned up; slot 1 still holds its original allocation.
    EXPECT_EQ(nullptr, program->buildInfos[0].constantSurface);
    EXPECT_EQ(constantSurface1, program->buildInfos[1].constantSurface);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
TEST_F(ProgramTests, givenExistingGlobalSurfacesWhenProcessGenBinaryThenCleanupTheSurfaceOnlyForSpecificDevice) {
    // Same as the constant-surface test above, but for global surfaces:
    // only the processed device's slot is cleaned up.
    PatchTokensTestData::ValidProgramWithKernelUsingSlm patchtokensProgram;
    auto program = std::make_unique<MockProgram>(nullptr, false, toClDeviceVector(*pClDevice));
    program->buildInfos.resize(2);
    program->buildInfos[0].globalSurface = pDevice->getMemoryManager()->allocateGraphicsMemoryWithProperties({rootDeviceIndex, MemoryConstants::cacheLineSize,
                                                                                                             AllocationType::GLOBAL_SURFACE, pDevice->getDeviceBitfield()});
    program->buildInfos[1].globalSurface = pDevice->getMemoryManager()->allocateGraphicsMemoryWithProperties({rootDeviceIndex, MemoryConstants::cacheLineSize,
                                                                                                             AllocationType::GLOBAL_SURFACE, pDevice->getDeviceBitfield()});
    program->buildInfos[rootDeviceIndex].unpackedDeviceBinary = makeCopy(patchtokensProgram.storage.data(), patchtokensProgram.storage.size());
    program->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize = patchtokensProgram.storage.size();
    auto globalSurface0 = program->buildInfos[0].globalSurface;
    EXPECT_NE(nullptr, globalSurface0);
    auto globalSurface1 = program->buildInfos[1].globalSurface;
    EXPECT_NE(nullptr, globalSurface1);
    auto retVal = program->processGenBinary(*pClDevice);
    // Slot 0 was cleaned up; slot 1 still holds its original allocation.
    EXPECT_EQ(nullptr, program->buildInfos[0].globalSurface);
    EXPECT_EQ(globalSurface1, program->buildInfos[1].globalSurface);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
TEST_F(ProgramTests, GivenNoCompilerInterfaceRootDeviceEnvironmentWhenRebuildingBinaryThenOutOfHostMemoryErrorIsReturned) {
    // Rebuilding from IR without a compiler interface must fail with
    // CL_OUT_OF_HOST_MEMORY. The compiler-less environment is swapped in at
    // the start and swapped back at the end to avoid leaking test state.
    auto pDevice = pContext->getDevice(0);
    auto executionEnvironment = pDevice->getExecutionEnvironment();
    std::unique_ptr<RootDeviceEnvironment> rootDeviceEnvironment = std::make_unique<NoCompilerInterfaceRootDeviceEnvironment>(*executionEnvironment);
    rootDeviceEnvironment->setHwInfo(&pDevice->getHardwareInfo());
    std::swap(rootDeviceEnvironment, executionEnvironment->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    auto program = std::make_unique<MockProgram>(toClDeviceVector(*pDevice));
    EXPECT_NE(nullptr, program);
    // Load a binary program file
    std::string filePath;
    retrieveBinaryKernelFilename(filePath, "CopyBuffer_simd16_", ".bin");
    size_t binarySize = 0;
    auto pBinary = loadDataFromFile(filePath.c_str(), binarySize);
    EXPECT_NE(0u, binarySize);
    // Create program from loaded binary
    cl_int retVal = program->createProgramFromBinary(pBinary.get(), binarySize, *pClDevice);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Ask to rebuild program from its IR binary - it should fail (no Compiler Interface)
    retVal = program->rebuildProgramFromIr();
    EXPECT_EQ(CL_OUT_OF_HOST_MEMORY, retVal);
    // Restore the original root device environment.
    std::swap(rootDeviceEnvironment, executionEnvironment->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
}
TEST_F(ProgramTests, GivenGtpinReraFlagWhenBuildingProgramThenCorrectOptionsAreSet) {
    // The gtpinRera build option must be moved from the user build options to
    // the internal options; other options are forwarded as-is.
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pDevice = pContext->getDevice(0);
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto program = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pDevice));
    program->sourceCode = "__kernel mock() {}";
    program->createdFrom = Program::CreatedFrom::SOURCE;
    // Ask to build created program without NEO::CompilerOptions::gtpinRera flag.
    cl_int retVal = program->build(program->getDevices(), CompilerOptions::fastRelaxedMath.data(), false);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Check build options that were applied
    EXPECT_TRUE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::fastRelaxedMath)) << cip->buildOptions;
    // Stream the container actually being asserted on (was mistakenly
    // streaming buildInternalOptions).
    EXPECT_FALSE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::gtpinRera)) << cip->buildOptions;
    // Ask to build created program with NEO::CompilerOptions::gtpinRera flag.
    cip->buildOptions.clear();
    cip->buildInternalOptions.clear();
    retVal = program->build(program->getDevices(), CompilerOptions::concatenate(CompilerOptions::gtpinRera, CompilerOptions::finiteMathOnly).c_str(), false);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Check build options that were applied: gtpinRera must have migrated to
    // the internal options.
    EXPECT_FALSE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::fastRelaxedMath)) << cip->buildOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildOptions, CompilerOptions::finiteMathOnly)) << cip->buildOptions;
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, CompilerOptions::gtpinRera)) << cip->buildInternalOptions;
}
TEST_F(ProgramTests, GivenFailingGenBinaryProgramWhenRebuildingBinaryThenInvalidBinaryErrorIsReturned) {
    // A program whose GEN-binary processing is mocked to fail must report
    // CL_INVALID_BINARY when rebuilt from its IR.
    cl_int retVal = CL_SUCCESS; // initialize instead of leaving indeterminate
    auto program = std::make_unique<FailingGenBinaryProgram>(toClDeviceVector(*pClDevice));
    EXPECT_NE(nullptr, program);
    // Load a binary program file
    std::string filePath;
    retrieveBinaryKernelFilename(filePath, "CopyBuffer_simd16_", ".bin");
    size_t binarySize = 0;
    auto pBinary = loadDataFromFile(filePath.c_str(), binarySize);
    EXPECT_NE(0u, binarySize);
    // Create program from loaded binary
    retVal = program->createProgramFromBinary(pBinary.get(), binarySize, *pClDevice);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Ask to rebuild program from its IR binary - it should fail (simulated invalid binary)
    retVal = program->rebuildProgramFromIr();
    EXPECT_EQ(CL_INVALID_BINARY, retVal);
}
// Fixture forcing 32-bit addressing for the duration of each test. The flag
// is set before the base SetUp and cleared after the base TearDown so the
// whole fixture lifetime observes the forced mode.
class Program32BitTests : public ProgramTests {
  public:
    void SetUp() override {
        DebugManager.flags.Force32bitAddressing.set(true);
        ProgramTests::SetUp();
    }
    void TearDown() override {
        ProgramTests::TearDown();
        DebugManager.flags.Force32bitAddressing.set(false);
    }
};
TEST_F(Program32BitTests, givenDeviceWithForce32BitAddressingOnWhenBuiltinIsCreatedThenNoFlagsArePassedAsInternalOptions) {
    // The previous assertion, EXPECT_THAT(options, HasSubstr("")), was
    // vacuous - every string contains the empty substring. Assert the test's
    // stated intent instead: a built-in program does not receive the 32-bit
    // addressing flag even though Force32bitAddressing is on.
    MockProgram program(toClDeviceVector(*pClDevice));
    auto internalOptions = program.getInternalOptions();
    EXPECT_FALSE(CompilerOptions::contains(internalOptions, NEO::CompilerOptions::arch32bit)) << internalOptions;
}
TEST_F(Program32BitTests, givenDeviceWithForce32BitAddressingOnWhenProgramIsCreatedThen32bitFlagIsPassedAsInternalOption) {
    // On 64-bit builds the 32-bit-arch flag must be forwarded explicitly;
    // on 32-bit builds it is implicit and therefore absent.
    MockProgram program{pContext, false, toClDeviceVector(*pClDevice)};
    const std::string options = program.getInternalOptions();
    const auto archFlagPos = options.find(NEO::CompilerOptions::arch32bit.data());
    if constexpr (is64bit) {
        EXPECT_NE(std::string::npos, archFlagPos);
    } else {
        EXPECT_EQ(std::string::npos, archFlagPos);
    }
}
HWTEST_F(ProgramTests, givenNewProgramThenStatelessToStatefulBufferOffsetOptimizationIsMatchingThePlatformEnablingStatus) {
    // The has-buffer-offset-arg option must mirror the platform capability.
    MockProgram program{pContext, false, toClDeviceVector(*pClDevice)};
    const auto options = program.getInternalOptions();
    const bool platformSupportsIt = HwHelperHw<FamilyType>::get().isStatelesToStatefullWithOffsetSupported();
    EXPECT_EQ(platformSupportsIt, CompilerOptions::contains(options, CompilerOptions::hasBufferOffsetArg));
}
TEST(ProgramTest, givenImagesSupportedWhenCreatingProgramThenInternalOptionsAreCorrectlyInitialized) {
    // The enable-image-support internal option must track the device's
    // supportsImages capability; both capability states are exercised and the
    // original value is restored via VariableBackup.
    VariableBackup<bool> supportsImagesCapability{&defaultHwInfo->capabilityTable.supportsImages};
    for (auto areImagesSupported : ::testing::Bool()) {
        supportsImagesCapability = areImagesSupported;
        UltClDeviceFactory clDeviceFactory{1, 0};
        MockContext context{clDeviceFactory.rootDevices[0]};
        MockProgram program(&context, false, toClDeviceVector(*clDeviceFactory.rootDevices[0]));
        auto internalOptions = program.getInternalOptions();
        EXPECT_EQ(areImagesSupported, CompilerOptions::contains(internalOptions, CompilerOptions::enableImageSupport));
    }
}
// Test double for Program::createFromIL paths: records the incoming binary
// as the program's IR (marking it SPIR-V according to the template flag) and
// returns the templated error code instead of doing real processing.
template <int32_t ErrCodeToReturn, bool spirv = true>
struct CreateProgramFromBinaryMock : public MockProgram {
    using MockProgram::MockProgram;
    cl_int createProgramFromBinary(const void *pBinary,
                                   size_t binarySize, ClDevice &clDevice) override {
        this->irBinary.reset(new char[binarySize]);
        this->irBinarySize = binarySize;
        this->isSpirV = spirv;
        memcpy_s(this->irBinary.get(), binarySize, pBinary, binarySize);
        return ErrCodeToReturn;
    }
};
TEST_F(ProgramTests, GivenFailedBinaryWhenCreatingFromIlThenInvalidBinaryErrorIsReturned) {
    // createFromIL must propagate the mock's CL_INVALID_BINARY and produce no program.
    const uint32_t notSpirv[16] = {0xDEADBEEF};
    cl_int retVal = CL_SUCCESS;
    auto program = Program::createFromIL<CreateProgramFromBinaryMock<CL_INVALID_BINARY>>(pContext, static_cast<const void *>(notSpirv), sizeof(notSpirv), retVal);
    EXPECT_EQ(CL_INVALID_BINARY, retVal);
    EXPECT_EQ(nullptr, program);
}
TEST_F(ProgramTests, GivenSuccessfullyBuiltBinaryWhenCreatingFromIlThenValidProgramIsReturned) {
    // A valid SPIR-V module must yield a non-null program and CL_SUCCESS.
    const uint32_t spirv[16] = {0x03022307};
    cl_int retVal = CL_SUCCESS;
    auto program = Program::createFromIL<CreateProgramFromBinaryMock<CL_SUCCESS>>(pContext, static_cast<const void *>(spirv), sizeof(spirv), retVal);
    ASSERT_NE(nullptr, program);
    EXPECT_EQ(CL_SUCCESS, retVal);
    program->release();
}
TEST_F(ProgramTests, givenProgramCreatedFromILWhenCompileIsCalledThenReuseTheILInsteadOfCallingCompilerInterface) {
    // With the frontend compiler forced to fail, compile() can only succeed
    // if it reuses the stored IL instead of invoking the compiler interface.
    const uint32_t spirv[16] = {0x03022307};
    cl_int errCode = 0;
    auto pProgram = Program::createFromIL<MockProgram>(pContext, reinterpret_cast<const void *>(spirv), sizeof(spirv), errCode);
    ASSERT_NE(nullptr, pProgram);
    auto debugVars = NEO::getIgcDebugVars();
    debugVars.forceBuildFailure = true;
    gEnvironment->fclPushDebugVars(debugVars);
    auto compilerErr = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, compilerErr);
    gEnvironment->fclPopDebugVars();
    pProgram->release();
}
TEST_F(ProgramTests, givenProgramCreatedFromIntermediateBinaryRepresentationWhenCompileIsCalledThenReuseTheILInsteadOfCallingCompilerInterface) {
    // Same as the IL test above, but the program is created via the generic
    // binary path: compile() must still reuse the intermediate representation
    // (the frontend compiler is forced to fail, so success proves reuse).
    const uint32_t spirv[16] = {0x03022307};
    cl_int errCode = 0;
    size_t lengths = sizeof(spirv);
    const unsigned char *binaries[1] = {reinterpret_cast<const unsigned char *>(spirv)};
    auto pProgram = Program::create<MockProgram>(pContext, pContext->getDevices(), &lengths, binaries, nullptr, errCode);
    ASSERT_NE(nullptr, pProgram);
    auto debugVars = NEO::getIgcDebugVars();
    debugVars.forceBuildFailure = true;
    gEnvironment->fclPushDebugVars(debugVars);
    auto compilerErr = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, compilerErr);
    gEnvironment->fclPopDebugVars();
    pProgram->release();
}
TEST_F(ProgramTests, GivenIlIsNullptrWhenCreatingFromIlThenInvalidBinaryErrorIsReturned) {
    // A null IL pointer must be rejected with CL_INVALID_BINARY.
    cl_int errCode = CL_SUCCESS;
    auto program = Program::createFromIL<CreateProgramFromBinaryMock<CL_INVALID_BINARY>>(pContext, nullptr, 16, errCode);
    EXPECT_EQ(CL_INVALID_BINARY, errCode);
    EXPECT_EQ(nullptr, program);
}
TEST_F(ProgramTests, GivenIlSizeZeroWhenCreatingFromIlThenInvalidBinaryErrorIsReturned) {
    // A zero-length IL must be rejected with CL_INVALID_BINARY.
    const uint32_t spirv[16] = {0x03022307};
    cl_int errCode = CL_SUCCESS;
    auto program = Program::createFromIL<CreateProgramFromBinaryMock<CL_INVALID_BINARY>>(pContext, static_cast<const void *>(spirv), 0, errCode);
    EXPECT_EQ(CL_INVALID_BINARY, errCode);
    EXPECT_EQ(nullptr, program);
}
TEST_F(ProgramTests, WhenCreatingFromIlThenIsSpirvIsSetCorrectly) {
    // SPIR-V magic must mark the program as SPIR-V; LLVM bitcode magic
    // ("BC\xc0\xde") must not.
    const uint32_t spirv[16] = {0x03022307};
    cl_int retVal = CL_SUCCESS;
    auto prog = Program::createFromIL<Program>(pContext, reinterpret_cast<const void *>(spirv), sizeof(spirv), retVal);
    EXPECT_NE(nullptr, prog);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_TRUE(prog->getIsSpirV());
    prog->release();
    const char llvmBc[16] = {'B', 'C', '\xc0', '\xde'};
    prog = Program::createFromIL<Program>(pContext, reinterpret_cast<const void *>(llvmBc), sizeof(llvmBc), retVal);
    EXPECT_NE(nullptr, prog);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_FALSE(prog->getIsSpirV());
    prog->release();
}
// LLVM bitcode magic ("BC\xc0\xde") plus padding; shared fixture for the
// isLlvmBitcode tests below.
static const uint8_t llvmBinary[] = "BC\xc0\xde ";
TEST(isValidLlvmBinary, whenLlvmMagicWasFoundThenBinaryIsValidLLvm) {
    EXPECT_TRUE(NEO::isLlvmBitcode(llvmBinary));
}
TEST(isValidLlvmBinary, whenBinaryIsNullptrThenBinaryIsNotValidLLvm) {
    // An empty range can never be LLVM bitcode.
    const ArrayRef<const uint8_t> emptyBinary;
    EXPECT_FALSE(NEO::isLlvmBitcode(emptyBinary));
}
TEST(isValidLlvmBinary, whenBinaryIsShorterThanLllvMagicThenBinaryIsNotValidLLvm) {
    // Two bytes cannot hold the full magic prefix.
    const ArrayRef<const uint8_t> truncated(llvmBinary, 2);
    EXPECT_FALSE(NEO::isLlvmBitcode(truncated));
}
TEST(isValidLlvmBinary, whenBinaryDoesNotContainLllvMagicThenBinaryIsNotValidLLvm) {
    // Arbitrary bytes lacking the magic prefix must be rejected.
    const uint8_t notLlvmBinary[] = "ABCDEFGHIJKLMNO";
    EXPECT_FALSE(NEO::isLlvmBitcode(notLlvmBinary));
}
// SPIR-V magic in native and byte-swapped endianness; shared by the
// isSpirVBitcode tests below.
const uint32_t spirv[16] = {0x03022307};
const uint32_t spirvInvEndianes[16] = {0x07230203};
TEST(isValidSpirvBinary, whenSpirvMagicWasFoundThenBinaryIsValidSpirv) {
    // Both endiannesses of the magic must be accepted.
    EXPECT_TRUE(NEO::isSpirVBitcode(ArrayRef<const uint8_t>(reinterpret_cast<const uint8_t *>(&spirv), sizeof(spirv))));
    EXPECT_TRUE(NEO::isSpirVBitcode(ArrayRef<const uint8_t>(reinterpret_cast<const uint8_t *>(&spirvInvEndianes), sizeof(spirvInvEndianes))));
}
TEST(isValidSpirvBinary, whenBinaryIsNullptrThenBinaryIsNotValidLLvm) {
    // An empty range can never be SPIR-V.
    const ArrayRef<const uint8_t> emptyBinary;
    EXPECT_FALSE(NEO::isSpirVBitcode(emptyBinary));
}
TEST(isValidSpirvBinary, whenBinaryIsShorterThanLllvMagicThenBinaryIsNotValidLLvm) {
    // Two bytes cannot hold the 4-byte SPIR-V magic.
    const ArrayRef<const uint8_t> truncated(reinterpret_cast<const uint8_t *>(&spirvInvEndianes), 2);
    EXPECT_FALSE(NEO::isSpirVBitcode(truncated));
}
TEST(isValidSpirvBinary, whenBinaryDoesNotContainLllvMagicThenBinaryIsNotValidLLvm) {
    // Arbitrary bytes lacking the magic must be rejected.
    const uint8_t notSpirvBinary[] = "ABCDEFGHIJKLMNO";
    EXPECT_FALSE(NEO::isSpirVBitcode(notSpirvBinary));
}
TEST_F(ProgramTests, WhenLinkingTwoValidSpirvProgramsThenValidProgramIsReturned) {
    // Linking two IL programs (one marked non-SPIR-V, one SPIR-V, via the
    // mock's template flag) into a third must succeed. Release order is the
    // reverse of creation.
    const uint32_t spirv[16] = {0x03022307};
    cl_int errCode = CL_SUCCESS;
    auto node1 = Program::createFromIL<CreateProgramFromBinaryMock<CL_SUCCESS, false>>(pContext, reinterpret_cast<const void *>(spirv), sizeof(spirv), errCode);
    ASSERT_NE(nullptr, node1);
    EXPECT_EQ(CL_SUCCESS, errCode);
    auto node2 = Program::createFromIL<CreateProgramFromBinaryMock<CL_SUCCESS>>(pContext, reinterpret_cast<const void *>(spirv), sizeof(spirv), errCode);
    ASSERT_NE(nullptr, node2);
    EXPECT_EQ(CL_SUCCESS, errCode);
    auto prog = Program::createFromIL<CreateProgramFromBinaryMock<CL_SUCCESS>>(pContext, reinterpret_cast<const void *>(spirv), sizeof(spirv), errCode);
    ASSERT_NE(nullptr, prog);
    EXPECT_EQ(CL_SUCCESS, errCode);
    cl_program linkNodes[] = {node1, node2};
    errCode = prog->link(prog->getDevices(), nullptr, 2, linkNodes);
    EXPECT_EQ(CL_SUCCESS, errCode);
    prog->release();
    node2->release();
    node1->release();
}
TEST(ProgramDestructionTests, givenProgramUsingDeviceWhenItIsDestroyedAfterPlatfromCleanupThenItIsCleanedUpProperly) {
    // A program holding a global surface must still clean up correctly when
    // the platform is torn down *before* the program is released; the
    // sequence of operations here is deliberate and must not be reordered.
    initPlatform();
    auto device = platform()->getClDevice(0);
    MockContext *context = new MockContext(device, false);
    MockProgram *pProgram = new MockProgram(context, false, toClDeviceVector(*device));
    auto globalAllocation = device->getMemoryManager()->allocateGraphicsMemoryWithProperties(MockAllocationProperties{device->getRootDeviceIndex(), MemoryConstants::pageSize});
    pProgram->setGlobalSurface(globalAllocation);
    // Tear the platform down while context and program are still alive.
    platformsImpl->clear();
    EXPECT_EQ(1, device->getRefInternalCount());
    EXPECT_EQ(1, pProgram->getRefInternalCount());
    context->decRefInternal();
    pProgram->decRefInternal();
}
TEST_F(ProgramTests, givenProgramWithSpirvWhenRebuildProgramIsCalledThenSpirvPathIsTaken) {
    // Rebuilding a SPIR-V program must route through the SPIR-V -> GEN-binary
    // translation path and hand the stored IR to the compiler verbatim.
    auto compilerInterface = new MockCompilerInterface();
    auto compilerMain = new MockCIFMain();
    compilerInterface->setFclMain(compilerMain);
    // Same CIF main is shared by FCL and IGC, hence the extra Retain.
    compilerMain->Retain();
    compilerInterface->setIgcMain(compilerMain);
    compilerMain->setDefaultCreatorFunc<NEO::MockIgcOclDeviceCtx>(NEO::MockIgcOclDeviceCtx::Create);
    compilerMain->setDefaultCreatorFunc<NEO::MockFclOclDeviceCtx>(NEO::MockFclOclDeviceCtx::Create);
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(compilerInterface);
    std::string receivedInput;
    MockCompilerDebugVars debugVars = {};
    debugVars.receivedInput = &receivedInput;
    // Force the build to fail so the test can focus on what was requested.
    debugVars.forceBuildFailure = true;
    gEnvironment->igcPushDebugVars(debugVars);
    // Scope guard: pops the IGC debug vars on any exit path.
    std::unique_ptr<void, void (*)(void *)> igcDebugVarsAutoPop{&gEnvironment, [](void *) { gEnvironment->igcPopDebugVars(); }};
    auto program = clUniquePtr(new MockProgram(toClDeviceVector(*pClDevice)));
    uint32_t spirv[16] = {0x03022307, 0x23471113, 0x17192329};
    program->irBinary = makeCopy(spirv, sizeof(spirv));
    program->irBinarySize = sizeof(spirv);
    program->isSpirV = true;
    auto buildRet = program->rebuildProgramFromIr();
    EXPECT_NE(CL_SUCCESS, buildRet);
    // The compiler must have received the exact SPIR-V bytes...
    ASSERT_EQ(sizeof(spirv), receivedInput.size());
    EXPECT_EQ(0, memcmp(spirv, receivedInput.c_str(), receivedInput.size()));
    // ...via a single SPIR-V -> oclGenBin translation request.
    ASSERT_EQ(1U, compilerInterface->requestedTranslationCtxs.size());
    EXPECT_EQ(IGC::CodeType::spirV, compilerInterface->requestedTranslationCtxs[0].first);
    EXPECT_EQ(IGC::CodeType::oclGenBin, compilerInterface->requestedTranslationCtxs[0].second);
}
TEST_F(ProgramTests, givenProgramWithSpirvWhenRebuildIsCalledThenRebuildWarningIsIssued) {
    // Rebuilding from IR must place the recompiled-from-IR warning in the build log.
    uint32_t spirvBinary[16] = {0x03022307, 0x23471113, 0x17192329};
    auto program = clUniquePtr(new MockProgram(toClDeviceVector(*pClDevice)));
    program->irBinary = makeCopy(spirvBinary, sizeof(spirvBinary));
    program->irBinarySize = sizeof(spirvBinary);
    program->isSpirV = true;
    ASSERT_EQ(CL_SUCCESS, program->rebuildProgramFromIr());
    const std::string buildLog = program->getBuildLog(pClDevice->getRootDeviceIndex());
    EXPECT_NE(std::string::npos, buildLog.find(CompilerWarnings::recompiledFromIr.data()));
}
TEST_F(ProgramTests, givenProgramWithSpirvWhenRebuildIsCalledButSuppressFlagIsEnabledThenRebuildWarningIsNotIssued) {
    // With the suppression build option set, rebuilding from IR must not log
    // the recompiled-from-IR warning.
    uint32_t spirvBinary[16] = {0x03022307, 0x23471113, 0x17192329};
    auto program = clUniquePtr(new MockProgram(toClDeviceVector(*pClDevice)));
    program->irBinary = makeCopy(spirvBinary, sizeof(spirvBinary));
    program->irBinarySize = sizeof(spirvBinary);
    program->isSpirV = true;
    const auto suppressOption{CompilerOptions::noRecompiledFromIr};
    program->setBuildOptions(suppressOption.data());
    ASSERT_EQ(CL_SUCCESS, program->rebuildProgramFromIr());
    const std::string buildLog = program->getBuildLog(pClDevice->getRootDeviceIndex());
    EXPECT_EQ(std::string::npos, buildLog.find(CompilerWarnings::recompiledFromIr.data()));
}
TEST_F(ProgramTests, givenProgramWithSpirvWhenRecompileIsCalledThenRebuildWarningIsIssued) {
    // Recompiling from IR must place the recompiled-from-IR warning in the build log.
    uint32_t spirvBinary[16] = {0x03022307, 0x23471113, 0x17192329};
    auto program = clUniquePtr(new MockProgram(toClDeviceVector(*pClDevice)));
    program->irBinary = makeCopy(spirvBinary, sizeof(spirvBinary));
    program->irBinarySize = sizeof(spirvBinary);
    program->isSpirV = true;
    ASSERT_EQ(CL_SUCCESS, program->recompile());
    const std::string buildLog = program->getBuildLog(pClDevice->getRootDeviceIndex());
    EXPECT_NE(std::string::npos, buildLog.find(CompilerWarnings::recompiledFromIr.data()));
}
TEST_F(ProgramTests, givenProgramWithSpirvWhenRecompileIsCalledButSuppressFlagIsEnabledThenRebuildWarningIsNotIssued) {
const auto program{clUniquePtr(new MockProgram(toClDeviceVector(*pClDevice)))};
uint32_t spirv[16] = {0x03022307, 0x23471113, 0x17192329};
program->irBinary = makeCopy(spirv, sizeof(spirv));
program->irBinarySize = sizeof(spirv);
program->isSpirV = true;
const auto buildOptions{CompilerOptions::noRecompiledFromIr};
program->setBuildOptions(buildOptions.data());
const auto compileResult{program->recompile()};
ASSERT_EQ(CL_SUCCESS, compileResult);
const std::string buildLog{program->getBuildLog(pClDevice->getRootDeviceIndex())};
const auto containsWarning{buildLog.find(CompilerWarnings::recompiledFromIr.data()) != std::string::npos};
EXPECT_FALSE(containsWarning);
}
// Rebuilding from IR must store the binary returned by the (mocked) IGC
// compiler as the program's unpacked device binary, byte for byte.
TEST_F(ProgramTests, whenRebuildingProgramThenStoreDeviceBinaryProperly) {
    // Install a mock compiler interface whose translation returns a known blob.
    auto compilerInterface = new MockCompilerInterface();
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(compilerInterface);
    auto compilerMain = new MockCIFMain();
    compilerInterface->setIgcMain(compilerMain);
    compilerMain->setDefaultCreatorFunc<NEO::MockIgcOclDeviceCtx>(NEO::MockIgcOclDeviceCtx::Create);
    MockCompilerDebugVars debugVars = {};
    char binaryToReturn[] = "abcdfghijklmnop";
    debugVars.binaryToReturn = binaryToReturn;
    debugVars.binaryToReturnSize = sizeof(binaryToReturn);
    gEnvironment->igcPushDebugVars(debugVars);
    // RAII guard: pop the pushed debug vars even if an assertion fails below.
    std::unique_ptr<void, void (*)(void *)> igcDebugVarsAutoPop{&gEnvironment, [](void *) { gEnvironment->igcPopDebugVars(); }};
    auto program = clUniquePtr(new MockProgram(toClDeviceVector(*pClDevice)));
    uint32_t ir[16] = {0x03022307, 0x23471113, 0x17192329};
    program->irBinary = makeCopy(ir, sizeof(ir));
    program->irBinarySize = sizeof(ir);
    // Sanity: no device binary present before the rebuild.
    EXPECT_EQ(nullptr, program->buildInfos[rootDeviceIndex].unpackedDeviceBinary);
    EXPECT_EQ(0U, program->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize);
    program->rebuildProgramFromIr();
    ASSERT_NE(nullptr, program->buildInfos[rootDeviceIndex].unpackedDeviceBinary);
    ASSERT_EQ(sizeof(binaryToReturn), program->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize);
    EXPECT_EQ(0, memcmp(binaryToReturn, program->buildInfos[rootDeviceIndex].unpackedDeviceBinary.get(), program->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize));
}
// extractInternalOptions: a known internal flag in the build options is copied
// into the internal-options string.
TEST_F(ProgramTests, givenProgramWhenInternalOptionsArePassedThenTheyAreAddedToProgramInternalOptions) {
    MockProgram program(toClDeviceVector(*pClDevice));
    std::string buildOptions = NEO::CompilerOptions::gtpinRera.str();
    std::string internalOptions;
    program.extractInternalOptions(buildOptions, internalOptions);
    EXPECT_STREQ(internalOptions.c_str(), NEO::CompilerOptions::gtpinRera.data());
}
// Unrecognized tokens must not leak into the internal options.
TEST_F(ProgramTests, givenProgramWhenUnknownInternalOptionsArePassedThenTheyAreNotAddedToProgramInternalOptions) {
    MockProgram program(toClDeviceVector(*pClDevice));
    const char *internalOption = "-unknown-internal-options-123";
    std::string buildOptions(internalOption);
    std::string internalOptions;
    program.extractInternalOptions(buildOptions, internalOptions);
    EXPECT_EQ(0u, internalOptions.length());
}
// Known internal flags interleaved with junk ("###") are still extracted,
// and only the known flags appear in the output.
TEST_F(ProgramTests, givenProgramWhenAllInternalOptionsArePassedMixedWithUnknownInputThenTheyAreParsedCorrectly) {
    MockProgram program(toClDeviceVector(*pClDevice));
    std::string buildOptions = CompilerOptions::concatenate("###", CompilerOptions::gtpinRera, "###", CompilerOptions::greaterThan4gbBuffersRequired, "###");
    std::string expectedOutput = CompilerOptions::concatenate(CompilerOptions::gtpinRera, CompilerOptions::greaterThan4gbBuffersRequired);
    std::string internalOptions;
    program.extractInternalOptions(buildOptions, internalOptions);
    EXPECT_EQ(expectedOutput, internalOptions);
}
// A value-carrying internal option with a valid value is extracted together
// with its value (isFlagOption=false forces value parsing in the mock).
TEST_F(ProgramTests, givenProgramWhenInternalOptionsArePassedWithValidValuesThenTheyAreAddedToProgramInternalOptions) {
    MockProgram program(toClDeviceVector(*pClDevice));
    program.isFlagOptionOverride = false;
    program.isOptionValueValidOverride = true;
    std::string buildOptions = CompilerOptions::concatenate(CompilerOptions::gtpinRera, "someValue");
    std::string internalOptions;
    program.extractInternalOptions(buildOptions, internalOptions);
    EXPECT_EQ(buildOptions, internalOptions) << internalOptions;
}
// Two negative cases: (1) value present but invalid, (2) value required but
// missing — in both cases nothing is extracted.
TEST_F(ProgramTests, givenProgramWhenInternalOptionsArePassedWithInvalidValuesThenTheyAreNotAddedToProgramInternalOptions) {
    MockProgram program(toClDeviceVector(*pClDevice));
    program.isFlagOptionOverride = false;
    std::string buildOptions = CompilerOptions::concatenate(CompilerOptions::gtpinRera, "someValue");
    std::string expectedOutput = "";
    std::string internalOptions;
    program.extractInternalOptions(buildOptions, internalOptions);
    EXPECT_EQ(expectedOutput, internalOptions);
    program.isOptionValueValidOverride = true;
    buildOptions = std::string(CompilerOptions::gtpinRera);
    internalOptions.erase();
    program.extractInternalOptions(buildOptions, internalOptions);
    EXPECT_EQ(expectedOutput, internalOptions);
}
// The InjectInternalBuildOptions debug flag must append its payload to the
// internal options when building a regular (non-built-in) program.
TEST_F(ProgramTests, GivenInjectInternalBuildOptionsWhenBuildingProgramThenInternalOptionsWereAppended) {
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.InjectInternalBuildOptions.set("-abc");
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pDevice = pContext->getDevice(0);
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto program = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pDevice));
    program->sourceCode = "__kernel mock() {}";
    program->createdFrom = Program::CreatedFrom::SOURCE;
    cl_int retVal = program->build(program->getDevices(), "", false);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, "-abc")) << cip->buildInternalOptions;
}
// Built-in programs are exempt: the injected options must NOT be appended.
TEST_F(ProgramTests, GivenInjectInternalBuildOptionsWhenBuildingBuiltInProgramThenInternalOptionsAreNotAppended) {
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.InjectInternalBuildOptions.set("-abc");
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pDevice = pContext->getDevice(0);
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto program = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pDevice));
    program->sourceCode = "__kernel mock() {}";
    program->createdFrom = Program::CreatedFrom::SOURCE;
    program->isBuiltIn = true;
    cl_int retVal = program->build(program->getDevices(), "", false);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_FALSE(CompilerOptions::contains(cip->buildInternalOptions, "-abc")) << cip->buildInternalOptions;
}
// Same injection contract for the compile() path.
TEST_F(ProgramTests, GivenInjectInternalBuildOptionsWhenCompilingProgramThenInternalOptionsWereAppended) {
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.InjectInternalBuildOptions.set("-abc");
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pDevice = pContext->getDevice(0);
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto program = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pDevice));
    program->sourceCode = "__kernel mock() {}";
    program->createdFrom = Program::CreatedFrom::SOURCE;
    cl_int retVal = program->compile(program->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_TRUE(CompilerOptions::contains(cip->buildInternalOptions, "-abc")) << cip->buildInternalOptions;
}
// compile() path, built-in program: no injection.
TEST_F(ProgramTests, GivenInjectInternalBuildOptionsWhenCompilingBuiltInProgramThenInternalOptionsAreNotAppended) {
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.InjectInternalBuildOptions.set("-abc");
    auto cip = new MockCompilerInterfaceCaptureBuildOptions();
    auto pDevice = pContext->getDevice(0);
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(cip);
    auto program = std::make_unique<SucceedingGenBinaryProgram>(toClDeviceVector(*pDevice));
    program->sourceCode = "__kernel mock() {}";
    program->createdFrom = Program::CreatedFrom::SOURCE;
    program->isBuiltIn = true;
    cl_int retVal = program->compile(program->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_FALSE(CompilerOptions::contains(cip->buildInternalOptions, "-abc")) << cip->buildInternalOptions;
}
// Spy subclass that counts how often applyAdditionalOptions() is invoked
// while delegating to the real implementation.
class AdditionalOptionsMockProgram : public MockProgram {
  public:
    using MockProgram::MockProgram;
    void applyAdditionalOptions(std::string &internalOptions) override {
        applyAdditionalOptionsCalled++;
        MockProgram::applyAdditionalOptions(internalOptions);
    }
    uint32_t applyAdditionalOptionsCalled = 0; // number of calls observed
};
// build() must route through applyAdditionalOptions() exactly once.
TEST_F(ProgramTests, givenProgramWhenBuiltThenAdditionalOptionsAreApplied) {
    AdditionalOptionsMockProgram program(toClDeviceVector(*pClDevice));
    program.build(program.getDevices(), nullptr, false);
    EXPECT_EQ(1u, program.applyAdditionalOptionsCalled);
}
// With RebuildPrecompiledKernels forced, a built-in program created from a
// device binary must discard that binary (it will be rebuilt from IR instead).
TEST(CreateProgramFromBinaryTests, givenBinaryProgramBuiltInWhenKernelRebulildIsForcedThenDeviceBinaryIsNotUsed) {
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.RebuildPrecompiledKernels.set(true);
    cl_int retVal = CL_INVALID_BINARY;
    PatchTokensTestData::ValidEmptyProgram programTokens;
    auto clDevice = std::make_unique<MockClDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(nullptr));
    std::unique_ptr<MockProgram> pProgram(Program::createBuiltInFromGenBinary<MockProgram>(nullptr, toClDeviceVector(*clDevice), programTokens.storage.data(), programTokens.storage.size(), &retVal));
    ASSERT_NE(nullptr, pProgram.get());
    EXPECT_EQ(CL_SUCCESS, retVal);
    auto rootDeviceIndex = clDevice->getRootDeviceIndex();
    retVal = pProgram->createProgramFromBinary(programTokens.storage.data(), programTokens.storage.size(), *clDevice);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Neither the unpacked nor the packed binary may be retained.
    EXPECT_EQ(nullptr, pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinary.get());
    EXPECT_EQ(0U, pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize);
    EXPECT_EQ(nullptr, pProgram->buildInfos[rootDeviceIndex].packedDeviceBinary);
    EXPECT_EQ(0U, pProgram->buildInfos[rootDeviceIndex].packedDeviceBinarySize);
}
// Forced rebuild must also arm the "recompiled" warning flag.
TEST(CreateProgramFromBinaryTests, givenBinaryProgramBuiltInWhenKernelRebulildIsForcedThenRebuildWarningIsEnabled) {
    DebugManagerStateRestore dbgRestorer{};
    DebugManager.flags.RebuildPrecompiledKernels.set(true);
    PatchTokensTestData::ValidEmptyProgram programTokens;
    cl_int retVal{CL_INVALID_BINARY};
    const auto clDevice = std::make_unique<MockClDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(nullptr));
    std::unique_ptr<MockProgram> pProgram(Program::createBuiltInFromGenBinary<MockProgram>(nullptr, toClDeviceVector(*clDevice), programTokens.storage.data(), programTokens.storage.size(), &retVal));
    ASSERT_NE(nullptr, pProgram.get());
    ASSERT_EQ(CL_SUCCESS, retVal);
    retVal = pProgram->createProgramFromBinary(programTokens.storage.data(), programTokens.storage.size(), *clDevice);
    ASSERT_EQ(CL_SUCCESS, retVal);
    ASSERT_TRUE(pProgram->shouldWarnAboutRebuild);
}
// The forced-rebuild flag applies only to built-ins: a user (non-built-in)
// binary program keeps its device binary.
TEST(CreateProgramFromBinaryTests, givenBinaryProgramNotBuiltInWhenBuiltInKernelRebulildIsForcedThenDeviceBinaryIsUsed) {
    DebugManagerStateRestore dbgRestorer;
    DebugManager.flags.RebuildPrecompiledKernels.set(true);
    cl_int retVal = CL_INVALID_BINARY;
    PatchTokensTestData::ValidEmptyProgram programTokens;
    const unsigned char *binaries[] = {programTokens.storage.data()};
    size_t lengths[] = {programTokens.storage.size()};
    auto clDevice = std::make_unique<MockClDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(nullptr));
    std::unique_ptr<MockProgram> pProgram(Program::create<MockProgram>(
        nullptr,
        toClDeviceVector(*clDevice),
        lengths,
        binaries,
        nullptr,
        retVal));
    ASSERT_NE(nullptr, pProgram.get());
    EXPECT_EQ(CL_SUCCESS, retVal);
    auto rootDeviceIndex = clDevice->getRootDeviceIndex();
    EXPECT_NE(nullptr, pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinary.get());
    EXPECT_LT(0U, pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize);
    EXPECT_NE(nullptr, pProgram->buildInfos[rootDeviceIndex].packedDeviceBinary);
    EXPECT_LT(0U, pProgram->buildInfos[rootDeviceIndex].packedDeviceBinarySize);
}
// Default behavior (no forced rebuild): the provided binary is stored both
// packed and unpacked, with the original size.
TEST(CreateProgramFromBinaryTests, givenBinaryProgramWhenKernelRebulildIsNotForcedThenDeviceBinaryIsUsed) {
    cl_int retVal = CL_INVALID_BINARY;
    PatchTokensTestData::ValidEmptyProgram programTokens;
    auto clDevice = std::make_unique<MockClDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(nullptr));
    std::unique_ptr<MockProgram> pProgram(Program::createBuiltInFromGenBinary<MockProgram>(nullptr, toClDeviceVector(*clDevice), programTokens.storage.data(), programTokens.storage.size(), &retVal));
    ASSERT_NE(nullptr, pProgram.get());
    EXPECT_EQ(CL_SUCCESS, retVal);
    auto rootDeviceIndex = clDevice->getRootDeviceIndex();
    retVal = pProgram->createProgramFromBinary(programTokens.storage.data(), programTokens.storage.size(), *clDevice);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, reinterpret_cast<uint8_t *>(pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinary.get()));
    EXPECT_EQ(programTokens.storage.size(), pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize);
    EXPECT_NE(nullptr, reinterpret_cast<uint8_t *>(pProgram->buildInfos[rootDeviceIndex].packedDeviceBinary.get()));
    EXPECT_EQ(programTokens.storage.size(), pProgram->buildInfos[rootDeviceIndex].packedDeviceBinarySize);
}
// Program mock that accepts any specialization-constant update unconditionally.
struct SpecializationConstantProgramMock : public MockProgram {
    using MockProgram::MockProgram;
    cl_int updateSpecializationConstant(cl_uint specId, size_t specSize, const void *specValue) override {
        return CL_SUCCESS;
    }
};
// Compiler-interface mock that records getSpecConstantsInfo() calls: how many
// times it was invoked and which SPIR-V buffer it was handed. retVal can be
// flipped to a failure via returnError().
struct SpecializationConstantCompilerInterfaceMock : public CompilerInterface {
    TranslationOutput::ErrorCode retVal = TranslationOutput::ErrorCode::Success;
    int counter = 0;                // number of getSpecConstantsInfo() calls
    const char *spirV = nullptr;    // start of the SPIR-V passed on the last call
    TranslationOutput::ErrorCode getSpecConstantsInfo(const NEO::Device &device, ArrayRef<const char> srcSpirV, SpecConstantInfo &output) override {
        counter++;
        spirV = srcSpirV.begin();
        return retVal;
    }
    void returnError() {
        retVal = TranslationOutput::ErrorCode::CompilationFailure;
    }
};
// Root-device environment that always exposes the mock compiler interface
// (note: name carries a historical "Environemnt" typo; kept for ABI/callers).
struct SpecializationConstantRootDeviceEnvironemnt : public RootDeviceEnvironment {
    SpecializationConstantRootDeviceEnvironemnt(ExecutionEnvironment &executionEnvironment) : RootDeviceEnvironment(executionEnvironment) {
        compilerInterface.reset(new SpecializationConstantCompilerInterfaceMock());
    }
    CompilerInterface *getCompilerInterface() override {
        return compilerInterface.get();
    }
    bool initAilConfiguration() override {
        return true;
    }
};
// Fixture wiring a SpecializationConstantCompilerInterfaceMock into the
// device's root environment and preparing a SPIR-V-flagged mock program.
struct setProgramSpecializationConstantTests : public ::testing::Test {
    setProgramSpecializationConstantTests() : device(new MockDevice()) {}
    void SetUp() override {
        // Ownership of mockCompiler transfers to the root device environment.
        mockCompiler = new SpecializationConstantCompilerInterfaceMock();
        auto rootDeviceEnvironment = device.getExecutionEnvironment()->rootDeviceEnvironments[0].get();
        rootDeviceEnvironment->compilerInterface.reset(mockCompiler);
        mockProgram.reset(new SpecializationConstantProgramMock(toClDeviceVector(device)));
        mockProgram->isSpirV = true;
        // Preconditions shared by all tests in this fixture.
        EXPECT_FALSE(mockProgram->areSpecializationConstantsInitialized);
        EXPECT_EQ(0, mockCompiler->counter);
    }
    SpecializationConstantCompilerInterfaceMock *mockCompiler = nullptr; // non-owning view
    std::unique_ptr<SpecializationConstantProgramMock> mockProgram;
    MockClDevice device;
    int specValue = 1; // dummy constant payload used by the tests
};
// First set of a spec constant queries the compiler with the program's IR.
TEST_F(setProgramSpecializationConstantTests, whenSetProgramSpecializationConstantThenBinarySourceIsUsed) {
    auto retVal = mockProgram->setProgramSpecializationConstant(1, sizeof(int), &specValue);
    EXPECT_EQ(1, mockCompiler->counter);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_TRUE(mockProgram->areSpecializationConstantsInitialized);
    // The compiler must have been handed the program's own IR buffer.
    EXPECT_EQ(mockProgram->irBinary.get(), mockCompiler->spirV);
}
// The expensive getSpecConstantsInfo() query runs only on the first call;
// subsequent calls reuse the cached initialization.
TEST_F(setProgramSpecializationConstantTests, whenSetProgramSpecializationConstantMultipleTimesThenSpecializationConstantsAreInitializedOnce) {
    auto retVal = mockProgram->setProgramSpecializationConstant(1, sizeof(int), &specValue);
    EXPECT_EQ(1, mockCompiler->counter);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_TRUE(mockProgram->areSpecializationConstantsInitialized);
    retVal = mockProgram->setProgramSpecializationConstant(1, sizeof(int), &specValue);
    EXPECT_EQ(1, mockCompiler->counter);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_TRUE(mockProgram->areSpecializationConstantsInitialized);
}
// A compiler failure surfaces as CL_INVALID_VALUE and leaves the constants
// uninitialized.
TEST_F(setProgramSpecializationConstantTests, givenInvalidGetSpecConstantsInfoReturnValueWhenSetProgramSpecializationConstantThenErrorIsReturned) {
    mockCompiler->returnError();
    auto retVal = mockProgram->setProgramSpecializationConstant(1, sizeof(int), &specValue);
    EXPECT_EQ(1, mockCompiler->counter);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
    EXPECT_FALSE(mockProgram->areSpecializationConstantsInitialized);
}
// No compiler interface available at all -> CL_OUT_OF_HOST_MEMORY.
TEST(setProgramSpecializationConstantTest, givenUninitializedCompilerinterfaceWhenSetProgramSpecializationConstantThenErrorIsReturned) {
    // executionEnvironment ownership is taken over by the MockDevice below.
    auto executionEnvironment = new MockExecutionEnvironment();
    executionEnvironment->rootDeviceEnvironments[0] = std::make_unique<NoCompilerInterfaceRootDeviceEnvironment>(*executionEnvironment);
    executionEnvironment->rootDeviceEnvironments[0]->setHwInfo(defaultHwInfo.get());
    MockClDevice mockDevice(new MockDevice{executionEnvironment, 0});
    SpecializationConstantProgramMock mockProgram(toClDeviceVector(mockDevice));
    mockProgram.isSpirV = true;
    int specValue = 1;
    auto retVal = mockProgram.setProgramSpecializationConstant(1, sizeof(int), &specValue);
    EXPECT_EQ(CL_OUT_OF_HOST_MEMORY, retVal);
}
// Fixture alias for binary-program tests.
using ProgramBinTest = Test<ProgramSimpleFixture>;
// With PrintProgramBinaryProcessingTime enabled, building must print an
// "Elapsed time: " line to stdout.
TEST_F(ProgramBinTest, givenPrintProgramBinaryProcessingTimeSetWhenBuildProgramThenProcessingTimeIsPrinted) {
    DebugManagerStateRestore restorer;
    DebugManager.flags.PrintProgramBinaryProcessingTime.set(true);
    testing::internal::CaptureStdout();
    CreateProgramFromBinary(pContext, pContext->getDevices(), "kernel_data_param");
    auto retVal = pProgram->build(
        pProgram->getDevices(),
        nullptr,
        false);
    auto output = testing::internal::GetCapturedStdout();
    // string::compare returns 0 on match, hence EXPECT_FALSE.
    EXPECT_FALSE(output.compare(0, 14, "Elapsed time: "));
    EXPECT_EQ(CL_SUCCESS, retVal);
}
// RAII guard: installs a deterministic 32-byte debug-data blob (bytes 0..31)
// into the IGC debug vars on construction and removes it on destruction.
// Non-copyable / non-movable: the guard's lifetime is tied to one scope.
struct DebugDataGuard {
    DebugDataGuard(const DebugDataGuard &) = delete;
    DebugDataGuard(DebugDataGuard &&) = delete;
    DebugDataGuard() {
        // Fill with the byte pattern 0,1,2,... so tests can verify integrity.
        for (size_t n = 0; n < sizeof(mockDebugData); n++) {
            mockDebugData[n] = (char)n;
        }
        auto vars = NEO::getIgcDebugVars();
        vars.debugDataToReturn = mockDebugData;
        vars.debugDataToReturnSize = sizeof(mockDebugData);
        NEO::setIgcDebugVars(vars);
    }
    ~DebugDataGuard() {
        // Restore the debug vars to "no debug data".
        auto vars = NEO::getIgcDebugVars();
        vars.debugDataToReturn = nullptr;
        vars.debugDataToReturnSize = 0;
        NEO::setIgcDebugVars(vars);
    }
    char mockDebugData[32];
};
// After a build with mocked debug data, CL_PROGRAM_DEBUG_INFO_SIZES_INTEL /
// CL_PROGRAM_DEBUG_INFO_INTEL must expose that data via getInfo, and the
// retrieved bytes must match the 0,1,2,... pattern set by DebugDataGuard.
TEST_F(ProgramBinTest, GivenBuildWithDebugDataThenBuildDataAvailableViaGetInfo) {
    DebugDataGuard debugDataGuard;
    const char *sourceCode = "__kernel void\nCB(\n__global unsigned int* src, __global unsigned int* dst)\n{\nint id = (int)get_global_id(0);\ndst[id] = src[id];\n}\n";
    pProgram = Program::create<MockProgram>(
        pContext,
        1,
        &sourceCode,
        &knownSourceSize,
        retVal);
    retVal = pProgram->build(pProgram->getDevices(), nullptr, false);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Verify
    size_t debugDataSize = 0;
    retVal = pProgram->getInfo(CL_PROGRAM_DEBUG_INFO_SIZES_INTEL, sizeof(debugDataSize), &debugDataSize, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    std::unique_ptr<char[]> debugData{new char[debugDataSize]};
    // Zero the whole destination buffer. The previous loop bound was
    // sizeof(debugData) - i.e. the size of the unique_ptr object itself -
    // which cleared only ~8 bytes instead of debugDataSize bytes.
    for (size_t n = 0; n < debugDataSize; n++) {
        debugData[n] = 0;
    }
    char *pDebugData = &debugData[0];
    size_t retData = 0;
    bool isOK = true;
    // Undersized param_value_size must be rejected.
    retVal = pProgram->getInfo(CL_PROGRAM_DEBUG_INFO_INTEL, 1, &pDebugData, &retData);
    EXPECT_EQ(CL_INVALID_VALUE, retVal);
    retVal = pProgram->getInfo(CL_PROGRAM_DEBUG_INFO_INTEL, debugDataSize, &pDebugData, &retData);
    EXPECT_EQ(CL_SUCCESS, retVal);
    cl_uint numDevices;
    retVal = clGetProgramInfo(pProgram, CL_PROGRAM_NUM_DEVICES, sizeof(numDevices), &numDevices, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // getInfo reports one destination pointer per device; using the size of
    // the pointer makes that intent explicit (the old sizeof(debugData) only
    // matched because unique_ptr<char[]> happens to be pointer-sized).
    EXPECT_EQ(numDevices * sizeof(pDebugData), retData);
    // Check integrity of returned debug data
    for (size_t n = 0; n < debugDataSize; n++) {
        if (debugData[n] != (char)n) {
            isOK = false;
            break;
        }
    }
    EXPECT_TRUE(isOK);
    // NOTE(review): the original also looped from debugDataSize up to
    // sizeof(debugData) checking for zeros; since the buffer holds exactly
    // debugDataSize bytes that loop could never execute (and with the old
    // 8-byte bound it was dead code), so it has been removed.
    retData = 0;
    retVal = pProgram->getInfo(CL_PROGRAM_DEBUG_INFO_INTEL, debugDataSize, nullptr, &retData);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(numDevices * sizeof(pDebugData), retData);
}
// Without any debug data, CL_PROGRAM_DEBUG_INFO_SIZES_INTEL reports size 0 and
// CL_PROGRAM_DEBUG_INFO_INTEL must not write anything into the user buffer.
TEST_F(ProgramBinTest, givenNoDebugDataAvailableThenDebugDataIsNotAvailableViaGetInfo) {
    const char *sourceCode = "__kernel void\nCB(\n__global unsigned int* src, __global unsigned int* dst)\n{\nint id = (int)get_global_id(0);\ndst[id] = src[id];\n}\n";
    pProgram = Program::create<MockProgram>(
        pContext,
        1,
        &sourceCode,
        &knownSourceSize,
        retVal);
    EXPECT_EQ(0u, pProgram->buildInfos[rootDeviceIndex].debugDataSize);
    EXPECT_EQ(nullptr, pProgram->buildInfos[rootDeviceIndex].debugData);
    size_t debugDataSize = 0;
    retVal = pProgram->getInfo(CL_PROGRAM_DEBUG_INFO_SIZES_INTEL, sizeof(debugDataSize), &debugDataSize, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_EQ(0u, debugDataSize);
    cl_uint numDevices;
    retVal = clGetProgramInfo(pProgram, CL_PROGRAM_NUM_DEVICES, sizeof(numDevices), &numDevices, nullptr);
    // Allocate room for one destination pointer per device.
    debugDataSize = numDevices * sizeof(void **);
    std::unique_ptr<char[]> debugData{new char[debugDataSize]};
    // Zero and later verify the entire allocated buffer. The previous bound,
    // sizeof(debugData), is the size of the unique_ptr object (~8 bytes), so
    // only a fraction of the buffer was initialized and checked.
    for (size_t n = 0; n < debugDataSize; n++) {
        debugData[n] = 0;
    }
    char *pDebugData = &debugData[0];
    size_t retData = 0;
    retVal = pProgram->getInfo(CL_PROGRAM_DEBUG_INFO_INTEL, debugDataSize, &pDebugData, &retData);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // No debug data exists, so every byte must still be zero.
    for (size_t n = 0; n < debugDataSize; n++) {
        EXPECT_EQ(0, debugData[n]);
    }
}
// Linking a compiled program while the mock compiler returns debug data must
// leave that debug data attached to the linked program.
TEST_F(ProgramBinTest, GivenDebugDataAvailableWhenLinkingProgramThenDebugDataIsStoredInProgram) {
    DebugDataGuard debugDataGuard;
    const char *sourceCode = "__kernel void\nCB(\n__global unsigned int* src, __global unsigned int* dst)\n{\nint id = (int)get_global_id(0);\ndst[id] = src[id];\n}\n";
    pProgram = Program::create<MockProgram>(
        pContext,
        1,
        &sourceCode,
        &knownSourceSize,
        retVal);
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Link the program with itself as the single input object.
    cl_program programToLink = pProgram;
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, &programToLink);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, pProgram->getDebugData(rootDeviceIndex));
}
using ProgramMultiRootDeviceTests = MultiRootDeviceFixture;
// buildInfos must be sized by the highest root-device index + 1, regardless of
// the order in which devices are added to the vector.
TEST_F(ProgramMultiRootDeviceTests, WhenProgramIsCreatedThenBuildInfosVectorIsProperlyResized) {
    {
        ClDeviceVector deviceVector;
        deviceVector.push_back(device1);
        deviceVector.push_back(device2);
        EXPECT_EQ(1u, deviceVector[0]->getRootDeviceIndex());
        auto program = std::make_unique<MockProgram>(context.get(), false, deviceVector);
        EXPECT_EQ(3u, program->buildInfos.size());
    }
    {
        // Reversed order: the max index (2) still dictates the size.
        ClDeviceVector deviceVector;
        deviceVector.push_back(device2);
        deviceVector.push_back(device1);
        EXPECT_EQ(2u, deviceVector[0]->getRootDeviceIndex());
        auto program = std::make_unique<MockProgram>(context.get(), false, deviceVector);
        EXPECT_EQ(3u, program->buildInfos.size());
    }
}
// Compiler-interface spy that records the GTPin init pointer forwarded through
// link() before delegating to the real implementation.
class MockCompilerInterfaceWithGtpinParam : public CompilerInterface {
  public:
    TranslationOutput::ErrorCode link(
        const NEO::Device &device,
        const TranslationInput &input,
        TranslationOutput &output) override {
        gtpinInfoPassed = input.GTPinInput;
        return CompilerInterface::link(device, input, output);
    }
    // GTPin init info seen on the last link() call. Initialized to nullptr so
    // reading it before link() runs yields a deterministic value instead of
    // an indeterminate (UB) read of an uninitialized pointer.
    void *gtpinInfoPassed = nullptr;
};
// The GTPin init pointer registered via gtpinSetIgcInit() must travel through
// the link() translation input to the compiler interface.
TEST_F(ProgramBinTest, GivenSourceKernelWhenLinkingProgramThenGtpinInitInfoIsPassed) {
    void *pIgcInitPtr = reinterpret_cast<void *>(0x1234); // sentinel, never dereferenced
    gtpinSetIgcInit(pIgcInitPtr);
    const char *sourceCode = "__kernel void\nCB(\n__global unsigned int* src, __global unsigned int* dst)\n{\nint id = (int)get_global_id(0);\ndst[id] = src[id];\n}\n";
    pProgram = Program::create<MockProgram>(
        pContext,
        1,
        &sourceCode,
        &knownSourceSize,
        retVal);
    std::unique_ptr<MockCompilerInterfaceWithGtpinParam> mockCompilerInterface(new MockCompilerInterfaceWithGtpinParam);
    retVal = pProgram->compile(pProgram->getDevices(), nullptr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // The root device environment takes ownership of the raw pointer here...
    pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->compilerInterface.reset(mockCompilerInterface.get());
    cl_program programToLink = pProgram;
    retVal = pProgram->link(pProgram->getDevices(), nullptr, 1, &programToLink);
    EXPECT_EQ(pIgcInitPtr, mockCompilerInterface->gtpinInfoPassed);
    // ...so release the local unique_ptr to avoid a double delete.
    mockCompilerInterface.release();
}
// A zebin binary is self-contained: replaceDeviceBinary must store the same
// bytes as both the packed and the unpacked device binary.
TEST(ProgramReplaceDeviceBinary, GivenBinaryZebinThenUseAsBothPackedAndUnpackedBinaryContainer) {
    ZebinTestData::ValidEmptyProgram zebin;
    std::unique_ptr<char[]> src = makeCopy(zebin.storage.data(), zebin.storage.size());
    MockContext context;
    auto device = context.getDevice(0);
    auto rootDeviceIndex = device->getRootDeviceIndex();
    MockProgram program{&context, false, toClDeviceVector(*device)};
    program.replaceDeviceBinary(std::move(src), zebin.storage.size(), rootDeviceIndex);
    ASSERT_EQ(zebin.storage.size(), program.buildInfos[rootDeviceIndex].packedDeviceBinarySize);
    ASSERT_EQ(zebin.storage.size(), program.buildInfos[rootDeviceIndex].unpackedDeviceBinarySize);
    ASSERT_NE(nullptr, program.buildInfos[rootDeviceIndex].packedDeviceBinary);
    ASSERT_NE(nullptr, program.buildInfos[rootDeviceIndex].unpackedDeviceBinary);
    EXPECT_EQ(0, memcmp(program.buildInfos[rootDeviceIndex].packedDeviceBinary.get(), zebin.storage.data(), program.buildInfos[rootDeviceIndex].packedDeviceBinarySize));
    EXPECT_EQ(0, memcmp(program.buildInfos[rootDeviceIndex].unpackedDeviceBinary.get(), zebin.storage.data(), program.buildInfos[rootDeviceIndex].unpackedDeviceBinarySize));
}
// Per the OpenCL spec, user_data without a callback function is invalid.
TEST(ProgramCallbackTest, whenFunctionIsNullptrThenUserDataNeedsToBeNullptr) {
    void *userData = nullptr;
    EXPECT_TRUE(Program::isValidCallback(nullptr, nullptr));
    EXPECT_FALSE(Program::isValidCallback(nullptr, &userData));
}
// Test callback: flips the bool pointed to by userData so tests can observe
// that the callback actually ran.
void CL_CALLBACK callbackFuncProgram(
    cl_program program,
    void *userData) {
    // static_cast is the correct named cast for void* -> T*; reinterpret_cast
    // (used previously) is unnecessarily strong here.
    *static_cast<bool *>(userData) = true;
}
// With a non-null callback, user_data may be anything (null or not).
TEST(ProgramCallbackTest, whenFunctionIsNotNullptrThenUserDataDoesntMatter) {
    void *userData = nullptr;
    EXPECT_TRUE(Program::isValidCallback(callbackFuncProgram, nullptr));
    EXPECT_TRUE(Program::isValidCallback(callbackFuncProgram, &userData));
}
// invokeCallback runs the function when provided and is a no-op for nullptr.
TEST(ProgramCallbackTest, whenInvokeCallbackIsCalledThenFunctionIsProperlyInvoked) {
    bool functionCalled = false;
    MockContext context;
    MockProgram program{&context, false, context.getDevices()};
    program.invokeCallback(callbackFuncProgram, &functionCalled);
    EXPECT_TRUE(functionCalled);
    // Must not crash with a null callback.
    program.invokeCallback(nullptr, nullptr);
}
// Building a program shared by several devices must store and process the
// generated binary exactly once per root device (not once per sub-device).
TEST(BuildProgramTest, givenMultiDeviceProgramWhenBuildingThenStoreAndProcessBinaryOnlyOncePerRootDevice) {
    MockProgram *pProgram = nullptr;
    std::unique_ptr<char[]> pSource = nullptr;
    size_t sourceSize = 0;
    std::string testFile;
    KernelBinaryHelper kbHelper("CopyBuffer_simd16");
    testFile.append(clFiles);
    testFile.append("CopyBuffer_simd16.cl");
    pSource = loadDataFromFile(
        testFile.c_str(),
        sourceSize);
    ASSERT_NE(0u, sourceSize);
    ASSERT_NE(nullptr, pSource);
    const char *sources[1] = {pSource.get()};
    MockUnrestrictiveContextMultiGPU context;
    cl_int retVal = CL_INVALID_PROGRAM;
    pProgram = Program::create<MockProgram>(
        &context,
        1,
        sources,
        &sourceSize,
        retVal);
    EXPECT_NE(nullptr, pProgram);
    ASSERT_EQ(CL_SUCCESS, retVal);
    // Before build: every device must report CL_BUILD_NONE.
    cl_build_status buildStatus;
    for (const auto &device : context.getDevices()) {
        retVal = clGetProgramBuildInfo(pProgram, device, CL_PROGRAM_BUILD_STATUS, sizeof(buildStatus), &buildStatus, NULL);
        EXPECT_EQ(CL_SUCCESS, retVal);
        EXPECT_EQ(CL_BUILD_NONE, buildStatus);
    }
    retVal = clBuildProgram(
        pProgram,
        0,
        nullptr,
        nullptr,
        nullptr,
        nullptr);
    // Exactly one store + one process per root device index.
    for (auto &rootDeviceIndex : context.getRootDeviceIndices()) {
        EXPECT_EQ(1, pProgram->replaceDeviceBinaryCalledPerRootDevice[rootDeviceIndex]);
        EXPECT_EQ(1, pProgram->processGenBinaryCalledPerRootDevice[rootDeviceIndex]);
    }
    ASSERT_EQ(CL_SUCCESS, retVal);
    retVal = clReleaseProgram(pProgram);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
// After a multi-device build, kernel info must be retrievable for every kernel
// on every root device.
TEST(BuildProgramTest, givenMultiDeviceProgramWhenBuildingThenStoreKernelInfoPerEachRootDevice) {
    MockProgram *pProgram = nullptr;
    std::unique_ptr<char[]> pSource = nullptr;
    size_t sourceSize = 0;
    std::string testFile;
    KernelBinaryHelper kbHelper("CopyBuffer_simd16");
    testFile.append(clFiles);
    testFile.append("CopyBuffer_simd16.cl");
    pSource = loadDataFromFile(
        testFile.c_str(),
        sourceSize);
    ASSERT_NE(0u, sourceSize);
    ASSERT_NE(nullptr, pSource);
    const char *sources[1] = {pSource.get()};
    MockUnrestrictiveContextMultiGPU context;
    cl_int retVal = CL_INVALID_PROGRAM;
    pProgram = Program::create<MockProgram>(
        &context,
        1,
        sources,
        &sourceSize,
        retVal);
    EXPECT_NE(nullptr, pProgram);
    ASSERT_EQ(CL_SUCCESS, retVal);
    // Before build: every device must report CL_BUILD_NONE.
    cl_build_status buildStatus;
    for (const auto &device : context.getDevices()) {
        retVal = clGetProgramBuildInfo(pProgram, device, CL_PROGRAM_BUILD_STATUS, sizeof(buildStatus), &buildStatus, NULL);
        EXPECT_EQ(CL_SUCCESS, retVal);
        EXPECT_EQ(CL_BUILD_NONE, buildStatus);
    }
    retVal = clBuildProgram(
        pProgram,
        0,
        nullptr,
        nullptr,
        nullptr,
        nullptr);
    ASSERT_EQ(CL_SUCCESS, retVal);
    for (auto &rootDeviceIndex : context.getRootDeviceIndices()) {
        EXPECT_LT(0u, pProgram->getNumKernels());
        for (auto i = 0u; i < pProgram->getNumKernels(); i++) {
            EXPECT_NE(nullptr, pProgram->getKernelInfo(i, rootDeviceIndex));
        }
    }
    retVal = clReleaseProgram(pProgram);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
// isBuilt() requires at least one device with a successful build AND an
// EXECUTABLE binary type; a compiled object or an errored device is not enough.
TEST(ProgramTest, whenProgramIsBuiltAsAnExecutableForAtLeastOneDeviceThenIsBuiltMethodReturnsTrue) {
    MockSpecializedContext context;
    MockProgram program(&context, false, context.getDevices());
    EXPECT_FALSE(program.isBuilt());
    program.deviceBuildInfos[context.getDevice(0)].buildStatus = CL_BUILD_SUCCESS;
    program.deviceBuildInfos[context.getDevice(0)].programBinaryType = CL_PROGRAM_BINARY_TYPE_COMPILED_OBJECT;
    program.deviceBuildInfos[context.getDevice(1)].buildStatus = CL_BUILD_ERROR;
    EXPECT_FALSE(program.isBuilt());
    program.deviceBuildInfos[context.getDevice(0)].buildStatus = CL_BUILD_SUCCESS;
    program.deviceBuildInfos[context.getDevice(0)].programBinaryType = CL_PROGRAM_BINARY_TYPE_EXECUTABLE;
    EXPECT_TRUE(program.isBuilt());
}
// retainForKernel() locks the program against modification.
TEST(ProgramTest, givenUnlockedProgramWhenRetainForKernelIsCalledThenProgramIsLocked) {
    MockSpecializedContext context;
    MockProgram program(&context, false, context.getDevices());
    EXPECT_FALSE(program.isLocked());
    program.retainForKernel();
    EXPECT_TRUE(program.isLocked());
}
// The lock is reference-counted: it is released only after a matching
// releaseForKernel() for every retainForKernel().
TEST(ProgramTest, givenLockedProgramWhenReleasingForKernelIsCalledForEachRetainThenProgramIsUnlocked) {
    MockSpecializedContext context;
    MockProgram program(&context, false, context.getDevices());
    EXPECT_FALSE(program.isLocked());
    program.retainForKernel();
    EXPECT_TRUE(program.isLocked());
    program.retainForKernel();
    EXPECT_TRUE(program.isLocked());
    program.releaseForKernel();
    EXPECT_TRUE(program.isLocked());
    program.releaseForKernel();
    EXPECT_FALSE(program.isLocked());
}
|
// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved.
#include "URealisticGraspingEditor.h"
#include "UGraspingEditorStyle.h"
#include "UGraspingEditorCommands.h"
#include "UGraspingEditorCallback.h"
#include "Engine.h"
#include "LevelEditor.h"
#include "ISkeletalMeshEditor.h"
#include "ISkeletalMeshEditorModule.h"
#include "ISkeletonEditor.h"
#include "ISkeletonEditorModule.h"
#define LOCTEXT_NAMESPACE "FURealisticGraspingEditorModule"
// Module entry point: registers the editor style/commands and hooks the
// grasping-tools button into the skeleton editor toolbar.
void FURealisticGraspingEditorModule::StartupModule()
{
	// This code will execute after your module is loaded into memory; the exact timing is specified in the .uplugin file per-module
	UGraspingEditorStyle::Initialize();
	UGraspingEditorStyle::ReloadTextures();
	//Initializes the drop down menu.
	InitializeUIButtons();
	//Creates the button in the menu bar of the skeleton editor.
	CreateButton();
}
void FURealisticGraspingEditorModule::ShutdownModule()
{
	// This function may be called during shutdown to clean up your module. For modules that support dynamic reloading,
	// we call this function before unloading the module.
	// NOTE(review): the style set shutdown is commented out, so the style
	// registration from StartupModule is never undone — confirm whether
	// UGraspingEditorStyle::Shutdown() should be re-enabled.
	//UGraspingEditorStyle::Shutdown();
	UGraspingEditorCommands::Unregister();
}
void FURealisticGraspingEditorModule::InitializeUIButtons()
{
UGraspingEditorCommands::Register();
PluginCommandList = MakeShareable(new FUICommandList);
const UGraspingEditorCommands& Commands = UGraspingEditorCommands::Get();
PluginCommandList->MapAction(
Commands.CreateGraspingStyle,
FExecuteAction::CreateStatic(&UGraspingEditorCallback::ShowInstructions),
FCanExecuteAction()
);
PluginCommandList->MapAction(
Commands.LoadGraspingStyle,
FExecuteAction::CreateStatic(&UGraspingEditorCallback::ShowInstructions),
FCanExecuteAction()
);
PluginCommandList->MapAction(
Commands.SaveGraspingPosition,
FExecuteAction::CreateStatic(&UGraspingEditorCallback::ShowInstructions),
FCanExecuteAction()
);
PluginCommandList->MapAction(
Commands.EditGraspingPosition,
FExecuteAction::CreateStatic(&UGraspingEditorCallback::ShowInstructions),
FCanExecuteAction()
);
}
void FURealisticGraspingEditorModule::CreateButton()
{
//Load the ISkeletalMeshEditorModule and add a new button to its menu bar.
ISkeletalMeshEditorModule& SkeletalMeshEditorModule =
FModuleManager::Get().LoadModuleChecked<ISkeletalMeshEditorModule>("SkeletonEditor");
// Add toolbar entry
TSharedPtr<FExtender> ToolbarExtender = MakeShareable(new FExtender);
ToolbarExtender->AddToolBarExtension(
"Asset",
EExtensionHook::After,
PluginCommandList,
FToolBarExtensionDelegate::CreateRaw(this, &FURealisticGraspingEditorModule::AddOptions)
);
SkeletalMeshEditorModule.GetToolBarExtensibilityManager()->AddExtender(ToolbarExtender);
}
TSharedRef<SWidget> FURealisticGraspingEditorModule::CreateOptionMenu()
{
	// Builds the drop-down content shown under the toolbar combo button,
	// one entry per registered grasping command.
	FMenuBuilder MenuBuilder(false, PluginCommandList.ToSharedRef());
	const UGraspingEditorCommands& Commands = UGraspingEditorCommands::Get();

	MenuBuilder.BeginSection("GraspingOptions");
	MenuBuilder.AddMenuEntry(Commands.CreateGraspingStyle);
	MenuBuilder.AddMenuEntry(Commands.LoadGraspingStyle);
	MenuBuilder.AddMenuEntry(Commands.SaveGraspingPosition);
	MenuBuilder.AddMenuEntry(Commands.EditGraspingPosition);
	MenuBuilder.EndSection();

	return MenuBuilder.MakeWidget();
}
// Adds the "Grasping Options" combo button (with its drop-down menu) to the
// toolbar this extension was registered on.
// Fix: removed an unused local `UGraspingEditorCommands` instance that was
// default-constructed on every toolbar rebuild for no purpose.
void FURealisticGraspingEditorModule::AddOptions(FToolBarBuilder & Builder)
{
	//Adds the drop down menu to the button.
	Builder.AddComboButton(
		FUIAction(),
		FOnGetContent::CreateRaw(this, &FURealisticGraspingEditorModule::CreateOptionMenu),
		LOCTEXT("GraspingDebugToolbar", "Grasping Options"),
		LOCTEXT("GraspingDebugToolbar_ToolTip", "Grasping plugin options"),
		FSlateIcon(UGraspingEditorStyle::GetStyleSetName(), "GraspingEditor.DebugOptionToolBar"),
		false
	);
}
#undef LOCTEXT_NAMESPACE
IMPLEMENT_MODULE(FURealisticGraspingEditorModule, URealisticGrasping)
|
/*
* sample_app.hpp
* This file is a part of Kaptivate
* https://github.com/FunkyTownEnterprises/Kaptivate
*
* Copyright (c) 2011 Ben Cable, Chris Eberle
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include "resource.hpp"
|
// Ouzel by Elviss Strazdins
#ifndef OUZEL_CORE_SYSTEMLINUX_HPP
#define OUZEL_CORE_SYSTEMLINUX_HPP
#include "../System.hpp"
namespace ouzel::core::linux
{
    // Linux-specific specialization of the platform-independent core::System.
    class System final: public core::System
    {
    public:
        // argc/argv: the untouched command-line arguments from main().
        // NOTE(review): presumably forwarded to the base class — confirm in
        // the corresponding .cpp.
        System(int argc, char* argv[]);
        ~System() override = default;
    };
}
#endif // OUZEL_CORE_SYSTEMLINUX_HPP
|
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/compiler.h"
#include "src/zone.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "test/cctest/cctest.h"
#if V8_TURBOFAN_TARGET
using namespace v8::internal;
using namespace v8::internal::compiler;
// Minimal placeholder operator for tests that need *some* operator but never
// interpret it (parameter opcode, no writes, zero inputs/outputs).
static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
                                     0, 0, "dummy");
// So we can get a real JS function.
// Compiles |source| as a script so the tests can work with a real JSFunction.
static Handle<JSFunction> Compile(const char* source) {
  Isolate* isolate = CcTest::i_isolate();
  Handle<String> code = isolate->factory()
                            ->NewStringFromUtf8(CStrVector(source))
                            .ToHandleChecked();
  Handle<SharedFunctionInfo> shared = Compiler::CompileScript(
      code, Handle<String>(), 0, 0, false,
      Handle<Context>(isolate->native_context()), NULL, NULL,
      v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
      shared, isolate->native_context());
}
// Smoke test: constructing a Linkage for a compiled JS function must not
// crash (no further assertions).
TEST(TestLinkageCreate) {
  InitializedHandleScope handles;
  Handle<JSFunction> function = Compile("a + b");
  CompilationInfoWithZone info(function);
  Linkage linkage(&info);
}
// Checks the incoming call descriptor of real JS functions of arity 0..3:
// JSParameterCount includes the receiver (hence 1 + i), exactly one return
// value, no operator properties, and the JS-function-call flag set.
TEST(TestLinkageJSFunctionIncoming) {
  InitializedHandleScope handles;
  const char* sources[] = {"(function() { })", "(function(a) { })",
                           "(function(a,b) { })", "(function(a,b,c) { })"};
  // Fix: iterate over every entry; the previous bound of 3 silently skipped
  // the three-parameter case.
  const int count = static_cast<int>(sizeof(sources) / sizeof(sources[0]));
  for (int i = 0; i < count; i++) {
    i::HandleScope handles(CcTest::i_isolate());
    Handle<JSFunction> function = v8::Utils::OpenHandle(
        *v8::Handle<v8::Function>::Cast(CompileRun(sources[i])));
    CompilationInfoWithZone info(function);
    Linkage linkage(&info);
    CallDescriptor* descriptor = linkage.GetIncomingDescriptor();
    CHECK_NE(NULL, descriptor);
    CHECK_EQ(1 + i, descriptor->JSParameterCount());
    CHECK_EQ(1, descriptor->ReturnCount());
    CHECK_EQ(Operator::kNoProperties, descriptor->properties());
    CHECK_EQ(true, descriptor->IsJSFunctionCall());
  }
}
// A CompilationInfo built from a NULL code stub currently yields no incoming
// descriptor; this pins the current behavior rather than a requirement.
TEST(TestLinkageCodeStubIncoming) {
  Isolate* isolate = CcTest::InitIsolateOnce();
  CompilationInfoWithZone info(static_cast<HydrogenCodeStub*>(NULL), isolate);
  Linkage linkage(&info);
  // TODO(titzer): test linkage creation with a bonafide code stub.
  // this just checks current behavior.
  CHECK_EQ(NULL, linkage.GetIncomingDescriptor());
}
// Checks outgoing JS call descriptors for parameter counts 0..31: each must
// echo the requested count, declare one return value, carry no operator
// properties, and be flagged as a JS function call.
TEST(TestLinkageJSCall) {
  HandleAndZoneScope handles;
  Handle<JSFunction> function = Compile("a + c");
  CompilationInfoWithZone info(function);
  Linkage linkage(&info);
  for (int i = 0; i < 32; i++) {
    CallDescriptor* descriptor = linkage.GetJSCallDescriptor(i);
    CHECK_NE(NULL, descriptor);
    CHECK_EQ(i, descriptor->JSParameterCount());
    CHECK_EQ(1, descriptor->ReturnCount());
    CHECK_EQ(Operator::kNoProperties, descriptor->properties());
    CHECK_EQ(true, descriptor->IsJSFunctionCall());
  }
}
// Intentionally empty placeholder; see the TODO below.
TEST(TestLinkageRuntimeCall) {
  // TODO(titzer): test linkage creation for outgoing runtime calls.
}
// Intentionally empty placeholder; see the TODO below.
TEST(TestLinkageStubCall) {
  // TODO(titzer): test linkage creation for outgoing stub calls.
}
#endif // V8_TURBOFAN_TARGET
|
#include <FoundationPCH.h>
#include <Foundation/IO/ChunkStream.h>
// Wraps the given output stream; no file or chunk is open yet.
ezChunkStreamWriter::ezChunkStreamWriter(ezStreamWriter& pStream)
  : m_Stream(pStream)
  , m_bWritingFile(false)
  , m_bWritingChunk(false)
{
}
void ezChunkStreamWriter::BeginStream(ezUInt16 uiVersion)
{
  EZ_ASSERT_DEV(!m_bWritingFile, "Already writing the file.");
  EZ_ASSERT_DEV(uiVersion > 0, "The version number must be larger than 0");

  m_bWritingFile = true;

  // Stream header: 8-byte magic tag followed by the 16-bit stream version.
  const char* szBeginTag = "BGNCHNK2";
  m_Stream.WriteBytes(szBeginTag, 8).IgnoreResult();
  m_Stream.WriteBytes(&uiVersion, 2).IgnoreResult();
}
void ezChunkStreamWriter::EndStream()
{
  EZ_ASSERT_DEV(m_bWritingFile, "Not writing to the file.");
  EZ_ASSERT_DEV(!m_bWritingChunk, "A chunk is still open for writing: '{0}'", m_sChunkName);

  m_bWritingFile = false;

  // Terminate the stream with the 8-byte end marker.
  const char* szEndTag = "END CHNK";
  m_Stream.WriteBytes(szEndTag, 8).IgnoreResult();
}
void ezChunkStreamWriter::BeginChunk(const char* szName, ezUInt32 uiVersion)
{
  EZ_ASSERT_DEV(m_bWritingFile, "Not writing to the file.");
  EZ_ASSERT_DEV(!m_bWritingChunk, "A chunk is already open for writing: '{0}'", m_sChunkName);

  m_sChunkName = szName;

  // Chunk header: marker tag, then name and version. The payload size is
  // written later by EndChunk(), once the buffered data is known.
  const char* szNextTag = "NXT CHNK";
  m_Stream.WriteBytes(szNextTag, 8).IgnoreResult();
  m_Stream << m_sChunkName;
  m_Stream << uiVersion;

  m_bWritingChunk = true;
}
void ezChunkStreamWriter::EndChunk()
{
EZ_ASSERT_DEV(m_bWritingFile, "Not writing to the file.");
EZ_ASSERT_DEV(m_bWritingChunk, "No chunk is currently open.");
m_bWritingChunk = false;
const ezUInt32 uiStorageSize = m_Storage.GetCount();
m_Stream << uiStorageSize;
/// \todo Write Chunk CRC
for (ezUInt32 i = 0; i < uiStorageSize;)
{
const ezUInt32 uiRange = m_Storage.GetContiguousRange(i);
EZ_ASSERT_DEBUG(uiRange > 0, "Invalid contiguous range");
m_Stream.WriteBytes(&m_Storage[i], uiRange).IgnoreResult();
i += uiRange;
}
m_Storage.Clear();
}
// Buffers the data into m_Storage; nothing hits the underlying stream until
// EndChunk() flushes the whole chunk.
ezResult ezChunkStreamWriter::WriteBytes(const void* pWriteBuffer, ezUInt64 uiBytesToWrite)
{
  EZ_ASSERT_DEV(m_bWritingChunk, "No chunk is currently written to");

  const ezUInt8* pCur = static_cast<const ezUInt8*>(pWriteBuffer);
  const ezUInt8* pEnd = pCur + uiBytesToWrite;

  while (pCur != pEnd)
  {
    m_Storage.PushBack(*pCur);
    ++pCur;
  }

  return EZ_SUCCESS;
}
// Wraps the given input stream. No chunk is available until BeginStream()
// has located the first chunk header.
ezChunkStreamReader::ezChunkStreamReader(ezStreamReader& stream)
  : m_Stream(stream)
{
  // By default EndStream() does not consume unread trailing chunks.
  m_EndChunkFileMode = EndChunkFileMode::JustClose;
  m_ChunkInfo.m_bValid = false;
}
// Reads from the current chunk only; requests are clamped so they can never
// run past the chunk's end.
ezUInt64 ezChunkStreamReader::ReadBytes(void* pReadBuffer, ezUInt64 uiBytesToRead)
{
  EZ_ASSERT_DEV(m_ChunkInfo.m_bValid, "No valid chunk available.");

  const ezUInt64 uiToRead = ezMath::Min<ezUInt64>(uiBytesToRead, m_ChunkInfo.m_uiUnreadChunkBytes);
  m_ChunkInfo.m_uiUnreadChunkBytes -= (ezUInt32)uiToRead;
  return m_Stream.ReadBytes(pReadBuffer, uiToRead);
}
// Consumes the stream header, returns the stream version (0 for the legacy
// format) and positions the reader on the first chunk, if any.
ezUInt16 ezChunkStreamReader::BeginStream()
{
  m_ChunkInfo.m_bValid = false;

  // Read the 8-byte magic; the buffer is zero-initialized so index 8 already
  // terminates the string.
  char szHeader[9] = {0};
  m_Stream.ReadBytes(szHeader, 8);

  ezUInt16 uiVersion = 0;

  if (ezStringUtils::IsEqual(szHeader, "BGNCHNK2"))
  {
    // Current format: a 16-bit stream version follows the magic.
    m_Stream.ReadBytes(&uiVersion, 2);
  }
  else
  {
    // "BGN CHNK" is the old chunk identifier, before a version number was written
    EZ_ASSERT_DEV(ezStringUtils::IsEqual(szHeader, "BGN CHNK"), "Not a valid chunk file.");
  }

  TryReadChunkHeader();
  return uiVersion;
}
void ezChunkStreamReader::EndStream()
{
  // Only the SkipToEnd mode requires consuming the remaining chunks.
  if (m_EndChunkFileMode != EndChunkFileMode::SkipToEnd)
    return;

  while (m_ChunkInfo.m_bValid)
    NextChunk();
}
// Reads the next 8-byte tag and, for a chunk header, fills m_ChunkInfo.
// On the end-of-stream marker m_ChunkInfo simply stays invalid.
void ezChunkStreamReader::TryReadChunkHeader()
{
  m_ChunkInfo.m_bValid = false;

  char szHeader[9] = {0};
  m_Stream.ReadBytes(szHeader, 8);

  if (ezStringUtils::IsEqual(szHeader, "NXT CHNK"))
  {
    // Chunk header layout: name, version, payload byte count.
    m_Stream >> m_ChunkInfo.m_sChunkName;
    m_Stream >> m_ChunkInfo.m_uiChunkVersion;
    m_Stream >> m_ChunkInfo.m_uiChunkBytes;
    m_ChunkInfo.m_uiUnreadChunkBytes = m_ChunkInfo.m_uiChunkBytes;
    m_ChunkInfo.m_bValid = true;
    return;
  }

  // The end-of-stream marker is the only other valid tag at this position.
  if (ezStringUtils::IsEqual(szHeader, "END CHNK"))
    return;

  EZ_REPORT_FAILURE("Invalid chunk file, tag is '{0}'", szHeader);
}
void ezChunkStreamReader::NextChunk()
{
if (!m_ChunkInfo.m_bValid)
return;
const ezUInt64 uiToSkip = m_ChunkInfo.m_uiUnreadChunkBytes;
const ezUInt64 uiSkipped = SkipBytes(uiToSkip);
EZ_VERIFY(uiSkipped == uiToSkip, "Corrupt chunk '{0}' (version {1}), tried to skip {2} bytes, could only read {3} bytes", m_ChunkInfo.m_sChunkName, m_ChunkInfo.m_uiChunkVersion, uiToSkip, uiSkipped);
TryReadChunkHeader();
}
EZ_STATICLINK_FILE(Foundation, Foundation_IO_Implementation_ChunkStream);
|
// Copyright (c) 2017 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "mfx_common.h"
#if defined(MFX_ENABLE_VP8_VIDEO_DECODE_HW)
#include "mfx_session.h"
#include "mfx_common_decode_int.h"
#include "mfx_vp8_dec_decode_hw.h"
#include "mfx_enc_common.h"
#include "umc_va_base.h"
#include "vm_sys_info.h"
#include <va/va.h>
#include <va/va_dec_vp8.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include "mfx_vp8_dec_decode_common.h"
// Packs every VA-API decode buffer for the current VP8 frame — picture
// parameters, coefficient probabilities, IQ matrix, slice parameters and the
// raw partition data — and sets each buffer's data size for submission.
// NOTE(review): bit-field semantics follow libva's va_dec_vp8.h; in
// particular the key_frame bit appears to use inverted polarity
// (0 == key frame) — confirm against the installed libva headers.
mfxStatus VideoDECODEVP8_HW::PackHeaders(mfxBitstream *p_bistream)
{
    using namespace VP8Defs;
    mfxStatus sts = MFX_ERR_NONE;
    // Bookkeeping (reference surface indices) for the most recent frame.
    sFrameInfo info = m_frames.back();
    /////////////////////////////////////////////////////////////////////////////////////////
    // Picture parameter buffer.
    UMCVACompBuffer* compBufPic;
    VAPictureParameterBufferVP8 *picParams
        = (VAPictureParameterBufferVP8*)m_p_video_accelerator->GetCompBuffer(VAPictureParameterBufferType, &compBufPic,
        sizeof(VAPictureParameterBufferVP8));
    //frame width in pixels
    picParams->frame_width = m_frame_info.frameSize.width;
    //frame height in pixels
    picParams->frame_height = m_frame_info.frameSize.height;
    picParams->pic_fields.value = 0;
    if (I_PICTURE == m_frame_info.frameType)
    {
        //0 means key_frame
        picParams->pic_fields.bits.key_frame = 0;
        // Key frames reference nothing.
        picParams->last_ref_frame = VA_INVALID_SURFACE;
        picParams->golden_ref_frame = VA_INVALID_SURFACE;
        picParams->alt_ref_frame = VA_INVALID_SURFACE;
        picParams->out_of_loop_frame = VA_INVALID_SURFACE;
    }
    else // inter frame
    {
        picParams->pic_fields.bits.key_frame = 1;
        // Map the last/golden/altref indices to driver surface IDs.
        picParams->last_ref_frame = m_p_video_accelerator->GetSurfaceID(info.lastrefIndex);
        picParams->golden_ref_frame = m_p_video_accelerator->GetSurfaceID(info.goldIndex);
        picParams->alt_ref_frame = m_p_video_accelerator->GetSurfaceID(info.altrefIndex);
        picParams->out_of_loop_frame = VA_INVALID_SURFACE;
    }
    //same as version in bitstream syntax
    picParams->pic_fields.bits.version = m_frame_info.version;
    //same as segmentation_enabled in bitstream syntax
    picParams->pic_fields.bits.segmentation_enabled = m_frame_info.segmentationEnabled;
    //same as update_mb_segmentation_map in bitstream syntax
    picParams->pic_fields.bits.update_mb_segmentation_map = m_frame_info.updateSegmentMap;
    //same as update_segment_feature_data in bitstream syntax
    picParams->pic_fields.bits.update_segment_feature_data = m_frame_info.updateSegmentData;
    //same as filter_type in bitstream syntax
    picParams->pic_fields.bits.filter_type = m_frame_info.loopFilterType;
    //same as sharpness_level in bitstream syntax
    picParams->pic_fields.bits.sharpness_level = m_frame_info.sharpnessLevel;
    //same as loop_filter_adj_enable in bitstream syntax
    picParams->pic_fields.bits.loop_filter_adj_enable = m_frame_info.mbLoopFilterAdjust;
    //same as mode_ref_lf_delta_update in bitstream syntax
    picParams->pic_fields.bits.mode_ref_lf_delta_update = m_frame_info.modeRefLoopFilterDeltaUpdate;
    //same as sign_bias_golden in bitstream syntax
    picParams->pic_fields.bits.sign_bias_golden = 0;
    //same as sign_bias_alternate in bitstream syntax
    picParams->pic_fields.bits.sign_bias_alternate = 0;
    // Sign biases only exist for inter frames.
    if (I_PICTURE != m_frame_info.frameType)
    {
        picParams->pic_fields.bits.sign_bias_golden = m_refresh_info.refFrameBiasTable[3];
        picParams->pic_fields.bits.sign_bias_alternate = m_refresh_info.refFrameBiasTable[2];
    }
    //same as mb_no_coeff_skip in bitstream syntax
    picParams->pic_fields.bits.mb_no_coeff_skip = m_frame_info.mbSkipEnabled;
    //flag to indicate that loop filter should be disabled
    picParams->pic_fields.bits.loop_filter_disable = 0;
    // Loop filter is off when the level is 0 or for profiles 2/3 (simple
    // filter variants) — see the version check below.
    if (m_frame_info.loopFilterLevel == 0 || (m_frame_info.version == 2 || m_frame_info.version == 3))
    {
        picParams->pic_fields.bits.loop_filter_disable = 1;
    }
    // probabilities of the segment_id decoding tree and same as
    // mb_segment_tree_probs in the spec.
    picParams->mb_segment_tree_probs[0] = m_frame_info.segmentTreeProbabilities[0];
    picParams->mb_segment_tree_probs[1] = m_frame_info.segmentTreeProbabilities[1];
    picParams->mb_segment_tree_probs[2] = m_frame_info.segmentTreeProbabilities[2];
    if (m_frame_info.segmentationEnabled)
    {
        // Per-segment filter level: absolute, or delta on top of the frame
        // level clamped to [0, 63].
        for (int i = 0; i < 4; i++)
        {
            if (m_frame_info.segmentAbsMode)
                picParams->loop_filter_level[i] = m_frame_info.segmentFeatureData[VP8_ALT_LOOP_FILTER][i];
            else
            {
                picParams->loop_filter_level[i] = m_frame_info.loopFilterLevel + m_frame_info.segmentFeatureData[VP8_ALT_LOOP_FILTER][i];
                picParams->loop_filter_level[i] = (picParams->loop_filter_level[i] >= 0) ?
                    ((picParams->loop_filter_level[i] <= 63) ? picParams->loop_filter_level[i] : 63) : 0;
            }
        }
    }
    else
    {
        // No segmentation: one frame-wide filter level in all four slots.
        picParams->loop_filter_level[0] = m_frame_info.loopFilterLevel;
        picParams->loop_filter_level[1] = m_frame_info.loopFilterLevel;
        picParams->loop_filter_level[2] = m_frame_info.loopFilterLevel;
        picParams->loop_filter_level[3] = m_frame_info.loopFilterLevel;
    }
    //loop filter deltas for reference frame based MB level adjustment
    picParams->loop_filter_deltas_ref_frame[0] = m_frame_info.refLoopFilterDeltas[0];
    picParams->loop_filter_deltas_ref_frame[1] = m_frame_info.refLoopFilterDeltas[1];
    picParams->loop_filter_deltas_ref_frame[2] = m_frame_info.refLoopFilterDeltas[2];
    picParams->loop_filter_deltas_ref_frame[3] = m_frame_info.refLoopFilterDeltas[3];
    //loop filter deltas for coding mode based MB level adjustment
    picParams->loop_filter_deltas_mode[0] = m_frame_info.modeLoopFilterDeltas[0];
    picParams->loop_filter_deltas_mode[1] = m_frame_info.modeLoopFilterDeltas[1];
    picParams->loop_filter_deltas_mode[2] = m_frame_info.modeLoopFilterDeltas[2];
    picParams->loop_filter_deltas_mode[3] = m_frame_info.modeLoopFilterDeltas[3];
    //same as prob_skip_false in bitstream syntax
    picParams->prob_skip_false = m_frame_info.skipFalseProb;
    //same as prob_intra in bitstream syntax
    picParams->prob_intra = m_frame_info.intraProb;
    //same as prob_last in bitstream syntax
    picParams->prob_last = m_frame_info.lastProb;
    //same as prob_gf in bitstream syntax
    picParams->prob_gf = m_frame_info.goldProb;
    //list of 4 probabilities of the luma intra prediction mode decoding
    //tree and same as y_mode_probs in frame header
    //list of 3 probabilities of the chroma intra prediction mode decoding
    //tree and same as uv_mode_probs in frame header
    // Key frames use the fixed spec tables; inter frames use the tracked
    // per-stream probabilities.
    const mfxU8 *prob_y_table;
    const mfxU8 *prob_uv_table;
    if (I_PICTURE == m_frame_info.frameType)
    {
        prob_y_table = vp8_kf_mb_mode_y_probs;
        prob_uv_table = vp8_kf_mb_mode_uv_probs;
    }
    else
    {
        prob_y_table = m_frameProbs.mbModeProbY;
        prob_uv_table = m_frameProbs.mbModeProbUV;
    }
    for (uint32_t i = 0; i < VP8_NUM_MB_MODES_Y - 1; i += 1)
    {
        picParams->y_mode_probs[i] = prob_y_table[i];
    }
    for (uint32_t i = 0; i < VP8_NUM_MB_MODES_UV - 1; i += 1)
    {
        picParams->uv_mode_probs[i] = prob_uv_table[i];
    }
    //updated mv decoding probabilities and same as mv_probs in frame header
    for (uint32_t i = 0; i < VP8_NUM_MV_PROBS; i += 1)
    {
        picParams->mv_probs[0][i] = m_frameProbs.mvContexts[0][i];
        picParams->mv_probs[1][i] = m_frameProbs.mvContexts[1][i];
    }
    // Snapshot of the boolean decoder state of the first partition so the
    // hardware can resume exactly where the software header parse stopped.
    picParams->bool_coder_ctx.range = m_boolDecoder[VP8_FIRST_PARTITION].range();
    picParams->bool_coder_ctx.value = (m_boolDecoder[VP8_FIRST_PARTITION].value() >> 24) & 0xff;
    picParams->bool_coder_ctx.count = m_boolDecoder[VP8_FIRST_PARTITION].bitcount() & 0x7;
    compBufPic->SetDataSize(sizeof(VAPictureParameterBufferVP8));
    //////////////////////////////////////////////////////////////////
    // Coefficient probability buffer (raw copy of the tracked table).
    UMCVACompBuffer* compBufCp;
    VAProbabilityDataBufferVP8 *coeffProbs = (VAProbabilityDataBufferVP8*)m_p_video_accelerator->
        GetCompBuffer(VAProbabilityBufferType, &compBufCp, sizeof(VAProbabilityDataBufferVP8));
    std::copy(reinterpret_cast<const char*>(m_frameProbs.coeff_probs),
              reinterpret_cast<const char*>(m_frameProbs.coeff_probs) + sizeof(m_frameProbs.coeff_probs),
              reinterpret_cast<char*>(coeffProbs));
    compBufCp->SetDataSize(sizeof(VAProbabilityDataBufferVP8));
    //////////////////////////////////////////////////////////////////
    // Quantization matrix buffer.
    UMCVACompBuffer* compBufQm;
    VAIQMatrixBufferVP8 *qmTable = (VAIQMatrixBufferVP8*)m_p_video_accelerator->
        GetCompBuffer(VAIQMatrixBufferType, &compBufQm, sizeof(VAIQMatrixBufferVP8));
    if (m_frame_info.segmentationEnabled == 0)
    {
        // when segmentation is disabled, use the first entry 0 for the quantization values
        qmTable->quantization_index[0][1] = (unsigned char)m_quantInfo.ydcQ[0];
        qmTable->quantization_index[0][0] = (unsigned char)m_quantInfo.yacQ[0];
        qmTable->quantization_index[0][4] = (unsigned char)m_quantInfo.uvdcQ[0];
        qmTable->quantization_index[0][5] = (unsigned char)m_quantInfo.uvacQ[0];
        qmTable->quantization_index[0][2] = (unsigned char)m_quantInfo.y2dcQ[0];
        qmTable->quantization_index[0][3] = (unsigned char)m_quantInfo.y2acQ[0];
    }
    else
    {
        // One set of quantizers per segment.
        for (uint32_t i = 0; i < 4; i += 1)
        {
            qmTable->quantization_index[i][1] = (unsigned char)m_quantInfo.ydcQ[i];
            qmTable->quantization_index[i][0] = (unsigned char)m_quantInfo.yacQ[i];
            qmTable->quantization_index[i][4] = (unsigned char)m_quantInfo.uvdcQ[i];
            qmTable->quantization_index[i][5] = (unsigned char)m_quantInfo.uvacQ[i];
            qmTable->quantization_index[i][2] = (unsigned char)m_quantInfo.y2dcQ[i];
            qmTable->quantization_index[i][3] = (unsigned char)m_quantInfo.y2acQ[i];
        }
    }
    compBufQm->SetDataSize(sizeof(VAIQMatrixBufferVP8));
    //////////////////////////////////////////////////////////////////
    // Slice parameter buffer. The uncompressed data chunk (frame tag) is
    // 10 bytes on key frames and 3 bytes on inter frames and is not sent
    // to the hardware.
    uint32_t offset = 0;
    if (I_PICTURE == m_frame_info.frameType)
        offset = 10;
    else
        offset = 3;
    int32_t size = p_bistream->DataLength;
    UMCVACompBuffer* compBufSlice;
    VASliceParameterBufferVP8 *sliceParams
        = (VASliceParameterBufferVP8*)m_p_video_accelerator->
        GetCompBuffer(VASliceParameterBufferType, &compBufSlice, sizeof(VASliceParameterBufferVP8));
#ifdef ANDROID
    // number of bytes in the slice data buffer for the partitions
    sliceParams->slice_data_size = (int32_t)size - offset;
    //offset to the first byte of partition data
    sliceParams->slice_data_offset = 0;
    //see VA_SLICE_DATA_FLAG_XXX definitions
    sliceParams->slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
#endif
    //offset to the first bit of MB from the first byte of partition data
    sliceParams->macroblock_offset = m_frame_info.entropyDecSize;
    // Partitions
    // The first (entropy) partition plus numPartitions residual partitions.
    sliceParams->num_of_partitions = m_frame_info.numPartitions + 1;
    sliceParams->partition_size[0] = m_frame_info.firstPartitionSize;
    for (int32_t i = 1; i < m_frame_info.numPartitions + 1; i += 1)
    {
        sliceParams->partition_size[i] = m_frame_info.partitionSize[i - 1];
    }
    compBufSlice->SetDataSize(sizeof(VASliceParameterBufferVP8));
    //////////////////////////////////////////////////////////////////
    // Slice data buffer: the bitstream minus the uncompressed frame tag.
    UMCVACompBuffer* compBufBs;
    uint8_t *bistreamData = (uint8_t *)m_p_video_accelerator->GetCompBuffer(VASliceDataBufferType, &compBufBs, p_bistream->DataLength - offset);
    uint8_t *pBuffer = (uint8_t*) p_bistream->Data;
    std::copy(pBuffer + offset, pBuffer + size, bistreamData);
    compBufBs->SetDataSize((int32_t)size - offset);
    return sts;
} // Status VP8VideoDecoderHardware::PackHeaders(MediaData* src)
#endif
|
//===-- ProfiledBinary.cpp - Binary decoder ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ProfiledBinary.h"
#include "ErrorHandling.h"
#include "ProfileGenerator.h"
#include "llvm/ADT/Triple.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/Demangle/Demangle.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/TargetSelect.h"
#define DEBUG_TYPE "load-binary"
using namespace llvm;
using namespace sampleprof;
// Command-line switches controlling llvm-profgen's binary loading,
// disassembly dumping and debug-info correlation.
cl::opt<bool> ShowDisassemblyOnly("show-disassembly-only", cl::init(false),
                                  cl::ZeroOrMore,
                                  cl::desc("Print disassembled code."));
cl::opt<bool> ShowSourceLocations("show-source-locations", cl::init(false),
                                  cl::ZeroOrMore,
                                  cl::desc("Print source locations."));
static cl::opt<bool>
    ShowCanonicalFnName("show-canonical-fname", cl::init(false), cl::ZeroOrMore,
                        cl::desc("Print canonical function name."));
static cl::opt<bool> ShowPseudoProbe(
    "show-pseudo-probe", cl::init(false), cl::ZeroOrMore,
    cl::desc("Print pseudo probe section and disassembled info."));
static cl::opt<bool> UseDwarfCorrelation(
    "use-dwarf-correlation", cl::init(false), cl::ZeroOrMore,
    cl::desc("Use dwarf for profile correlation even when binary contains "
             "pseudo probe."));
static cl::opt<std::string>
    DWPPath("dwp", cl::init(""), cl::ZeroOrMore,
            cl::desc("Path of .dwp file. When not specified, it will be "
                     "<binary>.dwp in the same directory as the main binary."));
static cl::list<std::string> DisassembleFunctions(
    "disassemble-functions", cl::CommaSeparated,
    cl::desc("List of functions to print disassembly for. Accept demangled "
             "names only. Only work with show-disassembly-only"));
// Defined elsewhere; shared warning-verbosity switch.
extern cl::opt<bool> ShowDetailedWarning;
namespace llvm {
namespace sampleprof {
// Resolves the registered Target for the object's triple; exits the tool with
// an error message when no matching target was linked in.
static const Target *getTarget(const ObjectFile *Obj) {
  Triple TheTriple = Obj->makeTriple();
  std::string LookupError;
  // An empty arch name means "derive the architecture from the triple".
  std::string ArchName;
  const Target *TheTarget =
      TargetRegistry::lookupTarget(ArchName, TheTriple, LookupError);
  if (TheTarget == nullptr)
    exitWithError(LookupError, Obj->getFileName());
  return TheTarget;
}
void BinarySizeContextTracker::addInstructionForContext(
const SampleContextFrameVector &Context, uint32_t InstrSize) {
ContextTrieNode *CurNode = &RootContext;
bool IsLeaf = true;
for (const auto &Callsite : reverse(Context)) {
StringRef CallerName = Callsite.FuncName;
LineLocation CallsiteLoc = IsLeaf ? LineLocation(0, 0) : Callsite.Location;
CurNode = CurNode->getOrCreateChildContext(CallsiteLoc, CallerName);
IsLeaf = false;
}
CurNode->addFunctionSize(InstrSize);
}
// Returns the byte size recorded for the function identified by Context.
// Walks the reverse context trie for the longest matching path; if no node
// on that path carries a size, falls back to any descendant's size (the same
// inlinee recorded under a different calling context).
uint32_t
BinarySizeContextTracker::getFuncSizeForContext(const SampleContext &Context) {
  ContextTrieNode *CurrNode = &RootContext;
  ContextTrieNode *PrevNode = nullptr;
  SampleContextFrames Frames = Context.getContextFrames();
  int32_t I = Frames.size() - 1;
  Optional<uint32_t> Size;
  // Start from top-level context-less function, traverse down the reverse
  // context trie to find the best/longest match for given context, then
  // retrieve the size.
  while (CurrNode && I >= 0) {
    // Process from leaf function to callers (added to context).
    const auto &ChildFrame = Frames[I--];
    PrevNode = CurrNode;
    CurrNode =
        CurrNode->getChildContext(ChildFrame.Location, ChildFrame.FuncName);
    // Keep the deepest size seen so far; deeper matches are more specific.
    if (CurrNode && CurrNode->getFunctionSize().hasValue())
      Size = CurrNode->getFunctionSize().getValue();
  }
  // If we traversed all nodes along the path of the context and haven't
  // found a size yet, pivot to look for size from sibling nodes, i.e size
  // of inlinee under different context.
  if (!Size.hasValue()) {
    if (!CurrNode)
      CurrNode = PrevNode;
    while (!Size.hasValue() && CurrNode &&
           !CurrNode->getAllChildContext().empty()) {
      CurrNode = &CurrNode->getAllChildContext().begin()->second;
      if (CurrNode->getFunctionSize().hasValue())
        Size = CurrNode->getFunctionSize().getValue();
    }
  }
  assert(Size.hasValue() && "We should at least find one context size.");
  return Size.getValue();
}
// Entry point: walks every top-level function in the decoded probe inline
// tree with an initially empty probe frame stack.
void BinarySizeContextTracker::trackInlineesOptimizedAway(
    MCPseudoProbeDecoder &ProbeDecoder) {
  ProbeFrameStack EmptyContext;
  for (const auto &Root : ProbeDecoder.getDummyInlineRoot().getChildren())
    trackInlineesOptimizedAway(ProbeDecoder, *Root.second.get(), EmptyContext);
}
// DFS over the decoded pseudo-probe inline tree. For every node that still
// has probes, records a zero size under its context so the context is
// treated as "known" even when all its code was optimized away.
// Note: ProbeContext is a shared stack mutated in place (push on entry, back
// patched with each child's callsite, pop on exit).
void BinarySizeContextTracker::trackInlineesOptimizedAway(
    MCPseudoProbeDecoder &ProbeDecoder,
    MCDecodedPseudoProbeInlineTree &ProbeNode, ProbeFrameStack &ProbeContext) {
  StringRef FuncName =
      ProbeDecoder.getFuncDescForGUID(ProbeNode.Guid)->FuncName;
  // Callsite offset 0 as placeholder; patched below before each recursion.
  ProbeContext.emplace_back(FuncName, 0);
  // This ProbeContext has a probe, so it has code before inlining and
  // optimization. Make sure we mark its size as known.
  if (!ProbeNode.getProbes().empty()) {
    ContextTrieNode *SizeContext = &RootContext;
    for (auto &ProbeFrame : reverse(ProbeContext)) {
      StringRef CallerName = ProbeFrame.first;
      LineLocation CallsiteLoc(ProbeFrame.second, 0);
      SizeContext =
          SizeContext->getOrCreateChildContext(CallsiteLoc, CallerName);
    }
    // Add 0 size to make known.
    SizeContext->addFunctionSize(0);
  }
  // DFS down the probe inline tree
  for (const auto &ChildNode : ProbeNode.getChildren()) {
    InlineSite Location = ChildNode.first;
    ProbeContext.back().second = std::get<1>(Location);
    trackInlineesOptimizedAway(ProbeDecoder, *ChildNode.second.get(),
                               ProbeContext);
  }
  ProbeContext.pop_back();
}
// Counts functions none of whose ranges starts at a recognized function
// entry (typically a symbol-table vs DWARF name mismatch) and emits a
// summary warning; with -show-detailed-warning, also warns per function.
// Fix: renamed `hasFuncEntry` to `HasFuncEntry` to match the LLVM
// UpperCamelCase variable convention used throughout this file.
void ProfiledBinary::warnNoFuncEntry() {
  uint64_t NoFuncEntryNum = 0;
  for (auto &F : BinaryFunctions) {
    if (F.second.Ranges.empty())
      continue;
    bool HasFuncEntry = false;
    for (auto &R : F.second.Ranges) {
      if (FuncRange *FR = findFuncRangeForStartOffset(R.first)) {
        if (FR->IsFuncEntry) {
          HasFuncEntry = true;
          break;
        }
      }
    }
    if (!HasFuncEntry) {
      NoFuncEntryNum++;
      if (ShowDetailedWarning)
        WithColor::warning()
            << "Failed to determine function entry for " << F.first
            << " due to inconsistent name from symbol table and dwarf info.\n";
    }
  }
  emitWarningSummary(NoFuncEntryNum, BinaryFunctions.size(),
                     "of functions failed to determine function entry due to "
                     "inconsistent name from symbol table and dwarf info.");
}
// Loads and prepares the profiled binary: validates it is an x86 ELF image,
// establishes preferred text segment addresses, decodes pseudo probes (when
// requested), loads DWARF symbols (from a separate debug binary if given),
// disassembles the text sections, and infers prolog/epilog offsets.
// The step order matters: addresses must be set before probe decoding and
// symbols must be loaded before disassembly.
void ProfiledBinary::load() {
  // Attempt to open the binary.
  OwningBinary<Binary> OBinary = unwrapOrError(createBinary(Path), Path);
  Binary &ExeBinary = *OBinary.getBinary();
  auto *Obj = dyn_cast<ELFObjectFileBase>(&ExeBinary);
  if (!Obj)
    exitWithError("not a valid Elf image", Path);
  TheTriple = Obj->makeTriple();
  // Current only support X86
  if (!TheTriple.isX86())
    exitWithError("unsupported target", TheTriple.getTriple());
  LLVM_DEBUG(dbgs() << "Loading " << Path << "\n");
  // Find the preferred load address for text sections.
  setPreferredTextSegmentAddresses(Obj);
  checkPseudoProbe(Obj);
  if (ShowDisassemblyOnly)
    decodePseudoProbe(Obj);
  // Load debug info of subprograms from DWARF section.
  // If path of debug info binary is specified, use the debug info from it,
  // otherwise use the debug info from the executable binary.
  if (!DebugBinaryPath.empty()) {
    OwningBinary<Binary> DebugPath =
        unwrapOrError(createBinary(DebugBinaryPath), DebugBinaryPath);
    loadSymbolsFromDWARF(*cast<ObjectFile>(DebugPath.getBinary()));
  } else {
    loadSymbolsFromDWARF(*cast<ObjectFile>(&ExeBinary));
  }
  // Disassemble the text sections.
  disassemble(Obj);
  // Use function start and return address to infer prolog and epilog
  ProEpilogTracker.inferPrologOffsets(StartOffset2FuncRangeMap);
  ProEpilogTracker.inferEpilogOffsets(RetOffsets);
  warnNoFuncEntry();
  // TODO: decode other sections.
}
// Returns true when the two addresses share the same inline calling context.
// The leaf frame's own location is excluded from the comparison because it
// identifies a position *inside* the leaf, not part of the calling context.
bool ProfiledBinary::inlineContextEqual(uint64_t Address1, uint64_t Address2) {
  const SampleContextFrameVector &Context1 =
      getFrameLocationStack(virtualAddrToOffset(Address1));
  const SampleContextFrameVector &Context2 =
      getFrameLocationStack(virtualAddrToOffset(Address2));
  // Contexts without debug info never compare equal.
  if (Context1.empty() || Context1.size() != Context2.size())
    return false;
  // Compare everything except the final (leaf) frame.
  return std::equal(Context1.begin(), Context1.begin() + Context1.size() - 1,
                    Context2.begin(), Context2.begin() + Context2.size() - 1);
}
// Expand a physical call stack (root-to-leaf addresses) into a symbolized
// context frame vector, applying base-discriminator decoding, recursion
// compression and context trimming. WasLeafInlined is set when the leaf
// address carries inlined frames. Returns an empty vector if any address
// lacks debug line info.
SampleContextFrameVector
ProfiledBinary::getExpandedContext(const SmallVectorImpl<uint64_t> &Stack,
                                   bool &WasLeafInlined) {
  SampleContextFrameVector ContextVec;
  // Process from frame root to leaf
  for (auto Address : Stack) {
    uint64_t Offset = virtualAddrToOffset(Address);
    const SampleContextFrameVector &ExpandedContext =
        getFrameLocationStack(Offset);
    // An instruction without a valid debug line will be ignored by sample
    // processing
    if (ExpandedContext.empty())
      return SampleContextFrameVector();
    // Set WasLeafInlined to the size of inlined frame count for the last
    // address which is leaf
    WasLeafInlined = (ExpandedContext.size() > 1);
    ContextVec.append(ExpandedContext);
  }

  // Replace with decoded base discriminator
  for (auto &Frame : ContextVec) {
    Frame.Location.Discriminator = ProfileGeneratorBase::getBaseDiscriminator(
        Frame.Location.Discriminator, UseFSDiscriminator);
  }

  assert(ContextVec.size() && "Context length should be at least 1");

  // Compress the context string except for the leaf frame
  // The leaf's location is within itself, so zero it out and re-append it
  // after compression/trimming so those passes only see calling context.
  auto LeafFrame = ContextVec.back();
  LeafFrame.Location = LineLocation(0, 0);
  ContextVec.pop_back();
  CSProfileGenerator::compressRecursionContext(ContextVec);
  CSProfileGenerator::trimContext(ContextVec);
  ContextVec.push_back(LeafFrame);
  return ContextVec;
}
// Collect page-aligned preferred load addresses (p_vaddr) and file offsets
// of all executable PT_LOAD segments, plus the first loadable address.
// Exits with an error if no executable segment exists.
template <class ELFT>
void ProfiledBinary::setPreferredTextSegmentAddresses(const ELFFile<ELFT> &Obj,
                                                      StringRef FileName) {
  const auto &PhdrRange = unwrapOrError(Obj.program_headers(), FileName);
  // FIXME: This should be the page size of the system running profiling.
  // However such info isn't available at post-processing time, assuming
  // 4K page now. Note that we don't use EXEC_PAGESIZE from <linux/param.h>
  // because we may build the tools on non-linux.
  uint32_t PageSize = 0x1000;
  for (const typename ELFT::Phdr &Phdr : PhdrRange) {
    if (Phdr.p_type == ELF::PT_LOAD) {
      if (!FirstLoadableAddress)
        FirstLoadableAddress = Phdr.p_vaddr & ~(PageSize - 1U);
      if (Phdr.p_flags & ELF::PF_X) {
        // Segments will always be loaded at a page boundary.
        PreferredTextSegmentAddresses.push_back(Phdr.p_vaddr &
                                                ~(PageSize - 1U));
        TextSegmentOffsets.push_back(Phdr.p_offset & ~(PageSize - 1U));
      }
    }
  }

  if (PreferredTextSegmentAddresses.empty())
    exitWithError("no executable segment found", FileName);
}
// Dispatch to the templated implementation based on the concrete ELF class
// (32/64-bit, little/big endian).
void ProfiledBinary::setPreferredTextSegmentAddresses(
    const ELFObjectFileBase *Obj) {
  if (const auto *ELFObj = dyn_cast<ELF32LEObjectFile>(Obj))
    setPreferredTextSegmentAddresses(ELFObj->getELFFile(), Obj->getFileName());
  else if (const auto *ELFObj = dyn_cast<ELF32BEObjectFile>(Obj))
    setPreferredTextSegmentAddresses(ELFObj->getELFFile(), Obj->getFileName());
  else if (const auto *ELFObj = dyn_cast<ELF64LEObjectFile>(Obj))
    setPreferredTextSegmentAddresses(ELFObj->getELFFile(), Obj->getFileName());
  else if (const auto *ELFObj = dyn_cast<ELF64BEObjectFile>(Obj))
    // Previously used cast<>, which asserts (or goes unchecked in release
    // builds) and made the llvm_unreachable branch dead code; dyn_cast keeps
    // the checked-dispatch chain consistent across all four ELF classes.
    setPreferredTextSegmentAddresses(ELFObj->getELFFile(), Obj->getFileName());
  else
    llvm_unreachable("invalid ELF object format");
}
void ProfiledBinary::checkPseudoProbe(const ELFObjectFileBase *Obj) {
if (UseDwarfCorrelation)
return;
bool HasProbeDescSection = false;
bool HasPseudoProbeSection = false;
StringRef FileName = Obj->getFileName();
for (section_iterator SI = Obj->section_begin(), SE = Obj->section_end();
SI != SE; ++SI) {
const SectionRef &Section = *SI;
StringRef SectionName = unwrapOrError(Section.getName(), FileName);
if (SectionName == ".pseudo_probe_desc") {
HasProbeDescSection = true;
} else if (SectionName == ".pseudo_probe") {
HasPseudoProbeSection = true;
}
}
// set UsePseudoProbes flag, used for PerfReader
UsePseudoProbes = HasProbeDescSection && HasPseudoProbeSection;
}
// Decode the .pseudo_probe_desc and .pseudo_probe sections into ProbeDecoder.
// Decoding is restricted to profiled functions' GUIDs unless running in
// --show-disassembly-only mode (empty ProfiledGuids means decode everything).
void ProfiledBinary::decodePseudoProbe(const ELFObjectFileBase *Obj) {
  if (!UsePseudoProbes)
    return;

  std::unordered_set<uint64_t> ProfiledGuids;
  if (!ShowDisassemblyOnly)
    for (auto *F : ProfiledFunctions)
      ProfiledGuids.insert(Function::getGUID(F->FuncName));

  StringRef FileName = Obj->getFileName();
  for (section_iterator SI = Obj->section_begin(), SE = Obj->section_end();
       SI != SE; ++SI) {
    const SectionRef &Section = *SI;
    StringRef SectionName = unwrapOrError(Section.getName(), FileName);

    if (SectionName == ".pseudo_probe_desc") {
      StringRef Contents = unwrapOrError(Section.getContents(), FileName);
      if (!ProbeDecoder.buildGUID2FuncDescMap(
              reinterpret_cast<const uint8_t *>(Contents.data()),
              Contents.size()))
        exitWithError(
            "Pseudo Probe decoder fail in .pseudo_probe_desc section");
    } else if (SectionName == ".pseudo_probe") {
      StringRef Contents = unwrapOrError(Section.getContents(), FileName);
      if (!ProbeDecoder.buildAddress2ProbeMap(
              reinterpret_cast<const uint8_t *>(Contents.data()),
              Contents.size(), ProfiledGuids))
        exitWithError("Pseudo Probe decoder fail in .pseudo_probe section");
    }
  }

  // Build TopLevelProbeFrameMap to track size for optimized inlinees when probe
  // is available
  if (TrackFuncContextSize) {
    for (const auto &Child : ProbeDecoder.getDummyInlineRoot().getChildren()) {
      auto *Frame = Child.second.get();
      StringRef FuncName =
          ProbeDecoder.getFuncDescForGUID(Frame->Guid)->FuncName;
      TopLevelProbeFrameMap[FuncName] = Frame;
    }
  }

  if (ShowPseudoProbe)
    ProbeDecoder.printGUID2FuncDescMap(outs());
}
void ProfiledBinary::decodePseudoProbe() {
OwningBinary<Binary> OBinary = unwrapOrError(createBinary(Path), Path);
Binary &ExeBinary = *OBinary.getBinary();
auto *Obj = dyn_cast<ELFObjectFileBase>(&ExeBinary);
decodePseudoProbe(Obj);
}
// Mark the function range containing Offset as a genuine function entry when
// the ELF-supplied symbol name confirms the DWARF-based name, or when the
// function has only a single range.
void ProfiledBinary::setIsFuncEntry(uint64_t Offset, StringRef RangeSymName) {
  // Note that the start offset of each ELF section can be a non-function
  // symbol, we need to binary search for the start of a real function range.
  auto *FuncRange = findFuncRangeForOffset(Offset);
  // Skip external function symbol.
  if (!FuncRange)
    return;

  // Set IsFuncEntry to true if there is only one range in the function or the
  // RangeSymName from ELF is equal to its DWARF-based function name.
  if (FuncRange->Func->Ranges.size() == 1 ||
      (!FuncRange->IsFuncEntry && FuncRange->getFuncName() == RangeSymName))
    FuncRange->IsFuncEntry = true;
}
// Disassemble the code covered by the symbol at index SI within Section and
// populate the offset-indexed instruction-size/call/return/branch maps.
// Optionally prints disassembly, pseudo probes and source locations.
// Undecodable byte runs produce a warning but are not fatal; returns true.
bool ProfiledBinary::dissassembleSymbol(std::size_t SI, ArrayRef<uint8_t> Bytes,
                                        SectionSymbolsTy &Symbols,
                                        const SectionRef &Section) {
  std::size_t SE = Symbols.size();
  uint64_t SectionOffset = Section.getAddress() - getPreferredBaseAddress();
  uint64_t SectSize = Section.getSize();
  uint64_t StartOffset = Symbols[SI].Addr - getPreferredBaseAddress();
  // This symbol's code extends to the next symbol, or to the end of the
  // section for the last symbol.
  uint64_t NextStartOffset =
      (SI + 1 < SE) ? Symbols[SI + 1].Addr - getPreferredBaseAddress()
                    : SectionOffset + SectSize;
  setIsFuncEntry(StartOffset,
                 FunctionSamples::getCanonicalFnName(Symbols[SI].Name));

  StringRef SymbolName =
      ShowCanonicalFnName
          ? FunctionSamples::getCanonicalFnName(Symbols[SI].Name)
          : Symbols[SI].Name;
  // Honor --disassemble-functions filtering when only showing disassembly.
  bool ShowDisassembly =
      ShowDisassemblyOnly && (DisassembleFunctionSet.empty() ||
                              DisassembleFunctionSet.count(SymbolName));
  if (ShowDisassembly)
    outs() << '<' << SymbolName << ">:\n";

  auto WarnInvalidInsts = [](uint64_t Start, uint64_t End) {
    WithColor::warning() << "Invalid instructions at "
                         << format("%8" PRIx64, Start) << " - "
                         << format("%8" PRIx64, End) << "\n";
  };

  uint64_t Offset = StartOffset;
  // Length of the run of undecodable bytes ending just before Offset; used
  // to emit one warning per consecutive invalid range instead of per byte.
  uint64_t InvalidInstLength = 0;
  while (Offset < NextStartOffset) {
    MCInst Inst;
    uint64_t Size;
    // Disassemble an instruction.
    bool Disassembled =
        DisAsm->getInstruction(Inst, Size, Bytes.slice(Offset - SectionOffset),
                               Offset + getPreferredBaseAddress(), nulls());
    // Treat an undecodable byte as size 1 so the scan always advances.
    if (Size == 0)
      Size = 1;

    if (ShowDisassembly) {
      if (ShowPseudoProbe) {
        ProbeDecoder.printProbeForAddress(outs(),
                                          Offset + getPreferredBaseAddress());
      }
      outs() << format("%8" PRIx64 ":", Offset + getPreferredBaseAddress());
      size_t Start = outs().tell();
      if (Disassembled)
        IPrinter->printInst(&Inst, Offset + Size, "", *STI.get(), outs());
      else
        outs() << "\t<unknown>";
      if (ShowSourceLocations) {
        // Pad the mnemonic column before appending the source location.
        unsigned Cur = outs().tell() - Start;
        if (Cur < 40)
          outs().indent(40 - Cur);
        InstructionPointer IP(this, Offset);
        outs() << getReversedLocWithContext(
            symbolize(IP, ShowCanonicalFnName, ShowPseudoProbe));
      }
      outs() << "\n";
    }

    if (Disassembled) {
      const MCInstrDesc &MCDesc = MII->get(Inst.getOpcode());

      // Record instruction size.
      Offset2InstSizeMap[Offset] = Size;

      // Populate address maps.
      CodeAddrOffsets.push_back(Offset);
      if (MCDesc.isCall())
        CallOffsets.insert(Offset);
      else if (MCDesc.isReturn())
        RetOffsets.insert(Offset);
      else if (MCDesc.isBranch())
        BranchOffsets.insert(Offset);

      // Decoding resumed: flush any pending invalid-range warning.
      if (InvalidInstLength) {
        WarnInvalidInsts(Offset - InvalidInstLength, Offset - 1);
        InvalidInstLength = 0;
      }
    } else {
      InvalidInstLength += Size;
    }

    Offset += Size;
  }

  // Report a trailing invalid range that reaches the end of the symbol.
  if (InvalidInstLength)
    WarnInvalidInsts(Offset - InvalidInstLength, Offset - 1);

  if (ShowDisassembly)
    outs() << "\n";

  return true;
}
// Construct all MC-layer components (register, asm, subtarget and
// instruction info, disassembler, instruction printer) for the binary's
// target; exits with an error if any component is unavailable.
void ProfiledBinary::setUpDisassembler(const ELFObjectFileBase *Obj) {
  const Target *TheTarget = getTarget(Obj);
  std::string TripleName = TheTriple.getTriple();
  StringRef FileName = Obj->getFileName();

  MRI.reset(TheTarget->createMCRegInfo(TripleName));
  if (!MRI)
    exitWithError("no register info for target " + TripleName, FileName);

  MCTargetOptions MCOptions;
  AsmInfo.reset(TheTarget->createMCAsmInfo(*MRI, TripleName, MCOptions));
  if (!AsmInfo)
    exitWithError("no assembly info for target " + TripleName, FileName);

  SubtargetFeatures Features = Obj->getFeatures();
  STI.reset(
      TheTarget->createMCSubtargetInfo(TripleName, "", Features.getString()));
  if (!STI)
    exitWithError("no subtarget info for target " + TripleName, FileName);

  MII.reset(TheTarget->createMCInstrInfo());
  if (!MII)
    exitWithError("no instruction info for target " + TripleName, FileName);

  MCContext Ctx(Triple(TripleName), AsmInfo.get(), MRI.get(), STI.get());
  std::unique_ptr<MCObjectFileInfo> MOFI(
      TheTarget->createMCObjectFileInfo(Ctx, /*PIC=*/false));
  Ctx.setObjectFileInfo(MOFI.get());
  DisAsm.reset(TheTarget->createMCDisassembler(*STI, Ctx));
  if (!DisAsm)
    exitWithError("no disassembler for target " + TripleName, FileName);

  MIA.reset(TheTarget->createMCInstrAnalysis(MII.get()));

  int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
  IPrinter.reset(TheTarget->createMCInstPrinter(
      Triple(TripleName), AsmPrinterVariant, *AsmInfo, *MII, *MRI));
  // Print branch targets as absolute addresses to ease cross-referencing.
  IPrinter->setPrintBranchImmAsAddress(true);
}
// Disassemble every text section of the binary, symbol by symbol, then scan
// data sections for the FS-discriminator marker symbol.
void ProfiledBinary::disassemble(const ELFObjectFileBase *Obj) {
  // Set up disassembler and related components.
  setUpDisassembler(Obj);

  // Create a mapping from virtual address to symbol name. The symbols in text
  // sections are the candidates to dissassemble.
  std::map<SectionRef, SectionSymbolsTy> AllSymbols;
  StringRef FileName = Obj->getFileName();
  for (const SymbolRef &Symbol : Obj->symbols()) {
    const uint64_t Addr = unwrapOrError(Symbol.getAddress(), FileName);
    const StringRef Name = unwrapOrError(Symbol.getName(), FileName);
    section_iterator SecI = unwrapOrError(Symbol.getSection(), FileName);
    // Symbols without a defining section (e.g. undefined/external) are
    // skipped; only in-section symbols can be disassembled.
    if (SecI != Obj->section_end())
      AllSymbols[*SecI].push_back(SymbolInfoTy(Addr, Name, ELF::STT_NOTYPE));
  }

  // Sort all the symbols. Use a stable sort to stabilize the output.
  for (std::pair<const SectionRef, SectionSymbolsTy> &SecSyms : AllSymbols)
    stable_sort(SecSyms.second);

  DisassembleFunctionSet.insert(DisassembleFunctions.begin(),
                                DisassembleFunctions.end());
  assert((DisassembleFunctionSet.empty() || ShowDisassemblyOnly) &&
         "Functions to disassemble should be only specified together with "
         "--show-disassembly-only");

  if (ShowDisassemblyOnly)
    outs() << "\nDisassembly of " << FileName << ":\n";

  // Dissassemble a text section.
  for (section_iterator SI = Obj->section_begin(), SE = Obj->section_end();
       SI != SE; ++SI) {
    const SectionRef &Section = *SI;
    if (!Section.isText())
      continue;

    uint64_t ImageLoadAddr = getPreferredBaseAddress();
    uint64_t SectionOffset = Section.getAddress() - ImageLoadAddr;
    uint64_t SectSize = Section.getSize();
    if (!SectSize)
      continue;

    // Register the text section.
    TextSections.insert({SectionOffset, SectSize});

    StringRef SectionName = unwrapOrError(Section.getName(), FileName);

    if (ShowDisassemblyOnly) {
      outs() << "\nDisassembly of section " << SectionName;
      outs() << " [" << format("0x%" PRIx64, Section.getAddress()) << ", "
             << format("0x%" PRIx64, Section.getAddress() + SectSize)
             << "]:\n\n";
    }

    // PLT stubs carry no profile-relevant user code; skip them.
    if (SectionName == ".plt")
      continue;

    // Get the section data.
    ArrayRef<uint8_t> Bytes =
        arrayRefFromStringRef(unwrapOrError(Section.getContents(), FileName));

    // Get the list of all the symbols in this section.
    SectionSymbolsTy &Symbols = AllSymbols[Section];

    // Disassemble symbol by symbol.
    for (std::size_t SI = 0, SE = Symbols.size(); SI != SE; ++SI) {
      if (!dissassembleSymbol(SI, Bytes, Symbols, Section))
        exitWithError("disassembling error", FileName);
    }
  }

  // Dissassemble rodata section to check if FS discriminator symbol exists.
  checkUseFSDiscriminator(Obj, AllSymbols);
}
void ProfiledBinary::checkUseFSDiscriminator(
const ELFObjectFileBase *Obj,
std::map<SectionRef, SectionSymbolsTy> &AllSymbols) {
const char *FSDiscriminatorVar = "__llvm_fs_discriminator__";
for (section_iterator SI = Obj->section_begin(), SE = Obj->section_end();
SI != SE; ++SI) {
const SectionRef &Section = *SI;
if (!Section.isData() || Section.getSize() == 0)
continue;
SectionSymbolsTy &Symbols = AllSymbols[Section];
for (std::size_t SI = 0, SE = Symbols.size(); SI != SE; ++SI) {
if (Symbols[SI].Name == FSDiscriminatorVar) {
UseFSDiscriminator = true;
return;
}
}
}
}
// Walk all DIEs of a compilation unit and record each subprogram's address
// ranges into BinaryFunctions (grouped by function name) and into
// StartOffset2FuncRangeMap (keyed by range start offset).
void ProfiledBinary::loadSymbolsFromDWARFUnit(DWARFUnit &CompilationUnit) {
  for (const auto &DieInfo : CompilationUnit.dies()) {
    llvm::DWARFDie Die(&CompilationUnit, &DieInfo);

    if (!Die.isSubprogramDIE())
      continue;
    // Prefer the mangled linkage name; fall back to the short name.
    auto Name = Die.getName(llvm::DINameKind::LinkageName);
    if (!Name)
      Name = Die.getName(llvm::DINameKind::ShortName);
    if (!Name)
      continue;

    auto RangesOrError = Die.getAddressRanges();
    if (!RangesOrError)
      continue;
    const DWARFAddressRangesVector &Ranges = RangesOrError.get();

    if (Ranges.empty())
      continue;

    // Different DWARF symbols can have same function name, search or create
    // BinaryFunction indexed by the name.
    auto Ret = BinaryFunctions.emplace(Name, BinaryFunction());
    auto &Func = Ret.first->second;
    if (Ret.second)
      Func.FuncName = Ret.first->first;

    for (const auto &Range : Ranges) {
      uint64_t FuncStart = Range.LowPC;
      uint64_t FuncSize = Range.HighPC - FuncStart;

      // Skip empty ranges and addresses below the preferred image base.
      if (FuncSize == 0 || FuncStart < getPreferredBaseAddress())
        continue;

      uint64_t StartOffset = FuncStart - getPreferredBaseAddress();
      uint64_t EndOffset = Range.HighPC - getPreferredBaseAddress();

      // We may want to know all ranges for one function. Here group the
      // ranges and store them into BinaryFunction.
      Func.Ranges.emplace_back(StartOffset, EndOffset);

      auto R = StartOffset2FuncRangeMap.emplace(StartOffset, FuncRange());
      if (R.second) {
        FuncRange &FRange = R.first->second;
        FRange.Func = &Func;
        FRange.StartOffset = StartOffset;
        FRange.EndOffset = EndOffset;
      } else {
        // Two symbols sharing a start address indicate inconsistent DWARF;
        // keep the first registration and warn about the collision.
        WithColor::warning()
            << "Duplicated symbol start address at "
            << format("%8" PRIx64, StartOffset + getPreferredBaseAddress())
            << " " << R.first->second.getFuncName() << " and " << Name << "\n";
      }
    }
  }
}
// Load subprogram symbols from all compile units of Obj's DWARF, including
// split-DWARF (DWO) units when the referenced .o/.dwo/.dwp files can be
// resolved; warns when split debug info is missing.
void ProfiledBinary::loadSymbolsFromDWARF(ObjectFile &Obj) {
  auto DebugContext = llvm::DWARFContext::create(
      Obj, DWARFContext::ProcessDebugRelocations::Process, nullptr, DWPPath);
  if (!DebugContext)
    exitWithError("Error creating the debug info context", Path);

  for (const auto &CompilationUnit : DebugContext->compile_units())
    loadSymbolsFromDWARFUnit(*CompilationUnit.get());

  // Handles DWO sections that can either be in .o, .dwo or .dwp files.
  for (const auto &CompilationUnit : DebugContext->compile_units()) {
    DWARFUnit *const DwarfUnit = CompilationUnit.get();
    if (llvm::Optional<uint64_t> DWOId = DwarfUnit->getDWOId()) {
      DWARFUnit *DWOCU = DwarfUnit->getNonSkeletonUnitDIE(false).getDwarfUnit();
      if (!DWOCU->isDWOUnit()) {
        // Split unit could not be resolved; report its expected name.
        std::string DWOName = dwarf::toString(
            DwarfUnit->getUnitDIE().find(
                {dwarf::DW_AT_dwo_name, dwarf::DW_AT_GNU_dwo_name}),
            "");
        WithColor::warning()
            << "DWO debug information for " << DWOName
            << " was not loaded. Please check the .o, .dwo or .dwp path.\n";
        continue;
      }
      loadSymbolsFromDWARFUnit(*DWOCU);
    }
  }

  if (BinaryFunctions.empty())
    WithColor::warning() << "Loading of DWARF info completed, but no binary "
                            "functions have been retrieved.\n";
}
// Add the name of every DWARF-derived function range to SymbolList.
void ProfiledBinary::populateSymbolListFromDWARF(
    ProfileSymbolList &SymbolList) {
  for (const auto &Entry : StartOffset2FuncRangeMap)
    SymbolList.add(Entry.second.getFuncName());
}
// Create the LLVMSymbolizer used to map addresses back to inlined source
// locations. Mangled (linkage) names are kept as-is and only DWARF is used
// for symbolization (symbol-table lookup disabled).
void ProfiledBinary::setupSymbolizer() {
  symbolize::LLVMSymbolizer::Options SymbolizerOpts;
  SymbolizerOpts.PrintFunctions =
      DILineInfoSpecifier::FunctionNameKind::LinkageName;
  SymbolizerOpts.Demangle = false;
  SymbolizerOpts.DefaultArch = TheTriple.getArchName().str();
  SymbolizerOpts.UseSymbolTable = false;
  SymbolizerOpts.RelativeAddresses = false;
  SymbolizerOpts.DWPName = DWPPath;
  Symbolizer = std::make_unique<symbolize::LLVMSymbolizer>(SymbolizerOpts);
}
// Symbolize the instruction at IP into a root-to-leaf stack of
// (function, line-offset, discriminator) frames. Line offsets are relative
// to the function's start line; with UseProbeDiscriminator the probe index
// is extracted from the DWARF discriminator and the discriminator cleared.
SampleContextFrameVector ProfiledBinary::symbolize(const InstructionPointer &IP,
                                                   bool UseCanonicalFnName,
                                                   bool UseProbeDiscriminator) {
  assert(this == IP.Binary &&
         "Binary should only symbolize its own instruction");
  auto Addr = object::SectionedAddress{IP.Offset + getPreferredBaseAddress(),
                                       object::SectionedAddress::UndefSection};
  DIInliningInfo InlineStack = unwrapOrError(
      Symbolizer->symbolizeInlinedCode(SymbolizerPath.str(), Addr),
      SymbolizerPath);

  SampleContextFrameVector CallStack;
  // Frame 0 is the innermost (leaf) frame; iterate outermost-first so the
  // resulting stack is root-to-leaf.
  for (int32_t I = InlineStack.getNumberOfFrames() - 1; I >= 0; I--) {
    const auto &CallerFrame = InlineStack.getFrame(I);
    if (CallerFrame.FunctionName == "<invalid>")
      break;

    StringRef FunctionName(CallerFrame.FunctionName);
    if (UseCanonicalFnName)
      FunctionName = FunctionSamples::getCanonicalFnName(FunctionName);

    uint32_t Discriminator = CallerFrame.Discriminator;
    uint32_t LineOffset = (CallerFrame.Line - CallerFrame.StartLine) & 0xffff;
    if (UseProbeDiscriminator) {
      LineOffset =
          PseudoProbeDwarfDiscriminator::extractProbeIndex(Discriminator);
      Discriminator = 0;
    }

    LineLocation Line(LineOffset, Discriminator);
    // Intern the name so the StringRef stored in the frame stays valid.
    auto It = NameStrings.insert(FunctionName.str());
    CallStack.emplace_back(*It.first, Line);
  }

  return CallStack;
}
// Accumulate the size of every instruction in [StartOffset, EndOffset) into
// FuncSizeTracker, keyed by the instruction's symbolized inline context.
void ProfiledBinary::computeInlinedContextSizeForRange(uint64_t StartOffset,
                                                       uint64_t EndOffset) {
  uint64_t RangeBegin = offsetToVirtualAddr(StartOffset);
  uint64_t RangeEnd = offsetToVirtualAddr(EndOffset);
  // Round RangeBegin up to the next valid instruction address if it does not
  // land on an instruction boundary.
  InstructionPointer IP(this, RangeBegin, true);

  if (IP.Address != RangeBegin)
    WithColor::warning() << "Invalid start instruction at "
                         << format("%8" PRIx64, RangeBegin) << "\n";

  if (IP.Address >= RangeEnd)
    return;

  do {
    uint64_t Offset = virtualAddrToOffset(IP.Address);
    const SampleContextFrameVector &SymbolizedCallStack =
        getFrameLocationStack(Offset, UsePseudoProbes);
    uint64_t Size = Offset2InstSizeMap[Offset];

    // Record instruction size for the corresponding context
    FuncSizeTracker.addInstructionForContext(SymbolizedCallStack, Size);

  } while (IP.advance() && IP.Address < RangeEnd);
}
// Compute context-sensitive code size for every range of Func, and for
// probe-based binaries also account for inlinees that were fully optimized
// away but still have probes recorded under this function.
void ProfiledBinary::computeInlinedContextSizeForFunc(
    const BinaryFunction *Func) {
  // Note that a function can be spilt into multiple ranges, so compute for all
  // ranges of the function.
  for (const auto &Range : Func->Ranges)
    computeInlinedContextSizeForRange(Range.first, Range.second);

  // Track optimized-away inlinee for probed binary. A function inlined and then
  // optimized away should still have their probes left over in places.
  if (usePseudoProbes()) {
    auto I = TopLevelProbeFrameMap.find(Func->FuncName);
    if (I != TopLevelProbeFrameMap.end()) {
      BinarySizeContextTracker::ProbeFrameStack ProbeContext;
      FuncSizeTracker.trackInlineesOptimizedAway(ProbeDecoder, *I->second,
                                                 ProbeContext);
    }
  }
}
// Bind an instruction pointer to Binary at Address. With RoundToNext set,
// an address that does not fall on a decoded instruction start is rounded up
// to the next valid one, or to UINT64_MAX past the end of the code.
InstructionPointer::InstructionPointer(const ProfiledBinary *Binary,
                                       uint64_t Address, bool RoundToNext)
    : Binary(Binary), Address(Address) {
  Index = Binary->getIndexForAddr(Address);
  if (RoundToNext) {
    // we might get address which is not the code
    // it should round to the next valid address
    if (Index >= Binary->getCodeOffsetsSize())
      this->Address = UINT64_MAX;
    else
      this->Address = Binary->getAddressforIndex(Index);
  }
}
// Move to the next disassembled instruction. Returns false and saturates
// Address at UINT64_MAX when stepping past the last instruction.
bool InstructionPointer::advance() {
  ++Index;
  if (Index < Binary->getCodeOffsetsSize()) {
    Address = Binary->getAddressforIndex(Index);
    return true;
  }
  Address = UINT64_MAX;
  return false;
}
// Move to the previous disassembled instruction. Returns false and clamps
// Address to 0 when already at the first instruction.
bool InstructionPointer::backward() {
  if (Index > 0) {
    --Index;
    Address = Binary->getAddressforIndex(Index);
    return true;
  }
  Address = 0;
  return false;
}
// Re-seat the pointer at an arbitrary address and refresh the cached index.
void InstructionPointer::update(uint64_t Addr) {
  Index = Binary->getIndexForAddr(Addr);
  Address = Addr;
}
} // end namespace sampleprof
} // end namespace llvm
|
// Copyright (c) 2012-2020 FRC Team 3512. All Rights Reserved.
/* ===== Hammer =====
* There are two parts to switching the state of this mechanism.
*
* 1) The first device is moved into position and out of the other one's way.
* 2) The second device is moved after a delay to avoid a mechanical lock-up.
*
* When the trigger is released, the state variable switches and a timer is
* started.
* The first device is moved into position immediately.
*
* After the delay for the respective state change has passed,
* the second device is moved into position. This is done
* to make sure that there are no mechanical lock-ups caused
* by the devices being triggered at the same time. It may
* damage them.
*
* After the second device is told to move, the timer is stopped and reset
* since it isn't needed anymore.
*
* Note: We don't know for sure if either device moved out of the way by
* the time the second device needs to be moved. It's just
* assumed that the delay gave the first device enough time
* to do so.
*/
#include "LockSolenoid.hpp"
// Construct with the PCM channels for the arm and lock solenoids.
// Fixes a copy-paste bug: m_lockSolenoid was previously initialized with
// armSolenoidChannel, binding both members to the same channel and leaving
// the lockSolenoidChannel parameter unused.
LockSolenoid::LockSolenoid(int armSolenoidChannel, int lockSolenoidChannel)
    : m_armSolenoid(armSolenoidChannel), m_lockSolenoid(lockSolenoidChannel) {}
// Request a state change. Only the first device is actuated here; the second
// is actuated later by Update() after a delay, to avoid a mechanical lock-up
// from moving both devices at the same time (see file header comment).
void LockSolenoid::Set(State lock) {
    // If LockSolenoid is done changing states
    if (m_lockState == m_changeTo && lock != State::kTransition) {
        // Start a potential state change
        m_changeTo = lock;
    }

    // If LockSolenoid needs to change states
    if (m_lockState != m_changeTo) {
        // Used to track delay between actions
        m_timer.Reset();
        m_timer.Start();

        /* Moves first device immediately */
        // Operates lock with single solenoid as arm
        if (m_changeTo == State::kDeployed) {
            // If hammer should be going down, deploy it immediately
            m_armSolenoid.Set(true);
        } else {
            // If hammer is coming back up, unlock solenoid immediately
            m_lockSolenoid.Set(false);
        }
    }
}
// Complete a pending state change: once the per-direction delay has elapsed,
// actuate the second device, record the new state, and stop the timer.
// Must be called periodically while a transition is in progress.
void LockSolenoid::Update() {
    if (m_lockState != m_changeTo) {
        /* If we are deploying the solenoids, use the deploy delay.
         * Otherwise, use the retract delay.
         */
        if (m_timer.Get() >
            (m_changeTo == State::kDeployed ? kDeployDelay : kRetractDelay)) {
            if (m_changeTo == State::kDeployed) {
                // If going to lock, activate lock
                m_lockSolenoid.Set(true);
            } else {
                // Else, move arm back up
                m_armSolenoid.Set(false);
            }

            // Update the status since it just finished transitioning
            m_lockState = m_changeTo;

            // Stop the timer because it's no longer needed
            m_timer.Stop();
        }
    }
}
// Report the current state; while a state change is pending, report the
// transitional state instead of the stale settled state.
LockSolenoid::State LockSolenoid::Get() const {
    return m_lockState == m_changeTo ? m_lockState : State::kTransition;
}
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif
#include <init.h>
#include <addrman.h>
#include <amount.h>
#include <chain.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <compat/sanity.h>
#include <consensus/validation.h>
#include <fs.h>
#include <httpserver.h>
#include <httprpc.h>
#include <key.h>
#include <validation.h>
#include <miner.h>
#include <netbase.h>
#include <net.h>
#include <net_processing.h>
#include <policy/feerate.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <rpc/server.h>
#include <rpc/register.h>
#include <rpc/safemode.h>
#include <rpc/blockchain.h>
#include <script/standard.h>
#include <script/sigcache.h>
#include <scheduler.h>
#include <timedata.h>
#include <txdb.h>
#include <txmempool.h>
#include <torcontrol.h>
#include <ui_interface.h>
#include <util.h>
#include <utilmoneystr.h>
#include <validationinterface.h>
#ifdef ENABLE_WALLET
#include <wallet/init.h>
#endif
#include <warnings.h>
#include <stdint.h>
#include <stdio.h>
#include <memory>
#ifndef WIN32
#include <signal.h>
#endif
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/bind.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <boost/thread.hpp>
#include <openssl/crypto.h>
#if ENABLE_ZMQ
#include <zmq/zmqnotificationinterface.h>
#endif
#ifdef USE_SSE2
#include "crypto/scrypt.h"
#endif
// Set once fee estimates have been loaded; gates flushing them in Shutdown().
bool fFeeEstimatesInitialized = false;
static const bool DEFAULT_PROXYRANDOMIZE = true;
static const bool DEFAULT_REST_ENABLE = false;
static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;

// Global connection manager and P2P message-processing logic; created during
// init and torn down (in dependency order) by Shutdown().
std::unique_ptr<CConnman> g_connman;
std::unique_ptr<PeerLogicValidation> peerLogic;

#if ENABLE_ZMQ
static CZMQNotificationInterface* pzmqNotificationInterface = nullptr;
#endif

#ifdef WIN32
// Win32 LevelDB doesn't use filedescriptors, and the ones used for
// accessing block files don't count towards the fd_set size limit
// anyway.
#define MIN_CORE_FILEDESCRIPTORS 0
#else
#define MIN_CORE_FILEDESCRIPTORS 150
#endif

// File in the data directory used to persist fee estimates across restarts.
static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat";
//////////////////////////////////////////////////////////////////////////////
//
// Shutdown
//
//
// Thread management and startup/shutdown:
//
// The network-processing threads are all part of a thread group
// created by AppInit() or the Qt main() function.
//
// A clean exit happens when StartShutdown() or the SIGTERM
// signal handler sets fRequestShutdown, which makes the main thread's
// WaitForShutdown() interrupt the thread group.
// WaitForShutdown() then makes all other ongoing threads
// in the thread group join the main thread.
// Shutdown() is then called to clean up database connections, and stop other
// threads that should only be stopped after the main network-processing
// threads have exited.
//
// Shutdown for Qt is very similar, only it uses a QTimer to detect
// fRequestShutdown getting set, and then does the normal Qt
// shutdown thing.
//
// Set by StartShutdown()/signal handlers; polled via ShutdownRequested().
std::atomic<bool> fRequestShutdown(false);
// Whether the mempool should be dumped to disk during Shutdown().
std::atomic<bool> fDumpMempoolLater(false);
// Request an orderly shutdown; the flag is polled by ShutdownRequested().
void StartShutdown()
{
    fRequestShutdown.store(true);
}
// Return whether an orderly shutdown has been requested.
bool ShutdownRequested()
{
    return fRequestShutdown.load();
}
/**
* This is a minimally invasive approach to shutdown on LevelDB read errors from the
* chainstate, while keeping user interface out of the common library, which is shared
* between bitcoind, and bitcoin-qt and non-server tools.
*/
class CCoinsViewErrorCatcher final : public CCoinsViewBacked
{
public:
    explicit CCoinsViewErrorCatcher(CCoinsView* view) : CCoinsViewBacked(view) {}
    // Wraps the backing view's GetCoin, converting a database read error into
    // a user-visible message followed by an immediate abort (see below).
    bool GetCoin(const COutPoint &outpoint, Coin &coin) const override {
        try {
            return CCoinsViewBacked::GetCoin(outpoint, coin);
        } catch(const std::runtime_error& e) {
            uiInterface.ThreadSafeMessageBox(_("Error reading from database, shutting down."), "", CClientUIInterface::MSG_ERROR);
            LogPrintf("Error reading from database: %s\n", e.what());
            // Starting the shutdown sequence and returning false to the caller would be
            // interpreted as 'entry not found' (as opposed to unable to read data), and
            // could lead to invalid interpretation. Just exit immediately, as we can't
            // continue anyway, and all writes should be atomic.
            abort();
        }
    }
    // Writes do not need similar protection, as failure to write is handled by the caller.
};
// Error-catching wrapper around the coins database view (see class above).
static std::unique_ptr<CCoinsViewErrorCatcher> pcoinscatcher;
// Keeps libsecp256k1's verification context alive for the node's lifetime.
static std::unique_ptr<ECCVerifyHandle> globalVerifyHandle;
// Background threads (scheduler etc.) interrupted and joined in Shutdown().
static boost::thread_group threadGroup;
static CScheduler scheduler;
// Interrupt long-running subsystems (HTTP server, RPC, REST, Tor control,
// networking) so their threads can wind down before Shutdown() runs.
void Interrupt()
{
    InterruptHTTPServer();
    InterruptHTTPRPC();
    InterruptRPC();
    InterruptREST();
    InterruptTorControl();
    if (g_connman)
        g_connman->Interrupt();
}
/**
 * Tear down the node roughly in reverse order of initialization: stop RPC/HTTP
 * servers, networking and background threads, persist mempool and fee
 * estimates, flush chain state to disk, then release global objects.
 * The statement order below is deliberate; do not reorder casually.
 */
void Shutdown()
{
    LogPrintf("%s: In progress...\n", __func__);
    static CCriticalSection cs_Shutdown;
    TRY_LOCK(cs_Shutdown, lockShutdown);
    // Only the first caller performs the shutdown; concurrent callers return.
    if (!lockShutdown)
        return;

    /// Note: Shutdown() must be able to handle cases in which initialization failed part of the way,
    /// for example if the data directory was found to be locked.
    /// Be sure that anything that writes files or flushes caches only does this if the respective
    /// module was initialized.
    RenameThread("usdacoin-shutoff");
    mempool.AddTransactionsUpdated(1);

    StopHTTPRPC();
    StopREST();
    StopRPC();
    StopHTTPServer();
#ifdef ENABLE_WALLET
    FlushWallets();
#endif
    MapPort(false);

    // Because these depend on each-other, we make sure that neither can be
    // using the other before destroying them.
    if (peerLogic) UnregisterValidationInterface(peerLogic.get());
    if (g_connman) g_connman->Stop();
    peerLogic.reset();
    g_connman.reset();

    StopTorControl();

    // After everything has been shut down, but before things get flushed, stop the
    // CScheduler/checkqueue threadGroup
    threadGroup.interrupt_all();
    threadGroup.join_all();

    if (fDumpMempoolLater && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
        DumpMempool();
    }

    if (fFeeEstimatesInitialized)
    {
        // Persist fee estimates so they survive a restart.
        ::feeEstimator.FlushUnconfirmed(::mempool);
        fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
        CAutoFile est_fileout(fsbridge::fopen(est_path, "wb"), SER_DISK, CLIENT_VERSION);
        if (!est_fileout.IsNull())
            ::feeEstimator.Write(est_fileout);
        else
            LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string());
        fFeeEstimatesInitialized = false;
    }

    // FlushStateToDisk generates a SetBestChain callback, which we should avoid missing
    if (pcoinsTip != nullptr) {
        FlushStateToDisk();
    }

    // After there are no more peers/RPC left to give us new data which may generate
    // CValidationInterface callbacks, flush them...
    GetMainSignals().FlushBackgroundCallbacks();

    // Any future callbacks will be dropped. This should absolutely be safe - if
    // missing a callback results in an unrecoverable situation, unclean shutdown
    // would too. The only reason to do the above flushes is to let the wallet catch
    // up with our current chain to avoid any strange pruning edge cases and make
    // next startup faster by avoiding rescan.

    {
        LOCK(cs_main);
        if (pcoinsTip != nullptr) {
            FlushStateToDisk();
        }
        // Destroy the coins/block-tree views while holding cs_main.
        pcoinsTip.reset();
        pcoinscatcher.reset();
        pcoinsdbview.reset();
        pblocktree.reset();
    }
#ifdef ENABLE_WALLET
    StopWallets();
#endif

#if ENABLE_ZMQ
    if (pzmqNotificationInterface) {
        UnregisterValidationInterface(pzmqNotificationInterface);
        delete pzmqNotificationInterface;
        pzmqNotificationInterface = nullptr;
    }
#endif

#ifndef WIN32
    try {
        fs::remove(GetPidFile());
    } catch (const fs::filesystem_error& e) {
        LogPrintf("%s: Unable to remove pidfile: %s\n", __func__, e.what());
    }
#endif
    UnregisterAllValidationInterfaces();
    GetMainSignals().UnregisterBackgroundSignalScheduler();
    GetMainSignals().UnregisterWithMempoolSignals(mempool);
#ifdef ENABLE_WALLET
    CloseWallets();
#endif
    globalVerifyHandle.reset();
    ECC_Stop();
    LogPrintf("%s: done\n", __func__);
}
/**
* Signal handlers are very limited in what they are allowed to do.
* The execution context the handler is invoked in is not guaranteed,
* so we restrict handler operations to just touching variables:
*/
#ifndef WIN32
// Async-signal-safe: only sets a flag the main loop polls.
static void HandleSIGTERM(int)
{
    fRequestShutdown = true;
}
// SIGHUP requests the debug log file be reopened (for log rotation).
static void HandleSIGHUP(int)
{
    fReopenDebugLog = true;
}
#else
// Windows console control handler: request shutdown and block this handler
// thread indefinitely so the process isn't killed before the clean exit runs.
static BOOL WINAPI consoleCtrlHandler(DWORD dwCtrlType)
{
    fRequestShutdown = true;
    Sleep(INFINITE);
    return true;
}
#endif
#ifndef WIN32
// Install a handler for the given signal via sigaction with an empty mask
// and zero flags (notably no SA_RESTART, so blocking syscalls return early).
static void registerSignalHandler(int signal, void(*handler)(int))
{
    struct sigaction sa;
    sa.sa_handler = handler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;

    sigaction(signal, &sa, nullptr);
}
#endif
// Callback invoked once the RPC server has started: begin forwarding
// block-tip notifications into the RPC layer.
void OnRPCStarted()
{
    uiInterface.NotifyBlockTip.connect(&RPCNotifyBlockChange);
}
// Callback invoked when the RPC server stops: disconnect the block-tip
// notification, push one final null notification, and wake any threads
// waiting on cvBlockChange so they do not block shutdown.
void OnRPCStopped()
{
    uiInterface.NotifyBlockTip.disconnect(&RPCNotifyBlockChange);
    RPCNotifyBlockChange(false, nullptr);
    cvBlockChange.notify_all();
    LogPrint(BCLog::RPC, "RPC stopped.\n");
}
/**
 * Build the formatted command-line help text.
 * @param mode distinguishes the daemon (HMM_BITCOIND) from other callers so
 *             daemon-only options (e.g. -daemon) are shown only where valid.
 * @return the complete, formatted usage/help string.
 * Options marked "showDebug" appear only when -help-debug was given.
 */
std::string HelpMessage(HelpMessageMode mode)
{
    const auto defaultBaseParams = CreateBaseChainParams(CBaseChainParams::MAIN);
    const auto testnetBaseParams = CreateBaseChainParams(CBaseChainParams::TESTNET);
    const auto defaultChainParams = CreateChainParams(CBaseChainParams::MAIN);
    const auto testnetChainParams = CreateChainParams(CBaseChainParams::TESTNET);
    const bool showDebug = gArgs.GetBoolArg("-help-debug", false);
    // When adding new options to the categories, please keep and ensure alphabetical ordering.
    // Do not wrap -help-debug option strings in _(...): they contain many technical
    // terms and reach only a very small audience, so translating them would be
    // unnecessary work for translators.
    std::string strUsage = HelpMessageGroup(_("Options:"));
    strUsage += HelpMessageOpt("-?", _("Print this help message and exit"));
    strUsage += HelpMessageOpt("-version", _("Print version and exit"));
    strUsage += HelpMessageOpt("-alertnotify=<cmd>", _("Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)"));
    strUsage += HelpMessageOpt("-blocknotify=<cmd>", _("Execute command when the best block changes (%s in cmd is replaced by block hash)"));
    if (showDebug)
        strUsage += HelpMessageOpt("-blocksonly", strprintf(_("Whether to operate in a blocks only mode (default: %u)"), DEFAULT_BLOCKSONLY));
    strUsage +=HelpMessageOpt("-assumevalid=<hex>", strprintf(_("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)"), defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()));
    strUsage += HelpMessageOpt("-conf=<file>", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME));
    if (mode == HMM_BITCOIND)
    {
#if HAVE_DECL_DAEMON
        strUsage += HelpMessageOpt("-daemon", _("Run in the background as a daemon and accept commands"));
#endif
    }
    strUsage += HelpMessageOpt("-datadir=<dir>", _("Specify data directory"));
    if (showDebug) {
        strUsage += HelpMessageOpt("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize));
    }
    strUsage += HelpMessageOpt("-dbcache=<n>", strprintf(_("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache));
    if (showDebug)
        strUsage += HelpMessageOpt("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER));
    strUsage += HelpMessageOpt("-loadblock=<file>", _("Imports blocks from external blk000??.dat file on startup"));
    strUsage += HelpMessageOpt("-debuglogfile=<file>", strprintf(_("Specify location of debug log file: this can be an absolute path or a path relative to the data directory (default: %s)"), DEFAULT_DEBUGLOGFILE));
    strUsage += HelpMessageOpt("-maxorphantx=<n>", strprintf(_("Keep at most <n> unconnectable transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS));
    strUsage += HelpMessageOpt("-maxmempool=<n>", strprintf(_("Keep the transaction memory pool below <n> megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE));
    strUsage += HelpMessageOpt("-mempoolexpiry=<n>", strprintf(_("Do not keep transactions in the mempool longer than <n> hours (default: %u)"), DEFAULT_MEMPOOL_EXPIRY));
    if (showDebug) {
        strUsage += HelpMessageOpt("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()));
    }
    strUsage += HelpMessageOpt("-persistmempool", strprintf(_("Whether to save the mempool on shutdown and load on restart (default: %u)"), DEFAULT_PERSIST_MEMPOOL));
    strUsage += HelpMessageOpt("-blockreconstructionextratxn=<n>", strprintf(_("Extra transactions to keep in memory for compact block reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN));
    strUsage += HelpMessageOpt("-par=<n>", strprintf(_("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)"),
        -GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS));
#ifndef WIN32
    strUsage += HelpMessageOpt("-pid=<file>", strprintf(_("Specify pid file (default: %s)"), BITCOIN_PID_FILENAME));
#endif
    strUsage += HelpMessageOpt("-prune=<n>", strprintf(_("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. "
            "Warning: Reverting this setting requires re-downloading the entire blockchain. "
            "(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >=%u = automatically prune block files to stay under the specified target size in MiB)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
    strUsage += HelpMessageOpt("-reindex-chainstate", _("Rebuild chain state from the currently indexed blocks"));
    strUsage += HelpMessageOpt("-reindex", _("Rebuild chain state and block index from the blk*.dat files on disk"));
#ifndef WIN32
    strUsage += HelpMessageOpt("-sysperms", _("Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)"));
#endif
    strUsage += HelpMessageOpt("-txindex", strprintf(_("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)"), DEFAULT_TXINDEX));
    strUsage += HelpMessageGroup(_("Connection options:"));
    strUsage += HelpMessageOpt("-addnode=<ip>", _("Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info)"));
    strUsage += HelpMessageOpt("-banscore=<n>", strprintf(_("Threshold for disconnecting misbehaving peers (default: %u)"), DEFAULT_BANSCORE_THRESHOLD));
    strUsage += HelpMessageOpt("-bantime=<n>", strprintf(_("Number of seconds to keep misbehaving peers from reconnecting (default: %u)"), DEFAULT_MISBEHAVING_BANTIME));
    strUsage += HelpMessageOpt("-bind=<addr>", _("Bind to given address and always listen on it. Use [host]:port notation for IPv6"));
    strUsage += HelpMessageOpt("-connect=<ip>", _("Connect only to the specified node(s); -connect=0 disables automatic connections (the rules for this peer are the same as for -addnode)"));
    strUsage += HelpMessageOpt("-discover", _("Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)"));
    strUsage += HelpMessageOpt("-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + strprintf(_("(default: %u)"), DEFAULT_NAME_LOOKUP));
    strUsage += HelpMessageOpt("-dnsseed", _("Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)"));
    strUsage += HelpMessageOpt("-externalip=<ip>", _("Specify your own public address"));
    strUsage += HelpMessageOpt("-forcednsseed", strprintf(_("Always query for peer addresses via DNS lookup (default: %u)"), DEFAULT_FORCEDNSSEED));
    strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: 1 if no -proxy or -connect)"));
    strUsage += HelpMessageOpt("-listenonion", strprintf(_("Automatically create Tor hidden service (default: %d)"), DEFAULT_LISTEN_ONION));
    strUsage += HelpMessageOpt("-maxconnections=<n>", strprintf(_("Maintain at most <n> connections to peers (default: %u)"), DEFAULT_MAX_PEER_CONNECTIONS));
    strUsage += HelpMessageOpt("-maxreceivebuffer=<n>", strprintf(_("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)"), DEFAULT_MAXRECEIVEBUFFER));
    strUsage += HelpMessageOpt("-maxsendbuffer=<n>", strprintf(_("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)"), DEFAULT_MAXSENDBUFFER));
    strUsage += HelpMessageOpt("-maxtimeadjustment", strprintf(_("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)"), DEFAULT_MAX_TIME_ADJUSTMENT));
    strUsage += HelpMessageOpt("-onion=<ip:port>", strprintf(_("Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: %s)"), "-proxy"));
    strUsage += HelpMessageOpt("-onlynet=<net>", _("Only connect to nodes in network <net> (ipv4, ipv6 or onion)"));
    strUsage += HelpMessageOpt("-permitbaremultisig", strprintf(_("Relay non-P2SH multisig (default: %u)"), DEFAULT_PERMIT_BAREMULTISIG));
    strUsage += HelpMessageOpt("-peerbloomfilters", strprintf(_("Support filtering of blocks and transaction with bloom filters (default: %u)"), DEFAULT_PEERBLOOMFILTERS));
    strUsage += HelpMessageOpt("-port=<port>", strprintf(_("Listen for connections on <port> (default: %u or testnet: %u)"), defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort()));
    strUsage += HelpMessageOpt("-proxy=<ip:port>", _("Connect through SOCKS5 proxy"));
    strUsage += HelpMessageOpt("-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)"), DEFAULT_PROXYRANDOMIZE));
    strUsage += HelpMessageOpt("-seednode=<ip>", _("Connect to a node to retrieve peer addresses, and disconnect"));
    strUsage += HelpMessageOpt("-timeout=<n>", strprintf(_("Specify connection timeout in milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT));
    strUsage += HelpMessageOpt("-torcontrol=<ip>:<port>", strprintf(_("Tor control port to use if onion listening enabled (default: %s)"), DEFAULT_TOR_CONTROL));
    strUsage += HelpMessageOpt("-torpassword=<pass>", _("Tor control port password (default: empty)"));
#ifdef USE_UPNP
#if USE_UPNP
    strUsage += HelpMessageOpt("-upnp", _("Use UPnP to map the listening port (default: 1 when listening and no -proxy)"));
#else
    strUsage += HelpMessageOpt("-upnp", strprintf(_("Use UPnP to map the listening port (default: %u)"), 0));
#endif
#endif
    strUsage += HelpMessageOpt("-whitebind=<addr>", _("Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6"));
    strUsage += HelpMessageOpt("-whitelist=<IP address or network>", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be specified multiple times.") +
        " " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway"));
    strUsage += HelpMessageOpt("-maxuploadtarget=<n>", strprintf(_("Tries to keep outbound traffic under the given target (in MiB per 24h), 0 = no limit (default: %d)"), DEFAULT_MAX_UPLOAD_TARGET));
#ifdef ENABLE_WALLET
    strUsage += GetWalletHelpString(showDebug);
#endif
#if ENABLE_ZMQ
    strUsage += HelpMessageGroup(_("ZeroMQ notification options:"));
    strUsage += HelpMessageOpt("-zmqpubhashblock=<address>", _("Enable publish hash block in <address>"));
    strUsage += HelpMessageOpt("-zmqpubhashtx=<address>", _("Enable publish hash transaction in <address>"));
    strUsage += HelpMessageOpt("-zmqpubrawblock=<address>", _("Enable publish raw block in <address>"));
    strUsage += HelpMessageOpt("-zmqpubrawtx=<address>", _("Enable publish raw transaction in <address>"));
#endif
    strUsage += HelpMessageGroup(_("Debugging/Testing options:"));
    strUsage += HelpMessageOpt("-uacomment=<cmt>", _("Append comment to the user agent string"));
    if (showDebug)
    {
        strUsage += HelpMessageOpt("-checkblocks=<n>", strprintf(_("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS));
        strUsage += HelpMessageOpt("-checklevel=<n>", strprintf(_("How thorough the block verification of -checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL));
        strUsage += HelpMessageOpt("-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, setBlockIndexCandidates, chainActive and mapBlocksUnlinked occasionally. Also sets -checkmempool (default: %u)", defaultChainParams->DefaultConsistencyChecks()));
        strUsage += HelpMessageOpt("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u)", defaultChainParams->DefaultConsistencyChecks()));
        strUsage += HelpMessageOpt("-checkpoints", strprintf("Disable expensive verification for known chain history (default: %u)", DEFAULT_CHECKPOINTS_ENABLED));
        strUsage += HelpMessageOpt("-disablesafemode", strprintf("Disable safemode, override a real safe mode event (default: %u)", DEFAULT_DISABLE_SAFEMODE));
        strUsage += HelpMessageOpt("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used");
        strUsage += HelpMessageOpt("-testsafemode", strprintf("Force safe mode (default: %u)", DEFAULT_TESTSAFEMODE));
        strUsage += HelpMessageOpt("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages");
        strUsage += HelpMessageOpt("-fuzzmessagestest=<n>", "Randomly fuzz 1 of every <n> network messages");
        strUsage += HelpMessageOpt("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT));
        strUsage += HelpMessageOpt("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT));
        strUsage += HelpMessageOpt("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT));
        strUsage += HelpMessageOpt("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT));
        strUsage += HelpMessageOpt("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT));
        strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
        strUsage += HelpMessageOpt("-vbparams=deployment:start:end", "Use given start/end times for specified version bits deployment (regtest-only)");
    }
    strUsage += HelpMessageOpt("-debug=<category>", strprintf(_("Output debugging information (default: %u, supplying <category> is optional)"), 0) + ". " +
        _("If <category> is not supplied or if <category> = 1, output all debugging information.") + " " + _("<category> can be:") + " " + ListLogCategories() + ".");
    strUsage += HelpMessageOpt("-debugexclude=<category>", strprintf(_("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories.")));
    strUsage += HelpMessageOpt("-help-debug", _("Show all debugging options (usage: --help -help-debug)"));
    strUsage += HelpMessageOpt("-logips", strprintf(_("Include IP addresses in debug output (default: %u)"), DEFAULT_LOGIPS));
    strUsage += HelpMessageOpt("-logtimestamps", strprintf(_("Prepend debug output with timestamp (default: %u)"), DEFAULT_LOGTIMESTAMPS));
    if (showDebug)
    {
        strUsage += HelpMessageOpt("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS));
        strUsage += HelpMessageOpt("-mocktime=<n>", "Replace actual time with <n> seconds since epoch (default: 0)");
        strUsage += HelpMessageOpt("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE));
        strUsage += HelpMessageOpt("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE));
    }
    strUsage += HelpMessageOpt("-maxtxfee=<amt>", strprintf(_("Maximum total fees (in %s) to use in a single wallet transaction or raw transaction; setting this too low may abort large transactions (default: %s)"),
        CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)));
    strUsage += HelpMessageOpt("-printtoconsole", _("Send trace/debug info to console instead of debug.log file"));
    if (showDebug)
    {
        strUsage += HelpMessageOpt("-printpriority", strprintf("Log transaction fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY));
    }
    strUsage += HelpMessageOpt("-shrinkdebugfile", _("Shrink debug.log file on client startup (default: 1 when no -debug)"));
    AppendParamsHelpMessages(strUsage, showDebug);
    strUsage += HelpMessageGroup(_("Node relay options:"));
    if (showDebug) {
        strUsage += HelpMessageOpt("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()));
        strUsage += HelpMessageOpt("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define cost of relay, used for mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)));
        strUsage += HelpMessageOpt("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to defined dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)));
    }
    strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Equivalent bytes per sigop in transactions for relay and mining (default: %u)"), DEFAULT_BYTES_PER_SIGOP));
    strUsage += HelpMessageOpt("-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %u)"), DEFAULT_ACCEPT_DATACARRIER));
    strUsage += HelpMessageOpt("-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we relay and mine (default: %u)"), MAX_OP_RETURN_RELAY));
    strUsage += HelpMessageOpt("-mempoolreplacement", strprintf(_("Enable transaction replacement in the memory pool (default: %u)"), DEFAULT_ENABLE_REPLACEMENT));
    strUsage += HelpMessageOpt("-minrelaytxfee=<amt>", strprintf(_("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)"),
        CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)));
    strUsage += HelpMessageOpt("-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY));
    strUsage += HelpMessageOpt("-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY));
    strUsage += HelpMessageGroup(_("Block creation options:"));
    strUsage += HelpMessageOpt("-blockmaxweight=<n>", strprintf(_("Set maximum BIP141 block weight (default: %d)"), DEFAULT_BLOCK_MAX_WEIGHT));
    strUsage += HelpMessageOpt("-blockmintxfee=<amt>", strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)));
    if (showDebug)
        strUsage += HelpMessageOpt("-blockversion=<n>", "Override block version to test forking scenarios");
    strUsage += HelpMessageGroup(_("RPC server options:"));
    strUsage += HelpMessageOpt("-server", _("Accept command line and JSON-RPC commands"));
    strUsage += HelpMessageOpt("-rest", strprintf(_("Accept public REST requests (default: %u)"), DEFAULT_REST_ENABLE));
    strUsage += HelpMessageOpt("-rpcbind=<addr>[:port]", _("Bind to given address to listen for JSON-RPC connections. This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost, or if -rpcallowip has been specified, 0.0.0.0 and :: i.e., all addresses)"));
    strUsage += HelpMessageOpt("-rpccookiefile=<loc>", _("Location of the auth cookie (default: data dir)"));
    strUsage += HelpMessageOpt("-rpcuser=<user>", _("Username for JSON-RPC connections"));
    strUsage += HelpMessageOpt("-rpcpassword=<pw>", _("Password for JSON-RPC connections"));
    strUsage += HelpMessageOpt("-rpcauth=<userpw>", _("Username and hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcuser. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times"));
    strUsage += HelpMessageOpt("-rpcport=<port>", strprintf(_("Listen for JSON-RPC connections on <port> (default: %u or testnet: %u)"), defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort()));
    strUsage += HelpMessageOpt("-rpcallowip=<ip>", _("Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times"));
    strUsage += HelpMessageOpt("-rpcserialversion", strprintf(_("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)"), DEFAULT_RPC_SERIALIZE_VERSION));
    strUsage += HelpMessageOpt("-rpcthreads=<n>", strprintf(_("Set the number of threads to service RPC calls (default: %d)"), DEFAULT_HTTP_THREADS));
    if (showDebug) {
        strUsage += HelpMessageOpt("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE));
        strUsage += HelpMessageOpt("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT));
    }
    return strUsage;
}
/** Assemble the copyright/license notice shown by -version. */
std::string LicenseInfo()
{
    const std::string URL_SOURCE_CODE = "<https://github.com/thaotlh94/usdacoin>";
    const std::string URL_WEBSITE = "<https://usdacoin.com>";

    // Build the notice section by section rather than as one expression.
    std::string notice = CopyrightHolders(strprintf(_("Copyright (C) %i-%i"), 2011, COPYRIGHT_YEAR) + " ");
    notice += "\n\n";
    notice += strprintf(_("Please contribute if you find %s useful. "
                          "Visit %s for further information about the software."),
                        PACKAGE_NAME, URL_WEBSITE);
    notice += "\n";
    notice += strprintf(_("The source code is available from %s."), URL_SOURCE_CODE);
    notice += "\n\n";
    notice += _("This is experimental software.");
    notice += "\n";
    notice += strprintf(_("Distributed under the MIT software license, see the accompanying file %s or %s"), "COPYING", "<https://opensource.org/licenses/MIT>");
    notice += "\n\n";
    notice += strprintf(_("This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit %s and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard."), "<https://www.openssl.org>");
    notice += "\n";
    return notice;
}
// Run the user's -blocknotify command (with %s replaced by the new tip's
// block hash) whenever the best block changes outside of initial sync.
static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex)
{
    // Skip notifications during initial sync or when no block is given.
    if (initialSync || !pBlockIndex)
        return;

    std::string command = gArgs.GetArg("-blocknotify", "");
    if (command.empty())
        return;
    boost::replace_all(command, "%s", pBlockIndex->GetBlockHash().GetHex());
    boost::thread t(runCommand, command); // thread runs free
}
// State used by init to wait until the genesis block has been connected
// before continuing startup.
static bool fHaveGenesis = false;
static CWaitableCriticalSection cs_GenesisWait;
static CConditionVariable condvar_GenesisWait;
// Validation callback: records that a block was connected and wakes the
// startup thread blocked on condvar_GenesisWait.
static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex)
{
    if (pBlockIndex != nullptr) {
        {
            // Set the flag under the lock; the lock is released before notify.
            WaitableLock lock_GenesisWait(cs_GenesisWait);
            fHaveGenesis = true;
        }
        condvar_GenesisWait.notify_all();
    }
}
// RAII guard that sets the global fImporting flag for the duration of its
// scope. The asserts enforce that guards are never nested.
struct CImportingNow
{
    CImportingNow() {
        assert(fImporting == false);
        fImporting = true;
    }
    ~CImportingNow() {
        assert(fImporting == true);
        fImporting = false;
    }
};
// If we're using -prune with -reindex, then delete block files that will be ignored by the
// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
// is missing, do the same here to delete any later block files after a gap. Also delete all
// rev files since they'll be rewritten by the reindex anyway. This ensures that vinfoBlockFile
// is in sync with what's actually on disk by the time we start downloading, so that pruning
// works correctly.
void CleanupBlockRevFiles()
{
std::map<std::string, fs::path> mapBlockFiles;
// Glob all blk?????.dat and rev?????.dat files from the blocks directory.
// Remove the rev files immediately and insert the blk file paths into an
// ordered map keyed by block file index.
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
fs::path blocksdir = GetDataDir() / "blocks";
for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
if (fs::is_regular_file(*it) &&
it->path().filename().string().length() == 12 &&
it->path().filename().string().substr(8,4) == ".dat")
{
if (it->path().filename().string().substr(0,3) == "blk")
mapBlockFiles[it->path().filename().string().substr(3,5)] = it->path();
else if (it->path().filename().string().substr(0,3) == "rev")
remove(it->path());
}
}
// Remove all block files that aren't part of a contiguous set starting at
// zero by walking the ordered map (keys are block file indices) by
// keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
// start removing block files.
int nContigCounter = 0;
for (const std::pair<std::string, fs::path>& item : mapBlockFiles) {
if (atoi(item.first) == nContigCounter) {
nContigCounter++;
continue;
}
remove(item.second);
}
}
/**
 * Block-import background thread: reindexes from disk if -reindex was given,
 * then imports $DATADIR/bootstrap.dat and any -loadblock files, and finally
 * activates the best chain. May request shutdown on failure or when
 * -stopafterblockimport is set.
 */
void ThreadImport(std::vector<fs::path> vImportFiles)
{
    const CChainParams& chainparams = Params();
    RenameThread("usdacoin-loadblk");
    {
        CImportingNow imp;
        // -reindex: walk block files from 0 upward until one is missing.
        if (fReindex) {
            int nFile = 0;
            while (true) {
                CDiskBlockPos pos(nFile, 0);
                if (!fs::exists(GetBlockPosFilename(pos, "blk")))
                    break; // No block files left to reindex
                FILE *file = OpenBlockFile(pos, true);
                if (!file)
                    break; // This error is logged in OpenBlockFile
                LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
                LoadExternalBlockFile(chainparams, file, &pos);
                nFile++;
            }
            pblocktree->WriteReindexing(false);
            fReindex = false;
            LogPrintf("Reindexing finished\n");
            // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
            LoadGenesisBlock(chainparams);
        }
        // hardcoded $DATADIR/bootstrap.dat
        fs::path pathBootstrap = GetDataDir() / "bootstrap.dat";
        if (fs::exists(pathBootstrap)) {
            FILE *file = fsbridge::fopen(pathBootstrap, "rb");
            if (file) {
                fs::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old";
                LogPrintf("Importing bootstrap.dat...\n");
                LoadExternalBlockFile(chainparams, file);
                // Rename so the file is not imported again on the next start.
                RenameOver(pathBootstrap, pathBootstrapOld);
            } else {
                LogPrintf("Warning: Could not open bootstrap file %s\n", pathBootstrap.string());
            }
        }
        // -loadblock=
        for (const fs::path& path : vImportFiles) {
            FILE *file = fsbridge::fopen(path, "rb");
            if (file) {
                LogPrintf("Importing blocks file %s...\n", path.string());
                LoadExternalBlockFile(chainparams, file);
            } else {
                LogPrintf("Warning: Could not open blocks file %s\n", path.string());
            }
        }
        // scan for better chains in the block chain database, that are not yet connected in the active best chain
        CValidationState state;
        if (!ActivateBestChain(state, chainparams)) {
            LogPrintf("Failed to connect best block\n");
            StartShutdown();
            return;
        }
        if (gArgs.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) {
            LogPrintf("Stopping after block import\n");
            StartShutdown();
            return;
        }
    } // End scope of CImportingNow
    // -persistmempool is a boolean option: read it with GetBoolArg so a bare
    // "-persistmempool" flag (empty value) enables it instead of parsing to 0.
    if (gArgs.GetBoolArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
        LoadMempool();
        fDumpMempoolLater = !fRequestShutdown;
    }
}
/** Sanity checks
 *  Verify the runtime environment before startup proceeds: elliptic-curve
 *  crypto, the C/C++ runtime libraries, and the OS random number generator.
 *  Returns false (after reporting where applicable) if any check fails.
 */
bool InitSanityCheck(void)
{
    // Elliptic-curve cryptography must behave correctly.
    if (!ECC_InitSanityCheck()) {
        InitError("Elliptic curve cryptography sanity check failure. Aborting.");
        return false;
    }

    // Basic C and C++ runtime library sanity.
    if (!glibc_sanity_test() || !glibcxx_sanity_test()) {
        return false;
    }

    // The OS must provide a working cryptographic RNG.
    if (!Random_SanityCheck()) {
        InitError("OS cryptographic RNG sanity check failure. Aborting.");
        return false;
    }

    return true;
}
// Bring up the HTTP/RPC server stack. Returns false as soon as any stage
// fails; stages are attempted strictly in order.
bool AppInitServers()
{
    RPCServer::OnStarted(&OnRPCStarted);
    RPCServer::OnStopped(&OnRPCStopped);
    // Short-circuit evaluation preserves the original start order.
    if (!InitHTTPServer() || !StartRPC() || !StartHTTPRPC())
        return false;
    if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE) && !StartREST())
        return false;
    return StartHTTPServer();
}
// Parameter interaction based on rules:
// after arguments are parsed, derive implied settings (via SoftSet*, which
// never overrides an explicit user choice) and log each adjustment.
void InitParameterInteraction()
{
    // when specifying an explicit binding address, you want to listen on it
    // even when -connect or -proxy is specified
    if (gArgs.IsArgSet("-bind")) {
        if (gArgs.SoftSetBoolArg("-listen", true))
            LogPrintf("%s: parameter interaction: -bind set -> setting -listen=1\n", __func__);
    }
    if (gArgs.IsArgSet("-whitebind")) {
        if (gArgs.SoftSetBoolArg("-listen", true))
            LogPrintf("%s: parameter interaction: -whitebind set -> setting -listen=1\n", __func__);
    }
    if (gArgs.IsArgSet("-connect")) {
        // when only connecting to trusted nodes, do not seed via DNS, or listen by default
        if (gArgs.SoftSetBoolArg("-dnsseed", false))
            LogPrintf("%s: parameter interaction: -connect set -> setting -dnsseed=0\n", __func__);
        if (gArgs.SoftSetBoolArg("-listen", false))
            LogPrintf("%s: parameter interaction: -connect set -> setting -listen=0\n", __func__);
    }
    if (gArgs.IsArgSet("-proxy")) {
        // to protect privacy, do not listen by default if a default proxy server is specified
        if (gArgs.SoftSetBoolArg("-listen", false))
            LogPrintf("%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__);
        // to protect privacy, do not use UPNP when a proxy is set. The user may still specify -listen=1
        // to listen locally, so don't rely on this happening through -listen below.
        if (gArgs.SoftSetBoolArg("-upnp", false))
            LogPrintf("%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__);
        // to protect privacy, do not discover addresses by default
        if (gArgs.SoftSetBoolArg("-discover", false))
            LogPrintf("%s: parameter interaction: -proxy set -> setting -discover=0\n", __func__);
    }
    if (!gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
        // do not map ports or try to retrieve public IP when not listening (pointless)
        if (gArgs.SoftSetBoolArg("-upnp", false))
            LogPrintf("%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__);
        if (gArgs.SoftSetBoolArg("-discover", false))
            LogPrintf("%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__);
        if (gArgs.SoftSetBoolArg("-listenonion", false))
            LogPrintf("%s: parameter interaction: -listen=0 -> setting -listenonion=0\n", __func__);
    }
    if (gArgs.IsArgSet("-externalip")) {
        // if an explicit public IP is specified, do not try to find others
        if (gArgs.SoftSetBoolArg("-discover", false))
            LogPrintf("%s: parameter interaction: -externalip set -> setting -discover=0\n", __func__);
    }
    // disable whitelistrelay in blocksonly mode
    if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
        if (gArgs.SoftSetBoolArg("-whitelistrelay", false))
            LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n", __func__);
    }
    // Forcing relay from whitelisted hosts implies we will accept relays from them in the first place.
    if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
        if (gArgs.SoftSetBoolArg("-whitelistrelay", true))
            LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n", __func__);
    }
}
// Compose the standard "cannot resolve" startup error for a -<opt> address.
static std::string ResolveErrMsg(const char * const optname, const std::string& strBind)
{
    const std::string message = strprintf(_("Cannot resolve -%s address: '%s'"), optname, strBind);
    return message;
}
/**
 * Initialize global logging flags from command-line arguments
 * (-printtoconsole, -logtimestamps, -logtimemicros, -logips) and emit the
 * startup version banner.
 */
void InitLogging()
{
    fPrintToConsole = gArgs.GetBoolArg("-printtoconsole", false);
    fLogTimestamps = gArgs.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
    fLogTimeMicros = gArgs.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);
    fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS);
    // Blank lines visually separate this run from any earlier log output.
    LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
    std::string version_string = FormatFullVersion();
#ifdef DEBUG
    version_string += " (debug build)";
#else
    version_string += " (release build)";
#endif
    LogPrintf(PACKAGE_NAME " version %s\n", version_string);
}
namespace { // Variables internal to initialization process only
// Effective peer-connection cap after trimming to system limits
// (set in AppInitParameterInteraction, logged in AppInitMain).
int nMaxConnections;
// Connection count the user asked for via -maxconnections, before trimming.
int nUserMaxConnections;
// File descriptors actually obtained from RaiseFileDescriptorLimit().
int nFD;
// Service bits this node advertises; NODE_BLOOM / NODE_WITNESS may be OR'd in
// later during initialization, and NODE_NETWORK is cleared in prune mode.
ServiceFlags nLocalServices = ServiceFlags(NODE_NETWORK | NODE_NETWORK_LIMITED);
} // namespace
// Installed via std::set_new_handler: invoked when operator new fails.
// Logs once and aborts the process instead of letting std::bad_alloc propagate.
[[noreturn]] static void new_handler_terminate()
{
    // Rather than throwing std::bad_alloc if allocation fails, terminate
    // immediately to (try to) avoid chain corruption.
    // Since LogPrintf may itself allocate memory, set the handler directly
    // to terminate first, so a failed allocation inside the log call cannot
    // re-enter this handler.
    std::set_new_handler(std::terminate);
    LogPrintf("Error: Out of memory. Terminating.\n");
    // The log was successful, terminate now.
    std::terminate();
}
// Step 1 of initialization: low-level process setup that must happen before
// anything else — platform quirks, the networking stack, POSIX signal
// handlers, and the out-of-memory handler. Returns false (via InitError)
// only when the networking stack cannot be initialized.
bool AppInitBasicSetup()
{
    // ********************************************************* Step 1: setup
#ifdef _MSC_VER
    // Turn off Microsoft heap dump noise
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, nullptr, OPEN_EXISTING, 0, 0));
    // Disable confusing "helpful" text message on abort, Ctrl-C
    _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
#endif
#ifdef WIN32
    // Enable Data Execution Prevention (DEP)
    // Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008
    // A failure is non-critical and needs no further attention!
#ifndef PROCESS_DEP_ENABLE
    // We define this here, because GCCs winbase.h limits this to _WIN32_WINNT >= 0x0601 (Windows 7),
    // which is not correct. Can be removed, when GCCs winbase.h is fixed!
#define PROCESS_DEP_ENABLE 0x00000001
#endif
    // Look the DEP API up at runtime, since it is not present on every supported Windows version.
    typedef BOOL (WINAPI *PSETPROCDEPPOL)(DWORD);
    PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress(GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy");
    if (setProcDEPPol != nullptr) setProcDEPPol(PROCESS_DEP_ENABLE);
#endif
    if (!SetupNetworking())
        return InitError("Initializing networking failed");
#ifndef WIN32
    if (!gArgs.GetBoolArg("-sysperms", false)) {
        // Default: newly created files are readable/writable by the owner only.
        umask(077);
    }
    // Clean shutdown on SIGTERM
    registerSignalHandler(SIGTERM, HandleSIGTERM);
    registerSignalHandler(SIGINT, HandleSIGTERM);
    // Reopen debug.log on SIGHUP
    registerSignalHandler(SIGHUP, HandleSIGHUP);
    // Ignore SIGPIPE, otherwise it will bring the daemon down if the client closes unexpectedly
    signal(SIGPIPE, SIG_IGN);
#else
    SetConsoleCtrlHandler(consoleCtrlHandler, true);
#endif
    // Terminate (rather than throw std::bad_alloc) when an allocation fails.
    std::set_new_handler(new_handler_terminate);
    return true;
}
// Steps 2-3 of initialization: validate command-line parameters, resolve
// interactions between them, and translate them into the internal global
// flags/limits used by the rest of the node. Returns false (via InitError)
// on any invalid or unsupported option combination; emits InitWarning for
// ignored/deprecated options. Must run before any networking or chain state
// is touched.
bool AppInitParameterInteraction()
{
    const CChainParams& chainparams = Params();
    // ********************************************************* Step 2: parameter interactions
    // also see: InitParameterInteraction()
    // if using block pruning, then disallow txindex
    if (gArgs.GetArg("-prune", 0)) {
        if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX))
            return InitError(_("Prune mode is incompatible with -txindex."));
    }
    // -bind and -whitebind can't be set when not listening
    size_t nUserBind = gArgs.GetArgs("-bind").size() + gArgs.GetArgs("-whitebind").size();
    if (nUserBind != 0 && !gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
        return InitError("Cannot set -bind or -whitebind together with -listen=0");
    }
    // Make sure enough file descriptors are available
    // (at least one listening socket is assumed even without explicit -bind)
    int nBind = std::max(nUserBind, size_t(1));
    nUserMaxConnections = gArgs.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
    nMaxConnections = std::max(nUserMaxConnections, 0);
    // Trim requested connection counts, to fit into system limitations
    nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS)), 0);
    nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS);
    if (nFD < MIN_CORE_FILEDESCRIPTORS)
        return InitError(_("Not enough file descriptors available."));
    nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS, nMaxConnections);
    if (nMaxConnections < nUserMaxConnections)
        InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, because of system limitations."), nUserMaxConnections, nMaxConnections));
    // ********************************************************* Step 3: parameter-to-internal-flags
    if (gArgs.IsArgSet("-debug")) {
        // Special-case: if -debug=0/-nodebug is set, turn off debugging messages
        const std::vector<std::string> categories = gArgs.GetArgs("-debug");
        if (std::none_of(categories.begin(), categories.end(),
            [](std::string cat){return cat == "0" || cat == "none";})) {
            // Enable each requested logging category; unknown names only warn.
            for (const auto& cat : categories) {
                uint32_t flag = 0;
                if (!GetLogCategory(&flag, &cat)) {
                    InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debug", cat));
                    continue;
                }
                logCategories |= flag;
            }
        }
    }
    // Now remove the logging categories which were explicitly excluded
    for (const std::string& cat : gArgs.GetArgs("-debugexclude")) {
        uint32_t flag = 0;
        if (!GetLogCategory(&flag, &cat)) {
            InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat));
            continue;
        }
        logCategories &= ~flag;
    }
    // Check for -debugnet
    if (gArgs.GetBoolArg("-debugnet", false))
        InitWarning(_("Unsupported argument -debugnet ignored, use -debug=net."));
    // Check for -socks - as this is a privacy risk to continue, exit here
    if (gArgs.IsArgSet("-socks"))
        return InitError(_("Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported."));
    // Check for -tor - as this is a privacy risk to continue, exit here
    if (gArgs.GetBoolArg("-tor", false))
        return InitError(_("Unsupported argument -tor found, use -onion."));
    if (gArgs.GetBoolArg("-benchmark", false))
        InitWarning(_("Unsupported argument -benchmark ignored, use -debug=bench."));
    if (gArgs.GetBoolArg("-whitelistalwaysrelay", false))
        InitWarning(_("Unsupported argument -whitelistalwaysrelay ignored, use -whitelistrelay and/or -whitelistforcerelay."));
    if (gArgs.IsArgSet("-blockminsize"))
        InitWarning("Unsupported argument -blockminsize ignored.");
    // Checkmempool and checkblockindex default to true in regtest mode
    // ratio: check the mempool on average every 1/ratio operations (clamped to [0, 1000000])
    int ratio = std::min<int>(std::max<int>(gArgs.GetArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 1 : 0), 0), 1000000);
    if (ratio != 0) {
        mempool.setSanityCheck(1.0 / ratio);
    }
    fCheckBlockIndex = gArgs.GetBoolArg("-checkblockindex", chainparams.DefaultConsistencyChecks());
    fCheckpointsEnabled = gArgs.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED);
    // -assumevalid: ancestors of this block hash are assumed to have valid signatures
    hashAssumeValid = uint256S(gArgs.GetArg("-assumevalid", chainparams.GetConsensus().defaultAssumeValid.GetHex()));
    if (!hashAssumeValid.IsNull())
        LogPrintf("Assuming ancestors of block %s have valid signatures.\n", hashAssumeValid.GetHex());
    else
        LogPrintf("Validating signatures for all blocks.\n");
    if (gArgs.IsArgSet("-minimumchainwork")) {
        const std::string minChainWorkStr = gArgs.GetArg("-minimumchainwork", "");
        if (!IsHexNumber(minChainWorkStr)) {
            return InitError(strprintf("Invalid non-hex (%s) minimum chain work value specified", minChainWorkStr));
        }
        nMinimumChainWork = UintToArith256(uint256S(minChainWorkStr));
    } else {
        nMinimumChainWork = UintToArith256(chainparams.GetConsensus().nMinimumChainWork);
    }
    LogPrintf("Setting nMinimumChainWork=%s\n", nMinimumChainWork.GetHex());
    if (nMinimumChainWork < UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) {
        LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainparams.GetConsensus().nMinimumChainWork.GetHex());
    }
    // mempool limits
    // -maxmempool is in MB; the minimum is derived from the descendant package
    // size limit (in KB) times 40 so eviction cannot break descendant limits
    int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
    int64_t nMempoolSizeMin = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40;
    if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin)
        return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0)));
    // incremental relay fee sets the minimum feerate increase necessary for BIP 125 replacement in the mempool
    // and the amount the mempool min fee increases above the feerate of txs evicted due to mempool limiting.
    if (gArgs.IsArgSet("-incrementalrelayfee"))
    {
        CAmount n = 0;
        if (!ParseMoney(gArgs.GetArg("-incrementalrelayfee", ""), n))
            return InitError(AmountErrMsg("incrementalrelayfee", gArgs.GetArg("-incrementalrelayfee", "")));
        incrementalRelayFee = CFeeRate(n);
    }
    // -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency
    // negative -par leaves that many cores free; a single thread disables concurrency
    nScriptCheckThreads = gArgs.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS);
    if (nScriptCheckThreads <= 0)
        nScriptCheckThreads += GetNumCores();
    if (nScriptCheckThreads <= 1)
        nScriptCheckThreads = 0;
    else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS)
        nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS;
    // block pruning; get the amount of disk space (in MiB) to allot for block & undo files
    int64_t nPruneArg = gArgs.GetArg("-prune", 0);
    if (nPruneArg < 0) {
        return InitError(_("Prune cannot be configured with a negative value."));
    }
    nPruneTarget = (uint64_t) nPruneArg * 1024 * 1024;
    if (nPruneArg == 1) { // manual pruning: -prune=1
        LogPrintf("Block pruning enabled. Use RPC call pruneblockchain(height) to manually prune block and undo files.\n");
        nPruneTarget = std::numeric_limits<uint64_t>::max();
        fPruneMode = true;
    } else if (nPruneTarget) {
        if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) {
            return InitError(strprintf(_("Prune configured below the minimum of %d MiB. Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
        }
        LogPrintf("Prune configured to target %uMiB on disk for block and undo files.\n", nPruneTarget / 1024 / 1024);
        fPruneMode = true;
    }
    // peer connection timeout; non-positive values fall back to the default
    nConnectTimeout = gArgs.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
    if (nConnectTimeout <= 0)
        nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
    if (gArgs.IsArgSet("-minrelaytxfee")) {
        CAmount n = 0;
        if (!ParseMoney(gArgs.GetArg("-minrelaytxfee", ""), n)) {
            return InitError(AmountErrMsg("minrelaytxfee", gArgs.GetArg("-minrelaytxfee", "")));
        }
        // High fee check is done afterward in WalletParameterInteraction()
        ::minRelayTxFee = CFeeRate(n);
    } else if (incrementalRelayFee > ::minRelayTxFee) {
        // Allow only setting incrementalRelayFee to control both
        ::minRelayTxFee = incrementalRelayFee;
        LogPrintf("Increasing minrelaytxfee to %s to match incrementalrelayfee\n",::minRelayTxFee.ToString());
    }
    // Sanity check argument for min fee for including tx in block
    // TODO: Harmonize which arguments need sanity checking and where that happens
    if (gArgs.IsArgSet("-blockmintxfee"))
    {
        CAmount n = 0;
        if (!ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n))
            return InitError(AmountErrMsg("blockmintxfee", gArgs.GetArg("-blockmintxfee", "")));
    }
    // Feerate used to define dust. Shouldn't be changed lightly as old
    // implementations may inadvertently create non-standard transactions
    if (gArgs.IsArgSet("-dustrelayfee"))
    {
        CAmount n = 0;
        if (!ParseMoney(gArgs.GetArg("-dustrelayfee", ""), n) || 0 == n)
            return InitError(AmountErrMsg("dustrelayfee", gArgs.GetArg("-dustrelayfee", "")));
        dustRelayFee = CFeeRate(n);
    }
    // -acceptnonstdtxn is only honored on chains that don't require standardness
    fRequireStandard = !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
    if (chainparams.RequireStandard() && !fRequireStandard)
        return InitError(strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString()));
    nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp);
#ifdef ENABLE_WALLET
    if (!WalletParameterInteraction())
        return false;
#endif
    fIsBareMultisigStd = gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG);
    fAcceptDatacarrier = gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER);
    nMaxDatacarrierBytes = gArgs.GetArg("-datacarriersize", nMaxDatacarrierBytes);
    // Option to startup with mocktime set (used for regression testing):
    SetMockTime(gArgs.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
    if (gArgs.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
        nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
    // only serialization versions 0 and 1 are understood by the RPC layer
    if (gArgs.GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) < 0)
        return InitError("rpcserialversion must be non-negative.");
    if (gArgs.GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) > 1)
        return InitError("unknown rpcserialversion requested.");
    nMaxTipAge = gArgs.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
    fEnableReplacement = gArgs.GetBoolArg("-mempoolreplacement", DEFAULT_ENABLE_REPLACEMENT);
    if ((!fEnableReplacement) && gArgs.IsArgSet("-mempoolreplacement")) {
        // Minimal effort at forwards compatibility
        std::string strReplacementModeList = gArgs.GetArg("-mempoolreplacement", "");  // default is impossible
        std::vector<std::string> vstrReplacementModes;
        boost::split(vstrReplacementModes, strReplacementModeList, boost::is_any_of(","));
        fEnableReplacement = (std::find(vstrReplacementModes.begin(), vstrReplacementModes.end(), "fee") != vstrReplacementModes.end());
    }
    if (gArgs.IsArgSet("-vbparams")) {
        // Allow overriding version bits parameters for testing
        if (!chainparams.MineBlocksOnDemand()) {
            return InitError("Version bits parameters may only be overridden on regtest.");
        }
        // Each -vbparams value has the form "deployment:start:end".
        for (const std::string& strDeployment : gArgs.GetArgs("-vbparams")) {
            std::vector<std::string> vDeploymentParams;
            boost::split(vDeploymentParams, strDeployment, boost::is_any_of(":"));
            if (vDeploymentParams.size() != 3) {
                return InitError("Version bits parameters malformed, expecting deployment:start:end");
            }
            int64_t nStartTime, nTimeout;
            if (!ParseInt64(vDeploymentParams[1], &nStartTime)) {
                return InitError(strprintf("Invalid nStartTime (%s)", vDeploymentParams[1]));
            }
            if (!ParseInt64(vDeploymentParams[2], &nTimeout)) {
                return InitError(strprintf("Invalid nTimeout (%s)", vDeploymentParams[2]));
            }
            // Match the deployment name against the known deployments.
            bool found = false;
            for (int j=0; j<(int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j)
            {
                if (vDeploymentParams[0].compare(VersionBitsDeploymentInfo[j].name) == 0) {
                    UpdateVersionBitsParameters(Consensus::DeploymentPos(j), nStartTime, nTimeout);
                    found = true;
                    LogPrintf("Setting version bits activation parameters for %s to start=%ld, timeout=%ld\n", vDeploymentParams[0], nStartTime, nTimeout);
                    break;
                }
            }
            if (!found) {
                return InitError(strprintf("Invalid deployment (%s)", vDeploymentParams[0]));
            }
        }
    }
    return true;
}
static bool LockDataDirectory(bool probeOnly)
{
// Make sure only a single Bitcoin process is using the data directory.
fs::path datadir = GetDataDir();
if (!LockDirectory(datadir, ".lock", probeOnly)) {
return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), datadir.string(), _(PACKAGE_NAME)));
}
return true;
}
bool AppInitSanityChecks()
{
    // ********************************************************* Step 4: sanity checks
    // Select the SHA256 implementation and bring up the RNG and elliptic
    // curve code before anything else touches them.
    const std::string detected_sha256 = SHA256AutoDetect();
    LogPrintf("Using the '%s' SHA256 implementation\n", detected_sha256);
    RandomInit();
    ECC_Start();
    globalVerifyHandle.reset(new ECCVerifyHandle());
    // Run the basic runtime environment checks; refuse to start on failure.
    if (!InitSanityCheck()) {
        return InitError(strprintf(_("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME)));
    }
    // Probe the data directory lock to give an early error message, if possible
    // We cannot hold the data directory lock here, as the forking for daemon() hasn't yet happened,
    // and a fork will cause weird behavior to it.
    return LockDataDirectory(true);
}
bool AppInitLockDataDirectory()
{
    // After daemonization get the data directory lock again and hold on to it until exit
    // This creates a slight window for a race condition to happen, however this condition is harmless: it
    // will at most make us exit without printing a message to console.
    // Any detailed error is reported inside LockDataDirectory itself.
    return LockDataDirectory(false);
}
bool AppInitMain()
{
const CChainParams& chainparams = Params();
// ********************************************************* Step 4a: application initialization
#ifndef WIN32
CreatePidFile(GetPidFile(), getpid());
#endif
if (gArgs.GetBoolArg("-shrinkdebugfile", logCategories == BCLog::NONE)) {
// Do this first since it both loads a bunch of debug.log into memory,
// and because this needs to happen before any other debug.log printing
ShrinkDebugFile();
}
if (fPrintToDebugLog) {
if (!OpenDebugLog()) {
return InitError(strprintf("Could not open debug log file %s", GetDebugLogPath().string()));
}
}
if (!fLogTimestamps)
LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime()));
LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
LogPrintf("Using data directory %s\n", GetDataDir().string());
LogPrintf("Using config file %s\n", GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)).string());
LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, nFD);
// Warn about relative -datadir path.
if (gArgs.IsArgSet("-datadir") && !fs::path(gArgs.GetArg("-datadir", "")).is_absolute()) {
LogPrintf("Warning: relative datadir option '%s' specified, which will be interpreted relative to the "
"current working directory '%s'. This is fragile, because if usdacoin is started in the future "
"from a different location, it will be unable to locate the current data files. There could "
"also be data loss if usdacoin is started while in a temporary directory.\n",
gArgs.GetArg("-datadir", ""), fs::current_path().string());
}
InitSignatureCache();
InitScriptExecutionCache();
LogPrintf("Using %u threads for script verification\n", nScriptCheckThreads);
if (nScriptCheckThreads) {
for (int i=0; i<nScriptCheckThreads-1; i++)
threadGroup.create_thread(&ThreadScriptCheck);
}
// Start the lightweight task scheduler thread
CScheduler::Function serviceLoop = boost::bind(&CScheduler::serviceQueue, &scheduler);
threadGroup.create_thread(boost::bind(&TraceThread<CScheduler::Function>, "scheduler", serviceLoop));
GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);
GetMainSignals().RegisterWithMempoolSignals(mempool);
/* Register RPC commands regardless of -server setting so they will be
* available in the GUI RPC console even if external calls are disabled.
*/
RegisterAllCoreRPCCommands(tableRPC);
#ifdef ENABLE_WALLET
RegisterWalletRPC(tableRPC);
#endif
/* Start the RPC server already. It will be started in "warmup" mode
* and not really process calls already (but it will signify connections
* that the server is there and will be ready later). Warmup mode will
* be disabled when initialisation is finished.
*/
if (gArgs.GetBoolArg("-server", false))
{
uiInterface.InitMessage.connect(SetRPCWarmupStatus);
if (!AppInitServers())
return InitError(_("Unable to start HTTP server. See debug log for details."));
}
int64_t nStart;
#if defined(USE_SSE2)
std::string sse2detect = scrypt_detect_sse2();
LogPrintf("%s\n", sse2detect);
#endif
// ********************************************************* Step 5: verify wallet database integrity
#ifdef ENABLE_WALLET
if (!VerifyWallets())
return false;
#endif
// ********************************************************* Step 6: network initialization
// Note that we absolutely cannot open any actual connections
// until the very end ("start node") as the UTXO/block state
// is not yet setup and may end up being set up twice if we
// need to reindex later.
assert(!g_connman);
g_connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max())));
CConnman& connman = *g_connman;
peerLogic.reset(new PeerLogicValidation(&connman, scheduler));
RegisterValidationInterface(peerLogic.get());
// sanitize comments per BIP-0014, format user agent and check total size
std::vector<std::string> uacomments;
for (const std::string& cmt : gArgs.GetArgs("-uacomment")) {
if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT))
return InitError(strprintf(_("User Agent comment (%s) contains unsafe characters."), cmt));
uacomments.push_back(cmt);
}
strSubVersion = FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments);
if (strSubVersion.size() > MAX_SUBVERSION_LENGTH) {
return InitError(strprintf(_("Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments."),
strSubVersion.size(), MAX_SUBVERSION_LENGTH));
}
if (gArgs.IsArgSet("-onlynet")) {
std::set<enum Network> nets;
for (const std::string& snet : gArgs.GetArgs("-onlynet")) {
enum Network net = ParseNetwork(snet);
if (net == NET_UNROUTABLE)
return InitError(strprintf(_("Unknown network specified in -onlynet: '%s'"), snet));
nets.insert(net);
}
for (int n = 0; n < NET_MAX; n++) {
enum Network net = (enum Network)n;
if (!nets.count(net))
SetLimited(net);
}
}
// Check for host lookup allowed before parsing any network related parameters
fNameLookup = gArgs.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
bool proxyRandomize = gArgs.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE);
// -proxy sets a proxy for all outgoing network traffic
// -noproxy (or -proxy=0) as well as the empty string can be used to not set a proxy, this is the default
std::string proxyArg = gArgs.GetArg("-proxy", "");
SetLimited(NET_TOR);
if (proxyArg != "" && proxyArg != "0") {
CService proxyAddr;
if (!Lookup(proxyArg.c_str(), proxyAddr, 9050, fNameLookup)) {
return InitError(strprintf(_("Invalid -proxy address or hostname: '%s'"), proxyArg));
}
proxyType addrProxy = proxyType(proxyAddr, proxyRandomize);
if (!addrProxy.IsValid())
return InitError(strprintf(_("Invalid -proxy address or hostname: '%s'"), proxyArg));
SetProxy(NET_IPV4, addrProxy);
SetProxy(NET_IPV6, addrProxy);
SetProxy(NET_TOR, addrProxy);
SetNameProxy(addrProxy);
SetLimited(NET_TOR, false); // by default, -proxy sets onion as reachable, unless -noonion later
}
// -onion can be used to set only a proxy for .onion, or override normal proxy for .onion addresses
// -noonion (or -onion=0) disables connecting to .onion entirely
// An empty string is used to not override the onion proxy (in which case it defaults to -proxy set above, or none)
std::string onionArg = gArgs.GetArg("-onion", "");
if (onionArg != "") {
if (onionArg == "0") { // Handle -noonion/-onion=0
SetLimited(NET_TOR); // set onions as unreachable
} else {
CService onionProxy;
if (!Lookup(onionArg.c_str(), onionProxy, 9050, fNameLookup)) {
return InitError(strprintf(_("Invalid -onion address or hostname: '%s'"), onionArg));
}
proxyType addrOnion = proxyType(onionProxy, proxyRandomize);
if (!addrOnion.IsValid())
return InitError(strprintf(_("Invalid -onion address or hostname: '%s'"), onionArg));
SetProxy(NET_TOR, addrOnion);
SetLimited(NET_TOR, false);
}
}
// see Step 2: parameter interactions for more information about these
fListen = gArgs.GetBoolArg("-listen", DEFAULT_LISTEN);
fDiscover = gArgs.GetBoolArg("-discover", true);
fRelayTxes = !gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
for (const std::string& strAddr : gArgs.GetArgs("-externalip")) {
CService addrLocal;
if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
AddLocal(addrLocal, LOCAL_MANUAL);
else
return InitError(ResolveErrMsg("externalip", strAddr));
}
#if ENABLE_ZMQ
pzmqNotificationInterface = CZMQNotificationInterface::Create();
if (pzmqNotificationInterface) {
RegisterValidationInterface(pzmqNotificationInterface);
}
#endif
uint64_t nMaxOutboundLimit = 0; //unlimited unless -maxuploadtarget is set
uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME;
if (gArgs.IsArgSet("-maxuploadtarget")) {
nMaxOutboundLimit = gArgs.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET)*1024*1024;
}
// ********************************************************* Step 7: load block chain
fReindex = gArgs.GetBoolArg("-reindex", false);
bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false);
// cache size calculations
int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20);
nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be less than nMinDbCache
nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); // total cache cannot be greater than nMaxDbcache
int64_t nBlockTreeDBCache = nTotalCache / 8;
nBlockTreeDBCache = std::min(nBlockTreeDBCache, (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX) ? nMaxBlockDBAndTxIndexCache : nMaxBlockDBCache) << 20);
nTotalCache -= nBlockTreeDBCache;
int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // use 25%-50% of the remainder for disk cache
nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20); // cap total coins db cache
nTotalCache -= nCoinDBCache;
nCoinCacheUsage = nTotalCache; // the rest goes to in-memory cache
int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
LogPrintf("Cache configuration:\n");
LogPrintf("* Using %.1fMiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024));
LogPrintf("* Using %.1fMiB for chain state database\n", nCoinDBCache * (1.0 / 1024 / 1024));
LogPrintf("* Using %.1fMiB for in-memory UTXO set (plus up to %.1fMiB of unused mempool space)\n", nCoinCacheUsage * (1.0 / 1024 / 1024), nMempoolSizeMax * (1.0 / 1024 / 1024));
bool fLoaded = false;
while (!fLoaded && !fRequestShutdown) {
bool fReset = fReindex;
std::string strLoadError;
uiInterface.InitMessage(_("Loading block index..."));
nStart = GetTimeMillis();
do {
try {
UnloadBlockIndex();
pcoinsTip.reset();
pcoinsdbview.reset();
pcoinscatcher.reset();
// new CBlockTreeDB tries to delete the existing file, which
// fails if it's still open from the previous loop. Close it first:
pblocktree.reset();
pblocktree.reset(new CBlockTreeDB(nBlockTreeDBCache, false, fReset));
if (fReset) {
pblocktree->WriteReindexing(true);
//If we're reindexing in prune mode, wipe away unusable block files and all undo data files
if (fPruneMode)
CleanupBlockRevFiles();
}
if (fRequestShutdown) break;
// LoadBlockIndex will load fTxIndex from the db, or set it if
// we're reindexing. It will also load fHavePruned if we've
// ever removed a block file from disk.
// Note that it also sets fReindex based on the disk flag!
// From here on out fReindex and fReset mean something different!
if (!LoadBlockIndex(chainparams)) {
strLoadError = _("Error loading block database");
break;
}
// If the loaded chain has a wrong genesis, bail out immediately
// (we're likely using a testnet datadir, or the other way around).
if (!mapBlockIndex.empty() && mapBlockIndex.count(chainparams.GetConsensus().hashGenesisBlock) == 0)
return InitError(_("Incorrect or no genesis block found. Wrong datadir for network?"));
// Check for changed -txindex state
if (fTxIndex != gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
strLoadError = _("You need to rebuild the database using -reindex to change -txindex");
break;
}
// Check for changed -prune state. What we are concerned about is a user who has pruned blocks
// in the past, but is now trying to run unpruned.
if (fHavePruned && !fPruneMode) {
strLoadError = _("You need to rebuild the database using -reindex to go back to unpruned mode. This will redownload the entire blockchain");
break;
}
// At this point blocktree args are consistent with what's on disk.
// If we're not mid-reindex (based on disk + args), add a genesis block on disk
// (otherwise we use the one already on disk).
// This is called again in ThreadImport after the reindex completes.
if (!fReindex && !LoadGenesisBlock(chainparams)) {
strLoadError = _("Error initializing block database");
break;
}
// At this point we're either in reindex or we've loaded a useful
// block tree into mapBlockIndex!
pcoinsdbview.reset(new CCoinsViewDB(nCoinDBCache, false, fReset || fReindexChainState));
pcoinscatcher.reset(new CCoinsViewErrorCatcher(pcoinsdbview.get()));
// If necessary, upgrade from older database format.
// This is a no-op if we cleared the coinsviewdb with -reindex or -reindex-chainstate
if (!pcoinsdbview->Upgrade()) {
strLoadError = _("Error upgrading chainstate database");
break;
}
// ReplayBlocks is a no-op if we cleared the coinsviewdb with -reindex or -reindex-chainstate
if (!ReplayBlocks(chainparams, pcoinsdbview.get())) {
strLoadError = _("Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.");
break;
}
// The on-disk coinsdb is now in a good state, create the cache
pcoinsTip.reset(new CCoinsViewCache(pcoinscatcher.get()));
bool is_coinsview_empty = fReset || fReindexChainState || pcoinsTip->GetBestBlock().IsNull();
if (!is_coinsview_empty) {
// LoadChainTip sets chainActive based on pcoinsTip's best block
if (!LoadChainTip(chainparams)) {
strLoadError = _("Error initializing block database");
break;
}
assert(chainActive.Tip() != nullptr);
}
if (!fReset) {
// Note that RewindBlockIndex MUST run even if we're about to -reindex-chainstate.
// It both disconnects blocks based on chainActive, and drops block data in
// mapBlockIndex based on lack of available witness data.
uiInterface.InitMessage(_("Rewinding blocks..."));
if (!RewindBlockIndex(chainparams)) {
strLoadError = _("Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain");
break;
}
}
if (!is_coinsview_empty) {
uiInterface.InitMessage(_("Verifying blocks..."));
if (fHavePruned && gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) {
LogPrintf("Prune: pruned datadir may not have more than %d blocks; only checking available blocks",
MIN_BLOCKS_TO_KEEP);
}
{
LOCK(cs_main);
CBlockIndex* tip = chainActive.Tip();
RPCNotifyBlockChange(true, tip);
if (tip && tip->nTime > GetAdjustedTime() + 2 * 60 * 60) {
strLoadError = _("The block database contains a block which appears to be from the future. "
"This may be due to your computer's date and time being set incorrectly. "
"Only rebuild the block database if you are sure that your computer's date and time are correct");
break;
}
}
if (!CVerifyDB().VerifyDB(chainparams, pcoinsdbview.get(), gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL),
gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) {
strLoadError = _("Corrupted block database detected");
break;
}
}
} catch (const std::exception& e) {
LogPrintf("%s\n", e.what());
strLoadError = _("Error opening block database");
break;
}
fLoaded = true;
} while(false);
if (!fLoaded && !fRequestShutdown) {
// first suggest a reindex
if (!fReset) {
bool fRet = uiInterface.ThreadSafeQuestion(
strLoadError + ".\n\n" + _("Do you want to rebuild the block database now?"),
strLoadError + ".\nPlease restart with -reindex or -reindex-chainstate to recover.",
"", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT);
if (fRet) {
fReindex = true;
fRequestShutdown = false;
} else {
LogPrintf("Aborted block database rebuild. Exiting.\n");
return false;
}
} else {
return InitError(strLoadError);
}
}
}
// As LoadBlockIndex can take several minutes, it's possible the user
// requested to kill the GUI during the last operation. If so, exit.
// As the program has not fully started yet, Shutdown() is possibly overkill.
if (fRequestShutdown)
{
LogPrintf("Shutdown requested. Exiting.\n");
return false;
}
if (fLoaded) {
LogPrintf(" block index %15dms\n", GetTimeMillis() - nStart);
}
fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_filein(fsbridge::fopen(est_path, "rb"), SER_DISK, CLIENT_VERSION);
// Allowed to fail as this file IS missing on first startup.
if (!est_filein.IsNull())
::feeEstimator.Read(est_filein);
fFeeEstimatesInitialized = true;
// ********************************************************* Step 8: load wallet
#ifdef ENABLE_WALLET
if (!OpenWallets())
return false;
#else
LogPrintf("No wallet support compiled in!\n");
#endif
// ********************************************************* Step 9: data directory maintenance
// if pruning, unset the service bit and perform the initial blockstore prune
// after any wallet rescanning has taken place.
if (fPruneMode) {
LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
if (!fReindex) {
uiInterface.InitMessage(_("Pruning blockstore..."));
PruneAndFlush();
}
}
if (chainparams.GetConsensus().vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) {
// Only advertise witness capabilities if they have a reasonable start time.
// This allows us to have the code merged without a defined softfork, by setting its
// end time to 0.
// Note that setting NODE_WITNESS is never required: the only downside from not
// doing so is that after activation, no upgraded nodes will fetch from you.
nLocalServices = ServiceFlags(nLocalServices | NODE_WITNESS);
}
// ********************************************************* Step 10: import blocks
if (!CheckDiskSpace())
return false;
// Either install a handler to notify us when genesis activates, or set fHaveGenesis directly.
// No locking, as this happens before any background thread is started.
if (chainActive.Tip() == nullptr) {
uiInterface.NotifyBlockTip.connect(BlockNotifyGenesisWait);
} else {
fHaveGenesis = true;
}
if (gArgs.IsArgSet("-blocknotify"))
uiInterface.NotifyBlockTip.connect(BlockNotifyCallback);
std::vector<fs::path> vImportFiles;
for (const std::string& strFile : gArgs.GetArgs("-loadblock")) {
vImportFiles.push_back(strFile);
}
threadGroup.create_thread(boost::bind(&ThreadImport, vImportFiles));
// Wait for genesis block to be processed
{
WaitableLock lock(cs_GenesisWait);
// We previously could hang here if StartShutdown() is called prior to
// ThreadImport getting started, so instead we just wait on a timer to
// check ShutdownRequested() regularly.
while (!fHaveGenesis && !ShutdownRequested()) {
condvar_GenesisWait.wait_for(lock, std::chrono::milliseconds(500));
}
uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait);
}
if (ShutdownRequested()) {
return false;
}
// ********************************************************* Step 11: start node
int chain_active_height;
//// debug print
{
LOCK(cs_main);
LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size());
chain_active_height = chainActive.Height();
}
LogPrintf("nBestHeight = %d\n", chain_active_height);
if (gArgs.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION))
StartTorControl(threadGroup, scheduler);
Discover(threadGroup);
// Map ports with UPnP
MapPort(gArgs.GetBoolArg("-upnp", DEFAULT_UPNP));
CConnman::Options connOptions;
connOptions.nLocalServices = nLocalServices;
connOptions.nMaxConnections = nMaxConnections;
connOptions.nMaxOutbound = std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections);
connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS;
connOptions.nMaxFeeler = 1;
connOptions.nBestHeight = chain_active_height;
connOptions.uiInterface = &uiInterface;
connOptions.m_msgproc = peerLogic.get();
connOptions.nSendBufferMaxSize = 1000*gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
connOptions.nReceiveFloodSize = 1000*gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
connOptions.m_added_nodes = gArgs.GetArgs("-addnode");
connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe;
connOptions.nMaxOutboundLimit = nMaxOutboundLimit;
for (const std::string& strBind : gArgs.GetArgs("-bind")) {
CService addrBind;
if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) {
return InitError(ResolveErrMsg("bind", strBind));
}
connOptions.vBinds.push_back(addrBind);
}
for (const std::string& strBind : gArgs.GetArgs("-whitebind")) {
CService addrBind;
if (!Lookup(strBind.c_str(), addrBind, 0, false)) {
return InitError(ResolveErrMsg("whitebind", strBind));
}
if (addrBind.GetPort() == 0) {
return InitError(strprintf(_("Need to specify a port with -whitebind: '%s'"), strBind));
}
connOptions.vWhiteBinds.push_back(addrBind);
}
for (const auto& net : gArgs.GetArgs("-whitelist")) {
CSubNet subnet;
LookupSubNet(net.c_str(), subnet);
if (!subnet.IsValid())
return InitError(strprintf(_("Invalid netmask specified in -whitelist: '%s'"), net));
connOptions.vWhitelistedRange.push_back(subnet);
}
connOptions.vSeedNodes = gArgs.GetArgs("-seednode");
// Initiate outbound connections unless connect=0
connOptions.m_use_addrman_outgoing = !gArgs.IsArgSet("-connect");
if (!connOptions.m_use_addrman_outgoing) {
const auto connect = gArgs.GetArgs("-connect");
if (connect.size() != 1 || connect[0] != "0") {
connOptions.m_specified_outgoing = connect;
}
}
if (!connman.Start(scheduler, connOptions)) {
return false;
}
// ********************************************************* Step 12: finished
SetRPCWarmupFinished();
uiInterface.InitMessage(_("Done loading"));
#ifdef ENABLE_WALLET
StartWallets(scheduler);
#endif
return true;
}
|
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/searchcore/proton/persistenceengine/ipersistencehandler.h>
#include <vespa/searchcore/proton/persistenceengine/persistence_handler_map.h>
#include <vespa/document/fieldvalue/document.h>
#include <vespa/document/update/documentupdate.h>
#include <vespa/vespalib/testkit/testapp.h>
using namespace document;
using namespace proton;
using HandlerSnapshot = PersistenceHandlerMap::HandlerSnapshot;
// Minimal no-op implementation of IPersistenceHandler: every callback is an
// empty stub. The tests below only care about handler *identity* (pointer
// values) when exercising PersistenceHandlerMap bookkeeping, so no behavior
// is needed here.
struct DummyPersistenceHandler : public IPersistenceHandler {
    using SP = std::shared_ptr<DummyPersistenceHandler>;
    void initialize() override {}
    void handlePut(FeedToken, const storage::spi::Bucket &, storage::spi::Timestamp, DocumentSP) override {}
    void handleUpdate(FeedToken, const storage::spi::Bucket &, storage::spi::Timestamp, DocumentUpdateSP) override {}
    void handleRemove(FeedToken, const storage::spi::Bucket &, storage::spi::Timestamp, const document::DocumentId &) override {}
    void handleListBuckets(IBucketIdListResultHandler &) override {}
    void handleSetClusterState(const storage::spi::ClusterState &, IGenericResultHandler &) override {}
    void handleSetActiveState(const storage::spi::Bucket &, storage::spi::BucketInfo::ActiveState, std::shared_ptr<IGenericResultHandler>) override {}
    void handleGetBucketInfo(const storage::spi::Bucket &, IBucketInfoResultHandler &) override {}
    void handleCreateBucket(FeedToken, const storage::spi::Bucket &) override {}
    void handleDeleteBucket(FeedToken, const storage::spi::Bucket &) override {}
    void handleGetModifiedBuckets(IBucketIdListResultHandler &) override {}
    void handleSplit(FeedToken, const storage::spi::Bucket &, const storage::spi::Bucket &, const storage::spi::Bucket &) override {}
    void handleJoin(FeedToken, const storage::spi::Bucket &, const storage::spi::Bucket &, const storage::spi::Bucket &) override {}
    RetrieversSP getDocumentRetrievers(storage::spi::ReadConsistency) override { return RetrieversSP(); }
    void handleListActiveBuckets(IBucketIdListResultHandler &) override {}
    void handlePopulateActiveBuckets(document::BucketId::List, IGenericResultHandler &) override {}
};
// Shared test fixtures: three bucket spaces (space_null is never populated
// by the Fixture below), three document types, and four distinct dummy
// handlers. handler_a_new exists solely to test replacement of handler_a.
BucketSpace space_1(1);
BucketSpace space_2(2);
BucketSpace space_null(3);
DocTypeName type_a("a");
DocTypeName type_b("b");
DocTypeName type_c("c");
DummyPersistenceHandler::SP handler_a(std::make_shared<DummyPersistenceHandler>());
DummyPersistenceHandler::SP handler_b(std::make_shared<DummyPersistenceHandler>());
DummyPersistenceHandler::SP handler_c(std::make_shared<DummyPersistenceHandler>());
DummyPersistenceHandler::SP handler_a_new(std::make_shared<DummyPersistenceHandler>());
void
assertHandler(const IPersistenceHandler::SP & lhs, const IPersistenceHandler * rhs)
{
EXPECT_EQUAL(lhs.get(), rhs);
}
void
assertHandler(const IPersistenceHandler::SP &lhs, const IPersistenceHandler::SP &rhs)
{
EXPECT_EQUAL(lhs.get(), rhs.get());
}
template <typename T>
void
assertNullHandler(const T & handler)
{
EXPECT_TRUE(! handler);
}
// Check that the snapshot contains exactly the expected handlers, in order.
// A length mismatch is caught by the first EXPECT; the loop then compares
// element-wise only as far as both sequences extend.
void
assertSnapshot(const std::vector<IPersistenceHandler::SP> &exp, HandlerSnapshot snapshot)
{
    EXPECT_EQUAL(exp.size(), snapshot.size());
    auto &sequence = snapshot.handlers();
    for (size_t i = 0; i < exp.size() && sequence.valid(); ++i, sequence.next()) {
        EXPECT_EQUAL(exp[i].get(), sequence.get());
    }
}
// Common starting state for all tests: space_1 holds handlers for types a
// and b, space_2 holds one for type c. putHandler must return null here
// because no previous handler exists for any of the slots.
struct Fixture {
    PersistenceHandlerMap map;
    Fixture() {
        TEST_DO(assertNullHandler(map.putHandler(space_1, type_a, handler_a)));
        TEST_DO(assertNullHandler(map.putHandler(space_1, type_b, handler_b)));
        TEST_DO(assertNullHandler(map.putHandler(space_2, type_c, handler_c)));
    }
};
// Lookup returns the handler registered for a (space, type) pair and null
// for combinations that were never registered.
TEST_F("require that handlers can be retrieved", Fixture)
{
    TEST_DO(assertHandler(handler_a, f.map.getHandler(space_1, type_a)));
    TEST_DO(assertHandler(handler_b, f.map.getHandler(space_1, type_b)));
    TEST_DO(assertHandler(handler_c, f.map.getHandler(space_2, type_c)));
    TEST_DO(assertNullHandler(f.map.getHandler(space_1, type_c)));
    TEST_DO(assertNullHandler(f.map.getHandler(space_null, type_a)));
}
// Replacing an existing registration returns the previous handler and makes
// the new handler visible to subsequent lookups.
TEST_F("require that old handler is returned if replaced by new handler", Fixture)
{
    TEST_DO(assertHandler(handler_a, f.map.putHandler(space_1, type_a, handler_a_new)));
    TEST_DO(assertHandler(handler_a_new, f.map.getHandler(space_1, type_a)));
}
// Removal returns the removed handler and leaves the slot empty; removing
// an entry that was never registered yields null.
TEST_F("require that handler can be removed (and old handler returned)", Fixture)
{
    TEST_DO(assertHandler(handler_a, f.map.removeHandler(space_1, type_a)));
    TEST_DO(assertNullHandler(f.map.getHandler(space_1, type_a)));
    TEST_DO(assertNullHandler(f.map.removeHandler(space_1, type_c)));
}
// Snapshot over all bucket spaces. NOTE(review): the expected order
// {c, a, b} reflects the map's current internal iteration order and is
// asserted as-is — an internal layout change would require updating this.
TEST_F("require that handler snapshot can be retrieved for all handlers", Fixture)
{
    TEST_DO(assertSnapshot({handler_c, handler_a, handler_b}, f.map.getHandlerSnapshot()));
}
// Per-bucket-space snapshots contain only that space's handlers; a space
// with no registrations yields an empty snapshot.
TEST_F("require that handler snapshot can be retrieved for given bucket space", Fixture)
{
    TEST_DO(assertSnapshot({handler_a, handler_b}, f.map.getHandlerSnapshot(space_1)));
    TEST_DO(assertSnapshot({handler_c}, f.map.getHandlerSnapshot(space_2)));
    TEST_DO(assertSnapshot({}, f.map.getHandlerSnapshot(space_null)));
}
// vespalib testkit entry point: runs every TEST/TEST_F case defined above.
TEST_MAIN()
{
    TEST_RUN_ALL();
}
|
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <vector>
#define DEV 1
using std::cin;
using std::cout;
using std::endl;
using std::terminate;
/**
 * Solve one "Record Breaker" test case (Kick Start 2020 Round D).
 *
 * Reads N and N visit counts from stdin. A day is record breaking when its
 * count is strictly greater than the count of EVERY preceding day AND
 * strictly greater than the following day (the last day has no follower).
 * Prints "Case #x: <count of record-breaking days>".
 *
 * Fix: the running maximum of all previous days is now updated on every
 * iteration. The original only updated it when a record-breaking day was
 * found, so inputs like [5, 5, 3, 4, 2] were miscounted (1 instead of 0).
 * Also replaces the non-standard VLA with std::vector.
 *
 * @param x 1-based test-case index used in the output line.
 */
void solve(int x) {
    int N;
    std::cin >> N;
    // Sentinels at both ends: -1 is below any visit count (counts are
    // non-negative), so day 1 and day N need no special-casing.
    std::vector<int> V(N + 2, -1);
    for (int i = 1; i <= N; ++i) {
        std::cin >> V[i];
    }
    int records = 0;
    int best = -1;  // maximum visit count among days 1..i-1
    for (int i = 1; i <= N; ++i) {
        if (V[i] > best && V[i] > V[i + 1]) {
            ++records;
        }
        if (V[i] > best) {
            best = V[i];  // track the true prefix maximum every iteration
        }
    }
    std::cout << "Case #" << x << ": " << records << "\n";
}
// Entry point: reads the number of test cases and solves each one.
// In DEV builds, stdin is first redirected to the sample input file; the
// read/solve loop itself is shared by both configurations.
int main() {
#ifdef DEV
    std::ifstream in("KickStart/Round D 2020/Record Breaker.input");
    std::cin.rdbuf(in.rdbuf());
#endif
    int t;
    std::cin >> t;
    for (int x = 1; x <= t; ++x) {
        solve(x);
    }
    return 0;
}
|
// std::codecvt implementation details, generic version -*- C++ -*-
// Copyright (C) 2002 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
//
// ISO C++ 14882: 22.2.1.5 - Template class codecvt
//
// Written by Benjamin Kosnik <bkoz@redhat.com>
#include <locale>
namespace std
{
// Specializations.
#ifdef _GLIBCPP_USE_WCHAR_T
// Convert internal wchar_t characters to external multibyte chars with the
// C library's restartable wcsrtombs, carrying shift state in __state.
codecvt_base::result
codecvt<wchar_t, char, mbstate_t>::
do_out(state_type& __state, const intern_type* __from,
       const intern_type* __from_end, const intern_type*& __from_next,
       extern_type* __to, extern_type* __to_end,
       extern_type*& __to_next) const
{
    result __ret = error;
    // NOTE(review): __len mixes an element count (wide chars) with a byte
    // count (output chars); a wide char may expand to several bytes. This
    // mirrors the historical generic implementation — confirm before reuse.
    size_t __len = min(__from_end - __from, __to_end - __to);
    // wcsrtombs advances __from past the converted prefix and returns the
    // number of bytes written, or (size_t)-1 on an invalid character.
    size_t __conv = wcsrtombs(__to, &__from, __len, &__state);
    if (__conv == __len)
    {
        // Entire requested length converted.
        __from_next = __from;
        __to_next = __to + __conv;
        __ret = ok;
    }
    else if (__conv > 0 && __conv < __len)
    {
        // Only a prefix was converted: report partial progress.
        __from_next = __from;
        __to_next = __to + __conv;
        __ret = partial;
    }
    else
        // __conv == 0 or (size_t)-1: conversion failure.
        __ret = error;
    return __ret;
}
// Convert external multibyte chars to internal wchar_t characters with the
// C library's restartable mbsrtowcs, carrying shift state in __state.
codecvt_base::result
codecvt<wchar_t, char, mbstate_t>::
do_in(state_type& __state, const extern_type* __from,
      const extern_type* __from_end, const extern_type*& __from_next,
      intern_type* __to, intern_type* __to_end,
      intern_type*& __to_next) const
{
    result __ret = error;
    // NOTE(review): as in do_out, __len mixes byte and wide-char counts;
    // kept as-is to match the historical generic implementation.
    size_t __len = min(__from_end - __from, __to_end - __to);
    // mbsrtowcs advances __from past the consumed bytes and returns the
    // number of wide chars produced, or (size_t)-1 on an invalid sequence.
    size_t __conv = mbsrtowcs(__to, &__from, __len, &__state);
    if (__conv == __len)
    {
        // Entire requested length converted.
        __from_next = __from;
        __to_next = __to + __conv;
        __ret = ok;
    }
    else if (__conv > 0 && __conv < __len)
    {
        // Only a prefix was converted: report partial progress.
        __from_next = __from;
        __to_next = __to + __conv;
        __ret = partial;
    }
    else
        // __conv == 0 or (size_t)-1: conversion failure.
        __ret = error;
    return __ret;
}
#endif
}
|
#include <cassert>
#include <cstring>
#include <algorithm>
#include "lct.h"
using std::swap;
// Node pool for the link/cut tree (1-based; index 0 is a sentinel).
// val:     value stored at the node (exact, never lazy).
// mx/pos:  maximum value and the index of the node holding it within this
//          node's splay subtree (maintained by update()).
// fa:      parent link — positive: splay-tree parent; negative -p: path
//          parent p (edge to another preferred path); 0: no parent.
// lch/rch: splay-tree children (0 = none).
// rev:     lazy "reverse this subtree" flag, propagated by push().
static struct Node {
    int val, mx, pos;
    int fa, lch, rch;
    bool rev;
} m[NMAX + 1];
// Push a pending lazy-reversal flag one level down: swap the two children
// and toggle their flags. Toggling the sentinel node 0 is harmless (it is
// toggled an even number of times or never read).
inline void push(int x) {
    if (!m[x].rev) return;
    std::swap(m[x].lch, m[x].rch);
    m[m[x].lch].rev ^= 1;
    m[m[x].rch].rev ^= 1;
    m[x].rev = false;
}
// Recompute the subtree aggregate (mx, pos) of x from its own value and its
// two splay children. chkmax (declared in lct.h) presumably folds a child's
// (mx, pos) into m[x] when the child's mx is larger — TODO confirm its
// exact comparison/tie-breaking before relying on pos ordering.
inline void update(int x) {
    m[x].mx = m[x].val;
    m[x].pos = x;
    chkmax(m[x], m[m[x].lch]);
    chkmax(m[x], m[m[x].rch]);
}
// Rotate the left child y of x up into x's place (a single right rotation).
// Preconditions: lazy flags on x and y have already been pushed.
inline void lrot(int x) {
    assert(!m[x].rev);
    int y = m[x].lch;
    assert(y);
    assert(!m[y].rev);
    // Move y's right subtree under x.
    m[m[y].rch].fa = x;
    m[x].lch = m[y].rch;
    m[y].rch = x;
    // Only a positive fa is a splay parent whose child link needs fixing;
    // a path-parent (negative) or root (0) link is carried over unchanged.
    if (m[x].fa > 0) {
        int p = m[x].fa;
        if (m[p].lch == x) m[p].lch = y;
        else m[p].rch = y;
    }
    m[y].fa = m[x].fa;
    m[x].fa = y;
    // y now roots the old subtree, so it can inherit x's aggregate directly
    // instead of a full update(y) (see the commented-out call below).
    m[y].mx = m[x].mx;
    m[y].pos = m[x].pos;
    update(x);
    // update(y);
}
// Mirror of lrot: rotate the right child y of x up into x's place (a single
// left rotation). Preconditions: lazy flags on x and y already pushed.
inline void rrot(int x) {
    assert(!m[x].rev);
    int y = m[x].rch;
    assert(y);
    assert(!m[y].rev);
    // Move y's left subtree under x.
    m[m[y].lch].fa = x;
    m[x].rch = m[y].lch;
    m[y].lch = x;
    // Fix the grandparent's child link only for a real splay parent.
    if (m[x].fa > 0) {
        int p = m[x].fa;
        if (m[p].lch == x) m[p].lch = y;
        else m[p].rch = y;
    }
    m[y].fa = m[x].fa;
    m[x].fa = y;
    // y inherits x's subtree aggregate wholesale (cheaper than update(y)).
    m[y].mx = m[x].mx;
    m[y].pos = m[x].pos;
    update(x);
    // update(y);
}
// Push lazy-reversal flags along the splay-parent chain, from the splay
// root down to x, so subsequent rotations at x see clean nodes. Stops at a
// path-parent (negative fa) or root (0).
inline void access(int x) {
    if (m[x].fa > 0) access(m[x].fa);
    push(x);
}
// Splay x to the root of its auxiliary splay tree using single rotations
// (no zig-zig/zig-zag pairing). A caller that has already pushed the lazy
// flags on the root-to-x path may pass accessed = true to skip that pass.
inline void spaly(int x, bool accessed = false) {
    if (!accessed) {
        access(x);
    }
    for (int p = m[x].fa; p > 0; p = m[x].fa) {
        if (m[p].lch == x) {
            lrot(p);
        } else {
            rrot(p);
        }
    }
}
// Reset nodes 1..n to an empty forest of singletons (node 0, the sentinel,
// is left untouched). Each node's best position initially points to itself.
void LCT::init(int n) {
    std::memset(m + 1, 0, n * sizeof(Node));
    for (int i = n; i >= 1; --i) {
        m[i].pos = i;
    }
}
// Turn the path-parent edge of x into a real splay edge: splay x's path
// parent p, make x p's preferred (right) child, and return p so the caller
// can continue walking upward. Precondition: x has a path-parent link.
auto LCT::splice(int x) -> int {
    assert(m[x].fa < 0);
    int p = -m[x].fa;
    spaly(p);
    // p's evicted right subtree keeps p as a path parent (negative link).
    m[m[p].rch].fa = -p;
    m[p].rch = x;
    m[x].fa = p;
    update(p);
    return p;
}
// Make the path from x up to the root of its represented tree a single
// preferred path (one splay tree), with x having no preferred child below.
void LCT::expose(int x) {
    spaly(x);
    // Demote x's current right subtree to a path-parent link.
    m[m[x].rch].fa = -x;
    m[x].rch = 0;
    update(x);
    // Splice path-parent edges one by one until the root is reached.
    while (m[x].fa) x = splice(x);
}
// Attach the tree rooted at y beneath x. y must be the root of its
// represented tree (no parent link of either kind after splaying).
void LCT::link(int x, int y) {
    spaly(y);
    assert(m[y].fa == 0);
    m[y].fa = -x;  // a lazy path-parent edge; no restructuring needed yet
}
// O(1)-style cut that relies on the caller's preparation.
void LCT::fastcut(int x) {
    // assume the father of x on the tree has been exposed.
    // Under that precondition, after splaying, x's only remaining parent
    // link is the path-parent edge to its tree father; zeroing it detaches
    // x's subtree.
    spaly(x);
    m[x].fa = 0;
}
// Detach x from its parent in the represented tree (no-op if x is a root).
void LCT::cut(int x) {
    expose(x);
    spaly(x);
    // After expose+splay, x's left splay subtree holds exactly x's path
    // ancestors; its rightmost node is x's direct tree parent.
    int y = m[x].lch;
    if (!y) return;  // x is already a root
    push(y);
    while (m[y].rch) {
        y = m[y].rch;
        push(y);
    }
    // Flags along the descent were just pushed, so splay without re-access,
    // then drop y's right subtree — which is exactly {x} — making x a root.
    spaly(y, true);
    m[m[y].rch].fa = 0;
    m[y].rch = 0;
    update(y);
}
// Re-root the represented tree at x: expose the root path, splay x on top,
// and lazily flip the path's orientation via the reversal flag.
void LCT::evert(int x) {
    expose(x);
    spaly(x);
    m[x].rev = !m[x].rev;
}
// Set the value stored at node x to v.
void LCT::set(int x, int v) {
    spaly(x);  // bring x to its splay root so only x's aggregate is touched
    m[x].val = v;
    // NOTE(review): the aggregate is only ever raised here — if v is
    // smaller than the current maximum held at x itself, mx/pos go stale.
    // Presumably callers only increase values; confirm before reusing.
    if (v > m[x].mx) {
        m[x].mx = v;
        m[x].pos = x;
    }
}
// Return the index of the node holding the maximum value on the path x..y,
// or 0 if x and y are in different trees.
auto LCT::query(int x, int y) -> int {
    evert(x);    // make x the root so x..y becomes a root path
    expose(y);
    spaly(y);
    // If connected, x now sits in y's splay tree and climbing splay parents
    // terminates at y; if not, x stayed a root (fa == 0) and the loop exits
    // immediately with x != y.
    while (m[x].fa) x = m[x].fa;
    return x != y ? 0 : m[y].pos;
}
// Return the raw value stored at node x (val is exact; no splaying needed).
auto LCT::get(int x) -> int {
    return m[x].val;
}
|
#include <chrono> // for high_resolution_clock
#include <iostream>
#include <fstream>
#include <cstdlib>
#include <ctime>
#include <ros/ros.h>
#include <eigen_matrix_utils/eigen_matrix_utils.h>
#include <state_space_filters/common_filters.h>
#include <gtest/gtest.h>
using namespace eigen_control_toolbox;
// CSV-style formatting used when dumping Eigen vectors to the .plt files.
Eigen::IOFormat fmt(Eigen::StreamPrecision, Eigen::DontAlignCols, ", ", ", ", "", "", "", "");
ros::NodeHandle* nh;
constexpr int stress_cycles = 1e5;  // iterations for the performance tests
constexpr int cycles = 1000;        // iterations for the plot/convergence tests
double natural_frequency = 50; // [rad/s] 2 pi * f
double sampling_period = 0.001; // s
// Exercises construction, init(), dimension queries and state seeding of
// the dynamically-sized first-order low-pass filter (3 channels).
TEST(TestSuite, FirstOrderLowPassX)
{
    EXPECT_NO_FATAL_FAILURE( FirstOrderLowPassX lpf );
    EXPECT_NO_FATAL_FAILURE( FirstOrderLowPassX lpf(natural_frequency,sampling_period, 3) );
    EXPECT_NO_FATAL_FAILURE( FirstOrderLowPassXPtr lpf );
    EXPECT_NO_FATAL_FAILURE( FirstOrderLowPassXPtr lpf(
        new FirstOrderLowPassX(natural_frequency,sampling_period, 3)) );
    FirstOrderLowPassX lpf;
    FirstOrderLowPassXPtr lpf_ptr(new FirstOrderLowPassX());
    EXPECT_TRUE( lpf.init(natural_frequency, sampling_period, 3) );
    EXPECT_TRUE( lpf_ptr->init(natural_frequency, sampling_period, 3) );
    EXPECT_NO_FATAL_FAILURE( int order = lpf.xDim() );
    EXPECT_NO_FATAL_FAILURE( int nin = lpf.uDim() );
    EXPECT_NO_FATAL_FAILURE( int nout = lpf.yDim() );
    EXPECT_NO_FATAL_FAILURE( int order = lpf_ptr->xDim() );
    EXPECT_NO_FATAL_FAILURE( int nin = lpf_ptr->uDim() );
    EXPECT_NO_FATAL_FAILURE( int nout = lpf_ptr->yDim() );
    // Both filters were initialized with identical parameters, so their
    // dimensions must agree. (Bug fix: the original compared lpf with
    // itself — e.g. lpf.xDim() == lpf.xDim() — which can never fail.)
    EXPECT_TRUE( lpf.xDim() == lpf_ptr->xDim() );
    EXPECT_TRUE( lpf.uDim() == lpf_ptr->uDim() );
    EXPECT_TRUE( lpf.yDim() == lpf_ptr->yDim() );
    int ch;
    EXPECT_NO_FATAL_FAILURE( ch = lpf.getChannels() );
    Eigen::VectorXd u(ch); u.setRandom();
    Eigen::VectorXd y(ch); y.setRandom();
    // Seeding from a last input/output pair must be reproduced exactly by
    // the u()/y() accessors.
    EXPECT_TRUE( lpf.setStateFromLastIO(u, y) );
    EXPECT_TRUE( eigen_utils::norm(lpf.u() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.u()));
    EXPECT_TRUE( eigen_utils::norm(lpf.y() - y) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.y()));
    EXPECT_NO_FATAL_FAILURE( y=lpf.update(u) );
}
// Measures raw update() throughput of the dynamic filter for 2..9 channels
// and verifies the output converged to the constant input afterwards.
TEST(TestSuite, FirstOrderLowPassXPerformance)
{
    for(int nch=2; nch<10; nch++)
    {
        FirstOrderLowPassX lpf;
        lpf.init(natural_frequency, sampling_period, nch);
        int ch = lpf.getChannels();
        Eigen::VectorXd u(ch); u.setRandom();
        Eigen::VectorXd y(ch); y.setRandom();
        lpf.setStateFromLastIO(u, y);
        auto start = std::chrono::high_resolution_clock::now();
        // Bug fix: the inner counter no longer shadows the channel loop
        // variable (both were named 'i' in the original).
        for (unsigned int cycle=0; cycle<stress_cycles; cycle++)
        {
            y=lpf.update(u);
        }
        auto finish = std::chrono::high_resolution_clock::now();
        std::chrono::microseconds elapsed = std::chrono::duration_cast<std::chrono::microseconds>(finish - start);
        std::cout << "Num. Channels: " << ch << ", Performance over " << stress_cycles << " cycles: " << elapsed.count() << "us\t\t";
        std::cout << "Time/ch/cycles: " << (double(elapsed.count())/double(ch))/double(stress_cycles) << "us" << std::endl;
        // After 1e5 steps with a constant input, a stable low-pass filter
        // must track the input to numerical precision.
        EXPECT_TRUE( eigen_utils::norm(lpf.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.y() - u));
    }
}
// Same throughput measurement as above, but driving the dynamically-sized
// filter through its smart-pointer wrapper, for 2..9 channels.
TEST(TestSuite, FirstOrderLowPassXPtrPerformance)
{
    for(int nch=2; nch<10; nch++)
    {
        FirstOrderLowPassXPtr lpf(new FirstOrderLowPassX());
        lpf->init(natural_frequency, sampling_period, nch);
        int ch = lpf->getChannels();
        Eigen::VectorXd u(ch); u.setRandom();
        Eigen::VectorXd y(ch); y.setRandom();
        lpf->setStateFromLastIO(u, y);
        auto start = std::chrono::high_resolution_clock::now();
        // Bug fix: the inner counter no longer shadows the channel loop
        // variable (both were named 'i' in the original).
        for (unsigned int cycle=0; cycle<stress_cycles; cycle++)
        {
            y=lpf->update(u);
        }
        auto finish = std::chrono::high_resolution_clock::now();
        std::chrono::microseconds elapsed = std::chrono::duration_cast<std::chrono::microseconds>(finish - start);
        std::cout << "Num. Channels: " << ch << ", Performance over " << stress_cycles << " cycles: " << elapsed.count() << "us\t\t";
        std::cout << "Time/ch/cycles: " << (double(elapsed.count())/double(ch))/double(stress_cycles) << "us" << std::endl;
        // Convergence to the constant input after 1e5 steps.
        EXPECT_TRUE( eigen_utils::norm(lpf->y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf->y() - u));
    }
}
// Declare a test
// Steps a 3-channel dynamic filter with a constant input (first channel
// forced to 1.0) and dumps input/state/output rows to testX.plt, then
// checks that the output converged to the input.
TEST(TestSuite, FirstOrderLowPassXPlot)
{
    FirstOrderLowPassX lpf;
    lpf.init(natural_frequency, sampling_period, 3);
    int ch = lpf.getChannels();
    Eigen::VectorXd u(ch); u.setRandom();
    Eigen::VectorXd y(ch); y.setRandom();
    lpf.setStateFromLastIO(u, y);
    std::ofstream ofile("testX.plt", std::ofstream::out);
    ofile << "u1,u2,u3,"
          << "x1,x2,x3,"
          << "y1,y2,y3,"
          << "(u-y).norm,"
          << "(u-i).norm" << std::endl;
    u(0) = 1.0;
    // 1000 steps at 1 kHz against a 50 rad/s pole leaves ample settling time.
    for (unsigned int i=0;i<cycles;i++)
    {
        ofile << u.transpose().format(fmt) << ", "
              << lpf.x().transpose().format(fmt) << ", "
              << lpf.y().transpose().format(fmt) << ", "
              << eigen_utils::norm(u-lpf.y()) << ","
              << eigen_utils::norm(u-lpf.u()) << std::endl;
        y=lpf.update(u);
    }
    EXPECT_TRUE( eigen_utils::norm(lpf.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.y() - u));
}
// Construction, dimension and state-seeding checks for the fixed-size
// 6-channel first-order low-pass filter.
TEST(TestSuite, FirstOrderLowPass6)
{
    EXPECT_NO_FATAL_FAILURE( FirstOrderLowPass<6> lpf(natural_frequency,sampling_period) );
    EXPECT_NO_FATAL_FAILURE( FirstOrderLowPassPtr<6> lpf(
        new FirstOrderLowPass<6>(natural_frequency,sampling_period)) );
    FirstOrderLowPass<6> lpf(natural_frequency,sampling_period);
    FirstOrderLowPassPtr<6> lpf_ptr(new FirstOrderLowPass<6>(natural_frequency,sampling_period));
    EXPECT_NO_FATAL_FAILURE( int order = lpf.xDim() );
    EXPECT_NO_FATAL_FAILURE( int nin = lpf.uDim() );
    EXPECT_NO_FATAL_FAILURE( int nout = lpf.yDim() );
    EXPECT_NO_FATAL_FAILURE( int order = lpf_ptr->xDim() );
    EXPECT_NO_FATAL_FAILURE( int nin = lpf_ptr->uDim() );
    EXPECT_NO_FATAL_FAILURE( int nout = lpf_ptr->yDim() );
    int ch;
    EXPECT_NO_FATAL_FAILURE( ch = lpf.getChannels() );
    Eigen::VectorXd u(ch); u.setRandom();
    Eigen::VectorXd y(ch); y.setRandom();
    // Seeding from a last input/output pair must round-trip exactly.
    EXPECT_TRUE( lpf.setStateFromLastIO(u, y) );
    EXPECT_TRUE( eigen_utils::norm(lpf.u() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.u()));
    EXPECT_TRUE( eigen_utils::norm(lpf.y() - y) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.y()));
    EXPECT_NO_FATAL_FAILURE( y=lpf.update(u) );
}
// Throughput of the fixed-size 6-channel filter with a step input on the
// first channel, plus a convergence check after the stress run.
TEST(TestSuite, FirstOrderLowPass6Performance)
{
    FirstOrderLowPass<6> lpf(natural_frequency,sampling_period);
    int ch = lpf.getChannels();
    Eigen::VectorXd u(ch); u.setZero();
    Eigen::VectorXd y(ch); y.setZero();
    lpf.setStateFromLastIO(u, y);
    u(0) = 1.0;  // unit step on channel 0 only
    auto start = std::chrono::high_resolution_clock::now();
    for (unsigned int i=0;i<stress_cycles ;i++)
    {
        y=lpf.update(u);
    }
    auto finish = std::chrono::high_resolution_clock::now();
    std::chrono::microseconds elapsed = std::chrono::duration_cast<std::chrono::microseconds>(finish - start);
    std::cout << "Num. Channels: " << ch << ", Performance over " << stress_cycles << " cycles: " << elapsed.count() << "us\t\t";
    std::cout << "Time/ch/cycles: " << (double(elapsed.count())/double(ch))/double(stress_cycles) << "us" << std::endl;
    // After 1e5 steps the output must track the constant input.
    EXPECT_TRUE( eigen_utils::norm(lpf.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.y() - u));
}
// Same as FirstOrderLowPass6Performance but driven through the smart
// pointer wrapper, to expose any indirection overhead.
TEST(TestSuite, FirstOrderLowPass6PtrPerformance)
{
    FirstOrderLowPassPtr<6> lpf(new FirstOrderLowPass<6>(natural_frequency,sampling_period));
    int ch = lpf->getChannels();
    Eigen::VectorXd u(ch); u.setZero();
    Eigen::VectorXd y(ch); y.setZero();
    lpf->setStateFromLastIO(u, y);
    u(0) = 1.0;  // unit step on channel 0 only
    auto start = std::chrono::high_resolution_clock::now();
    for (unsigned int i=0;i<stress_cycles ;i++)
    {
        y=lpf->update(u);
    }
    auto finish = std::chrono::high_resolution_clock::now();
    std::chrono::microseconds elapsed = std::chrono::duration_cast<std::chrono::microseconds>(finish - start);
    std::cout << "Num. Channels: " << ch << ", Performance over " << stress_cycles << " cycles: " << elapsed.count() << "us\t\t";
    std::cout << "Time/ch/cycles: " << (double(elapsed.count())/double(ch))/double(stress_cycles) << "us" << std::endl;
    // Convergence check after the stress run.
    EXPECT_TRUE( eigen_utils::norm(lpf->y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf->y() - u));
}
// Steps the fixed-size 6-channel filter and dumps input/state/output rows
// to test3.plt for offline plotting, then checks convergence.
TEST(TestSuite, FirstOrderLowPass6Plot)
{
    FirstOrderLowPass<6> lpf(natural_frequency,sampling_period);
    int ch = lpf.getChannels();
    Eigen::VectorXd u(ch); u.setRandom();
    Eigen::VectorXd y(ch); y.setRandom();
    lpf.setStateFromLastIO(u, y);
    std::ofstream ofile("test3.plt", std::ofstream::out);
    ofile << "u1,u2,u3,u4,u5,u6,"
          << "x1,x2,x3,x4,x5,x6,"
          << "y1,y2,y3,y4,y5,y6,"
          << "(u-y).norm,"
          << "(u-i).norm" << std::endl;
    u(0) = 1.0;
    for (unsigned int i=0;i<cycles;i++)
    {
        ofile << u.transpose().format(fmt) << ", "
              << lpf.x().transpose().format(fmt) << ", "
              << lpf.y().transpose().format(fmt) << ", "
              << eigen_utils::norm(u-lpf.y()) << ","
              << eigen_utils::norm(u-lpf.u()) << std::endl;
        y=lpf.update(u);
    }
    EXPECT_TRUE( eigen_utils::norm(lpf.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.y() - u));
}
// Construction, dimension and state-seeding checks for the scalar
// (fixed-size 1) filter, which accepts plain doubles as input/output.
TEST(TestSuite, FirstOrderLowPass1)
{
    EXPECT_NO_FATAL_FAILURE( FirstOrderLowPass<1> lpf(natural_frequency,sampling_period) );
    FirstOrderLowPass<1> lpf(natural_frequency,sampling_period);
    EXPECT_NO_FATAL_FAILURE( int order = lpf.xDim() );
    EXPECT_NO_FATAL_FAILURE( int nin = lpf.uDim() );
    EXPECT_NO_FATAL_FAILURE( int nout = lpf.yDim() );
    int ch;
    EXPECT_NO_FATAL_FAILURE( ch = lpf.getChannels() );
    double u = 0.67;
    double y = 0.34;
    // Seeding from a last input/output pair must round-trip exactly.
    EXPECT_TRUE( lpf.setStateFromLastIO(u, y) );
    EXPECT_TRUE( eigen_utils::norm(lpf.u() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.u()));
    EXPECT_TRUE( eigen_utils::norm(lpf.y() - y) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.y()));
    EXPECT_NO_FATAL_FAILURE( y=lpf.update(u) );
}
// Measures throughput of six independent scalar filters updated
// back-to-back and checks that each converged to the constant input.
TEST(TestSuite, FirstOrderLowPass1Performance)
{
    FirstOrderLowPass<1> lpfa(natural_frequency,sampling_period);
    FirstOrderLowPass<1> lpfb(natural_frequency,sampling_period);
    FirstOrderLowPass<1> lpfc(natural_frequency,sampling_period);
    FirstOrderLowPass<1> lpfd(natural_frequency,sampling_period);
    FirstOrderLowPass<1> lpfe(natural_frequency,sampling_period);
    FirstOrderLowPass<1> lpff(natural_frequency,sampling_period);
    double u = 0.67;
    double y = 0.34;
    lpfa.setStateFromLastIO(u, y);
    lpfb.setStateFromLastIO(u, y);
    lpfc.setStateFromLastIO(u, y);
    lpfd.setStateFromLastIO(u, y);
    lpfe.setStateFromLastIO(u, y);
    lpff.setStateFromLastIO(u, y);
    u = 1.0;
    auto start = std::chrono::high_resolution_clock::now();
    for (unsigned int i=0;i<stress_cycles ;i++)
    {
        // Return values are intentionally discarded: only the filters'
        // internal state matters for the convergence checks below. (The
        // original stored them in six unused locals.)
        lpfa.update(u);
        lpfb.update(u);
        lpfc.update(u);
        lpfd.update(u);
        lpfe.update(u);
        lpff.update(u);
    }
    auto finish = std::chrono::high_resolution_clock::now();
    std::chrono::microseconds elapsed = std::chrono::duration_cast<std::chrono::microseconds>(finish - start);
    std::cout << "Num. Channels: " << 6 << ", Performance over " << stress_cycles << " cycles: " << elapsed.count() << "us\t\t";
    std::cout << "Time/ch/cycles: " << (double(elapsed.count())/double(6))/double(stress_cycles) << "us" << std::endl;
    // Bug fix: the failure messages for lpfd/lpfe/lpff previously reported
    // the norms of lpfa/lpfb/lpfc (copy-paste error).
    EXPECT_TRUE( eigen_utils::norm(lpfa.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpfa.y() - u));
    EXPECT_TRUE( eigen_utils::norm(lpfb.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpfb.y() - u));
    EXPECT_TRUE( eigen_utils::norm(lpfc.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpfc.y() - u));
    EXPECT_TRUE( eigen_utils::norm(lpfd.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpfd.y() - u));
    EXPECT_TRUE( eigen_utils::norm(lpfe.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpfe.y() - u));
    EXPECT_TRUE( eigen_utils::norm(lpff.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpff.y() - u));
}
// Steps a scalar filter with a unit input and dumps a CSV trace to
// test1.plt, then checks convergence. Bug fix: the header row is now
// written FIRST — the original wrote one data row before the header,
// corrupting the CSV (the sibling Plot tests write the header first).
TEST(TestSuite, FirstOrderLowPass1Plot)
{
    FirstOrderLowPass<1> lpf(natural_frequency,sampling_period);
    double u = 0.67;
    double y = 0.34;
    lpf.setStateFromLastIO(u, y);
    std::ofstream ofile("test1.plt", std::ofstream::out);
    ofile << "u,"
          << "x,"
          << "y,"
          << "(u-y).norm,"
          << "(u-i).norm" << std::endl;
    // Initial row: state right after seeding, before the step input.
    ofile << u << ", "
          << lpf.x() << ", "
          << lpf.y() << ", "
          << eigen_utils::norm(u-lpf.y()) << ","
          << eigen_utils::norm(u-lpf.u()) << std::endl;
    u = 1.0;
    for (unsigned int i=0;i<cycles;i++)
    {
        y=lpf.update(u);
        ofile << u << ", "
              << lpf.x() << ", "
              << lpf.y() << ", "
              << eigen_utils::norm(u-lpf.y()) << ","
              << eigen_utils::norm(u-lpf.u()) << std::endl;
    }
    EXPECT_TRUE( eigen_utils::norm(lpf.y() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(lpf.y() - u));
}
// Exercises construction, init(), dimension queries and state seeding of
// the dynamically-sized first-order HIGH-pass filter (3 channels).
TEST(TestSuite, FirstOrderHighPassX)
{
    EXPECT_NO_FATAL_FAILURE( FirstOrderHighPassX hpf );
    EXPECT_NO_FATAL_FAILURE( FirstOrderHighPassX hpf(natural_frequency,sampling_period, 3) );
    EXPECT_NO_FATAL_FAILURE( FirstOrderHighPassXPtr hpf );
    EXPECT_NO_FATAL_FAILURE( FirstOrderHighPassXPtr hpf(
        new FirstOrderHighPassX(natural_frequency,sampling_period, 3)) );
    FirstOrderHighPassX hpf;
    FirstOrderHighPassXPtr hpf_ptr(new FirstOrderHighPassX());
    EXPECT_TRUE( hpf.init(natural_frequency, sampling_period, 3) );
    EXPECT_TRUE( hpf_ptr->init(natural_frequency, sampling_period, 3) );
    EXPECT_NO_FATAL_FAILURE( int order = hpf.xDim() );
    EXPECT_NO_FATAL_FAILURE( int nin = hpf.uDim() );
    EXPECT_NO_FATAL_FAILURE( int nout = hpf.yDim() );
    EXPECT_NO_FATAL_FAILURE( int order = hpf_ptr->xDim() );
    EXPECT_NO_FATAL_FAILURE( int nin = hpf_ptr->uDim() );
    EXPECT_NO_FATAL_FAILURE( int nout = hpf_ptr->yDim() );
    // Both filters were initialized identically, so their dimensions must
    // agree. (Bug fix: the original compared hpf with itself, which is a
    // tautology and can never fail.)
    EXPECT_TRUE( hpf.xDim() == hpf_ptr->xDim() );
    EXPECT_TRUE( hpf.uDim() == hpf_ptr->uDim() );
    EXPECT_TRUE( hpf.yDim() == hpf_ptr->yDim() );
    int ch;
    EXPECT_NO_FATAL_FAILURE( ch = hpf.getChannels() );
    Eigen::VectorXd u(ch); u.setRandom();
    Eigen::VectorXd y(ch); y.setRandom();
    // Seeding from a last input/output pair must round-trip exactly.
    EXPECT_TRUE( hpf.setStateFromLastIO(u, y) );
    EXPECT_TRUE( eigen_utils::norm(hpf.u() - u) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(hpf.u()));
    EXPECT_TRUE( eigen_utils::norm(hpf.y() - y) < 1e-12 ) << "get: " + std::to_string(eigen_utils::norm(hpf.y()));
    EXPECT_NO_FATAL_FAILURE( y=hpf.update(u) );
}
// Test entry point: bring up ROS first (the node handle is used by the
// filters' parameter machinery), then hand control to googletest.
int main(int argc, char** argv)
{
    // ------ Init ROS ------
    ros::init(argc, argv, "test_filters");  // '&*argv' in the original is just argv
    nh = new ros::NodeHandle();  // NOTE(review): never deleted; leaked at exit — confirm intentional
    testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
|
//
// Created by Martin Blicha on 15.07.20.
//
#include <engine/TPA.h>
#include <engine/Bmc.h>
#include <engine/Lawi.h>
#include <engine/Spacer.h>
#include <engine/ReverseWrapper.h>
#include <engine/LoopAccelerator.h>
#include "ChcInterpreter.h"
#include "ChcGraph.h"
#include "Validator.h"
#include "Normalizer.h"
namespace {
// Bind each let-variable name to its argument term in letRecords.
// Returns false (without rolling back any earlier bindings) when the names
// are not pairwise distinct, or when a name collides with a symbol the
// logic marks as non-scopable (i.e. one that must not be shadowed).
bool addLetFrame(const vec<char *> & names, vec<PTRef> const& args, Logic & logic, LetRecords& letRecords) {
    assert(names.size() == args.size());
    if (names.size() > 1) {
        // check that they are pairwise distinct;
        std::unordered_set<const char*, StringHash, Equal<const char*>> namesAsSet(names.begin(), names.end());
        // size_() is presumably the unsigned counterpart of size(), used to
        // avoid a signed/unsigned comparison — confirm against the vec API.
        if (namesAsSet.size() != names.size_()) {
            return false;
        }
    }
    for (int i = 0; i < names.size(); ++i) {
        const char* name = names[i];
        // symNameToRef(name)[0] takes the first symbol registered under
        // this name; noScoping symbols must not be shadowed by a let.
        if (logic.hasSym(name) && logic.getSym(logic.symNameToRef(name)[0]).noScoping()) {
            return false;
        }
        letRecords.addBinding(name, args[i]);
    }
    return true;
}
}
// Public entry point: interpret a parsed SMT-LIB AST into a CHC system by
// delegating to a fresh per-call interpreter context.
std::unique_ptr<ChcSystem> ChcInterpreter::interpretSystemAst(Logic & logic, const ASTNode * root) {
    ChcInterpreterContext ctx(logic, opts);
    return ctx.interpretSystemAst(root);
}
// Walk the top-level command list, interpreting each command until an
// (exit) command or the end of input, and hand the accumulated CHC system
// back to the caller (null if root is null or no system was created).
std::unique_ptr<ChcSystem> ChcInterpreterContext::interpretSystemAst(const ASTNode * root) {
    if (not root) {
        return std::unique_ptr<ChcSystem>();
    }
    this->system.reset();
    auto it = root->children->begin();
    for (; it != root->children->end() && not this->doExit; ++it) {
        interpretCommand(**it);
        // delete *it;
        // *it = nullptr;
    }
    // Transfer ownership of the built system to the caller; std::move is
    // required here because `system` is a member, not a local.
    return std::move(this->system);
}
// Dispatch one top-level SMT-LIB command node. Only the subset of commands
// relevant for CHC input files is handled; everything else is reported and
// skipped. Commands other than (set-logic) require the system to exist.
void ChcInterpreterContext::interpretCommand(ASTNode & node) {
    assert(node.getType() == CMD_T);
    const smt2token cmd = node.getToken();
    switch (cmd.x) {
        case t_setlogic: {
            ASTNode & logic_n = **(node.children->begin());
            const char * logic_name = logic_n.getValue();
            // Only HORN is accepted; seeing it creates the (empty) system.
            if (strcmp(logic_name, "HORN") == 0) {
                system.reset(new ChcSystem());
            } else {
                reportError("Invalid (set-logic) command");  // fixed typo: "comand"
            }
            break;
        }
        case t_setinfo: {
            // TODO: implement this
            break;
        }
        case t_declarefun: {
            if (not system) {
                reportError("Missing (set-logic) command, ignoring (declare-fun)");
            } else {
                interpretDeclareFun(node);
            }
            break;
        }
        case t_assert: {
            if (not system) {
                reportError("Missing (set-logic) command, ignoring (assert)");
            } else {
                interpretAssert(node);
            }
            break;
        }
        case t_checksat: {
            if (not system) {
                reportError("Missing (set-logic) command, ignoring (check-sat)");
            } else {
                interpretCheckSat();
            }
            break;
        }
        case t_exit: {
            // Stops the command loop in interpretSystemAst.
            this->doExit = true;
            break;
        }
        default:
            reportError("Unknown command, ignoring");
    }
}
// Resolve an AST sort node to the logic's sort reference, or SRef_Undef if
// the sort is unknown. Only the head identifier of the node is consulted —
// parametric/indexed sorts are not handled here; TODO confirm CHC inputs
// never need them.
SRef ChcInterpreterContext::getSort(ASTNode & sortNode) {
    assert(sortNode.getType() == ID_T);
    auto buildSortName = [](ASTNode const & node) {
        auto it = node.children->begin();
        char* canon_name;
        // asprintf allocates the string (GNU/POSIX extension); freed below.
        int written = asprintf(&canon_name, "%s", (**it).getValue());
        assert(written >= 0); (void)written;
        return canon_name;
    };
    SRef res = SRef_Undef;
    char * name = buildSortName(sortNode);
    if (logic.containsSort(name)) {
        res = logic.getSortRef(name);
    }
    free(name);
    return res;
}
// Handles (declare-fun): registers an uninterpreted predicate symbol with
// the system. A declare-fun node has exactly three children: name,
// argument-sort list, and return sort. The predicate must return Bool and
// all argument sorts must be known; otherwise an error is reported and the
// declaration is skipped.
// Changes vs. original: removed the dead `buildSortName` lambda (declared
// but never called), fixed the misspelled error message ("uninterpeted"),
// and initialized the out-message pointer.
void ChcInterpreterContext::interpretDeclareFun(ASTNode & node) {
    auto it = node.children->begin();
    ASTNode & name_node = **(it++);
    ASTNode & args_node = **(it++);
    ASTNode & ret_node = **(it++);
    assert(it == node.children->end());
    const char * fname = name_node.getValue();
    // Return sort: must resolve and must be Bool (CHC predicates are Boolean).
    SRef codomainSort = getSort(ret_node);
    if (codomainSort == SRef_Undef) {
        reportError("Unknown return sort of " + std::string(fname));
        return;
    } else if (codomainSort != logic.getSort_bool()) {
        reportError("Return sort of uninterpreted predicate must be Bool");
        return;
    }
    // Domain sorts, in declaration order.
    vec<SRef> args;
    for (auto it2 = args_node.children->begin(); it2 != args_node.children->end(); ++it2) {
        SRef argSort = getSort(**it2);
        if (argSort != SRef_Undef) {
            args.push(argSort);
        } else {
            reportError("Undefined sort in function " + std::string(fname));
            return;
        }
    }
    char * msg = nullptr;
    SymRef rval = logic.declareFun(fname, codomainSort, args, &msg);
    if (rval == SymRef_Undef) {
        reportError("While declare-fun " + std::string(fname));
        return;
    }
    system->addUninterpretedPredicate(rval);
}
// Handles (assert): parses the asserted term and records it in the system
// as a constrained Horn clause.
void ChcInterpreterContext::interpretAssert(ASTNode & node) {
    ASTNode & termNode = **(node.children->begin());
    PTRef assertedTerm = parseTerm(termNode);
    assert(assertedTerm != PTRef_Undef);
    system->addClause(chclauseFromPTRef(assertedTerm));
}
// Recursively converts a parsed SMT-LIB term AST into a PTRef in `logic`.
// Handles: constants (TERM_T), universally quantified terms (FORALL_T),
// plain identifiers (QID_T), function applications (LQID_T), and
// let-bindings (LET_T). Bound names introduced by `let` and `forall`
// are tracked through `letRecords`; anything else throws.
PTRef ChcInterpreterContext::parseTerm(const ASTNode & termNode) {
ASTType t = termNode.getType();
if (t == TERM_T) {
// Leaf constant: the first child carries the literal's textual value.
const char* name = (**(termNode.children->begin())).getValue();
const char* msg;
return logic.mkConst(name, &msg);
}
else if (t == FORALL_T) { // Forall has two children: sorted_var_list and term
auto it = termNode.children->begin();
ASTNode& qvars = **it;
assert(qvars.getType() == SVL_T);
// HACK! Using let frames to properly parse formula with universal quantifiers (same variable name might already be assoociated with multiple sorts
// RAII helper: pushes one let frame per bound variable and pops all of
// them when parsing of the quantified body finishes (incl. on throw).
class QuantifierHack{
std::size_t counter = 0;
LetRecords & rec;
public:
QuantifierHack(LetRecords& rec): rec(rec) {}
~QuantifierHack() {
for (std::size_t i = 0; i < counter; ++i) {
rec.popFrame();
}
}
void addBinding(const char* name, PTRef term) {
rec.pushFrame();
rec.addBinding(name, term);
++counter;
}
} quantifierHack(letRecords);
for (ASTNode * var : *qvars.children) {
assert(var && var->getType() == SV_T);
// make sure the term store know about these variables
const char* name = var->getValue();
char* msg;
SRef sort = getSort(**var->children->begin());
PTRef varTerm = logic.mkVar(sort, name);
quantifierHack.addBinding(name, varTerm);
// std::cout << var->getValue() << std::endl; // name of the variable
// std::cout << backgroundTheory->getLogic().getSortName(getSort(**var->children->begin())) << std::endl; // sort of th variable
}
++it;
// The quantifier prefix is dropped: the body is parsed with the bound
// variables in scope and returned as-is.
ASTNode& innerTerm = **it;
return parseTerm(innerTerm);
}
else if (t == QID_T) {
// Plain identifier: prefer a let/quantifier binding over a declared symbol.
const char* name = (**(termNode.children->begin())).getValue();
PTRef tr = letRecords.getOrUndef(name);
if (tr != PTRef_Undef) {
return tr;
}
char* msg = nullptr;
tr = logic.resolveTerm(name, {}, &msg);
assert(tr != PTRef_Undef);
return tr;
}
else if (t == LQID_T) {
// Function application: first child is the symbol name, the rest are args.
auto node_iter = termNode.children->begin();
const char* name = (**node_iter).getValue(); node_iter++;
// Parse the arguments
vec<PTRef> args;
for (; node_iter != termNode.children->end(); node_iter++) {
PTRef arg_term = parseTerm(**node_iter);
if (arg_term == PTRef_Undef) {
assert(false);
return PTRef_Undef;
}
else
args.push(arg_term);
}
assert(args.size() > 0);
char* msg = nullptr;
PTRef tr = PTRef_Undef;
tr = logic.resolveTerm(name, std::move(args), &msg);
assert(tr != PTRef_Undef);
return tr;
}
else if (t == LET_T) {
// Let-binding: first child is the binding list, second is the body term.
auto ch = termNode.children->begin();
auto vbl = (**ch).children->begin();
vec<PTRef> tmp_args;
vec<char*> names;
// use RAII idiom to guard the scope of new LetFrame (and ensure the cleaup of names)
class Guard {
LetRecords& rec;
vec<char*>& names;
public:
Guard(LetRecords& rec, vec<char*>& names): rec(rec), names(names) { rec.pushFrame(); }
~Guard() { rec.popFrame(); for (int i = 0; i < names.size(); i++) { free(names[i]); }}
} scopeGuard(letRecords, names);
// First read the term declarations in the let statement
// (bound terms are parsed in the *outer* scope, per SMT-LIB let semantics).
while (vbl != (**ch).children->end()) {
PTRef let_tr = parseTerm(**((**vbl).children->begin()));
if (let_tr == PTRef_Undef) return PTRef_Undef;
tmp_args.push(let_tr);
char* name = strdup((**vbl).getValue());
names.push(name);
vbl++;
}
// Only then insert them to the table
bool success = addLetFrame(names, tmp_args, logic, letRecords);
if (not success) {
return PTRef_Undef;
}
ch++;
// This is now constructed with the let declarations context in let_branch
PTRef tr = parseTerm(**(ch));
if (tr == PTRef_Undef) {
return PTRef_Undef;
}
return tr;
}
else {
std::cout << "Unknown type: " << termNode.typeToStr() << std::endl;
throw std::logic_error("Type not handled in parsing!\n");
}
}
// Handles (check-sat): normalizes the accumulated CHC system, builds its
// (hyper)graph representation, runs the selected engine, and prints
// sat/unsat/unknown (sat == system is safe). Linear systems may use any
// engine plus optional backward analysis and loop acceleration; nonlinear
// systems are handled by Spacer only. Optionally prints and/or validates
// the witness, depending on the options.
void ChcInterpreterContext::interpretCheckSat() {
// NOTE(review): `printer` is unused while the print call below stays
// commented out.
ChcPrinter printer{logic};
// printer.print(*system, std::cout);
ChcGraphBuilder builder{logic};
auto normalizedSystem = Normalizer(logic).normalize(*system);
auto hypergraph = builder.buildGraph(normalizedSystem);
if (hypergraph->isNormalGraph()) {
// Linear system: every clause body has at most one predicate.
auto graph = hypergraph->toNormalGraph();
// graph->toDot(std::cout, logic);
auto engine = getEngine();
bool backwardAnalysis = opts.hasOption(Options::ANALYSIS_FLOW) && opts.getOption(Options::ANALYSIS_FLOW) == "backward";
if (backwardAnalysis) {
// Solve the reversed problem; ReverseWrapper maps the result back.
engine = std::unique_ptr<Engine>(new ReverseWrapper(std::move(engine), logic));
}
bool tryAccelerateLoops = opts.hasOption(Options::ACCELERATE_LOOPS);
if (tryAccelerateLoops) {
assert(opts.getOption(Options::ACCELERATE_LOOPS) == "true");
// Acceleration needs arithmetic reasoning, hence the LALogic check.
LALogic * laLogic = dynamic_cast<LALogic*>(&logic);
if (laLogic) {
engine = std::unique_ptr<Engine>(new LoopAccelerator(*laLogic, std::move(engine)));
} else {
std::cerr << "Loops can be accelerated only for arithmetic problems, skipping this preprocessing\n";
}
}
auto res = engine->solve(*graph);
bool validateWitness = opts.hasOption(Options::VALIDATE_RESULT);
assert(not validateWitness || opts.getOption(Options::VALIDATE_RESULT) == std::string("true"));
bool printWitness = opts.hasOption(Options::PRINT_WITNESS);
assert(not printWitness || opts.getOption(Options::PRINT_WITNESS) == std::string("true"));
// SMT-LIB convention: "sat" means the system of clauses is satisfiable,
// i.e. the program is SAFE; "unsat" means UNSAFE.
switch (res.getAnswer()) {
case VerificationResult::SAFE: {
std::cout << "sat" << std::endl;
break;
}
case VerificationResult::UNSAFE: {
std::cout << "unsat" << std::endl;
break;
}
case VerificationResult::UNKNOWN:
std::cout << "unknown" << std::endl;
break;
}
if (validateWitness || printWitness) {
// Lift the graph-level result back to a system-level witness.
ChcGraphContext ctx(*graph, logic);
SystemVerificationResult systemResult (std::move(res), ctx);
if (printWitness) {
systemResult.printWitness(std::cout, logic);
}
if (validateWitness) {
auto validationResult = Validator(logic).validate(*normalizedSystem.normalizedSystem, systemResult);
switch (validationResult) {
case Validator::Result::VALIDATED: {
std::cout << "Internal witness validation successful!" << std::endl;
break;
}
case Validator::Result::NOT_VALIDATED: {
std::cout << "Internal witness validation failed!" << std::endl;
break;
}
default:
throw std::logic_error("Unexpected case in result validation!");
}
}
}
} else {
// Nonlinear system (hyperedges present): only Spacer supports it.
if (opts.getOption(Options::ENGINE) != "spacer") {
throw std::logic_error("Only Spacer engine can solve nonlinear CHCs at the moment!");
}
auto engine = std::unique_ptr<Engine>(new Spacer(logic, opts));
auto res = engine->solve(*hypergraph);
switch (res.getAnswer()) {
case VerificationResult::SAFE: {
std::cout << "sat" << std::endl;
if (opts.hasOption(Options::VALIDATE_RESULT)) {
SystemVerificationResult systemResult(std::move(res));
auto validationResult = Validator(logic).validate(*normalizedSystem.normalizedSystem, systemResult);
switch (validationResult) {
case Validator::Result::VALIDATED: {
std::cout << "Internal witness validation successful!" << std::endl;
break;
}
case Validator::Result::NOT_VALIDATED: {
std::cout << "Internal witness validation failed!" << std::endl;
break;
}
}
}
break;
}
case VerificationResult::UNSAFE: {
std::cout << "unsat" << std::endl;
break;
}
case VerificationResult::UNKNOWN:
std::cout << "unknown" << std::endl;
break;
}
}
}
// Emits an SMT-LIB style error s-expression, e.g.: (error "message")
void ChcInterpreterContext::reportError(std::string msg) {
    std::cout << "(error \"" << msg << "\")\n";
}
// Converts an asserted formula into a constrained Horn clause
// (head <= interpreted-constraint /\ body-predicates).
// The formula is expected to be a disjunction (the clausal form of an
// implication); three non-disjunction shapes are special-cased below.
// Throws if the formula matches none of the recognized shapes or contains
// more than one positive predicate.
ChClause ChcInterpreterContext::chclauseFromPTRef(PTRef ref) {
assert(ref != PTRef_Undef);
Logic & logic = this->logic;
PTRef disjunction = ref;
if (not logic.isOr(disjunction)) {
// special cases
// 1. Head with empty body
if (isUninterpretedPredicate(ref)) {
return ChClause{.head = PTRefToCHC::constructHead(ref), .body = PTRefToCHC::constructBody(logic.getTerm_true(), {})};
} else if (logic.isNot(ref)) {
PTRef argOfNot = logic.getPterm(ref)[0];
// 2. Empty head, single predicate in body
if (isUninterpretedPredicate(argOfNot)) {
return ChClause{.head = PTRefToCHC::constructHead(logic.getTerm_false()), .body = PTRefToCHC::constructBody(logic.getTerm_true(), {argOfNot})};
} else if(logic.isAnd(argOfNot)) {
// The clause is represented as negation of conjunction, turn it into disjunction
// (De Morgan: not(a /\ b /\ ...) == not a \/ not b \/ ...).
vec<PTRef> args;
for (int i = 0; i < logic.getPterm(argOfNot).size(); ++i) {
PTRef arg = logic.getPterm(argOfNot)[i];
args.push(logic.mkNot(arg));
}
disjunction = logic.mkOr(args);
} else {
throw std::logic_error(std::string("Unknown format of in parsing CHC: ") + logic.printTerm(ref));
}
}
}
assert(logic.isOr(disjunction));
// identify interpreted part and uninterpreted part
vec<PTRef> disjuncts = TermUtils(logic).getTopLevelDisjuncts(disjunction);
// find uninterpreted predicates (positive or negative)
// After this partition: [begin, uninterpretedEnd) are predicate literals,
// [uninterpretedEnd, end) is the interpreted constraint part.
auto uninterpretedEnd = std::partition(disjuncts.begin(), disjuncts.end(), [this, &logic](PTRef arg) {
return this->isUninterpretedPredicate(arg) || (logic.isNot(arg) && this->isUninterpretedPredicate(logic.getPterm(arg)[0]));
});
// find positive uninterpreted predicates
// After this partition: [begin, positiveEnd) are positive (head candidates),
// [positiveEnd, uninterpretedEnd) are negated (body predicates).
auto positiveEnd = std::partition(disjuncts.begin(), uninterpretedEnd, [&logic](PTRef arg) {
return not logic.isNot(arg);
});
if (positiveEnd - disjuncts.begin() > 1) {
throw std::logic_error(std::string("More than one positive uninterpreted predicate in clause"));
}
// No positive predicate => query clause with head "false".
ChcHead head = positiveEnd == disjuncts.begin() ? PTRefToCHC::constructHead(logic.getTerm_false()) : PTRefToCHC::constructHead(*disjuncts.begin());
// Negate the body so that it represents antecedent of the implication
std::transform(positiveEnd, disjuncts.end(), positiveEnd, [&logic](PTRef bodyArg) { return logic.mkNot(bodyArg); });
vec<PTRef> interpretedArgs;
std::for_each(uninterpretedEnd, disjuncts.end(), [&interpretedArgs](PTRef arg) { interpretedArgs.push(arg); });
PTRef interpretedPart = logic.mkAnd(interpretedArgs);
ChcBody body = PTRefToCHC::constructBody(interpretedPart, positiveEnd, uninterpretedEnd);
return ChClause{.head = std::move(head), .body = std::move(body)};
}
// A term is an uninterpreted predicate iff its head symbol was registered
// with the system via (declare-fun).
bool ChcInterpreterContext::isUninterpretedPredicate(PTRef ref) const {
    SymRef symbol = logic.getSymRef(ref);
    return system->isUninterpretedPredicate(symbol);
}
std::unique_ptr<Engine> ChcInterpreterContext::getEngine() const {
std::string engineStr = opts.hasOption(Options::ENGINE) ? opts.getOption(Options::ENGINE) : "lawi";
if (engineStr == "tpa-split") {
return std::unique_ptr<Engine>(new TPASplit(logic, opts));
} else if (engineStr == "tpa") {
return std::unique_ptr<Engine>(new TPABasic(logic, opts));
} else if (engineStr == "bmc") {
return std::unique_ptr<Engine>(new BMC(logic, opts));
} else if (engineStr == "lawi") {
return std::unique_ptr<Engine>(new Lawi(logic, opts));
} else if (engineStr == "spacer") {
return std::unique_ptr<Engine>(new Spacer(logic, opts));
} else {
throw std::invalid_argument("Unknown engine specified");
}
}
|
//===--- SwiftSourceDocInfo.cpp -------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "SourceKit/Support/FileSystemProvider.h"
#include "SourceKit/Support/ImmutableTextBuffer.h"
#include "SourceKit/Support/Logging.h"
#include "SourceKit/Support/UIdent.h"
#include "SwiftASTManager.h"
#include "SwiftEditorDiagConsumer.h"
#include "SwiftLangSupport.h"
#include "swift/AST/ASTDemangler.h"
#include "swift/AST/ASTPrinter.h"
#include "swift/AST/Decl.h"
#include "swift/AST/LookupKinds.h"
#include "swift/AST/ModuleNameLookup.h"
#include "swift/AST/NameLookup.h"
#include "swift/AST/SwiftNameTranslation.h"
#include "swift/AST/GenericSignature.h"
#include "swift/Basic/SourceManager.h"
#include "swift/Frontend/Frontend.h"
#include "swift/Frontend/PrintingDiagnosticConsumer.h"
#include "swift/IDE/CommentConversion.h"
#include "swift/IDE/ModuleInterfacePrinting.h"
#include "swift/IDE/SourceEntityWalker.h"
#include "swift/IDE/Utils.h"
#include "swift/IDE/Refactoring.h"
#include "swift/IDE/IDERequests.h"
#include "swift/Markup/XMLUtils.h"
#include "swift/Sema/IDETypeChecking.h"
#include "swift/SymbolGraphGen/SymbolGraphGen.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Index/USRGeneration.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/MemoryBuffer.h"
#include <numeric>
using namespace SourceKit;
using namespace swift;
using namespace swift::ide;
namespace {
/// ASTPrinter that wraps each printed type reference in a
/// <Type usr="..."> XML element carrying the referenced decl's USR,
/// escaping all other output as XML text.
class AnnotatedDeclarationPrinter : public XMLEscapingPrinter {
public:
AnnotatedDeclarationPrinter(raw_ostream &OS)
:XMLEscapingPrinter(OS) { }
private:
// Emits <Type usr="USR">...escaped type text...</Type>.
void printTypeRef(
Type T, const TypeDecl *TD, Identifier Name,
PrintNameContext NameContext = PrintNameContext::Normal) override {
printXML("<Type usr=\"");
SwiftLangSupport::printUSR(TD, OS);
printXML("\">");
StreamPrinter::printTypeRef(T, TD, Name, NameContext);
printXML("</Type>");
}
};
} // end anonymous namespace
/// Computes the XML tag for \p D by stripping the common
/// "source.lang.swift." prefix from the decl's (or decl-ref's) UID name.
static StringRef getTagForDecl(const Decl *D, bool isRef) {
  auto UID = SwiftLangSupport::getUIDForDecl(D, isRef);
  StringRef Name = UID.getName();
  StringRef Prefix = "source.lang.swift.";
  assert(Name.startswith(Prefix));
  return Name.drop_front(Prefix.size());
}
// Tag names shared by several of the annotation-printer callbacks below.
static StringRef ExternalParamNameTag = "decl.var.parameter.argument_label";
static StringRef LocalParamNameTag = "decl.var.parameter.name";
static StringRef GenericParamNameTag = "decl.generic_type_param.name";
static StringRef SyntaxKeywordTag = "syntaxtype.keyword";
/// Maps a PrintStructureKind to the XML tag used for it in annotated
/// declarations. Returns "" for kinds that produce no tag (FunctionType);
/// kinds filtered out by isIgnoredPrintStructureKind() must not reach here.
static StringRef getTagForParameter(PrintStructureKind context) {
switch (context) {
case PrintStructureKind::FunctionParameter:
return "decl.var.parameter";
case PrintStructureKind::FunctionReturnType:
return "decl.function.returntype";
case PrintStructureKind::FunctionType:
// Function types themselves get no tag; only their parts do.
return "";
case PrintStructureKind::TupleType:
return "tuple";
case PrintStructureKind::TupleElement:
return "tuple.element";
case PrintStructureKind::GenericParameter:
return "decl.generic_type_param";
case PrintStructureKind::GenericRequirement:
return "decl.generic_type_requirement";
case PrintStructureKind::BuiltinAttribute:
return "syntaxtype.attribute.builtin";
case PrintStructureKind::NumberLiteral:
return "syntaxtype.number";
case PrintStructureKind::StringLiteral:
return "syntaxtype.string";
case PrintStructureKind::DefaultArgumentClause:
case PrintStructureKind::DeclGenericParameterClause:
case PrintStructureKind::DeclGenericRequirementClause:
case PrintStructureKind::EffectsSpecifiers:
case PrintStructureKind::DeclResultTypeClause:
case PrintStructureKind::FunctionParameterList:
case PrintStructureKind::FunctionParameterType:
// These kinds are ignored by 'isIgnoredPrintStructureKind()'
llvm_unreachable("ignored structure kind");
}
llvm_unreachable("unexpected structure kind");
}
/// Chooses the tag to wrap around a decl's *name* when printing it
/// (distinct from the tag around the whole decl). Returns "" when the
/// name needs no tag here.
static StringRef getDeclNameTagForDecl(const Decl *D) {
switch (D->getKind()) {
case DeclKind::Param:
// When we're examining the parameter itself, it is the local name that is
// the name of the variable.
return LocalParamNameTag;
case DeclKind::GenericTypeParam:
return ""; // Handled by printName.
case DeclKind::Constructor:
case DeclKind::Destructor:
case DeclKind::Subscript:
// The names 'init'/'deinit'/'subscript' are actually keywords.
return SyntaxKeywordTag;
default:
return "decl.name";
}
}
namespace {
/// A typesafe union of contexts that the printer can be inside.
/// Currently: Decl, PrintStructureKind
class PrintContext {
// Use the low two bits to determine the type; store the enum value shifted
// left to leave the tag bits free. A Decl* is stored unshifted (its low
// bits are zero because of pointer alignment, see the static_assert below).
const uintptr_t value;
static constexpr unsigned declTag = 0;
static constexpr unsigned PrintStructureKindTag = 1;
static constexpr unsigned typeTag = 2;
static constexpr unsigned tagMask = 3;
static constexpr unsigned tagShift = 2;
// True when the two tag bits of `value` equal `tag`.
bool hasTag(unsigned tag) const { return (value & tagMask) == tag; }
public:
PrintContext(const Decl *D) : value(uintptr_t(D)) {
static_assert(llvm::PointerLikeTypeTraits<Decl *>::NumLowBitsAvailable >=
tagShift,
"missing spare bit in Decl *");
}
PrintContext(PrintStructureKind K)
: value((uintptr_t(K) << tagShift) | PrintStructureKindTag) {}
// Type contexts carry no payload; only the tag is stored.
PrintContext(TypeLoc unused) : value(typeTag) {}
/// Get the context as a Decl, or nullptr.
const Decl *getDecl() const {
return hasTag(declTag) ? (const Decl *)value : nullptr;
}
/// Get the context as a PrintStructureKind, or None.
Optional<PrintStructureKind> getPrintStructureKind() const {
if (!hasTag(PrintStructureKindTag))
return None;
return PrintStructureKind(value >> tagShift);
}
/// Whether this is a PrintStructureKind context of the given \p kind.
bool is(PrintStructureKind kind) const {
auto storedKind = getPrintStructureKind();
return storedKind && *storedKind == kind;
}
bool isType() const { return hasTag(typeTag); }
};
/// An ASTPrinter for annotating declarations with XML tags that describe the
/// key substructure of the declaration for CursorInfo/DocInfo.
///
/// Prints declarations with decl- and type-specific tags derived from the
/// UIDs used for decl/refs. For example (including newlines purely for ease of
/// reading):
///
/// \verbatim
/// <decl.function.free>
/// func <decl.name>foo</decl.name>
/// (
/// <decl.var.parameter>
/// <decl.var.parameter.name>x</decl.var.parameter.name>:
/// <ref.struct usr="Si">Int</ref.struct>
/// </decl.var.parameter>
/// ) -> <decl.function.returntype>
/// <ref.struct usr="Si">Int</ref.struct></decl.function.returntype>
/// </decl.function.free>
/// \endverbatim
class FullyAnnotatedDeclarationPrinter final : public XMLEscapingPrinter {
public:
FullyAnnotatedDeclarationPrinter(raw_ostream &OS) : XMLEscapingPrinter(OS) {}
private:
// MARK: The ASTPrinter callback interface.
// Each pre/post pair pushes/pops a PrintContext and opens/closes the
// matching XML tag; contextStack mirrors the printer's nesting.
void printDeclPre(const Decl *D, Optional<BracketOptions> Bracket) override {
contextStack.emplace_back(PrintContext(D));
openTag(getTagForDecl(D, /*isRef=*/false));
}
void printDeclPost(const Decl *D, Optional<BracketOptions> Bracket) override {
assert(contextStack.back().getDecl() == D && "unmatched printDeclPre");
contextStack.pop_back();
closeTag(getTagForDecl(D, /*isRef=*/false));
}
// Wrap the decl's name, when that name gets a tag at all.
void printDeclLoc(const Decl *D) override {
auto tag = getDeclNameTagForDecl(D);
if (!tag.empty())
openTag(tag);
}
void printDeclNameEndLoc(const Decl *D) override {
auto tag = getDeclNameTagForDecl(D);
if (!tag.empty())
closeTag(tag);
}
// The tag for a type depends on the context *enclosing* it, so it is
// computed before pushing the type's own context.
void printTypePre(const TypeLoc &TL) override {
auto tag = getTypeTagForCurrentContext();
contextStack.emplace_back(PrintContext(TL));
if (!tag.empty())
openTag(tag);
}
void printTypePost(const TypeLoc &TL) override {
assert(contextStack.back().isType());
contextStack.pop_back();
auto tag = getTypeTagForCurrentContext();
if (!tag.empty())
closeTag(tag);
}
// Kinds that produce no structure in the annotated output.
bool isIgnoredPrintStructureKind(PrintStructureKind kind) {
switch (kind) {
case PrintStructureKind::DefaultArgumentClause:
case PrintStructureKind::DeclGenericParameterClause:
case PrintStructureKind::DeclGenericRequirementClause:
case PrintStructureKind::EffectsSpecifiers:
case PrintStructureKind::DeclResultTypeClause:
case PrintStructureKind::FunctionParameterList:
case PrintStructureKind::FunctionParameterType:
return true;
default:
return false;
}
}
void printStructurePre(PrintStructureKind kind, const Decl *D) override {
if (isIgnoredPrintStructureKind(kind))
return;
// Tuples inside a function type are really parameters; rewrite the kind
// before pushing it so pre and post see the same value.
if (kind == PrintStructureKind::TupleElement ||
kind == PrintStructureKind::TupleType)
fixupTuple(kind);
contextStack.emplace_back(PrintContext(kind));
auto tag = getTagForParameter(kind);
if (tag.empty())
return;
if (D && kind == PrintStructureKind::GenericParameter) {
assert(isa<ValueDecl>(D) && "unexpected non-value decl for param");
openTagWithUSRForDecl(tag, cast<ValueDecl>(D));
} else {
openTag(tag);
}
}
void printStructurePost(PrintStructureKind kind, const Decl *D) override {
if (isIgnoredPrintStructureKind(kind))
return;
if (kind == PrintStructureKind::TupleElement ||
kind == PrintStructureKind::TupleType) {
// Pop first: fixupTuple inspects the stack, and the entry being closed
// must not influence its own fixup. The assert then compares against
// the same fixed-up kind that printStructurePre pushed.
auto prev = contextStack.pop_back_val();
(void)prev;
fixupTuple(kind);
assert(prev.is(kind) && "unmatched printStructurePre");
} else {
assert(contextStack.back().is(kind) && "unmatched printStructurePre");
contextStack.pop_back();
}
auto tag = getTagForParameter(kind);
if (!tag.empty())
closeTag(tag);
}
void printNamePre(PrintNameContext context) override {
auto tag = getTagForPrintNameContext(context);
if (!tag.empty())
openTag(tag);
}
void printNamePost(PrintNameContext context) override {
auto tag = getTagForPrintNameContext(context);
if (!tag.empty())
closeTag(tag);
}
// Type references become <ref.*> tags carrying the referenced decl's USR.
// `insideRef` suppresses name tags within the reference text.
void printTypeRef(
Type T, const TypeDecl *TD, Identifier name,
PrintNameContext NameContext = PrintNameContext::Normal) override {
auto tag = getTagForDecl(TD, /*isRef=*/true);
openTagWithUSRForDecl(tag, TD);
insideRef = true;
XMLEscapingPrinter::printTypeRef(T, TD, name, NameContext);
insideRef = false;
closeTag(tag);
}
// MARK: Convenience functions for printing.
void openTag(StringRef tag) { OS << "<" << tag << ">"; }
void closeTag(StringRef tag) { OS << "</" << tag << ">"; }
void openTagWithUSRForDecl(StringRef tag, const ValueDecl *VD) {
OS << "<" << tag << " usr=\"";
SwiftLangSupport::printUSR(VD, OS);
OS << "\">";
}
// MARK: Misc.
// Tag to wrap a type with, chosen from the context the type appears in
// (parameter type, generic constraint, var type, ...); "" when none.
StringRef getTypeTagForCurrentContext() const {
if (contextStack.empty())
return "";
static StringRef parameterTypeTag = "decl.var.parameter.type";
static StringRef genericParamTypeTag = "decl.generic_type_param.constraint";
auto context = contextStack.back();
if (context.is(PrintStructureKind::FunctionParameter))
return parameterTypeTag;
if (context.is(PrintStructureKind::GenericParameter))
return genericParamTypeTag;
if (context.is(PrintStructureKind::TupleElement))
return "tuple.element.type";
if (context.getPrintStructureKind().hasValue() || context.isType())
return "";
assert(context.getDecl() && "unexpected context kind");
switch (context.getDecl()->getKind()) {
case DeclKind::Param:
return parameterTypeTag;
case DeclKind::GenericTypeParam:
return genericParamTypeTag;
case DeclKind::Var:
return "decl.var.type";
case DeclKind::Subscript:
case DeclKind::Func:
default:
return "";
}
}
StringRef getTagForPrintNameContext(PrintNameContext context) {
// Names inside a <ref.*> element are not tagged individually.
if (insideRef)
return "";
bool insideParam =
!contextStack.empty() &&
contextStack.back().is(PrintStructureKind::FunctionParameter);
switch (context) {
case PrintNameContext::FunctionParameterExternal:
return ExternalParamNameTag;
case PrintNameContext::FunctionParameterLocal:
return LocalParamNameTag;
case PrintNameContext::TupleElement:
// A tuple element label inside a parameter position acts as an
// argument label.
if (insideParam)
return ExternalParamNameTag;
return "tuple.element.argument_label";
case PrintNameContext::Keyword:
case PrintNameContext::IntroducerKeyword:
return SyntaxKeywordTag;
case PrintNameContext::GenericParameter:
return GenericParamNameTag;
case PrintNameContext::Attribute:
return "syntaxtype.attribute.name";
default:
return "";
}
}
/// 'Fix' a tuple or tuple element structure kind to be a function parameter
/// or function type if we are currently inside a function type. This
/// simplifies functions that need to differentiate a tuple from the input
/// part of a function type.
void fixupTuple(PrintStructureKind &kind) {
assert(kind == PrintStructureKind::TupleElement ||
kind == PrintStructureKind::TupleType);
// Skip over 'type's in the context stack.
for (auto I = contextStack.rbegin(), E = contextStack.rend(); I != E; ++I) {
if (I->is(PrintStructureKind::FunctionType)) {
if (kind == PrintStructureKind::TupleElement)
kind = PrintStructureKind::FunctionParameter;
else
kind = PrintStructureKind::FunctionType;
break;
} else if (!I->isType()) {
break;
}
}
}
private:
/// A stack of contexts being printed, used to determine the context for
/// subsequent ASTPrinter callbacks.
llvm::SmallVector<PrintContext, 3> contextStack;
// True while printing the text of a type reference (see printTypeRef).
bool insideRef = false;
};
} // end anonymous namespace
/// Finds the nominal base type to use when replacing archetypes for \p VD.
/// Returns a null Type when \p Ty is null or \p VD is not a member of a
/// type context.
static Type findBaseTypeForReplacingArchetype(const ValueDecl *VD, const Type Ty) {
  // Only members of a type context have a meaningful base type.
  if (Ty.isNull() || !VD->getDeclContext()->isTypeContext())
    return Type();
  return Ty->getRValueType()->getInOutObjectType()->getMetatypeInstanceType();
}
static void printAnnotatedDeclaration(const ValueDecl *VD,
const Type BaseTy,
raw_ostream &OS) {
AnnotatedDeclarationPrinter Printer(OS);
PrintOptions PO = PrintOptions::printQuickHelpDeclaration();
if (BaseTy) {
PO.setBaseType(BaseTy);
PO.PrintAsMember = true;
}
// If it's implicit, try to find an overridden ValueDecl that's not implicit.
// This will ensure we can properly annotate TypeRepr with a usr
// in AnnotatedDeclarationPrinter.
while (VD->isImplicit() && VD->getOverriddenDecl())
VD = VD->getOverriddenDecl();
// VD may be a compiler synthesized member, constructor, or shorthand argument
// so always print it even if it's implicit.
//
// FIXME: Update PrintOptions::printQuickHelpDeclaration to print implicit
// decls by default. That causes issues due to newlines being printed before
// implicit OpaqueTypeDecls at time of writing.
PO.TreatAsExplicitDeclList.push_back(VD);
// Wrap this up in XML, as that's what we'll use for documentation comments.
OS<<"<Declaration>";
VD->print(Printer, PO);
OS<<"</Declaration>";
}
void SwiftLangSupport::printFullyAnnotatedDeclaration(const ValueDecl *VD,
Type BaseTy,
raw_ostream &OS) {
FullyAnnotatedDeclarationPrinter Printer(OS);
PrintOptions PO = PrintOptions::printQuickHelpDeclaration();
if (BaseTy) {
PO.setBaseType(BaseTy);
PO.PrintAsMember = true;
}
// If it's implicit, try to find an overridden ValueDecl that's not implicit.
// This will ensure we can properly annotate TypeRepr with a usr
// in AnnotatedDeclarationPrinter.
while (VD->isImplicit() && VD->getOverriddenDecl())
VD = VD->getOverriddenDecl();
// VD may be a compiler synthesized member, constructor, or shorthand argument
// so always print it even if it's implicit.
//
// FIXME: Update PrintOptions::printQuickHelpDeclaration to print implicit
// decls by default. That causes issues due to newlines being printed before
// implicit OpaqueTypeDecls at time of writing.
PO.TreatAsExplicitDeclList.push_back(VD);
VD->print(Printer, PO);
}
void SwiftLangSupport::printFullyAnnotatedDeclaration(const ExtensionDecl *ED,
raw_ostream &OS) {
FullyAnnotatedDeclarationPrinter Printer(OS);
PrintOptions PO = PrintOptions::printQuickHelpDeclaration();
ED->print(Printer, PO);
}
/// Prints \p VD as it would appear in the synthesized extension \p Target,
/// with full structural XML annotation.
void SwiftLangSupport::printFullyAnnotatedSynthesizedDeclaration(
    const swift::ValueDecl *VD, TypeOrExtensionDecl Target,
    llvm::raw_ostream &OS) {
  PrintOptions Options = PrintOptions::printQuickHelpDeclaration();
  Options.initForSynthesizedExtension(Target);
  Options.PrintAsMember = true;
  FullyAnnotatedDeclarationPrinter AnnotatingPrinter(OS);
  VD->print(AnnotatingPrinter, Options);
}
/// Prints \p ED as a synthesized extension of \p Target, with full
/// structural XML annotation.
void SwiftLangSupport::printFullyAnnotatedSynthesizedDeclaration(
    const swift::ExtensionDecl *ED, TypeOrExtensionDecl Target,
    llvm::raw_ostream &OS) {
  PrintOptions Options = PrintOptions::printQuickHelpDeclaration();
  Options.initForSynthesizedExtension(Target);
  FullyAnnotatedDeclarationPrinter AnnotatingPrinter(OS);
  ED->print(AnnotatingPrinter, Options);
}
/// Invokes `Fn(relatedDecl, isTypeContext, isDuplicateName)` for every
/// available decl that shares \p VD's base name in VD's context
/// (qualified lookup for type members, module-scope lookup otherwise),
/// excluding \p VD itself.
template <typename FnTy>
static void walkRelatedDecls(const ValueDecl *VD, const FnTy &Fn) {
if (isa<ParamDecl>(VD))
return; // Parameters don't have interesting related declarations.
auto &ctx = VD->getASTContext();
// Counts occurrences per full name; >1 later means the name is ambiguous
// among the related decls (VD's own name is pre-counted here).
llvm::SmallDenseMap<DeclName, unsigned, 16> NamesSeen;
++NamesSeen[VD->getName()];
auto *DC = VD->getDeclContext();
bool typeLookup = DC->isTypeContext();
SmallVector<ValueDecl *, 4> results;
if (typeLookup) {
auto type = DC->getDeclaredInterfaceType();
if (!type->is<ErrorType>()) {
DC->lookupQualified(type, DeclNameRef(VD->getBaseName()),
NL_QualifiedDefault, results);
}
} else {
namelookup::lookupInModule(DC->getModuleScopeContext(),
VD->getBaseName(), results,
NLKind::UnqualifiedLookup,
namelookup::ResolutionKind::Overloadable,
DC->getModuleScopeContext(),
NL_UnqualifiedDefault);
}
SmallVector<ValueDecl *, 8> RelatedDecls;
for (auto result : results) {
// Skip decls marked unavailable for this ASTContext.
if (result->getAttrs().isUnavailable(ctx))
continue;
if (result != VD) {
++NamesSeen[result->getName()];
RelatedDecls.push_back(result);
}
}
// Now provide the results along with whether the name is duplicate or not.
for (auto result : RelatedDecls)
Fn(result, typeLookup, NamesSeen[result->getName()] > 1);
}
//===----------------------------------------------------------------------===//
// SwiftLangSupport::getCursorInfo
//===----------------------------------------------------------------------===//
/// Returns the text of the token at \p Offset in \p Snap's buffer, or an
/// empty StringRef when the offset is out of range.
static StringRef getSourceToken(unsigned Offset,
                                ImmutableTextSnapshotRef Snap) {
  auto MemBuf = Snap->getBuffer()->getInternalBuffer();
  // FIXME: Invalid offset shouldn't reach here.
  if (Offset >= MemBuf->getBufferSize())
    return StringRef();
  // Re-lex the snapshot through a private SourceManager holding a
  // non-owning view of the snapshot's buffer.
  SourceManager SM;
  auto BufId = SM.addNewSourceBuffer(llvm::MemoryBuffer::getMemBuffer(
      MemBuf->getBuffer(), MemBuf->getBufferIdentifier()));
  SourceLoc TokLoc = SM.getLocForOffset(BufId, Offset);
  return Lexer::getTokenAtLocation(SM, TokLoc).getText();
}
/// Maps \p Offset in \p NewSnap back to the corresponding offset in the
/// older snapshot \p OldSnap by undoing the intervening edits in reverse
/// order. Returns None when the offset lies inside text that was inserted
/// after OldSnap (there is no older position for it).
static llvm::Optional<unsigned>
mapOffsetToOlderSnapshot(unsigned Offset,
ImmutableTextSnapshotRef NewSnap,
ImmutableTextSnapshotRef OldSnap) {
// Collect all replacements from OldSnap up to NewSnap, oldest first.
SmallVector<ReplaceImmutableTextUpdateRef, 16> Updates;
OldSnap->foreachReplaceUntil(NewSnap,
[&](ReplaceImmutableTextUpdateRef Upd)->bool {
Updates.push_back(Upd);
return true;
});
// Walk the updates backwards and "undo" them.
for (auto I = Updates.rbegin(), E = Updates.rend(); I != E; ++I) {
auto Upd = *I;
if (Upd->getByteOffset() <= Offset &&
Offset < Upd->getByteOffset() + Upd->getText().size())
return None; // Offset is part of newly inserted text.
if (Upd->getByteOffset() <= Offset) {
// The edit happened entirely before Offset; shift by the size delta.
Offset += Upd->getLength(); // "bring back" what was removed.
Offset -= Upd->getText().size(); // "remove" what was added.
}
}
return Offset;
}
/// Maps \p Offset in the older snapshot \p OldSnap forward to the
/// corresponding offset in \p NewSnap by replaying the intervening edits
/// in order. Returns None when the offset lies inside text that one of
/// the edits removed.
static llvm::Optional<unsigned>
mapOffsetToNewerSnapshot(unsigned Offset,
ImmutableTextSnapshotRef OldSnap,
ImmutableTextSnapshotRef NewSnap) {
// Completed stays true only if no edit swallowed the offset.
bool Completed = OldSnap->foreachReplaceUntil(NewSnap,
[&](ReplaceImmutableTextUpdateRef Upd)->bool {
if (Upd->getByteOffset() <= Offset &&
Offset < Upd->getByteOffset() + Upd->getLength())
return false; // Offset is part of removed text.
if (Upd->getByteOffset() <= Offset) {
// Edit precedes Offset: shift by the size delta of the replacement.
Offset += Upd->getText().size();
Offset -= Upd->getLength();
}
return true;
});
if (Completed)
return Offset;
return None;
}
/// Tries to remap the location from a previous snapshot to the latest one and
/// then sets the location's line and column. On remap failure the
/// location's Filename is cleared to signal an unusable location.
static void mapLocToLatestSnapshot(
SwiftLangSupport &Lang, LocationInfo &Location,
ArrayRef<ImmutableTextSnapshotRef> PreviousASTSnaps) {
auto EditorDoc = Lang.getEditorDocuments()->findByPath(Location.Filename);
if (!EditorDoc)
return;
ImmutableTextSnapshotRef LatestSnap = EditorDoc->getLatestSnapshot();
if (!LatestSnap)
return;
for (auto &PrevSnap : PreviousASTSnaps) {
if (PrevSnap->isFromSameBuffer(LatestSnap)) {
// Same stamp means no edits in between; offsets are already valid.
if (PrevSnap->getStamp() == LatestSnap->getStamp())
break;
// Remap both ends of the range; give up if either falls into
// removed text.
auto OptBegin = mapOffsetToNewerSnapshot(Location.Offset,
PrevSnap, LatestSnap);
if (!OptBegin.hasValue()) {
Location.Filename = StringRef();
return;
}
auto OptEnd = mapOffsetToNewerSnapshot(Location.Offset +
Location.Length,
PrevSnap, LatestSnap);
if (!OptEnd.hasValue()) {
Location.Filename = StringRef();
return;
}
Location.Offset = *OptBegin;
Location.Length = *OptEnd - *OptBegin;
}
}
// Compute line/column against the latest snapshot's buffer.
std::tie(Location.Line, Location.Column) =
LatestSnap->getBuffer()->getLineAndColumn(Location.Offset);
}
/// Reports cursor info for a reference to the module \p Mod by building a
/// single-symbol CursorInfoData and handing it to \p Receiver.
/// Returns true for error.
static bool passCursorInfoForModule(ModuleEntity Mod,
                                    SwiftInterfaceGenMap &IFaceGenContexts,
                                    const CompilerInvocation &Invok,
    std::function<void(const RequestResult<CursorInfoData> &)> Receiver) {
  // Keep the full name alive on the stack for the duration of the Receiver
  // call; symbol fields may reference it.
  std::string FullName = Mod.getFullName();
  SmallVector<StringRef, 4> GroupNames;
  SmallVector<CursorSymbolInfo, 1> SymbolList;

  // There is exactly one symbol: the module itself.
  CursorSymbolInfo &Sym = SymbolList.emplace_back();
  Sym.Kind = SwiftLangSupport::getUIDForModuleRef();
  Sym.Name = Mod.getName();
  Sym.ModuleName = FullName;
  Sym.IsSystem = Mod.isSystemModule();
  // If a generated interface document exists for this module, report it.
  if (auto IFaceDoc = IFaceGenContexts.find(Sym.ModuleName, Invok))
    Sym.ModuleInterfaceName = IFaceDoc->getDocumentName();
  // Group names are only available for Swift modules.
  if (auto SwiftMod = Mod.getAsSwiftModule()) {
    ide::collectModuleGroups(const_cast<ModuleDecl *>(SwiftMod), GroupNames);
    Sym.ModuleGroupArray = llvm::makeArrayRef(GroupNames);
  }

  CursorInfoData Payload;
  Payload.Symbols = llvm::makeArrayRef(SymbolList);
  Receiver(RequestResult<CursorInfoData>::fromResult(Payload));
  return false;
}
/// Collects, into \p Refactorings, one RefactoringInfo per rename refactoring
/// kind available on \p VD, including the descriptive reason string when a
/// rename is unavailable.
///
/// \param RefInfo extra context about the reference being renamed, if the
///        cursor is on a reference rather than the declaration itself.
static void
collectAvailableRenameInfo(const ValueDecl *VD, Optional<RenameRefInfo> RefInfo,
                           SmallVectorImpl<RefactoringInfo> &Refactorings) {
  SmallVector<RenameAvailabilityInfo, 2> Renames;
  collectRenameAvailabilityInfo(VD, RefInfo, Renames);
  // Iterate by const reference: the original `for (auto Info : Renames)`
  // copied each RenameAvailabilityInfo per iteration for no benefit.
  for (const auto &Info : Renames) {
    Refactorings.emplace_back(
        SwiftLangSupport::getUIDForRefactoringKind(Info.Kind),
        ide::getDescriptiveRefactoringKindName(Info.Kind),
        ide::getDescriptiveRenameUnavailableReason(Info.AvailableKind));
  }
}
static void collectAvailableRefactoringsOtherThanRename(
ResolvedCursorInfo CursorInfo,
SmallVectorImpl<RefactoringInfo> &Refactorings) {
SmallVector<RefactoringKind, 8> Kinds;
collectAvailableRefactorings(CursorInfo, Kinds, /*ExcludeRename*/ true);
for (auto Kind : Kinds) {
Refactorings.emplace_back(SwiftLangSupport::getUIDForRefactoringKind(Kind),
ide::getDescriptiveRefactoringKindName(Kind),
StringRef());
}
}
/// If \p VD is a parameter and the cursor is on its (externally visible)
/// argument name, returns the buffer offset of the name of the enclosing
/// function or subscript declaration; otherwise returns \c None.
static Optional<unsigned>
getParamParentNameOffset(const ValueDecl *VD, SourceLoc Cursor) {
  if (Cursor.isInvalid())
    return None;
  SourceLoc Loc;
  if (auto PD = dyn_cast<ParamDecl>(VD)) {

    // Avoid returning parent loc for internal-only names.
    if (PD->getArgumentNameLoc().isValid() && PD->getArgumentNameLoc() != Cursor)
      return None;

    // Only function-like contexts have a name to report; other contexts leave
    // Loc invalid and fall through to the None return below.
    auto *DC = PD->getDeclContext();
    switch (DC->getContextKind()) {
      case DeclContextKind::SubscriptDecl:
        Loc = cast<SubscriptDecl>(DC)->getNameLoc();
        break;
      case DeclContextKind::AbstractFunctionDecl:
        Loc = cast<AbstractFunctionDecl>(DC)->getNameLoc();
        break;
      default:
        break;
    }
  }
  if (Loc.isInvalid())
    return None;
  // Translate the parent's name location into an offset within its buffer.
  auto &SM = VD->getASTContext().SourceMgr;
  return SM.getLocOffsetInBuffer(Loc, SM.findBufferContainingLoc(Loc));
}
/// Returns the name of the module that owns \p VD. Clang-imported decls
/// report their owning Clang module (copied into \p Allocator); Swift decls
/// report their module context, with cross-import overlays resolved to the
/// declaring module.
static StringRef getModuleName(const ValueDecl *VD,
                               llvm::BumpPtrAllocator &Allocator) {
  ASTContext &Ctx = VD->getASTContext();
  ClangImporter *Importer =
      static_cast<ClangImporter *>(Ctx.getClangModuleLoader());
  if (auto ClangNode = VD->getClangNode()) {
    // The full Clang module name is a temporary std::string, so it must be
    // copied into allocator-owned memory to outlive this call.
    if (const auto *ClangMod = Importer->getClangOwningModule(ClangNode))
      return copyString(Allocator, ClangMod->getFullModuleName());
    return "";
  }

  ModuleDecl *MD = VD->getModuleContext();
  // If the decl is from a cross-import overlay module, report the
  // overlay's declaring module as the owning module.
  if (ModuleDecl *Declaring = MD->getDeclaringModuleIfCrossImportOverlay())
    MD = Declaring;
  return MD->getNameStr();
}
/// Bundles a ValueDecl together with the facts about it that cursor-info
/// reporting needs: availability, the original property behind synthesized
/// property-wrapper decls, the base type for archetype replacement, and
/// whether the decl appears in a synthesized extension.
struct DeclInfo {
  const ValueDecl *VD;
  Type ContainerType;
  bool IsRef;
  bool IsDynamic;
  ArrayRef<NominalTypeDecl *> ReceiverTypes;

  /// If VD is a synthesized property wrapper backing storage (_foo) or
  /// projected value ($foo) of a property (foo), the property instead.
  /// Otherwise, VD.
  const ValueDecl *OriginalProperty = nullptr;
  // Defaults to true so that a null VD is treated as unavailable and the
  // remaining fields stay in their default states.
  bool Unavailable = true;
  Type BaseType;
  // True when the decl is reported as a member of a synthesized extension of
  // BaseType's nominal.
  bool InSynthesizedExtension = false;

  DeclInfo(const ValueDecl *VD, Type ContainerType, bool IsRef, bool IsDynamic,
           ArrayRef<NominalTypeDecl *> ReceiverTypes,
           const CompilerInvocation &Invoc)
      : VD(VD), ContainerType(ContainerType), IsRef(IsRef),
        IsDynamic(IsDynamic), ReceiverTypes(ReceiverTypes) {
    if (VD == nullptr)
      return;

    // The synthesized properties $foo and _foo aren't unavailable even if
    // the original property foo is, so check them rather than the original
    // property.
    Unavailable = AvailableAttr::isUnavailable(VD);
    // No point computing the rest since they won't be used anyway.
    if (Unavailable)
      return;

    OriginalProperty = VD;
    if (auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (auto *Wrapped = VarD->getOriginalWrappedProperty())
        OriginalProperty = Wrapped;
    }

    BaseType = findBaseTypeForReplacingArchetype(VD, ContainerType);
    InSynthesizedExtension = false;
    if (BaseType) {
      if (auto Target = BaseType->getAnyNominal()) {
        SynthesizedExtensionAnalyzer Analyzer(
            Target, PrintOptions::printModuleInterface(
                        Invoc.getFrontendOptions().PrintFullConvention));
        InSynthesizedExtension = Analyzer.isInSynthesizedExtension(VD);
      }
    }
  }
};
/// Copies the contents of \p Str into \p Allocator-owned memory, empties
/// \p Str so the scratch buffer can be reused, and returns the stable copy.
static StringRef copyAndClearString(llvm::BumpPtrAllocator &Allocator,
                                    SmallVectorImpl<char> &Str) {
  StringRef Scratch(Str.data(), Str.size());
  StringRef Stable = copyString(Allocator, Scratch);
  Str.clear();
  return Stable;
}
/// Copies the elements of \p Array into \p Allocator-owned memory, empties
/// \p Array so the scratch vector can be reused, and returns the stable copy.
template <typename T>
static ArrayRef<T> copyAndClearArray(llvm::BumpPtrAllocator &Allocator,
                                     SmallVectorImpl<T> &Array) {
  ArrayRef<T> Scratch = llvm::makeArrayRef(Array);
  ArrayRef<T> Stable = copyArray(Allocator, Scratch);
  Array.clear();
  return Stable;
}
/// Fills \p Location (filename, offset, length, line, column) from the
/// source range of the Clang declaration behind \p ClangNode. Leaves
/// \p Location untouched if a valid file character range cannot be formed.
static void setLocationInfoForClangNode(ClangNode ClangNode,
                                        ClangImporter *Importer,
                                        LocationInfo &Location) {
  clang::ASTContext &ClangCtx = Importer->getClangASTContext();
  clang::SourceManager &ClangSM = ClangCtx.getSourceManager();

  clang::SourceRange SR = ClangNode.getLocation();
  // For ObjC methods, report the range of the selector rather than the full
  // declaration.
  if (auto MD =
          dyn_cast_or_null<clang::ObjCMethodDecl>(ClangNode.getAsDecl())) {
    SR = clang::SourceRange(MD->getSelectorStartLoc(),
                            MD->getDeclaratorEndLoc());
  }

  // Expand the token range to a file character range (resolving macros etc.).
  clang::CharSourceRange CharRange =
      clang::Lexer::makeFileCharRange(clang::CharSourceRange::getTokenRange(SR),
                                      ClangSM, ClangCtx.getLangOpts());
  if (CharRange.isInvalid())
    return;

  std::pair<clang::FileID, unsigned> Decomp =
      ClangSM.getDecomposedLoc(CharRange.getBegin());
  if (!Decomp.first.isInvalid()) {
    if (auto FE = ClangSM.getFileEntryForID(Decomp.first)) {
      Location.Filename = FE->getName();

      std::pair<clang::FileID, unsigned> EndDecomp =
          ClangSM.getDecomposedLoc(CharRange.getEnd());

      Location.Offset = Decomp.second;
      Location.Length = EndDecomp.second - Decomp.second;
      Location.Line = ClangSM.getLineNumber(Decomp.first, Decomp.second);
      Location.Column = ClangSM.getColumnNumber(Decomp.first, Decomp.second);
    }
  }
}
/// Returns the byte length of \p TokenRange after extending its end (which
/// points at the start of the last token) to cover that whole token.
static unsigned getCharLength(SourceManager &SM, SourceRange TokenRange) {
  SourceLoc CharEndLoc = Lexer::getLocForEndOfToken(SM, TokenRange.End);
  return SM.getByteDistance(TokenRange.Start, CharEndLoc);
}
/// Fills \p Location for \p VD: from its Swift source location when valid,
/// otherwise from the underlying Clang node (if any). For functions the
/// reported length covers the whole signature; for named decls, the name;
/// otherwise the token at the location.
static void setLocationInfo(const ValueDecl *VD,
                            LocationInfo &Location) {
  ASTContext &Ctx = VD->getASTContext();
  SourceManager &SM = Ctx.SourceMgr;

  auto ClangNode = VD->getClangNode();

  auto Loc = VD->getLoc(/*SerializedOK=*/true);
  if (Loc.isValid()) {
    // Prefer the full signature range for function-like decls.
    auto getSignatureRange = [&](const ValueDecl *VD) -> Optional<unsigned> {
      if (auto FD = dyn_cast<AbstractFunctionDecl>(VD)) {
        SourceRange R = FD->getSignatureSourceRange();
        if (R.isValid())
          return getCharLength(SM, R);
      }
      return None;
    };
    unsigned NameLen;
    if (auto SigLen = getSignatureRange(VD)) {
      NameLen = SigLen.getValue();
    } else if (VD->hasName()) {
      NameLen = VD->getBaseName().userFacingName().size();
    } else {
      // Anonymous decl: fall back to the length of the token at Loc.
      NameLen = getCharLength(SM, Loc);
    }

    unsigned DeclBufID = SM.findBufferContainingLoc(Loc);
    Location.Filename = SM.getIdentifierForBuffer(DeclBufID);
    Location.Offset = SM.getLocOffsetInBuffer(Loc, DeclBufID);
    Location.Length = NameLen;
    std::tie(Location.Line, Location.Column) = SM.getLineAndColumnInBuffer(
        Loc, DeclBufID);
  } else if (ClangNode) {
    // No Swift location; derive it from the imported Clang declaration.
    ClangImporter *Importer =
        static_cast<ClangImporter*>(Ctx.getClangModuleLoader());
    setLocationInfoForClangNode(ClangNode, Importer, Location);
  }
}
/// Populates every field of \p Symbol (name, USRs, types, doc comment,
/// annotated declarations, related decls, optional symbol graph, and source
/// location) for the declaration described by \p DInfo.
///
/// All strings/arrays handed to \p Symbol are copied into \p Allocator so
/// they outlive the local scratch buffers. The single Buffer/OS pair is
/// reused for every printed string via copyAndClearString, so the print /
/// copy-and-clear ordering below is load-bearing.
///
/// \returns an error only if the decl's location cannot be remapped to the
/// latest document snapshot; everything else is filled best-effort.
static llvm::Error
fillSymbolInfo(CursorSymbolInfo &Symbol, const DeclInfo &DInfo,
               ModuleDecl *MainModule, SourceLoc CursorLoc, bool AddSymbolGraph,
               SwiftLangSupport &Lang, const CompilerInvocation &Invoc,
               ArrayRef<ImmutableTextSnapshotRef> PreviousSnaps,
               llvm::BumpPtrAllocator &Allocator) {
  SmallString<256> Buffer;
  SmallVector<StringRef, 4> Strings;
  llvm::raw_svector_ostream OS(Buffer);

  Symbol.DeclarationLang = SwiftLangSupport::getUIDForDeclLanguage(DInfo.VD);
  Symbol.Kind = SwiftLangSupport::getUIDForDecl(DInfo.VD, DInfo.IsRef);

  SwiftLangSupport::printDisplayName(DInfo.VD, OS);
  Symbol.Name = copyAndClearString(Allocator, Buffer);

  // The USR is based on the original property for property-wrapper
  // synthesized decls, and gains a suffix when the decl lives in a
  // synthesized extension.
  SwiftLangSupport::printUSR(DInfo.OriginalProperty, OS);
  if (DInfo.InSynthesizedExtension) {
    OS << LangSupport::SynthesizedUSRSeparator;
    SwiftLangSupport::printUSR(DInfo.BaseType->getAnyNominal(), OS);
  }
  Symbol.USR = copyAndClearString(Allocator, Buffer);

  {
    PrintOptions Options;
    Options.PrintTypeAliasUnderlyingType = true;
    DInfo.VD->getInterfaceType().print(OS, Options);
  }
  Symbol.TypeName = copyAndClearString(Allocator, Buffer);

  SwiftLangSupport::printDeclTypeUSR(DInfo.VD, OS);
  Symbol.TypeUSR = copyAndClearString(Allocator, Buffer);

  // Container type USR is only meaningful once archetypes are resolved.
  if (DInfo.ContainerType && !DInfo.ContainerType->hasArchetype()) {
    SwiftLangSupport::printTypeUSR(DInfo.ContainerType, OS);
  }
  Symbol.ContainerTypeUSR = copyAndClearString(Allocator, Buffer);

  ide::getDocumentationCommentAsXML(DInfo.OriginalProperty, OS);
  Symbol.DocComment = copyAndClearString(Allocator, Buffer);

  {
    // In a synthesized extension the group comes from the extended nominal.
    auto *Group = DInfo.InSynthesizedExtension ? DInfo.BaseType->getAnyNominal()
                                               : DInfo.VD;
    if (auto Name = Group->getGroupName())
      Symbol.GroupName = Name.getValue();
  }

  ide::getLocalizationKey(DInfo.VD, OS);
  Symbol.LocalizationKey = copyAndClearString(Allocator, Buffer);

  printAnnotatedDeclaration(DInfo.VD, DInfo.BaseType, OS);
  Symbol.AnnotatedDeclaration = copyAndClearString(Allocator, Buffer);

  SwiftLangSupport::printFullyAnnotatedDeclaration(DInfo.VD, DInfo.BaseType,
                                                   OS);
  Symbol.FullyAnnotatedDeclaration = copyAndClearString(Allocator, Buffer);

  if (AddSymbolGraph) {
    SmallVector<symbolgraphgen::PathComponent, 4> PathComponents;
    SmallVector<symbolgraphgen::FragmentInfo, 8> FragmentInfos;
    symbolgraphgen::SymbolGraphOptions Options{
        "",
        Invoc.getLangOptions().Target,
        /*PrettyPrint=*/false,
        AccessLevel::Private,
        /*EmitSynthesizedMembers*/ false,
        /*PrintMessages*/ false,
        /*SkipInheritedDocs*/ false,
        /*IncludeSPISymbols*/ true,
    };
    symbolgraphgen::printSymbolGraphForDecl(DInfo.VD, DInfo.BaseType,
                                            DInfo.InSynthesizedExtension,
                                            Options, OS, PathComponents,
                                            FragmentInfos);
    Symbol.SymbolGraph = copyAndClearString(Allocator, Buffer);

    // Parent contexts (the decl's path from module to itself), each with a
    // USR printed through the shared buffer.
    SmallVector<ParentInfo, 4> Parents;
    for (auto &Component : PathComponents) {
      SwiftLangSupport::printUSR(Component.VD, OS);
      Parents.emplace_back(copyString(Allocator, Component.Title),
                           Component.Kind,
                           copyAndClearString(Allocator, Buffer));
    };
    Symbol.ParentContexts = copyArray(Allocator, llvm::makeArrayRef(Parents));

    // Every decl referenced by the symbol graph fragments, with its own
    // parent chain and source file (Swift location preferred, Clang
    // fallback).
    SmallVector<ReferencedDeclInfo, 8> ReferencedDecls;
    for (auto &FI: FragmentInfos) {
      SmallVector<ParentInfo, 4> FIParents;
      for (auto &Component: FI.ParentContexts) {
        SwiftLangSupport::printUSR(Component.VD, OS);
        FIParents.emplace_back(copyString(Allocator, Component.Title),
                               Component.Kind,
                               copyAndClearString(Allocator, Buffer));
      }

      ASTContext &Ctx = FI.VD->getASTContext();
      StringRef Filename = "";
      if (auto Loc = FI.VD->getLoc(/*SerializedOK=*/true)) {
        Filename = Ctx.SourceMgr.getDisplayNameForLoc(Loc);
      } else if (auto ClangNode = FI.VD->getClangNode()) {
        auto Loc = ClangNode.getLocation();
        if (Loc.isValid()) {
          Filename = Ctx.getClangModuleLoader()->getClangASTContext()
                         .getSourceManager()
                         .getFilename(Loc);
        }
      }

      SwiftLangSupport::printUSR(FI.VD, OS);
      ReferencedDecls.emplace_back(
          copyAndClearString(Allocator, Buffer),
          SwiftLangSupport::getUIDForDeclLanguage(FI.VD),
          swift::getAccessLevelSpelling(FI.VD->getFormalAccess()), Filename,
          getModuleName(FI.VD, Allocator),
          FI.VD->getModuleContext()->isSystemModule(),
          FI.VD->isSPI(),
          copyArray(Allocator, llvm::makeArrayRef(FIParents)));
    }
    Symbol.ReferencedSymbols = copyArray(Allocator,
                                         llvm::makeArrayRef(ReferencedDecls));
  }

  Symbol.ModuleName = getModuleName(DInfo.VD, Allocator);
  if (auto IFaceGenRef =
          Lang.getIFaceGenContexts().find(Symbol.ModuleName, Invoc))
    Symbol.ModuleInterfaceName = IFaceGenRef->getDocumentName();

  setLocationInfo(DInfo.OriginalProperty, Symbol.Location);
  if (!Symbol.Location.Filename.empty()) {
    // mapLocToLatestSnapshot signals failure by clearing the filename.
    mapLocToLatestSnapshot(Lang, Symbol.Location, PreviousSnaps);
    if (Symbol.Location.Filename.empty()) {
      return llvm::createStringError(
          llvm::inconvertibleErrorCode(),
          "Failed to remap declaration to latest snapshot.");
    }
  }

  // USRs of every decl this one overrides (Swift or imported Clang).
  ide::walkOverriddenDecls(
      DInfo.VD,
      [&](llvm::PointerUnion<const ValueDecl *, const clang::NamedDecl *> D) {
        // Could have junk in from previous failing USR print
        Buffer.clear();
        if (auto VD = D.dyn_cast<const ValueDecl *>()) {
          if (SwiftLangSupport::printUSR(VD, OS))
            return;
        } else {
          if (clang::index::generateUSRForDecl(
                  D.get<const clang::NamedDecl *>(), Buffer))
            return;
        }
        Strings.push_back(copyAndClearString(Allocator, Buffer));
      });
  Symbol.OverrideUSRs = copyAndClearArray(Allocator, Strings);

  // XML-annotated entries for related decls (e.g. overloads sharing a name).
  walkRelatedDecls(DInfo.VD, [&](const ValueDecl *RelatedDecl,
                                 bool UseOriginalBase, bool DuplicateName) {
    OS << "<RelatedName usr=\"";
    SwiftLangSupport::printUSR(RelatedDecl, OS);
    OS << "\">";
    if (isa<AbstractFunctionDecl>(RelatedDecl) && DuplicateName) {
      // Related decls are generally overloads, so print parameter types to
      // differentiate them.
      PrintOptions PO;
      PO.SkipAttributes = true;
      PO.PrintStaticKeyword = false;
      PO.PrintSelfAccessKindKeyword = false;
      PO.SkipIntroducerKeywords = true;
      PO.ArgAndParamPrinting =
          PrintOptions::ArgAndParamPrintingMode::ArgumentOnly;
      XMLEscapingPrinter Printer(OS);
      if (UseOriginalBase && DInfo.BaseType) {
        PO.setBaseType(DInfo.BaseType);
        PO.PrintAsMember = true;
      }
      RelatedDecl->print(Printer, PO);
    } else {
      SmallString<128> RelatedBuffer;
      llvm::raw_svector_ostream RelatedOS(RelatedBuffer);
      SwiftLangSupport::printDisplayName(RelatedDecl, RelatedOS);
      swift::markup::appendWithXMLEscaping(OS, RelatedBuffer);
    }
    OS << "</RelatedName>";

    Strings.push_back(copyAndClearString(Allocator, Buffer));
  });
  Symbol.AnnotatedRelatedDeclarations = copyAndClearArray(Allocator, Strings);

  for (auto *ReceiverTy : DInfo.ReceiverTypes) {
    if (!SwiftLangSupport::printUSR(ReceiverTy, OS))
      Strings.push_back(copyAndClearString(Allocator, Buffer));
  }
  Symbol.ReceiverUSRs = copyAndClearArray(Allocator, Strings);

  Symbol.IsSystem = DInfo.VD->getModuleContext()->isSystemModule();
  Symbol.IsDynamic = DInfo.IsDynamic;
  Symbol.IsSynthesized = DInfo.VD->isImplicit();

  Symbol.ParentNameOffset = getParamParentNameOffset(DInfo.VD, CursorLoc);

  return llvm::Error::success();
}
/// Returns true on success, false on error (and sets `Diagnostic` accordingly).
///
/// Builds the cursor-info payload for a resolved value declaration and hands
/// it to \p Receiver. For constructor calls the referenced type is reported
/// as the primary symbol and the constructor as a secondary one.
static bool passCursorInfoForDecl(
    const ResolvedCursorInfo &Info, ModuleDecl *MainModule,
    bool AddRefactorings, bool AddSymbolGraph,
    ArrayRef<RefactoringInfo> KnownRefactoringInfo, SwiftLangSupport &Lang,
    const CompilerInvocation &Invoc, std::string &Diagnostic,
    ArrayRef<ImmutableTextSnapshotRef> PreviousSnaps,
    std::function<void(const RequestResult<CursorInfoData> &)> Receiver) {
  DeclInfo OrigInfo(Info.ValueD, Info.ContainerType, Info.IsRef, Info.IsDynamic,
                    Info.ReceiverTypes, Invoc);
  DeclInfo CtorTypeInfo(Info.CtorTyRef, Type(), true, false,
                        ArrayRef<NominalTypeDecl *>(), Invoc);
  // When the cursor resolved through a constructor reference, the referenced
  // type (CtorTypeInfo) becomes the main result.
  DeclInfo &MainInfo = CtorTypeInfo.VD ? CtorTypeInfo : OrigInfo;

  if (MainInfo.Unavailable) {
    Diagnostic = "Unavailable in the current compilation context.";
    return false;
  }

  // All strings/arrays in the symbols below are owned by this allocator for
  // the duration of the Receiver call.
  llvm::BumpPtrAllocator Allocator;

  SmallVector<CursorSymbolInfo, 2> Symbols;
  CursorSymbolInfo &MainSymbol = Symbols.emplace_back();
  // The primary result for constructor calls, eg. `MyType()` should be
  // the type itself, rather than the constructor. The constructor will be
  // added as a secondary result.
  if (auto Err = fillSymbolInfo(MainSymbol, MainInfo, MainModule, Info.Loc,
                                AddSymbolGraph, Lang, Invoc, PreviousSnaps,
                                Allocator)) {
    llvm::handleAllErrors(std::move(Err), [&](const llvm::StringError &E) {
      Diagnostic = E.message();
    });
    return false;
  }

  // Secondary symbol: the constructor itself, when it differs from the main
  // result and is itself available. Failure here is non-fatal.
  if (MainInfo.VD != OrigInfo.VD && !OrigInfo.Unavailable) {
    CursorSymbolInfo &CtorSymbol = Symbols.emplace_back();
    if (auto Err = fillSymbolInfo(CtorSymbol, OrigInfo, MainModule, Info.Loc,
                                  AddSymbolGraph, Lang, Invoc, PreviousSnaps,
                                  Allocator)) {
      // Ignore but make sure to remove the partially-filled symbol
      llvm::handleAllErrors(std::move(Err), [&](const llvm::StringError &E) {});
      Symbols.pop_back();
    }
  }

  SmallVector<RefactoringInfo, 8> Refactorings;
  if (AddRefactorings) {
    Optional<RenameRefInfo> RefInfo;
    if (Info.IsRef)
      RefInfo = {Info.SF, Info.Loc, Info.IsKeywordArgument};
    collectAvailableRenameInfo(MainInfo.VD, RefInfo, Refactorings);
    collectAvailableRefactoringsOtherThanRename(Info, Refactorings);
  }
  // Append refactorings the caller already discovered (e.g. range-based).
  Refactorings.insert(Refactorings.end(), KnownRefactoringInfo.begin(),
                      KnownRefactoringInfo.end());

  CursorInfoData Data;
  Data.Symbols = llvm::makeArrayRef(Symbols);
  Data.AvailableActions = llvm::makeArrayRef(Refactorings);
  Receiver(RequestResult<CursorInfoData>::fromResult(Data));
  return true;
}
/// Builds the clang::DeclarationName requested by \p Info for the Clang decl
/// \p ND. \p Info must carry an ObjC name kind and must specify either a base
/// name or selector argument labels, but not both.
///
/// For selector-based names, pieces missing from \p Info.ArgNames are filled
/// from \p ND's original selector. Returns an empty DeclarationName when the
/// request is malformed (both/neither of base name and arg names, a
/// non-selector original name, or more labels than selector slots).
static clang::DeclarationName
getClangDeclarationName(const clang::NamedDecl *ND, NameTranslatingInfo &Info) {
  auto &Ctx = ND->getASTContext();
  auto OrigName = ND->getDeclName();
  assert(SwiftLangSupport::getNameKindForUID(Info.NameKind) == NameKind::ObjC);
  if (Info.BaseName.empty() == Info.ArgNames.empty()) {
    // cannot have both.
    return clang::DeclarationName();
  }
  if (!Info.BaseName.empty()) {
    // Simple identifier rename.
    return clang::DeclarationName(&Ctx.Idents.get(Info.BaseName));
  } else {
    // Selector rename: only meaningful if the original name is a selector.
    switch (OrigName.getNameKind()) {
    case clang::DeclarationName::ObjCZeroArgSelector:
    case clang::DeclarationName::ObjCOneArgSelector:
    case clang::DeclarationName::ObjCMultiArgSelector:
      break;
    default:
      return clang::DeclarationName();
    }

    auto OrigSel = OrigName.getObjCSelector();
    unsigned NumPieces = OrigSel.isUnarySelector() ? 1 : OrigSel.getNumArgs();
    if (Info.ArgNames.size() > NumPieces)
      return clang::DeclarationName();

    ArrayRef<StringRef> Args = llvm::makeArrayRef(Info.ArgNames);
    std::vector<clang::IdentifierInfo *> Pieces;
    // The piece count is known up front; reserve to avoid reallocations.
    Pieces.reserve(NumPieces);
    for (unsigned i = 0; i < NumPieces; ++i) {
      if (i >= Info.ArgNames.size() || Info.ArgNames[i].empty()) {
        // No replacement for this slot: keep the original selector piece.
        Pieces.push_back(OrigSel.getIdentifierInfoForSlot(i));
      } else {
        // Accept labels with or without a trailing ':'.
        StringRef T = Args[i];
        Pieces.push_back(&Ctx.Idents.get(T.endswith(":") ? T.drop_back() : T));
      }
    }
    return clang::DeclarationName(
        Ctx.Selectors.getSelector(OrigSel.getNumArgs(), Pieces.data()));
  }
}
/// Builds the Swift DeclName requested by \p Info for \p VD, starting from
/// VD's current name and overriding the base name and/or argument labels that
/// \p Info specifies ("_" clears a label, "init" maps to the constructor base
/// name). Returns an empty DeclName if \p Info has more labels than VD's name.
static DeclName getSwiftDeclName(const ValueDecl *VD,
                                 NameTranslatingInfo &Info) {
  auto &Ctx = VD->getDeclContext()->getASTContext();
  assert(SwiftLangSupport::getNameKindForUID(Info.NameKind) == NameKind::Swift);
  const DeclName OrigName = VD->getName();
  // Base name: keep the original unless Info overrides it; "init" denotes the
  // special constructor base name rather than a plain identifier.
  DeclBaseName BaseName = Info.BaseName.empty()
                              ? OrigName.getBaseName()
                              : DeclBaseName(
                                    Info.BaseName == "init"
                                        ? DeclBaseName::createConstructor()
                                        : Ctx.getIdentifier(Info.BaseName));
  auto OrigArgs = OrigName.getArgumentNames();
  SmallVector<Identifier, 8> Args(OrigArgs.begin(), OrigArgs.end());
  // More labels than the original name has slots is a malformed request.
  if (Info.ArgNames.size() > OrigArgs.size())
    return DeclName();
  for (unsigned i = 0; i < OrigArgs.size(); ++i) {
    if (i < Info.ArgNames.size() && !Info.ArgNames[i].empty()) {
      StringRef Arg = Info.ArgNames[i];
      // "_" means "no external label" and maps to the empty identifier.
      Args[i] = Ctx.getIdentifier(Arg == "_" ? StringRef() : Arg);
    }
  }
  return DeclName(Ctx, BaseName, llvm::makeArrayRef(Args));
}
/// Returns true on success, false on error (and sets `Diagnostic` accordingly).
///
/// Translates the name of the decl under the cursor between Swift and ObjC
/// (direction chosen by \p Info.NameKind) and reports the translated name to
/// \p Receiver.
static bool passNameInfoForDecl(ResolvedCursorInfo CursorInfo,
                                NameTranslatingInfo &Info,
                                std::string &Diagnostic,
    std::function<void(const RequestResult<NameTranslatingInfo> &)> Receiver) {
  auto *VD = CursorInfo.ValueD;

  // If the given name is not a function name, and the cursor points to
  // a constructor call, we use the type declaration instead of the init
  // declaration to translate the name.
  if (Info.ArgNames.empty() && !Info.IsZeroArgSelector) {
    if (auto *TD = CursorInfo.CtorTyRef) {
      VD = TD;
    }
  }
  switch (SwiftLangSupport::getNameKindForUID(Info.NameKind)) {
  case NameKind::Swift: {
    // Swift -> ObjC: the result is either a plain identifier or a selector.
    NameTranslatingInfo Result;
    auto DeclName = getSwiftDeclName(VD, Info);
    if (!DeclName) {
      Diagnostic = "Unable to resolve Swift declaration name.";
      return false;
    }
    auto ResultPair =
        swift::objc_translation::getObjCNameForSwiftDecl(VD, DeclName);
    Identifier Name = ResultPair.first;
    if (!Name.empty()) {
      // Plain identifier case.
      Result.NameKind = SwiftLangSupport::getUIDForNameKind(NameKind::ObjC);
      Result.BaseName = Name.str();
      Receiver(RequestResult<NameTranslatingInfo>::fromResult(Result));
    } else if (ObjCSelector Selector = ResultPair.second) {
      // Selector case: split "foo:bar:" into pieces; a trailing ':' yields a
      // final empty piece that is dropped.
      Result.NameKind = SwiftLangSupport::getUIDForNameKind(NameKind::ObjC);
      SmallString<64> Buffer;
      StringRef Total = Selector.getString(Buffer);
      SmallVector<StringRef, 4> Pieces;
      Total.split(Pieces, ":");
      if (Selector.getNumArgs()) {
        assert(Pieces.back().empty());
        Pieces.pop_back();
      } else {
        Result.IsZeroArgSelector = true;
      }
      Result.ArgNames.insert(Result.ArgNames.begin(), Pieces.begin(), Pieces.end());
      Receiver(RequestResult<NameTranslatingInfo>::fromResult(Result));
    } else {
      Diagnostic = "Unable to resolve name info.";
      return false;
    }
    return true;
  }
  case NameKind::ObjC: {
    // ObjC -> Swift: find the underlying Clang decl (walking up the override
    // chain if needed) and ask the importer for the Swift name.
    ClangImporter *Importer = static_cast<ClangImporter *>(VD->getDeclContext()->
      getASTContext().getClangModuleLoader());

    const clang::NamedDecl *Named = nullptr;
    auto *BaseDecl = VD;

    while (!Named && BaseDecl) {
      Named = dyn_cast_or_null<clang::NamedDecl>(BaseDecl->getClangDecl());
      BaseDecl = BaseDecl->getOverriddenDecl();
    }
    if (!Named) {
      Diagnostic = "Unable to resolve a named declaration.";
      return false;
    }

    auto ObjCName = getClangDeclarationName(Named, Info);
    if (!ObjCName) {
      Diagnostic = "Unable to resolve ObjC declaration name.";
      return false;
    }

    DeclName Name = Importer->importName(Named, ObjCName);
    NameTranslatingInfo Result;
    Result.NameKind = SwiftLangSupport::getUIDForNameKind(NameKind::Swift);
    Result.BaseName = Name.getBaseName().userFacingName();
    llvm::transform(Name.getArgumentNames(),
                    std::back_inserter(Result.ArgNames),
                    [](Identifier Id) { return Id.str(); });
    Receiver(RequestResult<NameTranslatingInfo>::fromResult(Result));
    return true;
  }
  }
}
/// Base consumer for requests identified by a (file, offset, length) triple
/// that may be served from an already-built (possibly older) AST by remapping
/// the offset between document snapshots.
class CursorRangeInfoConsumer : public SwiftASTConsumer {
protected:
  SwiftLangSupport &Lang;
  SwiftInvocationRef ASTInvok;
  std::string InputFile;
  // Offset/Length are mutable: canUseASTWithSnapshots may rewrite Offset to
  // its position in an older snapshot.
  unsigned Offset;
  unsigned Length;

private:
  const bool TryExistingAST;
  // Snapshots the served AST was built from; non-empty only when an existing
  // AST was reused (see getPreviousASTSnaps).
  SmallVector<ImmutableTextSnapshotRef, 4> PreviousASTSnaps;

protected:
  bool CancelOnSubsequentRequest;

protected:
  ArrayRef<ImmutableTextSnapshotRef> getPreviousASTSnaps() {
    return llvm::makeArrayRef(PreviousASTSnaps);
  }

public:
  CursorRangeInfoConsumer(StringRef InputFile, unsigned Offset, unsigned Length,
                          SwiftLangSupport &Lang, SwiftInvocationRef ASTInvok,
                          bool TryExistingAST, bool CancelOnSubsequentRequest)
    : Lang(Lang), ASTInvok(ASTInvok),InputFile(InputFile.str()), Offset(Offset),
      Length(Length), TryExistingAST(TryExistingAST),
      CancelOnSubsequentRequest(CancelOnSubsequentRequest) {}

  /// Decides whether an already-built AST (for \p Snapshots) can serve this
  /// request; if yes, rewrites Offset into that snapshot's coordinates and
  /// records the snapshots for later forward-remapping of results.
  bool canUseASTWithSnapshots(ArrayRef<ImmutableTextSnapshotRef> Snapshots) override {
    if (!TryExistingAST) {
      LOG_INFO_FUNC(High, "will resolve using up-to-date AST");
      return false;
    }

    // If there is an existing AST and the offset can be mapped back to the
    // document snapshot that was used to create it, then use that AST.
    // The downside is that we may return stale information, but we get the
    // benefit of increased responsiveness, since the request will not be
    // blocked waiting on the AST to be fully typechecked.

    ImmutableTextSnapshotRef InputSnap;
    if (auto EditorDoc = Lang.getEditorDocuments()->findByPath(InputFile))
      InputSnap = EditorDoc->getLatestSnapshot();
    if (!InputSnap)
      return false;

    auto mappedBackOffset = [&]()->llvm::Optional<unsigned> {
      for (auto &Snap : Snapshots) {
        if (Snap->isFromSameBuffer(InputSnap)) {
          // Same stamp means the buffer hasn't changed; no remap needed.
          if (Snap->getStamp() == InputSnap->getStamp())
            return Offset;

          auto OptOffset = mapOffsetToOlderSnapshot(Offset, InputSnap, Snap);
          if (!OptOffset.hasValue())
            return None;

          // Check that the new and old offset still point to the same token.
          StringRef NewTok = getSourceToken(Offset, InputSnap);
          if (NewTok.empty())
            return None;
          if (NewTok == getSourceToken(OptOffset.getValue(), Snap))
            return OptOffset;

          return None;
        }
      }
      return None;
    };

    auto OldOffsetOpt = mappedBackOffset();
    if (OldOffsetOpt.hasValue()) {
      Offset = *OldOffsetOpt;
      PreviousASTSnaps.append(Snapshots.begin(), Snapshots.end());
      LOG_INFO_FUNC(High, "will try existing AST");
      return true;
    }

    LOG_INFO_FUNC(High, "will resolve using up-to-date AST");
    return false;
  }
};
/// Asynchronously resolves cursor info (and, when \p Actionables is set,
/// available refactoring actions) at \p Offset / \p Length in \p InputFile,
/// delivering the result through \p Receiver.
///
/// When \p TryExistingAST is set, an already-built AST may be used (see
/// CursorRangeInfoConsumer); on failure with a stale AST the request is
/// retried once against the up-to-date AST.
static void resolveCursor(
    SwiftLangSupport &Lang, StringRef InputFile, unsigned Offset,
    unsigned Length, bool Actionables, bool SymbolGraph,
    SwiftInvocationRef Invok, bool TryExistingAST,
    bool CancelOnSubsequentRequest,
    llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> fileSystem,
    SourceKitCancellationToken CancellationToken,
    std::function<void(const RequestResult<CursorInfoData> &)> Receiver) {
  assert(Invok);
  assert(fileSystem);

  class CursorInfoConsumer : public CursorRangeInfoConsumer {
    bool Actionables;
    bool SymbolGraph;
    SourceKitCancellationToken CancellationToken;
    std::function<void(const RequestResult<CursorInfoData> &)> Receiver;

  public:
    CursorInfoConsumer(
        StringRef InputFile, unsigned Offset, unsigned Length, bool Actionables,
        bool SymbolGraph, SwiftLangSupport &Lang, SwiftInvocationRef ASTInvok,
        bool TryExistingAST, bool CancelOnSubsequentRequest,
        SourceKitCancellationToken CancellationToken,
        std::function<void(const RequestResult<CursorInfoData> &)> Receiver)
        : CursorRangeInfoConsumer(InputFile, Offset, Length, Lang, ASTInvok,
                                  TryExistingAST, CancelOnSubsequentRequest),
          Actionables(Actionables), SymbolGraph(SymbolGraph),
          CancellationToken(CancellationToken), Receiver(std::move(Receiver)) {}

    void handlePrimaryAST(ASTUnitRef AstUnit) override {
      auto &CompIns = AstUnit->getCompilerInstance();
      ModuleDecl *MainModule = CompIns.getMainModule();
      SourceManager &SM = CompIns.getSourceMgr();
      unsigned BufferID = AstUnit->getPrimarySourceFile().getBufferID().getValue();
      SourceLoc Loc =
        Lexer::getLocForStartOfToken(SM, BufferID, Offset);
      if (Loc.isInvalid()) {
        Receiver(RequestResult<CursorInfoData>::fromError(
          "Unable to resolve the start of the token."));
        return;
      }

      // Sanitize length.
      if (Length) {
        SourceLoc TokEnd = Lexer::getLocForEndOfToken(SM, Loc);
        SourceLoc EndLoc = SM.getLocForOffset(BufferID, Offset + Length);

        // If TokEnd is not before the given EndLoc, the EndLoc contains no
        // more stuff than this token, so set the length to 0.
        if (SM.isBeforeInBuffer(EndLoc, TokEnd) || TokEnd == EndLoc)
          Length = 0;
      }

      // Retrieve relevant actions on the code under selection.
      llvm::SmallVector<RefactoringInfo, 8> Actions;
      if (Actionables && Length) {
        SmallVector<RefactoringKind, 8> Kinds;
        RangeConfig Range;
        Range.BufferId = BufferID;
        auto Pair = SM.getLineAndColumnInBuffer(Loc);
        Range.Line = Pair.first;
        Range.Column = Pair.second;
        Range.Length = Length;
        bool CollectRangeStartRefactorings = false;
        collectAvailableRefactorings(&AstUnit->getPrimarySourceFile(), Range,
                                     CollectRangeStartRefactorings, Kinds, {});
        for (RefactoringKind Kind : Kinds) {
          Actions.emplace_back(SwiftLangSupport::getUIDForRefactoringKind(Kind),
                               getDescriptiveRefactoringKindName(Kind),
                               /*UnavailableReason*/ StringRef());
        }
        if (!CollectRangeStartRefactorings) {
          // If Length is given then this request is only for refactorings,
          // return straight away unless we need cursor based refactorings as
          // well.
          CursorInfoData Data;
          Data.AvailableActions = llvm::makeArrayRef(Actions);
          Receiver(RequestResult<CursorInfoData>::fromResult(Data));
          return;
        }
        // Fall through to collect cursor based refactorings
      }

      auto *File = &AstUnit->getPrimarySourceFile();
      // Resolve what the cursor points at (cached via the request evaluator).
      ResolvedCursorInfo CursorInfo =
        evaluateOrDefault(File->getASTContext().evaluator,
                          CursorInfoRequest{CursorInfoOwner(File, Loc)},
                          ResolvedCursorInfo());

      CompilerInvocation CompInvok;
      ASTInvok->applyTo(CompInvok);

      switch (CursorInfo.Kind) {
      case CursorInfoKind::ModuleRef:
        passCursorInfoForModule(CursorInfo.Mod, Lang.getIFaceGenContexts(),
                                CompInvok, Receiver);
        return;
      case CursorInfoKind::ValueRef: {
        std::string Diagnostic;
        bool Success = passCursorInfoForDecl(
            CursorInfo, MainModule, Actionables, SymbolGraph, Actions, Lang,
            CompInvok, Diagnostic, getPreviousASTSnaps(), Receiver);
        if (!Success) {
          if (!getPreviousASTSnaps().empty()) {
            // Attempt again using the up-to-date AST.
            resolveCursor(Lang, InputFile, Offset, Length, Actionables,
                          SymbolGraph, ASTInvok, /*TryExistingAST=*/false,
                          CancelOnSubsequentRequest, SM.getFileSystem(),
                          CancellationToken, Receiver);
          } else {
            CursorInfoData Info;
            Info.InternalDiagnostic = Diagnostic;
            Receiver(RequestResult<CursorInfoData>::fromResult(Info));
          }
        }
        return;
      }
      case CursorInfoKind::ExprStart:
      case CursorInfoKind::StmtStart: {
        // No decl under the cursor; refactoring actions may still apply.
        if (Actionables) {
          collectAvailableRefactoringsOtherThanRename(CursorInfo, Actions);
          if (!Actions.empty()) {
            CursorInfoData Data;
            Data.AvailableActions = llvm::makeArrayRef(Actions);
            Receiver(RequestResult<CursorInfoData>::fromResult(Data));
            return;
          }
        }

        CursorInfoData Info;
        Info.InternalDiagnostic =
            "Resolved to incomplete expression or statement.";
        Receiver(RequestResult<CursorInfoData>::fromResult(Info));
        return;
      }
      case CursorInfoKind::Invalid:
        CursorInfoData Data;
        if (Actionables) {
          Data.AvailableActions = llvm::makeArrayRef(Actions);
        } else {
          Data.InternalDiagnostic = "Unable to resolve cursor info.";
        }
        Receiver(RequestResult<CursorInfoData>::fromResult(Data));
        return;
      }
    }

    void cancelled() override {
      Receiver(RequestResult<CursorInfoData>::cancelled());
    }

    void failed(StringRef Error) override {
      LOG_WARN_FUNC("cursor info failed: " << Error);
      Receiver(RequestResult<CursorInfoData>::fromError(Error));
    }
  };

  auto Consumer = std::make_shared<CursorInfoConsumer>(
      InputFile, Offset, Length, Actionables, SymbolGraph, Lang, Invok,
      TryExistingAST, CancelOnSubsequentRequest, CancellationToken, Receiver);

  /// FIXME: When request cancellation is implemented and Xcode adopts it,
  /// don't use 'OncePerASTToken'.
  // The token addresses identify this request class so a newer request of the
  // same kind can cancel a pending older one.
  static const char OncePerASTToken = 0;
  static const char OncePerASTTokenWithActionables = 0;
  const void *Once = nullptr;
  if (CancelOnSubsequentRequest)
    Once = Actionables ? &OncePerASTTokenWithActionables : &OncePerASTToken;
  Lang.getASTManager()->processASTAsync(Invok, std::move(Consumer), Once,
                                        CancellationToken, fileSystem);
}
/// Asynchronously computes the diagnostics for \p InputFile's primary source
/// buffer and delivers them through \p Receiver.
static void computeDiagnostics(
    SwiftLangSupport &Lang, StringRef InputFile, SwiftInvocationRef Invok,
    llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FileSystem,
    SourceKitCancellationToken CancellationToken,
    std::function<void(const RequestResult<DiagnosticsResult> &)> Receiver) {

  class DiagnosticsConsumer : public SwiftASTConsumer {
    std::function<void(const RequestResult<DiagnosticsResult> &)> Receiver;

  public:
    DiagnosticsConsumer(
        std::function<void(const RequestResult<DiagnosticsResult> &)> Receiver)
        : Receiver(Receiver) {}

    void handlePrimaryAST(ASTUnitRef AstUnit) override {
      unsigned BufferID =
          AstUnit->getPrimarySourceFile().getBufferID().getValue();
      auto &DiagConsumer = AstUnit->getEditorDiagConsumer();
      auto Diagnostics = DiagConsumer.getDiagnosticsForBuffer(BufferID);
      Receiver(RequestResult<DiagnosticsResult>::fromResult(Diagnostics));
    }

    // The original consumer only overrode handlePrimaryAST, so a cancelled or
    // failed AST build silently dropped the request and the client never got
    // a reply. Mirror CursorInfoConsumer and always invoke the Receiver.
    void cancelled() override {
      Receiver(RequestResult<DiagnosticsResult>::cancelled());
    }

    void failed(StringRef Error) override {
      Receiver(RequestResult<DiagnosticsResult>::fromError(Error));
    }
  };

  auto Consumer = std::make_shared<DiagnosticsConsumer>(std::move(Receiver));

  Lang.getASTManager()->processASTAsync(Invok, std::move(Consumer),
                                        /*OncePerASTToken=*/nullptr,
                                        CancellationToken, FileSystem);
}
/// Resolves name-translation info (NameTranslatingInfo) for the entity at
/// \p Offset in \p InputFile. If \p TryExistingAST is set, an already-built
/// AST snapshot is tried first; a failed lookup is retried once against an
/// up-to-date AST. Results and errors are delivered via \p Receiver.
static void resolveName(
SwiftLangSupport &Lang, StringRef InputFile, unsigned Offset,
SwiftInvocationRef Invok, bool TryExistingAST, NameTranslatingInfo &Input,
SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<NameTranslatingInfo> &)> Receiver) {
assert(Invok);
class NameInfoConsumer : public CursorRangeInfoConsumer {
NameTranslatingInfo Input;
SourceKitCancellationToken CancellationToken;
std::function<void(const RequestResult<NameTranslatingInfo> &)> Receiver;
public:
// Length is fixed to 0: name resolution targets a single token position.
NameInfoConsumer(
StringRef InputFile, unsigned Offset, SwiftLangSupport &Lang,
SwiftInvocationRef ASTInvok, bool TryExistingAST,
NameTranslatingInfo Input, SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<NameTranslatingInfo> &)>
Receiver)
: CursorRangeInfoConsumer(InputFile, Offset, 0, Lang, ASTInvok,
TryExistingAST,
/*CancelOnSubsequentRequest=*/false),
Input(std::move(Input)), CancellationToken(CancellationToken),
Receiver(std::move(Receiver)) {}
void handlePrimaryAST(ASTUnitRef AstUnit) override {
auto &CompIns = AstUnit->getCompilerInstance();
unsigned BufferID = AstUnit->getPrimarySourceFile().getBufferID().getValue();
// Snap the offset to the start of the token it falls inside.
SourceLoc Loc =
Lexer::getLocForStartOfToken(CompIns.getSourceMgr(), BufferID, Offset);
if (Loc.isInvalid()) {
Receiver(RequestResult<NameTranslatingInfo>::fromError(
"Unable to resolve the start of the token."));
return;
}
auto *File = &AstUnit->getPrimarySourceFile();
// Run the cursor-info request through the evaluator (cached per AST).
ResolvedCursorInfo CursorInfo =
evaluateOrDefault(File->getASTContext().evaluator,
CursorInfoRequest{CursorInfoOwner(File, Loc)},
ResolvedCursorInfo());
if (CursorInfo.isInvalid()) {
NameTranslatingInfo Info;
Info.InternalDiagnostic = "Unable to resolve cursor info.";
Receiver(RequestResult<NameTranslatingInfo>::fromResult(Info));
return;
}
CompilerInvocation CompInvok;
ASTInvok->applyTo(CompInvok);
switch(CursorInfo.Kind) {
case CursorInfoKind::ModuleRef:
// NOTE(review): module references produce no reply — Receiver is not
// invoked on this path; confirm the client tolerates a silent drop.
return;
case CursorInfoKind::ValueRef: {
std::string Diagnostic;
bool Success = passNameInfoForDecl(CursorInfo, Input, Diagnostic,
Receiver);
if (!Success) {
if (!getPreviousASTSnaps().empty()) {
// Attempt again using the up-to-date AST.
resolveName(Lang, InputFile, Offset, ASTInvok,
/*TryExistingAST=*/false, Input, CancellationToken,
Receiver);
} else {
// Already on the fresh AST; surface the diagnostic as the result.
NameTranslatingInfo Info;
Info.InternalDiagnostic = Diagnostic;
Receiver(RequestResult<NameTranslatingInfo>::fromResult(Info));
}
}
return;
}
case CursorInfoKind::ExprStart:
case CursorInfoKind::StmtStart: {
NameTranslatingInfo Info;
Info.InternalDiagnostic =
"Resolved to incomplete expression or statement.";
Receiver(RequestResult<NameTranslatingInfo>::fromResult(Info));
return;
}
case CursorInfoKind::Invalid:
llvm_unreachable("bad sema token kind.");
}
}
void cancelled() override {
Receiver(RequestResult<NameTranslatingInfo>::cancelled());
}
void failed(StringRef Error) override {
LOG_WARN_FUNC("name info failed: " << Error);
Receiver(RequestResult<NameTranslatingInfo>::fromError(Error));
}
};
auto Consumer = std::make_shared<NameInfoConsumer>(
InputFile, Offset, Lang, Invok, TryExistingAST, Input, CancellationToken,
Receiver);
Lang.getASTManager()->processASTAsync(
Invok, std::move(Consumer), /*OncePerASTToken=*/nullptr,
CancellationToken, llvm::vfs::getRealFileSystem());
}
/// Resolves range info for [Offset, Offset+Length) in \p InputFile. If
/// \p TryExistingAST is set, an existing AST snapshot is tried first and the
/// request is retried once on an up-to-date AST when the range cannot be
/// resolved (PartOfExpression/Invalid).
static void
resolveRange(SwiftLangSupport &Lang, StringRef InputFile, unsigned Offset,
unsigned Length, SwiftInvocationRef Invok, bool TryExistingAST,
bool CancelOnSubsequentRequest,
SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<RangeInfo> &)> Receiver) {
assert(Invok);
class RangeInfoConsumer : public CursorRangeInfoConsumer {
SourceKitCancellationToken CancellationToken;
std::function<void(const RequestResult<RangeInfo> &)> Receiver;
public:
RangeInfoConsumer(
StringRef InputFile, unsigned Offset, unsigned Length,
SwiftLangSupport &Lang, SwiftInvocationRef ASTInvok,
bool TryExistingAST, bool CancelOnSubsequentRequest,
SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<RangeInfo> &)> Receiver)
: CursorRangeInfoConsumer(InputFile, Offset, Length, Lang, ASTInvok,
TryExistingAST, CancelOnSubsequentRequest),
CancellationToken(CancellationToken), Receiver(std::move(Receiver)) {}
void handlePrimaryAST(ASTUnitRef AstUnit) override {
// FIXME: Implement tracing
auto *File = &AstUnit->getPrimarySourceFile();
// Resolve the range through the request evaluator (cached per AST).
ResolvedRangeInfo Info = evaluateOrDefault(File->getASTContext().evaluator,
RangeInfoRequest(RangeInfoOwner({File, Offset, Length})),
ResolvedRangeInfo());
CompilerInvocation CompInvok;
ASTInvok->applyTo(CompInvok);
RangeInfo Result;
Result.RangeKind = Lang.getUIDForRangeKind(Info.Kind);
if (Info.Kind == RangeKind::Invalid) {
Result.RangeContent = "";
} else {
assert(Info.ContentRange.isValid());
Result.RangeContent = Info.ContentRange.str();
}
switch (Info.Kind) {
case RangeKind::SingleExpression: {
// Single expressions additionally report the expression's type.
SmallString<64> SS;
llvm::raw_svector_ostream OS(SS);
Info.ExitInfo.ReturnType->print(OS);
Result.ExprType = OS.str();
Receiver(RequestResult<RangeInfo>::fromResult(Result));
return;
}
case RangeKind::SingleDecl:
case RangeKind::MultiTypeMemberDecl:
case RangeKind::MultiStatement:
case RangeKind::SingleStatement: {
Receiver(RequestResult<RangeInfo>::fromResult(Result));
return;
}
case RangeKind::PartOfExpression:
case RangeKind::Invalid:
// Unresolvable on this AST: retry on a fresh AST if we were working
// from a stale snapshot, otherwise report the (empty/invalid) result.
if (!getPreviousASTSnaps().empty()) {
// Attempt again using the up-to-date AST.
resolveRange(Lang, InputFile, Offset, Length, ASTInvok,
/*TryExistingAST=*/false, CancelOnSubsequentRequest,
CancellationToken, Receiver);
} else {
Receiver(RequestResult<RangeInfo>::fromResult(Result));
}
return;
}
}
void cancelled() override {
Receiver(RequestResult<RangeInfo>::cancelled());
}
void failed(StringRef Error) override {
LOG_WARN_FUNC("range info failed: " << Error);
Receiver(RequestResult<RangeInfo>::fromError(Error));
}
};
auto Consumer = std::make_shared<RangeInfoConsumer>(
InputFile, Offset, Length, Lang, Invok, TryExistingAST,
CancelOnSubsequentRequest, CancellationToken, Receiver);
/// FIXME: When request cancellation is implemented and Xcode adopts it,
/// don't use 'OncePerASTToken'.
static const char OncePerASTToken = 0;
const void *Once = CancelOnSubsequentRequest ? &OncePerASTToken : nullptr;
Lang.getASTManager()->processASTAsync(Invok, std::move(Consumer), Once,
CancellationToken,
llvm::vfs::getRealFileSystem());
}
/// Entry point for the cursor-info request. Generated-interface documents are
/// answered directly from their interface-gen context; regular files build a
/// type-check invocation and dispatch to resolveCursor().
void SwiftLangSupport::getCursorInfo(
StringRef InputFile, unsigned Offset, unsigned Length, bool Actionables,
bool SymbolGraph, bool CancelOnSubsequentRequest,
ArrayRef<const char *> Args, Optional<VFSOptions> vfsOptions,
SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<CursorInfoData> &)> Receiver) {
std::string error;
auto fileSystem = getFileSystem(vfsOptions, InputFile, error);
if (!fileSystem)
return Receiver(RequestResult<CursorInfoData>::fromError(error));
// Generated module interface: resolve the entity at the offset from the
// interface-gen context rather than type checking a file on disk.
if (auto IFaceGenRef = IFaceGenContexts.get(InputFile)) {
IFaceGenRef->accessASTAsync([this, IFaceGenRef, Offset, Actionables,
SymbolGraph, Receiver] {
SwiftInterfaceGenContext::ResolvedEntity Entity;
Entity = IFaceGenRef->resolveEntityForOffset(Offset);
if (Entity.isResolved()) {
CompilerInvocation Invok;
IFaceGenRef->applyTo(Invok);
if (Entity.Mod) {
// Offset names a module reference.
passCursorInfoForModule(Entity.Mod, IFaceGenContexts, Invok,
Receiver);
} else {
// Offset names a declaration in the generated interface.
std::string Diagnostic; // Unused.
ModuleDecl *MainModule = IFaceGenRef->getModuleDecl();
ResolvedCursorInfo Info;
Info.ValueD = const_cast<ValueDecl *>(Entity.Dcl);
Info.IsRef = Entity.IsRef;
passCursorInfoForDecl(Info, MainModule, Actionables, SymbolGraph, {},
*this, Invok, Diagnostic, {}, Receiver);
}
} else {
CursorInfoData Info;
Info.InternalDiagnostic =
"Unable to resolve entity from generated interface.";
Receiver(RequestResult<CursorInfoData>::fromResult(Info));
}
});
return;
}
std::string Error;
SwiftInvocationRef Invok =
ASTMgr->getTypecheckInvocation(Args, InputFile, fileSystem, Error);
// A non-empty Error alongside a valid invocation is only logged; a null
// invocation is a hard failure reported to the client.
if (!Error.empty()) {
LOG_WARN_FUNC("error creating ASTInvocation: " << Error);
}
if (!Invok) {
Receiver(RequestResult<CursorInfoData>::fromError(Error));
return;
}
resolveCursor(*this, InputFile, Offset, Length, Actionables, SymbolGraph,
Invok, /*TryExistingAST=*/true, CancelOnSubsequentRequest,
fileSystem, CancellationToken, Receiver);
}
/// Entry point for the diagnostics request: resolves the file system and the
/// type-check invocation for \p InputFile, then kicks off asynchronous
/// diagnostics computation. All failures are reported through \p Receiver.
void SwiftLangSupport::getDiagnostics(
    StringRef InputFile, ArrayRef<const char *> Args,
    Optional<VFSOptions> VfsOptions,
    SourceKitCancellationToken CancellationToken,
    std::function<void(const RequestResult<DiagnosticsResult> &)> Receiver) {
  std::string FSError;
  auto FileSystem = getFileSystem(VfsOptions, InputFile, FSError);
  if (!FileSystem) {
    Receiver(RequestResult<DiagnosticsResult>::fromError(FSError));
    return;
  }

  std::string InvokError;
  SwiftInvocationRef Invok = ASTMgr->getTypecheckInvocation(
      Args, InputFile, FileSystem, InvokError);
  // A non-fatal error is only logged; a null invocation is reported back.
  if (!InvokError.empty())
    LOG_WARN_FUNC("error creating ASTInvocation: " << InvokError);
  if (!Invok) {
    Receiver(RequestResult<DiagnosticsResult>::fromError(InvokError));
    return;
  }

  computeDiagnostics(*this, InputFile, Invok, FileSystem, CancellationToken,
                     Receiver);
}
/// Entry point for the range-info request over [Offset, Offset+Length).
/// Generated interfaces are not supported yet; a zero-length range is
/// rejected up front, before the (comparatively expensive) type-check
/// invocation is built — the original code validated Length only afterwards,
/// wasting that work on invalid requests.
void SwiftLangSupport::getRangeInfo(
    StringRef InputFile, unsigned Offset, unsigned Length,
    bool CancelOnSubsequentRequest, ArrayRef<const char *> Args,
    SourceKitCancellationToken CancellationToken,
    std::function<void(const RequestResult<RangeInfo> &)> Receiver) {
  if (IFaceGenContexts.get(InputFile)) {
    // FIXME: return range info for generated interfaces.
    Receiver(RequestResult<RangeInfo>::fromError(
        "Range info for generated interfaces is not implemented."));
    return;
  }
  // Cheap input validation first.
  if (Length == 0) {
    Receiver(RequestResult<RangeInfo>::fromError("Invalid range length."));
    return;
  }
  std::string Error;
  SwiftInvocationRef Invok =
      ASTMgr->getTypecheckInvocation(Args, InputFile, Error);
  if (!Invok) {
    LOG_WARN_FUNC("failed to create an ASTInvocation: " << Error);
    Receiver(RequestResult<RangeInfo>::fromError(Error));
    return;
  }
  resolveRange(*this, InputFile, Offset, Length, Invok, /*TryExistingAST=*/true,
               CancelOnSubsequentRequest, CancellationToken, Receiver);
}
/// Entry point for the name-translation request for the entity at \p Offset
/// in \p InputFile.
void SwiftLangSupport::getNameInfo(
StringRef InputFile, unsigned Offset, NameTranslatingInfo &Input,
ArrayRef<const char *> Args, SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<NameTranslatingInfo> &)> Receiver) {
if (auto IFaceGenRef = IFaceGenContexts.get(InputFile)) {
IFaceGenRef->accessASTAsync([IFaceGenRef, Offset, Input, Receiver] {
SwiftInterfaceGenContext::ResolvedEntity Entity;
Entity = IFaceGenRef->resolveEntityForOffset(Offset);
if (Entity.isResolved()) {
CompilerInvocation Invok;
IFaceGenRef->applyTo(Invok);
// NOTE(review): on this resolved-entity path Receiver is never
// invoked, so the request finishes without a reply — confirm this is
// intentional.
if (Entity.Mod) {
// Module is ignored
} else {
// FIXME: Should pass the main module for the interface but currently
// it's not necessary.
}
} else {
NameTranslatingInfo Info;
Info.InternalDiagnostic =
"Unable to resolve entity from generated interface.";
Receiver(RequestResult<NameTranslatingInfo>::fromResult(Info));
}
});
return;
}
std::string Error;
SwiftInvocationRef Invok =
ASTMgr->getTypecheckInvocation(Args, InputFile, Error);
if (!Invok) {
LOG_WARN_FUNC("failed to create an ASTInvocation: " << Error);
Receiver(RequestResult<NameTranslatingInfo>::fromError(Error));
return;
}
resolveName(*this, InputFile, Offset, Invok, /*TryExistingAST=*/true, Input,
CancellationToken, Receiver);
}
/// Resolves cursor info for the entity identified by \p USR in \p InputFile.
/// If \p TryExistingAST is set, an existing AST snapshot is tried first; on a
/// failed lookup the request is retried once against an up-to-date AST.
/// (Fixes: removed the unused local `Type selfTy;` and restored the garbled
/// `SwiftLangSupport &Lang;` member declaration.)
static void resolveCursorFromUSR(
    SwiftLangSupport &Lang, StringRef InputFile, StringRef USR,
    SwiftInvocationRef Invok, bool TryExistingAST,
    bool CancelOnSubsequentRequest,
    llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> fileSystem,
    SourceKitCancellationToken CancellationToken,
    std::function<void(const RequestResult<CursorInfoData> &)> Receiver) {
  assert(Invok);

  class CursorInfoConsumer : public SwiftASTConsumer {
    std::string InputFile;
    StringRef USR;
    SwiftLangSupport &Lang;
    SwiftInvocationRef ASTInvok;
    const bool TryExistingAST;
    bool CancelOnSubsequentRequest;
    SourceKitCancellationToken CancellationToken;
    std::function<void(const RequestResult<CursorInfoData> &)> Receiver;
    SmallVector<ImmutableTextSnapshotRef, 4> PreviousASTSnaps;

  public:
    CursorInfoConsumer(
        StringRef InputFile, StringRef USR, SwiftLangSupport &Lang,
        SwiftInvocationRef ASTInvok, bool TryExistingAST,
        bool CancelOnSubsequentRequest,
        SourceKitCancellationToken CancellationToken,
        std::function<void(const RequestResult<CursorInfoData> &)> Receiver)
        : InputFile(InputFile), USR(USR), Lang(Lang),
          ASTInvok(std::move(ASTInvok)), TryExistingAST(TryExistingAST),
          CancelOnSubsequentRequest(CancelOnSubsequentRequest),
          CancellationToken(CancellationToken), Receiver(std::move(Receiver)) {}

    /// Accept a stale snapshot only when the caller allowed it; remember the
    /// snapshots so a failed lookup can be retried on a fresh AST.
    bool canUseASTWithSnapshots(
        ArrayRef<ImmutableTextSnapshotRef> Snapshots) override {
      if (!TryExistingAST) {
        LOG_INFO_FUNC(High, "will resolve using up-to-date AST");
        return false;
      }
      if (!Snapshots.empty()) {
        PreviousASTSnaps.append(Snapshots.begin(), Snapshots.end());
        LOG_INFO_FUNC(High, "will try existing AST");
        return true;
      }
      LOG_INFO_FUNC(High, "will resolve using up-to-date AST");
      return false;
    }

    void handlePrimaryAST(ASTUnitRef AstUnit) override {
      auto &CompIns = AstUnit->getCompilerInstance();
      ModuleDecl *MainModule = CompIns.getMainModule();

      // Clang ("c:"-prefixed) USRs cannot be demangled by the Swift side.
      if (USR.startswith("c:")) {
        LOG_WARN_FUNC("lookup for C/C++/ObjC USRs not implemented");
        CursorInfoData Info;
        Info.InternalDiagnostic = "Lookup for C/C++/ObjC USRs not implemented.";
        Receiver(RequestResult<CursorInfoData>::fromResult(Info));
        return;
      }

      auto &context = CompIns.getASTContext();
      TypeDecl *D = Demangle::getTypeDeclForUSR(context, USR);
      if (!D) {
        CursorInfoData Info;
        Info.InternalDiagnostic = "Unable to resolve type from USR.";
        Receiver(RequestResult<CursorInfoData>::fromResult(Info));
        return;
      }

      CompilerInvocation CompInvok;
      ASTInvok->applyTo(CompInvok);

      if (auto *M = dyn_cast<ModuleDecl>(D)) {
        passCursorInfoForModule(M, Lang.getIFaceGenContexts(), CompInvok,
                                Receiver);
      } else {
        ResolvedCursorInfo Info;
        Info.ValueD = D;
        Info.IsRef = false;
        // For members of a type, report the container type mapped into the
        // declaration's innermost generic context.
        auto *DC = D->getDeclContext();
        if (DC->isTypeContext()) {
          Info.ContainerType = DC->getSelfInterfaceType();
          Info.ContainerType = D->getInnermostDeclContext()->mapTypeIntoContext(
              Info.ContainerType);
        }
        std::string Diagnostic;
        bool Success =
            passCursorInfoForDecl(Info, MainModule, /*AddRefactorings*/ false,
                                  /*AddSymbolGraph*/ false, {}, Lang, CompInvok,
                                  Diagnostic, PreviousASTSnaps, Receiver);
        if (!Success) {
          if (!PreviousASTSnaps.empty()) {
            // Attempt again using the up-to-date AST.
            resolveCursorFromUSR(Lang, InputFile, USR, ASTInvok,
                                 /*TryExistingAST=*/false,
                                 CancelOnSubsequentRequest,
                                 CompIns.getSourceMgr().getFileSystem(),
                                 CancellationToken, Receiver);
          } else {
            CursorInfoData Info;
            Info.InternalDiagnostic = Diagnostic;
            Receiver(RequestResult<CursorInfoData>::fromResult(Info));
          }
        }
      }
    }

    void cancelled() override {
      Receiver(RequestResult<CursorInfoData>::cancelled());
    }

    void failed(StringRef Error) override {
      LOG_WARN_FUNC("cursor info failed: " << Error);
      Receiver(RequestResult<CursorInfoData>::fromError(Error));
    }
  };

  auto Consumer = std::make_shared<CursorInfoConsumer>(
      InputFile, USR, Lang, Invok, TryExistingAST, CancelOnSubsequentRequest,
      CancellationToken, Receiver);
  /// FIXME: When request cancellation is implemented and Xcode adopts it,
  /// don't use 'OncePerASTToken'.
  static const char OncePerASTToken = 0;
  const void *Once = CancelOnSubsequentRequest ? &OncePerASTToken : nullptr;
  Lang.getASTManager()->processASTAsync(Invok, std::move(Consumer), Once,
                                        CancellationToken, fileSystem);
}
/// Entry point for USR-based cursor info: validates the file system, rejects
/// generated-interface documents (unsupported), builds a type-check
/// invocation and dispatches to resolveCursorFromUSR().
void SwiftLangSupport::getCursorInfoFromUSR(
    StringRef filename, StringRef USR, bool CancelOnSubsequentRequest,
    ArrayRef<const char *> Args, Optional<VFSOptions> vfsOptions,
    SourceKitCancellationToken CancellationToken,
    std::function<void(const RequestResult<CursorInfoData> &)> Receiver) {
  std::string fsError;
  auto fileSystem = getFileSystem(vfsOptions, filename, fsError);
  if (!fileSystem)
    return Receiver(RequestResult<CursorInfoData>::fromError(fsError));

  // USR lookup inside generated interfaces is not implemented.
  if (IFaceGenContexts.get(filename)) {
    LOG_WARN_FUNC("Info from usr for generated interface not implemented yet.");
    CursorInfoData Info;
    Info.InternalDiagnostic = "Info for generated interfaces not implemented.";
    Receiver(RequestResult<CursorInfoData>::fromResult(Info));
    return;
  }

  std::string invokError;
  SwiftInvocationRef Invok =
      ASTMgr->getTypecheckInvocation(Args, filename, fileSystem, invokError);
  if (!Invok) {
    LOG_WARN_FUNC("failed to create an ASTInvocation: " << invokError);
    Receiver(RequestResult<CursorInfoData>::fromError(invokError));
    return;
  }

  resolveCursorFromUSR(*this, filename, USR, Invok, /*TryExistingAST=*/true,
                       CancelOnSubsequentRequest, fileSystem, CancellationToken,
                       Receiver);
}
//===----------------------------------------------------------------------===//
// SwiftLangSupport::findUSRRange
//===----------------------------------------------------------------------===//
/// Returns the source range of \p USR within \p DocumentName, or None.
/// Only module-interface documents can answer this query currently.
/// FIXME: Report it as failed request.
llvm::Optional<std::pair<unsigned, unsigned>>
SwiftLangSupport::findUSRRange(StringRef DocumentName, StringRef USR) {
  auto IFaceGenRef = IFaceGenContexts.get(DocumentName);
  if (!IFaceGenRef)
    return None;
  return IFaceGenRef->findUSRRange(USR);
}
//===----------------------------------------------------------------------===//
// SwiftLangSupport::findRelatedIdentifiersInFile
//===----------------------------------------------------------------------===//
namespace {
/// Walks a source entity (decl context, case statement, or whole file) and
/// collects the (offset, length) ranges of every occurrence of a given
/// declaration, for the related-identifiers request.
class RelatedIdScanner : public SourceEntityWalker {
// Declaration whose occurrences are collected (canonicalized for vars).
ValueDecl *Dcl;
// Output: (offset-in-buffer, byte length) per occurrence.
llvm::SmallVectorImpl<std::pair<unsigned, unsigned>> &Ranges;
SourceManager &SourceMgr;
unsigned BufferID = -1;
bool Cancelled = false;
public:
explicit RelatedIdScanner(SourceFile &SrcFile, unsigned BufferID,
ValueDecl *D,
llvm::SmallVectorImpl<std::pair<unsigned, unsigned>> &Ranges)
: Ranges(Ranges), SourceMgr(SrcFile.getASTContext().SourceMgr),
BufferID(BufferID) {
if (auto *V = dyn_cast<VarDecl>(D)) {
// Always use the canonical var decl for comparison. This is so we
// pick up all occurrences of x in case statements like the below:
// case .first(let x), .second(let x)
// fallthrough
// case .third(let x)
// print(x)
Dcl = V->getCanonicalVarDecl();
// If we have a property wrapper backing property or projected value, use
// the wrapped property instead (i.e. if this is _foo or $foo, pretend
// it's foo).
if (auto *Wrapped = V->getOriginalWrappedProperty()) {
Dcl = Wrapped;
}
} else {
Dcl = D;
}
}
private:
// Record the range when the visited declaration is the target decl.
bool walkToDeclPre(Decl *D, CharSourceRange Range) override {
if (Cancelled)
return false;
if (auto *V = dyn_cast<VarDecl>(D)) {
// Handle references to the implicitly generated vars in case statements
// matching multiple patterns
D = V->getCanonicalVarDecl();
}
if (D == Dcl)
return passId(Range);
return true;
}
// Record the range when a reference resolves to the target decl.
bool visitDeclReference(ValueDecl *D, CharSourceRange Range,
TypeDecl *CtorTyRef, ExtensionDecl *ExtTyRef, Type T,
ReferenceMetaData Data) override {
if (Cancelled)
return false;
if (auto *V = dyn_cast<VarDecl>(D)) {
D = V->getCanonicalVarDecl();
// If we have a property wrapper backing property or projected value, use
// the wrapped property for comparison instead (i.e. if this is _foo or
// $foo, pretend it's foo).
if (auto *Wrapped = V->getOriginalWrappedProperty()) {
assert(Range.getByteLength() > 1 &&
(Range.str().front() == '_' || Range.str().front() == '$'));
D = Wrapped;
// Skip the leading '_'/'$' sigil so only the base name is reported.
Range = CharSourceRange(Range.getStart().getAdvancedLoc(1), Range.getByteLength() - 1);
}
} else if (CtorTyRef) {
D = CtorTyRef;
}
if (D == Dcl)
return passId(Range);
return true;
}
// Append the occurrence; returning false stops the walk once cancelled.
bool passId(CharSourceRange Range) {
unsigned Offset = SourceMgr.getLocOffsetInBuffer(Range.getStart(),BufferID);
Ranges.push_back({ Offset, Range.getByteLength() });
return !Cancelled;
}
};
} // end anonymous namespace
/// Finds all occurrences of the identifier at \p Offset in \p InputFile and
/// reports their (offset, length) ranges through \p Receiver.
void SwiftLangSupport::findRelatedIdentifiersInFile(
StringRef InputFile, unsigned Offset, bool CancelOnSubsequentRequest,
ArrayRef<const char *> Args, SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<RelatedIdentsInfo> &)> Receiver) {
std::string Error;
SwiftInvocationRef Invok =
ASTMgr->getTypecheckInvocation(Args, InputFile, Error);
if (!Invok) {
LOG_WARN_FUNC("failed to create an ASTInvocation: " << Error);
Receiver(RequestResult<RelatedIdentsInfo>::fromError(Error));
return;
}
class RelatedIdConsumer : public SwiftASTConsumer {
unsigned Offset;
std::function<void(const RequestResult<RelatedIdentsInfo> &)> Receiver;
SwiftInvocationRef Invok;
public:
RelatedIdConsumer(unsigned Offset,
std::function<void(const RequestResult<RelatedIdentsInfo> &)> Receiver,
SwiftInvocationRef Invok)
: Offset(Offset), Receiver(std::move(Receiver)), Invok(Invok) { }
// FIXME: Don't silently eat errors here.
void handlePrimaryAST(ASTUnitRef AstUnit) override {
auto &CompInst = AstUnit->getCompilerInstance();
auto &SrcFile = AstUnit->getPrimarySourceFile();
SmallVector<std::pair<unsigned, unsigned>, 8> Ranges;
// The lambda bails out early on every unsupported case; Ranges then stays
// empty and an empty result is reported below.
auto Action = [&]() {
unsigned BufferID = SrcFile.getBufferID().getValue();
SourceLoc Loc =
Lexer::getLocForStartOfToken(CompInst.getSourceMgr(), BufferID, Offset);
if (Loc.isInvalid())
return;
ResolvedCursorInfo CursorInfo =
evaluateOrDefault(SrcFile.getASTContext().evaluator,
CursorInfoRequest{CursorInfoOwner(&SrcFile, Loc)},
ResolvedCursorInfo());
if (CursorInfo.isInvalid())
return;
if (CursorInfo.IsKeywordArgument)
return;
ValueDecl *VD = CursorInfo.typeOrValue();
if (!VD)
return; // This was a module reference.
// Only accept pointing to an identifier.
if (!CursorInfo.IsRef &&
(isa<ConstructorDecl>(VD) ||
isa<DestructorDecl>(VD) ||
isa<SubscriptDecl>(VD)))
return;
if (VD->isOperator())
return;
RelatedIdScanner Scanner(SrcFile, BufferID, VD, Ranges);
// Vars bound in multi-pattern case statements: walk each case reachable
// through fallthrough so all bindings of the same name are collected.
if (auto *Case = getCaseStmtOfCanonicalVar(VD)) {
Scanner.walk(Case);
while ((Case = Case->getFallthroughDest().getPtrOrNull())) {
Scanner.walk(Case);
}
} else if (DeclContext *LocalDC = VD->getDeclContext()->getLocalContext()) {
// Locals only need their enclosing context scanned; everything else
// scans the whole file.
Scanner.walk(LocalDC);
} else {
Scanner.walk(SrcFile);
}
};
Action();
RelatedIdentsInfo Info;
Info.Ranges = Ranges;
Receiver(RequestResult<RelatedIdentsInfo>::fromResult(Info));
}
void cancelled() override {
Receiver(RequestResult<RelatedIdentsInfo>::cancelled());
}
void failed(StringRef Error) override {
LOG_WARN_FUNC("related idents failed: " << Error);
Receiver(RequestResult<RelatedIdentsInfo>::fromError(Error));
}
// Returns the enclosing CaseStmt when D is a case-bound variable, else null.
static CaseStmt *getCaseStmtOfCanonicalVar(Decl *D) {
assert(D);
if (auto *VD = dyn_cast<VarDecl>(D)) {
if (auto *Canonical = VD->getCanonicalVarDecl()) {
return dyn_cast_or_null<CaseStmt>(Canonical->getRecursiveParentPatternStmt());
}
}
return nullptr;
}
};
auto Consumer = std::make_shared<RelatedIdConsumer>(Offset, Receiver, Invok);
/// FIXME: When request cancellation is implemented and Xcode adopts it,
/// don't use 'OncePerASTToken'.
static const char OncePerASTToken = 0;
const void *Once = CancelOnSubsequentRequest ? &OncePerASTToken : nullptr;
ASTMgr->processASTAsync(Invok, std::move(Consumer), Once, CancellationToken,
llvm::vfs::getRealFileSystem());
}
//===----------------------------------------------------------------------===//
// SwiftLangSupport::semanticRefactoring
//===----------------------------------------------------------------------===//
/// Maps a SourceKit-level SemanticRefactoringKind to the IDE-level
/// RefactoringKind; the case list is generated from RefactoringKinds.def.
static RefactoringKind getIDERefactoringKind(SemanticRefactoringInfo Info) {
  switch(Info.Kind) {
    case SemanticRefactoringKind::None: return RefactoringKind::None;
#define SEMANTIC_REFACTORING(KIND, NAME, ID) \
    case SemanticRefactoringKind::KIND: return RefactoringKind::KIND;
#include "swift/IDE/RefactoringKinds.def"
  }
  // Every enumerator returns above; silences -Wreturn-type and traps on a
  // corrupted enum value.
  llvm_unreachable("unhandled SemanticRefactoringKind");
}
/// Performs the semantic refactoring described by \p Info on \p Filename and
/// streams the resulting categorized edits to \p Receiver.
void SwiftLangSupport::semanticRefactoring(
StringRef Filename, SemanticRefactoringInfo Info,
ArrayRef<const char *> Args, SourceKitCancellationToken CancellationToken,
CategorizedEditsReceiver Receiver) {
std::string Error;
SwiftInvocationRef Invok =
ASTMgr->getTypecheckInvocation(Args, Filename, Error);
if (!Invok) {
LOG_WARN_FUNC("failed to create an ASTInvocation: " << Error);
Receiver(RequestResult<ArrayRef<CategorizedEdits>>::fromError(Error));
return;
}
assert(Invok);
// Consumer that configures and runs the refactoring once the AST is ready.
class SemaRefactoringConsumer : public SwiftASTConsumer {
SemanticRefactoringInfo Info;
CategorizedEditsReceiver Receiver;
public:
SemaRefactoringConsumer(SemanticRefactoringInfo Info,
CategorizedEditsReceiver Receiver) : Info(Info),
Receiver(std::move(Receiver)) {}
void handlePrimaryAST(ASTUnitRef AstUnit) override {
auto &CompIns = AstUnit->getCompilerInstance();
ModuleDecl *MainModule = CompIns.getMainModule();
// Translate the request into ide::RefactoringOptions.
RefactoringOptions Opts(getIDERefactoringKind(Info));
Opts.Range.BufferId = AstUnit->getPrimarySourceFile().getBufferID().
getValue();
Opts.Range.Line = Info.Line;
Opts.Range.Column = Info.Column;
Opts.Range.Length = Info.Length;
Opts.PreferredName = Info.PreferredName.str();
// EditConsumer serves as both the edit sink and the diagnostic consumer.
RequestRefactoringEditConsumer EditConsumer(Receiver);
refactorSwiftModule(MainModule, Opts, EditConsumer, EditConsumer);
}
void cancelled() override {
Receiver(RequestResult<ArrayRef<CategorizedEdits>>::cancelled());
}
void failed(StringRef Error) override {
Receiver(RequestResult<ArrayRef<CategorizedEdits>>::fromError(Error));
}
};
auto Consumer = std::make_shared<SemaRefactoringConsumer>(Info, Receiver);
/// FIXME: When request cancellation is implemented and Xcode adopts it,
/// don't use 'OncePerASTToken'.
static const char OncePerASTToken = 0;
getASTManager()->processASTAsync(Invok, std::move(Consumer), &OncePerASTToken,
CancellationToken,
llvm::vfs::getRealFileSystem());
}
/// Collects the types of all expressions in \p FileName (optionally filtered
/// to those conforming to \p ExpectedProtocols) and reports them, together
/// with a shared buffer of printed type strings, through \p Receiver.
void SwiftLangSupport::collectExpressionTypes(
StringRef FileName, ArrayRef<const char *> Args,
ArrayRef<const char *> ExpectedProtocols, bool CanonicalType,
SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<ExpressionTypesInFile> &)>
Receiver) {
std::string Error;
SwiftInvocationRef Invok =
ASTMgr->getTypecheckInvocation(Args, FileName, Error);
if (!Invok) {
LOG_WARN_FUNC("failed to create an ASTInvocation: " << Error);
Receiver(RequestResult<ExpressionTypesInFile>::fromError(Error));
return;
}
assert(Invok);
class ExpressionTypeCollector: public SwiftASTConsumer {
std::function<void(const RequestResult<ExpressionTypesInFile> &)> Receiver;
std::vector<const char *> ExpectedProtocols;
bool CanonicalType;
public:
ExpressionTypeCollector(
std::function<void(const RequestResult<ExpressionTypesInFile> &)> Receiver,
ArrayRef<const char *> ExpectedProtocols,
bool CanonicalType):
Receiver(std::move(Receiver)),
ExpectedProtocols(ExpectedProtocols.vec()),
CanonicalType(CanonicalType) {}
void handlePrimaryAST(ASTUnitRef AstUnit) override {
auto *SF = AstUnit->getCompilerInstance().getPrimarySourceFile();
std::vector<ExpressionTypeInfo> Scratch;
// All type strings are printed into one buffer; results reference it
// by offset (Item.typeOffset / ProtocolOffsets).
llvm::SmallString<256> TypeBuffer;
llvm::raw_svector_ostream OS(TypeBuffer);
ExpressionTypesInFile Result;
for (auto Item: collectExpressionType(*SF, ExpectedProtocols, Scratch,
CanonicalType, OS)) {
Result.Results.push_back({Item.offset, Item.length, Item.typeOffset, {}});
for (auto P: Item.protocols) {
Result.Results.back().ProtocolOffsets.push_back(P.first);
}
}
Result.TypeBuffer = OS.str();
Receiver(RequestResult<ExpressionTypesInFile>::fromResult(Result));
}
void cancelled() override {
Receiver(RequestResult<ExpressionTypesInFile>::cancelled());
}
void failed(StringRef Error) override {
Receiver(RequestResult<ExpressionTypesInFile>::fromError(Error));
}
};
auto Collector = std::make_shared<ExpressionTypeCollector>(Receiver,
ExpectedProtocols,
CanonicalType);
/// FIXME: When request cancellation is implemented and Xcode adopts it,
/// don't use 'OncePerASTToken'.
static const char OncePerASTToken = 0;
getASTManager()->processASTAsync(Invok, std::move(Collector),
&OncePerASTToken, CancellationToken,
llvm::vfs::getRealFileSystem());
}
/// Collects the types of variable declarations in \p FileName, restricted to
/// the byte range [Offset, Offset+Length) when both are provided (otherwise
/// the whole document), and reports them through \p Receiver.
void SwiftLangSupport::collectVariableTypes(
StringRef FileName, ArrayRef<const char *> Args, Optional<unsigned> Offset,
Optional<unsigned> Length, SourceKitCancellationToken CancellationToken,
std::function<void(const RequestResult<VariableTypesInFile> &)> Receiver) {
std::string Error;
SwiftInvocationRef Invok =
ASTMgr->getTypecheckInvocation(Args, FileName, Error);
if (!Invok) {
LOG_WARN_FUNC("failed to create an ASTInvocation: " << Error);
Receiver(RequestResult<VariableTypesInFile>::fromError(Error));
return;
}
assert(Invok);
class VariableTypeCollectorASTConsumer : public SwiftASTConsumer {
private:
std::function<void(const RequestResult<VariableTypesInFile> &)> Receiver;
// Optional byte range restricting the query; both must be set to apply.
Optional<unsigned> Offset;
Optional<unsigned> Length;
public:
VariableTypeCollectorASTConsumer(
std::function<void(const RequestResult<VariableTypesInFile> &)>
Receiver,
Optional<unsigned> Offset, Optional<unsigned> Length)
: Receiver(std::move(Receiver)), Offset(Offset), Length(Length) {}
void handlePrimaryAST(ASTUnitRef AstUnit) override {
auto &CompInst = AstUnit->getCompilerInstance();
auto *SF = CompInst.getPrimarySourceFile();
// Construct the range for which variable types are to be queried. If
// offset/length are unset, the (default) range will be used, which
// corresponds to the entire document.
SourceRange Range;
if (Offset.hasValue() && Length.hasValue()) {
auto &SM = CompInst.getSourceMgr();
unsigned BufferID = SF->getBufferID().getValue();
SourceLoc Start = Lexer::getLocForStartOfToken(SM, BufferID, *Offset);
SourceLoc End =
Lexer::getLocForStartOfToken(SM, BufferID, *Offset + *Length);
Range = SourceRange(Start, End);
}
std::vector<VariableTypeInfo> Infos;
// Type strings are printed into one buffer; results reference it by
// offset (Info.TypeOffset).
std::string TypeBuffer;
llvm::raw_string_ostream OS(TypeBuffer);
VariableTypesInFile Result;
collectVariableType(*SF, Range, Infos, OS);
for (auto Info : Infos) {
Result.Results.push_back({Info.Offset, Info.Length, Info.TypeOffset, Info.HasExplicitType});
}
Result.TypeBuffer = OS.str();
Receiver(RequestResult<VariableTypesInFile>::fromResult(Result));
}
void cancelled() override {
Receiver(RequestResult<VariableTypesInFile>::cancelled());
}
void failed(StringRef Error) override {
Receiver(RequestResult<VariableTypesInFile>::fromError(Error));
}
};
auto Collector = std::make_shared<VariableTypeCollectorASTConsumer>(
Receiver, Offset, Length);
/// FIXME: When request cancellation is implemented and Xcode adopts it,
/// don't use 'OncePerASTToken'.
static const char OncePerASTToken = 0;
getASTManager()->processASTAsync(Invok, std::move(Collector),
&OncePerASTToken, CancellationToken,
llvm::vfs::getRealFileSystem());
}
|
// (a) Who goes with fergus?
// (b) 3.14e1L --> long double
// (c) 1024f --> floating point
// (d) 3.14L --> long double
|
#include "../TensorShaderAvxBackend.h"
using namespace System;
// Zero-fills `length` floats at `dst` using 8-wide AVX stores, with a masked
// store for the tail remainder.
static void fill_zeros_avx(float* dst, unsigned int length) {
    // j = floats covered by full 8-wide stores; k = tail size (0..7).
    const unsigned int j = length & ~7u, k = length - j;
    const __m256i mask = TensorShaderAvxBackend::masktable_m256(k);
    const __m256 zeros = _mm256_setzero_ps();

    for (unsigned int i = 0; i < j; i += 8) {
        _mm256_storeu_ps(dst + i, zeros);
    }
    if (k > 0) {
        _mm256_maskstore_ps(dst + j, mask, zeros);
    }
}

// Copies `length` floats from `src` to `dst` using 8-wide AVX loads/stores,
// with masked load/store for the tail remainder.
static void copy_floats_avx(const float* src, float* dst, unsigned int length) {
    const unsigned int j = length & ~7u, k = length - j;
    const __m256i mask = TensorShaderAvxBackend::masktable_m256(k);

    for (unsigned int i = 0; i < j; i += 8) {
        _mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i));
    }
    if (k > 0) {
        __m256 x = _mm256_maskload_ps(src + j, mask);
        _mm256_maskstore_ps(dst + j, mask, x);
    }
}

// Writes batch row `th` of a zero-padded 1D map: the `inwidth` input pixels
// are copied into the middle of the `outwidth` output row, and the
// `pad_left` / `pad_right` borders are filled with zeros. All lengths are in
// units of `channels` floats. Decomposed from three near-identical inline
// masked-store sections into the two helpers above.
void zero_padding_1d(unsigned int channels,
                     unsigned int inwidth,
                     unsigned int outwidth,
                     unsigned int th,
                     unsigned int pad_left, unsigned int pad_right,
                     float* inmap_ptr, float* outmap_ptr) {

    // Advance both pointers to the start of batch row `th`.
    inmap_ptr += channels * inwidth * th;
    outmap_ptr += channels * outwidth * th;

    /* x center */
    copy_floats_avx(inmap_ptr, outmap_ptr + channels * pad_left,
                    channels * inwidth);

    /* x left */
    fill_zeros_avx(outmap_ptr, channels * pad_left);

    /* x right */
    fill_zeros_avx(outmap_ptr + channels * (pad_left + inwidth),
                   channels * pad_right);
}
// Managed entry point: validates arguments, then pads one sample (`th`) of the
// batch via the native AVX kernel zero_padding_1d().
void TensorShaderAvxBackend::Padding::ZeroPadding1D(unsigned int channels, unsigned int inwidth, unsigned int batch, unsigned int th, unsigned int pad_left, unsigned int pad_right, AvxArray<float>^ inmap, AvxArray<float>^ outmap) {

    Util::CheckDuplicateArray(inmap, outmap);

    // `th` selects a single sample; it must lie inside the batch.
    if (th >= batch) {
        throw gcnew System::ArgumentException();
    }

    const unsigned int outwidth = inwidth + pad_left + pad_right;

    Util::CheckLength(channels * inwidth * batch, inmap);
    Util::CheckLength(channels * outwidth * batch, outmap);

    float* src = (float*)(inmap->Ptr.ToPointer());
    float* dst = (float*)(outmap->Ptr.ToPointer());

    zero_padding_1d(channels, inwidth, outwidth, th, pad_left, pad_right, src, dst);
}
|
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2022 Datadog, Inc.
#include "ManagedThreadList.h"
#include "Log.h"
#include "OpSysTools.h"
// Process-wide singleton pointer; managed by Create/Delete/GetSingletonInstance.
ManagedThreadList* ManagedThreadList::s_singletonInstance = nullptr;
// Fragmentation threshold (%) above which UnregisterThread triggers a compaction.
const std::uint32_t ManagedThreadList::FillFactorPercent = 20;
// Initial (and minimum) capacity of the thread buffer and lookup tables.
const std::uint32_t ManagedThreadList::MinBufferSize = 50;
// Compaction is skipped while fewer than this many slots are in use.
const std::uint32_t ManagedThreadList::MinCompactionUsedIndex = 10;
// Installs a freshly constructed ManagedThreadList as the process-wide
// singleton, deleting any previously installed instance first.
void ManagedThreadList::CreateNewSingletonInstance(ICorProfilerInfo4* pCorProfilerInfo)
{
    ManagedThreadList* replacement = new ManagedThreadList(pCorProfilerInfo);
    ManagedThreadList::DeleteSingletonInstance();
    ManagedThreadList::s_singletonInstance = replacement;
}
// Returns the process-wide instance; throws if none has been created (or it
// was already deleted). The pointer is read once into a local so the check
// and the return observe the same value.
ManagedThreadList* ManagedThreadList::GetSingletonInstance()
{
    ManagedThreadList* instance = ManagedThreadList::s_singletonInstance;
    if (instance == nullptr)
    {
        throw std::logic_error("No singleton instance of ManagedThreadList has been created, or it has already been deleted.");
    }
    return instance;
}
void ManagedThreadList::DeleteSingletonInstance(void)
{
ManagedThreadList* singletonInstance = ManagedThreadList::s_singletonInstance;
if (singletonInstance != nullptr)
{
ManagedThreadList::s_singletonInstance = nullptr;
delete singletonInstance;
}
}
// Builds an empty thread list with an initial buffer of MinBufferSize slots
// and pre-sized lookup tables. Takes a COM reference on the profiler info,
// released in the destructor.
ManagedThreadList::ManagedThreadList(ICorProfilerInfo4* pCorProfilerInfo) :
    _threadsData{new DirectAccessCollection<ManagedThreadInfo*>(MinBufferSize)},
    _nextFreeIndex{0},
    _activeThreadCount{0},
    _nextElementIteratorIndex{0},
    _pCorProfilerInfo{pCorProfilerInfo}
{
    _lookupByClrThreadId.reserve(MinBufferSize);
    _lookupByProfilerThreadInfoId.reserve(MinBufferSize);

    // Keep the profiler interface alive for the lifetime of this list.
    _pCorProfilerInfo->AddRef();
}
// Releases every still-registered ManagedThreadInfo, the backing buffer, and
// the COM reference on the profiler info taken in the constructor.
ManagedThreadList::~ManagedThreadList()
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    DirectAccessCollection<ManagedThreadInfo*>* threadsData = _threadsData;
    if (threadsData != nullptr)
    {
        _threadsData = nullptr;

        ManagedThreadInfo** pDataItem = nullptr;
        for (std::uint32_t i = 0; i < _nextFreeIndex; i++)
        {
            // Bug fix: the TryGet() result used to be ignored; a failed
            // lookup would leave pDataItem null and crash on the dereference.
            if (threadsData->TryGet(i, &pDataItem) && *pDataItem != nullptr)
            {
                (*pDataItem)->Release();
                *pDataItem = nullptr;
            }
        }
        delete threadsData;
    }

    ICorProfilerInfo4* pCorProfilerInfo = _pCorProfilerInfo;
    if (pCorProfilerInfo != nullptr)
    {
        pCorProfilerInfo->Release();
        _pCorProfilerInfo = nullptr;
    }
}
// Looks up the entry for clrThreadId, lazily registering a new one when it is
// not yet known. Returns false only if registration fails.
// ppThreadInfo (optional) receives the entry; no extra reference is taken.
bool ManagedThreadList::GetOrCreateThread(ThreadID clrThreadId, ManagedThreadInfo** ppThreadInfo)
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    ManagedThreadInfo* pInfo;
    if (!TryFindThreadByClrThreadId(clrThreadId, &pInfo) && !AddNewThread(clrThreadId, &pInfo))
    {
        return false;
    }

    if (ppThreadInfo != nullptr)
    {
        *ppThreadInfo = pInfo;
    }
    return true;
}
// Registers a brand new ManagedThreadInfo for clrThreadId in the buffer and
// both lookup tables. If the buffer is full, grows/compacts it once and
// retries. Returns false (and releases the new info) if that also fails.
// ppCreatedThreadInfo (optional) receives the new entry; the list keeps its
// own reference.
bool ManagedThreadList::AddNewThread(ThreadID clrThreadId, ManagedThreadInfo** ppCreatedThreadInfo)
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    ManagedThreadInfo* pNewThreadInfo = new ManagedThreadInfo(clrThreadId);
    pNewThreadInfo->AddRef();

    // Stores pNewThreadInfo in the next free slot and updates both lookup
    // tables and the counters. Returns false if the buffer is full.
    // (Deduplicates the success path that was previously written out twice.)
    auto tryStoreNewThread = [&]() -> bool
    {
        if (!_threadsData->TrySet(_nextFreeIndex, pNewThreadInfo))
        {
            return false;
        }
        _lookupByClrThreadId[clrThreadId] = pNewThreadInfo;
        _lookupByProfilerThreadInfoId[pNewThreadInfo->GetProfilerThreadInfoId()] = pNewThreadInfo;
        _nextFreeIndex++;
        _activeThreadCount++;
        if (ppCreatedThreadInfo != nullptr)
        {
            *ppCreatedThreadInfo = pNewThreadInfo;
        }
        return true;
    };

    if (tryStoreNewThread())
    {
        return true;
    }

    // Buffer full: grow/compact, then retry exactly once.
    ResizeAndCompactData();
    if (tryStoreNewThread())
    {
        return true;
    }

    Log::Error("Cannot add new thread even after calling ResizeAndCompactData(): must be a bug!");
    pNewThreadInfo->Release();
    return false;
}
// Removes clrThreadId from the list and both lookup tables. ppThreadInfo
// (optional) receives the removed entry with an extra reference the caller
// must release. Returns false if the thread was not registered.
bool ManagedThreadList::UnregisterThread(ThreadID clrThreadId, ManagedThreadInfo** ppThreadInfo)
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    std::uint32_t threadIndex;
    ManagedThreadInfo** pThreadInfoListEntry = nullptr;
    bool threadExists = TryFindThreadIndexInList(clrThreadId, 0, &threadIndex, &pThreadInfoListEntry);
    if (!threadExists)
    {
        Log::Error("ManagedThreadList: thread ", std::dec, clrThreadId, "cannot be unregister because not in the list");
        return false;
    }

    if (ppThreadInfo != nullptr)
    {
        *ppThreadInfo = *pThreadInfoListEntry;
        (*ppThreadInfo)->AddRef(); // Caller must release
    }

    // Remove from the ByClrThreadId lookup:
    _lookupByClrThreadId.erase((*pThreadInfoListEntry)->GetClrThreadId());

    // Remove from the ByProfilerThreadInfoId lookup:
    _lookupByProfilerThreadInfoId.erase((*pThreadInfoListEntry)->GetProfilerThreadInfoId());

    // Remove the item from the collection and then delete the object:
    (*pThreadInfoListEntry)->Release();
    *pThreadInfoListEntry = nullptr;

    // Decrement counter of items:
    _activeThreadCount--;

    // Optimization: If we removed the last item, we can update next insertion index accordingly.
    // (Otherwise we defer to a later compaction)
    if (threadIndex == _nextFreeIndex - 1)
    {
        _nextFreeIndex--;
    }

    // Check fragmentation and perform compaction if required.
    // Bug fix: guard _activeThreadCount > 0 -- removing the last registered
    // thread used to divide by zero when computing the fragmentation ratio.
    if (_activeThreadCount > 0 && _nextFreeIndex >= MinCompactionUsedIndex)
    {
        std::uint32_t fragAmnt = _nextFreeIndex - _activeThreadCount;
        std::uint32_t fragPercent = static_cast<std::uint32_t>((fragAmnt * 100.0) / _activeThreadCount);
        if (fragPercent > FillFactorPercent)
        {
            ResizeAndCompactData();
        }
    }
    return true;
}
// Associates an OS thread id and handle with the entry for clrThreadId,
// registering the thread first if it is not yet known.
bool ManagedThreadList::SetThreadOsInfo(ThreadID clrThreadId, DWORD osThreadId, HANDLE osThreadHandle)
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    ManagedThreadInfo* pInfo = nullptr;
    if (!GetOrCreateThread(clrThreadId, &pInfo))
    {
        Log::Error("ManagedThreadList: thread 0x", std::hex, clrThreadId, " cannot be associated to OS ID(0x", std::hex, osThreadId, std::dec, ") because not in the list");
        return false;
    }

    pInfo->SetOsInfo(osThreadId, osThreadHandle);

    Log::Debug("ManagedThreadList::SetThreadOsInfo(clrThreadId: 0x", std::hex, clrThreadId,
               ", osThreadId: ", std::dec, osThreadId,
               ", osThreadHandle: 0x", std::hex, osThreadHandle, ")",
               " completed for ProfilerThreadInfoId=", std::dec, pInfo->GetProfilerThreadInfoId(), ".");
    return true;
}
// Stores the managed thread name on the entry for clrThreadId, registering
// the thread first if it is not yet known. pThreadName may be null.
bool ManagedThreadList::SetThreadName(ThreadID clrThreadId, shared::WSTRING* pThreadName)
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    ManagedThreadInfo* pInfo;
    if (!GetOrCreateThread(clrThreadId, &pInfo))
    {
        Log::Error("ManagedThreadList: impossible to set thread 0x", std::hex, clrThreadId, " name to \"", (pThreadName == nullptr ? WStr("null") : *pThreadName), "\") because not in the list");
        return false;
    }

    pInfo->SetThreadName(pThreadName);

    Log::Debug("ManagedThreadList::SetThreadName(clrThreadId: 0x", std::hex, clrThreadId,
               ", pThreadName: \"", (pThreadName == nullptr ? WStr("null") : *pThreadName), "\")",
               " completed for ProfilerThreadInfoId=", pInfo->GetProfilerThreadInfoId(), ".");
    return true;
}
// Number of currently registered threads. Read without taking _mutex, so
// callers get a point-in-time snapshot only.
std::uint32_t ManagedThreadList::Count(void) const
{
    return _activeThreadCount;
}
// Round-robin iterator over the live entries: returns the next non-null
// thread (with an extra reference the caller must release), wrapping to the
// start of the buffer when the end is reached. Returns nullptr when there are
// no live entries at all.
ManagedThreadInfo* ManagedThreadList::LoopNext(void)
{
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    if (_activeThreadCount == 0)
    {
        return nullptr;
    }

    ManagedThreadInfo** pDataItem = nullptr;

    // Advances _nextElementIteratorIndex past null (unregistered) slots until
    // a live entry is found or the used portion of the buffer is exhausted.
    // (Deduplicates the scan loop that was previously written out twice.)
    auto scanForNextLiveEntry = [&]() -> bool
    {
        bool canGet = _nextElementIteratorIndex < _nextFreeIndex && _threadsData->TryGet(_nextElementIteratorIndex++, &pDataItem);
        while (canGet && *pDataItem == nullptr)
        {
            canGet = _nextElementIteratorIndex < _nextFreeIndex && _threadsData->TryGet(_nextElementIteratorIndex++, &pDataItem);
        }
        return canGet;
    };

    if (!scanForNextLiveEntry())
    {
        // Reached the end of the buffer: wrap around and scan once more.
        _nextElementIteratorIndex = 0;
        if (!scanForNextLiveEntry())
        {
            return nullptr;
        }
    }

    (*pDataItem)->AddRef(); // Caller must release
    return *pDataItem;
}
// Allocates a right-sized buffer (active count plus FillFactorPercent
// headroom, at least MinBufferSize), copies only the live entries into it
// (dropping null holes), and adjusts the round-robin iterator index so it
// keeps pointing at the same logical position after compaction.
void ManagedThreadList::ResizeAndCompactData(void)
{
    // This helper function must be called under the update lock (_mutex)!

    // Compute size for new buffer:
    std::uint32_t newDataSize = (std::max)(static_cast<std::uint32_t>(_activeThreadCount * 0.01 * (100.0 + FillFactorPercent)), MinBufferSize);

    // Allocate new buffer and copy data from old to new buffer:
    DirectAccessCollection<ManagedThreadInfo*>* newThreadsData = new DirectAccessCollection<ManagedThreadInfo*>(newDataSize);
    std::uint32_t newNextFreeIndex = 0;
    std::uint32_t newNextElementIteratorIndex = _nextElementIteratorIndex;
    ManagedThreadInfo** ppThreadInfo = nullptr;
    for (std::uint32_t i = 0; i < _nextFreeIndex; i++)
    {
        _threadsData->TryGet(i, &ppThreadInfo);

        // As we copy, perform compaction:
        if (*ppThreadInfo == nullptr)
        {
            // A hole before the iterator position shifts it left by one.
            if (i < _nextElementIteratorIndex)
            {
                newNextElementIteratorIndex--;
            }
        }
        else
        {
            newThreadsData->TrySet(newNextFreeIndex, *ppThreadInfo);
            newNextFreeIndex++;
        }
    }

    // Swap old and new data:
    DirectAccessCollection<ManagedThreadInfo*>* oldThreadsData = _threadsData;
    _threadsData = newThreadsData;
    _nextFreeIndex = newNextFreeIndex;
    _activeThreadCount = newNextFreeIndex;
    _nextElementIteratorIndex = newNextElementIteratorIndex;
    delete oldThreadsData;
}
/// <summary>
/// See the comment in the header file at the declaration of the _lookupByProfilerThreadInfoId table.
/// Looks up the entry by its profiler-assigned id; on success, ppThreadInfo
/// (optional) receives the entry with an extra reference the caller must release.
/// </summary>
bool ManagedThreadList::TryFindThreadByProfilerThreadInfoId(std::uint32_t profilerThreadInfoId, ManagedThreadInfo** ppThreadInfo)
{
    // Cheap, lock-free pre-check: with no active threads there is nothing to find.
    if (_activeThreadCount < 1)
    {
        return false;
    }

    std::lock_guard<std::recursive_mutex> lock(_mutex);

    std::unordered_map<std::uint32_t, ManagedThreadInfo*>::const_iterator elem = _lookupByProfilerThreadInfoId.find(profilerThreadInfoId);
    if (elem == _lookupByProfilerThreadInfoId.end())
    {
        return false;
    }

    if (ppThreadInfo != nullptr)
    {
        *ppThreadInfo = elem->second;
        (*ppThreadInfo)->AddRef(); // caller must release
    }
    return true;
}
// Copies the identifying data of the thread with the given profiler id into
// the caller-supplied out-parameters (each optional / nullable). The thread
// name buffer receives the managed name when set, otherwise the native thread
// name; pActualThreadNameLen always reports the managed name's length.
// Returns false if no such thread is registered.
bool ManagedThreadList::TryGetThreadInfo(const std::uint32_t profilerThreadInfoId,
                                         ThreadID* pClrThreadId,
                                         DWORD* pOsThreadId,
                                         HANDLE* pOsThreadHandle,
                                         WCHAR* pThreadNameBuff,
                                         const std::uint32_t threadNameBuffSize,
                                         std::uint32_t* pActualThreadNameLen)
{
    ManagedThreadInfo* pThreadInfo = nullptr;
    bool canFind = this->TryFindThreadByProfilerThreadInfoId(profilerThreadInfoId, &pThreadInfo);
    if (!canFind || pThreadInfo == nullptr)
    {
        return false;
    }

    if (pClrThreadId != nullptr)
    {
        *pClrThreadId = pThreadInfo->GetClrThreadId();
    }

    if (pOsThreadId != nullptr)
    {
        *pOsThreadId = pThreadInfo->GetOsThreadId();
    }

    if (pOsThreadHandle != nullptr)
    {
        *pOsThreadHandle = pThreadInfo->GetOsThreadHandle();
    }

    const shared::WSTRING& tName = pThreadInfo->GetThreadName();
    if (pThreadNameBuff != nullptr && threadNameBuffSize > 0)
    {
        // Truncate to the buffer, leaving room for the terminator.
        std::uint32_t copyCharCount = (std::min)(static_cast<std::uint32_t>(tName.size()), threadNameBuffSize - 1);

        // If a managed thread name was set, we will use it.
        // If a managed thread name was not set, we will attempt to use a potentially set native thread name (or debugger thread description).
        // Note that we do not get callbacks for the update of the latter, so we have to query it every time.
        // However, the results of this TryGetThreadInfo(..) method are cached on the managed side.
        // As a result, this API should not be called more than once per export cycle per thread, and we do not expect much overhead from this query.
        // (But native thread name updates may propagate with a delay).
        if (copyCharCount > 0)
        {
            tName.copy(pThreadNameBuff, copyCharCount, 0);
            pThreadNameBuff[copyCharCount] = static_cast<WCHAR>(0);
        }
        else
        {
            // No managed name: fall back to the native name, or an empty string.
            if (false == OpSysTools::GetNativeThreadName(pThreadInfo->GetOsThreadHandle(), pThreadNameBuff, threadNameBuffSize))
            {
                pThreadNameBuff[0] = static_cast<WCHAR>(0);
            }
        }
    }

    if (pActualThreadNameLen != nullptr)
    {
        *pActualThreadNameLen = static_cast<std::uint32_t>(tName.size());
    }

    // Balance the AddRef taken by TryFindThreadByProfilerThreadInfoId.
    pThreadInfo->Release();
    return true;
}
// Resolves the entry for the currently executing managed thread.
// Returns S_OK when found, S_FALSE when the thread is not registered, and a
// failure HRESULT (or E_FAIL for a zero thread id) when the CLR lookup fails.
HRESULT ManagedThreadList::TryGetCurrentThreadInfo(ManagedThreadInfo** ppThreadInfo)
{
    ThreadID clrThreadId;
    const HRESULT hr = _pCorProfilerInfo->GetCurrentThreadID(&clrThreadId);
    if (FAILED(hr))
    {
        return hr;
    }
    if (clrThreadId == 0)
    {
        return E_FAIL;
    }
    return TryFindThreadByClrThreadId(clrThreadId, ppThreadInfo) ? S_OK : S_FALSE;
}
// Finds the entry for clrThreadId; ppThreadInfo (optional) receives it
// without an extra reference.
bool ManagedThreadList::TryFindThreadByClrThreadId(ThreadID clrThreadId, ManagedThreadInfo** ppThreadInfo)
{
    // This helper method is called from modifying functions under the update lock (_mutex)!

    // Empty list: nothing to find.
    if (_nextFreeIndex < 1)
    {
        return false;
    }

    // Optimization: recently added threads are the likeliest lookup targets,
    // so scan the tail of the list before falling back to the hash table.
    static const std::uint32_t MaxScanOptimizationLength = 10;
    const std::uint32_t minIndex = (_nextFreeIndex <= MaxScanOptimizationLength) ? 0 : _nextFreeIndex - MaxScanOptimizationLength - 1;

    ManagedThreadInfo** ppListEntry;
    std::uint32_t indexUnused;
    if (TryFindThreadIndexInList(clrThreadId, minIndex, &indexUnused, &ppListEntry))
    {
        if (ppThreadInfo != nullptr)
        {
            *ppThreadInfo = *ppListEntry;
        }
        return true;
    }

    // Not among the most recent entries; use the lookup table instead.
    std::unordered_map<ThreadID, ManagedThreadInfo*>::const_iterator elem = _lookupByClrThreadId.find(clrThreadId);
    if (elem == _lookupByClrThreadId.end())
    {
        return false;
    }

    if (ppThreadInfo != nullptr)
    {
        *ppThreadInfo = elem->second;
    }
    return true;
}
// Scans the buffer backwards from the most recently used slot down to
// minIndex (inclusive) for a live entry with the given CLR thread id.
// On success, *pThreadIndex receives the slot index and *pppThreadInfo a
// pointer to the slot itself, so the caller can modify the slot in place.
bool ManagedThreadList::TryFindThreadIndexInList(ThreadID clrThreadId,
                                                 std::uint32_t minIndex,
                                                 std::uint32_t* pThreadIndex,
                                                 ManagedThreadInfo*** pppThreadInfo)
{
    // This helper method is called from modifying functions under the update lock (_mutex)!

    // If there is nothing in the list, fail fast:
    if (_nextFreeIndex < 1)
    {
        return false;
    }

    ManagedThreadInfo** pThreadInfoListEntry = nullptr;
    std::uint32_t ind = _nextFreeIndex;
    while (ind > minIndex)
    {
        ind--;

        // Bug fix: the TryGet() result used to be ignored; on failure the
        // null pThreadInfoListEntry would have been dereferenced below.
        if (!_threadsData->TryGet(ind, &pThreadInfoListEntry))
        {
            continue;
        }

        // If we found the thread we looked for:
        if ((*pThreadInfoListEntry) != nullptr && (*pThreadInfoListEntry)->GetClrThreadId() == clrThreadId)
        {
            *pThreadIndex = ind;
            *pppThreadInfo = pThreadInfoListEntry;
            return true;
        }
    }
    return false;
}
|
///////////////////////////////////////////////////////////////////////////////
// //
// DxilAddPixelHitInstrumentation.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// Provides a pass to add instrumentation to retrieve mesh shader output. //
// Used by PIX. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "dxc/DXIL/DxilOperations.h"
#include "dxc/DXIL/DxilUtil.h"
#include "dxc/DXIL/DxilInstructions.h"
#include "dxc/DXIL/DxilModule.h"
#include "dxc/DxilPIXPasses/DxilPIXPasses.h"
#include "dxc/HLSL/DxilGenerationPass.h"
#include "dxc/HLSL/DxilSpanAllocator.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <deque>
#ifdef _WIN32
#include <winerror.h>
#endif
#include "PixPassHelpers.h"
// Keep these in sync with the same-named value in the debugger application's
// WinPixShaderUtils.h
constexpr uint64_t DebugBufferDumpingGroundSize = 64 * 1024;
// The actual max size per record is much smaller than this, but it never
// hurts to be generous.
// The atomic output counter lives at this offset within the dumping ground.
constexpr size_t CounterOffsetBeyondUsefulData = DebugBufferDumpingGroundSize / 2;
// Keep these in sync with the same-named values in PIX's MeshShaderOutput.cpp
// Record-type tags written as the first dword of each debug record:
constexpr uint32_t triangleIndexIndicator = 0x1;
constexpr uint32_t int32ValueIndicator = 0x2;
constexpr uint32_t floatValueIndicator = 0x3;
constexpr uint32_t int16ValueIndicator = 0x4;
constexpr uint32_t float16ValueIndicator = 0x5;
using namespace llvm;
using namespace hlsl;
using namespace PIXPassHelpers;
// Module pass that instruments mesh shader output intrinsics (EmitIndices,
// StoreVertexOutput) to also write their values into a PIX debug UAV.
class DxilPIXMeshShaderOutputInstrumentation : public ModulePass
{
public:
  static char ID; // Pass identification, replacement for typeid
  explicit DxilPIXMeshShaderOutputInstrumentation() : ModulePass(ID) {}
  const char *getPassName() const override {
    return "DXIL mesh shader output instrumentation";
  }
  void applyOptions(PassOptions O) override;
  bool runOnModule(Module &M) override;

private:
  // Handle of the debug output UAV created in runOnModule.
  CallInst *m_OutputUAV = nullptr;
  // Bytes reserved by reserveDebugEntrySpace but not yet consumed by
  // writeDwordAndReturnNewOffset.
  int m_RemainingReservedSpaceInBytes = 0;
  // Mask that wraps write offsets back into the UAV's usable range.
  Constant *m_OffsetMask = nullptr;
  // Two values that disambiguate the emitting thread in each debug record.
  SmallVector<Value*,2> m_threadUniquifier;
  // Total UAV size in bytes (pass option "UAVSize").
  uint64_t m_UAVSize = 1024 * 1024;
  // Whether to expand the AS->MS payload struct (pass option "expand-payload").
  bool m_ExpandPayload = false;
  // Bundles the objects needed to emit IR at one insertion point.
  struct BuilderContext {
    Module &M;
    DxilModule &DM;
    LLVMContext &Ctx;
    OP *HlslOP;
    IRBuilder<> &Builder;
  };
  SmallVector<Value*, 2> insertInstructionsToCreateDisambiguationValue(OP* HlslOP, LLVMContext& Ctx, StructType * originalPayloadStructType, Instruction * firstGetPayload);
  Value *reserveDebugEntrySpace(BuilderContext &BC, uint32_t SpaceInBytes);
  uint32_t UAVDumpingGroundOffset();
  Value *writeDwordAndReturnNewOffset(BuilderContext &BC, Value *TheOffset,
                                      Value *TheValue);
  template <typename... T> void Instrument(BuilderContext &BC, T... values);
};
// Reads the pass options: "UAVSize" (debug UAV size in bytes, default 1 MiB)
// and "expand-payload" (whether the AS->MS payload carries extra PIX fields).
void DxilPIXMeshShaderOutputInstrumentation::applyOptions(PassOptions O)
{
  GetPassOptionUInt64(O, "UAVSize", &m_UAVSize, 1024 * 1024);
  GetPassOptionBool(O, "expand-payload", &m_ExpandPayload, 0);
}
// Byte offset of the "dumping ground" region at the end of the debug UAV.
uint32_t DxilPIXMeshShaderOutputInstrumentation::UAVDumpingGroundOffset()
{
  const uint64_t offset = m_UAVSize - DebugBufferDumpingGroundSize;
  return static_cast<uint32_t>(offset);
}
// Emits an atomic add on the debug UAV's output counter to reserve
// SpaceInBytes for the caller's record, and returns the (masked, wrap-around)
// byte offset at which the record may be written via
// writeDwordAndReturnNewOffset().
Value *DxilPIXMeshShaderOutputInstrumentation::reserveDebugEntrySpace(
    BuilderContext &BC, uint32_t SpaceInBytes)
{
  // Check the previous caller didn't reserve too much space:
  assert(m_RemainingReservedSpaceInBytes == 0);

  // Check that the caller didn't ask for so much memory that it will
  // overwrite the offset counter:
  // (Bug fix: this previously tested m_RemainingReservedSpaceInBytes, which
  // the assert above had just pinned to 0, making the check a no-op.)
  assert(SpaceInBytes < CounterOffsetBeyondUsefulData);

  m_RemainingReservedSpaceInBytes = SpaceInBytes;

  // Insert the UAV increment instruction:
  Function *AtomicOpFunc =
      BC.HlslOP->GetOpFunc(OP::OpCode::AtomicBinOp, Type::getInt32Ty(BC.Ctx));
  Constant *AtomicBinOpcode =
      BC.HlslOP->GetU32Const((unsigned)OP::OpCode::AtomicBinOp);
  Constant *AtomicAdd =
      BC.HlslOP->GetU32Const((unsigned)DXIL::AtomicBinOpCode::Add);
  Constant *OffsetArg =
      BC.HlslOP->GetU32Const(UAVDumpingGroundOffset() + CounterOffsetBeyondUsefulData);
  UndefValue *UndefArg = UndefValue::get(Type::getInt32Ty(BC.Ctx));

  Constant *Increment = BC.HlslOP->GetU32Const(SpaceInBytes);

  auto *PreviousValue = BC.Builder.CreateCall(
      AtomicOpFunc,
      {
          AtomicBinOpcode, // i32, ; opcode
          m_OutputUAV,     // %dx.types.Handle, ; resource handle
          AtomicAdd, // i32, ; binary operation code : EXCHANGE, IADD, AND, OR,
                     // XOR, IMIN, IMAX, UMIN, UMAX
          OffsetArg, // i32, ; coordinate c0: index in bytes
          UndefArg,  // i32, ; coordinate c1 (unused)
          UndefArg,  // i32, ; coordinate c2 (unused)
          Increment, // i32); increment value
      },
      "UAVIncResult");

  // Wrap the returned offset back into the UAV's usable range.
  return BC.Builder.CreateAnd(PreviousValue, m_OffsetMask, "MaskedForUAVLimit");
}
// Emits a BufferStore of one dword (x-component only) into the debug UAV at
// TheOffset, consumes 4 bytes of the space reserved by
// reserveDebugEntrySpace(), and returns the offset advanced by 4.
Value *DxilPIXMeshShaderOutputInstrumentation::writeDwordAndReturnNewOffset(
    BuilderContext &BC, Value *TheOffset, Value *TheValue)
{
  Function *StoreValue =
      BC.HlslOP->GetOpFunc(OP::OpCode::BufferStore, Type::getInt32Ty(BC.Ctx));
  Constant *StoreValueOpcode =
      BC.HlslOP->GetU32Const((unsigned)DXIL::OpCode::BufferStore);
  UndefValue *Undef32Arg = UndefValue::get(Type::getInt32Ty(BC.Ctx));
  // Only the first component of the store is meaningful.
  Constant *WriteMask_X = BC.HlslOP->GetI8Const(1);

  (void)BC.Builder.CreateCall(
      StoreValue,
      {StoreValueOpcode, // i32 opcode
       m_OutputUAV,      // %dx.types.Handle, ; resource handle
       TheOffset,        // i32 c0: index in bytes into UAV
       Undef32Arg,       // i32 c1: unused
       TheValue,
       Undef32Arg, // unused values
       Undef32Arg, // unused values
       Undef32Arg, // unused values
       WriteMask_X});

  m_RemainingReservedSpaceInBytes -= sizeof(uint32_t);
  assert(m_RemainingReservedSpaceInBytes >=
         0); // or else the caller didn't reserve enough space

  return BC.Builder.CreateAdd(
      TheOffset,
      BC.HlslOP->GetU32Const(static_cast<unsigned int>(sizeof(uint32_t))));
}
// Reserves one debug record of sizeof(uint32_t) * sizeof...(values) bytes and
// writes each value (already i32-typed) into it as consecutive dwords.
template <typename... T>
void DxilPIXMeshShaderOutputInstrumentation::Instrument(BuilderContext &BC,
                                                        T... values)
{
  llvm::SmallVector<llvm::Value *, 10> Values(
      {static_cast<llvm::Value *>(values)...});
  const uint32_t DwordCount = Values.size();
  llvm::Value *byteOffset =
      reserveDebugEntrySpace(BC, DwordCount * sizeof(uint32_t));
  for (llvm::Value *V : Values)
  {
    byteOffset = writeDwordAndReturnNewOffset(BC, byteOffset, V);
  }
}
// Loads field `offset` of the (expanded) AS->MS payload struct pointed to by
// firstGetPayload, via a GEP through the pointer's element type.
Value* GetValueFromExpandedPayload(IRBuilder<> &Builder, StructType* originalPayloadStructType, Instruction* firstGetPayload, unsigned int offset, const char * name) {
  auto *DerefPointer = Builder.getInt32(0);
  auto *OffsetToExpandedData = Builder.getInt32(offset);
  auto *GEP = Builder.CreateGEP(
      cast<PointerType>(firstGetPayload->getType()->getScalarType())
          ->getElementType(),
      firstGetPayload, {DerefPointer, OffsetToExpandedData});
  return Builder.CreateLoad(GEP, name);
}
// Builds the two per-thread disambiguation values recorded with every debug
// entry: the amplification-shader thread id carried in the expanded payload,
// and a flattened group index computed as (GroupIdX * YCount + GroupIdY) *
// ZCount + GroupIdZ from the dispatch counts also carried in the payload.
SmallVector<Value*, 2> DxilPIXMeshShaderOutputInstrumentation::
    insertInstructionsToCreateDisambiguationValue(OP* HlslOP, LLVMContext& Ctx, StructType* originalPayloadStructType, Instruction* firstGetPayload) {

  // When a mesh shader is called from an amplification shader, all of the
  // thread id values are relative to the DispatchMesh call made by
  // that amplification shader. Data about what thread counts were passed
  // by the CPU to *CommandList::DispatchMesh are not available, but we
  // will have added that value to the AS->MS payload...
  IRBuilder<> Builder(firstGetPayload->getNextNode());

  // The extra PIX fields live immediately after the application's own fields.
  auto * ASThreadId = GetValueFromExpandedPayload(Builder, originalPayloadStructType, firstGetPayload, originalPayloadStructType->getStructNumElements(), "ASThreadId");
  auto * ASDispatchMeshYCount = GetValueFromExpandedPayload(Builder, originalPayloadStructType, firstGetPayload, originalPayloadStructType->getStructNumElements() + 1, "ASDispatchMeshYCount");
  auto * ASDispatchMeshZCount = GetValueFromExpandedPayload(Builder, originalPayloadStructType, firstGetPayload, originalPayloadStructType->getStructNumElements() + 2, "ASDispatchMeshZCount");

  Constant *Zero32Arg = HlslOP->GetU32Const(0);
  Constant *One32Arg = HlslOP->GetU32Const(1);
  Constant *Two32Arg = HlslOP->GetU32Const(2);

  auto GroupIdFunc =
      HlslOP->GetOpFunc(DXIL::OpCode::GroupId, Type::getInt32Ty(Ctx));
  Constant *Opcode = HlslOP->GetU32Const((unsigned)DXIL::OpCode::GroupId);
  auto * GroupIdX =
      Builder.CreateCall(GroupIdFunc, {Opcode, Zero32Arg}, "GroupIdX");
  auto * GroupIdY =
      Builder.CreateCall(GroupIdFunc, {Opcode, One32Arg}, "GroupIdY");
  auto * GroupIdZ =
      Builder.CreateCall(GroupIdFunc, {Opcode, Two32Arg}, "GroupIdZ");

  // Flatten the 3-D group id into a single scalar.
  auto *XxY =
      Builder.CreateMul(GroupIdX, ASDispatchMeshYCount);
  auto *XplusY = Builder.CreateAdd(GroupIdY, XxY);
  auto *XYxZ = Builder.CreateMul(XplusY, ASDispatchMeshZCount);
  auto *XYZ = Builder.CreateAdd(GroupIdZ, XYxZ);

  SmallVector<Value *, 2> ret;
  ret.push_back(ASThreadId);
  ret.push_back(XYZ);
  return ret;
}
// Pass entry point. Optionally swaps the GetMeshPayload call to an expanded
// payload type (carrying PIX's disambiguation fields), creates the debug UAV,
// and instruments every EmitIndices and StoreVertexOutput call to also record
// its operands (coerced to i32) into that UAV.
bool DxilPIXMeshShaderOutputInstrumentation::runOnModule(Module &M)
{
  DxilModule &DM = M.GetOrCreateDxilModule();
  LLVMContext &Ctx = M.getContext();
  OP *HlslOP = DM.GetOP();

  Type *OriginalPayloadStructType = nullptr;
  ExpandedStruct expanded = {};
  Instruction* FirstNewStructGetMeshPayload = nullptr;
  if (m_ExpandPayload) {
    Instruction * getMeshPayloadInstructions = nullptr;
    llvm::Function *entryFunction = PIXPassHelpers::GetEntryFunction(DM);
    // Find the (single) GetMeshPayload call and remember its struct type.
    for (inst_iterator I = inst_begin(entryFunction),
                       E = inst_end(entryFunction);
         I != E; ++I) {
      if (auto* Instr = llvm::cast<Instruction>(&*I)) {
        if (hlsl::OP::IsDxilOpFuncCallInst(Instr,
                                           hlsl::OP::OpCode::GetMeshPayload)) {
          getMeshPayloadInstructions = Instr;
          Type *OriginalPayloadStructPointerType = Instr->getType();
          OriginalPayloadStructType = OriginalPayloadStructPointerType->getPointerElementType();
          // The validator assures that there is only one call to GetMeshPayload...
          break;
        }
      }
    }

    if (OriginalPayloadStructType == nullptr) {
      // If the application used no payload, then we won't attempt to add one.
      // TODO: Is there a credible use case with no AS->MS payload?
      // PIX bug #35288335
      return false;
    }

    if (expanded.ExpandedPayloadStructPtrType == nullptr) {
      expanded = ExpandStructType(Ctx, OriginalPayloadStructType);
    }

    if (getMeshPayloadInstructions != nullptr) {
      // Re-emit GetMeshPayload with the expanded struct type and redirect all
      // users of the original call to the new one.
      Function* DxilFunc = HlslOP->GetOpFunc(OP::OpCode::GetMeshPayload, expanded.ExpandedPayloadStructPtrType);
      Constant* opArg = HlslOP->GetU32Const((unsigned)OP::OpCode::GetMeshPayload);
      IRBuilder<> Builder(getMeshPayloadInstructions);
      Value* args[] = { opArg };
      Instruction* payload = Builder.CreateCall(DxilFunc, args);

      if (FirstNewStructGetMeshPayload == nullptr) {
        FirstNewStructGetMeshPayload = payload;
      }

      ReplaceAllUsesOfInstructionWithNewValueAndDeleteInstruction(getMeshPayloadInstructions, payload, expanded.ExpandedPayloadStructType);
    }
  }

  // Create the debug UAV at the top of the entry function.
  Instruction *firstInsertionPt =
      dxilutil::FirstNonAllocaInsertionPt(GetEntryFunction(DM));
  IRBuilder<> Builder(firstInsertionPt);

  BuilderContext BC{M, DM, Ctx, HlslOP, Builder};

  m_OffsetMask = BC.HlslOP->GetU32Const(UAVDumpingGroundOffset() - 1);

  m_OutputUAV = CreateUAV(DM, Builder, 0, "PIX_DebugUAV_Handle");

  // Without an expanded payload there is nothing to disambiguate with; record
  // zeros. Otherwise derive the two uniquifier values from the payload.
  if (FirstNewStructGetMeshPayload == nullptr) {
    m_threadUniquifier.push_back(BC.HlslOP->GetU32Const(0));
    m_threadUniquifier.push_back(BC.HlslOP->GetU32Const(0));
  }
  else {
    m_threadUniquifier = insertInstructionsToCreateDisambiguationValue(HlslOP, Ctx, cast<StructType>(OriginalPayloadStructType), FirstNewStructGetMeshPayload);
  }

  // Instrument every EmitIndices call with a triangle-index record.
  auto F = HlslOP->GetOpFunc(DXIL::OpCode::EmitIndices, Type::getVoidTy(Ctx));
  auto FunctionUses = F->uses();
  for (auto FI = FunctionUses.begin(); FI != FunctionUses.end();)
  {
    auto &FunctionUse = *FI++;
    auto FunctionUser = FunctionUse.getUser();
    auto Call = cast<CallInst>(FunctionUser);

    IRBuilder<> Builder2(Call);
    BuilderContext BC2{M, DM, Ctx, HlslOP, Builder2};

    Instrument(BC2, BC2.HlslOP->GetI32Const(triangleIndexIndicator),
               m_threadUniquifier[0], m_threadUniquifier[1], Call->getOperand(1),
               Call->getOperand(2), Call->getOperand(3), Call->getOperand(4));
  }

  // Instrument every StoreVertexOutput overload with a typed value record.
  struct OutputType
  {
    Type *type;
    uint32_t tag;
  };
  SmallVector<OutputType, 4> StoreVertexOutputOverloads
  {
    {Type::getInt32Ty(Ctx), int32ValueIndicator},
    {Type::getInt16Ty(Ctx), int16ValueIndicator},
    {Type::getFloatTy(Ctx), floatValueIndicator},
    {Type::getHalfTy(Ctx), float16ValueIndicator}
  };

  for (auto const &Overload : StoreVertexOutputOverloads)
  {
    F = HlslOP->GetOpFunc(DXIL::OpCode::StoreVertexOutput, Overload.type);
    FunctionUses = F->uses();
    for (auto FI = FunctionUses.begin(); FI != FunctionUses.end();)
    {
      auto &FunctionUse = *FI++;
      auto FunctionUser = FunctionUse.getUser();
      auto Call = cast<CallInst>(FunctionUser);

      IRBuilder<> Builder2(Call);
      BuilderContext BC2{M, DM, Ctx, HlslOP, Builder2};

      // Expand column index to 32 bits:
      auto ColumnIndex = BC2.Builder.CreateCast(
          Instruction::ZExt,
          Call->getOperand(3),
          Type::getInt32Ty(Ctx));

      // Coerce actual value to int32
      Value *CoercedValue = Call->getOperand(4);

      if (Overload.tag == floatValueIndicator)
      {
        // float: reinterpret the bits as i32.
        CoercedValue = BC2.Builder.CreateCast(
            Instruction::BitCast,
            CoercedValue,
            Type::getInt32Ty(Ctx));
      }
      else if (Overload.tag == float16ValueIndicator)
      {
        // half: reinterpret as i16, then zero-extend.
        auto * HalfInt = BC2.Builder.CreateCast(
            Instruction::BitCast,
            CoercedValue,
            Type::getInt16Ty(Ctx));

        CoercedValue = BC2.Builder.CreateCast(
            Instruction::ZExt,
            HalfInt,
            Type::getInt32Ty(Ctx));
      }
      else if (Overload.tag == int16ValueIndicator)
      {
        CoercedValue = BC2.Builder.CreateCast(
            Instruction::ZExt,
            CoercedValue,
            Type::getInt32Ty(Ctx));
      }

      Instrument(
          BC2,
          BC2.HlslOP->GetI32Const(Overload.tag),
          m_threadUniquifier[0], m_threadUniquifier[1],
          Call->getOperand(1),
          Call->getOperand(2),
          ColumnIndex,
          CoercedValue,
          Call->getOperand(5));
    }
  }

  DM.ReEmitDxilResources();

  return true;
}
// Pass id storage (its address identifies the pass to the pass manager).
char DxilPIXMeshShaderOutputInstrumentation::ID = 0;

// Factory used by the pass pipeline to instantiate this pass.
ModulePass *llvm::createDxilDxilPIXMeshShaderOutputInstrumentation()
{
  return new DxilPIXMeshShaderOutputInstrumentation();
}

INITIALIZE_PASS(DxilPIXMeshShaderOutputInstrumentation,
                "hlsl-dxil-pix-meshshader-output-instrumentation",
                "DXIL mesh shader output instrumentation for PIX", false, false)
|
/* Copyright (C) 2006 - 2010 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* ScriptData
SDName: Boss_Trollgore
SD%Complete: 20%
SDComment:
SDCategory: Drak'Tharon Keep
EndScriptData */
#include "precompiled.h"
// Creature-text ids for Trollgore (negative ids reference the script_texts table).
enum
{
    SAY_AGGRO           = -1600000,
    SAY_CONSUME         = -1600001,
    SAY_DEATH           = -1600002,
    SAY_EXPLODE         = -1600003,
    SAY_KILL            = -1600004
};
/*######
## boss_trollgore
######*/
// AI for boss Trollgore (Drak'Tharon Keep). Per the ScriptData header this is
// only ~20% complete: currently just aggro/kill/death barks plus melee.
struct MANGOS_DLL_DECL boss_trollgoreAI : public ScriptedAI
{
    boss_trollgoreAI(Creature* pCreature) : ScriptedAI(pCreature)
    {
        m_pInstance = (ScriptedInstance*)pCreature->GetInstanceData();
        m_bIsRegularMode = pCreature->GetMap()->IsRegularDifficulty();
        Reset();
    }

    // Instance data of the dungeon (may be null outside an instance).
    ScriptedInstance* m_pInstance;
    // True on normal difficulty, false on heroic.
    bool m_bIsRegularMode;

    // No combat state to reset yet (spell timers not implemented).
    void Reset()
    {
    }

    void Aggro(Unit* pWho)
    {
        DoScriptText(SAY_AGGRO, m_creature);
    }

    void KilledUnit(Unit* pVictim)
    {
        // Only bark when a player (or player-controlled unit) dies.
        if (pVictim->GetCharmerOrOwnerPlayerOrPlayerItself())
            DoScriptText(SAY_KILL, m_creature);
    }

    void JustDied(Unit* pKiller)
    {
        DoScriptText(SAY_DEATH, m_creature);
    }

    void UpdateAI(const uint32 uiDiff)
    {
        if (!m_creature->SelectHostileTarget() || !m_creature->getVictim())
            return;

        DoMeleeAttackIfReady();
    }
};
// Factory callback used by the script registry to create the boss AI.
CreatureAI* GetAI_boss_trollgore(Creature* pCreature)
{
    boss_trollgoreAI* pAI = new boss_trollgoreAI(pCreature);
    return pAI;
}
// Registers the boss_trollgore script with the script library.
void AddSC_boss_trollgore()
{
    Script* newscript = new Script;
    newscript->Name = "boss_trollgore";
    newscript->GetAI = &GetAI_boss_trollgore;
    newscript->RegisterSelf();
}
|
#include <stddef.h>
#include <string>
#include <filesystem>
#include "../mtlpp.hpp"
// Number of elements processed per run (reduced from 1 << 24 for testing).
const unsigned int arrayLength = 10; //1 << 24;
// Size in bytes of each float buffer.
const unsigned int bufferSize = arrayLength * sizeof(float);

//xcrun -sdk macosx metal -c add.metal -o add.air
//xcrun -sdk macosx metallib add.air -o add.metallib

//xcrun -sdk macosx metal -c operations.metal -o operations.air
//xcrun -sdk macosx metallib operations.air -o operations.metallib

//define metal reflections here
// C-side declaration mirroring the Metal kernel's signature.
void mtlAddArrays(const float* inA, const float* inB, float* result, int length);
//end define Metal Reflections
// Thin wrapper around an mtlpp compute pipeline that runs an element-wise
// add over two float buffers.
// NOTE(review): no access specifier is given, so every member -- including
// all constructors -- is private.  Confirm this class is meant to be
// instantiable (e.g. via a friend) or add `public:` where intended.
class MetalEngine
{
    mtlpp::Device _mDevice = mtlpp::Device::CreateSystemDefaultDevice();
    // Compiled compute pipeline for the selected kernel function.
    mtlpp::ComputePipelineState _mFunctionPSO;
    mtlpp::CommandQueue _mCommandQueue;
    // Input buffers A and B, and the output buffer.
    mtlpp::Buffer _mBufferA;
    mtlpp::Buffer _mBufferB;
    mtlpp::Buffer _mBufferResult;
    ns::Error* error; // last Metal API error; presumably nullptr when none -- TODO confirm initialization
    MetalEngine();
    MetalEngine(mtlpp::Device device);
    // Load the kernel from a .metallib at libraryPath.
    MetalEngine(ns::String libraryPath, ns::String mtlFunction, mtlpp::Device device);
    MetalEngine(ns::String mtlFunction, mtlpp::Device device);
    // Compile the kernel from source text.
    MetalEngine(const char src[], ns::String functionName, mtlpp::Device device);
    void generateRandomFloatData(mtlpp::Buffer buffer);
    void prepareData(mtlpp::Device device);
    void sendComputeCommand(mtlpp::CommandQueue commandQueue);
    void encodeAddCommand(mtlpp::ComputeCommandEncoder computeEncoder);
    void verifyResults();
    void execute();
};
|
/*******************************************************************\
Module: Memory Analyzer
Author: Malte Mues <mail.mues@gmail.com>
Daniel Poetzl
\*******************************************************************/
/// \file
/// Commandline parser for the memory analyzer executing main work.
#include "memory_analyzer_parse_options.h"
#include "analyze_symbol.h"
#include "gdb_api.h"
#include <algorithm>
#include <fstream>
#include <ansi-c/ansi_c_language.h>
#include <goto-programs/goto_model.h>
#include <goto-programs/read_goto_binary.h>
#include <goto-programs/show_symbol_table.h>
#include <langapi/mode.h>
#include <util/config.h>
#include <util/exit_codes.h>
#include <util/message.h>
#include <util/string_utils.h>
#include <util/version.h>
/// Forwards the command line to the base parser, registering the accepted
/// options and the "MEMORY-ANALYZER <version>" banner, and binds the
/// message stream to this object's UI message handler.
memory_analyzer_parse_optionst::memory_analyzer_parse_optionst(
  int argc,
  const char *argv[])
  : parse_options_baset(
      MEMORY_ANALYZER_OPTIONS,
      argc,
      argv,
      std::string("MEMORY-ANALYZER ") + CBMC_VERSION),
    message(ui_message_handler)
{
}
int memory_analyzer_parse_optionst::doit()
{
if(cmdline.isset("version"))
{
message.status() << CBMC_VERSION << '\n';
return CPROVER_EXIT_SUCCESS;
}
config.set(cmdline);
if(cmdline.args.size() < 1)
{
throw invalid_command_line_argument_exceptiont(
"no binary provided for analysis", "<binary> <args>");
}
if(!cmdline.isset("symbols"))
{
throw invalid_command_line_argument_exceptiont(
"need to provide symbols to analyse via --symbols", "--symbols");
}
const bool core_file = cmdline.isset("core-file");
const bool breakpoint = cmdline.isset("breakpoint");
if(core_file && breakpoint)
{
throw invalid_command_line_argument_exceptiont(
"cannot start gdb from both core-file and breakpoint",
"--core-file/--breakpoint");
}
if(!core_file && !breakpoint)
{
throw invalid_command_line_argument_exceptiont(
"need to provide either core-file or breakpoint for gdb",
"--core-file/--breakpoint");
}
const bool output_file = cmdline.isset("output-file");
const bool symtab_snapshot = cmdline.isset("symtab-snapshot");
if(symtab_snapshot && output_file)
{
throw invalid_command_line_argument_exceptiont(
"printing to a file is not supported for symbol table snapshot output",
"--symtab-snapshot");
}
register_language(new_ansi_c_language);
std::string binary = cmdline.args.front();
const std::string symbol_list(cmdline.get_value("symbols"));
std::vector<std::string> result;
split_string(symbol_list, ',', result, true, true);
auto opt = read_goto_binary(binary, ui_message_handler);
if(!opt.has_value())
{
throw deserialization_exceptiont(
"cannot read goto binary '" + binary + "'");
}
const goto_modelt goto_model(std::move(opt.value()));
gdb_value_extractort gdb_value_extractor(
goto_model.symbol_table, cmdline.args);
gdb_value_extractor.create_gdb_process();
if(core_file)
{
std::string core_file = cmdline.get_value("core-file");
gdb_value_extractor.run_gdb_from_core(core_file);
}
else if(breakpoint)
{
std::string breakpoint = cmdline.get_value("breakpoint");
gdb_value_extractor.run_gdb_to_breakpoint(breakpoint);
}
std::vector<irep_idt> result_ids(result.size());
std::transform(
result.begin(), result.end(), result_ids.begin(), [](std::string &name) {
return irep_idt{name};
});
gdb_value_extractor.analyze_symbols(result_ids);
std::ofstream file;
if(output_file)
{
file.open(cmdline.get_value("output-file"));
}
std::ostream &out =
output_file ? (std::ostream &)file : (std::ostream &)message.result();
if(symtab_snapshot)
{
symbol_tablet snapshot = gdb_value_extractor.get_snapshot_as_symbol_table();
show_symbol_table(snapshot, ui_message_handler);
}
else
{
std::string snapshot = gdb_value_extractor.get_snapshot_as_c_code();
out << snapshot;
}
if(output_file)
{
file.close();
}
else
{
message.result() << messaget::eom;
}
return CPROVER_EXIT_SUCCESS;
}
/// Print the usage banner and option summary to the status stream.
/// The literal spacing below is deliberate: it column-aligns the
/// right-hand descriptions in a terminal.
void memory_analyzer_parse_optionst::help()
{
  message.status()
    << '\n'
    << banner_string("Memory-Analyzer", CBMC_VERSION) << '\n'
    << align_center_with_border("Copyright (C) 2019") << '\n'
    << align_center_with_border("Malte Mues, Diffblue Ltd.") << '\n'
    << align_center_with_border("info@diffblue.com") << '\n'
    << '\n'
    << "Usage:                       Purpose:\n"
    << '\n'
    << " memory-analyzer [-?] [-h] [--help]       show help\n"
    << " memory-analyzer --version                show"
    << " version\n"
    << " memory-analyzer --symbols <symbol-list> <options> <binary> analyze"
    << " binary\n"
    << "\n"
    << " --core-file <file>           analyze from core file\n"
    << " --breakpoint <breakpoint>    analyze from breakpoint\n"
    << " --symbols <symbol-list>      list of symbols to analyze\n"
    << " --symtab-snapshot            output snapshot as symbol table\n"
    << " --output-file <file>         write snapshot to file\n"
    << " --json-ui                    output snapshot in JSON format\n"
    << messaget::eom;
}
|
#include "kindyn/controller/cardsflow_command_interface.hpp"
namespace hardware_interface
{
    /// Default-constructs an invalid handle with no command storage bound.
    CardsflowHandle::CardsflowHandle() : CardsflowStateHandle(){}
    /**
     * Wraps a joint's state handle together with pointers to the storage
     * for its output commands.  The handle does not own any of the
     * pointed-to storage.
     * @param js This joint's state handle
     * @param joint_position_cmd storage for the joint position command
     * @param joint_velocity_cmd storage for the joint velocity command
     * @param joint_torque_cmd storage for the joint torque command
     * @param motor_cmd storage for the per-motor command vector
     */
    CardsflowHandle::CardsflowHandle(const CardsflowStateHandle& js, double* joint_position_cmd,
                                     double* joint_velocity_cmd, double* joint_torque_cmd, VectorXd *motor_cmd)
            : CardsflowStateHandle(js), joint_position_cmd_(joint_position_cmd), joint_velocity_cmd_(joint_velocity_cmd),
              joint_torque_cmd_(joint_torque_cmd), motor_cmd_(motor_cmd)
    {
    }
    // NOTE(review): takes VectorXd by value, which copies on every call --
    // consider const& in the header if this is on a control hot path.
    void CardsflowHandle::setMotorCommand(VectorXd command) {*motor_cmd_ = command;}
    /// Accessors: read back the last commanded values.
    double CardsflowHandle::getJointPositionCommand() const {return *joint_position_cmd_;}
    double CardsflowHandle::getJointVelocityCommand() const {return *joint_velocity_cmd_;}
    double CardsflowHandle::getJointTorqueCommand() const {return *joint_torque_cmd_;}
    /// Mutators: write new command values into the bound storage.
    void CardsflowHandle::setJointPositionCommand(double cmd){*joint_position_cmd_ = cmd;}
    void CardsflowHandle::setJointVelocityCommand(double cmd){*joint_velocity_cmd_ = cmd;}
    void CardsflowHandle::setJointTorqueCommand(double cmd){*joint_torque_cmd_ = cmd;}
}
|
#include <iostream>
#include <sstream>
#include "doctest.h"
// Code under test
#include "counter.cpp"
TEST_SUITE("counter.cpp") {
    TEST_CASE("calcTime") {
        SUBCASE("shows error message.") {
            // Redirect std::cout into a buffer so the error output can be
            // asserted, then restore the original stream buffer.
            std::stringbuf buf;
            std::streambuf* prev = std::cout.rdbuf(&buf);
            // Stack allocation instead of new/delete: no leak even if a
            // check fails and unwinds early.
            Counter c;
            c.setFibonacci(NULL);
            c.calcTime(1);
            std::cout.rdbuf(prev);
            CHECK_EQ("[error] Fibonacci is not initialized.\n", buf.str());
        }
    }
}
|
#pragma once
#include <boost/asio/ip/address.hpp>
#include <string>
namespace redfish
{
namespace ip_util
{

/**
 * @brief Converts boost::asio::ip::address to string
 *        Will automatically convert an IPv4-mapped IPv6 address back to
 *        plain IPv4.
 *
 * @param[in] ipAddr  IP address to convert
 *
 * @return IP address string
 */
inline std::string toString(const boost::asio::ip::address& ipAddr)
{
    if (!ipAddr.is_v6())
    {
        return ipAddr.to_string();
    }
    boost::asio::ip::address_v6 v6Addr = ipAddr.to_v6();
    if (!v6Addr.is_v4_mapped())
    {
        return ipAddr.to_string();
    }
    // Unmap ::ffff:a.b.c.d back to a.b.c.d
    return boost::asio::ip::make_address_v4(boost::asio::ip::v4_mapped, v6Addr)
        .to_string();
}

} // namespace ip_util
} // namespace redfish
|
#include <stdint.h>
#include <string>
#include <sstream>
#include <vector>
#include <list>
#include <map>
#include <unordered_map>
#include <fstream>
#include "zlib.h"
#include "debug.hpp"
#include "defines.hpp"
#include "Config.hpp"
#include "Request.hpp"
#include "Response.hpp"
#include "Mem_Interface.hpp"
#include "Cache.hpp"
#include "Cache_L1i_Standalone.hpp"
#include "Mmu.hpp"
#include "Memory.hpp"
#include "Mem_Hierarchy.hpp"
using namespace std;
// Build the memory hierarchy from the configuration.  Two topologies are
// supported: a standalone L1i cache with no backing memory, or a default
// chain MMU -> L1i -> main memory wired up in both directions.
Mem_Hierarchy::Mem_Hierarchy(const Config &config): config(config)
{
    // if (config.mem_hier_config.l1i_on) {
    //     vector<Cpu *>;
    //     for(int i = 0;)
    // }
    // default setup
    if (config.l1i_standalone) {
        // Standalone L1i: no MMU, no memory behind it.
        cache_l1i_standalone = new Cache_L1i_Standalone(config, config.l1i_configs[0], L1i);
        caches["l1i"].push_back(cache_l1i_standalone);
        memory = NULL;
    } else {
        // Full chain: instruction MMU on top, L1i in the middle, memory
        // at the bottom; super/sub interfaces link neighbors both ways.
        mmus[INSTRUCTION].push_back(new Mmu(config, INSTRUCTION));
        caches["l1i"].push_back(new Cache(config, config.l1i_configs[0], L1i));
        memory = new Memory(config);
        mmus[INSTRUCTION][0]->add_sub_interface(caches["l1i"][0]);
        caches["l1i"][0]->add_super_interface(mmus[INSTRUCTION][0]);
        caches["l1i"][0]->add_sub_interface(memory);
        memory->add_super_interface(caches["l1i"][0]);
    }
}
// Return the MMU of the given type serving the given CPU, or NULL when
// that MMU type is absent or cpu_num is out of range.
Mmu *Mem_Hierarchy::get_cpu_mmu(mmu_type_t mmu_type, int cpu_num)
{
    map<mmu_type_t, vector<Mmu *> >::iterator it = mmus.find(mmu_type);
    if (it == mmus.end()) {
        return NULL;
    }
    // Fix: the original check `size() < cpu_num` let cpu_num == size()
    // through and indexed one past the end; also reject negative indices.
    if (cpu_num < 0 || (size_t)cpu_num >= it->second.size()) {
        return NULL;
    }
    return it->second[cpu_num];
}
// Return the standalone L1i cache; NULL unless configured with
// config.l1i_standalone (see constructor).
Cache_L1i_Standalone *Mem_Hierarchy::get_cpu_l1i()
{
    return cache_l1i_standalone;
}
// Advance every component of the hierarchy by one simulation step:
// all MMUs first, then all caches, then main memory (if present).
res_t Mem_Hierarchy::step()
{
    log_insanity("Mem_Hierarchy::step entered");
    for (auto &mmu_entry : mmus) {
        for (Mmu *mmu : mmu_entry.second) {
            mmu->step();
        }
    }
    for (auto &cache_entry : caches) {
        for (Mem_Interface *iface : cache_entry.second) {
            iface->step();
        }
    }
    if (memory != NULL) {
        memory->step();
    }
    log_insanity("Mem_Hierarchy::step exited");
    return SUCCESS;
}
void Mem_Hierarchy::print_results()
{
for (map<string, vector<Mem_Interface *> >::iterator it = caches.begin(); it != caches.end(); it++) {
for (Mem_Interface *mem_interface : it->second) {
mem_interface->print_results();
}
}
}
|
/*
Copyright (c) 2009-2015, Jack Poulson
All rights reserved.
This file is part of Elemental and is under the BSD 2-Clause License,
which can be found in the LICENSE file in the root directory, or at
http://opensource.org/licenses/BSD-2-Clause
*/
#pragma once
#ifndef EL_TRTRMM_UNBLOCKED_HPP
#define EL_TRTRMM_UNBLOCKED_HPP
namespace El {
namespace trtrmm {
// In-place unblocked computation of L := L^H L (conjugate=true) or
// L := L^T L (conjugate=false) for a lower-triangular matrix L, stored in
// column-major order with leading dimension ldim.
// The column sweep is order-sensitive: for each j, the row l10 must update
// the trailing L00 block BEFORE it is scaled by lambda11, and lambda11 is
// squared last.
template<typename T>
inline void
LUnblocked( Matrix<T>& L, bool conjugate=false )
{
    DEBUG_ONLY(
      CallStackEntry cse("trtrmm::LUnblocked");
      if( L.Height() != L.Width() )
          LogicError("L must be square");
    )
    const Int n = L.Height();
    T* LBuffer = L.Buffer();
    const Int ldim = L.LDim();
    for( Int j=0; j<n; ++j )
    {
        // l10 is row j of L, addressed with stride ldim (row of a
        // column-major buffer).
        T* EL_RESTRICT l10 = &LBuffer[j];
        if( conjugate )
        {
            // L00 := L00 + l10^H l10
            for( Int k=0; k<j; ++k )
            {
                const T gamma = l10[k*ldim];
                T* EL_RESTRICT L00Col = &LBuffer[k*ldim];
                for( Int i=k; i<j; ++i )
                    L00Col[i] += Conj(l10[i*ldim])*gamma;
            }
        }
        else
        {
            // L00 := L00 + l10^T l10
            for( Int k=0; k<j; ++k )
            {
                const T gamma = l10[k*ldim];
                T* EL_RESTRICT L00Col = &LBuffer[k*ldim];
                for( Int i=k; i<j; ++i )
                    L00Col[i] += l10[i*ldim]*gamma;
            }
        }
        // l10 := l10 lambda11
        const T lambda11 = LBuffer[j+j*ldim];
        for( Int k=0; k<j; ++k )
            l10[k*ldim] *= lambda11;
        // lambda11 := lambda11^2 or |lambda11|^2
        if( conjugate )
            LBuffer[j+j*ldim] = lambda11*Conj(lambda11);
        else
            LBuffer[j+j*ldim] = lambda11*lambda11;
    }
}
// In-place unblocked computation of U := U U^H (conjugate=true) or
// U := U U^T (conjugate=false) for an upper-triangular matrix U, stored in
// column-major order with leading dimension ldim.
// Mirrors LUnblocked: for each j, column u01 updates the leading U00 block
// BEFORE it is scaled by upsilon11, and upsilon11 is squared last.
template<typename T>
inline void
UUnblocked( Matrix<T>& U, bool conjugate=false )
{
    DEBUG_ONLY(
      CallStackEntry cse("trtrmm::UUnblocked");
      if( U.Height() != U.Width() )
          LogicError("U must be square");
    )
    const Int n = U.Height();
    T* UBuffer = U.Buffer();
    const Int ldim = U.LDim();
    for( Int j=0; j<n; ++j )
    {
        // u01 is the top of column j (contiguous in memory).
        T* EL_RESTRICT u01 = &UBuffer[j*ldim];
        if( conjugate )
        {
            // U00 := U00 + u01 u01^H
            for( Int k=0; k<j; ++k )
            {
                const T gamma = Conj(u01[k]);
                T* EL_RESTRICT U00Col = &UBuffer[k*ldim];
                for( Int i=0; i<=k; ++i )
                    U00Col[i] += u01[i]*gamma;
            }
        }
        else
        {
            // U00 := U00 + u01 u01^T
            for( Int k=0; k<j; ++k )
            {
                const T gamma = u01[k];
                T* EL_RESTRICT U00Col = &UBuffer[k*ldim];
                for( Int i=0; i<=k; ++i )
                    U00Col[i] += u01[i]*gamma;
            }
        }
        // u01 := u01 upsilon11
        const T upsilon11 = UBuffer[j+j*ldim];
        for( Int k=0; k<j; ++k )
            u01[k] *= upsilon11;
        // upsilon11 := upsilon11^2 or |upsilon11|^2
        if( conjugate )
            UBuffer[j+j*ldim] = upsilon11*Conj(upsilon11);
        else
            UBuffer[j+j*ldim] = upsilon11*upsilon11;
    }
}
} // namespace trtrmm
} // namespace El
#endif // ifndef EL_TRTRMM_UNBLOCKED_HPP
|
#include "SceneTest.h"
namespace test {
// A SceneMatrix should be constructible from a bare matrix.
TEST_F(SceneTest, CheckConstructorMatrixTest)
{
    EXPECT_FALSE(scene_);
    sgl::matrix transform;
    scene_ = std::make_shared<sgl::SceneMatrix>(transform);
    EXPECT_TRUE(scene_);
}
// A SceneMesh should be constructible from a loaded mesh asset.
TEST_F(SceneTest, CheckConstructorMeshTest)
{
    EXPECT_FALSE(scene_);
    const auto cube = std::make_shared<sgl::Mesh>("../Asset/CubeUVNormal.obj");
    scene_ = std::make_shared<sgl::SceneMesh>(cube);
    EXPECT_TRUE(scene_);
}
// Populate a scene tree and verify it holds 2 mesh nodes and 2
// matrix-only nodes, 4 nodes total.
TEST_F(SceneTest, CheckTreeConstructTest)
{
    EXPECT_FALSE(scene_tree_);
    scene_tree_ = std::make_shared<sgl::SceneTree>();
    EXPECT_TRUE(scene_tree_);
    PopulateTree();
    unsigned int mesh_nodes = 0;
    unsigned int matrix_nodes = 0;
    for (const auto& node : *scene_tree_)
    {
        if (node->GetLocalMesh())
            ++mesh_nodes;
        else
            ++matrix_nodes;
    }
    EXPECT_EQ(2, mesh_nodes);
    EXPECT_EQ(2, matrix_nodes);
    EXPECT_EQ(4, scene_tree_->size());
}
} // End namespace test.
|
/***************************************************************************
copyright : (C) 2006 by Lukáš Lalinský
email : lalinsky@gmail.com
copyright : (C) 2004 by Allan Sandfeld Jensen
email : kde@carewolf.org
(original MPC implementation)
***************************************************************************/
/***************************************************************************
* This library is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Lesser General Public License version *
* 2.1 as published by the Free Software Foundation. *
* *
* This library is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
* Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public *
* License along with this library; if not, write to the Free Software *
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
* USA *
* *
* Alternatively, this file is available under the Mozilla Public *
* License Version 1.1. You may obtain a copy of the License at *
* http://www.mozilla.org/MPL/ *
***************************************************************************/
#include <tstring.h>
#include <tdebug.h>
#include <bitset>
#include "trueaudioproperties.h"
#include "trueaudiofile.h"
using namespace TagLib;
// Private data for TrueAudio::Properties (pimpl).  Holds the raw TTA
// header bytes plus the fields decoded from them by read().
class TrueAudio::Properties::PropertiesPrivate
{
public:
  PropertiesPrivate(const ByteVector &d, long length, ReadStyle s) :
    data(d),
    streamLength(length),
    style(s),
    version(0),
    length(0),
    bitrate(0),
    sampleRate(0),
    channels(0),
    bitsPerSample(0) {}

  ByteVector data;      // raw header bytes, expected to start with "TTA"
  long streamLength;    // total audio stream length in bytes
  ReadStyle style;
  int version;          // TTA format version digit
  int length;           // duration in seconds
  int bitrate;          // average bitrate in kb/s
  int sampleRate;       // in Hz
  int channels;
  int bitsPerSample;
};
////////////////////////////////////////////////////////////////////////////////
// public members
////////////////////////////////////////////////////////////////////////////////
// Parse the TTA header immediately on construction; all accessors below
// just read the decoded fields.
TrueAudio::Properties::Properties(const ByteVector &data, long streamLength, ReadStyle style) : AudioProperties(style)
{
  d = new PropertiesPrivate(data, streamLength, style);
  read();
}

TrueAudio::Properties::~Properties()
{
  delete d;
}

// Duration in seconds.
int TrueAudio::Properties::length() const
{
  return d->length;
}

// Average bitrate in kb/s (0 if the length could not be determined).
int TrueAudio::Properties::bitrate() const
{
  return d->bitrate;
}

int TrueAudio::Properties::sampleRate() const
{
  return d->sampleRate;
}

int TrueAudio::Properties::bitsPerSample() const
{
  return d->bitsPerSample;
}

int TrueAudio::Properties::channels() const
{
  return d->channels;
}

// TTA format version digit from the header.
int TrueAudio::Properties::ttaVersion() const
{
  return d->version;
}
////////////////////////////////////////////////////////////////////////////////
// private members
////////////////////////////////////////////////////////////////////////////////
// Decode the TTA header layout:
//   "TTA" + version digit, 2 reserved bytes, then little-endian
//   channels(2), bitsPerSample(2), sampleRate(4), sample count(4).
void TrueAudio::Properties::read()
{
  if(!d->data.startsWith("TTA"))
    return;

  int pos = 3;

  d->version = d->data[pos] - '0';
  pos += 1 + 2; // skip version digit and 2 reserved bytes

  d->channels = d->data.mid(pos, 2).toShort(false);
  pos += 2;

  d->bitsPerSample = d->data.mid(pos, 2).toShort(false);
  pos += 2;

  d->sampleRate = d->data.mid(pos, 4).toUInt(false);
  pos += 4;

  unsigned long samples = d->data.mid(pos, 4).toUInt(false);

  // Guard against division by zero on a corrupt header that reports a
  // zero sample rate (the original crashed here).
  d->length = d->sampleRate > 0 ? samples / d->sampleRate : 0;

  d->bitrate = d->length > 0 ? ((d->streamLength * 8L) / d->length) / 1000 : 0;
}
|
#include <gtest/gtest.h>
#include "hydrogen.hpp"
//#include <unsupported/Eigen/MatrixFunctions>
// Given the function, return the graidents
// w.r.t. the coordinates
// Numeric gradient of `func` at `p` by the five-point central-difference
// stencil:  f'(x) ~ (-f(x+2h) + 8 f(x+h) - 8 f(x-h) + f(x-2h)) / (12h).
// `eps` is the step size h; it is a new optional parameter defaulting to
// the previously hard-coded 1e-6, so existing two-argument callers are
// unchanged.
template<typename T, typename Fn>
Eigen::Matrix<T, 1, 3> gradient(const Eigen::Matrix<T, 1, 3>& p, Fn func,
                                T eps = T(1e-6)) {
    // stencil coefficients for offsets +2h, +h, -h, -2h
    T coeff[] = {-1, 8, -8, 1};
    T ds[] = {+2, +1, -1, -2};
    // directional derivative along unit-step dp (|dp| = eps)
    auto dfunc = [&](const Eigen::Matrix<T, 1, 3>& dp){
        T ret = 0.0;
        for(int i=0; i<4; i++) {
            ret += coeff[i]*func(p+ds[i]*dp);
        }
        return ret/(12.0*eps);
    };
    Eigen::Matrix<T, 1, 3> dx;
    dx << eps, 0.0, 0.0;
    Eigen::Matrix<T, 1, 3> dy;
    dy << 0.0, eps, 0.0;
    Eigen::Matrix<T, 1, 3> dz;
    dz << 0.0, 0.0, eps;
    Eigen::Matrix<T, 1, 3> ret;
    ret << dfunc(dx), dfunc(dy), dfunc(dz);
    return ret;
}
// Given the function, return the laplacian
// w.r.t. the coordinates
// Numeric Laplacian of `func` at `p` by the 7-point second-difference
// stencil: sum over axes of (f(p+h e_i) + f(p-h e_i) - 2 f(p)) / h^2.
// `eps` is the step size h; a new optional parameter defaulting to the
// previously hard-coded 1e-6, so existing two-argument callers are
// unchanged.
template<typename T, typename Fn>
T laplace(const Eigen::Matrix<T, 1, 3>& p, Fn func, T eps = T(1e-6)) {
    Eigen::Matrix<T, 1, 3> dx;
    dx << eps, 0.0, 0.0;
    Eigen::Matrix<T, 1, 3> dy;
    dy << 0.0, eps, 0.0;
    Eigen::Matrix<T, 1, 3> dz;
    dz << 0.0, 0.0, eps;
    return (func(p+dx) + func(p-dx) +
            func(p+dy) + func(p-dy) +
            func(p+dz) + func(p-dz) - 6*func(p))/(eps*eps);
}
// Sanity-check the stencils on f(q) = |q|^2, whose gradient is 2q and
// whose Laplacian is the constant 6.
TEST(SimpleGradientTEST, Gradient) {
    const auto f = [](const Eigen::Matrix<double, 1, 3>& q) {
        return q.array().pow(2).sum();
    };
    const auto analytic_grad = [](const Eigen::Matrix<double, 1, 3>& q) {
        Eigen::Matrix<double, 1, 3> g = 2.0*q;
        return g;
    };
    const Eigen::Matrix<double, 1, 3> q = Eigen::Matrix<double, 1, 3>::Random();
    const auto numeric = gradient(q, f);
    const auto analytic = analytic_grad(q);
    ASSERT_NEAR((numeric - analytic).norm(), 0.0, 1e-6);
}
TEST(SimpleLaplacianTEST, Laplacian) {
    const auto f = [](const Eigen::Matrix<double, 1, 3>& q) {
        return q.array().pow(2).sum();
    };
    const Eigen::Matrix<double, 1, 3> q = Eigen::Matrix<double, 1, 3>::Random();
    ASSERT_NEAR(laplace(q, f), 6.0, 1e-2);
}
// Compare AtomicWaveFn's analytic gradient/Laplacian with the numeric
// stencils.  Stack allocation replaces new/delete: the original leaked
// the wave function whenever ASSERT_NEAR failed and returned early.
TEST(AtomicWaveFn, Gradient) {
    AtomicWaveFn<double> wfn(0.5, 1.0);
    auto func = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(p);
    };
    Eigen::Matrix<double, 1, 3> p = Eigen::Matrix<double, 1, 3>::Random();
    auto nderv = gradient(p, func);
    auto derv = wfn.grad(p);
    ASSERT_NEAR((nderv - derv).norm(), 0.0, 1e-6);
}
TEST(AtomicWaveFn, Laplacian) {
    AtomicWaveFn<double> wfn(0.5, 1.0);
    auto func = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(p);
    };
    Eigen::Matrix<double, 1, 3> p = Eigen::Matrix<double, 1, 3>::Random();
    auto nderv2 = laplace(p, func);
    auto derv2 = wfn.laplace(p);
    ASSERT_NEAR(nderv2, derv2, 1e-2);
}
// Same checks for the valence-bond wave function.  Stack allocation
// replaces new/delete (the original leaked on early ASSERT return).
TEST(VBWaveFn, Gradient) {
    Eigen::Matrix<double, 1, 3> r1 = Eigen::Matrix<double, 1, 3>::Random();
    Eigen::Matrix<double, 1, 3> r2 = Eigen::Matrix<double, 1, 3>::Random();
    VBWaveFn<double> wfn(0.5, 1.0, r1, r2);
    auto func = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(p);
    };
    Eigen::Matrix<double, 1, 3> p = Eigen::Matrix<double, 1, 3>::Random();
    auto nderv = gradient(p, func);
    auto derv = wfn.grad(p);
    ASSERT_NEAR((nderv - derv).norm(), 0.0, 1e-6);
}
TEST(VBWaveFn, Laplacian) {
    Eigen::Matrix<double, 1, 3> r1 = Eigen::Matrix<double, 1, 3>::Random();
    Eigen::Matrix<double, 1, 3> r2 = Eigen::Matrix<double, 1, 3>::Random();
    VBWaveFn<double> wfn(0.5, 1.0, r1, r2);
    auto func = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(p);
    };
    Eigen::Matrix<double, 1, 3> p = Eigen::Matrix<double, 1, 3>::Random();
    auto nderv2 = laplace(p, func);
    auto derv2 = wfn.laplace(p);
    ASSERT_NEAR(nderv2, derv2, 1e-2);
}
// Same checks for the molecular-orbital wave function.  Stack allocation
// replaces new/delete (the original leaked on early ASSERT return).
TEST(MOWaveFn, Gradient) {
    Eigen::Matrix<double, 1, 3> r1 = Eigen::Matrix<double, 1, 3>::Random();
    Eigen::Matrix<double, 1, 3> r2 = Eigen::Matrix<double, 1, 3>::Random();
    MOWaveFn<double> wfn(0.5, 1.0, r1, r2);
    auto func = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(p);
    };
    Eigen::Matrix<double, 1, 3> p = Eigen::Matrix<double, 1, 3>::Random();
    auto nderv = gradient(p, func);
    auto derv = wfn.grad(p);
    ASSERT_NEAR((nderv - derv).norm(), 0.0, 1e-6);
}
TEST(MOWaveFn, Laplacian) {
    Eigen::Matrix<double, 1, 3> r1 = Eigen::Matrix<double, 1, 3>::Random();
    Eigen::Matrix<double, 1, 3> r2 = Eigen::Matrix<double, 1, 3>::Random();
    MOWaveFn<double> wfn(0.5, 1.0, r1, r2);
    auto func = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(p);
    };
    Eigen::Matrix<double, 1, 3> p = Eigen::Matrix<double, 1, 3>::Random();
    auto nderv2 = laplace(p, func);
    auto derv2 = wfn.laplace(p);
    ASSERT_NEAR(nderv2, derv2, 1e-2);
}
// The Jastrow factor is a two-electron function; check each partial
// gradient/Laplacian separately.  Stack allocation replaces new/delete
// (the original leaked on early ASSERT return).
TEST(JastrowWfn, Gradient) {
    Eigen::Matrix<double, 1, 3> r1 = Eigen::Matrix<double, 1, 3>::Random();
    Eigen::Matrix<double, 1, 3> r2 = Eigen::Matrix<double, 1, 3>::Random();
    JastrowWfn<double> wfn(2.0);
    auto func1 = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(p, r2);
    };
    auto func2 = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(r1, p);
    };
    Eigen::Matrix<double, 1, 3> derv_f1, derv_f2;
    auto nderv_f1 = gradient(r1, func1);
    auto nderv_f2 = gradient(r2, func2);
    std::tie(derv_f1, derv_f2) = wfn.grad(r1, r2);
    ASSERT_NEAR((nderv_f1 - derv_f1).norm(), 0.0, 1e-6);
    ASSERT_NEAR((nderv_f2 - derv_f2).norm(), 0.0, 1e-6);
}
TEST(JastrowWfn, Laplacian) {
    Eigen::Matrix<double, 1, 3> r1 = Eigen::Matrix<double, 1, 3>::Random();
    Eigen::Matrix<double, 1, 3> r2 = Eigen::Matrix<double, 1, 3>::Random();
    JastrowWfn<double> wfn(2.0);
    auto func1 = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(p, r2);
    };
    auto func2 = [&](const Eigen::Matrix<double, 1, 3>& p) {
        return wfn.value(r1, p);
    };
    auto nderv2_f1 = laplace(r1, func1);
    auto nderv2_f2 = laplace(r2, func2);
    double derv2_f1, derv2_f2;
    std::tie(derv2_f1, derv2_f2) = wfn.laplace(r1, r2);
    ASSERT_NEAR(nderv2_f1, derv2_f1, 1e-2);
    ASSERT_NEAR(nderv2_f2, derv2_f2, 1e-2);
}
// Standard GoogleTest entry point.
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
|
/**
Copyright 2009-2018 National Technology and Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA-0003525, the U.S. Government
retains certain rights in this software.
Sandia National Laboratories is a multimission laboratory managed and operated
by National Technology and Engineering Solutions of Sandia, LLC., a wholly
owned subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy's National Nuclear Security Administration under contract DE-NA0003525.
Copyright (c) 2009-2018, NTESS
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions? Contact sst-macro-help@sandia.gov
*/
#include <sstmac/software/ami/ami.h>
#include <sprockit/errors.h>
#include <sstmac/hardware/common/flow.h>
namespace sstmac {
namespace ami {
#define enumcase(x) case x: return #x
/// Convert an event category to its enumerator name.
/// Throws sprockit::IllformedError on an out-of-range value.
const char*
tostr(CAT cat)
{
  switch(cat) {
    enumcase(COMPUTE);
    enumcase(COMM);
    enumcase(DISP);
    enumcase(QUERY);
    enumcase(HW);
  }
  spkt_throw_printf(sprockit::IllformedError,
    "Invalid AMI_CAT %d received", cat);
  // unreachable: the macro above throws; the explicit return matches the
  // sibling tostr overloads and silences missing-return warnings
  return 0;
}
/// Convert a communication function ID to its enumerator name.
/// Throws sprockit::IllformedError on an out-of-range value.
const char*
tostr(COMM_FUNC func)
{
  switch(func) {
    enumcase(COMM_SEND);
    enumcase(COMM_PMI_SEND);
  }
  spkt_throw_printf(sprockit::IllformedError,
    "Invalid AMI_COMM_FUNC %d received", func);
  // unreachable: the macro above throws; matches the sibling overloads
  return 0;
}
/// Convert a service function ID to its enumerator name.
/// Throws sprockit::IllformedError on an out-of-range value.
const char*
tostr(SERVICE_FUNC func)
{
  switch(func) {
    enumcase(COMP_DISKACCESS);
  }
  spkt_throw_printf(sprockit::IllformedError,
    "Invalid AMI_COMP_FUNC %d received", func);
  return 0; // unreachable; keeps the compiler happy
}
/// Convert a computation function ID to its enumerator name.
/// Throws sprockit::IllformedError on an out-of-range value.
const char*
tostr(COMP_FUNC func)
{
  switch(func) {
    enumcase(COMP_TIME);
    enumcase(COMP_INSTR);
    enumcase(COMP_EIGER);
  }
  spkt_throw_printf(sprockit::IllformedError,
    "Invalid AMI_COMP_FUNC %d received", func);
  return 0; // unreachable; keeps the compiler happy
}
}
}
|
/*************************************************************************
* Copyright (c) 2015, Synopsys, Inc. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following conditions are *
* met: *
* *
* 1. Redistributions of source code must retain the above copyright *
* notice, this list of conditions and the following disclaimer. *
* *
* 2. Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in the *
* documentation and/or other materials provided with the distribution. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
*************************************************************************/
// **********
//
// ui_cm_get.cxx - CM Get User Interface Operation Class.
//
// **********
#include "cLibraryFunctions.h"
#include "machdep.h"
#include "messages.h"
#include "customize.h"
#include "prompt.h"
#include "../include/ui_cm_oper.h"
#include "../include/ui_cm_get.h"
// Needed to send cmds to client via DISui.
#include "../../DIS_ui/interface.h"
extern Application* DISui_app;
// Constructor.
// Constructor.  A CM "get" checks modules out, so the base-class
// preconditions about modules already being checked out do not apply.
ui_cm_get::ui_cm_get(const char* name) : ui_cm_oper(name),
    needs_comment_string_(false), has_comment_string_(false),
    comment_string_(0)
{
    // Override settings from base class.
    // We don't want the module to be checked out before we check it out.
    module_must_be_checked_out_ = false;
    // We don't need to confirm each module before checking it out.
    module_must_be_confirmed_ = false;
    module_has_been_confirmed_ = true;
}

// Destructor.  comment_string_ is a genString member; nothing to free here.
ui_cm_get::~ui_cm_get()
{
}
// Check pre conditions.
// Run the base-class precondition checks; when they pass, also record
// whether the site configuration requires a check-out comment.
bool ui_cm_get::CheckPreConditions(const symbolArr& file_syms)
{
    const bool ok = ui_cm_oper::CheckPreConditions(file_syms);
    if (ok)
        needs_comment_string_ = customize::configurator_get_comments();
    return ok;
}
//----------------------- Protected Methods ------------------------------//
// See if the get operation needs to a comment.
// A module needs more info when a comment is required but none has been
// supplied yet; in that case force re-confirmation of the module.
bool ui_cm_get::NeedsMoreInfo(projModulePtr mod)
{
    if (!(needs_comment_string_ && !has_comment_string_))
        return false;
    module_has_been_confirmed_ = false;
    return true;
}
// Do whatever to get more info.
// Prompt the user for a check-out comment.
// Fix: the prompt result was stored in a `bool`, so the comparison
// `status == 2` (OK-For-All) could never be true and the apply-to-all
// branch was dead.  The result is now kept in an int; the bool return
// value (false only on Cancel) is unchanged.
bool ui_cm_get::GetMoreInfo(projModulePtr mod)
{
    bool status = true;
    if (needs_comment_string_) {
        genString prompt = "Please enter a brief explanation of why you are checking out\n";
        prompt += realOSPATH(mod->get_name());

        // If any modules are left in the module array,
        // give the user the option to apply this comment
        // to all remaining modules.
        // dis_prompt_string is assumed to return the 1-based index of the
        // button pressed, 0 for Cancel -- matches the original comments.
        int answer;
        if (modules_.size() > 0)
            answer = dis_prompt_string("CM Check Out", "OK",
                                       "OK For All", "Cancel", prompt,
                                       comment_string_);
        else
            answer = dis_prompt_string("CM Check Out", "OK",
                                       "Cancel", prompt, comment_string_);

        if (answer == 1) {        // OK button selected.
            module_has_been_confirmed_ = true;
        } else if (answer == 2) { // OK-For-All button selected.
            has_comment_string_ = true;
            module_has_been_confirmed_ = true;
        }
        status = (answer != 0);   // false only when the user cancelled
    }
    return (status);
}
// Perform the CM Get operation on the module.
// Perform the CM Get (check-out) on a single module, reporting progress
// and the outcome through dis_message.
bool ui_cm_get::OperateOnModule(projModule* mod)
{
    dis_message(NULL, MSG_INFORM,
                "Checking out '%s' ...", mod->get_name() );

    symbolPtr sym = mod;
    projModule* gotten = needs_comment_string_
        ? projModule::get_module_with_data(sym, comment_string_)
        : projModule::get_module(sym);
    const bool status = (gotten != NULL);

    if (status)
        dis_message(NULL, MSG_INFORM,
                    "%s check out completed.",
                    mod->get_name() );
    else
        dis_message(NULL, MSG_INFORM,
                    "Problem checking out '%s'.",
                    mod->get_name() );
    return (status);
}
/********** end of ui_cm_get.cxx **********/
|
/*********************************************************************
* This file is distributed as part of the C++ port of the APRIL tags
* library. The code is licensed under GPLv2.
*
* Original author: Edwin Olson <ebolson@umich.edu>
* C++ port and modifications: Matt Zucker <mzucker1@swarthmore.edu>
********************************************************************/
#include "Geometry.h"
#include "MathUtil.h"
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
// Weighted 2D point: (x, y) with weight w, used for weighted line fits.
XYW::XYW() {}
XYW::XYW(at::real xx, at::real yy, at::real ww): x(xx), y(yy), w(ww) {}
// The (x, y) part without the weight.
at::Point XYW::point() const { return at::Point(x,y); }
// Build a uniform spatial hash over the rectangle [x0,x1] x [y0,y1] with
// square cells of side metersPerCell.  The upper corner (x1, y1) is
// rounded up to a whole number of cells.
Gridder::Gridder(at::real x0, at::real y0, at::real x1, at::real y1, at::real metersPerCell) {
    this->x0 = x0;
    this->y0 = y0;
    this->metersPerCell = metersPerCell;
    // +1 so points exactly on the far edge still fall inside a cell
    width = (int) ((x1 - x0)/metersPerCell + 1);
    height = (int) ((y1 - y0)/metersPerCell + 1);
    this->x1 = x0 + metersPerCell*width;
    this->y1 = y0 + metersPerCell*height;
    // one intrusive singly-linked Segment list head per cell
    cells.clear();
    cells.resize(width*height);
}
// Row-major cell index for cell coordinates (x, y).
int Gridder::sub2ind(int x, int y) const {
    return y*width + x;
}

// Insert segment s into the cell containing (x, y) by pushing it onto the
// cell's intrusive list (s->nextGrid is the link).  Points outside the
// grid are silently dropped.
void Gridder::add(at::real x, at::real y, Segment* s) {
    int ix = (int) ((x - x0)/metersPerCell);
    int iy = (int) ((y - y0)/metersPerCell);
    if (ix >=0 && iy >=0 && ix < width && iy < height) {
        size_t idx = sub2ind(ix,iy);
        s->nextGrid = cells[idx];
        cells[idx] = s;
    }
}
// Collect every segment registered in a cell touched by the square
// [x-range, x+range] x [y-range, y+range].  Cell granularity means the
// results may include segments slightly outside the query range.
void Gridder::find(at::real x, at::real y, at::real range, SegmentArray& results) const {

    results.clear();

    int ix0 = (int) ((x - range - x0)/metersPerCell);
    int iy0 = (int) ((y - range - y0)/metersPerCell);
    int ix1 = (int) ((x + range - x0)/metersPerCell);
    int iy1 = (int) ((y + range - y0)/metersPerCell);

    // Clamp the cell range once up front instead of bounds-checking every
    // cell inside the loop (identical results, less per-cell work).
    if (ix0 < 0) { ix0 = 0; }
    if (iy0 < 0) { iy0 = 0; }
    if (ix1 > width-1) { ix1 = width-1; }
    if (iy1 > height-1) { iy1 = height-1; }

    for (int iy=iy0; iy<=iy1; ++iy) {
        for (int ix=ix0; ix<=ix1; ++ix) {
            // Walk the cell's intrusive singly-linked segment list.
            for (Segment* s = cells[sub2ind(ix,iy)]; s; s = s->nextGrid) {
                results.push_back(s);
            }
        }
    }
}
bool intersect(const GLineSegment2D& g1,
const GLineSegment2D& g2,
at::Point& pinter) {
Segment s1, s2;
s1.x0 = g1.p1.x;
s1.x1 = g1.p2.x;
s1.y0 = g1.p1.y;
s1.y1 = g1.p2.y;
s2.x0 = g2.p1.x;
s2.x1 = g2.p2.x;
s2.y0 = g2.p1.y;
s2.y1 = g2.p2.y;
return intersect(&s1, &s2, pinter);
}
// Intersects the infinite lines through s1 and s2 by solving a 2x2 linear
// system for the parameter along s1. Returns false for (near) parallel
// lines. Note: the returned point may lie outside either segment's extent.
bool intersect(const Segment* s1,
               const Segment* s2,
               at::Point& pinter) {
  // system matrix built from the two direction vectors
  at::real m00 = s1->x1-s1->x0;
  at::real m01 = s2->x0-s2->x1;
  at::real m10 = s1->y1-s1->y0;
  at::real m11 = s2->y0-s2->y1;
  at::real det = m00*m11 - m01*m10;
  // near-zero determinant: parallel or degenerate segments
  if (MathUtil::fabs(det) < 0.0000000001) {
    return false;
  }
  // only the first row of the inverse is needed (parameter on s1)
  at::real i00 = m11/det;
  //at::real i11 = m00/det;
  at::real i01 = -m01/det;
  //at::real i10 = -m10/det;
  at::real b00 = s2->x0 - s1->x0;
  at::real b10 = s2->y0 - s1->y0;
  at::real x00 = i00*b00 + i01*b10;
  // evaluate s1's parametric form at the solved parameter
  pinter.x = m00*x00 + s1->x0;
  pinter.y = m10*x00 + s1->y0;
  return true;
}
// Euclidean distance between two points.
at::real pdist(const at::Point& p1, const at::Point& p2) {
  at::Point pd = p2-p1;
  return sqrt(pd.dot(pd));
}
// Euclidean distance between p and the integer coordinate (x, y).
at::real pdist(const at::Point& p, int x, int y) {
  return pdist(p, at::Point(x,y));
}
// Uninitialized segment; endpoints are assigned later.
GLineSegment2D::GLineSegment2D() {}
GLineSegment2D::GLineSegment2D(const at::Point& pp1, const at::Point& pp2):
  p1(pp1), p2(pp2) {}
// Segment length = distance between its two endpoints.
at::real GLineSegment2D::length() const {
  return pdist(p1, p2);
}
// Small local helper: x squared.
static at::real square(at::real x) { return x*x; }
// Fits a line to weighted points by total least squares: the dominant
// direction of the weighted covariance matrix (closed-form 2x2
// eigen-direction via atan2) gives the line orientation, and the returned
// segment spans the extremal projections of the inputs onto that line.
// Assumes a non-empty input with positive total weight.
GLineSegment2D lsqFitXYW(const XYWArray& points) {

  // weighted raw moments
  at::real Cxx=0, Cyy=0, Cxy=0, Ex=0, Ey=0, mXX=0, mYY=0, mXY=0, mX=0, mY=0;
  at::real n=0;

  for (size_t i=0; i<points.size(); ++i) {
    const XYW& tp = points[i];
    at::real x = tp.x;
    at::real y = tp.y;
    at::real alpha = tp.w;
    mY += y*alpha;
    mX += x*alpha;
    mYY += y*y*alpha;
    mXX += x*x*alpha;
    mXY += x*y*alpha;
    n += alpha;
  }

  // weighted mean and central (co)variances
  Ex = mX/n;
  Ey = mY/n;
  Cxx = mXX/n - square(mX/n);
  Cyy = mYY/n - square(mY/n);
  Cxy = mXY/n - (mX/n)*(mY/n);

  // angle of the dominant covariance direction
  at::real phi = 0.5*atan2(-2*Cxy,(Cyy-Cxx));
  //at::real rho = Ex*cos(phi) + Ey*sin(phi);

  // line through the centroid with unit direction (dx, dy)
  at::real dx = -sin(phi);
  at::real dy = cos(phi);
  at::real px = Ex;
  at::real py = Ey;

  // project every point onto the line to find the segment extent
  at::real maxcoord = -DBL_MAX;
  at::real mincoord = DBL_MAX;

  for (size_t i=0; i<points.size(); ++i) {
    const XYW& tp = points[i];
    at::real coord = dx*(tp.x-px) + dy*(tp.y-py);
    maxcoord = std::max(coord, maxcoord);
    mincoord = std::min(coord, mincoord);
  }

  at::Point p0(px, py);
  at::Point dd(dx, dy);

  return GLineSegment2D( p0 + mincoord*dd, p0 + maxcoord*dd );
}
// Absolute area of the polygon p[0..n-1] via the shoelace formula; returns
// 0 for degenerate polygons with fewer than 3 vertices.
at::real area(const at::Point* p, size_t n) {
  if (n < 3) { return 0; }
  at::real twiceSigned = 0;
  for (size_t i=0; i<n; ++i) {
    // cross product of consecutive vertices (edge i -> i+1, wrapping)
    const at::Point& a = p[i];
    const at::Point& b = p[(i+1)%n];
    twiceSigned += a.x * b.y - a.y * b.x;
  }
  const at::real magnitude = (twiceSigned < 0) ? -twiceSigned : twiceSigned;
  return at::real(0.5) * magnitude;
}
// Absolute polygon area via the shoelace formula — cv::Point overload.
// Mirrors area(const at::Point*, size_t); keep the two in sync.
at::real area(const cv::Point* p, size_t n) {
  if (n < 3) { return 0; }
  at::real a = 0;
  for (size_t i=0; i<n; ++i) {
    const cv::Point& p0 = p[i];
    const cv::Point& p1 = p[(i+1)%n];
    // twice the signed area contribution of edge p0 -> p1
    a += p0.x * p1.y - p0.y * p1.x;
  }
  if (a<0) { a = -a; }
  a *= at::real(0.5);
  return a;
}
// Default-constructed Quad: members are uninitialized until assigned.
Quad::Quad() {}
// Area of the quad via the polygon shoelace formula on its 4 corners.
at::real Quad::area() const {
  return ::area(p, 4);
}
// Builds a quad from 4 corners, the camera optical center, and the
// observed perimeter; precomputes the square->quad homography.
Quad::Quad(const at::Point pp[4], const at::Point& oc, at::real op):
  opticalCenter(oc), observedPerimeter(op)
{
  for (size_t i=0; i<4; ++i) { p[i] = pp[i]; }
  recomputeHomography();
}
// Rebuilds H, the homography mapping the canonical square [-1,1]^2 onto
// the quad's corners expressed relative to the optical center.
void Quad::recomputeHomography() {
  // canonical square corners, in the same winding order as p[]
  at::Point src[4] = {
    at::Point(-1, -1),
    at::Point( 1, -1),
    at::Point( 1, 1),
    at::Point(-1, 1),
  };
  at::Point dst[4];
  // express the observed corners relative to the optical center
  for (size_t i=0; i<4; ++i) {
    dst[i] = p[i] - opticalCenter;
  }
  cv::Mat_<at::Point> srcmat(4, 1, src);
  cv::Mat_<at::Point> dstmat(4, 1, dst);
  H = cv::findHomography(srcmat, dstmat);
}
// Maps a point from canonical square coordinates [-1,1]^2 into the image.
at::Point Quad::interpolate(const at::Point& p) const {
  return interpolate(p.x, p.y);
}
// Applies the homography H (plus the optical-center offset) to (x, y).
at::Point Quad::interpolate(at::real x, at::real y) const {
  // perspective divide by the third homogeneous coordinate
  at::real z = H[2][0]*x + H[2][1]*y + H[2][2];
  return at::Point( (H[0][0]*x + H[0][1]*y + H[0][2])/z + opticalCenter.x,
                    (H[1][0]*x + H[1][1]*y + H[1][2])/z + opticalCenter.y );
}
// Same mapping but from unit-square coordinates [0,1]^2.
at::Point Quad::interpolate01(const at::Point& p) const {
  return interpolate01(p.x, p.y);
}
at::Point Quad::interpolate01(at::real x, at::real y) const {
  return interpolate(2*x-1, 2*y-1);
}
|
#ifdef _WIN32
#include "platform/win32/win32.h"
#include "resource.h"
namespace util {
// Loads the application's icon group resource and caches one HICON per
// supported size, plus the screen's scaling factor and refresh rate.
WindowsPlatform::WindowsPlatform() {
	// Get the icon directory
	PBYTE iconDirectory = getIconDirectory(WIN32_ICON_MAIN);

	// Get the default device info
	m_screenScalingFactor = getScreenScalingFactor(nullptr);
	m_refreshRate = getRefreshRate(nullptr);

	// Store each icon. Fix: this array previously declared 6 slots with only
	// 5 initializers, so the trailing zero element requested a 0x0 icon.
	std::array<int, 5> icons = {16, 32, 48, 64, 128};
	for (auto& it : icons) {
		HICON icon = getIconFromIconDirectory(iconDirectory, it);
		m_hIcons.push_back(icon);
	}
}
// Destroys every icon handle created in the constructor.
WindowsPlatform::~WindowsPlatform() {
	for (auto& icon : m_hIcons) {
		if (icon != nullptr) DestroyIcon(icon);
	}
	m_hIcons.clear();
}
// Picks the small/big window icons whose cached index best matches the
// current DPI scaling factor and applies them via WM_SETICON.
void WindowsPlatform::setIcon(const sf::WindowHandle& inHandle) {
	// base index derived from the scaling factor, clamped at zero
	const float base = std::max(std::ceil(m_screenScalingFactor - 1.0f), 0.0f);
	const float lastIndex = static_cast<float>(m_hIcons.size()) - 1.0f;
	const std::size_t indexSmallIcon = static_cast<std::size_t>(std::min(base, lastIndex));
	const std::size_t indexBigIcon = static_cast<std::size_t>(std::min(base + 1.0f, lastIndex));
	if (m_hIcons[indexBigIcon])
		SendMessage(inHandle, WM_SETICON, ICON_BIG, (LPARAM)m_hIcons[indexBigIcon]);
	if (m_hIcons[indexSmallIcon])
		SendMessage(inHandle, WM_SETICON, ICON_SMALL, (LPARAM)m_hIcons[indexSmallIcon]);
}
// Switches the window between a centered, decorated window and a
// borderless fullscreen layout at the given resolution.
void WindowsPlatform::toggleFullscreen(const sf::WindowHandle& inHandle, const sf::Uint32 inStyle,
	const bool inWindowed, const sf::Vector2u& inResolution) {
	const DWORD win32Style = sfmlWindowStyleToWin32WindowStyle(inStyle);
	const UINT swpFlags = SWP_DRAWFRAME | SWP_FRAMECHANGED;
	const int clientWidth = static_cast<int>(inResolution.x);
	const int clientHeight = static_cast<int>(inResolution.y);
	if (inWindowed) {
		// Windowed: center the client area on the focused screen, then grow
		// the rect to account for the frame/titlebar.
		HDC screenDC = GetDC(inHandle);
		const int screenWidth = GetDeviceCaps(screenDC, HORZRES);
		const int screenHeight = GetDeviceCaps(screenDC, VERTRES);
		ReleaseDC(inHandle, screenDC);
		const int left = (screenWidth - clientWidth) / 2;
		const int top = (screenHeight - clientHeight) / 2;
		RECT frame = {0, 0, clientWidth, clientHeight};
		AdjustWindowRect(&frame, win32Style, false);
		const int outerWidth = frame.right - frame.left;
		const int outerHeight = frame.bottom - frame.top;
		SetWindowLongPtr(inHandle, GWL_STYLE, win32Style);
		SetWindowLongPtr(inHandle, GWL_EXSTYLE, 0);
		SetWindowPos(inHandle, nullptr, left, top, outerWidth, outerHeight, swpFlags);
	} else {
		// Fullscreen: position once before the style change (prevents the
		// border showing in the corner) and once after (cleans up the rect).
		// Note: the double SetWindowPos isn't very effective on slower machines.
		SetWindowPos(inHandle, HWND_TOP, 0, 0, clientWidth, clientHeight, swpFlags);
		SetWindowLongPtr(inHandle, GWL_EXSTYLE, WS_EX_APPWINDOW);
		SetWindowLongPtr(inHandle, GWL_STYLE, win32Style);
		SetWindowPos(inHandle, HWND_TOP, 0, 0, clientWidth, clientHeight, swpFlags);
	}
	ShowWindow(inHandle, SW_SHOW);
}
// Ratio of physical to logical desktop height (the Windows DPI scale),
// computed once and then served from the cached member.
float WindowsPlatform::getScreenScalingFactor(const sf::WindowHandle& inHandle) {
	UNUSED(inHandle);
	if (m_screenScalingFactor == 0.0f) {
		HDC desktopDC = GetDC(nullptr);
		const int logicalHeight = GetDeviceCaps(desktopDC, VERTRES);
		const int physicalHeight = GetDeviceCaps(desktopDC, DESKTOPVERTRES);
		m_screenScalingFactor =
			static_cast<float>(physicalHeight) / static_cast<float>(logicalHeight);
		ReleaseDC(nullptr, desktopDC);
	}
	return m_screenScalingFactor;
}
// Primary display's vertical refresh rate in Hz, cached after first query.
int WindowsPlatform::getRefreshRate(const sf::WindowHandle& inHandle) {
	UNUSED(inHandle);
	if (m_refreshRate == 0) {
		HDC desktopDC = GetDC(nullptr);
		m_refreshRate = GetDeviceCaps(desktopDC, VREFRESH);
		ReleaseDC(nullptr, desktopDC);
	}
	return m_refreshRate;
}
// Locks and returns the raw bytes of the RT_GROUP_ICON resource with the
// given id from the executable's own module.
// NOTE(review): FindResource/LoadResource results are unchecked — a
// missing resource makes this return nullptr; callers appear to assume the
// icon resource always exists. TODO confirm.
PBYTE WindowsPlatform::getIconDirectory(const int inResourceId) {
	HMODULE module = GetModuleHandle(nullptr);
	HRSRC resource = FindResource(module, MAKEINTRESOURCE(inResourceId), RT_GROUP_ICON);
	HGLOBAL resourceData = LoadResource(module, resource);
	return (PBYTE)LockResource(resourceData);
}
// Creates an HICON of the requested size by looking up the best-matching
// entry in an icon directory and instantiating that RT_ICON resource.
HICON WindowsPlatform::getIconFromIconDirectory(PBYTE inIconDirectory, const uint inSize) {
	HMODULE module = GetModuleHandle(nullptr);
	// pick the directory entry that best matches inSize x inSize
	int resourceId =
		LookupIconIdFromDirectoryEx(inIconDirectory, TRUE, inSize, inSize, LR_DEFAULTCOLOR);
	HRSRC resource = FindResource(module, MAKEINTRESOURCE(resourceId), RT_ICON);
	HGLOBAL resourceData = LoadResource(module, resource);
	PBYTE iconBytes = (PBYTE)LockResource(resourceData);
	DWORD iconByteCount = SizeofResource(module, resource);
	// 0x00030000 selects the Windows 3.x icon resource format version
	return CreateIconFromResourceEx(iconBytes, iconByteCount, TRUE, 0x00030000, inSize, inSize,
		LR_DEFAULTCOLOR);
}
// Maps SFML window style flags onto the equivalent Win32 WS_* style bits.
DWORD WindowsPlatform::sfmlWindowStyleToWin32WindowStyle(const sf::Uint32 inStyle) {
	if (inStyle == sf::Style::None || inStyle == sf::Style::Fullscreen) {
		// borderless popup for fullscreen / fully undecorated windows
		return WS_VISIBLE | WS_POPUP | WS_CLIPCHILDREN | WS_CLIPSIBLINGS;
	}
	DWORD style = WS_VISIBLE;
	if (inStyle & sf::Style::Titlebar) style |= WS_CAPTION | WS_MINIMIZEBOX;
	if (inStyle & sf::Style::Resize) style |= WS_THICKFRAME | WS_MAXIMIZEBOX;
	if (inStyle & sf::Style::Close) style |= WS_SYSMENU;
	return style;
}
} // namespace util
#endif // _WIN32
|
//
// VulkanBackend.cpp
// MNN
//
// Created by MNN on 2019/01/31.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "VulkanBackend.hpp"
#include <mutex>
#include "Execution.hpp"
#include "Macro.h"
#include "Tensor.hpp"
#include "TensorUtils.hpp"
#include "VulkanDevice.hpp"
#include "VulkanImageConverter.hpp"
#include "VulkanInstance.hpp"
//#define MNN_OPEN_TIME_TRACE
#include "AutoTime.hpp"
#ifdef MNN_USE_NEON
#include <arm_neon.h>
#endif
namespace MNN {
static std::map<OpType, VulkanBackend::Creator*>* gCreator = nullptr;
// –––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
// Creator
// –––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
// Lazily creates the global OpType -> Creator registry. Intentionally
// never freed; it lives for the whole process lifetime.
static inline std::map<OpType, VulkanBackend::Creator*>* getCreatorMap() {
    if (nullptr == gCreator) {
        gCreator = new std::map<OpType, VulkanBackend::Creator*>();
    }
    return gCreator;
}
// Copies a mapped host-visible staging buffer into a CPU-side tensor.
static void _copyBufferToTensor(const Tensor* dest, const VulkanBuffer* source) {
    auto sourcePtr = source->map();
    auto dataType = dest->getType();
    //TODO: Support other kind of dataType
    MNN_ASSERT(dataType.bits == 32);
    ::memcpy(dest->host<float>(), sourcePtr, dest->size());
    source->unmap();
}
// Element count of `tensor` with its channel dimension padded up to a
// multiple of 4 (the packing the Vulkan compute shaders expect).
static int _getAlignSize(const Tensor* tensor) {
    auto format = TensorUtils::getDescribe(tensor)->dimensionFormat;
    auto elementSize = tensor->elementSize();
    // [TODO] Find a better way
    if (format == MNN_DATA_FORMAT_NCHW) {
        if (tensor->dimensions() >= 2) {
            // scale the element count by aligned-channels / channels
            elementSize = elementSize / tensor->channel() * ALIGN_UP4(tensor->channel());
        }
    } else if (format == MNN_DATA_FORMAT_NHWC) {
        if (tensor->dimensions() >= 3) {
            elementSize = elementSize / tensor->channel() * ALIGN_UP4(tensor->channel());
        }
    }
    return elementSize;
}
// Copies a CPU-side tensor into a mapped host-visible staging buffer.
static void _copyTensorToBuffer(const Tensor* source, const VulkanBuffer* dest) {
    auto destPtr = dest->map();
    auto dataType = source->getType();
    //TODO: Support other kind of dataType
    MNN_ASSERT(dataType.bits == 32);
    ::memcpy(destPtr, source->host<float>(), source->size());
    dest->unmap();
}
// GPU storage for one tensor. NC4HW4 tensors become 3D images of size
// (W, H, ceil(C/4)*N) unless forceBuffer is set; all other layouts get a
// 4-aligned float buffer. `seperate` requests non-pooled (separate) memory.
VulkanTensor::VulkanTensor(const Tensor* shape, const VulkanMemoryPool& pool, bool forceBuffer, bool seperate) {
    auto format = TensorUtils::getDescribe(shape)->dimensionFormat;
    if (MNN_DATA_FORMAT_NC4HW4 == format && !forceBuffer) {
        mImage = std::make_shared<VulkanImage>(pool, seperate,
                                               std::vector<int>{
                                                   std::max(shape->width(), 1),
                                                   std::max(shape->height(), 1),
                                                   UP_DIV(shape->channel(), 4) * shape->batch(),
                                               },
                                               shape->getType());
    } else {
        // Compute Shader don't support uint8 / int8 / float16 / uint64, all use int32/float32
        mBuffer = std::make_shared<VulkanBuffer>(pool, seperate, _getAlignSize(shape) * sizeof(float));
    }
}
// Returns the underlying image/buffer memory to its pool so it can be
// recycled; the VulkanTensor object itself stays alive.
void VulkanTensor::release() {
    if (nullptr != mBuffer.get()) {
        mBuffer->release();
    }
    if (nullptr != mImage.get()) {
        mImage->release();
    }
}
// Opaque id stored in Tensor::buffer().device: the VkImageView handle for
// image-backed tensors, otherwise the VkBuffer handle.
uint64_t VulkanTensor::deviceId() {
    if (mImage.get()) {
        return reinterpret_cast<uint64_t>(mImage->view());
    } else {
        return reinterpret_cast<uint64_t>(mBuffer->buffer());
    }
}
// Builds the backend either on a caller-supplied Vulkan context (instance,
// physical device, device, queue) or, when context is NULL, on a freshly
// created instance/device.
VulkanBackend::VulkanBackend(const MNNVulkanContext* context) : Backend(MNN_FORWARD_VULKAN) {
    if (NULL != context) {
        mInstance = std::make_shared<VulkanInstance>(context->pInstance);
        mDevice = std::make_shared<VulkanDevice>(mInstance, context->pPhysicalDevice, context->pDevice,
                                                 context->iQueueFamilyIndex, context->pQueue);
    } else {
        mInstance = std::make_shared<VulkanInstance>();
        mDevice = std::make_shared<VulkanDevice>(mInstance);
    }
    auto& dev = device();
    mCmdPool = std::make_shared<VulkanCommandPool>(dev);
    mFence = std::make_shared<VulkanFence>(dev);
    // crude GPU vendor detection by device name
    std::string deviceName = dev.proty().deviceName;
    if (deviceName.find("Mali") != std::string::npos) {
        mGpuType = MALI;
    } else if (deviceName.find("Adreno") != std::string::npos) {
        mGpuType = ADRENO;
    }
    // separate pools: long-lived (weights) vs per-inference (dynamic) memory
    mMemoryPool = std::make_shared<VulkanMemoryPool>(dev);
    mDynamicMemoryPool = std::make_shared<VulkanMemoryPool>(dev);
    mSampler = std::make_shared<VulkanSampler>(dev, VK_FILTER_NEAREST, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER);
    mPipelineFactory = std::make_shared<VulkanPipelineFactory>(dev);
}
// Members are torn down in explicit dependency order: pipelines/samplers
// and all GPU resources first, then the memory pools, and finally the
// command pool, device and instance they were created from.
VulkanBackend::~VulkanBackend() {
    /*keep release order*/
    mPipelineFactory = nullptr;
    mSampler = nullptr;
    mStaticeBuffers.clear();
    mAllBuffers.clear();
    mHostBuffer = nullptr;
    mCmdBuffers.clear();
    mFence = nullptr;
    mConverters.clear();
    mDynamicMemoryPool = nullptr;
    mMemoryPool = nullptr;
    mCmdPool = nullptr;
    mDevice = nullptr;
    mInstance = nullptr;
}
// Plugin-style op libraries are not supported yet; accept and ignore.
bool VulkanBackend::onLoadLibrary(const GpuLibrary* library) {
    // [TODO]: Support Plugin
    return true;
}
// Queues a recorded command buffer for submission by the next _finish().
void VulkanBackend::pushCommand(VkCommandBuffer buffer) const {
    mCmdBuffers.emplace_back(buffer);
}
// Fetches (building and caching on first use) the compute pipeline for the
// given shader key, descriptor layout and workgroup size.
const VulkanPipeline* VulkanBackend::getPipeline(const std::string& key, const std::vector<VkDescriptorType>& types,
                                                 const std::vector<uint32_t>& localSize) const {
    return mPipelineFactory->getPipeline(key, types, localSize);
}
// An NC4HW4 tensor maps ceil(channel/4)*batch onto the 3D image depth
// axis; it only fits as an image if that depth stays within the device's
// maxImageDimension3D limit.
bool VulkanBackend::_supportImageSize(const Tensor* MTensor) {
    const auto requiredDepth = UP_DIV(MTensor->channel(), 4) * MTensor->batch();
    return requiredDepth <= device().proty().limits.maxImageDimension3D;
}
// Allocates GPU storage for a tensor and records the handle in
// Tensor::buffer().device. NC4HW4 tensors too large for the device's 3D
// image limit fall back to buffer storage.
bool VulkanBackend::onAcquireBuffer(const Tensor* tensor, StorageType storageType) {
    auto MTensor = const_cast<Tensor*>(tensor);
    auto format = TensorUtils::getDescribe(MTensor)->dimensionFormat;
    bool forceBuffer = false;
    if (MNN_DATA_FORMAT_NC4HW4 == format) {
        if (!_supportImageSize(MTensor)) {
            // forceBuffer = true;
            MNN_PRINT("Force Use Buffer because then Tensor is too Large: %d, %d, %d, %d\n", MTensor->width(),
                      MTensor->height(), MTensor->channel(), MTensor->batch());
            forceBuffer = true;
        }
    }
    if (Backend::STATIC == storageType) {
        // static tensors (weights/constants) come from the long-lived pool
        auto newBuffer = std::make_shared<VulkanTensor>(MTensor, getMemoryPool(), forceBuffer);
        MTensor->buffer().device = newBuffer->deviceId();
        mStaticeBuffers.insert(std::make_pair(MTensor->buffer().device, newBuffer));
    } else {
        // dynamic tensors come from the per-inference pool
        bool seperate = storageType == Backend::DYNAMIC_SEPERATE;
        auto newBuffer = std::make_shared<VulkanTensor>(MTensor, getDynamicMemoryPool(), forceBuffer, seperate);
        MTensor->buffer().device = newBuffer->deviceId();
        mAllBuffers.insert(std::make_pair(MTensor->buffer().device, newBuffer));
    }
    return true;
}
// Releases a tensor's GPU storage. DYNAMIC tensors return their memory to
// the pool but keep their map entry (the VulkanTensor may still be looked
// up); STATIC tensors are erased outright, freeing via the shared_ptr.
bool VulkanBackend::onReleaseBuffer(const Tensor* tensor, StorageType storageType) {
    auto buffer = (tensor->deviceId());
    if (Backend::DYNAMIC == storageType) {
        auto iter = mAllBuffers.find(buffer);
        MNN_ASSERT(iter != mAllBuffers.end());
        iter->second->release();
    }
    if (Backend::STATIC == storageType) {
        auto iter = mStaticeBuffers.find(buffer);
        MNN_ASSERT(iter != mStaticeBuffers.end());
        mStaticeBuffers.erase(iter);
    }
    return true;
}
// Drops all dynamic tensors and resets both memory pools; static buffers
// (weights) are intentionally kept.
bool VulkanBackend::onClearBuffer() {
    mMemoryPool->clear();
    mDynamicMemoryPool->clear();
    mAllBuffers.clear();
    return true;
}
// Creates the Vulkan execution for an op, or returns nullptr so the engine
// can fall back to another backend — either because no creator is
// registered for the op type, or because one of its NC4HW4 tensors exceeds
// the device's 3D image size limit.
Execution* VulkanBackend::onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                   const MNN::Op* op) {
    auto creatorMap = getCreatorMap();
    auto found = creatorMap->find(op->type());
    if (found == creatorMap->end()) {
        // MNN_PRINT("Vulkan don't support %d, %s: %s\n", op->type(), EnumNameOpType(op->type()),
        //           op->name()->c_str());
        return nullptr;
    }
    // every NC4HW4 tensor must fit into a 3D image on this device
    auto allTensorsFit = [this](const std::vector<Tensor*>& tensors) {
        for (auto t : tensors) {
            if (TensorUtils::getDescribe(t)->dimensionFormat == MNN_DATA_FORMAT_NC4HW4 && !_supportImageSize(t)) {
                return false;
            }
        }
        return true;
    };
    if (!allTensorsFit(inputs) || !allTensorsFit(outputs)) {
        return nullptr;
    }
    return found->second->onCreate(inputs, op, this);
}
// Nothing to do at execution start; memory was laid out during resize.
void VulkanBackend::onExecuteBegin() const {
    // FUNC_PRINT_ALL(mDynamicMemoryPool->computeSize(), f);
}
// Flush all queued command buffers at the end of one inference.
void VulkanBackend::onExecuteEnd() const {
    _finish();
}
// Submits every queued command buffer in a single vkQueueSubmit and blocks
// on a fence until the GPU has finished executing them.
void VulkanBackend::_finish() const {
    if (mCmdBuffers.empty()) {
        return;
    }
    AUTOTIME;
    VkSubmitInfo submit_info = {.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
                                .pNext = nullptr,
                                .waitSemaphoreCount = 0,
                                .pWaitSemaphores = nullptr,
                                .pWaitDstStageMask = nullptr,
                                .commandBufferCount = (uint32_t)mCmdBuffers.size(),
                                .pCommandBuffers = mCmdBuffers.data(),
                                .signalSemaphoreCount = 0,
                                .pSignalSemaphores = nullptr};
    auto fenceReal = mFence->get();
    mFence->reset();
    CALL_VK(vkQueueSubmit(device().acquireDefaultDevQueue(), 1, &submit_info, fenceReal));
    // the queue owns the work now; clear our pending list before waiting
    mCmdBuffers.clear();
    auto res = mFence->wait();
    MNN_VK_CHECK(res);
}
// The logical Vulkan device this backend runs on.
const VulkanDevice& VulkanBackend::device() const {
    return (*mDevice);
}
// Copies between a host tensor and a device tensor through the shared
// host-visible staging buffer. Direction is inferred: a non-null host
// pointer on src means host->gpu, otherwise gpu->host. The layout
// conversion command buffers are recorded once per (tensor, direction,
// format) and cached in mConverters for reuse.
void VulkanBackend::onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const {
    AUTOTIME;
    if (srcTensor->host<float>() != nullptr) {
        // flush pending GPU work before touching the shared staging buffer
        _finish();
        auto size = _getAlignSize(srcTensor) * 4;
        // host->gpu
        _allocHostBuffer(size);
        _copyTensorToBuffer(srcTensor, mHostBuffer.get());
        auto format = TensorUtils::getDescribe(srcTensor)->dimensionFormat;
        auto key = std::make_tuple(dstTensor, true, format);
        auto iter = mConverters.find(key);
        if (iter == mConverters.end()) {
            // record (once) the staging-buffer -> device-tensor conversion
            auto converter = std::make_shared<VulkanImageConverter>(this);
            std::shared_ptr<VulkanCommandPool::Buffer> convertorBuffer(
                const_cast<VulkanCommandPool::Buffer*>(mCmdPool->allocBuffer()));
            convertorBuffer->begin(0);
            converter->encodeBufferToTensor(mHostBuffer->buffer(), dstTensor, mHostBuffer->size(), 0,
                                            TensorUtils::getDescribe(srcTensor)->dimensionFormat,
                                            convertorBuffer.get());
            convertorBuffer->end();
            mConverters.insert(std::make_pair(key, std::make_pair(converter, convertorBuffer)));
            iter = mConverters.find(key);
        }
        // executed by the next _finish()
        mCmdBuffers.push_back(iter->second.second->get());
    } else {
        // gpu->host
        auto size = _getAlignSize(dstTensor) * 4;
        _finish();
        _allocHostBuffer(size);
        auto format = TensorUtils::getDescribe(dstTensor)->dimensionFormat;
        auto key = std::make_tuple(srcTensor, false, format);
        auto iter = mConverters.find(key);
        if (iter == mConverters.end()) {
            // record (once) the device-tensor -> staging-buffer conversion
            auto converter = std::make_shared<VulkanImageConverter>(this);
            std::shared_ptr<VulkanCommandPool::Buffer> convertorBuffer(
                const_cast<VulkanCommandPool::Buffer*>(mCmdPool->allocBuffer()));
            convertorBuffer->begin(0);
            converter->encodeTensorToBuffer(srcTensor, mHostBuffer->buffer(), mHostBuffer->size(), 0,
                                            TensorUtils::getDescribe(dstTensor)->dimensionFormat,
                                            convertorBuffer.get());
            convertorBuffer->end();
            mConverters.insert(std::make_pair(key, std::make_pair(converter, convertorBuffer)));
            iter = mConverters.find(key);
        }
        mCmdBuffers.push_back(iter->second.second->get());
        // run the conversion now, then read the staging buffer back
        _finish();
        _copyBufferToTensor(dstTensor, mHostBuffer.get());
    }
}
// Looks up the VulkanTensor backing a deviceId among dynamic tensors;
// returns nullptr when the id is unknown (e.g. a static tensor).
const VulkanTensor* VulkanBackend::findTensor(uint64_t deviceId) const {
    auto iter = mAllBuffers.find(deviceId);
    if (iter != mAllBuffers.end()) {
        return iter->second.get();
    }
    return nullptr;
}
// Grows (never shrinks) the shared host-visible staging buffer. A realloc
// invalidates the converter cache, whose recorded command buffers still
// reference the old buffer.
void VulkanBackend::_allocHostBuffer(size_t size) const {
    if (mHostBuffer.get() == nullptr || mHostBuffer->size() < size) {
        mHostBuffer =
            std::make_shared<VulkanBuffer>(getMemoryPool(), false, size, nullptr, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
                                           VK_SHARING_MODE_EXCLUSIVE, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
        mConverters.clear();
    }
}
// Registers an op creator; invoked from each op's static initializer.
bool VulkanBackend::addCreator(OpType t, Creator* c) {
    auto allKind = getCreatorMap();
    allKind->insert(std::make_pair(t, c));
    return true;
}
// Uploads `buffer` into `image`. Non-FP16 images first get a direct
// vkCmdCopyBufferToImage; then a dimensionality-specific buffer->image
// compute shader writes the image (the only path for FP16 images). Both
// stages submit and wait, so the image is ready when this returns.
void VulkanBackend::copyBufferToImage(const VulkanBuffer* buffer, const VulkanImage* image) const {
    std::vector<int> dimVector = image->dims();
    if (image->format() != VK_FORMAT_R16G16B16A16_SFLOAT) {
        VkBufferImageCopy copyRegions;
        // fix: "&copyRegions" was garbled to "(c)Regions" by an earlier
        // encoding pass, which does not compile
        ::memset(&copyRegions, 0, sizeof(copyRegions));
        copyRegions.imageOffset.x = 0;
        copyRegions.imageOffset.y = 0;
        copyRegions.imageOffset.z = 0;
        copyRegions.imageExtent.depth = image->depth();
        copyRegions.imageExtent.height = image->height();
        copyRegions.imageExtent.width = image->width();
        copyRegions.imageSubresource.layerCount = 1;
        copyRegions.imageSubresource.mipLevel = 0;
        copyRegions.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        copyRegions.imageSubresource.baseArrayLayer = 0;
        std::unique_ptr<VulkanCommandPool::Buffer> cmdbuffer(
            const_cast<VulkanCommandPool::Buffer*>(mCmdPool->allocBuffer()));
        cmdbuffer->begin(0);
        vkCmdCopyBufferToImage(cmdbuffer->get(), buffer->buffer(), image->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                               1, &copyRegions);
        cmdbuffer->end();
        mCmdPool->submitAndWait(cmdbuffer->get());
    }
    // pick the conversion shader matching the image dimensionality
    const VulkanPipeline* transformPipeline = nullptr;
    std::vector<VkDescriptorType> types{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                                        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER};
    int localX = 16;
    int localY = 16;
    int localZ = 1;
    switch (dimVector.size()) {
        case 1:
            transformPipeline = getPipeline("glsl_buffer2Image1D_comp",
                                            /*glsl_buffer2Image1D_comp, glsl_buffer2Image1D_comp_len,*/ types);
            localX = 256;
            localY = 1;
            break;
        case 2:
            transformPipeline = getPipeline("glsl_buffer2Image2D_comp",
                                            /*glsl_buffer2Image2D_comp, glsl_buffer2Image2D_comp_len,*/ types);
            break;
        case 3:
            transformPipeline = getPipeline("glsl_buffer2Image3D_comp",
                                            /*glsl_buffer2Image3D_comp, glsl_buffer2Image3D_comp_len,*/ types);
            break;
        default:
            break;
    }
    if (nullptr == transformPipeline) {
        // defensive: dims() outside [1, 3] has no shader; previously this
        // fell through to a null-pointer dereference on createSet()
        return;
    }
    std::unique_ptr<VulkanPipeline::DescriptorSet> sets(transformPipeline->createSet());
    auto constBuffer = std::make_shared<VulkanBuffer>(getMemoryPool(), false, dimVector.size() * sizeof(int),
                                                      dimVector.data(), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
    sets->writeImage(image->view(), mSampler->get(), VK_IMAGE_LAYOUT_GENERAL, 0);
    sets->writeBuffer(buffer->buffer(), 1, buffer->size());
    sets->writeBuffer(constBuffer->buffer(), 2, constBuffer->size());
    std::unique_ptr<VulkanCommandPool::Buffer> cmdbuffer(
        const_cast<VulkanCommandPool::Buffer*>(mCmdPool->allocBuffer()));
    cmdbuffer->begin(0);
    transformPipeline->bind(cmdbuffer->get(), sets->get());
    vkCmdDispatch(cmdbuffer->get(), UP_DIV(image->width(), localX), UP_DIV(image->height(), localY),
                  UP_DIV(image->depth(), localZ));
    cmdbuffer->end();
    mCmdPool->submitAndWait(cmdbuffer->get());
}
// Probes whether a usable Vulkan instance can be created on this device;
// used to decide whether to register the Vulkan backend at all.
static bool _testVulkan() {
    // std::make_unique need c++14
    std::unique_ptr<VulkanInstance> instance(new VulkanInstance());
    // `new` throws on allocation failure, so `instance` is never null here;
    // the meaningful checks are the instance's own status queries.
    if (!instance->success() || !instance->supportVulkan()) {
        MNN_ERROR("Invalide device for support vulkan\n");
        return false;
    }
    return true;
}
// –––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
// Backend Register
// –––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
// Factory registered with the MNN engine: creates a VulkanBackend,
// optionally reusing a caller-provided MNNVulkanContext passed through
// Backend::Info::user->sharedContext.
class VulkanBackendCreator : public BackendCreator {
    virtual Backend* onCreate(const Backend::Info& info) const {
        MNNVulkanContext* context = nullptr;
        if (nullptr != info.user && nullptr != info.user->sharedContext) {
            MNN_PRINT("Use user's vulkan context\n");
            context = static_cast<MNNVulkanContext*>(info.user->sharedContext);
        }
        auto backend = new VulkanBackend(context);
        // a backend whose device/instance setup failed must not be returned
        if (!backend->success()) {
            delete backend;
            return nullptr;
        }
        return backend;
    }
};
// Load-time registrar: registers the Vulkan backend only when the Vulkan
// loader initializes and a usable device exists. (The name "gResistor"
// looks like a historical typo for "registrar"; kept for compatibility.)
static bool gResistor = []() {
    if (InitVulkan()) {
        if (_testVulkan()) {
            MNNInsertExtraBackendCreator(MNN_FORWARD_VULKAN, new VulkanBackendCreator);
        }
        return true;
    }
    return false;
}();
} // namespace MNN
|
#include "systems/RenderSystem.h"
#include "components/Position.h"
#include "components/Display.h"
#include "components/Background.h"
#include "ScreenSize.h"
#include "ResourcePath.hpp"
using namespace entityx;
// Wires the render window and sprite-sheet texture, and loads the
// repeating background texture from the resource path.
// NOTE(review): the bool returned by loadFromFile is ignored — a missing
// "images/bg.png" silently yields an empty background; consider checking.
RenderSystem::RenderSystem(sf::RenderWindow& window,
                           const std::shared_ptr<sf::Texture>& spSSTexture)
    : m_window(window)
    , m_spSSTexture(spSSTexture)
    , m_bgTexture()
    , m_bgSprite()
    , m_sprite()
{
    m_sprite.setTexture(*m_spSSTexture);
    m_bgTexture.loadFromFile(resourcePath() + "images/bg.png");
    // repeated so the background can scroll by shifting its texture rect
    m_bgTexture.setRepeated(true);
    m_bgSprite.setTexture(m_bgTexture);
}
// Renders one frame: clears the window, draws the scrolling background for
// each Background entity, then draws every visible Display entity.
// Entities that have left the screen are destroyed here as a side effect.
// Note: `dt` is unused; drawing reflects current positions only.
void RenderSystem::update(EntityManager &entities,
                          EventManager &events,
                          double dt)
{
    m_window.clear(sf::Color::Black);
    Background::Handle background;
    Position::Handle position;
    for (Entity entity : entities.entities_with_components(background, position))
    {
        // scroll the repeated texture by the entity's (negated) y position
        m_bgSprite.setTextureRect(sf::IntRect(0, -position->position.y, 800, 600));
        m_window.draw(m_bgSprite);
    }
    Display::Handle display;
    for (Entity entity : entities.entities_with_components(position, display))
    {
        if (!isOutsideScreen(position->position, display->coord.width, display->coord.height))
        {
            // origin at the sprite center so rotation pivots correctly
            m_sprite.setOrigin(display->coord.width/2.0,
                               display->coord.height/2.0);
            m_sprite.setPosition(position->position);
            m_sprite.setRotation(position->heading);
            m_sprite.setTextureRect(display->coord);
            m_sprite.setColor(display->color);
            m_window.draw(m_sprite);
        }
        else
        {
            // off-screen entities are culled permanently
            entity.destroy();
        }
    }
}
// True when the sprite's bounding box lies fully left of, right of, or
// below the screen. The top edge is deliberately not tested — presumably
// so entities spawned above the screen can scroll into view; TODO confirm.
bool RenderSystem::isOutsideScreen(const sf::Vector2f& position,
                                   int width,
                                   int height)
{
    return position.x + width / 2.0 < 0.0
        || position.x - width / 2.0 > ScreenSize::width()
        || position.y - height / 2.0 > ScreenSize::height();
}
|
#ifndef AV_SPEECH_IN_NOISE_TESTS_TASKCONTROLLEROBSERVERSTUB_HPP_
#define AV_SPEECH_IN_NOISE_TESTS_TASKCONTROLLEROBSERVERSTUB_HPP_
#include <av-speech-in-noise/ui/Task.hpp>
namespace av_speech_in_noise {
// Test double that records which TaskController::Observer notifications
// have fired, for inspection by unit tests.
class TaskControllerObserverStub : public TaskController::Observer {
  public:
    // Observer callback: remember that the task has started.
    void notifyThatTaskHasStarted() override {
        notifiedThatTaskHasStarted_ = true;
    }
    [[nodiscard]] auto notifiedThatTaskHasStarted() const -> bool {
        return notifiedThatTaskHasStarted_;
    }
    [[nodiscard]] auto notifiedThatTrialHasStarted() const -> bool {
        return notifiedThatTrialHasStarted_;
    }
    // Observer callback: remember that a trial has started.
    void notifyThatTrialHasStarted() override {
        notifiedThatTrialHasStarted_ = true;
    }

  private:
    // both flags value-initialize to false
    bool notifiedThatTrialHasStarted_{};
    bool notifiedThatTaskHasStarted_{};
};
}
#endif
|
// Copyright 2016 Stellar Development Foundation and contributors. Licensed
// under the Apache License, Version 2.0. See the COPYING file at the root
// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
#include "util/BitsetEnumerator.h"
#include <cassert>
namespace stellar
{
///////////////////////////////////////////////////////////////////////////
// ConstantEnumerator
///////////////////////////////////////////////////////////////////////////
// A ConstantEnumerator yields exactly one fixed bitset, then is done.
ConstantEnumerator::ConstantEnumerator(std::bitset<64> bits)
    : mBits(bits), mDone(false)
{
}
// Convenience factory: an enumerator of the single bitset with bit n set.
std::shared_ptr<BitsetEnumerator>
ConstantEnumerator::bitNumber(size_t n)
{
    assert(n < 64);
    std::bitset<64> bits;
    bits.set(n);
    return std::make_shared<ConstantEnumerator>(bits);
}
// Convenience factory: one single-bit enumerator per entry of `ns`.
std::vector<std::shared_ptr<BitsetEnumerator>>
ConstantEnumerator::bitNumbers(std::vector<size_t> ns)
{
    std::vector<std::shared_ptr<BitsetEnumerator>> ret;
    for (auto n : ns)
    {
        ret.push_back(bitNumber(n));
    }
    return ret;
}
// Rewind: make the single value available again.
void
ConstantEnumerator::reset()
{
    mDone = false;
}
// Live until the one value has been consumed.
ConstantEnumerator::operator bool() const
{
    return !mDone;
}
// The constant value this enumerator yields.
std::bitset<64> ConstantEnumerator::operator*() const
{
    return mBits;
}
// Advancing past the single value exhausts the enumerator.
void ConstantEnumerator::operator++()
{
    mDone = true;
}
///////////////////////////////////////////////////////////////////////////
// PermutationEnumerator
///////////////////////////////////////////////////////////////////////////
// Enumerates every mTot-bit word with exactly mSet bits set, starting from
// the lexicographically-smallest permutation (the lowest mSet bits).
PermutationEnumerator::PermutationEnumerator(size_t nSet, size_t nTotal)
    : mCur(0), mSet(nSet), mTot(nTotal)
{
    assert(mSet <= mTot);
    assert(mSet > 0 && mSet <= 64);
    assert(mTot > 0 && mTot <= 64);
    // Delegate to reset(), which builds the lowest-mSet-bits word; this
    // keeps the initial state and the post-reset state defined in one place
    // (the original duplicated the bit-fill loop here).
    reset();
}
// Restore the first permutation: the lowest mSet bits all set.
void
PermutationEnumerator::reset()
{
    mCur = 0;
    auto nSet = mSet;
    while (nSet-- > 0)
    {
        mCur <<= 1;
        mCur |= 1;
    }
}
// Live while the permutation still fits inside the low mTot bits; the
// final advance carries a bit past position mTot-1, ending the iteration.
PermutationEnumerator::operator bool() const
{
    uint64_t one = 1;
    return !(mCur & ~((one << mTot) - 1));
}
// Current permutation as a bitset; always exactly mSet bits set.
std::bitset<64> PermutationEnumerator::operator*() const
{
    std::bitset<64> bits(mCur);
    assert(bits.count() == mSet);
    return bits;
}
// Simplest way of expressing unsigned unary-neg without tripping compiler
// errors, and/or hunting for the One Right Signedness Cast agreeable to
// all compilers / avoiding undefined behavior.
// Two's-complement negation spelled as flip-then-increment. Expressing
// unsigned unary-minus this way avoids compiler warnings about negating an
// unsigned value while staying fully defined behavior.
static inline uint64_t
uneg(uint64_t const& n)
{
    uint64_t const flipped = ~n;
    return flipped + 1;
}
// Advance to the next permutation with the same popcount (Gosper's hack).
void PermutationEnumerator::operator++()
{
    // Next bit-permutation. See:
    // https://graphics.stanford.edu/~seander/bithacks.html#NextBitPermutation
    uint64_t t = (mCur | (mCur - 1)) + 1;
    mCur = t | ((((t & uneg(t)) / (mCur & uneg(mCur))) >> 1) - 1);
}
///////////////////////////////////////////////////////////////////////////
// PowersetEnumerator
///////////////////////////////////////////////////////////////////////////
// Enumerates every non-empty subset of nBits bits, as the integers
// 1 .. 2^nBits - 1 (the empty set, 0, is deliberately skipped).
PowersetEnumerator::PowersetEnumerator(size_t nBits)
    : mCur(1), mLim(1ull << nBits)
{
    assert(nBits < 64);
}
// Restart from the first (non-empty) subset.
void
PowersetEnumerator::reset()
{
    mCur = 1;
}
// Live until all subsets below the 2^nBits limit have been produced.
PowersetEnumerator::operator bool() const
{
    return mCur < mLim;
}
// Current subset as a bitset.
std::bitset<64> PowersetEnumerator::operator*() const
{
    return std::bitset<64>(mCur);
}
void PowersetEnumerator::operator++()
{
    ++mCur;
}
///////////////////////////////////////////////////////////////////////////
// CartesianProductEnumerator
///////////////////////////////////////////////////////////////////////////
// Enumerates, odometer-style, the OR of one current value from each inner
// enumerator. All inner enumerators are rewound to a known state first.
CartesianProductEnumerator::CartesianProductEnumerator(
    std::vector<std::shared_ptr<BitsetEnumerator>> innerEnums)
    : mInnerEnums(innerEnums)
{
    for (auto const& e : mInnerEnums)
    {
        e->reset();
    }
}
// Rewind every inner enumerator.
void
CartesianProductEnumerator::reset()
{
    for (auto& e : mInnerEnums)
    {
        e->reset();
    }
}
// Live while at least one inner enumerator still has values (the carry
// scheme in operator++ exhausts them progressively).
CartesianProductEnumerator::operator bool() const
{
    for (auto const& e : mInnerEnums)
    {
        if (*e)
        {
            return true;
        }
    }
    return false;
}
// Current value: OR of every inner enumerator's current bitset.
std::bitset<64> CartesianProductEnumerator::operator*() const
{
    std::bitset<64> tmp;
    for (auto const& e : mInnerEnums)
    {
        tmp |= **e;
    }
    return tmp;
}
// Odometer-style advance: increment the first live inner enumerator; when
// that exhausts it, "carry" into the following enumerators, and once a
// carry lands on an enumerator that stays live, rewind all lower-indexed
// enumerators so they start over.
void CartesianProductEnumerator::operator++()
{
    // Want to walk along the array looking for the first
    // element that wasn't done, but becomes done when we
    // increment it.
    for (size_t i = 0; i < mInnerEnums.size(); ++i)
    {
        auto curr = mInnerEnums[i];
        if (!(*curr))
        {
            continue;
        }
        // enumerator i is 'true', so now advance it and see if it
        // went false.
        ++(*curr);
        if (*curr)
        {
            // It's still got life in it; stop and let it go.
            return;
        }
        // We just exhausted enumerator i, meaning we need to "carry" the
        // advance to the next one, and if it remains live after the carry,
        // reset enumerator i and all the previous ones.
        for (size_t carry = i + 1; carry < mInnerEnums.size(); ++carry)
        {
            auto next = mInnerEnums[carry];
            ++(*next);
            if (*next)
            {
                // Fix: rewind each enumerator 0..i. The original indexed
                // mInnerEnums[i] inside this loop, resetting enumerator i
                // repeatedly and leaving 0..i-1 exhausted.
                for (size_t lower = 0; lower <= i; ++lower)
                {
                    mInnerEnums[lower]->reset();
                }
                return;
            }
        }
    }
}
///////////////////////////////////////////////////////////////////////////
// SelectionEnumerator
///////////////////////////////////////////////////////////////////////////
// Enumerates, for each subset chosen by `index`, the cartesian product of
// the selected inner enumerators.
SelectionEnumerator::SelectionEnumerator(
    std::shared_ptr<BitsetEnumerator> index,
    std::vector<std::shared_ptr<BitsetEnumerator>> const& innerEnums)
    : mInnerEnums(innerEnums)
    , mIndexEnum(index)
    , mProduct(select(index, mInnerEnums))
{
    for (auto const& e : mInnerEnums)
    {
        e->reset();
    }
}
// Convenience factory: choose nSel bits at a time from the given bit
// numbers, in all permutations.
std::shared_ptr<BitsetEnumerator>
SelectionEnumerator::bitNumbers(size_t nSel, std::vector<size_t> ns)
{
    auto idx = std::make_shared<PermutationEnumerator>(nSel, ns.size());
    auto ces = ConstantEnumerator::bitNumbers(ns);
    return std::make_shared<SelectionEnumerator>(idx, ces);
}
// Reads the index enumerator's current value as a mask over 'from' and
// builds a cartesian product of just the enumerators whose bit is set.
// Bits at or beyond from.size() are ignored.
CartesianProductEnumerator
SelectionEnumerator::select(
    std::shared_ptr<BitsetEnumerator> index,
    std::vector<std::shared_ptr<BitsetEnumerator>> const& from)
{
    std::bitset<64> mask(**index);
    std::vector<std::shared_ptr<BitsetEnumerator>> chosen;
    size_t limit = from.size() < 64 ? from.size() : 64;
    for (size_t i = 0; i < limit; ++i)
    {
        if (mask[i])
        {
            chosen.push_back(from[i]);
        }
    }
    return CartesianProductEnumerator(chosen);
}
// Delegates to the currently-selected cartesian product: the union of the
// bitsets produced by the active (selected) inner enumerators.
std::bitset<64> SelectionEnumerator::operator*() const
{
    return *mProduct;
}
// Restarts the selection: rewinds the index enumerator and rebuilds the
// product from the index's first selection.
void
SelectionEnumerator::reset()
{
    mIndexEnum->reset();
    mProduct = select(mIndexEnum, mInnerEnums);
}
// Live while either the current product still has combinations or the
// index enumerator can still advance to another selection.
SelectionEnumerator::operator bool() const
{
    return mProduct || *mIndexEnum;
}
// Advances the current product first; once it is exhausted, advances the
// index enumerator to the next selection and rebuilds the product for it.
void SelectionEnumerator::operator++()
{
    if (mProduct)
    {
        ++mProduct;
    }
    // Note: not 'else' — the increment above may have just exhausted
    // mProduct, in which case we advance the selection in the same call.
    if (!mProduct)
    {
        ++(*mIndexEnum);
        if (*mIndexEnum)
        {
            mProduct = select(mIndexEnum, mInnerEnums);
        }
    }
}
}
|
//===--- SwiftEditor.cpp --------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "SwiftASTManager.h"
#include "SwiftEditorDiagConsumer.h"
#include "SwiftLangSupport.h"
#include "SourceKit/Core/Context.h"
#include "SourceKit/Core/NotificationCenter.h"
#include "SourceKit/Support/FileSystemProvider.h"
#include "SourceKit/Support/ImmutableTextBuffer.h"
#include "SourceKit/Support/Logging.h"
#include "SourceKit/Support/Tracing.h"
#include "SourceKit/Support/UIdent.h"
#include "swift/AST/ASTPrinter.h"
#include "swift/AST/ASTVisitor.h"
#include "swift/AST/ASTWalker.h"
#include "swift/AST/DiagnosticsClangImporter.h"
#include "swift/AST/DiagnosticsParse.h"
#include "swift/AST/DiagnosticsFrontend.h"
#include "swift/Basic/SourceManager.h"
#include "swift/Demangling/ManglingUtils.h"
#include "swift/Frontend/Frontend.h"
#include "swift/Frontend/PrintingDiagnosticConsumer.h"
#include "swift/IDE/CodeCompletion.h"
#include "swift/IDE/CommentConversion.h"
#include "swift/IDE/Indenting.h"
#include "swift/IDE/SourceEntityWalker.h"
#include "swift/IDE/SyntaxModel.h"
#include "swift/Subsystems.h"
#include "swift/SyntaxParse/SyntaxTreeCreator.h"
#include "swift/Syntax/Serialization/SyntaxSerialization.h"
#include "swift/Syntax/SyntaxNodes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Mutex.h"
using namespace SourceKit;
using namespace swift;
using namespace ide;
/// Collects the keys of \p Map (buffer IDs) and returns them sorted in
/// ascending order.
static std::vector<unsigned> getSortedBufferIDs(
    const llvm::DenseMap<unsigned, std::vector<DiagnosticEntryInfo>> &Map) {
  std::vector<unsigned> bufferIDs;
  bufferIDs.reserve(Map.size());
  for (const auto &Entry : Map) {
    bufferIDs.push_back(Entry.getFirst());
  }
  llvm::array_pod_sort(bufferIDs.begin(), bufferIDs.end());
  return bufferIDs;
}
/// Appends every collected diagnostic to \p Result: first the diagnostics
/// that had no valid source location, then the per-buffer diagnostics in
/// ascending buffer-ID order.
void EditorDiagConsumer::getAllDiagnostics(
    SmallVectorImpl<DiagnosticEntryInfo> &Result) {
  Result.append(InvalidLocDiagnostics.begin(), InvalidLocDiagnostics.end());
  // Note: we cannot reuse InputBufIds because there may be diagnostics outside
  // the inputs. Instead, sort the extant buffers.
  auto bufferIDs = getSortedBufferIDs(BufferDiagnostics);
  for (unsigned bufferID : bufferIDs) {
    const auto &diags = BufferDiagnostics[bufferID];
    Result.append(diags.begin(), diags.end());
  }
}
/// Converts a compiler diagnostic into a DiagnosticEntryInfo and files it:
/// notes are attached to the last non-note diagnostic, location-less
/// diagnostics go to InvalidLocDiagnostics, and everything else is inserted
/// into the per-buffer list, kept sorted by source offset.
void EditorDiagConsumer::handleDiagnostic(SourceManager &SM,
                                          const DiagnosticInfo &Info) {
  if (Info.Kind == DiagnosticKind::Error) {
    HadAnyError = true;
  }
  // Filter out benign diagnostics for editing.
  if (Info.ID == diag::lex_editor_placeholder.ID ||
      Info.ID == diag::error_doing_code_completion.ID)
    return;
  bool IsNote = (Info.Kind == DiagnosticKind::Note);
  if (IsNote && !haveLastDiag())
    // Is this possible?
    return;
  if (Info.Kind == DiagnosticKind::Remark) {
    // FIXME: we may want to handle optimization remarks in sourcekitd.
    LOG_WARN_FUNC("unhandled optimization remark");
    return;
  }
  DiagnosticEntryInfo SKInfo;
  // Actually substitute the diagnostic arguments into the diagnostic text.
  llvm::SmallString<256> Text;
  {
    llvm::raw_svector_ostream Out(Text);
    DiagnosticEngine::formatDiagnosticText(Out, Info.FormatString,
                                           Info.FormatArgs);
  }
  SKInfo.Description = std::string(Text.str());
  for (auto notePath : Info.EducationalNotePaths)
    SKInfo.EducationalNotePaths.push_back(notePath);
  // Resolve the diagnostic location to a buffer, if it has one.
  Optional<unsigned> BufferIDOpt;
  if (Info.Loc.isValid()) {
    BufferIDOpt = SM.findBufferContainingLoc(Info.Loc);
  }
  if (BufferIDOpt && !isInputBufferID(*BufferIDOpt)) {
    if (Info.ID == diag::error_from_clang.ID ||
        Info.ID == diag::warning_from_clang.ID ||
        Info.ID == diag::note_from_clang.ID ||
        !IsNote) {
      // Handle it as other diagnostics.
    } else {
      // FIXME: This is a note pointing to a synthesized declaration buffer for
      // a declaration coming from a module.
      // We should include the Decl* in the DiagnosticInfo and have a way for
      // Xcode to handle this "points-at-a-decl-from-module" location.
      //
      // For now instead of ignoring it, pick up the declaration name from the
      // buffer identifier and append it to the diagnostic message.
      auto &LastDiag = getLastDiag();
      SKInfo.Description += " (";
      SKInfo.Description += SM.getIdentifierForBuffer(*BufferIDOpt);
      SKInfo.Description += ")";
      // Reuse the anchor diagnostic's position so the note points somewhere
      // meaningful in the user's file.
      SKInfo.Offset = LastDiag.Offset;
      SKInfo.Line = LastDiag.Line;
      SKInfo.Column = LastDiag.Column;
      SKInfo.Filename = LastDiag.Filename;
      LastDiag.Notes.push_back(std::move(SKInfo));
      return;
    }
  }
  if (BufferIDOpt.hasValue()) {
    unsigned BufferID = *BufferIDOpt;
    SKInfo.Offset = SM.getLocOffsetInBuffer(Info.Loc, BufferID);
    std::tie(SKInfo.Line, SKInfo.Column) =
        SM.getLineAndColumn(Info.Loc, BufferID);
    SKInfo.Filename = SM.getDisplayNameForLoc(Info.Loc).str();
    // Only keep ranges/fixits that fall inside the same buffer as the
    // diagnostic itself.
    for (auto R : Info.Ranges) {
      if (R.isInvalid() || SM.findBufferContainingLoc(R.getStart()) != BufferID)
        continue;
      unsigned Offset = SM.getLocOffsetInBuffer(R.getStart(), BufferID);
      unsigned Length = R.getByteLength();
      SKInfo.Ranges.push_back({Offset, Length});
    }
    for (auto F : Info.FixIts) {
      if (F.getRange().isInvalid() ||
          SM.findBufferContainingLoc(F.getRange().getStart()) != BufferID)
        continue;
      unsigned Offset =
          SM.getLocOffsetInBuffer(F.getRange().getStart(), BufferID);
      unsigned Length = F.getRange().getByteLength();
      SKInfo.Fixits.push_back({Offset, Length, F.getText().str()});
    }
  } else {
    SKInfo.Filename = "<unknown>";
  }
  if (IsNote) {
    // Notes ride along with the previous non-note diagnostic.
    getLastDiag().Notes.push_back(std::move(SKInfo));
    return;
  }
  switch (Info.Kind) {
  case DiagnosticKind::Error:
    SKInfo.Severity = DiagnosticSeverityKind::Error;
    break;
  case DiagnosticKind::Warning:
    SKInfo.Severity = DiagnosticSeverityKind::Warning;
    break;
  case DiagnosticKind::Note:
  case DiagnosticKind::Remark:
    llvm_unreachable("already covered");
  }
  if (!BufferIDOpt) {
    InvalidLocDiagnostics.push_back(std::move(SKInfo));
    // No location means no anchor for future notes.
    clearLastDiag();
    return;
  }
  unsigned BufferID = *BufferIDOpt;
  DiagnosticsTy &Diagnostics = BufferDiagnostics[BufferID];
  // Fast path: diagnostic already in source order — append.
  if (Diagnostics.empty() || Diagnostics.back().Offset <= SKInfo.Offset) {
    Diagnostics.push_back(std::move(SKInfo));
    LastDiagBufferID = BufferID;
    LastDiagIndex = Diagnostics.size() - 1;
    return;
  }
  // Keep the diagnostics array in source order.
  auto Pos = std::lower_bound(Diagnostics.begin(), Diagnostics.end(), SKInfo.Offset,
    [&](const DiagnosticEntryInfo &LHS, unsigned Offset) -> bool {
      return LHS.Offset < Offset;
    });
  LastDiagBufferID = BufferID;
  LastDiagIndex = Pos - Diagnostics.begin();
  Diagnostics.insert(Pos, std::move(SKInfo));
}
/// Looks up a document by the exact (unresolved) path used as its map key.
/// Returns null if no document is registered under \p FilePath. Thread-safe
/// via a synchronous dispatch onto the map's queue.
SwiftEditorDocumentRef
SwiftEditorDocumentFileMap::getByUnresolvedName(StringRef FilePath) {
  SwiftEditorDocumentRef EditorDoc;
  Queue.dispatchSync([&]{
    auto It = Docs.find(FilePath);
    if (It != Docs.end())
      EditorDoc = It->second.DocRef;
  });
  return EditorDoc;
}
/// Looks up a document by path, matching either the raw key or the
/// symlink-resolved path, so two spellings of the same file find the same
/// document. Linear scan over all entries; returns null when not found.
SwiftEditorDocumentRef
SwiftEditorDocumentFileMap::findByPath(StringRef FilePath) {
  SwiftEditorDocumentRef EditorDoc;
  std::string ResolvedPath = SwiftLangSupport::resolvePathSymlinks(FilePath);
  Queue.dispatchSync([&]{
    for (auto &Entry : Docs) {
      if (Entry.getKey() == FilePath ||
          Entry.getValue().ResolvedPath == ResolvedPath) {
        EditorDoc = Entry.getValue().DocRef;
        break;
      }
    }
  });
  return EditorDoc;
}
/// Registers \p EditorDoc under \p FilePath if no document exists there yet;
/// otherwise replaces \p EditorDoc (in-out) with the already-registered one.
/// Returns true if an existing document was found. Uses a barrier dispatch
/// since it may mutate the map.
bool SwiftEditorDocumentFileMap::getOrUpdate(
    StringRef FilePath, SwiftLangSupport &LangSupport,
    SwiftEditorDocumentRef &EditorDoc) {
  bool found = false;
  std::string ResolvedPath = SwiftLangSupport::resolvePathSymlinks(FilePath);
  Queue.dispatchBarrierSync([&]{
    // operator[] creates the entry if absent; a default DocRef marks "new".
    DocInfo &Doc = Docs[FilePath];
    if (!Doc.DocRef) {
      Doc.DocRef = EditorDoc;
      Doc.ResolvedPath = ResolvedPath;
    } else {
      EditorDoc = Doc.DocRef;
      found = true;
    }
  });
  return found;
}
/// Removes the document registered under \p FilePath (exact key match only)
/// and returns it, or null if there was none. Barrier dispatch: mutates the
/// map.
SwiftEditorDocumentRef SwiftEditorDocumentFileMap::remove(StringRef FilePath) {
  SwiftEditorDocumentRef Removed;
  Queue.dispatchBarrierSync([&]{
    auto I = Docs.find(FilePath);
    if (I != Docs.end()) {
      Removed = I->second.DocRef;
      Docs.erase(I);
    }
  });
  return Removed;
}
namespace {
/// Splits the range (Off1, Len1) around its overlap with (Off2, Len2) and
/// hands the results to \p applier: a "before" piece (the part of range 1
/// preceding range 2) and an "after" piece (the part past range 2's end).
/// If the ranges do not overlap, range 1 is passed through unchanged and
/// the after-piece is (0, 0). Assumes Off1 <= Off2 for meaningful results.
void mergeSplitRanges(unsigned Off1, unsigned Len1, unsigned Off2, unsigned Len2,
                      std::function<void(unsigned BeforeOff, unsigned BeforeLen,
                                         unsigned AfterOff,
                                         unsigned AfterLen)> applier) {
  unsigned FirstEnd = Off1 + Len1;
  unsigned SecondEnd = Off2 + Len2;
  if (FirstEnd <= Off2) {
    // Disjoint ranges: nothing to split off.
    applier(Off1, Len1, 0, 0);
    return;
  }
  // Overlapping: compute the piece of range 1 before range 2 starts...
  unsigned HeadLen = (Off2 > Off1) ? Off2 - Off1 : 0;
  // ...and the piece of range 1 extending past range 2's end.
  unsigned TailLen = (FirstEnd > SecondEnd) ? FirstEnd - SecondEnd : 0;
  applier(Off1, HeadLen, SecondEnd, TailLen);
}
/// A single syntax-highlighting token: a byte range in the buffer plus its
/// highlight kind. Packed into 8 bytes (24-bit length, 8-bit kind) so large
/// token vectors stay compact.
struct SwiftSyntaxToken {
  unsigned Offset;
  unsigned Length:24;
  SyntaxNodeKind Kind:8;
  // An invalidated token is encoded as a zero-length token at offset 0;
  // the Kind value is arbitrary (isInvalid() only checks Length).
  static SwiftSyntaxToken createInvalid() {
    return {0, 0, SyntaxNodeKind::AttributeBuiltin};
  }
  SwiftSyntaxToken(unsigned Offset, unsigned Length, SyntaxNodeKind Kind)
      : Offset(Offset), Length(Length), Kind(Kind) {}
  /// One past the last byte covered by this token.
  unsigned endOffset() const { return Offset + Length; }
  bool isInvalid() const { return Length == 0; }
  bool operator==(const SwiftSyntaxToken &Other) const {
    return Offset == Other.Offset && Length == Other.Length &&
      Kind == Other.Kind;
  }
  bool operator!=(const SwiftSyntaxToken &Other) const {
    return Offset != Other.Offset || Length != Other.Length ||
      Kind != Other.Kind;
  }
};
/// A half-open [Offset, EndOffset) byte range in the document, used to track
/// the region affected by edits and re-highlighting.
struct SwiftEditorCharRange {
  unsigned Offset;
  unsigned EndOffset;
  SwiftEditorCharRange(unsigned Offset, unsigned EndOffset) :
    Offset(Offset), EndOffset(EndOffset) {}
  SwiftEditorCharRange(SwiftSyntaxToken Token) :
    Offset(Token.Offset), EndOffset(Token.endOffset()) {}
  size_t length() const { return EndOffset - Offset; }
  bool isEmpty() const { return Offset == EndOffset; }
  /// True if this range and the token's range share at least one byte
  /// (strict inequalities: mere adjacency does not count).
  bool intersects(const SwiftSyntaxToken &Token) const {
    return this->Offset < (Token.endOffset()) && this->EndOffset > Token.Offset;
  }
  /// Grows this range (in place) to the union of itself and \p Range.
  void extendToInclude(const SwiftEditorCharRange &Range) {
    if (Range.Offset < Offset)
      Offset = Range.Offset;
    if (Range.EndOffset > EndOffset)
      EndOffset = Range.EndOffset;
  }
  /// Grows this range to cover the single position \p OtherOffset.
  void extendToInclude(unsigned OtherOffset) {
    extendToInclude({OtherOffset, OtherOffset});
  }
};
/// Finds and represents the first mismatching tokens in two syntax maps,
/// ignoring invalidated tokens.
/// Walks two token sequences in lockstep (works with forward or reverse
/// iterators) and stops at the first pair of tokens that differ, skipping
/// invalidated tokens in the previous sequence. After construction, CurrTok
/// and PrevTok point at the mismatch (or the ends if the sequences match).
template <class Iter>
struct TokenMismatch {
  /// The begin and end iterators of the previous syntax map
  Iter PrevTok, PrevEnd;
  /// The begin and end iterators of the current syntax map
  Iter CurrTok, CurrEnd;
  TokenMismatch(Iter CurrTok, Iter CurrEnd, Iter PrevTok, Iter PrevEnd) :
    PrevTok(PrevTok), PrevEnd(PrevEnd), CurrTok(CurrTok), CurrEnd(CurrEnd) {
    // Advance both cursors until the tokens disagree or either side ends.
    skipInvalid();
    while(advance());
  }
  /// Returns true if a mismatch was found
  bool foundMismatch() const {
    return CurrTok != CurrEnd || PrevTok != PrevEnd;
  }
  /// Returns the smallest start offset of the mismatched token ranges
  unsigned mismatchStart() const {
    assert(foundMismatch());
    if (CurrTok != CurrEnd) {
      if (PrevTok != PrevEnd)
        return std::min(CurrTok->Offset, PrevTok->Offset);
      return CurrTok->Offset;
    }
    return PrevTok->Offset;
  }
  /// Returns the largest end offset of the mismatched token ranges
  unsigned mismatchEnd() const {
    assert(foundMismatch());
    if (CurrTok != CurrEnd) {
      if (PrevTok != PrevEnd)
        return std::max(CurrTok->endOffset(), PrevTok->endOffset());
      return CurrTok->endOffset();
    }
    return PrevTok->endOffset();
  }
private:
  // Only the previous map can contain invalidated tokens (edits mark them),
  // so invalid-skipping applies to PrevTok alone.
  void skipInvalid() {
    while (PrevTok != PrevEnd && PrevTok->isInvalid())
      ++PrevTok;
  }
  // Steps both cursors past one matching token pair; returns false at a
  // mismatch or when either sequence is exhausted.
  bool advance() {
    if (CurrTok == CurrEnd || PrevTok == PrevEnd || *CurrTok != *PrevTok)
      return false;
    ++CurrTok;
    ++PrevTok;
    skipInvalid();
    return true;
  }
};
/// Represents a the syntax highlighted token ranges in a source file
/// Represents the syntax highlighted token ranges in a source file, kept in
/// ascending offset order, with support for incremental updates after edits
/// and for diffing against a previous map.
struct SwiftSyntaxMap {
  std::vector<SwiftSyntaxToken> Tokens;
  explicit SwiftSyntaxMap(unsigned Capacity = 0) {
    if (Capacity)
      Tokens.reserve(Capacity);
  }
  /// Appends a token; tokens must be added in non-decreasing offset order.
  void addToken(const SwiftSyntaxToken &Token) {
    assert(Tokens.empty() || Token.Offset >= Tokens.back().Offset);
    Tokens.push_back(Token);
  }
  /// Merge this nested token into the last token that was added: the last
  /// token is split around \p Token so the nested token's kind wins inside
  /// its range while the outer kind is kept on either side.
  void mergeToken(const SwiftSyntaxToken &Token) {
    if (Tokens.empty()) {
      Tokens.push_back(Token);
      return;
    }
    auto &LastTok = Tokens.back();
    assert(LastTok.Offset <= Token.Offset);
    mergeSplitRanges(LastTok.Offset, LastTok.Length, Token.Offset, Token.Length,
                     [&](unsigned BeforeOff, unsigned BeforeLen,
                         unsigned AfterOff, unsigned AfterLen) {
      // Copy the kind before pop_back invalidates LastTok.
      auto LastKind = LastTok.Kind;
      Tokens.pop_back();
      if (BeforeLen)
        Tokens.emplace_back(BeforeOff, BeforeLen, LastKind);
      Tokens.push_back(Token);
      if (AfterLen)
        Tokens.emplace_back(AfterOff, AfterLen, LastKind);
    });
  }
  /// Adjusts the token offsets and lengths in this syntax map to account for
  /// replacing \p Len bytes at the given \p Offset with \p NewLen bytes. Tokens
  /// before the replacement stay the same, tokens after it are shifted, and
  /// tokens that intersect it are 'removed' (really just marked invalid).
  /// Clients are expected to match this behavior.
  ///
  /// Returns the union of the replaced range and the token ranges it
  /// intersected, or nothing if no tokens were intersected.
  llvm::Optional<SwiftEditorCharRange>
  adjustForReplacement(unsigned Offset, unsigned Len, unsigned NewLen) {
    unsigned ReplacedStart = Offset;
    unsigned ReplacedEnd = Offset + Len;
    bool TokenIntersected = false;
    SwiftEditorCharRange Affected = { /*Offset=*/ReplacedStart,
                                      /*EndOffset=*/ReplacedEnd};
    // Adjust the tokens (single pass, three phases by position).
    auto Token = Tokens.begin();
    while (Token != Tokens.end() && Token->endOffset() <= ReplacedStart) {
      // Completely before the replaced range – no change needed
      ++Token;
    }
    while (Token != Tokens.end() && Token->Offset < ReplacedEnd) {
      // Intersecting the replaced range – extend Affected and invalidate
      TokenIntersected = true;
      Affected.extendToInclude(*Token);
      *Token = SwiftSyntaxToken::createInvalid();
      ++Token;
    }
    while (Token != Tokens.end()) {
      // Completely after the replaced range - shift to account for NewLen.
      // Split into add/subtract branches to stay within unsigned arithmetic.
      if (NewLen >= Len)
        Token->Offset += NewLen - Len;
      else
        Token->Offset -= Len - NewLen;
      ++Token;
    }
    // If the replaced range didn't intersect with any existing tokens, there's
    // no need to report an affected range
    if (!TokenIntersected)
      return None;
    // Update the end of the affected range to account for NewLen
    if (NewLen >= Len) {
      Affected.EndOffset += NewLen - Len;
    } else {
      Affected.EndOffset -= Len - NewLen;
    }
    return Affected;
  }
  /// Passes each token in this SwiftSyntaxMap to the given \p Consumer
  void forEach(EditorConsumer &Consumer) {
    for (auto &Token: Tokens) {
      auto Kind = SwiftLangSupport::getUIDForSyntaxNodeKind(Token.Kind);
      Consumer.handleSyntaxMap(Token.Offset, Token.Length, Kind);
    }
  }
  /// Finds the delta between the given SwiftSyntaxMap, \p Prev, and this one.
  /// It passes each token not in \p Prev to the given \p Consumer and, if
  /// needed, also expands or sets the given \p Affected range to cover all
  /// non-matching tokens in the two lists.
  ///
  /// Returns true if this SwiftSyntaxMap is different to \p Prev.
  bool forEachChanged(const SwiftSyntaxMap &Prev,
                      llvm::Optional<SwiftEditorCharRange> &Affected,
                      EditorConsumer &Consumer) const {
    typedef std::vector<SwiftSyntaxToken>::const_iterator ForwardIt;
    typedef std::vector<SwiftSyntaxToken>::const_reverse_iterator ReverseIt;
    // Find the first pair of tokens that don't match
    TokenMismatch<ForwardIt>
    Forward(Tokens.begin(), Tokens.end(), Prev.Tokens.begin(), Prev.Tokens.end());
    // Exit early if there was no mismatch
    if (!Forward.foundMismatch())
      return Affected && !Affected->isEmpty();
    // Find the last pair of tokens that don't match (scan from the back)
    TokenMismatch<ReverseIt>
    Backward(Tokens.rbegin(), Tokens.rend(), Prev.Tokens.rbegin(), Prev.Tokens.rend());
    assert(Backward.foundMismatch());
    // Set or extend the affected range to include the mismatched range
    SwiftEditorCharRange
    MismatchRange = {Forward.mismatchStart(),Backward.mismatchEnd()};
    if (!Affected) {
      Affected = MismatchRange;
    } else {
      Affected->extendToInclude(MismatchRange);
    }
    // Report all tokens in the affected range to the EditorConsumer,
    // widening past the mismatch to any tokens it overlaps.
    auto From = Forward.CurrTok;
    auto To = Backward.CurrTok;
    while (From != Tokens.begin() && (From-1)->Offset >= Affected->Offset)
      --From;
    while (To != Tokens.rbegin() && (To-1)->endOffset() <= Affected->EndOffset)
      --To;
    for (; From < To.base(); ++From) {
      auto Kind = SwiftLangSupport::getUIDForSyntaxNodeKind(From->Kind);
      Consumer.handleSyntaxMap(From->Offset, From->Length, Kind);
    }
    return true;
  }
};
/// A syntax-map entry in the form consumed by EditorConsumer: byte range
/// plus the UID identifying the highlight kind.
struct EditorConsumerSyntaxMapEntry {
  unsigned Offset;
  unsigned Length;
  UIdent Kind;
  EditorConsumerSyntaxMapEntry(unsigned Offset, unsigned Length, UIdent Kind)
    :Offset(Offset), Length(Length), Kind(Kind) { }
};
/// A semantic-highlighting token: a byte range plus the referenced
/// declaration's kind and ref/system flags, bit-packed so the whole token
/// fits in 8 bytes (enforced by the static_assert below).
struct SwiftSemanticToken {
  unsigned ByteOffset;
  unsigned Length : 24;
  // The code-completion kinds are a good match for the semantic kinds we want.
  // FIXME: Maybe rename CodeCompletionDeclKind to a more general concept ?
  CodeCompletionDeclKind Kind : 6;
  unsigned IsRef : 1;
  unsigned IsSystem : 1;
  SwiftSemanticToken(CodeCompletionDeclKind Kind,
                     unsigned ByteOffset, unsigned Length,
                     bool IsRef, bool IsSystem)
    : ByteOffset(ByteOffset), Length(Length), Kind(Kind),
      IsRef(IsRef), IsSystem(IsSystem) { }
  // Accessors convert the 1-bit fields back to bool.
  bool getIsRef() const { return static_cast<bool>(IsRef); }
  bool getIsSystem() const { return static_cast<bool>(IsSystem); }
  UIdent getUIdentForKind() const {
    return SwiftLangSupport::getUIDForCodeCompletionDeclKind(Kind, getIsRef());
  }
};
// Lock in the packed 8-byte layout; semantic token vectors can be large.
static_assert(sizeof(SwiftSemanticToken) == 8, "Too big");
/// Holds the semantic (AST-derived) information for one editor document:
/// semantic tokens, semantic diagnostics, the snapshots they correspond to,
/// and the compiler invocation used to produce them. All mutable state is
/// guarded by Mtx; instances are shared via intrusive ref-counting.
class SwiftDocumentSemanticInfo :
    public ThreadSafeRefCountedBase<SwiftDocumentSemanticInfo> {
  const std::string Filename;
  std::weak_ptr<SwiftASTManager> ASTMgr;
  std::shared_ptr<NotificationCenter> NotificationCtr;
  ThreadSafeRefCntPtr<SwiftInvocation> InvokRef;
  llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> fileSystem;
  std::string CompilerArgsError;
  // Generation of the AST the cached tokens/diagnostics came from; used to
  // discard results from stale ASTs.
  uint64_t ASTGeneration = 0;
  // Snapshot the cached semantic tokens are relative to.
  ImmutableTextSnapshotRef TokSnapshot;
  std::vector<SwiftSemanticToken> SemaToks;
  // Snapshot the cached semantic diagnostics are relative to.
  ImmutableTextSnapshotRef DiagSnapshot;
  std::vector<DiagnosticEntryInfo> SemaDiags;
  mutable llvm::sys::Mutex Mtx;
public:
  SwiftDocumentSemanticInfo(
      StringRef Filename, std::weak_ptr<SwiftASTManager> ASTMgr,
      std::shared_ptr<NotificationCenter> NotificationCtr,
      llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> fileSystem)
      : Filename(Filename), ASTMgr(ASTMgr), NotificationCtr(NotificationCtr),
        fileSystem(fileSystem) {}
  SwiftInvocationRef getInvocation() const {
    return InvokRef;
  }
  llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> getFileSystem() const {
    return fileSystem;
  }
  uint64_t getASTGeneration() const;
  /// Builds and stores the compiler invocation for this document; any error
  /// message is captured in CompilerArgsError.
  void setCompilerArgs(ArrayRef<const char *> Args) {
    if (auto ASTMgr = this->ASTMgr.lock()) {
      InvokRef =
          ASTMgr->getInvocation(Args, Filename, CompilerArgsError);
    }
  }
  void readSemanticInfo(ImmutableTextSnapshotRef NewSnapshot,
                        std::vector<SwiftSemanticToken> &Tokens,
                        Optional<std::vector<DiagnosticEntryInfo>> &Diags,
                        ArrayRef<DiagnosticEntryInfo> ParserDiags);
  void processLatestSnapshotAsync(EditableTextBufferRef EditableBuffer);
  void updateSemanticInfo(std::vector<SwiftSemanticToken> Toks,
                          std::vector<DiagnosticEntryInfo> Diags,
                          ImmutableTextSnapshotRef Snapshot,
                          uint64_t ASTGeneration);
  /// Drops the cached AST for this document's invocation, if any.
  void removeCachedAST() {
    if (InvokRef) {
      if (auto ASTMgr = this->ASTMgr.lock()) {
        ASTMgr->removeCachedAST(InvokRef);
      }
    }
  }
private:
  std::vector<SwiftSemanticToken> takeSemanticTokens(
      ImmutableTextSnapshotRef NewSnapshot);
  Optional<std::vector<DiagnosticEntryInfo>> getSemanticDiagnostics(
      ImmutableTextSnapshotRef NewSnapshot,
      ArrayRef<DiagnosticEntryInfo> ParserDiags);
};
/// Owns the parser-level (syntactic) state for one document snapshot: a
/// private SourceManager with a copy of the snapshot's text, a ParserUnit,
/// an optional syntax-tree builder, and the parser diagnostics.
class SwiftDocumentSyntaxInfo {
  SourceManager SM;
  EditorDiagConsumer DiagConsumer;
  std::shared_ptr<SyntaxTreeCreator> SynTreeCreator;
  std::unique_ptr<ParserUnit> Parser;
  unsigned BufferID;
  std::vector<std::string> Args;
  std::string PrimaryFile;
  /// Whether or not the AST stored in the source file is up-to-date or just an
  /// artifact of incremental syntax parsing
  bool HasUpToDateAST;
public:
  SwiftDocumentSyntaxInfo(const CompilerInvocation &CompInv,
                          ImmutableTextSnapshotRef Snapshot,
                          std::vector<std::string> &Args,
                          StringRef FilePath)
        : Args(Args), PrimaryFile(FilePath) {
    // Copy the snapshot text into a buffer owned by our private
    // SourceManager so parsing is independent of later edits.
    std::unique_ptr<llvm::MemoryBuffer> BufCopy =
      llvm::MemoryBuffer::getMemBufferCopy(
        Snapshot->getBuffer()->getText(), FilePath);
    BufferID = SM.addNewSourceBuffer(std::move(BufCopy));
    DiagConsumer.setInputBufferIDs(BufferID);
    if (CompInv.getLangOptions().BuildSyntaxTree) {
      RC<SyntaxArena> syntaxArena{new syntax::SyntaxArena()};
      SynTreeCreator = std::make_shared<SyntaxTreeCreator>(
          SM, BufferID, CompInv.getMainFileSyntaxParsingCache(), syntaxArena);
    }
    Parser.reset(
      new ParserUnit(SM, SourceFileKind::Main, BufferID,
                     CompInv.getLangOptions(),
                     CompInv.getTypeCheckerOptions(),
                     CompInv.getModuleName(),
                     SynTreeCreator,
                     CompInv.getMainFileSyntaxParsingCache())
    );
    registerParseRequestFunctions(Parser->getParser().Context.evaluator);
    registerTypeCheckerRequestFunctions(
        Parser->getParser().Context.evaluator);
    Parser->getDiagnosticEngine().addConsumer(DiagConsumer);
    // If there is a syntax parsing cache, incremental syntax parsing is
    // performed and thus the generated AST may not be up-to-date.
    HasUpToDateAST = CompInv.getMainFileSyntaxParsingCache() == nullptr;
  }
  /// Runs the parse and, if syntax-tree building is enabled, hands the
  /// resulting root to the tree creator.
  void parse() {
    auto root = Parser->parse();
    if (SynTreeCreator)
      SynTreeCreator->acceptSyntaxRoot(root, Parser->getSourceFile());
  }
  SourceFile &getSourceFile() {
    return Parser->getSourceFile();
  }
  unsigned getBufferID() {
    return BufferID;
  }
  const LangOptions &getLangOptions() {
    return Parser->getLangOptions();
  }
  SourceManager &getSourceManager() {
    return SM;
  }
  bool hasUpToDateAST() { return HasUpToDateAST; }
  /// Parser diagnostics produced for this document's buffer.
  ArrayRef<DiagnosticEntryInfo> getDiagnostics() {
    return DiagConsumer.getDiagnosticsForBuffer(BufferID);
  }
};
} // anonymous namespace
/// Returns the generation of the AST that produced the cached semantic
/// info, under the lock.
uint64_t SwiftDocumentSemanticInfo::getASTGeneration() const {
  llvm::sys::ScopedLock L(Mtx);
  return ASTGeneration;
}
/// Hands out the cached semantic tokens (adjusted to \p NewSnapshot and
/// consumed in the process — see takeSemanticTokens) and the semantic
/// diagnostics merged with \p ParserDiags, all under one lock acquisition.
void SwiftDocumentSemanticInfo::readSemanticInfo(
    ImmutableTextSnapshotRef NewSnapshot,
    std::vector<SwiftSemanticToken> &Tokens,
    Optional<std::vector<DiagnosticEntryInfo>> &Diags,
    ArrayRef<DiagnosticEntryInfo> ParserDiags) {
  llvm::sys::ScopedLock L(Mtx);
  Tokens = takeSemanticTokens(NewSnapshot);
  Diags = getSemanticDiagnostics(NewSnapshot, ParserDiags);
}
/// Moves the cached semantic tokens out (leaving the cache empty), after
/// remapping their offsets through every edit between the snapshot they were
/// computed for (TokSnapshot) and \p NewSnapshot. Tokens that overlap an
/// edited region are dropped; tokens after it are shifted by the edit delta.
std::vector<SwiftSemanticToken>
SwiftDocumentSemanticInfo::takeSemanticTokens(
    ImmutableTextSnapshotRef NewSnapshot) {
  llvm::sys::ScopedLock L(Mtx);
  if (SemaToks.empty())
    return {};
  // Adjust the position of the tokens.
  TokSnapshot->foreachReplaceUntil(NewSnapshot,
    [&](ReplaceImmutableTextUpdateRef Upd) -> bool {
      if (SemaToks.empty())
        return false;
      // First token whose end reaches the replaced region.
      auto ReplaceBegin = std::lower_bound(SemaToks.begin(), SemaToks.end(),
          Upd->getByteOffset(),
          [&](const SwiftSemanticToken &Tok, unsigned StartOffset) -> bool {
            return Tok.ByteOffset+Tok.Length < StartOffset;
          });
      std::vector<SwiftSemanticToken>::iterator ReplaceEnd;
      if (Upd->getLength() == 0) {
        // Pure insertion: no token is removed.
        ReplaceEnd = ReplaceBegin;
      } else {
        // First token starting strictly after the replaced region.
        ReplaceEnd = std::upper_bound(ReplaceBegin, SemaToks.end(),
            Upd->getByteOffset() + Upd->getLength(),
            [&](unsigned EndOffset, const SwiftSemanticToken &Tok) -> bool {
              return EndOffset < Tok.ByteOffset;
            });
      }
      // Shift everything after the edit by the size difference.
      unsigned InsertLen = Upd->getText().size();
      int Delta = InsertLen - Upd->getLength();
      if (Delta != 0) {
        for (std::vector<SwiftSemanticToken>::iterator
               I = ReplaceEnd, E = SemaToks.end(); I != E; ++I)
          I->ByteOffset += Delta;
      }
      SemaToks.erase(ReplaceBegin, ReplaceEnd);
      return true;
    });
  // Intentionally moves the member out; the cache is now empty ("take").
  return std::move(SemaToks);
}
/// Returns the cached semantic diagnostics merged with \p ParserDiags,
/// or None when the cached diagnostics were computed for a different
/// snapshot than \p NewSnapshot (i.e. they are stale).
Optional<std::vector<DiagnosticEntryInfo>>
SwiftDocumentSemanticInfo::getSemanticDiagnostics(
    ImmutableTextSnapshotRef NewSnapshot,
    ArrayRef<DiagnosticEntryInfo> ParserDiags) {
  std::vector<DiagnosticEntryInfo> curSemaDiags;
  {
    llvm::sys::ScopedLock L(Mtx);
    if (!DiagSnapshot || DiagSnapshot->getStamp() != NewSnapshot->getStamp()) {
      // The semantic diagnostics are out-of-date, ignore them.
      return llvm::None;
    }
    curSemaDiags = SemaDiags;
  }
  // Diagnostics from the AST and diagnostics from the parser are based on the
  // same source text snapshot. But diagnostics from the AST may have excluded
  // the parser diagnostics due to a fatal error, e.g. if the source has a
  // 'no such module' error, which will suppress other diagnostics.
  // We don't want to turn off the suppression to avoid a flood of diagnostics
  // when a module import fails, but we also don't want to lose the parser
  // diagnostics in such a case, so merge the parser diagnostics with the sema
  // ones.
  // Total order used for sorting, binary search, and de-duplication below.
  auto orderDiagnosticEntryInfos = [](const DiagnosticEntryInfo &LHS,
                                      const DiagnosticEntryInfo &RHS) -> bool {
    if (LHS.Filename != RHS.Filename)
      return LHS.Filename < RHS.Filename;
    if (LHS.Offset != RHS.Offset)
      return LHS.Offset < RHS.Offset;
    return LHS.Description < RHS.Description;
  };
  std::vector<DiagnosticEntryInfo> sortedParserDiags;
  sortedParserDiags.reserve(ParserDiags.size());
  sortedParserDiags.insert(sortedParserDiags.end(), ParserDiags.begin(),
                           ParserDiags.end());
  std::stable_sort(sortedParserDiags.begin(), sortedParserDiags.end(),
                   orderDiagnosticEntryInfos);
  std::vector<DiagnosticEntryInfo> finalDiags;
  finalDiags.reserve(sortedParserDiags.size()+curSemaDiags.size());
  // Add sema diagnostics unless it is an existing parser diagnostic.
  // Note that we want to merge and eliminate diagnostics from the 'sema' set
  // that also show up in the 'parser' set, but we don't want to remove
  // duplicate diagnostics from within the same set (e.g. duplicates existing in
  // the 'sema' set). We want to report the diagnostics as the compiler reported
  // them, even if there's some duplicate one. This is why we don't just do a
  // simple append/sort/keep-uniques step.
  for (const auto &curDE : curSemaDiags) {
    bool existsAsParserDiag = std::binary_search(sortedParserDiags.begin(),
                                                 sortedParserDiags.end(),
                                  curDE, orderDiagnosticEntryInfos);
    if (!existsAsParserDiag) {
      finalDiags.push_back(curDE);
    }
  }
  finalDiags.insert(finalDiags.end(),
                    sortedParserDiags.begin(), sortedParserDiags.end());
  std::stable_sort(finalDiags.begin(), finalDiags.end(),
                   orderDiagnosticEntryInfos);
  return finalDiags;
}
/// Installs freshly-computed semantic tokens and diagnostics, but only if
/// they come from a newer AST generation than what is already cached (a
/// stale async result is silently dropped). Always posts a document-update
/// notification afterwards, outside the lock.
void SwiftDocumentSemanticInfo::updateSemanticInfo(
    std::vector<SwiftSemanticToken> Toks,
    std::vector<DiagnosticEntryInfo> Diags,
    ImmutableTextSnapshotRef Snapshot,
    uint64_t ASTGeneration) {
  {
    llvm::sys::ScopedLock L(Mtx);
    if (ASTGeneration > this->ASTGeneration) {
      SemaToks = std::move(Toks);
      SemaDiags = std::move(Diags);
      // Both caches now correspond to the same snapshot.
      TokSnapshot = DiagSnapshot = std::move(Snapshot);
      this->ASTGeneration = ASTGeneration;
    }
  }
  LOG_INFO_FUNC(High, "posted document update notification for: " << Filename);
  NotificationCtr->postDocumentUpdateNotification(Filename);
}
namespace {
/// AST walker that records a SwiftSemanticToken for every declaration
/// reference it visits in the given buffer. Results accumulate in SemaToks.
class SemanticAnnotator : public SourceEntityWalker {
  SourceManager &SM;
  unsigned BufferID;
public:
  std::vector<SwiftSemanticToken> SemaToks;
  SemanticAnnotator(SourceManager &SM, unsigned BufferID)
    : SM(SM), BufferID(BufferID) {}
  bool visitDeclReference(ValueDecl *D, CharSourceRange Range,
                          TypeDecl *CtorTyRef, ExtensionDecl *ExtTyRef, Type T,
                          ReferenceMetaData Data) override {
    if (Data.isImplicit)
      return true;
    // Don't annotate uses of 'self' variables.
    if (isa<VarDecl>(D) && D->hasName() &&
        D->getFullName() == D->getASTContext().Id_self)
      return true;
    // Do not annotate references to unavailable decls.
    if (AvailableAttr::isUnavailable(D))
      return true;
    // For constructor calls, annotate the referenced type, not the init.
    if (CtorTyRef)
      D = CtorTyRef;
    annotate(D, /*IsRef=*/true, Range);
    return true;
  }
  bool visitSubscriptReference(ValueDecl *D, CharSourceRange Range,
                               ReferenceMetaData Data,
                               bool IsOpenBracket) override {
    // We should treat both open and close brackets equally
    return visitDeclReference(D, Range, nullptr, nullptr, Type(), Data);
  }
  /// Records one semantic token for \p D covering \p Range.
  void annotate(const Decl *D, bool IsRef, CharSourceRange Range) {
    unsigned ByteOffset = SM.getLocOffsetInBuffer(Range.getStart(), BufferID);
    unsigned Length = Range.getByteLength();
    auto Kind = CodeCompletionResult::getCodeCompletionDeclKind(D);
    bool IsSystem = D->getModuleContext()->isSystemModule();
    SemaToks.emplace_back(Kind, ByteOffset, Length, IsRef, IsSystem);
  }
};
} // anonymous namespace
namespace {
/// AST consumer that, when a (re)built AST for the document arrives, runs
/// SemanticAnnotator over the primary file and pushes the resulting tokens
/// and diagnostics into the document's SwiftDocumentSemanticInfo, guarding
/// against stale generations and racing edits.
class AnnotAndDiagASTConsumer : public SwiftASTConsumer {
  EditableTextBufferRef EditableBuffer;
  RefPtr<SwiftDocumentSemanticInfo> SemaInfoRef;
public:
  std::vector<SwiftSemanticToken> SemaToks;
  AnnotAndDiagASTConsumer(EditableTextBufferRef EditableBuffer,
                          RefPtr<SwiftDocumentSemanticInfo> SemaInfoRef)
    : EditableBuffer(std::move(EditableBuffer)),
      SemaInfoRef(std::move(SemaInfoRef)) { }
  void failed(StringRef Error) override {
    LOG_WARN_FUNC("sema annotations failed: " << Error);
  }
  void handlePrimaryAST(ASTUnitRef AstUnit) override {
    auto Generation = AstUnit->getGeneration();
    auto &CompIns = AstUnit->getCompilerInstance();
    auto &Consumer = AstUnit->getEditorDiagConsumer();
    assert(Generation);
    if (Generation < SemaInfoRef->getASTGeneration()) {
      // It may happen that this request was waiting in async queue for
      // too long so another thread has already updated this sema with
      // ast generation bigger than ASTGeneration
      return;
    }
    // Find the snapshot of our document that this AST was built from.
    ImmutableTextSnapshotRef DocSnapshot;
    for (auto &Snap : AstUnit->getSnapshots()) {
      if (Snap->getEditableBuffer() == EditableBuffer) {
        DocSnapshot = Snap;
        break;
      }
    }
    if (!DocSnapshot) {
      LOG_WARN_FUNC("did not find document snapshot when handling the AST");
      return;
    }
    if (Generation == SemaInfoRef->getASTGeneration()) {
      // Save time if we already know we processed this AST version.
      if (DocSnapshot->getStamp() != EditableBuffer->getSnapshot()->getStamp()){
        // Handle edits that occurred after we processed the AST.
        SemaInfoRef->processLatestSnapshotAsync(EditableBuffer);
      }
      return;
    }
    if (!AstUnit->getPrimarySourceFile().getBufferID().hasValue()) {
      LOG_WARN_FUNC("Primary SourceFile is expected to have a BufferID");
      return;
    }
    unsigned BufferID = AstUnit->getPrimarySourceFile().getBufferID().getValue();
    // Walk the AST collecting semantic tokens, then publish them together
    // with this AST's diagnostics.
    SemanticAnnotator Annotator(CompIns.getSourceMgr(), BufferID);
    Annotator.walk(AstUnit->getPrimarySourceFile());
    SemaToks = std::move(Annotator.SemaToks);
    SemaInfoRef->
      updateSemanticInfo(std::move(SemaToks),
                         std::move(Consumer.getDiagnosticsForBuffer(BufferID)),
                         DocSnapshot,
                         Generation);
    if (DocSnapshot->getStamp() != EditableBuffer->getSnapshot()->getStamp()) {
      // Handle edits that occurred after we processed the AST.
      SemaInfoRef->processLatestSnapshotAsync(EditableBuffer);
    }
  }
};
} // anonymous namespace
/// Kicks off an async AST build + semantic annotation pass for the latest
/// state of \p EditableBuffer. No-op if no compiler invocation is set.
void SwiftDocumentSemanticInfo::processLatestSnapshotAsync(
    EditableTextBufferRef EditableBuffer) {
  SwiftInvocationRef Invok = InvokRef;
  if (!Invok)
    return;
  RefPtr<SwiftDocumentSemanticInfo> SemaInfoRef = this;
  auto Consumer = std::make_shared<AnnotAndDiagASTConsumer>(EditableBuffer,
                                                            SemaInfoRef);
  // Semantic annotation queries for a particular document should cancel
  // previously queued queries for the same document. Each document has a
  // SwiftDocumentSemanticInfo pointer so use that for the token.
  const void *OncePerASTToken = SemaInfoRef.get();
  if (auto ASTMgr = this->ASTMgr.lock()) {
    ASTMgr->processASTAsync(Invok, std::move(Consumer), OncePerASTToken,
                            fileSystem);
  }
}
/// Pimpl state for SwiftEditorDocument: the editable text buffer, the
/// current syntax highlighting map and its affected range, parser
/// diagnostics, and the semantic-info object. AccessMtx guards shared
/// access from the editor queues.
struct SwiftEditorDocument::Implementation {
  std::weak_ptr<SwiftASTManager> ASTMgr;
  std::shared_ptr<NotificationCenter> NotificationCtr;
  const std::string FilePath;
  EditableTextBufferRef EditableBuffer;
  /// The list of syntax highlighted token offsets and ranges in the document
  SwiftSyntaxMap SyntaxMap;
  /// The minimal range of syntax highlighted tokens affected by the last edit
  llvm::Optional<SwiftEditorCharRange> AffectedRange;
  /// Whether the last operation was an edit rather than a document open
  bool Edited;
  /// The syntax tree of the document
  llvm::Optional<SourceFileSyntax> SyntaxTree;
  std::vector<DiagnosticEntryInfo> ParserDiagnostics;
  RefPtr<SwiftDocumentSemanticInfo> SemanticInfo;
  CodeFormatOptions FormatOptions;
  std::shared_ptr<SwiftDocumentSyntaxInfo> SyntaxInfo;
  /// Thread-safe accessor for the current syntax info.
  std::shared_ptr<SwiftDocumentSyntaxInfo> getSyntaxInfo() {
    llvm::sys::ScopedLock L(AccessMtx);
    return SyntaxInfo;
  }
  llvm::sys::Mutex AccessMtx;
  Implementation(StringRef FilePath, SwiftLangSupport &LangSupport,
                 CodeFormatOptions options,
                 llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> fileSystem)
      : ASTMgr(LangSupport.getASTManager()),
        NotificationCtr(LangSupport.getNotificationCenter()),
        FilePath(FilePath), FormatOptions(options) {
    assert(fileSystem);
    // This instance of semantic info is used if a document is opened with
    // `key.syntactic_only: 1`, but subsequently a semantic request such as
    // cursor_info is made.
    SemanticInfo = new SwiftDocumentSemanticInfo(
        FilePath, ASTMgr, NotificationCtr, fileSystem);
  }
};
namespace {
/// Maps a Swift AccessLevel to the corresponding SourceKit accessibility UID.
static UIdent getAccessLevelUID(AccessLevel Access) {
  static UIdent UIDOpen("source.lang.swift.accessibility.open");
  static UIdent UIDPublic("source.lang.swift.accessibility.public");
  static UIdent UIDInternal("source.lang.swift.accessibility.internal");
  static UIdent UIDFilePrivate("source.lang.swift.accessibility.fileprivate");
  static UIdent UIDPrivate("source.lang.swift.accessibility.private");

  switch (Access) {
  case AccessLevel::Open:
    return UIDOpen;
  case AccessLevel::Public:
    return UIDPublic;
  case AccessLevel::Internal:
    return UIDInternal;
  case AccessLevel::FilePrivate:
    return UIDFilePrivate;
  case AccessLevel::Private:
    return UIDPrivate;
  }
  llvm_unreachable("Unhandled access level in switch.");
}
/// Best-effort syntactic inference of the default member access for an
/// extension: only an explicit access-control attribute can be detected
/// without type-checking.
static Optional<AccessLevel>
inferDefaultAccessSyntactically(const ExtensionDecl *ED) {
  auto *Attr = ED->getAttrs().getAttribute<AccessControlAttr>();
  if (!Attr)
    return None;
  // Clamp the written access to the [fileprivate, public] range.
  AccessLevel Level = std::max(Attr->getAccess(), AccessLevel::FilePrivate);
  return std::min(Level, AccessLevel::Public);
}
/// Document structure is a purely syntactic request that shouldn't require name lookup
/// or type-checking, so this is a best-effort computation, particularly where extensions
/// are concerned.
static Optional<AccessLevel> inferAccessSyntactically(const ValueDecl *D) {
assert(D);
// Check if the decl has an explicit access control attribute.
if (auto *AA = D->getAttrs().getAttribute<AccessControlAttr>())
return AA->getAccess();
DeclContext *DC = D->getDeclContext();
// Destructors and enum elements take their container's access (at least
// internal); without a nominal container, fall back to private.
if (D->getKind() == DeclKind::Destructor ||
D->getKind() == DeclKind::EnumElement) {
if (auto container = dyn_cast<NominalTypeDecl>(D->getDeclContext())) {
if (auto containerAccess = inferAccessSyntactically(container))
return std::max(containerAccess.getValue(), AccessLevel::Internal);
return None;
}
return AccessLevel::Private;
}
// No explicit attribute: infer a default from the enclosing context kind.
switch (DC->getContextKind()) {
case DeclContextKind::TopLevelCodeDecl:
return AccessLevel::FilePrivate;
case DeclContextKind::SerializedLocal:
case DeclContextKind::AbstractClosureExpr:
case DeclContextKind::EnumElementDecl:
case DeclContextKind::Initializer:
case DeclContextKind::AbstractFunctionDecl:
case DeclContextKind::SubscriptDecl:
return AccessLevel::Private;
case DeclContextKind::Module:
case DeclContextKind::FileUnit:
return AccessLevel::Internal;
case DeclContextKind::GenericTypeDecl: {
auto generic = cast<GenericTypeDecl>(DC);
AccessLevel access = AccessLevel::Internal;
// Protocol members default to at least fileprivate, bounded below by the
// protocol's own (inferred) access.
if (isa<ProtocolDecl>(generic)) {
if (auto protoAccess = inferAccessSyntactically(generic))
access = std::max(AccessLevel::FilePrivate, protoAccess.getValue());
}
return access;
}
case DeclContextKind::ExtensionDecl:
auto *ED = cast<ExtensionDecl>(DC);
return inferDefaultAccessSyntactically(ED);
}
llvm_unreachable("Unhandled DeclContextKind in switch.");
}
/// Document structure is a purely syntactic request that shouldn't require
/// name lookup or type-checking, so this is a best-effort syntactic guess at
/// whether the storage is settable.
static bool inferIsSettableSyntactically(const AbstractStorageDecl *D) {
  // 'let' bindings are never settable.
  auto *Var = dyn_cast<VarDecl>(D);
  if (Var && Var->isLet())
    return false;
  // With no parsed accessors this is plain stored storage, which is settable.
  if (!D->hasParsedAccessors())
    return true;
  // Otherwise require an explicit setter or observing accessors.
  return D->hasObservers() ||
         D->getParsedAccessor(AccessorKind::Set) != nullptr;
}
/// Best-effort syntactic inference of the setter's access level; None when
/// the storage is not settable at all.
static Optional<AccessLevel>
inferSetterAccessSyntactically(const AbstractStorageDecl *D) {
  if (!inferIsSettableSyntactically(D))
    return None;
  // An explicit setter-access attribute wins; otherwise the setter defaults
  // to the access of the storage itself.
  auto *Attr = D->getAttrs().getAttribute<SetterAccessAttr>();
  if (Attr)
    return Attr->getAccess();
  return inferAccessSyntactically(D);
}
/// SyntaxModelWalker that reports the document structure — substructure
/// nodes, their elements, and comment markers — to an EditorConsumer,
/// translating all source locations into byte offsets within BufferID.
class SwiftDocumentStructureWalker: public ide::SyntaxModelWalker {
SourceManager &SrcManager;
EditorConsumer &Consumer;
unsigned BufferID;
public:
SwiftDocumentStructureWalker(SourceManager &SrcManager,
unsigned BufferID,
EditorConsumer &Consumer)
: SrcManager(SrcManager), Consumer(Consumer), BufferID(BufferID) { }
// Reports one substructure node (offsets, name/body/doc ranges, access
// levels, inherited types, attributes) and its elements to the Consumer.
bool walkToSubStructurePre(SyntaxStructureNode Node) override {
unsigned StartOffset = SrcManager.getLocOffsetInBuffer(Node.Range.getStart(),
BufferID);
unsigned EndOffset = SrcManager.getLocOffsetInBuffer(Node.Range.getEnd(),
BufferID);
unsigned NameStart;
unsigned NameEnd;
if (Node.NameRange.isValid()) {
NameStart = SrcManager.getLocOffsetInBuffer(Node.NameRange.getStart(),
BufferID);
NameEnd = SrcManager.getLocOffsetInBuffer(Node.NameRange.getEnd(),
BufferID);
}
else {
// 0/0 acts as the "no range" sentinel passed to the consumer.
NameStart = NameEnd = 0;
}
unsigned BodyOffset;
unsigned BodyEnd;
if (Node.BodyRange.isValid()) {
BodyOffset = SrcManager.getLocOffsetInBuffer(Node.BodyRange.getStart(),
BufferID);
BodyEnd = SrcManager.getLocOffsetInBuffer(Node.BodyRange.getEnd(),
BufferID);
}
else {
BodyOffset = BodyEnd = 0;
}
unsigned DocOffset = 0;
unsigned DocEnd = 0;
if (Node.DocRange.isValid()) {
DocOffset = SrcManager.getLocOffsetInBuffer(Node.DocRange.getStart(),
BufferID);
DocEnd = SrcManager.getLocOffsetInBuffer(Node.DocRange.getEnd(),
BufferID);
}
UIdent Kind = SwiftLangSupport::getUIDForSyntaxStructureKind(Node.Kind);
UIdent AccessLevel;
UIdent SetterAccessLevel;
// No access-level UIDs are computed for parameters, local variables, or
// generic type parameters; the UIdents stay invalid in that case.
if (Node.Kind != SyntaxStructureKind::Parameter &&
Node.Kind != SyntaxStructureKind::LocalVariable &&
Node.Kind != SyntaxStructureKind::GenericTypeParam) {
if (auto *VD = dyn_cast_or_null<ValueDecl>(Node.Dcl)) {
if (auto Access = inferAccessSyntactically(VD))
AccessLevel = getAccessLevelUID(Access.getValue());
} else if (auto *ED = dyn_cast_or_null<ExtensionDecl>(Node.Dcl)) {
if (auto DefaultAccess = inferDefaultAccessSyntactically(ED))
AccessLevel = getAccessLevelUID(DefaultAccess.getValue());
}
if (auto *ASD = dyn_cast_or_null<AbstractStorageDecl>(Node.Dcl)) {
if (auto SetAccess = inferSetterAccessSyntactically(ASD))
SetterAccessLevel = getAccessLevelUID(SetAccess.getValue());
}
}
// Inherited type names are reported as the literal source text.
SmallVector<StringRef, 4> InheritedNames;
if (!Node.InheritedTypeRanges.empty()) {
for (auto &TR : Node.InheritedTypeRanges) {
InheritedNames.push_back(SrcManager.extractText(TR));
}
}
StringRef TypeName;
if (Node.TypeRange.isValid()) {
TypeName = SrcManager.extractText(Node.TypeRange);
}
SmallString<64> DisplayNameBuf;
StringRef DisplayName;
if (auto ValueD = dyn_cast_or_null<ValueDecl>(Node.Dcl)) {
llvm::raw_svector_ostream OS(DisplayNameBuf);
if (!SwiftLangSupport::printDisplayName(ValueD, OS))
DisplayName = OS.str();
}
else if (Node.NameRange.isValid()) {
// No decl to print; fall back to the literal source text of the name.
DisplayName = SrcManager.extractText(Node.NameRange);
}
SmallString<64> RuntimeNameBuf;
StringRef RuntimeName = getObjCRuntimeName(Node.Dcl, RuntimeNameBuf);
SmallString<64> SelectorNameBuf;
StringRef SelectorName = getObjCSelectorName(Node.Dcl, SelectorNameBuf);
// Collect a (UID, offset, length) tuple for each attribute that has a UID
// mapping; offset/length are 0 when the attribute range is invalid.
std::vector<std::tuple<UIdent, unsigned, unsigned>> Attrs;
for (auto Attr : Node.Attrs) {
if (auto AttrUID = SwiftLangSupport::getUIDForDeclAttribute(Attr)) {
unsigned AttrOffset = 0;
unsigned AttrEnd = 0;
auto AttrRange = Attr->getRangeWithAt();
if (AttrRange.isValid()) {
auto CharRange = Lexer::getCharSourceRangeFromSourceRange(SrcManager,
AttrRange);
AttrOffset = SrcManager.getLocOffsetInBuffer(CharRange.getStart(),
BufferID);
AttrEnd = SrcManager.getLocOffsetInBuffer(CharRange.getEnd(),
BufferID);
}
auto AttrTuple = std::make_tuple(AttrUID.getValue(), AttrOffset,
AttrEnd - AttrOffset);
Attrs.push_back(AttrTuple);
}
}
Consumer.beginDocumentSubStructure(StartOffset, EndOffset - StartOffset,
Kind, AccessLevel, SetterAccessLevel,
NameStart, NameEnd - NameStart,
BodyOffset, BodyEnd - BodyOffset,
DocOffset, DocEnd - DocOffset,
DisplayName,
TypeName, RuntimeName,
SelectorName,
InheritedNames, Attrs);
// Report the node's elements (skipping those without a valid range).
for (const auto &Elem : Node.Elements) {
if (Elem.Range.isInvalid())
continue;
UIdent Kind = SwiftLangSupport::getUIDForSyntaxStructureElementKind(Elem.Kind);
unsigned Offset = SrcManager.getLocOffsetInBuffer(Elem.Range.getStart(),
BufferID);
unsigned Length = Elem.Range.getByteLength();
Consumer.handleDocumentSubStructureElement(Kind, Offset, Length);
}
return true;
}
StringRef getObjCRuntimeName(const Decl *D, SmallString<64> &Buf) {
// We only report runtime name for classes and protocols with an explicitly
// defined ObjC name, i.e. those that have @objc("SomeName")
if (D && (isa<ClassDecl>(D) || isa<ProtocolDecl>(D))) {
auto *ObjCNameAttr = D->getAttrs().getAttribute<ObjCAttr>();
if (ObjCNameAttr && ObjCNameAttr->hasName())
return ObjCNameAttr->getName()->getString(Buf);
}
return StringRef();
}
StringRef getObjCSelectorName(const Decl *D, SmallString<64> &Buf) {
// We only vend the selector name for @IBAction and @IBSegueAction methods.
if (auto FuncD = dyn_cast_or_null<FuncDecl>(D)) {
if (FuncD->getAttrs().hasAttribute<IBActionAttr>() ||
FuncD->getAttrs().hasAttribute<IBSegueActionAttr>())
return FuncD->getObjCSelector().getString(Buf);
}
return StringRef();
}
bool walkToSubStructurePost(SyntaxStructureNode Node) override {
Consumer.endDocumentSubStructure();
return true;
}
// Plain syntax nodes are ignored here except comment markers, which are
// reported as substructure with empty name/body/doc information.
bool walkToNodePre(SyntaxNode Node) override {
if (Node.Kind != SyntaxNodeKind::CommentMarker)
return false;
unsigned StartOffset = SrcManager.getLocOffsetInBuffer(Node.Range.getStart(),
BufferID);
unsigned EndOffset = SrcManager.getLocOffsetInBuffer(Node.Range.getEnd(),
BufferID);
UIdent Kind = SwiftLangSupport::getUIDForSyntaxNodeKind(Node.Kind);
Consumer.beginDocumentSubStructure(StartOffset, EndOffset - StartOffset,
Kind, UIdent(), UIdent(), 0, 0,
0, 0, 0, 0,
StringRef(),
StringRef(), StringRef(),
StringRef(),
{}, {});
return true;
}
bool walkToNodePost(SyntaxNode Node) override {
if (Node.Kind != SyntaxNodeKind::CommentMarker)
return true;
Consumer.endDocumentSubStructure();
return true;
}
};
/// Walks the syntax model, recording syntax-highlighting token ranges into a
/// SwiftSyntaxMap and forwarding document-structure events to the given
/// EditorConsumer via a nested SwiftDocumentStructureWalker.
class SwiftEditorSyntaxWalker : public ide::SyntaxModelWalker {
  /// Receives the highlighted token ranges.
  SwiftSyntaxMap &SyntaxMap;
  SourceManager &SrcManager;
  unsigned BufferID;
  /// Handles the document-structure half of the walk.
  SwiftDocumentStructureWalker DocStructureWalker;
  /// Current token nesting depth (e.g. a field inside a doc comment).
  unsigned NestingLevel = 0;

public:
  SwiftEditorSyntaxWalker(SwiftSyntaxMap &SyntaxMap,
                          SourceManager &SrcManager, EditorConsumer &Consumer,
                          unsigned BufferID)
      : SyntaxMap(SyntaxMap), SrcManager(SrcManager), BufferID(BufferID),
        DocStructureWalker(SrcManager, BufferID, Consumer) {}

  bool walkToNodePre(SyntaxNode Node) override {
    // Comment markers belong to document structure, not highlighting.
    if (Node.Kind == SyntaxNodeKind::CommentMarker)
      return DocStructureWalker.walkToNodePre(Node);

    ++NestingLevel;
    unsigned Start =
        SrcManager.getLocOffsetInBuffer(Node.Range.getStart(), BufferID);
    unsigned End =
        SrcManager.getLocOffsetInBuffer(Node.Range.getEnd(), BufferID);
    if (NestingLevel > 1) {
      // Nested inside the previously reported token — merge into it.
      SyntaxMap.mergeToken({Start, End - Start, Node.Kind});
    } else {
      // A top-level token: append after the previous one.
      SyntaxMap.addToken({Start, End - Start, Node.Kind});
    }
    return true;
  }

  bool walkToNodePost(SyntaxNode Node) override {
    if (Node.Kind == SyntaxNodeKind::CommentMarker)
      return DocStructureWalker.walkToNodePost(Node);
    --NestingLevel;
    return true;
  }

  // Structure events are handled entirely by the nested walker.
  bool walkToSubStructurePre(SyntaxStructureNode Node) override {
    return DocStructureWalker.walkToSubStructurePre(Node);
  }

  bool walkToSubStructurePost(SyntaxStructureNode Node) override {
    return DocStructureWalker.walkToSubStructurePost(Node);
  }
};
/// Scans the AST around an editor placeholder (<#...#>) to work out whether
/// it is a closure-typed placeholder and, if so, whether it can be expanded
/// using trailing-closure syntax.
class PlaceholderExpansionScanner {
public:
// One closure parameter: the written name range (may be invalid when the
// parameter is unnamed) and the written type range.
struct Param {
CharSourceRange NameRange;
CharSourceRange TypeRange;
Param(CharSourceRange NameRange, CharSourceRange TypeRange)
:NameRange(NameRange), TypeRange(TypeRange) { }
};
private:
// Parameter and return-type ranges collected from a closure type.
struct ClosureInfo {
std::vector<Param> Params;
CharSourceRange ReturnTypeRange;
};
SourceManager &SM;
// Filled in by scanClosureType() for the placeholder being expanded.
ClosureInfo TargetClosureInfo;
// The placeholder expression found at the requested location, if any.
EditorPlaceholderExpr *PHE = nullptr;
// ASTWalker that locates the EditorPlaceholderExpr starting at
// PlaceholderLoc and stores it in Found.
class PlaceholderFinder: public ASTWalker {
SourceLoc PlaceholderLoc;
EditorPlaceholderExpr *&Found;
public:
PlaceholderFinder(SourceLoc PlaceholderLoc,
EditorPlaceholderExpr *&Found)
: PlaceholderLoc(PlaceholderLoc), Found(Found) {
}
std::pair<bool, Expr *> walkToExprPre(Expr *E) override {
if (isa<EditorPlaceholderExpr>(E) && E->getStartLoc() == PlaceholderLoc) {
Found = cast<EditorPlaceholderExpr>(E);
return { false, nullptr };
}
return { true, E };
}
bool walkToDeclPre(Decl *D) override {
if (auto *ICD = dyn_cast<IfConfigDecl>(D)) {
// The base walker assumes the content of active IfConfigDecl clauses
// has been injected into the parent context and will be walked there.
// This doesn't hold for pre-typechecked ASTs and we need to find
// placeholders in inactive clauses anyway, so walk them here.
for (auto Clause: ICD->getClauses()) {
for (auto Elem: Clause.Elements) {
Elem.walk(*this);
}
}
return false;
}
return true;
}
};
// Extracts parameter name/type ranges and the return-type range from the
// first FunctionTypeRepr encountered during the walk.
class ClosureTypeWalker: public ASTWalker {
SourceManager &SM;
ClosureInfo &Info;
public:
bool FoundFunctionTypeRepr = false;
explicit ClosureTypeWalker(SourceManager &SM, ClosureInfo &Info) : SM(SM),
Info(Info) { }
bool walkToTypeReprPre(TypeRepr *T) override {
if (auto *FTR = dyn_cast<FunctionTypeRepr>(T)) {
FoundFunctionTypeRepr = true;
for (auto &ArgElt : FTR->getArgsTypeRepr()->getElements()) {
CharSourceRange NR;
CharSourceRange TR;
auto name = ArgElt.Name;
if (!name.empty()) {
NR = CharSourceRange(ArgElt.NameLoc,
name.getLength());
}
SourceLoc SRE = Lexer::getLocForEndOfToken(SM,
ArgElt.Type->getEndLoc());
TR = CharSourceRange(SM, ArgElt.Type->getStartLoc(), SRE);
Info.Params.emplace_back(NR, TR);
}
if (auto *RTR = FTR->getResultTypeRepr()) {
SourceLoc SRE = Lexer::getLocForEndOfToken(SM, RTR->getEndLoc());
Info.ReturnTypeRange = CharSourceRange(SM, RTR->getStartLoc(), SRE);
}
}
// Stop descending once a function type has been found.
return !FoundFunctionTypeRepr;
}
bool walkToTypeReprPost(TypeRepr *T) override {
// If we just visited the FunctionTypeRepr, end traversal.
return !FoundFunctionTypeRepr;
}
};
// Whether E contains a function type, either as a placeholder's expansion
// type or written directly. Used to rule out trailing-closure expansion
// when a non-last argument already contains a closure.
bool containClosure(Expr *E) {
if (E->getStartLoc().isInvalid())
return false;
EditorPlaceholderExpr *Found = nullptr;
ClosureInfo Info;
ClosureTypeWalker ClosureWalker(SM, Info);
PlaceholderFinder Finder(E->getStartLoc(), Found);
E->walk(Finder);
if (Found) {
if (auto TR = Found->getTypeLoc().getTypeRepr()) {
TR->walk(ClosureWalker);
return ClosureWalker.FoundFunctionTypeRepr;
}
}
E->walk(ClosureWalker);
return ClosureWalker.FoundFunctionTypeRepr;
}
// Finds the placeholder at PlaceholderLoc and, if it has a type for
// expansion, collects its parameter/return ranges into TargetClosureInfo.
// Returns true only when that type is a function type.
bool scanClosureType(SourceFile &SF, SourceLoc PlaceholderLoc) {
TargetClosureInfo.Params.clear();
TargetClosureInfo.ReturnTypeRange = CharSourceRange();
PlaceholderFinder Finder(PlaceholderLoc, PHE);
SF.walk(Finder);
if (!PHE || !PHE->getTypeForExpansion())
return false;
ClosureTypeWalker PW(SM, TargetClosureInfo);
PHE->getTypeForExpansion()->walk(PW);
return PW.FoundFunctionTypeRepr;
}
/// Finds the enclosing CallExpr, and indicates whether it should be further
/// considered a candidate for application of trailing closure.
/// For example, if the CallExpr is enclosed in another expression or statement
/// such as "outer(inner(<#closure#>))", or "if inner(<#closure#>)", then trailing
/// closure should not be applied to the inner call.
std::pair<Expr*, bool> enclosingCallExprArg(SourceFile &SF, SourceLoc SL) {
class CallExprFinder : public SourceEntityWalker {
public:
const SourceManager &SM;
SourceLoc TargetLoc;
// Innermost call (or unresolved member) containing TargetLoc, paired
// with its argument expression.
std::pair<Expr *, Expr*> EnclosingCallAndArg;
// Set when an enclosing expression/statement disqualifies the call from
// trailing-closure expansion.
Expr *OuterExpr;
Stmt *OuterStmt;
explicit CallExprFinder(const SourceManager &SM)
:SM(SM) { }
bool checkCallExpr(Expr *E) {
Expr* Arg = nullptr;
if (auto *CE = dyn_cast<CallExpr>(E)) {
// Call expression can have argument.
Arg = CE->getArg();
} else if (auto UME = dyn_cast<UnresolvedMemberExpr>(E)) {
// Unresolved member can have argument too.
Arg = UME->getArgument();
}
if (!Arg)
return false;
if (EnclosingCallAndArg.first)
OuterExpr = EnclosingCallAndArg.first;
EnclosingCallAndArg = {E, Arg};
return true;
}
bool walkToExprPre(Expr *E) override {
auto SR = E->getSourceRange();
if (SR.isValid() && SM.rangeContainsTokenLoc(SR, TargetLoc)) {
if (auto closure = dyn_cast<ClosureExpr>(E)) {
if (closure->hasSingleExpressionBody()) {
// Treat a single-expression body like a brace statement and reset
// the enclosing context. Note: when the placeholder is the whole
// body it is handled specially as wrapped in braces by
// shouldUseTrailingClosureInTuple().
auto SR = closure->getSingleExpressionBody()->getSourceRange();
if (SR.isValid() && SR.Start != TargetLoc &&
SM.rangeContainsTokenLoc(SR, TargetLoc)) {
OuterStmt = nullptr;
OuterExpr = nullptr;
EnclosingCallAndArg = {nullptr, nullptr};
return true;
}
}
}
if (!checkCallExpr(E) && !EnclosingCallAndArg.first) {
OuterExpr = E;
}
}
return true;
}
bool walkToExprPost(Expr *E) override {
if (E->getStartLoc() == TargetLoc)
return false; // found what we needed to find, stop walking.
return true;
}
bool walkToStmtPre(Stmt *S) override {
auto SR = S->getSourceRange();
if (SR.isValid() && SM.rangeContainsTokenLoc(SR, TargetLoc)) {
// A statement inside an expression - e.g. `foo({ if ... })` - resets
// the enclosing context.
OuterExpr = nullptr;
EnclosingCallAndArg = {nullptr, nullptr};
switch (S->getKind()) {
case StmtKind::Brace:
case StmtKind::Return:
case StmtKind::Yield:
case StmtKind::Throw:
// A trailing closure is allowed in these statements.
OuterStmt = nullptr;
break;
default:
OuterStmt = S;
break;
}
}
return true;
}
bool shouldWalkInactiveConfigRegion() override { return true; }
// Resets state, walks SF, and returns the argument expression of the
// innermost enclosing call (or nullptr).
Expr *findEnclosingCallArg(SourceFile &SF, SourceLoc SL) {
EnclosingCallAndArg = {nullptr, nullptr};
OuterExpr = nullptr;
OuterStmt = nullptr;
TargetLoc = SL;
walk(SF);
return EnclosingCallAndArg.second;
}
};
CallExprFinder CEFinder(SM);
auto *CE = CEFinder.findEnclosingCallArg(SF, SL);
// Trailing closure is only a candidate when there is an enclosing call and
// it is not nested in a disqualifying expression or statement.
if (!CE)
return std::make_pair(CE, false);
if (CEFinder.OuterExpr)
return std::make_pair(CE, false);
if (CEFinder.OuterStmt)
return std::make_pair(CE, false);
return std::make_pair(CE, true);
}
// Decides whether trailing-closure expansion applies when the placeholder
// is one element of a call's argument tuple; sets isWrappedWithBraces when
// the placeholder was already written inside a closure body.
bool shouldUseTrailingClosureInTuple(TupleExpr *TE,
SourceLoc PlaceHolderStartLoc,
bool &isWrappedWithBraces) {
if (TE->getElements().empty())
return false;
for (unsigned I = 0, N = TE->getNumElements(); I < N; ++ I) {
bool IsLast = I == N - 1;
Expr *E = TE->getElement(I);
// Placeholders wrapped in braces {<#T##() -> Int#>} can also
// be valid for trailing syntax.
if (auto CE = dyn_cast<ClosureExpr>(E)) {
if (CE->hasSingleExpressionBody() &&
CE->getSingleExpressionBody()->getStartLoc()
== PlaceHolderStartLoc) {
// We found the placeholder.
isWrappedWithBraces = true;
return IsLast;
}
} else if (IsLast) {
return E->getStartLoc() == PlaceHolderStartLoc;
} else if (containClosure(E)) {
// A closure in a non-last argument rules out trailing syntax.
return false;
}
}
return false;
}
public:
explicit PlaceholderExpansionScanner(SourceManager &SM) : SM(SM) { }
/// Retrieves the parameter list, return type and context info for
/// a typed completion placeholder in a function call.
/// For example: foo.bar(aaa, <#T##(Int, Int) -> Bool#>).
bool scan(SourceFile &SF, unsigned BufID, unsigned Offset,
unsigned Length, std::function<void(Expr *Args,
bool UseTrailingClosure,
bool isWrappedWithBraces,
ArrayRef<Param>,
CharSourceRange)> Callback,
std::function<bool(EditorPlaceholderExpr*)> NonClosureCallback) {
SourceLoc PlaceholderStartLoc = SM.getLocForOffset(BufID, Offset);
// See if the placeholder is encapsulated with an EditorPlaceholderExpr
// and retrieve parameter and return type ranges.
if (!scanClosureType(SF, PlaceholderStartLoc)) {
return NonClosureCallback(PHE);
}
// Now we need to see if we can suggest trailing closure expansion,
// and if the call parens can be removed in that case.
// We'll first find the enclosing CallExpr, and then do further analysis.
bool UseTrailingClosure = false;
bool isWrappedWithBraces = false;
auto ECE = enclosingCallExprArg(SF, PlaceholderStartLoc);
Expr *Args = ECE.first;
if (Args && ECE.second) {
if (isa<ParenExpr>(Args)) {
UseTrailingClosure = true;
} else if (auto *TE = dyn_cast<TupleExpr>(Args)) {
UseTrailingClosure = shouldUseTrailingClosureInTuple(
TE, PlaceholderStartLoc,
isWrappedWithBraces);
}
}
Callback(Args, UseTrailingClosure, isWrappedWithBraces,
TargetClosureInfo.Params,
TargetClosureInfo.ReturnTypeRange);
return true;
}
};
} // anonymous namespace
/// Creates an editor document; all state lives in the heap-allocated
/// Implementation, which this object owns.
SwiftEditorDocument::SwiftEditorDocument(
StringRef FilePath, SwiftLangSupport &LangSupport,
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> fileSystem,
CodeFormatOptions Options)
:Impl(*new Implementation(FilePath, LangSupport, Options, fileSystem)) { }
SwiftEditorDocument::~SwiftEditorDocument()
{
// Impl is a reference to a heap allocation owned by this object.
delete &Impl;
}
/// (Re)initializes the document's text from Buf, resetting the syntax map
/// and marking the whole buffer as affected. Creates semantic info when
/// semantic requests are expected or compiler args were supplied, and
/// returns the initial snapshot.
ImmutableTextSnapshotRef SwiftEditorDocument::initializeText(
llvm::MemoryBuffer *Buf, ArrayRef<const char *> Args,
bool ProvideSemanticInfo,
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> fileSystem) {
llvm::sys::ScopedLock L(Impl.AccessMtx);
Impl.Edited = false;
Impl.EditableBuffer =
new EditableTextBuffer(Impl.FilePath, Buf->getBuffer());
// Reset the syntax map data and affected range
Impl.SyntaxMap.Tokens.clear();
Impl.AffectedRange = {0, static_cast<unsigned>(Buf->getBufferSize())};
// Try to create a compiler invocation object if needing semantic info
// or it's syntactic-only but with passed-in compiler arguments.
if (ProvideSemanticInfo || !Args.empty()) {
Impl.SemanticInfo = new SwiftDocumentSemanticInfo(
Impl.FilePath, Impl.ASTMgr, Impl.NotificationCtr, fileSystem);
Impl.SemanticInfo->setCompilerArgs(Args);
}
return Impl.EditableBuffer->getSnapshot();
}
/// Kicks off asynchronous semantic processing of the latest snapshot, if
/// this document has semantic info at all.
static void updateSemaInfo(RefPtr<SwiftDocumentSemanticInfo> SemanticInfo,
                           EditableTextBufferRef EditableBuffer) {
  // Purely syntactic documents have nothing to update.
  if (!SemanticInfo)
    return;
  SemanticInfo->processLatestSnapshotAsync(EditableBuffer);
}
/// Applies an edit replacing [Offset, Offset+Length) with the contents of
/// Buf and returns the resulting snapshot (nullptr, with a message in
/// `error`, if the range is out of bounds). Kicks off asynchronous semantic
/// re-processing when ProvideSemanticInfo is set and the edit isn't a no-op.
ImmutableTextSnapshotRef SwiftEditorDocument::replaceText(
unsigned Offset, unsigned Length, llvm::MemoryBuffer *Buf,
bool ProvideSemanticInfo, std::string &error) {
ImmutableTextSnapshotRef Snapshot;
EditableTextBufferRef EditableBuffer;
RefPtr<SwiftDocumentSemanticInfo> SemanticInfo;
{
llvm::sys::ScopedLock L(Impl.AccessMtx);
EditableBuffer = Impl.EditableBuffer;
SemanticInfo = Impl.SemanticInfo;
// Validate offset and length.
if ((Offset + Length) > EditableBuffer->getSize()) {
error = "'offset' + 'length' is out of range";
return nullptr;
}
Impl.Edited = true;
llvm::StringRef Str = Buf->getBuffer();
// Update the buffer itself
Snapshot = EditableBuffer->replace(Offset, Length, Str);
// Update the old syntax map offsets to account for the replaced range.
// Also set the initial AffectedRange to cover any tokens that
// the replaced range intersected. This allows for clients that split
// multi-line tokens at line boundaries, and ensure all parts of these tokens
// will be cleared.
Impl.AffectedRange =
Impl.SyntaxMap.adjustForReplacement(Offset, Length, Str.size());
// We need to release `AccessMtx` before calling into the ASTManager, since
// it may call back to the editor for document state.
}
if (ProvideSemanticInfo) {
// If this is not a no-op, update semantic info.
if (Length != 0 || Buf->getBufferSize() != 0) {
::updateSemaInfo(SemanticInfo, EditableBuffer);
// FIXME: we should also update any "interesting" ASTs that depend on this
// document here, e.g. any ASTs for files visible in an editor. However,
// because our API conflates this with any file with unsaved changes we do
// not update all open documents, since there could be too many of them.
}
}
return Snapshot;
}
void SwiftEditorDocument::updateSemaInfo() {
Impl.AccessMtx.lock();
auto EditableBuffer = Impl.EditableBuffer;
auto SemanticInfo = Impl.SemanticInfo;
// We need to release `AccessMtx` before calling into the ASTManager, since it
// may call back to the editor for document state.
Impl.AccessMtx.unlock();
::updateSemaInfo(SemanticInfo, EditableBuffer);
}
/// Parses Snapshot (syntax only), storing the result in Impl.SyntaxInfo.
/// Uses the semantic-info compiler invocation when available, otherwise a
/// minimal fallback invocation that reads from stdin.
void SwiftEditorDocument::parse(ImmutableTextSnapshotRef Snapshot,
                                SwiftLangSupport &Lang, bool BuildSyntaxTree,
                                SyntaxParsingCache *SyntaxCache) {
  llvm::sys::ScopedLock L(Impl.AccessMtx);
  assert(Impl.SemanticInfo && "Impl.SemanticInfo must be set");
  std::vector<std::string> Args;
  std::string PrimaryFile; // Ignored, Impl.FilePath will be used
  CompilerInvocation CompInv;
  if (Impl.SemanticInfo->getInvocation()) {
    Impl.SemanticInfo->getInvocation()->applyTo(CompInv);
    Impl.SemanticInfo->getInvocation()->raw(Args, PrimaryFile);
  } else {
    // Use stdin as a .swift input to satisfy the driver. Note that we don't
    // use Impl.FilePath here because it may be an invalid filename for the
    // driver like "" or "-foobar".
    // (Renamed from 'Args': the previous name shadowed the outer vector.)
    SmallVector<const char *, 1> FallbackArgs;
    FallbackArgs.push_back("-");
    std::string Error;
    // Ignore possible error(s)
    Lang.getASTManager()->
      initCompilerInvocation(CompInv, FallbackArgs, StringRef(), Error);
  }
  CompInv.getLangOptions().BuildSyntaxTree = BuildSyntaxTree;
  CompInv.setMainFileSyntaxParsingCache(SyntaxCache);
  // When reusing parts of the syntax tree from a SyntaxParsingCache, not
  // all tokens are visited and thus token collection is invalid.
  CompInv.getLangOptions().CollectParsedToken = (SyntaxCache == nullptr);
  // Access to Impl.SyntaxInfo is guarded by Impl.AccessMtx
  Impl.SyntaxInfo.reset(
    new SwiftDocumentSyntaxInfo(CompInv, Snapshot, Args, Impl.FilePath));
  Impl.SyntaxInfo->parse();
}
/// Publishes the results of the most recent parse() to the Consumer: either
/// the full syntax tree (when enabled) or the syntax-highlighting map plus
/// document structure.
void SwiftEditorDocument::readSyntaxInfo(EditorConsumer &Consumer) {
llvm::sys::ScopedLock L(Impl.AccessMtx);
Impl.ParserDiagnostics = Impl.SyntaxInfo->getDiagnostics();
SwiftSyntaxMap NewMap = SwiftSyntaxMap(Impl.SyntaxMap.Tokens.size() + 16);
if (Consumer.syntaxTreeEnabled()) {
auto SyntaxTree = Impl.SyntaxInfo->getSourceFile().getSyntaxRoot();
Impl.SyntaxTree.emplace(SyntaxTree);
// The syntax tree is mutually exclusive with the syntax map and the
// document structure; report an error if both were requested.
if (Consumer.syntaxMapEnabled()) {
Consumer.handleRequestError(
"Retrieving both a syntax map and a syntax tree at the same time is "
"not supported. Use the SyntaxClassifier in swiftSyntax to generate "
"the syntax map on the Swift side.");
}
if (Consumer.documentStructureEnabled()) {
Consumer.handleRequestError(
"Retrieving both the document structure and a syntax tree at the "
"same time is not supported. Use the syntax tree to compute the "
"document structure.");
}
} else {
ide::SyntaxModelContext ModelContext(Impl.SyntaxInfo->getSourceFile());
SwiftEditorSyntaxWalker SyntaxWalker(
NewMap, Impl.SyntaxInfo->getSourceManager(), Consumer,
Impl.SyntaxInfo->getBufferID());
ModelContext.walk(SyntaxWalker);
bool SawChanges = true;
if (Impl.Edited) {
// We're answering an edit request. Report all highlighted token ranges not
// in the previous syntax map to the Consumer and extend the AffectedRange
// to contain all added/removed token ranges.
SawChanges =
NewMap.forEachChanged(Impl.SyntaxMap, Impl.AffectedRange, Consumer);
} else {
// This is an open/initialise. Report all highlighted token ranges to the
// Consumer.
NewMap.forEach(Consumer);
}
Impl.SyntaxMap = std::move(NewMap);
// Recording an affected length of 0 still results in the client updating
// its copy of the syntax map (by clearing all tokens on the line of the
// affected offset). We need to not record it at all to signal a no-op.
if (SawChanges)
Consumer.recordAffectedRange(Impl.AffectedRange->Offset,
Impl.AffectedRange->length());
}
}
/// Reports semantic tokens and diagnostics for Snapshot to the Consumer,
/// falling back to parser diagnostics when the semantic diagnostics are
/// out-of-date (based on a different snapshot).
void SwiftEditorDocument::readSemanticInfo(ImmutableTextSnapshotRef Snapshot,
EditorConsumer& Consumer) {
llvm::sys::ScopedLock L(Impl.AccessMtx);
std::vector<SwiftSemanticToken> SemaToks;
Optional<std::vector<DiagnosticEntryInfo>> SemaDiags;
Impl.SemanticInfo->readSemanticInfo(Snapshot, SemaToks, SemaDiags,
Impl.ParserDiagnostics);
// Report each semantic token that maps to a valid UID.
for (auto SemaTok : SemaToks) {
unsigned Offset = SemaTok.ByteOffset;
unsigned Length = SemaTok.Length;
UIdent Kind = SemaTok.getUIdentForKind();
bool IsSystem = SemaTok.getIsSystem();
if (Kind.isValid())
Consumer.handleSemanticAnnotation(Offset, Length, Kind, IsSystem);
}
static UIdent SemaDiagStage("source.diagnostic.stage.swift.sema");
static UIdent ParseDiagStage("source.diagnostic.stage.swift.parse");
// If there's no value returned for diagnostics it means they are out-of-date
// (based on a different snapshot).
if (SemaDiags.hasValue()) {
Consumer.setDiagnosticStage(SemaDiagStage);
for (auto &Diag : SemaDiags.getValue())
Consumer.handleDiagnostic(Diag, SemaDiagStage);
} else {
Consumer.setDiagnosticStage(ParseDiagStage);
for (auto &Diag : Impl.ParserDiagnostics)
Consumer.handleDiagnostic(Diag, ParseDiagStage);
}
}
/// Drops any cached AST associated with this document's semantic info.
void SwiftEditorDocument::removeCachedAST() {
Impl.SemanticInfo->removeCachedAST();
}
/// Reads the formatting-related keys out of FmtOptions and applies any that
/// are present to this document's CodeFormatOptions.
void SwiftEditorDocument::applyFormatOptions(OptionsDictionary &FmtOptions) {
static UIdent KeyUseTabs("key.editor.format.usetabs");
static UIdent KeyIndentWidth("key.editor.format.indentwidth");
static UIdent KeyTabWidth("key.editor.format.tabwidth");
static UIdent KeyIndentSwitchCase("key.editor.format.indent_switch_case");
FmtOptions.valueForOption(KeyUseTabs, Impl.FormatOptions.UseTabs);
FmtOptions.valueForOption(KeyIndentWidth, Impl.FormatOptions.IndentWidth);
FmtOptions.valueForOption(KeyTabWidth, Impl.FormatOptions.TabWidth);
FmtOptions.valueForOption(KeyIndentSwitchCase, Impl.FormatOptions.IndentSwitchCase);
}
/// Returns the formatting options currently in effect for this document.
const CodeFormatOptions &SwiftEditorDocument::getFormatOptions() {
return Impl.FormatOptions;
}
/// Returns the syntax tree recorded by the last readSyntaxInfo() call when
/// syntax-tree reporting was enabled; None otherwise.
const llvm::Optional<swift::SourceFileSyntax> &
SwiftEditorDocument::getSyntaxTree() const {
return Impl.SyntaxTree;
}
/// Returns the path this document was opened under.
std::string SwiftEditorDocument::getFilePath() const { return Impl.FilePath; }
/// Forwards to the syntax info produced by the last parse().
bool SwiftEditorDocument::hasUpToDateAST() const {
return Impl.SyntaxInfo->hasUpToDateAST();
}
/// Returns the file system associated with this document's semantic info,
/// or the real file system when no semantic info exists.
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
SwiftEditorDocument::getFileSystem() const {
llvm::sys::ScopedLock L(Impl.AccessMtx);
return Impl.SemanticInfo ? Impl.SemanticInfo->getFileSystem()
: llvm::vfs::getRealFileSystem();
}
void SwiftEditorDocument::formatText(unsigned Line, unsigned Length,
EditorConsumer &Consumer) {
auto SyntaxInfo = Impl.getSyntaxInfo();
SourceFile &SF = SyntaxInfo->getSourceFile();
SourceManager &SM = SyntaxInfo->getSourceManager();
LineRange inputRange = LineRange(Line, Length);
CodeFormatOptions Options = getFormatOptions();
auto indented = reformat(inputRange, Options, SM, SF);
LineRange LineRange = indented.first;
StringRef ModifiedText = indented.second;
Consumer.recordFormattedText(ModifiedText);
Consumer.recordAffectedLineRange(LineRange.startLine(), LineRange.lineCount());
}
/// Whether the written return type in Range spells out "void" — either as
/// "()" or as "Void". An invalid range means no written return type.
bool isReturningVoid(SourceManager &SM, CharSourceRange Range) {
  if (Range.isInvalid())
    return false;
  StringRef Text = SM.extractText(Range);
  return Text == "()" || Text == "Void";
}
/// Expands the editor placeholder (<#...#>) covering [Offset, Offset+Length)
/// and reports the replacement text and affected range to \p Consumer.
/// Function-typed placeholders become closure literals (possibly converted to
/// trailing-closure syntax); other placeholders expand to their type text.
void SwiftEditorDocument::expandPlaceholder(unsigned Offset, unsigned Length,
                                            EditorConsumer &Consumer) {
  auto SyntaxInfo = Impl.getSyntaxInfo();
  SourceManager &SM = SyntaxInfo->getSourceManager();
  unsigned BufID = SyntaxInfo->getBufferID();

  // A placeholder is delimited by "<#" and "#>"; anything shorter cannot be
  // well-formed.
  const unsigned PlaceholderStartLen = 2;
  const unsigned PlaceholderEndLen = 2;
  if (Length < (PlaceholderStartLen + PlaceholderEndLen)) {
    Consumer.handleRequestError("Invalid Length parameter");
    return;
  }

  PlaceholderExpansionScanner Scanner(SM);
  SourceFile &SF = SyntaxInfo->getSourceFile();

  // First callback: the placeholder has function type — build a closure.
  Scanner.scan(SF, BufID, Offset, Length,
      [&](Expr *Args,
          bool UseTrailingClosure, bool isWrappedWithBraces,
          ArrayRef<PlaceholderExpansionScanner::Param> ClosureParams,
          CharSourceRange ClosureReturnTypeRange) {
    unsigned EffectiveOffset = Offset;
    unsigned EffectiveLength = Length;
    llvm::SmallString<128> ExpansionStr;
    {
      llvm::raw_svector_ostream OS(ExpansionStr);
      if (UseTrailingClosure) {
        assert(Args);
        if (isa<ParenExpr>(Args)) {
          // There appears to be no other parameters in this call, so we'll
          // expand replacement for trailing closure and cover call parens.
          // For example:
          // foo.bar(<#closure#>) turns into foo.bar <#closure#>.
          EffectiveOffset = SM.getLocOffsetInBuffer(Args->getStartLoc(), BufID);
          OS << " ";
        } else {
          auto *TupleE = cast<TupleExpr>(Args);
          auto Elems = TupleE->getElements();
          assert(!Elems.empty());
          if (Elems.size() == 1) {
            // Single argument: treat like the ParenExpr case above.
            EffectiveOffset = SM.getLocOffsetInBuffer(Args->getStartLoc(), BufID);
            OS << " ";
          } else {
            // Expand replacement range for trailing closure.
            // For example:
            // foo.bar(a, <#closure#>) turns into foo.bar(a) <#closure#>.
            // If the preceding token in the call is the leading parameter
            // separator, we'll expand replacement to cover that.
            assert(Elems.size() > 1);
            SourceLoc BeforeLoc = Lexer::getLocForEndOfToken(SM,
                Elems[Elems.size()-2]->getEndLoc());
            EffectiveOffset = SM.getLocOffsetInBuffer(BeforeLoc, BufID);
            OS << ") ";
          }
        }
        // Replacement extends through the call's closing paren.
        unsigned End = SM.getLocOffsetInBuffer(Args->getEndLoc(), BufID);
        EffectiveLength = (End + 1) - EffectiveOffset;
      }
      // Trailing closure syntax handling will replace braces anyway.
      bool printBraces = !isWrappedWithBraces || UseTrailingClosure;
      if (printBraces)
        OS << "{ ";
      bool ReturningVoid = isReturningVoid(SM, ClosureReturnTypeRange);
      // Print a "(params) -> Ret in" signature only when there are parameters
      // or a non-Void return type.
      bool HasSignature = !ClosureParams.empty() ||
                          (ClosureReturnTypeRange.isValid() && !ReturningVoid);
      bool FirstParam = true;
      if (HasSignature)
        OS << "(";
      for (auto &Param: ClosureParams) {
        if (!FirstParam)
          OS << ", ";
        FirstParam = false;
        if (Param.NameRange.isValid()) {
          // If we have a parameter name, just output the name as is and skip
          // the type. For example:
          // <#(arg1: Int, arg2: Int)#> turns into (arg1, arg2).
          OS << SM.extractText(Param.NameRange);
        }
        else {
          // If we only have the parameter type, output the type as a
          // placeholder. For example:
          // <#(Int, Int)#> turns into (<#Int#>, <#Int#>).
          OS << "<#";
          OS << SM.extractText(Param.TypeRange);
          OS << "#>";
        }
      }
      if (HasSignature)
        OS << ") ";
      if (ClosureReturnTypeRange.isValid()) {
        auto ReturnTypeText = SM.extractText(ClosureReturnTypeRange);
        // We need return type if it is not Void.
        if (!ReturningVoid) {
          OS << "-> ";
          OS << ReturnTypeText << " ";
        }
      }
      if (HasSignature)
        OS << "in";
      OS << "\n" << getCodePlaceholder() << "\n";
      if (printBraces)
        OS << "}";
    }
    Consumer.handleSourceText(ExpansionStr);
    Consumer.recordAffectedRange(EffectiveOffset, EffectiveLength);
  }, [&](EditorPlaceholderExpr *PHE) {
    // Second callback: non-function placeholder — replace it with the printed
    // expansion type when one is attached; return false to decline.
    if (!PHE)
      return false;
    if (auto Ty = PHE->getTypeForExpansion()) {
      std::string S;
      llvm::raw_string_ostream OS(S);
      Ty->print(OS);
      Consumer.handleSourceText(OS.str());
      Consumer.recordAffectedRange(Offset, Length);
      return true;
    }
    return false;
  });
}
/// Thread-safe access to the most recent immutable snapshot of the buffer.
ImmutableTextSnapshotRef SwiftEditorDocument::getLatestSnapshot() const {
  llvm::sys::ScopedLock Guard(Impl.AccessMtx);
  auto Snapshot = Impl.EditableBuffer->getSnapshot();
  return Snapshot;
}
/// Walks the syntax model of \p SrcFile and forwards the discovered document
/// structure to \p Consumer.
void SwiftEditorDocument::reportDocumentStructure(SourceFile &SrcFile,
                                                  EditorConsumer &Consumer) {
  ide::SyntaxModelContext ModelContext(SrcFile);
  auto &SrcMgr = SrcFile.getASTContext().SourceMgr;
  unsigned BufferID = *SrcFile.getBufferID();
  SwiftDocumentStructureWalker Walker(SrcMgr, BufferID, Consumer);
  ModelContext.walk(Walker);
}
//===----------------------------------------------------------------------===//
// EditorOpen
//===----------------------------------------------------------------------===//
/// Opens (or re-opens) the document \p Name with contents \p Buf, parses it,
/// and reports syntactic — and, if requested, semantic — information to
/// \p Consumer.
void SwiftLangSupport::editorOpen(
    StringRef Name, llvm::MemoryBuffer *Buf, EditorConsumer &Consumer,
    ArrayRef<const char *> Args, Optional<VFSOptions> vfsOptions) {

  std::string error;
  // Do not provide primaryFile so that opening an existing document will
  // reinitialize the filesystem instead of keeping the old one.
  auto fileSystem = getFileSystem(vfsOptions, /*primaryFile=*/None, error);
  if (!fileSystem)
    return Consumer.handleRequestError(error.c_str());

  ImmutableTextSnapshotRef Snapshot = nullptr;
  auto EditorDoc = EditorDocuments->getByUnresolvedName(Name);
  if (!EditorDoc) {
    // No document under this name yet: create, initialize and parse one.
    EditorDoc = new SwiftEditorDocument(Name, *this, fileSystem);
    Snapshot = EditorDoc->initializeText(
        Buf, Args, Consumer.needsSemanticInfo(), fileSystem);
    EditorDoc->parse(Snapshot, *this, Consumer.syntaxTreeEnabled());
    if (EditorDocuments->getOrUpdate(Name, *this, EditorDoc)) {
      // Document already exists, re-initialize it. This should only happen
      // if we get OPEN request while the previous document is not closed.
      LOG_WARN_FUNC("Document already exists in editorOpen(..): " << Name);
      // Dropping the snapshot forces the re-initialization path below on the
      // document that won the race.
      Snapshot = nullptr;
    }
    auto numOpen = ++Stats->numOpenDocs;
    Stats->maxOpenDocs.updateMax(numOpen);
  }

  if (!Snapshot) {
    // Either the document pre-existed, or the freshly created one lost the
    // getOrUpdate race above: (re)initialize its text and parse again.
    Snapshot = EditorDoc->initializeText(
        Buf, Args, Consumer.needsSemanticInfo(), fileSystem);
    EditorDoc->parse(Snapshot, *this, Consumer.syntaxTreeEnabled());
  }

  if (Consumer.needsSemanticInfo()) {
    EditorDoc->updateSemaInfo();
  }
  EditorDoc->readSyntaxInfo(Consumer);
  EditorDoc->readSemanticInfo(Snapshot, Consumer);

  if (Consumer.syntaxTreeEnabled()) {
    // parse() was called with syntaxTreeEnabled(), so a tree must exist.
    assert(EditorDoc->getSyntaxTree().hasValue());
    std::unordered_set<unsigned> ReusedNodeIds;
    Consumer.handleSyntaxTree(EditorDoc->getSyntaxTree().getValue(),
                              ReusedNodeIds);
  }
}
//===----------------------------------------------------------------------===//
// EditorClose
//===----------------------------------------------------------------------===//
/// Closes the document \p Name, optionally dropping its cached AST; if no
/// editor document matches, try removing a generated-interface context.
void SwiftLangSupport::editorClose(StringRef Name, bool RemoveCache) {
  auto Removed = EditorDocuments->remove(Name);
  if (!Removed) {
    IFaceGenContexts.remove(Name);
  } else {
    --Stats->numOpenDocs;
    if (RemoveCache)
      Removed->removeCachedAST();
  }
  // FIXME: Report error if Name did not apply to anything ?
}
//===----------------------------------------------------------------------===//
// EditorReplaceText
//===----------------------------------------------------------------------===//
/// Debug helper: reparses the post-edit buffer from scratch and compares its
/// serialized syntax tree against the incrementally updated one. On mismatch,
/// both trees plus the post-edit source are dumped into a freshly created
/// directory so the inconsistency can be inspected offline.
void verifyIncrementalParse(SwiftEditorDocumentRef EditorDoc,
                            unsigned EditOffset, unsigned EditLength,
                            StringRef PreEditText, StringRef ReplaceText) {
  swift::json::Output::UserInfoMap JsonUserInfo;
  // Node ids differ between parses; exclude them from the serialized form.
  JsonUserInfo[swift::json::DontSerializeNodeIdsUserInfoKey] =
      reinterpret_cast<void *>(true);

  // Dump the incremental syntax tree
  std::string IncrTreeString;
  llvm::raw_string_ostream IncrTreeStream(IncrTreeString);
  swift::json::Output IncrTreeOutput(IncrTreeStream, JsonUserInfo);
  IncrTreeOutput << *EditorDoc->getSyntaxTree()->getRaw();

  // Reparse the file from scratch
  CompilerInvocation Invocation;
  Invocation.getLangOptions().BuildSyntaxTree = true;
  std::vector<std::string> Args;
  SwiftDocumentSyntaxInfo ScratchSyntaxInfo(Invocation,
                                            EditorDoc->getLatestSnapshot(),
                                            Args, EditorDoc->getFilePath());
  ScratchSyntaxInfo.parse();

  // Dump the from-scratch syntax tree
  std::string FromScratchTreeString;
  llvm::raw_string_ostream ScratchTreeStream(FromScratchTreeString);
  swift::json::Output ScratchTreeOutput(ScratchTreeStream, JsonUserInfo);
  auto SyntaxRoot = ScratchSyntaxInfo.getSourceFile().getSyntaxRoot();
  ScratchTreeOutput << *SyntaxRoot.getRaw();

  // If the serialized format of the two trees doesn't match incremental parsing
  // we have found an error.
  if (IncrTreeStream.str().compare(ScratchTreeStream.str())) {
    LOG_SECTION("Incremental Parsing", Warning) {
      Log->getOS() << "Incremental parsing different to from scratch parsing\n";
      Log->getOS() << "Edit was " << EditOffset << "-"
                   << (EditOffset + EditLength) << "='" << ReplaceText << "'"
                   << " pre-edit-text: '" << PreEditText << "'\n";
      SmallString<32> DirectoryName;
      // NOTE(review): on failure this only logs — the code below still writes
      // into DirectoryName; confirm that best-effort behavior is intended.
      if (llvm::sys::fs::createUniqueDirectory(
              "SourceKit-IncrementalParsing-Inconsistency", DirectoryName)) {
        Log->getOS() << "Failed to create log directory\n";
      }
      std::error_code ErrorCode;

      // Write the incremental syntax tree
      auto IncrTreeFilename = DirectoryName + "/incrementalTree.json";
      llvm::raw_fd_ostream IncrementalFilestream(
          IncrTreeFilename.str(), ErrorCode,
          llvm::sys::fs::FA_Read | llvm::sys::fs::FA_Write);
      IncrementalFilestream << IncrTreeStream.str();
      if (ErrorCode) {
        Log->getOS() << "Failed to write incremental syntax tree to "
                     << IncrTreeFilename << "(error code " << ErrorCode.value()
                     << ": " << ErrorCode.message() << ")\n";
      } else {
        Log->getOS() << "Incremental syntax tree written to "
                     << IncrTreeFilename << '\n';
      }

      // Write from-scratch syntax tree
      auto ScratchTreeFilename = DirectoryName + "/fromScratchTree.json";
      llvm::raw_fd_ostream ScratchTreeFilestream(
          ScratchTreeFilename.str(), ErrorCode,
          llvm::sys::fs::FA_Read | llvm::sys::fs::FA_Write);
      ScratchTreeFilestream << ScratchTreeStream.str();
      if (ErrorCode) {
        Log->getOS() << "Failed to write from-scratch syntax tree to "
                     << ScratchTreeFilename << "(error code "
                     << ErrorCode.value() << ": " << ErrorCode.message()
                     << ")\n";
      } else {
        Log->getOS() << "From-scratch syntax tree written to "
                     << ScratchTreeFilename << '\n';
      }

      // Write source file
      auto SourceFilename = DirectoryName + "/postEditSource.swift";
      llvm::raw_fd_ostream SourceFilestream(SourceFilename.str(), ErrorCode,
          llvm::sys::fs::FA_Read | llvm::sys::fs::FA_Write);
      auto FileBuffer = EditorDoc->getLatestSnapshot()->getBuffer();
      SourceFilestream << FileBuffer->getText();
    }
  }
}
/// Replaces [Offset, Offset+Length) of document \p Name with \p Buf, reparses
/// (incrementally when a previous syntax tree is available) and reports the
/// updated syntactic/semantic information to \p Consumer.
void SwiftLangSupport::editorReplaceText(StringRef Name,
                                         llvm::MemoryBuffer *Buf,
                                         unsigned Offset, unsigned Length,
                                         EditorConsumer &Consumer) {
  // Environment toggles for incremental-parse diagnostics.
  bool LogReuseRegions = ::getenv("SOURCEKIT_LOG_INCREMENTAL_REUSE_REGIONS");
  bool ValidateSyntaxTree = ::getenv("SOURCEKIT_INCREMENTAL_PARSE_VALIDATION");

  auto EditorDoc = EditorDocuments->getByUnresolvedName(Name);
  if (!EditorDoc) {
    Consumer.handleRequestError("No associated Editor Document");
    return;
  }

  ImmutableTextSnapshotRef Snapshot;
  // A zero-length edit with an empty replacement buffer is a no-op; fall
  // through to the else branch and just report on the current snapshot.
  if (Length != 0 || Buf->getBufferSize() != 0) {
    std::string PreEditText;
    if (ValidateSyntaxTree) {
      // Remember the text being replaced so validation failures can log it.
      auto CurBuffer = EditorDoc->getLatestSnapshot()->getBuffer();
      auto BufferStart = CurBuffer->getInternalBuffer()->getBufferStart();
      StringRef PreEditTextRef(BufferStart + Offset, Length);
      PreEditText = PreEditTextRef.str();
    }
    std::string error;
    Snapshot = EditorDoc->replaceText(Offset, Length, Buf,
                                      Consumer.needsSemanticInfo(), error);
    if (!Snapshot) {
      assert(error.size());
      Consumer.handleRequestError(error.c_str());
      return;
    }

    // Seed the parser with the pre-edit tree so unchanged nodes are reused.
    llvm::Optional<SyntaxParsingCache> SyntaxCache = llvm::None;
    if (EditorDoc->getSyntaxTree().hasValue()) {
      SyntaxCache.emplace(EditorDoc->getSyntaxTree().getValue());
      SyntaxCache->addEdit(Offset, Offset + Length, Buf->getBufferSize());
    }

    SyntaxParsingCache *SyntaxCachePtr = nullptr;
    if (SyntaxCache.hasValue()) {
      SyntaxCachePtr = SyntaxCache.getPointer();
    }
    EditorDoc->parse(Snapshot, *this, Consumer.syntaxTreeEnabled(),
                     SyntaxCachePtr);
    EditorDoc->readSyntaxInfo(Consumer);

    // Log reuse information
    if (SyntaxCache.hasValue() && LogReuseRegions) {
      auto &SyntaxTree = EditorDoc->getSyntaxTree();
      auto ReuseRegions = SyntaxCache->getReusedRegions(*SyntaxTree);
      LOG_SECTION("SyntaxCache", InfoHighPrio) {
        Log->getOS() << "Reused ";
        bool FirstIteration = true;
        for (auto ReuseRegion : ReuseRegions) {
          if (!FirstIteration) {
            Log->getOS() << ", ";
          } else {
            FirstIteration = false;
          }
          Log->getOS() << ReuseRegion.Start << " - " << ReuseRegion.End;
        }
      }
    }

    if (Consumer.syntaxTreeEnabled()) {
      // Tell the consumer which node ids survived the incremental reparse.
      std::unordered_set<unsigned> ReusedNodeIds;
      if (SyntaxCache.hasValue()) {
        auto &ReusedVector = SyntaxCache->getReusedNodeIds();
        ReusedNodeIds = std::unordered_set<unsigned>(ReusedVector.begin(),
                                                     ReusedVector.end());
      }
      Consumer.handleSyntaxTree(EditorDoc->getSyntaxTree().getValue(),
                                ReusedNodeIds);
    }

    if (ValidateSyntaxTree) {
      verifyIncrementalParse(EditorDoc, Offset, Length, PreEditText,
                             Buf->getBuffer());
    }
  } else {
    Snapshot = EditorDoc->getLatestSnapshot();
  }

  EditorDoc->readSemanticInfo(Snapshot, Consumer);
}
//===----------------------------------------------------------------------===//
// EditorFormatText
//===----------------------------------------------------------------------===//
/// Applies formatting options to the named document; silently ignores
/// unknown document names.
void SwiftLangSupport::editorApplyFormatOptions(StringRef Name,
                                                OptionsDictionary &FmtOptions) {
  if (auto EditorDoc = EditorDocuments->getByUnresolvedName(Name))
    EditorDoc->applyFormatOptions(FmtOptions);
}
/// Formats \p Length lines starting at \p Line of the named document,
/// reparsing first if the cached AST is stale.
void SwiftLangSupport::editorFormatText(StringRef Name, unsigned Line,
                                        unsigned Length,
                                        EditorConsumer &Consumer) {
  auto EditorDoc = EditorDocuments->getByUnresolvedName(Name);
  if (!EditorDoc) {
    Consumer.handleRequestError("No associated Editor Document");
    return;
  }

  const bool NeedsReparse = !EditorDoc->hasUpToDateAST();
  if (NeedsReparse) {
    // An up-to-date AST is needed for formatting. If it does not exist, fall
    // back to a full reparse of the file
    EditorDoc->parse(EditorDoc->getLatestSnapshot(), *this,
                     /*BuildSyntaxTree=*/true);
  }

  EditorDoc->formatText(Line, Length, Consumer);
}
/// Strips comment markers from \p Source and reports the plain text.
void SwiftLangSupport::editorExtractTextFromComment(StringRef Source,
                                                    EditorConsumer &Consumer) {
  auto PlainText = extractPlainTextFromComment(Source);
  Consumer.handleSourceText(PlainText);
}
/// Converts Swift Markup in \p Source to XML and reports the result (or a
/// conversion error) to \p Consumer.
void SwiftLangSupport::editorConvertMarkupToXML(StringRef Source,
                                                EditorConsumer &Consumer) {
  std::string Result;
  llvm::raw_string_ostream OS(Result);
  if (convertMarkupToXML(Source, OS)) {
    Consumer.handleRequestError("Conversion failed.");
    return;
  }
  // Fix: raw_string_ostream may buffer; flush so Result holds everything
  // written before it is handed to the consumer.
  OS.flush();
  Consumer.handleSourceText(Result);
}
//===----------------------------------------------------------------------===//
// EditorExpandPlaceholder
//===----------------------------------------------------------------------===//
/// Expands the placeholder at [Offset, Offset+Length) in the named document.
void SwiftLangSupport::editorExpandPlaceholder(StringRef Name, unsigned Offset,
                                               unsigned Length,
                                               EditorConsumer &Consumer) {
  if (auto EditorDoc = EditorDocuments->getByUnresolvedName(Name)) {
    EditorDoc->expandPlaceholder(Offset, Length, Consumer);
    return;
  }
  Consumer.handleRequestError("No associated Editor Document");
}
|
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Project: Embedded Learning Library (ELL)
// File: DelayNode.tcc (nodes)
// Authors: Chuck Jacobs
//
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace ell
{
namespace nodes
{
    /// <summary> Constructs a DelayNode that re-emits its input after `windowSize` ticks. </summary>
    ///
    /// <param name="input"> The port elements to delay. </param>
    /// <param name="windowSize"> Number of ticks the signal is delayed by. </param>
    template <typename ValueType>
    DelayNode<ValueType>::DelayNode(const model::PortElements<ValueType>& input, size_t windowSize)
        : CompilableNode({ &_input }, { &_output }), _input(this, input, inputPortName), _output(this, outputPortName, _input.Size()), _windowSize(windowSize)
    {
        // Pre-fill the delay line with zero-valued samples so the first
        // `windowSize` outputs are well-defined. Reserve up front (matching
        // ReadFromArchive) to avoid repeated reallocation.
        auto dimension = input.Size();
        _samples.reserve(windowSize);
        for (size_t index = 0; index < windowSize; ++index)
        {
            _samples.push_back(std::vector<ValueType>(dimension));
        }
    }

    /// <summary> Default constructor; produces an empty node for deserialization. </summary>
    template <typename ValueType>
    DelayNode<ValueType>::DelayNode()
        : CompilableNode({ &_input }, { &_output }), _input(this, {}, inputPortName), _output(this, outputPortName, 0), _windowSize(0)
    {
    }

    /// <summary> Emits the oldest buffered sample and enqueues the current input. </summary>
    template <typename ValueType>
    void DelayNode<ValueType>::Compute() const
    {
        // Copy the front sample before it is shifted out of the window.
        auto lastBufferedSample = _samples[0];
        _samples.push_back(_input.GetValue());
        _samples.erase(_samples.begin());
        _output.SetOutput(lastBufferedSample);
    } // note: stray ';' that followed this definition was removed

    /// <summary> Clones this node into the transformed model. </summary>
    template <typename ValueType>
    void DelayNode<ValueType>::Copy(model::ModelTransformer& transformer) const
    {
        auto newPortElements = transformer.TransformPortElements(_input.GetPortElements());
        auto newNode = transformer.AddNode<DelayNode<ValueType>>(newPortElements, _windowSize);
        transformer.MapNodeOutput(output, newNode->output);
    }

    /// <summary> Emits IR implementing the delay as a shift register. </summary>
    template <typename ValueType>
    void DelayNode<ValueType>::Compile(model::IRMapCompiler& compiler, emitters::IRFunctionEmitter& function)
    {
        llvm::Value* result = compiler.EnsurePortEmitted(output);
        size_t sampleSize = output.Size();
        size_t windowSize = this->GetWindowSize();
        size_t bufferSize = sampleSize * windowSize;

        //
        // Delay nodes are always long lived - either globals or heap. Currently, we use globals
        // Each sample chunk is of size == sampleSize. The number of chunks we hold onto == windowSize
        // We need two buffers - one for the entire lot, one for the "last" chunk forwarded to the next operator
        //
        emitters::Variable* delayLineVar = function.GetModule().Variables().AddVariable<emitters::InitializedVectorVariable<ValueType>>(emitters::VariableScope::global, bufferSize);
        llvm::Value* delayLine = function.GetModule().EnsureEmitted(*delayLineVar);

        //
        // We implement a delay as a Shift Register
        //
        llvm::Value* inputBuffer = compiler.EnsurePortEmitted(input);
        function.ShiftAndUpdate<ValueType>(delayLine, bufferSize, sampleSize, inputBuffer, result);
    }

    /// <summary> Serializes the node's input binding and window size. </summary>
    template <typename ValueType>
    void DelayNode<ValueType>::WriteToArchive(utilities::Archiver& archiver) const
    {
        Node::WriteToArchive(archiver);
        archiver[inputPortName] << _input;
        archiver["windowSize"] << _windowSize;
    }

    /// <summary> Restores the node and rebuilds the zero-filled delay line. </summary>
    template <typename ValueType>
    void DelayNode<ValueType>::ReadFromArchive(utilities::Unarchiver& archiver)
    {
        Node::ReadFromArchive(archiver);
        archiver[inputPortName] >> _input;
        archiver["windowSize"] >> _windowSize;

        auto dimension = _input.Size();
        _samples.clear();
        _samples.reserve(_windowSize);
        for (size_t index = 0; index < _windowSize; ++index)
        {
            _samples.push_back(std::vector<ValueType>(dimension));
        }
        _output.SetSize(dimension);
    }
}
}
|
// Copyright Ali Can Demiralp 2016 - 2021.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef GL_SYNC_HPP
#define GL_SYNC_HPP
#include <gl/opengl.hpp>
namespace gl
{
// RAII wrapper over an OpenGL fence sync object (GLsync).
// Construction inserts a GL_SYNC_GPU_COMMANDS_COMPLETE fence into the command
// stream; destruction deletes the sync object. Instances are move-only,
// mirroring the uniqueness of the underlying handle.
class sync
{
public:
  // 4.1 Sync objects and fences.
  sync() : id_(glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0))
  {
  }
  sync(const sync& that) = delete;
  sync(sync&& temp) noexcept : id_(temp.id_)
  {
    temp.id_ = nullptr;
  }
  virtual ~sync()
  {
    if (id_ != nullptr)
      glDeleteSync(id_);
  }
  sync& operator=(const sync& that) = delete;
  sync& operator=(sync&& temp) noexcept
  {
    if (this != &temp)
    {
      // Release our own fence before stealing the other one.
      if (id_ != nullptr)
        glDeleteSync(id_);
      id_      = temp.id_;
      temp.id_ = nullptr;
    }
    return *this;
  }

  // 4.1.1 Waiting for sync objects.
  // Blocks the client until the fence signals or timeout_ns elapses.
  // Fix: the default timeout is now the exact integer 100000000000 ns (~100 s)
  // instead of the double literal 10E+10, which relied on an implicit
  // floating-point-to-GLuint64 conversion of the same value.
  [[nodiscard]]
  GLenum client_wait(const GLbitfield flags = GL_SYNC_FLUSH_COMMANDS_BIT, const GLuint64 timeout_ns = 100000000000ull) const
  {
    return glClientWaitSync(id_, flags, timeout_ns);
  }
  // Instructs the GL server to wait on the fence; returns immediately.
  // (The original returned the void glWaitSync expression; plain call now.)
  void wait() const
  {
    glWaitSync(id_, 0, GL_TIMEOUT_IGNORED);
  }

  // 4.1.3 Sync object queries.
  [[nodiscard]]
  GLenum status() const
  {
    return get_property(GL_SYNC_STATUS);
  }
  [[nodiscard]]
  bool is_valid() const
  {
    return glIsSync(id_) != 0;
  }
  [[nodiscard]]
  GLsync id() const
  {
    return id_;
  }

protected:
  // Queries a single integer property of the sync object via glGetSynciv.
  [[nodiscard]]
  GLint get_property(const GLenum property) const
  {
    GLsizei size;
    GLint   value;
    glGetSynciv(id_, property, 1, &size, &value);
    return value;
  }

  GLsync id_;
};
}
#endif
|
/*
//@HEADER
// ************************************************************************
//
// KokkosKernels 0.9: Linear Algebra and Graph Kernels
// Copyright 2017 Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Siva Rajamanickam (srajama@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
// Tell the spec headers that this TU provides the library definitions
// (explicit template instantiation) instead of consuming extern templates.
#define KOKKOSKERNELS_IMPL_COMPILE_LIBRARY true
#include "KokkosKernels_config.h"
// Only instantiate when the double / LayoutRight / Serial / HostSpace
// combination was enabled at configure time.
#if defined (KOKKOSKERNELS_INST_DOUBLE) \
 && defined (KOKKOSKERNELS_INST_LAYOUTRIGHT) \
 && defined (KOKKOSKERNELS_INST_EXECSPACE_SERIAL) \
 && defined (KOKKOSKERNELS_INST_MEMSPACE_HOSTSPACE)
#include "KokkosBlas1_axpby_spec.hpp"
namespace KokkosBlas {
namespace Impl {
// Instantiate axpby for double vectors, LayoutRight, Serial exec, HostSpace.
KOKKOSBLAS1_AXPBY_ETI_SPEC_INST(double, Kokkos::LayoutRight, Kokkos::Serial, Kokkos::HostSpace)
} // Impl
} // KokkosBlas
#endif
|
#include "AnalyticsBlocker.h"
#include "Debug.h"
#include "Assertion.h"
#include "../Managers/Hook.h"
#include <algorithm>
// When true, every unique non-blocked host that gets looked up is logged
// ("DAB" logging); set elsewhere.
bool AnalyticsBlocker::ShouldDAB = false;
// Hex-obfuscated, NUL-terminated domain fragments whose resolver lookups are
// redirected (analytics/telemetry endpoints). The `new char[]` arrays are
// never freed; they back process-lifetime statics.
const char* AnalyticsBlocker::BlockedList[] = { new char[29] { 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x75, 0x63, 0x61, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x75, 0x6e, 0x69, 0x74, 0x79, 0x33, 0x64, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[13] { 0x66, 0x61, 0x63, 0x65, 0x62, 0x6f, 0x6f, 0x6b, 0x2e, 0x6e, 0x65, 0x74, 0x00 }, new char[16] { 0x73, 0x6f, 0x66, 0x74, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x61, 0x74, 0x2e, 0x75, 0x61, 0x00 }, new char[13] { 0x66, 0x61, 0x63, 0x65, 0x62, 0x6f, 0x6f, 0x6b, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[21] { 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2d, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[7] { 0x66, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[13] { 0x76, 0x72, 0x6d, 0x6f, 0x64, 0x73, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x00 }, new char[10] { 0x66, 0x62, 0x63, 0x64, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[8] { 0x64, 0x69, 0x73, 0x63, 0x6f, 0x72, 0x64, 0x00 }, new char[14] { 0x6f, 0x63, 0x75, 0x6c, 0x75, 0x73, 0x63, 0x64, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[18] { 0x67, 0x61, 0x6d, 0x65, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[25] { 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x75, 0x6e, 0x69, 0x74, 0x79, 0x33, 0x64, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[18] { 0x61, 0x7a, 0x75, 0x72, 0x65, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x00 }, new char[24] { 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x2e, 0x6e, 0x69, 0x6e, 0x6a, 0x61, 0x6b, 0x69, 0x77, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[10] { 0x66, 0x62, 0x63, 0x64, 0x6e, 0x2e, 0x6e, 0x65, 0x74, 0x00 }, new char[9] { 0x68, 0x61, 0x73, 0x74, 0x65, 0x62, 0x69, 0x6e, 0x00 }, new char[26] { 0x65, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x72, 0x63, 0x65, 0x2e, 0x69, 0x61, 0x70, 0x2e, 0x75, 0x6e, 0x69, 0x74, 0x79, 0x33, 0x64, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[12] { 0x69, 
0x63, 0x65, 0x62, 0x75, 0x72, 0x6e, 0x2e, 0x78, 0x79, 0x7a, 0x00 }, new char[12] { 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x00 }, new char[13] { 0x72, 0x69, 0x70, 0x70, 0x65, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x00 }, new char[10] { 0x66, 0x62, 0x73, 0x62, 0x78, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[22] { 0x63, 0x64, 0x70, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x75, 0x6e, 0x69, 0x74, 0x79, 0x33, 0x64, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[6] { 0x66, 0x62, 0x2e, 0x6d, 0x65, 0x00 }, new char[8] { 0x68, 0x61, 0x74, 0x65, 0x62, 0x69, 0x6e, 0x00 }, new char[42] { 0x64, 0x61, 0x74, 0x61, 0x2d, 0x6f, 0x70, 0x74, 0x6f, 0x75, 0x74, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x75, 0x63, 0x61, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x75, 0x6e, 0x69, 0x74, 0x79, 0x33, 0x64, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[11] { 0x69, 0x74, 0x65, 0x6f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x00 }, new char[9] { 0x70, 0x61, 0x73, 0x74, 0x65, 0x62, 0x69, 0x6e, 0x00 }, new char[21] { 0x67, 0x6c, 0x75, 0x65, 0x68, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2d, 0x61, 0x6c, 0x75, 0x68, 0x75, 0x74, 0x2e, 0x64, 0x65, 0x00 }, new char[14] { 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x74, 0x75, 0x64, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[22] { 0x66, 0x61, 0x63, 0x65, 0x62, 0x6f, 0x6f, 0x6b, 0x2d, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[26] { 0x61, 0x70, 0x69, 0x2e, 0x75, 0x63, 0x61, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x75, 0x6e, 0x69, 0x74, 0x79, 0x33, 0x64, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[8] { 0x64, 0x72, 0x6f, 0x70, 0x62, 0x6f, 0x78, 0x00 }, new char[16] { 0x63, 0x72, 0x61, 0x73, 0x68, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[8] { 0x6d, 0x65, 0x61, 0x70, 0x2e, 0x67, 0x67, 0x00 }, new char[21] { 0x70, 0x69, 0x78, 0x65, 0x6c, 0x73, 0x74, 0x72, 0x69, 0x6b, 0x65, 0x33, 0x64, 0x61, 0x77, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[30] { 0x70, 0x65, 0x72, 
0x66, 0x2d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x75, 0x6e, 0x69, 0x74, 0x79, 0x33, 0x64, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }; // see note above: deliberately opaque table
// Pre-seeded "already seen" hosts (also hex-encoded) so common legitimate
// lookups are not reported as unique; CheckHostNameOrIP appends to this list.
std::list<std::string> AnalyticsBlocker::DABList = { new char[8] { 0x6e, 0x74, 0x70, 0x2e, 0x6f, 0x72, 0x67, 0x00 }, new char[13] { 0x62, 0x6f, 0x6e, 0x65, 0x74, 0x6f, 0x6d, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[11] { 0x73, 0x61, 0x6d, 0x62, 0x6f, 0x79, 0x2e, 0x64, 0x65, 0x76, 0x00 }, new char[11] { 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[14] { 0x72, 0x75, 0x62, 0x79, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[16] { 0x6d, 0x65, 0x6c, 0x6f, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[22] { 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x75, 0x73, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x00 }, new char[20] { 0x74, 0x68, 0x65, 0x74, 0x72, 0x75, 0x65, 0x79, 0x6f, 0x73, 0x68, 0x69, 0x66, 0x61, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x00 } };
/// Loads the socket DLLs whose resolver exports will later be detoured.
/// On non-Win64 builds this is a no-op that reports success.
bool AnalyticsBlocker::Initialize()
{
#ifdef _WIN64
    Debug::Msg("Initializing Analytics Blocker...");
    return (wsock32::Initialize()
        && ws2_32::Initialize()
        );
#else
    return true;
#endif
}
/// Installs the resolver detours on the modules loaded by Initialize().
void AnalyticsBlocker::Hook()
{
#ifdef _WIN64
    wsock32::Hooks::Initialize();
    ws2_32::Hooks::Initialize();
#endif
}
/// Decides whether a resolver query for \p host_name_or_ip should be blocked.
/// When debugging or DAB logging is enabled, also records previously unseen,
/// non-blocked hosts in DABList and logs both cases.
bool AnalyticsBlocker::CheckHostNameOrIP(const char* host_name_or_ip)
{
    std::string host_name_or_ip_str = host_name_or_ip;
    std::transform(host_name_or_ip_str.begin(), host_name_or_ip_str.end(), host_name_or_ip_str.begin(), [](unsigned char c) { return std::tolower(c); });

    // Local lookups are always allowed.
    // Fix: operator== replaces the MSVC-internal std::string::_Equal so this
    // compiles with any conforming standard library.
    if (host_name_or_ip_str == "localhost" || host_name_or_ip_str == "127.0.0.1")
        return false;

    bool should_block = ShouldBlock(host_name_or_ip);
    if (Debug::Enabled || ShouldDAB)
    {
        if (should_block)
            Debug::DirectWrite(("Host Name or IP Blocked: " + host_name_or_ip_str).c_str());
        else if (!HasDabbed(host_name_or_ip))
        {
            Debug::DirectWrite(("Unique Host Name or IP Found: " + host_name_or_ip_str).c_str());
            DABList.push_back(host_name_or_ip_str);
        }
    }
    return should_block;
}
/// True when any entry of BlockedList occurs as a substring of the host.
bool AnalyticsBlocker::ShouldBlock(const char* host_name_or_ip)
{
    for (const char* blocked : BlockedList)
    {
        if (strstr(host_name_or_ip, blocked) != NULL)
            return true;
    }
    return false;
}
/// True when the host was already recorded in DABList as a unique,
/// non-blocked lookup.
bool AnalyticsBlocker::HasDabbed(const char* host_name_or_ip)
{
    // Fix: std::string::operator== replaces the MSVC-only _Equal extension,
    // keeping the comparison portable across standard libraries.
    for (const std::string& entry : DABList)
    {
        if (entry == host_name_or_ip)
            return true;
    }
    return false;
}
#pragma region wsock32
// Handle to wsock32.dll; stays NULL when the DLL could not be loaded.
HMODULE AnalyticsBlocker::wsock32::Module = NULL;
// Resolved gethostbyname export; the detour forwards calls through it.
AnalyticsBlocker::wsock32::Exports::gethostbyname_t AnalyticsBlocker::wsock32::Exports::Gethostbyname = NULL;
/// Loads wsock32.dll and resolves its exports. A missing DLL is tolerated
/// (returns true); only a failed export resolution reports failure.
bool AnalyticsBlocker::wsock32::Initialize()
{
    Debug::Msg("Initializing wsock32...");
    Module = LoadLibraryA("wsock32.dll");
    if (Module == NULL)
    {
        // Display Warning
        return true;
    }
    return Exports::Initialize();
}
/// Resolves the gethostbyname export from the loaded module and returns
/// Assertion::ShouldContinue — presumably reflecting whether GetExport
/// succeeded; confirm against Assertion's implementation.
bool AnalyticsBlocker::wsock32::Exports::Initialize()
{
    Debug::Msg("Initializing wsock32 Exports...");
    Gethostbyname = (gethostbyname_t)Assertion::GetExport(Module, "gethostbyname");
    return Assertion::ShouldContinue;
}
/// Attaches the gethostbyname detour; no-op when wsock32.dll was not loaded.
void AnalyticsBlocker::wsock32::Hooks::Initialize()
{
    if (Module == NULL)
        return;
    Debug::Msg("Attaching Hooks to wsock32...");
    Debug::Msg("gethostbyname");
    Hook::Attach(&(LPVOID&)Exports::Gethostbyname, Gethostbyname);
}
/// Detour for wsock32!gethostbyname: blocked (or null) host names are
/// rewritten to "localhost" before forwarding to the original export.
void* AnalyticsBlocker::wsock32::Hooks::Gethostbyname(const char* name)
{
    try
    {
        if ((name == NULL) || CheckHostNameOrIP(name))
            name = "localhost";
        return Exports::Gethostbyname(name);
    }
    // Deliberate catch-all: a resolver hook must never let an exception
    // escape into foreign code; failure degrades to a NULL (lookup failed).
    catch(...){}
    return NULL;
}
#pragma endregion
#ifdef _WIN64
#pragma region ws2_32
// Handle to ws2_32.dll; stays NULL when the DLL could not be loaded.
HMODULE AnalyticsBlocker::ws2_32::Module = NULL;
// Resolved getaddrinfo export; the detour forwards calls through it.
AnalyticsBlocker::ws2_32::Exports::getaddrinfo_t AnalyticsBlocker::ws2_32::Exports::Getaddrinfo = NULL;
/// Loads ws2_32 and resolves its exports. A missing DLL is tolerated
/// (returns true); only a failed export resolution reports failure.
bool AnalyticsBlocker::ws2_32::Initialize()
{
    Debug::Msg("Initializing ws2_32...");
    Module = LoadLibraryA("ws2_32");
    if (Module == NULL)
    {
        // Display Warning
        return true;
    }
    return Exports::Initialize();
}
/// Resolves the getaddrinfo export from the loaded module and returns
/// Assertion::ShouldContinue — presumably reflecting whether GetExport
/// succeeded; confirm against Assertion's implementation.
bool AnalyticsBlocker::ws2_32::Exports::Initialize()
{
    Debug::Msg("Initializing ws2_32 Exports...");
    Getaddrinfo = (getaddrinfo_t)Assertion::GetExport(Module, "getaddrinfo");
    return Assertion::ShouldContinue;
}
/// Attaches the getaddrinfo detour; no-op when ws2_32 was not loaded.
void AnalyticsBlocker::ws2_32::Hooks::Initialize()
{
    if (Module == NULL)
        return;
    Debug::Msg("Attaching Hooks to ws2_32...");
    Debug::Msg("getaddrinfo");
    Hook::Attach(&(LPVOID&)Exports::Getaddrinfo, Getaddrinfo);
}
/// Detour for ws2_32!getaddrinfo: blocked (or null) node names are rewritten
/// to "localhost" before forwarding to the original export.
int AnalyticsBlocker::ws2_32::Hooks::Getaddrinfo(PCSTR pNodeName, PCSTR pServiceName, void* pHints, void* ppResult)
{
    try
    {
        if ((pNodeName == NULL) || CheckHostNameOrIP(pNodeName))
            pNodeName = "localhost";
        return Exports::Getaddrinfo(pNodeName, pServiceName, pHints, ppResult);
    }
    // Deliberate catch-all: never let an exception escape into foreign code;
    // failures surface to the caller as a retryable WSATRY_AGAIN.
    catch (...){}
    return WSATRY_AGAIN;
}
#pragma endregion
#endif
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_CORE_HPP__
#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_CORE_HPP__
#ifdef HAVE_OPENCL
#if defined(HAVE_OPENCL_STATIC)
// Statically linked OpenCL: include the platform's vendor headers directly.
#if defined __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif
#else // HAVE_OPENCL_STATIC
// Dynamically loaded OpenCL: the API is declared via autogenerated wrappers.
#ifndef CL_RUNTIME_EXPORT
#if (defined(BUILD_SHARED_LIBS) || defined(OPENCV_CORE_SHARED)) && (defined WIN32 || defined _WIN32 || defined WINCE)
#define CL_RUNTIME_EXPORT __declspec(dllimport)
#else
#define CL_RUNTIME_EXPORT
#endif
#endif
#ifdef HAVE_OPENCL_SVM
// Rename the SVM entry points so the runtime loader can supply its own
// resolving stubs under the original names.
#define clSVMAlloc clSVMAlloc_
#define clSVMFree clSVMFree_
#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_
#define clSetKernelExecInfo clSetKernelExecInfo_
#define clEnqueueSVMFree clEnqueueSVMFree_
#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_
#define clEnqueueSVMMemFill clEnqueueSVMMemFill_
#define clEnqueueSVMMap clEnqueueSVMMap_
#define clEnqueueSVMUnmap clEnqueueSVMUnmap_
#endif
#include "autogenerated/opencl_core.hpp"
#endif // HAVE_OPENCL_STATIC
// Fallback token values for OpenCL headers that predate these queries.
#ifndef CL_DEVICE_DOUBLE_FP_CONFIG
#define CL_DEVICE_DOUBLE_FP_CONFIG 0x1032
#endif
#ifndef CL_DEVICE_HALF_FP_CONFIG
#define CL_DEVICE_HALF_FP_CONFIG 0x1033
#endif
// Error helper used where OpenCL 1.2 functionality is required but absent.
#ifndef CL_VERSION_1_2
#define CV_REQUIRE_OPENCL_1_2_ERROR CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "OpenCV compiled without OpenCL v1.2 support, so we can't use functionality from OpenCL v1.2")
#endif
#endif // HAVE_OPENCL
#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_CORE_HPP__
|
/*
Copyright (C) 2003-2013 by David White <davewx7@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef BLUR_HPP_INCLUDED
#define BLUR_HPP_INCLUDED
#include <deque>
class frame;
//class which represents the blur information for a single object.
//a blur contains three parameters:
// - alpha: the initial alpha value of the blurred vision of the object.
// - fade: the rate at which the alpha fades each frame
// - granularity: the number of copies of the object that are made
// every cycle.
class blur_info
{
public:
blur_info(double alpha, double fade, int granularity);
//function to copy settings into another blur_info instance. This will
//keep our blur_frames as they are, but copy in the alpha/fade/granularity
//settings and so change our blur behavior from then on.
void copy_settings(const blur_info& info);
//function to progress to the next frame. We are given starting and
//ending position of the object, along with its drawing settings.
//
//'granularity' copies of the object's image will be made, linearly
//interpolated between start_x,start_y and end_x,end_y.
void next_frame(int start_x, int start_y, int end_x, int end_y,
const frame* f, int time_in_frame, bool facing,
bool upside_down, float start_rotate, float rotate);
//renders all currently stored blur copies.
void draw() const;
//returns true iff our granularity is now 0 and we have no blur_frames.
bool destroyed() const;
private:
//one ghosted copy of the object at a past position/orientation.
struct blur_frame {
const frame* object_frame;
int time_in_frame;
double x, y;
bool facing, upside_down;
float rotate;
//remaining alpha of this copy; presumably decays by fade_ each
//frame until the copy is dropped -- behavior lives in the .cpp.
double fade;
};
double alpha_;
double fade_;
int granularity_;
//queue of ghost copies, oldest first.
std::deque<blur_frame> frames_;
};
#endif
|
#include "Goldfish/libGoldfish.h"
#include <ctime>
// Creates a logger that will write to "<filename>.md"; both timestamps are
// seeded with the construction time until start()/stop() overwrite them.
[[maybe_unused]] Goldfish::Log::Log(const std::string &filename)
    : fileName(filename + ".md"),
      startTime(std::time(nullptr)),
      endTime(std::time(nullptr)) {}
// Opens the log file and writes the Markdown preamble (title, start date,
// and the "### Tests" section heading).
[[maybe_unused]] void Goldfish::Log::start() {
this->log.open(this->fileName);
this->startTime = std::time(nullptr);
// NOTE(review): std::asctime already ends with '\n', so the extra "\n"
// produces the blank line before "### Tests". std::localtime/std::asctime
// use shared static storage and are not thread-safe -- fine if only one
// Log runs at a time; confirm otherwise.
log << "# " << this->fileName << "\n\n## Start\n\nDate: " << std::asctime(std::localtime(&this->startTime)) << "\n"
<< "### Tests\n\n";
}
// Writes a single test answer as a Markdown paragraph, prefixed with a
// check mark on pass and a cross mark on failure.
[[maybe_unused]] void Goldfish::Log::write(const Goldfish::Answer &input) {
    log << (input.passed ? "✔ " : "❌ ") << input.out << "\n\n";
}
// Writes a test suite's aggregated answer text as its own paragraph.
[[maybe_unused]] void Goldfish::Log::write(const Goldfish::TestSuit &input) {
log << input.answer.out << "\n\n";
}
// Writes an arbitrary Markdown fragment followed by a blank line.
[[maybe_unused]] void Goldfish::Log::write(const std::string &input) {
log << input << "\n\n";
}
// Records the end time, appends the Markdown footer with the elapsed
// duration (time_t difference, i.e. whole seconds), and closes the file.
[[maybe_unused]] void Goldfish::Log::stop() {
this->endTime = std::time(nullptr);
log << "## End\n\nTest Duration: **" << endTime - startTime << " seconds**\n";
log.close();
}
Goldfish::Log::~Log() = default;
|
// Copyright (c) 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "source/fuzz/fuzzer_pass_add_stores.h"
#include "source/fuzz/fuzzer_util.h"
#include "source/fuzz/transformation_store.h"
namespace spvtools {
namespace fuzz {
// Constructs the pass, forwarding all shared fuzzing state to FuzzerPass.
FuzzerPassAddStores::FuzzerPassAddStores(
opt::IRContext* ir_context, TransformationContext* transformation_context,
FuzzerContext* fuzzer_context,
protobufs::TransformationSequence* transformations)
: FuzzerPass(ir_context, transformation_context, fuzzer_context,
transformations) {}
FuzzerPassAddStores::~FuzzerPassAddStores() = default;
// Walks every instruction in the module and, with some probability, inserts
// an OpStore before it: a random writable pointer already available at that
// point is paired with a random type-matching value, provided the store is
// either into a dead block or to a pointer whose pointee is irrelevant.
void FuzzerPassAddStores::Apply() {
  ForEachInstructionWithInstructionDescriptor(
      [this](opt::Function* function, opt::BasicBlock* block,
             opt::BasicBlock::iterator inst_it,
             const protobufs::InstructionDescriptor& instruction_descriptor)
          -> void {
        assert(inst_it->opcode() ==
                   instruction_descriptor.target_instruction_opcode() &&
               "The opcode of the instruction we might insert before must be "
               "the same as the opcode in the descriptor for the instruction");
        // Check whether it is legitimate to insert a store before this
        // instruction.
        if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpStore,
                                                          inst_it)) {
          return;
        }
        // Randomly decide whether to try inserting a store here.
        if (!GetFuzzerContext()->ChoosePercentage(
                GetFuzzerContext()->GetChanceOfAddingStore())) {
          return;
        }
        // Look for pointers we might consider storing to.
        std::vector<opt::Instruction*> relevant_pointers =
            FindAvailableInstructions(
                function, block, inst_it,
                [this, block](opt::IRContext* context,
                              opt::Instruction* instruction) -> bool {
                  if (!instruction->result_id() || !instruction->type_id()) {
                    return false;
                  }
                  auto type_inst = context->get_def_use_mgr()->GetDef(
                      instruction->type_id());
                  if (type_inst->opcode() != SpvOpTypePointer) {
                    // Not a pointer.
                    return false;
                  }
                  if (instruction->IsReadOnlyPointer()) {
                    // Read only: cannot store to it.
                    return false;
                  }
                  // BUG FIX: dispatch on the defining instruction's opcode.
                  // The original switched on result_id(), comparing an SSA id
                  // against SpvOp* enum values, so null/undef pointers were
                  // never actually filtered out.
                  switch (instruction->opcode()) {
                    case SpvOpConstantNull:
                    case SpvOpUndef:
                      // Do not allow storing to a null or undefined pointer;
                      // this might be OK if the block is dead, but for now we
                      // conservatively avoid it.
                      return false;
                    default:
                      break;
                  }
                  // The store must be unobservable: the block is dead, or the
                  // pointee value has been declared irrelevant.
                  return GetTransformationContext()
                             ->GetFactManager()
                             ->BlockIsDead(block->id()) ||
                         GetTransformationContext()
                             ->GetFactManager()
                             ->PointeeValueIsIrrelevant(
                                 instruction->result_id());
                });
        // At this point, |relevant_pointers| contains all the pointers we might
        // think of storing to.
        if (relevant_pointers.empty()) {
          return;
        }
        auto pointer = relevant_pointers[GetFuzzerContext()->RandomIndex(
            relevant_pointers)];
        // Find available values whose type equals the pointer's pointee type
        // (in-operand 1 of the OpTypePointer instruction).
        std::vector<opt::Instruction*> relevant_values =
            FindAvailableInstructions(
                function, block, inst_it,
                [pointer](opt::IRContext* context,
                          opt::Instruction* instruction) -> bool {
                  if (!instruction->result_id() || !instruction->type_id()) {
                    return false;
                  }
                  return instruction->type_id() ==
                         context->get_def_use_mgr()
                             ->GetDef(pointer->type_id())
                             ->GetSingleWordInOperand(1);
                });
        if (relevant_values.empty()) {
          return;
        }
        // Choose a value at random, and create and apply a storing
        // transformation based on it and the pointer.
        ApplyTransformation(TransformationStore(
            pointer->result_id(),
            relevant_values[GetFuzzerContext()->RandomIndex(relevant_values)]
                ->result_id(),
            instruction_descriptor));
      });
}
} // namespace fuzz
} // namespace spvtools
|
#include <string.h>
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <esp_log.h>
#include "include/i2c.hh"
// Tag used for ESP-IDF log output from this translation unit.
static const char *TAG = "I2C_DEV";
// One mutex per hardware I2C port; the array is allocated in Init().
xSemaphoreHandle *I2C::locks ={0};
// Allocates one mutex per I2C port so concurrent tasks serialize access.
// Idempotent: calling Init() twice previously leaked the old mutex array
// (and orphaned any mutexes currently held); now subsequent calls no-op.
// Returns ESP_OK.
esp_err_t I2C::Init()
{
    if (I2C::locks != nullptr)
    {
        return ESP_OK; // already initialized
    }
    I2C::locks = new xSemaphoreHandle[I2C_NUM_MAX];
    for (int i = 0; i < I2C_NUM_MAX; i++)
    {
        I2C::locks[i] = xSemaphoreCreateMutex();
    }
    return ESP_OK;
}
// Writes |len| bytes from |reg_data| to register |reg_addr| of the device at
// 7-bit address |address7bit| on |port|. Serialized per port via the Init()
// mutexes (1 s acquire timeout). Returns the driver result, or
// ESP_ERR_TIMEOUT if the port mutex could not be taken.
esp_err_t I2C::WriteReg(const i2c_port_t port, uint8_t address7bit, uint8_t reg_addr, uint8_t *reg_data, size_t len)
{
if (!xSemaphoreTake(locks[port], 1000 / portTICK_RATE_MS))
{
ESP_LOGE(TAG, "Could not take port mutex %d", port);
return ESP_ERR_TIMEOUT;
}
esp_err_t espRc;
i2c_cmd_handle_t cmd = i2c_cmd_link_create();
i2c_master_start(cmd);
// Address byte with R/W bit clear, then register, then payload.
i2c_master_write_byte(cmd, (address7bit << 1) | I2C_MASTER_WRITE, true);
i2c_master_write_byte(cmd, reg_addr, true);
i2c_master_write(cmd, (uint8_t *)reg_data, len, true);
i2c_master_stop(cmd);
// 10 ms bus timeout for the whole transaction.
espRc = i2c_master_cmd_begin(port, cmd, 10 / portTICK_PERIOD_MS);
i2c_cmd_link_delete(cmd);
xSemaphoreGive(locks[port]);
return espRc;
}
// Reads |len| bytes starting at register |reg_addr| from the device at 7-bit
// address |address7bit| on |port| (write register, repeated start, read).
// Serialized per port via the Init() mutexes (1 s acquire timeout).
// Returns the driver result, ESP_ERR_TIMEOUT if the mutex could not be
// taken, or ESP_ERR_INVALID_ARG for len == 0.
esp_err_t I2C::ReadReg(const i2c_port_t port, uint8_t address7bit, uint8_t reg_addr, uint8_t *reg_data, size_t len)
{
    // Reject empty reads: the final read_byte below targets
    // reg_data + len - 1, which underflows when len == 0.
    if (len == 0)
    {
        return ESP_ERR_INVALID_ARG;
    }
    if (!xSemaphoreTake(locks[port], 1000 / portTICK_RATE_MS))
    {
        ESP_LOGE(TAG, "Could not take port mutex %d", port);
        return ESP_ERR_TIMEOUT;
    }
    esp_err_t espRc;
    i2c_cmd_handle_t cmd = i2c_cmd_link_create();
    i2c_master_start(cmd);
    // Write phase: select the register to read from.
    i2c_master_write_byte(cmd, (address7bit << 1) | I2C_MASTER_WRITE, true);
    i2c_master_write_byte(cmd, reg_addr, true);
    // Repeated start, then switch the bus to read mode.
    i2c_master_start(cmd);
    i2c_master_write_byte(cmd, (address7bit << 1) | I2C_MASTER_READ, true);
    if (len > 1)
    {
        i2c_master_read(cmd, reg_data, len - 1, I2C_MASTER_ACK);
    }
    // NACK the last byte to signal end-of-transfer to the slave.
    i2c_master_read_byte(cmd, reg_data + len - 1, I2C_MASTER_NACK);
    i2c_master_stop(cmd);
    espRc = i2c_master_cmd_begin(port, cmd, 10 / portTICK_PERIOD_MS);
    i2c_cmd_link_delete(cmd);
    xSemaphoreGive(locks[port]);
    return espRc;
}
// Probes for a device at |address7bit| by issuing an empty addressed write;
// the device is present iff it ACKs its address. Returns the driver result
// (ESP_OK when present), or ESP_ERR_TIMEOUT if the port mutex timed out.
esp_err_t I2C::IsAvailable(const i2c_port_t port, uint8_t address7bit){
if (!xSemaphoreTake(locks[port], 1000 / portTICK_RATE_MS))
{
ESP_LOGE(TAG, "Could not take port mutex %d", port);
return ESP_ERR_TIMEOUT;
}
//Nothing to init. Just check if it is there...
i2c_cmd_handle_t cmd = i2c_cmd_link_create();
i2c_master_start(cmd);
i2c_master_write_byte(cmd, (address7bit << 1) | I2C_MASTER_WRITE , true);
i2c_master_stop(cmd);
esp_err_t ret = i2c_master_cmd_begin(port, cmd, 50 / portTICK_RATE_MS);
i2c_cmd_link_delete(cmd);
xSemaphoreGive(locks[port]);
return ret;
}
// Performs a plain (register-less) read of |len| bytes from the device at
// 7-bit address |address7bit| on |port|. Serialized per port via the Init()
// mutexes. Returns the driver result, ESP_ERR_TIMEOUT on mutex timeout, or
// ESP_ERR_INVALID_ARG for len == 0.
esp_err_t I2C::Read(const i2c_port_t port, uint8_t address7bit, uint8_t *data, size_t len){
    // Reject empty reads: data + len - 1 below underflows when len == 0.
    if (len == 0)
    {
        return ESP_ERR_INVALID_ARG;
    }
    if (!xSemaphoreTake(locks[port], 1000 / portTICK_RATE_MS))
    {
        ESP_LOGE(TAG, "Could not take port mutex %d", port);
        return ESP_ERR_TIMEOUT;
    }
    esp_err_t espRc;
    i2c_cmd_handle_t cmd = i2c_cmd_link_create();
    i2c_master_start(cmd);
    i2c_master_write_byte(cmd, (address7bit << 1) | I2C_MASTER_READ, true);
    if (len > 1)
    {
        i2c_master_read(cmd, data, len - 1, I2C_MASTER_ACK);
    }
    // NACK the last byte to signal end-of-transfer to the slave.
    i2c_master_read_byte(cmd, data + len - 1, I2C_MASTER_NACK);
    i2c_master_stop(cmd);
    espRc = i2c_master_cmd_begin(port, cmd, 10 / portTICK_PERIOD_MS);
    i2c_cmd_link_delete(cmd);
    xSemaphoreGive(locks[port]);
    return espRc;
}
// Performs a plain (register-less) write of |len| bytes to the device at
// 7-bit address |address7bit| on |port|; len == 0 sends just the address
// byte. Serialized per port via the Init() mutexes. Returns the driver
// result, or ESP_ERR_TIMEOUT if the port mutex could not be taken.
esp_err_t I2C::Write(const i2c_port_t port, uint8_t address7bit, uint8_t *data, size_t len){
if (!xSemaphoreTake(locks[port], 1000 / portTICK_RATE_MS))
{
ESP_LOGE(TAG, "Could not take port mutex %d", port);
return ESP_ERR_TIMEOUT;
}
esp_err_t espRc;
i2c_cmd_handle_t cmd = i2c_cmd_link_create();
i2c_master_start(cmd);
i2c_master_write_byte(cmd, address7bit << 1 | I2C_MASTER_WRITE, true);
if(len>0)
i2c_master_write(cmd, data, len, true);//Needs modifications in the current esp-idf --> make data const!
i2c_master_stop(cmd);
// Note: this variant allows a much longer bus timeout (1 s) than the others.
espRc = i2c_master_cmd_begin(port, cmd, 1000 / portTICK_RATE_MS);
i2c_cmd_link_delete(cmd);
xSemaphoreGive(locks[port]);
return espRc;
}
|
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/DenseMap.h"
#include <deque>
#include <algorithm>
#include <vector>
using namespace clang;
namespace {
// Worklist driving the backward liveness fixpoint. Blocks are kept sorted
// with the post-order comparator and dequeued from the back, so iteration
// order follows the CFG's post-order view.
class DataflowWorklist {
SmallVector<const CFGBlock *, 20> worklist;
// One bit per block ID; guards against duplicate worklist entries.
llvm::BitVector enqueuedBlocks;
PostOrderCFGView *POV;
public:
DataflowWorklist(const CFG &cfg, AnalysisDeclContext &Ctx)
: enqueuedBlocks(cfg.getNumBlockIDs()),
POV(Ctx.getAnalysis<PostOrderCFGView>()) {}
void enqueueBlock(const CFGBlock *block);
void enqueueSuccessors(const CFGBlock *block);
void enqueuePredecessors(const CFGBlock *block);
const CFGBlock *dequeue();
void sortWorklist();
};
}
// Adds 'block' to the worklist unless it is null or already enqueued.
void DataflowWorklist::enqueueBlock(const clang::CFGBlock *block) {
  if (!block)
    return;
  const unsigned blockID = block->getBlockID();
  if (enqueuedBlocks[blockID])
    return;
  enqueuedBlocks[blockID] = true;
  worklist.push_back(block);
}
// Enqueues every successor of 'block'. The worklist is re-sorted only when
// new blocks were appended to an already non-empty list, keeping post-order.
void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
  const unsigned sizeBefore = worklist.size();
  for (CFGBlock::const_succ_iterator I = block->succ_begin(),
                                     E = block->succ_end();
       I != E; ++I)
    enqueueBlock(*I);
  if (sizeBefore != 0 && worklist.size() != sizeBefore)
    sortWorklist();
}
// Enqueues every predecessor of 'block'. Mirrors enqueueSuccessors: the
// worklist is re-sorted only when new blocks joined a non-empty list.
void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
  const unsigned sizeBefore = worklist.size();
  for (CFGBlock::const_pred_iterator I = block->pred_begin(),
                                     E = block->pred_end();
       I != E; ++I)
    enqueueBlock(*I);
  if (sizeBefore != 0 && worklist.size() != sizeBefore)
    sortWorklist();
}
// Restores post-order: dequeue() pops from the back, so sorting keeps the
// iteration order given by the PostOrderCFGView comparator.
void DataflowWorklist::sortWorklist() {
std::sort(worklist.begin(), worklist.end(), POV->getComparator());
}
// Pops the next block (or returns 0 when the worklist is exhausted) and
// clears its enqueued bit so it can be enqueued again later.
const CFGBlock *DataflowWorklist::dequeue() {
if (worklist.empty())
return 0;
const CFGBlock *b = worklist.back();
worklist.pop_back();
enqueuedBlocks[b->getBlockID()] = false;
return b;
}
namespace {
// Hidden implementation behind LiveVariables' opaque void* handle. Owns the
// per-block and per-statement liveness maps plus the ImmutableSet factories
// used to build them.
class LiveVariablesImpl {
public:
AnalysisDeclContext &analysisContext;
std::vector<LiveVariables::LivenessValues> cfgBlockValues;
llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
// Liveness at block exit / block entry, respectively.
llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksBeginToLiveness;
llvm::DenseMap<const Stmt *, LiveVariables::LivenessValues> stmtsToLiveness;
// DeclRefExprs appearing as the LHS of an assignment (not "uses");
// populated up-front in computeLiveness when killAtAssign is set.
llvm::DenseMap<const DeclRefExpr *, unsigned> inAssignment;
const bool killAtAssign;
LiveVariables::LivenessValues
merge(LiveVariables::LivenessValues valsA,
LiveVariables::LivenessValues valsB);
LiveVariables::LivenessValues runOnBlock(const CFGBlock *block,
LiveVariables::LivenessValues val,
LiveVariables::Observer *obs = 0);
void dumpBlockLiveness(const SourceManager& M);
LiveVariablesImpl(AnalysisDeclContext &ac, bool KillAtAssign)
: analysisContext(ac),
SSetFact(false), // Do not canonicalize ImmutableSets by default.
DSetFact(false), // This is a *major* performance win.
killAtAssign(KillAtAssign) {}
};
}
// Recovers the implementation object from the type-erased opaque handle.
static LiveVariablesImpl &getImpl(void *x) {
  return *static_cast<LiveVariablesImpl *>(x);
}
//===----------------------------------------------------------------------===//
// Operations and queries on LivenessValues.
//===----------------------------------------------------------------------===//
// True iff statement S is in the live-statements set of this dataflow value.
bool LiveVariables::LivenessValues::isLive(const Stmt *S) const {
return liveStmts.contains(S);
}
// True iff variable D is in the live-declarations set of this dataflow value.
bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
return liveDecls.contains(D);
}
namespace {
// Set union over ImmutableSet-style sets: folds every element of B into A.
// Returns B directly when A is empty to avoid redundant insertions.
template <typename SET>
SET mergeSets(SET A, SET B) {
if (A.isEmpty())
return B;
for (typename SET::iterator it = B.begin(), ei = B.end(); it != ei; ++it) {
A = A.add(*it);
}
return A;
}
}
void LiveVariables::Observer::anchor() { }
// Confluence operator: unions the statement and declaration liveness sets of
// two dataflow values. Works on ImmutableSetRef (non-canonicalizing) trees
// for speed, then canonicalizes once at the end so equals() can compare.
LiveVariables::LivenessValues
LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
LiveVariables::LivenessValues valsB) {
llvm::ImmutableSetRef<const Stmt *>
SSetRefA(valsA.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory()),
SSetRefB(valsB.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory());
llvm::ImmutableSetRef<const VarDecl *>
DSetRefA(valsA.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()),
DSetRefB(valsB.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory());
SSetRefA = mergeSets(SSetRefA, SSetRefB);
DSetRefA = mergeSets(DSetRefA, DSetRefB);
// asImmutableSet() canonicalizes the tree, allowing us to do an easy
// comparison afterwards.
return LiveVariables::LivenessValues(SSetRefA.asImmutableSet(),
DSetRefA.asImmutableSet());
}
// Structural equality of two dataflow values (relies on the canonicalization
// performed by merge()); used by the fixpoint loop to detect convergence.
bool LiveVariables::LivenessValues::equals(const LivenessValues &V) const {
return liveStmts == V.liveStmts && liveDecls == V.liveDecls;
}
//===----------------------------------------------------------------------===//
// Query methods.
//===----------------------------------------------------------------------===//
// Globals (and other static-storage variables) are conservatively treated as
// live everywhere.
static bool isAlwaysAlive(const VarDecl *D) {
return D->hasGlobalStorage();
}
// Is variable D live at the exit of block B?
bool LiveVariables::isLive(const CFGBlock *B, const VarDecl *D) {
return isAlwaysAlive(D) || getImpl(impl).blocksEndToLiveness[B].isLive(D);
}
// Is variable D live at statement S?
bool LiveVariables::isLive(const Stmt *S, const VarDecl *D) {
return isAlwaysAlive(D) || getImpl(impl).stmtsToLiveness[S].isLive(D);
}
// Is (sub)expression S live at program point Loc?
bool LiveVariables::isLive(const Stmt *Loc, const Stmt *S) {
return getImpl(impl).stmtsToLiveness[Loc].isLive(S);
}
//===----------------------------------------------------------------------===//
// Dataflow computation.
//===----------------------------------------------------------------------===//
namespace {
// Per-statement transfer function for the backward liveness analysis:
// visiting a statement updates 'val' (gen/kill) and notifies 'observer'.
class TransferFunctions : public StmtVisitor<TransferFunctions> {
LiveVariablesImpl &LV;
// Dataflow value mutated in place as statements are visited in reverse.
LiveVariables::LivenessValues &val;
LiveVariables::Observer *observer;
const CFGBlock *currentBlock;
public:
TransferFunctions(LiveVariablesImpl &im,
LiveVariables::LivenessValues &Val,
LiveVariables::Observer *Observer,
const CFGBlock *CurrentBlock)
: LV(im), val(Val), observer(Observer), currentBlock(CurrentBlock) {}
void VisitBinaryOperator(BinaryOperator *BO);
void VisitBlockExpr(BlockExpr *BE);
void VisitDeclRefExpr(DeclRefExpr *DR);
void VisitDeclStmt(DeclStmt *DS);
void VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS);
void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE);
void VisitUnaryOperator(UnaryOperator *UO);
void Visit(Stmt *S);
};
}
// Walks the array-type chain of 'Ty' looking for the first variable-length
// array dimension that carries a size expression; returns 0 if none exists.
static const VariableArrayType *FindVA(QualType Ty) {
  const Type *ty = Ty.getTypePtr();
  for (const ArrayType *AT = dyn_cast<ArrayType>(ty); AT;
       AT = dyn_cast<ArrayType>(ty)) {
    if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT))
      if (VAT->getSizeExpr())
        return VAT;
    ty = AT->getElementType().getTypePtr();
  }
  return 0;
}
// Strips syntactic wrappers -- parentheses, ExprWithCleanups, and
// OpaqueValueExpr -- so liveness is tracked on the underlying statement.
// Iterates because the wrappers can nest in any order.
static const Stmt *LookThroughStmt(const Stmt *S) {
while (S) {
if (const Expr *Ex = dyn_cast<Expr>(S))
S = Ex->IgnoreParens();
if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S)) {
S = EWC->getSubExpr();
continue;
}
if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
S = OVE->getSourceExpr();
continue;
}
break;
}
return S;
}
// Adds S (after unwrapping) to a live-statements set in place.
static void AddLiveStmt(llvm::ImmutableSet<const Stmt *> &Set,
llvm::ImmutableSet<const Stmt *>::Factory &F,
const Stmt *S) {
Set = F.add(Set, LookThroughStmt(S));
}
// Main per-statement transfer function, applied in reverse program order:
// notifies the observer, dispatches to the specific Visit* handler, kills
// the statement itself (an expression is no longer "live" once its value is
// produced), then marks its child expressions live -- with several special
// cases that redirect which children count.
void TransferFunctions::Visit(Stmt *S) {
if (observer)
observer->observeStmt(S, currentBlock, val);
StmtVisitor<TransferFunctions>::Visit(S);
if (isa<Expr>(S)) {
val.liveStmts = LV.SSetFact.remove(val.liveStmts, S);
}
// Mark all children expressions live.
switch (S->getStmtClass()) {
default:
break;
case Stmt::StmtExprClass: {
// For statement expressions, look through the compound statement.
S = cast<StmtExpr>(S)->getSubStmt();
break;
}
case Stmt::CXXMemberCallExprClass: {
// Include the implicit "this" pointer as being live.
CXXMemberCallExpr *CE = cast<CXXMemberCallExpr>(S);
if (Expr *ImplicitObj = CE->getImplicitObjectArgument()) {
AddLiveStmt(val.liveStmts, LV.SSetFact, ImplicitObj);
}
break;
}
case Stmt::DeclStmtClass: {
// VLA declarations keep their size expressions live.
const DeclStmt *DS = cast<DeclStmt>(S);
if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) {
for (const VariableArrayType* VA = FindVA(VD->getType());
VA != 0; VA = FindVA(VA->getElementType())) {
AddLiveStmt(val.liveStmts, LV.SSetFact, VA->getSizeExpr());
}
}
break;
}
case Stmt::PseudoObjectExprClass: {
// A pseudo-object operation only directly consumes its result
// expression.
Expr *child = cast<PseudoObjectExpr>(S)->getResultExpr();
if (!child) return;
if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(child))
child = OV->getSourceExpr();
child = child->IgnoreParens();
val.liveStmts = LV.SSetFact.add(val.liveStmts, child);
return;
}
// FIXME: These cases eventually shouldn't be needed.
case Stmt::ExprWithCleanupsClass: {
S = cast<ExprWithCleanups>(S)->getSubExpr();
break;
}
case Stmt::CXXBindTemporaryExprClass: {
S = cast<CXXBindTemporaryExpr>(S)->getSubExpr();
break;
}
case Stmt::UnaryExprOrTypeTraitExprClass: {
// No need to unconditionally visit subexpressions.
return;
}
}
// Default path: every child statement becomes live.
for (Stmt::child_iterator it = S->child_begin(), ei = S->child_end();
it != ei; ++it) {
if (Stmt *child = *it)
AddLiveStmt(val.liveStmts, LV.SSetFact, child);
}
}
// Kill transfer for assignments: when killAtAssign is enabled, assigning to
// a local (non-reference) variable ends its previous live range.
void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
if (B->isAssignmentOp()) {
if (!LV.killAtAssign)
return;
// Assigning to a variable?
Expr *LHS = B->getLHS()->IgnoreParens();
if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS))
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
// Assignments to references don't kill the ref's address
if (VD->getType()->isReferenceType())
return;
if (!isAlwaysAlive(VD)) {
// The variable is now dead.
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
}
if (observer)
observer->observerKill(DR);
}
}
}
// A block literal captures variables: every non-global variable it
// references becomes live at this point.
void TransferFunctions::VisitBlockExpr(BlockExpr *BE) {
AnalysisDeclContext::referenced_decls_iterator I, E;
llvm::tie(I, E) =
LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl());
for ( ; I != E ; ++I) {
const VarDecl *VD = *I;
if (isAlwaysAlive(VD))
continue;
val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
}
}
// A variable reference is a "use" (gen) -- unless it was pre-identified as
// the LHS of an assignment (see inAssignment in computeLiveness).
void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
if (!isAlwaysAlive(D) && LV.inAssignment.find(DR) == LV.inAssignment.end())
val.liveDecls = LV.DSetFact.add(val.liveDecls, D);
}
// A declaration kills the declared variables: in backward order, anything
// before the declaration cannot see them live.
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE = DS->decl_end();
DI != DE; ++DI)
if (VarDecl *VD = dyn_cast<VarDecl>(*DI)) {
if (!isAlwaysAlive(VD))
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
}
}
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS) {
// Kill the iteration variable.
DeclRefExpr *DR = 0;
const VarDecl *VD = 0;
Stmt *element = OS->getElement();
// The element is either a fresh declaration ("for (id x in ...)") or a
// reference to an existing variable ("for (x in ...)").
if (DeclStmt *DS = dyn_cast<DeclStmt>(element)) {
VD = cast<VarDecl>(DS->getSingleDecl());
}
else if ((DR = dyn_cast<DeclRefExpr>(cast<Expr>(element)->IgnoreParens()))) {
VD = cast<VarDecl>(DR->getDecl());
}
if (VD) {
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
if (observer && DR)
observer->observerKill(DR);
}
}
void TransferFunctions::
VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE)
{
// While sizeof(var) doesn't technically extend the liveness of 'var', it
// does extend the liveness of metadata if 'var' is a VariableArrayType.
// We handle that special case here.
if (UE->getKind() != UETT_SizeOf || UE->isArgumentType())
return;
const Expr *subEx = UE->getArgumentExpr();
if (subEx->getType()->isVariableArrayType()) {
assert(subEx->isLValue());
val.liveStmts = LV.SSetFact.add(val.liveStmts, subEx->IgnoreParens());
}
}
void TransferFunctions::VisitUnaryOperator(UnaryOperator *UO) {
// Treat ++/-- as a kill.
// Note we don't actually have to do anything if we don't have an observer,
// since a ++/-- acts as both a kill and a "use".
if (!observer)
return;
switch (UO->getOpcode()) {
default:
return;
case UO_PostInc:
case UO_PostDec:
case UO_PreInc:
case UO_PreDec:
break;
}
if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens()))
if (isa<VarDecl>(DR->getDecl())) {
// Treat ++/-- as a kill.
observer->observerKill(DR);
}
}
// Applies the transfer functions across one block in reverse order, starting
// from the block-exit value 'val'; records per-statement liveness as a side
// effect and returns the block-entry value.
LiveVariables::LivenessValues
LiveVariablesImpl::runOnBlock(const CFGBlock *block,
LiveVariables::LivenessValues val,
LiveVariables::Observer *obs) {
TransferFunctions TF(*this, val, obs, block);
// Visit the terminator (if any).
if (const Stmt *term = block->getTerminator())
TF.Visit(const_cast<Stmt*>(term));
// Apply the transfer function for all Stmts in the block.
for (CFGBlock::const_reverse_iterator it = block->rbegin(),
ei = block->rend(); it != ei; ++it) {
const CFGElement &elem = *it;
if (!isa<CFGStmt>(elem))
continue;
const Stmt *S = cast<CFGStmt>(elem).getStmt();
TF.Visit(const_cast<Stmt*>(S));
// Liveness recorded *after* visiting S is the value holding before S.
stmtsToLiveness[S] = val;
}
return val;
}
// Re-runs the (already converged) transfer functions over every block so the
// observer sees each statement with its computed liveness.
void LiveVariables::runOnAllBlocks(LiveVariables::Observer &obs) {
const CFG *cfg = getImpl(impl).analysisContext.getCFG();
for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it)
getImpl(impl).runOnBlock(*it, getImpl(impl).blocksEndToLiveness[*it], &obs);
}
// 'impl' is an owning opaque pointer to a LiveVariablesImpl.
LiveVariables::LiveVariables(void *im) : impl(im) {}
LiveVariables::~LiveVariables() {
delete (LiveVariablesImpl*) impl;
}
// Entry point: runs the backward liveness fixpoint over the function's CFG
// and returns an analysis object owning the results (or 0 if no CFG).
LiveVariables *
LiveVariables::computeLiveness(AnalysisDeclContext &AC,
bool killAtAssign) {
// No CFG? Bail out.
CFG *cfg = AC.getCFG();
if (!cfg)
return 0;
LiveVariablesImpl *LV = new LiveVariablesImpl(AC, killAtAssign);
// Construct the dataflow worklist. Enqueue the exit block as the
// start of the analysis.
DataflowWorklist worklist(*cfg, AC);
llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());
// FIXME: we should enqueue using post order.
for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
const CFGBlock *block = *it;
worklist.enqueueBlock(block);
// FIXME: Scan for DeclRefExprs using in the LHS of an assignment.
// We need to do this because we lack context in the reverse analysis
// to determine if a DeclRefExpr appears in such a context, and thus
// doesn't constitute a "use".
if (killAtAssign)
for (CFGBlock::const_iterator bi = block->begin(), be = block->end();
bi != be; ++bi) {
if (const CFGStmt *cs = bi->getAs<CFGStmt>()) {
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(cs->getStmt())) {
if (BO->getOpcode() == BO_Assign) {
if (const DeclRefExpr *DR =
dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) {
LV->inAssignment[DR] = 1;
}
}
}
}
}
}
worklist.sortWorklist();
// Iterate to a fixed point: process blocks until no block-exit value changes.
while (const CFGBlock *block = worklist.dequeue()) {
// Determine if the block's end value has changed. If not, we
// have nothing left to do for this block.
LivenessValues &prevVal = LV->blocksEndToLiveness[block];
// Merge the values of all successor blocks.
LivenessValues val;
for (CFGBlock::const_succ_iterator it = block->succ_begin(),
ei = block->succ_end(); it != ei; ++it) {
if (const CFGBlock *succ = *it) {
val = LV->merge(val, LV->blocksBeginToLiveness[succ]);
}
}
// Always process a block the first time; afterwards only when its
// merged exit value actually changed.
if (!everAnalyzedBlock[block->getBlockID()])
everAnalyzedBlock[block->getBlockID()] = true;
else if (prevVal.equals(val))
continue;
prevVal = val;
// Update the dataflow value for the start of this block.
LV->blocksBeginToLiveness[block] = LV->runOnBlock(block, val);
// Enqueue the value to the predecessors.
worklist.enqueuePredecessors(block);
}
return new LiveVariables(LV);
}
// Orders blocks by ID for deterministic dump output.
static bool compare_entries(const CFGBlock *A, const CFGBlock *B) {
return A->getBlockID() < B->getBlockID();
}
// Orders declarations by source location for deterministic dump output.
static bool compare_vd_entries(const Decl *A, const Decl *B) {
SourceLocation ALoc = A->getLocStart();
SourceLocation BLoc = B->getLocStart();
return ALoc.getRawEncoding() < BLoc.getRawEncoding();
}
// Public debug entry point; forwards to the implementation.
void LiveVariables::dumpBlockLiveness(const SourceManager &M) {
getImpl(impl).dumpBlockLiveness(M);
}
// Dumps, per block (sorted by ID), the variables live at block exit, each
// with its declaration location; output goes to llvm::errs().
void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
std::vector<const CFGBlock *> vec;
for (llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues>::iterator
it = blocksEndToLiveness.begin(), ei = blocksEndToLiveness.end();
it != ei; ++it) {
vec.push_back(it->first);
}
// DenseMap iteration order is unstable; sort for reproducible output.
std::sort(vec.begin(), vec.end(), compare_entries);
std::vector<const VarDecl*> declVec;
for (std::vector<const CFGBlock *>::iterator
it = vec.begin(), ei = vec.end(); it != ei; ++it) {
llvm::errs() << "\n[ B" << (*it)->getBlockID()
<< " (live variables at block exit) ]\n";
LiveVariables::LivenessValues vals = blocksEndToLiveness[*it];
declVec.clear();
for (llvm::ImmutableSet<const VarDecl *>::iterator si =
vals.liveDecls.begin(),
se = vals.liveDecls.end(); si != se; ++si) {
declVec.push_back(*si);
}
std::sort(declVec.begin(), declVec.end(), compare_vd_entries);
for (std::vector<const VarDecl*>::iterator di = declVec.begin(),
de = declVec.end(); di != de; ++di) {
llvm::errs() << " " << (*di)->getDeclName().getAsString()
<< " <";
(*di)->getLocation().dump(M);
llvm::errs() << ">\n";
}
}
llvm::errs() << "\n";
}
// Unique analysis identity tags: the address of a function-local static is
// distinct per function, giving each analysis kind a stable unique key.
const void *LiveVariables::getTag() { static int x; return &x; }
const void *RelaxedLiveVariables::getTag() { static int x; return &x; }
|
#include "Stdafx.h"
#include "..\MarshalCollections.h"
#using <System.Core.dll>
using namespace msclr::interop;
using namespace System;
using namespace System::Text;
using namespace System::Collections::Generic;
using namespace NUnit::Framework;
namespace WebRtcInterop { namespace Marshaling { namespace UnitTests
{
[TestFixture]
// Unit tests for the native<->managed marshaling helpers in
// MarshalCollections.h (C++/CLI, NUnit).
public ref class MarshalCollectionsTests
{
public:
[Test]
// std::vector<std::string> -> IEnumerable<String^> round-trip.
void marshal_vector_as_test()
{
std::vector<std::string> nativeVector = { "TestString" };
auto result = marshal_vector_as<String ^>(nativeVector);
auto resultArray = Linq::Enumerable::ToArray(result);
auto tst = marshal_as<String^>(std::string("test"));
Assert::AreEqual(1, resultArray->Length);
Assert::AreEqual("TestString", resultArray[0]);
}
[Test]
void marshal_vector_as_empty_source_test()
{
std::vector<std::string> nativeVector;
auto result = marshal_vector_as<String ^>(nativeVector);
auto resultArray = Linq::Enumerable::ToArray(result);
// NOTE(review): in C++/CLI the literal 0 cast to Object^ is a null
// handle, not a boxed Int32 0 -- verify this assertion actually
// compares 0 == Length rather than nullptr == boxed int.
Assert::AreEqual((Object ^)0, resultArray->Length);
}
[Test]
// std::map<string,string> -> IDictionary<String^,String^> with 3 entries.
void marshal_map_as_test()
{
std::map<std::string, std::string> nativeMap = { {"1","a"}, {"2","b"}, {"3", "c"} };
auto result = marshal_map_as<String ^, String ^>(nativeMap);
Assert::AreEqual(3, result->Count);
Assert::AreEqual("a", result["1"]);
Assert::AreEqual("b", result["2"]);
Assert::AreEqual("c", result["3"]);
}
[Test]
void marshal_map_as_empty_test()
{
std::map<std::string, std::string> nativeMap;
auto result = marshal_map_as<String ^, String ^>(nativeMap);
// NOTE(review): same (Object^)0 concern as above.
Assert::AreEqual((Object ^)0, result->Count);
}
[Test]
// Managed array -> std::vector<std::string> direction.
void marshal_enumerable_as_test()
{
auto managedEnumerable = gcnew array<String ^>{ "a", "b", "c" };
auto result = marshal_enumerable_as<std::string, String ^>(managedEnumerable);
Assert::AreEqual((Object ^)3, result.size());
Assert::AreEqual((Object ^)0, result[0].compare("a"));
Assert::AreEqual((Object ^)0, result[1].compare("b"));
Assert::AreEqual((Object ^)0, result[2].compare("c"));
}
};
}}}
|
/*
Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include "engine/alice/alice.hpp"
#include "messages/messages.hpp"
namespace isaac {
// Play an audio file periodically
class AudioPlaybackFileIndex : public alice::Codelet {
public:
// Outgoing message channel carrying the index of the audio file to play.
ISAAC_PROTO_TX(AudioFilePlaybackProto, audio_fileindex)
// Configuration parameter: index of the file to publish (default 0).
ISAAC_PARAM(int, file_index, 0)
// Codelet lifecycle hooks; implementations live in the .cpp.
void start() override;
void tick() override;
};
} // namespace isaac
ISAAC_ALICE_REGISTER_CODELET(isaac::AudioPlaybackFileIndex);
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.