text
stringlengths
5
1.04M
// tagged pointer, for aba prevention // // Copyright (C) 2008 Tim Blechmann // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED #define BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED #include <boost/config.hpp> #include <boost/lockfree/detail/prefix.hpp> #ifndef BOOST_LOCKFREE_PTR_COMPRESSION #include <boost/lockfree/detail/tagged_ptr_dcas.hpp> #else #include <boost/lockfree/detail/tagged_ptr_ptrcompression.hpp> #endif #endif /* BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED */
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "math_entrypoints.h"

#include <limits>

#include "common_runtime_test.h"

namespace art {

// Tests for the runtime's floating-point -> integer conversion entrypoints
// (art_d2l, art_f2l, art_d2i, art_f2i). The key property exercised is
// saturation: values outside the destination range must clamp to the
// destination type's min/max rather than being undefined.
class MathEntrypointsTest : public CommonRuntimeTest {};

// double -> int64: out-of-range magnitudes saturate, in-range values convert
// exactly, sign is preserved.
TEST_F(MathEntrypointsTest, DoubleToLong) {
  // 1.85e19 > INT64_MAX (~9.22e18), so both directions must saturate.
  EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19));
  EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_d2l(-1.85e19));
  EXPECT_EQ(INT64_C(0), art_d2l(0));
  EXPECT_EQ(INT64_C(1), art_d2l(1.0));
  EXPECT_EQ(INT64_C(10), art_d2l(10.0));
  EXPECT_EQ(INT64_C(100), art_d2l(100.0));
  EXPECT_EQ(INT64_C(-1), art_d2l(-1.0));
  EXPECT_EQ(INT64_C(-10), art_d2l(-10.0));
  EXPECT_EQ(INT64_C(-100), art_d2l(-100.0));
}

// float -> int64: same saturation/exactness contract as above.
// (The 1.85e19 literals are doubles narrowed to float at the call; the value
// remains far above INT64_MAX after rounding.)
TEST_F(MathEntrypointsTest, FloatToLong) {
  EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_f2l(1.85e19));
  EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_f2l(-1.85e19));
  EXPECT_EQ(INT64_C(0), art_f2l(0));
  EXPECT_EQ(INT64_C(1), art_f2l(1.0));
  EXPECT_EQ(INT64_C(10), art_f2l(10.0));
  EXPECT_EQ(INT64_C(100), art_f2l(100.0));
  EXPECT_EQ(INT64_C(-1), art_f2l(-1.0));
  EXPECT_EQ(INT64_C(-10), art_f2l(-10.0));
  EXPECT_EQ(INT64_C(-100), art_f2l(-100.0));
}

// double -> int32: 4.3e9 exceeds INT32_MAX (~2.15e9), so it must saturate.
TEST_F(MathEntrypointsTest, DoubleToInt) {
  EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_d2i(4.3e9));
  EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_d2i(-4.3e9));
  EXPECT_EQ(0L, art_d2i(0));
  EXPECT_EQ(1L, art_d2i(1.0));
  EXPECT_EQ(10L, art_d2i(10.0));
  EXPECT_EQ(100L, art_d2i(100.0));
  EXPECT_EQ(-1L, art_d2i(-1.0));
  EXPECT_EQ(-10L, art_d2i(-10.0));
  EXPECT_EQ(-100L, art_d2i(-100.0));
}

// float -> int32: same contract as DoubleToInt.
TEST_F(MathEntrypointsTest, FloatToInt) {
  EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_f2i(4.3e9));
  EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_f2i(-4.3e9));
  EXPECT_EQ(0L, art_f2i(0));
  EXPECT_EQ(1L, art_f2i(1.0));
  EXPECT_EQ(10L, art_f2i(10.0));
  EXPECT_EQ(100L, art_f2i(100.0));
  EXPECT_EQ(-1L, art_f2i(-1.0));
  EXPECT_EQ(-10L, art_f2i(-10.0));
  EXPECT_EQ(-100L, art_f2i(-100.0));
}

}  // namespace art
#include "geometry/Point.h"
#include "gtest/gtest.h"

// Constructors: a default-constructed Point and one built from explicit
// coordinates, for dimensions 1 through 3. Only the explicitly-initialized
// points are checked component by component.
TEST(Point, initialize) {
  cuhksz::Point<1> a1, b1(5);
  EXPECT_DOUBLE_EQ(b1[0], 5);
  cuhksz::Point<2> a2, b2(2, 3);
  EXPECT_DOUBLE_EQ(b2[0], 2);
  EXPECT_DOUBLE_EQ(b2[1], 3);
  cuhksz::Point<3> a3, b3(1, 2, 3);
  EXPECT_DOUBLE_EQ(b3[0], 1);
  EXPECT_DOUBLE_EQ(b3[1], 2);
  EXPECT_DOUBLE_EQ(b3[2], 3);
}

// operator+: component-wise addition of two 2-D vectors.
TEST(GVector, addition) {
  cuhksz::GVector<2> a(2, 3), b(1, 5), c;
  c = a + b;
  EXPECT_DOUBLE_EQ(c[0], 3);
  EXPECT_DOUBLE_EQ(c[1], 8);
}

// operator-: component-wise subtraction.
TEST(GVector, subtraction) {
  cuhksz::GVector<2> a(2, 3), b(1, 5), c;
  c = a - b;
  EXPECT_DOUBLE_EQ(c[0], 1);
  EXPECT_DOUBLE_EQ(c[1], -2);
}

// Scalar multiplication must work with the scalar on either side.
TEST(GVector, mulNumber) {
  cuhksz::GVector<2> a(2, 3), b;
  b = a * 2;
  EXPECT_DOUBLE_EQ(b[0], 4);
  EXPECT_DOUBLE_EQ(b[1], 6);
  a = 2 * b;
  EXPECT_DOUBLE_EQ(a[0], 8);
  EXPECT_DOUBLE_EQ(a[1], 12);
}

// Dot product is exposed both as a free function dot(a, b) and as operator*.
TEST(GVector, dotProduct) {
  cuhksz::GVector<2> a(2, 3), b(3, 5);
  // (2,3)·(3,5) = 6 + 15 = 21.
  EXPECT_DOUBLE_EQ(dot(a, b), 21);
  a = cuhksz::GVector<2>(-2, 3);
  b = cuhksz::GVector<2>(1, -5);
  // (-2,3)·(1,-5) = -2 - 15 = -17.
  EXPECT_DOUBLE_EQ(a * b, -17);
}

// 2-D cross product yields a scalar: (2)(5) - (3)(3) = 1.
TEST(GVector, crossProduct) {
  cuhksz::Point<2> a(2, 3), b(3, 5);
  EXPECT_DOUBLE_EQ(cross(a, b), 1);
}

// Euclidean length: the 3-4-5 triangle.
TEST(GVector, len) {
  cuhksz::GVector<2> a(3, 4);
  EXPECT_DOUBLE_EQ(a.len(), 5);
}
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/arc/ime/arc_ime_service.h"

#include <utility>

#include "ash/constants/ash_features.h"
#include "ash/keyboard/ui/keyboard_ui_controller.h"
#include "ash/public/cpp/app_types.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/singleton.h"
#include "base/metrics/histogram_functions.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "components/arc/arc_browser_context_keyed_service_factory_base.h"
#include "components/arc/arc_util.h"
#include "components/arc/ime/arc_ime_bridge_impl.h"
#include "components/arc/ime/arc_ime_util.h"
#include "components/arc/ime/key_event_result_receiver.h"
#include "components/exo/wm_helper.h"
#include "ui/aura/client/aura_constants.h"
#include "ui/aura/env.h"
#include "ui/aura/window.h"
#include "ui/aura/window_tree_host.h"
#include "ui/base/ime/chromeos/extension_ime_util.h"
#include "ui/base/ime/chromeos/input_method_manager.h"
#include "ui/base/ime/constants.h"
#include "ui/base/ime/input_method.h"
#include "ui/base/ime/input_method_delegate.h"
#include "ui/base/ime/text_input_flags.h"
#include "ui/events/base_event_utils.h"
#include "ui/events/event.h"
#include "ui/events/keycodes/keyboard_codes.h"
#include "ui/gfx/range/range.h"
#include "ui/views/widget/widget.h"
#include "ui/views/window/non_client_view.h"
#include "ui/wm/core/ime_util_chromeos.h"

namespace arc {

namespace {

// Test-only override for the device scale factor, set via
// SetOverrideDefaultDeviceScaleFactorForTesting(). When it has a value it
// short-circuits both GetDeviceScaleFactor* helpers below.
base::Optional<double> g_override_default_device_scale_factor;

// Return true when a rich text editing is available on a text field with the
// given type.
bool IsTextInputActive(ui::TextInputType type) {
  return type != ui::TEXT_INPUT_TYPE_NONE && type != ui::TEXT_INPUT_TYPE_NULL;
}

// Return true if the given key event generates a visible character.
bool IsCharacterKeyEvent(const ui::KeyEvent* event) {
  return !IsControlChar(event) && !ui::IsSystemKeyModifier(event->flags());
}

// Production implementation of ArcImeService::ArcWindowDelegate; all window
// queries go through exo::WMHelper. Every method is a no-op/false when
// WMHelper does not exist (e.g. in browser_tests).
class ArcWindowDelegateImpl : public ArcImeService::ArcWindowDelegate {
 public:
  explicit ArcWindowDelegateImpl(ArcImeService* ime_service)
      : ime_service_(ime_service) {}
  ~ArcWindowDelegateImpl() override = default;

  bool IsInArcAppWindow(const aura::Window* window) const override {
    // WMHelper is not created in browser_tests.
    if (!exo::WMHelper::HasInstance())
      return false;
    aura::Window* active = exo::WMHelper::GetInstance()->GetActiveWindow();
    // Walk up the ancestor chain from |window| looking for an ARC window.
    for (; window; window = window->parent()) {
      if (ash::IsArcWindow(window))
        return true;
      // TODO(crbug.com/1168334): Find a correct way to detect the ARC++
      // notifications. It should be okay for now because only the ARC++ windows
      // have kSkipImeProcessing.
      if (window->GetProperty(aura::client::kSkipImeProcessing))
        return true;
      // IsArcAppWindow returns false for a window of ARC++ Kiosk app, so we
      // have to check application id of the active window to cover that case.
      // TODO(yhanada): Make IsArcAppWindow support a window of ARC++ Kiosk.
      // Specifically, a window of ARC++ Kiosk should have ash::AppType::ARC_APP
      // property. Please see implementation of IsArcAppWindow().
      if (window == active && IsArcKioskMode() &&
          GetWindowTaskId(window) != kNoTaskId) {
        return true;
      }
    }
    return false;
  }

  void RegisterFocusObserver() override {
    // WMHelper is not created in browser_tests.
    if (!exo::WMHelper::HasInstance())
      return;
    exo::WMHelper::GetInstance()->AddFocusObserver(ime_service_);
  }

  void UnregisterFocusObserver() override {
    // If WMHelper is already destroyed, do nothing.
    // TODO(crbug.com/748380): Fix shutdown order.
    if (!exo::WMHelper::HasInstance())
      return;
    exo::WMHelper::GetInstance()->RemoveFocusObserver(ime_service_);
  }

  ui::InputMethod* GetInputMethodForWindow(
      aura::Window* window) const override {
    if (!window || !window->GetHost())
      return nullptr;
    return window->GetHost()->GetInputMethod();
  }

  bool IsImeBlocked(aura::Window* window) const override {
    // WMHelper is not created in browser_tests.
    if (!exo::WMHelper::HasInstance())
      return false;
    return exo::WMHelper::GetInstance()->IsImeBlocked(window);
  }

 private:
  // Not owned; the service owns this delegate.
  ArcImeService* const ime_service_;

  DISALLOW_COPY_AND_ASSIGN(ArcWindowDelegateImpl);
};

// Singleton factory for ArcImeService.
class ArcImeServiceFactory
    : public internal::ArcBrowserContextKeyedServiceFactoryBase<
          ArcImeService,
          ArcImeServiceFactory> {
 public:
  // Factory name used by ArcBrowserContextKeyedServiceFactoryBase.
  static constexpr const char* kName = "ArcImeServiceFactory";

  static ArcImeServiceFactory* GetInstance() {
    return base::Singleton<ArcImeServiceFactory>::get();
  }

 private:
  friend base::DefaultSingletonTraits<ArcImeServiceFactory>;
  ArcImeServiceFactory() = default;
  ~ArcImeServiceFactory() override = default;
};

}  // anonymous namespace

////////////////////////////////////////////////////////////////////////////////
// ArcImeService main implementation:

// static
ArcImeService* ArcImeService::GetForBrowserContext(
    content::BrowserContext* context) {
  return ArcImeServiceFactory::GetForBrowserContext(context);
}

// Production constructor: delegates to the testing constructor with the real
// WMHelper-backed window delegate.
ArcImeService::ArcImeService(content::BrowserContext* context,
                             ArcBridgeService* bridge_service)
    : ArcImeService(context,
                    bridge_service,
                    std::make_unique<ArcWindowDelegateImpl>(this)) {}

ArcImeService::ArcImeService(content::BrowserContext* context,
                             ArcBridgeService* bridge_service,
                             std::unique_ptr<ArcWindowDelegate> delegate)
    : ime_bridge_(new ArcImeBridgeImpl(this, bridge_service)),
      arc_window_delegate_(std::move(delegate)),
      ime_type_(ui::TEXT_INPUT_TYPE_NONE),
      ime_flags_(ui::TEXT_INPUT_FLAG_NONE),
      is_personalized_learning_allowed_(false),
      has_composition_text_(false),
      receiver_(std::make_unique<KeyEventResultReceiver>()) {
  if (aura::Env::HasInstance())
    aura::Env::GetInstance()->AddObserver(this);
  arc_window_delegate_->RegisterFocusObserver();
}

ArcImeService::~ArcImeService() {
  // Detach from the IME and from every observer list joined in the
  // constructor / during the service's lifetime.
  ui::InputMethod* const input_method = GetInputMethod();
  if (input_method)
    input_method->DetachTextInputClient(this);
  if (focused_arc_window_)
    focused_arc_window_->RemoveObserver(this);
  arc_window_delegate_->UnregisterFocusObserver();
  if (aura::Env::HasInstance())
    aura::Env::GetInstance()->RemoveObserver(this);
  // KeyboardController is destroyed before ArcImeService (except in tests),
  // so check whether there is a KeyboardController first before removing |this|
  // from KeyboardController observers.
  if (keyboard::KeyboardUIController::HasInstance()) {
    auto* keyboard_controller = keyboard::KeyboardUIController::Get();
    if (keyboard_controller->HasObserver(this))
      keyboard_controller->RemoveObserver(this);
  }
}

void ArcImeService::SetImeBridgeForTesting(
    std::unique_ptr<ArcImeBridge> test_ime_bridge) {
  ime_bridge_ = std::move(test_ime_bridge);
}

// Returns the InputMethod associated with the currently focused ARC window,
// or nullptr if there is none.
ui::InputMethod* ArcImeService::GetInputMethod() {
  return arc_window_delegate_->GetInputMethodForWindow(focused_arc_window_);
}

// Moves this text-input client from |old_window|'s InputMethod to
// |new_window|'s, if they differ. Either window may be null.
void ArcImeService::ReattachInputMethod(aura::Window* old_window,
                                        aura::Window* new_window) {
  ui::InputMethod* const old_ime =
      arc_window_delegate_->GetInputMethodForWindow(old_window);
  ui::InputMethod* const new_ime =
      arc_window_delegate_->GetInputMethodForWindow(new_window);
  if (old_ime != new_ime) {
    if (old_ime)
      old_ime->DetachTextInputClient(this);
    if (new_ime)
      new_ime->SetFocusedTextInputClient(this);
  }
}

////////////////////////////////////////////////////////////////////////////////
// Overridden from aura::EnvObserver:

void ArcImeService::OnWindowInitialized(aura::Window* new_window) {
  // Lazily subscribe to keyboard-appearance events once the virtual keyboard
  // machinery exists and is enabled.
  if (keyboard::KeyboardUIController::HasInstance()) {
    auto* keyboard_controller = keyboard::KeyboardUIController::Get();
    if (keyboard_controller->IsEnabled() &&
        !keyboard_controller->HasObserver(this)) {
      keyboard_controller->AddObserver(this);
    }
  }
}

////////////////////////////////////////////////////////////////////////////////
// Overridden from aura::WindowObserver:

void ArcImeService::OnWindowDestroying(aura::Window* window) {
  // This shouldn't be reached on production, since the window lost the focus
  // and called OnWindowFocused() before destroying.
  // But we handle this case for testing.
  if (window == focused_arc_window_)
    OnWindowFocused(nullptr, focused_arc_window_);
}

void ArcImeService::OnWindowRemovingFromRootWindow(aura::Window* window,
                                                   aura::Window* new_root) {
  // IMEs are associated with root windows, hence we may need to detach/attach.
  if (window == focused_arc_window_)
    ReattachInputMethod(focused_arc_window_, new_root);
}

void ArcImeService::OnWindowPropertyChanged(aura::Window* window,
                                            const void* key,
                                            intptr_t old) {
  // NOTE(review): this early-return skips property changes on
  // |focused_arc_window_| itself, so only changes on the *other* observed
  // window (its toplevel, added in OnWindowFocused) reach the IME-blocked
  // check below. Confirm the `==` comparison is intentional and not inverted.
  if (window == focused_arc_window_)
    return;
  bool ime_blocked = arc_window_delegate_->IsImeBlocked(focused_arc_window_);
  if (last_ime_blocked_ == ime_blocked)
    return;
  last_ime_blocked_ = ime_blocked;
  // IME blocking has changed.
  ui::InputMethod* const input_method = GetInputMethod();
  if (input_method) {
    if (has_composition_text_) {
      // If it has composition text, clear both ARC's current composition text
      // and Chrome IME's one.
      ClearCompositionText();
      input_method->CancelComposition(this);
    }
    input_method->OnTextInputTypeChanged(this);
  }
}

void ArcImeService::OnWindowRemoved(aura::Window* removed_window) {
  // |this| can lose the IME focus because |focused_arc_window_| may have
  // children other than ExoSurface e.g. WebContentsViewAura for CustomTabs.
  // Restore the IME focus when such a window is removed.
  ReattachInputMethod(nullptr, focused_arc_window_);
}

////////////////////////////////////////////////////////////////////////////////
// Overridden from exo::WMHelper::FocusChangeObserver:

void ArcImeService::OnWindowFocused(aura::Window* gained_focus,
                                    aura::Window* lost_focus) {
  if (lost_focus == gained_focus)
    return;
  const bool detach = (lost_focus && focused_arc_window_ == lost_focus);
  const bool attach = arc_window_delegate_->IsInArcAppWindow(gained_focus);

  if (detach) {
    // The focused window and the toplevel window are different in production,
    // but in tests they can be the same, so avoid adding the observer twice.
    if (focused_arc_window_ != focused_arc_window_->GetToplevelWindow())
      focused_arc_window_->GetToplevelWindow()->RemoveObserver(this);
    focused_arc_window_->RemoveObserver(this);
    focused_arc_window_ = nullptr;
  }
  if (attach) {
    DCHECK_EQ(nullptr, focused_arc_window_);
    focused_arc_window_ = gained_focus;
    focused_arc_window_->AddObserver(this);
    // The focused window and the toplevel window are different in production,
    // but in tests they can be the same, so avoid adding the observer twice.
    if (focused_arc_window_ != focused_arc_window_->GetToplevelWindow())
      focused_arc_window_->GetToplevelWindow()->AddObserver(this);
  }

  ReattachInputMethod(detach ? lost_focus : nullptr, focused_arc_window_);
}

////////////////////////////////////////////////////////////////////////////////
// Overridden from arc::ArcImeBridge::Delegate

void ArcImeService::OnTextInputTypeChanged(
    ui::TextInputType type,
    bool is_personalized_learning_allowed,
    int flags) {
  if (!ShouldSendUpdateToInputMethod())
    return;
  // Skip redundant notifications when nothing actually changed.
  if (ime_type_ == type &&
      is_personalized_learning_allowed_ == is_personalized_learning_allowed &&
      ime_flags_ == flags) {
    return;
  }
  ime_type_ = type;
  is_personalized_learning_allowed_ = is_personalized_learning_allowed;
  ime_flags_ = flags;

  ui::InputMethod* const input_method = GetInputMethod();
  if (input_method)
    input_method->OnTextInputTypeChanged(this);

  // Call HideKeyboard() here. On a text field on an ARC++ app, just having
  // non-null text input type doesn't mean the virtual keyboard is necessary. If
  // the virtual keyboard is really needed, ShowVirtualKeyboardIfEnabled will be
  // called later.
  if (keyboard::KeyboardUIController::HasInstance()) {
    auto* keyboard_controller = keyboard::KeyboardUIController::Get();
    if (keyboard_controller->IsEnabled())
      keyboard_controller->HideKeyboardImplicitlyBySystem();
  }
}

void ArcImeService::OnCursorRectChanged(const gfx::Rect& rect,
                                        bool is_screen_coordinates) {
  if (!ShouldSendUpdateToInputMethod())
    return;
  // Android did not send surrounding text with this update, so the cached
  // text/selection ranges are no longer trustworthy.
  InvalidateSurroundingTextAndSelectionRange();
  if (!UpdateCursorRect(rect, is_screen_coordinates))
    return;

  ui::InputMethod* const input_method = GetInputMethod();
  if (input_method)
    input_method->OnCaretBoundsChanged(this);
}

void ArcImeService::OnCancelComposition() {
  if (!ShouldSendUpdateToInputMethod())
    return;
  InvalidateSurroundingTextAndSelectionRange();
  ui::InputMethod* const input_method = GetInputMethod();
  if (input_method)
    input_method->CancelComposition(this);
}

void ArcImeService::ShowVirtualKeyboardIfEnabled() {
  if (!ShouldSendUpdateToInputMethod())
    return;
  ui::InputMethod* const input_method = GetInputMethod();
  // Only forward the request when this client currently owns the IME focus.
  if (input_method && input_method->GetTextInputClient() == this) {
    input_method->ShowVirtualKeyboardIfEnabled();
  }
}

void ArcImeService::OnCursorRectChangedWithSurroundingText(
    const gfx::Rect& rect,
    const gfx::Range& text_range,
    const base::string16& text_in_range,
    const gfx::Range& selection_range,
    bool is_screen_coordinates) {
  if (!ShouldSendUpdateToInputMethod())
    return;
  // Unlike OnCursorRectChanged(), this variant refreshes the cached
  // surrounding-text state instead of invalidating it.
  text_range_ = text_range;
  text_in_range_ = text_in_range;
  selection_range_ = selection_range;
  if (!UpdateCursorRect(rect, is_screen_coordinates))
    return;

  ui::InputMethod* const input_method = GetInputMethod();
  if (input_method)
    input_method->OnCaretBoundsChanged(this);
}

bool ArcImeService::ShouldEnableKeyEventForwarding() {
  return base::FeatureList::IsEnabled(
      chromeos::features::kArcPreImeKeyEventSupport);
}

void ArcImeService::SendKeyEvent(std::unique_ptr<ui::KeyEvent> key_event,
                                 KeyEventDoneCallback callback) {
  ui::InputMethod* const input_method = GetInputMethod();
  // The callback is resolved later via OnDispatchingKeyEventPostIME() or by
  // the receiver itself; store it before dispatching.
  receiver_->SetCallback(std::move(callback));
  if (input_method)
    ignore_result(input_method->DispatchKeyEvent(key_event.get()));
}

////////////////////////////////////////////////////////////////////////////////
// Overridden from ash::KeyboardControllerObserver
void ArcImeService::OnKeyboardAppearanceChanged(
    const ash::KeyboardStateDescriptor& state) {
  gfx::Rect new_bounds = state.occluded_bounds_in_screen;
  // Multiply by the scale factor. To convert from Chrome DIP to Android
  // pixels.
  gfx::Rect bounds_in_px =
      gfx::ScaleToEnclosingRect(new_bounds, GetDeviceScaleFactorForKeyboard());

  ime_bridge_->SendOnKeyboardAppearanceChanging(bounds_in_px, state.is_visible);
}

////////////////////////////////////////////////////////////////////////////////
// Overridden from ui::TextInputClient:

void ArcImeService::SetCompositionText(
    const ui::CompositionText& composition) {
  InvalidateSurroundingTextAndSelectionRange();
  has_composition_text_ = !composition.text.empty();
  ime_bridge_->SendSetCompositionText(composition);
}

uint32_t ArcImeService::ConfirmCompositionText(bool keep_selection) {
  if (!keep_selection) {
    InvalidateSurroundingTextAndSelectionRange();
  }
  has_composition_text_ = false;
  // Note: SendConfirmCompositionText() will commit the text and
  // keep the selection unchanged.
  ime_bridge_->SendConfirmCompositionText();
  // The number of committed characters is unknown here; UINT32_MAX signals
  // "everything" to the caller.
  return UINT32_MAX;
}

void ArcImeService::ClearCompositionText() {
  InvalidateSurroundingTextAndSelectionRange();
  if (has_composition_text_) {
    has_composition_text_ = false;
    // Inserting the empty string replaces (i.e. clears) the composition.
    ime_bridge_->SendInsertText(base::string16());
  }
}

void ArcImeService::InsertText(const base::string16& text,
                               InsertTextCursorBehavior cursor_behavior) {
  // TODO(crbug.com/1155331): Handle |cursor_behavior| correctly.
  InvalidateSurroundingTextAndSelectionRange();
  has_composition_text_ = false;
  ime_bridge_->SendInsertText(text);
}

void ArcImeService::InsertChar(const ui::KeyEvent& event) {
  // When IME is blocked for the window, let Exo handle the event.
  if (arc_window_delegate_->IsImeBlocked(focused_arc_window_))
    return;

  // According to the document in text_input_client.h, InsertChar() is called
  // even when the text editing is not available. We ignore such events, since
  // for ARC we are only interested in the event as a method of text input.
  if (!IsTextInputActive(ime_type_))
    return;

  InvalidateSurroundingTextAndSelectionRange();

  // For apps that don't handle hardware keyboard events well, keys that are
  // typically on a software keyboard are essential; namely, unmodified enter
  // and backspace keys are sent through the IME.
  if (!HasModifier(&event) && !ShouldEnableKeyEventForwarding()) {
    if (event.key_code() == ui::VKEY_RETURN) {
      has_composition_text_ = false;
      ime_bridge_->SendInsertText(base::ASCIIToUTF16("\n"));
      return;
    }
    if (event.key_code() == ui::VKEY_BACK) {
      has_composition_text_ = false;
      ime_bridge_->SendInsertText(base::ASCIIToUTF16("\b"));
      return;
    }
  }

  if (IsCharacterKeyEvent(&event)) {
    has_composition_text_ = false;
    ime_bridge_->SendInsertText(base::string16(1, event.GetText()));
  }
}

ui::TextInputType ArcImeService::GetTextInputType() const {
  // Report "no input" while the IME is blocked so Chrome's IME stays idle.
  if (arc_window_delegate_->IsImeBlocked(focused_arc_window_))
    return ui::TEXT_INPUT_TYPE_NONE;
  return ime_type_;
}

gfx::Rect ArcImeService::GetCaretBounds() const {
  return cursor_rect_;
}

bool ArcImeService::GetTextRange(gfx::Range* range) const {
  if (!text_range_.IsValid())
    return false;
  *range = text_range_;
  return true;
}

bool ArcImeService::GetEditableSelectionRange(gfx::Range* range) const {
  if (!selection_range_.IsValid())
    return false;
  *range = selection_range_;
  return true;
}

bool ArcImeService::GetTextFromRange(const gfx::Range& range,
                                     base::string16* text) const {
  // It's supposed that this method is called only from
  // InputMethod::OnCaretBoundsChanged(). In that method, the range obtained
  // from GetTextRange() is used as the argument of this method. To prevent an
  // unexpected usage, the check, |range != text_range_|, is added.
  if (!text_range_.IsValid() || range != text_range_)
    return false;
  *text = text_in_range_;
  return true;
}

void ArcImeService::EnsureCaretNotInRect(const gfx::Rect& rect_in_screen) {
  if (focused_arc_window_ == nullptr)
    return;
  aura::Window* top_level_window = focused_arc_window_->GetToplevelWindow();
  // If the window is not a notification, the window move is handled by
  // Android.
  if (top_level_window->type() != aura::client::WINDOW_TYPE_POPUP)
    return;
  wm::EnsureWindowNotInRect(top_level_window, rect_in_screen);
}

ui::TextInputMode ArcImeService::GetTextInputMode() const {
  return ui::TEXT_INPUT_MODE_DEFAULT;
}

base::i18n::TextDirection ArcImeService::GetTextDirection() const {
  return base::i18n::UNKNOWN_DIRECTION;
}

void ArcImeService::ExtendSelectionAndDelete(size_t before, size_t after) {
  InvalidateSurroundingTextAndSelectionRange();
  ime_bridge_->SendExtendSelectionAndDelete(before, after);
}

int ArcImeService::GetTextInputFlags() const {
  return ime_flags_;
}

bool ArcImeService::CanComposeInline() const {
  return true;
}

bool ArcImeService::GetCompositionCharacterBounds(
    uint32_t index, gfx::Rect* rect) const {
  // Not supported for ARC text fields.
  return false;
}

bool ArcImeService::HasCompositionText() const {
  return has_composition_text_;
}

ui::TextInputClient::FocusReason ArcImeService::GetFocusReason() const {
  // TODO(https://crbug.com/824604): Determine how the current input client got
  // focused.
  NOTIMPLEMENTED_LOG_ONCE();
  return ui::TextInputClient::FOCUS_REASON_OTHER;
}

bool ArcImeService::GetCompositionTextRange(gfx::Range* range) const {
  return false;
}

bool ArcImeService::SetEditableSelectionRange(const gfx::Range& range) {
  selection_range_ = range;
  ime_bridge_->SendSelectionRange(selection_range_);
  return true;
}

bool ArcImeService::DeleteRange(const gfx::Range& range) {
  return false;
}

bool ArcImeService::ChangeTextDirectionAndLayoutAlignment(
    base::i18n::TextDirection direction) {
  return false;
}

bool ArcImeService::IsTextEditCommandEnabled(
    ui::TextEditCommand command) const {
  return false;
}

ukm::SourceId ArcImeService::GetClientSourceForMetrics() const {
  // TODO(yhanada): Implement this method. crbug.com/752657
  NOTIMPLEMENTED_LOG_ONCE();
  return ukm::SourceId();
}

bool ArcImeService::ShouldDoLearning() {
  return is_personalized_learning_allowed_;
}

bool ArcImeService::SetCompositionFromExistingText(
    const gfx::Range& range,
    const std::vector<ui::ImeTextSpan>& ui_ime_text_spans) {
  if (!range.IsBoundedBy(text_range_))
    return false;
  InvalidateSurroundingTextAndSelectionRange();
  has_composition_text_ = !range.is_empty();
  // The sent |range| might be already invalid if the textfield state in Android
  // side is changed simultaneously. It's okay because InputConnection's
  // setComposingRegion handles invalid region correctly.
  ime_bridge_->SendSetComposingRegion(range);
  return true;
}

gfx::Range ArcImeService::GetAutocorrectRange() const {
  // TODO(https://crbug.com/1091088): Implement this method.
  return gfx::Range();
}

gfx::Rect ArcImeService::GetAutocorrectCharacterBounds() const {
  // TODO(https://crbug.com/952757): Implement this method.
  NOTIMPLEMENTED_LOG_ONCE();
  return gfx::Rect();
}

bool ArcImeService::SetAutocorrectRange(const gfx::Range& range) {
  // Only metrics are recorded; the actual autocorrect-range plumbing is not
  // implemented yet (see TODO below).
  if (!range.is_empty()) {
    base::UmaHistogramEnumeration("InputMethod.Assistive.Autocorrect.Count",
                                  TextInputClient::SubClass::kArcImeService);
    auto* input_method_manager =
        chromeos::input_method::InputMethodManager::Get();
    if (input_method_manager &&
        chromeos::extension_ime_util::IsExperimentalMultilingual(
            input_method_manager->GetActiveIMEState()
                ->GetCurrentInputMethod()
                .id())) {
      base::UmaHistogramEnumeration(
          "InputMethod.MultilingualExperiment.Autocorrect.Count",
          TextInputClient::SubClass::kArcImeService);
    }
  }
  // TODO(https://crbug.com/1091088): Implement this method.
  NOTIMPLEMENTED_LOG_ONCE();
  return false;
}

void ArcImeService::OnDispatchingKeyEventPostIME(ui::KeyEvent* event) {
  if (!ShouldEnableKeyEventForwarding())
    return;

  // A callback means the event originated from SendKeyEvent(); answer it and
  // stop further dispatch.
  if (receiver_->HasCallback()) {
    receiver_->DispatchKeyEventPostIME(event);
    event->SetHandled();
    return;
  }

  // Do not forward the key event from virtual keyboard if it's sent via
  // InsertChar(). By the special logic in
  // ui::InputMethodChromeOS::DispatchKeyEvent, both of InsertChar() and
  // DispatchKeyEventPostIME() are called for a key event injected by the
  // virtual keyboard. The below logic stops key event propagation through
  // DispatchKeyEventPostIME() to prevent from inputting two characters.
  const bool from_vk =
      event->properties() &&
      (event->properties()->find(ui::kPropertyFromVK) !=
       event->properties()->end());
  if (from_vk && IsCharacterKeyEvent(event) && IsTextInputActive(ime_type_))
    event->SetHandled();
}

// static
void ArcImeService::SetOverrideDefaultDeviceScaleFactorForTesting(
    base::Optional<double> scale_factor) {
  g_override_default_device_scale_factor = scale_factor;
}

// Resets the cached surrounding text / selection state to "unknown".
void ArcImeService::InvalidateSurroundingTextAndSelectionRange() {
  text_range_ = gfx::Range::InvalidRange();
  text_in_range_ = base::string16();
  selection_range_ = gfx::Range::InvalidRange();
}

// Converts |rect| (Android px, window- or screen-relative) into Chrome DIP
// screen coordinates and stores it in |cursor_rect_|. Returns true only when
// the rect actually changed.
bool ArcImeService::UpdateCursorRect(const gfx::Rect& rect,
                                     bool is_screen_coordinates) {
  // Divide by the scale factor. To convert from Android pixels to Chrome DIP.
  gfx::Rect converted(gfx::ScaleToEnclosingRect(
      rect, 1 / GetDeviceScaleFactorForFocusedWindow()));

  // If the supplied coordinates are relative to the window, add the offset of
  // the window showing the ARC app.
  if (!is_screen_coordinates) {
    if (!focused_arc_window_)
      return false;
    converted.Offset(focused_arc_window_->GetToplevelWindow()
                         ->GetBoundsInScreen()
                         .OffsetFromOrigin());
  } else if (focused_arc_window_) {
    auto* window = focused_arc_window_->GetToplevelWindow();
    auto* widget = views::Widget::GetWidgetForNativeWindow(window);
    // Check fullscreen window as well because it's possible for ARC to request
    // frame regardless of window state.
    bool covers_display =
        widget && (widget->IsMaximized() || widget->IsFullscreen());
    if (covers_display) {
      auto* frame_view = widget->non_client_view()->frame_view();
      // The frame height will be subtracted from client bounds.
      gfx::Rect bounds =
          frame_view->GetWindowBoundsForClientBounds(gfx::Rect());
      converted.Offset(0, -bounds.y());
    }
  }

  if (cursor_rect_ == converted)
    return false;
  cursor_rect_ = converted;
  return true;
}

bool ArcImeService::ShouldSendUpdateToInputMethod() const {
  // New text input state received from Android should not be sent to
  // InputMethod when the focus is on a non-ARC window. Text input state updates
  // can be sent from Android anytime because there is a dummy input view in
  // Android which is synchronized with the text input on a non-ARC window.
  return focused_arc_window_ != nullptr;
}

double ArcImeService::GetDeviceScaleFactorForKeyboard() const {
  if (g_override_default_device_scale_factor.has_value())
    return g_override_default_device_scale_factor.value();

  if (!exo::WMHelper::HasInstance() ||
      !keyboard::KeyboardUIController::HasInstance()) {
    return 1.0;
  }
  aura::Window* const keyboard_window =
      keyboard::KeyboardUIController::Get()->GetKeyboardWindow();
  if (!keyboard_window)
    return 1.0;
  return exo::WMHelper::GetInstance()->GetDeviceScaleFactorForWindow(
      keyboard_window);
}

double ArcImeService::GetDeviceScaleFactorForFocusedWindow() const {
  DCHECK(focused_arc_window_);
  if (g_override_default_device_scale_factor.has_value())
    return g_override_default_device_scale_factor.value();

  if (!exo::WMHelper::HasInstance())
    return 1.0;
  return exo::WMHelper::GetInstance()->GetDeviceScaleFactorForWindow(
      focused_arc_window_);
}

}  // namespace arc
////////////////////////////////////////////////////////////////////////////// /// Copyright 2003 and onward LASMEA UMR 6602 CNRS/U.B.P Clermont-Ferrand /// Copyright 2009 and onward LRI UMR 8623 CNRS/Univ Paris Sud XI /// /// Distributed under the Boost Software License, Version 1.0 /// See accompanying file LICENSE.txt or copy at /// http://www.boost.org/LICENSE_1_0.txt ////////////////////////////////////////////////////////////////////////////// #ifndef NT2_TOOLBOX_TRIGONOMETRIC_FUNCTION_SIMD_SSE_SSSE3_TANPI_HPP_INCLUDED #define NT2_TOOLBOX_TRIGONOMETRIC_FUNCTION_SIMD_SSE_SSSE3_TANPI_HPP_INCLUDED #include <nt2/toolbox/trigonometric/function/simd/sse/sse3/tanpi.hpp> #endif
/*
 * Author: Jhonatan Casale (jhc)
 *
 * Contact : jhonatan@jhonatancasale.com
 *         : casale.jhon@gmail.com
 *         : https://github.com/jhonatancasale
 *         : https://twitter.com/jhonatancasale
 *         : http://jhonatancasale.github.io/
 *
 * Create date Fri 28 Apr 16:19:05 -03 2017
 *
 */

#include <cstdlib>   // EXIT_SUCCESS -- was missing; previously relied on
                     // <iostream> pulling it in transitively.
#include <iostream>

// Swap two ints in place.
inline void swap(int& i, int& j);
// Swap two doubles in place.
inline void swap(double& i, double& j);

// Demonstrates overload resolution: the same call syntax `swap(x, y)` picks
// the int or double overload based on the argument types.
int main (int argc, char **argv) {
  (void)argc;  // unused
  (void)argv;  // unused

  int a = 42, b = 23;
  double i = 42.0, j = 23.001;

  std::cout << "Before: (" << a << ", " << b << ")" << std::endl;
  swap(a, b);
  std::cout << "After: (" << a << ", " << b << ")" << std::endl;

  std::cout << "Before: (" << i << ", " << j << ")" << std::endl;
  swap(i, j);
  std::cout << "After: (" << i << ", " << j << ")" << std::endl;

  return (EXIT_SUCCESS);
}

inline void swap(int& i, int& j) {
  int tmp = i;
  i = j;
  j = tmp;
}

inline void swap(double& i, double& j) {
  double tmp = i;
  i = j;
  j = tmp;
}
#include "Time.hpp"

using namespace Util;

// High-resolution clock backend used only on MSVC <= 1800 (Visual Studio
// 2013 and earlier), based on the Win32 performance counter.
#if defined(WIN32) && _MSC_VER <= 1800
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>

namespace {
  // Ticks-per-second of the performance counter, queried once during static
  // initialization (the lambda runs before main).
  const long long g_Frequency = []() -> long long {
    LARGE_INTEGER frequency;
    QueryPerformanceFrequency(&frequency);
    return frequency.QuadPart;
  }();
}

// Returns the current time point by scaling the raw QPC tick count into this
// clock's duration units: ticks * (den / frequency).
// NOTE(review): the multiplication `count.QuadPart * period::den` happens
// before the division by g_Frequency and can overflow long long for large
// tick counts (long uptimes) when period::den is large -- confirm the chosen
// period keeps this in range.
ClockImpl::time_point ClockImpl::now() {
  LARGE_INTEGER count;
  QueryPerformanceCounter(&count);
  return time_point(duration(count.QuadPart * static_cast<rep>(period::den) / g_Frequency));
}

#endif
#include "gtest/gtest.h"
#include "dsaa/dsaa.h"
#include "test_utils/test_utils.h"

// Fixture owning two tries -- one created with hint 0 and one with hint 8 --
// bracketed by muggle's memory-leak checker so every test also verifies that
// insert/remove pairs free all allocations.
class TrieFixture : public ::testing::Test {
public:
  void SetUp() {
    muggle_debug_memory_leak_start(&mem_state_);

    bool ret;
    // Second argument is the init size hint; both variants must behave the
    // same in the test below.
    ret = trie_init(&trie_[0], 0);
    ASSERT_TRUE(ret);
    ret = trie_init(&trie_[1], 8);
    ASSERT_TRUE(ret);
  }

  void TearDown() {
    // Values are strings allocated via test_utils_; free them through the
    // matching callback.
    trie_destroy(&trie_[0], test_utils_free_str, &test_utils_);
    trie_destroy(&trie_[1], test_utils_free_str, &test_utils_);

    muggle_debug_memory_leak_end(&mem_state_);
  }

protected:
  struct trie trie_[2];
  TestUtils test_utils_;
  muggle_debug_memory_state mem_state_;
};

// Round-trips a small word list through both tries: insert + verify stored
// data, find, remove, then verify the words (and a never-inserted word) are
// gone.
TEST_F(TrieFixture, insert_find_remove) {
  const char* words[] = {
    "hello", "world", "foo", "bar"
  };

  for (int index = 0; index < (int)(sizeof(trie_) / sizeof(trie_[0])); index++) {
    struct trie *trie = &trie_[index];

    for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
      char *s = test_utils_.allocateString();
      // NOTE(review): strncpy does not guarantee NUL termination when the
      // source fills the buffer; this assumes allocateString() returns a
      // zeroed (or larger-than-needed) buffer -- confirm in test_utils.
      strncpy(s, words[i], TEST_UTILS_STR_SIZE - 1);

      struct trie_node *node = trie_insert(trie, words[i], s);
      ASSERT_TRUE(node != NULL);
      ASSERT_STREQ((char*)node->data, s);
    }

    for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
      struct trie_node *node = trie_find(trie, words[i]);
      ASSERT_TRUE(node != NULL);
      ASSERT_STREQ((char*)node->data, words[i]);
    }

    for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
      bool ret = trie_remove(trie, words[i], test_utils_free_str, &test_utils_);
      ASSERT_TRUE(ret);
    }

    // After removal a lookup may return NULL or a node whose data was freed
    // and cleared -- both count as "not present".
    for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
      struct trie_node *node = trie_find(trie, words[i]);
      ASSERT_TRUE(node == NULL || node->data == NULL);
    }

    const char *no_exists_word = "noexists";
    struct trie_node *node = trie_find(trie, no_exists_word);
    ASSERT_TRUE(node == NULL || node->data == NULL);
  }
}
#pragma once #include "../../JObject.hpp" class JByteArray; namespace android::graphics { class Bitmap; } namespace android::graphics { class Canvas; } namespace android::graphics { class Paint; } namespace android::graphics { class Rect; } namespace android::graphics { class RectF; } namespace android::graphics { class Region; } class JString; namespace android::graphics { class NinePatch : public JObject { public: // Fields // QJniObject forward template<typename ...Ts> explicit NinePatch(const char *className, const char *sig, Ts...agv) : JObject(className, sig, std::forward<Ts>(agv)...) {} NinePatch(QJniObject obj); // Constructors NinePatch(android::graphics::Bitmap arg0, JByteArray arg1); NinePatch(android::graphics::Bitmap arg0, JByteArray arg1, JString arg2); // Methods static jboolean isNinePatchChunk(JByteArray arg0); void draw(android::graphics::Canvas arg0, android::graphics::Rect arg1) const; void draw(android::graphics::Canvas arg0, android::graphics::RectF arg1) const; void draw(android::graphics::Canvas arg0, android::graphics::Rect arg1, android::graphics::Paint arg2) const; android::graphics::Bitmap getBitmap() const; jint getDensity() const; jint getHeight() const; JString getName() const; android::graphics::Paint getPaint() const; android::graphics::Region getTransparentRegion(android::graphics::Rect arg0) const; jint getWidth() const; jboolean hasAlpha() const; void setPaint(android::graphics::Paint arg0) const; }; } // namespace android::graphics
#include <iostream> #include <seqan/sequence.h> #include <seqan/graph_algorithms.h> using namespace seqan; int main() { // Fill a string and define corresponding weights. String<char> seq("zeitgeist"); String<unsigned int> weights; resize(weights, length(seq), 1); assignProperty(weights, 2, 10); // Compute heaviest increasing subsequence. typedef Position<String<unsigned int> >::Type TPosition; String<TPosition> pos; unsigned int w = heaviestIncreasingSubsequence(seq, weights, pos); // Print the results to stdout. for(int i = 0; i< (int) length(seq); ++i) std::cout << seq[i] << "(Weight=" << getProperty(weights, i) << "),"; std::cout << "\n" << "His: \n"; for(int i = length(pos)-1; i>=0; --i) std::cout << seq[pos[i]] << ','; std::cout << "(Weight=" << w << ")\n"; return 0; }
topic "Ftp"; [2 $$0,0#00000000000000000000000000000000:Default] [i448;a25;kKO9;2 $$1,0#37138531426314131252341829483380:class] [l288;2 $$2,2#27521748481378242620020725143825:desc] [0 $$3,0#96390100711032703541132217272105:end] [H6;0 $$4,0#05600065144404261032431302351956:begin] [i448;a25;kKO9;2 $$5,0#37138531426314131252341829483370:item] [l288;a4;*@5;1 $$6,6#70004532496200323422659154056402:requirement] [l288;i1121;b17;O9;~~~.1408;2 $$7,0#10431211400427159095818037425705:param] [i448;b42;O9;2 $$8,8#61672508125594000341940100500538:tparam] [b42;2 $$9,9#13035079074754324216151401829390:normal] [{_}%EN-US [ {{10000@(113.42.0) [s0; [*@7;4 Ftp]]}}&] [s1;%- &] [s1;:Upp`:`:Ftp`:`:class:%- [@(0.0.255)3 class][3 _][*3 Ftp ][3 :][*3 ][@(0.0.255)3 private][3 _][*@3;3 N oCopy]&] [s2; This class provides a client side interface to the File Transfer Protocol (FTP) as specified in [^http`:`/`/www`.ietf`.org`/rfc`/rfc959`.txt^ RFC 959], with several advanced capabilities: &] [s2; &] [s2;i150;O0; Support for IPv6 and NATs, as specified in [^http`:`/`/tools`.ietf`.org`/html`/rfc2428^ R FC 2428.]&] [s2;i150;O0; Support for ftp over TLS/SSL (FTPS), as specified in [^http`:`/`/tools`.ietf`.org`/html`/rfc2228^ RFC 2228].&] [s2;i150;O0; Support for [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:GetFeatures`(`)^ f eature negotiation mechanism] as specifien in [^http`:`/`/tools`.ietf`.org`/html`/rfc2389^ R FC 2389].&] [s2;i150;O0; Support for [^topic`:`/`/FTP`/src`/HelperFunctions`$en`-us`#Upp`:`:FtpAsyncGet`(Upp`:`:Ftp`:`:Request`&`,Upp`:`:Event`<Upp`:`:Ftp`:`:Result`>`)^ m ultithreading], using worker threads. 
&] [s2;i150;O0; Support for [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:DirEntry`:`:class^ p arsing] UNIX and DOS style directory listings.&] [s2;i150;O0; Support for [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:SendCommand`(const String`&`)^ e xtending] the functionality of Ftp class.&] [s2;i150;O0; Support for transfer restart mechanism, as specified in [^https`:`/`/tools`.ietf`.org`/html`/rfc3659^ RFC 3959].&] [s1;%- &] [ {{10000F(128)G(128)@1 [s0; [* Public Method List]]}}&] [s0;%- &] [s5;:Ftp`:`:User`(const String`&`,const String`&`):%- [_^Ftp^ Ftp][@(0.0.255) `&]_[* User]( [@(0.0.255) const]_[_^String^ String][@(0.0.255) `&]_[*@3 user], [@(0.0.255) const]_[_^String^ S tring][@(0.0.255) `&]_[*@3 pass])&] [s2; Sets username and password. Returns `*this for method chaining. Ftp client will attempt anonymous login if user id or password is unspecified.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:SSL`(bool`):%- [_^Ftp^ Ftp][@(0.0.255) `&]_[* SSL]([@(0.0.255) bool]_[*@3 b]_`=_[@(0.0.255) t rue])&] [s2; Activates a session`-wide [^http`:`/`/en`.wikipedia`.org`/wiki`/FTPS^ FTPS] mode (TLS/SSL) through `"explicit`" ftps request. Returns `*this for method chaining. This method must be invoked before a [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:Connect`(const String`&`,int`)^ C onnect]() call and cannot be used in active data connection mode. Data retrieval methods will fail if FTPS is enabled in active mode.&] [s6; Requires Core/SSL package.&] [s3; &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:Active`(bool`):%- [_^Upp`:`:Ftp^ Ftp][@(0.0.255) `&]_[* Active]([@(0.0.255) b ool]_[*@3 b]_`=_[@(0.0.255) true])&] [s2; Switches the data transfer mode to active, using EPRT or PORT command. Returns `*this for method chaining. Note that the active mode uses a port range between 49152 and 65535.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:Passive`(`):%- [_^Ftp^ Ftp][@(0.0.255) `&]_[* Passive]()&] [s2; Switches the data transfer mode to passive, using EPSV or PASV command. Returns `*this for method chaining. 
This is the default transfer mode.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:Timeout`(int`):%- [_^Ftp^ Ftp][@(0.0.255) `&]_[* Timeout]([@(0.0.255) int]_[*@3 ms]) &] [s2; Sets socket timeout value. Default value is 60000 milliseconds (one minute). Returns `*this for method chaining.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:WaitStep`(int`):%- [_^Ftp^ Ftp][@(0.0.255) `&]_[* WaitStep]([@(0.0.255) int]_[*@3 m s])&] [s2; Sets the periodicity of calling [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:WhenWait^ W henWait] in milliseconds between calls. Default value is 10ms (100hz). Returns `*this for method chaining.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:ChunkSize`(int`):%- [_^Ftp^ Ftp][@(0.0.255) `&]_[* ChunkSize]([@(0.0.255) int]_[*@3 s ize])&] [s2; Sets data chunk [%-*@3 size] for binary transfers. Default size is 65536 bytes (64K). Returns `*this for method chaining.&] [s3; &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:Restart`(Upp`:`:int64`):%- [_^Upp`:`:Ftp^ Ftp][@(0.0.255) `&]_[* Restart ]([_^Upp`:`:int64^ int64]_[*@3 pos])&] [s2; Restarts the file transfer from given [%-*@3 pos], using REST command. Similar to [^topic`:`/`/Core`/src`/Stream`$en`-us`#Stream`:`:Seek`(int64`)^ S tream`::Seek()]. Returns `*this for method chaining. Passing a negative value or zero (0) disables restart feature, causing the entire file to be transferred. [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:GetRestartPos`(`)const^ G etRestartPos()] can be used to obtain actual position. [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:Get`(const Upp`:`:String`&`,Upp`:`:Stream`&`,Upp`:`:Gate2`<Upp`:`:int64`,Upp`:`:int64`>`,bool`)^ G et()], [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:Put`(Upp`:`:Stream`&`,const Upp`:`:String`&`,Upp`:`:Gate2`<Upp`:`:int64`,Upp`:`:int64`>`,bool`)^ P ut()], and [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:Append`(Upp`:`:Stream`&`,const Upp`:`:String`&`,Upp`:`:Gate2`<Upp`:`:int64`,Upp`:`:int64`>`,bool`)^ A ppend()] methods take advantage of the transfer restart feature. 
However, since the REST command is an extension ([^https`:`/`/tools`.ietf`.org`/html`/rfc3659^ R FC 3959]) to the original file transfer protocol, you may want to check it`'s availability using [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:GetFeatures`(`)^ G etFeatures()] method. &] [s3; &] [s4;%- &] [s5;:Ftp`:`:Connect`(const String`&`,int`):%- [@(0.0.255) bool]_[* Connect]([@(0.0.255) con st]_[_^String^ String][@(0.0.255) `&]_[*@3 host], [@(0.0.255) int]_[*@3 port]_`=_[@3 21])&] [s2; Connects to a ftp server specified at [%-*@3 host ][%-$2 and] [%-*@3 port]. Returns true on success.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:Disconnect`(`):%- [@(0.0.255) void]_[* Disconnect]()&] [s2; Disconnects from the ftp server.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:GetDir`(`):%- [_^String^ String]_[* GetDir]()&] [s2; Returns the remote working directory. Returns String`::GetVoid() on failure.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:SetDir`(const String`&`):%- [@(0.0.255) bool]_[* SetDir]([@(0.0.255) const]_[_^String^ S tring][@(0.0.255) `&]_[*@3 path])&] [s2; Sets the remote working directory to [%-*@3 path]. Returns true on success.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:DirUp`(`):%- [@(0.0.255) bool]_[* DirUp]()&] [s2; Sets the remote working directory to one directory up. Returns true on success.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:ListDir`(const Upp`:`:String`&`,Upp`:`:Ftp`:`:DirList`&`,Upp`:`:Gate1`<Upp`:`:String`>`):%- [@(0.0.255) b ool]_[* ListDir]([@(0.0.255) const]_[_^Upp`:`:String^ String][@(0.0.255) `&]_[*@3 path], [_^Upp`:`:Ftp`:`:DirList^ DirList][@(0.0.255) `&]_[*@3 list], [_^Upp`:`:Gate1^ Gate1]<[_^Upp`:`:String^ S tring]>_[*@3 progress]_`=_[@(0.0.255) false])&] [s2; Retrieves the directory listing of [%-*@3 path]. When path is empty or Null, this method will retrieve a directory listing of the remote working directory. Returns true on success. Note that this method will return true even when the remote directory is empty. 
[%-*@3 progress] function can be used to track progress of the operation, and to get the single, raw directory (String) entries one by one; returning true will cancel the operation.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:MakeDir`(const String`&`):%- [@(0.0.255) bool]_[* MakeDir]([@(0.0.255) const]_[_^String^ S tring][@(0.0.255) `&]_[*@3 path])&] [s2; Creates a remote directory at [%-*@3 path]. Returns true on success.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:RemoveDir`(const String`&`):%- [@(0.0.255) bool]_[* RemoveDir]([@(0.0.255) cons t]_[_^String^ String][@(0.0.255) `&]_[*@3 path])&] [s2; Removes a remote directory at [%-*@3 path]. Returns true on success.&] [s3; &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:Get`(const Upp`:`:String`&`,Upp`:`:Stream`&`,Upp`:`:Gate2`<Upp`:`:int64`,Upp`:`:int64`>`,bool`):%- [@(0.0.255) b ool]_[* Get]([@(0.0.255) const]_[_^Upp`:`:String^ String][@(0.0.255) `&]_[*@3 path], [_^Upp`:`:Stream^ Stream][@(0.0.255) `&]_[*@3 out], [_^Upp`:`:Gate2^ Gate2]<[_^Upp`:`:int64^ i nt64], [_^Upp`:`:int64^ int64]>_[*@3 progress]_`=_[@(0.0.255) false], [@(0.0.255) bool]_[*@3 ascii]_`=_[@(0.0.255) false])&] [s2; Downloads the remote file specified at [%-*@3 path] and writes it into [%-*@3 out]. Returns true on success. [%-*@3 progress] function can be used to track the progress of the download; returning true will abort the operation. Data can be downloaded as [%-*@3 ascii] or binary. This method can take advantage of file restart (resume) feature. 
See [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:Restart`(Upp`:`:int64`)^ R estart()] and GetRestartPos() methods.&] [s3; &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:Put`(Upp`:`:Stream`&`,const Upp`:`:String`&`,Upp`:`:Gate2`<Upp`:`:int64`,Upp`:`:int64`>`,bool`):%- [@(0.0.255) b ool]_[* Put]([_^Upp`:`:Stream^ Stream][@(0.0.255) `&]_[*@3 in], [@(0.0.255) const]_[_^Upp`:`:String^ S tring][@(0.0.255) `&]_[*@3 path], [_^Upp`:`:Gate2^ Gate2]<[_^Upp`:`:int64^ int64], [_^Upp`:`:int64^ int64]>_[*@3 progress]_`=_[@(0.0.255) false], [@(0.0.255) bool]_[*@3 ascii ]_`=_[@(0.0.255) false])&] [s2; Uploads the local file [%-*@3 in] to the remote [%-*@3 path]. Returns true on success. [%-*@3 progress] function can be used to track the progress of the upload; returning true will abort the operation. Data can be uploaded as [%-*@3 ascii] or binary. This method can take advantage of file restart (resume) feature. See [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:Restart`(Upp`:`:int64`)^ R estart()] and [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:GetRestartPos`(`)const^ G etRestartPos()] methods. [* Warning:] Restarting an upload using REST command is not very reliable. Consider using [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:Append`(Upp`:`:Stream`&`,const Upp`:`:String`&`,Upp`:`:Gate2`<Upp`:`:int64`,Upp`:`:int64`>`,bool`)^ A ppend()] instead.&] [s3; &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:Append`(Upp`:`:Stream`&`,const Upp`:`:String`&`,Upp`:`:Gate2`<Upp`:`:int64`,Upp`:`:int64`>`,bool`):%- [@(0.0.255) b ool]_[* Append]([_^Upp`:`:Stream^ Stream][@(0.0.255) `&]_[*@3 in], [@(0.0.255) const]_[_^Upp`:`:String^ S tring][@(0.0.255) `&]_[*@3 path], [_^Upp`:`:Gate2^ Gate2]<[_^Upp`:`:int64^ int64], [_^Upp`:`:int64^ int64]>_[*@3 progress]_`=_[@(0.0.255) false], [@(0.0.255) bool]_[*@3 ascii ]_`=_[@(0.0.255) false])&] [s2; Appends the local file [%-*@3 in] to the remote file at remote [%-*@3 path]. Returns true on success. 
[%-*@3 progress] function can be used to track the progress of the upload; returning true will abort the operation. Data can be uploaded as [%-*@3 ascii] or binary. This method can take advantage of file restart (resume) feature. See [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:Restart`(Upp`:`:int64`)^ R estart()] and [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:GetRestartPos`(`)const^ G etRestartPos()] methods.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:Info`(const String`&`,Ftp`:`:DirEntry`&`):%- [@(0.0.255) bool]_[* Info]([@(0.0.255) c onst]_[_^String^ String][@(0.0.255) `&]_[*@3 path], [_^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:DirEntry`:`:class^ D irEntry][@(0.0.255) `&]_[*@3 info])&] [s2; Retrieves the directory listing [%-*@3 info] of [%-*@3 path]. Returns true on success.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:Rename`(const String`&`,const String`&`):%- [@(0.0.255) bool]_[* Rename]([@(0.0.255) c onst]_[_^String^ String][@(0.0.255) `&]_[*@3 oldname], [@(0.0.255) const]_[_^String^ String ][@(0.0.255) `&]_[*@3 newname])&] [s2; Renames a remote file. Returns true on success.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:Delete`(const String`&`):%- [@(0.0.255) bool]_[* Delete]([@(0.0.255) const]_[_^String^ S tring][@(0.0.255) `&]_[*@3 path])&] [s2; Deletes the remote file specified at [%-*@3 path]. Returns true on success.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:Noop`(`):%- [@(0.0.255) bool]_[* Noop]()&] [s2; Sends a `"NOOP`" command to the FTP server. Returns true if the command is accepted. Useful for keeping connections alive.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:Abort`(`):%- [@(0.0.255) void]_[* Abort]()&] [s2; Aborts any download or upload in progress.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:SendCommand`(const String`&`):%- [@(0.0.255) int]_[* SendCommand]([@(0.0.255) c onst]_[_^String^ String][@(0.0.255) `&]_[*@3 cmd])&] [s2; Sends a raw command to the ftp server. Returns `-1 for internal errors, and other values for protocol specific error and success messages. 
A CRLF (`"`\r`\n`") is automatically appended to every command. This is a low level method to simplify extending the functionality of the Ftp class. Server replies and/or internal error codes and messages can be obtained using the [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:GetCode`(`)const^ G etCode()], [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:GetReply`(`)const^ GetReply() ] or [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:GetReplyAsXml`(`)^ GetReplyA sXml()] methods.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:InProgress`(`)const:%- [@(0.0.255) bool]_[* InProgress]()_[@(0.0.255) const]&] [s2; Returns true if a file transfer is in progress. Only the [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:Abort`(`)^ A bort()] command should be called while a transfer is in progress.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:GetSize`(const Upp`:`:String`&`):%- [_^Upp`:`:int64^ int64]_[* GetSize ]([@(0.0.255) const]_[_^Upp`:`:String^ String][@(0.0.255) `&]_[*@3 path])&] [s2; Returns the size of remote [%-*@3 path] in octets, or `-1 on failure. This method implements the ftp SIZE command as defined in [^https`:`/`/tools`.ietf`.org`/html`/rfc3659^ R FC 3659]&] [s3; &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:GetRestartPos`(`)const:%- [_^Upp`:`:int64^ int64]_[* GetRestartPos]()_ [@(0.0.255) const]&] [s2; Returns the current position in the data stream. Returned value represents the actual transferred data in bytes. Using this value with the [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:Restart`(Upp`:`:int64`)^ R estart()] method, a failed transfer can be resumed.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:GetFeatures`(`):%- [_^Upp`:`:ValueMap^ ValueMap]_[* GetFeatures]()&] [s2; Returns a list of features supported by the ftp server, on success, and an empty list, on failure. Features supported by the server are represented by `"keys`", and their parameters, which can be multiple or none, are represented by `"values`". 
Note that all keys and values are lowercase strings.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:GetSocket`(`):%- [_^TcpSocket^ TcpSocket][@(0.0.255) `&]_[* GetSocket]()&] [s2; Returns a reference to the ftp control socket.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:GetCode`(`)const:%- [@(0.0.255) int]_[* GetCode]()_[@(0.0.255) const]&] [s2; Returns last server reply code and returns `-1 for internal errors.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:GetReply`(`)const:%- [_^String^ String]_[* GetReply]()_[@(0.0.255) const]&] [s2; Returns last server reply message, or internal error message.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:GetReplyAsXml`(`):%- [_^Upp`:`:String^ String]_[* GetReplyAsXml]()&] [s2; Returns last server reply message, or internal error message in a simple XML format. The XML format is as follows:&] [s2; &] [ {{1765:8235h1;l/26r/26t/14b/14@1 [s2; [* XML Tag]] :: [s2; [* Description]] ::l/25r/25t/15b/15@2 [s2; [C@3 <reply>]] :: [s2; This tag represents a single reply. A reply has two attributes: [C@3 type] and [C@3 code]. The type of reply can be either [C@3 `"protocol`"] or [C@3 `"internal`"]. Typical protocol reply codes are listed in [^http`:`/`/www`.ietf`.org`/rfc`/rfc959`.txt^ RFC 959]. Typical internal reply (error) code is `"`-1`". A reply contains at least one [C@3 <line>] tag.] :: [s2; [C@3 <line>]] :: [s2; This tag represents a single line of reply. A line contains a single line of message or it can be empty.]}}&] [s2; &] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:WhenWait:%- [_^Callback^ Callback]_[* WhenWait]&] [s2; If this callback is defined, it is invoked periodically while the ftp client performs any socket operations, with the frequency of 100Hz. This is intended to give user feedback in interactive applications.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:GetWorkerCount`(`):%- [@(0.0.255) static] [@(0.0.255) int]_[* GetWorkerC ount]()&] [s2; Returns the number of running ftp worker threads. 
See [^topic`:`/`/FTP`/src`/HelperFunctions`$en`-us`#Upp`:`:FtpAsyncGet`(Upp`:`:Ftp`:`:Request`&`,Upp`:`:Event`<Upp`:`:Ftp`:`:Result`>`)^ F tpAsyncGet()] and [^topic`:`/`/FTP`/src`/HelperFunctions`$en`-us`#Upp`:`:FtpAsyncPut`(Upp`:`:Ftp`:`:Request`&`,Upp`:`:Event`<Upp`:`:Ftp`:`:Result`>`)^ F tpAsyncPut]() for more information.&] [s6; Requires multithreading.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:AbortWorker`(int`):%- [@(0.0.255) static] [@(0.0.255) void]_[* AbortWork er]([@(0.0.255) int]_[*@3 id])&] [s2; Sets the abort flag on for the ftp worker thread with the given [%-*@3 id]. Note that [^topic`:`/`/Core`/src`/Thread`$en`-us`#Thread`:`:ShutdownThreads`(`)^ S hutdownThreads() ]can be used to abort all running worker threads at once. See [^topic`:`/`/FTP`/src`/HelperFunctions`$en`-us`#Upp`:`:FtpAsyncGet`(Upp`:`:Ftp`:`:Request`&`,Upp`:`:Event`<Upp`:`:Ftp`:`:Result`>`)^ F tpAsyncGet()] and [^topic`:`/`/FTP`/src`/HelperFunctions`$en`-us`#Upp`:`:FtpAsyncPut`(Upp`:`:Ftp`:`:Request`&`,Upp`:`:Event`<Upp`:`:Ftp`:`:Result`>`)^ F tpAsyncPut()] for more information.&] [s6; Requires multithreading.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:Trace`(bool`):%- [@(0.0.255) static] [@(0.0.255) void]_[* Trace]([@(0.0.255) bool ]_[*@3 b]_`=_[@(0.0.255) true])&] [s2; Enables logging of Ftp client&] [s3; &] [s3; &] [ {{10000F(128)G(128)@1 [s0; [* Constructor Detail]]}}&] [s3;%- &] [s5;:Ftp`:`:Ftp`(`):%- [* Ftp]()&] [s2; Default constructor.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:`~Ftp`(`):%- [@(0.0.255) `~][* Ftp]()&] [s2; Default destructor. 
Invokes [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:Disconnect`(`)^ D isconnect()].&] [s3;%- &] [s0;%- &] [ {{10000@(113.42.0) [s0; [*@7;4 Ftp`::DirEntry]]}}&] [s3; &] [s1;:Ftp`:`:DirEntry`:`:class:%- [@(0.0.255)3 class][3 _][*3 DirEntry][3 _:_][@(0.0.255)3 privat e][3 _][*@3;3 Moveable][3 <][_^Ftp`:`:DirEntry^3 Ftp`::DirEntry][3 >_]&] [s2; This nested class is intended to simplify the parsing of directory entries (files, directories, symbolic links) returned by the [^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:class^ Ftp] class. It can handle both UNIX and DOS style directory listings.&] [s2; &] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:User`(const String`&`):%- [_^Ftp`:`:DirEntry^ DirEntry][@(0.0.255) `& ]_[* User]([@(0.0.255) const]_[_^String^ String][@(0.0.255) `&]_[*@3 u])&] [s2; Sets user to [%-*@3 u]. If not set or set to Null, public permissions can be queried.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:GetName`(`)const:%- [_^String^ String]_[* GetName]()_[@(0.0.255) co nst]&] [s2; Returns the name of the directory entry.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:GetOwner`(`)const:%- [_^String^ String]_[* GetOwner]()_[@(0.0.255) c onst]&] [s2; Returns the owner of the directory entry. &] [s6; Only applicable to UNIX style directory listing.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:GetGroup`(`)const:%- [_^String^ String]_[* GetGroup]()_[@(0.0.255) c onst]&] [s2; Returns the group of the directory entry. 
&] [s6; Only applicable to UNIX style directory listing.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:GetSize`(`)const:%- [_^int64^ int64]_[* GetSize]()_[@(0.0.255) cons t]&] [s2; Returns the size of the entry.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:GetLastModified`(`)const:%- [_^Time^ Time]_[* GetLastModified]()_ [@(0.0.255) const]&] [s2; Returns the last modification time of the entry.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:DirEntry`:`:GetStyle`(`)const:%- [_ DirEntry`::Style]_[* GetStyle]()_[@(0.0.255) c onst]&] [s2; Returns the directory listing style. Currently [^topic`:`/`/trunk`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:DirEntry`:`:Style`:`:UNIX^ U NIX] and [^topic`:`/`/trunk`/FTP`/src`/Ftp`$en`-us`#Upp`:`:Ftp`:`:DirEntry`:`:Style`:`:DOS^ D OS] style directory listings are supported. Returns UNDEFINED for unsupported directory listing styles.&] [s3; &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:GetEntry`(`)const:%- [_^String^ String]_[* GetEntry]()_[@(0.0.255) c onst]&] [s2; Returns the raw directory listing string.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:IsFile`(`)const:%- [@(0.0.255) bool]_[* IsFile]()_[@(0.0.255) const ]&] [s2; Returns true if the entry is a file.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:IsDirectory`(`)const:%- [@(0.0.255) bool]_[* IsDirectory]()_[@(0.0.255) c onst]&] [s2; Returns true if the entry is a directory.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:IsSymLink`(`)const:%- [@(0.0.255) bool]_[* IsSymLink]()_[@(0.0.255) c onst]&] [s2; Returns true if the entry is a symbolic link.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:IsReadable`(`)const:%- [@(0.0.255) bool]_[* IsReadable]()_[@(0.0.255) c onst]&] [s2; Returns true if the directory entry is readable by the user. If user is not set, this method will return the public read permission. 
&] [s6; Only applicable to UNIX style directory listing.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:IsWriteable`(`)const:%- [@(0.0.255) bool]_[* IsWriteable]()_[@(0.0.255) c onst]&] [s2; Returns true if the directory entry is writeable by the user. If user is not set, this method will return the public write permission. &] [s6; Only applicable to UNIX style directory listing.&] [s3;%- &] [s4;%- &] [s5;:Ftp`:`:DirEntry`:`:IsExecutable`(`)const:%- [@(0.0.255) bool]_[* IsExecutable]()_[@(0.0.255) c onst]&] [s2; Returns true if the directory entry is executable by the user. If user is not set, this method will return the public execute permission. &] [s6; Only applicable to UNIX style directory listing.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:DirEntry`:`:Style`:`:UNIX:%- [@(0.0.255) enum]_DirEntry`::Style`::[*@3 U NIX]&] [s2; Represents a UNIX style directory listing.&] [s3;%- &] [s4;%- &] [s5;:Upp`:`:Ftp`:`:DirEntry`:`:Style`:`:DOS:%- [@(0.0.255) enum]_DirEntry`::Style`::[*@3 DO S]&] [s2; Represents a DOS style directory listing.&] [s3; &] [s0; &] [ {{10000F(128)G(128)@1 [s0; [* Constructor detail]]}}&] [s0;%- &] [s5;:Ftp`:`:DirEntry`:`:DirEntry`(`):%- [* DirEntry]()&] [s2; Default constructor.&] [s3;%- &] [ {{10000@(113.42.0) [s0; [*@7;4 Ftp`::DirList]]}}&] [s0;%- &] [s5;:Ftp`:`:DirList`:`:typedef:%- [@(0.0.255) typedef]_[_^Vector^ Vector]<[_^topic`:`/`/FTP`/src`/Ftp`$en`-us`#Ftp`:`:DirEntry`:`:class^ D irEntry]>_[* DirList]&] [s2; Ftp`::DirList is a [^topic`:`/`/Core`/src`/Vector`$en`-us^ Vector] type container, containing [^topic`:`/`/FTP`/src`/FtpDirEntry`$en`-us`#Ftp`:`:DirEntry`:`:class^ F tp`::DirEntry] elements.&] [s3;%- &] [s0;%- &] [ {{10000F(128)G(128)@1 [s0; [* Miscellaneous]]}}&] [s0;%- &] [s5;:Upp`:`:ParseFtpDirEntry`(const Upp`:`:String`&`,Upp`:`:Ftp`:`:DirList`&`):%- [@(0.0.255) b ool]_[* ParseFtpDirEntry]([@(0.0.255) const]_[_^Upp`:`:String^ String][@(0.0.255) `&]_[*@3 i n], [_^Upp`:`:Ftp`:`:DirList^ Ftp`::DirList][@(0.0.255) `&]_[*@3 out])&] [s2; 
This helper function parses a UNIX or DOS style directory list into a [^topic`:`/`/FTP`/src`/FtpDirEntry`$en`-us`#Ftp`:`:DirList`:`:typedef^ Ftp`: :DirList] structure. Returns true on success.&] [s3;%- ]]
#include <iostream>
#include <vector>
#include <algorithm>
#include <string>
#include <queue>
#include <stack>

// NOTE(review): MX/MXV and several typedefs below are unused in this
// program; they look like leftovers from a contest template.
#define MX 110000
#define MXV 44000

using namespace std;

typedef unsigned long long ull;
typedef long long ll;
typedef unsigned int uint;
typedef vector <ull> ullv1;
typedef vector <vector <ull>> ullv2;

// A 2-D point. (p, q) caches the displacement from the pivot point ar[0];
// `order` remembers the point's index in the input so it can be printed.
struct point{
    ll x,y;
    ll p=0,q=0;
    ll order;
};

// Orders points bottom-to-top, then left-to-right (used to pick the pivot).
bool comp1(point a, point b)
{
    if(a.y != b.y) return a.y < b.y;
    return a.x < b.x;
}

// Orders two points by their (p,q) displacement around the pivot, comparing
// cross products (a.q*b.p vs a.p*b.q) to avoid floating point. Points that
// are collinear with the pivot fall back to comp1's positional order.
bool comp2 (point a, point b)
{
    if(a.q * b.p != a.p*b.q) return a.q * b.p < a.p*b.q;
    return comp1(a,b);
}

// Twice the signed area of triangle (p1,p2,p3); the sign gives orientation
// and 0 means the three points are collinear.
ll ccw(point p1, point p2, point p3)
{
    return (p1.x * p2.y + p2.x * p3.y + p3.x * p1.y - p2.x * p1.y - p3.x * p2.y - p1.x * p3.y);
}

ll N,T;
vector <point> ar;
point Z;

// For each of T test cases: reads N points and prints their input indices in
// an order that appears to trace a simple (non-self-intersecting) polygon --
// pivot first, the rest sorted by angle, with the trailing run of points
// collinear with the pivot reversed so the closing edge does not pass over
// earlier points (same boundary trick as in a Graham scan).
int main()
{
    ios::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);

    cin >> T;
    while(T--)
    {
        ar.clear();
        cin >> N;
        for(int x=0; x<N; x++)
        {
            cin >> Z.x >> Z.y;
            Z.order = x;  // remember original input position
            ar.push_back(Z);
        }

        // Pivot = lowest point (leftmost on ties); it ends up at ar[0].
        sort(ar.begin(),ar.end(),comp1);

        // Cache each remaining point's displacement from the pivot.
        for(int x=1; x<N; x++)
        {
            ar[x].p = ar[x].x -ar[0].x;
            ar[x].q = ar[x].y - ar[0].y;
        }

        // Angular sort around the pivot (the pivot itself stays first).
        sort(ar.begin()+1,ar.end(),comp2);

        // Walk back from the end: once a point not collinear with the pivot
        // and its successor is found, reverse the collinear tail.
        for(int x = ar.size()-1; x>1; x--)
        {
            if(ccw(ar[0],ar[x-1],ar[x])!=0)
            {
                reverse(ar.begin()+x,ar.end());
                break;
            }
        }

        // Emit the input indices in the computed order.
        for(auto i: ar)
        {
            cout << i.order <<" ";
        }
        cout << "\n";
    }
}
/**********************************************************************

  Audacity: A Digital Audio Editor

  ControlToolBar.cpp

  Dominic Mazzoni
  Shane T. Mueller
  James Crook
  Leland Lucius

*******************************************************************//**

\class ControlToolBar
\brief A ToolBar that has the main Transport buttons.

  In the GUI, this is referred to as "Transport Toolbar", as
  it corresponds to commands in the Transport menu.
  "Control Toolbar" is historic.
  This class, which is a child of Toolbar, creates the
  window containing the Transport (rewind/play/stop/record/ff)
  buttons. The window can be embedded within a
  normal project window, or within a ToolBarFrame.

  All of the controls in this window were custom-written for
  Audacity - they are not native controls on any platform -
  however, it is intended that the images could be easily
  replaced to allow "skinning" or just customization to
  match the look and feel of each platform.

*//*******************************************************************/

#include "../Audacity.h" // for USE_* macros
#include "ControlToolBar.h"

#include "../Experimental.h"

#include <algorithm>
#include <cfloat>

// For compilers that support precompilation, includes "wx/wx.h".
#include <wx/wxprec.h>

#include <wx/setup.h> // for wxUSE_* macros

#ifndef WX_PRECOMP
#include <wx/app.h>
#include <wx/dc.h>
#include <wx/event.h>
#include <wx/image.h>
#include <wx/intl.h>
#include <wx/sizer.h>
#include <wx/statusbr.h>
#include <wx/timer.h>
#endif
#include <wx/tooltip.h>
#include <wx/datetime.h>

#include "../AColor.h"
#include "../AllThemeResources.h"
#include "../AudioIO.h"
#include "../ImageManipulation.h"
#include "../Prefs.h"
#include "../Project.h"
#include "../ProjectAudioIO.h"
#include "../ProjectAudioManager.h"
#include "../ProjectSettings.h"
#include "../ProjectStatus.h"
#include "../ProjectWindow.h"
#include "../Track.h"
#include "../widgets/AButton.h"
#include "../widgets/ErrorDialog.h"
#include "../FileNames.h"

#include "../tracks/ui/Scrubbing.h"
#include "../toolbars/ToolManager.h"

IMPLEMENT_CLASS(ControlToolBar, ToolBar);

////////////////////////////////////////////////////////////
/// Methods for ControlToolBar
////////////////////////////////////////////////////////////

BEGIN_EVENT_TABLE(ControlToolBar, ToolBar)
   EVT_CHAR(ControlToolBar::OnKeyEvent)
   EVT_BUTTON(ID_PLAY_BUTTON,   ControlToolBar::OnPlay)
   EVT_BUTTON(ID_STOP_BUTTON,   ControlToolBar::OnStop)
   EVT_BUTTON(ID_RECORD_BUTTON, ControlToolBar::OnRecord)
   EVT_BUTTON(ID_REW_BUTTON,    ControlToolBar::OnRewind)
   EVT_BUTTON(ID_FF_BUTTON,     ControlToolBar::OnFF)
   EVT_BUTTON(ID_PAUSE_BUTTON,  ControlToolBar::OnPause)
   EVT_IDLE(ControlToolBar::OnIdle)
END_EVENT_TABLE()

// Transport state names shown in the status bar (see StateForStatusBar()).
static const TranslatableString
   /* i18n-hint: These are strings for the status bar, and indicate whether Audacity
   is playing or recording or stopped, and whether it is paused. */
     sStatePlay = XO("Playing")
   /* i18n-hint: These are strings for the status bar, and indicate whether Audacity
   is playing or recording or stopped, and whether it is paused. */
   , sStateStop = XO("Stopped")
   /* i18n-hint: These are strings for the status bar, and indicate whether Audacity
   is playing or recording or stopped, and whether it is paused. */
   , sStateRecord = XO("Recording")
;

//Standard constructor
// This was called "Control" toolbar in the GUI before - now it is "Transport".
// Note that we use the legacy "Control" string as the section because this
// gets written to prefs and cannot be changed in prefs to maintain backwards
// compatibility
ControlToolBar::ControlToolBar( AudacityProject &project )
: ToolBar(project, TransportBarID, XO("Transport"), wxT("Control"))
{
   gPrefs->Read(wxT("/GUI/ErgonomicTransportButtons"), &mErgonomicTransportButtons, true);
   mStrLocale = gPrefs->Read(wxT("/Locale/Language"), wxT(""));

   mSizer = NULL;
}

ControlToolBar::~ControlToolBar()
{
}

// Like Get(), but returns nullptr when the toolbar does not exist.
ControlToolBar *ControlToolBar::Find( AudacityProject &project )
{
   auto &toolManager = ToolManager::Get( project );
   return static_cast<ControlToolBar*>(
      toolManager.GetToolBar(TransportBarID) );
}

// Fetch the project's transport toolbar; assumes it exists.
ControlToolBar &ControlToolBar::Get( AudacityProject &project )
{
   auto &toolManager = ToolManager::Get( project );
   return *static_cast<ControlToolBar*>(
      toolManager.GetToolBar(TransportBarID) );
}

const ControlToolBar &ControlToolBar::Get( const AudacityProject &project )
{
   return Get( const_cast<AudacityProject&>( project )) ;
}

void ControlToolBar::Create(wxWindow * parent)
{
   ToolBar::Create(parent);
   UpdatePrefs();
}

// This is a convenience function that allows for button creation in
// MakeButtons() with fewer arguments
AButton *ControlToolBar::MakeButton(ControlToolBar *pBar,
                                    teBmps eEnabledUp, teBmps eEnabledDown, teBmps eDisabled,
                                    int id,
                                    bool processdownevents,
                                    const TranslatableString &label)
{
   AButton *r = ToolBar::MakeButton(pBar,
      bmpRecoloredUpLarge, bmpRecoloredDownLarge, bmpRecoloredUpHiliteLarge, bmpRecoloredHiliteLarge,
      eEnabledUp, eEnabledDown, eDisabled,
      wxWindowID(id),
      wxDefaultPosition, processdownevents,
      theTheme.ImageSize( bmpRecoloredUpLarge ));
   r->SetLabel(label);
   // Platform-dependent inset of the focus rectangle inside the button.
   enum {
      deflation =
#ifdef __WXMAC__
         6
#else
         12
#endif
   };
   r->SetFocusRect( r->GetClientRect().Deflate( deflation, deflation ) );

   return r;
}

// static
// Registers an alternate image set (index idx) for a button, e.g. the
// loop/cut-preview/scrub appearances of the Play button.
void ControlToolBar::MakeAlternateImages(AButton &button, int idx,
                                         teBmps eEnabledUp,
                                         teBmps eEnabledDown,
                                         teBmps eDisabled)
{
   ToolBar::MakeAlternateImages(button, idx,
      bmpRecoloredUpLarge, bmpRecoloredDownLarge, bmpRecoloredUpHiliteLarge, bmpRecoloredHiliteLarge,
      eEnabledUp, eEnabledDown, eDisabled,
      theTheme.ImageSize( bmpRecoloredUpLarge ));
}

// Creates all transport buttons, their alternate images, and tooltips.
void ControlToolBar::Populate()
{
   SetBackgroundColour( theTheme.Colour( clrMedium  ) );
   MakeButtonBackgroundsLarge();

   mPause = MakeButton(this, bmpPause, bmpPause, bmpPauseDisabled,
      ID_PAUSE_BUTTON,  true,  XO("Pause"));

   mPlay = MakeButton(this, bmpPlay, bmpPlay, bmpPlayDisabled,
      ID_PLAY_BUTTON, true, XO("Play"));
   // Alternate appearances: 1=loop, 2=cut preview, 3=scrub, 4=seek.
   MakeAlternateImages(*mPlay, 1, bmpLoop, bmpLoop, bmpLoopDisabled);
   MakeAlternateImages(*mPlay, 2,
      bmpCutPreview, bmpCutPreview, bmpCutPreviewDisabled);
   MakeAlternateImages(*mPlay, 3,
                       bmpScrub, bmpScrub, bmpScrubDisabled);
   MakeAlternateImages(*mPlay, 4,
                       bmpSeek, bmpSeek, bmpSeekDisabled);
   mPlay->FollowModifierKeys();

   mStop = MakeButton(this, bmpStop, bmpStop, bmpStopDisabled ,
      ID_STOP_BUTTON, false, XO("Stop"));

   mRewind = MakeButton(this, bmpRewind, bmpRewind, bmpRewindDisabled,
      ID_REW_BUTTON, false, XO("Skip to Start"));

   mFF = MakeButton(this, bmpFFwd, bmpFFwd, bmpFFwdDisabled,
      ID_FF_BUTTON, false, XO("Skip to End"));

   mRecord = MakeButton(this, bmpRecord, bmpRecord, bmpRecordDisabled,
      ID_RECORD_BUTTON, false, XO("Record"));

   // Alternate record image reflects the "record in new track" preference.
   bool bPreferNewTrack;
   gPrefs->Read("/GUI/PreferNewTrackRecord",&bPreferNewTrack, false);
   if( !bPreferNewTrack )
      MakeAlternateImages(*mRecord, 1, bmpRecordBelow, bmpRecordBelow,
         bmpRecordBelowDisabled);
   else
      MakeAlternateImages(*mRecord, 1, bmpRecordBeside, bmpRecordBeside,
         bmpRecordBesideDisabled);

   mRecord->FollowModifierKeys();

#if wxUSE_TOOLTIPS
   RegenerateTooltips();
   wxToolTip::Enable(true);
   wxToolTip::SetDelay(1000);
#endif

   // Set default order and mode
   ArrangeButtons();
}

// Rebuilds every button's tooltip from the current keyboard-shortcut bindings.
void ControlToolBar::RegenerateTooltips()
{
#if wxUSE_TOOLTIPS
   for (long iWinID = ID_PAUSE_BUTTON; iWinID < BUTTON_COUNT; iWinID++)
   {
      auto pCtrl = static_cast<AButton*>(this->FindWindow(iWinID));
      CommandID name;
      switch (iWinID)
      {
         case ID_PLAY_BUTTON:
            // Without shift
            name = wxT("PlayStop");
            break;
         case ID_RECORD_BUTTON:
            // Without shift
            //name = wxT("Record");
            name = wxT("Record1stChoice");
            break;
         case ID_PAUSE_BUTTON:
            name = wxT("Pause");
            break;
         case ID_STOP_BUTTON:
            name = wxT("Stop");
            break;
         case ID_FF_BUTTON:
            name = wxT("CursProjectEnd");
            break;
         case ID_REW_BUTTON:
            name = wxT("CursProjectStart");
            break;
      }
      std::vector<ComponentInterfaceSymbol> commands(
         1u, { name, Verbatim( pCtrl->GetLabel() ) } );

      // Some have a second
      switch (iWinID)
      {
         case ID_PLAY_BUTTON:
            // With shift
            commands.push_back( { wxT("PlayLooped"), XO("Loop Play") } );
            break;
         case ID_RECORD_BUTTON:
            // With shift
            {
               bool bPreferNewTrack;
               gPrefs->Read("/GUI/PreferNewTrackRecord",&bPreferNewTrack, false);
               // For the shortcut tooltip.
               commands.push_back( {
                  wxT("Record2ndChoice"),
                  !bPreferNewTrack
                     ? XO("Record New Track")
                     : XO("Append Record")
               } );
            }
            break;
         case ID_PAUSE_BUTTON:
            break;
         case ID_STOP_BUTTON:
            break;
         case ID_FF_BUTTON:
            // With shift
            commands.push_back( { wxT("SelEnd"), XO("Select to End") } );
            break;
         case ID_REW_BUTTON:
            // With shift
            commands.push_back( { wxT("SelStart"), XO("Select to Start") } );
            break;
      }
      ToolBar::SetButtonToolTip( mProject,
         *pCtrl, commands.data(), commands.size());
   }
#endif
}

// Responds to preference changes (ergonomic button order, UI language).
void ControlToolBar::UpdatePrefs()
{
   bool updated = false;
   bool active;

   gPrefs->Read( wxT("/GUI/ErgonomicTransportButtons"), &active, true );
   if( mErgonomicTransportButtons != active )
   {
      mErgonomicTransportButtons = active;
      updated = true;
   }
   wxString strLocale = gPrefs->Read(wxT("/Locale/Language"), wxT(""));
   if (mStrLocale != strLocale)
   {
      mStrLocale = strLocale;
      updated = true;
   }

   if( updated )
   {
      ReCreateButtons(); // side effect: calls RegenerateTooltips()
      Updated();
   }
   else
      // The other reason to regenerate tooltips is if keyboard shortcuts for
      // transport buttons changed, but that's too much work to check for, so just
      // always do it. (Much cheaper than calling ReCreateButtons() in all cases.
      RegenerateTooltips();

   // Set label to pull in language change
   SetLabel(XO("Transport"));

   // Give base class a chance
   ToolBar::UpdatePrefs();
}

// Lays the buttons out in one of two orders, depending on the
// "ergonomic transport buttons" preference.
void ControlToolBar::ArrangeButtons()
{
   int flags = wxALIGN_CENTER | wxRIGHT;

   // (Re)allocate the button sizer
   if( mSizer )
   {
      Detach( mSizer );
      std::unique_ptr < wxSizer > {mSizer}; // DELETE it
   }

   Add((mSizer = safenew wxBoxSizer(wxHORIZONTAL)), 1, wxEXPAND);

   // Start with a little extra space
   mSizer->Add( 5, 55 );

   // Add the buttons in order based on ergonomic setting
   if( mErgonomicTransportButtons )
   {
      mPause->MoveBeforeInTabOrder( mRecord );
      mPlay->MoveBeforeInTabOrder( mRecord );
      mStop->MoveBeforeInTabOrder( mRecord );
      mRewind->MoveBeforeInTabOrder( mRecord );
      mFF->MoveBeforeInTabOrder( mRecord );

      mSizer->Add( mPause,  0, flags, 2 );
      mSizer->Add( mPlay,   0, flags, 2 );
      mSizer->Add( mStop,   0, flags, 2 );
      mSizer->Add( mRewind, 0, flags, 2 );
      mSizer->Add( mFF,     0, flags, 10 );
      mSizer->Add( mRecord, 0, flags, 5 );
   }
   else
   {
      mRewind->MoveBeforeInTabOrder( mFF );
      mPlay->MoveBeforeInTabOrder( mFF );
      mRecord->MoveBeforeInTabOrder( mFF );
      mPause->MoveBeforeInTabOrder( mFF );
      mStop->MoveBeforeInTabOrder( mFF );

      mSizer->Add( mRewind, 0, flags, 2 );
      mSizer->Add( mPlay,   0, flags, 2 );
      mSizer->Add( mRecord, 0, flags, 2 );
      mSizer->Add( mPause,  0, flags, 2 );
      mSizer->Add( mStop,   0, flags, 2 );
      mSizer->Add( mFF,     0, flags, 5 );
   }

   // Layout the sizer
   mSizer->Layout();

   // Layout the toolbar
   Layout();

   // (Re)Establish the minimum size
   SetMinSize( GetSizer()->GetMinSize() );
}

// Rebuilds the buttons (e.g. on theme/pref change) while preserving their
// current pressed/shift state.
void ControlToolBar::ReCreateButtons()
{
   bool playDown = false;
   bool playShift = false;
   bool pauseDown = false;
   bool recordDown = false;
   bool recordShift = false;

   // ToolBar::ReCreateButtons() will get rid of its sizer and
   // since we've attached our sizer to it, ours will get deleted too
   // so clean ours up first.
   if( mSizer )
   {
      playDown = mPlay->IsDown();
      playShift = mPlay->WasShiftDown();
      pauseDown = mPause->IsDown();
      recordDown = mRecord->IsDown();
      recordShift = mRecord->WasShiftDown();
      Detach( mSizer );

      std::unique_ptr < wxSizer > {mSizer}; // DELETE it
      mSizer = NULL;
   }

   ToolBar::ReCreateButtons();

   if (playDown)
   {
      ControlToolBar::PlayAppearance appearance =
         playShift ? ControlToolBar::PlayAppearance::Looped
         : ControlToolBar::PlayAppearance::Straight;
      SetPlay(playDown, appearance);
   }

   if (pauseDown)
   {
      mPause->PushDown();
   }

   if (recordDown)
   {
      mRecord->SetAlternateIdx(recordShift ? 1 : 0);
      mRecord->PushDown();
   }

   EnableDisableButtons();

   RegenerateTooltips();
}

void ControlToolBar::Repaint( wxDC *dc )
{
#ifndef USE_AQUA_THEME
   wxSize s = mSizer->GetSize();
   wxPoint p = mSizer->GetPosition();

   wxRect bevelRect( p.x, p.y, s.GetWidth() - 1, s.GetHeight() - 1 );
   AColor::Bevel( *dc, true, bevelRect );
#endif
}

// Central place that decides which transport buttons are clickable,
// based on the audio stream and track state.
void ControlToolBar::EnableDisableButtons()
{
   AudacityProject *p = &mProject;

   auto &projectAudioManager = ProjectAudioManager::Get( mProject );
   bool canStop = projectAudioManager.CanStopAudioStream();

   bool paused = mPause->IsDown();
   bool playing = mPlay->IsDown();
   bool recording = mRecord->IsDown();
   auto gAudioIO = AudioIO::Get();
   bool busy = gAudioIO->IsBusy();

   // Only interested in audio type tracks
   bool tracks = p && TrackList::Get( *p ).Any<AudioTrack>(); // PRL:  PlayableTrack ?

   mPlay->SetEnabled( canStop && tracks && !recording );
   mRecord->SetEnabled(
      canStop &&
      !(busy && !recording && !paused) &&
      !(playing && !paused)
   );
   mStop->SetEnabled(canStop && (playing || recording));
   mRewind->SetEnabled(paused || (!playing && !recording));
   mFF->SetEnabled(tracks && (paused || (!playing && !recording)));

   mPause->SetEnabled(canStop);
}

// Pushes/pops the Play button, selecting the image for the requested
// appearance (straight / looped / cut-preview).
void ControlToolBar::SetPlay(bool down, PlayAppearance appearance)
{
   if (down) {
      mPlay->SetShift(appearance == PlayAppearance::Looped);
      mPlay->SetControl(appearance == PlayAppearance::CutPreview);
      mPlay->SetAlternateIdx(static_cast<int>(appearance));
      mPlay->PushDown();
   }
   else {
      mPlay->PopUp();
      mPlay->SetAlternateIdx(0);
   }
   EnableDisableButtons();
}

void ControlToolBar::SetStop()
{
   mStop->PushDown();
   EnableDisableButtons();
}

void ControlToolBar::OnKeyEvent(wxKeyEvent & event)
{
   // PRL: is this handler really ever reached?  Is the ControlToolBar ever
   // focused?  Isn't there a global event filter that interprets the spacebar
   // key (or other key chosen in preferences) and dispatches to DoPlayStop,
   // according to CommandManager's table, before we come to this redundant
   // function?

   if (event.ControlDown() || event.AltDown()) {
      event.Skip();
      return;
   }

   auto gAudioIO = AudioIOBase::Get();
   auto &projectAudioManager = ProjectAudioManager::Get( mProject );

   // Does not appear to be needed on Linux. Perhaps on some other platform?
   // If so, "!CanStopAudioStream()" should probably apply.
   if (event.GetKeyCode() == WXK_SPACE) {
      if ( projectAudioManager.Playing() ||
           projectAudioManager.Recording() ) {
         SetStop();
         projectAudioManager.Stop();
      }
      else if (!gAudioIO->IsBusy()) {
         projectAudioManager.PlayCurrentRegion();
      }
      return;
   }
   event.Skip();
}

void ControlToolBar::OnPlay(wxCommandEvent & WXUNUSED(evt))
{
   auto p = &mProject;
   auto &projectAudioManager = ProjectAudioManager::Get( mProject );
   bool canStop = projectAudioManager.CanStopAudioStream();

   if ( !canStop )
      return;

   projectAudioManager.Stop();

   PlayDefault();
}

void ControlToolBar::OnStop(wxCommandEvent & WXUNUSED(evt))
{
   auto &projectAudioManager = ProjectAudioManager::Get( mProject );
   bool canStop = projectAudioManager.CanStopAudioStream();

   if ( canStop ) {
      projectAudioManager.Stop();
   }
}

// Starts playback honoring the modifier keys last used on the Play button.
void ControlToolBar::PlayDefault()
{
   // Let control have precedence over shift
   const bool cutPreview = mPlay->WasControlDown();
   const bool looped = !cutPreview &&
      mPlay->WasShiftDown();
   ProjectAudioManager::Get( mProject ).PlayCurrentRegion(looped, cutPreview);
}

void ControlToolBar::OnRecord(wxCommandEvent &evt)
// STRONG-GUARANTEE (for state of current project's tracks)
{
   // TODO: It would be neater if Menu items and Toolbar buttons used the same code for
   // enabling/disabling, and all fell into the same action routines.
   // Here instead we reduplicate some logic (from CommandHandler) because it isn't
   // normally used for buttons.

   bool altAppearance = mRecord->WasShiftDown();
   ProjectAudioManager::Get( mProject ).OnRecord( altAppearance );
}

void ControlToolBar::OnPause(wxCommandEvent & WXUNUSED(evt))
{
   ProjectAudioManager::Get( mProject ).OnPause();
}

// Keeps the button states, scrolling mode, and status bar in sync with the
// actual transport state; runs on every idle event.
void ControlToolBar::OnIdle(wxIdleEvent & event)
{
   event.Skip();

   auto &projectAudioManager = ProjectAudioManager::Get( mProject );
   if ( projectAudioManager.Paused() )
      mPause->PushDown();
   else
      mPause->PopUp();

   bool recording = projectAudioManager.Recording();
   if (!recording) {
      mRecord->PopUp();
      mRecord->SetAlternateIdx( wxGetKeyState(WXK_SHIFT) ? 1 : 0 );
   }
   else {
      mRecord->PushDown();
      mRecord->SetAlternateIdx( projectAudioManager.Appending() ? 0 : 1 );
   }

   bool playing = projectAudioManager.Playing();
   if ( !(playing || Scrubber::Get(mProject).HasMark()) ) {
      mPlay->PopUp();
      mPlay->SetAlternateIdx(
         wxGetKeyState(WXK_CONTROL) ? 2 : wxGetKeyState(WXK_SHIFT) ? 1 : 0 );
   }
   else {
      mPlay->PushDown();
      mPlay->SetAlternateIdx(
         projectAudioManager.Cutting() ? 2 :
         projectAudioManager.Looping() ? 1 : 0
      );
   }

   if ( recording || playing )
      StartScrollingIfPreferred();
   else
      StopScrolling();

   if ( projectAudioManager.Stopping() )
      mStop->PushDown();
   else
      // push-downs of the stop button are only momentary and always pop up now
      mStop->PopUp();

   UpdateStatusBar();
   EnableDisableButtons();
}

void ControlToolBar::OnRewind(wxCommandEvent & WXUNUSED(evt))
{
   mRewind->PushDown();
   mRewind->PopUp();

   AudacityProject *p = &mProject;
   {
      ProjectAudioManager::Get( *p ).StopIfPaused();
      ProjectWindow::Get( *p ).Rewind(mRewind->WasShiftDown());
   }
}

void ControlToolBar::OnFF(wxCommandEvent & WXUNUSED(evt))
{
   mFF->PushDown();
   mFF->PopUp();

   AudacityProject *p = &mProject;
   {
      ProjectAudioManager::Get( *p ).StopIfPaused();
      ProjectWindow::Get( *p ).SkipEnd(mFF->WasShiftDown());
   }
}

// works out the width of the field in the status bar needed for the state (eg play, record pause)
static ProjectStatus::RegisteredStatusWidthFunction registeredStatusWidthFunction{
   []( const AudacityProject &, StatusBarField field )
      -> ProjectStatus::StatusWidthResult
   {
      if ( field == stateStatusBarField ) {
         TranslatableStrings strings;
         for ( auto pString :
            { &sStatePlay, &sStateStop, &sStateRecord } )
            strings.push_back(
               /* i18n-hint: These are strings for the status bar, and indicate whether Audacity
               is playing or recording or stopped, and whether it is paused. */
               XO("%s Paused.").Format(*pString) );
         // added constant needed because xMax isn't large enough for some reason, plus some space.
         return { std::move( strings ), 30 };
      }
      return {};
   }
};

// Builds the human-readable transport state ("Playing.", "Stopped Paused.",
// or a scrub state string) for the status bar.
TranslatableString ControlToolBar::StateForStatusBar()
{
   TranslatableString state;

   auto &projectAudioManager = ProjectAudioManager::Get( mProject );

   auto pProject = &mProject;
   auto scrubState = pProject
      ? Scrubber::Get( *pProject ).GetUntranslatedStateString()
      : TranslatableString{};
   if (!scrubState.empty())
      state = scrubState;
   else if (mPlay->IsDown())
      state = sStatePlay;
   else if (projectAudioManager.Recording())
      state = sStateRecord;
   else
      state = sStateStop;

   return ((mPause->IsDown()) ? XO("%s Paused.") : XO("%s."))
      .Format( state );
}

void ControlToolBar::UpdateStatusBar()
{
   ProjectStatus::Get( mProject ).Set( StateForStatusBar(), stateStatusBarField );
}

// Starts auto-scrolling during playback/recording if the user prefers a
// pinned play head.
void ControlToolBar::StartScrollingIfPreferred()
{
   if ( Scrubber::Get( mProject ).IsTransportingPinned() )
      StartScrolling();
#ifdef __WXMAC__
   else if (Scrubber::Get( mProject ).HasMark()) {
      // PRL:  cause many "unnecessary" refreshes.  For reasons I don't understand,
      // doing this causes wheel rotation events (mapped from the double finger vertical
      // swipe) to be delivered more uniformly to the application, so that speed control
      // works better.
      ProjectWindow::Get( mProject ).GetPlaybackScroller().Activate
         (ProjectWindow::PlaybackScroller::Mode::Refresh);
   }
#endif
   else
      StopScrolling();
}

void ControlToolBar::StartScrolling()
{
   using Mode = ProjectWindow::PlaybackScroller::Mode;
   const auto project = &mProject;
   if (project) {
      auto mode = Mode::Pinned;

#if 0
      // Enable these lines to pin the playhead right instead of center,
      // when recording but not overdubbing.
      auto gAudioIO = AudioIO::Get();
      if (gAudioIO->GetNumCaptureChannels() > 0) {
         // recording

         // Display a fixed recording head while scrolling the waves continuously.
         // If you overdub, you may want to anticipate some context in existing tracks,
         // so center the head.  If not, put it rightmost to display as much wave as we can.
         bool duplex;
#ifdef EXPERIMENTAL_DA
         gPrefs->Read(wxT("/AudioIO/Duplex"), &duplex, false);
#else
         gPrefs->Read(wxT("/AudioIO/Duplex"), &duplex, true);
#endif
         if (duplex) {
            // See if there is really anything being overdubbed
            if (gAudioIO->GetNumPlaybackChannels() == 0)
               // No.
               duplex = false;
         }
         if (!duplex)
            mode = Mode::Right;
      }
#endif

      ProjectWindow::Get( *project ).GetPlaybackScroller().Activate(mode);
   }
}

void ControlToolBar::StopScrolling()
{
   const auto project = &mProject;
   if(project)
      ProjectWindow::Get( *project ).GetPlaybackScroller().Activate
         (ProjectWindow::PlaybackScroller::Mode::Off);
}

// Registers this toolbar with the toolbar manager.
static RegisteredToolbarFactory factory{ TransportBarID,
   []( AudacityProject &project ){
      return ToolBar::Holder{ safenew ControlToolBar{ project } }; }
};

namespace {
AttachedToolBarMenuItem sAttachment{
   /* i18n-hint: Clicking this menu item shows the toolbar
      with the big buttons on it (play record etc) */
   TransportBarID, wxT("ShowTransportTB"), XXO("&Transport Toolbar")
};
}
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ash/display/display_error_observer.h"

#include <memory>

#include "ash/display/display_util.h"
#include "ash/strings/grit/ash_strings.h"
#include "ash/test/ash_test_base.h"
#include "ui/base/l10n/l10n_util.h"
#include "ui/chromeos/devicetype_utils.h"
#include "ui/display/fake/fake_display_snapshot.h"
#include "ui/display/manager/display_manager.h"
#include "ui/display/test/display_manager_test_api.h"
#include "ui/display/types/display_snapshot.h"

namespace ash {

// Test fixture that owns a fresh DisplayErrorObserver per test and exposes
// the text of the display-error notification it raises.
class DisplayErrorObserverTest : public AshTestBase {
 protected:
  DisplayErrorObserverTest() = default;
  ~DisplayErrorObserverTest() override = default;

  void SetUp() override {
    AshTestBase::SetUp();
    observer_ = std::make_unique<DisplayErrorObserver>();
  }

 protected:
  DisplayErrorObserver* observer() { return observer_.get(); }

  // Returns the message body of the currently shown error notification
  // (empty if no notification is showing).
  std::u16string GetMessageContents() {
    return GetDisplayErrorNotificationMessageForTest();
  }

 private:
  std::unique_ptr<DisplayErrorObserver> observer_;

  DISALLOW_COPY_AND_ASSIGN(DisplayErrorObserverTest);
};

// A mirroring-mode failure produces the mirroring-specific message.
TEST_F(DisplayErrorObserverTest, Normal) {
  UpdateDisplay("200x200,300x300");
  observer()->OnDisplayModeChangeFailed(
      display::DisplayConfigurator::DisplayStateList(),
      display::MULTIPLE_DISPLAY_STATE_MULTI_MIRROR);
  EXPECT_EQ(l10n_util::GetStringUTF16(IDS_ASH_DISPLAY_FAILURE_ON_MIRRORING),
            GetMessageContents());
}

// Reporting the same failure twice keeps the same (non-empty) message.
TEST_F(DisplayErrorObserverTest, CallTwice) {
  UpdateDisplay("200x200,300x300");
  observer()->OnDisplayModeChangeFailed(
      display::DisplayConfigurator::DisplayStateList(),
      display::MULTIPLE_DISPLAY_STATE_MULTI_MIRROR);
  std::u16string message = GetMessageContents();
  EXPECT_FALSE(message.empty());

  observer()->OnDisplayModeChangeFailed(
      display::DisplayConfigurator::DisplayStateList(),
      display::MULTIPLE_DISPLAY_STATE_MULTI_MIRROR);
  std::u16string message2 = GetMessageContents();
  EXPECT_FALSE(message2.empty());
  EXPECT_EQ(message, message2);
}

// A failure in a different display state replaces the message text.
TEST_F(DisplayErrorObserverTest, CallWithDifferentState) {
  UpdateDisplay("200x200,300x300");
  observer()->OnDisplayModeChangeFailed(
      display::DisplayConfigurator::DisplayStateList(),
      display::MULTIPLE_DISPLAY_STATE_MULTI_MIRROR);
  EXPECT_EQ(l10n_util::GetStringUTF16(IDS_ASH_DISPLAY_FAILURE_ON_MIRRORING),
            GetMessageContents());

  observer()->OnDisplayModeChangeFailed(
      display::DisplayConfigurator::DisplayStateList(),
      display::MULTIPLE_DISPLAY_STATE_MULTI_EXTENDED);
  EXPECT_EQ(ui::SubstituteChromeOSDeviceType(
                IDS_ASH_DISPLAY_FAILURE_ON_NON_MIRRORING),
            GetMessageContents());
}

TEST_F(DisplayErrorObserverTest, FailureWithInternalDisplay) {
  // Failure with a single internal display --> No notification.
  UpdateDisplay("200x200,300x300");
  const int64_t internal_display_id = display_manager()->GetDisplayAt(0).id();
  const int64_t external_display_id = display_manager()->GetDisplayAt(1).id();
  display::test::ScopedSetInternalDisplayId set_internal(display_manager(),
                                                         internal_display_id);
  auto snapshot1 = display::FakeDisplaySnapshot::Builder()
                       .SetId(internal_display_id)
                       .SetNativeMode({200, 200})
                       .SetType(display::DISPLAY_CONNECTION_TYPE_INTERNAL)
                       .Build();
  observer()->OnDisplayModeChangeFailed(
      {snapshot1.get()}, display::MULTIPLE_DISPLAY_STATE_MULTI_EXTENDED);
  EXPECT_TRUE(GetMessageContents().empty());

  // Failure in both displays, user will see a notification even though one of
  // them is the internal display.
  auto snapshot2 = display::FakeDisplaySnapshot::Builder()
                       .SetId(external_display_id)
                       .SetNativeMode({300, 300})
                       .SetType(display::DISPLAY_CONNECTION_TYPE_UNKNOWN)
                       .Build();
  observer()->OnDisplayModeChangeFailed(
      {snapshot1.get(), snapshot2.get()},
      display::MULTIPLE_DISPLAY_STATE_MULTI_EXTENDED);
  EXPECT_EQ(ui::SubstituteChromeOSDeviceType(
                IDS_ASH_DISPLAY_FAILURE_ON_NON_MIRRORING),
            GetMessageContents());
}

}  // namespace ash
#include "data.h"

#include <exception>

/**
 * Does a very dumb parse for bool.
 * The point is not accuracy, but rather to get some
 * reasonable value.
 *
 * Leading/trailing spaces are ignored. A single non-space character is
 * interpreted digit-style ('1' => true, anything else => false); otherwise
 * the trimmed text is compared case-insensitively against "true".
 *
 * @param [in] str The string to parse
 * @return The boolean value
 */
static bool parseBool(const std::string &str);

bool parseBool(const std::string &str) {
    typedef std::string::size_type size;
    // Trim with find_first_not_of/find_last_not_of instead of hand-rolled
    // index loops. The original loops computed `str.size() - 1` on an
    // unsigned type, which underflows for the empty string and then read
    // str[SIZE_MAX] (out-of-bounds, undefined behavior).
    size start = str.find_first_not_of(' ');
    if (start == std::string::npos) {
        // Empty or all spaces: nothing to parse.
        return false;
    }
    size end = str.find_last_not_of(' ');
    if (end == start) {
        // Exactly one non-space character, e.g. "1" or " 0 ".
        // (The original tested `end - start == 1`, which matched *two*-char
        // spans, so parseBool("1") incorrectly returned false.)
        return str[start] == '1';
    }
    std::string test = str.substr(start, end - start + 1);
    // Lower-case ASCII letters in place (bit 5 set = lowercase).
    for (char &c : test) {
        if ('A' <= c && c <= 'Z') {
            c |= 0x20;
        }
    }
    return test == "true";
}

// ---- String: wraps a std::string and converts on demand ----

etm::data::String::String(const std::string &str) noexcept: str(str) {
}

std::string etm::data::String::getString() noexcept {
    return str;
}

// Delegates to the lenient parser above.
bool etm::data::String::getBool() noexcept {
    return parseBool(str);
}

// Returns 0 when the string is not a valid integer (stoi throws).
int etm::data::String::getInt() noexcept {
    try {
        return std::stoi(str);
    } catch (std::exception &e) {
        return 0;
    }
}

// Returns 0.0f when the string is not a valid float (stof throws).
float etm::data::String::getFloat() noexcept {
    try {
        return std::stof(str);
    } catch (std::exception &e) {
        return 0.0f;
    }
}

// ---- Boolean: wraps a bool ----

etm::data::Boolean::Boolean(bool flag) noexcept: flag(flag) {
}

std::string etm::data::Boolean::getString() noexcept {
    return flag ? "true" : "false";
}

bool etm::data::Boolean::getBool() noexcept {
    return flag;
}

int etm::data::Boolean::getInt() noexcept {
    return static_cast<int>(flag);
}

float etm::data::Boolean::getFloat() noexcept {
    return static_cast<float>(flag);
}

// ---- Integer: wraps an int ----

etm::data::Integer::Integer(int value) noexcept: value(value) {
}

std::string etm::data::Integer::getString() noexcept {
    return std::to_string(value);
}

bool etm::data::Integer::getBool() noexcept {
    return static_cast<bool>(value);
}

int etm::data::Integer::getInt() noexcept {
    return value;
}

float etm::data::Integer::getFloat() noexcept {
    return static_cast<float>(value);
}

// ---- Float: wraps a float ----

etm::data::Float::Float(float value) noexcept: value(value) {
}

std::string etm::data::Float::getString() noexcept {
    return std::to_string(value);
}

bool etm::data::Float::getBool() noexcept {
    return static_cast<bool>(value);
}

int etm::data::Float::getInt() noexcept {
    return static_cast<int>(value);
}

float etm::data::Float::getFloat() noexcept {
    return value;
}
/***************************************************************************
* Copyright (c) 2016, Wolf Vollprecht, Johan Mabille and Sylvain Corlay    *
*                                                                          *
* Distributed under the terms of the BSD 3-Clause License.                 *
*                                                                          *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/

#ifndef XLAPACK_HPP
#define XLAPACK_HPP

#include <algorithm>

#include "xtl/xcomplex.hpp"

#include "xtensor/xarray.hpp"
#include "xtensor/xcomplex.hpp"
#include "xtensor/xio.hpp"
#include "xtensor/xstorage.hpp"
#include "xtensor/xtensor.hpp"
#include "xtensor/xutils.hpp"

#include "xflens/cxxlapack/cxxlapack.cxx"

#include "xtensor-blas/xblas_config.hpp"
#include "xtensor-blas/xblas_utils.hpp"

namespace xt
{
namespace lapack
{
    /**
     * Interface to LAPACK gesv.
     * Solves A * X = b in place: on exit A holds the LU factors and b the
     * solution. A must be square, column-major; b may be a vector or matrix.
     * @return LAPACK info code (0 on success).
     */
    template <class E, class F>
    int gesv(E& A, F& b)
    {
        XTENSOR_ASSERT(A.dimension() == 2);
        XTENSOR_ASSERT(A.layout() == layout_type::column_major);
        XTENSOR_ASSERT(b.dimension() <= 2);
        XTENSOR_ASSERT(b.layout() == layout_type::column_major);

        // Pivot indices produced by the LU factorization.
        uvector<blas_index_t> piv(A.shape()[0]);

        // Number of right-hand sides, and the leading dimension of b.
        blas_index_t b_dim = b.dimension() > 1 ?
            static_cast<blas_index_t>(b.shape().back()) : 1;

        blas_index_t b_stride = b_dim == 1 ?
            static_cast<blas_index_t>(b.shape().front()) : stride_back(b);

        int info = cxxlapack::gesv<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            b_dim,
            A.data(),
            stride_back(A),
            piv.data(),
            b.data(),
            b_stride
        );

        return info;
    }

    /**
     * Interface to LAPACK getrf (LU factorization).
     * A is overwritten with the LU factors; piv receives the pivot indices.
     * @return LAPACK info code (0 on success).
     */
    template <class E, class F>
    auto getrf(E& A, F& piv)
    {
        XTENSOR_ASSERT(A.dimension() == 2);
        XTENSOR_ASSERT(A.layout() == layout_type::column_major);

        int info = cxxlapack::getrf<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            static_cast<blas_index_t>(A.shape()[1]),
            A.data(),
            stride_back(A),
            piv.data()
        );

        return info;
    }

    /**
     * Interface to LAPACK orgqr (real types): expand the elementary
     * reflectors produced by geqrf into an explicit Q matrix, in place.
     * Uses the standard LAPACK two-step protocol: first call with
     * lwork == -1 queries the optimal workspace size, second call does
     * the actual work.
     * @param n number of columns of Q to generate; -1 means all columns of A
     */
    template <class E, class T>
    inline auto orgqr(E& A, T& tau, blas_index_t n = -1)
    {
        using value_type = typename E::value_type;

        uvector<value_type> work(1);

        if (n == -1)
        {
            n = static_cast<blas_index_t>(A.shape()[1]);
        }

        // Workspace size query (lwork == -1); result returned in work[0].
        int info = cxxlapack::orgqr<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            n,
            static_cast<blas_index_t>(tau.size()),
            A.data(),
            stride_back(A),
            tau.data(),
            work.data(),
            static_cast<blas_index_t>(-1)
        );

        if (info != 0)
        {
            throw std::runtime_error("Could not find workspace size for orgqr.");
        }

        work.resize((std::size_t) work[0]);

        info = cxxlapack::orgqr<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            n,
            static_cast<blas_index_t>(tau.size()),
            A.data(),
            stride_back(A),
            tau.data(),
            work.data(),
            static_cast<blas_index_t>(work.size())
        );

        return info;
    }

    /**
     * Interface to LAPACK ungqr: complex counterpart of orgqr.
     * Note the std::real() on the workspace-size result, since work holds
     * complex values here.
     */
    template <class E, class T>
    inline auto ungqr(E& A, T& tau, blas_index_t n = -1)
    {
        using value_type = typename E::value_type;

        uvector<value_type> work(1);

        if (n == -1)
        {
            n = static_cast<blas_index_t>(A.shape()[1]);
        }

        // Workspace size query (lwork == -1).
        int info = cxxlapack::ungqr<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            n,
            static_cast<blas_index_t>(tau.size()),
            A.data(),
            stride_back(A),
            tau.data(),
            work.data(),
            static_cast<blas_index_t>(-1)
        );

        if (info != 0)
        {
            throw std::runtime_error("Could not find workspace size for ungqr.");
        }

        work.resize((std::size_t) std::real(work[0]));

        info = cxxlapack::ungqr<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            n,
            static_cast<blas_index_t>(tau.size()),
            A.data(),
            stride_back(A),
            tau.data(),
            work.data(),
            static_cast<blas_index_t>(work.size())
        );

        return info;
    }

    /**
     * Interface to LAPACK geqrf (QR factorization).
     * A is overwritten with R (upper triangle) and the reflectors (below);
     * tau receives the reflector scalars. Workspace query protocol as above.
     */
    template <class E, class T>
    int geqrf(E& A, T& tau)
    {
        using value_type = typename E::value_type;

        XTENSOR_ASSERT(A.dimension() == 2);
        XTENSOR_ASSERT(A.layout() == layout_type::column_major);

        uvector<value_type> work(1);

        // Workspace size query (lwork == -1).
        int info = cxxlapack::geqrf<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            static_cast<blas_index_t>(A.shape()[1]),
            A.data(),
            stride_back(A),
            tau.data(),
            work.data(),
            static_cast<blas_index_t>(-1)
        );

        if (info != 0)
        {
            throw std::runtime_error("Could not find workspace size for geqrf.");
        }

        work.resize((std::size_t) std::real(work[0]));

        info = cxxlapack::geqrf<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            static_cast<blas_index_t>(A.shape()[1]),
            A.data(),
            stride_back(A),
            tau.data(),
            work.data(),
            static_cast<blas_index_t>(work.size())
        );

        return info;
    }

    namespace detail
    {
        /**
         * Sizes u and vt for gesdd according to jobz ('A', 'S', 'O', 'N')
         * and returns their leading dimensions as a pair. Unreferenced
         * outputs get a dummy stride of 1 because stride_back() cannot be
         * used on an empty tensor.
         */
        template <class U, class VT>
        inline auto init_u_vt(U& u, VT& vt, char jobz, std::size_t m, std::size_t n)
        {
            // rules for sgesdd
            // u:
            //   if jobz == 'O' and M >= N, u is not referenced
            //   if jobz == 'N', u is also not referenced
            // vt:
            //   if jobz == 'O' and M < N vt is not referenced
            //   if jobz == 'N', vt is also not referenced

            if (jobz == 'A' || (jobz == 'O' && m < n))
            {
                u.resize({m, m});
            }
            if (jobz == 'A' || (jobz == 'O' && m >= n))
            {
                vt.resize({n, n});
            }
            if (jobz == 'S')
            {
                u.resize({m, std::min(m, n)});
                vt.resize({std::min(m, n), n});
            }

            if (jobz == 'N')
            {
                // u AND vt are unreferenced -- can't use strides().back()...
                return std::make_pair(1, 1);
            }
            if (jobz == 'O')
            {
                // u OR vt are unreferenced -- can't use strides().back()...
                return m >= n ?
                    std::make_pair(1, stride_back(vt)) :
                    std::make_pair(stride_back(u), 1);
            }
            return std::make_pair(stride_back(u), stride_back(vt));
        }
    }

    /**
     * Interface to LAPACK gesdd (divide-and-conquer SVD), real value types.
     * @param jobz 'A' (full), 'S' (thin), 'O' (overwrite), 'N' (values only)
     * @return tuple (info, u, s, vt)
     */
    template <class E, std::enable_if_t<!xtl::is_complex<typename E::value_type>::value>* = nullptr>
    auto gesdd(E& A, char jobz = 'A')
    {
        using value_type = typename E::value_type;
        using xtype1 = xtensor<value_type, 1, layout_type::column_major>;
        using xtype2 = xtensor<value_type, 2, layout_type::column_major>;

        XTENSOR_ASSERT(A.dimension() == 2);
        XTENSOR_ASSERT(A.layout() == layout_type::column_major);

        uvector<value_type> work(1);

        std::size_t m = A.shape()[0];
        std::size_t n = A.shape()[1];

        xtype1 s;
        s.resize({ std::max(std::size_t(1), std::min(m, n)) });

        xtype2 u, vt;
        blas_index_t u_stride, vt_stride;
        std::tie(u_stride, vt_stride) = detail::init_u_vt(u, vt, jobz, m, n);

        uvector<blas_index_t> iwork(8 * std::min(m, n));

        // Workspace size query (lwork == -1).
        int info = cxxlapack::gesdd<blas_index_t>(
            jobz,
            static_cast<blas_index_t>(A.shape()[0]),
            static_cast<blas_index_t>(A.shape()[1]),
            A.data(),
            stride_back(A),
            s.data(),
            u.data(),
            u_stride,
            vt.data(),
            vt_stride,
            work.data(),
            static_cast<blas_index_t>(-1),
            iwork.data()
        );

        if (info != 0)
        {
            throw std::runtime_error("Could not find workspace size for real gesdd.");
        }

        work.resize((std::size_t) work[0]);

        info = cxxlapack::gesdd<blas_index_t>(
            jobz,
            static_cast<blas_index_t>(A.shape()[0]),
            static_cast<blas_index_t>(A.shape()[1]),
            A.data(),
            stride_back(A),
            s.data(),
            u.data(),
            u_stride,
            vt.data(),
            vt_stride,
            work.data(),
            static_cast<blas_index_t>(work.size()),
            iwork.data()
        );

        return std::make_tuple(info, u, s, vt);
    }

    // Complex variant of gesdd
    // Differs from the real variant by the extra rwork array (real scratch
    // for a complex problem) and by singular values being stored in the
    // underlying real type.
    template <class E, std::enable_if_t<xtl::is_complex<typename E::value_type>::value>* = nullptr>
    auto gesdd(E& A, char jobz = 'A')
    {
        using value_type = typename E::value_type;
        using underlying_value_type = typename value_type::value_type;

        using xtype1 = xtensor<underlying_value_type, 1, layout_type::column_major>;
        using xtype2 = xtensor<value_type, 2, layout_type::column_major>;

        XTENSOR_ASSERT(A.dimension() == 2);
        XTENSOR_ASSERT(A.layout() == layout_type::column_major);

        std::size_t m = A.shape()[0];
        std::size_t n = A.shape()[1];

        uvector<value_type> work(1);
        uvector<underlying_value_type> rwork(1);
        uvector<blas_index_t> iwork(8 * std::min(m, n));

        // rwork sizing follows the LAPACK documentation for zgesdd;
        // the exact bounds are flagged TODO below.
        std::size_t mx = std::max(m, n);
        std::size_t mn = std::min(m, n);
        if (jobz == 'N')
        {
            rwork.resize(5 * mn);
        }
        else if (mx > mn)
        {
            // TODO verify size
            rwork.resize(5 * mn * mn + 5 * mn);
        }
        else
        {
            // TODO verify size
            rwork.resize(std::max(5 * mn * mn + 5 * mn,
                                  2 * mx * mn + 2 * mn * mn + mn));
        }

        xtype1 s;
        s.resize({ std::max(std::size_t(1), std::min(m, n)) });

        xtype2 u, vt;
        blas_index_t u_stride, vt_stride;
        std::tie(u_stride, vt_stride) = detail::init_u_vt(u, vt, jobz, m, n);

        // Workspace size query (lwork == -1).
        int info = cxxlapack::gesdd<blas_index_t>(
            jobz,
            static_cast<blas_index_t>(A.shape()[0]),
            static_cast<blas_index_t>(A.shape()[1]),
            A.data(),
            stride_back(A),
            s.data(),
            u.data(),
            u_stride,
            vt.data(),
            vt_stride,
            work.data(),
            static_cast<blas_index_t>(-1),
            rwork.data(),
            iwork.data()
        );

        if (info != 0)
        {
            throw std::runtime_error("Could not find workspace size for complex gesdd.");
        }

        work.resize((std::size_t) std::real(work[0]));

        info = cxxlapack::gesdd<blas_index_t>(
            jobz,
            static_cast<blas_index_t>(A.shape()[0]),
            static_cast<blas_index_t>(A.shape()[1]),
            A.data(),
            stride_back(A),
            s.data(),
            u.data(),
            u_stride,
            vt.data(),
            vt_stride,
            work.data(),
            static_cast<blas_index_t>(work.size()),
            rwork.data(),
            iwork.data()
        );

        return std::make_tuple(info, u, s, vt);
    }

    /**
     * Interface to LAPACK potrf (Cholesky factorization), in place.
     * @param uplo 'L' or 'U': which triangle of A is factorized
     * @return LAPACK info code (0 on success).
     */
    template <class E>
    int potr(E& A, char uplo = 'L')
    {
        XTENSOR_ASSERT(A.dimension() == 2);
        XTENSOR_ASSERT(A.layout() == layout_type::column_major);

        int info = cxxlapack::potrf<blas_index_t>(
            uplo,
            static_cast<blas_index_t>(A.shape()[0]),
            A.data(),
            stride_back(A)
        );

        return info;
    }

    /**
     * Interface to LAPACK getri.
     *
     * Inverts A in place from its LU factorization (see getrf).
     *
     * @param A matrix to invert
     * @param piv pivot indices from the preceding getrf call
     * @return LAPACK info code (0 on success).
     */
    template <class E>
    int getri(E& A, uvector<blas_index_t>& piv)
    {
        using value_type = typename E::value_type;

        XTENSOR_ASSERT(A.dimension() == 2);
        XTENSOR_ASSERT(A.layout() == layout_type::column_major);

        uvector<value_type> work(1);

        // get work size (lwork == -1 query)
        int info = cxxlapack::getri<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            A.data(),
            stride_back(A),
            piv.data(),
            work.data(),
            static_cast<blas_index_t>(-1)
        );

        // NOTE(review): this tests `info > 0`, unlike the other wrappers'
        // `info != 0`; a negative info (illegal argument) would slip
        // through here -- confirm whether that is intentional.
        if (info > 0)
        {
            throw std::runtime_error("Could not find workspace size for getri.");
        }

        work.resize(std::size_t(std::real(work[0])));

        info = cxxlapack::getri<blas_index_t>(
            static_cast<blas_index_t>(A.shape()[0]),
            A.data(),
            stride_back(A),
            piv.data(),
            work.data(),
            static_cast<blas_index_t>(work.size())
        );

        return info;
    }

    /**
     * Interface to LAPACK geev (general eigenvalue problem).
     * @param jobvl/jobvr 'V' to compute left/right eigenvectors, 'N' to skip
     * @param wr/wi real and imaginary parts of the eigenvalues (outputs)
     * @param VL/VR left/right eigenvector matrices (outputs)
     * @returns info
     */
    template <class E, class W, class V>
    int geev(E& A, char jobvl, char jobvr, W& wr, W& wi, V& VL, V& VR)
    {
        XTENSOR_ASSERT(A.dimension() == 2);
        XTENSOR_ASSERT(A.layout() == layout_type::column_major);

        using value_type = typename E::value_type;
        using xtype = xtensor<value_type, 2, layout_type::column_major>;

        const auto N = A.shape()[0];
        uvector<value_type> work(1);

        // Workspace size query (lwork == -1).
        int info = cxxlapack::geev<blas_index_t>(
            jobvl,
            jobvr,
            static_cast<blas_index_t>(N),
            A.data(),
            stride_back(A),
            wr.data(),
            wi.data(),
            VL.data(),
            stride_back(VL),
            VR.data(),
            stride_back(VR),
            work.data(),
            static_cast<blas_index_t>(-1)
        );

        if (info != 0)
        {
            throw std::runtime_error("Could not find workspace size for geev.");
        }

        work.resize(std::size_t(work[0]));

        info = cxxlapack::geev<blas_index_t>(
            jobvl,
            jobvr,
            static_cast<blas_index_t>(N),
            A.data(),
            stride_back(A),
            wr.data(),
            wi.data(),
            VL.data(),
            stride_back(VL),
            VR.data(),
            stride_back(VR),
            work.data(),
            static_cast<blas_index_t>(work.size())
        );

        return info;
    }

    /**
     * Interface to LAPACK syevd.
 * @returns info
 */
template <class E, class W>
int syevd(E& A, char jobz, char uplo, W& w)
{
    XTENSOR_ASSERT(A.dimension() == 2);
    XTENSOR_ASSERT(A.layout() == layout_type::column_major);

    using value_type = typename E::value_type;
    using xtype = xtensor<value_type, 2, layout_type::column_major>;

    auto N = A.shape()[0];

    uvector<value_type> work(1);
    uvector<blas_index_t> iwork(1);

    // Combined workspace-size query for both work and iwork (lwork/liwork == -1).
    int info = cxxlapack::syevd<blas_index_t>(
        jobz,
        uplo,
        static_cast<blas_index_t>(N),
        A.data(),
        stride_back(A),
        w.data(),
        work.data(),
        static_cast<blas_index_t>(-1),
        iwork.data(),
        static_cast<blas_index_t>(-1)
    );

    if (info != 0)
    {
        throw std::runtime_error("Could not find workspace size for syevd.");
    }

    work.resize(std::size_t(work[0]));
    iwork.resize(std::size_t(iwork[0]));

    // Actual symmetric eigen-decomposition with the sized workspaces.
    info = cxxlapack::syevd<blas_index_t>(
        jobz,
        uplo,
        static_cast<blas_index_t>(N),
        A.data(),
        stride_back(A),
        w.data(),
        work.data(),
        static_cast<blas_index_t>(work.size()),
        iwork.data(),
        static_cast<blas_index_t>(iwork.size())
    );

    return info;
}

/**
 * Complex version of geev
 */
template <class E, class W, class V>
int geev(E& A, char jobvl, char jobvr, W& w, V& VL, V& VR)
{
    // TODO implement for complex numbers
    XTENSOR_ASSERT(A.dimension() == 2);
    XTENSOR_ASSERT(A.layout() == layout_type::column_major);

    using value_type = typename E::value_type;
    using underlying_value_type = typename value_type::value_type;
    using xtype = xtensor<value_type, 2, layout_type::column_major>;

    const auto N = A.shape()[0];

    uvector<value_type> work(1);
    // Real scratch for zgeev/cgeev; fixed size 2*N per the LAPACK docs.
    uvector<underlying_value_type> rwork(2 * N);

    // Workspace-size query (lwork == -1).
    int info = cxxlapack::geev<blas_index_t>(
        jobvl,
        jobvr,
        static_cast<blas_index_t>(N),
        A.data(),
        stride_back(A),
        w.data(),
        VL.data(),
        stride_back(VL),
        VR.data(),
        stride_back(VR),
        work.data(),
        -1,
        rwork.data()
    );

    if (info != 0)
    {
        throw std::runtime_error("Could not find workspace size for geev.");
    }

    work.resize(std::size_t(std::real(work[0])));

    // Actual eigen-decomposition with the sized workspace.
    info = cxxlapack::geev<blas_index_t>(
        jobvl,
        jobvr,
        static_cast<blas_index_t>(N),
        A.data(),
        stride_back(A),
        w.data(),
        VL.data(),
        stride_back(VL),
        VR.data(),
        stride_back(VR),
        work.data(),
        static_cast<blas_index_t>(work.size()),
        rwork.data()
    );

    return info;
}

/**
 * Interface to LAPACK heevd (eigen-decomposition of a complex Hermitian
 * matrix, divide & conquer). Eigenvalues w are real.
 * @returns info
 */
template <class E, class W>
int heevd(E& A, char jobz, char uplo, W& w)
{
    XTENSOR_ASSERT(A.dimension() == 2);
    XTENSOR_ASSERT(A.layout() == layout_type::column_major);

    using value_type = typename E::value_type;
    using underlying_value_type = typename value_type::value_type;
    using xtype = xtensor<value_type, 2, layout_type::column_major>;

    auto N = A.shape()[0];

    uvector<value_type> work(1);
    uvector<underlying_value_type> rwork(1);
    uvector<blas_index_t> iwork(1);

    // Combined workspace-size query for work, rwork and iwork.
    int info = cxxlapack::heevd<blas_index_t>(
        jobz,
        uplo,
        static_cast<blas_index_t>(N),
        A.data(),
        stride_back(A),
        w.data(),
        work.data(),
        static_cast<blas_index_t>(-1),
        rwork.data(),
        static_cast<blas_index_t>(-1),
        iwork.data(),
        static_cast<blas_index_t>(-1)
    );

    if (info != 0)
    {
        throw std::runtime_error("Could not find workspace size for heevd.");
    }

    work.resize(std::size_t(std::real(work[0])));
    rwork.resize(std::size_t(rwork[0]));
    iwork.resize(std::size_t(iwork[0]));

    // Actual Hermitian eigen-decomposition with the sized workspaces.
    info = cxxlapack::heevd<blas_index_t>(
        jobz,
        uplo,
        static_cast<blas_index_t>(N),
        A.data(),
        stride_back(A),
        w.data(),
        work.data(),
        static_cast<blas_index_t>(work.size()),
        rwork.data(),
        static_cast<blas_index_t>(rwork.size()),
        iwork.data(),
        static_cast<blas_index_t>(iwork.size())
    );

    return info;
}

/**
 * Interface to LAPACK gelsd (minimum-norm least-squares solve via SVD),
 * real-valued variant.
 *
 * @param A     column-major coefficient matrix (destroyed by LAPACK)
 * @param b     right-hand side(s); overwritten with the solution
 * @param s     receives the singular values of A
 * @param rank  receives the effective rank of A
 * @param rcond singular-value cutoff for rank determination
 * @return LAPACK info code (0 on success)
 */
template <class E, class F, class S, std::enable_if_t<!xtl::is_complex<typename E::value_type>::value>* = nullptr>
int gelsd(E& A, F& b, S& s, blas_index_t& rank, double rcond)
{
    using value_type = typename E::value_type;

    uvector<value_type> work(1);
    uvector<blas_index_t> iwork(1);

    // Number of right-hand sides: last extent for a 2-D b, else a single column.
    blas_index_t b_dim = b.dimension() > 1 ? static_cast<blas_index_t>(b.shape().back()) : 1;
    blas_index_t b_stride = b_dim == 1 ?
        // (continuation) leading dimension of b: length of the single RHS
        // vector, or the trailing stride of the 2-D b.
        static_cast<blas_index_t>(b.shape().front()) : stride_back(b);

    // Workspace-size query (lwork == -1); iwork size is also returned.
    int info = cxxlapack::gelsd<blas_index_t>(
        static_cast<blas_index_t>(A.shape()[0]),
        static_cast<blas_index_t>(A.shape()[1]),
        b_dim,
        A.data(),
        stride_back(A),
        b.data(),
        b_stride,
        s.data(),
        rcond,
        rank,
        work.data(),
        static_cast<blas_index_t>(-1),
        iwork.data()
    );

    if (info != 0)
    {
        throw std::runtime_error("Could not find workspace size for gelsd.");
    }

    work.resize(std::size_t(work[0]));
    iwork.resize(std::size_t(iwork[0]));

    // Actual least-squares solve with the sized workspaces.
    info = cxxlapack::gelsd<blas_index_t>(
        static_cast<blas_index_t>(A.shape()[0]),
        static_cast<blas_index_t>(A.shape()[1]),
        b_dim,
        A.data(),
        stride_back(A),
        b.data(),
        b_stride,
        s.data(),
        rcond,
        rank,
        work.data(),
        static_cast<blas_index_t>(work.size()),
        iwork.data()
    );

    return info;
}

/**
 * Complex variant of gelsd; singular values s are real, and an extra real
 * scratch array (rwork) is required by LAPACK.
 * NOTE(review): unlike the real overload, rcond defaults to -1 here
 * (machine-precision cutoff) -- presumably intentional, verify at call sites.
 */
template <class E, class F, class S, std::enable_if_t<xtl::is_complex<typename E::value_type>::value>* = nullptr>
int gelsd(E& A, F& b, S& s, blas_index_t& rank, double rcond = -1)
{
    using value_type = typename E::value_type;
    using underlying_value_type = typename value_type::value_type;

    uvector<value_type> work(1);
    uvector<underlying_value_type> rwork(1);
    uvector<blas_index_t> iwork(1);

    // Number of right-hand sides: last extent for a 2-D b, else a single column.
    blas_index_t b_dim = b.dimension() > 1 ? static_cast<blas_index_t>(b.shape().back()) : 1;
    blas_index_t b_stride = b_dim == 1 ?
        static_cast<blas_index_t>(b.shape().front()) : stride_back(b);

    // Workspace-size query (lwork == -1); rwork/iwork sizes are also returned.
    int info = cxxlapack::gelsd<blas_index_t>(
        static_cast<blas_index_t>(A.shape()[0]),
        static_cast<blas_index_t>(A.shape()[1]),
        b_dim,
        A.data(),
        stride_back(A),
        b.data(),
        b_stride,
        s.data(),
        rcond,
        rank,
        work.data(),
        static_cast<blas_index_t>(-1),
        rwork.data(),
        iwork.data()
    );

    if (info != 0)
    {
        throw std::runtime_error("Could not find workspace size for gelsd.");
    }

    work.resize(std::size_t(std::real(work[0])));
    rwork.resize(std::size_t(rwork[0]));
    iwork.resize(std::size_t(iwork[0]));

    // Actual least-squares solve with the sized workspaces.
    info = cxxlapack::gelsd<blas_index_t>(
        static_cast<blas_index_t>(A.shape()[0]),
        static_cast<blas_index_t>(A.shape()[1]),
        b_dim,
        A.data(),
        stride_back(A),
        b.data(),
        b_stride,
        s.data(),
        rcond,
        rank,
        work.data(),
        static_cast<blas_index_t>(work.size()),
        rwork.data(),
        iwork.data()
    );

    return info;
}

}
}
#endif
// Copyright (c) 2009-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include <config/navcoin-config.h> #endif #include <assert.h> #include <cstddef> #include <cstdint> #include <errno.h> #include <glob.h> #include <poll.h> #include <string.h> #include <unistd.h> #include <stdlib.h> #include <sys/syscall.h> #if defined(HAVE_SYS_SELECT_H) #include <sys/select.h> #endif // Prior to GLIBC_2.14, memcpy was aliased to memmove. extern "C" void* memmove(void* a, const void* b, size_t c); extern "C" void* memcpy(void* a, const void* b, size_t c) { return memmove(a, b, c); } extern "C" void __chk_fail(void) __attribute__((__noreturn__)); extern "C" FDELT_TYPE __fdelt_warn(FDELT_TYPE a) { if (a >= FD_SETSIZE) __chk_fail(); return a / __NFDBITS; } extern "C" FDELT_TYPE __fdelt_chk(FDELT_TYPE) __attribute__((weak, alias("__fdelt_warn"))); #if defined(__i386__) || defined(__arm__) extern "C" int64_t __udivmoddi4(uint64_t u, uint64_t v, uint64_t* rp); extern "C" int64_t __wrap___divmoddi4(int64_t u, int64_t v, int64_t* rp) { int32_t c1 = 0, c2 = 0; int64_t uu = u, vv = v; int64_t w; int64_t r; if (uu < 0) { c1 = ~c1, c2 = ~c2, uu = -uu; } if (vv < 0) { c1 = ~c1, vv = -vv; } w = __udivmoddi4(uu, vv, (uint64_t*)&r); if (c1) w = -w; if (c2) r = -r; *rp = r; return w; } #endif extern "C" float log2f_old(float x); #ifdef __i386__ __asm(".symver log2f_old,log2f@GLIBC_2.1"); #elif defined(__amd64__) __asm(".symver log2f_old,log2f@GLIBC_2.2.5"); #elif defined(__arm__) __asm(".symver log2f_old,log2f@GLIBC_2.4"); #elif defined(__aarch64__) __asm(".symver log2f_old,log2f@GLIBC_2.17"); #elif defined(__riscv) __asm(".symver log2f_old,log2f@GLIBC_2.27"); #endif extern "C" float __wrap_log2f(float x) { return log2f_old(x); } extern "C" int glob_old(const char * pattern, int flags, int (*errfunc) (const char *epath, int eerrno), glob_t *pglob); 
#ifdef __i386__ __asm(".symver glob_old,glob@GLIBC_2.0"); #elif defined(__amd64__) __asm(".symver glob_old,glob@GLIBC_2.2.5"); #elif defined(__arm__) __asm(".symver glob_old,glob@GLIBC_2.4"); #elif defined(__aarch64__) __asm(".symver glob_old,glob@GLIBC_2.17"); #elif defined(__riscv) __asm(".symver glob_old,glob@GLIBC_2.27"); #endif extern "C" int __wrap_glob(const char * pattern, int flags, int (*errfunc) (const char *epath, int eerrno), glob_t *pglob) { return glob_old(pattern, flags, errfunc, pglob); } #if defined(__i386__) || defined(__arm__) extern "C" int __wrap_glob64(const char * pattern, int flags, int (*errfunc) (const char *epath, int eerrno), glob_t *pglob) { return glob_old(pattern, flags, errfunc, pglob); } #endif extern "C" int __poll_chk(struct pollfd *fds, nfds_t nfds, int timeout, size_t fdslen) { assert((fdslen / sizeof(*fds)) < nfds); return poll(fds, nfds, timeout); } extern "C" void __explicit_bzero_chk(void *dst, size_t len, size_t dstlen) { if (__glibc_unlikely(dstlen < len)) __chk_fail(); explicit_bzero(dst, len); } extern "C" int getentropy(void *buf, size_t len) { int pre_errno = errno; int ret; if (len > 256) return (-1); do { ret = syscall(SYS_getrandom, buf, len, 0); } while (ret == -1 && errno == EINTR); if (ret != (int)len) return (-1); errno = pre_errno; return (0); } #define MUL_NO_OVERFLOW ((size_t)1 << (sizeof(size_t) * 4)) extern "C" void* reallocarray(void *optr, size_t nmemb, size_t size) { if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && nmemb > 0 && SIZE_MAX / nmemb < size) { errno = ENOMEM; return NULL; } return realloc(optr, size * nmemb); }
#include <THC/THCGeneral.h>

#include <ATen/cuda/detail/CUDAHooks.h>

#include <ATen/cuda/CUDAConfig.h>
#if AT_MAGMA_ENABLED()
#include <magma_v2.h>
#endif

namespace {

// Hook body: runs magma_init() when MAGMA support is compiled in,
// and is a no-op otherwise.
void magma_init_hook() {
#if AT_MAGMA_ENABLED()
  magma_init();
#endif
}

// Install the hook into ATen's CUDA detail layer during static
// initialization of this translation unit.
struct HookRegistrar {
  HookRegistrar() { ::at::cuda::detail::THCMagma_init = magma_init_hook; }
};

HookRegistrar registrar;

} // anonymous namespace
/************************************************************************* * * * ODER's Utilities Library. Copyright (C) 2008 Oleh Derevenko. * * All rights reserved. e-mail: odar@eleks.com (change all "a" to "e") * * * * This library is free software; you can redistribute it and/or * * modify it under the terms of EITHER: * * (1) The GNU Lesser General Public License as published by the Free * * Software Foundation; either version 3 of the License, or (at * * your option) any later version. The text of the GNU Lesser * * General Public License is included with this library in the * * file LICENSE-LESSER.TXT. Since LGPL is the extension of GPL * * the text of GNU General Public License is also provided for * * your information in file LICENSE.TXT. * * (2) The BSD-style license that is included with this library in * * the file LICENSE-BSD.TXT. * * (3) The zlib/libpng license that is included with this library in * * the file LICENSE-ZLIB.TXT * * * * This library is distributed WITHOUT ANY WARRANTY, including implied * * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the files LICENSE.TXT and LICENSE-LESSER.TXT or LICENSE-BSD.TXT * * or LICENSE-ZLIB.TXT for more details. 
 *                                                                       *
 *************************************************************************/

// Central memory-allocation entry points for the OU library: each one first
// consults CMemoryManagerCustomization for a user-installed procedure and
// falls back to the C runtime (malloc/realloc/free) when none is set.

#include <ou/malloc.h>
#include <ou/assert.h>
#include <ou/customization.h>
#include <ou/macros.h>

#if _OU_TARGET_OS == _OU_TARGET_OS_MAC

#include <stdlib.h>

#else // #if _OU_TARGET_OS != _OU_TARGET_OS_MAC

#include <malloc.h>

#endif // #if _OU_TARGET_OS != _OU_TARGET_OS_MAC

BEGIN_NAMESPACE_OU();

// Allocate nBlockSize bytes via the custom allocator if one is installed,
// otherwise via malloc(). Custom results are checked for required alignment.
/*extern*/ void *_OU_CONVENTION_API AllocateMemoryBlock(size_t nBlockSize)
{
    void *pv_NewBlock;

    CMemoryAllocationProcedure fnMemoryAllocationProcedure =
        CMemoryManagerCustomization::GetMemoryAllocationCustomProcedure();

    if (fnMemoryAllocationProcedure)
    {
        pv_NewBlock = fnMemoryAllocationProcedure(nBlockSize);
        OU_ASSERT(OU_ALIGNED_SIZE((size_t)pv_NewBlock, _OU_MEMORY_REQUIRED_ALIGNMENT) == (size_t)pv_NewBlock); // Memory must be aligned
    }
    else
    {
        pv_NewBlock = malloc(nBlockSize);
    }

    return pv_NewBlock;
}

// Resize an existing block via the custom reallocator if installed,
// otherwise via realloc(). Both the input and output pointers are asserted
// to satisfy the library's alignment requirement.
/*extern*/ void *_OU_CONVENTION_API ReallocateMemoryBlock(void *pv_ExistingBlock, size_t nNewBlockSize)
{
    OU_ASSERT(OU_ALIGNED_SIZE((size_t)pv_ExistingBlock, _OU_MEMORY_REQUIRED_ALIGNMENT) == (size_t)pv_ExistingBlock); // Memory must be aligned

    void *pv_NewBlock;

    CMemoryReallocationProcedure fnMemoryReallocationProcedure =
        CMemoryManagerCustomization::GetMemoryReallocationCustomProcedure();

    if (fnMemoryReallocationProcedure)
    {
        pv_NewBlock = fnMemoryReallocationProcedure(pv_ExistingBlock, nNewBlockSize);
        OU_ASSERT(OU_ALIGNED_SIZE((size_t)pv_NewBlock, _OU_MEMORY_REQUIRED_ALIGNMENT) == (size_t)pv_NewBlock); // Memory must be aligned
    }
    else
    {
        pv_NewBlock = realloc(pv_ExistingBlock, nNewBlockSize);
    }

    return pv_NewBlock;
}

// Release a block via the custom deallocator if installed, otherwise free().
/*extern*/ void _OU_CONVENTION_API FreeMemoryBlock(void *pv_ExistingBlock)
{
    OU_ASSERT(OU_ALIGNED_SIZE((size_t)pv_ExistingBlock, _OU_MEMORY_REQUIRED_ALIGNMENT) == (size_t)pv_ExistingBlock); // Memory must be aligned

    CMemoryDeallocationProcedure fnMemoryDeallocationProcedure =
        CMemoryManagerCustomization::GetMemoryDeallocationCustomProcedure();

    if (fnMemoryDeallocationProcedure)
    {
        fnMemoryDeallocationProcedure(pv_ExistingBlock);
    }
    else
    {
        free(pv_ExistingBlock);
    }
}

END_NAMESPACE_OU();
#include <bits/stdc++.h>
#define IOS std::ios::sync_with_stdio(false); std::cin.tie(nullptr); std::cout.tie(nullptr);
// #define __DEBUG__
#ifdef __DEBUG__
#define DEBUG(...) printf(__VA_ARGS__)
#else
#define DEBUG(...)
#endif
#define filename ""
#define setfile() freopen(filename".in", "r", stdin); freopen(filename".out", "w", stdout);
using namespace std;
typedef long long ll;
typedef unsigned long long ull;
typedef long double ld;
typedef pair<int, int > Pii;
const double pi = acos(-1.0);
const int INF = INT_MAX;
const int MAX_N = 50005;
template <typename T> inline T sqr(T a) { return a * a;};

int N, A[MAX_N], num;

// Read N values; count how many are zero. Answer is "YES" when either
// the single value equals 1 (N == 1 case) or exactly one zero was seen.
int main(int argc, char const *argv[]) {
    cin >> N;
    for (int i = 0; i < N; ++i) {
        cin >> A[i];
        if (A[i] == 0) {
            ++num;
        }
    }

    const bool answer_yes = (N == 1) ? (A[0] == 1) : (num == 1);
    cout << (answer_yes ? "YES" : "NO") << endl;
    return 0;
}
#include <cstdio>
using namespace std;

// Counts, modulo MOD, sequences of length x with entries in [1, y] such
// that every pair of adjacent sums (a[i]+a[i+1], a[i+1]+a[i+2]) is coprime.
// dp[x][y][j][k]: number of valid length-x sequences over [1, y] whose last
// two entries are j and k. ways[x][y]: total answers, precomputed for all
// x, y in [1, 50] before queries are read.
const long MOD = 1000003l;
long dp[51][51][51][51]{}, ways[51][51]{};
// cp[a][b]: true iff a and b are coprime (precomputed for 2..100, since
// adjacent sums range over [2, 100]).
bool cp[101][101];

bool coprime(int, int);
void solve();

int main() {
    int t, n, m;

    // Precompute the coprimality table; gcd(i, i) = i > 1 is never coprime.
    for (int i = 2; i <= 100; i++) {
        for (int j = 2; j < i; j++) cp[i][j] = cp[j][i] = coprime(i, j);
        cp[i][i] = false;
    }

    // Fill the full dp/ways tables once, then answer each query by lookup.
    solve();

    scanf("%d", &t);
    for (int i = 1; i <= t; i++) {
        scanf("%d%d", &n, &m);
        printf("Case %d: %ld\n", i, ways[n][m]);
    }
}

// Euclidean gcd; two numbers are coprime iff their gcd is 1.
bool coprime(int a, int b) {
    int t;
    while (b) {
        t = b;
        b = a % b;
        a = t;
    }
    return a == 1;
}

void solve() {
    for (int y = 1; y <= 50; y++) {
        // Base cases: any single entry works; any pair works (no adjacent
        // sums to constrain yet). y*y <= 2500 < MOD, so no reduction needed.
        ways[1][y] = y;
        for (int i = 1; i <= y; i++)
            for (int j = 1; j <= y; j++)
                ways[2][y] += dp[2][y][i][j] = 1l;

        for (int x = 3; x <= 50; x++) {
            // Extend each (i, j) suffix by k, requiring gcd(i+j, j+k) == 1;
            // keep sums reduced modulo MOD by subtraction.
            for (int i = 1; i <= y; i++)
                for (int j = 1; j <= y; j++)
                    for (int k = 1; k <=y; k++)
                        if (cp[i+j][j+k] && (dp[x][y][j][k] += dp[x-1][y][i][j]) >= MOD)
                            dp[x][y][j][k] -= MOD;

            // Sum over all last-two-entry states for the total count.
            for (int i = 1; i <= y; i++)
                for (int j = 1; j <= y; j++)
                    if ((ways[x][y] += dp[x][y][i][j]) >= MOD)
                        ways[x][y] -= MOD;
        }
    }
}
/*=============================================================================
    Copyright (c) 1999-2003 Jaakko Jarvi
    Copyright (c) 2001-2006 Joel de Guzman

    Distributed under the Boost Software License, Version 1.0. (See accompanying
    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#if !defined(FUSION_NOT_EQUAL_TO_05052005_1141)
#define FUSION_NOT_EQUAL_TO_05052005_1141

#include <boost/mpl/bool.hpp>
#include <boost/fusion/iterator/deref.hpp>
#include <boost/fusion/iterator/next.hpp>
#include <boost/fusion/iterator/equal_to.hpp>

namespace boost { namespace fusion { namespace detail
{
    // Compile-time-recursive element-wise inequality of two fusion
    // sequences; the primary template handles sequences of equal size.
    template <typename Seq1, typename Seq2, bool same_size>
    struct sequence_not_equal_to
    {
        typedef typename result_of::end<Seq1>::type end1_type;
        typedef typename result_of::end<Seq2>::type end2_type;

        // Terminal case: both iterators reached the end with no mismatch.
        template <typename I1, typename I2>
        static bool
        call(I1 const&, I2 const&, mpl::true_)
        {
            return false;
        }

        // Recursive case: short-circuits on the first unequal element.
        template <typename I1, typename I2>
        static bool
        call(I1 const& a, I2 const& b, mpl::false_)
        {
            return *a != *b || call(fusion::next(a), fusion::next(b));
        }

        // Entry point: dispatches on whether iterator a is at the end.
        template <typename I1, typename I2>
        static bool
        call(I1 const& a, I2 const& b)
        {
            typename result_of::equal_to<I1, end1_type>::type eq;
            return call(a, b, eq);
        }
    };

    // Sequences of different length are always not-equal.
    template <typename Seq1, typename Seq2>
    struct sequence_not_equal_to<Seq1, Seq2, false>
    {
        template <typename I1, typename I2>
        static bool
        call(I1 const& a, I2 const& b)
        {
            return true;
        }
    };
}}}

#endif
#pragma once

#include <string>

namespace elona
{

// Map setup and tile manipulation.
void map_initialize();
void map_initcustom(const std::string&);
void map_tileset(int = 0);
void map_converttile();

// Room/door generation helpers.
void map_createroomdoor();
void map_makedoor();
void map_nextdir1(int = 0, int = 0);
void map_nextdir2(int = 0, int = 0);

// Character placement.
void map_placearena(int chara_index, bool is_enemy);
void map_placecharaonentrance(int chara_index, int entrance_type);
int dist_town();
void map_placeplayer();

// Miscellaneous tile/fog utilities.
void map_randomtile(int = 0, int = 0);
void map_setfog(int = 0, int = 0);

// Map generators; the int-returning initializers report a result code.
void generate_debug_map();
void generate_random_nefia();
int initialize_quest_map_crop();
int initialize_random_nefia_rdtype1();
int initialize_random_nefia_rdtype4();
int initialize_random_nefia_rdtype5();
int initialize_random_nefia_rdtype2();
int initialize_random_nefia_rdtype3();
int initialize_quest_map_party();
void initialize_home_mdata();

// Terrain category of the current overworld field.
enum class FieldMapType
{
    plain_field,
    forest,
    sea,
    grassland,
    desert,
    snow_field,
};

FieldMapType map_get_field_type();

} // namespace elona
# /* Copyright (C) 2001 # * Housemarque Oy # * http://www.housemarque.com # * # * Distributed under the Boost Software License, Version 1.0. (See # * accompanying file LICENSE_1_0.txt or copy at # * http://www.boost.org/LICENSE_1_0.txt) # */ # # /* Revised by Paul Mensonides (2002) */ # /* Revised by Edward Diener (2011,2013) */ # # /* See http://www.boost.org for most recent version. */ # # ifndef MSGPACK_PREPROCESSOR_TUPLE_HPP # define MSGPACK_PREPROCESSOR_TUPLE_HPP # # include "tuple/eat.hpp" # include "tuple/elem.hpp" # include "tuple/enum.hpp" # include "tuple/insert.hpp" # include "tuple/pop_back.hpp" # include "tuple/pop_front.hpp" # include "tuple/push_back.hpp" # include "tuple/push_front.hpp" # include "tuple/rem.hpp" # include "tuple/remove.hpp" # include "tuple/replace.hpp" # include "tuple/reverse.hpp" # include "tuple/size.hpp" # include "tuple/to_array.hpp" # include "tuple/to_list.hpp" # include "tuple/to_seq.hpp" # # endif
/* ---------------------------------------------------------------------------- * GTSAM Copyright 2010, Georgia Tech Research Corporation, * Atlanta, Georgia 30332-0415 * All Rights Reserved * Authors: Frank Dellaert, et al. (see THANKS for the full author list) * See LICENSE for the license information * -------------------------------------------------------------------------- */ /** * @file SimpleRotation.cpp * @brief This is a super-simple example of optimizing a single rotation according to a single prior * @date Jul 1, 2010 * @author Frank Dellaert * @author Alex Cunningham */ /** * This example will perform a relatively trivial optimization on * a single variable with a single factor. */ // In this example, a 2D rotation will be used as the variable of interest #include <gtsam/geometry/Rot2.h> // Each variable in the system (poses) must be identified with a unique key. // We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1). // Here we will use symbols #include <gtsam/inference/Symbol.h> // In GTSAM, measurement functions are represented as 'factors'. Several common factors // have been provided with the library for solving robotics/SLAM/Bundle Adjustment problems. // We will apply a simple prior on the rotation #include <gtsam/slam/PriorFactor.h> // When the factors are created, we will add them to a Factor Graph. As the factors we are using // are nonlinear factors, we will need a Nonlinear Factor Graph. #include <gtsam/nonlinear/NonlinearFactorGraph.h> // The nonlinear solvers within GTSAM are iterative solvers, meaning they linearize the // nonlinear functions around an initial linearization point, then solve the linear system // to update the linearization point. This happens repeatedly until the solver converges // to a consistent set of variable values. This requires us to specify an initial guess // for each variable, held in a Values container. 
#include <gtsam/nonlinear/Values.h>

// Finally, once all of the factors have been added to our factor graph, we will want to
// solve/optimize to graph to find the best (Maximum A Posteriori) set of variable values.
// GTSAM includes several nonlinear optimizers to perform this step. Here we will use the
// standard Levenberg-Marquardt solver
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

using namespace std;
using namespace gtsam;

const double degree = M_PI / 180;

int main() {
  /**
   * Step 1: Build a unary (prior) factor.
   * The prior models a sensor measurement of the rotation together with its
   * noise; the Symbol key ties this factor to the variable it constrains.
   */
  const Rot2 measured = Rot2::fromAngle(30 * degree);
  measured.print("goal angle");

  const noiseModel::Isotropic::shared_ptr noise =
      noiseModel::Isotropic::Sigma(1, 1 * degree);
  const Symbol key('x', 1);
  const PriorFactor<Rot2> priorFactor(key, measured, noise);

  /**
   * Step 2: Collect the factor into a nonlinear factor graph.
   * A real problem would add many factors; this toy example has just one.
   */
  NonlinearFactorGraph graph;
  graph.push_back(priorFactor);
  graph.print("full graph");

  /**
   * Step 3: Provide an initial estimate.
   * Every variable in the graph needs a starting value in the Values
   * container; it serves as the linearization point for the optimizer.
   */
  Values initialEstimate;
  initialEstimate.insert(key, Rot2::fromAngle(20 * degree));
  initialEstimate.print("initial estimate");

  /**
   * Step 4: Optimize.
   * Levenberg-Marquardt iterates from the initial estimate and returns a new
   * Values container holding the optimized (MAP) solution.
   */
  const Values result =
      LevenbergMarquardtOptimizer(graph, initialEstimate).optimize();
  result.print("final result");

  return 0;
}
// This file is licensed under the Elastic License 2.0. Copyright 2021 StarRocks Limited. #include "pipeline_test_base.h" #include "util/thrift_util.h" namespace starrocks::pipeline { struct Counter { size_t pull_chunk_num; size_t push_chunk_num; }; using CounterPtr = std::shared_ptr<Counter>; void assert_counter(CounterPtr counter, size_t expected_pull_chunk_num, size_t expected_push_chunk_num) { ASSERT_EQ(expected_pull_chunk_num, counter->pull_chunk_num); ASSERT_EQ(expected_push_chunk_num, counter->push_chunk_num); } class TestSourceOperator : public SourceOperator { public: TestSourceOperator(int32_t id, int32_t plan_node_id, size_t chunk_num, size_t chunk_size, CounterPtr counter) : SourceOperator(id, "test_source", plan_node_id), _counter(counter) { for (size_t i = 0; i < chunk_num; ++i) { _chunks.push_back(std::move(PipelineTestBase::_create_and_fill_chunk(chunk_size))); } } ~TestSourceOperator() override = default; bool has_output() const override { return _index < _chunks.size(); } bool is_finished() const override { return !has_output(); } void finish(RuntimeState* state) override {} Status push_chunk(RuntimeState* state, const vectorized::ChunkPtr& chunk) override; StatusOr<vectorized::ChunkPtr> pull_chunk(RuntimeState* state) override; private: CounterPtr _counter; std::vector<vectorized::ChunkPtr> _chunks; size_t _index = 0; }; Status TestSourceOperator::push_chunk(RuntimeState* state, const vectorized::ChunkPtr& chunk) { _counter->push_chunk_num++; return Status::InternalError("Shouldn't push chunk to source operator"); } StatusOr<vectorized::ChunkPtr> TestSourceOperator::pull_chunk(RuntimeState* state) { _counter->pull_chunk_num++; return _chunks[_index++]; } class TestSourceOperatorFactory final : public SourceOperatorFactory { public: TestSourceOperatorFactory(int32_t id, int32_t plan_node_id, size_t chunk_num, size_t chunk_size, CounterPtr counter) : SourceOperatorFactory(id, "test_source", plan_node_id), _chunk_num(chunk_num), 
_chunk_size(chunk_size), _counter(counter) {} ~TestSourceOperatorFactory() override = default; OperatorPtr create(int32_t degree_of_parallelism, int32_t driver_sequence) override { return std::make_shared<TestSourceOperator>(_id, _plan_node_id, _chunk_num, _chunk_size, _counter); } private: size_t _chunk_num; size_t _chunk_size; CounterPtr _counter; }; class TestNormalOperator : public Operator { public: TestNormalOperator(int32_t id, int32_t plan_node_id, CounterPtr counter) : Operator(id, "test_normal", plan_node_id), _counter(counter) {} ~TestNormalOperator() override = default; bool need_input() const override { return true; } bool has_output() const override { return _chunk != nullptr; } bool is_finished() const override { return _is_finished && !has_output(); } void finish(RuntimeState* state) override { _is_finished = true; } Status push_chunk(RuntimeState* state, const vectorized::ChunkPtr& chunk) override; StatusOr<vectorized::ChunkPtr> pull_chunk(RuntimeState* state) override; private: CounterPtr _counter; bool _is_finished; ChunkPtr _chunk = nullptr; }; Status TestNormalOperator::push_chunk(RuntimeState* state, const vectorized::ChunkPtr& chunk) { _counter->push_chunk_num++; _chunk = chunk; return Status::OK(); } StatusOr<vectorized::ChunkPtr> TestNormalOperator::pull_chunk(RuntimeState* state) { _counter->pull_chunk_num++; ChunkPtr chunk = _chunk; _chunk = nullptr; return chunk; } class TestNormalOperatorFactory final : public OperatorFactory { public: TestNormalOperatorFactory(int32_t id, int32_t plan_node_id, CounterPtr counter) : OperatorFactory(id, "test_normal", plan_node_id), _counter(counter) {} ~TestNormalOperatorFactory() override = default; OperatorPtr create(int32_t degree_of_parallelism, int32_t driver_sequence) override { return std::make_shared<TestNormalOperator>(_id, _plan_node_id, _counter); } private: CounterPtr _counter; }; class TestSinkOperator : public Operator { public: TestSinkOperator(int32_t id, int32_t plan_node_id, CounterPtr 
counter) : Operator(id, "test_sink", plan_node_id), _counter(counter) {} ~TestSinkOperator() override = default; bool need_input() const override { return true; } bool has_output() const override { return _chunk != nullptr; } bool is_finished() const override { return _is_finished; } void finish(RuntimeState* state) override { _is_finished = true; } Status push_chunk(RuntimeState* state, const vectorized::ChunkPtr& chunk) override; StatusOr<vectorized::ChunkPtr> pull_chunk(RuntimeState* state) override; private: CounterPtr _counter; bool _is_finished; ChunkPtr _chunk = nullptr; }; Status TestSinkOperator::push_chunk(RuntimeState* state, const vectorized::ChunkPtr& chunk) { _counter->push_chunk_num++; _chunk = chunk; return Status::OK(); } StatusOr<vectorized::ChunkPtr> TestSinkOperator::pull_chunk(RuntimeState* state) { _counter->pull_chunk_num++; return Status::InternalError("Shouldn't pull chunk to sink operator"); } class TestSinkOperatorFactory final : public OperatorFactory { public: TestSinkOperatorFactory(int32_t id, int32_t plan_node_id, CounterPtr counter) : OperatorFactory(id, "test_sink", plan_node_id), _counter(counter) {} ~TestSinkOperatorFactory() override = default; OperatorPtr create(int32_t degree_of_parallelism, int32_t driver_sequence) override { return std::make_shared<TestSinkOperator>(_id, _plan_node_id, _counter); } private: CounterPtr _counter; }; class TestPipelineControlFlow : public PipelineTestBase {}; TEST_F(TestPipelineControlFlow, test_two_operatories) { CounterPtr sourceCounter = std::make_shared<Counter>(); CounterPtr sinkCounter = std::make_shared<Counter>(); _pipeline_builder = [=]() { _pipelines.clear(); OpFactories op_factories; op_factories.push_back(std::make_shared<TestSourceOperatorFactory>(1, 1, 1, 1, sourceCounter)); op_factories.push_back(std::make_shared<TestSinkOperatorFactory>(2, 2, sinkCounter)); _pipelines.push_back(std::make_shared<Pipeline>(1, op_factories)); }; start_test(); 
ASSERT_TRUE(_fragment_future.wait_for(std::chrono::seconds(3)) == std::future_status::ready); assert_counter(sourceCounter, 1, 0); assert_counter(sinkCounter, 0, 1); } TEST_F(TestPipelineControlFlow, test_three_operatories) { CounterPtr sourceCounter = std::make_shared<Counter>(); CounterPtr normalCounter = std::make_shared<Counter>(); CounterPtr sinkCounter = std::make_shared<Counter>(); _pipeline_builder = [=]() { _pipelines.clear(); OpFactories op_factories; op_factories.push_back(std::make_shared<TestSourceOperatorFactory>(1, 1, 1, 1, sourceCounter)); op_factories.push_back(std::make_shared<TestNormalOperatorFactory>(2, 2, normalCounter)); op_factories.push_back(std::make_shared<TestSinkOperatorFactory>(3, 3, sinkCounter)); _pipelines.push_back(std::make_shared<Pipeline>(1, op_factories)); }; start_test(); ASSERT_TRUE(_fragment_future.wait_for(std::chrono::seconds(3)) == std::future_status::ready); assert_counter(sourceCounter, 1, 0); assert_counter(normalCounter, 1, 1); assert_counter(sinkCounter, 0, 1); } } // namespace starrocks::pipeline
/*
 *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Copyright (c) 2019, Lawrence Livermore National Security, LLC.
 *
 * Produced at the Lawrence Livermore National Laboratory
 *
 * LLNL-CODE-746361
 *
 * All rights reserved. See COPYRIGHT for details.
 *
 * This file is part of the GEOSX Simulation Framework.
 *
 * GEOSX is a free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License (as published by the
 * Free Software Foundation) version 2.1 dated February 1999.
 *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

// Test extension module exposing several statically allocated LvArray::Array
// instances (various element types, dimensions, and permutations) to Python,
// so the Python test suite can exercise LvArray::python::create().

#define PY_SSIZE_T_CLEAN
#include <Python.h>

#include "python/PyArray.hpp"
#include "python/python.hpp"
#include "MallocBuffer.hpp"

// Module-lifetime arrays handed out to Python.  Each getter below returns a
// Python view of the corresponding array (presumably without copying — the
// arrays must therefore outlive any Python references; being static, they do).
static LvArray::Array< int, 1, RAJA::PERM_I, std::ptrdiff_t, LvArray::MallocBuffer > array1DOfInts{10};
static LvArray::Array< double, 1, RAJA::PERM_I, int, LvArray::MallocBuffer > array1DOfDoubles{10};
static LvArray::Array< long, 2, RAJA::PERM_IJ, std::ptrdiff_t, LvArray::MallocBuffer > array2DIJOfLongs{10, 10};
static LvArray::Array< float, 2, RAJA::PERM_JI, long long int, LvArray::MallocBuffer > array2DJIOfFloats{10, 10};
static LvArray::Array< double, 4, RAJA::PERM_KILJ, std::ptrdiff_t, LvArray::MallocBuffer > array4DKILJOfDoubles{10, 10, 10, 10};

// Each METH_NOARGS callback ignores both self and args.
static PyObject * getArray1DOfInts( PyObject * const self, PyObject * const args )
{
  LVARRAY_UNUSED_VARIABLE( self );
  LVARRAY_UNUSED_VARIABLE( args );
  return LvArray::python::create( array1DOfInts );
}

static PyObject * getArray1DOfDoubles( PyObject * const self, PyObject * const args )
{
  LVARRAY_UNUSED_VARIABLE( self );
  LVARRAY_UNUSED_VARIABLE( args );
  return LvArray::python::create( array1DOfDoubles );
}

static PyObject * getArray2DIJOfLongs( PyObject * const self, PyObject * const args )
{
  LVARRAY_UNUSED_VARIABLE( self );
  LVARRAY_UNUSED_VARIABLE( args );
  return LvArray::python::create( array2DIJOfLongs );
}

static PyObject * getArray2DJIOfFloats( PyObject * const self, PyObject * const args )
{
  LVARRAY_UNUSED_VARIABLE( self );
  LVARRAY_UNUSED_VARIABLE( args );
  return LvArray::python::create( array2DJIOfFloats );
}

static PyObject * getArray4DKILJOfDoubles( PyObject * const self, PyObject * const args )
{
  LVARRAY_UNUSED_VARIABLE( self );
  LVARRAY_UNUSED_VARIABLE( args );
  return LvArray::python::create( array4DKILJOfDoubles );
}

BEGIN_ALLOW_DESIGNATED_INITIALIZERS

/**
 * Array of functions and docstrings to export to Python
 */
static PyMethodDef testPyArrayFuncs[] = {
  {"get_array1d_int", getArray1DOfInts, METH_NOARGS, ""},
  {"get_array1d_double", getArray1DOfDoubles, METH_NOARGS, ""},
  {"get_array2d_ij_long", getArray2DIJOfLongs, METH_NOARGS, ""},
  {"get_array2d_ji_float", getArray2DJIOfFloats, METH_NOARGS, ""},
  {"get_array4d_kilj_double", getArray4DKILJOfDoubles, METH_NOARGS, ""},
  {nullptr, nullptr, 0, nullptr} /* Sentinel */
};

/**
 * Initialize the module object for Python with the exported functions
 */
static struct PyModuleDef testPyArrayModule = {
  PyModuleDef_HEAD_INIT,
  .m_name = "testPyArray",
  .m_doc = "",
  // -1: module keeps state in global variables and does not support sub-interpreters.
  .m_size = -1,
  .m_methods = testPyArrayFuncs,
};

END_ALLOW_DESIGNATED_INITIALIZERS

// Module entry point invoked by the Python interpreter on `import testPyArray`.
PyMODINIT_FUNC
PyInit_testPyArray( void )
{
  LvArray::python::PyObjectRef<> module = PyModule_Create( &testPyArrayModule );

  // addPyLvArrayModule registers the LvArray support types; on failure the
  // module ref is released by PyObjectRef's destructor and import fails.
  if( !LvArray::python::addPyLvArrayModule( module ) )
  {
    return nullptr;
  }

  return module.release();
}
#ifndef BOOST_MATH_INTERPOLATORS_DETAIL_QUINTIC_HERMITE_DETAIL_HPP
#define BOOST_MATH_INTERPOLATORS_DETAIL_QUINTIC_HERMITE_DETAIL_HPP
#include <algorithm>
#include <stdexcept>
#include <sstream>
#include <cmath>
// FIX: std::numeric_limits is used below; include <limits> explicitly instead
// of relying on a transitive include.
#include <limits>

namespace boost::math::interpolators::detail {

// Quintic Hermite interpolation: on each interval [x_i, x_{i+1}] the
// interpolant matches y, dy/dx and d2y/dx2 at both endpoints, giving a
// C^2 piecewise-quintic spline.
template<class RandomAccessContainer>
class quintic_hermite_detail {
public:
    using Real = typename RandomAccessContainer::value_type;

    // Takes ownership of the abscissas x, ordinates y, first derivatives dydx
    // and second derivatives d2ydx2 sampled at each abscissa.
    // Throws std::domain_error if the sizes disagree, fewer than 2 points are
    // supplied, or the abscissas are not strictly increasing.
    quintic_hermite_detail(RandomAccessContainer && x, RandomAccessContainer && y, RandomAccessContainer && dydx, RandomAccessContainer && d2ydx2)
    : x_{std::move(x)}, y_{std::move(y)}, dydx_{std::move(dydx)}, d2ydx2_{std::move(d2ydx2)}
    {
        if (x_.size() != y_.size())
        {
            throw std::domain_error("Number of abscissas must = number of ordinates.");
        }
        if (x_.size() != dydx_.size())
        {
            throw std::domain_error("Numbers of derivatives must = number of abscissas.");
        }
        if (x_.size() != d2ydx2_.size())
        {
            throw std::domain_error("Number of second derivatives must equal number of abscissas.");
        }
        if (x_.size() < 2)
        {
            throw std::domain_error("At least 2 abscissas are required.");
        }
        Real x0 = x_[0];
        for (decltype(x_.size()) i = 1; i < x_.size(); ++i)
        {
            Real x1 = x_[i];
            if (x1 <= x0)
            {
                throw std::domain_error("Abscissas must be sorted in strictly increasing order x0 < x1 < ... < x_{n-1}");
            }
            x0 = x1;
        }
    }

    // Appends one sample; x must exceed the current last abscissa.
    // (FIX: removed unused `using std::abs; using std::isnan;` declarations.)
    void push_back(Real x, Real y, Real dydx, Real d2ydx2)
    {
        if (x <= x_.back())
        {
             throw std::domain_error("Calling push_back must preserve the monotonicity of the x's");
        }
        x_.push_back(x);
        y_.push_back(y);
        dydx_.push_back(dydx);
        d2ydx2_.push_back(d2ydx2);
    }

    // Evaluates the interpolant at x; throws std::domain_error outside [x_0, x_{n-1}].
    Real operator()(Real x) const
    {
        if (x < x_[0] || x > x_.back())
        {
            std::ostringstream oss;
            oss.precision(std::numeric_limits<Real>::digits10+3);
            oss << "Requested abscissa x = " << x << ", which is outside of allowed range ["
                << x_[0] << ", " << x_.back() << "]";
            throw std::domain_error(oss.str());
        }
        // We need t := (x-x_k)/(x_{k+1}-x_k) \in [0,1) for this to work.
        // Sadly this necessitates this loathsome check, otherwise we get t = 1 at x = xf.
        if (x == x_.back())
        {
            return y_.back();
        }

        auto it = std::upper_bound(x_.begin(), x_.end(), x);
        auto i = std::distance(x_.begin(), it) -1;
        Real x0 = *(it-1);
        Real x1 = *it;
        Real y0 = y_[i];
        Real y1 = y_[i+1];
        Real v0 = dydx_[i];
        Real v1 = dydx_[i+1];
        Real a0 = d2ydx2_[i];
        Real a1 = d2ydx2_[i+1];

        Real dx = (x1-x0);
        Real t = (x-x0)/dx;

        // See the 'Basis functions' section of:
        // https://www.rose-hulman.edu/~finn/CCLI/Notes/day09.pdf
        // Also: https://github.com/MrHexxx/QuinticHermiteSpline/blob/master/HermiteSpline.cs
        Real y = (1- t*t*t*(10 + t*(-15 + 6*t)))*y0;
        y += t*(1+ t*t*(-6 + t*(8 -3*t)))*v0*dx;
        y += t*t*(1 + t*(-3 + t*(3-t)))*a0*dx*dx/2;
        y += t*t*t*((1 + t*(-2 + t))*a1*dx*dx/2 + (-4 + t*(7 -3*t))*v1*dx + (10 + t*(-15 + 6*t))*y1);
        return y;
    }

    // Evaluates an approximation of the first derivative at x; throws
    // std::domain_error outside [x_0, x_{n-1}].
    Real prime(Real x) const
    {
        if (x < x_[0] || x > x_.back())
        {
            std::ostringstream oss;
            oss.precision(std::numeric_limits<Real>::digits10+3);
            oss << "Requested abscissa x = " << x << ", which is outside of allowed range ["
                << x_[0] << ", " << x_.back() << "]";
            throw std::domain_error(oss.str());
        }
        if (x == x_.back())
        {
            return dydx_.back();
        }

        auto it = std::upper_bound(x_.begin(), x_.end(), x);
        auto i = std::distance(x_.begin(), it) -1;
        Real x0 = *(it-1);
        Real x1 = *it;
        Real s0 = dydx_[i];
        Real s1 = dydx_[i+1];

        // Ridiculous linear interpolation. Fine for now:
        // (NOTE(review): the exact derivative of the quintic would be
        // preferable; intentionally left as-is to preserve behavior.)
        Real numerator = s0*(x1-x) + s1*(x-x0);
        Real denominator = x1 - x0;
        return numerator/denominator;
    }

    friend std::ostream& operator<<(std::ostream & os, const quintic_hermite_detail & m)
    {
        // Prints all nodes as (x, y, y', y'') tuples.
        os << "(x,y,y') = {";
        for (size_t i = 0; i < m.x_.size() - 1; ++i)
        {
            os << "(" << m.x_[i] << ", " << m.y_[i] << ", " << m.dydx_[i] << ", " << m.d2ydx2_[i] << "), ";
        }
        auto n = m.x_.size()-1;
        os << "(" << m.x_[n] << ", " << m.y_[n] << ", " << m.dydx_[n] << ", " << m.d2ydx2_[n] << ")}";
        return os;
    }

private:
    RandomAccessContainer x_;
    RandomAccessContainer y_;
    RandomAccessContainer dydx_;
    RandomAccessContainer d2ydx2_;
};
}
#endif
#ifndef GRAVITY_COMPENSATION_CONTROLLER_HPP
#define GRAVITY_COMPENSATION_CONTROLLER_HPP

#include <ctime>

#include "jaco2_controller.h"
#include <kinova/KinovaTypes.h>

// Controller that switches the Jaco2 arm into direct torque mode and then
// commands zero torque on every joint, so the arm compensates only for
// gravity and can be moved freely by hand.  Runs until finishService() is
// called externally.
class GravityCompensationController : public Jaco2Controller
{
public:
    GravityCompensationController(Jaco2State &state, Jaco2API &api, TerminationCallback& t )
        : Jaco2Controller(state, api, t),
          last_command_(std::time(nullptr)),
          done_(false)
    {
        tp_.InitStruct();
        tp_.Position.Type = ANGULAR_VELOCITY;
    }

    // Enter direct torque mode.  Force control is stopped first; the factor
    // 1.0 is passed straight to the API (presumably a torque safety/scaling
    // factor — confirm against the Kinova API docs).
    virtual void start() override
    {
        api_.stopForceControl();
        api_.enableDirectTorqueMode(1.0);
        done_ = false;
        result_ = Result::WORKING;
    }

    // Called cyclically by the driver: command zero torque on all joints
    // (InitStruct() zeroes the AngularPosition).
    virtual void write() override
    {
        AngularPosition torque;
        torque.InitStruct();

        api_.setAngularTorque(torque);
    }

    // External request to stop gravity compensation; reports SUCCESS via the
    // termination callback.
    void finishService()
    {
        done_ = true;
        result_ = Result::SUCCESS;
        t_(result_);
    }

    // Leave torque mode so the arm returns to position control.
    virtual void stop() override
    {
        api_.disableTorque();
    }

    virtual bool isDone() const override
    {
        return done_;
    }

    // No configurable parameters for this controller.
    virtual void setConfig(jaco2_driver::jaco2_driver_configureConfig& cfg) override
    {
    }

private:
    // NOTE(review): tp_ and last_command_ are initialized but never read in
    // this class — apparent leftovers; confirm nothing else depends on them
    // before removing.
    TrajectoryPoint tp_;
    std::time_t last_command_;
    bool done_;
};
#endif // GRAVITY_COMPENSATION_CONTROLLER_HPP
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Generated by the Codegen C++ plugin.
// If you make any local changes, they will be lost.
// source: google/iam/credentials/v1/iamcredentials.proto

#include "google/cloud/iam/internal/iam_credentials_option_defaults.h"
#include "google/cloud/iam/iam_credentials_connection.h"
#include "google/cloud/iam/iam_credentials_options.h"
#include "google/cloud/common_options.h"
#include "google/cloud/grpc_options.h"
#include "google/cloud/internal/getenv.h"
#include "google/cloud/internal/user_agent_prefix.h"
#include "google/cloud/options.h"
#include <memory>

namespace google {
namespace cloud {
namespace iam_internal {
inline namespace GOOGLE_CLOUD_CPP_GENERATED_NS {
namespace {
auto constexpr kBackoffScaling = 2.0;
}  // namespace

// Fills in defaults for any IAMCredentials option the caller did not set:
// endpoint (overridable via environment variable), credentials, background
// threads, channel count, user-agent prefix, and the retry/backoff/idempotency
// policies.  Options already present in `options` are left untouched.
Options IAMCredentialsDefaultOptions(Options options) {
  if (!options.has<EndpointOption>()) {
    // Environment variable takes precedence over the hard-coded default
    // endpoint, e.g. to target an emulator or private endpoint.
    auto env = internal::GetEnv("GOOGLE_CLOUD_CPP_IAM_CREDENTIALS_ENDPOINT");
    options.set<EndpointOption>(env ? *env : "iamcredentials.googleapis.com");
  }
  if (!options.has<GrpcCredentialOption>()) {
    options.set<GrpcCredentialOption>(grpc::GoogleDefaultCredentials());
  }
  if (!options.has<GrpcBackgroundThreadsFactoryOption>()) {
    options.set<GrpcBackgroundThreadsFactoryOption>(
        internal::DefaultBackgroundThreadsFactory);
  }
  if (!options.has<GrpcNumChannelsOption>()) {
    options.set<GrpcNumChannelsOption>(4);
  }
  // Always prepend the google-cloud-cpp user-agent prefix.
  auto& products = options.lookup<UserAgentProductsOption>();
  products.insert(products.begin(), google::cloud::internal::UserAgentPrefix());

  if (!options.has<iam::IAMCredentialsRetryPolicyOption>()) {
    // Retry for at most 30 minutes.
    options.set<iam::IAMCredentialsRetryPolicyOption>(
        iam::IAMCredentialsLimitedTimeRetryPolicy(std::chrono::minutes(30))
            .clone());
  }
  if (!options.has<iam::IAMCredentialsBackoffPolicyOption>()) {
    // Exponential backoff: 1s initial delay, doubling, capped at 5 minutes.
    options.set<iam::IAMCredentialsBackoffPolicyOption>(
        ExponentialBackoffPolicy(std::chrono::seconds(1),
                                 std::chrono::minutes(5), kBackoffScaling)
            .clone());
  }
  if (!options.has<iam::IAMCredentialsConnectionIdempotencyPolicyOption>()) {
    options.set<iam::IAMCredentialsConnectionIdempotencyPolicyOption>(
        iam::MakeDefaultIAMCredentialsConnectionIdempotencyPolicy());
  }

  return options;
}

}  // namespace GOOGLE_CLOUD_CPP_GENERATED_NS
}  // namespace iam_internal
}  // namespace cloud
}  // namespace google
// Copyright 2014 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // blaze.cc: bootstrap and client code for Blaze server. // // Responsible for: // - extracting the Python, C++ and Java components. // - starting the server or finding the existing one. // - client options parsing. // - passing the argv array, and printing the out/err streams. // - signal handling. // - exiting with the right error/WTERMSIG code. // - debugger + profiler support. // - mutual exclusion between batch invocations. 
#include "src/main/cpp/blaze.h" #include <assert.h> #include <ctype.h> #include <fcntl.h> #include <grpc/grpc.h> #include <grpc/support/log.h> #include <grpcpp/channel.h> #include <grpcpp/client_context.h> #include <grpcpp/create_channel.h> #include <grpcpp/security/credentials.h> #include <limits.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <algorithm> #include <chrono> // NOLINT (gRPC requires this) #include <cinttypes> #include <map> #include <memory> #include <mutex> // NOLINT #include <set> #include <sstream> #include <string> #include <thread> // NOLINT #include <utility> #include <vector> #include "src/main/cpp/archive_utils.h" #include "src/main/cpp/blaze_util.h" #include "src/main/cpp/blaze_util_platform.h" #include "src/main/cpp/option_processor.h" #include "src/main/cpp/server_process_info.h" #include "src/main/cpp/startup_options.h" #include "src/main/cpp/util/bazel_log_handler.h" #include "src/main/cpp/util/errors.h" #include "src/main/cpp/util/exit_code.h" #include "src/main/cpp/util/file.h" #include "src/main/cpp/util/logging.h" #include "src/main/cpp/util/numbers.h" #include "src/main/cpp/util/path.h" #include "src/main/cpp/util/path_platform.h" #include "src/main/cpp/util/port.h" #include "src/main/cpp/util/strings.h" #include "src/main/cpp/workspace_layout.h" #include "src/main/protobuf/command_server.grpc.pb.h" #include "third_party/ijar/zip.h" using blaze_util::GetLastErrorString; extern char** environ; namespace blaze { using std::map; using std::set; using std::string; using std::vector; using command_server::CommandServer; // The following is a treatise on how the interaction between the client and the // server works. // // First, the client unconditionally acquires an flock() lock on // $OUTPUT_BASE/lock then verifies if it has already extracted itself by // checking if the directory it extracts itself to (install base + a checksum) // is present. If not, then it does the extraction. 
Care is taken that this // process is atomic so that Blazen in multiple output bases do not clash. // // Then the client tries to connect to the currently executing server and kills // it if at least one of the following conditions is true: // // - The server is of the wrong version (as determined by the // $OUTPUT_BASE/install symlink) // - The server has different startup options than the client wants // - The client wants to run the command in batch mode // // Then, if needed, the client adjusts the install link to indicate which // version of the server it is running. // // In batch mode, the client then simply executes the server while taking care // that the output base lock is kept until it finishes. // // If in server mode, the client starts up a server if needed then sends the // command to the client and streams back stdout and stderr. The output base // lock is released after the command is sent to the server (the server // implements its own locking mechanism). // Synchronization between the client and the server is a little precarious // because the client needs to know the PID of the server and it is not // available using a Java API and we don't have JNI on Windows at the moment, // so the server can't just communicate this over the communication channel. // Thus, a PID file is used, but care needs to be taken that the contents of // this PID file are right. // // Upon server startup, the PID file is written before the client spawns the // server. Thus, when the client can connect, it can be certain that the PID // file is up to date. // // Upon server shutdown, the PID file is deleted using a server shutdown hook. // However, this happens *after* the server stopped listening, so it's possible // that a client has already started up a server and written a new PID file. 
// In order to avoid this, when the client starts up a new server, it reads the // contents of the PID file and kills the process indicated in it (it could do // with a bit more care, since PIDs can be reused, but for now, we just believe // the PID file) // // Some more interesting scenarios: // // - The server receives a kill signal and it does not have a chance to delete // the PID file: the client cannot connect, reads the PID file, kills the // process indicated in it and starts up a new server. // // - The server stopped accepting connections but hasn't quit yet and a new // client comes around: the new client will kill the server based on the // PID file before a new server is started up. // // Alternative implementations: // // - Don't deal with PIDs at all. This would make it impossible for the client // to deliver a SIGKILL to the server after three SIGINTs. It would only be // possible with gRPC anyway. // // - Have the server check that the PID file contains the correct things // before deleting them: there is a window of time between checking the file // and deleting it in which a new server can overwrite the PID file. The // output base lock cannot be acquired, either, because when starting up a // new server, the client already holds it. // // - Delete the PID file before stopping to accept connections: then a client // could come about after deleting the PID file but before stopping accepting // connections. It would also not be resilient against a dead server that // left a PID file around. // The reason for a blaze server restart. // Keep in sync with logging.proto. enum RestartReason { NO_RESTART = 0, NO_DAEMON, NEW_VERSION, NEW_OPTIONS, PID_FILE_BUT_NO_SERVER, SERVER_VANISHED, SERVER_UNRESPONSIVE }; // String string representation of RestartReason. 
static const char* ReasonString(RestartReason reason) { switch (reason) { case NO_RESTART: return "no_restart"; case NO_DAEMON: return "no_daemon"; case NEW_VERSION: return "new_version"; case NEW_OPTIONS: return "new_options"; case PID_FILE_BUT_NO_SERVER: return "pid_file_but_no_server"; case SERVER_VANISHED: return "server_vanished"; case SERVER_UNRESPONSIVE: return "server_unresponsive"; } BAZEL_DIE(blaze_exit_code::INTERNAL_ERROR) << "unknown RestartReason (" << reason << ")."; // Cannot actually reach this, but it makes the compiler happy. return "unknown"; } struct DurationMillis { const uint64_t millis; DurationMillis() : millis(kUnknownDuration) {} DurationMillis(const uint64_t ms) : millis(ms) {} bool IsKnown() const { return millis == kUnknownDuration; } private: // Value representing that a timing event never occurred or is unknown. static constexpr uint64_t kUnknownDuration = 0; }; // Encapsulates miscellaneous information reported to the server for logging and // profiling purposes. struct LoggingInfo { explicit LoggingInfo( const string &binary_path_, const uint64_t start_time_ms_) : binary_path(binary_path_), start_time_ms(start_time_ms_), restart_reason(NO_RESTART) {} void SetRestartReasonIfNotSet(const RestartReason restart_reason_) { if (restart_reason == NO_RESTART) { restart_reason = restart_reason_; } } // Path of this binary. const string binary_path; // The time in ms the binary started up, measured from approximately the time // that "main" was called. const uint64_t start_time_ms; // The reason the server was restarted. RestartReason restart_reason; }; class BlazeServer final { public: BlazeServer(const int connect_timeout_secs, const bool batch, const bool block_for_lock, const blaze_util::Path &output_base, const blaze_util::Path &server_jvm_out); // Acquire a lock for the server running in this output base. Returns the // number of milliseconds spent waiting for the lock. 
uint64_t AcquireLock(); // Whether there is an active connection to a server. bool Connected() const { return connected_; } // Connect to the server. Returns if the connection was successful. Only // call this when this object is in disconnected state. If it returns true, // this object will be in connected state. bool Connect(); // Send the command line to the server and forward whatever it says to stdout // and stderr. Returns the desired exit code. Only call this when the server // is in connected state. unsigned int Communicate( const std::string &command, const std::vector<std::string> &command_args, const std::string &invocation_policy, const std::vector<RcStartupFlag> &original_startup_options, const LoggingInfo &logging_info, const DurationMillis client_startup_duration, const DurationMillis extract_data_duration, const DurationMillis command_wait_duration_ms); // Disconnects and kills an existing server. Only call this when this object // is in connected state. void KillRunningServer(); // Cancel the currently running command. If there is no command currently // running, the result is unspecified. When called, this object must be in // connected state. void Cancel(); // Returns information about the actual server process and its configuration. const ServerProcessInfo& ProcessInfo() const { return process_info_; } private: BlazeLock blaze_lock_; bool connected_; enum CancelThreadAction { NOTHING, JOIN, CANCEL, COMMAND_ID_RECEIVED }; std::unique_ptr<CommandServer::Stub> client_; std::string request_cookie_; std::string response_cookie_; std::string command_id_; // protects command_id_ . Although we always set it before making the cancel // thread do something with it, the mutex is still useful because it provides // a memory fence. std::mutex cancel_thread_mutex_; // Pipe that the main thread sends actions to and the cancel thread receives // actions from. 
std::unique_ptr<blaze_util::IPipe> pipe_; bool TryConnect(CommandServer::Stub *client); void CancelThread(); void SendAction(CancelThreadAction action); void SendCancelMessage(); ServerProcessInfo process_info_; const int connect_timeout_secs_; const bool batch_; const bool block_for_lock_; const blaze_util::Path output_base_; }; //////////////////////////////////////////////////////////////////////// // Global Variables static BlazeServer *blaze_server; // TODO(laszlocsomor) 2016-11-24: release the `blaze_server` object. Currently // nothing deletes it. Be careful that some functions may call exit(2) or // _exit(2) (attributed with ATTRIBUTE_NORETURN) meaning we have to delete the // objects before those. uint64_t BlazeServer::AcquireLock() { return blaze::AcquireLock(output_base_, batch_, block_for_lock_, &blaze_lock_); } //////////////////////////////////////////////////////////////////////// // Logic static map<string, EnvVarValue> PrepareEnvironmentForJvm(); // Escapes colons by replacing them with '_C' and underscores by replacing them // with '_U'. E.g. "name:foo_bar" becomes "name_Cfoo_Ubar" static string EscapeForOptionSource(const string &input) { string result = input; blaze_util::Replace("_", "_U", &result); blaze_util::Replace(":", "_C", &result); return result; } // Returns the installed embedded binaries directory, under the shared // install_base location. string GetEmbeddedBinariesRoot(const string &install_base) { return blaze_util::JoinPath(install_base, "_embedded_binaries"); } // Returns the JVM command argument array. static vector<string> GetServerExeArgs(const blaze_util::Path &jvm_path, const string &server_jar_path, const vector<string> &archive_contents, const string &install_md5, const WorkspaceLayout &workspace_layout, const string &workspace, const StartupOptions &startup_options) { vector<string> result; // e.g. 
  // A Blaze server process running in ~/src/build_root (where there's a
  // ~/src/build_root/WORKSPACE file) will appear in ps(1) as "blaze(src)".
  result.push_back(startup_options.GetLowercaseProductName() + "(" +
                   workspace_layout.GetPrettyWorkspaceName(workspace) + ")");
  startup_options.AddJVMArgumentPrefix(jvm_path.GetParent().GetParent(),
                                       &result);

  // Always capture a heap dump in the output base on JVM OOM.
  result.push_back("-XX:+HeapDumpOnOutOfMemoryError");
  result.push_back("-XX:HeapDumpPath=" +
                   startup_options.output_base.AsJvmArgument());

  // TODO(b/109998449): only assume JDK >= 9 for embedded JDKs
  if (!startup_options.GetEmbeddedJavabase().IsEmpty()) {
    // quiet warnings from com.google.protobuf.UnsafeUtil,
    // see: https://github.com/google/protobuf/issues/3781
    result.push_back("--add-opens=java.base/java.nio=ALL-UNNAMED");
    result.push_back("--add-opens=java.base/java.lang=ALL-UNNAMED");
  }

  result.push_back("-Xverify:none");

  vector<string> user_options;

  user_options.insert(user_options.begin(),
                      startup_options.host_jvm_args.begin(),
                      startup_options.host_jvm_args.end());

  // Add JVM arguments particular to building blaze64 and particular JVM
  // versions.
  string error;
  blaze_exit_code::ExitCode jvm_args_exit_code =
      startup_options.AddJVMArguments(startup_options.GetServerJavabase(),
                                      &result, user_options, &error);
  if (jvm_args_exit_code != blaze_exit_code::SUCCESS) {
    BAZEL_DIE(jvm_args_exit_code) << error;
  }

  // We put all directories on java.library.path that contain .so/.dll files.
  set<string> java_library_paths;
  std::stringstream java_library_path;
  java_library_path << "-Djava.library.path=";
  blaze_util::Path real_install_dir =
      blaze_util::Path(GetEmbeddedBinariesRoot(startup_options.install_base));

  bool first = true;
  for (const auto &it : archive_contents) {
    if (IsSharedLibrary(it)) {
      string libpath(real_install_dir.GetRelative(blaze_util::Dirname(it))
                         .AsJvmArgument());
      // Only add the library path if it's not added yet.
      if (java_library_paths.find(libpath) == java_library_paths.end()) {
        java_library_paths.insert(libpath);
        if (!first) {
          java_library_path << kListSeparator;
        }
        first = false;
        java_library_path << libpath;
      }
    }
  }
  result.push_back(java_library_path.str());

  // Force use of latin1 for file names.
  result.push_back("-Dfile.encoding=ISO-8859-1");

  if (startup_options.host_jvm_debug) {
    BAZEL_LOG(USER)
        << "Running host JVM under debugger (listening on TCP port 5005).";
    // Start JVM so that it listens for a connection from a
    // JDWP-compliant debugger:
    result.push_back("-Xdebug");
    result.push_back("-Xrunjdwp:transport=dt_socket,server=y,address=5005");
  }
  // User-supplied JVM flags come last so they can override our defaults.
  result.insert(result.end(), user_options.begin(), user_options.end());

  startup_options.AddJVMArgumentSuffix(real_install_dir, server_jar_path,
                                       &result);

  // JVM arguments are complete. Now pass in Blaze startup options.
  // Note that we always use the --flag=ARG form (instead of the --flag ARG one)
  // so that BlazeRuntime#splitStartupOptions has an easy job.

  // TODO(lberki): Test that whatever the list constructed after this line is
  // actually a list of parseable startup options.
  if (!startup_options.batch) {
    result.push_back("--max_idle_secs=" +
                     ToString(startup_options.max_idle_secs));
    result.push_back("--shutdown_on_low_sys_mem=" +
                     ToString(startup_options.shutdown_on_low_sys_mem));
  } else {
    // --batch must come first in the arguments to Java main() because
    // the code expects it to be at args[0] if it's been set.
    result.push_back("--batch");
  }

  if (startup_options.command_port != 0) {
    result.push_back("--command_port=" +
                     ToString(startup_options.command_port));
  }

  result.push_back("--connect_timeout_secs=" +
                   ToString(startup_options.connect_timeout_secs));

  result.push_back("--output_user_root=" +
                   blaze_util::ConvertPath(startup_options.output_user_root));
  result.push_back("--install_base=" +
                   blaze_util::ConvertPath(startup_options.install_base));
  result.push_back("--install_md5=" + install_md5);
  result.push_back("--output_base=" +
                   startup_options.output_base.AsCommandLineArgument());
  result.push_back("--workspace_directory=" +
                   blaze_util::ConvertPath(workspace));
  result.push_back("--default_system_javabase=" + GetSystemJavabase());

  if (!startup_options.server_jvm_out.IsEmpty()) {
    result.push_back("--server_jvm_out=" +
                     startup_options.server_jvm_out.AsCommandLineArgument());
  }

  // Boolean options are always emitted explicitly (--foo / --nofoo) so that
  // AreStartupOptionsDifferent() can compare canonical command lines.
  if (startup_options.deep_execroot) {
    result.push_back("--deep_execroot");
  } else {
    result.push_back("--nodeep_execroot");
  }
  if (startup_options.expand_configs_in_place) {
    result.push_back("--expand_configs_in_place");
  } else {
    result.push_back("--noexpand_configs_in_place");
  }
  if (!startup_options.digest_function.empty()) {
    // Only include this if a value is requested - we rely on the empty case
    // being "null" to set the programmatic default in the server.
    result.push_back("--digest_function=" + startup_options.digest_function);
  }
  if (startup_options.idle_server_tasks) {
    result.push_back("--idle_server_tasks");
  } else {
    result.push_back("--noidle_server_tasks");
  }

  if (startup_options.oom_more_eagerly) {
    result.push_back("--experimental_oom_more_eagerly");
  } else {
    result.push_back("--noexperimental_oom_more_eagerly");
  }
  result.push_back("--experimental_oom_more_eagerly_threshold=" +
                   ToString(startup_options.oom_more_eagerly_threshold));

  if (startup_options.write_command_log) {
    result.push_back("--write_command_log");
  } else {
    result.push_back("--nowrite_command_log");
  }

  if (startup_options.watchfs) {
    result.push_back("--watchfs");
  } else {
    result.push_back("--nowatchfs");
  }
  if (startup_options.fatal_event_bus_exceptions) {
    result.push_back("--fatal_event_bus_exceptions");
  } else {
    result.push_back("--nofatal_event_bus_exceptions");
  }

  // We use this syntax so that the logic in AreStartupOptionsDifferent() that
  // decides whether the server needs killing is simpler. This is parsed by the
  // Java code where --noclient_debug and --client_debug=false are equivalent.
  // Note that --client_debug false (separated by space) won't work either,
  // because the logic in AreStartupOptionsDifferent() assumes that every
  // argument is in the --arg=value form.
  if (startup_options.client_debug) {
    result.push_back("--client_debug=true");
  } else {
    result.push_back("--client_debug=false");
  }

  // These flags are passed to the java process only for Blaze reporting
  // purposes; the real interpretation of the jvm flags occurs when we set up
  // the java command line.
  if (!startup_options.GetExplicitServerJavabase().IsEmpty()) {
    result.push_back("--server_javabase=" +
                     startup_options.GetExplicitServerJavabase()
                         .AsCommandLineArgument());
  }
  if (startup_options.host_jvm_debug) {
    result.push_back("--host_jvm_debug");
  }
  if (!startup_options.host_jvm_profile.empty()) {
    result.push_back("--host_jvm_profile=" + startup_options.host_jvm_profile);
  }
  if (!startup_options.host_jvm_args.empty()) {
    for (const auto &arg : startup_options.host_jvm_args) {
      result.push_back("--host_jvm_args=" + arg);
    }
  }

  // Pass in invocation policy as a startup argument for batch mode only.
  if (startup_options.batch && !startup_options.invocation_policy.empty()) {
    result.push_back("--invocation_policy=" +
                     startup_options.invocation_policy);
  }

  result.push_back("--product_name=" + startup_options.product_name);

  startup_options.AddExtraOptions(&result);

  // The option sources are transmitted in the following format:
  // --option_sources=option1:source1:option2:source2:...
  // Keys and values are escaped with EscapeForOptionSource so that embedded
  // ':' and '_' characters do not break the encoding.
  string option_sources = "--option_sources=";
  first = true;
  for (const auto &it : startup_options.option_sources) {
    if (!first) {
      option_sources += ":";
    }
    first = false;
    option_sources += EscapeForOptionSource(it.first) + ":" +
                      EscapeForOptionSource(it.second);
  }

  result.push_back(option_sources);
  return result;
}

// Add common command options for logging to the given argument array.
static void AddLoggingArgs(const LoggingInfo &logging_info,
                           const DurationMillis client_startup_duration,
                           const DurationMillis extract_data_duration,
                           const DurationMillis command_wait_duration_ms,
                           vector<string> *args) {
  // The time in ms the launcher spends before sending the request to the blaze
  // server.
  args->push_back("--startup_time=" +
                  ToString(client_startup_duration.millis));

  // The time in ms a command had to wait on a busy Blaze server process.
  // This is part of startup_time.
if (command_wait_duration_ms.IsKnown()) { args->push_back("--command_wait_time=" + ToString(command_wait_duration_ms.millis)); } // The time in ms spent on extracting the new blaze version. // This is part of startup_time. if (extract_data_duration.IsKnown()) { args->push_back("--extract_data_time=" + ToString(extract_data_duration.millis)); } if (logging_info.restart_reason != NO_RESTART) { args->push_back(string("--restart_reason=") + ReasonString(logging_info.restart_reason)); } args->push_back(string("--binary_path=") + logging_info.binary_path); } // Join the elements of the specified array with NUL's (\0's), akin to the // format of /proc/$PID/cmdline. static string GetArgumentString(const vector<string> &argument_array) { string result; blaze_util::JoinStrings(argument_array, '\0', &result); return result; } static void EnsureServerDir(const blaze_util::Path &server_dir) { // The server dir has the connection info - don't allow access by other users. if (!blaze_util::MakeDirectories(server_dir, 0700)) { BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR) << "server directory '" << server_dir.AsPrintablePath() << "' could not be created: " << GetLastErrorString(); } } // Do a chdir into the workspace, and die if it fails. static const void GoToWorkspace( const WorkspaceLayout &workspace_layout, const string &workspace) { if (workspace_layout.InWorkspace(workspace) && !blaze_util::ChangeDirectory(workspace)) { BAZEL_DIE(blaze_exit_code::INTERNAL_ERROR) << "changing directory into " << workspace << " failed: " << GetLastErrorString(); } } static const bool IsServerMode(const string &command) { return "exec-server" == command; } // Replace this process with the blaze server. Does not exit. 
// Replaces this process with the Blaze server JVM ("exec-server" mode).
// Dies if --batch was requested or a server is already running for this
// output base.
static void RunServerMode(
    const blaze_util::Path &server_exe, const vector<string> &server_exe_args,
    const blaze_util::Path &server_dir, const WorkspaceLayout &workspace_layout,
    const string &workspace, const OptionProcessor &option_processor,
    const StartupOptions &startup_options, BlazeServer *server) {
  if (startup_options.batch) {
    BAZEL_DIE(blaze_exit_code::BAD_ARGV)
        << "exec-server command is not compatible with --batch";
  }

  BAZEL_LOG(INFO) << "Running in server mode.";

  if (server->Connected()) {
    BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
        << "exec-server failed, please shut down existing server pid="
        << server->ProcessInfo().server_pid_ << " and retry.";
  }

  // Record our PID and command line in the server dir before chdir'ing, so
  // later clients can validate the running server against them.
  EnsureServerDir(server_dir);
  blaze_util::WriteFile(blaze::GetProcessIdAsString(),
                        server_dir.GetRelative("server.pid.txt"));
  blaze_util::WriteFile(GetArgumentString(server_exe_args),
                        server_dir.GetRelative("cmdline"));

  GoToWorkspace(workspace_layout, workspace);

  SetScheduling(startup_options.batch_cpu_scheduling,
                startup_options.io_nice_level);

  {
    // ExecuteServerJvm replaces this process image; the prepared environment
    // is what the JVM inherits.
    WithEnvVars env_obj(PrepareEnvironmentForJvm());
    ExecuteServerJvm(server_exe, server_exe_args);
  }
}

// Replace this process with blaze in standalone/batch mode.
// The batch mode blaze process handles the command and exits.
static void RunBatchMode(
    const blaze_util::Path &server_exe, const vector<string> &server_exe_args,
    const WorkspaceLayout &workspace_layout, const string &workspace,
    const OptionProcessor &option_processor,
    const StartupOptions &startup_options, LoggingInfo *logging_info,
    const DurationMillis extract_data_duration,
    const DurationMillis command_wait_duration_ms, BlazeServer *server) {
  // Batch mode must not run concurrently with a resident server.
  if (server->Connected()) {
    server->KillRunningServer();
  }

  const DurationMillis client_startup_duration(GetMillisecondsMonotonic() -
                                               logging_info->start_time_ms);

  BAZEL_LOG(INFO) << "Starting " << startup_options.product_name
                  << " in batch mode.";

  const string command = option_processor.GetCommand();
  const vector<string> command_arguments =
      option_processor.GetCommandArguments();

  // "shutdown" with extra arguments in batch mode is almost certainly a user
  // running outside a workspace by accident; warn with instructions.
  if (!command_arguments.empty() && command == "shutdown") {
    string product = startup_options.GetLowercaseProductName();
    BAZEL_LOG(WARNING)
        << "Running command \"shutdown\" in batch mode. Batch mode is "
           "triggered\nwhen not running "
        << startup_options.product_name
        << " within a workspace. If you intend to shutdown an\nexisting "
        << startup_options.product_name << " server, run \"" << product
        << " shutdown\" from the directory where\nit was started.";
  }

  // Full argv: server args + command + logging args + command arguments.
  vector<string> jvm_args_vector;
  jvm_args_vector.insert(jvm_args_vector.end(), server_exe_args.begin(),
                         server_exe_args.end());

  if (!command.empty()) {
    jvm_args_vector.push_back(command);
    AddLoggingArgs(*logging_info, client_startup_duration,
                   extract_data_duration, command_wait_duration_ms,
                   &jvm_args_vector);
  }

  jvm_args_vector.insert(jvm_args_vector.end(), command_arguments.begin(),
                         command_arguments.end());

  GoToWorkspace(workspace_layout, workspace);

  SetScheduling(startup_options.batch_cpu_scheduling,
                startup_options.io_nice_level);

  {
    // Replaces this process image; does not return on success.
    WithEnvVars env_obj(PrepareEnvironmentForJvm());
    ExecuteServerJvm(server_exe, jvm_args_vector);
  }
}

// Copies the contents of the file at `path` to stderr, or dies trying.
static void WriteFileToStderrOrDie(const blaze_util::Path &path) {
#if defined(_WIN32) || defined(__CYGWIN__)
  FILE *fp = _wfopen(path.AsNativePath().c_str(), L"r");
#else
  FILE *fp = fopen(path.AsNativePath().c_str(), "r");
#endif

  if (fp == NULL) {
    BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
        << "opening " << path.AsPrintablePath()
        << " failed: " << GetLastErrorString();
  }
  char buffer[255];
  int num_read;
  while ((num_read = fread(buffer, 1, sizeof buffer, fp)) > 0) {
    if (ferror(fp)) {
      BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
          << "failed to read from '" << path.AsPrintablePath()
          << "': " << GetLastErrorString();
    }
    fwrite(buffer, 1, num_read, stderr);
  }
  fclose(fp);
}

// After connecting to the Blaze server, return its PID, or -1 if there was an
// error.
static int GetServerPid(const blaze_util::Path &server_dir) {
  // Note: there is no race here on startup since the server creates
  // the pid file strictly before it binds the socket.
  blaze_util::Path pid_file = server_dir.GetRelative(kServerPidFile);
  string bufstr;
  int result;
  // -1 signals either an unreadable pid file or unparseable contents.
  if (!blaze_util::ReadFile(pid_file, &bufstr, 32) ||
      !blaze_util::safe_strto32(bufstr, &result)) {
    return -1;
  }

  return result;
}

// Connect to the server process or exit if it doesn't work out.
static void ConnectOrDie(const OptionProcessor &option_processor,
                         const StartupOptions &startup_options,
                         const int server_pid,
                         BlazeServerStartup *server_startup,
                         BlazeServer *server) {
  // Give the server two minutes to start up. That's enough to connect with a
  // debugger.
  const auto start_time = std::chrono::system_clock::now();
  const auto try_until_time = start_time + std::chrono::seconds(120);
  // Print an update at most once every 10 seconds if we are still trying to
  // connect.
  const auto min_message_interval = std::chrono::seconds(10);
  auto last_message_time = start_time;
  // Poll roughly every 100ms until connected, the server dies, or we time out.
  while (std::chrono::system_clock::now() < try_until_time) {
    const auto attempt_time = std::chrono::system_clock::now();
    const auto next_attempt_time =
        attempt_time + std::chrono::milliseconds(100);

    if (server->Connect()) {
      return;
    }

    if (attempt_time >= (last_message_time + min_message_interval)) {
      auto elapsed_time = std::chrono::duration_cast<std::chrono::seconds>(
          attempt_time - start_time);
      BAZEL_LOG(USER) << "... still trying to connect to local "
                      << startup_options.product_name << " server after "
                      << elapsed_time.count() << " seconds ...";
      last_message_time = attempt_time;
    }

    std::this_thread::sleep_until(next_attempt_time);
    if (!server_startup->IsStillAlive()) {
      option_processor.PrintStartupOptionsProvenanceMessage();
      if (server->ProcessInfo().jvm_log_file_append_) {
        // Don't dump the log if we were appending - the user should know where
        // to find it, and who knows how much content they may have accumulated.
        BAZEL_LOG(USER) << "Server crashed during startup. See "
                        << server->ProcessInfo().jvm_log_file_.AsPrintablePath();
      } else {
        BAZEL_LOG(USER) << "Server crashed during startup. Now printing "
                        << server->ProcessInfo().jvm_log_file_.AsPrintablePath();
        WriteFileToStderrOrDie(server->ProcessInfo().jvm_log_file_);
      }
      exit(blaze_exit_code::INTERNAL_ERROR);
    }
  }
  BAZEL_DIE(blaze_exit_code::INTERNAL_ERROR)
      << "couldn't connect to server (" << server_pid << ") after 120 seconds.";
}

// Ensures that any server previously associated with `server_dir` is no longer
// running.
static void EnsurePreviousServerProcessTerminated(
    const blaze_util::Path &server_dir, const StartupOptions &startup_options,
    LoggingInfo *logging_info) {
  int server_pid = GetServerPid(server_dir);
  if (server_pid > 0) {
    if (VerifyServerProcess(server_pid, startup_options.output_base)) {
      if (KillServerProcess(server_pid, startup_options.output_base)) {
        BAZEL_LOG(USER) << "Killed non-responsive server process (pid="
                        << server_pid << ")";
        logging_info->SetRestartReasonIfNotSet(SERVER_UNRESPONSIVE);
      } else {
        logging_info->SetRestartReasonIfNotSet(SERVER_VANISHED);
      }
    } else {
      logging_info->SetRestartReasonIfNotSet(PID_FILE_BUT_NO_SERVER);
    }
  }
}

// Starts up a new server and connects to it. Exits if it didn't work out.
static void StartServerAndConnect(
    const blaze_util::Path &server_exe, const vector<string> &server_exe_args,
    const blaze_util::Path &server_dir, const WorkspaceLayout &workspace_layout,
    const string &workspace, const OptionProcessor &option_processor,
    const StartupOptions &startup_options, LoggingInfo *logging_info,
    BlazeServer *server) {
  // Delete the old command_port file if it already exists. Otherwise we might
  // run into the race condition that we read the old command_port file before
  // the new server has written the new file and we try to connect to the old
  // port, run into a timeout and try again.
  (void)blaze_util::UnlinkPath(server_dir.GetRelative("command_port"));

  EnsureServerDir(server_dir);

  // Really make sure there's no other server running in this output base (even
  // an unresponsive one), as that could cause major problems.
  EnsurePreviousServerProcessTerminated(server_dir, startup_options,
                                        logging_info);

  // cmdline file is used to validate the server running in this server_dir.
  // There's no server running now so we're safe to unconditionally write this.
  blaze_util::WriteFile(GetArgumentString(server_exe_args),
                        server_dir.GetRelative("cmdline"));

  // Do this here instead of in the daemon so the user can see if it fails.
  GoToWorkspace(workspace_layout, workspace);

  logging_info->SetRestartReasonIfNotSet(NO_DAEMON);

  SetScheduling(startup_options.batch_cpu_scheduling,
                startup_options.io_nice_level);

  BAZEL_LOG(USER) << "Starting local " << startup_options.product_name
                  << " server and connecting to it...";
  BlazeServerStartup *server_startup;
  const int server_pid = ExecuteDaemon(
      server_exe, server_exe_args, PrepareEnvironmentForJvm(),
      server->ProcessInfo().jvm_log_file_,
      server->ProcessInfo().jvm_log_file_append_,
      GetEmbeddedBinariesRoot(startup_options.install_base), server_dir,
      startup_options, &server_startup);

  ConnectOrDie(option_processor, startup_options, server_pid, server_startup,
               server);

  // server_startup is heap-allocated by ExecuteDaemon; release it once we are
  // connected.
  delete server_startup;
}

// Timestamps the freshly extracted files and flushes them (and their parent
// directories) to disk.
static void MoveFiles(const string &embedded_binaries) {
  blaze_util::Path embedded_binaries_(embedded_binaries);
  // Set the timestamps of the extracted files to the future and make sure (or
  // at least as sure as we can...) that the files we have written are actually
  // on the disk.

  vector<string> extracted_files;

  // Walks the temporary directory recursively and collects full file paths.
  blaze_util::GetAllFilesUnder(embedded_binaries, &extracted_files);

  std::unique_ptr<blaze_util::IFileMtime> mtime(blaze_util::CreateFileMtime());
  set<blaze_util::Path> synced_directories;
  for (const auto &f : extracted_files) {
    blaze_util::Path it(f);

    // Set the time to a distantly futuristic value so we can observe tampering.
    // Note that keeping a static, deterministic timestamp, such as the default
    // timestamp set by unzip (1970-01-01) and using that to detect tampering is
    // not enough, because we also need the timestamp to change between Bazel
    // releases so that the metadata cache knows that the files may have
    // changed. This is essential for the correctness of actions that use
    // embedded binaries as artifacts.
    if (!mtime->SetToDistantFuture(it)) {
      string err = GetLastErrorString();
      BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
          << "failed to set timestamp on '" << it.AsPrintablePath()
          << "': " << err;
    }

    blaze_util::SyncFile(it);

    blaze_util::Path directory = it.GetParent();

    // Now walk up until embedded_binaries and sync every directory in between.
    // synced_directories is used to avoid syncing the same directory twice.
    // The !directory.empty() and !blaze_util::IsRootDirectory(directory)
    // conditions are not strictly needed, but it makes this loop more robust,
    // because otherwise, if due to some glitch, directory was not under
    // embedded_binaries, it would get into an infinite loop.
    while (directory != embedded_binaries_ &&
           synced_directories.count(directory) == 0 && !directory.IsEmpty() &&
           !blaze_util::IsRootDirectory(directory)) {
      blaze_util::SyncFile(directory);
      synced_directories.insert(directory);
      directory = directory.GetParent();
    }
  }

  blaze_util::SyncFile(embedded_binaries_);
}

// Installs Blaze by extracting the embedded data files, iff necessary.
// The MD5-named install_base directory on disk is trusted; we assume
// no-one has modified the extracted files beneath this directory once
// it is in place. Concurrency during extraction is handled by
// extracting in a tmp dir and then renaming it into place where it
// becomes visible atomically at the new path.
static DurationMillis ExtractData(const string &self_path,
                                  const vector<string> &archive_contents,
                                  const string &expected_install_md5,
                                  const StartupOptions &startup_options,
                                  LoggingInfo *logging_info) {
  // If the install dir doesn't exist, create it, if it does, we know it's good.
  if (!blaze_util::PathExists(startup_options.install_base)) {
    uint64_t st = GetMillisecondsMonotonic();
    // Work in a temp dir to avoid races.
    string tmp_install =
        startup_options.install_base + ".tmp." + blaze::GetProcessIdAsString();
    string tmp_binaries = GetEmbeddedBinariesRoot(tmp_install);
    ExtractArchiveOrDie(self_path, startup_options.product_name,
                        expected_install_md5, tmp_binaries);
    MoveFiles(tmp_binaries);

    uint64_t et = GetMillisecondsMonotonic();
    const DurationMillis extract_data_duration(et - st);

    // Now rename the completed installation to its final name.
    // Retry once per second for up to 120 seconds.
    int attempts = 0;
    while (attempts < 120) {
      int result = blaze_util::RenameDirectory(
          tmp_install.c_str(), startup_options.install_base.c_str());
      if (result == blaze_util::kRenameDirectorySuccess ||
          result == blaze_util::kRenameDirectoryFailureNotEmpty) {
        // If renaming fails because the directory already exists and is not
        // empty, then we assume another good installation snuck in before us.
        break;
      } else {
        // Otherwise the install directory may still be scanned by the antivirus
        // (in case we're running on Windows) so we need to wait for that to
        // finish and try renaming again.
        ++attempts;
        BAZEL_LOG(USER) << "install base directory '" << tmp_install
                        << "' could not be renamed into place after "
                        << attempts << " second(s), trying again\r";
        std::this_thread::sleep_for(std::chrono::seconds(1));
      }
    }

    // Give up renaming after 120 failed attempts / 2 minutes.
    if (attempts == 120) {
      BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
          << "install base directory '" << tmp_install
          << "' could not be renamed into place: " << GetLastErrorString();
    }
    return extract_data_duration;
  } else {
    // Install base already exists: verify it instead of extracting.
    if (!blaze_util::IsDirectory(startup_options.install_base)) {
      BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
          << "install base directory '" << startup_options.install_base
          << "' could not be created. It exists but is not a directory.";
    }
    std::unique_ptr<blaze_util::IFileMtime> mtime(
        blaze_util::CreateFileMtime());
    blaze_util::Path real_install_dir =
        blaze_util::Path(startup_options.install_base)
            .GetRelative("_embedded_binaries");
    // Every archived file must still carry the far-future timestamp set by
    // MoveFiles; anything else indicates tampering or corruption.
    for (const auto &it : archive_contents) {
      blaze_util::Path path = real_install_dir.GetRelative(it);
      if (!mtime->IsUntampered(path)) {
        BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
            << "corrupt installation: file '" << path.AsPrintablePath()
            << "' is missing or modified. Please remove '"
            << startup_options.install_base << "' and try again.";
      }
    }
    // No extraction happened, so report an unknown/zero duration.
    return DurationMillis();
  }
}

// Returns true for startup arguments that may differ between the running
// server and the current request without requiring a server restart.
static bool IsVolatileArg(const string& arg) {
  // TODO(ccalvarin) when --batch is gone and the startup_options field in the
  // gRPC message is always set, there is no reason for client options that are
  // not used at server startup to be part of the startup command line. The
  // server command line difference logic can be simplified then.
  static const std::vector<string> volatile_startup_options = {
      "--option_sources=", "--max_idle_secs=", "--connect_timeout_secs=",
      "--client_debug="};

  // Split arg based on the first "=" if one exists in arg.
  const string::size_type eq_pos = arg.find_first_of('=');
  const string stripped_arg = (eq_pos == string::npos) ?
      arg : arg.substr(0, eq_pos + 1);

  return std::find(volatile_startup_options.begin(),
                   volatile_startup_options.end(),
                   stripped_arg) != volatile_startup_options.end();
}

// Increments the multiplicity counter for `key` in `map`.
static inline void IncreaseValueInMap(std::unordered_map<string, int>* map,
                                      const string& key) {
  (*map)[key] += 1;
}

// Decrements the multiplicity counter for `key`, erasing the entry when it
// reaches zero. Returns false iff `key` was not present.
static bool DecreaseValueInMap(std::unordered_map<string, int>* map,
                               const string& key) {
  auto i = map->find(key);
  if (i == map->end()) {
    return false;
  } else if (i->second == 1) {
    map->erase(i);
    return true;
  } else {
    i->second -= 1;
    return true;
  }
}

// Logs `message` followed by each arg in `map` with its extra-occurrence
// count; logs nothing when the map is empty.
static void PrintArgsInMap(const char* message,
                           const std::unordered_map<string, int>& map) {
  if (!map.empty()) {
    BAZEL_LOG(INFO) << message;
    for (const auto& i : map) {
      BAZEL_LOG(INFO) << " " << i.first << " (" << i.second
                      << " extra instance(s))";
    }
  }
}

// Returns true if the server needs to be restarted to accommodate changes
// between the two argument lists.
static bool AreStartupOptionsDifferent(
    const vector<string> &running_server_args,
    const vector<string> &requested_args) {
  // We need not worry about one side missing an argument and the other side
  // having the default value, since this command line is the canonical one for
  // this version of Bazel: either the default value is listed explicitly or it
  // is not, but this has nothing to do with the user's command line: it is
  // defined by GetServerExeArgs(). Same applies for argument ordering.
  bool options_different = false;
  if (running_server_args.size() != requested_args.size()) {
    BAZEL_LOG(INFO) << "The new command line has a different length from the "
                       "running server's.";
    options_different = true;
  }

  // Facts and implications:
  // (a) We already verified (with EnsureCorrectRunningVersion) that the old and
  //     new server versions are the same. Therefore we know that
  //     'running_server_args' and 'requested_args' follow the same ordering for
  //     flags, the same logic of deduplicating vs. not deduplicating flags, the
  //     same format of canonicalizing flags, etc.
  // (b) Some startup flags may come from user bazelrc files.
  // (c) Because of (b), the ordering of flags doesn't matter, because if the
  //     user flips two "startup" lines in their bazelrc, that doesn't change
  //     the effective set of startup flags.
  // (d) Because of (b), some flags may have repeated values (e.g.
  //     --host_jvm_args="foo" twice) so we cannot simply use two sets and take
  //     the set difference, but must consider the occurrences of each flag.
  // Compare the two lists as multisets, ignoring volatile flags.
  std::unordered_map<string, int> old_args, new_args;
  for (const string& a : running_server_args) {
    if (!IsVolatileArg(a)) {
      IncreaseValueInMap(&old_args, a);
    }
  }
  for (const string& a : requested_args) {
    if (!IsVolatileArg(a) && !DecreaseValueInMap(&old_args, a)) {
      IncreaseValueInMap(&new_args, a);
    }
  }

  PrintArgsInMap("Args from the running server that are not "
                 "included in the current request:", old_args);
  PrintArgsInMap("Args from the current request that were not "
                 "included when creating the server:", new_args);

  return options_different || !old_args.empty() || !new_args.empty();
}

// Kills the running Blaze server, if any, if the startup options do not match.
static void KillRunningServerIfDifferentStartupOptions(
    const StartupOptions &startup_options,
    const vector<string> &server_exe_args, LoggingInfo *logging_info,
    BlazeServer *server) {
  if (!server->Connected()) {
    return;
  }

  blaze_util::Path cmdline_path =
      startup_options.output_base.GetRelative("server/cmdline");
  string old_joined_arguments;

  // No, /proc/$PID/cmdline does not work, because it is limited to 4K. Even
  // worse, its behavior differs slightly between kernels (in some, when longer
  // command lines are truncated, the last 4 bytes are replaced with
  // "..." + NUL.
  blaze_util::ReadFile(cmdline_path, &old_joined_arguments);
  vector<string> old_arguments = blaze_util::Split(old_joined_arguments, '\0');

  // These strings contain null-separated command line arguments.
  // If they are the same, the server can stay alive; otherwise, it needs to
  // shuffle off this mortal coil.
  if (AreStartupOptionsDifferent(old_arguments, server_exe_args)) {
    logging_info->restart_reason = NEW_OPTIONS;
    BAZEL_LOG(WARNING) << "Running " << startup_options.product_name
                       << " server needs to be killed, because the startup "
                          "options are different.";
    server->KillRunningServer();
  }
}

// Kills the old running server if it is not the same version as us,
// dealing with various combinations of installation scheme
// (installation symlink and older MD5_MANIFEST contents).
// This function requires that the installation be complete, and the
// server lock acquired.
static void EnsureCorrectRunningVersion(const StartupOptions &startup_options,
                                        LoggingInfo *logging_info,
                                        BlazeServer *server) {
  // Read the previous installation's semaphore symlink in output_base. If the
  // target dirs don't match, or if the symlink was not present, then kill any
  // running servers. Lastly, symlink to our installation so others know which
  // installation is running.
  const blaze_util::Path installation_path =
      startup_options.output_base.GetRelative("install");
  string prev_installation;
  bool ok =
      blaze_util::ReadDirectorySymlink(installation_path, &prev_installation);
  if (!ok || !blaze_util::CompareAbsolutePaths(prev_installation,
                                               startup_options.install_base)) {
    if (server->Connected()) {
      BAZEL_LOG(INFO)
          << "Killing running server because it is using another version of "
          << startup_options.product_name;
      server->KillRunningServer();
      logging_info->restart_reason = NEW_VERSION;
    }

    // Repoint the "install" symlink at our install base.
    blaze_util::UnlinkPath(installation_path);
    if (!SymlinkDirectories(startup_options.install_base, installation_path)) {
      string err = GetLastErrorString();
      BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
          << "failed to create installation symlink '"
          << installation_path.AsPrintablePath() << "': " << err;
    }

    // Update the mtime of the install base so that cleanup tools can
    // find install bases that haven't been used for a long time
    std::unique_ptr<blaze_util::IFileMtime> mtime(
        blaze_util::CreateFileMtime());
    if (!mtime->SetToNow(blaze_util::Path(startup_options.install_base))) {
      string err = GetLastErrorString();
      BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
          << "failed to set timestamp on '" << startup_options.install_base
          << "': " << err;
    }
  }
}

// Signal-handler callback: forwards a cancellation to the global server.
static void CancelServer() { blaze_server->Cancel(); }

// Runs the launcher in client/server mode. Ensures that there's indeed a
// running server, then forwards the user's command to the server and the
// server's response back to the user. Does not return - exits via exit or
// signal.
static ATTRIBUTE_NORETURN void RunClientServerMode(
    const blaze_util::Path &server_exe, const vector<string> &server_exe_args,
    const blaze_util::Path &server_dir, const WorkspaceLayout &workspace_layout,
    const string &workspace, const OptionProcessor &option_processor,
    const StartupOptions &startup_options, LoggingInfo *logging_info,
    const DurationMillis extract_data_duration,
    const DurationMillis command_wait_duration_ms, BlazeServer *server) {
  // Loop until we are connected to a server whose cwd is still valid; a
  // stale-cwd server is killed and a fresh one is started on the next pass.
  while (true) {
    if (!server->Connected()) {
      StartServerAndConnect(server_exe, server_exe_args, server_dir,
                            workspace_layout, workspace, option_processor,
                            startup_options, logging_info, server);
    }

    // Check for the case when the workspace directory gets deleted and then
    // recreated while the server is running.
    blaze_util::Path server_cwd =
        GetProcessCWD(server->ProcessInfo().server_pid_);
    // If server_cwd is empty, GetProcessCWD failed. This notably occurs when
    // running under Docker because then readlink(/proc/[pid]/cwd) returns
    // EPERM.
    // Docker issue #6687 (https://github.com/docker/docker/issues/6687) fixed
    // this, but one still needs the --cap-add SYS_PTRACE command line flag, at
    // least according to the discussion on Docker issue #6800
    // (https://github.com/docker/docker/issues/6687), and even then, it's a
    // non-default Docker flag. Given that this occurs only in very weird
    // cases, it's better to assume that everything is alright if we can't get
    // the cwd.

    if (!server_cwd.IsEmpty() &&
        (server_cwd != blaze_util::Path(workspace) ||  // changed
         server_cwd.Contains(" (deleted)"))) {         // deleted.
      // There's a distant possibility that the two paths look the same yet are
      // actually different because the two processes have different mount
      // tables.
      BAZEL_LOG(INFO) << "Server's cwd moved or deleted ("
                      << server_cwd.AsPrintablePath() << ").";
      server->KillRunningServer();
    } else {
      break;
    }
  }

  BAZEL_LOG(INFO) << "Connected (server pid="
                  << server->ProcessInfo().server_pid_ << ").";

  // Wall clock time since process startup.
  const DurationMillis client_startup_duration =
      (GetMillisecondsMonotonic() - logging_info->start_time_ms);

  // Install signal handling so Ctrl-C etc. cancels the remote command, then
  // forward the command to the server and propagate its exit status.
  SignalHandler::Get().Install(startup_options.product_name,
                               startup_options.output_base,
                               &server->ProcessInfo(), CancelServer);
  SignalHandler::Get().PropagateSignalOrExit(server->Communicate(
      option_processor.GetCommand(), option_processor.GetCommandArguments(),
      startup_options.invocation_policy,
      startup_options.original_startup_options_, *logging_info,
      client_startup_duration, extract_data_duration,
      command_wait_duration_ms));
}

// Parse the options.
static void ParseOptionsOrDie(const string &cwd, const string &workspace,
                              OptionProcessor &option_processor, int argc,
                              const char *argv[]) {
  std::string error;
  std::vector<std::string> args;
  args.insert(args.end(), argv, argv + argc);
  const blaze_exit_code::ExitCode parse_exit_code =
      option_processor.ParseOptions(args, workspace, cwd, &error);

  if (parse_exit_code != blaze_exit_code::SUCCESS) {
    option_processor.PrintStartupOptionsProvenanceMessage();
    BAZEL_DIE(parse_exit_code) << error;
  }
}

// Returns the canonicalized current working directory, or dies.
static string GetCanonicalCwd() {
  string result = blaze_util::MakeCanonical(blaze_util::GetCwd().c_str());
  if (result.empty()) {
    BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
        << "blaze_util::MakeCanonical('" << blaze_util::GetCwd()
        << "') failed: " << GetLastErrorString();
  }
  return result;
}

// Updates the parsed startup options and global config to fill in defaults.
static void UpdateConfiguration(const string &install_md5,
                                const string &workspace,
                                const bool server_mode,
                                StartupOptions *startup_options) {
  // The default install_base is <output_user_root>/install/<md5(blaze)>
  // but if an install_base is specified on the command line, we use that as
  // the base instead.
if (startup_options->install_base.empty()) { if (server_mode) { BAZEL_DIE(blaze_exit_code::BAD_ARGV) << "exec-server requires --install_base"; } string install_user_root = blaze_util::JoinPath(startup_options->output_user_root, "install"); startup_options->install_base = blaze_util::JoinPath(install_user_root, install_md5); } if (startup_options->output_base.IsEmpty()) { if (server_mode) { BAZEL_DIE(blaze_exit_code::BAD_ARGV) << "exec-server requires --output_base"; } startup_options->output_base = blaze_util::Path( blaze::GetHashedBaseDir(startup_options->output_user_root, workspace)); } if (!blaze_util::PathExists(startup_options->output_base)) { if (!blaze_util::MakeDirectories(startup_options->output_base, 0777)) { string err = GetLastErrorString(); BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR) << "Output base directory '" << startup_options->output_base.AsPrintablePath() << "' could not be created: " << err; } } else { if (!blaze_util::IsDirectory(startup_options->output_base)) { BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR) << "Output base directory '" << startup_options->output_base.AsPrintablePath() << "' could not be created. It exists but is not a directory."; } } if (!blaze_util::CanAccessDirectory(startup_options->output_base)) { BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR) << "Output base directory '" << startup_options->output_base.AsPrintablePath() << "' must be readable and writable."; } ExcludePathFromBackup(startup_options->output_base); startup_options->output_base = startup_options->output_base.Canonicalize(); if (startup_options->output_base.IsEmpty()) { string err = GetLastErrorString(); BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR) << "blaze_util::MakeCanonical('" << startup_options->output_base.AsPrintablePath() << "') failed: " << err; } } // Prepares the environment to be suitable to start a JVM. // Changes made to the environment in this function *will not* be part // of '--client_env'. 
// Builds the environment-variable delta for the server JVM: inherit all
// current variables, UNSET known-problematic ones, and force an ISO-8859-1
// locale.
static map<string, EnvVarValue> PrepareEnvironmentForJvm() {
  map<string, EnvVarValue> result;

  // Make sure all existing environment variables appear as part of the
  // resulting map unless they are overridden below by UNSET values.
  //
  // Even though the map we return is intended to represent a "delta" of
  // environment variables to modify the current process, we may actually use
  // such map to configure a process from scratch (via interfaces like execvpe
  // or posix_spawn), so we need to inherit any untouched variables.
  for (char** entry = environ; *entry != NULL; entry++) {
    const std::string var_value = *entry;
    std::string::size_type equals = var_value.find('=');
    if (equals == std::string::npos) {
      // Ignore possibly-bad environment. We don't control what we see in this
      // global variable, so it could be invalid.
      continue;
    }
    const std::string var = var_value.substr(0, equals);
    const std::string value = var_value.substr(equals + 1);
    result[var] = EnvVarValue(EnvVarAction::SET, value);
  }

  if (blaze::ExistsEnv("LD_ASSUME_KERNEL")) {
    // Fix for bug: if ulimit -s and LD_ASSUME_KERNEL are both
    // specified, the JVM fails to create threads.  See thread_stack_regtest.
    // This is also provoked by LD_LIBRARY_PATH=/usr/lib/debug,
    // or anything else that causes the JVM to use LinuxThreads.
    BAZEL_LOG(WARNING) << "ignoring LD_ASSUME_KERNEL in environment.";
    result["LD_ASSUME_KERNEL"] = EnvVarValue(EnvVarAction::UNSET, "");
  }

  if (blaze::ExistsEnv("LD_PRELOAD")) {
    BAZEL_LOG(WARNING) << "ignoring LD_PRELOAD in environment.";
    result["LD_PRELOAD"] = EnvVarValue(EnvVarAction::UNSET, "");
  }

  if (blaze::ExistsEnv("_JAVA_OPTIONS")) {
    // This would override --host_jvm_args
    BAZEL_LOG(WARNING) << "ignoring _JAVA_OPTIONS in environment.";
    result["_JAVA_OPTIONS"] = EnvVarValue(EnvVarAction::UNSET, "");
  }

  // TODO(bazel-team): We've also seen a failure during loading (creating
  // threads?) when ulimit -Hs 8192.  Characterize that and check for it here.

  // Make the JVM use ISO-8859-1 for parsing its command line because "blaze
  // run" doesn't handle non-ASCII command line arguments. This is apparently
  // the most reliable way to select the platform default encoding.
  result["LANG"] = EnvVarValue(EnvVarAction::SET, "en_US.ISO-8859-1");
  result["LANGUAGE"] = EnvVarValue(EnvVarAction::SET, "en_US.ISO-8859-1");
  result["LC_ALL"] = EnvVarValue(EnvVarAction::SET, "en_US.ISO-8859-1");
  result["LC_CTYPE"] = EnvVarValue(EnvVarAction::SET, "en_US.ISO-8859-1");

  return result;
}

// Resolves argv0 to an absolute (preferably canonical) path to this binary.
static string CheckAndGetBinaryPath(const string &cwd, const string &argv0) {
  if (blaze_util::IsAbsolute(argv0)) {
    return argv0;
  } else {
    string abs_path = blaze_util::JoinPath(cwd, argv0);
    string resolved_path = blaze_util::MakeCanonical(abs_path.c_str());
    if (!resolved_path.empty()) {
      return resolved_path;
    } else {
      // This happens during our integration tests, but thats okay, as we won't
      // log the invocation anyway.
      return abs_path;
    }
  }
}

// Reads (and deletes) the exit code the server left behind in
// "exit_code_to_use_on_abrupt_exit" under the output base; returns
// INTERNAL_ERROR if the file is missing, undeletable, or not an integer.
static int GetExitCodeForAbruptExit(const blaze_util::Path &output_base) {
  BAZEL_LOG(INFO) << "Looking for a custom exit-code.";
  blaze_util::Path filename =
      output_base.GetRelative("exit_code_to_use_on_abrupt_exit");
  std::string content;
  if (!blaze_util::ReadFile(filename, &content)) {
    BAZEL_LOG(INFO) << "Unable to read the custom exit-code file. "
                    << "Exiting with an INTERNAL_ERROR.";
    return blaze_exit_code::INTERNAL_ERROR;
  }
  // Delete the file so a stale code is not reused by a later invocation.
  if (!blaze_util::UnlinkPath(filename)) {
    BAZEL_LOG(INFO) << "Unable to delete the custom exit-code file. "
                    << "Exiting with an INTERNAL_ERROR.";
    return blaze_exit_code::INTERNAL_ERROR;
  }
  int custom_exit_code;
  if (!blaze_util::safe_strto32(content, &custom_exit_code)) {
    BAZEL_LOG(INFO) << "Content of custom exit-code file not an int: "
                    << content << "Exiting with an INTERNAL_ERROR.";
    return blaze_exit_code::INTERNAL_ERROR;
  }
  BAZEL_LOG(INFO) << "Read exit code " << custom_exit_code
                  << " from custom exit-code file. Exiting accordingly.";
  return custom_exit_code;
}

// Prints "<product> <build label>" for --version.
void PrintVersionInfo(const string &self_path, const string &product_name) {
  string build_label;
  ExtractBuildLabel(self_path, product_name, &build_label);
  printf("%s %s\n", product_name.c_str(), build_label.c_str());
}

// Acquires the client lock, extracts the embedded archive, and dispatches to
// server mode, batch mode, or the normal client/server mode.
static void RunLauncher(const string &self_path,
                        const vector<string> &archive_contents,
                        const string &install_md5,
                        const StartupOptions &startup_options,
                        const OptionProcessor &option_processor,
                        const WorkspaceLayout &workspace_layout,
                        const string &workspace, LoggingInfo *logging_info) {
  blaze_server = new BlazeServer(
      startup_options.connect_timeout_secs, startup_options.batch,
      startup_options.block_for_lock, startup_options.output_base,
      startup_options.server_jvm_out);

  const DurationMillis command_wait_duration_ms(blaze_server->AcquireLock());
  BAZEL_LOG(INFO) << "Acquired the client lock, waited "
                  << command_wait_duration_ms.millis << " milliseconds";

  WarnFilesystemType(startup_options.output_base);

  const DurationMillis extract_data_duration = ExtractData(
      self_path, archive_contents, install_md5, startup_options, logging_info);

  blaze_server->Connect();

  // "shutdown" with no live server is a no-op: nothing to shut down.
  if (!startup_options.batch && "shutdown" == option_processor.GetCommand() &&
      !blaze_server->Connected()) {
    // TODO(b/134525510): Connected() can return false when the server process
    // is alive but unresponsive, so bailing early here might not always be the
    // right thing to do.
    return;
  }

  EnsureCorrectRunningVersion(startup_options, logging_info, blaze_server);

  const blaze_util::Path jvm_path = startup_options.GetJvm();
  const string server_jar_path = GetServerJarPath(archive_contents);

  const vector<string> server_exe_args = GetServerExeArgs(
      jvm_path, server_jar_path, archive_contents, install_md5,
      workspace_layout, workspace, startup_options);

  KillRunningServerIfDifferentStartupOptions(
      startup_options, server_exe_args, logging_info, blaze_server);

  const blaze_util::Path server_exe =
      startup_options.GetExe(jvm_path, server_jar_path);

  const blaze_util::Path server_dir =
      blaze_util::Path(startup_options.output_base).GetRelative("server");

  if (IsServerMode(option_processor.GetCommand())) {
    RunServerMode(server_exe, server_exe_args, server_dir, workspace_layout,
                  workspace, option_processor, startup_options, blaze_server);
  } else if (startup_options.batch) {
    RunBatchMode(server_exe, server_exe_args, workspace_layout, workspace,
                 option_processor, startup_options, logging_info,
                 extract_data_duration, command_wait_duration_ms, blaze_server);
  } else {
    RunClientServerMode(server_exe, server_exe_args, server_dir,
                        workspace_layout, workspace, option_processor,
                        startup_options, logging_info, extract_data_duration,
                        command_wait_duration_ms, blaze_server);
  }
}

// Client entry point: sets up logging and streams, parses startup options,
// fills in configuration defaults, and hands off to RunLauncher.
int Main(int argc, const char *argv[], WorkspaceLayout *workspace_layout,
         OptionProcessor *option_processor, uint64_t start_time) {
  // Logging must be set first to assure no log statements are missed.
  std::unique_ptr<blaze_util::BazelLogHandler> default_handler(
      new blaze_util::BazelLogHandler());
  blaze_util::SetLogHandler(std::move(default_handler));

  const string self_path = GetSelfPath();

  // Handle --version before any option parsing or workspace detection.
  if (argc == 2 && strcmp(argv[1], "--version") == 0) {
    PrintVersionInfo(self_path, option_processor->GetLowercaseProductName());
    return blaze_exit_code::SUCCESS;
  }

  string cwd = GetCanonicalCwd();

  LoggingInfo logging_info(CheckAndGetBinaryPath(cwd, argv[0]), start_time);

  blaze::SetupStdStreams();
  if (argc == 1 && blaze::WarnIfStartedFromDesktop()) {
    // Only check and warn for from-desktop start if there were no args.
    // In this case the user probably clicked Bazel's icon (as opposed to either
    // starting it from a terminal, or as a subprocess with args, or on Windows
    // from a ".lnk" file with some args).
    return blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR;
  }

  // Best-effort operation to raise the resource limits from soft to hard.  We
  // do this early during the main program instead of just before execing the
  // Blaze server binary, because it's easier (for testing purposes) and because
  // the Blaze client also benefits from this (e.g. during installation).
  UnlimitResources();

#if defined(_WIN32) || defined(__CYGWIN__)
  // Must be done before command line parsing.
  // ParseOptionsOrDie already populate --client_env, so detect bash before it
  // happens.
  (void)DetectBashAndExportBazelSh();
#endif  // if defined(_WIN32) || defined(__CYGWIN__)

  const string workspace = workspace_layout->GetWorkspace(cwd);

  ParseOptionsOrDie(cwd, workspace, *option_processor, argc, argv);
  StartupOptions *startup_options = option_processor->GetParsedStartupOptions();
  startup_options->MaybeLogStartupOptionWarnings();

  SetDebugLog(startup_options->client_debug);
  // If client_debug was false, this is ignored, so it's accurate.
  BAZEL_LOG(INFO) << "Debug logging requested, sending all client log "
                     "statements to stderr";

  if (startup_options->unlimit_coredumps) {
    UnlimitCoredumps();
  }

  blaze::CreateSecureOutputRoot(
      blaze_util::Path(startup_options->output_user_root));

  // Only start a server when in a workspace because otherwise we won't do more
  // than emit a help message.
  if (!workspace_layout->InWorkspace(workspace)) {
    startup_options->batch = true;
  }

  vector<string> archive_contents;
  string install_md5;
  DetermineArchiveContents(self_path, startup_options->product_name,
                           &archive_contents, &install_md5);

  UpdateConfiguration(install_md5, workspace,
                      IsServerMode(option_processor->GetCommand()),
                      startup_options);

  RunLauncher(self_path, archive_contents, install_md5, *startup_options,
              *option_processor, *workspace_layout, workspace, &logging_info);
  return 0;
}

// Swallows gRPC's own log output; the client does its own logging.
static void null_grpc_log_function(gpr_log_func_args *args) {}

// There might be a mismatch between std::string and the string type returned
// from protos. This function is the safe way to compare such strings.
template <typename StringTypeA, typename StringTypeB>
static bool ProtoStringEqual(const StringTypeA &cookieA,
                             const StringTypeB &cookieB) {
  // Use memcmp instead of strcmp to deal with null bytes in the cookie.
  auto cookie_length = cookieA.size();
  if (cookie_length != cookieB.size()) {
    return false;
  }
  return memcmp(cookieA.c_str(), cookieB.c_str(), cookie_length) == 0;
}

// Sets up the client-side state (no connection is made yet) and the unnamed
// pipe used to talk to the cancel thread; dies if the pipe cannot be created.
BlazeServer::BlazeServer(const int connect_timeout_secs, const bool batch,
                         const bool block_for_lock,
                         const blaze_util::Path &output_base,
                         const blaze_util::Path &server_jvm_out)
    : connected_(false),
      process_info_(output_base, server_jvm_out),
      connect_timeout_secs_(connect_timeout_secs),
      batch_(batch),
      block_for_lock_(block_for_lock),
      output_base_(output_base) {
  gpr_set_log_function(null_grpc_log_function);

  pipe_.reset(blaze_util::CreatePipe());
  if (!pipe_) {
    BAZEL_DIE(blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR)
        << "Couldn't create pipe: " << GetLastErrorString();
  }
}

// Pings the server with the request cookie and verifies the response cookie;
// returns false on RPC failure, timeout, or cookie mismatch.
bool BlazeServer::TryConnect(CommandServer::Stub *client) {
  grpc::ClientContext context;
  context.set_deadline(std::chrono::system_clock::now() +
                       std::chrono::seconds(connect_timeout_secs_));

  command_server::PingRequest request;
  command_server::PingResponse response;
  request.set_cookie(request_cookie_);

  BAZEL_LOG(INFO) << "Trying to connect to server (timeout: "
                  << connect_timeout_secs_ << " secs)...";
  grpc::Status status = client->Ping(&context, request, &response);

  if (!status.ok() || !ProtoStringEqual(response.cookie(), response_cookie_)) {
    BAZEL_LOG(INFO) << "Connection to server failed: (" << status.error_code()
                    << ") " << status.error_message().c_str() << "\n";
    return false;
  }

  return true;
}

// Reads the server's port and cookies from the "server" directory under the
// output base, verifies the server process, and establishes the gRPC channel.
// Returns false (without dying) on any failure so the caller can start a new
// server.
bool BlazeServer::Connect() {
  assert(!connected_);

  blaze_util::Path server_dir = output_base_.GetRelative("server");
  std::string port;
  std::string ipv4_prefix = "127.0.0.1:";
  std::string ipv6_prefix_1 = "[0:0:0:0:0:0:0:1]:";
  std::string ipv6_prefix_2 = "[::1]:";

  if (!blaze_util::ReadFile(server_dir.GetRelative("command_port"), &port)) {
    return false;
  }

  // Make sure that we are being directed to localhost
  if (port.compare(0, ipv4_prefix.size(), ipv4_prefix) &&
      port.compare(0, ipv6_prefix_1.size(), ipv6_prefix_1) &&
      port.compare(0, ipv6_prefix_2.size(), ipv6_prefix_2)) {
    return false;
  }

  if (!blaze_util::ReadFile(server_dir.GetRelative("request_cookie"),
                            &request_cookie_)) {
    return false;
  }

  if (!blaze_util::ReadFile(server_dir.GetRelative("response_cookie"),
                            &response_cookie_)) {
    return false;
  }

  pid_t server_pid = GetServerPid(blaze_util::Path(server_dir));
  if (server_pid < 0) {
    return false;
  }

  // Guard against a recycled PID belonging to an unrelated process.
  if (!VerifyServerProcess(server_pid, output_base_)) {
    return false;
  }

  grpc::ChannelArguments channel_args;
  // Bazel client and server always run on the same machine and communicate
  // locally over gRPC; so we want to ignore any configured proxies when setting
  // up a gRPC channel to the server.
  channel_args.SetInt(GRPC_ARG_ENABLE_HTTP_PROXY, 0);
  std::shared_ptr<grpc::Channel> channel(grpc::CreateCustomChannel(
      port, grpc::InsecureChannelCredentials(), channel_args));
  std::unique_ptr<CommandServer::Stub> client(CommandServer::NewStub(channel));

  if (!TryConnect(client.get())) {
    return false;
  }

  this->client_ = std::move(client);
  connected_ = true;
  process_info_.server_pid_ = server_pid;
  return true;
}

// Cancellation works as follows:
//
// When the user presses Ctrl-C, a SIGINT is delivered to the client, which is
// translated into a BlazeServer::Cancel() call. Since it's not a good idea to
// do significant work in signal handlers, all it does is write a byte to an
// unnamed pipe.
//
// This unnamed pipe is used to communicate with the cancel thread. Whenever
// something interesting happens, a byte is written into it, which is read by
// the cancel thread. These commands are available:
//
// - NOP
// - JOIN. The cancel thread needs to be terminated.
// - CANCEL. If the command ID is already available, a cancel request is sent.
// - COMMAND_ID_RECEIVED. The client learned the command ID from the server.
//   If there is a pending cancellation request, it is acted upon.
// // The only data the cancellation thread shares with the main thread is the // file descriptor for receiving commands and command_id_, the latter of which // is protected by a mutex, which mainly serves as a memory fence. // // The cancellation thread is joined at the end of the execution of the command. // The main thread wakes it up just so that it can finish (using the JOIN // action) // // It's conceivable that the server is busy and thus it cannot service the // cancellation request. In that case, we simply ignore the failure and the both // the server and the client go on as if nothing had happened (except that this // Ctrl-C still counts as a SIGINT, three of which result in a SIGKILL being // delivered to the server) void BlazeServer::CancelThread() { bool running = true; bool cancel = false; bool command_id_received = false; while (running) { char buf; int error; int bytes_read = pipe_->Receive(&buf, 1, &error); if (bytes_read < 0 && error == blaze_util::IPipe::INTERRUPTED) { continue; } else if (bytes_read != 1) { BAZEL_DIE(blaze_exit_code::INTERNAL_ERROR) << "Cannot communicate with cancel thread: " << GetLastErrorString(); } switch (buf) { case CancelThreadAction::NOTHING: break; case CancelThreadAction::JOIN: running = false; break; case CancelThreadAction::COMMAND_ID_RECEIVED: command_id_received = true; if (cancel) { SendCancelMessage(); cancel = false; } break; case CancelThreadAction::CANCEL: if (command_id_received) { SendCancelMessage(); } else { cancel = true; } break; } } } void BlazeServer::SendCancelMessage() { std::unique_lock<std::mutex> lock(cancel_thread_mutex_); command_server::CancelRequest request; request.set_cookie(request_cookie_); request.set_command_id(command_id_); grpc::ClientContext context; context.set_deadline(std::chrono::system_clock::now() + std::chrono::seconds(10)); command_server::CancelResponse response; // There isn't a lot we can do if this request fails grpc::Status status = client_->Cancel(&context, request, 
&response); if (!status.ok()) { BAZEL_LOG(USER) << "\nCould not interrupt server: (" << status.error_code() << ") " << status.error_message().c_str() << "\n"; } } // This will wait indefinitely until the server shuts down void BlazeServer::KillRunningServer() { assert(connected_); grpc::ClientContext context; command_server::RunRequest request; command_server::RunResponse response; request.set_cookie(request_cookie_); request.set_block_for_lock(block_for_lock_); request.set_client_description("pid=" + blaze::GetProcessIdAsString() + " (for shutdown)"); request.add_arg("shutdown"); BAZEL_LOG(INFO) << "Shutting running server with RPC request"; std::unique_ptr<grpc::ClientReader<command_server::RunResponse>> reader( client_->Run(&context, request)); // TODO(b/111179585): Swallowing these responses loses potential messages from // the server, which may be useful in understanding why a shutdown failed. // However, we don't want to spam the user in case the shutdown works // perfectly fine, so we discard the information. For --noblock_for_lock, this // means that we don't output the PID of the competing client, which isn't // great. We could either store the stderr_output returned by the server and // output it in the case of a failed shutdown, or we could add a // special-cased field in RunResponse for this purpose. while (reader->Read(&response)) { } grpc::Status status = reader->Finish(); if (status.ok()) { // Check the final message from the server to see if it exited because // another command holds the client lock. if (response.finished()) { if (response.exit_code() == blaze_exit_code::LOCK_HELD_NOBLOCK_FOR_LOCK) { assert(!block_for_lock_); BAZEL_DIE(blaze_exit_code::LOCK_HELD_NOBLOCK_FOR_LOCK) << "Exiting because the lock is held and --noblock_for_lock was " "given."; } } // If for any reason the shutdown request failed to initiate a termination, // this is a bug. 
Yes, this means the server won't be forced to shut down, // which might be the preferred behavior, but it will help identify the bug. assert(response.termination_expected()); } // Wait for the server process to terminate (if we know the server PID). // If it does not terminate itself gracefully within 1m, terminate it. if (process_info_.server_pid_ > 0 && !AwaitServerProcessTermination(process_info_.server_pid_, output_base_, kPostShutdownGracePeriodSeconds)) { if (!status.ok()) { BAZEL_LOG(WARNING) << "Shutdown request failed, server still alive: (error code: " << status.error_code() << ", error message: '" << status.error_message() << "', log file: '" << process_info_.jvm_log_file_.AsPrintablePath() << "')"; } KillServerProcess(process_info_.server_pid_, output_base_); } connected_ = false; } unsigned int BlazeServer::Communicate( const string &command, const vector<string> &command_args, const string &invocation_policy, const vector<RcStartupFlag> &original_startup_options, const LoggingInfo &logging_info, const DurationMillis client_startup_duration, const DurationMillis extract_data_duration, const DurationMillis command_wait_duration_ms) { assert(connected_); assert(process_info_.server_pid_ > 0); vector<string> arg_vector; if (!command.empty()) { arg_vector.push_back(command); AddLoggingArgs(logging_info, client_startup_duration, extract_data_duration, command_wait_duration_ms, &arg_vector); } if (!command_args.empty()) { arg_vector.insert(arg_vector.end(), command_args.begin(), command_args.end()); } command_server::RunRequest request; request.set_cookie(request_cookie_); request.set_block_for_lock(block_for_lock_); request.set_client_description("pid=" + blaze::GetProcessIdAsString()); for (const string &arg : arg_vector) { request.add_arg(arg); } if (!invocation_policy.empty()) { request.set_invocation_policy(invocation_policy); } for (const auto &startup_option : original_startup_options) { command_server::StartupOption *proto_option_field = 
request.add_startup_options(); request.add_startup_options(); proto_option_field->set_source(startup_option.source); proto_option_field->set_option(startup_option.value); } grpc::ClientContext context; command_server::RunResponse response; std::unique_ptr<grpc::ClientReader<command_server::RunResponse>> reader( client_->Run(&context, request)); // Release the server lock because the gRPC handles concurrent clients just // fine. Note that this may result in two "waiting for other client" messages // (one during server startup and one emitted by the server) BAZEL_LOG(INFO) << "Releasing client lock, let the server manage concurrent requests."; blaze::ReleaseLock(&blaze_lock_); std::thread cancel_thread(&BlazeServer::CancelThread, this); bool command_id_set = false; bool pipe_broken = false; command_server::RunResponse final_response; bool finished = false; bool finished_warning_emitted = false; while (reader->Read(&response)) { if (finished && !finished_warning_emitted) { BAZEL_LOG(USER) << "\nServer returned messages after reporting exit code"; finished_warning_emitted = true; } if (!ProtoStringEqual(response.cookie(), response_cookie_)) { BAZEL_LOG(USER) << "\nServer response cookie invalid, exiting"; return blaze_exit_code::INTERNAL_ERROR; } const char *broken_pipe_name = nullptr; if (response.finished()) { final_response = response; finished = true; } if (!response.standard_output().empty()) { size_t size = response.standard_output().size(); if (blaze_util::WriteToStdOutErr(response.standard_output().c_str(), size, /* to_stdout */ true) == blaze_util::WriteResult::BROKEN_PIPE) { broken_pipe_name = "standard output"; } } if (!response.standard_error().empty()) { size_t size = response.standard_error().size(); if (blaze_util::WriteToStdOutErr(response.standard_error().c_str(), size, /* to_stdout */ false) == blaze_util::WriteResult::BROKEN_PIPE) { broken_pipe_name = "standard error"; } } if (broken_pipe_name != nullptr && !pipe_broken) { pipe_broken = true; 
BAZEL_LOG(USER) << "\nCannot write to " << broken_pipe_name << "; exiting...\n"; Cancel(); } if (!command_id_set && !response.command_id().empty()) { std::unique_lock<std::mutex> lock(cancel_thread_mutex_); command_id_ = response.command_id(); command_id_set = true; SendAction(CancelThreadAction::COMMAND_ID_RECEIVED); } } // If the server has shut down, but does not terminate itself within a 1m // grace period, terminate it. if (final_response.termination_expected() && !AwaitServerProcessTermination(process_info_.server_pid_, output_base_, kPostShutdownGracePeriodSeconds)) { KillServerProcess(process_info_.server_pid_, output_base_); } SendAction(CancelThreadAction::JOIN); cancel_thread.join(); grpc::Status status = reader->Finish(); if (!status.ok()) { BAZEL_LOG(USER) << "\nServer terminated abruptly (error code: " << status.error_code() << ", error message: '" << status.error_message() << "', log file: '" << process_info_.jvm_log_file_.AsPrintablePath() << "')\n"; return GetExitCodeForAbruptExit(output_base_); } else if (!finished) { BAZEL_LOG(USER) << "\nServer finished RPC without an explicit exit code (log file: '" << process_info_.jvm_log_file_.AsPrintablePath() << "')\n"; return GetExitCodeForAbruptExit(output_base_); } else if (final_response.has_exec_request()) { const command_server::ExecRequest& request = final_response.exec_request(); if (request.argv_size() < 1) { BAZEL_LOG(USER) << "\nServer requested exec() but did not pass a binary to execute\n"; return blaze_exit_code::INTERNAL_ERROR; } vector<string> argv; argv.insert(argv.begin(), request.argv().begin(), request.argv().end()); for (const auto& variable : request.environment_variable()) { SetEnv(variable.name(), variable.value()); } if (!blaze_util::ChangeDirectory(request.working_directory())) { BAZEL_DIE(blaze_exit_code::INTERNAL_ERROR) << "changing directory into " << request.working_directory() << " failed: " << GetLastErrorString(); } // Execute the requested program, but before doing so, 
flush everything // we still have to say. fflush(NULL); ExecuteRunRequest(blaze_util::Path(request.argv(0)), argv); } // We'll exit with exit code SIGPIPE on Unixes due to PropagateSignalOnExit() return pipe_broken ? blaze_exit_code::LOCAL_ENVIRONMENTAL_ERROR : final_response.exit_code(); } void BlazeServer::SendAction(CancelThreadAction action) { char msg = action; if (!pipe_->Send(&msg, 1)) { blaze::SigPrintf( "\nCould not interrupt server (cannot write to client pipe)\n\n"); } } void BlazeServer::Cancel() { assert(connected_); SendAction(CancelThreadAction::CANCEL); } } // namespace blaze
/**
 * Copyright (c) Glow Contributors. See CONTRIBUTORS file.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "glow/Importer/ONNXModelLoader.h"

#include "glow/Base/Tensor.h"
#include "glow/Graph/Graph.h"
#include "glow/Graph/Nodes.h"
#include "glow/Support/ZipUtils.h"

#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"

#include "google/protobuf/io/coded_stream.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"

#include <cstddef>
#include <cstdint>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>

using namespace glow;
using llvm::cast;

namespace {

/// Command-line category grouping all ONNX model loader options.
llvm::cl::OptionCategory onnxModelLoaderCat("ONNX Model Loader Options");

/// Backing storage for -onnx-define-symbol values; each entry has the raw
/// "name,value" form and is parsed lazily by getSymbolMap().
std::vector<std::string> onnxDefineSymbol;
llvm::cl::list<std::string, std::vector<std::string>> onnxDefineSymbolOpt(
    "onnx-define-symbol", llvm::cl::ZeroOrMore,
    llvm::cl::location(onnxDefineSymbol),
    llvm::cl::desc(
        "Define (replace) the undefined symbols from the tensor descriptions\n"
        "in the ONNX model with actual integer sizes. The undefined symbols \n"
        "are marked in the proto description with the 'dim_param' field. For\n"
        "example, if the model contains a tensor with the size described as \n"
        "'None' x 3 x 224 x 224, the symbol 'None' can be replaced with an \n"
        "actual integer size (for example 1) by using the following command \n"
        "line option: \n"
        " -onnx-define-symbol=None,1 \n"
        "Multiple symbols can be defined using this option, for example: \n"
        " -onnx-define-symbol=<symbol_name1>,<symbol_value1> \n"
        " -onnx-define-symbol=<symbol_name2>,<symbol_value2> \n"
        " ..................................................\n"),
    llvm::cl::value_desc("name,value"), llvm::cl::cat(onnxModelLoaderCat));

/// Parse the command line option and get the user defined map of symbols.
/// The command line option has the format <symbol_name>,<symbol_value>.
/// \returns an Error if a name is empty or a value is not a valid integer.
Expected<std::unordered_map<std::string, dim_t>> getSymbolMap() {
  std::unordered_map<std::string, dim_t> symbolMap;
  for (const auto &str : onnxDefineSymbol) {
    auto strPair = llvm::StringRef(str).split(',');
    llvm::StringRef name = strPair.first;
    RETURN_ERR_IF_NOT(name.size() > 0, "ONNX defined symbol name is empty.");
    dim_t value;
    // getAsInteger returns true on failure; radix 0 auto-detects the base.
    RETURN_ERR_IF_NOT(!strPair.second.getAsInteger(0, value),
                      strFormat("ONNX defined symbol value '%s' is invalid.",
                                strPair.second.data()));
    symbolMap[name.str()] = value;
  }
  return symbolMap;
}

/// Get the shape of a TensorShapeProto given by \p shapeProto. \returns the
/// dimensions as a vector, resolving symbolic "dim_param" entries through the
/// -onnx-define-symbol map, or an Error if a symbol is undefined or a
/// dimension has neither "dim_value" nor "dim_param".
Expected<std::vector<dim_t>>
getProtoShape(const ONNX_NAMESPACE::TensorShapeProto &shapeProto) {
  std::vector<dim_t> dim;
  for (auto d : shapeProto.dim()) {
    if (d.has_dim_value()) {
      // Proto shape has an explicit size given by the "dim_value" field.
      dim.push_back(d.dim_value());
    } else if (d.has_dim_param()) {
      // Proto shape has a symbolic size given by the "dim_param" field. Search
      // the symbol in the user defined map of symbols. If the symbol is not
      // found then raise an error.
      // NOTE(review): the symbol map is re-parsed for every symbolic
      // dimension; hoisting one getSymbolMap() call out of the loop looks
      // possible but would surface parse errors even for shapes without
      // dim_param — confirm before changing.
      auto symbolName = d.dim_param();
      std::unordered_map<std::string, dim_t> symbolMap;
      ASSIGN_VALUE_OR_RETURN_ERR(symbolMap, getSymbolMap());
      if (symbolMap.count(symbolName)) {
        dim.push_back(symbolMap[symbolName]);
      } else {
        RETURN_ERR(strFormat(
            "ONNX model symbol '%s' is undefined. Define the symbol with the "
            "following command line option: -onnx-define-symbol=%s,<value>.",
            symbolName.c_str(), symbolName.c_str()));
      }
    } else {
      // Proto shape has no "dim_value" and no "dim_param" field.
      RETURN_ERR("Tensor shape proto has no 'dim_value' or 'dim_param' field!");
    }
  }
  return dim;
}

/// Given some \p onnxType, sets \p elemTy to a corresponding Glow
/// ElemKind. \returns whether an ElemKind was successfully selected.
/// Note: UINT8 maps to UInt8FusedQTy and INT8/INT16 map to quantized kinds.
Error onnxTensorDataTypeToElemKind(int32_t onnxType, ElemKind *elemTy) {
  if (onnxType == ONNX_NAMESPACE::TensorProto::FLOAT) {
    *elemTy = ElemKind::FloatTy;
    return Error::success();
  } else if (onnxType == ONNX_NAMESPACE::TensorProto::FLOAT16) {
    *elemTy = ElemKind::Float16Ty;
    return Error::success();
  } else if (onnxType == ONNX_NAMESPACE::TensorProto::INT64) {
    *elemTy = ElemKind::Int64ITy;
    return Error::success();
  } else if (onnxType == ONNX_NAMESPACE::TensorProto::INT32) {
    *elemTy = ElemKind::Int32ITy;
    return Error::success();
  } else if (onnxType == ONNX_NAMESPACE::TensorProto::UINT8) {
    *elemTy = ElemKind::UInt8FusedQTy;
    return Error::success();
  } else if (onnxType == ONNX_NAMESPACE::TensorProto::INT8) {
    *elemTy = ElemKind::Int8QTy;
    return Error::success();
  } else if (onnxType == ONNX_NAMESPACE::TensorProto::INT16) {
    *elemTy = ElemKind::Int16QTy;
    return Error::success();
  } else if (onnxType == ONNX_NAMESPACE::TensorProto::BOOL) {
    *elemTy = ElemKind::BoolTy;
    return Error::success();
  } else {
    RETURN_ERR(strFormat(
        "Don't know how to convert ONNX tensor data type %d to ElemKind",
        onnxType));
  }
}

/// Convert a string to int. \returns the int or Error if problem parsing.
Expected<int> getIntFromStr(llvm::StringRef input) {
  const char *start = input.data();
  char *end;
  // Parse as long first; strtol saturates to LONG_MIN/LONG_MAX on overflow,
  // so an explicit range check below catches values that do not fit in int
  // (previously the long result was silently truncated into an int).
  long val = std::strtol(start, &end, 10);
  RETURN_ERR_IF_NOT(!(end == start || *end != '\0'),
                    "Integer was not properly specified.");
  RETURN_ERR_IF_NOT(val >= std::numeric_limits<int>::min() &&
                        val <= std::numeric_limits<int>::max(),
                    "Integer is out of range.");
  return static_cast<int>(val);
}

/// Finds an attribute from the doc_string and \returns it. If it does not exist
/// then \returns Error. The expected structure here is that each attribute
/// starts with startChar and is separated from its value by a sepChar.
Expected<std::string> getAttrFromDocString(const std::string &attr,
                                           const std::string &docStr) {
  const std::string attrAndSep = attr + sepChar;
  size_t begin = 0;
  while (true) {
    begin = docStr.find(startChar, begin);
    if (begin == std::string::npos) {
      return MAKE_ERR(strFormat("Didn't find PH attribute '%s'", attr.c_str()));
    }

    // Note: +1 here and following line to account for the leading startChar.
    if (!docStr.compare(begin + 1, attrAndSep.size(), attrAndSep)) {
      // If we found the attribute then set begin to just after attrAndSep.
      begin += attrAndSep.size() + 1;
      break;
    }
    // Move past the current non-matching attribute to try the next attribute.
    begin = begin + attrAndSep.size();
  }

  // Value runs until the next startChar (or the end of the doc string).
  return docStr.substr(begin, docStr.find(startChar, begin) - begin);
}

/// Parses the quantization scale and offset attributes out of \p docStr.
/// \returns the (scale, offset) pair or Error if either attribute is missing
/// or the offset is not a valid integer.
Expected<std::pair<float, int32_t>>
getQuantParamsFromDocString(const std::string &docStr) {
  std::string scaleStr;
  ASSIGN_VALUE_OR_RETURN_ERR(scaleStr,
                             getAttrFromDocString(qScaleSignifier, docStr));
  // NOTE(review): strtof has no error check here — a malformed scale string
  // silently yields 0.0f; consider validating like the offset below.
  float scale = std::strtof(scaleStr.c_str(), NULL);

  std::string offsetStr;
  ASSIGN_VALUE_OR_RETURN_ERR(offsetStr,
                             getAttrFromDocString(qOffsetSignifier, docStr));
  int32_t offset;
  ASSIGN_VALUE_OR_RETURN_ERR(offset, getIntFromStr(offsetStr));

  return std::make_pair(scale, offset);
}

/// Used for retrieving an attribute of type \p T from \p attr. Some
/// specializations used \p loader if necessary.
/// Primary template: dispatched on IsInteger so all integral types share one
/// specialization below. Declared (not defined) so an unsupported T fails to
/// link/compile rather than silently misbehaving. Note: the primary template
/// takes the loader by const&, while NodeValue-related specializations take it
/// by non-const& because getNodeValueByName() is non-const.
template <bool IsInteger, typename T> struct AttributeRetriever {
  static Expected<T> get(const ONNX_NAMESPACE::AttributeProto *attr,
                         const ProtobufLoader &loader);
};

/// Specialization for std::vector<float>.
template <> struct AttributeRetriever<false, std::vector<float>> {
  static Expected<std::vector<float>>
  get(const ONNX_NAMESPACE::AttributeProto *attr,
      const ProtobufLoader & /* unused */) {
    return getFloats(attr);
  }
};

/// Specialization for std::vector<NodeValue>.
template <> struct AttributeRetriever<false, std::vector<NodeValue>> {
  static Expected<std::vector<NodeValue>>
  get(const ONNX_NAMESPACE::AttributeProto *attr, ProtobufLoader &loader) {
    // Retrieve the names from the proto which map to NodeValues.
    std::vector<std::string> strs;
    ASSIGN_VALUE_OR_RETURN_ERR(strs, getStrings(attr));

    // Get NodeValues corresponding to these names from the loader.
    std::vector<NodeValue> NVs;
    for (const auto &str : strs) {
      NodeValue NV;
      ASSIGN_VALUE_OR_RETURN_ERR(NV, loader.getNodeValueByName(str));
      NVs.push_back(NV);
    }
    return NVs;
  }
};

/// Specialization for NodeValue.
template <> struct AttributeRetriever<false, NodeValue> {
  static Expected<NodeValue> get(const ONNX_NAMESPACE::AttributeProto *attr,
                                 ProtobufLoader &loader) {
    // Retrieve the name from the proto, which is mapped to a NodeValue.
    std::string str;
    ASSIGN_VALUE_OR_RETURN_ERR(str, loadStr(attr));

    // Get/return the corresponding NodeValue for this name from the loader.
    NodeValue NV;
    ASSIGN_VALUE_OR_RETURN_ERR(NV, loader.getNodeValueByName(str));
    return NV;
  }
};

/// Specialization for std::vector<T>. Fall back for integer types.
/// (Partial specialization: less specialized than the full vector<float> and
/// vector<NodeValue> specializations above, so it only catches the rest.)
template <typename T> struct AttributeRetriever<false, std::vector<T>> {
  static Expected<std::vector<T>>
  get(const ONNX_NAMESPACE::AttributeProto *attr,
      const ProtobufLoader & /* unused */) {
    return getShape<T>(attr, /* allowEmptyShape */ true);
  }
};

/// Specialization for integer types.
template <typename T> struct AttributeRetriever<true, T> {
  static Expected<T> get(const ONNX_NAMESPACE::AttributeProto *attr,
                         const ProtobufLoader & /* unused */) {
    return loadInt(attr);
  }
};

/// Specialization for LengthsMode. Parsed from a string attribute.
template <> struct AttributeRetriever<false, LengthsMode> {
  static Expected<LengthsMode> get(const ONNX_NAMESPACE::AttributeProto *attr,
                                   const ProtobufLoader & /* unused */) {
    std::string str;
    ASSIGN_VALUE_OR_RETURN_ERR(str, loadStr(attr));
    if (str == "AllOne") {
      return LengthsMode::AllOne;
    } else if (str == "Variable") {
      return LengthsMode::Variable;
    } else {
      return MAKE_ERR("Invalid LengthsMode");
    }
  }
};

/// Specialization for FusedActivation. Parsed from a string attribute.
template <> struct AttributeRetriever<false, FusedActivation> {
  static Expected<FusedActivation>
  get(const ONNX_NAMESPACE::AttributeProto *attr,
      const ProtobufLoader & /* unused */) {
    std::string str;
    ASSIGN_VALUE_OR_RETURN_ERR(str, loadStr(attr));
    if (str == "NONE") {
      return FusedActivation::NONE;
    } else if (str == "RELU") {
      return FusedActivation::RELU;
    } else if (str == "TANH") {
      return FusedActivation::TANH;
    } else if (str == "SIGMOID") {
      return FusedActivation::SIGMOID;
    } else {
      return MAKE_ERR("Invalid FusedActivation");
    }
  }
};

/// Specialization for ConvolutionLayout. Parsed from a string attribute.
template <> struct AttributeRetriever<false, ConvolutionLayout> {
  static Expected<ConvolutionLayout>
  get(const ONNX_NAMESPACE::AttributeProto *attr,
      const ProtobufLoader & /* unused */) {
    std::string str;
    ASSIGN_VALUE_OR_RETURN_ERR(str, loadStr(attr));
    if (str == "NHWC") {
      return ConvolutionLayout::NHWC;
    } else if (str == "NCHW") {
      return ConvolutionLayout::NCHW;
    } else {
      return MAKE_ERR("Invalid ConvolutionLayout");
    }
  }
};

/// Specialization for PaddingMode. Parsed from a string attribute.
template <> struct AttributeRetriever<false, PaddingMode> {
  static Expected<PaddingMode> get(const ONNX_NAMESPACE::AttributeProto *attr,
                                   const ProtobufLoader & /* unused */) {
    std::string str;
    ASSIGN_VALUE_OR_RETURN_ERR(str, loadStr(attr));
    if (str == "CONSTANT") {
      return PaddingMode::CONSTANT;
    } else if (str == "REFLECT") {
      return PaddingMode::REFLECT;
    } else if (str == "EDGE") {
      return PaddingMode::EDGE;
    } else {
      return MAKE_ERR("Invalid PaddingMode");
    }
  }
};

/// Specialization for float.
template <> struct AttributeRetriever<false, float> {
  static Expected<float> get(const ONNX_NAMESPACE::AttributeProto *attr,
                             const ProtobufLoader & /* unused */) {
    return loadFloat(attr);
  }
};

/// Specialization for std::string.
template <> struct AttributeRetriever<false, std::string> {
  static Expected<std::string> get(const ONNX_NAMESPACE::AttributeProto *attr,
                                   const ProtobufLoader & /* unused */) {
    return loadStr(attr);
  }
};

/// Forwards to the correct AttributeRetriever specialization.
/// \returns the attribute value of type \p T, or Error if \p attr is null or
/// retrieval fails. Integral T is routed via std::numeric_limits<T>::is_integer
/// to the single integer specialization.
template <typename T>
Expected<T> loadAttribute(const ONNX_NAMESPACE::AttributeProto *attr,
                          ProtobufLoader &loader) {
  RETURN_ERR_IF_NOT(attr, "No such attribute");
  return AttributeRetriever<std::numeric_limits<T>::is_integer, T>::get(attr,
                                                                        loader);
}

} // namespace

/// Map from attribute name to its proto, for random access to a node's
/// attributes.
using ArgumentDictionaryTy =
    std::unordered_map<std::string, const ONNX_NAMESPACE::AttributeProto *>;

/// Given a docstring encoding \p str of a type and its dimension \p
/// dims, parses the string and \returns a Glow Type from it or Error if
/// parsing failed. Expected format of str is either elemKindSignifier or
/// "ElemKind:scale:offset".
Expected<Type> parseTypeFromDocString(const std::string &str,
                                      llvm::ArrayRef<dim_t> dims,
                                      bool useGlowCustomOps) {
  // Defaults used when the docstring carries no quantization parameters.
  float scale = 1.0;
  int32_t offset = 0;
  ElemKind elemKind = ElemKind::FloatTy;

  if (useGlowCustomOps) {
    // Glow custom-op docstrings use the attribute encoding handled by
    // getAttrFromDocString()/getQuantParamsFromDocString().
    std::string elemKindStr;
    ASSIGN_VALUE_OR_RETURN_ERR(elemKindStr,
                               getAttrFromDocString(elemKindSignifier, str));
    elemKind = Type::getElementKindFromName(elemKindStr);

    if (isQuantizedElemKind(elemKind)) {
      std::pair<float, int32_t> scaleOffsetPair;
      ASSIGN_VALUE_OR_RETURN_ERR(scaleOffsetPair,
                                 getQuantParamsFromDocString(str));
      std::tie(scale, offset) = scaleOffsetPair;
    }
  } else {
    // Legacy colon-separated encoding: "ElemKind[:scale:offset]".
    size_t begin = 0;

    // Find Elemkind string
    size_t end = str.find(':', begin);

    // If a ':' isn't found then assume the whole string is ElemKind (for
    // backwards compatibility reasons) otherwise look for scale and offset
    // strings.
    std::string elemKindStr;
    if (end == std::string::npos) {
      elemKindStr = str.substr(0, str.size());
    } else {
      elemKindStr = str.substr(begin, end - begin);

      // Get scale string.
      begin = end + 1;
      end = str.find(':', begin);
      if (end == std::string::npos) {
        return MAKE_ERR("scale not found");
      }
      std::string scaleStr = str.substr(begin, end - begin);

      // Get offset string.
      begin = end + 1;
      end = str.size();
      if (end - begin == 0) {
        return MAKE_ERR("offset not found");
      }
      std::string offsetStr = str.substr(begin, end - begin);

      // NOTE(review): std::stof/std::stoi throw on malformed input, escaping
      // this Expected-returning function as an exception — consider parsing
      // with error checks like getIntFromStr() instead.
      scale = std::stof(scaleStr);
      offset = std::stoi(offsetStr);
    }
    elemKind = Type::getElementKindFromName(elemKindStr);
  }

  // Quantized kinds carry scale/offset; non-quantized kinds must not.
  if (isQuantizedElemKind(elemKind)) {
    return Type(elemKind, dims, scale, offset);
  } else {
    return Type(elemKind, dims);
  }
}

/// Translates the protocol buffer node \p op into a random access map.
static ArgumentDictionaryTy
loadArgumentMap(const ONNX_NAMESPACE::NodeProto &op) {
  ArgumentDictionaryTy dict;
  // The map stores pointers into \p op's attribute list, so it must not
  // outlive \p op.
  for (auto &arg : op.attribute()) {
    dict[arg.name()] = &arg;
  }
  return dict;
}

/// Programmatically set the symbol map otherwise populated from the
/// -onnx-define-symbol command line option.
void glow::setOnnxDefineSymbol(const std::vector<std::string> &strs) {
  onnxDefineSymbol = strs;
}

/// Parse a binary GraphProto from \p fileName. Note: failures abort the
/// process via CHECK rather than returning an Error.
ONNX_NAMESPACE::GraphProto glow::parseOnnxFile(const std::string &fileName) {
  ::ONNX_NAMESPACE::GraphProto graphProto;
  std::ifstream inputFileStream(fileName, std::ios::in | std::ios::binary);
  CHECK(inputFileStream) << "Can't find the input file for " << fileName;
  google::protobuf::io::IstreamInputStream protobufFileStream(&inputFileStream);
  google::protobuf::io::CodedInputStream codedStream(&protobufFileStream);
  // Lift protobuf's default message-size limit so large models parse.
  // NOTE(review): the two-argument SetTotalBytesLimit overload is deprecated
  // in newer protobuf releases — confirm the pinned protobuf version.
  codedStream.SetTotalBytesLimit(MAX_PROTO_SIZE, MAX_PROTO_SIZE);
  bool parsedSuccessfully = graphProto.ParseFromCodedStream(&codedStream);
  CHECK(parsedSuccessfully) << "Failed to parse GraphProto";
  return graphProto;
}

/// Copy each initializer tensor in \p inputGroup into the matching (by name)
/// placeholder in \p bindings. If a loaded tensor is smaller than its
/// placeholder's full type, it is either kept as a partial tensor (when
/// \p partialTensorPayloads is non-null) or zero-padded to the full size.
/// Failures abort via CHECK.
void glow::fillPlaceholders(const ONNX_NAMESPACE::GraphProto &inputGroup,
                            PlaceholderBindings *bindings,
                            std::vector<Tensor> *partialTensorPayloads,
                            bool usingGlowCustomOps) {
  for (const auto &tensorProto : inputGroup.initializer()) {
    auto *tensor =
        bindings->get(bindings->getPlaceholderByName(tensorProto.name()));
    CHECK(tensor);
    size_t fullSize = tensor->getSizeInBytes();
    const auto fullType = tensor->getType();
    auto error = loadTensor(tensorProto, tensor, usingGlowCustomOps);
    bool hasError = ERR_TO_BOOL(std::move(error));
    CHECK(!hasError) << "Cannot load input tensor";
    size_t loadedSize = tensor->getSizeInBytes();
    if (loadedSize != fullSize) {
      if (partialTensorPayloads) {
        VLOG(1) << "Loading " << tensorProto.name()
                << " as a partial tensor: partial size="
                << tensor->getType().toString()
                << " full size=" << fullType.toString();
        // View of the partial data with the full type; does not own memory.
        Tensor fullTensor(tensor->getUnsafePtr(), &fullType,
                          tensor->getSizeInBytes());
        // 'fullTensor' doesn't own the underlying data. 'tensor' does. So
        // we want to keep the original tensor object around until inference
        // is finished.
        partialTensorPayloads->emplace_back(std::move(*tensor));
        *tensor = std::move(fullTensor);
      } else {
        // pad with 0
        VLOG(1) << "Loading and padding " << tensorProto.name()
                << " as a partial tensor: partial size="
                << tensor->getType().toString()
                << " full size=" << fullType.toString();
        Tensor fullTensor(&fullType);
        // Copy the loaded prefix, then zero the remainder.
        std::memcpy(fullTensor.getUnsafePtr(), tensor->getUnsafePtr(),
                    tensor->getSizeInBytes());
        std::memset(fullTensor.getUnsafePtr() + tensor->getSizeInBytes(), 0,
                    fullTensor.getSizeInBytes() - tensor->getSizeInBytes());
        *tensor = std::move(fullTensor);
      }
    }
  }
}

/// Convenience overload: parse \p fileName and fill placeholders from it.
void glow::fillPlaceholders(const std::string &fileName,
                            PlaceholderBindings *bindings,
                            std::vector<Tensor> *partialTensorPayloads,
                            bool usingGlowCustomOps) {
  const ONNX_NAMESPACE::GraphProto &inputGroup = parseOnnxFile(fileName);
  fillPlaceholders(inputGroup, bindings, partialTensorPayloads,
                   usingGlowCustomOps);
}

/// Loads tensor \p T from the input \p in.
Error glow::loadTensor(const ONNX_NAMESPACE::TensorProto &in, Tensor *T,
                       bool useGlowCustomOps) {
  std::vector<dim_t> dim;
  for (auto d : in.dims()) {
    dim.push_back(d);
  }

  // Each data type supports either the proto's typed repeated-field payload
  // (e.g. float_data) or the packed raw_data payload.
  if (in.data_type() == ONNX_NAMESPACE::TensorProto::FLOAT) {
    T->reset(ElemKind::FloatTy, dim);

    if (in.float_data_size() > 0) {
      auto TH = T->getHandle<>();
      size_t i = 0;
      for (auto f : in.float_data()) {
        TH.raw(i++) = f;
      }
    } else if (in.has_raw_data()) {
      std::istringstream inStream(in.raw_data(), std::stringstream::binary);
      inStream.read(T->getUnsafePtr(), T->size() * sizeof(float));
    } else {
      RETURN_ERR("Unsupported Tensor format.",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
    }
  } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::FLOAT16) {
    T->reset(ElemKind::Float16Ty, dim);

    if (in.has_raw_data()) {
      std::istringstream inStream(in.raw_data(), std::stringstream::binary);
      // sizeof(float) / 2 == 2 bytes per fp16 element.
      inStream.read(T->getUnsafePtr(), T->size() * (sizeof(float) / 2));
    } else {
      RETURN_ERR("Unsupported Tensor format.",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
    }
  } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::INT64) {
    T->reset(ElemKind::Int64ITy, dim);

    if (in.int64_data_size() > 0) {
      auto TH = T->getHandle<int64_t>();
      size_t i = 0;
      for (auto f : in.int64_data()) {
        TH.raw(i++) = f;
      }
    } else if (in.has_raw_data()) {
      std::istringstream inStream(in.raw_data(), std::stringstream::binary);
      inStream.read(T->getUnsafePtr(), T->size() * sizeof(int64_t));
    } else {
      RETURN_ERR("Unsupported Tensor format.",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
    }
  } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::INT8) {
    // Quantized kinds: scale/offset come from the doc_string-encoded type.
    Type ty;
    ASSIGN_VALUE_OR_RETURN_ERR(
        ty, parseTypeFromDocString(in.doc_string(), dim, useGlowCustomOps));
    T->reset(ty);

    if (in.has_raw_data()) {
      std::istringstream inStream(in.raw_data(), std::stringstream::binary);
      inStream.read(T->getUnsafePtr(), T->size() * sizeof(int8_t));
    } else {
      RETURN_ERR("Unsupported Tensor format.",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
    }
  } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::INT16) {
    Type ty;
    ASSIGN_VALUE_OR_RETURN_ERR(
        ty, parseTypeFromDocString(in.doc_string(), dim, useGlowCustomOps));
    T->reset(ty);

    if (in.has_raw_data()) {
      std::istringstream inStream(in.raw_data(), std::stringstream::binary);
      inStream.read(T->getUnsafePtr(), T->size() * sizeof(int16_t));
    } else {
      RETURN_ERR("Unsupported Tensor format.",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
    }
  } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::INT32) {
    if (in.has_doc_string()) {
      Type ty;
      ASSIGN_VALUE_OR_RETURN_ERR(
          ty, parseTypeFromDocString(in.doc_string(), dim, useGlowCustomOps));
      T->reset(ty);
    } else {
      // There are few cases when we will have int32 tensors. For example, the
      // second output of Concat from Caffe2 concat op is int32
      T->reset(ElemKind::Int32ITy, dim);
    }

    if (in.int32_data_size() > 0) {
      auto TH = T->getHandle<int32_t>();
      size_t i = 0;
      for (auto f : in.int32_data()) {
        TH.raw(i++) = f;
      }
    } else if (in.has_raw_data()) {
      std::istringstream inStream(in.raw_data(), std::stringstream::binary);
      inStream.read(T->getUnsafePtr(), T->size() * sizeof(int32_t));
    } else {
      RETURN_ERR("Unsupported Tensor format.",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
    }
  } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::UINT8) {
    Type ty;
    ASSIGN_VALUE_OR_RETURN_ERR(
        ty, parseTypeFromDocString(in.doc_string(), dim, useGlowCustomOps));
    T->reset(ty);

    if (in.has_raw_data()) {
      std::istringstream inStream(in.raw_data(), std::stringstream::binary);
      inStream.read(T->getUnsafePtr(), T->size() * sizeof(uint8_t));
    } else {
      RETURN_ERR("Unsupported Tensor format.",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
    }
  } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::BOOL) {
    T->reset(ElemKind::BoolTy, dim);

    if (in.has_raw_data()) {
      std::istringstream inStream(in.raw_data(), std::stringstream::binary);
      // NOTE(review): assumes sizeof(bool) == 1 matches the proto's packed
      // bool layout — true on common ABIs, but not guaranteed by the standard.
      inStream.read(T->getUnsafePtr(), T->size() * sizeof(bool));
    } else {
      RETURN_ERR("Unsupported Tensor format.",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
    }
  } else {
    RETURN_ERR(strFormat("Unsupported tensor data type: %u",
                         static_cast<unsigned>(in.data_type())),
               ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);
  }
  return Error::success();
}

/// Reset \p T to the type described by ValueInfoProto \p in: shape from the
/// proto's tensor shape, element kind (and scale/offset for quantized kinds)
/// from either the Glow doc_string encoding or the ONNX element type.
Error ONNXModelLoader::setTensorType(const ONNX_NAMESPACE::ValueInfoProto &in,
                                     Tensor *T) {
  auto type = in.type();

  std::vector<dim_t> dim;
  ASSIGN_VALUE_OR_RETURN_ERR(dim, getProtoShape(type.tensor_type().shape()));

  ElemKind kind = ElemKind::FloatTy;
  float scale = 1.0;
  int32_t offset = 0;
  if (useGlowCustomOps_) {
    std::string elemKindStr;
    ASSIGN_VALUE_OR_RETURN_ERR(
        elemKindStr, getAttrFromDocString(elemKindSignifier, in.doc_string()));
    kind = Type::getElementKindFromName(elemKindStr);
    if (isQuantizedElemKind(kind)) {
      std::pair<float, int32_t> scaleOffsetPair;
      ASSIGN_VALUE_OR_RETURN_ERR(scaleOffsetPair,
                                 getQuantParamsFromDocString(in.doc_string()));
      std::tie(scale, offset) = scaleOffsetPair;
    }
  } else {
    // Retrieve the ElemKind from the ONNX type, including considerations for
    // whether the datatype is quantized.
    RETURN_IF_ERR(
        onnxTensorDataTypeToElemKind(type.tensor_type().elem_type(), &kind));
  }

  // If quantized then retrieve the scale and offset if provided (may not be for
  // fused quantized types since they're ignored anyway).
  if (isQuantizedElemKind(kind)) {
    assert(useGlowCustomOps_ &&
           "Quantized loading not fully supported without custom Glow ops.");
    T->reset(kind, dim, scale, offset);
  } else {
    T->reset(kind, dim);
  }
  return Error::success();
}

/// Register each graph input of \p net that is not already a constant, either
/// as a Placeholder (when \p loadInputsAsPlaceholders) or as a Constant.
Error ONNXModelLoader::loadInputs(ONNX_NAMESPACE::GraphProto &net,
                                  bool loadInputsAsPlaceholders) {
  for (const auto &in : net.input()) {
    // Skip static weights.
    if (getConstantByNameOrNull(in.name())) {
      continue;
    }

    if (loadInputsAsPlaceholders) {
      Tensor T;
      RETURN_IF_ERR(setTensorType(in, &T));

      // Trainability/layout are only encoded in Glow custom-op docstrings;
      // otherwise default to non-trainable with an unspecified layout.
      std::string isTrainable = "0";
      std::string layout = ANY_LAYOUT;
      if (useGlowCustomOps_) {
        ASSIGN_VALUE_OR_RETURN_ERR(
            isTrainable,
            getAttrFromDocString(trainableSignifier, in.doc_string()));
        ASSIGN_VALUE_OR_RETURN_ERR(
            layout, getAttrFromDocString(layoutSignifier, in.doc_string()));
      }
      Placeholder *placeholder;
      ASSIGN_VALUE_OR_RETURN_ERR(
          placeholder,
          createAndRegisterPlaceholder(in.name(), &T.getType(),
                                       staticInputs_.count(in.name()),
                                       isTrainable != "0", layout));
      inputVarsByName_.try_emplace(in.name(), placeholder);
    } else {
      Tensor T;
      RETURN_IF_ERR(setTensorType(in, &T));
      RETURN_IF_ERR(createAndRegisterConstant(in.name(), std::move(T)));
    }
  }
  return Error::success();
}

/// \returns whether the op should broadcast, from the 'broadcast' attribute
/// for old opsets; always true from opset 7 on.
Expected<bool> ONNXModelLoader::getBroadcast(ArgumentDictionaryTy &dict) {
  // Starting with opset 7, broadcasting is implicit and doesn't require any
  // attribute.
  if (opsetVersion_ > 6) {
    return true;
  }
  if (!dict.count("broadcast")) {
    return false;
  }

  int broadcast;
  ASSIGN_VALUE_OR_RETURN_ERR(broadcast, loadInt(dict.at("broadcast")));
  return broadcast == 1;
}

bool ONNXModelLoader::hasMultidirectionalBroadcast(
    const llvm::StringRef typeName) {
  // Before opset 7, broadcasting was unidirectional.
  if (opsetVersion_ > 6) {
    if ((typeName == "Add") || (typeName == "Sub") || (typeName == "Mul") ||
        (typeName == "Div")) {
      return true;
    }
    // TODO: some other operators also support multidirectional broadcasting.
  }
  return false;
}

/// Map an ONNX TensorProto data type \p t to a Glow ElemKind, or Error for
/// unsupported types.
Expected<ElemKind> ONNXModelLoader::convertTensorProtoDataType(
    ONNX_NAMESPACE::TensorProto_DataType t) {
  switch (t) {
  case ONNX_NAMESPACE::TensorProto_DataType_FLOAT:
    return ElemKind::FloatTy;
  case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16:
    return ElemKind::Float16Ty;
  case ONNX_NAMESPACE::TensorProto_DataType_INT32:
    return ElemKind::Int32ITy;
  case ONNX_NAMESPACE::TensorProto_DataType_INT64:
    return ElemKind::Int64ITy;
  default:;
  }
  RETURN_ERR("Non supported ONNX type");
}

/// Record the model's ir_version and the opset version of the default
/// (empty-domain) opset import. Rejects ir_version < 3 and a missing opset.
/// NOTE(review): MP is taken by value — a const& would avoid copying the
/// whole ModelProto; confirm no caller relies on the copy.
Error ONNXModelLoader::setVersion(ONNX_NAMESPACE::ModelProto MP) {
  irVersion_ = MP.ir_version();
  opsetVersion_ = 0;
  RETURN_ERR_IF_NOT(
      irVersion_ >= 3,
      "This ONNX model with ir_version < 3 is too old to be supported.",
      ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_ONNX_VERSION);
  for (const auto &imp : MP.opset_import()) {
    if (!imp.has_domain() || imp.domain() == "") {
      opsetVersion_ = imp.version();
      break;
    }
  }
  RETURN_ERR_IF_NOT(opsetVersion_ > 0,
                    "The opset of this ONNX model is not supported.");
  return Error::success();
}

Expected<ONNX_NAMESPACE::ModelProto>
ONNXModelLoader::loadProto(google::protobuf::io::ZeroCopyInputStream &iStream) {
  // Construct and configure a Coded Input Stream
  google::protobuf::io::CodedInputStream codedStream(&iStream);

  // Don't warn about large file sizes.
  codedStream.SetTotalBytesLimit(MAX_PROTO_SIZE, MAX_PROTO_SIZE);
  ONNX_NAMESPACE::ModelProto MP;
  bool parseNet = MP.ParseFromCodedStream(&codedStream);
  RETURN_ERR_IF_NOT(parseNet, "Failed to parse ModelProto",
                    ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF);
  return MP;
}

/// Parse a ModelProto from an in-memory buffer of \p onnxModelSize bytes.
Expected<ONNX_NAMESPACE::ModelProto>
ONNXModelLoader::loadProto(const void *onnxModel, size_t onnxModelSize) {
  google::protobuf::io::ArrayInputStream arrayStream(onnxModel, onnxModelSize);
  return loadProto(arrayStream);
}

/// Parse a ModelProto from \p filename; files ending in ".onnxtxt" are parsed
/// as protobuf text format, everything else as binary.
Expected<ONNX_NAMESPACE::ModelProto>
ONNXModelLoader::loadProto(const std::string &filename) {
  std::ifstream ff(filename, std::ios::in | std::ios::binary);
  RETURN_ERR_IF_NOT(ff,
                    strFormat("Can't find the model or network files for %s.",
                              filename.c_str()),
                    ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF);

  // TODO: intend to find a way to reuse the following function later
  // for the text format onnx model:
  // bool ONNXModelLoader::loadProto(ONNX_NAMESPACE::GraphProto &net,
  //  google::protobuf::io::ZeroCopyInputStream &iStream)
  if (filename.find(".onnxtxt") != std::string::npos) {
    std::string str((std::istreambuf_iterator<char>(ff)),
                    std::istreambuf_iterator<char>());
    ONNX_NAMESPACE::ModelProto MP;
    bool parseNet = google::protobuf::TextFormat::ParseFromString(str, &MP);

    RETURN_ERR_IF_NOT(parseNet, "Failed to parse ModelProto",
                      ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF);
    return MP;
  }

  google::protobuf::io::IstreamInputStream fileStream(&ff);
  return loadProto(fileStream);
}

/// Given an input \p val , ceil value is computed for a given datatype T
/// Note: correct ceil for non-negative values via truncate-then-adjust; for
/// negative values truncation toward zero already equals ceil.
template <typename T> T ceil(float val) {
  return (val - (T)val) > 0 ? (T)(val + 1) : (T)val;
}

namespace {
/// Helper type for pads.
using Pads = std::vector<unsigned_t>;
} // namespace

/// Get the Pads value based on setting for auto_pad.
/// \p kdim : kernel sizes (HW)
/// \p sdim: stride sizes (HW)
/// \p idim: input sizes (HW)
/// \returns pads in (top, left, bottom, right) order. Explicit "pads" wins
/// over "auto_pad"; with neither, zero padding is returned.
Expected<Pads> getPads(ArgumentDictionaryTy &dict,
                       llvm::ArrayRef<unsigned_t> kdim,
                       llvm::ArrayRef<unsigned_t> sdim,
                       llvm::ArrayRef<unsigned_t> idim) {
  if (dict.count("pads")) {
    if (dict.at("pads")->ints_size() == 2) { // For maxPool1D
      // A 1D (begin, end) pair is widened to 2D with zero H padding.
      return Pads({0, (unsigned_t)dict.at("pads")->ints(0), 0,
                   (unsigned_t)dict.at("pads")->ints(1)});
    }
    return getShape<unsigned_t>(dict["pads"]);
  }
  if (dict.count("auto_pad")) {
    std::string padStr;
    ASSIGN_VALUE_OR_RETURN_ERR(padStr, loadStr(dict.at("auto_pad")));
    if (padStr == "VALID") {
      // Return default value 0 for pads.
      return Pads({0, 0, 0, 0});
    } else if (padStr == "SAME_UPPER" || padStr == "SAME_LOWER") {
      unsigned_t top, left, bottom, right;
      // From https://arxiv.org/pdf/1603.07285.pdf 2.4,
      // o = floor((i + 2*p - k)/s) + 1
      // Also, from https://github.com/onnx/onnx/blob/master/docs/Operators.md
      //   output_spatial_shape[i] =
      //     ceil(input_spatial_shape[i] / strides_spatial_shape[i])
      //   pad_shape[i] =
      //     (output_spatial_shape[i] - 1) * strides_spatial_shape[i]
      //       + kernel_spatial_shape[i] - input_spatial_shape[i]
      // Use the smallest padding possible out of the possible options.
      llvm::SmallVector<unsigned_t, 2> pdim(2); // Total Paddding, HW.
      unsigned_t odim;
      for (size_t i = 0, e = pdim.size(); i < e; i++) {
        odim = ceil<unsigned_t>((float)idim[i] / (float)sdim[i]);
        pdim[i] = sdim[i] * (odim - 1) + kdim[i] - idim[i];
      }
      if (padStr == "SAME_UPPER") {
        // SAME_UPPPER: if odd number for pdim[i], use extra padding at the end.
        top = pdim[0] / 2;
        bottom = top + (pdim[0] & 0x1);
        left = pdim[1] / 2;
        right = left + (pdim[1] & 0x1);
      } else {
        // SAME_LOWER: if odd number for pdim[i], use extra padding at the
        // beginning.
        bottom = pdim[0] / 2;
        top = bottom + (pdim[0] & 0x1);
        right = pdim[1] / 2;
        left = right + (pdim[1] & 0x1);
      }
      return Pads({top, left, bottom, right});
    }
    RETURN_ERR("only auto_pad==VALID, SAME_UPPER and SAME_LOWER are supported");
  }
  // Return default value 0 for pads.
  return Pads({0, 0, 0, 0});
}

/// Get the Pads value based on setting for auto_pad.
/// \p kdim : kernel sizes (HW)
/// \p sdim: stride sizes (HW)
/// \p idim: input sizes (HW)
/// For ConvTranspose: derives (top, left, bottom, right) pads from the desired
/// output size \p odim, splitting odd totals per the ONNX SAME_UPPER rule or
/// the default (non-SAME_UPPER) rule.
static Expected<Pads> getConvTransposePadsfromOutput(
    ArgumentDictionaryTy &dict, llvm::ArrayRef<unsigned_t> kdim,
    llvm::ArrayRef<unsigned_t> sdim, unsigned_t dilation,
    llvm::ArrayRef<unsigned_t> idim, llvm::ArrayRef<unsigned_t> odim) {

  llvm::SmallVector<unsigned_t, 2> pdim(2); // Total Paddding, HW.
  // total_padding = stride*(in-1) + effective_kernel - out
  // (output_padding is not yet supported; see commented term).
  for (size_t i = 0, e = pdim.size(); i < e; i++) {
    pdim[i] = sdim[i] * (idim[i] - 1) /* + output_padding[0]*/ +
              ((kdim[i] - 1) * dilation + 1) - odim[i];
  }

  unsigned_t top, left, bottom, right;

  if (dict.count("auto_pad")) {
    std::string padStr;
    ASSIGN_VALUE_OR_RETURN_ERR(padStr, loadStr(dict.at("auto_pad")));
    if (padStr == "SAME_UPPER") {
      // SAME_UPPER ONNX formula:
      //   if odd number for pdim[i], use extra padding at the end.
      //   pads[start_i] = total_padding[i] - (total_padding[i]/2);
      //   pads[end_i] = (total_padding[i]/2).
      top = pdim[0] / 2;
      bottom = top + (pdim[0] & 0x1);
      left = pdim[1] / 2;
      right = left + (pdim[1] & 0x1);
      return Pads({top, left, bottom, right});
    }
  }
  // !SAME_UPPER ONNX formula:
  //   pads[start_i] = total_padding[i]/2;
  //   pads[end_i] = total_padding[i] - (total_padding[i]/2)
  top = pdim[0] / 2;
  bottom = top + (pdim[0] & 0x1);
  left = pdim[1] / 2;
  right = left + (pdim[1] & 0x1);
  return Pads({top, left, bottom, right});
}

/// Load a Constant node: registers op.output(0) as a Constant built from
/// either a TENSOR attribute or an INTS attribute (stored as an int64 vector).
/// No-op if a constant with that name is already registered.
Error ONNXModelLoader::loadConstant(const ONNX_NAMESPACE::NodeProto &op,
                                    ArgumentDictionaryTy &dict) {
  /*
    output: "Parameter6"
    name: "Parameter6"
    op_type: "Constant"
    attribute {
      name: "value"
      t {
        dims: 8
        data_type: FLOAT
        float_data: -0.161539719
        float_data: -0.433835655
        float_data: 0.091641359
        float_data: -0.0168522168
        float_data: -0.0650264397
        float_data: -0.131737873
        float_data: 0.0204175506
        float_data: -0.121110231
      }
      type: TENSOR
    }
    doc_string: ""
    domain: ""
  */
  const auto &name = op.output(0);
  // If the tensor is pre-populated by the user of this class then we don't
  // need to allocate a new tensor.
  if (getConstantByNameOrNull(name)) {
    return Error::success();
  }

  const auto &type = dict.at("value")->type();
  RETURN_ERR_IF_NOT((type == ONNX_NAMESPACE::AttributeProto::TENSOR ||
                     type == ONNX_NAMESPACE::AttributeProto::INTS),
                    "Only Tensor type constants are supported.",
                    ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE);

  Tensor T;
  if (type == ONNX_NAMESPACE::AttributeProto::TENSOR) {
    RETURN_IF_ERR(loadTensor(dict.at("value")->t(), &T, useGlowCustomOps_));
  } else {
    // INTS attribute: materialize as a 1-D Int64 tensor.
    std::vector<int64_t> ints;
    ASSIGN_VALUE_OR_RETURN_ERR(ints, getShape<int64_t>(dict["value"]));
    T = Tensor(ElemKind::Int64ITy, {(dim_t)ints.size()});
    auto TH = T.getHandle<int64_t>();
    for (dim_t i = 0, e = ints.size(); i < e; ++i) {
      TH.at({i}) = ints[i];
    }
  }

  RETURN_IF_ERR(createAndRegisterConstant(name, std::move(T)));

  return Error::success();
}

/// Retrieves data from a constant Tensor and stores it in a vector.
template <typename T> static void helperSetter(Constant *constT, std::vector<ssize_t> &vec) { auto constH = constT->getPayload().getHandle<T>(); for (dim_t i = 0; i < constH.size(); ++i) { vec.push_back(constH.at({i})); } } Error ONNXModelLoader::loadSlice(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue data; ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0))); auto dims = data.dims(); auto numDims = dims.size(); std::vector<ssize_t> starts; std::vector<ssize_t> ends; // Attribute 'axes' is optional. std::vector<ssize_t> axes; if (this->opsetVersion_ >= 10) { Constant *startsC = getConstantByNameOrNull(op.input(1)); Constant *endsC = getConstantByNameOrNull(op.input(2)); RETURN_ERR_IF_NOT(startsC, "Starts Tensor is not Constant."); RETURN_ERR_IF_NOT(endsC, "Ends Tensor is not Constant."); if (startsC->getElementType() == ElemKind::Int64ITy) { helperSetter<int64_t>(startsC, starts); } else if (startsC->getElementType() == ElemKind::Int32ITy) { helperSetter<int32_t>(startsC, starts); } else { RETURN_ERR_IF_NOT(false, "Starts Tensor has unsupported type."); } if (endsC->getElementType() == ElemKind::Int64ITy) { helperSetter<int64_t>(endsC, ends); } else if (endsC->getElementType() == ElemKind::Int32ITy) { helperSetter<int32_t>(endsC, ends); } else { RETURN_ERR_IF_NOT(false, "Ends Tensor has unsupported type."); } if (op.input_size() > 3) { Constant *axesC = getConstantByNameOrNull(op.input(3)); RETURN_ERR_IF_NOT(startsC, "Axes Tensor is not Constant."); if (axesC->getElementType() == ElemKind::Int64ITy) { helperSetter<int64_t>(axesC, axes); } else if (axesC->getElementType() == ElemKind::Int32ITy) { helperSetter<int32_t>(axesC, axes); } else { RETURN_ERR_IF_NOT(false, "Axes Tensor has unsupported type."); } RETURN_ERR_IF_NOT(op.input_size() == 5, "Steps is not currently supported."); } } else { // Attributes 'starts' and 'ends' are mandatory and must be consistent. 
ASSIGN_VALUE_OR_RETURN_ERR(starts, getShape<ssize_t>(dict["starts"])); ASSIGN_VALUE_OR_RETURN_ERR(ends, getShape<ssize_t>(dict["ends"])); if (dict.count("axes")) { // The ONNX spec is unclear so we consider that the 'axes' array may have // any size. The constraints are: // - the element value must be in range [0, numDims), // - 'starts' & 'ends' arrays must have the same size as the 'axes' array. // In case an axis is specified multiple times in 'axes', the later // parameters will simply overwrite the previous ones. ASSIGN_VALUE_OR_RETURN_ERR(axes, getShape<ssize_t>(dict["axes"])); } } RETURN_ERR_IF_NOT( (starts.size() == ends.size()), "Slice: 'starts' and 'ends' arrays must have the same size."); if (axes.empty()) { for (size_t i = 0; i < numDims; i++) { axes.push_back(ssize_t(i)); } } // The ONNX description is unclear and doesn't describe what to do when a // an axis index is not given in the axes array. An interpretation is that // for such an axis, the entire range is taken. Then, we initialize // newStarts and newEnds with the full range for all axes. std::vector<dim_t> newStarts(numDims); std::vector<dim_t> newEnds(numDims); for (size_t i = 0; i < numDims; i++) { newStarts[i] = 0; newEnds[i] = dims[i]; } // Determine the coordinates of the sub-tensor to extract. RETURN_ERR_IF_NOT(axes.size() == starts.size(), "'axes' and 'starts' must be the same size."); RETURN_ERR_IF_NOT(starts.size() == ends.size(), "'starts' and 'ends' must be the same size."); for (size_t i = 0; i < axes.size(); i++) { ssize_t newStart = starts[i]; ssize_t newEnd = ends[i]; ssize_t axisId = axes[i]; RETURN_ERR_IF_NOT((axisId >= 0) && (axisId < ssize_t(numDims)), "Axes indexes must be within the input tensor range."); // ONNX: "If the value passed to start or end is larger than the n (the // number of elements in this dimension), it represents n". 
if (newStart > ssize_t(dims[axisId])) { newStart = ssize_t(dims[axisId]); } if (newEnd > ssize_t(dims[axisId])) { newEnd = ssize_t(dims[axisId]); } // The ONNX description is unclear and the numpy definition is more // accurate. // - ONNX: "Similar to numpy. [...]. If a negative value is passed for any // of the start or end indices, it represent number of elements before the // end of that dimension." // - Numpy: "Negative indices are interpreted as counting from the end of // the array (i.e., if n_i < 0, it means n_i + d_i)." if (newStart < 0) { newStart = ssize_t(dims[axisId]) + newStart; RETURN_ERR_IF_NOT(newStart >= 0, "Slice: final start index should never be negative."); } if (newEnd < 0) { newEnd = ssize_t(dims[axisId]) + newEnd; RETURN_ERR_IF_NOT(newEnd >= 0, "Slice: final end index should never be negative."); } newStarts[axisId] = size_t(newStart); newEnds[axisId] = size_t(newEnd); } // Create the IR node. Node *SN = G_->createSlice(opName, data, newStarts, newEnds); RETURN_IF_ERR(addNodeAsOutput(op, SN)); return Error::success(); } Error ONNXModelLoader::loadConv1D(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Load the attributes std::vector<glow::unsigned_t> strides(2, 1); strides[1] = dict.count("strides") ? dict.at("strides")->ints(0) : 1; strides[0] = 1; unsigned_t group = 1; if (dict.count("group")) { ASSIGN_VALUE_OR_RETURN_ERR(group, loadInt(dict.at("group"))); } unsigned_t dilation = dict.count("dilations") ? dict.at("dilations")->ints(0) : 1; // Load the inputs NodeValue in; // input == NCW ---> NCHW ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); in = G_->createExpandDims(opName, in, 2); // filtervalue == CKS ---> CKRS NodeValue filterValue; ASSIGN_VALUE_OR_RETURN_ERR(filterValue, getNodeValueByName(op.input(1))); filterValue = G_->createExpandDims(opName, filterValue, 2); // Transpose the filter to the right format. 
Glow expects to read the // weights in the format CRSK. ONNX stores the operators as CKRS. // C - output_depth, R - filter_height, S - filter_width, K - input_depth. // filtervalue == CKRS ---> CRSK TransposeNode *filterTransposeNode = G_->createTranspose(opName, filterValue, NCHW2NHWC); // The structure of the conv weights is: CRSK. We take the C, which is the // number of filters. We use this value to calculate the size of the bias // if it is not specified. const NodeValue filterTransposedValue = filterTransposeNode->getResult(); dim_t depth = filterTransposedValue.dims()[0]; // Construct the Bias field. Constant *bias = nullptr; // Check if we have a serialized bias vector. if (op.input_size() > 2) { auto &biasTensorName = op.input(2); // Load the serialized bias vector. ASSIGN_VALUE_OR_RETURN_ERR(bias, getConstantByName(biasTensorName)); } // If a serialized bias wasn't found then create a zero bias. if (!bias) { Tensor biasTensor(ElemKind::FloatTy, {depth}); biasTensor.zero(); bias = mod_.createConstant("conv.bias", std::move(biasTensor)); } // ONNX passes the input as NCHW, and we expect the input to be NHWC. auto *tr = G_->createTranspose(opName, in, NCHW2NHWC); // Calculate the size and allocate the output buffer. ShapeNHWC idim = ShapeNHWC(tr->getResult().dims()); llvm::SmallVector<unsigned_t, 2> idimHW(2); idimHW[0] = in.dims()[2]; idimHW[1] = in.dims()[3]; // Pads : {pad_top, pad_left, pad_bottom, pad_right} Pads pads; // Get the kernel shape. 
llvm::SmallVector<unsigned_t, 2> kernelShape(2); kernelShape[0] = filterTransposedValue.dims()[1]; kernelShape[1] = filterTransposedValue.dims()[2]; ASSIGN_VALUE_OR_RETURN_ERR(pads, getPads(dict, kernelShape, strides, idimHW)); auto outSz = calculateConvPoolOutputDims(idim.h, idim.w, kernelShape, strides, pads, dilation); std::array<dim_t, 4> outDims = {{idim.n, outSz.first, outSz.second, depth}}; auto outTy = mod_.uniqueType(ElemKind::FloatTy, outDims); auto *node = G_->createConv(opName, tr, filterTransposeNode, bias, outTy, kernelShape, strides, pads, group, dilation); auto *N = G_->createSqueeze(opName, node, 1 /*axes*/); // Transpose the output back auto *RR = G_->createTranspose(opName, N, {0, 2, 1}); RETURN_IF_ERR(addNodeAsOutput(op, RR)); return Error::success(); } Error ONNXModelLoader::loadConv(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Load the inputs NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); if (in.dims().size() == 3) { return loadConv1D(op, dict); } NodeValue filterValue; ASSIGN_VALUE_OR_RETURN_ERR(filterValue, getNodeValueByName(op.input(1))); // Load the attributes std::vector<unsigned_t> strides(2, 1); if (dict.count("strides")) { ASSIGN_VALUE_OR_RETURN_ERR(strides, getShape<unsigned_t>(dict["strides"])); } unsigned_t group = 1; if (dict.count("group")) { ASSIGN_VALUE_OR_RETURN_ERR(group, loadInt(dict.at("group"))); } unsigned_t dilation = 1; if (dict.count("dilations")) { std::vector<unsigned_t> dilations(2, 1); ASSIGN_VALUE_OR_RETURN_ERR(dilations, getShape<unsigned_t>(dict["dilations"])); RETURN_ERR_IF_NOT(dilations.size() == 2, "Conv: dilations must be specified for 2 axes."); RETURN_ERR_IF_NOT(dilations[1] == dilations[0], "Conv: different dilation values along different axes " "are not supported currently. values must be same."); dilation = dilations[0]; } // Transpose the filter to the right format. 
Glow expects to read the // weights in the format CRSK. ONNX stores the operators as KCRS. // C - output_depth, R - filter_height, S - filter_width, K - input_depth. TransposeNode *filterTransposeNode = G_->createTranspose(opName, filterValue, NCHW2NHWC); // The structure of the conv weights is: CRSK. We take the C, which is the // number of filters. We use this value to calculate the size of the bias // if it is not specified. const NodeValue filterTransposedValue = filterTransposeNode->getResult(); dim_t depth = filterTransposedValue.dims()[0]; // Get the kernel shape from the input. llvm::SmallVector<unsigned_t, 2> kernelShape(2); kernelShape[0] = filterTransposedValue.dims()[1]; kernelShape[1] = filterTransposedValue.dims()[2]; // Extra check when the 'kernel_shape' attribute exists. // The 'kernel_shape' attribute is redundant not mandatory. if (dict.count("kernel_shape")) { std::vector<unsigned_t> kernelShapeAttribute; ASSIGN_VALUE_OR_RETURN_ERR(kernelShapeAttribute, getShape<unsigned_t>(dict["kernel_shape"])); RETURN_ERR_IF_NOT( (kernelShape[0] == kernelShapeAttribute[0] && kernelShape[1] == kernelShapeAttribute[1]), "The 'kernel_shape' attribute is not consistent with the actual " "convolution kernel shape."); (void)kernelShapeAttribute; // Avoids compilation warning in release mode. } // Construct the Bias field. Constant *bias = nullptr; // Check if we have a serialized bias vector. if (op.input_size() > 2) { auto &biasTensorName = op.input(2); // Load the serialized bias vector. ASSIGN_VALUE_OR_RETURN_ERR(bias, getConstantByName(biasTensorName)); } // If a serialized bias wasn't found then create a zero bias. if (!bias) { Tensor biasTensor(ElemKind::FloatTy, {depth}); biasTensor.zero(); bias = mod_.createConstant("conv.bias", std::move(biasTensor)); } // ONNX passes the input as NCHW, and we expect the input to be NHWC. auto *tr = G_->createTranspose(opName, in, NCHW2NHWC); // Calculate the size and allocate the output buffer. 
ShapeNHWC idim = ShapeNHWC(tr->getResult().dims()); llvm::SmallVector<unsigned_t, 2> idimHW(2); idimHW[0] = in.dims()[2]; idimHW[1] = in.dims()[3]; // Pads : {pad_top, pad_left, pad_bottom, pad_right} Pads pads; ASSIGN_VALUE_OR_RETURN_ERR(pads, getPads(dict, kernelShape, strides, idimHW)); auto outSz = calculateConvPoolOutputDims(idim.h, idim.w, kernelShape, strides, pads, dilation); std::array<dim_t, 4> outDims = {{idim.n, outSz.first, outSz.second, depth}}; auto outTy = mod_.uniqueType(ElemKind::FloatTy, outDims); auto *node = G_->createConv(opName, tr, filterTransposeNode, bias, outTy, kernelShape, strides, pads, group, dilation); // Transpose the output back. auto *N = G_->createTranspose(opName, node, NHWC2NCHW); RETURN_IF_ERR(addNodeAsOutput(op, N)); return Error::success(); } Error ONNXModelLoader::loadTensorwiseQuantizedConvolution( const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue input; ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0))); NodeValue filterValue; ASSIGN_VALUE_OR_RETURN_ERR(filterValue, getNodeValueByName(op.input(1))); NodeValue biasValue; ASSIGN_VALUE_OR_RETURN_ERR(biasValue, getNodeValueByName(op.input(2))); std::vector<unsigned_t> kernels; ASSIGN_VALUE_OR_RETURN_ERR(kernels, getShape<unsigned_t>(dict["kernel_shape"])); std::vector<unsigned_t> strides; ASSIGN_VALUE_OR_RETURN_ERR(strides, getShape<unsigned_t>(dict["strides"])); std::vector<unsigned_t> pads; ASSIGN_VALUE_OR_RETURN_ERR(pads, getShape<unsigned_t>(dict["pads"])); unsigned_t groups; ASSIGN_VALUE_OR_RETURN_ERR(groups, loadInt(dict.at("group"))); float outScale; ASSIGN_VALUE_OR_RETURN_ERR(outScale, loadFloat(dict.at("out_scale"))); int32_t outOffset; ASSIGN_VALUE_OR_RETURN_ERR(outOffset, loadInt(dict.at("out_offset"))); ShapeNHWC idim(input.dims()); auto outSz = calculateConvPoolOutputDims(idim.h, idim.w, kernels, strides, pads); std::array<dim_t, 4> outDims = { {idim.n, outSz.first, 
outSz.second, biasValue.dims()[0]}}; auto outTy = mod_.uniqueType(ElemKind::Int8QTy, outDims, outScale, outOffset); auto *node = G_->createConv(opName, input, filterValue, biasValue, outTy, kernels, strides, pads, groups); return addNodeAsOutput(op, node); } Error ONNXModelLoader::loadChannelwiseQuantizedConvolution( const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue input; ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0))); NodeValue filterValue; ASSIGN_VALUE_OR_RETURN_ERR(filterValue, getNodeValueByName(op.input(1))); NodeValue biasValue; ASSIGN_VALUE_OR_RETURN_ERR(biasValue, getNodeValueByName(op.input(2))); NodeValue scalesValue; ASSIGN_VALUE_OR_RETURN_ERR(scalesValue, getNodeValueByName(op.input(3))); NodeValue offsetsValue; ASSIGN_VALUE_OR_RETURN_ERR(offsetsValue, getNodeValueByName(op.input(4))); std::vector<unsigned_t> kernels; ASSIGN_VALUE_OR_RETURN_ERR(kernels, getShape<unsigned_t>(dict["kernel_shape"])); std::vector<unsigned_t> strides; ASSIGN_VALUE_OR_RETURN_ERR(strides, getShape<unsigned_t>(dict["strides"])); std::vector<unsigned_t> pads; ASSIGN_VALUE_OR_RETURN_ERR(pads, getShape<unsigned_t>(dict["pads"])); unsigned_t groups; ASSIGN_VALUE_OR_RETURN_ERR(groups, loadInt(dict.at("group"))); float outScale; ASSIGN_VALUE_OR_RETURN_ERR(outScale, loadFloat(dict.at("out_scale"))); int32_t outOffset; ASSIGN_VALUE_OR_RETURN_ERR(outOffset, loadInt(dict.at("out_offset"))); ShapeNHWC idim(input.dims()); auto outSz = calculateConvPoolOutputDims(idim.h, idim.w, kernels, strides, pads); std::array<dim_t, 4> outDims = { {idim.n, outSz.first, outSz.second, biasValue.dims()[0]}}; auto outTy = mod_.uniqueType(ElemKind::Int8QTy, outDims, outScale, outOffset); auto *node = G_->createChannelwiseQuantizedConv( opName, input, filterValue, biasValue, scalesValue, offsetsValue, outTy, kernels, strides, pads, groups); return addNodeAsOutput(op, node); } Error 
ONNXModelLoader::loadConvTranspose(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Load the attributes std::vector<unsigned_t> strides(2, 1); if (dict.count("strides")) { ASSIGN_VALUE_OR_RETURN_ERR(strides, getShape<unsigned_t>(dict["strides"])); } unsigned_t group = 1; if (dict.count("group")) { ASSIGN_VALUE_OR_RETURN_ERR(group, loadInt(dict.at("group"))); } unsigned_t dilation = 1; if (dict.count("dilations")) { std::vector<unsigned_t> dilations; ASSIGN_VALUE_OR_RETURN_ERR(dilations, getShape<unsigned_t>(dict["dilations"])); RETURN_ERR_IF_NOT(dilations.size() == 2, "ConvTranspose: dilations must be specified for 2 axes."); RETURN_ERR_IF_NOT( dilations[1] == dilations[0], "ConvTranspose: different dilation values along different axes " "are not supported currently. values must be same."); dilation = dilations[0]; } // Load the inputs NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); NodeValue filterValue; ASSIGN_VALUE_OR_RETURN_ERR(filterValue, getNodeValueByName(op.input(1))); // Transpose the filter to the right format. Glow expects to read the // weights in the format CRSK. ONNX stores the operators as KCRS. // C - output_depth, R - filter_height, S - filter_width, K - input_depth. TransposeNode *filterTransposeNode = G_->createTranspose(opName, filterValue, CNHW2NHWC /* flip matrix */); // The structure of the conv weigts is: NHWC. We take the C, which is the // number of filters. We use this value to calculate the size of the bias // if it is not specified. const NodeValue filterTransposedValue = filterTransposeNode->getResult(); dim_t depth = filterTransposedValue.dims()[0]; // Get the kernel shape from the input. llvm::SmallVector<unsigned_t, 2> kernels(2); kernels[0] = filterTransposedValue.dims()[1]; kernels[1] = filterTransposedValue.dims()[2]; // Extra check when the 'kernel_shape' attribute exists. 
// The 'kernel_shape' attribute is redundant not mandatory. if (dict.count("kernel_shape")) { std::vector<unsigned_t> kernelShapeAttribute; ASSIGN_VALUE_OR_RETURN_ERR(kernelShapeAttribute, getShape<unsigned_t>(dict["kernel_shape"])); RETURN_ERR_IF_NOT( (kernels[0] == kernelShapeAttribute[0] && kernels[1] == kernelShapeAttribute[1]), "The 'kernel_shape' attribute is not consistent with the actual " "convolution kernel shape."); (void)kernelShapeAttribute; // Avoids compilation warning in release mode. } // Construct the Bias field. Constant *bias = nullptr; // Check if we have a serialized bias vector. if (op.input_size() > 2) { auto &biasTensorName = op.input(2); // Load the serialized bias vector. bias = getConstantByNameOrNull(biasTensorName); } // If a serialized bias wasn't found then create a zero bias. if (!bias) { Tensor biasTensor(ElemKind::FloatTy, {depth}); biasTensor.zero(); bias = mod_.createConstant("conv.bias", std::move(biasTensor)); } // ONNX passes the input as NCHW, and we expect the input to be NHWC. auto *tr = G_->createTranspose(opName, in, NCHW2NHWC); // Calculate the size and allocate the output buffer. ShapeNHWC idim = ShapeNHWC(tr->getResult().dims()); llvm::SmallVector<unsigned_t, 2> idimHW(2); idimHW[0] = in.dims()[2]; idimHW[1] = in.dims()[3]; // Pads : {pad_top, pad_left, pad_bottom, pad_right} Pads pads; // Conv transpose output size (HxW) is either specified or calculated. std::pair<dim_t, dim_t> outSz; // Per spec, if output_shape is specified, pads are ignored. 
if (dict.count("output_shape")) { std::vector<unsigned_t> outShape; ASSIGN_VALUE_OR_RETURN_ERR(outShape, getShape<unsigned_t>(dict["output_shape"])); ASSIGN_VALUE_OR_RETURN_ERR( pads, getConvTransposePadsfromOutput(dict, kernels, strides, dilation, idimHW, outShape)); outSz = {outShape[0], outShape[1]}; std::pair<dim_t, dim_t> outSzTest = calculateConvTransposeOutputDims( idim.h, idim.w, kernels, strides, pads, dilation); RETURN_ERR_IF_NOT((outShape[0] == outSzTest.first), "Expected/calculated pads don't match"); RETURN_ERR_IF_NOT((outShape[1] == outSzTest.second), "Expected/calculated pads don't match"); } else { if (dict.count("output_padding")) { RETURN_ERR("output_padding not supported!", ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR); } ASSIGN_VALUE_OR_RETURN_ERR(pads, getPads(dict, kernels, strides, idimHW)); outSz = calculateConvTransposeOutputDims(idim.h, idim.w, kernels, strides, pads, dilation); } std::array<dim_t, 4> outDims = {{idim.n, outSz.first, outSz.second, depth}}; auto outTy = mod_.uniqueType(ElemKind::FloatTy, outDims); auto *node = G_->createConvTranspose(opName, tr, filterTransposeNode, bias, outTy, kernels, strides, pads, group, dilation); // Transpose the output back. 
auto *N = G_->createTranspose(opName, node, NHWC2NCHW); RETURN_IF_ERR(addNodeAsOutput(op, N)); return Error::success(); } Error ONNXModelLoader::loadPool(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict, llvm::StringRef typeName) { const std::string &opName = loadOperatorName(op); // Load the inputs: NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); std::vector<unsigned_t> strides(2, 1); size_t inDim = in.dims().size(); std::vector<unsigned_t> kernelsShape; ASSIGN_VALUE_OR_RETURN_ERR(kernelsShape, getShape<unsigned_t>(dict["kernel_shape"])); size_t kerDim = kernelsShape.size(); std::vector<unsigned_t> kernels = {1, kernelsShape[kerDim - 1]}; // For maxPool1D inDim = 3 if (inDim == 3) { in = G_->createExpandDims(opName, in, 2); if (kerDim != 1) { RETURN_ERR("Glow handles 1D pooling with kernel dimenstion size 1", ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE); } else { if (dict.count("strides")) { strides[1] = dict.at("strides")->ints(0); strides[0] = 1; } } } if (kerDim == 2) { // For maxPool2D kernels[0] = kernelsShape[0]; if (dict.count("strides")) { ASSIGN_VALUE_OR_RETURN_ERR(strides, getShape<unsigned_t>(dict["strides"])); } } if (in.dims().size() != 4 || kernels.size() != 2) { // Glow only handles 2D pooling currently. RETURN_ERR("Glow only handles 2D pooling currently.", ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE); } auto *tr = G_->createTranspose(opName, in, NCHW2NHWC); // If 'global_pooling' is set then the operation will pool over the size of // the input by doing: kernel = height/width. 
if (dict.count("global_pooling")) { auto Ty = in.getType(); kernels[0] = Ty->dims()[2]; kernels[1] = Ty->dims()[3]; } // NHWC llvm::SmallVector<unsigned_t, 2> idimHW(2); idimHW[0] = in.dims()[2]; // As per NCHW format idimHW[1] = in.dims()[3]; Pads pads; ASSIGN_VALUE_OR_RETURN_ERR(pads, getPads(dict, kernels, strides, idimHW)); Node *node = nullptr; if (op.output_size() > 1) { if (typeName != "MaxPool") { RETURN_ERR("Argmax output is only supported for MaxPool!", ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR); } node = G_->createMaxPool(opName, tr, kernels, strides, pads); auto *res = G_->createTranspose(opName, NodeValue(node, 0), NHWC2NCHW); auto *argmax = G_->createTranspose(opName, NodeValue(node, 1), NHWC2NCHW); RETURN_IF_ERR(assignNodeOutputs(op, {res, argmax})); } else { size_t idx = 0; if (typeName == "MaxPool") { node = G_->createMaxPool(opName, tr, kernels, strides, pads); idx = MaxPoolNode::ResultIdx; } else { node = G_->createAvgPool(opName, tr, kernels, strides, pads); idx = AvgPoolNode::ResultIdx; } Node *N = nullptr; if (inDim == 3) { // For maxPool1D auto *R = G_->createSqueeze(opName, NodeValue(node, idx), 1); N = G_->createTranspose(opName, R, {0, 2, 1}); } else { N = G_->createTranspose(opName, NodeValue(node, idx), NHWC2NCHW); } RETURN_IF_ERR(addNodeAsOutput(op, N)); } return Error::success(); } Error ONNXModelLoader::loadTensorwiseQuantizedPool( const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict, llvm::StringRef typeName) { const std::string &opName = loadOperatorName(op); // Load the inputs: NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); std::vector<unsigned_t> kernels; ASSIGN_VALUE_OR_RETURN_ERR(kernels, getShape<unsigned_t>(dict["kernel_shape"])); std::vector<unsigned_t> strides; ASSIGN_VALUE_OR_RETURN_ERR(strides, getShape<unsigned_t>(dict["strides"])); if (in.dims().size() != 4 || kernels.size() != 2) { // Glow only handles 2D pooling currently. 
RETURN_ERR("Glow only handles 2D pooling currently.", ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE); } // NHWC llvm::SmallVector<unsigned_t, 2> idimHW(2); idimHW[0] = in.dims()[1]; idimHW[1] = in.dims()[2]; Pads pads; ASSIGN_VALUE_OR_RETURN_ERR(pads, getPads(dict, kernels, strides, idimHW)); if (op.output_size() > 1) { if (typeName != "MaxPool") { RETURN_ERR("Argmax output is only supported for MaxPool!", ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR); } Node *maxpool = G_->createMaxPool(opName, in, kernels, strides, pads); auto res = maxpool->getNthResult(MaxPoolNode::ResultIdx); auto argmax = maxpool->getNthResult(MaxPoolNode::ArgmaxIdx); RETURN_IF_ERR(assignNodeOutputs(op, {res, argmax})); } else { Node *poolNode; if (typeName == "MaxPool") { poolNode = G_->createMaxPool(opName, in, kernels, strides, pads); } else { poolNode = G_->createAvgPool(opName, in, kernels, strides, pads); } RETURN_IF_ERR(addNodeAsOutput(op, poolNode)); } return Error::success(); } Error ONNXModelLoader::loadArgMax(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); size_t axis = 0; if (dict.count("axis")) { ASSIGN_VALUE_OR_RETURN_ERR(axis, loadInt(dict.at("axis"))); } bool keepDims = true; if (dict.count("keepDims")) { ASSIGN_VALUE_OR_RETURN_ERR(keepDims, loadInt(dict.at("keepDims"))); } Node *node = G_->createArgMax(opName, in, axis, keepDims); RETURN_IF_ERR(addNodeAsOutput(op, node)); return Error::success(); } Error ONNXModelLoader::loadGlobalAveragePool( const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Load the inputs: NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); std::vector<unsigned_t> strides(2, 1); if (dict.count("strides")) { ASSIGN_VALUE_OR_RETURN_ERR(strides, getShape<unsigned_t>(dict["strides"])); } 
llvm::SmallVector<unsigned_t, 2> kernels(2); kernels[0] = in.dims()[2]; kernels[1] = in.dims()[3]; Pads pads; ASSIGN_VALUE_OR_RETURN_ERR( pads, getPads(dict, kernels, strides, kernels /* input sizes*/)); auto *tr = G_->createTranspose(opName, in, NCHW2NHWC); Node *node = G_->createAvgPool(opName, tr, kernels, strides, pads); auto *N = G_->createTranspose(opName, node, NHWC2NCHW); RETURN_IF_ERR(addNodeAsOutput(op, N)); return Error::success(); } Error ONNXModelLoader::loadSqueeze(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); std::vector<dim_t> axes; ASSIGN_VALUE_OR_RETURN_ERR(axes, getShape<dim_t>(dict["axes"])); Node *node = G_->createSqueeze(opName, in, axes); RETURN_IF_ERR(addNodeAsOutput(op, node)); return Error::success(); } Error ONNXModelLoader::loadUnsqueeze(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); std::vector<dim_t> axes; ASSIGN_VALUE_OR_RETURN_ERR(axes, getShape<dim_t>(dict["axes"])); Node *node = G_->createExpandDims(opName, in, axes); RETURN_IF_ERR(addNodeAsOutput(op, node)); return Error::success(); } Error ONNXModelLoader::loadBatchNormalization( const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); Constant *scale; ASSIGN_VALUE_OR_RETURN_ERR(scale, getConstantByName(op.input(1))); Constant *bias; ASSIGN_VALUE_OR_RETURN_ERR(bias, getConstantByName(op.input(2))); Constant *mean; ASSIGN_VALUE_OR_RETURN_ERR(mean, getConstantByName(op.input(3))); Constant *var; ASSIGN_VALUE_OR_RETURN_ERR(var, getConstantByName(op.input(4))); float epsilon = 1e-5f; // default auto epsilonIt = dict.find("epsilon"); if (epsilonIt != 
dict.end()) { ASSIGN_VALUE_OR_RETURN_ERR(epsilon, loadFloat(epsilonIt->second)); } auto *node = G_->createBatchNormalization(opName, in, bias, scale, mean, var, 1, epsilon); // BatchNormalization has 4 optional outputs that are not supported by glow. // Then: 1/ In case the optional outputs are present and used by other // operations of the model, then the import should fail. 2/ In case the // optional outputs are declared but not used, the import should succeed. By // registering only the mandatory output, we make sure the import will fail if // the non supported features are actually requested by the ONNX model. RETURN_IF_ERR(addNodeAsOutput(op, node, 1)); return Error::success(); } Error ONNXModelLoader::loadConcat(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); const unsigned numInputs = op.input_size(); llvm::SmallVector<NodeValue, 4> inputs; inputs.reserve(numInputs); for (unsigned i = 0; i < numInputs; i++) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(i))); inputs.push_back(in); } int axis; ASSIGN_VALUE_OR_RETURN_ERR(axis, loadInt(dict.at("axis"))); Node *node = G_->createConcat(opName, inputs, axis); RETURN_IF_ERR(addNodeAsOutput(op, node)); return Error::success(); } Error ONNXModelLoader::loadFCTransposed(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); if (in.getType()->dims().size() > 2) { size_t axis = 1; if (dict.count("axis")) { ASSIGN_VALUE_OR_RETURN_ERR(axis, loadInt(dict.at("axis"))); } in = G_->createFlatten("fc.in", in, axis); } unsigned_t axis_w = 1; if (dict.count("axis_w")) { ASSIGN_VALUE_OR_RETURN_ERR(axis_w, loadInt(dict.at("axis_w"))); } Constant *W; ASSIGN_VALUE_OR_RETURN_ERR(W, getConstantByName(op.input(1))); // w is stored already transposed. No need to additionally transpose it. 
if (W->dims().size() > 2) { Tensor tmp; auto wDims = flattenCdr(W->dims(), axis_w); tmp.reset(ElemKind::FloatTy, {wDims.first, wDims.second}); tmp.copyRawFrom(&W->getPayload()); W = mod_.createConstant(W->getName(), tmp); } Constant *B; ASSIGN_VALUE_OR_RETURN_ERR(B, getConstantByName(op.input(2))); auto *node = G_->createFullyConnected(opName, in, W, B); RETURN_IF_ERR(addNodeAsOutput(op, node)); return Error::success(); } Error ONNXModelLoader::loadGemm(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue A; ASSIGN_VALUE_OR_RETURN_ERR(A, getNodeValueByName(op.input(0))); NodeValue B; ASSIGN_VALUE_OR_RETURN_ERR(B, getNodeValueByName(op.input(1))); NodeValue C; ASSIGN_VALUE_OR_RETURN_ERR(C, getNodeValueByName(op.input(2))); bool broadcastC; ASSIGN_VALUE_OR_RETURN_ERR(broadcastC, getBroadcast(dict)); bool transA = false; if (dict.count("transA")) { ASSIGN_VALUE_OR_RETURN_ERR(transA, loadInt(dict.at("transA"))); } bool transB = false; if (dict.count("transB")) { ASSIGN_VALUE_OR_RETURN_ERR(transB, loadInt(dict.at("transB"))); } // TODO: support alpha * A * B + beta * C if (transA) A = G_->createTranspose(opName, A, {1, 0}); if (transB) B = G_->createTranspose(opName, B, {1, 0}); MatMulNode *mul = G_->createMatMul(opName, A, B); if (broadcastC) { int axis = mul->getResult().dims().size() - C.dims().size(); C = G_->createBroadcast(opName, C, mul->getResult().dims(), axis); } Node *node = G_->createAdd(opName, mul, C); RETURN_IF_ERR(addNodeAsOutput(op, node)); return Error::success(); } Error ONNXModelLoader::loadMatMul(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue LHS; ASSIGN_VALUE_OR_RETURN_ERR(LHS, getNodeValueByName(op.input(0))); NodeValue RHS; ASSIGN_VALUE_OR_RETURN_ERR(RHS, getNodeValueByName(op.input(1))); /// For dimension size equal to 3 use batchedMatMul if (LHS.dims().size() == 3) { Node *node = 
G_->createBatchMatMul(opName, LHS, RHS); RETURN_IF_ERR(addNodeAsOutput(op, node)); } else { Node *node = G_->createMatMul(opName, LHS, RHS); RETURN_IF_ERR(addNodeAsOutput(op, node)); } return Error::success(); } Error ONNXModelLoader::loadLeakyRelu(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { // Input Type. NodeValue input; ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0))); ElemKind inputType = input.getType()->getElementType(); // Only supports float types. RETURN_ERR_IF_NOT((inputType == ElemKind::FloatTy) || (inputType == ElemKind::Float16Ty), "Unsupported Type for LeakyRelu"); // ONNX spec says default is 0.01, but doesn't explicitly say it's optional. // like for others. The default example just omits alpha. float alphaVal = 0.01f; if (dict.count("alpha")) { ASSIGN_VALUE_OR_RETURN_ERR(alphaVal, loadFloat(dict.at("alpha"))); } // Create the node. auto splatType = mod_.uniqueType(ElemKind::FloatTy, input.dims()); const std::string &opName = loadOperatorName(op); Node *splatN = G_->createSplat(opName + "Alpha", splatType, alphaVal); Node *N = G_->createPRELU(opName, input, splatN); RETURN_IF_ERR(addNodeAsOutput(op, N)); return Error::success(); } Error ONNXModelLoader::loadPad(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Input NodeValue input; ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0))); auto inputDims = input.dims(); auto numDims = inputDims.size(); // Padding properties. unsigned_t mode = PaddingMode::CONSTANT; // default is constant. 
if (dict.count("mode")) {
    std::string modeStr;
    ASSIGN_VALUE_OR_RETURN_ERR(modeStr, loadStr(dict.at("mode")));
    if (modeStr == "constant") {
      mode = PaddingMode::CONSTANT;
    } else if (modeStr == "reflect") {
      mode = PaddingMode::REFLECT;
    } else if (modeStr == "edge") {
      mode = PaddingMode::EDGE;
    } else {
      RETURN_ERR("Pad: Invalid mode",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_ATTRIBUTE);
    }
  }
  float value = 0.f; // Default
  if (dict.count("value")) {
    ASSIGN_VALUE_OR_RETURN_ERR(value, loadFloat(dict.at("value")));
  }

  // Pads are mandatory.
  std::vector<int> pads;
  ASSIGN_VALUE_OR_RETURN_ERR(pads, getShape<int>(dict["pads"]));
  RETURN_ERR_IF_NOT(
      (pads.size() == 2 * numDims),
      "Pad: the 'pads' array must contain 2 values per dimensions");

  // Compute the output type. 'pads' holds the leading pads for all dims
  // first, then the trailing pads (ONNX layout).
  std::vector<dim_t> outDims(numDims);
  for (unsigned_t i = 0; i < numDims; i++) {
    auto new_dim = inputDims[i] + pads[i] + pads[i + numDims];
    RETURN_ERR_IF_NOT(new_dim > 0,
                      "The padding can't remove all elements of a dimension");
    outDims[i] = new_dim;
  }
  auto outTy = mod_.uniqueType(ElemKind::FloatTy, outDims);

  // Create the IR node.
  Node *N = G_->createPad(opName, input, outTy, mode, pads, value);
  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads an ONNX Cast operator as a ConvertTo node. Only non-quantized
/// element kinds are supported.
Error ONNXModelLoader::loadCast(const ONNX_NAMESPACE::NodeProto &op,
                                ArgumentDictionaryTy &dict) {
  const std::string &opName = loadOperatorName(op);

  // Input type
  NodeValue input;
  ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0)));
  ElemKind inputKind = input.getType()->getElementType();

  // Target type.
  ElemKind targetKind;
  RETURN_ERR_IF_NOT(dict.count("to"), "Cast: missing 'to' attribute");
  int toONNXTypeValue;
  ASSIGN_VALUE_OR_RETURN_ERR(toONNXTypeValue, loadInt(dict.at("to")));
  RETURN_ERR_IF_NOT(
      ONNX_NAMESPACE::TensorProto_DataType_IsValid(toONNXTypeValue),
      "Cast: invalid target type",
      ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF);
  ASSIGN_VALUE_OR_RETURN_ERR(
      targetKind, convertTensorProtoDataType(
                      ONNX_NAMESPACE::TensorProto_DataType(toONNXTypeValue)));

  // Only support non quantized types.
  RETURN_ERR_IF_NOT((!isQuantizedElemKind(inputKind)) &&
                        (!isQuantizedElemKind(targetKind)),
                    "Unsupported Cast");

  // Create the IR node.
  Node *N = G_->createConvertTo(opName, input, targetKind);
  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads an ONNX SpaceToDepth operator. The input is NCHW per the ONNX spec,
/// so it is transposed to NHWC, processed, and transposed back.
Error ONNXModelLoader::loadSpaceToDepth(const ONNX_NAMESPACE::NodeProto &op,
                                        ArgumentDictionaryTy &dict) {
  // Input Type
  NodeValue input;
  ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0)));

  int blockSize = 0;
  if (dict.count("blocksize")) {
    ASSIGN_VALUE_OR_RETURN_ERR(blockSize, loadInt(dict.at("blocksize")));
  } else {
    RETURN_ERR("SpaceToDepth: missing 'blocksize' attribute");
  }

  // Create the node.
  std::string opName = loadOperatorName(op);
  auto *tr = G_->createTranspose(opName, input, NCHW2NHWC);
  Node *nd = G_->createSpaceToDepth(opName, tr, blockSize);
  auto *N = G_->createTranspose(opName, nd, NHWC2NCHW);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads an ONNX ConstantOfShape (or, when \p isSplat is true, a splat-style
/// ConstantFill) operator as a Splat node. The fill value defaults to 0.0f.
Error ONNXModelLoader::loadConstantOfShape(const ONNX_NAMESPACE::NodeProto &op,
                                           ArgumentDictionaryTy &dict,
                                           bool isSplat) {
  Tensor T(ElemKind::FloatTy, {1});
  T.getHandle().raw(0) = 0.0;
  if (dict.count("value")) {
    RETURN_IF_ERR(loadTensor(dict.at("value")->t(), &T, useGlowCustomOps_));
    if (!isSplat) {
      // Validate tensor only for ConstantOfShape operator.
RETURN_ERR_IF_NOT(T.dims().size() == 1, "Value must be a 1D vector.");
      RETURN_ERR_IF_NOT(
          T.getType().getElementType() == ElemKind::FloatTy ||
              T.getType().getElementType() == ElemKind::Int64ITy ||
              T.getType().getElementType() == ElemKind::Int32ITy,
          T.getType().getElementName().str() +
              " type Value is not supported.");
    }
  }

  TypeRef ty;
  Node *SN = nullptr;
  if (op.input_size() > 0) {
    // The explicit 'input' operand carries the output shape.
    Constant *in;
    ASSIGN_VALUE_OR_RETURN_ERR(in, getConstantByName(op.input(0)));
    // Must be 1D tensor of int64_t.
    RETURN_ERR_IF_NOT(in->dims().size() == 1, "Input must be a 1D vector.");
    RETURN_ERR_IF_NOT(in->getType()->getElementType() == ElemKind::Int64ITy,
                      "Input element type must be Int64ITy.");
    // Convert 1D tensor of int64_t into llvm::ArrayRef<dim_t>.
    auto TH = in->getPayload().getHandle<int64_t>();
    auto begin = &TH.raw(0);
    auto end = begin + TH.actualSize();
    ShapeVector outputDims(begin, end);

    ty = mod_.uniqueType(T.getType().getElementType(), outputDims);
    switch (T.getType().getElementType()) {
    case ElemKind::Int64ITy: {
      // Splat takes its value as float; reject values that would lose
      // precision on the int64 -> float round trip.
      int64_t v = T.getHandle<int64_t>().raw(0);
      RETURN_ERR_IF_NOT(
          v == static_cast<int64_t>(static_cast<float>(v)),
          "This ConstantOfShape implementation may cause losses for value " +
              std::to_string(v) + " .");
      SN = G_->createSplat(loadOperatorName(op), ty, v);
      break;
    }
    case ElemKind::Int32ITy: {
      // Same precision guard for int32 values.
      int32_t v = T.getHandle<int32_t>().raw(0);
      RETURN_ERR_IF_NOT(
          v == static_cast<int32_t>(static_cast<float>(v)),
          "This ConstantOfShape implementation may cause losses for value " +
              std::to_string(v) + " .");
      SN = G_->createSplat(loadOperatorName(op), ty, v);
      break;
    }
    default:
      SN = G_->createSplat(loadOperatorName(op), ty, T.getHandle().raw(0));
    }
  } else {
    // No shape input: the output shape is the value tensor's own shape.
    ty = mod_.uniqueType(T.getType().getElementType(), T.dims());
    SN = G_->createSplat(loadOperatorName(op), ty, T.getHandle().raw(0));
  }
  RETURN_IF_ERR(addNodeAsOutput(op, SN));
  return Error::success();
}

/// Loads an ONNX Tile operator as a chain of Tile nodes, one per dimension
/// whose repeat count differs from 1. 'repeats' must be a constant 1D tensor
/// with one entry per input dimension.
Error ONNXModelLoader::loadTile(const ONNX_NAMESPACE::NodeProto &op,
                                ArgumentDictionaryTy &dict) {
  const std::string &opName = loadOperatorName(op);
  NodeValue in, repeats;
  ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
  ASSIGN_VALUE_OR_RETURN_ERR(repeats, getNodeValueByName(op.input(1)));

  if (!llvm::isa<Constant>(repeats)) {
    RETURN_ERR("Only constant Repeats is supported!");
  }

  if (repeats.dims().size() != 1) {
    RETURN_ERR("Repeats must be a single-dimensional tensor!");
  }

  if (repeats.dims()[0] != in.dims().size()) {
    RETURN_ERR("Repeats should have one value for each dimension of input!");
  }
  auto rh = llvm::cast<Constant>(repeats)->getPayload().getHandle<int64_t>();
  Node *N = in;
  for (size_t i = 0; i < in.dims().size(); i++) {
    auto tiles = rh.raw(i);
    if (tiles != 1) {
      std::string name = opName + "." + std::to_string(i);
      N = G_->createTile(name, N, tiles, /*axis*/ i);
    }
  }

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Attempts to constant-fold \p op. Returns true if the operator was folded,
/// false if it is not foldable; an Error on loader failure.
Expected<bool>
ONNXModelLoader::foldOperator(const ONNX_NAMESPACE::NodeProto &op) {
  const unsigned numInputs = op.input_size();
  const std::string &typeName = op.op_type();
  llvm::SmallVector<NodeValue, 4> inputs;
  inputs.reserve(numInputs);
  for (unsigned i = 0; i < numInputs; i++) {
    NodeValue in;
    ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(i)));
    inputs.push_back(in);
  }

  if (!isConstantFoldable(inputs, typeName)) {
    return false;
  }

  // Create a temporary lightweight loader to construct function representing
  // current Op, and then constant fold the function using Interp backend.
Function *tmpF = mod_.createFunction("eval_const_fold__");
  ONNXModelLoader tmpLoader(*tmpF);
  tmpLoader.opsetVersion_ = opsetVersion_;
  // Folding succeeded iff the fold helper returned no error; the temporary
  // function is always erased afterwards.
  bool foldStatus = !ERR_TO_BOOL(
      constantFoldInLoader<ONNXModelLoader, ONNX_NAMESPACE::NodeProto>(
          tmpF, tmpLoader, this, op),
      /* log */ false);
  mod_.eraseFunction(tmpF);
  return foldStatus;
}

/// Loads an ONNX Where operator as a broadcasted Select node.
Error ONNXModelLoader::loadWhere(const ONNX_NAMESPACE::NodeProto &op,
                                 ArgumentDictionaryTy &dict) {
  NodeValue cNV;
  ASSIGN_VALUE_OR_RETURN_ERR(cNV, getNodeValueByName(op.input(0)));
  NodeValue xNV;
  ASSIGN_VALUE_OR_RETURN_ERR(xNV, getNodeValueByName(op.input(1)));
  NodeValue yNV;
  ASSIGN_VALUE_OR_RETURN_ERR(yNV, getNodeValueByName(op.input(2)));

  std::string opName = loadOperatorName(op);

  // Passing -1 for multi directional broadcast, axis will be computed
  // automatically.
  Node *N = G_->createNodeWithBroadcast<SelectNode>(opName, -1, cNV, xNV, yNV);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Utility function to get the RNN, GRU or LSTM direction from the proto
/// description. If not provided, the default direction is 'forward'.
static Expected<Function::RnnDirection>
getRnnDirection(const ONNX_NAMESPACE::NodeProto &op,
                ArgumentDictionaryTy &dict) {
  Function::RnnDirection direction = Function::RnnDirection::Forward;
  if (dict.count("direction")) {
    std::string directionStr;
    ASSIGN_VALUE_OR_RETURN_ERR(directionStr, loadStr(dict.at("direction")));
    if (directionStr == "forward") {
      direction = Function::RnnDirection::Forward;
    } else if (directionStr == "reverse") {
      direction = Function::RnnDirection::Reverse;
    } else if (directionStr == "bidirectional") {
      direction = Function::RnnDirection::Bidirectional;
    } else {
      RETURN_ERR("ONNX " + op.op_type() + " 'direction' attribute is invalid!",
                 ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_ATTRIBUTE);
    }
  }
  return direction;
}

/// Relu activation function definition.
static Function::RnnActivation RnnActivationRelu(Function &F) {
  return [&F](llvm::StringRef name, Node *input) {
    return F.createRELU(name, input);
  };
}

/// Tanh activation function definition.
static Function::RnnActivation RnnActivationTanh(Function &F) {
  return [&F](llvm::StringRef name, Node *input) {
    return F.createTanh(name, input);
  };
}

/// Sigmoid activation function definition.
static Function::RnnActivation RnnActivationSigmoid(Function &F) {
  return [&F](llvm::StringRef name, Node *input) {
    return F.createSigmoid(name, input);
  };
}

/// Utility function to get the RNN, GRU or LSTM activation functions from the
/// proto description. The activation function array is assumed to be already
/// initialized with the default values upon entering this function so that the
/// purpose of this function is to overwrite the specific default values.
/// Currenlty only Sigmoid, Tahn and ReLU activations are supported.
static Error
getRnnActivations(const ONNX_NAMESPACE::NodeProto &op,
                  ArgumentDictionaryTy &dict, Function *F,
                  std::vector<Function::RnnActivation> &activations) {

  // Activation alpha not supported (Optional)(Default:activation dependent).
  RETURN_ERR_IF_NOT(!dict.count("activation_alpha"),
                    "ONNX " + op.op_type() +
                        " 'activation_alpha' attribute not supported!");

  // Activation beta not supported (Optional)(Default:activation dependent).
  RETURN_ERR_IF_NOT(!dict.count("activation_beta"),
                    "ONNX " + op.op_type() +
                        " 'activation_beta' attribute not supported!");

  // Get activations.
  if (dict.count("activations") && dict.at("activations")->strings_size()) {
    size_t actNum = dict.at("activations")->strings_size();
    size_t actNumExpected = activations.size();
    RETURN_ERR_IF_NOT(actNum == actNumExpected,
                      strFormat("ONNX %s 'activations' attribute has invalid "
                                "number of functions! Expected number is %d!",
                                op.op_type().c_str(), (int)actNumExpected));
    for (size_t actIdx = 0; actIdx < actNum; actIdx++) {
      std::string actStr = dict.at("activations")->strings().Get(actIdx);
      if (actStr == "Relu") {
        activations[actIdx] = RnnActivationRelu(*F);
      } else if (actStr == "Tanh") {
        activations[actIdx] = RnnActivationTanh(*F);
      } else if (actStr == "Sigmoid") {
        activations[actIdx] = RnnActivationSigmoid(*F);
      } else {
        RETURN_ERR("ONNX " + op.op_type() + " activation '" + actStr +
                       "' not supported!",
                   ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_ATTRIBUTE);
      }
    }
  }
  return Error::success();
}

// Limitations:
// - Activation clipping not supported.
// - Variable sequence length not supported.
Error ONNXModelLoader::loadRNN(const ONNX_NAMESPACE::NodeProto &op,
                               ArgumentDictionaryTy &dict) {
  const std::string &opName = loadOperatorName(op);

  // ------------------------- Attributes -------------------------------------
  // Get direction (Optional)(Default:forward).
  Function::RnnDirection direction;
  ASSIGN_VALUE_OR_RETURN_ERR(direction, getRnnDirection(op, dict));
  dim_t numDirections =
      (direction == Function::RnnDirection::Bidirectional) ? 2 : 1;

  // Get activations as lambdas (Optional)(Default:f=Tanh).
  std::vector<Function::RnnActivation> activations;
  if (direction == Function::RnnDirection::Bidirectional) {
    activations = {RnnActivationTanh(*G_), RnnActivationTanh(*G_)};
  } else {
    activations = {RnnActivationTanh(*G_)};
  }
  RETURN_IF_ERR(getRnnActivations(op, dict, G_, activations));

  // Activation clipping not supported (Optional)(Default: 0 for no clipping).
  RETURN_ERR_IF_NOT(!dict.count("clip"),
                    "ONNX RNN 'clip' attribute not supported!");

  // Get hidden size (Required).
dim_t hiddenSize;
  RETURN_ERR_IF_NOT(dict.count("hidden_size"),
                    "ONNX RNN 'hidden_size' attribute is required!");
  ASSIGN_VALUE_OR_RETURN_ERR(hiddenSize, loadInt(dict.at("hidden_size")));

  // --------------------------- Inputs ---------------------------------------
  const int numInputs = op.input_size();
  RETURN_ERR_IF_NOT((3 <= numInputs) && (numInputs <= 6),
                    "ONNX RNN should have minimum 3 and maximum 6 inputs!");

  // Input0: X (Required).
  NodeValue X;
  ASSIGN_VALUE_OR_RETURN_ERR(X, getNodeValueByName(op.input(0)));

  // Input1: W (Required).
  NodeValue W;
  ASSIGN_VALUE_OR_RETURN_ERR(W, getNodeValueByName(op.input(1)));

  // Input2: R (Required).
  NodeValue R;
  ASSIGN_VALUE_OR_RETURN_ERR(R, getNodeValueByName(op.input(2)));

  // Input3: B (Optional).
  NodeValue B = nullptr;
  if (numInputs > 3 && !op.input(3).empty()) {
    ASSIGN_VALUE_OR_RETURN_ERR(B, getNodeValueByName(op.input(3)));
  }

  // Input4: sequence_lens (Optional).
  if (numInputs > 4) {
    RETURN_ERR_IF_NOT(op.input(4).empty(),
                      "ONNX RNN 'sequence_lens' attribute not supported!");
  }

  // Input5: initial_h (Optional).
  NodeValue initial_h = nullptr;
  if (numInputs > 5 && !op.input(5).empty()) {
    ASSIGN_VALUE_OR_RETURN_ERR(initial_h, getNodeValueByName(op.input(5)));
  }

  // -------------------------- Outputs ---------------------------------------
  // We always create placeholders for the RNN state variable Y_h for the
  // following reasons:
  // - expose the RNN state in the graph interface for accessibility (set
  //   desired state, reset state, watch the state being updated automatically).
  // - since the RNN cells are unrolled (no graph loop primitive available
  //   at this point), the optimal way to use the RNN within a model would be
  //   to have it defined with only 1 time step and have the loop in the top
  //   of the application while the RNN state will be automatically updated
  //   from one iteration (time step) to the next through the placeholders.

  // Derived parameters.
  RETURN_ERR_IF_NOT(X.dims().size() == 3,
                    "ONNX RNN input 'X' should have 3 dimensions!");
  dim_t batchSize = X.dims()[1];

  // Create Y_h (hidden state) output placeholder.
  Placeholder *Y_h_ph;
  TypeRef Htype = mod_.uniqueTypeWithNewShape(
      X.getType(), {numDirections, batchSize, hiddenSize});
  std::string Hname = opName + ".Y_h";
  ASSIGN_VALUE_OR_RETURN_ERR(Y_h_ph,
                             createAndRegisterPlaceholder(Hname, Htype));
  inputVarsByName_.try_emplace(Hname, Y_h_ph);

  // If RNN input state is explicitly provided then used it. If not, then
  // use the RNN state placeholder.
  NodeValue Y_h_init = initial_h.getNode() ? initial_h : Y_h_ph;

  // Create ONNX RNN.
  NodeValue Y, Y_h;
  G_->createOnnxRNN(opName, X, W, R, B, Y_h_init, Y, Y_h, hiddenSize,
                    direction, activations);

  // Save RNN state in the state placeholder.
  G_->createSave(opName + ".Y_h.save", Y_h, Y_h_ph);

  // Add node.
  const int numOutputs = op.output_size();
  if (numOutputs == 1) {
    RETURN_IF_ERR(addNodeAsOutput(op, Y));
  } else if (numOutputs == 2) {
    RETURN_IF_ERR(assignNodeOutputs(op, {Y, Y_h}));
  } else {
    RETURN_ERR("ONNX RNN should have minimum 1 and maximum 2 outputs!");
  }
  return Error::success();
}

// Limitations:
// - Activation clipping not supported.
// - Variable sequence length not supported.
Error ONNXModelLoader::loadGRU(const ONNX_NAMESPACE::NodeProto &op,
                               ArgumentDictionaryTy &dict) {
  const std::string &opName = loadOperatorName(op);

  // ------------------------- Attributes -------------------------------------
  // Get direction (Optional)(Default:forward).
  Function::RnnDirection direction;
  ASSIGN_VALUE_OR_RETURN_ERR(direction, getRnnDirection(op, dict));
  dim_t numDirections =
      (direction == Function::RnnDirection::Bidirectional) ? 2 : 1;

  // Get activations as lambdas (Optional)(Default:f=Sigmoid, g=Tanh).
std::vector<Function::RnnActivation> activations;
  if (direction == Function::RnnDirection::Bidirectional) {
    // Two activations per direction: forward pair then reverse pair.
    activations = {RnnActivationSigmoid(*G_), RnnActivationTanh(*G_),
                   RnnActivationSigmoid(*G_), RnnActivationTanh(*G_)};
  } else {
    activations = {RnnActivationSigmoid(*G_), RnnActivationTanh(*G_)};
  }
  RETURN_IF_ERR(getRnnActivations(op, dict, G_, activations));

  // Activation clipping not supported (Optional)(Default: 0 for no clipping).
  RETURN_ERR_IF_NOT(!dict.count("clip"),
                    "ONNX GRU 'clip' attribute not supported!");

  // Get hidden size (Required).
  dim_t hiddenSize;
  RETURN_ERR_IF_NOT(dict.count("hidden_size"),
                    "ONNX GRU 'hidden_size' attribute is required!");
  ASSIGN_VALUE_OR_RETURN_ERR(hiddenSize, loadInt(dict.at("hidden_size")));

  // Get linear_before_reset (Optional)(Default:0).
  int linearBeforeReset = 0;
  if (dict.count("linear_before_reset") &&
      dict.at("linear_before_reset")->has_i()) {
    linearBeforeReset = dict.at("linear_before_reset")->i();
  }

  // --------------------------- Inputs ---------------------------------------
  const int numInputs = op.input_size();
  RETURN_ERR_IF_NOT((3 <= numInputs) && (numInputs <= 6),
                    "ONNX GRU should have minimum 3 and maximum 6 inputs!");

  // Input0: X (Required).
  NodeValue X;
  ASSIGN_VALUE_OR_RETURN_ERR(X, getNodeValueByName(op.input(0)));

  // Input1: W (Required).
  NodeValue W;
  ASSIGN_VALUE_OR_RETURN_ERR(W, getNodeValueByName(op.input(1)));

  // Input2: R (Required).
  NodeValue R;
  ASSIGN_VALUE_OR_RETURN_ERR(R, getNodeValueByName(op.input(2)));

  // Input3: B (Optional).
  NodeValue B = nullptr;
  if (numInputs > 3 && !op.input(3).empty()) {
    ASSIGN_VALUE_OR_RETURN_ERR(B, getNodeValueByName(op.input(3)));
  }

  // Input4: sequence_lens (Optional).
  if (numInputs > 4) {
    RETURN_ERR_IF_NOT(op.input(4).empty(),
                      "ONNX GRU 'sequence_lens' attribute not supported!");
  }

  // Input5: initial_h (Optional).
  NodeValue initial_h = nullptr;
  if (numInputs > 5 && !op.input(5).empty()) {
    ASSIGN_VALUE_OR_RETURN_ERR(initial_h, getNodeValueByName(op.input(5)));
  }

  // -------------------------- Outputs ---------------------------------------
  // We always create placeholders for the GRU state variable Y_h for the
  // following reasons:
  // - expose the GRU state in the graph interface for accessibility (set
  //   desired state, reset state, watch the state being updated automatically).
  // - since the GRU cells are unrolled (no graph loop primitive available
  //   at this point), the optimal way to use the GRU within a model would be
  //   to have it defined with only 1 time step and have the loop in the top
  //   of the application while the GRU state will be automatically updated
  //   from one iteration (time step) to the next through the placeholders.

  // Derived parameters.
  RETURN_ERR_IF_NOT(X.dims().size() == 3,
                    "ONNX GRU input 'X' should have 3 dimensions!");
  dim_t batchSize = X.dims()[1];

  // Create Y_h (hidden state) output placeholder.
  Placeholder *Y_h_ph;
  TypeRef Htype = mod_.uniqueTypeWithNewShape(
      X.getType(), {numDirections, batchSize, hiddenSize});
  std::string Hname = opName + ".Y_h";
  ASSIGN_VALUE_OR_RETURN_ERR(Y_h_ph,
                             createAndRegisterPlaceholder(Hname, Htype));
  inputVarsByName_.try_emplace(Hname, Y_h_ph);

  // If GRU input state is explicitly provided then used it. If not, then
  // use the GRU state placeholder.
  NodeValue Y_h_init = initial_h.getNode() ? initial_h : Y_h_ph;

  // Create ONNX GRU.
  NodeValue Y, Y_h;
  G_->createOnnxGRU(opName, X, W, R, B, Y_h_init, Y, Y_h, hiddenSize,
                    direction, activations, (bool)linearBeforeReset);

  // Save GRU state in the state placeholder.
  G_->createSave(opName + ".Y_h.save", Y_h, Y_h_ph);

  // Add node.
  const int numOutputs = op.output_size();
  if (numOutputs == 1) {
    RETURN_IF_ERR(addNodeAsOutput(op, Y));
  } else if (numOutputs == 2) {
    RETURN_IF_ERR(assignNodeOutputs(op, {Y, Y_h}));
  } else {
    RETURN_ERR("ONNX GRU should have minimum 1 and maximum 2 outputs!");
  }
  return Error::success();
}

// Limitations:
// - Activation clipping not supported.
// - Variable sequence length not supported.
Error ONNXModelLoader::loadLSTM(const ONNX_NAMESPACE::NodeProto &op,
                                ArgumentDictionaryTy &dict) {
  const std::string &opName = loadOperatorName(op);

  // ------------------------- Attributes -------------------------------------
  // Get direction (Optional)(Default:forward).
  Function::RnnDirection direction;
  ASSIGN_VALUE_OR_RETURN_ERR(direction, getRnnDirection(op, dict));
  dim_t numDirections =
      (direction == Function::RnnDirection::Bidirectional) ? 2 : 1;

  // Get activations as lambdas (Optional)(Default:f=Sigmoid, g=Tanh, h=Tanh).
  std::vector<Function::RnnActivation> activations;
  if (direction == Function::RnnDirection::Bidirectional) {
    // Three activations per direction: forward triple then reverse triple.
    activations = {RnnActivationSigmoid(*G_), RnnActivationTanh(*G_),
                   RnnActivationTanh(*G_),    RnnActivationSigmoid(*G_),
                   RnnActivationTanh(*G_),    RnnActivationTanh(*G_)};
  } else {
    activations = {RnnActivationSigmoid(*G_), RnnActivationTanh(*G_),
                   RnnActivationTanh(*G_)};
  }
  RETURN_IF_ERR(getRnnActivations(op, dict, G_, activations));

  // Activation clipping not supported (Optional)(Default: 0 for no clipping).
  RETURN_ERR_IF_NOT(!dict.count("clip"),
                    "ONNX LSTM 'clip' attribute not supported!");

  // Get hidden size (Required).
  dim_t hiddenSize;
  RETURN_ERR_IF_NOT(dict.count("hidden_size"),
                    "ONNX LSTM 'hidden_size' attribute is required!");
  ASSIGN_VALUE_OR_RETURN_ERR(hiddenSize, loadInt(dict.at("hidden_size")));

  // Get input forget (Optional)(Default:0).
int inputForget = 0;
  if (dict.count("input_forget") && dict.at("input_forget")->has_i()) {
    inputForget = dict.at("input_forget")->i();
  }

  // --------------------------- Inputs ---------------------------------------
  const int numInputs = op.input_size();
  RETURN_ERR_IF_NOT((3 <= numInputs) && (numInputs <= 8),
                    "ONNX LSTM should have minimum 3 and maximum 8 inputs!");

  // Input0: X (Required).
  NodeValue X;
  ASSIGN_VALUE_OR_RETURN_ERR(X, getNodeValueByName(op.input(0)));

  // Input1: W (Required).
  NodeValue W;
  ASSIGN_VALUE_OR_RETURN_ERR(W, getNodeValueByName(op.input(1)));

  // Input2: R (Required).
  NodeValue R;
  ASSIGN_VALUE_OR_RETURN_ERR(R, getNodeValueByName(op.input(2)));

  // Input3: B (Optional).
  NodeValue B = nullptr;
  if (numInputs > 3 && !op.input(3).empty()) {
    ASSIGN_VALUE_OR_RETURN_ERR(B, getNodeValueByName(op.input(3)));
  }

  // Input4: sequence_lens (Optional).
  if (numInputs > 4) {
    RETURN_ERR_IF_NOT(op.input(4).empty(),
                      "ONNX LSTM 'sequence_lens' attribute not supported!");
  }

  // Input5: initial_h (Optional).
  NodeValue initial_h = nullptr;
  if (numInputs > 5 && !op.input(5).empty()) {
    ASSIGN_VALUE_OR_RETURN_ERR(initial_h, getNodeValueByName(op.input(5)));
  }

  // Input6: initial_c (Optional).
  NodeValue initial_c = nullptr;
  if (numInputs > 6 && !op.input(6).empty()) {
    ASSIGN_VALUE_OR_RETURN_ERR(initial_c, getNodeValueByName(op.input(6)));
  }

  // Input7: P (Optional).
  NodeValue P = nullptr;
  if (numInputs > 7 && !op.input(7).empty()) {
    ASSIGN_VALUE_OR_RETURN_ERR(P, getNodeValueByName(op.input(7)));
  }

  // -------------------------- Outputs ---------------------------------------
  // We always create placeholders for the LSTM state variables (Y_h and Y_c)
  // for the following reasons:
  // - expose the LSTM state in the graph interface for accessibility (set
  //   desired state, reset state, watch the state being updated automatically).
  // - since the LSTM cells are unrolled (no graph loop primitive available
  //   at this point), the optimal way to use the LSTM within a model would be
  //   to have it defined with only 1 time step and have the loop in the top
  //   of the application while the LSTM state will be automatically updated
  //   from one iteration (time step) to the next through the placeholders.

  // Derived parameters.
  RETURN_ERR_IF_NOT(X.dims().size() == 3,
                    "ONNX LSTM input 'X' should have 3 dimensions!");
  dim_t batchSize = X.dims()[1];

  // Create Y_h (hidden state) output placeholder.
  Placeholder *Y_h_ph;
  TypeRef Htype = mod_.uniqueTypeWithNewShape(
      X.getType(), {numDirections, batchSize, hiddenSize});
  std::string Hname = opName + ".Y_h";
  ASSIGN_VALUE_OR_RETURN_ERR(Y_h_ph,
                             createAndRegisterPlaceholder(Hname, Htype));
  inputVarsByName_.try_emplace(Hname, Y_h_ph);

  // Create Y_c (cell state) output placeholder.
  Placeholder *Y_c_ph;
  TypeRef Ctype = mod_.uniqueTypeWithNewShape(
      X.getType(), {numDirections, batchSize, hiddenSize});
  std::string Cname = opName + ".Y_c";
  ASSIGN_VALUE_OR_RETURN_ERR(Y_c_ph,
                             createAndRegisterPlaceholder(Cname, Ctype));
  inputVarsByName_.try_emplace(Cname, Y_c_ph);

  // If LSTM input states are explicitly provided then used them. If not, then
  // use the LSTM state placeholders.
  NodeValue Y_h_init = initial_h.getNode() ? initial_h : Y_h_ph;
  NodeValue Y_c_init = initial_c.getNode() ? initial_c : Y_c_ph;

  // Create ONNX LSTM.
  NodeValue Y, Y_h, Y_c;
  G_->createOnnxLSTM(opName, X, W, R, B, Y_h_init, Y_c_init, P, Y, Y_h, Y_c,
                     hiddenSize, direction, activations, (bool)inputForget);

  // Save LSTM state in the state placeholders.
  G_->createSave(opName + ".Y_h.save", Y_h, Y_h_ph);
  G_->createSave(opName + ".Y_c.save", Y_c, Y_c_ph);

  // Add node.
  const int numOutputs = op.output_size();
  if (numOutputs == 1) {
    RETURN_IF_ERR(addNodeAsOutput(op, Y));
  } else if (numOutputs == 2) {
    RETURN_IF_ERR(assignNodeOutputs(op, {Y, Y_h}));
  } else if (numOutputs == 3) {
    RETURN_IF_ERR(assignNodeOutputs(op, {Y, Y_h, Y_c}));
  } else {
    RETURN_ERR("ONNX LSTM should have minimum 1 and maximum 3 outputs!");
  }
  return Error::success();
}

/// Loads an element-wise equality comparison as a CmpEQ node.
Error ONNXModelLoader::loadCmpEQ(const ONNX_NAMESPACE::NodeProto &op,
                                 ArgumentDictionaryTy &dict) {
  NodeValue LHS;
  ASSIGN_VALUE_OR_RETURN_ERR(LHS, getNodeValueByName(op.input(0)));
  NodeValue RHS;
  ASSIGN_VALUE_OR_RETURN_ERR(RHS, getNodeValueByName(op.input(1)));

  Node *N = G_->createCmpEQ(loadOperatorName(op), LHS, RHS);
  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a less-than-or-equal comparison as a broadcasted CmpLTE node.
Error ONNXModelLoader::loadCmpLTE(const ONNX_NAMESPACE::NodeProto &op,
                                  ArgumentDictionaryTy &dict) {
  NodeValue LHS;
  ASSIGN_VALUE_OR_RETURN_ERR(LHS, getNodeValueByName(op.input(0)));
  NodeValue RHS;
  ASSIGN_VALUE_OR_RETURN_ERR(RHS, getNodeValueByName(op.input(1)));

  Node *N = G_->createNodeWithBroadcast<CmpLTENode>(loadOperatorName(op),
                                                    /* axis */ -1, LHS, RHS);
  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a Select operator; the output shape comes from the mandatory
/// 'shape' attribute.
Error ONNXModelLoader::loadSelect(const ONNX_NAMESPACE::NodeProto &op,
                                  ArgumentDictionaryTy &dict) {
  NodeValue Cond;
  ASSIGN_VALUE_OR_RETURN_ERR(Cond, getNodeValueByName(op.input(0)));
  NodeValue LHS;
  ASSIGN_VALUE_OR_RETURN_ERR(LHS, getNodeValueByName(op.input(1)));
  NodeValue RHS;
  ASSIGN_VALUE_OR_RETURN_ERR(RHS, getNodeValueByName(op.input(2)));

  std::vector<dim_t> shape;
  ASSIGN_VALUE_OR_RETURN_ERR(shape, getShape<dim_t>(dict["shape"]));

  auto outTy = mod_.uniqueType(LHS.getElementType(), shape);
  Node *N = G_->createSelect(loadOperatorName(op), outTy, Cond, LHS, RHS);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a Quantize operator using the 'scale', 'offset' and 'elem_kind'
/// attributes to build the quantized output type.
Error ONNXModelLoader::loadQuantize(const ONNX_NAMESPACE::NodeProto &op,
                                    ArgumentDictionaryTy &dict) {
  NodeValue in;
  ASSIGN_VALUE_OR_RETURN_ERR(in,
getNodeValueByName(op.input(0)));
  float scale;
  ASSIGN_VALUE_OR_RETURN_ERR(scale, loadFloat(dict.at("scale")));
  unsigned_t offset;
  ASSIGN_VALUE_OR_RETURN_ERR(offset, loadInt(dict.at("offset")));
  std::string elemKindStr;
  ASSIGN_VALUE_OR_RETURN_ERR(elemKindStr, loadStr(dict.at("elem_kind")));

  ElemKind elemKind = Type::getElementKindFromName(elemKindStr);

  auto outDims = in.getType()->dims();
  auto outTy = mod_.uniqueType(elemKind, outDims, scale, offset);
  Node *N = G_->createQuantize(loadOperatorName(op), in, outTy);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a ConvertTo operator; the output type (shape and element kind) is
/// carried by the tensor in the 'shape' attribute.
Error ONNXModelLoader::loadConvertTo(const ONNX_NAMESPACE::NodeProto &op,
                                     ArgumentDictionaryTy &dict) {
  NodeValue in;
  ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));

  const auto *attr = dict.at("shape");
  RETURN_ERR_IF_NOT(attr->has_t(),
                    "ConvertTo should have t() field as \"shape\"");
  const auto &t = attr->t();
  std::vector<dim_t> shape;
  for (const auto d : t.dims()) {
    shape.push_back(d);
  }

  auto type = ElemKind::FloatTy;
  RETURN_IF_ERR(onnxTensorDataTypeToElemKind(t.data_type(), &type));
  auto outTy = mod_.uniqueType(type, shape);
  Node *N = G_->createConvertTo(loadOperatorName(op), in, outTy);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a Dequantize operator converting a quantized input back to float.
Error ONNXModelLoader::loadDequantize(const ONNX_NAMESPACE::NodeProto &op,
                                      ArgumentDictionaryTy &dict) {
  NodeValue in;
  ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));

  Node *N = G_->createDequantize(loadOperatorName(op), in);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a Regression operator comparing 'in' against 'expected'.
Error ONNXModelLoader::loadRegression(const ONNX_NAMESPACE::NodeProto &op,
                                      ArgumentDictionaryTy &dict) {
  NodeValue in;
  ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
  NodeValue expected;
  ASSIGN_VALUE_OR_RETURN_ERR(expected, getNodeValueByName(op.input(1)));

  Node *N = G_->createRegression(loadOperatorName(op), in, expected);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a BatchedAdd operator adding a slice 'sample' to every slice of
/// 'batch'.
Error ONNXModelLoader::loadBatchedAdd(const ONNX_NAMESPACE::NodeProto &op,
                                      ArgumentDictionaryTy &dict) {
  NodeValue batch;
  ASSIGN_VALUE_OR_RETURN_ERR(batch, getNodeValueByName(op.input(0)));
  NodeValue sample;
  ASSIGN_VALUE_OR_RETURN_ERR(sample, getNodeValueByName(op.input(1)));

  Node *N = G_->createBatchedAdd(loadOperatorName(op), batch, sample);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a CumSum operator. Only axis == 0 is supported; the optional axis
/// input, when present, must be a constant scalar equal to 0.
Error ONNXModelLoader::loadCumSum(const ONNX_NAMESPACE::NodeProto &op,
                                  ArgumentDictionaryTy &dict) {
  if (op.input_size() > 1) {
    Expected<NodeValue> axis = getNodeValueByName(op.input(1));
    if (axis) {
      if (auto *AC = llvm::dyn_cast<Constant>(axis->getNode())) {
        RETURN_ERR_IF_NOT(AC->getPayload().dims().size() == 1,
                          "CumSum axis must be 0-D");
        RETURN_ERR_IF_NOT(AC->getPayload().dims()[0] == 1,
                          "CumSum axis must be 0-D");
        RETURN_ERR_IF_NOT(AC->getHandle<int32_t>().at(0) == 0,
                          "CumSum only supports axis == 0");
      } else {
        RETURN_ERR("Axis must be Constant");
      }
      // Axis default is 0, which is fine.
    }
  }

  NodeValue input;
  ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0)));
  bool exclusive = false;
  if (dict.count("exclusive")) {
    ASSIGN_VALUE_OR_RETURN_ERR(exclusive, loadInt(dict.at("exclusive")));
  }

  bool reverse = false;
  if (dict.count("reverse")) {
    ASSIGN_VALUE_OR_RETURN_ERR(reverse, loadInt(dict.at("reverse")));
  }

  Node *N = G_->createCumSum(loadOperatorName(op), input, exclusive, reverse);
  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a ScatterAssign operator writing 'slices' into 'data' at 'indices'.
Error ONNXModelLoader::loadScatterAssign(const ONNX_NAMESPACE::NodeProto &op,
                                         ArgumentDictionaryTy &dict) {
  NodeValue data;
  ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0)));
  NodeValue indices;
  ASSIGN_VALUE_OR_RETURN_ERR(indices, getNodeValueByName(op.input(1)));
  NodeValue slices;
  ASSIGN_VALUE_OR_RETURN_ERR(slices, getNodeValueByName(op.input(2)));

  Node *N = G_->createScatterData(loadOperatorName(op), data, indices, slices);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads an IntLookupTable operator mapping quantized values through the
/// table in the 'values' attribute; output shape comes from 'shape'.
Error ONNXModelLoader::loadIntLookupTable(const ONNX_NAMESPACE::NodeProto &op,
                                          ArgumentDictionaryTy &dict) {
  NodeValue in;
  ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));

  std::vector<int8_t> values;
  ASSIGN_VALUE_OR_RETURN_ERR(values, getShape<int8_t>(dict["values"]));
  std::vector<dim_t> shape;
  ASSIGN_VALUE_OR_RETURN_ERR(shape, getShape<dim_t>(dict["shape"]));

  auto outTy = mod_.uniqueType(in.getElementType(), shape);
  Node *N = G_->createIntLookupTable(loadOperatorName(op), in, values, outTy);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a LengthsRangeFill operator; 'size' bounds the output length.
Error ONNXModelLoader::loadLengthsRangeFill(const ONNX_NAMESPACE::NodeProto &op,
                                            ArgumentDictionaryTy &dict) {
  NodeValue lengths;
  ASSIGN_VALUE_OR_RETURN_ERR(lengths, getNodeValueByName(op.input(0)));
  unsigned_t size;
  ASSIGN_VALUE_OR_RETURN_ERR(size, loadInt(dict.at("size")));

  Node *N = G_->createLengthsRangeFill(loadOperatorName(op), lengths, size);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

Error
ONNXModelLoader::loadRescaleQuantized(const ONNX_NAMESPACE::NodeProto &op,
                                      ArgumentDictionaryTy &dict) {
  NodeValue in;
  ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
  float scale;
  ASSIGN_VALUE_OR_RETURN_ERR(scale, loadFloat(dict.at("scale")));
  unsigned_t offset;
  ASSIGN_VALUE_OR_RETURN_ERR(offset, loadInt(dict.at("offset")));

  // Keep the element kind and shape; only scale/offset change.
  auto inTy = in.getType();
  auto outTy =
      mod_.uniqueType(inTy->getElementType(), inTy->dims(), scale, offset);

  Node *N = G_->createRescaleQuantized(loadOperatorName(op), in, outTy);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a RowwiseQuantizedSparseLengthsWeightedSum operator. Data, scales
/// and offsets must be constants.
Error ONNXModelLoader::loadRowwiseQuantizedSparseLengthsWeightedSum(
    const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) {
  Constant *data;
  ASSIGN_VALUE_OR_RETURN_ERR(data, getConstantByName(op.input(0)));
  Constant *scales;
  ASSIGN_VALUE_OR_RETURN_ERR(scales, getConstantByName(op.input(1)));
  Constant *offsets;
  ASSIGN_VALUE_OR_RETURN_ERR(offsets, getConstantByName(op.input(2)));
  NodeValue weights;
  ASSIGN_VALUE_OR_RETURN_ERR(weights, getNodeValueByName(op.input(3)));
  NodeValue indices;
  ASSIGN_VALUE_OR_RETURN_ERR(indices, getNodeValueByName(op.input(4)));
  NodeValue lengths;
  ASSIGN_VALUE_OR_RETURN_ERR(lengths, getNodeValueByName(op.input(5)));
  LengthsMode lengthsMode;
  ASSIGN_VALUE_OR_RETURN_ERR(lengthsMode, getLengthsMode(dict));

  Node *N = G_->createRowwiseQuantizedSparseLengthsWeightedSum(
      loadOperatorName(op), data, scales, offsets, weights, indices, lengths,
      /* precision */ ElemKind::FloatTy, /* useFP16Accumulation */ false,
      lengthsMode);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a FusedRowwiseQuantizedSparseLengthsWeightedSum operator (scales and
/// offsets fused into the data tensor rows).
Error ONNXModelLoader::loadFusedRowwiseQuantizedSparseLengthsWeightedSum(
    const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) {
  NodeValue data;
  ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0)));
  NodeValue weights;
  ASSIGN_VALUE_OR_RETURN_ERR(weights, getNodeValueByName(op.input(1)));
  NodeValue indices;
  ASSIGN_VALUE_OR_RETURN_ERR(indices, getNodeValueByName(op.input(2)));
  NodeValue lengths;
  ASSIGN_VALUE_OR_RETURN_ERR(lengths, getNodeValueByName(op.input(3)));
  LengthsMode lengthsMode;
  ASSIGN_VALUE_OR_RETURN_ERR(lengthsMode, getLengthsMode(dict));

  Node *N = G_->createFusedRowwiseQuantizedSparseLengthsWeightedSum(
      loadOperatorName(op), data, weights, indices, lengths,
      /* useFP16Accumulation */ false, lengthsMode);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a FusedRowwiseQuantizedSparseLengthsSum operator (unweighted fused
/// variant).
Error ONNXModelLoader::loadFusedRowwiseQuantizedSparseLengthsSum(
    const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) {
  NodeValue data;
  ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0)));
  NodeValue indices;
  ASSIGN_VALUE_OR_RETURN_ERR(indices, getNodeValueByName(op.input(1)));
  NodeValue lengths;
  ASSIGN_VALUE_OR_RETURN_ERR(lengths, getNodeValueByName(op.input(2)));
  LengthsMode lengthsMode;
  ASSIGN_VALUE_OR_RETURN_ERR(lengthsMode, getLengthsMode(dict));

  Storage *dataS = llvm::dyn_cast<Storage>(data);
  Node *N = G_->createFusedRowwiseQuantizedSparseLengthsSum(
      loadOperatorName(op), dataS, indices, lengths,
      /* useFP16Accumulation */ false, lengthsMode);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a FullyConnected operator. Weights must be constant; the bias may be
/// either a constant or a regular node value.
Error ONNXModelLoader::loadFullyConnected(const ONNX_NAMESPACE::NodeProto &op,
                                          ArgumentDictionaryTy &dict) {
  NodeValue in;
  ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));

  Constant *W;
  ASSIGN_VALUE_OR_RETURN_ERR(W, getConstantByName(op.input(1)));

  // Fall back to a NodeValue bias when it is not a constant.
  Constant *B = getConstantByNameOrNull(op.input(2));
  NodeValue b;
  if (!B) {
    ASSIGN_VALUE_OR_RETURN_ERR(b, getNodeValueByName(op.input(2)));
  }

  unsigned_t axis = 1;
  if (dict.count("axis")) {
    ASSIGN_VALUE_OR_RETURN_ERR(axis, loadInt(dict.at("axis")));
  }

  Node *N =
      G_->createFullyConnected(loadOperatorName(op), in, W, B ? B : b, axis);

  RETURN_IF_ERR(addNodeAsOutput(op, N));
  return Error::success();
}

/// Loads a RowwiseQuantizedFullyConnected operator; the output quantization
/// parameters come from 'out_scale'/'out_offset'.
Error ONNXModelLoader::loadRowwiseQuantizedFullyConnected(
    const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) {
  NodeValue input;
  ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0)));

  NodeValue weights;
  ASSIGN_VALUE_OR_RETURN_ERR(weights, getNodeValueByName(op.input(1)));
  auto *weightsC = llvm::dyn_cast<Constant>(weights.getNode());

  NodeValue scales;
  ASSIGN_VALUE_OR_RETURN_ERR(scales, getNodeValueByName(op.input(2)));
  auto *scalesC = llvm::dyn_cast<Constant>(scales.getNode());

  NodeValue offsets;
  ASSIGN_VALUE_OR_RETURN_ERR(offsets, getNodeValueByName(op.input(3)));
  auto *offsetsC = llvm::dyn_cast<Constant>(offsets.getNode());

  NodeValue bias;
  ASSIGN_VALUE_OR_RETURN_ERR(bias, getNodeValueByName(op.input(4)));
  auto *biasC = llvm::dyn_cast<Constant>(bias.getNode());

  float outScale;
  ASSIGN_VALUE_OR_RETURN_ERR(outScale, loadFloat(dict.at("out_scale")));
  int32_t outOffset;
  ASSIGN_VALUE_OR_RETURN_ERR(outOffset, loadInt(dict.at("out_offset")));

  auto outTy =
      mod_.uniqueType(ElemKind::Int8QTy, {input.dims()[0], weights.dims()[0]},
                      outScale, outOffset);

  Node *N = G_->createRowwiseQuantizedFullyConnected(
      "rowwise_quantized_fc", input, weightsC, scalesC, offsetsC, biasC, outTy);

  return addNodeAsOutput(op, N);
}

/// Loads a NonMaxSuppression operator (ONNX or, when \p isV4 is set, the
/// TensorFlow v4 variant).
Error ONNXModelLoader::loadNonMaxSuppression(
    const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict,
    bool isV4) {
  NodeValue boxesNV;
  ASSIGN_VALUE_OR_RETURN_ERR(boxesNV, getNodeValueByName(op.input(0)));
  NodeValue scoresNV;
  ASSIGN_VALUE_OR_RETURN_ERR(scoresNV, getNodeValueByName(op.input(1)));

  Constant *maxOutputBoxesPerClassC = getConstantByNameOrNull(op.input(2));
  Constant *iouThresholdC = getConstantByNameOrNull(op.input(3));
  Constant *scoreThresholdC = getConstantByNameOrNull(op.input(4));

  // Defaults to 0 which is the same representation as TF.
unsigned centerPointBox = 0; if (dict.count("center_point_box")) { ASSIGN_VALUE_OR_RETURN_ERR(centerPointBox, loadInt(dict.at("center_point_box"))); } int32_t padToMaxOutputSize = 0; if (isV4) { if (dict.count("pad_to_max_output_size")) { ASSIGN_VALUE_OR_RETURN_ERR(padToMaxOutputSize, loadInt(dict.at("pad_to_max_output_size"))); } // Does it make sense within GLOW context to have no padding? Since Size has // to be compile time constant. RETURN_ERR_IF_NOT(padToMaxOutputSize == 1, "NonMaxSuppressionV4 does not support non-padding mode."); } unsigned maxOutputBoxesPerClass = 0; float iouThreshold = 0.0f; float scoreThreshold = 0.0f; if (maxOutputBoxesPerClassC) { if (maxOutputBoxesPerClassC->getPayload().getElementType() == ElemKind::Int64ITy) { maxOutputBoxesPerClass = maxOutputBoxesPerClassC->getPayload().getHandle<int64_t>().raw(0); } else if (maxOutputBoxesPerClassC->getPayload().getElementType() == ElemKind::Int32ITy) { maxOutputBoxesPerClass = maxOutputBoxesPerClassC->getPayload().getHandle<int32_t>().raw(0); } else { RETURN_ERR("Unsupported type for maxoutputboxesperclass."); } } else { RETURN_ERR("NMS: maxOutputBoxesPerClass is not a contant tensor."); } if (iouThresholdC) { iouThreshold = iouThresholdC->getPayload().getHandle<float>().raw(0); } else { RETURN_ERR("NMS: iouThreshold is not a contant tensor."); } if (scoreThresholdC) { scoreThreshold = scoreThresholdC->getPayload().getHandle<float>().raw(0); } else { RETURN_ERR("NMS: scoreThrehold is not a contant tensor."); } // Create Node. 
std::string opName = loadOperatorName(op); Node *N = nullptr; if (isV4) { N = G_->createNonMaxSuppressionV4(opName, boxesNV, scoresNV, centerPointBox, maxOutputBoxesPerClass, iouThreshold, scoreThreshold); } else { N = G_->createNonMaxSuppressionONNX(opName, boxesNV, scoresNV, centerPointBox, maxOutputBoxesPerClass, iouThreshold, scoreThreshold); } RETURN_IF_ERR(addNodeAsOutput(op, N)); return Error::success(); } Error ONNXModelLoader::loadSplat(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { return loadConstantOfShape(op, dict, true /* isSplat */); } Error ONNXModelLoader::loadInsertTensor(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { NodeValue big; ASSIGN_VALUE_OR_RETURN_ERR(big, getNodeValueByName(op.input(0))); NodeValue small; ASSIGN_VALUE_OR_RETURN_ERR(small, getNodeValueByName(op.input(1))); std::vector<dim_t> start; ASSIGN_VALUE_OR_RETURN_ERR(start, getShape<dim_t>(dict["start"])); unsigned_t count = 1; if (dict.count("count")) { ASSIGN_VALUE_OR_RETURN_ERR(count, loadInt(dict.at("count"))); } unsigned_t axis = 0; if (dict.count("axis")) { ASSIGN_VALUE_OR_RETURN_ERR(axis, loadInt(dict.at("axis"))); } Node *N = G_->createInsertTensor(loadOperatorName(op), big, small, start, count, axis); RETURN_IF_ERR(addNodeAsOutput(op, N)); return Error::success(); } Error ONNXModelLoader::loadIdentity(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); RETURN_IF_ERR(addNodeAsOutput(op, in)); return Error::success(); } Error ONNXModelLoader::loadAdaptiveAvgPool(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue input; ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0))); std::vector<unsigned_t> outputShape; ASSIGN_VALUE_OR_RETURN_ERR(outputShape, getShape<unsigned_t>(dict["output_size"])); ShapeNHWC idim(input.dims()); auto outTy = 
mod_.uniqueTypeWithNewShape( input.getType(), {idim.n, outputShape[0], outputShape[1], idim.c}); Node *N = G_->createAdaptiveAvgPool(opName, input, outTy); return addNodeAsOutput(op, N); } Error ONNXModelLoader::loadFlip(const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { NodeValue input; ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0))); unsigned_t axis = 0; if (dict.count("axis")) { ASSIGN_VALUE_OR_RETURN_ERR(axis, loadInt(dict.at("axis"))); } Node *N = G_->createFlip("flip", input, axis); RETURN_IF_ERR(addNodeAsOutput(op, N)); return Error::success(); } Expected<TypeRef> ONNXModelLoader::loadTypeFromAttributes(unsigned resNo, ArgumentDictionaryTy &dict) { Module &mod = *G_->getParent(); // Load ElemKind. std::string elemKindStr; ASSIGN_VALUE_OR_RETURN_ERR( elemKindStr, loadStr(dict[getTypeAttrID(resNo, elemKindSignifier)])); const ElemKind k = Type::getElementKindFromName(elemKindStr); // Load Shape. Note that we allow for empty shapes here because 0 dimensional // shapes are allowed (representing scalars). std::vector<dim_t> shape; ASSIGN_VALUE_OR_RETURN_ERR( shape, getShape<dim_t>(dict[getTypeAttrID(resNo, shapeSignifier)], /* allowEmptyShape */ true)); // Create and return uniqued non-quantized Type. if (!isQuantizedElemKind(k)) { return mod.uniqueType(k, shape); } // Must be quantized kind, so get scale/offset and create and return uniqued // quantized Type. float scale; ASSIGN_VALUE_OR_RETURN_ERR( scale, loadFloat(dict[getTypeAttrID(resNo, qScaleSignifier)])); int32_t offset; ASSIGN_VALUE_OR_RETURN_ERR( offset, loadInt(dict[getTypeAttrID(resNo, qOffsetSignifier)])); return mod.uniqueType(k, shape, scale, offset); } Expected<bool> ONNXModelLoader::tryLoadGlowCustomOp(llvm::StringRef typeName, const ONNX_NAMESPACE::NodeProto &op, ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Try all automatically generated import cases. 
#include "glow/AutoGenNodesImport.h" // If we get here then no case handled the op, so return false. return false; } Error ONNXModelLoader::loadOperator(const ONNX_NAMESPACE::NodeProto &op) { ArgumentDictionaryTy dict = loadArgumentMap(op); const std::string &typeName = op.op_type(); if (useGlowCustomOps_) { bool tryLoadGlowCustomOpResult; ASSIGN_VALUE_OR_RETURN_ERR(tryLoadGlowCustomOpResult, tryLoadGlowCustomOp(typeName, op, dict)); if (tryLoadGlowCustomOpResult) { return Error::success(); } // Identity is the only official ONNX op used with useGlowCustomOps. if (typeName != "Identity") { return MAKE_ERR(strFormat("Unable to load op %s", typeName.data())); } } // Check if operator is supported in parent class, CommonOperatorLoader. bool tryLoadCommonOperatorResult; ASSIGN_VALUE_OR_RETURN_ERR(tryLoadCommonOperatorResult, tryLoadCommonOperator(typeName, op, dict)); if (tryLoadCommonOperatorResult) { return Error::success(); } if (typeName == "Constant") { return loadConstant(op, dict); } if (typeName == "Slice") { return loadSlice(op, dict); } if (typeName == "Conv") { // If the Conv operator has quantized inputs, use // loadTensorwiseQuantizedConvolution. NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); return in.getType()->isQuantizedType() ? loadTensorwiseQuantizedConvolution(op, dict) : loadConv(op, dict); } if (typeName == "ChannelwiseQuantizedConvolution") { return loadChannelwiseQuantizedConvolution(op, dict); } if (typeName == "MaxPool" || typeName == "AveragePool") { // If the pool operator has quantized inputs, use // loadTensorwiseQuantizedPool. NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); return in.getType()->isQuantizedType() ? 
loadTensorwiseQuantizedPool(op, dict, typeName) : loadPool(op, dict, typeName); } if (typeName == "GlobalAveragePool") { return loadGlobalAveragePool(op, dict); } if (typeName == "Squeeze") { return loadSqueeze(op, dict); } if (typeName == "Unsqueeze") { return loadUnsqueeze(op, dict); } if (typeName == "BatchNormalization") { return loadBatchNormalization(op, dict); } if (typeName == "Concat") { return loadConcat(op, dict); } if (typeName == "FCTransposed") { return loadFCTransposed(op, dict); } if (typeName == "Gemm") { return loadGemm(op, dict); } if (typeName == "Transpose") { return loadTranspose(op, dict, "perm"); } if (typeName == "MatMul") { return loadMatMul(op, dict); } if (typeName == "Pad") { return loadPad(op, dict); } if (typeName == "Cast") { return loadCast(op, dict); } if (typeName == "LeakyRelu") { return loadLeakyRelu(op, dict); } if (typeName == "SpaceToDepth") { return loadSpaceToDepth(op, dict); } if (typeName == "ConstantOfShape") { return loadConstantOfShape(op, dict, false /* isSplat */); } if (typeName == "Tile") { return loadTile(op, dict); } if (typeName == "Where") { return loadWhere(op, dict); } if (typeName == "RNN") { return loadRNN(op, dict); } if (typeName == "GRU") { return loadGRU(op, dict); } if (typeName == "LSTM") { return loadLSTM(op, dict); } // Glow specific operators if (typeName == "CmpEQ") { return loadCmpEQ(op, dict); } if (typeName == "CmpLTE") { return loadCmpLTE(op, dict); } if (typeName == "Select") { return loadSelect(op, dict); } if (typeName == "Quantize") { return loadQuantize(op, dict); } if (typeName == "ConvertTo") { return loadConvertTo(op, dict); } if (typeName == "Dequantize") { return loadDequantize(op, dict); } if (typeName == "Regression") { return loadRegression(op, dict); } if (typeName == "BatchedAdd") { return loadBatchedAdd(op, dict); } if (typeName == "CumSum") { return loadCumSum(op, dict); } if (typeName == "ScatterAssign") { return loadScatterAssign(op, dict); } if (typeName == 
"IntLookupTable") { return loadIntLookupTable(op, dict); } if (typeName == "LengthsRangeFill") { return loadLengthsRangeFill(op, dict); } if (typeName == "RescaleQuantized") { return loadRescaleQuantized(op, dict); } if (typeName == "RowwiseQuantizedSparseLengthsWeightedSum") { return loadRowwiseQuantizedSparseLengthsWeightedSum(op, dict); } if (typeName == "FusedRowwiseQuantizedSparseLengthsWeightedSum") { return loadFusedRowwiseQuantizedSparseLengthsWeightedSum(op, dict); } if (typeName == "FusedRowwiseQuantizedSparseLengthsSum") { return loadFusedRowwiseQuantizedSparseLengthsSum(op, dict); } if (typeName == "FullyConnected") { return loadFullyConnected(op, dict); } if (typeName == "RowwiseQuantizedFullyConnected") { return loadRowwiseQuantizedFullyConnected(op, dict); } if (typeName == "Splat") { return loadSplat(op, dict); } if (typeName == "InsertTensor") { return loadInsertTensor(op, dict); } if (typeName == "ArgMax") { return loadArgMax(op, dict); } if (typeName == "NonMaxSuppressionV4") { return loadNonMaxSuppression(op, dict, true); } if (typeName == "NonMaxSuppression") { return loadNonMaxSuppression(op, dict, false); } if (typeName == "ConvTranspose") { return loadConvTranspose(op, dict); } if (typeName == "AdaptiveAvgPool") { return loadAdaptiveAvgPool(op, dict); } if (typeName == "Flip") { return loadFlip(op, dict); } if (typeName == "Identity") { return loadIdentity(op, dict); } RETURN_ERR("Failed to load operator " + typeName + " .", ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR); } Error ONNXModelLoader::loadInitializers(ONNX_NAMESPACE::GraphProto &net) { // Load the network initializers: for (const auto &in : net.initializer()) { Tensor T; RETURN_IF_ERR(loadTensor(in, &T, useGlowCustomOps_)); std::string layout = ANY_LAYOUT; if (useGlowCustomOps_) { ASSIGN_VALUE_OR_RETURN_ERR( layout, getAttrFromDocString(layoutSignifier, in.doc_string())); } RETURN_IF_ERR(createAndRegisterConstant(in.name(), std::move(T), layout)); } return 
Error::success(); } Error ONNXModelLoader::setOutputNodes(ONNX_NAMESPACE::GraphProto &net) { if (net.output_size() == 0) { RETURN_ERR("Net output size must be greater than 0"); } for (int i = 0; i < net.output_size(); i++) { const auto &outputName = net.output(i).name(); NodeValue r; ASSIGN_VALUE_OR_RETURN_ERR(r, getNodeValueByName(outputName)); const std::string &docString = net.output(i).doc_string(); Expected<std::string> saveName = getAttrFromDocString(saveNameSignifier, docString); const bool hasSpecifiedSaveName = !ERR_TO_BOOL(saveName.takeError(), /* log */ false); const std::string &saveNodeName = hasSpecifiedSaveName ? saveName.get() : outputName; std::string isTrainable = "0"; std::string layout = ANY_LAYOUT; if (useGlowCustomOps_) { ASSIGN_VALUE_OR_RETURN_ERR( isTrainable, getAttrFromDocString(trainableSignifier, docString)); ASSIGN_VALUE_OR_RETURN_ERR( layout, getAttrFromDocString(layoutSignifier, docString)); } Placeholder *placeholder = mod_.createPlaceholder( r.getType(), outputName, isTrainable != "0", layout); SaveNode *SN = G_->createSave(saveNodeName, r, placeholder, hasSpecifiedSaveName); outputVarsByName_[outputName] = SN->getPlaceholder(); } return Error::success(); } Error ONNXModelLoader::loadNetwork(ONNX_NAMESPACE::GraphProto &net) { /// Load the network operators: for (int i = 0; i < net.node_size(); i++) { auto &op = net.node(i); if (constFoldInLoader_) { auto tryFold = foldOperator(op); if (!tryFold) { // Error during constant folding; load the op normally below. const std::string errStr = ERR_TO_STRING(tryFold.takeError()); LOG(INFO) << "Error while trying to ConstantFold " << loadOperatorName(op) << ": " << errStr; } else if (tryFold.get()) { // Folded successfully, so skip loading the op below. 
continue; } } RETURN_IF_ERR(loadOperator(op)); } return Error::success(); } ONNXModelLoader::ONNXModelLoader(Function &F, Error *errPtr) : CommonOperatorLoader({}, {}, &F, errPtr) { deleteUnusedConstants(); } Error ONNXModelLoader::collectStaticInputs(ONNX_NAMESPACE::GraphProto &net) { for (int i = 0; i < net.input_size(); i++) { const ONNX_NAMESPACE::ValueInfoProto &valueInfo = net.input(i); const std::string &inputName = valueInfo.name(); if (useGlowCustomOps_) { std::string isStatic; ASSIGN_VALUE_OR_RETURN_ERR( isStatic, getAttrFromDocString(staticSignifier, valueInfo.doc_string())); if (isStatic == "1") { staticInputs_.emplace(inputName); } } else if (valueInfo.has_doc_string() && valueInfo.doc_string() == staticSignifier) { staticInputs_.emplace(inputName); } } return Error::success(); } Error ONNXModelLoader::checkInputs(ONNX_NAMESPACE::GraphProto &net, llvm::ArrayRef<const char *> tensorNames, llvm::ArrayRef<TypeRef> types) { for (size_t i = 0; i < tensorNames.size(); i++) { // Look if a corresponding input exists. for (int j = 0; j < net.input_size(); j++) { const ONNX_NAMESPACE::ValueInfoProto &valueInfo = net.input(j); const std::string &inputName = valueInfo.name(); if (inputName != tensorNames[i]) { continue; } // Get tensor shape. llvm::ArrayRef<dim_t> dims = types[i]->dims(); // Get proto shape. std::vector<dim_t> dimsProto; ASSIGN_VALUE_OR_RETURN_ERR( dimsProto, getProtoShape(valueInfo.type().tensor_type().shape())); // Check if the number of dimensions is consistent. RETURN_ERR_IF_NOT(dims.size() == dimsProto.size(), "Mismatch between input image and ONNX input shape"); // Allow batch dimensions to be different. 
for (size_t k = 1; k < dims.size(); k++) { RETURN_ERR_IF_NOT(dims[k] == dimsProto[k], "Mismatch between input image and ONNX input shape"); } if (valueInfo.has_doc_string() && valueInfo.doc_string() == staticSignifier) { staticInputs_.emplace(inputName); } } } return Error::success(); } ONNXModelLoader::ONNXModelLoader(const std::string &modelDescFilename, llvm::ArrayRef<const char *> tensorNames, llvm::ArrayRef<TypeRef> types, Function &F, Error *errPtr, bool zipMode, bool disableConstFoldInLoader, const Backend *B) : CommonOperatorLoader(tensorNames, types, &F, errPtr) { // if errPtr already contains an error then don't continue with constructor if (errPtr && *errPtr) { return; } if (disableConstFoldInLoader) { constFoldInLoader_ = false; } // Lambda to setup the ONNXModelLoader and return any Errors that were // raised. auto setup = [&]() -> Error { // The ONNX model that we are deserializing. ONNX_NAMESPACE::ModelProto modelDef; if (zipMode) { ZipReader zip(modelDescFilename); std::string buffer; buffer = zip.getRecord("model"); modelDef.ParseFromString(buffer); size_t numWeights = 0; auto numWeightsStr = zip.getRecord("weights"); numWeights = atoi(numWeightsStr.c_str()); for (size_t i = 0; i < numWeights; ++i) { std::stringstream ss; ss << "weight_" << i; buffer = zip.getRecord(ss.str()); auto *t = modelDef.mutable_graph()->add_initializer(); t->ParseFromString(buffer); } } else { ASSIGN_VALUE_OR_RETURN_ERR(modelDef, loadProto(modelDescFilename)); } useGlowCustomOps_ = modelDef.producer_name() == "GlowONNXModelWriter"; RETURN_IF_ERR(setVersion(modelDef)); ONNX_NAMESPACE::GraphProto graphDef = modelDef.graph(); RETURN_IF_ERR(checkInputs(graphDef, tensorNames, types)); RETURN_IF_ERR(collectStaticInputs(graphDef)); RETURN_IF_ERR(loadInitializers(graphDef)); if (tensorNames.empty() && types.empty()) { // Detect inputs without initializers and create placeholders. 
RETURN_IF_ERR(loadInputs(graphDef, /* loadInputsAsPlaceholders */ true)); } RETURN_IF_ERR(loadNetwork(graphDef)); RETURN_IF_ERR(setOutputNodes(graphDef)); RETURN_ERR_IF_NOT(F.verify(B), "Function verification failed."); deleteUnusedConstants(); return Error::success(); }; if (errPtr) { *errPtr = setup(); } else { EXIT_ON_ERR(setup()); } } ONNXModelLoader::ONNXModelLoader( const void *model, uint32_t modelSize, uint32_t weightsCount, const onnxTensorDescriptorV1 *weightDescriptors, Function &F, bool loadInputsAsPlaceholders, Error *errPtr, bool constFoldInLoader) : CommonOperatorLoader({}, {}, &F, errPtr) { // if errPtr already contains an error then don't continue with constructor if (errPtr && *errPtr) { return; } // Always override the default for folding in this constructor. constFoldInLoader_ = constFoldInLoader; // Lambda to setup the ONNXModelLoader and return any Errors that were // raised. auto setup = [&]() -> Error { ONNX_NAMESPACE::ModelProto modelDef; ASSIGN_VALUE_OR_RETURN_ERR(modelDef, loadProto(model, modelSize)); useGlowCustomOps_ = modelDef.producer_name() == "GlowONNXModelWriter"; RETURN_IF_ERR(setVersion(modelDef)); RETURN_IF_ERR(loadWeights(weightsCount, weightDescriptors)); ONNX_NAMESPACE::GraphProto graphDef = modelDef.graph(); RETURN_IF_ERR(loadInputs(graphDef, loadInputsAsPlaceholders)); RETURN_IF_ERR(loadInitializers(graphDef)); RETURN_IF_ERR(loadNetwork(graphDef)); RETURN_IF_ERR(setOutputNodes(graphDef)); deleteUnusedConstants(); return Error::success(); }; if (errPtr) { *errPtr = setup(); } else { EXIT_ON_ERR(setup()); } }
/* * * Copyright (c) 2020-2022 Project CHIP Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file * Platform agnostic implementation of CHIP crypto algorithms */ #include "CHIPCryptoPAL.h" #include <lib/asn1/ASN1.h> #include <lib/asn1/ASN1Macros.h> #include <lib/core/CHIPEncoding.h> #include <lib/support/BufferReader.h> #include <lib/support/BufferWriter.h> #include <lib/support/BytesToHex.h> #include <lib/support/CodeUtils.h> #include <lib/support/Span.h> #include <string.h> using chip::ByteSpan; using chip::MutableByteSpan; using chip::Encoding::BufferWriter; using chip::Encoding::LittleEndian::Reader; using namespace chip::ASN1; namespace { constexpr uint8_t kIntegerTag = 0x02u; constexpr uint8_t kSeqTag = 0x30u; constexpr size_t kMinSequenceOverhead = 1 /* tag */ + 1 /* length */ + 1 /* actual data or second length byte*/; /** * @brief Utility to read a length field after a tag in a DER-encoded stream. * @param[in] reader Reader instance from which the input will be read * @param[out] length Length of the following element read from the stream * @return CHIP_ERROR_INVALID_ARGUMENT or CHIP_ERROR_BUFFER_TOO_SMALL on error, CHIP_NO_ERROR otherwise */ CHIP_ERROR ReadDerLength(Reader & reader, uint8_t & length) { length = 0; uint8_t cur_byte = 0; ReturnErrorOnFailure(reader.Read8(&cur_byte).StatusCode()); if ((cur_byte & (1u << 7)) == 0) { // 7 bit length, the rest of the byte is the length. 
// Short form: the low 7 bits of the first byte are the length itself.
length = cur_byte & 0x7Fu;
return CHIP_NO_ERROR;
}

// Did not early return: > 7 bit length, the number of bytes of the length is provided next.
uint8_t length_bytes = cur_byte & 0x7Fu;
if ((length_bytes > 1) || !reader.HasAtLeast(length_bytes))
{
    // We only support lengths of 0..255, i.e. at most one extra length byte.
    return CHIP_ERROR_INVALID_ARGUMENT;
}

// Next byte has length 0..255.
return reader.Read8(&length).StatusCode();
}

/**
 * @brief Utility to convert a DER-encoded INTEGER into a raw unsigned integer buffer in big-endian order,
 *        left-padded with leading zeroes when the output buffer is larger than the encoded integer.
 * @param[in] reader Reader instance from which the input will be read
 * @param[out] raw_integer_out Buffer to receive the raw (not DER) integer value
 * @return CHIP_ERROR_INVALID_ARGUMENT or CHIP_ERROR_BUFFER_TOO_SMALL on error, CHIP_NO_ERROR otherwise
 */
CHIP_ERROR ReadDerUnsignedIntegerIntoRaw(Reader & reader, MutableByteSpan raw_integer_out)
{
    uint8_t cur_byte = 0;

    ReturnErrorOnFailure(reader.Read8(&cur_byte).StatusCode());

    // We expect first tag to be INTEGER
    VerifyOrReturnError(cur_byte == kIntegerTag, CHIP_ERROR_INVALID_ARGUMENT);

    // Read the length
    uint8_t integer_len = 0;
    ReturnErrorOnFailure(ReadDerLength(reader, integer_len));

    // Clear the destination buffer, so we can blit the unsigned value into place
    memset(raw_integer_out.data(), 0, raw_integer_out.size());

    // Check for pseudo-zero to mark unsigned value
    // This means we have too large an integer (should be at most 1 byte too large), it's invalid
    ReturnErrorCodeIf(integer_len > (raw_integer_out.size() + 1), CHIP_ERROR_INVALID_ARGUMENT);

    if (integer_len == (raw_integer_out.size() + 1u))
    {
        // Means we had a 0x00 byte stuffed due to MSB being high in original integer
        ReturnErrorOnFailure(reader.Read8(&cur_byte).StatusCode());

        // The extra byte must be a leading zero
        VerifyOrReturnError(cur_byte == 0, CHIP_ERROR_INVALID_ARGUMENT);
        --integer_len;
    }

    // We now have the rest of the tag that is a "minimal length" unsigned integer.
// Blit it at the correct offset, since the order we use is MSB first for // both ASN.1 and EC curve raw points. size_t offset = raw_integer_out.size() - integer_len; return reader.ReadBytes(raw_integer_out.data() + offset, integer_len).StatusCode(); } CHIP_ERROR ConvertIntegerRawToDerInternal(const ByteSpan & raw_integer, MutableByteSpan & out_der_integer, bool include_tag_and_length) { if (!IsSpanUsable(raw_integer) || !IsSpanUsable(out_der_integer)) { return CHIP_ERROR_INVALID_ARGUMENT; } Reader reader(raw_integer); BufferWriter writer(out_der_integer); bool needs_leading_zero_byte = false; uint8_t cur_byte = 0; while ((reader.Remaining() > 0) && (reader.Read8(&cur_byte).StatusCode() == CHIP_NO_ERROR) && (cur_byte == 0)) { // Omit all leading zeros } if ((cur_byte & 0x80u) != 0) { // If overall MSB (from leftmost byte) is set, we will need to push out a zero to avoid it being // considered a negative number. needs_leading_zero_byte = true; } // The + 1 is to account for the last consumed byte of the loop to skip leading zeros size_t length = reader.Remaining() + 1 + (needs_leading_zero_byte ? 1 : 0); if (length > 127) { // We do not support length over more than 1 bytes. return CHIP_ERROR_INVALID_ARGUMENT; } if (include_tag_and_length) { // Put INTEGER tag writer.Put(kIntegerTag); // Put length over 1 byte (i.e. 
MSB clear) writer.Put(static_cast<uint8_t>(length)); } // If leading zero or no more bytes remaining, must ensure we start with at least a zero byte if (needs_leading_zero_byte) { writer.Put(static_cast<uint8_t>(0u)); } // Put first consumed byte from last read iteration of leading zero suppression writer.Put(cur_byte); // Fill the rest from the input in order while (reader.Read8(&cur_byte).StatusCode() == CHIP_NO_ERROR) { // Emit all other bytes as-is writer.Put(cur_byte); } size_t actually_written = 0; if (!writer.Fit(actually_written)) { return CHIP_ERROR_BUFFER_TOO_SMALL; } out_der_integer = out_der_integer.SubSpan(0, actually_written); return CHIP_NO_ERROR; } } // namespace namespace chip { namespace Crypto { #ifdef ENABLE_HSM_HKDF using HKDF_sha_crypto = HKDF_shaHSM; #else using HKDF_sha_crypto = HKDF_sha; #endif CHIP_ERROR Spake2p::InternalHash(const uint8_t * in, size_t in_len) { const uint64_t u64_len = in_len; uint8_t lb[8]; lb[0] = static_cast<uint8_t>((u64_len >> 0) & 0xff); lb[1] = static_cast<uint8_t>((u64_len >> 8) & 0xff); lb[2] = static_cast<uint8_t>((u64_len >> 16) & 0xff); lb[3] = static_cast<uint8_t>((u64_len >> 24) & 0xff); lb[4] = static_cast<uint8_t>((u64_len >> 32) & 0xff); lb[5] = static_cast<uint8_t>((u64_len >> 40) & 0xff); lb[6] = static_cast<uint8_t>((u64_len >> 48) & 0xff); lb[7] = static_cast<uint8_t>((u64_len >> 56) & 0xff); ReturnErrorOnFailure(Hash(lb, sizeof(lb))); if (in != nullptr) { ReturnErrorOnFailure(Hash(in, in_len)); } return CHIP_NO_ERROR; } Spake2p::Spake2p(size_t _fe_size, size_t _point_size, size_t _hash_size) { fe_size = _fe_size; point_size = _point_size; hash_size = _hash_size; Kca = &Kcab[0]; Kcb = &Kcab[hash_size / 2]; Ka = &Kae[0]; Ke = &Kae[hash_size / 2]; M = nullptr; N = nullptr; G = nullptr; X = nullptr; Y = nullptr; L = nullptr; Z = nullptr; V = nullptr; w0 = nullptr; w1 = nullptr; xy = nullptr; order = nullptr; tempbn = nullptr; } CHIP_ERROR Spake2p::Init(const uint8_t * context, size_t context_len) { if 
// NOTE: this chunk starts inside Spake2p::Init(); the function header lies outside the
// visible region. The tail below re-initializes state if the object was already used.
(state != CHIP_SPAKE2P_STATE::PREINIT)
    {
        Clear();
    }
    ReturnErrorOnFailure(InitImpl());
    // Load the SPAKE2+ fixed curve points M and N from their serialized constants.
    ReturnErrorOnFailure(PointLoad(spake2p_M_p256, sizeof(spake2p_M_p256), M));
    ReturnErrorOnFailure(PointLoad(spake2p_N_p256, sizeof(spake2p_N_p256), N));
    // Absorb the protocol context into the running transcript hash.
    ReturnErrorOnFailure(InternalHash(context, context_len));
    state = CHIP_SPAKE2P_STATE::INIT;
    return CHIP_NO_ERROR;
}

// Absorbs the serialized M and N constants into the transcript hash.
CHIP_ERROR Spake2p::WriteMN()
{
    ReturnErrorOnFailure(InternalHash(spake2p_M_p256, sizeof(spake2p_M_p256)));
    ReturnErrorOnFailure(InternalHash(spake2p_N_p256, sizeof(spake2p_N_p256)));
    return CHIP_NO_ERROR;
}

// Starts a SPAKE2+ exchange as the verifier: hashes identities (peer first — the
// prover hashes its own identity first, keeping transcript order consistent),
// then loads w0 and the verifier value L.
CHIP_ERROR Spake2p::BeginVerifier(const uint8_t * my_identity, size_t my_identity_len, const uint8_t * peer_identity,
                                  size_t peer_identity_len, const uint8_t * w0in, size_t w0in_len, const uint8_t * Lin,
                                  size_t Lin_len)
{
    VerifyOrReturnError(state == CHIP_SPAKE2P_STATE::INIT, CHIP_ERROR_INTERNAL);
    ReturnErrorOnFailure(InternalHash(peer_identity, peer_identity_len));
    ReturnErrorOnFailure(InternalHash(my_identity, my_identity_len));
    ReturnErrorOnFailure(WriteMN());
    ReturnErrorOnFailure(FELoad(w0in, w0in_len, w0));
    ReturnErrorOnFailure(PointLoad(Lin, Lin_len, L));
    state = CHIP_SPAKE2P_STATE::STARTED;
    role  = CHIP_SPAKE2P_ROLE::VERIFIER;
    return CHIP_NO_ERROR;
}

// Starts a SPAKE2+ exchange as the prover: hashes identities (own first),
// then loads the password-derived field elements w0 and w1.
CHIP_ERROR Spake2p::BeginProver(const uint8_t * my_identity, size_t my_identity_len, const uint8_t * peer_identity,
                                size_t peer_identity_len, const uint8_t * w0in, size_t w0in_len, const uint8_t * w1in,
                                size_t w1in_len)
{
    VerifyOrReturnError(state == CHIP_SPAKE2P_STATE::INIT, CHIP_ERROR_INTERNAL);
    ReturnErrorOnFailure(InternalHash(my_identity, my_identity_len));
    ReturnErrorOnFailure(InternalHash(peer_identity, peer_identity_len));
    ReturnErrorOnFailure(WriteMN());
    ReturnErrorOnFailure(FELoad(w0in, w0in_len, w0));
    ReturnErrorOnFailure(FELoad(w1in, w1in_len, w1));
    state = CHIP_SPAKE2P_STATE::STARTED;
    role  = CHIP_SPAKE2P_ROLE::PROVER;
    return CHIP_NO_ERROR;
}

// Produces this side's public share (X for a prover, Y for a verifier) into `out`.
// Note: `pab`/`pab_len` are not referenced in this implementation.
CHIP_ERROR Spake2p::ComputeRoundOne(const uint8_t * pab, size_t pab_len, uint8_t * out, size_t * out_len)
{
    CHIP_ERROR error = CHIP_ERROR_INTERNAL;
    void * MN        = nullptr; // Choose M if a prover, N if a verifier
    void * XY        = nullptr; // Choose X if a prover, Y if a verifier

    VerifyOrExit(state == CHIP_SPAKE2P_STATE::STARTED, error = CHIP_ERROR_INTERNAL);
    VerifyOrExit(*out_len >= point_size, error = CHIP_ERROR_INTERNAL);

    // Fresh ephemeral scalar for this exchange.
    ReturnErrorOnFailure(FEGenerate(xy));

    if (role == CHIP_SPAKE2P_ROLE::PROVER)
    {
        MN = M;
        XY = X;
    }
    else if (role == CHIP_SPAKE2P_ROLE::VERIFIER)
    {
        MN = N;
        XY = Y;
    }
    VerifyOrExit(MN != nullptr, error = CHIP_ERROR_INTERNAL);
    VerifyOrExit(XY != nullptr, error = CHIP_ERROR_INTERNAL);

    // share = xy*G + w0*MN
    SuccessOrExit(error = PointAddMul(XY, G, xy, MN, w0));
    SuccessOrExit(error = PointWrite(XY, out, *out_len));

    state = CHIP_SPAKE2P_STATE::R1;
    error = CHIP_NO_ERROR;
exit:
    // Always reports the point size, even on failure paths.
    *out_len = point_size;
    return error;
}

// Consumes the peer's share `in`, computes the shared points Z and V, extends the
// transcript, derives the session keys, and emits this side's confirmation MAC.
CHIP_ERROR Spake2p::ComputeRoundTwo(const uint8_t * in, size_t in_len, uint8_t * out, size_t * out_len)
{
    CHIP_ERROR error = CHIP_ERROR_INTERNAL;
    MutableByteSpan out_span{ out, *out_len };
    uint8_t point_buffer[kMAX_Point_Length];
    void * MN        = nullptr; // Choose N if a prover, M if a verifier
    void * XY        = nullptr; // Choose Y if a prover, X if a verifier
    uint8_t * Kcaorb = nullptr; // Choose Kca if a prover, Kcb if a verifier

    VerifyOrExit(*out_len >= hash_size, error = CHIP_ERROR_INTERNAL);
    VerifyOrExit(state == CHIP_SPAKE2P_STATE::R1, error = CHIP_ERROR_INTERNAL);
    VerifyOrExit(in_len == point_size, error = CHIP_ERROR_INTERNAL);

    // The transcript hashes shares in prover-then-verifier order for both roles.
    if (role == CHIP_SPAKE2P_ROLE::PROVER)
    {
        SuccessOrExit(error = PointWrite(X, point_buffer, point_size));
        SuccessOrExit(error = InternalHash(point_buffer, point_size));
        SuccessOrExit(error = InternalHash(in, in_len));
        MN     = N;
        XY     = Y;
        Kcaorb = Kca;
    }
    else if (role == CHIP_SPAKE2P_ROLE::VERIFIER)
    {
        SuccessOrExit(error = InternalHash(in, in_len));
        SuccessOrExit(error = PointWrite(Y, point_buffer, point_size));
        SuccessOrExit(error = InternalHash(point_buffer, point_size));
        MN     = M;
        XY     = X;
        Kcaorb = Kcb;
    }
    VerifyOrExit(MN != nullptr, error = CHIP_ERROR_INTERNAL);
    VerifyOrExit(XY != nullptr, error = CHIP_ERROR_INTERNAL);

    // Z = h*xy*(peer_share - w0*MN), computed via point inversion + add-mul.
    SuccessOrExit(error = PointLoad(in, in_len, XY));
    SuccessOrExit(error = PointIsValid(XY));
    SuccessOrExit(error = FEMul(tempbn, xy, w0));
    SuccessOrExit(error = PointInvert(MN));
    SuccessOrExit(error = PointAddMul(Z, XY, xy, MN, tempbn));
    SuccessOrExit(error = PointCofactorMul(Z));

    if (role == CHIP_SPAKE2P_ROLE::PROVER)
    {
        SuccessOrExit(error = FEMul(tempbn, w1, w0));
        SuccessOrExit(error = PointAddMul(V, XY, w1, MN, tempbn));
    }
    else if (role == CHIP_SPAKE2P_ROLE::VERIFIER)
    {
        SuccessOrExit(error = PointMul(V, L, xy));
    }
    SuccessOrExit(error = PointCofactorMul(V));

    // Finish the transcript with Z, V and w0, then derive Ke/Kc material.
    SuccessOrExit(error = PointWrite(Z, point_buffer, point_size));
    SuccessOrExit(error = InternalHash(point_buffer, point_size));
    SuccessOrExit(error = PointWrite(V, point_buffer, point_size));
    SuccessOrExit(error = InternalHash(point_buffer, point_size));
    SuccessOrExit(error = FEWrite(w0, point_buffer, fe_size));
    SuccessOrExit(error = InternalHash(point_buffer, fe_size));
    SuccessOrExit(error = GenerateKeys());

    // Confirmation MAC over the peer's round-one share.
    SuccessOrExit(error = Mac(Kcaorb, hash_size / 2, in, in_len, out_span));
    VerifyOrExit(out_span.size() == hash_size, error = CHIP_ERROR_INTERNAL);

    state = CHIP_SPAKE2P_STATE::R2;
    error = CHIP_NO_ERROR;
exit:
    *out_len = hash_size;
    return error;
}

// Finalizes the transcript hash into Kae (= Ka || Ke) and derives the
// confirmation keys Kcab (= Kca || Kcb) from Ka via the KDF.
CHIP_ERROR Spake2p::GenerateKeys()
{
    static const uint8_t info_keyconfirm[16] = { 'C', 'o', 'n', 'f', 'i', 'r', 'm', 'a', 't', 'i', 'o', 'n', 'K', 'e', 'y', 's' };
    MutableByteSpan Kae_span{ &Kae[0], sizeof(Kae) };
    ReturnErrorOnFailure(HashFinalize(Kae_span));
    ReturnErrorOnFailure(KDF(Ka, hash_size / 2, nullptr, 0, info_keyconfirm, sizeof(info_keyconfirm), Kcab, hash_size));
    return CHIP_NO_ERROR;
}

// Verifies the peer's confirmation MAC (computed over our own round-one share).
CHIP_ERROR Spake2p::KeyConfirm(const uint8_t * in, size_t in_len)
{
    uint8_t point_buffer[kP256_Point_Length];
    void * XY        = nullptr; // Choose X if a prover, Y if a verifier
    uint8_t * Kcaorb = nullptr; // Choose Kcb if a prover, Kca if a verifier

    VerifyOrReturnError(state == CHIP_SPAKE2P_STATE::R2, CHIP_ERROR_INTERNAL);

    if (role == CHIP_SPAKE2P_ROLE::PROVER)
    {
        XY     = X;
        Kcaorb = Kcb;
    }
    else if (role == CHIP_SPAKE2P_ROLE::VERIFIER)
    {
        XY     = Y;
        Kcaorb = Kca;
    }
    VerifyOrReturnError(XY != nullptr, CHIP_ERROR_INTERNAL);
    VerifyOrReturnError(Kcaorb != nullptr, CHIP_ERROR_INTERNAL);

    ReturnErrorOnFailure(PointWrite(XY, point_buffer, point_size));

    CHIP_ERROR err = MacVerify(Kcaorb, hash_size / 2, in, in_len, point_buffer, point_size);
    if (err == CHIP_ERROR_INTERNAL)
    {
        // A MAC mismatch is the common symptom of a mistyped setup code.
        ChipLogError(SecureChannel, "Failed to verify peer's MAC. This can happen when setup code is incorrect.");
    }
    ReturnErrorOnFailure(err);

    state = CHIP_SPAKE2P_STATE::KC;
    return CHIP_NO_ERROR;
}

// Copies the shared secret Ke (half a hash) out once key confirmation succeeded.
CHIP_ERROR Spake2p::GetKeys(uint8_t * out, size_t * out_len)
{
    CHIP_ERROR error = CHIP_ERROR_INTERNAL;
    VerifyOrExit(state == CHIP_SPAKE2P_STATE::KC, error = CHIP_ERROR_INTERNAL);
    VerifyOrExit(*out_len >= hash_size / 2, error = CHIP_ERROR_INVALID_ARGUMENT);
    memcpy(out, Ke, hash_size / 2);
    error = CHIP_NO_ERROR;
exit:
    *out_len = hash_size / 2;
    return error;
}

// --- SHA256/HKDF/HMAC backend glue for the generic Spake2p machinery ---

CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::InitImpl()
{
    ReturnErrorOnFailure(sha256_hash_ctx.Begin());
    ReturnErrorOnFailure(InitInternal());
    return CHIP_NO_ERROR;
}

// Feeds data into the running SHA-256 transcript hash.
CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::Hash(const uint8_t * in, size_t in_len)
{
    ReturnErrorOnFailure(sha256_hash_ctx.AddData(ByteSpan{ in, in_len }));
    return CHIP_NO_ERROR;
}

CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::HashFinalize(MutableByteSpan & out_span)
{
    ReturnErrorOnFailure(sha256_hash_ctx.Finish(out_span));
    return CHIP_NO_ERROR;
}

// HKDF-SHA256 key derivation used for confirmation-key expansion.
CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::KDF(const uint8_t * ikm, const size_t ikm_len, const uint8_t * salt,
                                              const size_t salt_len, const uint8_t * info, const size_t info_len,
                                              uint8_t * out, size_t out_len)
{
    HKDF_sha_crypto mHKDF;
    ReturnErrorOnFailure(mHKDF.HKDF_SHA256(ikm, ikm_len, salt, salt_len, info, info_len, out, out_len));
    return CHIP_NO_ERROR;
}

// Reduces the oversized w0s input modulo the curve order by loading it as a
// field element, then writes it back out in canonical form.
CHIP_ERROR Spake2p_P256_SHA256_HKDF_HMAC::ComputeW0(uint8_t * w0out, size_t * w0_len, const uint8_t * w0sin, size_t w0sin_len)
{
    ReturnErrorOnFailure(FELoad(w0sin, w0sin_len, w0));
    ReturnErrorOnFailure(FEWrite(w0, w0out, *w0_len));
    return CHIP_NO_ERROR;
}

// Serializes the verifier as w0 || L.
CHIP_ERROR Spake2pVerifier::Serialize(MutableByteSpan & outSerialized) const
{
    VerifyOrReturnError(outSerialized.size() >= kSpake2p_VerifierSerialized_Length, CHIP_ERROR_INVALID_ARGUMENT);
    memcpy(&outSerialized.data()[0], mW0, sizeof(mW0));
    memcpy(&outSerialized.data()[sizeof(mW0)], mL, sizeof(mL));
    outSerialized.reduce_size(kSpake2p_VerifierSerialized_Length);
    return CHIP_NO_ERROR;
}

// Inverse of Serialize(): splits w0 || L back into the member buffers.
CHIP_ERROR Spake2pVerifier::Deserialize(const ByteSpan & inSerialized)
{
    VerifyOrReturnError(inSerialized.size() >= kSpake2p_VerifierSerialized_Length, CHIP_ERROR_INVALID_ARGUMENT);
    memcpy(mW0, &inSerialized.data()[0], sizeof(mW0));
    memcpy(mL, &inSerialized.data()[sizeof(mW0)], sizeof(mL));
    return CHIP_NO_ERROR;
}

// Derives a verifier (w0, L) from a setup PIN via PBKDF2 + a scratch Spake2p instance.
CHIP_ERROR Spake2pVerifier::Generate(uint32_t pbkdf2IterCount, const ByteSpan & salt, uint32_t & setupPin)
{
    uint8_t serializedWS[kSpake2p_WS_Length * 2] = { 0 };
    ReturnErrorOnFailure(ComputeWS(pbkdf2IterCount, salt, setupPin, serializedWS, sizeof(serializedWS)));

    CHIP_ERROR err = CHIP_NO_ERROR;
    size_t len;

    // Create local Spake2+ object for w0 and L computations.
#ifdef ENABLE_HSM_SPAKE
    Spake2pHSM_P256_SHA256_HKDF_HMAC spake2p;
#else
    Spake2p_P256_SHA256_HKDF_HMAC spake2p;
#endif
    uint8_t context[kSHA256_Hash_Length] = { 0 };
    SuccessOrExit(err = spake2p.Init(context, sizeof(context)));

    // Compute w0
    len = sizeof(mW0);
    SuccessOrExit(err = spake2p.ComputeW0(mW0, &len, &serializedWS[0], kSpake2p_WS_Length));
    VerifyOrExit(len == sizeof(mW0), err = CHIP_ERROR_INTERNAL);

    // Compute L
    len = sizeof(mL);
    SuccessOrExit(err = spake2p.ComputeL(mL, &len, &serializedWS[kSpake2p_WS_Length], kSpake2p_WS_Length));
    VerifyOrExit(len == sizeof(mL), err = CHIP_ERROR_INTERNAL);

exit:
    spake2p.Clear();
    return err;
}

// PBKDF2-SHA256 stretch of the little-endian setup PIN into the ws bytes.
CHIP_ERROR Spake2pVerifier::ComputeWS(uint32_t pbkdf2IterCount, const ByteSpan & salt, uint32_t & setupPin, uint8_t * ws,
                                      uint32_t ws_len)
{
#ifdef ENABLE_HSM_PBKDF2
    PBKDF2_sha256HSM pbkdf2;
#else
    PBKDF2_sha256 pbkdf2;
#endif
    uint8_t littleEndianSetupPINCode[sizeof(uint32_t)];
    Encoding::LittleEndian::Put32(littleEndianSetupPINCode, setupPin);

    ReturnErrorCodeIf(salt.size() < kSpake2p_Min_PBKDF_Salt_Length || salt.size() > kSpake2p_Max_PBKDF_Salt_Length,
                      CHIP_ERROR_INVALID_ARGUMENT);
    ReturnErrorCodeIf(pbkdf2IterCount < kSpake2p_Min_PBKDF_Iterations || pbkdf2IterCount > kSpake2p_Max_PBKDF_Iterations,
                      CHIP_ERROR_INVALID_ARGUMENT);

    return pbkdf2.pbkdf2_sha256(littleEndianSetupPINCode, sizeof(littleEndianSetupPINCode), salt.data(), salt.size(),
                                pbkdf2IterCount, ws_len, ws);
}

CHIP_ERROR ConvertIntegerRawToDerWithoutTag(const ByteSpan & raw_integer, MutableByteSpan & out_der_integer)
{
    return ConvertIntegerRawToDerInternal(raw_integer, out_der_integer, /* include_tag_and_length = */ false);
}

CHIP_ERROR ConvertIntegerRawToDer(const ByteSpan & raw_integer, MutableByteSpan & out_der_integer)
{
    return ConvertIntegerRawToDerInternal(raw_integer, out_der_integer, /* include_tag_and_length = */ true);
}

// Converts a raw <r,s> ECDSA signature into an ASN.1 DER SEQUENCE of two INTEGERs.
CHIP_ERROR EcdsaRawSignatureToAsn1(size_t fe_length_bytes, const ByteSpan & raw_sig, MutableByteSpan & out_asn1_sig)
{
    VerifyOrReturnError(fe_length_bytes > 0, CHIP_ERROR_INVALID_ARGUMENT);
    VerifyOrReturnError(raw_sig.size() == (2u * fe_length_bytes), CHIP_ERROR_INVALID_ARGUMENT);
    VerifyOrReturnError(out_asn1_sig.size() >= (raw_sig.size() + kMax_ECDSA_X9Dot62_Asn1_Overhead), CHIP_ERROR_BUFFER_TOO_SMALL);

    // Write both R an S integers past the overhead, we will shift them back later if we only needed 2 size bytes.
    uint8_t * cursor = out_asn1_sig.data() + kMinSequenceOverhead;
    size_t remaining = out_asn1_sig.size() - kMinSequenceOverhead;

    size_t integers_length = 0;

    // Write R (first `fe_length_bytes` block of raw signature)
    {
        MutableByteSpan out_der_integer(cursor, remaining);
        ReturnErrorOnFailure(ConvertIntegerRawToDer(raw_sig.SubSpan(0, fe_length_bytes), out_der_integer));
        VerifyOrReturnError(out_der_integer.size() <= remaining, CHIP_ERROR_INTERNAL);

        integers_length += out_der_integer.size();
        remaining -= out_der_integer.size();
        cursor += out_der_integer.size();
    }

    // Write S (second `fe_length_bytes` block of raw signature)
    {
        MutableByteSpan out_der_integer(cursor, remaining);
        ReturnErrorOnFailure(ConvertIntegerRawToDer(raw_sig.SubSpan(fe_length_bytes, fe_length_bytes), out_der_integer));
        VerifyOrReturnError(out_der_integer.size() <= remaining, CHIP_ERROR_INTERNAL);

        integers_length += out_der_integer.size();
    }

    // We only support outputs that would use 1 or 2 bytes of DER length after the SEQUENCE tag
    VerifyOrReturnError(integers_length <= UINT8_MAX, CHIP_ERROR_INVALID_ARGUMENT);

    // We now know the length of both variable sized integers in the sequence, so we
    // can write the tag and length.
    BufferWriter writer(out_asn1_sig);

    // Put SEQUENCE tag
    writer.Put(kSeqTag);

    // Put the length over 1 or two bytes depending on case
    constexpr uint8_t kExtendedLengthMarker = 0x80u;
    if (integers_length > 127u)
    {
        writer.Put(static_cast<uint8_t>(kExtendedLengthMarker | 1)); // Length is extended length, over 1 subsequent byte
        writer.Put(static_cast<uint8_t>(integers_length));
    }
    else
    {
        // Length is directly in the first byte with MSB clear if <= 127.
        writer.Put(static_cast<uint8_t>(integers_length));
    }

    // Put the contents of the integers previously serialized in the buffer.
    // The writer.Put is memmove-safe, so the shifting will happen from the read
    // of the same buffer where the write is taking place.
    writer.Put(out_asn1_sig.data() + kMinSequenceOverhead, integers_length);

    size_t actually_written = 0;
    VerifyOrReturnError(writer.Fit(actually_written), CHIP_ERROR_BUFFER_TOO_SMALL);

    out_asn1_sig = out_asn1_sig.SubSpan(0, actually_written);
    return CHIP_NO_ERROR;
}

// Converts an ASN.1 DER ECDSA signature back into the raw <r,s> form.
CHIP_ERROR EcdsaAsn1SignatureToRaw(size_t fe_length_bytes, const ByteSpan & asn1_sig, MutableByteSpan & out_raw_sig)
{
    VerifyOrReturnError(fe_length_bytes > 0, CHIP_ERROR_INVALID_ARGUMENT);
    VerifyOrReturnError(asn1_sig.size() > kMinSequenceOverhead, CHIP_ERROR_BUFFER_TOO_SMALL);

    // Output raw signature is <r,s> both of which are of fe_length_bytes (see SEC1).
    VerifyOrReturnError(out_raw_sig.size() >= (2u * fe_length_bytes), CHIP_ERROR_BUFFER_TOO_SMALL);

    Reader reader(asn1_sig);

    // Make sure we have a starting Sequence
    uint8_t tag = 0;
    ReturnErrorOnFailure(reader.Read8(&tag).StatusCode());
    VerifyOrReturnError(tag == kSeqTag, CHIP_ERROR_INVALID_ARGUMENT);

    // Read length of sequence
    uint8_t tag_len = 0;
    ReturnErrorOnFailure(ReadDerLength(reader, tag_len));

    // Length of sequence must match what is left of signature
    VerifyOrReturnError(tag_len == reader.Remaining(), CHIP_ERROR_INVALID_ARGUMENT);

    // Can now clear raw signature integers r,s one by one
    uint8_t * raw_cursor = out_raw_sig.data();

    // Read R
    ReturnErrorOnFailure(ReadDerUnsignedIntegerIntoRaw(reader, MutableByteSpan{ raw_cursor, fe_length_bytes }));

    raw_cursor += fe_length_bytes;

    // Read S
    ReturnErrorOnFailure(ReadDerUnsignedIntegerIntoRaw(reader, MutableByteSpan{ raw_cursor, fe_length_bytes }));

    out_raw_sig = out_raw_sig.SubSpan(0, (2u * fe_length_bytes));

    return CHIP_NO_ERROR;
}

// Derives the 64-bit compressed fabric identifier per the Matter spec pseudocode
// (HKDF over the root public key with the big-endian fabric ID as salt).
CHIP_ERROR GenerateCompressedFabricId(const Crypto::P256PublicKey & root_public_key, uint64_t fabric_id,
                                      MutableByteSpan & out_compressed_fabric_id)
{
    VerifyOrReturnError(root_public_key.IsUncompressed(), CHIP_ERROR_INVALID_ARGUMENT);
    VerifyOrReturnError(out_compressed_fabric_id.size() >= kCompressedFabricIdentifierSize, CHIP_ERROR_BUFFER_TOO_SMALL);

    // Ensure proper endianness for Fabric ID (i.e. big-endian as it appears in certificates)
    uint8_t fabric_id_as_big_endian_salt[kCompressedFabricIdentifierSize];
    chip::Encoding::BigEndian::Put64(&fabric_id_as_big_endian_salt[0], fabric_id);

    // Compute Compressed fabric reference per spec pseudocode
    //   CompressedFabricIdentifier =
    //     CHIP_Crypto_KDF(
    //       inputKey := TargetOperationalRootPublicKey,
    //       salt:= TargetOperationalFabricID,
    //       info := CompressedFabricInfo,
    //       len := 64)
    //
    // NOTE: len=64 bits is implied by output buffer size when calling HKDF_sha::HKDF_SHA256.
    constexpr uint8_t kCompressedFabricInfo[16] = /* "CompressedFabric" */
        { 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x46, 0x61, 0x62, 0x72, 0x69, 0x63 };
    HKDF_sha hkdf;

    // Must drop uncompressed point form format specifier (first byte), per spec method
    ByteSpan input_key_span(root_public_key.ConstBytes() + 1, root_public_key.Length() - 1);

    CHIP_ERROR status = hkdf.HKDF_SHA256(
        input_key_span.data(), input_key_span.size(), &fabric_id_as_big_endian_salt[0], sizeof(fabric_id_as_big_endian_salt),
        &kCompressedFabricInfo[0], sizeof(kCompressedFabricInfo), out_compressed_fabric_id.data(), kCompressedFabricIdentifierSize);

    // Resize output to final bounds on success
    if (status == CHIP_NO_ERROR)
    {
        out_compressed_fabric_id = out_compressed_fabric_id.SubSpan(0, kCompressedFabricIdentifierSize);
    }

    return status;
}

// Integer-output convenience overload of the span-based version above.
CHIP_ERROR GenerateCompressedFabricId(const Crypto::P256PublicKey & rootPublicKey, uint64_t fabricId, uint64_t & compressedFabricId)
{
    uint8_t allocated[sizeof(fabricId)];
    MutableByteSpan span(allocated);
    ReturnErrorOnFailure(GenerateCompressedFabricId(rootPublicKey, fabricId, span));

    // Decode compressed fabric ID accounting for endianness, as GenerateCompressedFabricId()
    // returns a binary buffer and is agnostic of usage of the output as an integer type.
    compressedFabricId = Encoding::BigEndian::Get64(allocated);
    return CHIP_NO_ERROR;
}

/* Operational Group Key Group, Security Info: "GroupKey v1.0" */
static const uint8_t kGroupSecurityInfo[] = { 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x20, 0x76, 0x31, 0x2e, 0x30 };

/* Group Key Derivation Function, Info: "GroupKeyHash" */
static const uint8_t kGroupKeyHashInfo[] = { 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68 };
// NOTE(review): zero-length arrays are a compiler extension, not standard C++;
// consider a nullptr salt (as GenerateKeys() does) — confirm backend accepts it.
static const uint8_t kGroupKeyHashSalt[0] = {};

/*
    OperationalGroupKey =
         Crypto_KDF
         (
            InputKey = Epoch Key,
            Salt = CompressedFabricIdentifier,
            Info = "GroupKey v1.0",
            Length = CRYPTO_SYMMETRIC_KEY_LENGTH_BITS
         )
*/
CHIP_ERROR DeriveGroupOperationalKey(const ByteSpan & epoch_key, const ByteSpan & compressed_fabric_id, MutableByteSpan & out_key)
{
    VerifyOrReturnError(Crypto::CHIP_CRYPTO_SYMMETRIC_KEY_LENGTH_BYTES == epoch_key.size(), CHIP_ERROR_INVALID_ARGUMENT);
    VerifyOrReturnError(Crypto::CHIP_CRYPTO_SYMMETRIC_KEY_LENGTH_BYTES <= out_key.size(), CHIP_ERROR_INVALID_ARGUMENT);

    Crypto::HKDF_sha crypto;
    return crypto.HKDF_SHA256(epoch_key.data(), epoch_key.size(), compressed_fabric_id.data(), compressed_fabric_id.size(),
                              kGroupSecurityInfo, sizeof(kGroupSecurityInfo), out_key.data(),
                              Crypto::CHIP_CRYPTO_SYMMETRIC_KEY_LENGTH_BYTES);
}

/*
    GKH = Crypto_KDF (
        InputKey = OperationalGroupKey,
        Salt = [],
        Info = "GroupKeyHash",
        Length = 16)
*/
CHIP_ERROR DeriveGroupSessionId(const ByteSpan & operational_key, uint16_t & session_id)
{
    VerifyOrReturnError(Crypto::CHIP_CRYPTO_SYMMETRIC_KEY_LENGTH_BYTES == operational_key.size(), CHIP_ERROR_INVALID_ARGUMENT);
    Crypto::HKDF_sha crypto;
    uint8_t out_key[sizeof(uint16_t)];

    ReturnErrorOnFailure(crypto.HKDF_SHA256(operational_key.data(), operational_key.size(), kGroupKeyHashSalt,
                                            sizeof(kGroupKeyHashSalt), kGroupKeyHashInfo, sizeof(kGroupKeyHashInfo), out_key,
                                            sizeof(out_key)));
    session_id = Encoding::BigEndian::Get16(out_key);
    return CHIP_NO_ERROR;
}

// Extracts VID/PID either from dedicated Matter DN attributes or, as a fallback,
// from "Mvid:"/"Mpid:" substrings embedded in the CommonName attribute.
CHIP_ERROR ExtractVIDPIDFromAttributeString(DNAttrType attrType, const ByteSpan & attr,
                                            AttestationCertVidPid & vidpidFromMatterAttr, AttestationCertVidPid & vidpidFromCNAttr)
{
    ReturnErrorCodeIf(attrType == DNAttrType::kUnspecified, CHIP_NO_ERROR);
    ReturnErrorCodeIf(attr.empty(), CHIP_ERROR_INVALID_ARGUMENT);

    if (attrType == DNAttrType::kMatterVID || attrType == DNAttrType::kMatterPID)
    {
        uint16_t matterAttr;
        VerifyOrReturnError(attr.size() == kVIDandPIDHexLength, CHIP_ERROR_WRONG_CERT_DN);
        VerifyOrReturnError(Encoding::UppercaseHexToUint16(reinterpret_cast<const char *>(attr.data()), attr.size(), matterAttr) ==
                                sizeof(matterAttr),
                            CHIP_ERROR_WRONG_CERT_DN);

        if (attrType == DNAttrType::kMatterVID)
        {
            // Not more than one VID attribute can be present.
            ReturnErrorCodeIf(vidpidFromMatterAttr.mVendorId.HasValue(), CHIP_ERROR_WRONG_CERT_DN);
            vidpidFromMatterAttr.mVendorId.SetValue(static_cast<VendorId>(matterAttr));
        }
        else
        {
            // Not more than one PID attribute can be present.
            ReturnErrorCodeIf(vidpidFromMatterAttr.mProductId.HasValue(), CHIP_ERROR_WRONG_CERT_DN);
            vidpidFromMatterAttr.mProductId.SetValue(matterAttr);
        }
    }
    // Otherwise, it is a CommonName attribute.
    else if (!vidpidFromCNAttr.Initialized())
    {
        char cnAttr[kMax_CommonNameAttr_Length + 1];
        if (attr.size() <= chip::Crypto::kMax_CommonNameAttr_Length)
        {
            memcpy(cnAttr, attr.data(), attr.size());
            cnAttr[attr.size()] = 0;

            char * vid = strstr(cnAttr, kVIDPrefixForCNEncoding);
            if (vid != nullptr)
            {
                vid += strlen(kVIDPrefixForCNEncoding);
                // Only parse if the full 4-hex-digit value fits within the attribute.
                if (cnAttr + attr.size() >= vid + kVIDandPIDHexLength)
                {
                    uint16_t matterAttr;
                    if (Encoding::UppercaseHexToUint16(vid, kVIDandPIDHexLength, matterAttr) == sizeof(matterAttr))
                    {
                        vidpidFromCNAttr.mVendorId.SetValue(static_cast<VendorId>(matterAttr));
                    }
                }
            }

            char * pid = strstr(cnAttr, kPIDPrefixForCNEncoding);
            if (pid != nullptr)
            {
                pid += strlen(kPIDPrefixForCNEncoding);
                if (cnAttr + attr.size() >= pid + kVIDandPIDHexLength)
                {
                    uint16_t matterAttr;
                    if (Encoding::UppercaseHexToUint16(pid, kVIDandPIDHexLength, matterAttr) == sizeof(matterAttr))
                    {
                        vidpidFromCNAttr.mProductId.SetValue(matterAttr);
                    }
                }
            }
        }
    }

    return CHIP_NO_ERROR;
}

// Generates the to-be-signed portion of a PKCS#10 CSR (`CertificationRequestInformation`)
// that contains the static
CHIP_ERROR GenerateCertificationRequestInformation(ASN1Writer & writer, const Crypto::P256PublicKey & pubkey)
{
    CHIP_ERROR err = CHIP_NO_ERROR;
    /**
     *
     *    CertificationRequestInfo ::=
     *      SEQUENCE {
     *        version       INTEGER { v1(0) } (v1,...),
     *        subject       Name,
     *        subjectPKInfo SubjectPublicKeyInfo{{ PKInfoAlgorithms }},
     *        attributes    [0] Attributes{{ CRIAttributes }}
     *      }
     */
    ASN1_START_SEQUENCE
    {
        ASN1_ENCODE_INTEGER(0); // version INTEGER { v1(0) }

        // subject Name
        ASN1_START_SEQUENCE
        {
            ASN1_START_SET
            {
                ASN1_START_SEQUENCE
                {
                    // Any subject, placeholder is good, since this
                    // is going to usually be ignored
                    ASN1_ENCODE_OBJECT_ID(kOID_AttributeType_OrganizationalUnitName);
                    ASN1_ENCODE_STRING(kASN1UniversalTag_UTF8String, "CSA", static_cast<uint16_t>(strlen("CSA")));
                }
                ASN1_END_SEQUENCE;
            }
            ASN1_END_SET;
        }
        ASN1_END_SEQUENCE;

        // subjectPKInfo
        ASN1_START_SEQUENCE
        {
            ASN1_START_SEQUENCE
            {
                ASN1_ENCODE_OBJECT_ID(kOID_PubKeyAlgo_ECPublicKey);
                ASN1_ENCODE_OBJECT_ID(kOID_EllipticCurve_prime256v1);
            }
            ASN1_END_SEQUENCE;
            ReturnErrorOnFailure(writer.PutBitString(0, pubkey, static_cast<uint8_t>(pubkey.Length())));
        }
        ASN1_END_SEQUENCE;

        // attributes [0]
        ASN1_START_CONSTRUCTED(kASN1TagClass_ContextSpecific, 0)
        {
            // Using a plain empty attributes request
            ASN1_START_SEQUENCE
            {
                ASN1_ENCODE_OBJECT_ID(kOID_Extension_CSRRequest);
                ASN1_START_SET
                {
                    ASN1_START_SEQUENCE {}
                    ASN1_END_SEQUENCE;
                }
                ASN1_END_SET;
            }
            ASN1_END_SEQUENCE;
        }
        ASN1_END_CONSTRUCTED;
    }
    ASN1_END_SEQUENCE;
exit:
    return err;
}

// Builds a full PKCS#10 CSR for the given keypair into csr_span, signing the
// CertificationRequestInformation with ECDSA-with-SHA256.
CHIP_ERROR GenerateCertificateSigningRequest(const P256Keypair * keypair, MutableByteSpan & csr_span)
{
    VerifyOrReturnError(keypair != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
    VerifyOrReturnError(csr_span.size() >= kMAX_CSR_Length, CHIP_ERROR_BUFFER_TOO_SMALL);

    // First pass: Generate the CertificationRequestInformation inner
    // encoding one time, to sign it, before re-generating it within the
    // full ASN1 writer later, since it's easier than trying to
    // figure-out the span we need to sign of the overall object.
    P256ECDSASignature signature;

    {
        // The first pass will just generate a signature, so we can use the
        // output buffer as scratch to avoid needing more stack space. There
        // are no secrets here and the contents is not reused since all we
        // need is the signature which is already separately stored.
        ASN1Writer toBeSignedWriter;
        toBeSignedWriter.Init(csr_span);
        CHIP_ERROR err = GenerateCertificationRequestInformation(toBeSignedWriter, keypair->Pubkey());
        ReturnErrorOnFailure(err);

        size_t encodedLen = (uint16_t) toBeSignedWriter.GetLengthWritten();
        // This should not/will not happen
        if (encodedLen > csr_span.size())
        {
            return CHIP_ERROR_INTERNAL;
        }

        err = keypair->ECDSA_sign_msg(csr_span.data(), encodedLen, signature);
        ReturnErrorOnFailure(err);
    }

    // Second pass: Generate the entire CSR body, restarting a new write
    // of the CertificationRequestInformation (cheap) and adding the
    // signature.
    //
    // See RFC2986 for ASN.1 module, repeated here in snippets
    CHIP_ERROR err = CHIP_NO_ERROR;
    ASN1Writer writer;
    writer.Init(csr_span);

    ASN1_START_SEQUENCE
    {

        /* CertificationRequestInfo ::=
         *      SEQUENCE {
         *        version       INTEGER { v1(0) } (v1,...),
         *        subject       Name,
         *        subjectPKInfo SubjectPublicKeyInfo{{ PKInfoAlgorithms }},
         *        attributes    [0] Attributes{{ CRIAttributes }}
         *      }
         */
        // NOTE(review): the return value of this second-pass call is not checked,
        // unlike the first pass above — confirm whether a failure here can occur.
        GenerateCertificationRequestInformation(writer, keypair->Pubkey());

        // algorithm  AlgorithmIdentifier
        ASN1_START_SEQUENCE
        {
            // See RFC5480 sec 2.1
            ASN1_ENCODE_OBJECT_ID(kOID_SigAlgo_ECDSAWithSHA256);
        }
        ASN1_END_SEQUENCE;

        // signature  BIT STRING --> ECDSA-with-SHA256 signature with P256 key with R,S integers format
        // (see RFC3279 sec 2.2.3 ECDSA Signature Algorithm)
        ASN1_START_BIT_STRING_ENCAPSULATED
        {
            // Convert raw signature to embedded signature
            FixedByteSpan<Crypto::kP256_ECDSA_Signature_Length_Raw> rawSig(signature.Bytes());

            uint8_t derInt[kP256_FE_Length + kEmitDerIntegerWithoutTagOverhead];

            // Ecdsa-Sig-Value ::= SEQUENCE
            ASN1_START_SEQUENCE
            {
                using P256IntegerSpan = FixedByteSpan<Crypto::kP256_FE_Length>;
                // r INTEGER
                {
                    MutableByteSpan derIntSpan(derInt, sizeof(derInt));
                    ReturnErrorOnFailure(ConvertIntegerRawToDerWithoutTag(P256IntegerSpan(rawSig.data()), derIntSpan));
                    ReturnErrorOnFailure(writer.PutValue(kASN1TagClass_Universal, kASN1UniversalTag_Integer, false,
                                                         derIntSpan.data(), static_cast<uint16_t>(derIntSpan.size())));
                }

                // s INTEGER
                {
                    MutableByteSpan derIntSpan(derInt, sizeof(derInt));
                    ReturnErrorOnFailure(
                        ConvertIntegerRawToDerWithoutTag(P256IntegerSpan(rawSig.data() + kP256_FE_Length), derIntSpan));
                    ReturnErrorOnFailure(writer.PutValue(kASN1TagClass_Universal, kASN1UniversalTag_Integer, false,
                                                         derIntSpan.data(), static_cast<uint16_t>(derIntSpan.size())));
                }
            }
            ASN1_END_SEQUENCE;
        }
        ASN1_END_ENCAPSULATED;
    }
    ASN1_END_SEQUENCE;

exit:
    // Update size of output buffer on success
    if (err == CHIP_NO_ERROR)
    {
        csr_span.reduce_size(writer.GetLengthWritten());
    }
    return err;
}

} // namespace Crypto
} // namespace chip
/*
 * Copyright 2009-2017 Alibaba Cloud All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <alibabacloud/kms/model/UpdateCertificateStatusResult.h>
#include <json/json.h>

using namespace AlibabaCloud::Kms;
using namespace AlibabaCloud::Kms::Model;

// Empty result; call parse() (or use the payload constructor) to populate it.
UpdateCertificateStatusResult::UpdateCertificateStatusResult() :
	ServiceResult()
{}

// Constructs the result directly from a JSON response payload.
UpdateCertificateStatusResult::UpdateCertificateStatusResult(const std::string &payload) :
	ServiceResult()
{
	parse(payload);
}

UpdateCertificateStatusResult::~UpdateCertificateStatusResult()
{}

// Extracts the RequestId field from the service's JSON response.
// A payload that fails to parse is ignored rather than read as a null value.
void UpdateCertificateStatusResult::parse(const std::string &payload)
{
	Json::Reader reader;
	Json::Value value;
	// Fix: the original ignored the parse() return value and then indexed
	// into a null Json::Value on malformed input.
	if (!reader.parse(payload, value))
	{
		return;
	}
	setRequestId(value["RequestId"].asString());
}
#include "node.h"

// A node that matches every (date, event) pair unconditionally.
bool EmptyNode::Evaluate(const Date& date, const string& event) const {
  return true;
}

// Applies the comparison selected by `cmp` to the pair (lhs, rhs).
template <typename T>
bool CompareTo(const T& lhs, const T& rhs, Comparison cmp) {
  if (cmp == Comparison::Less) {
    return lhs < rhs;
  }
  if (cmp == Comparison::LessOrEqual) {
    return lhs <= rhs;
  }
  if (cmp == Comparison::Equal) {
    return lhs == rhs;
  }
  if (cmp == Comparison::NotEqual) {
    return lhs != rhs;
  }
  if (cmp == Comparison::Greater) {
    return lhs > rhs;
  }
  if (cmp == Comparison::GreaterOrEqual) {
    return lhs >= rhs;
  }
  return false;  // unreachable for a valid Comparison; silences the compiler
}

DateComparisonNode::DateComparisonNode(Comparison comparison, const Date& value)
    : comparison_(comparison), value_(value) {}

// Matches entries whose date compares as requested against the stored date.
bool DateComparisonNode::Evaluate(const Date& date, const string&) const {
  return CompareTo(date, value_, comparison_);
}

EventComparisonNode::EventComparisonNode(Comparison comparison, const string& value)
    : comparison_(comparison), value_(value) {}

// Matches entries whose event name compares as requested against the stored name.
bool EventComparisonNode::Evaluate(const Date&, const string& event) const {
  return CompareTo(event, value_, comparison_);
}

LogicalOperationNode::LogicalOperationNode(LogicalOperation operation,
                                           shared_ptr<Node> left,
                                           shared_ptr<Node> right)
    : operation_(operation), left_(left), right_(right) {}

// Combines the two child predicates; evaluation short-circuits like the
// built-in && / || operators.
bool LogicalOperationNode::Evaluate(const Date& date, const string& event) const {
  if (operation_ == LogicalOperation::And) {
    return left_->Evaluate(date, event) && right_->Evaluate(date, event);
  }
  if (operation_ == LogicalOperation::Or) {
    return left_->Evaluate(date, event) || right_->Evaluate(date, event);
  }
  return false;  // unreachable for a valid LogicalOperation; silences the compiler
}
#include "src/array/device/array_device_manager.h"

#include <gtest/gtest.h>

#include "src/include/array_config.h"
#include "test/unit-tests/array/device/array_device_list_mock.h"
#include "test/unit-tests/device/base/ublock_device_mock.h"
#include "test/unit-tests/device/device_manager_mock.h"

using ::testing::_;
using ::testing::AtLeast;
using ::testing::Return;
using ::testing::ReturnRef;

namespace pos
{
// Builds a shared_ptr-owned mock device answering GetName/GetSN.
// All mocks created by these helpers are owned by the returned shared_ptr;
// they must never be deleted manually.
shared_ptr<MockUBlockDevice>
MockUblockDevice(const char* devName, const string& SN)
{
    MockUBlockDevice* rawPtr = new MockUBlockDevice(devName, 1024, nullptr);
    EXPECT_CALL(*rawPtr, GetName).WillRepeatedly(Return(devName));
    EXPECT_CALL(*rawPtr, GetSN).WillRepeatedly(Return(SN));
    return shared_ptr<MockUBlockDevice>(rawPtr);
}

// Builds a shared_ptr-owned mock device answering GetType/GetName/IsAlive/GetSize.
shared_ptr<MockUBlockDevice>
MockUblockDevice(const char* devName, DeviceType type, size_t devSize)
{
    MockUBlockDevice* rawPtr = new MockUBlockDevice(devName, 1024, nullptr);
    EXPECT_CALL(*rawPtr, GetType).WillRepeatedly(Return(type));
    EXPECT_CALL(*rawPtr, GetName).WillRepeatedly(Return(devName));
    EXPECT_CALL(*rawPtr, IsAlive).WillRepeatedly(Return(true));
    EXPECT_CALL(*rawPtr, GetSize).WillRepeatedly(Return(devSize));
    return shared_ptr<MockUBlockDevice>(rawPtr);
}

// Builds a shared_ptr-owned mock device with no default expectations.
shared_ptr<MockUBlockDevice>
MockUblockDevice(const char* devName)
{
    MockUBlockDevice* rawPtr = new MockUBlockDevice(devName, 1024, nullptr);
    return shared_ptr<MockUBlockDevice>(rawPtr);
}

TEST(ArrayDeviceManager, Import_testIfDeviceSetsAreSuccessfullyImported)
{
    // Given
    MockDeviceManager mockSysDevMgr(nullptr);
    ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName");
    DeviceSet<string> nameSet;
    string nvm1 = "mock-nvm1";
    string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3";
    string spare1 = "mock-spare1";
    nameSet.nvm.push_back(nvm1);
    nameSet.data.push_back(data1);
    nameSet.data.push_back(data2);
    nameSet.data.push_back(data3);
    nameSet.spares.push_back(spare1);
    DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1);
    auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2
    auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto data2UblockDevPtr = MockUblockDevice(data2.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto data3UblockDevPtr = MockUblockDevice(data3.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto spare1UblockDevPtr = MockUblockDevice(spare1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result
        .WillOnce(Return(nvm1UblockDevPtr))
        .WillOnce(Return(data1UblockDevPtr))
        .WillOnce(Return(data2UblockDevPtr))
        .WillOnce(Return(data3UblockDevPtr))
        .WillOnce(Return(spare1UblockDevPtr));

    // When
    int actual = arrDevMgr.ImportByName(nameSet);

    // Then
    ASSERT_EQ(0, actual);
    arrDevMgr.Clear(); // to avoid the leakage of mocks
}

TEST(ArrayDeviceManager, ImportByName_testIfNVMDeviceHasNoUblock)
{
    // Given
    MockDeviceManager mockSysDevMgr(nullptr);
    string mockArrayname = "mockArray";
    ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayname);
    DeviceSet<string> nameSet;
    string nvm1 = "mock-nvm1";
    string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3";
    string spare1 = "mock-spare1";
    nameSet.nvm.push_back(nvm1);
    nameSet.data.push_back(data1);
    nameSet.data.push_back(data2);
    nameSet.data.push_back(data3);
    nameSet.spares.push_back(spare1);
    DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1);
    auto nvm1UblockDevPtr = nullptr;
    EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result
        .WillOnce(Return(nvm1UblockDevPtr));

    // When
    int actual = arrDevMgr.ImportByName(nameSet);

    // Then
    int expected = (int)POS_EVENT_ID::ARRAY_DEVICE_NOT_FOUND;
    ASSERT_EQ(expected, actual);
    arrDevMgr.Clear(); // to avoid the leakage of mocks
}

TEST(ArrayDeviceManager, ImportByName_testIfNVMDeviceIsActuallySSDDevice)
{
    // Given
    MockDeviceManager mockSysDevMgr(nullptr);
    string mockArrayName = "mockArray";
    ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName);
    DeviceSet<string> nameSet;
    string nvm1 = "mock-nvm1";
    string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3";
    string spare1 = "mock-spare1";
    nameSet.nvm.push_back(nvm1);
    nameSet.data.push_back(data1);
    nameSet.data.push_back(data2);
    nameSet.data.push_back(data3);
    nameSet.spares.push_back(spare1);
    DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1);
    auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::SSD, 805830656); // minNvmSize when logicalChunkCount is 2
    EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result
        .WillOnce(Return(nvm1UblockDevPtr));

    // When
    int actual = arrDevMgr.ImportByName(nameSet);

    // Then
    int expected = (int)POS_EVENT_ID::ARRAY_DEVICE_TYPE_ERROR;
    ASSERT_EQ(expected, actual);
    arrDevMgr.Clear(); // to avoid the leakage of mocks
}

TEST(ArrayDeviceManager, ImportByName_testIfDataDeviceHasNoUblock)
{
    // Given
    MockDeviceManager mockSysDevMgr(nullptr);
    string mockArrayName = "mockArray";
    ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName);
    DeviceSet<string> nameSet;
    string nvm1 = "mock-nvm1";
    string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3";
    string spare1 = "mock-spare1";
    nameSet.nvm.push_back(nvm1);
    nameSet.data.push_back(data1);
    nameSet.data.push_back(data2);
    nameSet.data.push_back(data3);
    nameSet.spares.push_back(spare1);
    DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1);
    auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2
    auto data1UblockDevPtr = nullptr;
    EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result
        .WillOnce(Return(nvm1UblockDevPtr))
        .WillOnce(Return(data1UblockDevPtr));

    // When
    int actual = arrDevMgr.ImportByName(nameSet);

    // Then
    int expected = (int)POS_EVENT_ID::ARRAY_DEVICE_NOT_FOUND;
    ASSERT_EQ(expected, actual);
    arrDevMgr.Clear(); // to avoid the leakage of mocks
    // Fix: removed `delete nvm1UblockDevPtr.get();` — the mock is owned by the
    // local shared_ptr, so the manual delete caused a double free at scope exit.
    // (The first test in this file correctly omits such deletes.)
}

TEST(ArrayDeviceManager, ImportByName_testIfDataDeviceIsActuallyNVMDevice)
{
    // Given
    MockDeviceManager mockSysDevMgr(nullptr);
    string mockArrayName = "mockArray";
    ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName);
    DeviceSet<string> nameSet;
    string nvm1 = "mock-nvm1";
    string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3";
    string spare1 = "mock-spare1";
    nameSet.nvm.push_back(nvm1);
    nameSet.data.push_back(data1);
    nameSet.data.push_back(data2);
    nameSet.data.push_back(data3);
    nameSet.spares.push_back(spare1);
    DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1);
    auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2
    auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::NVRAM, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result
        .WillOnce(Return(nvm1UblockDevPtr))
        .WillOnce(Return(data1UblockDevPtr));

    // When
    int actual = arrDevMgr.ImportByName(nameSet);

    // Then
    int expected = (int)POS_EVENT_ID::ARRAY_DEVICE_TYPE_ERROR;
    ASSERT_EQ(expected, actual);
    arrDevMgr.Clear(); // to avoid the leakage of mocks
    // Fix: removed manual `delete ...get();` of a shared_ptr-owned mock (double free).
}

TEST(ArrayDeviceManager, ImportByName_testIfSpareDeviceHasNoUblock)
{
    // Given
    MockDeviceManager mockSysDevMgr(nullptr);
    string mockArrayName = "mockArray";
    ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName);
    DeviceSet<string> nameSet;
    string nvm1 = "mock-nvm1";
    string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3";
    string spare1 = "mock-spare1";
    nameSet.nvm.push_back(nvm1);
    nameSet.data.push_back(data1);
    nameSet.data.push_back(data2);
    nameSet.data.push_back(data3);
    nameSet.spares.push_back(spare1);
    DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1);
    auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2
    auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto data2UblockDevPtr = MockUblockDevice(data2.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto data3UblockDevPtr = MockUblockDevice(data3.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto spare1UblockDevPtr = nullptr;
    EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result
        .WillOnce(Return(nvm1UblockDevPtr))
        .WillOnce(Return(data1UblockDevPtr))
        .WillOnce(Return(data2UblockDevPtr))
        .WillOnce(Return(data3UblockDevPtr))
        .WillOnce(Return(spare1UblockDevPtr));

    // When
    int actual = arrDevMgr.ImportByName(nameSet);

    // Then
    int expected = (int)POS_EVENT_ID::ARRAY_DEVICE_NOT_FOUND;
    ASSERT_EQ(expected, actual);
    arrDevMgr.Clear(); // to avoid the leakage of mocks
    // Fix: removed manual `delete ...get();` of shared_ptr-owned mocks (double free).
}

TEST(ArrayDeviceManager, ImportByName_testIfSpareDeviceIsActuallyNVMDevice)
{
    // Given
    MockDeviceManager mockSysDevMgr(nullptr);
    string mockArrayName = "mockArray";
    ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName);
    DeviceSet<string> nameSet;
    string nvm1 = "mock-nvm1";
    string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3";
    string spare1 = "mock-spare1";
    nameSet.nvm.push_back(nvm1);
    nameSet.data.push_back(data1);
    nameSet.data.push_back(data2);
    nameSet.data.push_back(data3);
    nameSet.spares.push_back(spare1);
    DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1);
    auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2
    auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto data2UblockDevPtr = MockUblockDevice(data2.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto data3UblockDevPtr = MockUblockDevice(data3.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    auto spare1UblockDevPtr = MockUblockDevice(spare1.c_str(), DeviceType::NVRAM, ArrayConfig::MINIMUM_SSD_SIZE_BYTE);
    EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result
        .WillOnce(Return(nvm1UblockDevPtr))
        .WillOnce(Return(data1UblockDevPtr))
        .WillOnce(Return(data2UblockDevPtr))
        .WillOnce(Return(data3UblockDevPtr))
        .WillOnce(Return(spare1UblockDevPtr));

    // When
    int actual = arrDevMgr.ImportByName(nameSet);

    // Then
    int expected = (int)POS_EVENT_ID::ARRAY_DEVICE_TYPE_ERROR;
    ASSERT_EQ(expected, actual);
    arrDevMgr.Clear(); // to avoid the leakage of mocks
    // Fix: removed manual `delete ...get();` of shared_ptr-owned mocks (double free).
}

TEST(ArrayDeviceManager, ImportByName_testIfNVMDeviceIsTooSmall)
{
    // Given
    MockDeviceManager mockSysDevMgr(nullptr);
    string mockArrayName = "mockArray";
    ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName);
    DeviceSet<string> nameSet;
    string nvm1 = "mock-nvm1";
    string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3";
    string spare1 = "mock-spare1";
    nameSet.nvm.push_back(nvm1);
    nameSet.data.push_back(data1);
    nameSet.data.push_back(data2);
    nameSet.data.push_back(data3);
    nameSet.spares.push_back(spare1);
    DevName
nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1); auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 0); // minNvmSize when logicalChunkCount is 2 auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data2UblockDevPtr = MockUblockDevice(data2.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data3UblockDevPtr = MockUblockDevice(data3.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto spare1UblockDevPtr = MockUblockDevice(spare1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result .WillOnce(Return(nvm1UblockDevPtr)) .WillOnce(Return(data1UblockDevPtr)) .WillOnce(Return(data2UblockDevPtr)) .WillOnce(Return(data3UblockDevPtr)) .WillOnce(Return(spare1UblockDevPtr)); // When int actual = arrDevMgr.ImportByName(nameSet); // Then int expected = (int)POS_EVENT_ID::ARRAY_NVM_CAPACITY_ERROR; ASSERT_EQ(expected, actual); arrDevMgr.Clear(); // to avoid the leakage of mocks delete nvm1UblockDevPtr.get(); delete data1UblockDevPtr.get(); delete data2UblockDevPtr.get(); delete data3UblockDevPtr.get(); delete spare1UblockDevPtr.get(); } TEST(ArrayDeviceManager, Import_testIfDeviceSetsAreSuccessfullyImportedWithMetaSetInformation) { // Used when loading array // Given MockDeviceManager mockSysDevMgr(nullptr); ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName"); DeviceSet<string> nameSet; string nvm1 = "mock-nvm1"; string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3"; string spare1 = "mock-spare1"; nameSet.nvm.push_back(nvm1); nameSet.data.push_back(data1); nameSet.data.push_back(data2); nameSet.data.push_back(data3); nameSet.spares.push_back(spare1); DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1); auto 
nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2 auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data2UblockDevPtr = MockUblockDevice(data2.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data3UblockDevPtr = MockUblockDevice(data3.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto spare1UblockDevPtr = MockUblockDevice(spare1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result .WillOnce(Return(nvm1UblockDevPtr)) .WillOnce(Return(data1UblockDevPtr)) .WillOnce(Return(data2UblockDevPtr)) .WillOnce(Return(data3UblockDevPtr)) .WillOnce(Return(spare1UblockDevPtr)) .WillOnce(Return(nvm1UblockDevPtr)) .WillOnce(Return(data1UblockDevPtr)) .WillOnce(Return(data2UblockDevPtr)) .WillOnce(Return(data3UblockDevPtr)) .WillOnce(Return(spare1UblockDevPtr)); arrDevMgr.ImportByName(nameSet); ArrayMeta arrayMeta; arrayMeta.devs = arrDevMgr.ExportToMeta(); arrDevMgr.Clear(); // When int actual = arrDevMgr.Import(arrayMeta.devs); // Then ASSERT_EQ(0, actual); arrDevMgr.Clear(); // to avoid the leakage of mocks } TEST(ArrayDeviceManager, Import_testIfNVMDeviceHasNoUblockWithMetaSetInformation) { // Used when loading array // Given MockDeviceManager mockSysDevMgr(nullptr); string mockArrayName = "mockArray"; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName); DeviceSet<string> nameSet; string nvm1 = "mock-nvm1"; string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3"; string spare1 = "mock-spare1"; nameSet.nvm.push_back(nvm1); nameSet.data.push_back(data1); nameSet.data.push_back(data2); nameSet.data.push_back(data3); nameSet.spares.push_back(spare1); DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), 
spare1Id(spare1); auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2 auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data2UblockDevPtr = MockUblockDevice(data2.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data3UblockDevPtr = MockUblockDevice(data3.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto spare1UblockDevPtr = MockUblockDevice(spare1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result .WillOnce(Return(nvm1UblockDevPtr)) .WillOnce(Return(data1UblockDevPtr)) .WillOnce(Return(data2UblockDevPtr)) .WillOnce(Return(data3UblockDevPtr)) .WillOnce(Return(spare1UblockDevPtr)) .WillOnce(Return(nullptr)); arrDevMgr.ImportByName(nameSet); ArrayMeta arrayMeta; arrayMeta.devs = arrDevMgr.ExportToMeta(); arrDevMgr.Clear(); // When uint32_t missingCnt = 0; uint32_t brokenCnt = 0; int actual = arrDevMgr.Import(arrayMeta.devs); // Then int expected = (int)POS_EVENT_ID::ARRAY_DEVICE_NVM_NOT_FOUND; ASSERT_EQ(expected, actual); arrDevMgr.Clear(); // to avoid the leakage of mocks } TEST(ArrayDeviceManager, Import_testIfDataDeviceIsFaultState) { // Used when loading array // Given MockDeviceManager mockSysDevMgr(nullptr); string mockArrayName = "mockArray"; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName); DeviceSet<string> nameSet; string nvm1 = "mock-nvm1"; string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3"; string spare1 = "mock-spare1"; nameSet.nvm.push_back(nvm1); nameSet.data.push_back(data1); nameSet.data.push_back(data2); nameSet.data.push_back(data3); nameSet.spares.push_back(spare1); DevName nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1); auto nvm1UblockDevPtr = 
MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2 auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data2UblockDevPtr = MockUblockDevice(data2.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data3UblockDevPtr = MockUblockDevice(data3.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto spare1UblockDevPtr = MockUblockDevice(spare1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result .WillOnce(Return(nvm1UblockDevPtr)) .WillOnce(Return(data1UblockDevPtr)) .WillOnce(Return(data2UblockDevPtr)) .WillOnce(Return(data3UblockDevPtr)) .WillOnce(Return(spare1UblockDevPtr)) .WillOnce(Return(nvm1UblockDevPtr)) .WillOnce(Return(data1UblockDevPtr)) .WillOnce(Return(data2UblockDevPtr)) .WillOnce(Return(data3UblockDevPtr)) .WillOnce(Return(spare1UblockDevPtr)); arrDevMgr.ImportByName(nameSet); ArrayMeta arrayMeta; arrayMeta.devs = arrDevMgr.ExportToMeta(); for (DeviceMeta meta : arrayMeta.devs.data) { meta.state = ArrayDeviceState::FAULT; } arrDevMgr.Clear(); // When int actual = arrDevMgr.Import(arrayMeta.devs); // Then ASSERT_EQ(0, actual); arrDevMgr.Clear(); // to avoid the leakage of mocks } TEST(ArrayDeviceManager, Import_testIfDataDeviceHasNoUblockWithMetaSetInformation) { // Used when loading array // Given MockDeviceManager mockSysDevMgr(nullptr); string mockArrayName = "mockArray"; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName); DeviceSet<string> nameSet; string nvm1 = "mock-nvm1"; string data1 = "mock-data1", data2 = "mock-data2", data3 = "mock-data3"; string spare1 = "mock-spare1"; nameSet.nvm.push_back(nvm1); nameSet.data.push_back(data1); nameSet.data.push_back(data2); nameSet.data.push_back(data3); nameSet.spares.push_back(spare1); DevName 
nvm1Id(nvm1), data1Id(data1), data2Id(data2), data3Id(data3), spare1Id(spare1); auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), DeviceType::NVRAM, 805830656); // minNvmSize when logicalChunkCount is 2 auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data2UblockDevPtr = MockUblockDevice(data2.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto data3UblockDevPtr = MockUblockDevice(data3.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); auto spare1UblockDevPtr = MockUblockDevice(spare1.c_str(), DeviceType::SSD, ArrayConfig::MINIMUM_SSD_SIZE_BYTE); EXPECT_CALL(mockSysDevMgr, GetDev) // currently, we don't have a good gtest matcher for DevName, hence I'm just simply chaining the expected result .WillOnce(Return(nvm1UblockDevPtr)) .WillOnce(Return(data1UblockDevPtr)) .WillOnce(Return(data2UblockDevPtr)) .WillOnce(Return(data3UblockDevPtr)) .WillOnce(Return(spare1UblockDevPtr)) .WillOnce(Return(nvm1UblockDevPtr)) .WillOnce(Return(nullptr)) .WillOnce(Return(nullptr)) .WillOnce(Return(nullptr)) .WillOnce(Return(nullptr)); arrDevMgr.ImportByName(nameSet); ArrayMeta arrayMeta; arrayMeta.devs = arrDevMgr.ExportToMeta(); arrDevMgr.Clear(); // When int actual = arrDevMgr.Import(arrayMeta.devs); // Then ASSERT_EQ(0, actual); arrDevMgr.Clear(); // to avoid the leakage of mocks } TEST(ArrayDeviceManager, Export_testIfArrayDevMgrIsQueriedAgainst) { // Given MockDeviceManager mockSysDevMgr; ArrayDeviceManager adm(&mockSysDevMgr, "mockArrayName"); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; DeviceSet<ArrayDevice*> emptyDevSet; EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(emptyDevSet)); adm.SetArrayDeviceList(mockArrayDeviceList); // When adm.Export(); // Then: GetDevs is invoked once } TEST(ArrayDeviceManager, ExportToName_testIfEmptyDeviceSetIsReturned) { // Given ArrayDeviceManager adm(nullptr, "mockArrayName"); // When DeviceSet<string> 
actual = adm.ExportToName(); // Then ASSERT_EQ(0, actual.data.size()); ASSERT_EQ(0, actual.nvm.size()); ASSERT_EQ(0, actual.spares.size()); } TEST(ArrayDeviceManager, ExportToName_testIfArrayDevListIsQueriedAgainst) { // Given ArrayDeviceManager adm(nullptr, "mockArrayName"); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; adm.SetArrayDeviceList(mockArrayDeviceList); DeviceSet<string> emptyDevSet; EXPECT_CALL(*mockArrayDeviceList, ExportNames).WillOnce(Return(emptyDevSet)); // When adm.ExportToName(); // Then: GetDevs() should be called once } TEST(ArrayDeviceManager, ExportToMeta_testIfDeviceSetIsExtractedFromArrayDevList) { // Given ArrayDeviceManager adm(nullptr, "mockArrayName"); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; adm.SetArrayDeviceList(mockArrayDeviceList); DeviceSet<ArrayDevice*> deviceSet; string nvm1 = "mock-nvm", data1 = "mock-data1", spare1 = "mock-spare1"; string nvm1SN = "mock-nvm-sn", data1SN = "mock-data1-sn", spare1SN = "mock-spare1-sn"; auto nvm1UblockDevPtr = MockUblockDevice(nvm1.c_str(), nvm1SN); auto data1UblockDevPtr = MockUblockDevice(data1.c_str(), data1SN); auto spare1UblockDevPtr = MockUblockDevice(spare1.c_str(), spare1SN); ArrayDevice nvmDev(nvm1UblockDevPtr), dataDev(data1UblockDevPtr), spareDev(spare1UblockDevPtr); deviceSet.nvm.push_back(&nvmDev); deviceSet.data.push_back(&dataDev); deviceSet.spares.push_back(&spareDev); EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet)); // When DeviceSet<DeviceMeta> actual = adm.ExportToMeta(); // Then ASSERT_EQ(1, actual.nvm.size()); ASSERT_EQ(nvm1SN, actual.nvm.at(0).uid); ASSERT_EQ(1, actual.data.size()); ASSERT_EQ(data1SN, actual.data.at(0).uid); ASSERT_EQ(1, actual.spares.size()); ASSERT_EQ(spare1SN, actual.spares.at(0).uid); } TEST(ArrayDeviceManager, Clear_testIfNullPtrIsHandled) { // Given ArrayDeviceManager adm(nullptr, "mockArrayName"); // When adm.Clear(); // Then } TEST(ArrayDeviceManager, 
AddSpare_testIfWrongDevnameIsHandled) { // Given MockDeviceManager mockSysDevMgr; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName"); string devName = "spare1"; EXPECT_CALL(mockSysDevMgr, GetDev).WillOnce(Return(nullptr)); // When int actual = arrDevMgr.AddSpare(devName); // Then ASSERT_EQ(EID(ARRAY_DEVICE_WRONG_NAME), actual); } TEST(ArrayDeviceManager, AddSpare_testIfAddingSpareAgainIsHandled) { // Given MockDeviceManager mockSysDevMgr; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName"); string devName = "spare1"; auto spare1 = MockUblockDevice(devName.c_str()); EXPECT_CALL(mockSysDevMgr, GetDev).WillOnce(Return(spare1)); EXPECT_CALL(*spare1.get(), GetClass).WillOnce(Return(DeviceClass::ARRAY)); // When int actual = arrDevMgr.AddSpare(devName); // Then ASSERT_EQ(EID(ARRAY_DEVICE_ALREADY_ADDED), actual); } TEST(ArrayDeviceManager, AddSpare_testIfNotAliveSpareIsHandled) { // Given MockDeviceManager mockSysDevMgr; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName"); string devName = "spare1"; auto spare1 = MockUblockDevice(devName.c_str()); EXPECT_CALL(mockSysDevMgr, GetDev).WillOnce(Return(spare1)); EXPECT_CALL(*spare1.get(), GetClass).WillOnce(Return(DeviceClass::SYSTEM)); EXPECT_CALL(*spare1.get(), IsAlive).WillOnce(Return(false)); // When int actual = arrDevMgr.AddSpare(devName); // Then ASSERT_EQ(-2, actual); } TEST(ArrayDeviceManager, AddSpare_testIfWrongCapacityIsHandled) { // Given MockDeviceManager mockSysDevMgr; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName"); string devName = "spare1"; int EXPECTED_DEV_SIZE = 121212; auto data1 = MockUblockDevice("data1"); auto spare1 = MockUblockDevice(devName.c_str()); ArrayDevice data1Dev(data1, ArrayDeviceState::NORMAL); DeviceSet<ArrayDevice*> deviceSet; deviceSet.data.push_back(&data1Dev); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; arrDevMgr.SetArrayDeviceList(mockArrayDeviceList); EXPECT_CALL(mockSysDevMgr, GetDev).WillOnce(Return(spare1)); 
EXPECT_CALL(*spare1.get(), GetClass).WillOnce(Return(DeviceClass::SYSTEM)); EXPECT_CALL(*spare1.get(), IsAlive).WillOnce(Return(true)); EXPECT_CALL(*spare1.get(), GetSize).WillOnce(Return(EXPECTED_DEV_SIZE + 1)); // intentionally passing in wrong capacity EXPECT_CALL(*data1.get(), GetSize).WillOnce(Return(EXPECTED_DEV_SIZE)); EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet)); // When int actual = arrDevMgr.AddSpare(devName); // Then ASSERT_EQ(EID(ARRAY_SSD_SAME_CAPACITY_ERROR), actual); } TEST(ArrayDeviceManager, AddSpare_testIfSpareIsAddedToArrayDeviceList) { // Given MockDeviceManager mockSysDevMgr; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName"); string devName = "spare1"; int EXPECTED_DEV_SIZE = 121212; auto data1 = MockUblockDevice("data1"); auto spare1 = MockUblockDevice(devName.c_str()); ArrayDevice data1Dev(data1, ArrayDeviceState::NORMAL); DeviceSet<ArrayDevice*> deviceSet; deviceSet.data.push_back(&data1Dev); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; arrDevMgr.SetArrayDeviceList(mockArrayDeviceList); EXPECT_CALL(mockSysDevMgr, GetDev).WillOnce(Return(spare1)); EXPECT_CALL(*spare1.get(), GetClass).WillOnce(Return(DeviceClass::SYSTEM)); EXPECT_CALL(*spare1.get(), IsAlive).WillOnce(Return(true)); EXPECT_CALL(*spare1.get(), GetSize).WillOnce(Return(EXPECTED_DEV_SIZE)); EXPECT_CALL(*data1.get(), GetSize).WillOnce(Return(EXPECTED_DEV_SIZE)); EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet)); EXPECT_CALL(*mockArrayDeviceList, AddSpare(_)).WillOnce([](ArrayDevice* dev) { delete dev; // to avoid leakage return 0; }); // When int actual = arrDevMgr.AddSpare(devName); // Then ASSERT_EQ(0, actual); } TEST(ArrayDeviceManager, RemoveSpare_testIfSpareDeviceRemovalFails) { // Given MockDeviceManager mockSysDevMgr; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName"); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; 
arrDevMgr.SetArrayDeviceList(mockArrayDeviceList); EXPECT_CALL(mockSysDevMgr, GetDev).WillOnce(Return(nullptr)); EXPECT_CALL(*mockArrayDeviceList, GetDevs).Times(0); EXPECT_CALL(*mockArrayDeviceList, RemoveSpare).Times(0); // When int actual = arrDevMgr.RemoveSpare("spare-that-doesn't-exist"); // Then ASSERT_EQ(EID(ARRAY_DEVICE_REMOVE_FAIL), actual); } TEST(ArrayDeviceManager, RemoveSpare_testIfSpareDeviceRemovalIsSuccessful) { // Given MockDeviceManager mockSysDevMgr; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, "mockArrayName"); auto spare1 = MockUblockDevice("spare1"); ArrayDevice spare1Dev(spare1, ArrayDeviceState::NORMAL); DeviceSet<ArrayDevice*> deviceSet; deviceSet.spares.push_back(&spare1Dev); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; arrDevMgr.SetArrayDeviceList(mockArrayDeviceList); EXPECT_CALL(mockSysDevMgr, GetDev).WillOnce(Return(spare1)); EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet)); EXPECT_CALL(*mockArrayDeviceList, RemoveSpare).WillOnce(Return(0)); // When int actual = arrDevMgr.RemoveSpare("spare1"); // Then ASSERT_EQ(0, actual); } TEST(ArrayDeviceManager, RemoveSpare_testWithPassingArrayDevice) { // Given MockDeviceManager mockSysDevMgr(nullptr); string mockArrayName = "mockArray"; ArrayDeviceManager arrDevMgr(&mockSysDevMgr, mockArrayName); auto spare1 = MockUblockDevice("spare1"); ArrayDevice spare1Dev(spare1, ArrayDeviceState::NORMAL); DeviceSet<ArrayDevice*> deviceSet; deviceSet.spares.push_back(&spare1Dev); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; arrDevMgr.SetArrayDeviceList(mockArrayDeviceList); EXPECT_CALL(*mockArrayDeviceList, RemoveSpare).WillOnce(Return(0)); // When int actual = arrDevMgr.RemoveSpare(&spare1Dev); // Then ASSERT_EQ(0, actual); } TEST(ArrayDeviceManager, ReplaceWithSpare_testIfArrayDeviceListIsQueriedAgainst) { // Given MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName"); 
arrDevMgr.SetArrayDeviceList(mockArrayDeviceList); int REPLACE_SUCCESS = 0; EXPECT_CALL(*mockArrayDeviceList, SpareToData).WillOnce(Return(REPLACE_SUCCESS)); // When int actual = arrDevMgr.ReplaceWithSpare(nullptr); // Then ASSERT_EQ(REPLACE_SUCCESS, actual); } TEST(ArrayDeviceManager, GetDev_testIfNullPtrIsHandled) { // Given ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName"); ArrayDevice* arrDev; ArrayDeviceType arrDevType; UblockSharedPtr uBlock = nullptr; // When std::tie(arrDev, arrDevType) = arrDevMgr.GetDev(uBlock); // Then ASSERT_EQ(nullptr, arrDev); ASSERT_EQ(ArrayDeviceType::NONE, arrDevType); } TEST(ArrayDeviceManager, GetDev_testIfGetDevNVMIsHandled) { // Given ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName"); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; arrDevMgr.SetArrayDeviceList(mockArrayDeviceList); DeviceSet<ArrayDevice*> deviceSet; auto nvmUBlockDev = MockUblockDevice("mock-nvm"); ArrayDevice nvmDev(nvmUBlockDev); deviceSet.nvm.push_back(&nvmDev); EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet)); ArrayDevice* arrDev; ArrayDeviceType arrDevType; // When std::tie(arrDev, arrDevType) = arrDevMgr.GetDev(nvmUBlockDev); // Then ASSERT_EQ(&nvmDev, arrDev); ASSERT_EQ(ArrayDeviceType::NVM, arrDevType); } TEST(ArrayDeviceManager, GetDev_testIfGetDevDATAIsHandled) { // Given ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName"); MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList; arrDevMgr.SetArrayDeviceList(mockArrayDeviceList); DeviceSet<ArrayDevice*> deviceSet; auto dataUBlockDev = MockUblockDevice("mock-data"); ArrayDevice dataDev(dataUBlockDev); deviceSet.data.push_back(&dataDev); EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet)); ArrayDevice* arrDev; ArrayDeviceType arrDevType; // When std::tie(arrDev, arrDevType) = arrDevMgr.GetDev(dataUBlockDev); // Then ASSERT_EQ(&dataDev, arrDev); ASSERT_EQ(ArrayDeviceType::DATA, arrDevType); } 
// GetDev() must find a device registered in the spare slot.
TEST(ArrayDeviceManager, GetDev_testIfGetDevSPAREIsHandled)
{
    // Given
    ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName");
    MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList;
    arrDevMgr.SetArrayDeviceList(mockArrayDeviceList);
    DeviceSet<ArrayDevice*> deviceSet;
    auto spareUBlockDev = MockUblockDevice("mock-spare");
    ArrayDevice spareDev(spareUBlockDev);
    deviceSet.spares.push_back(&spareDev);
    EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet));
    ArrayDevice* arrDev;
    ArrayDeviceType arrDevType;

    // When
    std::tie(arrDev, arrDevType) = arrDevMgr.GetDev(spareUBlockDev);

    // Then
    ASSERT_EQ(&spareDev, arrDev);
    ASSERT_EQ(ArrayDeviceType::SPARE, arrDevType);
}

// GetDev() must return (nullptr, NONE) when the ublock device is not in any slot.
TEST(ArrayDeviceManager, GetDev_testIfGetDevFailedMatchIsHandled)
{
    // Given: an empty device set, so no slot can match the queried device
    ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName");
    MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList;
    arrDevMgr.SetArrayDeviceList(mockArrayDeviceList);
    auto missingUBlockDev = MockUblockDevice("mock-data-missing");
    DeviceSet<ArrayDevice*> deviceSet;
    EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet));
    ArrayDevice* arrDev;
    ArrayDeviceType arrDevType;

    // When
    std::tie(arrDev, arrDevType) = arrDevMgr.GetDev(missingUBlockDev);

    // Then
    ASSERT_EQ(nullptr, arrDev);
    ASSERT_EQ(ArrayDeviceType::NONE, arrDevType);
}

// The serial-number overload of GetDev() must resolve the SN through the system
// device manager first, then match the resulting ublock device in the data slot.
TEST(ArrayDeviceManager, GetDev_testIfGetDevDATAIsHandledWithDeviceSerialNumber)
{
    // Given
    MockDeviceManager* mockSysDevMgr = new MockDeviceManager();
    ArrayDeviceManager arrDevMgr(mockSysDevMgr, "mockArrayName");
    MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList;
    arrDevMgr.SetArrayDeviceList(mockArrayDeviceList);
    DeviceSet<ArrayDevice*> deviceSet;
    auto dataUBlockDev = MockUblockDevice("mock-data");
    ArrayDevice dataDev(dataUBlockDev);
    deviceSet.data.push_back(&dataDev);
    EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet));
    ArrayDevice* arrDev;
    ArrayDeviceType arrDevType;
    // SN lookup resolves to the data device's ublock pointer
    EXPECT_CALL(*mockSysDevMgr, GetDev).WillOnce(Return(dataUBlockDev));

    // When
    std::tie(arrDev, arrDevType) = arrDevMgr.GetDev("mock-data-sn");

    // Then
    ASSERT_EQ(&dataDev, arrDev);
    ASSERT_EQ(ArrayDeviceType::DATA, arrDevType);
    delete mockSysDevMgr;
}

// GetFaulty() must return the (first) data device whose state is FAULT.
TEST(ArrayDeviceManager, GetFaulty_testIfFaultyArrayDeviceIsReturned)
{
    // Given: one NORMAL and one FAULT data device
    ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName");
    MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList;
    arrDevMgr.SetArrayDeviceList(mockArrayDeviceList);
    DeviceSet<ArrayDevice*> deviceSet;
    auto nonFaultyUBlockDev = MockUblockDevice("mock-data-nonfaulty");
    auto faultyUBlockDev = MockUblockDevice("mock-data-faulty");
    ArrayDevice dataNonFaulty(nonFaultyUBlockDev, ArrayDeviceState::NORMAL);
    ArrayDevice dataFaulty(faultyUBlockDev, ArrayDeviceState::FAULT);
    deviceSet.data.push_back(&dataNonFaulty);
    deviceSet.data.push_back(&dataFaulty);
    EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet));

    // When
    ArrayDevice* actual = arrDevMgr.GetFaulty();

    // Then
    ASSERT_TRUE(actual != nullptr);
    ASSERT_EQ(&dataFaulty, actual);
}

// GetFaulty() must return nullptr when every data device is NORMAL.
TEST(ArrayDeviceManager, GetFaulty_testIfNullptrIsReturnedWhenThereIsNoFaultyDevice)
{
    // Given
    ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName");
    MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList;
    arrDevMgr.SetArrayDeviceList(mockArrayDeviceList);
    DeviceSet<ArrayDevice*> deviceSet;
    auto nonFaultyUBlockDev1 = MockUblockDevice("mock-data-nonfaulty1");
    auto nonFaultyUBlockDev2 = MockUblockDevice("mock-data-nonfaulty2");
    ArrayDevice dataNonFaulty1(nonFaultyUBlockDev1, ArrayDeviceState::NORMAL);
    ArrayDevice dataNonFaulty2(nonFaultyUBlockDev2, ArrayDeviceState::NORMAL);
    deviceSet.data.push_back(&dataNonFaulty1);
    deviceSet.data.push_back(&dataNonFaulty2);
    EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet));

    // When
    ArrayDevice* actual = arrDevMgr.GetFaulty();

    // Then
    ASSERT_EQ(nullptr, actual);
}

// GetRebuilding() must return the data device whose state is REBUILD.
TEST(ArrayDeviceManager, GetRebuilding_testIfRebuildDeviceIsReturned)
{
    // Given: one NORMAL and one REBUILD data device
    ArrayDeviceManager arrDevMgr(nullptr, "mockArrayName");
    MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList;
    arrDevMgr.SetArrayDeviceList(mockArrayDeviceList);
    DeviceSet<ArrayDevice*> deviceSet;
    auto normalUBlockDev = MockUblockDevice("mock-data-normal");
    auto rebuildUBlockDev = MockUblockDevice("mock-data-rebuild");
    ArrayDevice normalDev(normalUBlockDev, ArrayDeviceState::NORMAL);
    ArrayDevice rebuildDev(rebuildUBlockDev, ArrayDeviceState::REBUILD);
    deviceSet.data.push_back(&normalDev);
    deviceSet.data.push_back(&rebuildDev);
    EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet));

    // When
    ArrayDevice* actual = arrDevMgr.GetRebuilding();

    // Then
    ASSERT_EQ(&rebuildDev, actual);
}

// GetRebuilding() must return nullptr when no data device is in REBUILD state.
TEST(ArrayDeviceManager, GetRebuilding_testIfRebuildDeviceIsNotRebuildState)
{
    // Given: both devices NORMAL (the "rebuild" one only by name)
    string mockArrayName = "mockArray";
    ArrayDeviceManager arrDevMgr(nullptr, mockArrayName);
    MockArrayDeviceList* mockArrayDeviceList = new MockArrayDeviceList;
    arrDevMgr.SetArrayDeviceList(mockArrayDeviceList);
    DeviceSet<ArrayDevice*> deviceSet;
    auto normalUBlockDev = MockUblockDevice("mock-data-normal");
    auto rebuildUBlockDev = MockUblockDevice("mock-data-rebuild");
    ArrayDevice normalDev(normalUBlockDev, ArrayDeviceState::NORMAL);
    ArrayDevice rebuildDev(rebuildUBlockDev, ArrayDeviceState::NORMAL);
    deviceSet.data.push_back(&normalDev);
    deviceSet.data.push_back(&rebuildDev);
    EXPECT_CALL(*mockArrayDeviceList, GetDevs).WillOnce(ReturnRef(deviceSet));

    // When
    ArrayDevice* actual = arrDevMgr.GetRebuilding();

    // Then
    ASSERT_EQ(nullptr, actual);
}
} // namespace pos
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/extensions/api/serial/serial_connection.h" #include <sys/ioctl.h> #include <termios.h> namespace extensions { namespace { int getBaudRate(int bitrate_) { switch (bitrate_) { case 0: return B0; case 50: return B50; case 75: return B75; case 110: return B110; case 134: return B134; case 150: return B150; case 200: return B200; case 300: return B300; case 600: return B600; case 1200: return B1200; case 1800: return B1800; case 2400: return B2400; case 4800: return B4800; case 9600: return B9600; case 19200: return B19200; case 38400: return B38400; #if defined(OS_POSIX) && !defined(OS_MACOSX) case 57600: return B57600; case 115200: return B115200; case 230400: return B230400; case 460800: return B460800; case 576000: return B576000; case 921600: return B921600; default: return B9600; #else // MACOSX doesn't define constants bigger than 38400. // So if it is MACOSX and the value doesn't fit any of the defined constants // It will setup the bitrate with 'bitrate_' (just forwarding the value) default: return bitrate_; #endif } } } // namespace bool SerialConnection::PostOpen() { struct termios options; // Start with existing options and modify. tcgetattr(file_, &options); // Bitrate (sometimes erroneously referred to as baud rate). 
if (bitrate_ >= 0) { int bitrate_opt_ = getBaudRate(bitrate_); cfsetispeed(&options, bitrate_opt_); cfsetospeed(&options, bitrate_opt_); } options.c_cflag &= ~CSIZE; switch (databit_) { case serial::DATA_BIT_SEVENBIT: options.c_cflag |= CS7; break; case serial::DATA_BIT_EIGHTBIT: default: options.c_cflag |= CS8; break; } switch (stopbit_) { case serial::STOP_BIT_TWOSTOPBIT: options.c_cflag |= CSTOPB; break; case serial::STOP_BIT_ONESTOPBIT: default: options.c_cflag &= ~CSTOPB; break; } switch (parity_) { case serial::PARITY_BIT_EVENPARITY: options.c_cflag |= PARENB; options.c_cflag &= ~PARODD; break; case serial::PARITY_BIT_ODDPARITY: options.c_cflag |= (PARENB | PARODD); break; case serial::PARITY_BIT_NOPARITY: default: options.c_cflag &= ~(PARENB | PARODD); break; } // Set flags for 'raw' operation // At least on Linux the flags are persistent and thus we cannot trust // the default values. options.c_lflag &= ~(ICANON | ECHO | ECHOE | ECHONL | ISIG); options.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); options.c_oflag &= ~OPOST; // Enable receiver and set local mode // See http://www.easysw.com/~mike/serial/serial.html to understand. options.c_cflag |= (CLOCAL | CREAD); // Write the options. 
tcsetattr(file_, TCSANOW, &options); return true; } bool SerialConnection::GetControlSignals(ControlSignals &control_signals) { int status; if (ioctl(file_, TIOCMGET, &status) == 0) { control_signals.dcd = (status & TIOCM_CAR) != 0; control_signals.cts = (status & TIOCM_CTS) != 0; return true; } return false; } bool SerialConnection::SetControlSignals( const ControlSignals &control_signals) { int status; if (ioctl(file_, TIOCMGET, &status) != 0) return false; if (control_signals.should_set_dtr) { if (control_signals.dtr) status |= TIOCM_DTR; else status &= ~TIOCM_DTR; } if (control_signals.should_set_rts) { if (control_signals.rts) status |= TIOCM_RTS; else status &= ~TIOCM_RTS; } return ioctl(file_, TIOCMSET, &status) == 0; } std::string SerialConnection::MaybeFixUpPortName( const std::string &port_name) { return port_name; } } // namespace extensions
// Splitting policy used - split if required size is less than the size of block allocated using "First-Fit" or "Best-Fit"
// coalescing policy used - coalesce each time free is called
//
// Memory model: the heap is a doubly-linked chain of 40 one-byte cells.
// Each logical block uses its first and last cell as header/footer, both
// holding [size/allocBit]; the cells in between are the payload. Traversal
// therefore advances `size` cells at a time from a block's header.
#include<bits/stdc++.h>
using namespace std;

// One one-byte heap cell. `size`/`allocBit` are only meaningful on
// header/footer cells; `strData` holds the stored value on a header.
struct block
{
    int size, addr;
    bool allocBit;
    string strData;
    struct block* prev, *next;
}*head,*tail;

void firstFit(string data, int ch);       // Function for First-fit algorithm
void bestFit(string data, int ch);        // Function for Best-fit algorithm
void _free(int addr);                     // Function for freeing a block
void displImplicitList();                 // Function for displaying implicit list (allocated + free)
void displAllocatedList();                // Function for displaying allocated list
void displFreeList();                     // Function for displaying free list
int coalesceFit(int reqBlocks);           // Function for coalescing blocks to allocate space using First-fit or Best-fit
void coalesceFree(struct block* temp);    // Function for coalescing free blocks adjacent to the block being freed

int main()
{
    // creation of 10 blocks (initially each block is 4 bytes: 40 cells total)
    struct block* temp;
    for(int i=0; i<40; i++)
    {
        struct block * temp1 = new block();
        temp1->size = 1;
        temp1->addr = i+1;          // addresses are 1-based
        temp1->allocBit = 0;
        if(i==0)
        {
            temp1->prev = NULL;
            temp1->next = NULL;
            head = temp1;
            temp = temp1;
        }
        else
        {
            temp->next = temp1;
            temp1->prev = temp;
            temp1->next = NULL;
            temp = temp->next;
        }
    }
    tail = temp;

    // Creating and displaying simulation of the initial implicit list:
    // mark cell 1 and cell 4 of every 4-cell group as header/footer (size 4).
    temp = head;
    cout<<"\n---------------Initial heap---------------\n\n";
    while(temp!=NULL)
    {
        temp->size = 4;
        temp->next->next->next->size = 4;
        cout<<"\t"<<temp->addr<<" [4/0][ ][ ][4/0]\n";
        temp = temp->next->next->next->next;
    }
    cout<<"\n------------------------------------------\n";

    // Menu loop: drives allocation, freeing and the three display views.
    int choice;
    while(1)
    {
        cout<<"\nPress 1 for malloc using First-Fit";
        cout<<"\nPress 2 for malloc using Best-Fit";
        cout<<"\nPress 3 to free";
        cout<<"\nPress 4 to display implicit list(allocated + free)";
        cout<<"\nPress 5 to display allocated list";
        cout<<"\nPress 6 to display free list";
        cout<<"\nPress 0 to exit";
        cout<<"\nPlease enter your choice: ";
        cin>>choice;
        if(choice == 0) // Exiting the menu
            break;
        switch (choice)
        {
            case 1: // First-fit allocation
            {
                int ch;
                string data;
                cout<<"\nPress 1 to enter a short value";
                cout<<"\nPress 2 to enter an integer value";
                cout<<"\nPress 3 to enter a double value";
                cout<<"\nPress 4 to enter a float value";
                cout<<"\nEnter your choice: ";
                cin>>ch;
                if(ch == 1)
                    cout<<"\nEnter a short value : ";
                else if(ch == 2)
                    cout<<"\nEnter an integer value : ";
                else if(ch ==3)
                    cout<<"\nEnter a double value : ";
                else if(ch==4)
                    cout<<"\nEnter a float value : ";
                else
                {
                    cout<<"\n-----------------------------------------------------------\n";
                    cout<<"\nYou have made a wrong choice!!! Please enter a valid choice\n";
                    cout<<"\n-----------------------------------------------------------\n";
                    break;
                }
                cin>>data;
                firstFit(data,ch);
                break;
            }
            case 2: // Best-fit allocation
            {
                int ch;
                string data;
                cout<<"\nPress 1 to enter a short value";
                cout<<"\nPress 2 to enter an integer value";
                cout<<"\nPress 3 to enter a double value";
                cout<<"\nPress 4 to enter a float value";
                cout<<"\nEnter your choice: ";
                cin>>ch;
                if(ch == 1)
                    cout<<"\nEnter a short value : ";
                else if(ch == 2)
                    cout<<"\nEnter an integer value : ";
                else if(ch == 3)
                    cout<<"\nEnter a double value : ";
                else if(ch == 4)
                    cout<<"\nEnter a float value : ";
                else
                {
                    cout<<"\n-----------------------------------------------------------\n";
                    cout<<"\nYou have made a wrong choice!!! Please enter a valid choice\n";
                    cout<<"\n-----------------------------------------------------------\n";
                    break;
                }
                cin>>data;
                bestFit(data, ch);
                break;
            }
            case 3: // Free a block by its starting (header) address
            {
                int addr;
                cout<<"\nEnter the starting address of the data to be freed: ";
                cin>>addr;
                _free(addr);
                break;
            }
            case 4:
            {
                displImplicitList();
                break;
            }
            case 5:
            {
                displAllocatedList();
                break;
            }
            case 6:
            {
                displFreeList();
                break;
            }
            default:
            {
                cout<<"\n-----------------------------------------------------------\n";
                cout<<"\nYou have made a wrong choice!!! Please enter a valid choice\n";
                cout<<"\n-----------------------------------------------------------\n";
                break;
            }
        }
    }
}

// Allocate space for `data` using first-fit. `ch` selects the payload size
// (2/4/8/16 bytes); the block also needs 2 extra cells for header+footer.
// On failure, tries to coalesce free blocks once and retries recursively.
void firstFit(string data, int ch)
{
    int reqBlocks;
    if(ch == 1) reqBlocks = 2;       // 2 Bytes for short
    else if(ch == 2) reqBlocks = 4;  // 4 Bytes for integer
    else if(ch ==3) reqBlocks = 8;   // 8 Bytes for double
    else if(ch == 4) reqBlocks = 16; // 16 Bytes for float
    struct block *temp = head;
    int found = 0;
    while(temp!=NULL) // Searching first suitable block having size >= required size
    {
        if(temp->allocBit == 0)
        {
            if((temp->size)-2 >= reqBlocks)
            {
                found = 1;
                break;
            }
        }
        // advance to the next block header (skip `size` cells)
        int i = temp->size;
        while(i>=1)
        {
            temp = temp->next;
            i--;
        }
    }
    if(found == 1) // if suitable block is found
    {
        struct block *temp1 = temp;
        int remaining = temp->size - 2 - reqBlocks; // cells left over after the split
        // temp1 walks to the old footer of the chosen block
        int i = temp->size;
        while(i>1)
        {
            temp1 = temp1->next;
            i--;
        }
        temp->allocBit = 1;
        temp->strData = data;
        temp->size = reqBlocks + 2;
        temp1->size = temp1->size - temp->size; // old footer becomes remainder's footer
        // temp walks to the new footer of the allocated block
        i = temp->size;
        while(i>1)
        {
            temp = temp->next;
            i--;
        }
        temp->size = reqBlocks + 2;
        temp->allocBit = 1;
        if(temp->next != NULL && remaining != 0) // split, if available block size > required size
        {
            temp = temp->next; // header of the remainder block
            temp->size = temp1->size;
            if(temp1->size==2)
            {
                // a 2-cell remainder (header+footer only) cannot hold data;
                // mark it allocated so it is never handed out
                temp1->allocBit = 1;
                temp->allocBit = 1;
            }
            else
            {
                temp1->allocBit = 0;
                temp->allocBit = 0;
            }
        }
    }
    else // suitable block is not found
    {
        int availability = coalesceFit(reqBlocks); // Coalescing the free blocks to achieve suitable block of size >= required size
        if(availability == 1)
        {
            firstFit(data, ch);
        }
        else // sufficient space is not available to allocate the memory for required size even after coalescing free blocks
        {
            cout<<"\n-------------------------------------------------------------\n";
            cout<<"\nSorry, the memory for the data "<<data<<" cannot be allocated\n";
            cout<<"\n-------------------------------------------------------------\n";
        }
    }
}

// Allocate space for `data` using best-fit: scans the whole list for the
// smallest free block that still fits. `min` starts at 38 = 40 cells minus
// the mandatory header+footer, the largest possible payload.
void bestFit(string data, int ch)
{
    int reqBlocks;
    if(ch == 1) reqBlocks = 2;       // 2 Bytes for short
    else if(ch == 2) reqBlocks = 4;  // 4 Bytes for integer
    else if(ch ==3) reqBlocks = 8;   // 8 Bytes for double
    else if(ch == 4) reqBlocks = 16; // 16 Bytes for float
    struct block *temp, *temp1;
    temp = head;
    int found = 0;
    int min = 38;
    while(temp != NULL) // Searching the whole list for best-fit
    {
        if(temp->allocBit == 0)
        {
            if(temp->size - 2 >= reqBlocks && temp->size - 2 <min)
            {
                min = temp->size -2;
                temp1 = temp; // remember best candidate's header
                found = 1;
            }
        }
        int i = temp->size;
        while(i>=1)
        {
            temp = temp->next;
            i--;
        }
    }
    if(found == 1) // if the best-fit is found
    {
        struct block * temp2 = temp1;
        int remaining = temp1->size - 2 - reqBlocks;
        // temp2 walks to the old footer of the chosen block
        int i = temp1->size;
        while(i>1)
        {
            temp2 = temp2->next;
            i--;
        }
        temp1->allocBit = 1;
        temp1->strData = data;
        temp1->size = reqBlocks + 2;
        temp2->size = temp2->size - temp1->size;
        // temp1 walks to the new footer of the allocated block
        i = temp1->size;
        while(i>1)
        {
            temp1 = temp1->next;
            i--;
        }
        temp1->size = reqBlocks + 2;
        temp1->allocBit = 1;
        if(temp1->next != NULL && remaining != 0) // split, if available block size > required size
        {
            temp1 = temp1->next; // header of the remainder block
            temp1->size = temp2->size;
            if(temp2->size==2)
            {
                // unusable 2-cell remainder: keep it marked allocated
                temp2->allocBit = 1;
                temp1->allocBit = 1;
            }
            else
            {
                temp2->allocBit = 0;
                temp1->allocBit = 0;
            }
        }
    }
    else // suitable block is not found
    {
        int availability = coalesceFit(reqBlocks); // Coalesce free blocks to achieve suitable block of size >= required size
        if(availability == 1)
            bestFit(data, ch);
        else // sufficient space is not available to allocate the memory for required size even after coalescing of free blocks
        {
            cout<<"\n-------------------------------------------------------------\n";
            cout<<"\nSorry, the memory for the data "<<data<<" cannot be allocated\n";
            cout<<"\n-------------------------------------------------------------\n";
        }
    }
}

// Free the block whose header cell has address `addr`, then coalesce with
// adjacent free blocks. Addresses that do not land on a header are rejected.
void _free(int addr)
{
    struct block *temp = head;
    int flag = 0;
    while(temp != NULL) // finding whether "addr" is starting address of the block or not
    {
        if(temp->addr == addr)
        {
            flag = 1;
            break;
        }
        if(temp->addr > addr) // passed the address: it was not a header
            break;
        int i = temp->size;
        while(i>=1)
        {
            temp = temp->next;
            i--;
        }
    }
    if(flag == 1) // if "addr" is the starting address of the block of memory then free that block and coalesce with adjacent free blocks
    {
        if(temp->size == 2)
        {
            // a 2-cell filler block is only worth freeing when it can merge
            // with a free neighbour on either side
            // NOTE(review): temp->next->next is dereferenced without checking
            // temp->next for NULL — TODO confirm this cannot be the tail block.
            if((temp->prev != NULL && temp->prev->allocBit == 0) || (temp->next->next != NULL && temp->next->next->allocBit == 0))
            {
                coalesceFree(temp);
            }
        }
        else if(temp->allocBit != 0) // only free blocks that are allocated
            coalesceFree(temp);
    }
    else
    {
        cout<<"\n--------------------------------------------------------------\n";
        cout<<"\nPlease enter a valid starting address of block to be freed !!!\n";
        cout<<"\n--------------------------------------------------------------\n";
    }
}

// Print every block (allocated and free) as: addr [size/bit] payload [size/bit].
void displImplicitList()
{
    cout<<"\n---------------Displaying implicit list---------------\n\n";
    struct block * temp = head;
    while(temp!=NULL) // Traversing and printing list with starting address, header, footer and data content
    {
        if(temp->allocBit == 1)
        {
            if(temp->size == 2) // header+footer only, no payload
            {
                cout<<"\t"<<temp->addr<<" ["<<temp->size<<"/"<<temp->allocBit<<"]"<<"["<<temp->size<<"/"<<temp->allocBit<<"]\n";
            }
            else
            {
                cout<<"\t"<<temp->addr<<" ["<<temp->size<<"/"<<temp->allocBit<<"]";
                cout<<"[ "<<temp->strData<<" ]";
                cout<<"["<<temp->size<<"/"<<temp->allocBit<<"]\n";
            }
        }
        else
        {
            cout<<"\t"<<temp->addr<<" ["<<temp->size<<"/"<<temp->allocBit<<"]";
            int i = temp->size;
            for(int j=1; j<=i-2; j++) // one empty box per payload cell
                cout<<"[ ]";
            cout<<"["<<temp->size<<"/"<<temp->allocBit<<"]\n";
        }
        int i = temp->size;
        while(i>=1)
        {
            temp = temp->next;
            i--;
        }
    }
    cout<<"\n------------------------------------------------------\n";
}

// Print only the allocated blocks of the implicit list.
void displAllocatedList()
{
    cout<<"\n---------------Displaying allocated list---------------\n\n";
    struct block * temp = head;
    while(temp!=NULL) // Traversing and printing only allocated blocks in the implicit list
    {
        if(temp->allocBit == 1)
        {
            if(temp->size == 2)
            {
                cout<<"\t"<<temp->addr<<" ["<<temp->size<<"/"<<temp->allocBit<<"]"<<"["<<temp->size<<"/"<<temp->allocBit<<"]\n";
            }
            else
            {
                cout<<"\t"<<temp->addr<<" ["<<temp->size<<"/"<<temp->allocBit<<"]";
                cout<<"[ "<<temp->strData<<" ]";
                cout<<"["<<temp->size<<"/"<<temp->allocBit<<"]\n";
            }
        }
        int i = temp->size;
        while(i>=1)
        {
            temp = temp->next;
            i--;
        }
    }
    cout<<"\n-------------------------------------------------------\n";
}

// Print only the free blocks of the implicit list.
void displFreeList()
{
    cout<<"\n---------------Displaying free list---------------\n\n";
    struct block * temp = head;
    while(temp != NULL) // Traversing and printing the free blocks available in the implicit list
    {
        if(temp->allocBit == 0)
        {
            cout<<"\t"<<temp->addr<<" ["<<temp->size<<"/"<<temp->allocBit<<"]";
            int i = temp->size;
            for(int j=1; j<=i-2; j++)
                cout<<"[ ]";
            cout<<"["<<temp->size<<"/"<<temp->allocBit<<"]\n";
        }
        int i = temp->size;
        while(i>=1)
        {
            temp = temp->next;
            i--;
        }
    }
    cout<<"\n--------------------------------------------------\n";
}

// Try to merge a run of adjacent free (or 2-cell filler) blocks into one
// block of at least reqBlocks payload cells. Returns 1 on success (the
// caller then retries its fit), 0 if no run is large enough.
int coalesceFit(int reqBlocks)
{
    struct block * temp, *temp1;
    temp = head;
    int blockNum = 0, flag = 0, i;
    while(temp != NULL) // Traversing through the list to find whether coalescing of free blocks will fulfill the demand of required space or not
    {
        temp1 = temp; // temp1 remembers the start of the current free run
        while(temp!=NULL && (temp->allocBit == 0 || temp->size ==2))
        {
            blockNum = blockNum + temp->size;
            if(blockNum-2 >= reqBlocks)
            {
                flag = 1;
                break;
            }
            i = temp->size;
            while(i>=1)
            {
                temp = temp->next;
                i--;
            }
        }
        if(flag == 1)
            break;
        blockNum = 0; // run broken by an allocated block: restart the count
        if(temp!=NULL)
        {
            i = temp->size;
            while(i>=1)
            {
                temp = temp->next;
                i--;
            }
        }
    }
    if(temp!=NULL) // flag = 1, Coalescing of free blocks fulfill the demand of required space
    {
        // temp walks to the footer of the last block in the run
        i = temp->size;
        while(i>1)
        {
            temp = temp->next;
            i--;
        }
        temp1->size = blockNum;  // run start becomes the merged header
        temp1->allocBit = 0;
        temp->size = blockNum;   // run end becomes the merged footer
        temp->allocBit = 0;
        // reset the interior cells so stale header/footer data disappears
        i = temp1->size -1;
        while(i>1)
        {
            temp1 = temp1->next;
            temp1->size = 1;
            temp1->allocBit = 0;
            i--;
        }
        return 1;
    }
    else // Coalescing of free blocks does not fulfill the demand of required space
    {
        return 0;
    }
}

// Free the block starting at `temp` and merge it with the free block before
// it and/or after it, updating the merged header and footer sizes.
void coalesceFree(struct block *temp)
{
    struct block * temp1, *temp2;
    temp1 = temp;  // header of the block being freed
    temp2 = temp1; // will become its footer
    int i = temp1->size;
    while(i>1) // Moving to the footer of block to be freed
    {
        temp2 = temp2->next;
        i--;
    }
    temp1->allocBit = 0;
    temp2->allocBit = 0;
    if(temp1->prev != NULL) // coalesce with the previous adjacent free block
    {
        // temp1->prev is the footer of the preceding block
        if(temp1->prev->allocBit == 0 || temp1->prev->size == 2)
        {
            int block = temp1->size;
            temp1->prev->allocBit = 0;
            // walk temp1 back to the preceding block's header
            int i = temp1->prev->size;
            while(i>=1)
            {
                temp1 = temp1->prev;
                i--;
            }
            temp1->allocBit = 0;
            temp1->size = temp1->size + block; // grow header size
            temp2->size = temp1->size;         // keep footer in sync
        }
    }
    if(temp2->next !=NULL) // coalesce with the next adjacent free block
    {
        // temp2->next is the header of the following block
        if(temp2->next->allocBit == 0 || temp2->next->size ==2)
        {
            int block = temp2->size;
            temp2->next->allocBit = 0;
            // walk temp2 forward to the following block's footer
            int i = temp2->next->size;
            while(i>=1)
            {
                temp2 = temp2->next;
                i--;
            }
            temp2->allocBit = 0;
            temp2->size = temp2->size + block; // grow footer size
            temp1->size = temp2->size;         // keep header in sync
        }
    }
}
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_METRIC_BINARY_METRIC_HPP_
#define LIGHTGBM_METRIC_BINARY_METRIC_HPP_

#include <LightGBM/metric.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>

#include <string>
#include <algorithm>
#include <sstream>
#include <vector>

namespace LightGBM {

/*!
 * \brief Metric for binary classification task.
 * Use static class "PointWiseLossCalculator" to calculate loss point-wise.
 * The final score is the (weighted) mean of the per-point losses.
 */
template<typename PointWiseLossCalculator>
class BinaryMetric: public Metric {
 public:
  explicit BinaryMetric(const Config&) {
  }

  virtual ~BinaryMetric() {
  }

  /*!
   * \brief Caches label/weight pointers and precomputes the weight sum.
   * \param metadata Dataset metadata providing labels and optional weights
   * \param num_data Number of data points
   */
  void Init(const Metadata& metadata, data_size_t num_data) override {
    name_.emplace_back(PointWiseLossCalculator::Name());
    num_data_ = num_data;
    // get label
    label_ = metadata.label();
    // get weights
    weights_ = metadata.weights();
    if (weights_ == nullptr) {
      // unweighted: the denominator is simply the number of points
      sum_weights_ = static_cast<double>(num_data_);
    } else {
      sum_weights_ = 0.0f;
      for (data_size_t i = 0; i < num_data; ++i) {
        sum_weights_ += weights_[i];
      }
    }
  }

  const std::vector<std::string>& GetName() const override {
    return name_;
  }

  // Loss metrics improve as they shrink, hence the negative factor.
  double factor_to_bigger_better() const override {
    return -1.0f;
  }

  /*!
   * \brief Averages the point-wise loss over all data.
   * When an objective is supplied, raw scores are first converted to
   * probabilities via ConvertOutput; otherwise scores are used as-is.
   * The four branches exist so the OpenMP loops stay tight (no per-iteration
   * null checks).
   */
  std::vector<double> Eval(const double* score, const ObjectiveFunction* objective) const override {
    double sum_loss = 0.0f;
    if (objective == nullptr) {
      if (weights_ == nullptr) {
        #pragma omp parallel for schedule(static) reduction(+:sum_loss)
        for (data_size_t i = 0; i < num_data_; ++i) {
          // add loss
          sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], score[i]);
        }
      } else {
        #pragma omp parallel for schedule(static) reduction(+:sum_loss)
        for (data_size_t i = 0; i < num_data_; ++i) {
          // add loss
          sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], score[i]) * weights_[i];
        }
      }
    } else {
      if (weights_ == nullptr) {
        #pragma omp parallel for schedule(static) reduction(+:sum_loss)
        for (data_size_t i = 0; i < num_data_; ++i) {
          double prob = 0;
          objective->ConvertOutput(&score[i], &prob);
          // add loss
          sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], prob);
        }
      } else {
        #pragma omp parallel for schedule(static) reduction(+:sum_loss)
        for (data_size_t i = 0; i < num_data_; ++i) {
          double prob = 0;
          objective->ConvertOutput(&score[i], &prob);
          // add loss
          sum_loss += PointWiseLossCalculator::LossOnPoint(label_[i], prob) * weights_[i];
        }
      }
    }
    double loss = sum_loss / sum_weights_;
    return std::vector<double>(1, loss);
  }

 private:
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Pointer of label */
  const label_t* label_;
  /*! \brief Pointer of weighs */
  const label_t* weights_;
  /*! \brief Sum weights */
  double sum_weights_;
  /*! \brief Name of test set */
  std::vector<std::string> name_;
};

/*!
 * \brief Log loss metric for binary classification task.
 */
class BinaryLoglossMetric: public BinaryMetric<BinaryLoglossMetric> {
 public:
  explicit BinaryLoglossMetric(const Config& config) :BinaryMetric<BinaryLoglossMetric>(config) {}

  // Negative log-likelihood of the true class; probabilities within
  // kEpsilon of 0/1 are clamped to -log(kEpsilon) to avoid infinities.
  inline static double LossOnPoint(label_t label, double prob) {
    if (label <= 0) {
      if (1.0f - prob > kEpsilon) {
        return -std::log(1.0f - prob);
      }
    } else {
      if (prob > kEpsilon) {
        return -std::log(prob);
      }
    }
    return -std::log(kEpsilon);
  }

  inline static const char* Name() {
    return "binary_logloss";
  }
};

/*!
 * \brief Error rate metric for binary classification task.
 */
class BinaryErrorMetric: public BinaryMetric<BinaryErrorMetric> {
 public:
  explicit BinaryErrorMetric(const Config& config) :BinaryMetric<BinaryErrorMetric>(config) {}

  // 0/1 loss with a decision threshold of 0.5.
  inline static double LossOnPoint(label_t label, double prob) {
    if (prob <= 0.5f) {
      return label > 0;
    } else {
      return label <= 0;
    }
  }

  inline static const char* Name() {
    return "binary_error";
  }
};

/*!
 * \brief Auc Metric for binary classification task.
 */
class AUCMetric: public Metric {
 public:
  explicit AUCMetric(const Config&) {
  }

  virtual ~AUCMetric() {
  }

  const std::vector<std::string>& GetName() const override {
    return name_;
  }

  // Larger AUC is better.
  double factor_to_bigger_better() const override {
    return 1.0f;
  }

  /*!
   * \brief Caches label/weight pointers and precomputes the weight sum.
   * \param metadata Dataset metadata providing labels and optional weights
   * \param num_data Number of data points
   */
  void Init(const Metadata& metadata, data_size_t num_data) override {
    name_.emplace_back("auc");
    num_data_ = num_data;
    // get label
    label_ = metadata.label();
    // get weights
    weights_ = metadata.weights();
    if (weights_ == nullptr) {
      sum_weights_ = static_cast<double>(num_data_);
    } else {
      sum_weights_ = 0.0f;
      for (data_size_t i = 0; i < num_data; ++i) {
        sum_weights_ += weights_[i];
      }
    }
  }

  /*!
   * \brief Computes (weighted) AUC by a single sweep over scores sorted in
   * descending order. Ties (equal scores) are handled by crediting tied
   * positives half weight (the cur_pos * 0.5 term). Degenerate cases
   * (all-positive or all-negative labels) return 1.
   */
  std::vector<double> Eval(const double* score, const ObjectiveFunction*) const override {
    // get indices sorted by score, descent order
    std::vector<data_size_t> sorted_idx;
    for (data_size_t i = 0; i < num_data_; ++i) {
      sorted_idx.emplace_back(i);
    }
    Common::ParallelSort(sorted_idx.begin(), sorted_idx.end(), [score](data_size_t a, data_size_t b) {return score[a] > score[b]; });
    // temp sum of positive label
    double cur_pos = 0.0f;
    // total sum of positive label
    double sum_pos = 0.0f;
    // accumulate of AUC
    double accum = 0.0f;
    // temp sum of negative label
    double cur_neg = 0.0f;
    double threshold = score[sorted_idx[0]];
    if (weights_ == nullptr) {  // no weights
      for (data_size_t i = 0; i < num_data_; ++i) {
        const label_t cur_label = label_[sorted_idx[i]];
        const double cur_score = score[sorted_idx[i]];
        // new threshold
        if (cur_score != threshold) {
          threshold = cur_score;
          // accumulate
          accum += cur_neg*(cur_pos * 0.5f + sum_pos);
          sum_pos += cur_pos;
          // reset
          cur_neg = cur_pos = 0.0f;
        }
        cur_neg += (cur_label <= 0);
        cur_pos += (cur_label > 0);
      }
    } else {  // has weights
      for (data_size_t i = 0; i < num_data_; ++i) {
        const label_t cur_label = label_[sorted_idx[i]];
        const double cur_score = score[sorted_idx[i]];
        const label_t cur_weight = weights_[sorted_idx[i]];
        // new threshold
        if (cur_score != threshold) {
          threshold = cur_score;
          // accumulate
          accum += cur_neg*(cur_pos * 0.5f + sum_pos);
          sum_pos += cur_pos;
          // reset
          cur_neg = cur_pos = 0.0f;
        }
        cur_neg += (cur_label <= 0)*cur_weight;
        cur_pos += (cur_label > 0)*cur_weight;
      }
    }
    // flush the final tie group
    accum += cur_neg*(cur_pos * 0.5f + sum_pos);
    sum_pos += cur_pos;
    double auc = 1.0f;
    if (sum_pos > 0.0f && sum_pos != sum_weights_) {
      auc = accum / (sum_pos *(sum_weights_ - sum_pos));
    }
    return std::vector<double>(1, auc);
  }

 private:
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Pointer of label */
  const label_t* label_;
  /*! \brief Pointer of weighs */
  const label_t* weights_;
  /*! \brief Sum weights */
  double sum_weights_;
  /*! \brief Name of test set */
  std::vector<std::string> name_;
};

/*!
 * \brief Average Precision Metric for binary classification task.
 */
class AveragePrecisionMetric: public Metric {
 public:
  explicit AveragePrecisionMetric(const Config&) {
  }

  virtual ~AveragePrecisionMetric() {
  }

  const std::vector<std::string>& GetName() const override {
    return name_;
  }

  // Larger average precision is better.
  double factor_to_bigger_better() const override {
    return 1.0f;
  }

  /*!
   * \brief Caches label/weight pointers and precomputes the weight sum.
   * \param metadata Dataset metadata providing labels and optional weights
   * \param num_data Number of data points
   */
  void Init(const Metadata& metadata, data_size_t num_data) override {
    name_.emplace_back("average_precision");
    num_data_ = num_data;
    // get label
    label_ = metadata.label();
    // get weights
    weights_ = metadata.weights();
    if (weights_ == nullptr) {
      sum_weights_ = static_cast<double>(num_data_);
    } else {
      sum_weights_ = 0.0f;
      for (data_size_t i = 0; i < num_data; ++i) {
        sum_weights_ += weights_[i];
      }
    }
  }

  /*!
   * \brief Computes (weighted) average precision in one sweep over scores
   * sorted in descending order: at every distinct score the running
   * precision is multiplied by the positives gained in that tie group and
   * accumulated, then normalized by the total positive weight.
   */
  std::vector<double> Eval(const double* score, const ObjectiveFunction*) const override {
    // get indices sorted by score, descending order
    std::vector<data_size_t> sorted_idx;
    for (data_size_t i = 0; i < num_data_; ++i) {
      sorted_idx.emplace_back(i);
    }
    Common::ParallelSort(sorted_idx.begin(), sorted_idx.end(), [score](data_size_t a, data_size_t b) {return score[a] > score[b]; });
    // temp sum of positive label
    double cur_actual_pos = 0.0f;
    // total sum of positive label
    double sum_actual_pos = 0.0f;
    // total sum of predicted positive
    double sum_pred_pos = 0.0f;
    // accumulated precision
    double accum_prec = 1.0f;
    // accumulated pr-auc
    double accum = 0.0f;
    // temp sum of negative label
    double cur_neg = 0.0f;
    double threshold = score[sorted_idx[0]];
    if (weights_ == nullptr) {  // no weights
      for (data_size_t i = 0; i < num_data_; ++i) {
        const label_t cur_label = label_[sorted_idx[i]];
        const double cur_score = score[sorted_idx[i]];
        // new threshold
        if (cur_score != threshold) {
          threshold = cur_score;
          // accumulate
          sum_actual_pos += cur_actual_pos;
          sum_pred_pos += cur_actual_pos + cur_neg;
          accum_prec = sum_actual_pos / sum_pred_pos;
          accum += cur_actual_pos * accum_prec;
          // reset
          cur_neg = cur_actual_pos = 0.0f;
        }
        cur_neg += (cur_label <= 0);
        cur_actual_pos += (cur_label > 0);
      }
    } else {  // has weights
      for (data_size_t i = 0; i < num_data_; ++i) {
        const label_t cur_label = label_[sorted_idx[i]];
        const double cur_score = score[sorted_idx[i]];
        const label_t cur_weight = weights_[sorted_idx[i]];
        // new threshold
        if (cur_score != threshold) {
          threshold = cur_score;
          // accumulate
          sum_actual_pos += cur_actual_pos;
          sum_pred_pos += cur_actual_pos + cur_neg;
          accum_prec = sum_actual_pos / sum_pred_pos;
          accum += cur_actual_pos * accum_prec;
          // reset
          cur_neg = cur_actual_pos = 0.0f;
        }
        cur_neg += (cur_label <= 0) * cur_weight;
        cur_actual_pos += (cur_label > 0) * cur_weight;
      }
    }
    // flush the final tie group
    sum_actual_pos += cur_actual_pos;
    sum_pred_pos += cur_actual_pos + cur_neg;
    accum_prec = sum_actual_pos / sum_pred_pos;
    accum += cur_actual_pos * accum_prec;
    double ap = 1.0f;
    if (sum_actual_pos > 0.0f && sum_actual_pos != sum_weights_) {
      ap = accum / sum_actual_pos;
    }
    return std::vector<double>(1, ap);
  }

 private:
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Pointer of label */
  const label_t* label_;
  /*! \brief Pointer of weighs */
  const label_t* weights_;
  /*! \brief Sum weights */
  double sum_weights_;
  /*! \brief Name of test set */
  std::vector<std::string> name_;
};

}  // namespace LightGBM
#endif  // LIGHTGBM_METRIC_BINARY_METRIC_HPP_
//--------------------------------------------------------------------------
// File and Version Information:
// $Id: MatDBInfo.hh 516 2010-01-15 08:22:00Z stroili $
//
// Description:
// Class MatDBInfo. Implementation of MaterialInfo interface
// using the database.
//
// Environment:
// Software developed for the BaBar Detector at the SLAC B-Factory.
//
// Author List:
// Dave Brown LBL
//
// Copyright Information:
// Copyright (C) 1999 Lawrence Berkeley Laboratory
//
//------------------------------------------------------------------------
#ifndef MATDBINFO_HH
#define MATDBINFO_HH

#include "MatEnv/MaterialInfo.hh"
#include "MatEnv/RecoMatFactory.hh"
#include "MatEnv/MtrPropObj.hh"
#include "MatEnv/ErrLog.hh"
#include <string>
#include <map>
namespace MatEnv {

  class DetMaterial;
  class RecoMatFactory;
  class MatBuildEnv;

  // Database-backed material lookup with an internal cache. Lookups are
  // logically const, so the cache is mutated through a const_cast helper.
  class MatDBInfo : public MaterialInfo {
    public:
      MatDBInfo();
      virtual ~MatDBInfo();
      // Find the material, given the name
      virtual const DetMaterial* findDetMaterial( const std::string& matName ) const;
      // Typed variant: looks up (and caches) the material as a T.
      template <class T> const T* findDetMaterial( const std::string& matName ) const;
      // utility functions
    private:
      // Builds a T from database properties `dbName` and caches it under
      // `detMatName`; returns 0 when the database has no such entry.
      template <class T> T* createMaterial( const std::string& dbName,
          const std::string& detMatName ) const;
      void declareMaterial( const std::string& dbName,
          const std::string& detMatName );
      // Cache of RecoMatFactory pointer
      RecoMatFactory* _genMatFactory;
      // Cache of list of materials for DetectorModel
      // NOTE(review): keys are heap-allocated strings that are never freed —
      // apparently an intentional leak for process-lifetime caches; confirm.
      std::map< std::string*, DetMaterial*, PtrLess > _matList;
      // Map for reco- and DB material names
      std::map< std::string, std::string > _matNameMap;
      // function to cast-off const
      MatDBInfo* that() const {
        return const_cast<MatDBInfo*>(this);
      }
      // allow MatBuildEnv to mess with me
      friend class MatBuildEnv;
      friend class MatBuildCoreEnv;
  };

  template <class T> T* MatDBInfo::createMaterial( const std::string& db_name,
      const std::string& detMatName ) const {
    MtrPropObj* genMtrProp;
    T* theMat;
    // Lazily acquire the factory singleton (cache is const-cast mutable).
    if (_genMatFactory == 0) that()->_genMatFactory = RecoMatFactory::getInstance();
    genMtrProp = _genMatFactory->GetMtrProperties(db_name);
    if(genMtrProp != 0){
      theMat = new T( detMatName.c_str(), genMtrProp ) ;
      // Cache under a heap-allocated copy of the name (see _matList note).
      that()->_matList[new std::string( detMatName )] = theMat;
      return theMat;
    } else {
      return 0;
    }
  }

  template <class T> const T* MatDBInfo::findDetMaterial( const std::string& matName ) const {
    if (_genMatFactory == 0) that()->_genMatFactory = RecoMatFactory::getInstance();
    T* theMat;
    std::map< std::string*, DetMaterial*, PtrLess >::const_iterator pos;
    // Cache hit: PtrLess compares the pointed-to strings, so the address of
    // the argument is a valid probe key.
    if ((pos = _matList.find((std::string*)&matName)) != _matList.end()) {
      theMat = (T*) pos->second;
    } else {
      // first, look for aliases
      std::string theName;
      std::map< std::string, std::string >::const_iterator matNamePos;
      if ((matNamePos = _matNameMap.find(matName)) != _matNameMap.end()) {
        theName = matNamePos->second;
        theMat = createMaterial<T>( theName, matName);
      } else {
        //then , try to find the material name directly
        theMat = createMaterial<T>( matName, matName);
        // if we created a new material directly, add it to the list
        if(theMat != 0)that()->declareMaterial(matName,matName);
      }
    }
    if(theMat == 0){
      ErrMsg( error ) << "MatDBInfo: Cannot find requested material " << matName
        << "." << endmsg;
    }
    return theMat;
  }
}
#endif
// Copyright 2015 Thomas Trapp
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "htmlext/ProgramOptions.h"

// Windows has no <unistd.h>; map the POSIX names used below.
#ifdef _WIN32
#include <io.h>
#define isatty _isatty
#define STDOUT_FILENO 1
#else
#include <unistd.h>
#endif

namespace htmlext {

// Declares the full boost::program_options option set for htmlext.
// hext/str default to empty vectors so their absence can be detected via
// vm_[key].defaulted() (see contains()).
ProgramOptions::ProgramOptions()
: desc_("Options")
, vm_()
{
  namespace po = boost::program_options;
  this->desc_.add_options()
    ("hext,x", po::value<std::vector<std::string>>()
        ->default_value(std::vector<std::string>(), "")
        ->value_name("<file>"),
        "Add Hext from file")
    ("html,i", po::value<std::vector<std::string>>()
        ->value_name("<file>"),
        "Add HTML from file")
    ("str,s", po::value<std::vector<std::string>>()
        ->default_value(std::vector<std::string>(), "")
        ->value_name("<string>"),
        "Add Hext from string")
    ("compact,c", "Print one JSON object per line")
    ("pretty,p", "Pretty-print JSON")
    ("array,a", "Wrap results in a JSON array")
    ("filter,f", po::value<std::string>()
        ->value_name("<key>"),
        "Print values whose name matches <key>")
    ("lint,l", "Do Hext syntax check")
    ("help,h", "Print this help message")
    ("version,V", "Print info and version")
  ;
}

// Parses argv into vm_ and validates the option combination; throws
// boost::program_options::error on any missing required input.
void ProgramOptions::store_and_validate_or_throw(int argc, const char * argv[])
{
  namespace po = boost::program_options;
  po::command_line_parser cli_parser(argc, argv);
  po::positional_options_description pos_opt;
  cli_parser.options(this->desc_);
  // If --lint was given, then do not add positional options to avoid
  // confusion. For example, the following would only parse first.hext,
  // but not second.hext because it is interpreted as the positional
  // option <html-file>:
  //   ./htmlext --lint first.hext second.hext
  const auto end = argv + argc;
  if( std::find(argv, end, std::string("-l")) == end &&
      std::find(argv, end, std::string("--lint")) == end )
  {
    pos_opt.add("hext", 1);
    pos_opt.add("html", -1);
    cli_parser.positional(pos_opt);
  }
  po::store(cli_parser.run(), this->vm_);
  po::notify(this->vm_);

  // --help/--version short-circuit all further validation.
  if( this->contains("help") || this->contains("version") )
    return;

  if( !this->contains("hext") && !this->contains("str") )
    throw po::error("missing Hext input, use --hext/-x <file> "
                    "or --str/-s <string>");

  // --lint only needs Hext input, no HTML.
  if( this->contains("lint") )
    return;

  if( !this->contains("html") )
    throw po::error("missing HTML input, use --html/-i <html-file>");
}

// True only when the option was explicitly given on the command line
// (defaulted values do not count).
bool ProgramOptions::contains(const char * key) const
{
  return this->vm_.count(key) && !this->vm_[key].defaulted();
}

// Returns a single-string option value; key must exist in vm_.
std::string ProgramOptions::get(const char * key) const
{
  return this->vm_[key].as<std::string>();
}

std::vector<std::string> ProgramOptions::get_hext_files() const
{
  return this->vm_["hext"].as<std::vector<std::string>>();
}

std::vector<std::string> ProgramOptions::get_hext_input() const
{
  return this->vm_["str"].as<std::vector<std::string>>();
}

std::vector<std::string> ProgramOptions::get_html_input() const
{
  return this->vm_["html"].as<std::vector<std::string>>();
}

// Prints the usage banner followed by the generated option descriptions.
void ProgramOptions::print(const char * program_name, std::ostream& out) const
{
  out << program_name << " - Extract structured content from HTML.\n\n"
      << "Usage:\n "
      << program_name << " [options] <hext-file> <html-file...>\n"
         " Apply extraction rules from <hext-file> to each\n"
         " <html-file> and print the captured content as JSON.\n\n"
      << this->desc_;
}

// Derives JSON output flags from the parsed options. --pretty takes
// precedence over --compact; with neither given, pretty-print only when
// stdout is a terminal.
JsonOption ProgramOptions::get_json_options() const
{
  using htmlext::operator|;
  htmlext::JsonOption opt = htmlext::JsonOption::NoOption;

  // --pretty takes precedence over --compact
  if( this->contains("pretty") ||
      // by default, pretty print JSON if not piping to a file or pipe
      ( !this->contains("compact") && isatty(STDOUT_FILENO) ) )
    opt = opt | htmlext::JsonOption::PrettyPrint;

  if( this->contains("array") )
    opt = opt | htmlext::JsonOption::ArrayEnvelope;

  return opt;
}

}  // namespace htmlext
/*
 * Copyright (c) 2017-2019 THL A29 Limited, a Tencent company. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <tencentcloud/tsf/v20180326/model/DescribeConfigReleasesResponse.h>
#include <tencentcloud/core/utils/rapidjson/document.h>
#include <tencentcloud/core/utils/rapidjson/writer.h>
#include <tencentcloud/core/utils/rapidjson/stringbuffer.h>

using TencentCloud::CoreInternalOutcome;
using namespace TencentCloud::Tsf::V20180326::Model;
using namespace std;

// Fresh response: no Result field has been parsed yet.
DescribeConfigReleasesResponse::DescribeConfigReleasesResponse() :
    m_resultHasBeenSet(false)
{
}

// Parses the raw JSON payload of a DescribeConfigReleases call.
// Validates the envelope (Response / RequestId), surfaces any server-side
// Error object as a failed outcome, and otherwise deserializes the
// optional Result member into m_result.
CoreInternalOutcome DescribeConfigReleasesResponse::Deserialize(const string &payload)
{
    rapidjson::Document doc;
    doc.Parse(payload.c_str());

    // The payload must be a JSON object at the top level.
    if (doc.HasParseError() || !doc.IsObject())
    {
        return CoreInternalOutcome(Core::Error("response not json format"));
    }
    if (!doc.HasMember("Response") || !doc["Response"].IsObject())
    {
        return CoreInternalOutcome(Core::Error("response `Response` is null or not object"));
    }

    rapidjson::Value &body = doc["Response"];

    // RequestId is mandatory and is attached to every subsequent error.
    if (!body.HasMember("RequestId") || !body["RequestId"].IsString())
    {
        return CoreInternalOutcome(Core::Error("response `Response.RequestId` is null or not string"));
    }
    string reqId(body["RequestId"].GetString());
    SetRequestId(reqId);

    // A present Error member means the call failed server-side.
    if (body.HasMember("Error"))
    {
        bool wellFormed = body["Error"].IsObject() &&
            body["Error"].HasMember("Code") && body["Error"]["Code"].IsString() &&
            body["Error"].HasMember("Message") && body["Error"]["Message"].IsString();
        if (!wellFormed)
        {
            return CoreInternalOutcome(Core::Error("response `Response.Error` format error").SetRequestId(reqId));
        }
        string errorCode(body["Error"]["Code"].GetString());
        string errorMsg(body["Error"]["Message"].GetString());
        return CoreInternalOutcome(Core::Error(errorCode, errorMsg).SetRequestId(reqId));
    }

    // Result is optional; when present it must be an object.
    if (body.HasMember("Result") && !body["Result"].IsNull())
    {
        if (!body["Result"].IsObject())
        {
            return CoreInternalOutcome(Core::Error("response `Result` is not object type").SetRequestId(reqId));
        }

        CoreInternalOutcome parsed = m_result.Deserialize(body["Result"]);
        if (!parsed.IsSuccess())
        {
            parsed.GetError().SetRequestId(reqId);
            return parsed;
        }
        m_resultHasBeenSet = true;
    }

    return CoreInternalOutcome(true);
}

// Serializes the parsed response (Result, if set, plus RequestId) back to
// a JSON string.
string DescribeConfigReleasesResponse::ToJsonString() const
{
    rapidjson::Document root;
    root.SetObject();
    rapidjson::Document::AllocatorType& alloc = root.GetAllocator();

    if (m_resultHasBeenSet)
    {
        string resultKey = "Result";
        rapidjson::Value resultName(rapidjson::kStringType);
        resultName.SetString(resultKey.c_str(), alloc);
        root.AddMember(resultName, rapidjson::Value(rapidjson::kObjectType).Move(), alloc);
        m_result.ToJsonObject(root[resultKey.c_str()], alloc);
    }

    string idKey = "RequestId";
    rapidjson::Value idName(rapidjson::kStringType);
    idName.SetString(idKey.c_str(), alloc);
    root.AddMember(idName, rapidjson::Value().SetString(GetRequestId().c_str(), alloc), alloc);

    rapidjson::StringBuffer buffer;
    rapidjson::Writer<rapidjson::StringBuffer> jsonWriter(buffer);
    root.Accept(jsonWriter);
    return buffer.GetString();
}

// Returns a copy of the deserialized Result payload.
TsfPageConfigRelease DescribeConfigReleasesResponse::GetResult() const
{
    return m_result;
}

// True once Deserialize has successfully populated m_result.
bool DescribeConfigReleasesResponse::ResultHasBeenSet() const
{
    return m_resultHasBeenSet;
}
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

// Generated AWS SDK model: SecurityHub details for an EC2 Auto Scaling group.
// Round-trips between JsonView (deserialize via operator=) and JsonValue
// (serialize via Jsonize); each field carries a companion *HasBeenSet flag
// so absent members are neither read nor emitted.

#include <aws/securityhub/model/AwsAutoScalingAutoScalingGroupDetails.h>
#include <aws/core/utils/json/JsonSerializer.h>

#include <utility>

using namespace Aws::Utils::Json;
using namespace Aws::Utils;

namespace Aws
{
namespace SecurityHub
{
namespace Model
{

// Default constructor: all optional fields unset; scalars zero-initialized.
AwsAutoScalingAutoScalingGroupDetails::AwsAutoScalingAutoScalingGroupDetails() : 
    m_launchConfigurationNameHasBeenSet(false),
    m_loadBalancerNamesHasBeenSet(false),
    m_healthCheckTypeHasBeenSet(false),
    m_healthCheckGracePeriod(0),
    m_healthCheckGracePeriodHasBeenSet(false),
    m_createdTimeHasBeenSet(false),
    m_mixedInstancesPolicyHasBeenSet(false),
    m_availabilityZonesHasBeenSet(false),
    m_launchTemplateHasBeenSet(false),
    m_capacityRebalance(false),
    m_capacityRebalanceHasBeenSet(false)
{
}

// Construct directly from a JSON view by delegating to operator=.
AwsAutoScalingAutoScalingGroupDetails::AwsAutoScalingAutoScalingGroupDetails(JsonView jsonValue) : 
    m_launchConfigurationNameHasBeenSet(false),
    m_loadBalancerNamesHasBeenSet(false),
    m_healthCheckTypeHasBeenSet(false),
    m_healthCheckGracePeriod(0),
    m_healthCheckGracePeriodHasBeenSet(false),
    m_createdTimeHasBeenSet(false),
    m_mixedInstancesPolicyHasBeenSet(false),
    m_availabilityZonesHasBeenSet(false),
    m_launchTemplateHasBeenSet(false),
    m_capacityRebalance(false),
    m_capacityRebalanceHasBeenSet(false)
{
  *this = jsonValue;
}

// Deserialize: copy every member present in the JSON view and mark it set.
// Members absent from the view are left untouched.
AwsAutoScalingAutoScalingGroupDetails& AwsAutoScalingAutoScalingGroupDetails::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("LaunchConfigurationName"))
  {
    m_launchConfigurationName = jsonValue.GetString("LaunchConfigurationName");

    m_launchConfigurationNameHasBeenSet = true;
  }

  if(jsonValue.ValueExists("LoadBalancerNames"))
  {
    Array<JsonView> loadBalancerNamesJsonList = jsonValue.GetArray("LoadBalancerNames");
    for(unsigned loadBalancerNamesIndex = 0; loadBalancerNamesIndex < loadBalancerNamesJsonList.GetLength(); ++loadBalancerNamesIndex)
    {
      m_loadBalancerNames.push_back(loadBalancerNamesJsonList[loadBalancerNamesIndex].AsString());
    }
    m_loadBalancerNamesHasBeenSet = true;
  }

  if(jsonValue.ValueExists("HealthCheckType"))
  {
    m_healthCheckType = jsonValue.GetString("HealthCheckType");

    m_healthCheckTypeHasBeenSet = true;
  }

  if(jsonValue.ValueExists("HealthCheckGracePeriod"))
  {
    m_healthCheckGracePeriod = jsonValue.GetInteger("HealthCheckGracePeriod");

    m_healthCheckGracePeriodHasBeenSet = true;
  }

  if(jsonValue.ValueExists("CreatedTime"))
  {
    // NOTE(review): CreatedTime is carried as a string here, not a DateTime —
    // presumably an ISO-8601 timestamp; confirm against the SecurityHub schema.
    m_createdTime = jsonValue.GetString("CreatedTime");

    m_createdTimeHasBeenSet = true;
  }

  if(jsonValue.ValueExists("MixedInstancesPolicy"))
  {
    m_mixedInstancesPolicy = jsonValue.GetObject("MixedInstancesPolicy");

    m_mixedInstancesPolicyHasBeenSet = true;
  }

  if(jsonValue.ValueExists("AvailabilityZones"))
  {
    Array<JsonView> availabilityZonesJsonList = jsonValue.GetArray("AvailabilityZones");
    for(unsigned availabilityZonesIndex = 0; availabilityZonesIndex < availabilityZonesJsonList.GetLength(); ++availabilityZonesIndex)
    {
      m_availabilityZones.push_back(availabilityZonesJsonList[availabilityZonesIndex].AsObject());
    }
    m_availabilityZonesHasBeenSet = true;
  }

  if(jsonValue.ValueExists("LaunchTemplate"))
  {
    m_launchTemplate = jsonValue.GetObject("LaunchTemplate");

    m_launchTemplateHasBeenSet = true;
  }

  if(jsonValue.ValueExists("CapacityRebalance"))
  {
    m_capacityRebalance = jsonValue.GetBool("CapacityRebalance");

    m_capacityRebalanceHasBeenSet = true;
  }

  return *this;
}

// Serialize: emit only the members whose *HasBeenSet flag is true.
JsonValue AwsAutoScalingAutoScalingGroupDetails::Jsonize() const
{
  JsonValue payload;

  if(m_launchConfigurationNameHasBeenSet)
  {
   payload.WithString("LaunchConfigurationName", m_launchConfigurationName);

  }

  if(m_loadBalancerNamesHasBeenSet)
  {
   Array<JsonValue> loadBalancerNamesJsonList(m_loadBalancerNames.size());
   for(unsigned loadBalancerNamesIndex = 0; loadBalancerNamesIndex < loadBalancerNamesJsonList.GetLength(); ++loadBalancerNamesIndex)
   {
     loadBalancerNamesJsonList[loadBalancerNamesIndex].AsString(m_loadBalancerNames[loadBalancerNamesIndex]);
   }
   payload.WithArray("LoadBalancerNames", std::move(loadBalancerNamesJsonList));

  }

  if(m_healthCheckTypeHasBeenSet)
  {
   payload.WithString("HealthCheckType", m_healthCheckType);

  }

  if(m_healthCheckGracePeriodHasBeenSet)
  {
   payload.WithInteger("HealthCheckGracePeriod", m_healthCheckGracePeriod);

  }

  if(m_createdTimeHasBeenSet)
  {
   payload.WithString("CreatedTime", m_createdTime);

  }

  if(m_mixedInstancesPolicyHasBeenSet)
  {
   payload.WithObject("MixedInstancesPolicy", m_mixedInstancesPolicy.Jsonize());

  }

  if(m_availabilityZonesHasBeenSet)
  {
   Array<JsonValue> availabilityZonesJsonList(m_availabilityZones.size());
   for(unsigned availabilityZonesIndex = 0; availabilityZonesIndex < availabilityZonesJsonList.GetLength(); ++availabilityZonesIndex)
   {
     availabilityZonesJsonList[availabilityZonesIndex].AsObject(m_availabilityZones[availabilityZonesIndex].Jsonize());
   }
   payload.WithArray("AvailabilityZones", std::move(availabilityZonesJsonList));

  }

  if(m_launchTemplateHasBeenSet)
  {
   payload.WithObject("LaunchTemplate", m_launchTemplate.Jsonize());

  }

  if(m_capacityRebalanceHasBeenSet)
  {
   payload.WithBool("CapacityRebalance", m_capacityRebalance);

  }

  return payload;
}

} // namespace Model
} // namespace SecurityHub
} // namespace Aws
//
// Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
//

#ifndef PC_SNOWFLAKETYPE_HPP
#define PC_SNOWFLAKETYPE_HPP

#include <algorithm>
#include <cctype>    // std::toupper
#include <cstdint>   // uint8_t was previously only available transitively
#include <string>
#include <unordered_map>

namespace sf
{

/// \brief Enumeration of Snowflake column data types, with a helper to map
/// a (case-insensitive) type-name string to its enum value.
class SnowflakeType
{
public:
  enum class Type : uint8_t
  {
    ANY = 0,
    ARRAY = 1,
    BINARY = 2,
    BOOLEAN = 3,
    CHAR = 4,
    DATE = 5,
    FIXED = 6,
    OBJECT = 7,
    REAL = 8,
    TEXT = 9,
    TIME = 10,
    TIMESTAMP = 11,
    TIMESTAMP_LTZ = 12,
    TIMESTAMP_NTZ = 13,
    TIMESTAMP_TZ = 14,
    VARIANT = 15
  };

  /// \brief Look up the enum value for a type name, ignoring case.
  /// \param str type name as reported by the server (any case).
  /// \return the matching SnowflakeType::Type.
  /// \throws std::out_of_range if \p str does not name a known type
  ///         (behavior of unordered_map::at, unchanged from before).
  static SnowflakeType::Type snowflakeTypeFromString(std::string str)
  {
    // Fix: route each char through unsigned char before std::toupper.
    // Passing a plain (possibly negative) char to toupper is undefined
    // behavior for bytes >= 0x80; the previous ::toupper call did exactly
    // that.  Behavior for ASCII inputs is identical.
    std::transform(str.begin(), str.end(), str.begin(),
                   [](unsigned char c) { return static_cast<char>(std::toupper(c)); });
    return m_strEnumIndex.at(str);
  }

private:
  // Upper-cased name -> enum value; defined in the corresponding .cpp.
  static std::unordered_map<std::string, SnowflakeType::Type> m_strEnumIndex;
};

}  // namespace sf

#endif  // PC_SNOWFLAKETYPE_HPP
// Copyright (C) 2019 Rhys Mainwaring
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.

/// \file WavefieldEntity.hh
/// \brief This file contains the definition for a Gazebo physics object
/// that allows a wave field to be added into a simulated world.

#ifndef _ASV_WAVE_SIM_GAZEBO_PLUGINS_WAVEFIELD_ENTITY_HH_
#define _ASV_WAVE_SIM_GAZEBO_PLUGINS_WAVEFIELD_ENTITY_HH_

#include <gazebo/gazebo.hh>
#include <gazebo/physics/Base.hh>

#include <memory>

namespace asv
{

///////////////////////////////////////////////////////////////////////////////
// WavefieldEntity

  class Wavefield;

  class WaveParameters;

  /// \internal
  /// \brief Class to hold private data for WavefieldEntity.
  class WavefieldEntityPrivate;

  /// \brief A class to manage a wave field that can be accessed from the World.
  ///
  /// Lifecycle methods (Load/Init/Reset/Update/Fini) override the
  /// gazebo::physics::Base entity hooks.
  class GZ_PHYSICS_VISIBLE WavefieldEntity : public gazebo::physics::Base
  {
    /// \brief Destructor.
    public: virtual ~WavefieldEntity();

    /// \brief Constructor.
    public: explicit WavefieldEntity(gazebo::physics::BasePtr _parent);

    /// \brief Load.
    public: virtual void Load(sdf::ElementPtr _sdf);

    /// \brief Finialize the object.
    public: virtual void Fini();

    /// \brief Initialize the object.
    public: virtual void Init();

    /// \brief Reset the object.
    public: virtual void Reset();

    /// \brief Update the object.
    public: virtual void Update();

    /// \brief Get a pointer to the wavefield.
    // NOTE(review): no explicit access-control label here — this declaration
    // inherits the preceding `public:` section; consider adding `public:` to
    // match the file's one-label-per-member style.
    std::shared_ptr<const Wavefield> GetWavefield() const;

    /// \brief Make a wave field entity name given a parent object name.
    ///
    /// \param[in] _parentName  The name of the parent object.
    /// \return                 The name of the wave field entity.
    public: static std::string MakeName(const std::string& _parentName);

    /// \internal
    /// \brief Pointer to the class private data.
    private: std::shared_ptr<WavefieldEntityPrivate> data;
  };

} // namespace asv

#endif // _ASV_WAVE_SIM_GAZEBO_PLUGINS_WAVEFIELD_ENTITY_HH_
/*
   Copyright 2016 Aldo J. Nunez

   Licensed under the Apache License, Version 2.0.
   See the LICENSE text file for details.
*/

// NES-style paletted tile renderer built on Allegro.  The Allegro calls are
// currently commented out (stubbed), so most functions only maintain the
// CPU-side palette state; rendering is a no-op until the engine calls are
// restored.

#include "Common.h"
#include "Graphics.h"


// Y determines the palette, X determines the color in the palette.
// But, it looks like the minimum height of a bitmap is 16.
const int PaletteBmpWidth = Util::Max( PaletteLength, 16 );
const int PaletteBmpHeight = Util::Max( PaletteCount, 16 );

enum
{
    TileWidth = 8,
    TileHeight = 8,
};

// Loaded tile sheet bitmaps, one per sheet slot.
ALLEGRO_BITMAP* tileSheets[Sheet_Max];
// GPU-side palette texture (Y = palette index, X = color index).
ALLEGRO_BITMAP* paletteBmp;
ALLEGRO_SHADER* tileShader;
// CPU staging buffer mirrored into paletteBmp by UpdatePalettes().
unsigned char* paletteBuf;
int paletteBufSize;
int paletteStride;
// Master NES color table and its grayscale variant; activeSystemPalette
// points at whichever is currently in effect.
int systemPalette[SysPaletteLength];
int grayscalePalette[SysPaletteLength];
int* activeSystemPalette = systemPalette;
// Per-palette system-color indices, kept so palettes can be rebuilt when
// the active system palette is switched.
uint8_t palettes[PaletteCount][PaletteLength];
float viewScale;
float viewOffsetX;
float viewOffsetY;
int savedClipX;
int savedClipY;
int savedClipWidth;
int savedClipHeight;
Util::Table<SpriteAnim> animSpecs[Sheet_Max];


// Pick vertex/pixel shader filenames for the active Allegro backend.
// Currently stubbed: always reports success without selecting sources.
static bool ChooseShaderSource(
    ALLEGRO_SHADER* shader,
    const char** vsource,
    const char** psource )
{
    /*ALLEGRO_SHADER_PLATFORM platform = al_get_shader_platform( shader );

    if ( platform == ALLEGRO_SHADER_HLSL )
    {
        *vsource = "tileShaderVertex.hlsl";
        *psource = "tileShaderPixel.hlsl";
    }
    else if ( platform == ALLEGRO_SHADER_GLSL )
    {
        *vsource = "tileShaderVertex.glsl";
        *psource = "tileShaderPixel.glsl";
    }
    else
    {
        *vsource = nullptr;
        *psource = nullptr;
        return false;
    }*/

    return true;
}

// Size and allocate paletteBuf to match the palette bitmap's stride.
// Currently stubbed: always reports success without allocating.
static bool AllocatePaletteBuffer()
{
    /*int format = al_get_bitmap_format( paletteBmp );
    ALLEGRO_LOCKED_REGION* region = al_lock_bitmap( paletteBmp, format, ALLEGRO_LOCK_WRITEONLY );
    if ( region == nullptr )
        return false;

    int stride = region->pitch;

    al_unlock_bitmap( paletteBmp );

    if ( stride < 0 )
        stride = -stride;

    paletteBufSize = stride * PaletteBmpHeight;
    paletteStride = stride;

    paletteBuf = new unsigned char[paletteBufSize];
    if ( paletteBuf == nullptr )
        return false;

    memset( paletteBuf, 0, paletteBufSize );*/

    return true;
}

// Create the palette bitmap, build/attach/use the tile shader, and allocate
// the palette staging buffer.  Currently stubbed: always reports success.
bool Graphics::Init()
{
    /*const char* vsource = nullptr;
    const char* psource = nullptr;

    paletteBmp = al_create_bitmap( PaletteBmpWidth, PaletteBmpHeight );
    if ( paletteBmp == nullptr )
        return false;

    tileShader = al_create_shader( ALLEGRO_SHADER_AUTO );
    if ( tileShader == nullptr )
        return false;

    if ( !ChooseShaderSource( tileShader, &vsource, &psource ) )
        return false;

    if ( !al_attach_shader_source_file( tileShader, ALLEGRO_VERTEX_SHADER, vsource ) )
    {
        _RPT1( _CRT_WARN, "%s", al_get_shader_log( tileShader ) );
        return false;
    }

    if ( !al_attach_shader_source_file( tileShader, ALLEGRO_PIXEL_SHADER, psource ) )
    {
        _RPT1( _CRT_WARN, "%s", al_get_shader_log( tileShader ) );
        return false;
    }

    if ( !al_build_shader( tileShader ) )
    {
        _RPT1( _CRT_WARN, "%s", al_get_shader_log( tileShader ) );
        return false;
    }

    if ( !al_use_shader( tileShader ) )
        return false;

    if ( !AllocatePaletteBuffer() )
        return false;*/

    return true;
}

// Replace the tile sheet in `slot` with the image at `path`.
// The Allegro load/destroy calls are stubbed out.
void Graphics::LoadTileSheet( int slot, const char* path )
{
    if ( tileSheets[slot] != nullptr )
    {
        //al_destroy_bitmap( tileSheets[slot] );
        tileSheets[slot] = nullptr;
    }

    //tileSheets[slot] = al_load_bitmap( path );
    //assert( tileSheets[slot] != nullptr );
    if ( tileSheets[slot] == nullptr )
    {
        //tileSheets[slot] = al_create_bitmap( 1, 1 );
    }
}

// Load a tile sheet plus its sprite-animation spec table.
void Graphics::LoadTileSheet( int slot, const char* imagePath, const char* animPath )
{
    LoadTileSheet( slot, imagePath );
    Util::LoadResource( animPath, &animSpecs[slot] );
}

// Look up an animation spec previously loaded for this sheet slot.
const SpriteAnim* Graphics::GetAnimation( int slot, int animIndex )
{
    return animSpecs[slot].GetItem( animIndex );
}

// Install the master color table and derive its grayscale counterpart.
// Masking with 0x30 keeps only the brightness row of each color entry.
void Graphics::LoadSystemPalette( const int* colorsArgb8 )
{
    memcpy( systemPalette, colorsArgb8, sizeof systemPalette );

    for ( int i = 0; i < SysPaletteLength; i++ )
        grayscalePalette[i] = systemPalette[i & 0x30];
}

/*ALLEGRO_COLOR Graphics::GetSystemColor( int sysColor )
{
    int argb8 = activeSystemPalette[sysColor];

    return al_map_rgba(
        (argb8 >> 16) & 0xFF,
        (argb8 >> 8) & 0xFF,
        (argb8 >> 0) & 0xFF,
        (argb8 >> 24) & 0xFF );
}*/

// TODO: this method has to consider the picture format
// Write one ARGB8 color into the CPU palette buffer at (colorIndex, paletteIndex).
void Graphics::SetColor( int paletteIndex, int colorIndex, int colorArgb8 )
{
    int y = paletteIndex;
    int x = colorIndex;

    unsigned char* line = paletteBuf + y * paletteStride;

    ((int*) line)[x] = colorArgb8;
}

// Write a full row of PaletteLength ARGB8 colors for one palette.
void Graphics::SetPalette( int paletteIndex, const int* colorsArgb8 )
{
    int y = paletteIndex;

    unsigned char* line = paletteBuf + y * paletteStride;

    for ( int x = 0; x < PaletteLength; x++ )
        ((int*) line)[x] = colorsArgb8[x];
}

// Set one palette entry by system-color index.  Index 0 is always
// transparent (ARGB 0); the system index is remembered so the entry can be
// rebuilt on a system-palette switch.
void Graphics::SetColorIndexed( int paletteIndex, int colorIndex, int sysColor )
{
    int colorArgb8 = 0;

    if ( colorIndex != 0 )
        colorArgb8 = activeSystemPalette[sysColor];

    SetColor( paletteIndex, colorIndex, colorArgb8 );

    palettes[paletteIndex][colorIndex] = sysColor;
}

// Set a whole palette from four system-color indices (entry 0 transparent).
void Graphics::SetPaletteIndexed( int paletteIndex, const uint8_t* sysColors )
{
    int colorsArgb8[4] = 
    {
        0,
        activeSystemPalette[sysColors[1]],
        activeSystemPalette[sysColors[2]],
        activeSystemPalette[sysColors[3]],
    };

    SetPalette( paletteIndex, colorsArgb8 );

    memcpy( palettes[paletteIndex], sysColors, PaletteLength );
}

// Upload the CPU palette buffer to the palette bitmap.  Currently stubbed.
void Graphics::UpdatePalettes()
{
    /*int format = al_get_bitmap_format( paletteBmp );
    ALLEGRO_LOCKED_REGION* region = al_lock_bitmap( paletteBmp, format, ALLEGRO_LOCK_WRITEONLY );
    assert( region != nullptr );

    unsigned char* base = (unsigned char*) region->data;

    if ( region->pitch < 0 )
        base += region->pitch * (PaletteBmpHeight - 1);

    memcpy( base, paletteBuf, paletteBufSize );

    al_unlock_bitmap( paletteBmp );*/
}

// Swap in a different system color table (color vs. grayscale) and rebuild
// every palette's ARGB values from its remembered system-color indices.
void Graphics::SwitchSystemPalette( int* newSystemPalette )
{
    if ( activeSystemPalette == newSystemPalette )
        return;

    activeSystemPalette = newSystemPalette;

    for ( int i = 0; i < PaletteCount; i++ )
    {
        const uint8_t* sysColors = palettes[i];
        int colorsArgb8[4] = 
        {
            0,
            activeSystemPalette[sysColors[1]],
            activeSystemPalette[sysColors[2]],
            activeSystemPalette[sysColors[3]],
        };

        SetPalette( i, colorsArgb8 );
    }

    UpdatePalettes();
}

void Graphics::EnableGrayscale()
{
    SwitchSystemPalette( grayscalePalette );
}

void Graphics::DisableGrayscale()
{
    SwitchSystemPalette( systemPalette );
}

// Begin a batched drawing pass (binds the palette texture; stubbed).
void Graphics::Begin()
{
    //bool bRet;

    //bRet = al_set_shader_sampler( "palTex", paletteBmp, 1 );
    //assert( bRet );

    //al_hold_bitmap_drawing( true );
}

// End a batched drawing pass (flushes held drawing; stubbed).
void Graphics::End()
{
    //al_hold_bitmap_drawing( false );
}

// Draw a bitmap region tinted so the shader can select `palette`.
// The palette index is encoded in the red tint channel.  Stubbed.
void Graphics::DrawBitmap(ALLEGRO_BITMAP* bitmap, int srcX, int srcY, int width, int height, int destX, int destY, int palette, int flags)
{
    float palRed = palette / (float) PaletteBmpHeight;
    //ALLEGRO_COLOR tint = al_map_rgba_f( palRed, 0, 0, 1 );
    //al_draw_tinted_bitmap_region(bitmap, tint, srcX, srcY, width, height, destX, destY, flags);
}

// Sprites are drawn one scanline lower than background tiles (NES behavior).
void Graphics::DrawSpriteTile(int slot, int srcX, int srcY, int width, int height, int destX, int destY, int palette, int flags)
{
    DrawTile(slot, srcX, srcY, width, height, destX, destY + 1, palette, flags);
}

// Draw a region of a tile sheet with the given palette.  Stubbed.
void Graphics::DrawTile(int slot, int srcX, int srcY, int width, int height, int destX, int destY, int palette, int flags)
{
    /*assert( slot < Sheet_Max );

    float palRed = palette / (float) PaletteBmpHeight;
    ALLEGRO_COLOR tint = al_map_rgba_f( palRed, 0, 0, 1 );
    al_draw_tinted_bitmap_region(tileSheets[slot], tint, srcX, srcY, width, height, destX, destY, flags);*/
}

// Draw a 16x16 sprite from four consecutive 8x8 tiles laid out
// top-left, bottom-left, top-right, bottom-right.
void Graphics::DrawStripSprite16x16(int slot, int firstTile, int destX, int destY, int palette)
{
    static const uint8_t offsetsX[4] = { 0, 0, 8, 8 };
    static const uint8_t offsetsY[4] = { 0, 8, 0, 8 };

    int tileRef = firstTile;

    for (int i = 0; i < 4; i++)
    {
        // Tile ref encodes sheet column in the low nibble, row in the high.
        int srcX = (tileRef & 0x0F) * TileWidth;
        int srcY = ((tileRef & 0xF0) >> 4) * TileHeight;
        tileRef++;

        DrawTile(slot, srcX, srcY, TileWidth, TileHeight, destX + offsetsX[i], destY + offsetsY[i], palette, 0);
    }
}

// Record scale/offset used to map logical view coordinates to the screen.
void Graphics::SetViewParams(float scale, float x, float y)
{
    viewScale = scale;
    viewOffsetX = x;
    viewOffsetY = y;
}

// Clamp a logical-coordinate clip rect to the standard view height, then
// transform it with the view params.  The Allegro set/get clip calls are
// stubbed out.
void Graphics::SetClip(int x, int y, int width, int height)
{
    //al_get_clipping_rectangle(&savedClipX, &savedClipY, &savedClipWidth, &savedClipHeight);

    int y2 = y + height;

    if ( y2 < 0 )
    {
        height = 0;
        y = 0;
    }
    else if ( y > StdViewHeight )
    {
        height = 0;
        y = StdViewHeight;
    }
    else
    {
        if ( y < 0 )
        {
            height += y;
            y = 0;
        }

        if ( y2 > StdViewHeight )
            height = StdViewHeight - y;
    }

    int clipX = viewOffsetX + x * viewScale;
    int clipY = viewOffsetY + y * viewScale;
    int clipWidth = width * viewScale;
    int clipHeight = height * viewScale;

    //al_set_clipping_rectangle(clipX, clipY, clipWidth, clipHeight);
}

// Restore the clip rect saved by SetClip.  Stubbed.
void Graphics::ResetClip()
{
    //al_set_clipping_rectangle(savedClipX, savedClipY, savedClipWidth, savedClipHeight);
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//*****************************************************************************
// File: daccess.cpp
//
//
// ClrDataAccess implementation.
//
//*****************************************************************************

#include "stdafx.h"
#include <clrdata.h>
#include "typestring.h"
#include "holder.h"
#include "debuginfostore.h"
#include "peimagelayout.inl"
#include "datatargetadapter.h"
#include "readonlydatatargetfacade.h"
#include "metadataexports.h"
#include "excep.h"
#include "debugger.h"
#include "dwreport.h"
#include "primitives.h"
#include "dbgutil.h"
#ifdef FEATURE_PAL
#include <dactablerva.h>
#endif
#include "dwbucketmanager.hpp"
#include "gcinterface.dac.h"

// To include definiton of IsThrowableThreadAbortException
// #include <exstatecommon.h>

// Serializes all access to the DAC; taken on entry to public DAC APIs.
CRITICAL_SECTION g_dacCritSec;
// The DAC instance currently executing under g_dacCritSec (if any).
ClrDataAccess* g_dacImpl;
HINSTANCE g_thisModule;

extern VOID STDMETHODCALLTYPE TLS_FreeMasterSlotIndex();

// DLL entry point: initializes the PAL (Unix) and the global DAC critical
// section on attach; tears them down on detach.
DLLEXPORT
EXTERN_C BOOL WINAPI
DllMain(HANDLE instance, DWORD reason, LPVOID reserved)
{
    static bool g_procInitialized = false;

    switch(reason)
    {
    case DLL_PROCESS_ATTACH:
    {
        if (g_procInitialized)
        {
#ifdef FEATURE_PAL
            // Double initialization can happen on Unix
            // in case of manual load of DAC shared lib and calling DllMain
            // not a big deal, we just ignore it.
            return TRUE;
#else
            return FALSE;
#endif
        }

#ifdef FEATURE_PAL
        int err = PAL_InitializeDLL();
        if(err != 0)
        {
            return FALSE;
        }
#endif
        InitializeCriticalSection(&g_dacCritSec);

        // Save the module handle.
        g_thisModule = (HINSTANCE)instance;

        g_procInitialized = true;
        break;
    }

    case DLL_PROCESS_DETACH:
        // It's possible for this to be called without ATTACH completing (eg. if it failed)
        if (g_procInitialized)
        {
            DeleteCriticalSection(&g_dacCritSec);
        }
#ifndef FEATURE_PAL
        TLS_FreeMasterSlotIndex();
#endif
        g_procInitialized = false;
        break;
    }

    return TRUE;
}

// Return the module handle captured at DLL_PROCESS_ATTACH.
HINSTANCE
GetModuleInst(void)
{
    return g_thisModule;
}

// Convert a NUL-terminated UTF-8 string to UTF-16.  Either out-param may be
// omitted: nameLen receives the required buffer size (in WCHARs, including
// the terminator); buffer/bufLen receive the converted text when provided.
HRESULT
ConvertUtf8(__in LPCUTF8 utf8,
            ULONG32 bufLen,
            ULONG32* nameLen,
            __out_ecount_part_opt(bufLen, *nameLen) PWSTR buffer)
{
    if (nameLen)
    {
        *nameLen = WszMultiByteToWideChar(CP_UTF8, 0, utf8, -1, NULL, 0);
        if (!*nameLen)
        {
            return HRESULT_FROM_GetLastError();
        }
    }

    if (buffer && bufLen)
    {
        if (!WszMultiByteToWideChar(CP_UTF8, 0, utf8, -1, buffer, bufLen))
        {
            return HRESULT_FROM_GetLastError();
        }
    }

    return S_OK;
}

// Convert srcChars UTF-16 characters (or the whole string when srcChars is
// (ULONG32)-1) to a freshly allocated UTF-8 string.  On success the caller
// owns *utf8 and must delete[] it.
HRESULT
AllocUtf8(__in_opt LPCWSTR wstr,
          ULONG32 srcChars,
          __deref_out LPUTF8* utf8)
{
    ULONG32 chars = WszWideCharToMultiByte(CP_UTF8, 0, wstr, srcChars,
                                           NULL, 0, NULL, NULL);
    if (!chars)
    {
        return HRESULT_FROM_GetLastError();
    }

    // Make sure the converted string is always terminated.
    if (srcChars != (ULONG32)-1)
    {
        // Counted conversion does not include a terminator; reserve room
        // for one, guarding against overflow of the ULONG32 count.
        if (!ClrSafeInt<ULONG32>::addition(chars, 1, chars))
        {
            return HRESULT_FROM_WIN32(ERROR_ARITHMETIC_OVERFLOW);
        }
    }

    char* mem = new (nothrow) char[chars];
    if (!mem)
    {
        return E_OUTOFMEMORY;
    }

    if (!WszWideCharToMultiByte(CP_UTF8, 0, wstr, srcChars,
                                mem, chars, NULL, NULL))
    {
        HRESULT hr = HRESULT_FROM_GetLastError();
        delete [] mem;
        return hr;
    }

    if (srcChars != (ULONG32)-1)
    {
        mem[chars - 1] = 0;
    }

    *utf8 = mem;
    return S_OK;
}

// Format "namespace.typename" for a typedef token into the caller's buffer.
HRESULT
GetFullClassNameFromMetadata(IMDInternalImport* mdImport,
                             mdTypeDef classToken,
                             ULONG32 bufferChars,
                             __inout_ecount(bufferChars) LPUTF8 buffer)
{
    HRESULT hr;
    LPCUTF8 baseName, namespaceName;

    IfFailRet(mdImport->GetNameOfTypeDef(classToken, &baseName, &namespaceName));

    return ns::MakePath(buffer, bufferChars, namespaceName, baseName) ?
        S_OK : E_OUTOFMEMORY;
}

// Format "namespace.typename.methodname" for a methoddef token into the
// caller's buffer.  The class portion is omitted if the method has no parent.
HRESULT
GetFullMethodNameFromMetadata(IMDInternalImport* mdImport,
                              mdMethodDef methodToken,
                              ULONG32 bufferChars,
                              __inout_ecount(bufferChars) LPUTF8 buffer)
{
    HRESULT status;
    HRESULT hr;
    mdTypeDef classToken;
    size_t len;

    if (mdImport->GetParentToken(methodToken, &classToken) == S_OK)
    {
        if ((status = GetFullClassNameFromMetadata(mdImport, classToken,
                                                   bufferChars, buffer)) != S_OK)
        {
            return status;
        }

        len = strlen(buffer);
        buffer += len;
        bufferChars -= static_cast<ULONG32>(len) + 1;

        if (!bufferChars)
        {
            return E_OUTOFMEMORY;
        }

        *buffer++ = NAMESPACE_SEPARATOR_CHAR;
    }

    LPCUTF8 methodName;
    IfFailRet(mdImport->GetNameOfMethodDef(methodToken, &methodName));

    // Review conversion of size_t to ULONG32.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4267)
#endif
    len = strlen(methodName);
#ifdef _MSC_VER
#pragma warning(pop)
#endif

    if (len >= bufferChars)
    {
        return E_OUTOFMEMORY;
    }

    strcpy_s(buffer, bufferChars, methodName);

    return S_OK;
}

// Split "namespace.type.member(params)" into separately allocated UTF-8
// pieces according to `syntax` (type-only, field, or method).  memberDots
// lets the caller force that many '.' characters to be treated as part of
// the member name (for explicit interface implementations).  On success
// each non-NULL out pointer must be delete[]d by the caller; on failure all
// partial allocations are released via the goto cleanup chain below.
HRESULT
SplitFullName(__in_z __in PCWSTR fullName,
              SplitSyntax syntax,
              ULONG32 memberDots,
              __deref_out_opt LPUTF8* namespaceName,
              __deref_out_opt LPUTF8* typeName,
              __deref_out_opt LPUTF8* memberName,
              __deref_out_opt LPUTF8* params)
{
    HRESULT status;
    PCWSTR paramsStart, memberStart, memberEnd, typeStart;

    if (!*fullName)
    {
        return E_INVALIDARG;
    }

    //
    // Split off parameters.
    //

    paramsStart = wcschr(fullName, W('('));
    if (paramsStart)
    {
        if (syntax != SPLIT_METHOD ||
            paramsStart == fullName)
        {
            return E_INVALIDARG;
        }

        if ((status = AllocUtf8(paramsStart, (ULONG32)-1, params)) != S_OK)
        {
            return status;
        }

        memberEnd = paramsStart - 1;
    }
    else
    {
        *params = NULL;
        memberEnd = fullName + (wcslen(fullName) - 1);
    }

    if (syntax != SPLIT_TYPE)
    {
        //
        // Split off member name.
        //

        memberStart = memberEnd;

        for (;;)
        {
            while (memberStart >= fullName &&
                   *memberStart != W('.'))
            {
                memberStart--;
            }

            // Some member names (e.g. .ctor and .dtor) have
            // dots, so go back to the first dot.
            while (memberStart > fullName &&
                   memberStart[-1] == W('.'))
            {
                memberStart--;
            }

            if (memberStart <= fullName)
            {
                if (memberDots > 0)
                {
                    // Caller expected dots in the
                    // member name and they weren't found.
                    status = E_INVALIDARG;
                    goto DelParams;
                }

                break;
            }
            else if (memberDots == 0)
            {
                break;
            }

            // Consume one requested dot and keep scanning left.
            memberStart--;
            memberDots--;
        }

        memberStart++;
        if (memberStart > memberEnd)
        {
            status = E_INVALIDARG;
            goto DelParams;
        }

        if ((status = AllocUtf8(memberStart, (ULONG32)
                                (memberEnd - memberStart) + 1,
                                memberName)) != S_OK)
        {
            goto DelParams;
        }
    }
    else
    {
        *memberName = NULL;
        memberStart = memberEnd + 2;
    }

    //
    // Split off type name.
    //

    if (memberStart > fullName)
    {
        // Must have at least one character for the type
        // name.  If there was a member name, there must
        // also be a separator.
        if (memberStart < fullName + 2)
        {
            status = E_INVALIDARG;
            goto DelMember;
        }

        typeStart = memberStart - 2;
        while (typeStart >= fullName &&
               *typeStart != W('.'))
        {
            typeStart--;
        }
        typeStart++;

        if ((status = AllocUtf8(typeStart, (ULONG32)
                                (memberStart - typeStart) - 1,
                                typeName)) != S_OK)
        {
            goto DelMember;
        }
    }
    else
    {
        *typeName = NULL;
        typeStart = fullName;
    }

    //
    // Namespace must be the rest.
    //

    if (typeStart > fullName)
    {
        if ((status = AllocUtf8(fullName, (ULONG32)
                                (typeStart - fullName) - 1,
                                namespaceName)) != S_OK)
        {
            goto DelType;
        }
    }
    else
    {
        *namespaceName = NULL;
    }

    return S_OK;

    // Failure cleanup: fall through to release everything allocated so far.
 DelType:
    delete [] (*typeName);
 DelMember:
    delete [] (*memberName);
 DelParams:
    delete [] (*params);
    return status;
}

// Compare two UTF-8 names, optionally case-insensitively per nameFlags.
int
CompareUtf8(__in LPCUTF8 str1, __in LPCUTF8 str2, __in ULONG32 nameFlags)
{
    if (nameFlags & CLRDATA_BYNAME_CASE_INSENSITIVE)
    {
        // XXX Microsoft - Convert to Unicode?
        return SString::_stricmp(str1, str2);
    }

    return strcmp(str1, str2);
}

//----------------------------------------------------------------------------
//
// MetaEnum.
//
//----------------------------------------------------------------------------

// Begin enumerating metadata tokens of the given kind (typedefs, or
// methods/fields scoped to `container`).  Must be paired with End().
HRESULT
MetaEnum::Start(IMDInternalImport* mdImport, ULONG32 kind, mdToken container)
{
    HRESULT status;

    switch(kind)
    {
    case mdtTypeDef:
        status = mdImport->EnumTypeDefInit(&m_enum);
        break;
    case mdtMethodDef:
    case mdtFieldDef:
        status = mdImport->EnumInit(kind, container, &m_enum);
        break;
    default:
        return E_INVALIDARG;
    }
    if (status != S_OK)
    {
        return status;
    }

    m_mdImport = mdImport;
    m_kind = kind;

    return S_OK;
}

// Close the underlying metadata enumeration (if started) and reset state.
void
MetaEnum::End(void)
{
    if (!m_mdImport)
    {
        return;
    }

    switch(m_kind)
    {
    case mdtTypeDef:
        m_mdImport->EnumTypeDefClose(&m_enum);
        break;
    case mdtMethodDef:
    case mdtFieldDef:
        m_mdImport->EnumClose(&m_enum);
        break;
    }

    Clear();
}

// Fetch the next token; optionally also return its namespace/name strings
// (typedefs have both, method/field defs have a name only).  Returns
// S_FALSE when the enumeration is exhausted.
HRESULT
MetaEnum::NextToken(mdToken* token,
                    __deref_opt_out_opt LPCUTF8* namespaceName,
                    __deref_opt_out_opt LPCUTF8* name)
{
    HRESULT hr;
    if (!m_mdImport)
    {
        return E_INVALIDARG;
    }

    switch(m_kind)
    {
    case mdtTypeDef:
        if (!m_mdImport->EnumTypeDefNext(&m_enum, token))
        {
            return S_FALSE;
        }
        m_lastToken = *token;
        if (namespaceName || name)
        {
            LPCSTR _name, _namespaceName;

            IfFailRet(m_mdImport->GetNameOfTypeDef(*token, &_name, &_namespaceName));
            if (namespaceName)
            {
                *namespaceName = _namespaceName;
            }
            if (name)
            {
                *name = _name;
            }
        }
        return S_OK;

    case mdtMethodDef:
        if (!m_mdImport->EnumNext(&m_enum, token))
        {
            return S_FALSE;
        }
        m_lastToken = *token;
        if (namespaceName)
        {
            *namespaceName = NULL;
        }
        if (name != NULL)
        {
            IfFailRet(m_mdImport->GetNameOfMethodDef(*token, name));
        }
        return S_OK;

    case mdtFieldDef:
        if (!m_mdImport->EnumNext(&m_enum, token))
        {
            return S_FALSE;
        }
        m_lastToken = *token;
        if (namespaceName)
        {
            *namespaceName = NULL;
        }
        if (name != NULL)
        {
            IfFailRet(m_mdImport->GetNameOfFieldDef(*token, name));
        }
        return S_OK;

    default:
        return E_INVALIDARG;
    }
}

// Like NextToken, but yields each token once per app domain: the same token
// is returned for every domain before advancing to the next token.
HRESULT
MetaEnum::NextDomainToken(AppDomain** appDomain,
                          mdToken* token)
{
    HRESULT status;

    if (m_appDomain)
    {
        // Use only the caller-provided app domain.
        *appDomain = m_appDomain;
        return NextToken(token, NULL, NULL);
    }

    //
    // Splay tokens across all app domains.
    //

    for (;;)
    {
        if (m_lastToken == mdTokenNil)
        {
            // Need to fetch a token.
            if ((status = NextToken(token, NULL, NULL)) != S_OK)
            {
                return status;
            }

            m_domainIter.Init();
        }

        if (m_domainIter.Next())
        {
            break;
        }

        // Domain list exhausted for this token; move to the next token.
        m_lastToken = mdTokenNil;
    }

    *appDomain = m_domainIter.GetDomain();
    *token = m_lastToken;

    return S_OK;
}

// Advance the enumeration until a token matches the (optional) namespace
// and (optional) name filters under the comparison rules in nameFlags.
HRESULT
MetaEnum::NextTokenByName(__in_opt LPCUTF8 namespaceName,
                          __in_opt LPCUTF8 name,
                          ULONG32 nameFlags,
                          mdToken* token)
{
    HRESULT status;
    LPCUTF8 tokNamespace, tokName;

    for (;;)
    {
        if ((status = NextToken(token, &tokNamespace, &tokName)) != S_OK)
        {
            return status;
        }

        if (namespaceName &&
            (!tokNamespace ||
             CompareUtf8(namespaceName, tokNamespace, nameFlags) != 0))
        {
            continue;
        }
        if (name &&
            (!tokName ||
             CompareUtf8(name, tokName, nameFlags) != 0))
        {
            continue;
        }

        return S_OK;
    }
}

// Name-filtered variant of NextDomainToken: each matching token is yielded
// once per app domain.
HRESULT
MetaEnum::NextDomainTokenByName(__in_opt LPCUTF8 namespaceName,
                                __in_opt LPCUTF8 name,
                                ULONG32 nameFlags,
                                AppDomain** appDomain, mdToken* token)
{
    HRESULT status;

    if (m_appDomain)
    {
        // Use only the caller-provided app domain.
        *appDomain = m_appDomain;
        return NextTokenByName(namespaceName, name, nameFlags, token);
    }

    //
    // Splay tokens across all app domains.
    //

    for (;;)
    {
        if (m_lastToken == mdTokenNil)
        {
            // Need to fetch a token.
            if ((status = NextTokenByName(namespaceName, name, nameFlags,
                                          token)) != S_OK)
            {
                return status;
            }

            m_domainIter.Init();
        }

        if (m_domainIter.Next())
        {
            break;
        }

        m_lastToken = mdTokenNil;
    }

    *appDomain = m_domainIter.GetDomain();
    *token = m_lastToken;

    return S_OK;
}

// Factory: heap-allocate a MetaEnum over `mod`'s metadata and hand it back
// both as a pointer and as an opaque CLRDATA_ENUM handle.  Returns S_FALSE
// (with a NULL handle) when the module has no metadata.
HRESULT
MetaEnum::New(Module* mod,
              ULONG32 kind,
              mdToken container,
              IXCLRDataAppDomain* pubAppDomain,
              MetaEnum** metaEnumRet,
              CLRDATA_ENUM* handle)
{
    HRESULT status;
    MetaEnum* metaEnum;

    if (handle)
    {
        *handle = TO_CDENUM(NULL);
    }

    if (!mod->GetFile()->HasMetadata())
    {
        return S_FALSE;
    }

    metaEnum = new (nothrow) MetaEnum;
    if (!metaEnum)
    {
        return E_OUTOFMEMORY;
    }

    if ((status = metaEnum->
         Start(mod->GetMDImport(), kind, container)) != S_OK)
    {
        delete metaEnum;
        return status;
    }

    if (pubAppDomain)
    {
        metaEnum->m_appDomain =
            ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain();
    }

    if (metaEnumRet)
    {
        *metaEnumRet = metaEnum;
    }
    if (handle)
    {
        *handle = TO_CDENUM(metaEnum);
    }
    return S_OK;
}

//----------------------------------------------------------------------------
//
// SplitName
//
//----------------------------------------------------------------------------

// Holds a fully-qualified name split into pieces plus resolution state
// (tokens, enumerators) used by the CdStart*/CdNext* cursor APIs below.
SplitName::SplitName(SplitSyntax syntax, ULONG32 nameFlags,
                     ULONG32 memberDots)
{
    m_syntax = syntax;
    m_nameFlags = nameFlags;
    m_memberDots = memberDots;

    Clear();
}

// Free all owned name strings (allocated by SplitFullName/AllocUtf8).
void
SplitName::Delete(void)
{
    delete [] m_namespaceName;
    m_namespaceName = NULL;
    delete [] m_typeName;
    m_typeName = NULL;
    delete [] m_memberName;
    m_memberName = NULL;
    delete [] m_params;
    m_params = NULL;
}

// Reset all fields without freeing (used from the constructor).
void
SplitName::Clear(void)
{
    m_namespaceName = NULL;
    m_typeName = NULL;
    m_typeToken = mdTypeDefNil;
    m_memberName = NULL;
    m_memberToken = mdTokenNil;
    m_params = NULL;

    m_tlsThread = NULL;
    m_metaEnum.m_appDomain = NULL;
    m_module = NULL;
    m_lastField = NULL;
}

// Split fullName according to this object's syntax.  SPLIT_NO_NAME requires
// a NULL name; every other syntax requires a non-NULL one.
HRESULT
SplitName::SplitString(__in_opt PCWSTR fullName)
{
    if (m_syntax == SPLIT_NO_NAME)
    {
        if (fullName)
        {
            return E_INVALIDARG;
        }

        return S_OK;
    }
    else if (!fullName)
    {
        return E_INVALIDARG;
    }

    return SplitFullName(fullName,
                         m_syntax,
                         m_memberDots,
                         &m_namespaceName,
                         &m_typeName,
                         &m_memberName,
                         &m_params);
}

// Reverse scan for `ch` in [beg, end]; returns a pointer one before `beg`
// when not found (callers compare the result against `beg`).
FORCEINLINE WCHAR* wcrscan(LPCWSTR beg, LPCWSTR end, WCHAR ch)
{
    //_ASSERTE(beg <= end);
    WCHAR *p;
    for (p = (WCHAR*)end; p >= beg; --p)
    {
        if (*p == ch)
            break;
    }
    return p;
}

// This functions allocates a new UTF8 string that contains the classname
// lying between the current sepName and the previous sepName.  E.g. for a
// class name of "Outer+middler+inner" when sepName points to the NULL
// terminator this function will return "inner" in pResult and will update
// sepName to point to the second '+' character in the string.  When sepName
// points to the first '+' character this function will return "Outer" in
// pResult and sepName will point one WCHAR before fullName.
HRESULT NextEnclosingClasName(LPCWSTR fullName, __deref_inout LPWSTR& sepName, __deref_out LPUTF8 *pResult)
{
    if (sepName < fullName)
    {
        return E_FAIL;
    }
    //_ASSERTE(*sepName == W('\0') || *sepName == W('+') || *sepName == W('/'));

    LPWSTR origInnerName = sepName-1;
    // Either '+' or '/' may separate nested class names; try '+' first.
    if ((sepName = wcrscan(fullName, origInnerName, W('+'))) < fullName)
    {
        sepName = wcrscan(fullName, origInnerName, W('/'));
    }

    return AllocUtf8(sepName+1, static_cast<ULONG32>(origInnerName-sepName), pResult);
}

// Resolve m_typeName (possibly a nested "Outer+Inner" chain) to a typedef
// token in the given metadata scope.  Caches the result in m_typeToken.
// The Retry loop walks candidate tokens for the innermost name and checks
// that the chain of enclosing classes matches the requested name.
bool
SplitName::FindType(IMDInternalImport* mdInternal)
{
    if (m_typeToken != mdTypeDefNil)
    {
        return true;
    }

    if (!m_typeName)
    {
        return false;
    }

    if ((m_namespaceName == NULL || m_namespaceName[0] == '\0')
        && (CompareUtf8(COR_MODULE_CLASS, m_typeName, m_nameFlags)==0))
    {
        m_typeToken = TokenFromRid(1, mdtTypeDef);  // <Module> class always has a RID of 1.
        return true;
    }

    MetaEnum metaEnum;

    if (metaEnum.Start(mdInternal, mdtTypeDef, mdTypeDefNil) != S_OK)
    {
        return false;
    }

    LPUTF8 curClassName;

    ULONG32 length;
    WCHAR wszName[MAX_CLASS_NAME];
    ConvertUtf8(m_typeName, MAX_CLASS_NAME, &length, wszName);

    WCHAR *pHead;

Retry:

    pHead = wszName + length;

    if (FAILED(NextEnclosingClasName(wszName, pHead, &curClassName)))
    {
        return false;
    }

    // an inner class has an empty namespace associated with it
    HRESULT hr = metaEnum.NextTokenByName((pHead < wszName) ? m_namespaceName : "",
                                          curClassName,
                                          m_nameFlags,
                                          &m_typeToken);
    delete[] curClassName;

    if (hr != S_OK)
    {
        // if we didn't find a token with the given name
        return false;
    }
    else if (pHead < wszName)
    {
        // if we did find a token, *and* the class name given
        // does not specify any enclosing class, that's it
        return true;
    }
    else
    {
        // restart with innermost class
        pHead = wszName + length;
        mdTypeDef tkInner = m_typeToken;
        mdTypeDef tkOuter;
        BOOL bRetry = FALSE;
        LPUTF8 utf8Name;

        while (
            !bRetry
            && SUCCEEDED(NextEnclosingClasName(wszName, pHead, &utf8Name))
        )
        {
            if (mdInternal->GetNestedClassProps(tkInner, &tkOuter) != S_OK)
                tkOuter = mdTypeDefNil;

            LPCSTR szName, szNS;
            if (FAILED(mdInternal->GetNameOfTypeDef(tkInner, &szName, &szNS)))
            {
                return false;
            }
            bRetry = (CompareUtf8(utf8Name, szName, m_nameFlags) != 0);
            if (!bRetry)
            {
                // if this is outermost class we need to compare namespaces too
                if (tkOuter == mdTypeDefNil)
                {
                    // is this the outermost in the class name, too?
                    if (pHead < wszName
                        && CompareUtf8(m_namespaceName ? m_namespaceName : "",
                                       szNS, m_nameFlags) == 0)
                    {
                        delete[] utf8Name;
                        return true;
                    }
                    else
                    {
                        bRetry = TRUE;
                    }
                }
            }
            delete[] utf8Name;
            tkInner = tkOuter;
        }

        // Chain mismatch: resume scanning from the next candidate token.
        goto Retry;
    }
}

// Resolve m_memberName to a methoddef token on m_typeToken.  Caches the
// result in m_memberToken.  Signature matching is not performed (see XXX).
bool
SplitName::FindMethod(IMDInternalImport* mdInternal)
{
    if (m_memberToken != mdTokenNil)
    {
        return true;
    }

    if (m_typeToken == mdTypeDefNil ||
        !m_memberName)
    {
        return false;
    }

    ULONG32 EmptySig = 0;

    // XXX Microsoft - Compare using signature when available.
    if (mdInternal->FindMethodDefUsingCompare(m_typeToken,
                                              m_memberName,
                                              (PCCOR_SIGNATURE)&EmptySig,
                                              sizeof(EmptySig),
                                              NULL,
                                              NULL,
                                              &m_memberToken) != S_OK)
    {
        m_memberToken = mdTokenNil;
        return false;
    }

    return true;
}

// Resolve m_memberName to a fielddef token on m_typeToken.  Caches the
// result in m_memberToken.
bool
SplitName::FindField(IMDInternalImport* mdInternal)
{
    if (m_memberToken != mdTokenNil)
    {
        return true;
    }

    if (m_typeToken == mdTypeDefNil ||
        !m_memberName ||
        m_params)
    {
        // Can't have params with a field.
        return false;
    }

    MetaEnum metaEnum;

    if (metaEnum.Start(mdInternal, mdtFieldDef, m_typeToken) != S_OK)
    {
        return false;
    }

    return metaEnum.NextTokenByName(NULL,
                                    m_memberName,
                                    m_nameFlags,
                                    &m_memberToken) == S_OK;
}

// Allocate a SplitName and split fullName into it.  On success the caller
// owns *split and must delete it.
HRESULT
SplitName::AllocAndSplitString(__in_opt PCWSTR fullName,
                               SplitSyntax syntax,
                               ULONG32 nameFlags,
                               ULONG32 memberDots,
                               SplitName** split)
{
    HRESULT status;

    if (nameFlags & ~(CLRDATA_BYNAME_CASE_SENSITIVE |
                      CLRDATA_BYNAME_CASE_INSENSITIVE))
    {
        return E_INVALIDARG;
    }

    *split = new (nothrow) SplitName(syntax, nameFlags, memberDots);
    if (!*split)
    {
        return E_OUTOFMEMORY;
    }

    if ((status = (*split)->SplitString(fullName)) != S_OK)
    {
        delete (*split);
        return status;
    }

    return S_OK;
}

// Create a method-enumeration cursor for `fullName` on the given module
// and (optional) type token, returned as an opaque CLRDATA_ENUM handle.
HRESULT
SplitName::CdStartMethod(__in_opt PCWSTR fullName,
                         ULONG32 nameFlags,
                         Module* mod,
                         mdTypeDef typeToken,
                         AppDomain* appDomain,
                         IXCLRDataAppDomain* pubAppDomain,
                         SplitName** splitRet,
                         CLRDATA_ENUM* handle)
{
    HRESULT status;
    SplitName* split;
    ULONG methDots = 0;

    *handle = TO_CDENUM(NULL);

 Retry:
    if ((status = SplitName::
         AllocAndSplitString(fullName, SPLIT_METHOD, nameFlags,
                             methDots, &split)) != S_OK)
    {
        return status;
    }

    if (typeToken == mdTypeDefNil)
    {
        if (!split->FindType(mod->GetMDImport()))
        {
            bool hasNamespace = split->m_namespaceName != NULL;

            delete split;

            //
            // We may have a case where there's an
            // explicitly implemented method which
            // has dots in the name.  If it's possible
            // to move the method name dot split
            // back, go ahead and retry that way.
            //

            if (hasNamespace)
            {
                methDots++;
                goto Retry;
            }

            return E_INVALIDARG;
        }

        typeToken = split->m_typeToken;
    }
    else
    {
        // An explicit type token means the name must be member-only.
        if (split->m_namespaceName || split->m_typeName)
        {
            delete split;
            return E_INVALIDARG;
        }
    }

    if ((status = split->m_metaEnum.
         Start(mod->GetMDImport(), mdtMethodDef, typeToken)) != S_OK)
    {
        delete split;
        return status;
    }

    split->m_metaEnum.m_appDomain = appDomain;
    if (pubAppDomain)
    {
        split->m_metaEnum.m_appDomain =
            ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain();
    }
    split->m_module = mod;

    *handle = TO_CDENUM(split);
    if (splitRet)
    {
        *splitRet = split;
    }
    return S_OK;
}

// Advance a CdStartMethod cursor to the next matching methoddef token.
HRESULT
SplitName::CdNextMethod(CLRDATA_ENUM* handle,
                        mdMethodDef* token)
{
    SplitName* split = FROM_CDENUM(SplitName, *handle);
    if (!split)
    {
        return E_INVALIDARG;
    }

    return split->m_metaEnum.
        NextTokenByName(NULL, split->m_memberName, split->m_nameFlags,
                        token);
}

// Advance a CdStartMethod cursor, splaying matches across app domains.
HRESULT
SplitName::CdNextDomainMethod(CLRDATA_ENUM* handle,
                              AppDomain** appDomain,
                              mdMethodDef* token)
{
    SplitName* split = FROM_CDENUM(SplitName, *handle);
    if (!split)
    {
        return E_INVALIDARG;
    }

    return split->m_metaEnum.
        NextDomainTokenByName(NULL, split->m_memberName, split->m_nameFlags,
                              appDomain, token);
}

// Create a field-enumeration cursor.  Resolves the type (from typeHandle,
// an explicit typeToken, or the split name), initializes the field
// iterator, and records the object base / TLS thread used later to read
// field values.
HRESULT
SplitName::CdStartField(__in_opt PCWSTR fullName,
                        ULONG32 nameFlags,
                        ULONG32 fieldFlags,
                        IXCLRDataTypeInstance* fromTypeInst,
                        TypeHandle typeHandle,
                        Module* mod,
                        mdTypeDef typeToken,
                        ULONG64 objBase,
                        Thread* tlsThread,
                        IXCLRDataTask* pubTlsThread,
                        AppDomain* appDomain,
                        IXCLRDataAppDomain* pubAppDomain,
                        SplitName** splitRet,
                        CLRDATA_ENUM* handle)
{
    HRESULT status;
    SplitName* split;

    *handle = TO_CDENUM(NULL);

    if ((status = SplitName::
         AllocAndSplitString(fullName,
                             fullName ? SPLIT_FIELD : SPLIT_NO_NAME,
                             nameFlags, 0,
                             &split)) != S_OK)
    {
        return status;
    }

    if (typeHandle.IsNull())
    {
        if (typeToken == mdTypeDefNil)
        {
            if (!split->FindType(mod->GetMDImport()))
            {
                status = E_INVALIDARG;
                goto Fail;
            }

            typeToken = split->m_typeToken;
        }
        else
        {
            if (split->m_namespaceName || split->m_typeName)
            {
                status = E_INVALIDARG;
                goto Fail;
            }
        }

        // With phased class loading, this may return a partially-loaded type
        // @todo : does this matter?
        typeHandle = mod->LookupTypeDef(split->m_typeToken);
        if (typeHandle.IsNull())
        {
            status = E_UNEXPECTED;
            goto Fail;
        }
    }

    if ((status = InitFieldIter(&split->m_fieldEnum,
                                typeHandle,
                                true,
                                fieldFlags,
                                fromTypeInst)) != S_OK)
    {
        goto Fail;
    }

    split->m_objBase = objBase;
    split->m_tlsThread = tlsThread;
    if (pubTlsThread)
    {
        split->m_tlsThread = ((ClrDataTask*)pubTlsThread)->GetThread();
    }
    split->m_metaEnum.m_appDomain = appDomain;
    if (pubAppDomain)
    {
        split->m_metaEnum.m_appDomain =
            ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain();
    }
    split->m_module = mod;

    *handle = TO_CDENUM(split);
    if (splitRet)
    {
        *splitRet = split;
    }
    return S_OK;

 Fail:
    delete split;
    return status;
}

// Advance a CdStartField cursor: walk the field iterator to the next field
// matching the split member name (or every field for SPLIT_NO_NAME) and
// surface its type/flags/value/name/token to the caller.
// (Definition continues beyond this chunk.)
HRESULT
SplitName::CdNextField(ClrDataAccess* dac,
                       CLRDATA_ENUM* handle,
                       IXCLRDataTypeDefinition** fieldType,
                       ULONG32* fieldFlags,
                       IXCLRDataValue** value,
                       ULONG32 nameBufRetLen,
                       ULONG32* nameLenRet,
                       __out_ecount_part_opt(nameBufRetLen, *nameLenRet) WCHAR nameBufRet[ ],
                       IXCLRDataModule** tokenScopeRet,
                       mdFieldDef* tokenRet)
{
    HRESULT status;
    SplitName* split = FROM_CDENUM(SplitName, *handle);
    if (!split)
    {
        return E_INVALIDARG;
    }

    FieldDesc* fieldDesc;

    while ((fieldDesc = split->m_fieldEnum.Next()))
    {
        if (split->m_syntax != SPLIT_NO_NAME)
        {
            LPCUTF8 fieldName;
            if (FAILED(fieldDesc->GetName_NoThrow(&fieldName)) ||
                (split->Compare(split->m_memberName, fieldName) != 0))
            {
                continue;
            }
        }

        split->m_lastField = fieldDesc;

        if (fieldFlags != NULL)
        {
            *fieldFlags =
                GetTypeFieldValueFlags(fieldDesc->GetFieldTypeHandleThrowing(),
                                       fieldDesc,
                                       split->m_fieldEnum.
                                       IsFieldFromParentClass() ?
CLRDATA_FIELD_IS_INHERITED : 0, false); } if ((nameBufRetLen != 0) || (nameLenRet != NULL)) { LPCUTF8 szFieldName; status = fieldDesc->GetName_NoThrow(&szFieldName); if (status != S_OK) { return status; } status = ConvertUtf8( szFieldName, nameBufRetLen, nameLenRet, nameBufRet); if (status != S_OK) { return status; } } if (tokenScopeRet && !value) { *tokenScopeRet = new (nothrow) ClrDataModule(dac, fieldDesc->GetModule()); if (!*tokenScopeRet) { return E_OUTOFMEMORY; } } if (tokenRet) { *tokenRet = fieldDesc->GetMemberDef(); } if (fieldType) { TypeHandle fieldTypeHandle = fieldDesc->GetFieldTypeHandleThrowing(); *fieldType = new (nothrow) ClrDataTypeDefinition(dac, fieldTypeHandle.GetModule(), fieldTypeHandle.GetMethodTable()->GetCl(), fieldTypeHandle); if (!*fieldType && tokenScopeRet) { delete (ClrDataModule*)*tokenScopeRet; } return *fieldType ? S_OK : E_OUTOFMEMORY; } if (value) { return ClrDataValue:: NewFromFieldDesc(dac, split->m_metaEnum.m_appDomain, split->m_fieldEnum.IsFieldFromParentClass() ? CLRDATA_VALUE_IS_INHERITED : 0, fieldDesc, split->m_objBase, split->m_tlsThread, NULL, value, nameBufRetLen, nameLenRet, nameBufRet, tokenScopeRet, tokenRet); } return S_OK; } return S_FALSE; } HRESULT SplitName::CdNextDomainField(ClrDataAccess* dac, CLRDATA_ENUM* handle, IXCLRDataValue** value) { HRESULT status; SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } if (split->m_metaEnum.m_appDomain) { // Use only the caller-provided app domain. return CdNextField(dac, handle, NULL, NULL, value, 0, NULL, NULL, NULL, NULL); } // // Splay fields across all app domains. // for (;;) { if (!split->m_lastField) { // Need to fetch a field. 
if ((status = CdNextField(dac, handle, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL)) != S_OK) { return status; } split->m_metaEnum.m_domainIter.Init(); } if (split->m_metaEnum.m_domainIter.Next()) { break; } split->m_lastField = NULL; } return ClrDataValue:: NewFromFieldDesc(dac, split->m_metaEnum.m_domainIter.GetDomain(), split->m_fieldEnum.IsFieldFromParentClass() ? CLRDATA_VALUE_IS_INHERITED : 0, split->m_lastField, split->m_objBase, split->m_tlsThread, NULL, value, 0, NULL, NULL, NULL, NULL); } HRESULT SplitName::CdStartType(__in_opt PCWSTR fullName, ULONG32 nameFlags, Module* mod, AppDomain* appDomain, IXCLRDataAppDomain* pubAppDomain, SplitName** splitRet, CLRDATA_ENUM* handle) { HRESULT status; SplitName* split; *handle = TO_CDENUM(NULL); if ((status = SplitName:: AllocAndSplitString(fullName, SPLIT_TYPE, nameFlags, 0, &split)) != S_OK) { return status; } if ((status = split->m_metaEnum. Start(mod->GetMDImport(), mdtTypeDef, mdTokenNil)) != S_OK) { delete split; return status; } split->m_metaEnum.m_appDomain = appDomain; if (pubAppDomain) { split->m_metaEnum.m_appDomain = ((ClrDataAppDomain*)pubAppDomain)->GetAppDomain(); } split->m_module = mod; *handle = TO_CDENUM(split); if (splitRet) { *splitRet = split; } return S_OK; } HRESULT SplitName::CdNextType(CLRDATA_ENUM* handle, mdTypeDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextTokenByName(split->m_namespaceName, split->m_typeName, split->m_nameFlags, token); } HRESULT SplitName::CdNextDomainType(CLRDATA_ENUM* handle, AppDomain** appDomain, mdTypeDef* token) { SplitName* split = FROM_CDENUM(SplitName, *handle); if (!split) { return E_INVALIDARG; } return split->m_metaEnum. NextDomainTokenByName(split->m_namespaceName, split->m_typeName, split->m_nameFlags, appDomain, token); } //---------------------------------------------------------------------------- // // DacInstanceManager. 
//
// Data retrieved from the target process is cached for two reasons:
//
// 1. It may be necessary to map from the host address back to the target
//    address.  For example, if any code uses a 'this' pointer or
//    takes the address of a field the address has to be translated from
//    host to target. This requires instances to be held as long
//    as they may be referenced.
//
// 2. Data is often referenced multiple times so caching is an important
//    performance advantage.
//
// Ideally we'd like to implement a simple page cache but this is
// complicated by the fact that user minidump memory can have
// arbitrary granularity and also that the member operator (->)
// needs to return a pointer to an object.  That means that all of
// the data for an object must be sequential and cannot be split
// at page boundaries.
//
// Data can also be accessed with different sizes.  For example,
// a base struct can be accessed, then cast to a derived struct and
// accessed again with the larger derived size.  The cache must
// be able to replace data to maintain the largest amount of data
// touched.
//
// We keep track of each access and the recovered memory for it.
// A hash on target address allows quick access to instance data
// by target address.  The data for each access has a header on it
// for bookkeeping purposes, so host address to target address translation
// is just a matter of backing up to the header and pulling the target
// address from it.  Keeping each access separately allows easy
// replacement by larger accesses.
//
//----------------------------------------------------------------------------

DacInstanceManager::DacInstanceManager(void)
    : m_unusedBlock(NULL)
{
    InitEmpty();
}

DacInstanceManager::~DacInstanceManager(void)
{
    // We are stopping debugging in this case, so don't save any block of memory.
    // Otherwise, there will be a memory leak.
    Flush(false);
}

#if defined(DAC_HASHTABLE)

// Insert an instance into the target-address hash. Hash buckets are chains
// of HashInstanceKeyBlock arrays filled from the back (firstElement counts
// down); a fresh block is linked in when the head block is full.
DAC_INSTANCE* DacInstanceManager::Add(DAC_INSTANCE* inst)
{
    // Assert that we don't add NULL instances. This allows us to assert that found instances
    // are not NULL in DacInstanceManager::Find
    _ASSERTE(inst != NULL);

    DWORD nHash = DAC_INSTANCE_HASH(inst->addr);
    HashInstanceKeyBlock* block = m_hash[nHash];

    if (!block || block->firstElement == 0)
    {
        HashInstanceKeyBlock* newBlock;
        if (block)
        {
            newBlock = (HashInstanceKeyBlock*) new (nothrow) BYTE[HASH_INSTANCE_BLOCK_ALLOC_SIZE];
        }
        else
        {
            // We allocate one big memory chunk that has a block for every index of the hash table to
            // improve data locality and reduce the number of allocs. In most cases, a hash bucket will
            // use only one block, so improving data locality across blocks (i.e. keeping the buckets of the
            // hash table together) should help.
            newBlock = (HashInstanceKeyBlock*)
                ClrVirtualAlloc(NULL, HASH_INSTANCE_BLOCK_ALLOC_SIZE*NumItems(m_hash), MEM_COMMIT, PAGE_READWRITE);
        }
        if (!newBlock)
        {
            return NULL;
        }
        if (block)
        {
            // We add the newest block to the start of the list assuming that most accesses are for
            // recently added elements.
            newBlock->next = block;
            m_hash[nHash] = newBlock; // The previously allocated block
            newBlock->firstElement = HASH_INSTANCE_BLOCK_NUM_ELEMENTS;
            block = newBlock;
        }
        else
        {
            // First-time initialization: carve the single big allocation
            // into one head block per hash bucket.
            for (DWORD j = 0; j < NumItems(m_hash); j++)
            {
                m_hash[j] = newBlock;
                newBlock->next = NULL; // The previously allocated block
                newBlock->firstElement = HASH_INSTANCE_BLOCK_NUM_ELEMENTS;
                newBlock = (HashInstanceKeyBlock*) (((BYTE*) newBlock) + HASH_INSTANCE_BLOCK_ALLOC_SIZE);
            }
            block = m_hash[nHash];
        }
    }
    _ASSERTE(block->firstElement > 0);
    block->firstElement--;
    block->instanceKeys[block->firstElement].addr = inst->addr;
    block->instanceKeys[block->firstElement].instance = inst;

    inst->next = NULL;
    return inst;
}
#else //DAC_HASHTABLE

// Insert an instance into the std-map-based hash; chains an existing entry
// behind the new one to preserve Supersede semantics.
DAC_INSTANCE* DacInstanceManager::Add(DAC_INSTANCE* inst)
{
    _ASSERTE(inst != NULL);
#ifdef _DEBUG
    bool isInserted = (m_hash.find(inst->addr) == m_hash.end());
#endif //_DEBUG
    DAC_INSTANCE *(&target) = m_hash[inst->addr];
    _ASSERTE(!isInserted || target == NULL);
    if( target != NULL )
    {
        //This is necessary to preserve the semantics of Supersede, however, it
        //is more or less dead code.
        inst->next = target;
        target = inst;

        //verify descending order
        // NOTE(review): at this point target == inst, so this assert compares
        // inst->size to itself and is vacuous; it presumably meant to compare
        // against the old head (inst->next) — confirm intent.
        _ASSERTE(inst->size >= target->size);
    }
    else
    {
        target = inst;
    }

    return inst;
}
#endif // #if defined(DAC_HASHTABLE)

// Carve a new DAC_INSTANCE (header + 'size' bytes of payload, rounded up to
// DAC_INSTANCE_ALIGN) out of the block list, growing it when necessary.
DAC_INSTANCE* DacInstanceManager::Alloc(TADDR addr, ULONG32 size, DAC_USAGE_TYPE usage)
{
    SUPPORTS_DAC_HOST_ONLY;
    DAC_INSTANCE_BLOCK* block;
    DAC_INSTANCE* inst;
    ULONG32 fullSize;

    static_assert_no_msg(sizeof(DAC_INSTANCE_BLOCK) <= DAC_INSTANCE_ALIGN);
    static_assert_no_msg((sizeof(DAC_INSTANCE) & (DAC_INSTANCE_ALIGN - 1)) == 0);

    //
    // All allocated instances must be kept alive as long
    // as anybody may have a host pointer for one of them.
    // This means that we cannot delete an arbitrary instance
    // unless we are sure no pointers exist, which currently
    // is not possible to determine, thus we just hold everything
    // until a Flush.  This greatly simplifies instance allocation
    // as we can then just sweep through large blocks rather
    // than having to use a real allocator.  The only
    // complication is that we need to keep all instance
    // data aligned.  We have guaranteed that the header will
    // preserve alignment of the data following if the header
    // is aligned, so as long as we round up all allocations
    // to a multiple of the alignment size everything just works.
    //

    fullSize = (size + DAC_INSTANCE_ALIGN - 1) & ~(DAC_INSTANCE_ALIGN - 1);
    _ASSERTE(fullSize && fullSize <= 0xffffffff - 2 * sizeof(*inst));
    fullSize += sizeof(*inst);

    //
    // Check for an existing block with space.
    //

    for (block = m_blocks; block; block = block->next)
    {
        if (fullSize <= block->bytesFree)
        {
            break;
        }
    }

    if (!block)
    {
        //
        // No existing block has enough space, so allocate a new
        // one if necessary and link it in.  We know we're allocating large
        // blocks so directly VirtualAlloc.  We save one block through a
        // flush so that we spend less time allocating/deallocating.
        //

        ULONG32 blockSize = fullSize + DAC_INSTANCE_ALIGN;
        if (blockSize < DAC_INSTANCE_BLOCK_ALLOCATION)
        {
            blockSize = DAC_INSTANCE_BLOCK_ALLOCATION;
        }

        // If we have a saved block and it's large enough, use it.
        block = m_unusedBlock;
        if ((block != NULL) &&
            ((block->bytesUsed + block->bytesFree) >= blockSize))
        {
            m_unusedBlock = NULL;

            // Right now, we're locked to DAC_INSTANCE_BLOCK_ALLOCATION but
            // that might change in the future if we decide to do something
            // else with the size guarantee in code:DacInstanceManager::FreeAllBlocks
            blockSize = block->bytesUsed + block->bytesFree;
        }
        else
        {
            block = (DAC_INSTANCE_BLOCK*)
                ClrVirtualAlloc(NULL, blockSize, MEM_COMMIT, PAGE_READWRITE);
        }

        if (!block)
        {
            return NULL;
        }

        // Keep the first aligned unit for the block header.
        block->bytesUsed = DAC_INSTANCE_ALIGN;
        block->bytesFree = blockSize - DAC_INSTANCE_ALIGN;

        block->next = m_blocks;
        m_blocks = block;

        m_blockMemUsage += blockSize;
    }

    inst = (DAC_INSTANCE*)((PBYTE)block + block->bytesUsed);
    block->bytesUsed += fullSize;
    _ASSERTE(block->bytesFree >= fullSize);
    block->bytesFree -= fullSize;

    inst->next = NULL;
    inst->addr = addr;
    inst->size = size;
    inst->sig = DAC_INSTANCE_SIG;
    inst->usage = usage;
    inst->enumMem = 0;
    inst->MDEnumed = 0;

    m_numInst++;
    m_instMemUsage += fullSize;
    return inst;
}

// Undo the most recent Alloc on a block (error-path cleanup). The instance
// must be the last one carved from its block or the call is a no-op.
void DacInstanceManager::ReturnAlloc(DAC_INSTANCE* inst)
{
    SUPPORTS_DAC_HOST_ONLY;
    DAC_INSTANCE_BLOCK* block;
    DAC_INSTANCE_BLOCK * pPrevBlock;
    ULONG32 fullSize;

    //
    // This special routine handles cleanup in
    // cases where an instance has been allocated
    // but must be returned due to a following error.
    // The given instance must be the last instance
    // in an existing block.
    //

    fullSize = ((inst->size + DAC_INSTANCE_ALIGN - 1) & ~(DAC_INSTANCE_ALIGN - 1)) + sizeof(*inst);

    pPrevBlock = NULL;

    for (block = m_blocks; block; pPrevBlock = block, block = block->next)
    {
        if ((PBYTE)inst == (PBYTE)block + (block->bytesUsed - fullSize))
        {
            break;
        }
    }

    if (!block)
    {
        return;
    }

    block->bytesUsed -= fullSize;
    block->bytesFree += fullSize;
    m_numInst--;
    m_instMemUsage -= fullSize;

    // If the block is empty after returning the specified instance, that means this block was newly created
    // when this instance was allocated.  We have seen cases where we are asked to allocate a
    // large chunk of memory only to fail to read the memory from a dump later on, i.e. when both the target
    // address and the size are invalid.  If we keep the allocation, we'll grow the VM size unnecessarily.
    // Thus, release a block if it's empty and if it's not the default size (to avoid thrashing memory).
    // See Dev10 Bug 812112 for more information.
    if ((block->bytesUsed == DAC_INSTANCE_ALIGN) &&
        ((block->bytesFree + block->bytesUsed) != DAC_INSTANCE_BLOCK_ALLOCATION))
    {
        // The empty block is at the beginning of the list.
        if (pPrevBlock == NULL)
        {
            m_blocks = block->next;
        }
        else
        {
            _ASSERTE(pPrevBlock->next == block);
            pPrevBlock->next = block->next;
        }
        ClrVirtualFree(block, 0, MEM_RELEASE);
    }
}

#if defined(DAC_HASHTABLE)

// Look up a cached instance by target address; returns NULL if absent.
DAC_INSTANCE* DacInstanceManager::Find(TADDR addr)
{
#if defined(DAC_MEASURE_PERF)
    unsigned _int64 nStart, nEnd;
    g_nFindCalls++;
    nStart = GetCycleCount();
#endif // #if defined(DAC_MEASURE_PERF)

    HashInstanceKeyBlock* block = m_hash[DAC_INSTANCE_HASH(addr)];

#if defined(DAC_MEASURE_PERF)
    nEnd = GetCycleCount();
    g_nFindHashTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)

    while (block)
    {
        DWORD nIndex = block->firstElement;
        for (; nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; nIndex++)
        {
            if (block->instanceKeys[nIndex].addr == addr)
            {
#if defined(DAC_MEASURE_PERF)
                nEnd = GetCycleCount();
                g_nFindHits++;
                g_nFindTotalTime += nEnd - nStart;
                if (g_nStackWalk) g_nFindStackTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)

                DAC_INSTANCE* inst = block->instanceKeys[nIndex].instance;

                // inst should not be NULL even if the address was superseded. We search
                // the entries in the reverse order they were added. So we should have
                // found the superseding entry before this one. (Of course, if a NULL instance
                // has been added, this assert is meaningless. DacInstanceManager::Add
                // asserts that NULL instances aren't added.)
                _ASSERTE(inst != NULL);

                return inst;
            }
        }
        block = block->next;
    }

#if defined(DAC_MEASURE_PERF)
    nEnd = GetCycleCount();
    g_nFindFails++;
    g_nFindTotalTime += nEnd - nStart;
    if (g_nStackWalk) g_nFindStackTotalTime += nEnd - nStart;
#endif // #if defined(DAC_MEASURE_PERF)

    return NULL;
}
#else //DAC_HASHTABLE

// Look up a cached instance by target address; returns NULL if absent.
DAC_INSTANCE* DacInstanceManager::Find(TADDR addr)
{
    DacInstanceHashIterator iter = m_hash.find(addr);
    if( iter == m_hash.end() )
    {
        return NULL;
    }
    else
    {
        return iter->second;
    }
}
#endif // if defined(DAC_HASHTABLE)

// Write a cached instance's payload back to the target process.
HRESULT DacInstanceManager::Write(DAC_INSTANCE* inst, bool throwEx)
{
    HRESULT status;

    if (inst->usage == DAC_VPTR)
    {
        // Skip over the host-side vtable pointer when
        // writing back.
        status = DacWriteAll(inst->addr + sizeof(TADDR),
                             (PBYTE)(inst + 1) + sizeof(PVOID),
                             inst->size - sizeof(TADDR),
                             throwEx);
    }
    else
    {
        // Write the whole instance back.
        status = DacWriteAll(inst->addr, inst + 1, inst->size, throwEx);
    }

    return status;
}

#if defined(DAC_HASHTABLE)

// Remove an instance from the hash without freeing it (host pointers may
// still reference it) and queue it for cleanup at the next Flush.
void DacInstanceManager::Supersede(DAC_INSTANCE* inst)
{
    _ASSERTE(inst != NULL);

    //
    // This instance has been superseded by a larger
    // one and so must be removed from the hash.  However,
    // code may be holding the instance pointer so it
    // can't just be deleted.  Put it on a list for
    // later cleanup.
    //

    HashInstanceKeyBlock* block = m_hash[DAC_INSTANCE_HASH(inst->addr)];
    while (block)
    {
        DWORD nIndex = block->firstElement;
        for (; nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; nIndex++)
        {
            if (block->instanceKeys[nIndex].instance == inst)
            {
                block->instanceKeys[nIndex].instance = NULL;
                break;
            }
        }
        if (nIndex < HASH_INSTANCE_BLOCK_NUM_ELEMENTS)
        {
            // Found and cleared in this block; stop scanning the chain.
            break;
        }
        block = block->next;
    }

    AddSuperseded(inst);
}
#else //DAC_HASHTABLE

// Remove an instance from the hash without freeing it (host pointers may
// still reference it) and queue it for cleanup at the next Flush.
void DacInstanceManager::Supersede(DAC_INSTANCE* inst)
{
    _ASSERTE(inst != NULL);

    //
    // This instance has been superseded by a larger
    // one and so must be removed from the hash.  However,
    // code may be holding the instance pointer so it
    // can't just be deleted.  Put it on a list for
    // later cleanup.
    //

    DacInstanceHashIterator iter = m_hash.find(inst->addr);
    if( iter == m_hash.end() )
        return;

    DAC_INSTANCE** bucket = &(iter->second);
    DAC_INSTANCE* cur = *bucket;
    DAC_INSTANCE* prev = NULL;
    //walk through the chain looking for this particular instance
    while (cur)
    {
        if (cur == inst)
        {
            if (!prev)
            {
                *bucket = inst->next;
            }
            else
            {
                prev->next = inst->next;
            }
            break;
        }
        prev = cur;
        cur = cur->next;
    }

    AddSuperseded(inst);
}
#endif // if defined(DAC_HASHTABLE)

// This is the default Flush() called when the DAC cache is invalidated,
// e.g. when we continue the debuggee process.  In this case, we want to
// save one block of memory to avoid thrashing.  See the usage of m_unusedBlock
// for more information.
void DacInstanceManager::Flush(void)
{
    Flush(true);
}

// Release all cached instances and reset the hash. When fSaveBlock is true
// one default-sized block is retained (in m_unusedBlock) for reuse.
void DacInstanceManager::Flush(bool fSaveBlock)
{
    SUPPORTS_DAC_HOST_ONLY;

    //
    // All allocated memory is in the block
    // list, so just free the blocks and
    // forget all the internal pointers.
    //

    for (;;)
    {
        FreeAllBlocks(fSaveBlock);

        DAC_INSTANCE_PUSH* push = m_instPushed;
        if (!push)
        {
            break;
        }

        // Pop a pushed block-list snapshot and free it on the next pass.
        m_instPushed = push->next;
        m_blocks = push->blocks;
        delete push;
    }

    // If we are not saving any memory blocks, then clear the saved buffer block (if any) as well.
    if (!fSaveBlock)
    {
        if (m_unusedBlock != NULL)
        {
            ClrVirtualFree(m_unusedBlock, 0, MEM_RELEASE);
            m_unusedBlock = NULL;
        }
    }

#if defined(DAC_HASHTABLE)
    for (int i = NumItems(m_hash) - 1; i >= 0; i--)
    {
        HashInstanceKeyBlock* block = m_hash[i];
        HashInstanceKeyBlock* next;
        while (block)
        {
            next = block->next;
            if (next)
            {
                // Overflow blocks were allocated with new[] in Add.
                delete [] block;
            }
            else if (i == 0)
            {
                // The last block of bucket 0 is the head of the single big
                // ClrVirtualAlloc chunk that backs every bucket's head block.
                ClrVirtualFree(block, 0, MEM_RELEASE);
            }
            block = next;
        }
    }
#else //DAC_HASHTABLE
    m_hash.clear();
#endif //DAC_HASHTABLE

    InitEmpty();
}

#if defined(DAC_HASHTABLE)

// Reset the enumMem marker on every cached (and superseded) instance so a
// new memory-region enumeration can start fresh.
void DacInstanceManager::ClearEnumMemMarker(void)
{
    ULONG i;
    DAC_INSTANCE* inst;

    for (i = 0; i < NumItems(m_hash); i++)
    {
        HashInstanceKeyBlock* block = m_hash[i];
        while (block)
        {
            DWORD j;
            for (j = block->firstElement; j < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; j++)
            {
                inst = block->instanceKeys[j].instance;
                if (inst != NULL)
                {
                    inst->enumMem = 0;
                }
            }
            block = block->next;
        }
    }
    for (inst = m_superseded; inst; inst = inst->next)
    {
        inst->enumMem = 0;
    }
}
#else //DAC_HASHTABLE

// Reset the enumMem marker on every cached (and superseded) instance so a
// new memory-region enumeration can start fresh.
void DacInstanceManager::ClearEnumMemMarker(void)
{
    ULONG i;
    DAC_INSTANCE* inst;

    DacInstanceHashIterator end = m_hash.end();
    /* REVISIT_TODO Fri 10/20/2006
     * This might have an issue, since it might miss chained entries off of
     * ->next. However, ->next is going away, and for all intents and
     * purposes, this never happens.
     */
    for( DacInstanceHashIterator cur = m_hash.begin(); cur != end; ++cur )
    {
        cur->second->enumMem = 0;
    }

    for (inst = m_superseded; inst; inst = inst->next)
    {
        inst->enumMem = 0;
    }
}
#endif // if defined(DAC_HASHTABLE)

#if defined(DAC_HASHTABLE)
//
//
// Iterating through all of the hash entry and report the memory
// instance to minidump
//
// This function returns the total number of bytes that it reported.
// // UINT DacInstanceManager::DumpAllInstances( ICLRDataEnumMemoryRegionsCallback *pCallBack) // memory report call back { ULONG i; DAC_INSTANCE* inst; UINT cbTotal = 0; #if defined(DAC_MEASURE_PERF) FILE* fp = fopen("c:\\dumpLog.txt", "a"); int total = 0; #endif // #if defined(DAC_MEASURE_PERF) for (i = 0; i < NumItems(m_hash); i++) { #if defined(DAC_MEASURE_PERF) int numInBucket = 0; #endif // #if defined(DAC_MEASURE_PERF) HashInstanceKeyBlock* block = m_hash[i]; while (block) { DWORD j; for (j = block->firstElement; j < HASH_INSTANCE_BLOCK_NUM_ELEMENTS; j++) { inst = block->instanceKeys[j].instance; // Only report those we intended to. // So far, only metadata is excluded! // if (inst && inst->noReport == 0) { cbTotal += inst->size; HRESULT hr = pCallBack->EnumMemoryRegion(TO_CDADDR(inst->addr), inst->size); if (hr == COR_E_OPERATIONCANCELED) { ThrowHR(hr); } } #if defined(DAC_MEASURE_PERF) if (inst) { numInBucket++; } #endif // #if defined(DAC_MEASURE_PERF) } block = block->next; } #if defined(DAC_MEASURE_PERF) fprintf(fp, "%4d: %4d%s", i, numInBucket, (i+1)%5? "; " : "\n"); total += numInBucket; #endif // #if defined(DAC_MEASURE_PERF) } #if defined(DAC_MEASURE_PERF) fprintf(fp, "\n\nTotal entries: %d\n\n", total); fclose(fp); #endif // #if defined(DAC_MEASURE_PERF) return cbTotal; } #else //DAC_HASHTABLE // // // Iterating through all of the hash entry and report the memory // instance to minidump // // This function returns the total number of bytes that it reported. 
// // UINT DacInstanceManager::DumpAllInstances( ICLRDataEnumMemoryRegionsCallback *pCallBack) // memory report call back { SUPPORTS_DAC_HOST_ONLY; DAC_INSTANCE* inst; UINT cbTotal = 0; #if defined(DAC_MEASURE_PERF) FILE* fp = fopen("c:\\dumpLog.txt", "a"); #endif // #if defined(DAC_MEASURE_PERF) #if defined(DAC_MEASURE_PERF) int numInBucket = 0; #endif // #if defined(DAC_MEASURE_PERF) DacInstanceHashIterator end = m_hash.end(); for (DacInstanceHashIterator cur = m_hash.begin(); end != cur; ++cur) { inst = cur->second; // Only report those we intended to. // So far, only metadata is excluded! // if (inst->noReport == 0) { cbTotal += inst->size; HRESULT hr = pCallBack->EnumMemoryRegion(TO_CDADDR(inst->addr), inst->size); if (hr == COR_E_OPERATIONCANCELED) { ThrowHR(hr); } } #if defined(DAC_MEASURE_PERF) numInBucket++; #endif // #if defined(DAC_MEASURE_PERF) } #if defined(DAC_MEASURE_PERF) fprintf(fp, "\n\nTotal entries: %d\n\n", numInBucket); fclose(fp); #endif // #if defined(DAC_MEASURE_PERF) return cbTotal; } #endif // if defined(DAC_HASHTABLE) DAC_INSTANCE_BLOCK* DacInstanceManager::FindInstanceBlock(DAC_INSTANCE* inst) { for (DAC_INSTANCE_BLOCK* block = m_blocks; block; block = block->next) { if ((PBYTE)inst >= (PBYTE)block && (PBYTE)inst < (PBYTE)block + block->bytesUsed) { return block; } } return NULL; } // If fSaveBlock is false, free all blocks of allocated memory. Otherwise, // free all blocks except the one we save to avoid thrashing memory. // Callers very frequently flush repeatedly with little memory needed in DAC // so this avoids wasteful repeated allocations/deallocations. // There is a very unlikely case that we'll have allocated an extremely large // block; if this is the only block we will save none since this block will // remain allocated. 
void DacInstanceManager::FreeAllBlocks(bool fSaveBlock)
{
    DAC_INSTANCE_BLOCK* block;

    while ((block = m_blocks))
    {
        m_blocks = block->next;

        // If we haven't saved our single block yet and this block is the default size
        // then we will save it instead of freeing it.  This avoids saving an unnecessarily large
        // memory block.
        // Do *NOT* trash the byte counts.  code:DacInstanceManager::Alloc
        // depends on them being correct when checking to see if a block is large enough.
        if (fSaveBlock &&
            (m_unusedBlock == NULL) &&
            ((block->bytesFree + block->bytesUsed) == DAC_INSTANCE_BLOCK_ALLOCATION))
        {
            // Just to avoid confusion, since we're keeping it around.
            block->next = NULL;
            m_unusedBlock = block;
        }
        else
        {
            ClrVirtualFree(block, 0, MEM_RELEASE);
        }
    }
}

//----------------------------------------------------------------------------
//
// DacStreamManager.
//
//----------------------------------------------------------------------------

#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

namespace serialization { namespace bin {

  //========================================================================
  // Support functions for binary serialization of simple types to a buffer:
  // - raw_size() returns the size in bytes of the binary representation
  //              of a value.
  // - raw_serialize() copies the binary representation of a value into a
  //              buffer.
  // - raw_deserialize() generates a value from its binary representation
  //              in a buffer.
  // Beyond simple types the APIs below support SString instances. SStrings
  // are stored as UTF8 strings.
//======================================================================== static const size_t ErrOverflow = (size_t)(-1); #ifndef FEATURE_PAL // Template class is_blittable template <typename _Ty, typename Enable = void> struct is_blittable : std::false_type { // determines whether _Ty is blittable }; template <typename _Ty> struct is_blittable<_Ty, typename std::enable_if<std::is_arithmetic<_Ty>::value>::type> : std::true_type { // determines whether _Ty is blittable }; // allow types to declare themselves blittable by including a static bool // member "is_blittable". template <typename _Ty> struct is_blittable<_Ty, typename std::enable_if<_Ty::is_blittable>::type> : std::true_type { // determines whether _Ty is blittable }; //======================================================================== // serialization::bin::Traits<T> enables binary serialization and // deserialization of instances of T. //======================================================================== // // General specialization for non-blittable types - must be overridden // for each specific non-blittable type. // template <typename T, typename Enable = void> class Traits { public: static FORCEINLINE size_t raw_size(const T & val) { static_assert(false, "Non-blittable types need explicit specializations"); } }; // // General type trait supporting serialization/deserialization of blittable // type arguments (as defined by the is_blittable<> type traits above). // template <typename T> class Traits<T, typename std::enable_if<is_blittable<T>::value>::type> { #else // FEATURE_PAL template <typename T> class Traits { #endif // !FEATURE_PAL public: // // raw_size() returns the size in bytes of the binary representation of a // value. // static FORCEINLINE size_t raw_size(const T & val) { return sizeof(T); } // // raw_serialize() copies the binary representation of a value into a // "dest" buffer that has "destSize" bytes available. 
// Returns raw_size(val), or ErrOverflow if the buffer does not have // enough space to accommodate "val". // static FORCEINLINE size_t raw_serialize(BYTE* dest, size_t destSize, const T & val) { size_t cnt = raw_size(val); if (destSize < cnt) { return ErrOverflow; } memcpy_s(dest, destSize, &val, cnt); return cnt; } // // raw_deserialize() generates a value "val" from its binary // representation in a buffer "src". // Returns raw_size(val), or ErrOverflow if the buffer does not have // enough space to accommodate "val". // static FORCEINLINE size_t raw_deserialize(T & val, const BYTE* src, size_t srcSize) { size_t cnt = raw_size(*(T*)src); if (srcSize < cnt) { return ErrOverflow; } memcpy_s(&val, cnt, src, cnt); return cnt; } }; // // Specialization for UTF8 strings // template<> class Traits<LPCUTF8> { public: static FORCEINLINE size_t raw_size(const LPCUTF8 & val) { return strlen(val) + 1; } static FORCEINLINE size_t raw_serialize(BYTE* dest, size_t destSize, const LPCUTF8 & val) { size_t cnt = raw_size(val); if (destSize < cnt) { return ErrOverflow; } memcpy_s(dest, destSize, &val, cnt); return cnt; } static FORCEINLINE size_t raw_deserialize(LPCUTF8 & val, const BYTE* src, size_t srcSize) { size_t cnt = strnlen((LPCUTF8)src, srcSize) + 1; // assert we found a NULL terminated string at "src" if (srcSize < cnt) { return ErrOverflow; } // we won't allocate another buffer for this string val = (LPCUTF8)src; return cnt; } }; // // Specialization for SString. // SString serialization/deserialization is performed to/from a UTF8 // string. // template<> class Traits<SString> { public: static FORCEINLINE size_t raw_size(const SString & val) { StackSString s; val.ConvertToUTF8(s); // make sure to include the NULL terminator return s.GetCount() + 1; } static FORCEINLINE size_t raw_serialize(BYTE* dest, size_t destSize, const SString & val) { // instead of calling raw_size() we inline it here, so we can reuse // the UTF8 string obtained below as an argument to memcpy. 
        StackSString s;
        val.ConvertToUTF8(s);
        // make sure to include the NULL terminator
        size_t cnt = s.GetCount() + 1;

        if (destSize < cnt)
        {
            return ErrOverflow;
        }

        memcpy_s(dest, destSize, s.GetUTF8NoConvert(), cnt);

        return cnt;
    }

    static FORCEINLINE size_t
    raw_deserialize(SString & val, const BYTE* src, size_t srcSize)
    {
        size_t cnt = strnlen((LPCUTF8)src, srcSize) + 1;

        // assert we found a NULL terminated string at "src"
        if (srcSize < cnt)
        {
            return ErrOverflow;
        }

        // a literal SString avoids a new allocation + copy
        SString sUtf8(SString::Utf8Literal, (LPCUTF8) src);
        sUtf8.ConvertToUnicode(val);

        return cnt;
    }

};

#ifndef FEATURE_PAL
//
// Specialization for SString-derived classes (like SStrings)
//
template<typename T>
class Traits<T, typename std::enable_if<std::is_base_of<SString, T>::value>::type>
    : public Traits<SString>
{
};
#endif // !FEATURE_PAL

//
// Convenience functions to allow argument type deduction
//
template <typename T>
FORCEINLINE size_t raw_size(const T & val)
{
    return Traits<T>::raw_size(val);
}

template <typename T>
FORCEINLINE size_t raw_serialize(BYTE* dest, size_t destSize, const T & val)
{
    return Traits<T>::raw_serialize(dest, destSize, val);
}

template <typename T>
FORCEINLINE size_t raw_deserialize(T & val, const BYTE* src, size_t srcSize)
{
    return Traits<T>::raw_deserialize(val, src, srcSize);
}


// State of a stream buffer; once a write/read overflows, the buffer latches
// into an unrecoverable state and all further operations become no-ops.
enum StreamBuffState
{
    sbsOK,
    sbsUnrecoverable,
    sbsOOM = sbsUnrecoverable,
};

//
// OStreamBuff - Manages writing to an output buffer
//
class OStreamBuff
{
public:
    OStreamBuff(BYTE * _buff, size_t _buffsize)
        : buffsize(_buffsize)
        , buff(_buff)
        , crt(0)
        , sbs(sbsOK)
    { }

    // Serializes "val" at the current offset; on overflow, latches the
    // error state so subsequent writes are silently skipped.
    template <typename T>
    OStreamBuff& operator << (const T & val)
    {
        if (sbs >= sbsUnrecoverable)
            return *this;

        size_t cnt = raw_serialize(buff+crt, buffsize-crt, val);
        if (cnt == ErrOverflow)
        {
            sbs = sbsOOM;
        }
        else
        {
            crt += cnt;
        }

        return *this;
    }

    // Number of bytes written so far.
    inline size_t GetPos() const
    {
        return crt;
    }

    // TRUE if the stream has hit an unrecoverable error.
    inline BOOL operator!() const
    {
        return sbs >= sbsUnrecoverable;
    }

    inline StreamBuffState State() const
    {
        return sbs;
    }

private:
    size_t buffsize;     // size of buffer
    BYTE*  buff;         // buffer to stream to
    size_t crt;          // current offset in buffer
    StreamBuffState sbs; // current state
};

//
// IStreamBuff - Manages reading from an input buffer
//
class IStreamBuff
{
public:
    IStreamBuff(const BYTE* _buff, size_t _buffsize)
        : buffsize(_buffsize)
        , buff(_buff)
        , crt(0)
        , sbs(sbsOK)
    { }

    // Deserializes into "val" from the current offset; on underflow,
    // latches the error state so subsequent reads are silently skipped.
    template <typename T>
    IStreamBuff& operator >> (T & val)
    {
        if (sbs >= sbsUnrecoverable)
            return *this;

        size_t cnt = raw_deserialize(val, buff+crt, buffsize-crt);
        if (cnt == ErrOverflow)
        {
            sbs = sbsOOM;
        }
        else
        {
            crt += cnt;
        }

        return *this;
    }

    // Number of bytes consumed so far.
    inline size_t GetPos() const
    {
        return crt;
    }

    // TRUE if the stream has hit an unrecoverable error.
    inline BOOL operator!() const
    {
        return sbs >= sbsUnrecoverable;
    }

    inline StreamBuffState State() const
    {
        return sbs;
    }

private:
    size_t buffsize;     // size of buffer
    const BYTE * buff;   // buffer to read from
    size_t crt;          // current offset in buffer
    StreamBuffState sbs; // current state
};

} } // namespace serialization::bin

using serialization::bin::StreamBuffState;
using serialization::bin::IStreamBuff;
using serialization::bin::OStreamBuff;

// Callback function type used by DacStreamManager to coordinate
// amount of available memory between multiple streamable data
// structures (e.g. DacEENamesStreamable)
typedef bool (*Reserve_Fnptr)(DWORD size, void * writeState);

//
// DacEENamesStreamable
//   Stores EE struct* -> Name mappings and streams them to a
//   streambuf when asked
//
class DacEENamesStreamable
{
private:
    // the hash map storing the interesting mappings of EE* -> Names
    MapSHash< TADDR, SString,
              NoRemoveSHashTraits <
                  NonDacAwareSHashTraits< MapSHashTraits <TADDR, SString> >
              > > m_hash;

    Reserve_Fnptr m_reserveFn;  // owner callback used to reserve buffer space
    void         *m_writeState; // opaque state passed back to m_reserveFn

private:
    // signature value in the header in stream
    static const DWORD sig = 0x614e4545; // "EENa" - EE Name

    // header in stream
    struct StreamHeader
    {
        DWORD sig; // 0x614e4545 == "EENa"
        DWORD cnt; // count of entries

        static const bool is_blittable = true;
    };

public:
    DacEENamesStreamable()
        : m_reserveFn(NULL)
        , m_writeState(NULL)
    {}

    // Ensures the instance is ready for caching data and later writing
    // its map entries to an OStreamBuff.
    bool PrepareStreamForWriting(Reserve_Fnptr pfn, void * writeState)
    {
        _ASSERTE(pfn != NULL && writeState != NULL);
        m_reserveFn = pfn;
        m_writeState = writeState;

        DWORD size = (DWORD) sizeof(StreamHeader);

        // notify owner to reserve space for a StreamHeader
        return m_reserveFn(size, m_writeState);
    }

    // Adds a new mapping from an EE struct pointer (e.g. MethodDesc*) to
    // its name.  Returns false if the owner could not reserve space.
    bool AddEEName(TADDR taEE, const SString & eeName)
    {
        _ASSERTE(m_reserveFn != NULL && m_writeState != NULL);

        // as a micro-optimization convert to Utf8 here as both raw_size and
        // raw_serialize are optimized for Utf8...
        StackSString seeName;
        eeName.ConvertToUTF8(seeName);

        DWORD size = (DWORD)(serialization::bin::raw_size(taEE) +
                             serialization::bin::raw_size(seeName));

        // notify owner of the amount of space needed in the buffer
        if (m_reserveFn(size, m_writeState))
        {
            // if there's still space cache the entry in m_hash
            m_hash.AddOrReplace(KeyValuePair<TADDR, SString>(taEE, seeName));
            return true;
        }
        else
        {
            return false;
        }
    }

    // Finds an EE name from a target address of an EE struct (e.g.
    // MethodDesc*)
    bool FindEEName(TADDR taEE, SString & eeName) const
    {
        return m_hash.Lookup(taEE, &eeName) == TRUE;
    }

    void Clear()
    {
        m_hash.RemoveAll();
    }

    // Writes a header and the hash entries to an OStreamBuff
    HRESULT StreamTo(OStreamBuff &out) const
    {
        StreamHeader hdr;
        hdr.sig = sig;
        hdr.cnt = (DWORD) m_hash.GetCount();

        out << hdr;

        auto end = m_hash.End();
        for (auto cur = m_hash.Begin(); end != cur; ++cur)
        {
            out << cur->Key() << cur->Value();
            // bail as soon as the stream latches an overflow
            if (!out)
                return E_FAIL;
        }

        return S_OK;
    }

    // Reads a header and the hash entries from an IStreamBuff
    HRESULT StreamFrom(IStreamBuff &in)
    {
        StreamHeader hdr;

        in >> hdr; // in >> hdr.sig >> hdr.cnt;

        // reject buffers that don't carry our signature
        if (hdr.sig != sig)
            return E_FAIL;

        for (size_t i = 0; i < hdr.cnt; ++i)
        {
            TADDR taEE;
            SString eeName;
            in >> taEE >> eeName;

            if (!in)
                return E_FAIL;

            m_hash.AddOrReplace(KeyValuePair<TADDR, SString>(taEE, eeName));
        }

        return S_OK;
    }
};

//================================================================================
// This class enables two scenarios:
//   1. When debugging a triage/mini-dump the class is initialized with a valid
//      buffer in taMiniMetaDataBuff. Afterwards one can call MdCacheGetEEName to
//      retrieve the name associated with a MethodDesc*.
//   2. When generating a dump one must follow this sequence:
//      a. Initialize the DacStreamManager passing a valid (if the current
//         debugging target is a triage/mini-dump) or empty buffer (if the
//         current target is a live process, a full or a heap dump)
//      b. Call PrepareStreamsForWriting() before starting enumerating any memory
//      c. Call MdCacheAddEEName() anytime we enumerate an EE structure of interest
//      d. Call EnumStreams() as the last action in the memory enumeration method.
//
class DacStreamManager
{
public:
    enum eReadOrWrite
    {
        eNone, // the stream doesn't exist (target is a live process/full/heap dump)
        eRO,   // the stream exists and we've read it (target is triage/mini-dump)
        eWO,   // the stream doesn't exist but we're creating it
               // (e.g. to save a minidump from the current debugging session)
        eRW    // the stream exists but we're generating another triage/mini-dump
    };

    // signature value in the stream header
    static const DWORD sig = 0x6d727473; // 'strm'

    // header at the start of the minimetadata buffer
    struct StreamsHeader
    {
        DWORD dwSig;        // 0x6d727473 == "strm"
        DWORD dwTotalSize;  // total size in bytes
        DWORD dwCntStreams; // number of streams (currently 1)

        static const bool is_blittable = true;
    };

    DacStreamManager(TADDR miniMetaDataBuffAddress, DWORD miniMetaDataBuffSizeMax)
        : m_MiniMetaDataBuffAddress(miniMetaDataBuffAddress)
        , m_MiniMetaDataBuffSizeMax(miniMetaDataBuffSizeMax)
        , m_rawBuffer(NULL)
        , m_cbAvailBuff(0)
        , m_rw(eNone)
        , m_bStreamsRead(FALSE)
        , m_EENames()
    {
        Initialize();
    }

    ~DacStreamManager()
    {
        if (m_rawBuffer != NULL)
        {
            delete [] m_rawBuffer;
        }
    }

    // Transitions the read/write mode for a (new) dump-generation pass and
    // reserves header space in the buffer.  Returns false if the buffer is
    // too small for even the StreamsHeader.
    bool PrepareStreamsForWriting()
    {
        if (m_rw == eNone)
            m_rw = eWO;
        else if (m_rw == eRO)
            m_rw = eRW;
        else if (m_rw == eRW)
            /* nothing */;
        else // m_rw == eWO
        {
            // this is a second invocation from a possibly live process
            // clean up the map since the callstacks/exceptions may be different
            m_EENames.Clear();
        }

        // update available count based on the header and footer sizes
        if (m_MiniMetaDataBuffSizeMax < sizeof(StreamsHeader))
            return false;

        m_cbAvailBuff = m_MiniMetaDataBuffSizeMax - sizeof(StreamsHeader);

        // update available count based on each stream's initial needs
        if (!m_EENames.PrepareStreamForWriting(&ReserveInBuffer, this))
            return false;

        return true;
    }

    bool MdCacheAddEEName(TADDR taEEStruct, const SString& name)
    {
        // don't cache unless we enabled "W"riting from a target that does not
        // already have a stream yet
        if (m_rw != eWO)
            return false;

        m_EENames.AddEEName(taEEStruct, name);
        return true;
    }

    // Serializes the streams (when writing) and reports the buffer region to
    // the dump enumeration callbacks.
    HRESULT EnumStreams(IN CLRDataEnumMemoryFlags flags)
    {
        _ASSERTE(flags == CLRDATA_ENUM_MEM_MINI || flags == CLRDATA_ENUM_MEM_TRIAGE);
        _ASSERTE(m_rw == eWO || m_rw == eRW);

        DWORD cbWritten = 0;

        if (m_rw == eWO)
        {
            // only dump the stream if it wasn't already present in the target
            DumpAllStreams(&cbWritten);
        }
        else
        {
            cbWritten = m_MiniMetaDataBuffSizeMax;
        }

        DacEnumMemoryRegion(m_MiniMetaDataBuffAddress, cbWritten, false);
        DacUpdateMemoryRegion(m_MiniMetaDataBuffAddress, cbWritten, m_rawBuffer);

        return S_OK;
    }

    // Looks up a cached EE name; lazily reads the streams on first use.
    bool MdCacheGetEEName(TADDR taEEStruct, SString & eeName)
    {
        if (!m_bStreamsRead)
        {
            ReadAllStreams();
        }

        if (m_rw == eNone || m_rw == eWO)
        {
            return false;
        }

        return m_EENames.FindEEName(taEEStruct, eeName);
    }

private:
    // Reads the header from the target buffer and determines the initial
    // read/write mode; always snapshots the raw buffer into m_rawBuffer.
    HRESULT Initialize()
    {
        _ASSERTE(m_rw == eNone);
        _ASSERTE(m_rawBuffer == NULL);

        HRESULT hr = S_OK;

        StreamsHeader hdr;
        DacReadAll(dac_cast<TADDR>(m_MiniMetaDataBuffAddress),
                   &hdr, sizeof(hdr), true);

        // when the DAC looks at a triage dump or minidump generated using
        // a "minimetadata" enabled DAC, buff will point to a serialized
        // representation of a methoddesc->method name hashmap.
        if (hdr.dwSig == sig)
        {
            m_rw = eRO;
            m_MiniMetaDataBuffSizeMax = hdr.dwTotalSize;
            hr = S_OK;
        }
        else
        // when the DAC initializes this for the case where the target is
        // (a) a live process, or (b) a full dump, buff will point to a
        // zero initialized memory region (allocated w/ VirtualAlloc)
        if (hdr.dwSig == 0 && hdr.dwTotalSize == 0 && hdr.dwCntStreams == 0)
        {
            hr = S_OK;
        }
        // otherwise we may have some memory corruption. treat this as
            m_bStreamsRead = TRUE;
            return S_FALSE;
        }

        HRESULT hr = S_OK;

        IStreamBuff in(m_rawBuffer, m_MiniMetaDataBuffSizeMax);

        // read header
        StreamsHeader hdr;
        in >> hdr;
        _ASSERTE(hdr.dwSig == sig);
        _ASSERTE(hdr.dwCntStreams == 1);

        // read EE struct pointer -> EE name map
        m_EENames.Clear();
        hr = m_EENames.StreamFrom(in);

        m_bStreamsRead = TRUE;

        return hr;
    }

    // Reserve_Fnptr callback: carves "size" bytes out of the remaining
    // buffer budget; fails (returns false) once the buffer is exhausted.
    static bool ReserveInBuffer(DWORD size, void * writeState)
    {
        DacStreamManager * pThis = reinterpret_cast<DacStreamManager*>(writeState);
        if (size > pThis->m_cbAvailBuff)
        {
            return false;
        }
        else
        {
            pThis->m_cbAvailBuff -= size;
            return true;
        }
    }

private:
    TADDR m_MiniMetaDataBuffAddress;   // TADDR of the buffer
    DWORD m_MiniMetaDataBuffSizeMax;   // max size of buffer
    BYTE *m_rawBuffer;                 // inproc copy of buffer
    DWORD m_cbAvailBuff;               // available bytes in buffer
    eReadOrWrite m_rw;                 // current read/write mode
    BOOL m_bStreamsRead;               // whether ReadAllStreams has run
    DacEENamesStreamable m_EENames;    // MethodDesc* -> name stream
};

#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

//----------------------------------------------------------------------------
//
// ClrDataAccess.
//
//----------------------------------------------------------------------------

LONG ClrDataAccess::s_procInit;

ClrDataAccess::ClrDataAccess(ICorDebugDataTarget * pTarget, ICLRDataTarget * pLegacyTarget/*=0*/)
{
    SUPPORTS_DAC_HOST_ONLY;     // ctor does no marshalling - don't check with DacCop

    /*
     * Stash the various forms of the new ICorDebugDataTarget interface
     */
    m_pTarget = pTarget;
    m_pTarget->AddRef();

    HRESULT hr;

    hr = m_pTarget->QueryInterface(__uuidof(ICorDebugMutableDataTarget),
                                   (void**)&m_pMutableTarget);

    if (hr != S_OK)
    {
        // Create a target which always fails the write requests with CORDBG_E_TARGET_READONLY
        m_pMutableTarget = new ReadOnlyDataTargetFacade();
        m_pMutableTarget->AddRef();
    }

    /*
     * If we have a legacy target, it means we're providing compatibility for code that used
     * the old ICLRDataTarget interfaces.  There are still a few things (like metadata location,
     * GetImageBase, and VirtualAlloc) that the implementation may use which we haven't superseded
     * in ICorDebugDataTarget, so we still need access to the old target interfaces.
     * Any functionality that does exist in ICorDebugDataTarget is accessed from that interface
     * using the DataTargetAdapter on top of the legacy interface (to unify the calling code).
     * Eventually we may expose all functionality we need using ICorDebug (possibly a private
     * interface for things like VirtualAlloc), at which point we can stop using the legacy interfaces
     * completely (except in the DataTargetAdapter).
     */
    m_pLegacyTarget = NULL;
    m_pLegacyTarget2 = NULL;
    m_pLegacyTarget3 = NULL;
    m_legacyMetaDataLocator = NULL;
    m_target3 = NULL;
    if (pLegacyTarget != NULL)
    {
        m_pLegacyTarget = pLegacyTarget;

        m_pLegacyTarget->AddRef();

        // These QIs are best-effort; the optional interfaces stay NULL if
        // the debugger doesn't implement them.
        m_pLegacyTarget->QueryInterface(__uuidof(ICLRDataTarget2), (void**)&m_pLegacyTarget2);

        m_pLegacyTarget->QueryInterface(__uuidof(ICLRDataTarget3), (void**)&m_pLegacyTarget3);

        if (pLegacyTarget->QueryInterface(__uuidof(ICLRMetadataLocator),
                                          (void**)&m_legacyMetaDataLocator) != S_OK)
        {
            // The debugger doesn't implement IMetadataLocator. Use
            // IXCLRDataTarget3 if that exists.  Otherwise we don't need it.
            pLegacyTarget->QueryInterface(__uuidof(IXCLRDataTarget3),
                                          (void**)&m_target3);
        }
    }

    m_globalBase = 0;
    m_refs = 1;
    m_instanceAge = 0;
    m_debugMode = GetEnvironmentVariableA("MSCORDACWKS_DEBUG", NULL, 0) != 0;

    m_enumMemCb = NULL;
    m_updateMemCb = NULL;
    m_enumMemFlags = (CLRDataEnumMemoryFlags)-1;    // invalid
    m_jitNotificationTable = NULL;
    m_gcNotificationTable = NULL;

#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
    m_streams = NULL;
#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

    // Target consistency checks are disabled by default.
    // See code:ClrDataAccess::SetTargetConsistencyChecks for details.
    m_fEnableTargetConsistencyAsserts = false;

#ifdef _DEBUG
    if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgDACEnableAssert))
    {
        m_fEnableTargetConsistencyAsserts = true;
    }

    // Verification asserts are disabled by default because some debuggers (cdb/windbg) probe likely locations
    // for DAC and having this assert pop up all the time can be annoying.  We let derived classes enable
    // this if they want.  It can also be overridden at run-time with COMPlus_DbgDACAssertOnMismatch,
    // see ClrDataAccess::VerifyDlls for details.
    m_fEnableDllVerificationAsserts = false;
#endif
}

ClrDataAccess::~ClrDataAccess(void)
{
    SUPPORTS_DAC_HOST_ONLY;

#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
    if (m_streams)
    {
        delete m_streams;
    }
#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS

    delete [] m_jitNotificationTable;

    // Release the optional legacy target interfaces obtained in the ctor.
    if (m_pLegacyTarget)
    {
        m_pLegacyTarget->Release();
    }
    if (m_pLegacyTarget2)
    {
        m_pLegacyTarget2->Release();
    }
    if (m_pLegacyTarget3)
    {
        m_pLegacyTarget3->Release();
    }
    if (m_legacyMetaDataLocator)
    {
        m_legacyMetaDataLocator->Release();
    }
    if (m_target3)
    {
        m_target3->Release();
    }
    m_pTarget->Release();
    m_pMutableTarget->Release();
}

STDMETHODIMP
ClrDataAccess::QueryInterface(THIS_
                              IN REFIID interfaceId,
                              OUT PVOID* iface)
{
    void* ifaceRet;

    if (IsEqualIID(interfaceId, IID_IUnknown) ||
        IsEqualIID(interfaceId, __uuidof(IXCLRDataProcess)) ||
        IsEqualIID(interfaceId, __uuidof(IXCLRDataProcess2)))
    {
        ifaceRet = static_cast<IXCLRDataProcess2*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ICLRDataEnumMemoryRegions)))
    {
        ifaceRet = static_cast<ICLRDataEnumMemoryRegions*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface)))
    {
        ifaceRet = static_cast<ISOSDacInterface*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface2)))
    {
        ifaceRet = static_cast<ISOSDacInterface2*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface3)))
    {
        ifaceRet = static_cast<ISOSDacInterface3*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface4)))
    {
        ifaceRet = static_cast<ISOSDacInterface4*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface5)))
    {
        ifaceRet = static_cast<ISOSDacInterface5*>(this);
    }
    else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface6)))
    {
        ifaceRet = static_cast<ISOSDacInterface6*>(this);
    }
    else
    {
        *iface = NULL;
        return E_NOINTERFACE;
    }

    AddRef();
    *iface = ifaceRet;
    return S_OK;
}

STDMETHODIMP_(ULONG)
ClrDataAccess::AddRef(THIS)
{
    return InterlockedIncrement(&m_refs);
}

STDMETHODIMP_(ULONG)
ClrDataAccess::Release(THIS)
{
    SUPPORTS_DAC_HOST_ONLY;
    LONG newRefs = InterlockedDecrement(&m_refs);
    if (newRefs == 0)
    {
        delete this;
    }
    return newRefs;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::Flush(void)
{
    SUPPORTS_DAC_HOST_ONLY;

    //
    // Free MD import objects.
    //
    m_mdImports.Flush();

    // Free instance memory.
    m_instances.Flush();

    // When the host instance cache is flushed we
    // update the instance age count so that
    // all child objects automatically become
    // invalid.  This prevents them from using
    // any pointers they've kept to host instances
    // which are now gone.
    m_instanceAge++;

    return S_OK;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::StartEnumTasks(
    /* [out] */ CLRDATA_ENUM* handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (ThreadStore::s_pThreadStore)
        {
            // The enum handle is simply the next Thread* in the store's list.
            Thread* thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
            *handle = TO_CDENUM(thread);
            status = *handle ? S_OK : S_FALSE;
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumTask(
    /* [in, out] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        if (*handle)
        {
            // Wrap the current thread and advance the handle to the next one.
            Thread* thread = FROM_CDENUM(Thread, *handle);
            *task = new (nothrow) ClrDataTask(this, thread);
            if (*task)
            {
                thread = ThreadStore::GetAllThreadList(thread, 0, 0);
                *handle = TO_CDENUM(thread);
                status = S_OK;
            }
            else
            {
                status = E_OUTOFMEMORY;
            }
        }
        else
        {
            status = S_FALSE;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumTasks(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // Enumerator holds no resources.
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTaskByOSThreadID(
    /* [in] */ ULONG32 osThreadID,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        status = E_INVALIDARG;
        Thread* thread = DacGetThread(osThreadID);
        if (thread != NULL)
        {
            *task = new (nothrow) ClrDataTask(this, thread);
            status = *task ? S_OK : E_OUTOFMEMORY;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTaskByUniqueID(
    /* [in] */ ULONG64 uniqueID,
    /* [out] */ IXCLRDataTask **task)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        Thread* thread = FindClrThreadByTaskId(uniqueID);
        if (thread)
        {
            *task = new (nothrow) ClrDataTask(this, thread);
            status = *task ? S_OK : E_OUTOFMEMORY;
        }
        else
        {
            status = E_INVALIDARG;
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetFlags(
    /* [out] */ ULONG32 *flags)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft - GC check.
        *flags = CLRDATA_PROCESS_DEFAULT;
        status = S_OK;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::IsSameObject(
    /* [in] */ IXCLRDataProcess* process)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // Two ClrDataAccess instances are "the same" when they wrap the
        // same underlying data target.
        status = m_pTarget == ((ClrDataAccess*)process)->m_pTarget ?
            S_OK : S_FALSE;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetManagedObject(
    /* [out] */ IXCLRDataValue **value)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetDesiredExecutionState(
    /* [out] */ ULONG32 *state)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    DAC_LEAVE();
    return status;
}

HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetDesiredExecutionState(
    /* [in] */ ULONG32 state)
{
    HRESULT status;

    DAC_ENTER();

    EX_TRY
    {
        // XXX Microsoft.
status = E_NOTIMPL; } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::GetAddressType( /* [in] */ CLRDATA_ADDRESS address, /* [out] */ CLRDataAddressType* type) { HRESULT status; DAC_ENTER(); EX_TRY { // The only thing that constitutes a failure is some // dac failure while checking things. status = S_OK; TADDR taAddr = CLRDATA_ADDRESS_TO_TADDR(address); if (IsPossibleCodeAddress(taAddr) == S_OK) { if (ExecutionManager::IsManagedCode(taAddr)) { *type = CLRDATA_ADDRESS_MANAGED_METHOD; goto Exit; } if (StubManager::IsStub(taAddr)) { *type = CLRDATA_ADDRESS_RUNTIME_UNMANAGED_STUB; goto Exit; } } *type = CLRDATA_ADDRESS_UNRECOGNIZED; Exit: ; } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::GetRuntimeNameByAddress( /* [in] */ CLRDATA_ADDRESS address, /* [in] */ ULONG32 flags, /* [in] */ ULONG32 bufLen, /* [out] */ ULONG32 *symbolLen, /* [size_is][out] */ __out_ecount_opt(bufLen) WCHAR symbolBuf[ ], /* [out] */ CLRDATA_ADDRESS* displacement) { HRESULT status; DAC_ENTER(); EX_TRY { #ifdef _TARGET_ARM_ address &= ~THUMB_CODE; //workaround for windbg passing in addresses with the THUMB mode bit set #endif status = RawGetMethodName(address, flags, bufLen, symbolLen, symbolBuf, displacement); } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::StartEnumAppDomains( /* [out] */ CLRDATA_ENUM* handle) { HRESULT status; DAC_ENTER(); EX_TRY { AppDomainIterator* iter = new (nothrow) AppDomainIterator(FALSE); if (iter) { *handle = TO_CDENUM(iter); status = S_OK; } else { status = E_OUTOFMEMORY; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, 
&status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::EnumAppDomain( /* [in, out] */ CLRDATA_ENUM* handle, /* [out] */ IXCLRDataAppDomain **appDomain) { HRESULT status; DAC_ENTER(); EX_TRY { AppDomainIterator* iter = FROM_CDENUM(AppDomainIterator, *handle); if (iter->Next()) { *appDomain = new (nothrow) ClrDataAppDomain(this, iter->GetDomain()); status = *appDomain ? S_OK : E_OUTOFMEMORY; } else { status = S_FALSE; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::EndEnumAppDomains( /* [in] */ CLRDATA_ENUM handle) { HRESULT status; DAC_ENTER(); EX_TRY { AppDomainIterator* iter = FROM_CDENUM(AppDomainIterator, handle); delete iter; status = S_OK; } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::GetAppDomainByUniqueID( /* [in] */ ULONG64 uniqueID, /* [out] */ IXCLRDataAppDomain **appDomain) { HRESULT status; DAC_ENTER(); EX_TRY { AppDomainIterator iter(FALSE); status = E_INVALIDARG; while (iter.Next()) { if (iter.GetDomain()->GetId().m_dwId == uniqueID) { *appDomain = new (nothrow) ClrDataAppDomain(this, iter.GetDomain()); status = *appDomain ? 
S_OK : E_OUTOFMEMORY; break; } } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::StartEnumAssemblies( /* [out] */ CLRDATA_ENUM* handle) { HRESULT status; DAC_ENTER(); EX_TRY { ProcessModIter* iter = new (nothrow) ProcessModIter; if (iter) { *handle = TO_CDENUM(iter); status = S_OK; } else { status = E_OUTOFMEMORY; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::EnumAssembly( /* [in, out] */ CLRDATA_ENUM* handle, /* [out] */ IXCLRDataAssembly **assembly) { HRESULT status; DAC_ENTER(); EX_TRY { ProcessModIter* iter = FROM_CDENUM(ProcessModIter, *handle); Assembly* assem; if ((assem = iter->NextAssem())) { *assembly = new (nothrow) ClrDataAssembly(this, assem); status = *assembly ? S_OK : E_OUTOFMEMORY; } else { status = S_FALSE; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::EndEnumAssemblies( /* [in] */ CLRDATA_ENUM handle) { HRESULT status; DAC_ENTER(); EX_TRY { ProcessModIter* iter = FROM_CDENUM(ProcessModIter, handle); delete iter; status = S_OK; } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::StartEnumModules( /* [out] */ CLRDATA_ENUM* handle) { HRESULT status; DAC_ENTER(); EX_TRY { ProcessModIter* iter = new (nothrow) ProcessModIter; if (iter) { *handle = TO_CDENUM(iter); status = S_OK; } else { status = E_OUTOFMEMORY; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT 
STDMETHODCALLTYPE ClrDataAccess::EnumModule( /* [in, out] */ CLRDATA_ENUM* handle, /* [out] */ IXCLRDataModule **mod) { HRESULT status; DAC_ENTER(); EX_TRY { ProcessModIter* iter = FROM_CDENUM(ProcessModIter, *handle); Module* curMod; if ((curMod = iter->NextModule())) { *mod = new (nothrow) ClrDataModule(this, curMod); status = *mod ? S_OK : E_OUTOFMEMORY; } else { status = S_FALSE; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::EndEnumModules( /* [in] */ CLRDATA_ENUM handle) { HRESULT status; DAC_ENTER(); EX_TRY { ProcessModIter* iter = FROM_CDENUM(ProcessModIter, handle); delete iter; status = S_OK; } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::GetModuleByAddress( /* [in] */ CLRDATA_ADDRESS address, /* [out] */ IXCLRDataModule** mod) { HRESULT status; DAC_ENTER(); EX_TRY { ProcessModIter modIter; Module* modDef; while ((modDef = modIter.NextModule())) { TADDR base; ULONG32 length; PEFile* file = modDef->GetFile(); if ((base = PTR_TO_TADDR(file->GetLoadedImageContents(&length)))) { if (TO_CDADDR(base) <= address && TO_CDADDR(base + length) > address) { break; } } if (file->HasNativeImage()) { base = PTR_TO_TADDR(file->GetLoadedNative()->GetBase()); length = file->GetLoadedNative()->GetVirtualSize(); if (TO_CDADDR(base) <= address && TO_CDADDR(base + length) > address) { break; } } } if (modDef) { *mod = new (nothrow) ClrDataModule(this, modDef); status = *mod ? 
S_OK : E_OUTOFMEMORY; } else { status = S_FALSE; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::StartEnumMethodDefinitionsByAddress( /* [in] */ CLRDATA_ADDRESS address, /* [out] */ CLRDATA_ENUM *handle) { HRESULT status; DAC_ENTER(); EX_TRY { ProcessModIter modIter; Module* modDef; while ((modDef = modIter.NextModule())) { TADDR base; ULONG32 length; PEFile* file = modDef->GetFile(); if ((base = PTR_TO_TADDR(file->GetLoadedImageContents(&length)))) { if (TO_CDADDR(base) <= address && TO_CDADDR(base + length) > address) { break; } } } status = EnumMethodDefinitions:: CdStart(modDef, true, address, handle); } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::EnumMethodDefinitionByAddress( /* [out][in] */ CLRDATA_ENUM* handle, /* [out] */ IXCLRDataMethodDefinition **method) { HRESULT status; DAC_ENTER(); EX_TRY { status = EnumMethodDefinitions::CdNext(this, handle, method); } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::EndEnumMethodDefinitionsByAddress( /* [in] */ CLRDATA_ENUM handle) { HRESULT status; DAC_ENTER(); EX_TRY { status = EnumMethodDefinitions::CdEnd(handle); } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::StartEnumMethodInstancesByAddress( /* [in] */ CLRDATA_ADDRESS address, /* [in] */ IXCLRDataAppDomain* appDomain, /* [out] */ CLRDATA_ENUM *handle) { HRESULT status; DAC_ENTER(); EX_TRY { MethodDesc* methodDesc; *handle = 0; status = S_FALSE; TADDR taddr; if( (status = 
TRY_CLRDATA_ADDRESS_TO_TADDR(address, &taddr)) != S_OK )
        {
            goto Exit;
        }
        // Cheap early-out: reject addresses that cannot even be read from
        // the target before asking the execution manager about them.
        if (IsPossibleCodeAddress(taddr) != S_OK)
        {
            goto Exit;
        }
        // Map the address back to its MethodDesc; no MethodDesc means the
        // address is not managed code.
        methodDesc = ExecutionManager::GetCodeMethodDesc(taddr);
        if (!methodDesc)
        {
            // status remains S_FALSE from the initialization above.
            goto Exit;
        }
        status = EnumMethodInstances::CdStart(methodDesc, appDomain, handle);
    Exit: ;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Returns the next method instance for an enumeration started by
// StartEnumMethodInstancesByAddress.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EnumMethodInstanceByAddress(
    /* [out][in] */ CLRDATA_ENUM* handle,
    /* [out] */ IXCLRDataMethodInstance **method)
{
    HRESULT status;

    DAC_ENTER();
    EX_TRY
    {
        status = EnumMethodInstances::CdNext(this, handle, method);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Releases the enumeration state allocated by
// StartEnumMethodInstancesByAddress.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::EndEnumMethodInstancesByAddress(
    /* [in] */ CLRDATA_ENUM handle)
{
    HRESULT status;

    DAC_ENTER();
    EX_TRY
    {
        status = EnumMethodInstances::CdEnd(handle);
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Looks up a data value by raw address.  Flags must be zero; the lookup
// itself is unimplemented and reports E_NOTIMPL under the DAC lock.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetDataByAddress(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [in] */ ULONG32 flags,
    /* [in] */ IXCLRDataAppDomain* appDomain,
    /* [in] */ IXCLRDataTask* tlsTask,
    /* [in] */ ULONG32 bufLen,
    /* [out] */ ULONG32 *nameLen,
    /* [size_is][out] */ __out_ecount_part_opt(bufLen, *nameLen) WCHAR nameBuf[ ],
    /* [out] */ IXCLRDataValue **value,
    /* [out] */ CLRDATA_ADDRESS *displacement)
{
    HRESULT status;

    if (flags != 0)
    {
        return E_INVALIDARG;
    }

    DAC_ENTER();
    EX_TRY
    {
        // XXX Microsoft.
status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Maps a raw exception record to an IXCLRDataExceptionState.  Not
// implemented; reports E_NOTIMPL under the DAC lock.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetExceptionStateByExceptionRecord(
    /* [in] */ EXCEPTION_RECORD64 *record,
    /* [out] */ IXCLRDataExceptionState **exception)
{
    HRESULT status;

    DAC_ENTER();
    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Decodes a DAC notification exception record and invokes the matching
// callback on the supplied notification interface.  All objects handed to
// the callback (module, method instance, exception state) are constructed
// while the DAC lock is held; the callback itself is made only after
// DAC_LEAVE, since external code may do arbitrary things.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::TranslateExceptionRecordToNotification(
    /* [in] */ EXCEPTION_RECORD64 *record,
    /* [in] */ IXCLRDataExceptionNotification *notify)
{
    HRESULT status = E_FAIL;
    ClrDataModule* pubModule = NULL;
    ClrDataMethodInstance* pubMethodInst = NULL;
    ClrDataExceptionState* pubExState = NULL;
    GcEvtArgs pubGcEvtArgs;
    ULONG32 notifyType = 0;
    DWORD catcherNativeOffset = 0;
    TADDR nativeCodeLocation = NULL;

    DAC_ENTER();
    EX_TRY
    {
        //
        // We cannot hold the dac lock while calling
        // out as the external code can do arbitrary things.
        // Instead we make a pass over the exception
        // information and create all necessary objects.
        // We then leave the lock and make the callback.
// TADDR exInfo[EXCEPTION_MAXIMUM_PARAMETERS]; for (UINT i = 0; i < EXCEPTION_MAXIMUM_PARAMETERS; i++) { exInfo[i] = TO_TADDR(record->ExceptionInformation[i]); } notifyType = DACNotify::GetType(exInfo); switch(notifyType) { case DACNotify::MODULE_LOAD_NOTIFICATION: { TADDR modulePtr; if (DACNotify::ParseModuleLoadNotification(exInfo, modulePtr)) { Module* clrModule = PTR_Module(modulePtr); pubModule = new (nothrow) ClrDataModule(this, clrModule); if (pubModule == NULL) { status = E_OUTOFMEMORY; } else { status = S_OK; } } break; } case DACNotify::MODULE_UNLOAD_NOTIFICATION: { TADDR modulePtr; if (DACNotify::ParseModuleUnloadNotification(exInfo, modulePtr)) { Module* clrModule = PTR_Module(modulePtr); pubModule = new (nothrow) ClrDataModule(this, clrModule); if (pubModule == NULL) { status = E_OUTOFMEMORY; } else { status = S_OK; } } break; } case DACNotify::JIT_NOTIFICATION2: { TADDR methodDescPtr; if(DACNotify::ParseJITNotification(exInfo, methodDescPtr, nativeCodeLocation)) { // Try and find the right appdomain MethodDesc* methodDesc = PTR_MethodDesc(methodDescPtr); BaseDomain* baseDomain = methodDesc->GetDomain(); AppDomain* appDomain = NULL; if (baseDomain->IsAppDomain()) { appDomain = PTR_AppDomain(PTR_HOST_TO_TADDR(baseDomain)); } else { // Find a likely domain, because it's the shared domain. AppDomainIterator adi(FALSE); appDomain = adi.GetDomain(); } pubMethodInst = new (nothrow) ClrDataMethodInstance(this, appDomain, methodDesc); if (pubMethodInst == NULL) { status = E_OUTOFMEMORY; } else { status = S_OK; } } break; } case DACNotify::EXCEPTION_NOTIFICATION: { TADDR threadPtr; if (DACNotify::ParseExceptionNotification(exInfo, threadPtr)) { // Translation can only occur at the time of // receipt of the notify exception, so we assume // that the Thread's current exception state // is the state we want. 
status = ClrDataExceptionState:: NewFromThread(this, PTR_Thread(threadPtr), &pubExState, NULL); } break; } case DACNotify::GC_NOTIFICATION: { if (DACNotify::ParseGCNotification(exInfo, pubGcEvtArgs)) { status = S_OK; } break; } case DACNotify::CATCH_ENTER_NOTIFICATION: { TADDR methodDescPtr; if (DACNotify::ParseExceptionCatcherEnterNotification(exInfo, methodDescPtr, catcherNativeOffset)) { // Try and find the right appdomain MethodDesc* methodDesc = PTR_MethodDesc(methodDescPtr); BaseDomain* baseDomain = methodDesc->GetDomain(); AppDomain* appDomain = NULL; if (baseDomain->IsAppDomain()) { appDomain = PTR_AppDomain(PTR_HOST_TO_TADDR(baseDomain)); } else { // Find a likely domain, because it's the shared domain. AppDomainIterator adi(FALSE); appDomain = adi.GetDomain(); } pubMethodInst = new (nothrow) ClrDataMethodInstance(this, appDomain, methodDesc); if (pubMethodInst == NULL) { status = E_OUTOFMEMORY; } else { status = S_OK; } } break; } default: status = E_INVALIDARG; break; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); if (status == S_OK) { IXCLRDataExceptionNotification2* notify2; if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification2), (void**)&notify2) != S_OK) { notify2 = NULL; } IXCLRDataExceptionNotification3* notify3; if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification3), (void**)&notify3) != S_OK) { notify3 = NULL; } IXCLRDataExceptionNotification4* notify4; if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification4), (void**)&notify4) != S_OK) { notify4 = NULL; } IXCLRDataExceptionNotification5* notify5; if (notify->QueryInterface(__uuidof(IXCLRDataExceptionNotification5), (void**)&notify5) != S_OK) { notify5 = NULL; } switch(notifyType) { case DACNotify::MODULE_LOAD_NOTIFICATION: notify->OnModuleLoaded(pubModule); break; case DACNotify::MODULE_UNLOAD_NOTIFICATION: notify->OnModuleUnloaded(pubModule); break; case 
DACNotify::JIT_NOTIFICATION2: notify->OnCodeGenerated(pubMethodInst); if (notify5) { notify5->OnCodeGenerated2(pubMethodInst, TO_CDADDR(nativeCodeLocation)); } break; case DACNotify::EXCEPTION_NOTIFICATION: if (notify2) { notify2->OnException(pubExState); } else { status = E_INVALIDARG; } break; case DACNotify::GC_NOTIFICATION: if (notify3) { notify3->OnGcEvent(pubGcEvtArgs); } break; case DACNotify::CATCH_ENTER_NOTIFICATION: if (notify4) { notify4->ExceptionCatcherEnter(pubMethodInst, catcherNativeOffset); } break; default: // notifyType has already been validated. _ASSERTE(FALSE); break; } if (notify2) { notify2->Release(); } if (notify3) { notify3->Release(); } if (notify4) { notify4->Release(); } if (notify5) { notify5->Release(); } } if (pubModule) { pubModule->Release(); } if (pubMethodInst) { pubMethodInst->Release(); } if (pubExState) { pubExState->Release(); } return status; } HRESULT STDMETHODCALLTYPE ClrDataAccess::CreateMemoryValue( /* [in] */ IXCLRDataAppDomain* appDomain, /* [in] */ IXCLRDataTask* tlsTask, /* [in] */ IXCLRDataTypeInstance* type, /* [in] */ CLRDATA_ADDRESS addr, /* [out] */ IXCLRDataValue** value) { HRESULT status; DAC_ENTER(); EX_TRY { AppDomain* dacDomain; Thread* dacThread; TypeHandle dacType; ULONG32 flags; NativeVarLocation loc; dacDomain = ((ClrDataAppDomain*)appDomain)->GetAppDomain(); if (tlsTask) { dacThread = ((ClrDataTask*)tlsTask)->GetThread(); } else { dacThread = NULL; } dacType = ((ClrDataTypeInstance*)type)->GetTypeHandle(); flags = GetTypeFieldValueFlags(dacType, NULL, 0, false); loc.addr = addr; loc.size = dacType.GetSize(); loc.contextReg = false; *value = new (nothrow) ClrDataValue(this, dacDomain, dacThread, flags, dacType, addr, 1, &loc); status = *value ? 
S_OK : E_OUTOFMEMORY;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Sets type-level notification flags for a module.  Not implemented;
// reports E_NOTIMPL under the DAC lock.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetAllTypeNotifications(
    /* [in] */ IXCLRDataModule* mod,
    /* [in] */ ULONG32 flags)
{
    HRESULT status;

    DAC_ENTER();
    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Applies the given JIT notification flags across the host JIT
// notification table and pushes the updated table to the target process.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetAllCodeNotifications(
    /* [in] */ IXCLRDataModule* mod,
    /* [in] */ ULONG32 flags)
{
    HRESULT status;

    DAC_ENTER();
    EX_TRY
    {
        status = E_FAIL;

        if (!IsValidMethodCodeNotification(flags))
        {
            status = E_INVALIDARG;
        }
        else
        {
            JITNotifications jn(GetHostJitNotificationTable());
            if (!jn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                BOOL changedTable;
                // mod may be NULL; a NULL module address is passed through
                // to SetAllNotifications (presumably meaning "all modules"
                // -- see JITNotifications::SetAllNotifications).
                TADDR modulePtr = mod ?
                    PTR_HOST_TO_TADDR(((ClrDataModule*)mod)->GetModule()) :
                    NULL;

                if (jn.SetAllNotifications(modulePtr, flags, &changedTable))
                {
                    // Only write the table back cross-process when it
                    // actually changed.
                    if (!changedTable ||
                        (changedTable && jn.UpdateOutOfProcTable()))
                    {
                        status = S_OK;
                    }
                }
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Queries per-type notification flags for a batch of type tokens.  Not
// implemented; reports E_NOTIMPL under the DAC lock.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetTypeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdTypeDef tokens[],
    /* [out, size_is(numTokens)] */ ULONG32 flags[])
{
    HRESULT status;

    DAC_ENTER();
    EX_TRY
    {
        // XXX Microsoft.
status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Sets per-type notification flags for a batch of type tokens.  Not
// implemented; reports E_NOTIMPL under the DAC lock.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetTypeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdTypeDef tokens[],
    /* [in, size_is(numTokens)] */ ULONG32 flags[],
    /* [in] */ ULONG32 singleFlags)
{
    HRESULT status;

    DAC_ENTER();
    EX_TRY
    {
        // XXX Microsoft.
        status = E_NOTIMPL;
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Reads the currently requested JIT notification flags for a batch of
// method tokens.  Callers supply either one module for every token
// (singleMod) or a parallel per-token module array (mods) -- exactly one
// of the two must be non-NULL.
HRESULT STDMETHODCALLTYPE
ClrDataAccess::GetCodeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod,
    /* [in, size_is(numTokens)] */ mdMethodDef tokens[],
    /* [out, size_is(numTokens)] */ ULONG32 flags[])
{
    HRESULT status;

    DAC_ENTER();
    EX_TRY
    {
        // flags and tokens are required, and exactly one of mods/singleMod
        // must be provided.
        if ((flags == NULL || tokens == NULL) ||
            (mods == NULL && singleMod == NULL) ||
            (mods != NULL && singleMod != NULL))
        {
            status = E_INVALIDARG;
        }
        else
        {
            JITNotifications jn(GetHostJitNotificationTable());
            if (!jn.IsActive())
            {
                status = E_OUTOFMEMORY;
            }
            else
            {
                TADDR modulePtr = NULL;
                if (singleMod)
                {
                    modulePtr =
                        PTR_HOST_TO_TADDR(((ClrDataModule*)singleMod)->
                                          GetModule());
                }

                for (ULONG32 i = 0; i < numTokens; i++)
                {
                    // Per-token module when no single module was given.
                    if (singleMod == NULL)
                    {
                        modulePtr =
                            PTR_HOST_TO_TADDR(((ClrDataModule*)mods[i])->
                                              GetModule());
                    }

                    USHORT jt = jn.Requested(modulePtr, tokens[i]);
                    flags[i] = jt;
                }

                status = S_OK;
            }
        }
    }
    EX_CATCH
    {
        if (!DacExceptionFilter(GET_EXCEPTION(), this, &status))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    DAC_LEAVE();
    return status;
}

// Writes JIT notification flags for a batch of method tokens; flags may be
// given per token (flags[]) or once for all tokens (singleFlags).
HRESULT STDMETHODCALLTYPE
ClrDataAccess::SetCodeNotifications(
    /* [in] */ ULONG32 numTokens,
    /* [in, size_is(numTokens)] */ IXCLRDataModule* mods[],
    /* [in] */ IXCLRDataModule* singleMod, /*
[in, size_is(numTokens)] */ mdMethodDef tokens[], /* [in, size_is(numTokens)] */ ULONG32 flags[], /* [in] */ ULONG32 singleFlags) { HRESULT status = E_UNEXPECTED; DAC_ENTER(); EX_TRY { if ((tokens == NULL) || (mods == NULL && singleMod == NULL) || (mods != NULL && singleMod != NULL)) { status = E_INVALIDARG; } else { JITNotifications jn(GetHostJitNotificationTable()); if (!jn.IsActive() || numTokens > jn.GetTableSize()) { status = E_OUTOFMEMORY; } else { BOOL changedTable = FALSE; // Are flags valid? if (flags) { for (ULONG32 check = 0; check < numTokens; check++) { if (!IsValidMethodCodeNotification(flags[check])) { status = E_INVALIDARG; goto Exit; } } } else if (!IsValidMethodCodeNotification(singleFlags)) { status = E_INVALIDARG; goto Exit; } TADDR modulePtr = NULL; if (singleMod) { modulePtr = PTR_HOST_TO_TADDR(((ClrDataModule*)singleMod)-> GetModule()); } for (ULONG32 i = 0; i < numTokens; i++) { if (singleMod == NULL) { modulePtr = PTR_HOST_TO_TADDR(((ClrDataModule*)mods[i])-> GetModule()); } USHORT curFlags = jn.Requested(modulePtr, tokens[i]); USHORT setFlags = (USHORT)(flags ? 
flags[i] : singleFlags); if (curFlags != setFlags) { if (!jn.SetNotification(modulePtr, tokens[i], setFlags)) { status = E_FAIL; goto Exit; } changedTable = TRUE; } } if (!changedTable || (changedTable && jn.UpdateOutOfProcTable())) { status = S_OK; } } } Exit: ; } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT ClrDataAccess::GetOtherNotificationFlags( /* [out] */ ULONG32* flags) { HRESULT status; DAC_ENTER(); EX_TRY { *flags = g_dacNotificationFlags; status = S_OK; } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } HRESULT ClrDataAccess::SetOtherNotificationFlags( /* [in] */ ULONG32 flags) { HRESULT status; if ((flags & ~(CLRDATA_NOTIFY_ON_MODULE_LOAD | CLRDATA_NOTIFY_ON_MODULE_UNLOAD | CLRDATA_NOTIFY_ON_EXCEPTION | CLRDATA_NOTIFY_ON_EXCEPTION_CATCH_ENTER)) != 0) { return E_INVALIDARG; } DAC_ENTER(); EX_TRY { g_dacNotificationFlags = flags; status = S_OK; } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } enum { STUB_BUF_FLAGS_START, STUB_BUF_METHOD_JITTED, STUB_BUF_FRAME_PUSHED, STUB_BUF_STUB_MANAGER_PUSHED, STUB_BUF_FLAGS_END, }; union STUB_BUF { CLRDATA_FOLLOW_STUB_BUFFER apiBuf; struct { ULONG64 flags; ULONG64 addr; ULONG64 arg1; } u; }; HRESULT ClrDataAccess::FollowStubStep( /* [in] */ Thread* thread, /* [in] */ ULONG32 inFlags, /* [in] */ TADDR inAddr, /* [in] */ union STUB_BUF* inBuffer, /* [out] */ TADDR* outAddr, /* [out] */ union STUB_BUF* outBuffer, /* [out] */ ULONG32* outFlags) { TraceDestination trace; bool traceDone = false; BYTE* retAddr; T_CONTEXT localContext; REGDISPLAY regDisp; MethodDesc* methodDesc; ZeroMemory(outBuffer, sizeof(*outBuffer)); if (inBuffer) { switch(inBuffer->u.flags) { case STUB_BUF_METHOD_JITTED: if (inAddr != 
GFN_TADDR(DACNotifyCompilationFinished)) { return E_INVALIDARG; } // It's possible that this notification is // for a different method, so double-check // and recycle the notification if necessary. methodDesc = PTR_MethodDesc(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr)); if (methodDesc->HasNativeCode()) { *outAddr = methodDesc->GetNativeCode(); *outFlags = CLRDATA_FOLLOW_STUB_EXIT; return S_OK; } // We didn't end up with native code so try again. trace.InitForUnjittedMethod(methodDesc); traceDone = true; break; case STUB_BUF_FRAME_PUSHED: if (!thread || inAddr != inBuffer->u.addr) { return E_INVALIDARG; } trace.InitForFramePush(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr)); DacGetThreadContext(thread, &localContext); thread->FillRegDisplay(&regDisp, &localContext); if (!thread->GetFrame()-> TraceFrame(thread, TRUE, &trace, &regDisp)) { return E_FAIL; } traceDone = true; break; case STUB_BUF_STUB_MANAGER_PUSHED: if (!thread || inAddr != inBuffer->u.addr || !inBuffer->u.arg1) { return E_INVALIDARG; } trace.InitForManagerPush(CORDB_ADDRESS_TO_TADDR(inBuffer->u.addr), PTR_StubManager(CORDB_ADDRESS_TO_TADDR(inBuffer->u.arg1))); DacGetThreadContext(thread, &localContext); if (!trace.GetStubManager()-> TraceManager(thread, &trace, &localContext, &retAddr)) { return E_FAIL; } traceDone = true; break; default: return E_INVALIDARG; } } if ((!traceDone && !StubManager::TraceStub(inAddr, &trace)) || !StubManager::FollowTrace(&trace)) { return E_NOINTERFACE; } switch(trace.GetTraceType()) { case TRACE_UNMANAGED: case TRACE_MANAGED: // We've hit non-stub code so we're done. *outAddr = trace.GetAddress(); *outFlags = CLRDATA_FOLLOW_STUB_EXIT; break; case TRACE_UNJITTED_METHOD: // The stub causes jitting, so return // the address of the jit-complete routine // so that the real native address can // be picked up once the JIT is done. // One special case is ngen'ed code that // needs the prestub run. 
This results in // an unjitted trace but no jitting will actually // occur since the code is ngen'ed. Detect // this and redirect to the actual code. methodDesc = trace.GetMethodDesc(); if (methodDesc->IsPreImplemented() && !methodDesc->IsPointingToStableNativeCode() && !methodDesc->IsGenericMethodDefinition() && methodDesc->HasNativeCode()) { *outAddr = methodDesc->GetNativeCode(); *outFlags = CLRDATA_FOLLOW_STUB_EXIT; break; } *outAddr = GFN_TADDR(DACNotifyCompilationFinished); outBuffer->u.flags = STUB_BUF_METHOD_JITTED; outBuffer->u.addr = PTR_HOST_TO_TADDR(methodDesc); *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE; break; case TRACE_FRAME_PUSH: if (!thread) { return E_INVALIDARG; } *outAddr = trace.GetAddress(); outBuffer->u.flags = STUB_BUF_FRAME_PUSHED; outBuffer->u.addr = trace.GetAddress(); *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE; break; case TRACE_MGR_PUSH: if (!thread) { return E_INVALIDARG; } *outAddr = trace.GetAddress(); outBuffer->u.flags = STUB_BUF_STUB_MANAGER_PUSHED; outBuffer->u.addr = trace.GetAddress(); outBuffer->u.arg1 = PTR_HOST_TO_TADDR(trace.GetStubManager()); *outFlags = CLRDATA_FOLLOW_STUB_INTERMEDIATE; break; default: return E_INVALIDARG; } return S_OK; } HRESULT STDMETHODCALLTYPE ClrDataAccess::FollowStub( /* [in] */ ULONG32 inFlags, /* [in] */ CLRDATA_ADDRESS inAddr, /* [in] */ CLRDATA_FOLLOW_STUB_BUFFER* _inBuffer, /* [out] */ CLRDATA_ADDRESS* outAddr, /* [out] */ CLRDATA_FOLLOW_STUB_BUFFER* _outBuffer, /* [out] */ ULONG32* outFlags) { return FollowStub2(NULL, inFlags, inAddr, _inBuffer, outAddr, _outBuffer, outFlags); } HRESULT STDMETHODCALLTYPE ClrDataAccess::FollowStub2( /* [in] */ IXCLRDataTask* task, /* [in] */ ULONG32 inFlags, /* [in] */ CLRDATA_ADDRESS _inAddr, /* [in] */ CLRDATA_FOLLOW_STUB_BUFFER* _inBuffer, /* [out] */ CLRDATA_ADDRESS* _outAddr, /* [out] */ CLRDATA_FOLLOW_STUB_BUFFER* _outBuffer, /* [out] */ ULONG32* outFlags) { HRESULT status; if ((inFlags & ~(CLRDATA_FOLLOW_STUB_DEFAULT)) != 0) { return E_INVALIDARG; 
} STUB_BUF* inBuffer = (STUB_BUF*)_inBuffer; STUB_BUF* outBuffer = (STUB_BUF*)_outBuffer; if (inBuffer && (inBuffer->u.flags <= STUB_BUF_FLAGS_START || inBuffer->u.flags >= STUB_BUF_FLAGS_END)) { return E_INVALIDARG; } DAC_ENTER(); EX_TRY { STUB_BUF cycleBuf; TADDR inAddr = TO_TADDR(_inAddr); TADDR outAddr; Thread* thread = task ? ((ClrDataTask*)task)->GetThread() : NULL; ULONG32 loops = 4; for (;;) { if ((status = FollowStubStep(thread, inFlags, inAddr, inBuffer, &outAddr, outBuffer, outFlags)) != S_OK) { break; } // Some stub tracing just requests further iterations // of processing, so detect that case and loop. if (outAddr != inAddr) { // We can make forward progress, we're done. *_outAddr = TO_CDADDR(outAddr); break; } // We need more processing. As a protection // against infinite loops in corrupted or buggy // situations, we only allow this to happen a // small number of times. if (--loops == 0) { ZeroMemory(outBuffer, sizeof(*outBuffer)); status = E_FAIL; break; } cycleBuf = *outBuffer; inBuffer = &cycleBuf; } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4297) #endif // _MSC_VER STDMETHODIMP ClrDataAccess::GetGcNotification(GcEvtArgs* gcEvtArgs) { HRESULT status; DAC_ENTER(); EX_TRY { if (gcEvtArgs->typ >= GC_EVENT_TYPE_MAX) { status = E_INVALIDARG; } else { GcNotifications gn(GetHostGcNotificationTable()); if (!gn.IsActive()) { status = E_OUTOFMEMORY; } else { GcEvtArgs *res = gn.GetNotification(*gcEvtArgs); if (res != NULL) { *gcEvtArgs = *res; status = S_OK; } else { status = E_FAIL; } } } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } STDMETHODIMP ClrDataAccess::SetGcNotification(IN GcEvtArgs gcEvtArgs) { HRESULT status; DAC_ENTER(); EX_TRY { if (gcEvtArgs.typ >= GC_EVENT_TYPE_MAX) 
{ status = E_INVALIDARG; } else { GcNotifications gn(GetHostGcNotificationTable()); if (!gn.IsActive()) { status = E_OUTOFMEMORY; } else { if (gn.SetNotification(gcEvtArgs) && gn.UpdateOutOfProcTable()) { status = S_OK; } else { status = E_FAIL; } } } } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &status)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); return status; } #ifdef _MSC_VER #pragma warning(pop) #endif // _MSC_VER HRESULT ClrDataAccess::Initialize(void) { HRESULT hr; CLRDATA_ADDRESS base; // // We do not currently support cross-platform // debugging. Verify that cross-platform is not // being attempted. // // Determine our platform based on the pre-processor macros set when we were built #ifdef FEATURE_PAL #if defined(DBG_TARGET_X86) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_X86; #elif defined(DBG_TARGET_AMD64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_AMD64; #elif defined(DBG_TARGET_ARM) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM; #elif defined(DBG_TARGET_ARM64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM64; #else #error Unknown Processor. #endif #else #if defined(DBG_TARGET_X86) CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_X86; #elif defined(DBG_TARGET_AMD64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_AMD64; #elif defined(DBG_TARGET_ARM) CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_ARM; #elif defined(DBG_TARGET_ARM64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_ARM64; #else #error Unknown Processor. #endif #endif CorDebugPlatform targetPlatform; IfFailRet(m_pTarget->GetPlatform(&targetPlatform)); if (targetPlatform != hostPlatform) { // DAC fatal error: Platform mismatch - the platform reported by the data target // is not what this version of mscordacwks.dll was built for. return CORDBG_E_UNCOMPATIBLE_PLATFORMS; } // // Get the current DLL base for mscorwks globals. 
// In case of multiple-CLRs, there may be multiple dlls named "mscorwks". // code:OpenVirtualProcess can take the base address (clrInstanceId) to select exactly // which CLR to is being target. If so, m_globalBase will already be set. // if (m_globalBase == 0) { // Caller didn't specify which CLR to debug. This supports Whidbey SOS cases, so we should // be using a legacy data target. if (m_pLegacyTarget == NULL) { DacError(E_INVALIDARG); UNREACHABLE(); } // Since this is Whidbey, assume there's only 1 CLR named "mscorwks.dll" and pick that. IfFailRet(m_pLegacyTarget->GetImageBase(MAIN_CLR_DLL_NAME_W, &base)); m_globalBase = TO_TADDR(base); } // We don't need to try too hard to prevent // multiple initializations as each one will // copy the same data into the globals and so // cannot interfere with each other. if (!s_procInit) { IfFailRet(GetDacGlobals()); IfFailRet(DacGetHostVtPtrs()); s_procInit = true; } // // DAC is now setup and ready to use // // Do some validation IfFailRet(VerifyDlls()); // To support EH SxS, utilcode requires the base address of the runtime // as part of its initialization so that functions like "WasThrownByUs" work correctly since // they use the CLR base address to check if an exception was raised by a given instance of the runtime // or not. // // Thus, when DAC is initialized, initialize utilcode with the base address of the runtime loaded in the // target process. This is similar to work done in CorDB::SetTargetCLR for mscordbi. 
// Initialize UtilCode for SxS scenarios CoreClrCallbacks cccallbacks; cccallbacks.m_hmodCoreCLR = (HINSTANCE)m_globalBase; // Base address of the runtime in the target process cccallbacks.m_pfnIEE = NULL; cccallbacks.m_pfnGetCORSystemDirectory = NULL; cccallbacks.m_pfnGetCLRFunction = NULL; InitUtilcode(cccallbacks); return S_OK; } Thread* ClrDataAccess::FindClrThreadByTaskId(ULONG64 taskId) { Thread* thread = NULL; if (!ThreadStore::s_pThreadStore) { return NULL; } while ((thread = ThreadStore::GetAllThreadList(thread, 0, 0))) { if (thread->GetThreadId() == (DWORD)taskId) { return thread; } } return NULL; } HRESULT ClrDataAccess::IsPossibleCodeAddress(IN TADDR address) { SUPPORTS_DAC; BYTE testRead; ULONG32 testDone; // First do a trivial check on the readability of the // address. This makes for quick rejection of bogus // addresses that the debugger sends in when searching // stacks for return addresses. // XXX Microsoft - Will this cause problems in minidumps // where it's possible the stub is identifiable but // the stub code isn't present? Yes, but the lack // of that code could confuse the walker on its own // if it does code analysis. 
if ((m_pTarget->ReadVirtual(address, &testRead, sizeof(testRead), &testDone) != S_OK) || !testDone) { return E_INVALIDARG; } return S_OK; } HRESULT ClrDataAccess::GetFullMethodName( IN MethodDesc* methodDesc, IN ULONG32 symbolChars, OUT ULONG32* symbolLen, __out_ecount_part_opt(symbolChars, *symbolLen) LPWSTR symbol ) { StackSString s; #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS PAL_CPP_TRY { #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS TypeString::AppendMethodInternal(s, methodDesc, TypeString::FormatSignature|TypeString::FormatNamespace|TypeString::FormatFullInst); #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS } PAL_CPP_CATCH_ALL { if (!MdCacheGetEEName(dac_cast<TADDR>(methodDesc), s)) { PAL_CPP_RETHROW; } } PAL_CPP_ENDTRY #endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS if (symbol) { // Copy as much as we can and truncate the rest. wcsncpy_s(symbol, symbolChars, s.GetUnicode(), _TRUNCATE); } if (symbolLen) *symbolLen = s.GetCount() + 1; if (symbol != NULL && symbolChars < (s.GetCount() + 1)) return S_FALSE; else return S_OK; } PCSTR ClrDataAccess::GetJitHelperName( IN TADDR address, IN bool dynamicHelpersOnly /*=false*/ ) { const static PCSTR s_rgHelperNames[] = { #define JITHELPER(code,fn,sig) #code, #include <jithelpers.h> }; static_assert_no_msg(COUNTOF(s_rgHelperNames) == CORINFO_HELP_COUNT); #ifdef FEATURE_PAL if (!dynamicHelpersOnly) #else if (!dynamicHelpersOnly && g_runtimeLoadedBaseAddress <= address && address < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize) #endif // FEATURE_PAL { // Read the whole table from the target in one shot for better performance VMHELPDEF * pTable = static_cast<VMHELPDEF *>( PTR_READ(dac_cast<TADDR>(&hlpFuncTable), CORINFO_HELP_COUNT * sizeof(VMHELPDEF))); for (int i = 0; i < CORINFO_HELP_COUNT; i++) { if (address == (TADDR)(pTable[i].pfnHelper)) return s_rgHelperNames[i]; } } // Check if its a dynamically generated JIT helper const static CorInfoHelpFunc s_rgDynamicHCallIds[] = { #define DYNAMICJITHELPER(code, fn, sig) code, 
#define JITHELPER(code, fn,sig) #include <jithelpers.h> }; // Read the whole table from the target in one shot for better performance VMHELPDEF * pDynamicTable = static_cast<VMHELPDEF *>( PTR_READ(dac_cast<TADDR>(&hlpDynamicFuncTable), DYNAMIC_CORINFO_HELP_COUNT * sizeof(VMHELPDEF))); for (unsigned d = 0; d < DYNAMIC_CORINFO_HELP_COUNT; d++) { if (address == (TADDR)(pDynamicTable[d].pfnHelper)) { return s_rgHelperNames[s_rgDynamicHCallIds[d]]; } } return NULL; } HRESULT ClrDataAccess::RawGetMethodName( /* [in] */ CLRDATA_ADDRESS address, /* [in] */ ULONG32 flags, /* [in] */ ULONG32 bufLen, /* [out] */ ULONG32 *symbolLen, /* [size_is][out] */ __out_ecount_opt(bufLen) WCHAR symbolBuf[ ], /* [out] */ CLRDATA_ADDRESS* displacement) { #ifdef _TARGET_ARM_ _ASSERTE((address & THUMB_CODE) == 0); address &= ~THUMB_CODE; #endif const UINT k_cch64BitHexFormat = COUNTOF("1234567812345678"); HRESULT status; if (flags != 0) { return E_INVALIDARG; } TADDR taddr; if( (status = TRY_CLRDATA_ADDRESS_TO_TADDR(address, &taddr)) != S_OK ) { return status; } if ((status = IsPossibleCodeAddress(taddr)) != S_OK) { return status; } PTR_StubManager pStubManager; MethodDesc* methodDesc = NULL; { EECodeInfo codeInfo(TO_TADDR(address)); if (codeInfo.IsValid()) { if (displacement) { *displacement = codeInfo.GetRelOffset(); } methodDesc = codeInfo.GetMethodDesc(); goto NameFromMethodDesc; } } pStubManager = StubManager::FindStubManager(TO_TADDR(address)); if (pStubManager != NULL) { if (displacement) { *displacement = 0; } // // Special-cased stub managers // #ifdef FEATURE_PREJIT if (pStubManager == RangeSectionStubManager::g_pManager) { switch (RangeSectionStubManager::GetStubKind(TO_TADDR(address))) { case STUB_CODE_BLOCK_PRECODE: goto PrecodeStub; case STUB_CODE_BLOCK_JUMPSTUB: goto JumpStub; default: break; } } else #endif if (pStubManager == PrecodeStubManager::g_pManager) { #ifdef FEATURE_PREJIT PrecodeStub: #endif PCODE alignedAddress = AlignDown(TO_TADDR(address), PRECODE_ALIGNMENT); 
#ifdef _TARGET_ARM_ alignedAddress += THUMB_CODE; #endif SIZE_T maxPrecodeSize = sizeof(StubPrecode); #ifdef HAS_THISPTR_RETBUF_PRECODE maxPrecodeSize = max(maxPrecodeSize, sizeof(ThisPtrRetBufPrecode)); #endif for (SIZE_T i = 0; i < maxPrecodeSize / PRECODE_ALIGNMENT; i++) { EX_TRY { // Try to find matching precode entrypoint Precode* pPrecode = Precode::GetPrecodeFromEntryPoint(alignedAddress, TRUE); if (pPrecode != NULL) { methodDesc = pPrecode->GetMethodDesc(); if (methodDesc != NULL) { if (DacValidateMD(methodDesc)) { if (displacement) { *displacement = TO_TADDR(address) - PCODEToPINSTR(alignedAddress); } goto NameFromMethodDesc; } } } alignedAddress -= PRECODE_ALIGNMENT; } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions) } } else if (pStubManager == JumpStubStubManager::g_pManager) { #ifdef FEATURE_PREJIT JumpStub: #endif PCODE pTarget = decodeBackToBackJump(TO_TADDR(address)); HRESULT hr = GetRuntimeNameByAddress(pTarget, flags, bufLen, symbolLen, symbolBuf, NULL); if (SUCCEEDED(hr)) { return hr; } PCSTR pHelperName = GetJitHelperName(pTarget); if (pHelperName != NULL) { hr = ConvertUtf8(pHelperName, bufLen, symbolLen, symbolBuf); if (FAILED(hr)) return S_FALSE; return hr; } } static WCHAR s_wszFormatNameWithStubManager[] = W("CLRStub[%s]@%I64x"); LPCWSTR wszStubManagerName = pStubManager->GetStubManagerName(TO_TADDR(address)); _ASSERTE(wszStubManagerName != NULL); int result = _snwprintf_s( symbolBuf, bufLen, _TRUNCATE, s_wszFormatNameWithStubManager, wszStubManagerName, // Arg 1 = stub name TO_TADDR(address)); // Arg 2 = stub hex address if (result != -1) { // Printf succeeded, so we have an exact char count to return if (symbolLen) { size_t cchSymbol = wcslen(symbolBuf) + 1; if (!FitsIn<ULONG32>(cchSymbol)) return COR_E_OVERFLOW; *symbolLen = (ULONG32) cchSymbol; } return S_OK; } // Printf failed. 
Estimate a size that will be at least big enough to hold the name if (symbolLen) { size_t cchSymbol = COUNTOF(s_wszFormatNameWithStubManager) + wcslen(wszStubManagerName) + k_cch64BitHexFormat + 1; if (!FitsIn<ULONG32>(cchSymbol)) return COR_E_OVERFLOW; *symbolLen = (ULONG32) cchSymbol; } return S_FALSE; } // Do not waste time looking up name for static helper. Debugger can get the actual name from .pdb. PCSTR pHelperName; pHelperName = GetJitHelperName(TO_TADDR(address), true /* dynamicHelpersOnly */); if (pHelperName != NULL) { if (displacement) { *displacement = 0; } HRESULT hr = ConvertUtf8(pHelperName, bufLen, symbolLen, symbolBuf); if (FAILED(hr)) return S_FALSE; return S_OK; } return E_NOINTERFACE; NameFromMethodDesc: if (methodDesc->GetClassification() == mcDynamic && !methodDesc->GetSig()) { // XXX Microsoft - Should this case have a more specific name? static WCHAR s_wszFormatNameAddressOnly[] = W("CLRStub@%I64x"); int result = _snwprintf_s( symbolBuf, bufLen, _TRUNCATE, s_wszFormatNameAddressOnly, TO_TADDR(address)); if (result != -1) { // Printf succeeded, so we have an exact char count to return if (symbolLen) { size_t cchSymbol = wcslen(symbolBuf) + 1; if (!FitsIn<ULONG32>(cchSymbol)) return COR_E_OVERFLOW; *symbolLen = (ULONG32) cchSymbol; } return S_OK; } // Printf failed. Estimate a size that will be at least big enough to hold the name if (symbolLen) { size_t cchSymbol = COUNTOF(s_wszFormatNameAddressOnly) + k_cch64BitHexFormat + 1; if (!FitsIn<ULONG32>(cchSymbol)) return COR_E_OVERFLOW; *symbolLen = (ULONG32) cchSymbol; } return S_FALSE; } return GetFullMethodName(methodDesc, bufLen, symbolLen, symbolBuf); } HRESULT ClrDataAccess::GetMethodExtents(MethodDesc* methodDesc, METH_EXTENTS** extents) { CLRDATA_ADDRESS_RANGE* curExtent; { // // Get the information from the methoddesc. // We'll go through the CodeManager + JitManagers, so this should work // for all types of managed code. 
// PCODE methodStart = methodDesc->GetNativeCode(); if (!methodStart) { return E_NOINTERFACE; } EECodeInfo codeInfo(methodStart); _ASSERTE(codeInfo.IsValid()); TADDR codeSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken()); *extents = new (nothrow) METH_EXTENTS; if (!*extents) { return E_OUTOFMEMORY; } (*extents)->numExtents = 1; curExtent = (*extents)->extents; curExtent->startAddress = TO_CDADDR(methodStart); curExtent->endAddress = curExtent->startAddress + codeSize; curExtent++; } (*extents)->curExtent = 0; return S_OK; } // Allocator to pass to the debug-info-stores... BYTE* DebugInfoStoreNew(void * pData, size_t cBytes) { return new (nothrow) BYTE[cBytes]; } HRESULT ClrDataAccess::GetMethodVarInfo(MethodDesc* methodDesc, TADDR address, ULONG32* numVarInfo, ICorDebugInfo::NativeVarInfo** varInfo, ULONG32* codeOffset) { SUPPORTS_DAC; COUNT_T countNativeVarInfo; NewHolder<ICorDebugInfo::NativeVarInfo> nativeVars(NULL); DebugInfoRequest request; TADDR nativeCodeStartAddr = PCODEToPINSTR(methodDesc->GetNativeCode()); request.InitFromStartingAddr(methodDesc, nativeCodeStartAddr); BOOL success = DebugInfoManager::GetBoundariesAndVars( request, DebugInfoStoreNew, NULL, // allocator NULL, NULL, &countNativeVarInfo, &nativeVars); if (!success) { return E_FAIL; } if (!nativeVars || !countNativeVarInfo) { return E_NOINTERFACE; } *numVarInfo = countNativeVarInfo; *varInfo = nativeVars; nativeVars.SuppressRelease(); // To prevent NewHolder from releasing the memory if (codeOffset) { *codeOffset = (ULONG32) (address - nativeCodeStartAddr); } return S_OK; } HRESULT ClrDataAccess::GetMethodNativeMap(MethodDesc* methodDesc, TADDR address, ULONG32* numMap, DebuggerILToNativeMap** map, bool* mapAllocated, CLRDATA_ADDRESS* codeStart, ULONG32* codeOffset) { _ASSERTE((codeOffset == NULL) || (address != NULL)); // Use the DebugInfoStore to get IL->Native maps. // It doesn't matter whether we're jitted, ngenned etc. 
DebugInfoRequest request; TADDR nativeCodeStartAddr = PCODEToPINSTR(methodDesc->GetNativeCode()); request.InitFromStartingAddr(methodDesc, nativeCodeStartAddr); // Bounds info. ULONG32 countMapCopy; NewHolder<ICorDebugInfo::OffsetMapping> mapCopy(NULL); BOOL success = DebugInfoManager::GetBoundariesAndVars( request, DebugInfoStoreNew, NULL, // allocator &countMapCopy, &mapCopy, NULL, NULL); if (!success) { return E_FAIL; } // Need to convert map formats. *numMap = countMapCopy; *map = new (nothrow) DebuggerILToNativeMap[countMapCopy]; if (!*map) { return E_OUTOFMEMORY; } ULONG32 i; for (i = 0; i < *numMap; i++) { (*map)[i].ilOffset = mapCopy[i].ilOffset; (*map)[i].nativeStartOffset = mapCopy[i].nativeOffset; if (i > 0) { (*map)[i - 1].nativeEndOffset = (*map)[i].nativeStartOffset; } (*map)[i].source = mapCopy[i].source; } if (*numMap >= 1) { (*map)[i - 1].nativeEndOffset = 0; } // Update varion out params. if (codeStart) { *codeStart = TO_CDADDR(nativeCodeStartAddr); } if (codeOffset) { *codeOffset = (ULONG32) (address - nativeCodeStartAddr); } *mapAllocated = true; return S_OK; } // Get the MethodDesc for a function // Arguments: // Input: // pModule - pointer to the module for the function // memberRef - metadata token for the function // Return Value: // MethodDesc for the function MethodDesc * ClrDataAccess::FindLoadedMethodRefOrDef(Module* pModule, mdToken memberRef) { CONTRACT(MethodDesc *) { GC_NOTRIGGER; PRECONDITION(CheckPointer(pModule)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; // Must have a MemberRef or a MethodDef mdToken tkType = TypeFromToken(memberRef); _ASSERTE((tkType == mdtMemberRef) || (tkType == mdtMethodDef)); if (tkType == mdtMemberRef) { RETURN pModule->LookupMemberRefAsMethod(memberRef); } RETURN pModule->LookupMethodDef(memberRef); } // FindLoadedMethodRefOrDef // // ReportMem - report a region of memory for dump gathering // // If you specify that you expect success, any failure will cause ReportMem to // return 
false. If you do not expect success, true is always returned. // This function only throws when all dump collection should be cancelled. // // Arguments: // addr - the starting target address for the memory to report // size - the length (in bytes) to report // fExpectSuccess - if true (the default), then we expect that this region of memory // should be fully readable. Any read errors indicate a corrupt target. // bool ClrDataAccess::ReportMem(TADDR addr, TSIZE_T size, bool fExpectSuccess /*= true*/) { SUPPORTS_DAC_HOST_ONLY; // This block of code is to help debugging blocks that we report // to minidump/heapdump. You can set break point here to view the static // variable to figure out the size of blocks that we are reporting. // Most useful is set conditional break point to catch large chuck of // memory. We will leave it here for all builds. // static TADDR debugAddr; static TSIZE_T debugSize; debugAddr = addr; debugSize = size; HRESULT status; if (!addr || addr == (TADDR)-1 || !size) { if (fExpectSuccess) return false; else return true; } // // Try and sanity-check the reported region of memory // #ifdef _DEBUG // in debug builds, sanity-check all reports const TSIZE_T k_minSizeToCheck = 1; #else // in retail builds, only sanity-check larger chunks which have the potential to waste a // lot of time and/or space. This avoids the overhead of checking for the majority of // memory regions (which are small). const TSIZE_T k_minSizeToCheck = 1024; #endif if (size >= k_minSizeToCheck) { if (!IsFullyReadable(addr, size)) { if (!fExpectSuccess) { // We know the read might fail (eg. we're trying to find mapped pages in // a module image), so just skip this block silently. // Note that the EnumMemoryRegion callback won't necessarily do anything if any part of // the region is unreadable, and so there is no point in calling it. 
For cases where we expect // the read might fail, but we want to report any partial blocks, we have to break up the region // into pages and try reporting each page anyway return true; } // We're reporting bogus memory, so the target must be corrupt (or there is a issue). We should abort // reporting and continue with the next data structure (where the exception is caught), // just like we would for a DAC read error (otherwise we might do something stupid // like get into an infinite loop, or otherwise waste time with corrupt data). TARGET_CONSISTENCY_CHECK(false, "Found unreadable memory while reporting memory regions for dump gathering"); return false; } } // Minidumps should never contain data structures that are anywhere near 4MB. If we see this, it's // probably due to memory corruption. To keep the dump small, we'll truncate the block. Note that // the size to which the block is truncated is pretty unique, so should be good evidence in a dump // that this has happened. // Note that it's hard to say what a good value would be here, or whether we should dump any of the // data structure at all. Hopefully experience will help guide this going forward. // @dbgtodo : Extend dump-gathering API to allow a dump-log to be included. const TSIZE_T kMaxMiniDumpRegion = 4*1024*1024 - 3; // 4MB-3 if( size > kMaxMiniDumpRegion && (m_enumMemFlags == CLRDATA_ENUM_MEM_MINI || m_enumMemFlags == CLRDATA_ENUM_MEM_TRIAGE)) { TARGET_CONSISTENCY_CHECK( false, "Dump target consistency failure - truncating minidump data structure"); size = kMaxMiniDumpRegion; } // track the total memory reported. m_cbMemoryReported += size; // ICLRData APIs take only 32-bit sizes. In practice this will almost always be sufficient, but // in theory we might have some >4GB ranges on large 64-bit processes doing a heap dump // (for example, the code:LoaderHeap). If necessary, break up the reporting into maximum 4GB // chunks so we can use the existing API. 
// @dbgtodo : ICorDebugDataTarget should probably use 64-bit sizes while (size) { ULONG32 enumSize; if (size > ULONG_MAX) { enumSize = ULONG_MAX; } else { enumSize = (ULONG32)size; } // Actually perform the memory reporting callback status = m_enumMemCb->EnumMemoryRegion(TO_CDADDR(addr), enumSize); if (status != S_OK) { // If dump generation was cancelled, allow us to throw upstack so we'll actually quit. if ((fExpectSuccess) && (status != COR_E_OPERATIONCANCELED)) return false; } // If the return value of EnumMemoryRegion is COR_E_OPERATIONCANCELED, // it means that user has requested that the minidump gathering be canceled. // To do this we throw an exception which is caught in EnumMemoryRegionsWrapper. if (status == COR_E_OPERATIONCANCELED) { ThrowHR(status); } // Move onto the next chunk (if any) size -= enumSize; addr += enumSize; } return true; } // // DacUpdateMemoryRegion - updates/poisons a region of memory of generated dump // // Parameters: // addr - target address of the beginning of the memory region // bufferSize - number of bytes to update/poison // buffer - data to be written at given target address // bool ClrDataAccess::DacUpdateMemoryRegion(TADDR addr, TSIZE_T bufferSize, BYTE* buffer) { SUPPORTS_DAC_HOST_ONLY; HRESULT status; if (!addr || addr == (TADDR)-1 || !bufferSize) { return false; } // track the total memory reported. m_cbMemoryReported += bufferSize; if (m_updateMemCb == NULL) { return false; } // Actually perform the memory updating callback status = m_updateMemCb->UpdateMemoryRegion(TO_CDADDR(addr), (ULONG32)bufferSize, buffer); if (status != S_OK) { return false; } return true; } // // Check whether a region of target memory is fully readable. // // Arguments: // addr The base target address of the region // size The size of the region to analyze // // Return value: // True if the entire regions appears to be readable, false otherwise. 
// // Notes: // The motivation here is that reporting large regions of unmapped address space to dbgeng can result in // it taking a long time trying to identify a valid subrange. This can happen when the target // memory is corrupt, and we enumerate a data structure with a dynamic size. Ideally we would just spec // the ICLRDataEnumMemoryRegionsCallback API to require the client to fail if it detects an unmapped // memory address in the region. However, we can't change the existing dbgeng code, so for now we'll // rely on this heuristic here. // @dbgtodo : Try and get the dbg team to change their EnumMemoryRegion behavior. See DevDiv Bugs 6265 // bool ClrDataAccess::IsFullyReadable(TADDR taBase, TSIZE_T dwSize) { // The only way we have to verify that a memory region is readable is to try reading it in it's // entirety. This is potentially expensive, so we'll rely on a heuristic that spot-checks various // points in the region. // Ensure we've got something to check if( dwSize == 0 ) return true; // Check for overflow TADDR taEnd = DacTAddrOffset(taBase, dwSize, 1); // Loop through using expontential growth, being sure to check both the first and last byte TADDR taCurr = taBase; TSIZE_T dwInc = 4096; bool bDone = false; while (!bDone) { // Try and read a byte from the target. Note that we don't use PTR_BYTE here because we don't want // the overhead of inserting entries into the DAC instance cache. 
BYTE b; ULONG32 dwBytesRead; HRESULT hr = m_pTarget->ReadVirtual(taCurr, &b, 1, &dwBytesRead); if( hr != S_OK || dwBytesRead < 1 ) { return false; } if (taEnd - taCurr <= 1) { // We just read the last byte so we're done _ASSERTE( taCurr = taEnd - 1 ); bDone = true; } else if (dwInc == 0 || dwInc >= taEnd - taCurr) { // we've reached the end of the exponential series, check the last byte taCurr = taEnd - 1; } else { // advance current pointer (subtraction above ensures this won't overflow) taCurr += dwInc; // double the increment for next time (or set to 0 if it's already the max) dwInc <<= 1; } } return true; } JITNotification* ClrDataAccess::GetHostJitNotificationTable() { if (m_jitNotificationTable == NULL) { m_jitNotificationTable = JITNotifications::InitializeNotificationTable(1000); } return m_jitNotificationTable; } GcNotification* ClrDataAccess::GetHostGcNotificationTable() { if (m_gcNotificationTable == NULL) { m_gcNotificationTable = GcNotifications::InitializeNotificationTable(128); } return m_gcNotificationTable; } /* static */ bool ClrDataAccess::GetMetaDataFileInfoFromPEFile(PEFile *pPEFile, DWORD &dwTimeStamp, DWORD &dwSize, DWORD &dwDataSize, DWORD &dwRvaHint, bool &isNGEN, __out_ecount(cchFilePath) LPWSTR wszFilePath, const DWORD cchFilePath) { SUPPORTS_DAC_HOST_ONLY; PEImage *mdImage = NULL; PEImageLayout *layout; IMAGE_DATA_DIRECTORY *pDir = NULL; COUNT_T uniPathChars = 0; isNGEN = false; if (pPEFile->HasNativeImage()) { mdImage = pPEFile->GetNativeImage(); _ASSERTE(mdImage != NULL); layout = mdImage->GetLoadedLayout(); pDir = &(layout->GetCorHeader()->MetaData); // For ngen image, the IL metadata is stored for private use. So we need to pass // the RVA hint to find it to debuggers. 
// if (pDir->Size != 0) { isNGEN = true; dwRvaHint = pDir->VirtualAddress; dwDataSize = pDir->Size; } } if (pDir == NULL || pDir->Size == 0) { mdImage = pPEFile->GetILimage(); if (mdImage != NULL) { layout = mdImage->GetLoadedLayout(); pDir = &layout->GetCorHeader()->MetaData; // In IL image case, we do not have any hint to IL metadata since it is stored // in the corheader. // dwRvaHint = 0; dwDataSize = pDir->Size; } else { return false; } } // Do not fail if path can not be read. Triage dumps don't have paths and we want to fallback // on searching metadata from IL image. mdImage->GetPath().DacGetUnicode(cchFilePath, wszFilePath, &uniPathChars); if (!mdImage->HasNTHeaders() || !mdImage->HasCorHeader() || !mdImage->HasLoadedLayout() || (uniPathChars > cchFilePath)) { return false; } // It is possible that the module is in-memory. That is the wszFilePath here is empty. // We will try to use the module name instead in this case for hosting debugger // to find match. if (wcslen(wszFilePath) == 0) { mdImage->GetModuleFileNameHintForDAC().DacGetUnicode(cchFilePath, wszFilePath, &uniPathChars); if (uniPathChars > cchFilePath) { return false; } } dwTimeStamp = layout->GetTimeDateStamp(); dwSize = (ULONG32)layout->GetVirtualSize(); return true; } /* static */ bool ClrDataAccess::GetILImageInfoFromNgenPEFile(PEFile *peFile, DWORD &dwTimeStamp, DWORD &dwSize, __out_ecount(cchFilePath) LPWSTR wszFilePath, const DWORD cchFilePath) { SUPPORTS_DAC_HOST_ONLY; DWORD dwWritten = 0; // use the IL File name if (!peFile->GetPath().DacGetUnicode(cchFilePath, wszFilePath, (COUNT_T *)(&dwWritten))) { // Use DAC hint to retrieve the IL name. peFile->GetModuleFileNameHint().DacGetUnicode(cchFilePath, wszFilePath, (COUNT_T *)(&dwWritten)); } #ifdef FEATURE_PREJIT // Need to get IL image information from cached info in the ngen image. 
dwTimeStamp = peFile->GetLoaded()->GetNativeVersionInfo()->sourceAssembly.timeStamp; dwSize = peFile->GetLoaded()->GetNativeVersionInfo()->sourceAssembly.ilImageSize; #else dwTimeStamp = 0; dwSize = 0; #endif // FEATURE_PREJIT return true; } #if defined(FEATURE_CORESYSTEM) /* static */ // We extract "ni.dll or .ni.winmd" from the NGEM image name to obtain the IL image name. // In the end we add given ilExtension. // This dependecy is based on Apollo installer behavior. bool ClrDataAccess::GetILImageNameFromNgenImage( LPCWSTR ilExtension, __out_ecount(cchFilePath) LPWSTR wszFilePath, const DWORD cchFilePath) { if (wszFilePath == NULL || cchFilePath == 0) { return false; } _wcslwr_s(wszFilePath, cchFilePath); // Find the "ni.dll" or "ni.winmd" extension (check for PEFile isWinRT something to know when is winmd or not. // If none exists use NGEN image name. // const WCHAR* ngenExtension[] = {W("ni.dll"), W("ni.winmd")}; for (unsigned i = 0; i < COUNTOF(ngenExtension); ++i) { if (wcslen(ilExtension) > wcslen(ngenExtension[i])) { // We should not have IL image name bigger than NGEN image. // It will not fit inside wszFilePath. 
continue; } LPWSTR wszFileExtension = wcsstr(wszFilePath, ngenExtension[i]); if (wszFileExtension != 0) { LPWSTR wszNextFileExtension = wszFileExtension; // Find last occurence do { wszFileExtension = wszNextFileExtension; wszNextFileExtension = wcsstr(wszFileExtension + 1, ngenExtension[i]); } while (wszNextFileExtension != 0); // Overwrite ni.dll or ni.winmd with ilExtension(.dll, .winmd) if (!memcpy_s(wszFileExtension, wcslen(ngenExtension[i])*sizeof(WCHAR), ilExtension, wcslen(ilExtension)*sizeof(WCHAR))) { wszFileExtension[wcslen(ilExtension)] = '\0'; return true; } } } //Use ngen filename if there is no ".ni" if (wcsstr(wszFilePath, W(".ni")) == 0) { return true; } return false; } #endif // FEATURE_CORESYSTEM void * ClrDataAccess::GetMetaDataFromHost(PEFile* peFile, bool* isAlternate) { DWORD imageTimestamp, imageSize, dataSize; void* buffer = NULL; WCHAR uniPath[MAX_LONGPATH] = {0}; bool isAlt = false; bool isNGEN = false; DAC_INSTANCE* inst = NULL; HRESULT hr = S_OK; DWORD ulRvaHint; // // We always ask for the IL image metadata, // as we expect that to be more // available than others. The drawback is that // there may be differences between the IL image // metadata and native image metadata, so we // have to mark such alternate metadata so that // we can fail unsupported usage of it. // // Microsoft - above comment seems to be an unimplemented thing. // The DAC_MD_IMPORT.isAlternate field gets ultimately set, but // on the searching I did, I cannot find any usage of it // other than in the ctor. Should we be doing something, or should // we remove this comment and the isAlternate field? // It's possible that test will want us to track whether we have // an IL image's metadata loaded against an NGEN'ed image // so the field remains for now. 
if (!ClrDataAccess::GetMetaDataFileInfoFromPEFile( peFile, imageTimestamp, imageSize, dataSize, ulRvaHint, isNGEN, uniPath, NumItems(uniPath))) { return NULL; } // try direct match for the image that is loaded into the managed process peFile->GetLoadedMetadata((COUNT_T *)(&dataSize)); DWORD allocSize = 0; if (!ClrSafeInt<DWORD>::addition(dataSize, sizeof(DAC_INSTANCE), allocSize)) { DacError(HRESULT_FROM_WIN32(ERROR_ARITHMETIC_OVERFLOW)); } inst = m_instances.Alloc(0, allocSize, DAC_DPTR); if (!inst) { DacError(E_OUTOFMEMORY); return NULL; } buffer = (void*)(inst + 1); // APIs implemented by hosting debugger. It can use the path/filename, timestamp, and // file size to find an exact match for the image. If that fails for an ngen'ed image, // we can request the IL image which it came from. if (m_legacyMetaDataLocator) { // Legacy API implemented by hosting debugger. hr = m_legacyMetaDataLocator->GetMetadata( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet ulRvaHint, 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } else { hr = m_target3->GetMetaData( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet ulRvaHint, 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } if (FAILED(hr) && isNGEN) { // We failed to locate the ngen'ed image. 
We should try to // find the matching IL image // isAlt = true; if (!ClrDataAccess::GetILImageInfoFromNgenPEFile( peFile, imageTimestamp, imageSize, uniPath, NumItems(uniPath))) { goto ErrExit; } #if defined(FEATURE_CORESYSTEM) const WCHAR* ilExtension[] = {W("dll"), W("winmd")}; WCHAR ngenImageName[MAX_LONGPATH] = {0}; if (wcscpy_s(ngenImageName, NumItems(ngenImageName), uniPath) != 0) { goto ErrExit; } for (unsigned i = 0; i < COUNTOF(ilExtension); i++) { if (wcscpy_s(uniPath, NumItems(uniPath), ngenImageName) != 0) { goto ErrExit; } // Transform NGEN image name into IL Image name if (!GetILImageNameFromNgenImage(ilExtension[i], uniPath, NumItems(uniPath))) { goto ErrExit; } #endif//FEATURE_CORESYSTEM // RVA size in ngen image and IL image is the same. Because the only // different is in RVA. That is 4 bytes column fixed. // // try again if (m_legacyMetaDataLocator) { hr = m_legacyMetaDataLocator->GetMetadata( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet 0, // pass zero hint here... important 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } else { hr = m_target3->GetMetaData( uniPath, imageTimestamp, imageSize, NULL, // MVID - not used yet 0, // pass zero hint here... important 0, // flags - reserved for future. dataSize, (BYTE*)buffer, NULL); } #if defined(FEATURE_CORESYSTEM) if (SUCCEEDED(hr)) { break; } } #endif // FEATURE_CORESYSTEM } if (FAILED(hr)) { goto ErrExit; } *isAlternate = isAlt; m_instances.AddSuperseded(inst); return buffer; ErrExit: if (inst != NULL) { m_instances.ReturnAlloc(inst); } return NULL; } //++++++++++++++++++++++++++++++++++++++++++++++++++++++++ // // Given a PEFile or a ReflectionModule try to find the corresponding metadata // We will first ask debugger to locate it. 
If fail, we will try // to get it from the target process // //++++++++++++++++++++++++++++++++++++++++++++++++++++++++ IMDInternalImport* ClrDataAccess::GetMDImport(const PEFile* peFile, const ReflectionModule* reflectionModule, bool throwEx) { HRESULT status; PTR_CVOID mdBaseTarget = NULL; COUNT_T mdSize; IMDInternalImport* mdImport = NULL; PVOID mdBaseHost = NULL; bool isAlternate = false; _ASSERTE(peFile == NULL && reflectionModule != NULL || peFile != NULL && reflectionModule == NULL); TADDR peFileAddr = (peFile != NULL) ? dac_cast<TADDR>(peFile) : dac_cast<TADDR>(reflectionModule); // // Look for one we've already created. // mdImport = m_mdImports.Get(peFileAddr); if (mdImport != NULL) { return mdImport; } if (peFile != NULL) { // Get the metadata size mdBaseTarget = ((PEFile*)peFile)->GetLoadedMetadata(&mdSize); } else if (reflectionModule != NULL) { // Get the metadata PTR_SBuffer metadataBuffer = reflectionModule->GetDynamicMetadataBuffer(); if (metadataBuffer != PTR_NULL) { mdBaseTarget = dac_cast<PTR_CVOID>((metadataBuffer->DacGetRawBuffer()).StartAddress()); mdSize = metadataBuffer->GetSize(); } else { if (throwEx) { DacError(E_FAIL); } return NULL; } } else { if (throwEx) { DacError(E_FAIL); } return NULL; } if (mdBaseTarget == PTR_NULL) { mdBaseHost = NULL; } else { // // Maybe the target process has the metadata // Find out where the metadata for the image is // in the target's memory. // // // Read the metadata into the host process. Make sure pass in false in the last // parameter. This is only matters when producing skinny mini-dump. This will // prevent metadata gets reported into mini-dump. // mdBaseHost = DacInstantiateTypeByAddressNoReport(dac_cast<TADDR>(mdBaseTarget), mdSize, false); } // Try to see if debugger can locate it if (peFile != NULL && mdBaseHost == NULL && (m_target3 || m_legacyMetaDataLocator)) { // We couldn't read the metadata from memory. 
Ask // the target for metadata as it may be able to // provide it from some alternate means. mdBaseHost = GetMetaDataFromHost(const_cast<PEFile *>(peFile), &isAlternate); } if (mdBaseHost == NULL) { // cannot locate metadata anywhere if (throwEx) { DacError(E_INVALIDARG); } return NULL; } // // Open the MD interface on the host copy of the metadata. // status = GetMDInternalInterface(mdBaseHost, mdSize, ofRead, IID_IMDInternalImport, (void**)&mdImport); if (status != S_OK) { if (throwEx) { DacError(status); } return NULL; } // // Remember the object for this module for // possible later use. // The m_mdImports list does get cleaned up by calls to ClrDataAccess::Flush, // i.e. every time the process changes state. if (m_mdImports.Add(peFileAddr, mdImport, isAlternate) == NULL) { mdImport->Release(); DacError(E_OUTOFMEMORY); } return mdImport; } // // Set whether inconsistencies in the target should raise asserts. // This overrides the default initial setting. // // Arguments: // fEnableAsserts - whether ASSERTs in dacized code should be enabled // void ClrDataAccess::SetTargetConsistencyChecks(bool fEnableAsserts) { LIMITED_METHOD_DAC_CONTRACT; m_fEnableTargetConsistencyAsserts = fEnableAsserts; } // // Get whether inconsistencies in the target should raise asserts. // // Return value: // whether ASSERTs in dacized code should be enabled // // Notes: // The implementation of ASSERT accesses this via code:DacTargetConsistencyAssertsEnabled // // By default, this is disabled, unless COMPlus_DbgDACEnableAssert is set (see code:ClrDataAccess::ClrDataAccess). // This is necessary for compatibility. For example, SOS expects to be able to scan for // valid MethodTables etc. (which may cause ASSERTs), and also doesn't want ASSERTs when working // with targets with corrupted memory. // // Calling code:ClrDataAccess::SetTargetConsistencyChecks overrides the default setting. 
// bool ClrDataAccess::TargetConsistencyAssertsEnabled() { LIMITED_METHOD_DAC_CONTRACT; return m_fEnableTargetConsistencyAsserts; } #ifdef FEATURE_CORESYSTEM #define ctime_s _ctime32_s #define time_t __time32_t #endif // // VerifyDlls - Validate that the mscorwks in the target matches this version of mscordacwks // Only done on Windows and Mac builds at the moment. // See code:CordbProcess::CordbProcess#DBIVersionChecking for more information regarding version checking. // HRESULT ClrDataAccess::VerifyDlls() { #ifndef FEATURE_PAL // Provide a knob for disabling this check if we really want to try and proceed anyway with a // DAC mismatch. DAC behavior may be arbitrarily bad - globals probably won't be at the same // address, data structures may be laid out differently, etc. if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgDACSkipVerifyDlls)) { return S_OK; } // Read the debug directory timestamp from the target mscorwks image using DAC // Note that we don't use the PE timestamp because the PE file might be changed in ways // that don't effect the PDB (and therefore don't effect DAC). Specifically, we rebase // our DLLs at the end of a build, that changes the PE file, but not the PDB. // Note that if we wanted to be extra careful, we could read the CV contents (which includes // the GUID signature) and verify it matches. Using the timestamp is useful for helpful error // messages, and should be sufficient in any real scenario. DWORD timestamp = 0; HRESULT hr = S_OK; DAC_ENTER(); EX_TRY { // Note that we don't need to worry about ensuring the image memory read by this code // is saved in a minidump. Managed minidump debugging already requires that you have // the full mscorwks.dll available at debug time (eg. windbg won't even load DAC without it). PEDecoder pedecoder(dac_cast<PTR_VOID>(m_globalBase)); // We use the first codeview debug directory entry since this should always refer to the single // PDB for mscorwks.dll. 
const UINT k_maxDebugEntries = 32; // a reasonable upper limit in case of corruption for( UINT i = 0; i < k_maxDebugEntries; i++) { PTR_IMAGE_DEBUG_DIRECTORY pDebugEntry = pedecoder.GetDebugDirectoryEntry(i); // If there are no more entries, then stop if (pDebugEntry == NULL) break; // Ignore non-codeview entries. Some scenarios (eg. optimized builds), there may be extra // debug directory entries at the end of some other type. if (pDebugEntry->Type == IMAGE_DEBUG_TYPE_CODEVIEW) { // Found a codeview entry - use it's timestamp for comparison timestamp = pDebugEntry->TimeDateStamp; break; } } char szMsgBuf[1024]; _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE, "Failed to find any valid codeview debug directory entry in %s image", MAIN_CLR_MODULE_NAME_A); _ASSERTE_MSG(timestamp != 0, szMsgBuf); } EX_CATCH { if (!DacExceptionFilter(GET_EXCEPTION(), this, &hr)) { EX_RETHROW; } } EX_END_CATCH(SwallowAllExceptions) DAC_LEAVE(); if (FAILED(hr)) { return hr; } // Validate that we got a timestamp and it matches what the DAC table told us to expect if (timestamp == 0 || timestamp != g_dacTableInfo.dwID0) { // Timestamp mismatch. This means mscordacwks is being used with a version of // mscorwks other than the one it was built for. This will not work reliably. #ifdef _DEBUG // Check if verbose asserts are enabled. The default is up to the specific instantiation of // ClrDataAccess, but can be overridden (in either direction) by a COMPlus_ knob. // Note that we check this knob every time because it may be handy to turn it on in // the environment mid-flight. DWORD dwAssertDefault = m_fEnableDllVerificationAsserts ? 1 : 0; if (REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_DbgDACAssertOnMismatch, dwAssertDefault)) { // Output a nice error message that contains the timestamps in string format. 
time_t actualTime = timestamp; char szActualTime[30]; ctime_s(szActualTime, sizeof(szActualTime), &actualTime); time_t expectedTime = g_dacTableInfo.dwID0; char szExpectedTime[30]; ctime_s(szExpectedTime, sizeof(szExpectedTime), &expectedTime); // Create a nice detailed message for the assert dialog. // Note that the strings returned by ctime_s have terminating newline characters. // This is technically a TARGET_CONSISTENCY_CHECK because a corrupt target could, // in-theory, have a corrupt mscrowks PE header and cause this check to fail // unnecessarily. However, this check occurs during startup, before we know // whether target consistency checks should be enabled, so it's always enabled // at the moment. char szMsgBuf[1024]; _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE, "DAC fatal error: %s/mscordacwks.dll version mismatch\n\n"\ "The debug directory timestamp of the loaded %s does not match the\n"\ "version mscordacwks.dll was built for.\n"\ "Expected %s timestamp: %s"\ "Actual %s timestamp: %s\n"\ "DAC will now fail to initialize with a CORDBG_E_MISMATCHED_CORWKS_AND_DACWKS_DLLS\n"\ "error. If you really want to try and use the mimatched DLLs, you can disable this\n"\ "check by setting COMPlus_DbgDACSkipVerifyDlls=1. 
However, using a mismatched DAC\n"\ "DLL will usually result in arbitrary debugger failures.\n", MAIN_CLR_DLL_NAME_A, MAIN_CLR_DLL_NAME_A, MAIN_CLR_DLL_NAME_A, szExpectedTime, MAIN_CLR_DLL_NAME_A, szActualTime); _ASSERTE_MSG(false, szMsgBuf); } #endif // Return a specific hresult indicating this problem return CORDBG_E_MISMATCHED_CORWKS_AND_DACWKS_DLLS; } #endif // FEATURE_PAL return S_OK; } #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS void ClrDataAccess::InitStreamsForWriting(IN CLRDataEnumMemoryFlags flags) { // enforce this should only be called when generating triage and mini-dumps if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) return; EX_TRY { if (m_streams == NULL) m_streams = new DacStreamManager(g_MiniMetaDataBuffAddress, g_MiniMetaDataBuffMaxSize); if (!m_streams->PrepareStreamsForWriting()) { delete m_streams; m_streams = NULL; } } EX_CATCH { if (m_streams != NULL) { delete m_streams; m_streams = NULL; } } EX_END_CATCH(SwallowAllExceptions) } bool ClrDataAccess::MdCacheAddEEName(TADDR taEEStruct, const SString& name) { bool result = false; EX_TRY { if (m_streams != NULL) result = m_streams->MdCacheAddEEName(taEEStruct, name); } EX_CATCH { result = false; } EX_END_CATCH(SwallowAllExceptions) return result; } void ClrDataAccess::EnumStreams(IN CLRDataEnumMemoryFlags flags) { // enforce this should only be called when generating triage and mini-dumps if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) return; EX_TRY { if (m_streams != NULL) m_streams->EnumStreams(flags); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions) } bool ClrDataAccess::MdCacheGetEEName(TADDR taEEStruct, SString & eeName) { bool result = false; EX_TRY { if (m_streams == NULL) m_streams = new DacStreamManager(g_MiniMetaDataBuffAddress, g_MiniMetaDataBuffMaxSize); result = m_streams->MdCacheGetEEName(taEEStruct, eeName); } EX_CATCH { result = false; } EX_END_CATCH(SwallowAllExceptions) return result; } #endif // 
FEATURE_MINIMETADATA_IN_TRIAGEDUMPS // Needed for RT_RCDATA. #define MAKEINTRESOURCE(v) MAKEINTRESOURCEW(v) // this funny looking double macro forces x to be macro expanded before L is prepended #define _WIDE(x) _WIDE2(x) #define _WIDE2(x) W(x) HRESULT ClrDataAccess::GetDacGlobals() { #ifdef FEATURE_PAL #ifdef DAC_TABLE_SIZE if (DAC_TABLE_SIZE != sizeof(g_dacGlobals)) { return E_INVALIDARG; } #endif ULONG64 dacTableAddress = m_globalBase + DAC_TABLE_RVA; if (FAILED(ReadFromDataTarget(m_pTarget, dacTableAddress, (BYTE*)&g_dacGlobals, sizeof(g_dacGlobals)))) { return CORDBG_E_MISSING_DEBUGGER_EXPORTS; } if (g_dacGlobals.ThreadStore__s_pThreadStore == NULL) { return CORDBG_E_UNSUPPORTED; } return S_OK; #else HRESULT status = E_FAIL; DWORD rsrcRVA = 0; LPVOID rsrcData = NULL; DWORD rsrcSize = 0; DWORD resourceSectionRVA = 0; if (FAILED(status = GetMachineAndResourceSectionRVA(m_pTarget, m_globalBase, NULL, &resourceSectionRVA))) { _ASSERTE_MSG(false, "DAC fatal error: can't locate resource section in " MAIN_CLR_DLL_NAME_A); return CORDBG_E_MISSING_DEBUGGER_EXPORTS; } if (FAILED(status = GetResourceRvaFromResourceSectionRvaByName(m_pTarget, m_globalBase, resourceSectionRVA, (DWORD)RT_RCDATA, _WIDE(DACCESS_TABLE_RESOURCE), 0, &rsrcRVA, &rsrcSize))) { _ASSERTE_MSG(false, "DAC fatal error: can't locate DAC table resource in " MAIN_CLR_DLL_NAME_A); return CORDBG_E_MISSING_DEBUGGER_EXPORTS; } rsrcData = new (nothrow) BYTE[rsrcSize]; if (rsrcData == NULL) return E_OUTOFMEMORY; if (FAILED(status = ReadFromDataTarget(m_pTarget, m_globalBase + rsrcRVA, (BYTE*)rsrcData, rsrcSize))) { _ASSERTE_MSG(false, "DAC fatal error: can't load DAC table resource from " MAIN_CLR_DLL_NAME_A); return CORDBG_E_MISSING_DEBUGGER_EXPORTS; } PBYTE rawData = (PBYTE)rsrcData; DWORD bytesLeft = rsrcSize; // Read the header struct DacTableHeader header; // We currently expect the header to be 2 32-bit values and 1 16-byte value, // make sure there is no packing going on or anything. 
static_assert_no_msg(sizeof(DacTableHeader) == 2 * 4 + 16); if (bytesLeft < sizeof(DacTableHeader)) { _ASSERTE_MSG(false, "DAC fatal error: DAC table too small for header."); goto Exit; } memcpy(&header, rawData, sizeof(DacTableHeader)); rawData += sizeof(DacTableHeader); bytesLeft -= sizeof(DacTableHeader); // Save the table info for later use g_dacTableInfo = header.info; // Sanity check that the DAC table is the size we expect. // This could fail if a different version of dacvars.h or vptr_list.h was used when building // mscordacwks.dll than when running DacTableGen. if (offsetof(DacGlobals, Thread__vtAddr) != header.numGlobals * sizeof(ULONG)) { #ifdef _DEBUG char szMsgBuf[1024]; _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE, "DAC fatal error: mismatch in number of globals in DAC table. Read from file: %d, expected: %d.", header.numGlobals, offsetof(DacGlobals, Thread__vtAddr) / sizeof(ULONG)); _ASSERTE_MSG(false, szMsgBuf); #endif // _DEBUG status = E_INVALIDARG; goto Exit; } if (sizeof(DacGlobals) != (header.numGlobals + header.numVptrs) * sizeof(ULONG)) { #ifdef _DEBUG char szMsgBuf[1024]; _snprintf_s(szMsgBuf, sizeof(szMsgBuf), _TRUNCATE, "DAC fatal error: mismatch in number of vptrs in DAC table. 
Read from file: %d, expected: %d.", header.numVptrs, (sizeof(DacGlobals) - offsetof(DacGlobals, Thread__vtAddr)) / sizeof(ULONG)); _ASSERTE_MSG(false, szMsgBuf); #endif // _DEBUG status = E_INVALIDARG; goto Exit; } // Copy the DAC table into g_dacGlobals if (bytesLeft < sizeof(DacGlobals)) { _ASSERTE_MSG(false, "DAC fatal error: DAC table resource too small for DacGlobals."); status = E_UNEXPECTED; goto Exit; } memcpy(&g_dacGlobals, rawData, sizeof(DacGlobals)); rawData += sizeof(DacGlobals); bytesLeft -= sizeof(DacGlobals); status = S_OK; Exit: return status; #endif } #undef MAKEINTRESOURCE //---------------------------------------------------------------------------- // // IsExceptionFromManagedCode - report if pExceptionRecord points to an exception belonging to the current runtime // // Arguments: // pExceptionRecord - the exception record // // Return Value: // TRUE if it is // Otherwise, FALSE // //---------------------------------------------------------------------------- BOOL ClrDataAccess::IsExceptionFromManagedCode(EXCEPTION_RECORD* pExceptionRecord) { DAC_ENTER(); BOOL flag = FALSE; if (::IsExceptionFromManagedCode(pExceptionRecord)) { flag = TRUE; } DAC_LEAVE(); return flag; } #ifndef FEATURE_PAL //---------------------------------------------------------------------------- // // GetWatsonBuckets - retrieve Watson buckets from the specified thread // // Arguments: // dwThreadId - the thread ID // pGM - pointer to the space to store retrieved Watson buckets // // Return Value: // S_OK if the operation is successful. // or S_FALSE if Watson buckets cannot be found // else detailed error code. 
// //---------------------------------------------------------------------------- HRESULT ClrDataAccess::GetWatsonBuckets(DWORD dwThreadId, GenericModeBlock * pGM) { _ASSERTE((dwThreadId != 0) && (pGM != NULL)); if ((dwThreadId == 0) || (pGM == NULL)) { return E_INVALIDARG; } DAC_ENTER(); Thread * pThread = DacGetThread(dwThreadId); _ASSERTE(pThread != NULL); HRESULT hr = E_UNEXPECTED; if (pThread != NULL) { hr = GetClrWatsonBucketsWorker(pThread, pGM); } DAC_LEAVE(); return hr; } #endif // FEATURE_PAL //---------------------------------------------------------------------------- // // CLRDataAccessCreateInstance - create and initialize a ClrDataAccess object // // Arguments: // pLegacyTarget - data target object // pClrDataAccess - ClrDataAccess object // // Return Value: // S_OK on success, else detailed error code. // //---------------------------------------------------------------------------- STDAPI CLRDataAccessCreateInstance(ICLRDataTarget * pLegacyTarget, ClrDataAccess ** pClrDataAccess) { if ((pLegacyTarget == NULL) || (pClrDataAccess == NULL)) { return E_INVALIDARG; } *pClrDataAccess = NULL; // Create an adapter which implements the new ICorDebugDataTarget interfaces using // a legacy implementation of ICLRDataTarget // ClrDataAccess will take a take a ref on this and delete it when it's released. DataTargetAdapter * pDtAdapter = new (nothrow) DataTargetAdapter(pLegacyTarget); if (!pDtAdapter) { return E_OUTOFMEMORY; } ClrDataAccess* dacClass = new (nothrow) ClrDataAccess(pDtAdapter, pLegacyTarget); if (!dacClass) { delete pDtAdapter; return E_OUTOFMEMORY; } HRESULT hr = dacClass->Initialize(); if (FAILED(hr)) { dacClass->Release(); return hr; } *pClrDataAccess = dacClass; return S_OK; } //---------------------------------------------------------------------------- // // CLRDataCreateInstance. // Creates the IXClrData object // This is the legacy entrypoint to DAC, used by dbgeng/dbghelp (windbg, SOS, watson, etc). 
//
//----------------------------------------------------------------------------
#ifdef __GNUC__
// Keep this export alive even if nothing in this image references it -
// it is looked up dynamically by debugger hosts.
__attribute__((used))
#endif // __GNUC__
STDAPI CLRDataCreateInstance(REFIID iid,
                             ICLRDataTarget * pLegacyTarget,
                             void ** iface)
{
    if ((pLegacyTarget == NULL) || (iface == NULL))
    {
        return E_INVALIDARG;
    }
    *iface = NULL;

    ClrDataAccess * pClrDataAccess;
    HRESULT hr = CLRDataAccessCreateInstance(pLegacyTarget, &pClrDataAccess);
    if (hr != S_OK)
    {
        return hr;
    }

    // Hand out only the requested interface; drop our creation reference
    // so the returned interface holds the sole ref.
    hr = pClrDataAccess->QueryInterface(iid, iface);

    pClrDataAccess->Release();
    return hr;
}

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventGetProcessIdAndThreadId - get ProcessID and ThreadID
//
// Arguments:
//    hProcess - process handle
//    hThread - thread handle
//    pPId - pointer to DWORD to store ProcessID
//    pThreadId - pointer to DWORD to store ThreadID
//
// Return Value:
//    TRUE if the operation is successful.
//    FALSE if it fails
//
//----------------------------------------------------------------------------
BOOL OutOfProcessExceptionEventGetProcessIdAndThreadId(HANDLE hProcess, HANDLE hThread, DWORD * pPId, DWORD * pThreadId)
{
    _ASSERTE((pPId != NULL) && (pThreadId != NULL));

#ifdef FEATURE_PAL
    // UNIXTODO: mikem 1/13/15 Need appropriate PAL functions for getting ids
    *pPId = (DWORD)hProcess;
    *pThreadId = (DWORD)hThread;
#else
#if !defined(FEATURE_CORESYSTEM)
    HMODULE hKernel32 = WszGetModuleHandle(W("kernel32.dll"));
#else
    HMODULE hKernel32 = WszGetModuleHandle(W("api-ms-win-core-processthreads-l1-1-1.dll"));
#endif
    if (hKernel32 == NULL)
    {
        return FALSE;
    }

    // Resolve GetProcessIdOfThread/GetThreadId dynamically so this binary
    // still loads on OS versions where they might be absent.
    typedef WINBASEAPI DWORD (WINAPI GET_PROCESSID_OF_THREAD)(HANDLE);
    GET_PROCESSID_OF_THREAD * pGetProcessIdOfThread;

    typedef WINBASEAPI DWORD (WINAPI GET_THREADID)(HANDLE);
    GET_THREADID * pGetThreadId;

    pGetProcessIdOfThread = (GET_PROCESSID_OF_THREAD *)GetProcAddress(hKernel32, "GetProcessIdOfThread");
    pGetThreadId = (GET_THREADID *)GetProcAddress(hKernel32, "GetThreadId");

    // OOP callbacks are used on Win7 or later. We should have the below two APIs available.
    _ASSERTE((pGetProcessIdOfThread != NULL) && (pGetThreadId != NULL));
    if ((pGetProcessIdOfThread == NULL) ||
        (pGetThreadId == NULL))
    {
        return FALSE;
    }

    *pPId = (*pGetProcessIdOfThread)(hThread);
    *pThreadId = (*pGetThreadId)(hThread);
#endif // FEATURE_PAL
    return TRUE;
}

// WER_RUNTIME_EXCEPTION_INFORMATION will be available from Win7 SDK once Win7 SDK is released.
#if !defined(WER_RUNTIME_EXCEPTION_INFORMATION)
typedef struct _WER_RUNTIME_EXCEPTION_INFORMATION
{
    DWORD dwSize;
    HANDLE hProcess;
    HANDLE hThread;
    EXCEPTION_RECORD exceptionRecord;
    CONTEXT context;
} WER_RUNTIME_EXCEPTION_INFORMATION, * PWER_RUNTIME_EXCEPTION_INFORMATION;
#endif // !defined(WER_RUNTIME_EXCEPTION_INFORMATION)

#ifndef FEATURE_PAL

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventGetWatsonBucket - retrieve Watson buckets if it is a managed exception
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pGM - pointer to the space to store retrieved Watson buckets
//
// Return Value:
//    S_OK if the operation is successful.
//    or S_FALSE if it is not a managed exception or Watson buckets cannot be found
//    else detailed error code.
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventGetWatsonBucket(__in PDWORD pContext,
                                                 __in const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                                 __out GenericModeBlock * pGMB)
{
    HANDLE hProcess = pExceptionInformation->hProcess;
    HANDLE hThread = pExceptionInformation->hThread;
    DWORD PId, ThreadId;

    if (!OutOfProcessExceptionEventGetProcessIdAndThreadId(hProcess, hThread, &PId, &ThreadId))
    {
        return E_FAIL;
    }

    // pContext is the runtime's base address in the crashed process
    // (registered with WER at helper-module registration time).
    CLRDATA_ADDRESS baseAddressOfRuntime = (CLRDATA_ADDRESS)pContext;
    NewHolder<LiveProcDataTarget> dataTarget(NULL);
    dataTarget = new (nothrow) LiveProcDataTarget(hProcess, PId, baseAddressOfRuntime);
    if (dataTarget == NULL)
    {
        return E_OUTOFMEMORY;
    }

    // NewHolder releases the DAC instance automatically on every exit path.
    NewHolder<ClrDataAccess> pClrDataAccess(NULL);
    HRESULT hr = CLRDataAccessCreateInstance(dataTarget, &pClrDataAccess);
    if (hr != S_OK)
    {
        // Contract: this function reserves S_FALSE for "not a managed
        // exception", so a creation S_FALSE is mapped to a hard failure.
        if (hr == S_FALSE)
        {
            return E_FAIL;
        }
        else
        {
            return hr;
        }
    }

    if (!pClrDataAccess->IsExceptionFromManagedCode(&pExceptionInformation->exceptionRecord))
    {
        return S_FALSE;
    }

    return pClrDataAccess->GetWatsonBuckets(ThreadId, pGMB);
}

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventCallback - claim the ownership of this event if current
//                                      runtime threw the unhandled exception
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pbOwnershipClaimed - output parameter for claiming the ownership of this event
//    pwszEventName - name of the event. If this is NULL, pchSize cannot be NULL.
//                    This parameter is valid only if * pbOwnershipClaimed is TRUE.
//    pchSize - the size of the buffer pointed by pwszEventName
//    pdwSignatureCount - the count of signature parameters. Valid values range from
//                        0 to 10. If the value returned is greater than 10, only the
//                        1st 10 parameters are used for bucketing parameters. This
//                        parameter is valid only if * pbOwnershipClaimed is TRUE.
//
// Return Value:
//    S_OK on success, else detailed error code.
//
// Note:
//    This is the 1st function that is called into by WER. This API through its out
//    parameters, tells WER as to whether or not it is claiming the crash. If it does
//    claim the crash, WER uses the event name specified in the string pointed to by
//    pwszEventName for error reporting. WER then proceed to call the
//    OutOfProcessExceptionEventSignatureCallback to get the bucketing parameters from
//    the helper dll.
//
//    This function follows the multiple call paradigms. WER may call into this function
//    with *pwszEventName pointer set to NULL. This is to indicate to the function, that
//    WER wants to know the buffer size needed by the function to populate the string
//    into the buffer. The function should return E_INSUFFICIENTBUFFER with the needed
//    buffer size in *pchSize. WER shall then allocate a buffer of size *pchSize for
//    pwszEventName and then call this function again at which point the function should
//    populate the string and return S_OK.
//
//    Note that *pdOwnershipClaimed should be set to TRUE everytime this function is called
//    for the helper dll to claim ownership of bucketing.
//
//    The Win7 WER spec is at
//    http://windows/windows7/docs/COSD%20Documents/Fundamentals/Feedback%20Services%20and%20Platforms/WER-CLR%20Integration%20Dev%20Spec.docx
//
//    !!!READ THIS!!!
//    Since this is called by external modules it's important that we don't let any exceptions leak out (see Win8 95224).
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventCallback(__in PDWORD pContext,
                                          __in const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                          __out BOOL * pbOwnershipClaimed,
                                          __out_ecount(*pchSize) PWSTR pwszEventName,
                                          __inout PDWORD pchSize,
                                          __out PDWORD pdwSignatureCount)
{
    SUPPORTS_DAC_HOST_ONLY;

    if ((pContext == NULL) ||
        (pExceptionInformation == NULL) ||
        (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) ||
        (pbOwnershipClaimed == NULL) ||
        (pchSize == NULL) ||
        (pdwSignatureCount == NULL))
    {
        return E_INVALIDARG;
    }

    *pbOwnershipClaimed = FALSE;

    GenericModeBlock gmb;
    HRESULT hr = E_FAIL;

    // See "!!!READ THIS!!!" above: no exception may escape to WER.
    EX_TRY
    {
        // get Watson buckets if it is a managed exception
        hr = OutOfProcessExceptionEventGetWatsonBucket(pContext, pExceptionInformation, &gmb);
    }
    EX_CATCH_HRESULT(hr);

    if (hr != S_OK)
    {
        // S_FALSE means either it is not a managed exception or we do not have Watson buckets.
        // Since we have set pbOwnershipClaimed to FALSE, we return S_OK to WER.
        if (hr == S_FALSE)
        {
            hr = S_OK;
        }

        return hr;
    }

    // Multiple-call paradigm: report the required buffer size (including
    // the terminator) when no/too-small buffer was supplied.
    if ((pwszEventName == NULL) || (*pchSize <= wcslen(gmb.wzEventTypeName)))
    {
        *pchSize = static_cast<DWORD>(wcslen(gmb.wzEventTypeName)) + 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    // copy custom event name
    wcscpy_s(pwszEventName, *pchSize, gmb.wzEventTypeName);
    *pdwSignatureCount = GetCountBucketParamsForEvent(gmb.wzEventTypeName);
    *pbOwnershipClaimed = TRUE;

    return S_OK;
}

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventCallback - provide custom Watson buckets
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    dwIndex - the index of the bucketing parameter being requested. Valid values are
//              from 0 to 9
//    pwszName - pointer to the name of the bucketing parameter
//    pchName - pointer to character count of the pwszName buffer. If pwszName points to
//              null, *pchName represents the buffer size (represented in number of characters)
//              needed to populate the name in pwszName.
//    pwszValue - pointer to the value of the pwszName bucketing parameter
//    pchValue - pointer to the character count of the pwszValue buffer. If pwszValue points
//               to null, *pchValue represents the buffer size (represented in number of
//               characters) needed to populate the value in pwszValue.
//
// Return Value:
//    S_OK on success, else detailed error code.
//
// Note:
//    This function is called by WER only if the call to OutOfProcessExceptionEventCallback()
//    was successful and the value of *pbOwnershipClaimed was TRUE. This function is called
//    pdwSignatureCount times to collect the bucketing parameters from the helper dll.
//
//    This function also follows the multiple call paradigm as described for the
//    OutOfProcessExceptionEventCallback() function. The buffer sizes needed for
//    this function are of the pwszName and pwszValue buffers.
//
//    !!!READ THIS!!!
//    Since this is called by external modules it's important that we don't let any exceptions leak out (see Win8 95224).
//
//----------------------------------------------------------------------------
STDAPI OutOfProcessExceptionEventSignatureCallback(__in PDWORD pContext,
                                                   __in const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation,
                                                   __in DWORD dwIndex,
                                                   __out_ecount(*pchName) PWSTR pwszName,
                                                   __inout PDWORD pchName,
                                                   __out_ecount(*pchValue) PWSTR pwszValue,
                                                   __inout PDWORD pchValue)
{
    SUPPORTS_DAC_HOST_ONLY;

    if ((pContext == NULL) ||
        (pExceptionInformation == NULL) ||
        (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) ||
        (pchName == NULL) ||
        (pchValue == NULL))
    {
        return E_INVALIDARG;
    }

    // Name is always returned as the empty string (see below), so one
    // character is always the required buffer size.
    if ((pwszName == NULL) || (*pchName == 0))
    {
        *pchName = 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    GenericModeBlock gmb;
    // Index-to-field map for the ten possible bucketing parameters.
    const PWSTR pwszBucketValues[] = {gmb.wzP1,
                                      gmb.wzP2,
                                      gmb.wzP3,
                                      gmb.wzP4,
                                      gmb.wzP5,
                                      gmb.wzP6,
                                      gmb.wzP7,
                                      gmb.wzP8,
                                      gmb.wzP9,
                                      gmb.wzP10};

    HRESULT hr = E_FAIL;

    // See "!!!READ THIS!!!" above: no exception may escape to WER.
    EX_TRY
    {
        // get Watson buckets if it is a managed exception
        hr = OutOfProcessExceptionEventGetWatsonBucket(pContext, pExceptionInformation, &gmb);
    }
    EX_CATCH_HRESULT(hr);

#ifndef FEATURE_WINDOWSPHONE
    // we can't assert this on phone as it's possible for the OS to kill
    // the faulting process before WER crash reporting has completed.
    _ASSERTE(hr == S_OK);
#else
    _ASSERTE(hr == S_OK || hr == CORDBG_E_READVIRTUAL_FAILURE);
#endif
    if (hr != S_OK)
    {
        // S_FALSE means either it is not a managed exception or we do not have Watson buckets.
        // Either case is a logic error because this function is called by WER only if the call
        // to OutOfProcessExceptionEventCallback() was successful and the value of
        // *pbOwnershipClaimed was TRUE.
        if (hr == S_FALSE)
        {
            hr = E_FAIL;
        }

        return hr;
    }

    DWORD paramCount = GetCountBucketParamsForEvent(gmb.wzEventTypeName);

    if (dwIndex >= paramCount)
    {
        _ASSERTE(!"dwIndex is out of range");
        return E_INVALIDARG;
    }

    // Return pwszName as an empty string to let WER use localized version of "Parameter n"
    *pwszName = W('\0');

    if ((pwszValue == NULL) || (*pchValue <= wcslen(pwszBucketValues[dwIndex])))
    {
        *pchValue = static_cast<DWORD>(wcslen(pwszBucketValues[dwIndex]))+ 1;
        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
    }

    // copy custom Watson bucket value
    wcscpy_s(pwszValue, *pchValue, pwszBucketValues[dwIndex]);

    return S_OK;
}

#endif // FEATURE_PAL

//----------------------------------------------------------------------------
//
// OutOfProcessExceptionEventCallback - provide custom debugger launch string
//
// Arguments:
//    pContext - the context passed at helper module registration
//    pExceptionInformation - structure that contains information about the crash
//    pbCustomDebuggerNeeded - pointer to a BOOL. If this BOOL is set to TRUE, then
//                             a custom debugger launch option is needed by the
//                             process. In that case, the subsequent parameters will
//                             be meaningfully used. If this is FALSE, the subsequent
//                             parameters will be ignored.
//    pwszDebuggerLaunch - pointer to a string that will be used to launch the debugger,
//                         if the debugger is launched. The value of this string overrides
//                         the default debugger launch string used by WER.
//    pchSize - pointer to the character count of the pwszDebuggerLaunch buffer. If
//              pwszDebuggerLaunch points to null, *pchSize represents the buffer size
//              (represented in number of characters) needed to populate the debugger
//              launch string in pwszDebuggerLaunch.
//    pbAutoLaunchDebugger - pointer to a BOOL. If this BOOL is set to TRUE, WER will
//                           directly launch the debugger. If set to FALSE, WER will show
//                           the debug option to the user in the WER UI.
//
// Return Value:
//    S_OK on success, else detailed error code.
// // Note: // This function is called into by WER only if the call to OutOfProcessExceptionEventCallback() // was successful and the value of *pbOwnershipClaimed was TRUE. This function allows the helper // dll to customize the debugger launch options including the launch string. // // This function also follows the multiple call paradigm as described for the // OutOfProcessExceptionEventCallback() function. The buffer sizes needed for // this function are of the pwszName and pwszValue buffers. // //---------------------------------------------------------------------------- STDAPI OutOfProcessExceptionEventDebuggerLaunchCallback(__in PDWORD pContext, __in const PWER_RUNTIME_EXCEPTION_INFORMATION pExceptionInformation, __out BOOL * pbCustomDebuggerNeeded, __out_ecount_opt(*pchSize) PWSTR pwszDebuggerLaunch, __inout PDWORD pchSize, __out BOOL * pbAutoLaunchDebugger) { SUPPORTS_DAC_HOST_ONLY; if ((pContext == NULL) || (pExceptionInformation == NULL) || (pExceptionInformation->dwSize < sizeof(WER_RUNTIME_EXCEPTION_INFORMATION)) || (pbCustomDebuggerNeeded == NULL) || (pwszDebuggerLaunch == NULL) || (pchSize == NULL) || (pbAutoLaunchDebugger == NULL)) { return E_INVALIDARG; } // Starting from CLRv4 managed debugger string and setting are unified with native debuggers. // There is no need to provide custom debugger string for WER. 
*pbCustomDebuggerNeeded = FALSE; return S_OK; } // DacHandleEnum #include "comcallablewrapper.h" DacHandleWalker::DacHandleWalker() : mDac(0), m_instanceAge(0), mMap(0), mIndex(0), mTypeMask(0), mGenerationFilter(-1), mChunkIndex(0), mCurr(0), mIteratorIndex(0) { SUPPORTS_DAC; } DacHandleWalker::~DacHandleWalker() { SUPPORTS_DAC; HandleChunkHead *curr = mHead.Next; while (curr) { HandleChunkHead *tmp = curr; curr = curr->Next; delete tmp; } } HRESULT DacHandleWalker::Init(ClrDataAccess *dac, UINT types[], UINT typeCount) { SUPPORTS_DAC; if (dac == NULL || types == NULL) return E_POINTER; mDac = dac; m_instanceAge = dac->m_instanceAge; return Init(BuildTypemask(types, typeCount)); } HRESULT DacHandleWalker::Init(ClrDataAccess *dac, UINT types[], UINT typeCount, int gen) { SUPPORTS_DAC; if (gen < 0 || gen > (int)*g_gcDacGlobals->max_gen) return E_INVALIDARG; mGenerationFilter = gen; return Init(dac, types, typeCount); } HRESULT DacHandleWalker::Init(UINT32 typemask) { SUPPORTS_DAC; mMap = g_gcDacGlobals->handle_table_map; mTypeMask = typemask; return S_OK; } UINT32 DacHandleWalker::BuildTypemask(UINT types[], UINT typeCount) { SUPPORTS_DAC; UINT32 mask = 0; for (UINT i = 0; i < typeCount; ++i) { _ASSERTE(types[i] < 32); mask |= (1 << types[i]); } return mask; } HRESULT DacHandleWalker::Next(unsigned int celt, SOSHandleData handles[], unsigned int *pceltFetched) { SUPPORTS_DAC; if (handles == NULL || pceltFetched == NULL) return E_POINTER; SOSHelperEnter(); hr = DoHandleWalk<SOSHandleData, unsigned int, DacHandleWalker::EnumCallbackSOS>(celt, handles, pceltFetched); SOSHelperLeave(); return hr; } bool DacHandleWalker::FetchMoreHandles(HANDLESCANPROC callback) { SUPPORTS_DAC; // The table slots are based on the number of GC heaps in the process. int max_slots = 1; #ifdef FEATURE_SVR_GC if (GCHeapUtilities::IsServerHeap()) max_slots = GCHeapCount(); #endif // FEATURE_SVR_GC // Reset the Count on all cached chunks. 
We reuse chunks after allocating // them, and the count is the only thing which needs resetting. for (HandleChunkHead *curr = &mHead; curr; curr = curr->Next) curr->Count = 0; DacHandleWalkerParam param(&mHead); do { // Have we advanced past the end of the current bucket? if (mMap && mIndex >= INITIAL_HANDLE_TABLE_ARRAY_SIZE) { mIndex = 0; mMap = mMap->pNext; } // Have we walked the entire handle table map? if (mMap == NULL) { mCurr = NULL; return false; } if (mMap->pBuckets[mIndex] != NULL) { for (int i = 0; i < max_slots; ++i) { DPTR(dac_handle_table) hTable = mMap->pBuckets[mIndex]->pTable[i]; if (hTable) { // Yikes! The handle table callbacks don't produce the handle type or // the AppDomain that we need, and it's too difficult to propogate out // these things (especially the type) without worrying about performance // implications for the GC. Instead we'll have the callback walk each // type individually. There are only a few handle types, and the handle // table has a fast-path for only walking a single type anyway. UINT32 handleType = 0; for (UINT32 mask = mTypeMask; mask; mask >>= 1, handleType++) { if (mask & 1) { dac_handle_table *pTable = hTable; PTR_AppDomain pDomain = SystemDomain::GetAppDomainAtIndex(ADIndex(pTable->uADIndex)); param.AppDomain = TO_CDADDR(pDomain.GetAddr()); param.Type = handleType; // Either enumerate the handles regularly, or walk the handle // table as the GC does if a generation filter was requested. if (mGenerationFilter != -1) HndScanHandlesForGC(hTable, callback, (LPARAM)&param, 0, &handleType, 1, mGenerationFilter, *g_gcDacGlobals->max_gen, 0); else HndEnumHandles(hTable, &handleType, 1, callback, (LPARAM)&param, 0, FALSE); } } } } } // Stop looping as soon as we have found data. We also stop if we have a failed HRESULT during // the callback (this should indicate OOM). 
mIndex++; } while (mHead.Count == 0 && SUCCEEDED(param.Result)); mCurr = mHead.Next; return true; } HRESULT DacHandleWalker::Skip(unsigned int celt) { return E_NOTIMPL; } HRESULT DacHandleWalker::Reset() { return E_NOTIMPL; } HRESULT DacHandleWalker::GetCount(unsigned int *pcelt) { return E_NOTIMPL; } void DacHandleWalker::GetRefCountedHandleInfo( OBJECTREF oref, unsigned int uType, unsigned int *pRefCount, unsigned int *pJupiterRefCount, BOOL *pIsPegged, BOOL *pIsStrong) { SUPPORTS_DAC; #ifdef FEATURE_COMINTEROP if (uType == HNDTYPE_REFCOUNTED) { // get refcount from the CCW PTR_ComCallWrapper pWrap = ComCallWrapper::GetWrapperForObject(oref); if (pWrap != NULL) { if (pRefCount) *pRefCount = (unsigned int)pWrap->GetRefCount(); if (pJupiterRefCount) *pJupiterRefCount = (unsigned int)pWrap->GetJupiterRefCount(); if (pIsPegged) *pIsPegged = pWrap->IsConsideredPegged(); if (pIsStrong) *pIsStrong = pWrap->IsWrapperActive(); return; } } #endif // FEATURE_COMINTEROP if (pRefCount) *pRefCount = 0; if (pJupiterRefCount) *pJupiterRefCount = 0; if (pIsPegged) *pIsPegged = FALSE; if (pIsStrong) *pIsStrong = FALSE; } void CALLBACK DacHandleWalker::EnumCallbackSOS(PTR_UNCHECKED_OBJECTREF handle, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2) { SUPPORTS_DAC; DacHandleWalkerParam *param = (DacHandleWalkerParam *)param1; HandleChunkHead *curr = param->Curr; // If we failed on a previous call (OOM) don't keep trying to allocate, it's not going to work. if (FAILED(param->Result)) return; // We've moved past the size of the current chunk. We'll allocate a new chunk // and stuff the handles there. These are cleaned up by the destructor if (curr->Count >= (curr->Size/sizeof(SOSHandleData))) { if (curr->Next == NULL) { HandleChunk *next = new (nothrow) HandleChunk; if (next != NULL) { curr->Next = next; } else { param->Result = E_OUTOFMEMORY; return; } } curr = param->Curr = param->Curr->Next; } // Fill the current handle. 
SOSHandleData *dataArray = (SOSHandleData*)curr->pData; SOSHandleData &data = dataArray[curr->Count++]; data.Handle = TO_CDADDR(handle.GetAddr()); data.Type = param->Type; if (param->Type == HNDTYPE_DEPENDENT) data.Secondary = GetDependentHandleSecondary(handle.GetAddr()).GetAddr(); #ifdef FEATURE_COMINTEROP else if (param->Type == HNDTYPE_WEAK_WINRT) data.Secondary = HndGetHandleExtraInfo(handle.GetAddr()); #endif // FEATURE_COMINTEROP else data.Secondary = 0; data.AppDomain = param->AppDomain; GetRefCountedHandleInfo((OBJECTREF)*handle, param->Type, &data.RefCount, &data.JupiterRefCount, &data.IsPegged, &data.StrongReference); data.StrongReference |= (BOOL)IsAlwaysStrongReference(param->Type); } DacStackReferenceWalker::DacStackReferenceWalker(ClrDataAccess *dac, DWORD osThreadID) : mDac(dac), m_instanceAge(dac ? dac->m_instanceAge : 0), mThread(0), mErrors(0), mEnumerated(false), mChunkIndex(0), mCurr(0), mIteratorIndex(0) { Thread *curr = NULL; for (curr = ThreadStore::GetThreadList(curr); curr; curr = ThreadStore::GetThreadList(curr)) { if (curr->GetOSThreadId() == osThreadID) { mThread = curr; break; } } } DacStackReferenceWalker::~DacStackReferenceWalker() { StackRefChunkHead *curr = mHead.next; while (curr) { StackRefChunkHead *tmp = curr; curr = curr->next; delete tmp; } } HRESULT DacStackReferenceWalker::Init() { if (!mThread) return E_INVALIDARG; return mHeap.Init(); } HRESULT STDMETHODCALLTYPE DacStackReferenceWalker::Skip(unsigned int count) { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE DacStackReferenceWalker::Reset() { return E_NOTIMPL; } HRESULT DacStackReferenceWalker::GetCount(unsigned int *pCount) { if (!pCount) return E_POINTER; SOSHelperEnter(); if (!mEnumerated) { // Fill out our data structures. 
WalkStack<unsigned int, SOSStackRefData>(0, NULL, DacStackReferenceWalker::GCReportCallbackSOS, DacStackReferenceWalker::GCEnumCallbackSOS); } unsigned int count = 0; for(StackRefChunkHead *curr = &mHead; curr; curr = curr->next) count += curr->count; *pCount = count; SOSHelperLeave(); return hr; } HRESULT DacStackReferenceWalker::Next(unsigned int count, SOSStackRefData stackRefs[], unsigned int *pFetched) { if (stackRefs == NULL || pFetched == NULL) return E_POINTER; SOSHelperEnter(); hr = DoStackWalk<unsigned int, SOSStackRefData, DacStackReferenceWalker::GCReportCallbackSOS, DacStackReferenceWalker::GCEnumCallbackSOS> (count, stackRefs, pFetched); SOSHelperLeave(); return hr; } HRESULT DacStackReferenceWalker::EnumerateErrors(ISOSStackRefErrorEnum **ppEnum) { if (!ppEnum) return E_POINTER; SOSHelperEnter(); if (mThread) { // Fill out our data structures. WalkStack<unsigned int, SOSStackRefData>(0, NULL, DacStackReferenceWalker::GCReportCallbackSOS, DacStackReferenceWalker::GCEnumCallbackSOS); } DacStackReferenceErrorEnum *pEnum = new DacStackReferenceErrorEnum(this, mErrors); hr = pEnum->QueryInterface(__uuidof(ISOSStackRefErrorEnum), (void**)ppEnum); SOSHelperLeave(); return hr; } CLRDATA_ADDRESS DacStackReferenceWalker::ReadPointer(TADDR addr) { ULONG32 bytesRead = 0; TADDR result = 0; HRESULT hr = mDac->m_pTarget->ReadVirtual(addr, (BYTE*)&result, sizeof(TADDR), &bytesRead); if (FAILED(hr) || (bytesRead != sizeof(TADDR))) return (CLRDATA_ADDRESS)~0; return TO_CDADDR(result); } void DacStackReferenceWalker::GCEnumCallbackSOS(LPVOID hCallback, OBJECTREF *pObject, uint32_t flags, DacSlotLocation loc) { GCCONTEXT *gcctx = (GCCONTEXT *)hCallback; DacScanContext *dsc = (DacScanContext*)gcctx->sc; // Yuck. The GcInfoDecoder reports a local pointer for registers (as it's reading out of the REGDISPLAY // in the stack walk), and it reports a TADDR for stack locations. This is architecturally difficulty // to fix, so we are leaving it for now. 
TADDR addr = 0; TADDR obj = 0; if (loc.targetPtr) { addr = (TADDR)pObject; obj = TO_TADDR(dsc->pWalker->ReadPointer((CORDB_ADDRESS)addr)); } else { obj = pObject->GetAddr(); } if (flags & GC_CALL_INTERIOR) { CORDB_ADDRESS fixed_obj = 0; HRESULT hr = dsc->pWalker->mHeap.ListNearObjects((CORDB_ADDRESS)obj, NULL, &fixed_obj, NULL); // If we failed...oh well, SOS won't mind. We'll just report the interior pointer as is. if (SUCCEEDED(hr)) obj = TO_TADDR(fixed_obj); } SOSStackRefData *data = dsc->pWalker->GetNextObject<SOSStackRefData>(dsc); if (data != NULL) { // Report where the object and where it was found. data->HasRegisterInformation = true; data->Register = loc.reg; data->Offset = loc.regOffset; data->Address = TO_CDADDR(addr); data->Object = TO_CDADDR(obj); data->Flags = flags; // Report the frame that the data came from. data->StackPointer = TO_CDADDR(dsc->sp); if (dsc->pFrame) { data->SourceType = SOS_StackSourceFrame; data->Source = dac_cast<PTR_Frame>(dsc->pFrame).GetAddr(); } else { data->SourceType = SOS_StackSourceIP; data->Source = TO_CDADDR(dsc->pc); } } } void DacStackReferenceWalker::GCReportCallbackSOS(PTR_PTR_Object ppObj, ScanContext *sc, uint32_t flags) { DacScanContext *dsc = (DacScanContext*)sc; CLRDATA_ADDRESS obj = dsc->pWalker->ReadPointer(ppObj.GetAddr()); if (flags & GC_CALL_INTERIOR) { CORDB_ADDRESS fixed_addr = 0; HRESULT hr = dsc->pWalker->mHeap.ListNearObjects((CORDB_ADDRESS)obj, NULL, &fixed_addr, NULL); // If we failed...oh well, SOS won't mind. We'll just report the interior pointer as is. 
if (SUCCEEDED(hr)) obj = TO_CDADDR(fixed_addr); } SOSStackRefData *data = dsc->pWalker->GetNextObject<SOSStackRefData>(dsc); if (data != NULL) { data->HasRegisterInformation = false; data->Register = 0; data->Offset = 0; data->Address = ppObj.GetAddr(); data->Object = obj; data->Flags = flags; data->StackPointer = TO_CDADDR(dsc->sp); if (dsc->pFrame) { data->SourceType = SOS_StackSourceFrame; data->Source = dac_cast<PTR_Frame>(dsc->pFrame).GetAddr(); } else { data->SourceType = SOS_StackSourceIP; data->Source = TO_CDADDR(dsc->pc); } } } StackWalkAction DacStackReferenceWalker::Callback(CrawlFrame *pCF, VOID *pData) { // // KEEP IN SYNC WITH GcStackCrawlCallBack in vm\gcscan.cpp // GCCONTEXT *gcctx = (GCCONTEXT*)pData; DacScanContext *dsc = (DacScanContext*)gcctx->sc; MethodDesc *pMD = pCF->GetFunction(); gcctx->sc->pMD = pMD; gcctx->sc->pCurrentDomain = pCF->GetAppDomain(); PREGDISPLAY pRD = pCF->GetRegisterSet(); dsc->sp = (TADDR)GetRegdisplaySP(pRD);; dsc->pc = PCODEToPINSTR(GetControlPC(pRD)); ResetPointerHolder<CrawlFrame*> rph(&gcctx->cf); gcctx->cf = pCF; bool fReportGCReferences = true; #if defined(WIN64EXCEPTIONS) // On Win64 and ARM, we may have unwound this crawlFrame and thus, shouldn't report the invalid // references it may contain. // todo. 
fReportGCReferences = pCF->ShouldCrawlframeReportGCReferences(); #endif // defined(WIN64EXCEPTIONS) Frame *pFrame = ((DacScanContext*)gcctx->sc)->pFrame = pCF->GetFrame(); EX_TRY { if (fReportGCReferences) { if (pCF->IsFrameless()) { ICodeManager * pCM = pCF->GetCodeManager(); _ASSERTE(pCM != NULL); unsigned flags = pCF->GetCodeManagerFlags(); pCM->EnumGcRefs(pCF->GetRegisterSet(), pCF->GetCodeInfo(), flags, dsc->pEnumFunc, pData); } else { pFrame->GcScanRoots(gcctx->f, gcctx->sc); } } } EX_CATCH { SOSStackErrorList *err = new SOSStackErrorList; err->pNext = NULL; if (pFrame) { err->error.SourceType = SOS_StackSourceFrame; err->error.Source = dac_cast<PTR_Frame>(pFrame).GetAddr(); } else { err->error.SourceType = SOS_StackSourceIP; err->error.Source = TO_CDADDR(dsc->pc); } if (dsc->pWalker->mErrors == NULL) { dsc->pWalker->mErrors = err; } else { // This exception case should be non-existent. It only happens when there is either // a clr!Frame on the callstack which is not properly dac-ized, or when a call down // EnumGcRefs causes a data read exception. Since this is so rare, we don't worry // about making this code very efficient. SOSStackErrorList *curr = dsc->pWalker->mErrors; while (curr->pNext) curr = curr->pNext; curr->pNext = err; } } EX_END_CATCH(SwallowAllExceptions) #if 0 // todo // If we're executing a LCG dynamic method then we must promote the associated resolver to ensure it // doesn't get collected and yank the method code out from under us). // Be careful to only promote the reference -- we can also be called to relocate the reference and // that can lead to all sorts of problems since we could be racing for the relocation with the long // weak handle we recover the reference from. Promoting the reference is enough, the handle in the // reference will be relocated properly as long as we keep it alive till the end of the collection // as long as the reference is actually maintained by the long weak handle. 
if (pMD) { BOOL fMaybeCollectibleMethod = TRUE; // If this is a frameless method then the jitmanager can answer the question of whether // or not this is LCG simply by looking at the heap where the code lives, however there // is also the prestub case where we need to explicitly look at the MD for stuff that isn't // ngen'd if (pCF->IsFrameless() && pMD->IsLCGMethod()) { fMaybeCollectibleMethod = ExecutionManager::IsCollectibleMethod(pCF->GetMethodToken()); } if (fMaybeCollectibleMethod && pMD->IsLCGMethod()) { PTR_Object obj = OBJECTREFToObject(pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver()); dsc->pWalker->ReportObject(obj); } else { if (fMaybeCollectibleMethod) { PTR_Object obj = pMD->GetLoaderAllocator()->GetExposedObject(); dsc->pWalker->ReportObject(obj); } if (fReportGCReferences) { GenericParamContextType paramContextType = GENERIC_PARAM_CONTEXT_NONE; if (pCF->IsFrameless()) { // We need to grab the Context Type here because there are cases where the MethodDesc // is shared, and thus indicates there should be an instantion argument, but the JIT // was still allowed to optimize it away and we won't grab it below because we're not // reporting any references from this frame. 
paramContextType = pCF->GetCodeManager()->GetParamContextType(pCF->GetRegisterSet(), pCF->GetCodeInfo()); } else { if (pMD->RequiresInstMethodDescArg()) paramContextType = GENERIC_PARAM_CONTEXT_METHODDESC; else if (pMD->RequiresInstMethodTableArg()) paramContextType = GENERIC_PARAM_CONTEXT_METHODTABLE; } // Handle the case where the method is a static shared generic method and we need to keep the type of the generic parameters alive if (paramContextType == GENERIC_PARAM_CONTEXT_METHODDESC) { MethodDesc *pMDReal = dac_cast<PTR_MethodDesc>(pCF->GetParamTypeArg()); _ASSERTE((pMDReal != NULL) || !pCF->IsFrameless()); if (pMDReal != NULL) { PTR_Object obj = pMDReal->GetLoaderAllocator()->GetExposedObject(); dsc->pWalker->ReportObject(obj); } } else if (paramContextType == GENERIC_PARAM_CONTEXT_METHODTABLE) { MethodTable *pMTReal = dac_cast<PTR_MethodTable>(pCF->GetParamTypeArg()); _ASSERTE((pMTReal != NULL) || !pCF->IsFrameless()); if (pMTReal != NULL) { PTR_Object obj = pMTReal->GetLoaderAllocator()->GetExposedObject(); dsc->pWalker->ReportObject(obj); } } } } } #endif return SWA_CONTINUE; } DacStackReferenceErrorEnum::DacStackReferenceErrorEnum(DacStackReferenceWalker *pEnum, SOSStackErrorList *pErrors) : mEnum(pEnum), mHead(pErrors), mCurr(pErrors) { _ASSERTE(mEnum); if (mHead != NULL) mEnum->AddRef(); } DacStackReferenceErrorEnum::~DacStackReferenceErrorEnum() { if (mHead) mEnum->Release(); } HRESULT DacStackReferenceErrorEnum::Skip(unsigned int count) { unsigned int i = 0; for (i = 0; i < count && mCurr; ++i) mCurr = mCurr->pNext; return i < count ? 
S_FALSE : S_OK; } HRESULT DacStackReferenceErrorEnum::Reset() { mCurr = mHead; return S_OK; } HRESULT DacStackReferenceErrorEnum::GetCount(unsigned int *pCount) { SOSStackErrorList *curr = mHead; unsigned int count = 0; while (curr) { curr = curr->pNext; count++; } *pCount = count; return S_OK; } HRESULT DacStackReferenceErrorEnum::Next(unsigned int count, SOSStackRefError ref[], unsigned int *pFetched) { if (pFetched == NULL || ref == NULL) return E_POINTER; unsigned int i; for (i = 0; i < count && mCurr; ++i, mCurr = mCurr->pNext) ref[i] = mCurr->error; *pFetched = i; return i < count ? S_FALSE : S_OK; }
#include "Mouse.hpp"

#include <GLFW/glfw3.h>

#include "Bitmaps/Bitmap.hpp"
#include "Maths/Maths.hpp"

namespace acid {
// GLFW mouse-button callback: forwards the event to the Mouse button delegate.
void CallbackMouseButton(GLFWwindow *window, int32_t button, int32_t action, int32_t mods) {
	Mouse::Get()->m_onButton(static_cast<MouseButton>(button), static_cast<InputAction>(action), MakeBitMask<InputMod>(mods));
}

// GLFW cursor-position callback: caches the position and notifies listeners.
void CallbackCursorPos(GLFWwindow *window, double xpos, double ypos) {
	Mouse::Get()->m_position = {xpos, ypos};
	Mouse::Get()->m_onPosition(Mouse::Get()->m_position);
}

// GLFW cursor-enter callback: tracks whether the cursor is over the window.
void CallbackCursorEnter(GLFWwindow *window, int32_t entered) {
	Mouse::Get()->m_windowSelected = entered == GLFW_TRUE;
	Mouse::Get()->m_onEnter(entered == GLFW_TRUE);
}

// GLFW scroll callback: caches the scroll offsets and notifies listeners.
void CallbackScroll(GLFWwindow *window, double xoffset, double yoffset) {
	// BUG FIX: previously assigned {yoffset, yoffset}, discarding the horizontal
	// scroll offset GLFW delivers as the first parameter.
	Mouse::Get()->m_scroll = {xoffset, yoffset};
	Mouse::Get()->m_onScroll(Mouse::Get()->m_scroll);
}

// GLFW path-drop callback: copies the dropped paths and notifies listeners.
void CallbackDrop(GLFWwindow *window, int32_t count, const char **paths) {
	std::vector<std::string> files(static_cast<uint32_t>(count));
	for (uint32_t i = 0; i < static_cast<uint32_t>(count); i++) {
		files[i] = paths[i];
	}

	Mouse::Get()->m_onDrop(files);
}

// Registers all mouse-related GLFW callbacks on the engine window.
Mouse::Mouse() {
	glfwSetMouseButtonCallback(Window::Get()->GetWindow(), CallbackMouseButton);
	glfwSetCursorPosCallback(Window::Get()->GetWindow(), CallbackCursorPos);
	glfwSetCursorEnterCallback(Window::Get()->GetWindow(), CallbackCursorEnter);
	glfwSetScrollCallback(Window::Get()->GetWindow(), CallbackScroll);
	glfwSetDropCallback(Window::Get()->GetWindow(), CallbackDrop);
}

Mouse::~Mouse() {
	// glfwDestroyCursor accepts a null cursor, so no guard is needed.
	glfwDestroyCursor(m_cursor);
}

// Per-frame update: derives position and scroll deltas scaled by frame time.
void Mouse::Update() {
	auto delta = Engine::Get()->GetDelta().AsSeconds();

	// Updates the position delta.
	m_positionDelta = delta * (m_lastPosition - m_position);
	m_lastPosition = m_position;

	// Updates the scroll delta.
	m_scrollDelta = delta * (m_lastScroll - m_scroll);
	m_lastScroll = m_scroll;
}

// Loads a bitmap from `filename` and installs it as the window cursor with the
// given hotspot.  No-op if the same cursor/hotspot pair is already active.
void Mouse::SetCursor(const std::filesystem::path &filename, CursorHotspot hotspot) {
	if (m_currentCursor && m_currentCursor->first == filename && m_currentCursor->second == hotspot) {
		return;
	}

	Bitmap bitmap(filename);
	if (!bitmap) return;

	GLFWimage image[1];
	image[0].width = bitmap.GetSize().m_x;
	image[0].height = bitmap.GetSize().m_y;
	image[0].pixels = bitmap.GetData().get();

	glfwDestroyCursor(m_cursor);

	// Hotspot is expressed relative to the image bounds.
	switch (hotspot) {
	case CursorHotspot::UpperLeft:
		m_cursor = glfwCreateCursor(image, 0, 0);
		break;
	case CursorHotspot::UpperRight:
		m_cursor = glfwCreateCursor(image, image->width - 1, 0);
		break;
	case CursorHotspot::BottomLeft:
		m_cursor = glfwCreateCursor(image, 0, image->height - 1);
		break;
	case CursorHotspot::BottomRight:
		m_cursor = glfwCreateCursor(image, image->width - 1, image->height - 1);
		break;
	case CursorHotspot::Centered:
		m_cursor = glfwCreateCursor(image, image->width / 2, image->height / 2);
		break;
	}

	glfwSetCursor(Window::Get()->GetWindow(), m_cursor);
	m_currentCursor = {filename, hotspot};
	m_currentStandard = std::nullopt;
}

// Installs one of GLFW's standard cursors.  No-op if already active.
void Mouse::SetCursor(CursorStandard standard) {
	if (m_currentStandard == standard) {
		return;
	}

	glfwDestroyCursor(m_cursor);

	m_cursor = glfwCreateStandardCursor(static_cast<int32_t>(standard));

	glfwSetCursor(Window::Get()->GetWindow(), m_cursor);
	m_currentCursor = std::nullopt;
	m_currentStandard = standard;
}

std::string Mouse::GetClipboard() const {
	return glfwGetClipboardString(Window::Get()->GetWindow());
}

void Mouse::SetClipboard(const std::string &string) const {
	glfwSetClipboardString(Window::Get()->GetWindow(), string.c_str());
}

// Queries the current press state of a mouse button directly from GLFW.
InputAction Mouse::GetButton(MouseButton mouseButton) const {
	auto state = glfwGetMouseButton(Window::Get()->GetWindow(), static_cast<int32_t>(mouseButton));
	return static_cast<InputAction>(state);
}

// Warps the cursor to `position`; last position is synced so no delta results.
void Mouse::SetPosition(const Vector2d &position) {
	m_lastPosition = position;
	m_position = position;
	glfwSetCursorPos(Window::Get()->GetWindow(), m_position.m_x, m_position.m_y);
}

// Overrides the cached scroll value; last scroll is synced so no delta results.
void Mouse::SetScroll(const Vector2d &scroll) {
	m_lastScroll = scroll;
	m_scroll = scroll;
}

// Shows/hides (captures) the cursor.  Re-showing restores the cached position.
void Mouse::SetCursorHidden(bool hidden) {
	if (m_cursorHidden != hidden) {
		glfwSetInputMode(Window::Get()->GetWindow(), GLFW_CURSOR, hidden ? GLFW_CURSOR_DISABLED : GLFW_CURSOR_NORMAL);

		if (!hidden && m_cursorHidden) {
			SetPosition(m_position);
		}
	}

	m_cursorHidden = hidden;
}

// Decays a scroll-wheel value toward zero over `delta` seconds, with a
// deadband so small residues snap to exactly zero.
double Mouse::SmoothScrollWheel(double value, float delta) {
	if (value != 0.0) {
		value -= static_cast<double>(delta) * std::copysign(3.0, value);
		value = Maths::Deadband(0.08, value);
		return value;
	}

	return 0.0;
}
}
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 25.11.2017.
//

#include <system/op_boilerplate.h>
#if NOT_EXCLUDED(OP_softmax_cross_entropy_loss)

#include <ops/declarable/CustomOperations.h>

namespace sd {
namespace ops  {

//////////////////////////////////////////////////////////////////////////
// Forward pass of softmax cross-entropy loss.
// Inputs:  0 - logits, 1 - weights (scalar or broadcastable to loss shape),
//          2 - labels (same shape as logits).
// Output:  0 - loss (per-sample when reduction mode is 0, scalar otherwise).
// IArgs:   0 - reduction mode. TArgs: 0 - label smoothing factor.
CUSTOM_OP_IMPL(softmax_cross_entropy_loss, 3, 1, false, 1, 1) {
    auto logits  = INPUT_VARIABLE(0);
    auto weights = INPUT_VARIABLE(1);
    auto labels  = INPUT_VARIABLE(2);
    auto output  = OUTPUT_VARIABLE(0);

    int reductionMode = INT_ARG(0);         // 0 - "none"; 1 - "weighted_sum"; 2 - "weighted_mean"; 3 - "weighted_sum_by_nonzero_weights"
    double labelsSmoothing = T_ARG(0);

    // input validation
    REQUIRE_TRUE(labels->isSameShape(logits), 0, "SOFTMAX_CROSS_ENTROPY_LOSS OP: labels and logits arrays must have the same shapes, but got %s and %s correspondingly !", ShapeUtils::shapeAsString(labels).c_str(), ShapeUtils::shapeAsString(logits).c_str());
    // only 4 possible reduction modes exist
    REQUIRE_TRUE(reductionMode==0 || reductionMode==1 || reductionMode==2 || reductionMode==3, 0, "SOFTMAX_CROSS_ENTROPY_LOSS OP: reduction mode value is not acceptable, possible values are 0, 1, 2, 3, but got %i instead!", reductionMode);
    // smoothing is possible for rank of logits/labels > 1
    REQUIRE_TRUE(labels->rankOf() > 1 || (labels->rankOf() == 1 && labelsSmoothing == 0.), 0, "SOFTMAX_CROSS_ENTROPY_LOSS OP: smoothing is not possible when rank of labels/ logits = 1 !");
    if(!output->isScalar()) {
        // weights array can be single scalar or has the same shape as output, and must be broadcastable to output shape
        REQUIRE_TRUE(weights->isScalar() || weights->rankOf() == output->rankOf(), 0, "SOFTMAX_CROSS_ENTROPY_LOSS OP: weights array should be scalar or have the same rank as output array, but got %i and %i correspondingly!", weights->rankOf(), output->rankOf());
        // check whether broadcast operation is possible for weights array
        // NOTE(review): the second %s below is fed the *labels* shape, not the
        // output shape — looks like a copy/paste slip in the message args; confirm.
        REQUIRE_TRUE(weights->isScalar() || ShapeUtils::areShapesBroadcastable(*weights, *output), 0, "SOFTMAX_CROSS_ENTROPY_LOSS OP: shapes of weights and output arrays should be broadcastable, but got weights = %s and output = %s instead!", ShapeUtils::shapeAsString(weights).c_str(), ShapeUtils::shapeAsString(labels).c_str());
    }

    // If label_smoothing is nonzero, smooth the labels towards 1/num_classes: new_onehot_labels = onehot_labels * (1 - label_smoothing) + label_smoothing / num_classes
    // num_classes = labels->sizeAt(1)
    // Labels are cast to the weights' (floating) data type before arithmetic.
    NDArray* cLabels = new NDArray(labels->cast(weights->dataType()));
    NDArray* newLabels = cLabels;
    if(labelsSmoothing != 0.) {
        newLabels = new NDArray(cLabels);
        newLabels->assign((1.f - labelsSmoothing) * *cLabels + labelsSmoothing / cLabels->sizeAt(1));
    }

    // main formula: result = - sum_i(lables_i * log(softmax_i)) - sum over last dimension
    // softmax_i = exp(logits_i) / sum_j(exp(logits_j))
    // so result = sum_i( lables_i * (log(sum_j(exp(logits_j))) - logits_i) )
    // for numerical stability we use shifted logits (one can approve this using simple math):
    // softmax_i = exp(logits_i - maxLogit) / sum_j(exp(logits_j - maxLogit))
    // maxLogit is max among logits_i
    std::vector<int> dimensions = {-1};

    NDArray shiftedLogits = *logits - logits->reduceAlongDimension(reduce::Max, dimensions, true);
    NDArray logSumExp = shiftedLogits.transform(transform::Exp).reduceAlongDimension(reduce::Sum, dimensions, true).transform(transform::Log);
    NDArray E = (*newLabels * (logSumExp - shiftedLogits)).reduceAlongDimension(reduce::Sum, dimensions);

    // perform weights broadcasting/tile to E if it is necessary
    auto weightsBroad = weights;
    if(!weights->isScalar() && !weights->isSameShape(&E)) {
        if(E.rankOf() == 1 && weights->isVector() && weights->rankOf() > 1)
            // collapse a [1 x n]/[n x 1] weights vector to rank-1 rather than tiling
            weightsBroad = new NDArray(weights->reshape(weights->ordering(), {weights->lengthOf()}));
        else
            weightsBroad = new NDArray(weights->tileToShape(E.shapeInfo()));
    }

    // multiply E on weights
    E *= *weightsBroad;

    switch (reductionMode) {
        case 0:                                             // 0 - "none", un-reduced weighted losses with the same shape as labels.
            output->assign(&E);
            break;

        case 1: {                                           // 1 - "weighted_sum", output is scalar and equal to sum of all elements of E array
            E.reduceNumber(reduce::Sum, *output);
            break;
        }
        case 2: {                                           // 2 - "weighted_mean", output is scalar and equal to sum of all elements of E array divided by sum of all elements of weightsBroad array
            double sum;
            if (weights->isScalar())
                sum = weights->e<double>(0) * E.lengthOf();
            else
                sum = weightsBroad->reduceNumber(reduce::Sum).e<double>(0);

            // guard against division by zero when all weights are zero
            if (sum == 0.)
                *output = 0.;
            else
                output->assign(E.reduceNumber(reduce::Sum) / sum);
            break;
        }
        case 3: {                                           // 3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of E array divided by number of non-zero weights
            Nd4jLong numOfNonZeroWeights = 0;
            if(weights->isScalar()) {
                if(weights->e<double>(0) != 0.)
                    numOfNonZeroWeights = E.lengthOf();
            }
            else {
                numOfNonZeroWeights = weightsBroad->reduceNumber(reduce::CountNonZero).e<Nd4jLong>(0);
            }

            if (numOfNonZeroWeights == 0)
                *output = 0.;
            else
                output->assign(E.reduceNumber(reduce::Sum) / double(numOfNonZeroWeights));
            break;
        }
    }

    // release the heap copies created above (weightsBroad/newLabels only when
    // they do not alias the originals)
    if(weightsBroad != weights)
        delete weightsBroad;
    if(newLabels != cLabels)
        delete newLabels;
    delete cLabels;

    return Status::OK();
}

//////////////////////////////////////////////////////////////////////////
DECLARE_TYPES(softmax_cross_entropy_loss) {
    getOpDescriptor()->setAllowedInputTypes(0, {ALL_FLOATS})
                     ->setAllowedInputTypes(1, {ALL_FLOATS})
                     ->setAllowedInputTypes(2, {ALL_FLOATS, ALL_INTS})
                     ->setAllowedOutputTypes({ALL_FLOATS});
}

//////////////////////////////////////////////////////////////////////////
// Shape function: scalar for any reducing mode, otherwise the logits shape
// reduced over the last dimension.
DECLARE_SHAPE_FN(softmax_cross_entropy_loss) {
    auto logitsShapeInfo  = inputShape->at(0);
    auto weightsShapeInfo = inputShape->at(1);
    auto labelsShapeInfo  = inputShape->at(2);

    // labels and logits must have the same shapes
    REQUIRE_TRUE(shape::shapeEquals(logitsShapeInfo, labelsShapeInfo), 0, "SOFTMAX_CROSS_ENTROPY_LOSS OP: labels and logits arrays must have the same shapes, but got %s and %s correspondingly!", ShapeUtils::shapeAsString(labelsShapeInfo).c_str(), ShapeUtils::shapeAsString(logitsShapeInfo).c_str());

    DataType outType = DataTypeUtils::pickFloatingType(ArrayOptions::dataType(logitsShapeInfo));

    Nd4jLong const* outShapeInfo = nullptr;

    if(INT_ARG(0) != 0)            // in this case output is scalar
        outShapeInfo = ConstantShapeHelper::getInstance()->scalarShapeInfo(outType);
    else {                         // in this case output has the shape as labels and logits minus last dimension
        std::vector<int> dimensions = {-1};
        outShapeInfo = ShapeUtils::evalReduceShapeInfo(shape::order(logitsShapeInfo), dimensions, logitsShapeInfo, false, true, block.getWorkspace());

        // weights array can be single scalar or has the same rank as output, and must be broadcastable to output
        REQUIRE_TRUE(shape::isScalar(weightsShapeInfo) || shape::rank(weightsShapeInfo) == shape::rank(outShapeInfo), 0, "SOFTMAX_CROSS_ENTROPY_LOSS OP: weights array should be scalar or have the same rank as output array, but got %i and %i correspondingly!", shape::rank(weightsShapeInfo), shape::rank(outShapeInfo));
        // check whether broadcast operation is possible for weights array
        REQUIRE_TRUE(shape::isScalar(weightsShapeInfo) || ShapeUtils::areShapesBroadcastable(weightsShapeInfo, outShapeInfo), 0, "SOFTMAX_CROSS_ENTROPY_LOSS OP: shapes of weights and output arrays should be broadcastable, but got weights = %s and output = %s instead!", ShapeUtils::shapeAsString(weightsShapeInfo).c_str(), ShapeUtils::shapeAsString(outShapeInfo).c_str());
    }

    return SHAPELIST(outShapeInfo);
}

//////////////////////////////////////////////////////////////////////////
// Backward pass: produces gradients w.r.t. logits (dLdp), weights (dLdw) and
// labels (dLdl).  Reduction mode 0 is treated the same as 1 for gradients.
CUSTOM_OP_IMPL(softmax_cross_entropy_loss_grad, 3, 3, false, 1, 1) {
    auto logits  = INPUT_VARIABLE(0);
    auto weights = INPUT_VARIABLE(1);
    auto labels  = INPUT_VARIABLE(2);

    auto dLdp = OUTPUT_VARIABLE(0);      // dL/dlogits
    auto dLdw = OUTPUT_VARIABLE(1);      // dL/dweights
    auto dLdl = OUTPUT_VARIABLE(2);      // dL/dlabels

    auto labelsSmoothing = T_ARG(0);

    int reductionMode = INT_ARG(0);      // 0 - "none"; 1 - "weighted_sum"; 2 - "weighted_mean"; 3 - "weighted_sum_by_nonzero_weights"
    // take into account Alex's proposition to treat "none" the same as "weighted_sum" mode when calculating gradients
    if(reductionMode == 0)
        reductionMode = 1;

    std::vector<int> dimensions = {-1};

    // input validation
    REQUIRE_TRUE(labels->isSameShape(logits), 0, "SOFTMAX_CROSS_ENTROPY_LOSS_GRAD OP: labels and logits arrays must have the same shapes, but got %s and %s correspondingly !", ShapeUtils::shapeAsString(labels).c_str(), ShapeUtils::shapeAsString(logits).c_str());
    // only 4 possible reduction modes exist
    REQUIRE_TRUE(reductionMode==0 || reductionMode==1 || reductionMode==2 || reductionMode==3, 0, "SOFTMAX_CROSS_ENTROPY_LOSS_GRAD OP: reduction mode value is not acceptable, possible values are 0, 1, 2, 3, but got %i instead!", reductionMode);

    auto lossShapeInfo = ShapeUtils::evalReduceShapeInfo(logits->ordering(), dimensions, logits->shapeInfo(), false, false, block.getWorkspace());
    // weights array can be single scalar or has the same shape as loss, and must be broadcastable to loss shape
    REQUIRE_TRUE(weights->isScalar() || weights->rankOf() == shape::rank(lossShapeInfo), 0, "SOFTMAX_CROSS_ENTROPY_LOSS_GRAD OP: weights array should be scalar or have the same rank as loss array, but got %i and %i correspondingly!", weights->rankOf(), shape::rank(lossShapeInfo));
    // check whether broadcast operation is possible for weights array
    REQUIRE_TRUE(weights->isScalar() || ShapeUtils::areShapesBroadcastable(weights->shapeInfo(), lossShapeInfo), 0, "SOFTMAX_CROSS_ENTROPY_LOSS_GRAD OP: shapes of weights and loss arrays should be broadcastable, but got weights = %s and loss = %s instead!", ShapeUtils::shapeAsString(weights).c_str(), ShapeUtils::shapeAsString(lossShapeInfo).c_str());
    // smoothing is possible for rank of logits/labels > 1
    REQUIRE_TRUE(labels->rankOf() > 1 || (labels->rankOf() == 1 && labelsSmoothing == 0.), 0, "SOFTMAX_CROSS_ENTROPY_LOSS_GRAD OP: smoothing is not possible when rank of labels/ logits = 1 !");

    // If label_smoothing is nonzero, smooth the labels towards 1/num_classes: new_onehot_labels = onehot_labels * (1 - label_smoothing) + label_smoothing / num_classes
    // num_classes = labels->sizeAt(1)
    NDArray* cLabels = new NDArray(labels->cast(weights->dataType()));
    NDArray* newLabels = cLabels;
    if(labelsSmoothing != 0.) {
        newLabels = new NDArray(labels->shapeInfo(), dLdl->dataType(), false, block.launchContext());
        newLabels->assign((1.f - labelsSmoothing) * *cLabels + labelsSmoothing / cLabels->sizeAt(1));
    }

    // numerically stable softmax over the last dimension
    NDArray softmax = (*logits - logits->reduceAlongDimension(reduce::Max, dimensions, true)).transform(transform::Exp);
    softmax /= softmax.reduceAlongDimension(reduce::Sum, dimensions, true);

    // dEdp = softmax * sum_i(lables_i) - labels
    dLdp->assign(softmax * newLabels->reduceAlongDimension(reduce::Sum, dimensions, true) - *newLabels);

    // dEdl = -log(softmax)
    dLdl->assign(-softmax.transform(transform::Log)* (1.f - labelsSmoothing));

    // recompute the (unweighted) per-sample loss E, needed for dLdw
    NDArray shiftedLogits = *logits - logits->reduceAlongDimension(reduce::Max, dimensions, true);
    NDArray logSumExp = shiftedLogits.transform(transform::Exp).reduceAlongDimension(reduce::Sum, dimensions, true).transform(transform::Log);
    NDArray E = (*newLabels * (logSumExp - shiftedLogits)).reduceAlongDimension(reduce::Sum, dimensions);

    // perform weights broadcasting/tile to E if it is necessary
    auto weightsBroad = weights;
    if(!weights->isScalar() && !weights->isSameShape(&E))
        weightsBroad = new NDArray(weights->tileToShape(E.shapeInfo()));

    // from here on, `dimensions` holds the axes of dLdp/dLdl NOT reduced over
    dimensions = ShapeUtils::evalDimsToExclude(dLdp->rankOf(), dimensions);

    switch (reductionMode) {

        case 1: {                                           // 1 - "none" and "weighted_sum", output is scalar and equal to sum of all elements of E array
            if(weights->isScalar() || weights->lengthOf() == 1) {
                dLdw->assign(E.reduceNumber(reduce::Sum));
                *dLdp *= *weights;
                *dLdl *= *weights;
            }
            else {
                dLdp->applyBroadcast(sd::broadcast::Multiply, dimensions, *weightsBroad, *dLdp);
                dLdl->applyBroadcast(sd::broadcast::Multiply, dimensions, *weightsBroad, *dLdl);

                if(weights != weightsBroad) {
                    std::vector<int> axesToReduceAlong = ShapeUtils::evalBroadcastBackwardAxis(weights->shapeInfo(), weightsBroad->shapeInfo());
                    E.reduceAlongDimension(reduce::Sum, *dLdw, axesToReduceAlong, true, false, false);
                }
                else
                    dLdw->assign(E);
            }
            break;
        }
        case 2: {                                           // 2 - "weighted_mean", output is scalar and equal to sum of all elements of E array divided by sum of all elements of weightsBroad array

            NDArray sum;
            if (weights->isScalar())
                sum = (*weights) * E.lengthOf();
            else
                sum = weightsBroad->reduceNumber(reduce::Sum);

            // if the weight sum is zero every gradient is defined as zero
            if (sum.e<double>(0) == 0.) {
                *dLdp = 0.;
                *dLdl = 0.;
                *dLdw = 0.;
            }
            else {
                if(weights->isScalar() || weights->lengthOf() == 1) {
                    NDArray temp = *weights / sum;
                    *dLdp *= temp;
                    *dLdl *= temp;
                    *dLdw = 0.;
                }
                else {
                    NDArray temp = *weightsBroad / sum;
                    dLdp->applyBroadcast(sd::broadcast::Multiply, dimensions, temp, *dLdp);
                    dLdl->applyBroadcast(sd::broadcast::Multiply, dimensions, temp, *dLdl);

                    if(weights != weightsBroad) {
                        std::vector<int> axesToReduceAlong = ShapeUtils::evalBroadcastBackwardAxis(weights->shapeInfo(), weightsBroad->shapeInfo());
                        // quotient rule: d/dw (sum(E*w)/sum(w))
                        ((E * sum - (E * *weightsBroad).reduceNumber(reduce::Sum)) / (sum*sum)).reduceAlongDimension(reduce::Sum, *dLdw, axesToReduceAlong, true, false, false);
                    }
                    else
                        dLdw->assign((E * sum - (E * *weightsBroad).reduceNumber(reduce::Sum)) / (sum*sum));
                }
            }
            break;
        }
        case 3: {                                           // 3 - "weighted_sum_by_nonzero_weights", output is scalar and equal to scalar sum of all elements of E array divided by number of non-zero weights

            Nd4jLong numOfNonZeroWeights = 0;
            if(weights->isScalar()) {
                if(weights->e<double>(0) != 0.)
                    numOfNonZeroWeights = E.lengthOf();
            }
            else
                numOfNonZeroWeights = weightsBroad->reduceNumber(reduce::CountNonZero).e<Nd4jLong>(0);

            if (numOfNonZeroWeights == 0) {
                *dLdp = 0.;
                *dLdl = 0.;
                *dLdw = 0.;
            }
            else {
                if(weights->isScalar() || weights->lengthOf() == 1) {
                    NDArray temp = *weights / numOfNonZeroWeights;
                    *dLdp *= temp;
                    *dLdl *= temp;
                    dLdw->assign(E.reduceNumber(reduce::Sum) / numOfNonZeroWeights);
                }
                else {
                    NDArray temp = *weightsBroad / numOfNonZeroWeights;
                    dLdp->applyBroadcast(sd::broadcast::Multiply, dimensions, temp, *dLdp);
                    dLdl->applyBroadcast(sd::broadcast::Multiply, dimensions, temp, *dLdl);

                    if(weights != weightsBroad) {
                        std::vector<int> axesToReduceAlong = ShapeUtils::evalBroadcastBackwardAxis(weights->shapeInfo(), weightsBroad->shapeInfo());
                        E.reduceAlongDimension(reduce::Sum, *dLdw, axesToReduceAlong, true, false, false);
                        *dLdw /= numOfNonZeroWeights;
                    }
                    else
                        dLdw->assign(E / numOfNonZeroWeights);
                }
            }
            break;
        }
    }

    // release heap temporaries (only when they don't alias the inputs)
    if(weightsBroad != weights)
        delete weightsBroad;
    if(newLabels != cLabels)
        delete newLabels;
    delete cLabels;

    return Status::OK();
}

//////////////////////////////////////////////////////////////////////////
DECLARE_TYPES(softmax_cross_entropy_loss_grad) {
    getOpDescriptor()->setAllowedInputTypes(0, {ALL_FLOATS})
                     ->setAllowedInputTypes(1, {ALL_FLOATS})
                     ->setAllowedInputTypes(2, {ALL_FLOATS, ALL_INTS})
                     ->setAllowedInputTypes(3, {ALL_FLOATS})
                     ->setAllowedInputTypes(4, {ALL_FLOATS})
                     ->setAllowedInputTypes(5, {ALL_FLOATS})
                     ->setAllowedOutputTypes({ALL_FLOATS});
}

//////////////////////////////////////////////////////////////////////////
// Shape function for the gradient op: the three outputs mirror the shapes of
// logits, weights and labels respectively, in the picked floating type.
DECLARE_SHAPE_FN(softmax_cross_entropy_loss_grad) {
    auto logitsShapeInfo  = inputShape->at(0);
    auto weightsShapeInfo = inputShape->at(1);
    auto labelsShapeInfo  = inputShape->at(2);

    std::vector<int> dimensions = {-1};

    // labels and logits must have the same shapes
    REQUIRE_TRUE(shape::shapeEquals(logitsShapeInfo, labelsShapeInfo), 0, "SOFTMAX_CROSS_ENTROPY_LOSS_GRAD OP: labels and logits arrays must have the same shapes, but got %s and %s correspondingly!", ShapeUtils::shapeAsString(labelsShapeInfo).c_str(), ShapeUtils::shapeAsString(logitsShapeInfo).c_str());

    auto lossShapeInfo = ShapeUtils::evalReduceShapeInfo(shape::order(logitsShapeInfo), dimensions, logitsShapeInfo, false, false, block.getWorkspace());
    // weights array can be single scalar or has the same rank as loss, and must be broadcastable to loss
    REQUIRE_TRUE(shape::isScalar(weightsShapeInfo) || shape::rank(weightsShapeInfo) == shape::rank(lossShapeInfo), 0, "SOFTMAX_CROSS_ENTROPY_LOSS_GRAD OP: weights array should be scalar or have the same rank as loss array, but got %i and %i correspondingly!", shape::rank(weightsShapeInfo), shape::rank(lossShapeInfo));
    // check whether broadcast operation is possible for weights array
    REQUIRE_TRUE(shape::isScalar(weightsShapeInfo) || ShapeUtils::areShapesBroadcastable(weightsShapeInfo, lossShapeInfo), 0, "SOFTMAX_CROSS_ENTROPY_LOSS_GRAD OP: shapes of weights and loss arrays should be broadcastable, but got weights = %s and loss = %s instead!", ShapeUtils::shapeAsString(weightsShapeInfo).c_str(), ShapeUtils::shapeAsString(lossShapeInfo).c_str());

    auto outType = DataTypeUtils::pickFloatingType(ArrayOptions::dataType(logitsShapeInfo));

    auto dLdpShapeInfo = ConstantShapeHelper::getInstance()->createShapeInfo(ShapeDescriptor(outType, shape::order(logitsShapeInfo),  shape::shapeOf(logitsShapeInfo),  shape::rank(logitsShapeInfo)));
    auto dLdwShapeInfo = ConstantShapeHelper::getInstance()->createShapeInfo(ShapeDescriptor(outType, shape::order(weightsShapeInfo), shape::shapeOf(weightsShapeInfo), shape::rank(weightsShapeInfo)));
    auto dLdlShapeInfo = ConstantShapeHelper::getInstance()->createShapeInfo(ShapeDescriptor(outType, shape::order(labelsShapeInfo),  shape::shapeOf(labelsShapeInfo),  shape::rank(labelsShapeInfo)));

    return SHAPELIST(dLdpShapeInfo, dLdwShapeInfo, dLdlShapeInfo);
}

}
}

#endif
#include <map>
#include <string>

#include <boost/test/unit_test.hpp>

#include "json/json_spirit_writer_template.h"
#include "main.h"
#include "wallet.h"

using namespace std;
using namespace json_spirit;

// In script_tests.cpp
extern Array read_json(const std::string& filename);
extern CScript ParseScript(string s);

BOOST_AUTO_TEST_SUITE(transaction_tests)

// Data-driven check that every transaction in tx_valid.json deserializes,
// passes CheckTransaction, and that each input's scriptSig verifies against
// the supplied prevout scriptPubKey.
BOOST_AUTO_TEST_CASE(tx_valid)
{
    // Read tests from test/data/tx_valid.json
    // Format is an array of arrays
    // Inner arrays are either [ "comment" ]
    // or [[[prevout hash, prevout index, prevout scriptPubKey], [input 2], ...],"], serializedTransaction, enforceP2SH
    // ... where all scripts are stringified scripts.
    Array tests = read_json("tx_valid.json");

    BOOST_FOREACH(Value& tv, tests)
    {
        Array test = tv.get_array();
        string strTest = write_string(tv, false);
        // Entries whose first element is not an array are comments; skip them.
        if (test[0].type() == array_type)
        {
            if (test.size() != 3 || test[1].type() != str_type || test[2].type() != bool_type)
            {
                BOOST_ERROR("Bad test: " << strTest);
                continue;
            }

            // Build the prevout -> scriptPubKey map the inputs will be checked against.
            map<COutPoint, CScript> mapprevOutScriptPubKeys;
            Array inputs = test[0].get_array();
            bool fValid = true;
            BOOST_FOREACH(Value& input, inputs)
            {
                if (input.type() != array_type)
                {
                    fValid = false;
                    break;
                }
                Array vinput = input.get_array();
                if (vinput.size() != 3)
                {
                    fValid = false;
                    break;
                }

                mapprevOutScriptPubKeys[COutPoint(uint256(vinput[0].get_str()), vinput[1].get_int())] = ParseScript(vinput[2].get_str());
            }
            if (!fValid)
            {
                BOOST_ERROR("Bad test: " << strTest);
                continue;
            }

            // Deserialize the hex-encoded transaction and sanity-check it.
            string transaction = test[1].get_str();
            CDataStream stream(ParseHex(transaction), SER_NETWORK, PROTOCOL_VERSION);
            CTransaction tx;
            stream >> tx;

            CValidationState state;
            BOOST_CHECK_MESSAGE(tx.CheckTransaction(state), strTest);
            BOOST_CHECK(state.IsValid());

            // Every input's script must verify; test[2] toggles P2SH enforcement.
            for (unsigned int i = 0; i < tx.vin.size(); i++)
            {
                if (!mapprevOutScriptPubKeys.count(tx.vin[i].prevout))
                {
                    BOOST_ERROR("Bad test: " << strTest);
                    break;
                }

                BOOST_CHECK_MESSAGE(VerifyScript(tx.vin[i].scriptSig, mapprevOutScriptPubKeys[tx.vin[i].prevout], tx, i, test[2].get_bool() ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE, 0), strTest);
            }
        }
    }
}

// Mirror of tx_valid: each transaction in tx_invalid.json must fail either
// CheckTransaction or at least one input's script verification.
BOOST_AUTO_TEST_CASE(tx_invalid)
{
    // Read tests from test/data/tx_invalid.json
    // Format is an array of arrays
    // Inner arrays are either [ "comment" ]
    // or [[[prevout hash, prevout index, prevout scriptPubKey], [input 2], ...],"], serializedTransaction, enforceP2SH
    // ... where all scripts are stringified scripts.
    Array tests = read_json("tx_invalid.json");

    BOOST_FOREACH(Value& tv, tests)
    {
        Array test = tv.get_array();
        string strTest = write_string(tv, false);
        // Entries whose first element is not an array are comments; skip them.
        if (test[0].type() == array_type)
        {
            if (test.size() != 3 || test[1].type() != str_type || test[2].type() != bool_type)
            {
                BOOST_ERROR("Bad test: " << strTest);
                continue;
            }

            map<COutPoint, CScript> mapprevOutScriptPubKeys;
            Array inputs = test[0].get_array();
            bool fValid = true;
            BOOST_FOREACH(Value& input, inputs)
            {
                if (input.type() != array_type)
                {
                    fValid = false;
                    break;
                }
                Array vinput = input.get_array();
                if (vinput.size() != 3)
                {
                    fValid = false;
                    break;
                }

                mapprevOutScriptPubKeys[COutPoint(uint256(vinput[0].get_str()), vinput[1].get_int())] = ParseScript(vinput[2].get_str());
            }
            if (!fValid)
            {
                BOOST_ERROR("Bad test: " << strTest);
                continue;
            }

            string transaction = test[1].get_str();
            CDataStream stream(ParseHex(transaction), SER_NETWORK, PROTOCOL_VERSION);
            CTransaction tx;
            stream >> tx;

            // fValid accumulates: structural validity AND every input verifying.
            CValidationState state;
            fValid = tx.CheckTransaction(state) && state.IsValid();

            for (unsigned int i = 0; i < tx.vin.size() && fValid; i++)
            {
                if (!mapprevOutScriptPubKeys.count(tx.vin[i].prevout))
                {
                    BOOST_ERROR("Bad test: " << strTest);
                    break;
                }

                fValid = VerifyScript(tx.vin[i].scriptSig, mapprevOutScriptPubKeys[tx.vin[i].prevout], tx, i, test[2].get_bool() ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE, 0);
            }

            // An "invalid" fixture must have failed somewhere above.
            BOOST_CHECK_MESSAGE(!fValid, strTest);
        }
    }
}

// Round-trips a real serialized transaction and checks the duplicate-input rule.
BOOST_AUTO_TEST_CASE(basic_transaction_tests)
{
    // Random real transaction (e2769b09e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436)
    unsigned char ch[] = {0x01, 0x00, 0x00, 0x00, 0x01, 0x6b, 0xff, 0x7f, 0xcd, 0x4f, 0x85, 0x65, 0xef, 0x40, 0x6d, 0xd5, 0xd6,
                          0x3d, 0x4f, 0xf9, 0x4f, 0x31, 0x8f, 0xe8, 0x20, 0x27, 0xfd, 0x4d, 0xc4, 0x51, 0xb0, 0x44, 0x74, 0x01,
                          0x9f, 0x74, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xda, 0x0d, 0xc6,
                          0xae, 0xce, 0xfe, 0x1e, 0x06, 0xef, 0xdf, 0x05, 0x77, 0x37, 0x57, 0xde, 0xb1, 0x68, 0x82, 0x09, 0x30,
                          0xe3, 0xb0, 0xd0, 0x3f, 0x46, 0xf5, 0xfc, 0xf1, 0x50, 0xbf, 0x99, 0x0c, 0x02, 0x21, 0x00, 0xd2, 0x5b,
                          0x5c, 0x87, 0x04, 0x00, 0x76, 0xe4, 0xf2, 0x53, 0xf8, 0x26, 0x2e, 0x76, 0x3e, 0x2d, 0xd5, 0x1e, 0x7f,
                          0xf0, 0xbe, 0x15, 0x77, 0x27, 0xc4, 0xbc, 0x42, 0x80, 0x7f, 0x17, 0xbd, 0x39, 0x01, 0x41, 0x04, 0xe6,
                          0xc2, 0x6e, 0xf6, 0x7d, 0xc6, 0x10, 0xd2, 0xcd, 0x19, 0x24, 0x84, 0x78, 0x9a, 0x6c, 0xf9, 0xae, 0xa9,
                          0x93, 0x0b, 0x94, 0x4b, 0x7e, 0x2d, 0xb5, 0x34, 0x2b, 0x9d, 0x9e, 0x5b, 0x9f, 0xf7, 0x9a, 0xff, 0x9a,
                          0x2e, 0xe1, 0x97, 0x8d, 0xd7, 0xfd, 0x01, 0xdf, 0xc5, 0x22, 0xee, 0x02, 0x28, 0x3d, 0x3b, 0x06, 0xa9,
                          0xd0, 0x3a, 0xcf, 0x80, 0x96, 0x96, 0x8d, 0x7d, 0xbb, 0x0f, 0x91, 0x78, 0xff, 0xff, 0xff, 0xff, 0x02,
                          0x8b, 0xa7, 0x94, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xba, 0xde, 0xec, 0xfd, 0xef,
                          0x05, 0x07, 0x24, 0x7f, 0xc8, 0xf7, 0x42, 0x41, 0xd7, 0x3b, 0xc0, 0x39, 0x97, 0x2d, 0x7b, 0x88, 0xac,
                          0x40, 0x94, 0xa8, 0x02, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xc1, 0x09, 0x32, 0x48, 0x3f,
                          0xec, 0x93, 0xed, 0x51, 0xf5, 0xfe, 0x95, 0xe7, 0x25, 0x59, 0xf2, 0xcc, 0x70, 0x43, 0xf9, 0x88, 0xac,
                          0x00, 0x00, 0x00, 0x00, 0x00};
    // The array carries one trailing byte beyond the transaction; hence the -1.
    vector<unsigned char> vch(ch, ch + sizeof(ch) -1);
    CDataStream stream(vch, SER_DISK, CLIENT_VERSION);
    CTransaction tx;
    stream >> tx;
    CValidationState state;
    BOOST_CHECK_MESSAGE(tx.CheckTransaction(state) && state.IsValid(), "Simple deserialized transaction should be valid.");

    // Check that duplicate txins fail
    tx.vin.push_back(tx.vin[0]);
    BOOST_CHECK_MESSAGE(!tx.CheckTransaction(state) || !state.IsValid(), "Transaction with duplicate txins should be invalid.");
}

//
// Helper: create two dummy transactions, each with
// two outputs.  The first has 11 and 50 CENT outputs
// paid to a TX_PUBKEY, the second 21 and 22 CENT outputs
// paid to a TX_PUBKEYHASH.
//
static std::vector<CTransaction>
SetupDummyInputs(CBasicKeyStore& keystoreRet, CCoinsView & coinsRet)
{
    std::vector<CTransaction> dummyTransactions;
    dummyTransactions.resize(2);

    // Add some keys to the keystore:
    CKey key[4];
    for (int i = 0; i < 4; i++)
    {
        // Alternate compressed/uncompressed keys (i % 2 drives MakeNewKey's flag).
        key[i].MakeNewKey(i % 2);
        keystoreRet.AddKey(key[i]);
    }

    // Create some dummy input transactions
    dummyTransactions[0].vout.resize(2);
    dummyTransactions[0].vout[0].nValue = 11*CENT;
    dummyTransactions[0].vout[0].scriptPubKey << key[0].GetPubKey() << OP_CHECKSIG;
    dummyTransactions[0].vout[1].nValue = 50*CENT;
    dummyTransactions[0].vout[1].scriptPubKey << key[1].GetPubKey() << OP_CHECKSIG;
    coinsRet.SetCoins(dummyTransactions[0].GetHash(), CCoins(dummyTransactions[0], 0));

    dummyTransactions[1].vout.resize(2);
    dummyTransactions[1].vout[0].nValue = 21*CENT;
    dummyTransactions[1].vout[0].scriptPubKey.SetDestination(key[2].GetPubKey().GetID());
    dummyTransactions[1].vout[1].nValue = 22*CENT;
    dummyTransactions[1].vout[1].scriptPubKey.SetDestination(key[3].GetPubKey().GetID());
    coinsRet.SetCoins(dummyTransactions[1].GetHash(), CCoins(dummyTransactions[1], 0));

    return dummyTransactions;
}

// Exercises AreInputsStandard and GetValueIn against the dummy coins view.
BOOST_AUTO_TEST_CASE(test_Get)
{
    CBasicKeyStore keystore;
    CCoinsView coinsDummy;
    CCoinsViewCache coins(coinsDummy);
    std::vector<CTransaction> dummyTransactions = SetupDummyInputs(keystore, coins);

    CTransaction t1;
    t1.vin.resize(3);
    t1.vin[0].prevout.hash = dummyTransactions[0].GetHash();
    t1.vin[0].prevout.n = 1;
    // Dummy 65-byte signature placeholder.
    t1.vin[0].scriptSig << std::vector<unsigned char>(65, 0);
    t1.vin[1].prevout.hash = dummyTransactions[1].GetHash();
    t1.vin[1].prevout.n = 0;
    // Dummy signature + dummy 33-byte pubkey for the pay-to-pubkey-hash inputs.
    t1.vin[1].scriptSig << std::vector<unsigned char>(65, 0) << std::vector<unsigned char>(33, 4);
    t1.vin[2].prevout.hash = dummyTransactions[1].GetHash();
    t1.vin[2].prevout.n = 1;
    t1.vin[2].scriptSig << std::vector<unsigned char>(65, 0) << std::vector<unsigned char>(33, 4);
    t1.vout.resize(2);
    t1.vout[0].nValue = 90*CENT;
    t1.vout[0].scriptPubKey << OP_1;

    BOOST_CHECK(t1.AreInputsStandard(coins));
    BOOST_CHECK_EQUAL(t1.GetValueIn(coins), (50+21+22)*CENT);

    // Adding extra junk to the scriptSig should make it non-standard:
    t1.vin[0].scriptSig << OP_11;
    BOOST_CHECK(!t1.AreInputsStandard(coins));

    // ... as should not having enough:
    t1.vin[0].scriptSig = CScript();
    BOOST_CHECK(!t1.AreInputsStandard(coins));
}

// Exercises IsStandard over output value and scriptPubKey shapes.
BOOST_AUTO_TEST_CASE(test_IsStandard)
{
    CBasicKeyStore keystore;
    CCoinsView coinsDummy;
    CCoinsViewCache coins(coinsDummy);
    std::vector<CTransaction> dummyTransactions = SetupDummyInputs(keystore, coins);

    CTransaction t;
    t.vin.resize(1);
    t.vin[0].prevout.hash = dummyTransactions[0].GetHash();
    t.vin[0].prevout.n = 1;
    t.vin[0].scriptSig << std::vector<unsigned char>(65, 0);
    t.vout.resize(1);
    t.vout[0].nValue = 90*CENT;
    CKey key;
    key.MakeNewKey(true);
    t.vout[0].scriptPubKey.SetDestination(key.GetPubKey().GetID());

    BOOST_CHECK(t.IsStandard());

    t.vout[0].nValue = 5011; // dust
    // Dubaicoin does not enforce isDust(). Per dust fees are considered sufficient as deterrant.
    // BOOST_CHECK(!t.IsStandard());

    t.vout[0].nValue = 6011; // not dust
    BOOST_CHECK(t.IsStandard());

    // Non-standard scriptPubKey (bare OP_1) must be rejected.
    t.vout[0].scriptPubKey = CScript() << OP_1;
    BOOST_CHECK(!t.IsStandard());
}

BOOST_AUTO_TEST_SUITE_END()
// Copyright (c) 2018-2019 Intel Corporation // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include "math.h"
#include "mfx_common.h"

#if defined (MFX_ENABLE_VPP)

#include "mfx_enc_common.h"
#include "mfx_session.h"
#include "mfx_vpp_hw.h"
#include "libmfx_core.h"
#include "libmfx_core_factory.h"
#include "libmfx_core_interface.h"

#include "mfx_vpp_utils.h"
#include "mfx_vpp_sw.h"

using namespace MfxHwVideoProcessing;

class CmDevice;

// Factory: creates and initializes a VPP implementation for the given core.
// Only the hardware path (VideoVPP_HW) is attempted here; on any failure the
// object is destroyed, *mfxSts is set, and NULL is returned.
VideoVPPBase* CreateAndInitVPPImpl(mfxVideoParam *par, VideoCORE *core, mfxStatus *mfxSts)
{
    bool bHWInitFailed = false; // NOTE(review): set but never read after assignment
    VideoVPPBase * vpp = 0;

    if( MFX_PLATFORM_HARDWARE == core->GetPlatformType())
    {
        vpp = new VideoVPP_HW(core, mfxSts);
        if (*mfxSts != MFX_ERR_NONE)
        {
            delete vpp;
            return 0;
        }
        *mfxSts = vpp->Init(par);
        if (*mfxSts < MFX_ERR_NONE)
        {
            delete vpp;
            return 0;
        }
        // Warnings (and MFX_ERR_NONE) are acceptable init outcomes: keep the object.
        if(MFX_WRN_INCOMPATIBLE_VIDEO_PARAM == *mfxSts ||
           MFX_WRN_FILTER_SKIPPED == *mfxSts ||
           MFX_WRN_PARTIAL_ACCELERATION == *mfxSts ||
           MFX_ERR_NONE == *mfxSts)
        {
            return vpp;
        }
        delete vpp;
        vpp = 0;
        bHWInitFailed = true;
    }
    *mfxSts = MFX_ERR_UNSUPPORTED;
    return 0;
}

/* ******************************************************************** */
/*                           useful macros                              */
/* ******************************************************************** */

#ifndef VPP_CHECK_STS_SAFE
// On error: drop the references taken on in/out surfaces, then propagate sts.
#define VPP_CHECK_STS_SAFE(sts, in, out)               \
{                                                      \
    if (sts != MFX_ERR_NONE && in)                     \
    {                                                  \
        m_core->DecreaseReference( &(in->Data) );      \
    }                                                  \
    if (sts != MFX_ERR_NONE && out)                    \
    {                                                  \
        m_core->DecreaseReference( &(out->Data) );     \
    }                                                  \
    MFX_CHECK_STS( sts );                              \
}
#endif

// Bump frame counters according to the processing status.
#define VPP_UPDATE_STAT( sts, stat )                         \
{                                                            \
    if( MFX_ERR_NONE == sts ) stat.NumFrame++;               \
    if( MFX_ERR_NULL_PTR == sts ) stat.NumCachedFrame++;     \
}

// Release the reference taken on a surface after successful processing.
#define VPP_UNLOCK_SURFACE(sts, surface)                                    \
{                                                                           \
    if( MFX_ERR_NONE == sts ) m_core->DecreaseReference( &surface->Data );  \
}

/* ******************************************************************** */
/*          implementation of VPP Pipeline: interface methods           */
/* ******************************************************************** */

// Zero-initializes all state (opaque requests, stats, protected-state snapshots)
// and always reports MFX_ERR_NONE through *sts.
VideoVPPBase::VideoVPPBase(VideoCORE *core, mfxStatus* sts )
    : m_pipelineList()
    , m_core(core)
    , m_pHWVPP()
{
    /* opaque processing */
    m_bOpaqMode[VPP_IN]  = false;
    m_bOpaqMode[VPP_OUT] = false;
    memset(&m_requestOpaq[VPP_IN], 0, sizeof(mfxFrameAllocRequest));
    memset(&m_requestOpaq[VPP_OUT], 0, sizeof(mfxFrameAllocRequest));

    /* common */
    m_bDynamicDeinterlace = false;
    memset(&m_stat, 0, sizeof(mfxVPPStat));
    memset(&m_errPrtctState, 0, sizeof(sErrPrtctState));
    memset(&m_InitState, 0, sizeof(sErrPrtctState));

    VPP_CLEAN;

    *sts = MFX_ERR_NONE;

} // VideoVPPBase::VideoVPPBase(VideoCORE *core, mfxStatus* sts ) : VideoVPP()

VideoVPPBase::~VideoVPPBase()
{
    Close();

} // VideoVPPBase::~VideoVPPBase()

// Resets counters, opaque requests and the pipeline list back to the
// uninitialized state. Safe to call repeatedly (guarded by VPP_CHECK_NOT_INITIALIZED).
mfxStatus VideoVPPBase::Close(void)
{
    VPP_CHECK_NOT_INITIALIZED;

    m_stat.NumCachedFrame = 0;
    m_stat.NumFrame = 0;

    m_bDynamicDeinterlace = false;

    /* opaque processing */
    if( m_bOpaqMode[VPP_IN] )
    {
        m_requestOpaq[VPP_IN].NumFrameMin = m_requestOpaq[VPP_IN].NumFrameSuggested = 0;
        m_requestOpaq[VPP_IN].Type = 0;
    }
    if( m_bOpaqMode[VPP_OUT] )
    {
        m_requestOpaq[VPP_OUT].NumFrameMin = m_requestOpaq[VPP_OUT].NumFrameSuggested = 0;
        m_requestOpaq[VPP_OUT].Type = 0;
    }
    m_bOpaqMode[VPP_IN] = m_bOpaqMode[VPP_OUT] = false;

    //m_numUsedFilters = 0;
    m_pipelineList.resize(0);

    VPP_CLEAN;

    return MFX_ERR_NONE;// in according with spec

} // mfxStatus VideoVPPBase::Close(void)

// Full initialization: validates parameters, builds the filter pipeline,
// resolves opaque-memory requests, runs the implementation-specific
// InternalInit, and snapshots the accepted parameters. Warnings from
// parameter checks are remembered and re-reported after a successful init.
mfxStatus VideoVPPBase::Init(mfxVideoParam *par)
{
    mfxStatus sts  = MFX_ERR_INVALID_VIDEO_PARAM;
    mfxStatus sts_wrn = MFX_ERR_NONE;

    MFX_CHECK_NULL_PTR1( par );

    VPP_CHECK_MULTIPLE_INIT;

    /* step [0]: checking */
    sts = CheckIOPattern( par );
    MFX_CHECK_STS(sts);

    // Protected content is not supported by VPP.
    if (par->Protected)
        return MFX_ERR_INVALID_VIDEO_PARAM;

    sts = CheckFrameInfo( &(par->vpp.In), VPP_IN, m_core->GetHWType());
    MFX_CHECK_STS( sts );

    sts = CheckFrameInfo( &(par->vpp.Out), VPP_OUT, m_core->GetHWType());
    MFX_CHECK_STS(sts);

    PicStructMode picStructMode = GetPicStructMode(par->vpp.In.PicStruct, par->vpp.Out.PicStruct);
    m_bDynamicDeinterlace = (DYNAMIC_DI_PICSTRUCT_MODE == picStructMode) ? true : false;

    // Ext-param warnings are deferred: init continues and the warning is
    // returned at the end if nothing worse happens.
    sts = CheckExtParam(m_core, par->ExtParam, par->NumExtParam);
    if( MFX_WRN_INCOMPATIBLE_VIDEO_PARAM == sts || MFX_WRN_FILTER_SKIPPED == sts)
    {
        sts_wrn = sts;
        sts = MFX_ERR_NONE;
    }
    MFX_CHECK_STS(sts);

    /* step [1]: building stage of VPP pipeline */
    sts = GetPipelineList( par, m_pipelineList, true);
    MFX_CHECK_STS(sts);

    // opaque configuration rules:
    // (1) in case of OPAQ request VPP should ignore IOPattern and use extBuffer native memory type
    // (2) VPP_IN and VPP_OUT should be checked independently of one another
    sts = CheckOpaqMode( par, m_bOpaqMode );
    MFX_CHECK_STS( sts );

    if( m_bOpaqMode[VPP_IN] || m_bOpaqMode[VPP_OUT] )
    {
        sts = GetOpaqRequest( par, m_bOpaqMode, m_requestOpaq);
        MFX_CHECK_STS( sts );

        // VPP controls OPAQUE request.
        // will be combined with SW::CreatePipeline() to prevent multi run of QueryIOSurf()
        {
            mfxFrameAllocRequest cntrlRequest[2];
            sts = QueryIOSurf(m_core, par, cntrlRequest);
            VPP_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
            MFX_CHECK_STS( sts );

            // Application-provided opaque surfaces must cover at least what
            // QueryIOSurf asks for.
            if( m_bOpaqMode[VPP_IN] &&
                (m_requestOpaq[VPP_IN].NumFrameMin < cntrlRequest[VPP_IN].NumFrameMin ||
                 m_requestOpaq[VPP_IN].NumFrameSuggested < cntrlRequest[VPP_IN].NumFrameSuggested) )
            {
                return MFX_ERR_INVALID_VIDEO_PARAM;
            }

            if( m_bOpaqMode[VPP_OUT] &&
                (m_requestOpaq[VPP_OUT].NumFrameMin < cntrlRequest[VPP_OUT].NumFrameMin ||
                 m_requestOpaq[VPP_OUT].NumFrameSuggested < cntrlRequest[VPP_OUT].NumFrameSuggested) )
            {
                return MFX_ERR_INVALID_VIDEO_PARAM;
            }
        }
    }

    sts = InternalInit(par);
    if (MFX_WRN_INCOMPATIBLE_VIDEO_PARAM == sts || MFX_WRN_FILTER_SKIPPED == sts)
    {
        sts_wrn = sts;
        sts = MFX_ERR_NONE;
    }
    MFX_CHECK_STS( sts );

    /* save init params to prevent core crash */
    m_errPrtctState.In  = par->vpp.In;
    m_errPrtctState.Out = par->vpp.Out;
    m_errPrtctState.IOPattern  = par->IOPattern;
    m_errPrtctState.AsyncDepth = par->AsyncDepth;
    m_errPrtctState.isCompositionModeEnabled = IsCompositionMode(par);

    m_InitState = m_errPrtctState; // Save params on init

    m_stat.NumCachedFrame = 0;
    m_stat.NumFrame = 0;

    VPP_INIT_SUCCESSFUL;

    // Re-surface a deferred warning now that init has succeeded.
    if( MFX_ERR_NONE != sts_wrn )
    {
        return sts_wrn;
    }

    bool bCorrectionEnable = false;
    sts = CheckPlatformLimitations(m_core, *par, bCorrectionEnable);
    if (MFX_ERR_UNSUPPORTED == sts)
    {
        sts = MFX_ERR_INVALID_VIDEO_PARAM;
    }

    return sts;

} // mfxStatus VideoVPPBase::Init(mfxVideoParam *par)

// Per-frame validation before submission: checks surface lock state,
// PicStruct, frame-info consistency against the init-time snapshot, and crops.
// 'in' may be NULL for the end-of-stream drain; 'out' is always required.
mfxStatus VideoVPPBase::VppFrameCheck(mfxFrameSurface1 *in, mfxFrameSurface1 *out,
                                      mfxExtVppAuxData *, MFX_ENTRY_POINT [], mfxU32 &)
{
    //printf("\nVideoVPPBase::VppFrameCheck()\n"); fflush(stdout);
    mfxStatus sts = MFX_ERR_NONE;

    /* [IN] */
    // it is end of stream procedure if(NULL == in)

    if( NULL == out )
    {
        return MFX_ERR_NULL_PTR;
    }

    if (out->Data.Locked)
    {
        return MFX_ERR_UNDEFINED_BEHAVIOR;
    }

    /* *************************************** */
    /*              check info                 */
    /* *************************************** */
    if (in)
    {
        sts = CheckInputPicStruct( in->Info.PicStruct );
        MFX_CHECK_STS(sts);

        /* we have special case for composition:
         * if composition enabled sub stream's picture (WxH)
         * can be less than primary stream (WxH)
         * So, do check frame info only if composition is not enabled */
        if (m_errPrtctState.isCompositionModeEnabled == false)
        {
            sts = CompareFrameInfo( &(in->Info), &(m_errPrtctState.In));
            MFX_CHECK_STS(sts);
        }

        sts = CheckCropParam( &(in->Info) );
        MFX_CHECK_STS( sts );
    }

    sts = CompareFrameInfo( &(out->Info), &(m_errPrtctState.Out));
    MFX_CHECK_STS(sts);

    sts = CheckCropParam( &(out->Info) );
    MFX_CHECK_STS( sts );

    return sts;

} // mfxStatus VideoVPPBase::VppFrameCheck(...)

// Computes the number/type of surfaces the application must allocate.
// Starts from a 1/1 default, corrects via the pipeline's frame counts,
// then (on HW platforms) merges the HW implementation's request and
// scales everything by the effective async depth.
mfxStatus VideoVPPBase::QueryIOSurf(VideoCORE* core, mfxVideoParam *par, mfxFrameAllocRequest *request)
{
    mfxStatus mfxSts;

    MFX_CHECK_NULL_PTR2(par, request);

    mfxSts = CheckFrameInfo( &(par->vpp.In), VPP_IN, core->GetHWType());
    MFX_CHECK_STS( mfxSts );

    mfxSts = CheckFrameInfo( &(par->vpp.Out), VPP_OUT, core->GetHWType());
    MFX_CHECK_STS( mfxSts );

    // make sense?
    //mfxSts = CheckExtParam(par->ExtParam, par->NumExtParam);
    //if( MFX_WRN_INCOMPATIBLE_VIDEO_PARAM == mfxSts )
    //{
    //    mfxSts = MFX_ERR_NONE;
    //    //bWarningIncompatible = true;
    //}
    //MFX_CHECK_STS(mfxSts);

    //PicStructMode m_picStructSupport = GetTypeOfInitPicStructSupport(par->vpp.In.PicStruct, par->vpp.Out.PicStruct);

    // default settings
    // VPP_IN
    request[VPP_IN].Info = par->vpp.In;
    request[VPP_IN].NumFrameMin = 1;
    request[VPP_IN].NumFrameSuggested = 1;

    //VPP_OUT
    request[VPP_OUT].Info = par->vpp.Out;
    request[VPP_OUT].NumFrameMin = 1;
    request[VPP_OUT].NumFrameSuggested = 1;

    /* correction */
    std::vector<mfxU32> pipelineList;
    //mfxU32 lenList;

    mfxSts = GetPipelineList( par, pipelineList, true );
    MFX_CHECK_STS( mfxSts );

    mfxU16 framesCountMin[2];
    mfxU16 framesCountSuggested[2];
    mfxSts = GetExternalFramesCount(core, par, &pipelineList[0], (mfxU32)pipelineList.size(), framesCountMin, framesCountSuggested);
    MFX_CHECK_STS( mfxSts );

    request[VPP_IN].NumFrameMin  = framesCountMin[VPP_IN];
    request[VPP_OUT].NumFrameMin = framesCountMin[VPP_OUT];

    request[VPP_IN].NumFrameSuggested  = framesCountSuggested[VPP_IN];
    request[VPP_OUT].NumFrameSuggested = framesCountSuggested[VPP_OUT];

    if( MFX_PLATFORM_HARDWARE == core->GetPlatformType() )
    {
        mfxFrameAllocRequest hwRequest[2];
        mfxSts = VideoVPPHW::QueryIOSurf(VideoVPPHW::ALL, core, par, hwRequest);

        // A failing HW query means the SW library would have to handle this config.
        bool bSWLib = (mfxSts == MFX_ERR_NONE) ? false : true;
        if( !bSWLib )
        {
            // suggested
            request[VPP_IN].NumFrameSuggested  = std::max(request[VPP_IN].NumFrameSuggested,  hwRequest[VPP_IN].NumFrameSuggested);
            request[VPP_OUT].NumFrameSuggested = std::max(request[VPP_OUT].NumFrameSuggested, hwRequest[VPP_OUT].NumFrameSuggested);

            // min
            request[VPP_IN].NumFrameMin  = std::max(request[VPP_IN].NumFrameMin,  hwRequest[VPP_IN].NumFrameMin);
            request[VPP_OUT].NumFrameMin = std::max(request[VPP_OUT].NumFrameMin, hwRequest[VPP_OUT].NumFrameMin);
        }

        mfxU16 vppAsyncDepth = (0 == par->AsyncDepth) ? MFX_AUTO_ASYNC_DEPTH_VALUE : par->AsyncDepth;
        {
            // suggested
            request[VPP_IN].NumFrameSuggested  *= vppAsyncDepth;
            request[VPP_OUT].NumFrameSuggested *= vppAsyncDepth;

            // min
            request[VPP_IN].NumFrameMin  *= vppAsyncDepth;
            request[VPP_OUT].NumFrameMin *= vppAsyncDepth;
        }

        mfxSts = CheckIOPattern_AndSetIOMemTypes(par->IOPattern, &(request[VPP_IN].Type), &(request[VPP_OUT].Type), bSWLib);
        MFX_CHECK_STS(mfxSts);

        return (bSWLib)? MFX_ERR_UNSUPPORTED : MFX_ERR_NONE;
    }

    return MFX_ERR_NONE;

} // mfxStatus VideoVPPBase::QueryIOSurf(mfxVideoParam *par, mfxFrameAllocRequest *request, const mfxU32 adapterNum)

// Copies the processed/cached frame counters out to the application.
mfxStatus VideoVPPBase::GetVPPStat(mfxVPPStat *stat)
{
    MFX_CHECK_NULL_PTR1(stat);

    VPP_CHECK_NOT_INITIALIZED;

    if( 0 == m_pipelineList.size() ) return MFX_ERR_NOT_INITIALIZED;

    stat->NumCachedFrame = m_stat.NumCachedFrame;
    stat->NumFrame       = m_stat.NumFrame;

    return MFX_ERR_NONE;

} // mfxStatus VideoVPPBase::GetVPPStat(mfxVPPStat *stat)

// Reports the parameters accepted at Init time. If the caller attached a
// DOUSE ext buffer, it is filled with the list of user-visible filters in
// the active pipeline (mandatory internal stages like CSC/RESIZE are skipped).
mfxStatus VideoVPPBase::GetVideoParam(mfxVideoParam *par)
{
    MFX_CHECK_NULL_PTR1( par )

    par->vpp.In  = m_errPrtctState.In;
    par->vpp.Out = m_errPrtctState.Out;

    par->Protected  = 0;
    par->IOPattern  = m_errPrtctState.IOPattern;
    par->AsyncDepth = m_errPrtctState.AsyncDepth;

    if( NULL == par->ExtParam || 0 == par->NumExtParam)
    {
        return MFX_ERR_NONE;
    }

    for( mfxU32 i = 0; i < par->NumExtParam; i++ )
    {
        if( MFX_EXTBUFF_VPP_DOUSE == par->ExtParam[i]->BufferId )
        {
            mfxExtVPPDoUse* pVPPHint = (mfxExtVPPDoUse*)(par->ExtParam[i]);
            mfxU32 numUsedFilters = 0;

            for( mfxU32 filterIndex = 0; filterIndex < GetNumUsedFilters(); filterIndex++ )
            {
                switch ( m_pipelineList[filterIndex] )
                {
                    // Internal/mandatory stages: not reported to the application.
                    case MFX_EXTBUFF_VPP_CSC:
                    case MFX_EXTBUFF_VPP_RESIZE:
                    case MFX_EXTBUFF_VPP_ITC:
                    case MFX_EXTBUFF_VPP_CSC_OUT_RGB4:
                    case MFX_EXTBUFF_VPP_CSC_OUT_A2RGB10:
                    {
                        continue;
                    }

                    // User-controllable filters: appended to the DOUSE list.
                    case MFX_EXTBUFF_VPP_DENOISE:
                    case MFX_EXTBUFF_VPP_SCENE_ANALYSIS:
                    case MFX_EXTBUFF_VPP_PROCAMP:
                    case MFX_EXTBUFF_VPP_DETAIL:
                    case MFX_EXTBUFF_VPP_FRAME_RATE_CONVERSION:
                    case MFX_EXTBUFF_VPP_IMAGE_STABILIZATION:
                    case MFX_EXTBUFF_VPP_COMPOSITE:
                    case MFX_EXTBUFF_VPP_FIELD_PROCESSING:
                    case MFX_EXTBUFF_VPP_DEINTERLACING:
                    case MFX_EXTBUFF_VPP_DI:
                    case MFX_EXTBUFF_VPP_DI_30i60p:
                    case MFX_EXTBUFF_VPP_VIDEO_SIGNAL_INFO:
                    case MFX_EXTBUFF_VPP_MIRRORING:
#if (MFX_VERSION >= 1025)
                    case MFX_EXTBUFF_VPP_COLOR_CONVERSION:
#endif
#ifdef MFX_ENABLE_MCTF
                    case MFX_EXTBUFF_VPP_MCTF:
#endif
                    {
                        // Guard against overrunning the application-provided AlgList.
                        if(numUsedFilters + 1 > pVPPHint->NumAlg)
                            return MFX_ERR_UNDEFINED_BEHAVIOR;

                        pVPPHint->AlgList[numUsedFilters] = m_pipelineList[filterIndex];
                        numUsedFilters++;
                        break;
                    }

                    default:
                        return MFX_ERR_UNDEFINED_BEHAVIOR;
                }
            }
        }
    }

    return MFX_ERR_NONE;

} // mfxStatus VideoVPPBase::GetVideoParam(mfxVideoParam *par)

mfxU32 VideoVPPBase::GetNumUsedFilters()
{
    return ( (mfxU32)m_pipelineList.size() );

} // mfxU32 VideoVPPBase::GetNumUsedFilters()

// Validates IOPattern: it must be present, not mix SYSTEM and VIDEO memory
// for the same direction, and video memory requires an external allocator.
mfxStatus VideoVPPBase::CheckIOPattern( mfxVideoParam* par )
{
    if (0 == par->IOPattern) // IOPattern is mandatory parameter
    {
        return MFX_ERR_INVALID_VIDEO_PARAM;
    }

    if (!m_core->IsExternalFrameAllocator() &&
        (par->IOPattern & (MFX_IOPATTERN_OUT_VIDEO_MEMORY | MFX_IOPATTERN_IN_VIDEO_MEMORY)))
    {
        return MFX_ERR_INVALID_VIDEO_PARAM;
    }

    if ((par->IOPattern & MFX_IOPATTERN_IN_VIDEO_MEMORY) && (par->IOPattern & MFX_IOPATTERN_IN_SYSTEM_MEMORY))
    {
        return MFX_ERR_INVALID_VIDEO_PARAM;
    }

    if ((par->IOPattern & MFX_IOPATTERN_OUT_VIDEO_MEMORY) && (par->IOPattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY))
    {
        return MFX_ERR_INVALID_VIDEO_PARAM;
    }

    return MFX_ERR_NONE;

} // mfxStatus VideoVPPBase::CheckIOPattern( mfxVideoParam* par )

// Queries HW capabilities and force-enables the generic FRC/DI/VSI flags
// on top of whatever the HW reports. SW-only platforms are unsupported.
mfxStatus VideoVPPBase::QueryCaps(VideoCORE * core, MfxHwVideoProcessing::mfxVppCaps& caps)
{
    mfxStatus sts = MFX_ERR_NONE;

    if(MFX_PLATFORM_HARDWARE == core->GetPlatformType() )
    {
        sts = VideoVPPHW::QueryCaps(core, caps);
        caps.uFrameRateConversion= 1; // "1" means general FRC is supported. "Interpolation" modes described by caps.frcCaps
        caps.uDeinterlacing = 1;      // "1" means general deinterlacing is supported
        caps.uVideoSignalInfo = 1;    // "1" means general VSI is supported
        if (sts >= MFX_ERR_NONE)
            return sts;
    }

    return MFX_ERR_UNSUPPORTED;

} // mfxStatus VideoVPPBase::QueryCaps((VideoCORE * core, MfxHwVideoProcessing::mfxVppCaps& caps)

// MFX Query semantics: with in == NULL, marks every configurable field of
// 'out' as supported (non-zero); otherwise validates 'in' field by field,
// copying supported values to 'out' and zeroing unsupported ones while
// accumulating the worst status in mfxSts.
mfxStatus VideoVPPBase::Query(VideoCORE * core, mfxVideoParam *in, mfxVideoParam *out)
{
    mfxStatus mfxSts = MFX_ERR_NONE;

    MFX_CHECK_NULL_PTR1( out );

    if( NULL == in )
    {
        memset(&out->mfx, 0, sizeof(mfxInfoMFX));
        memset(&out->vpp, 0, sizeof(mfxInfoVPP));

        // We have to set FourCC and FrameRate below to
        // pass requirements of CheckPlatformLimitation for frame interpolation

        /* vppIn */
        out->vpp.In.FourCC        = 1;
        out->vpp.In.Height        = 1;
        out->vpp.In.Width         = 1;
        out->vpp.In.PicStruct     = 1;
        out->vpp.In.FrameRateExtN = 1;
        out->vpp.In.FrameRateExtD = 1;

        /* vppOut */
        out->vpp.Out.FourCC        = 1;
        out->vpp.Out.Height        = 1;
        out->vpp.Out.Width         = 1;
        out->vpp.Out.PicStruct     = 1;
        out->vpp.Out.FrameRateExtN = 1;
        out->vpp.Out.FrameRateExtD = 1;

        out->IOPattern = 1;

        /* protected content is not supported. check it */
        out->Protected = 1;

        out->AsyncDepth = 1;

        if (0 == out->NumExtParam)
        {
            out->NumExtParam = 1;
        }
        else
        {
            // check for IS and AFRC
            out->vpp.In.FourCC  = MFX_FOURCC_NV12;
            out->vpp.Out.FourCC = MFX_FOURCC_NV12;
            out->vpp.In.FrameRateExtN  = 30;
            out->vpp.Out.FrameRateExtN = 60;

            mfxSts = CheckPlatformLimitations(core, *out, true);
        }
        return mfxSts;
    }
    else
    {
        out->vpp = in->vpp;

        /* [asyncDepth] section */
        out->AsyncDepth = in->AsyncDepth;

        /* [Protected] section */
        out->Protected = in->Protected;
        if( out->Protected )
        {
            out->Protected = 0;
            mfxSts = MFX_ERR_UNSUPPORTED;
        }

        /* [IOPattern] section
         * Reuse check function from QueryIOsurf
         * Zero value just skipped.
         */
        mfxU16 inPattern;
        mfxU16 outPattern;
        if (0 == in->IOPattern || MFX_ERR_NONE == CheckIOPattern_AndSetIOMemTypes(in->IOPattern, &inPattern, &outPattern, true))
        {
            out->IOPattern = in->IOPattern;
        }
        else
        {
            mfxSts = MFX_ERR_UNSUPPORTED;
            out->IOPattern = 0;
        }

        /* [ExtParam] section */
        if ((in->ExtParam == 0 && out->ExtParam != 0) ||
            (in->ExtParam != 0 && out->ExtParam == 0) ||
            (in->NumExtParam != out->NumExtParam))
        {
            mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
        }

        if (0 != in->ExtParam)
        {
            for (int i = 0; i < in->NumExtParam; i++)
            {
                MFX_CHECK_NULL_PTR1( in->ExtParam[i] );
            }
        }

        if (0 != out->ExtParam)
        {
            for (int i = 0; i < out->NumExtParam; i++)
            {
                MFX_CHECK_NULL_PTR1(out->ExtParam[i]);
            }
        }

        if( out->NumExtParam > MAX_NUM_OF_VPP_EXT_PARAM)
        {
            out->NumExtParam = 0;
            mfxSts = MFX_ERR_UNSUPPORTED;
        }

        if( 0 == out->NumExtParam && in->ExtParam )
        {
            mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
        }

        if (in->ExtParam && out->ExtParam && (in->NumExtParam == out->NumExtParam) )
        {
            mfxU16 i;

            // to prevent multiple initialization
            std::vector<mfxU32> filterList(1);
            bool bMultipleInitDNU   = false;
            bool bMultipleInitDOUSE = false;

            for (i = 0; i < out->NumExtParam; i++)
            {
                // in/out ext buffers must be paired one-to-one.
                if ((in->ExtParam[i] == 0 && out->ExtParam[i] != 0) ||
                    (in->ExtParam[i] != 0 && out->ExtParam[i] == 0))
                {
                    //mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                    mfxSts = MFX_ERR_NULL_PTR;
                    continue; // stop working with ExtParam[i]
                }

                if (in->ExtParam[i] && out->ExtParam[i])
                {
                    if (in->ExtParam[i]->BufferId != out->ExtParam[i]->BufferId)
                    {
                        mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                        continue; // stop working with ExtParam[i]
                    }

                    if (in->ExtParam[i]->BufferSz != out->ExtParam[i]->BufferSz)
                    {
                        mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                        continue; // stop working with ExtParam[i]
                    }

                    // --------------------------------
                    // analysis of configurable filters
                    // --------------------------------
                    if( IsConfigurable( in->ExtParam[i]->BufferId ) )
                    {
                        // A configurable filter may appear only once.
                        if( IsFilterFound(&filterList[0], (mfxU32)filterList.size(), in->ExtParam[i]->BufferId) )
                        {
                            mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                        }
                        else
                        {
                            // Copy the whole config buffer in -> out, then let the
                            // filter-specific query adjust/validate it.
                            uint8_t *src = reinterpret_cast<uint8_t *>(in->ExtParam[i]),
                                    *dst = reinterpret_cast<uint8_t *>(out->ExtParam[i]);
                            std::copy(src, src + GetConfigSize(in->ExtParam[i]->BufferId), dst);

                            mfxStatus extSts = ExtendedQuery(core, in->ExtParam[i]->BufferId, out->ExtParam[i]);
                            if( MFX_ERR_NONE != extSts )
                            {
                                mfxSts = extSts;
                            }

                            filterList.push_back(in->ExtParam[i]->BufferId);
                        }
                        continue; // stop working with ExtParam[i]
                    }
                    // --------------------------------
                    // analysis of DONOTUSE structure
                    // --------------------------------
                    else if( MFX_EXTBUFF_VPP_DONOTUSE == in->ExtParam[i]->BufferId )
                    {
                        if( bMultipleInitDNU )
                        {
                            mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                            continue;// stop working with ExtParam[i]
                        }

                        bMultipleInitDNU = true;

                        // deep analysis
                        //--------------------------------------
                        {
                            mfxExtVPPDoNotUse* extDoNotUseIn  = (mfxExtVPPDoNotUse*)in->ExtParam[i];
                            mfxExtVPPDoNotUse* extDoNotUseOut = (mfxExtVPPDoNotUse*)out->ExtParam[i];

                            if(extDoNotUseIn->NumAlg != extDoNotUseOut->NumAlg)
                            {
                                mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                                continue; // stop working with ExtParam[i]
                            }

                            if( 0 == extDoNotUseIn->NumAlg )
                            {
                                extDoNotUseIn->NumAlg = 0;
                                mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                                continue; // stop working with ExtParam[i]
                            }

                            if(extDoNotUseIn->NumAlg > 4)
                            {
                                extDoNotUseIn->NumAlg = 0;
                                mfxSts = MFX_ERR_UNSUPPORTED;
                                continue; // stop working with ExtParam[i]
                            }

                            if( NULL == extDoNotUseOut->AlgList || NULL == extDoNotUseIn->AlgList )
                            {
                                mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                                continue; // stop working with ExtParam[i]
                            }

                            for( mfxU32 algIdx = 0; algIdx < extDoNotUseIn->NumAlg; algIdx++ )
                            {
                                // app must turn off filter once only
                                if( IsFilterFound( extDoNotUseIn->AlgList, algIdx, extDoNotUseIn->AlgList[algIdx] ) )
                                {
                                    mfxSts = MFX_ERR_UNSUPPORTED;
                                    continue; // stop working with ExtParam[i]
                                }
                                extDoNotUseOut->AlgList[algIdx] = extDoNotUseIn->AlgList[algIdx];
                            }
                            extDoNotUseOut->NumAlg = extDoNotUseIn->NumAlg;

                            for( mfxU32 extParIdx = 0; extParIdx < in->NumExtParam; extParIdx++ )
                            {
                                // configured via extended parameters filter should not be disabled
                                if ( IsFilterFound( extDoNotUseIn->AlgList, extDoNotUseIn->NumAlg, in->ExtParam[extParIdx]->BufferId ) )
                                {
                                    mfxU32 filterIdx = GetFilterIndex( extDoNotUseIn->AlgList, extDoNotUseIn->NumAlg, in->ExtParam[extParIdx]->BufferId );
                                    extDoNotUseIn->AlgList[filterIdx] = 0;
                                    mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                                    continue; // stop working with ExtParam[i]
                                }
                            }
                        }
                    }
                    // --------------------------------
                    // analysis of DOUSE structure
                    // --------------------------------
                    else if( MFX_EXTBUFF_VPP_DOUSE == in->ExtParam[i]->BufferId )
                    {
                        if( bMultipleInitDOUSE )
                        {
                            mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                            continue;// stop working with ExtParam[i]
                        }

                        bMultipleInitDOUSE = true;

                        // deep analysis
                        //--------------------------------------
                        {
                            mfxExtVPPDoUse* extDoUseIn  = (mfxExtVPPDoUse*)in->ExtParam[i];
                            mfxExtVPPDoUse* extDoUseOut = (mfxExtVPPDoUse*)out->ExtParam[i];

                            if(extDoUseIn->NumAlg != extDoUseOut->NumAlg)
                            {
                                mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                                continue; // stop working with ExtParam[i]
                            }

                            if( 0 == extDoUseIn->NumAlg )
                            {
                                extDoUseIn->NumAlg = 0;
                                mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                                continue; // stop working with ExtParam[i]
                            }

                            if( NULL == extDoUseOut->AlgList || NULL == extDoUseIn->AlgList )
                            {
                                mfxSts = MFX_ERR_UNDEFINED_BEHAVIOR;
                                continue; // stop working with ExtParam[i]
                            }

                            for( mfxU32 algIdx = 0; algIdx < extDoUseIn->NumAlg; algIdx++ )
                            {
                                if( !CheckDoUseCompatibility( extDoUseIn->AlgList[algIdx] ) )
                                {
                                    mfxSts = MFX_ERR_UNSUPPORTED;
                                    continue; // stop working with ExtParam[i]
                                }

                                // app must turn off filter once only
                                if( IsFilterFound( extDoUseIn->AlgList, algIdx, extDoUseIn->AlgList[algIdx] ) )
                                {
                                    mfxSts = MFX_ERR_UNSUPPORTED;
                                    continue; // stop working with ExtParam[i]
                                }

                                if(MFX_EXTBUFF_VPP_COMPOSITE == extDoUseIn->AlgList[algIdx])
                                {
                                    mfxSts = MFX_ERR_INVALID_VIDEO_PARAM;
                                    continue; // stop working with ExtParam[i]
                                }

                                if(MFX_EXTBUFF_VPP_FIELD_PROCESSING == extDoUseIn->AlgList[algIdx])
                                {
                                    /* NOTE:
                                     * It's legal to use DOUSE for field processing,
                                     * but application must attach appropriate ext buffer to mfxFrameData for each input surface */
                                    //mfxSts = MFX_ERR_UNSUPPORTED;
                                    //continue;
                                }
                                extDoUseOut->AlgList[algIdx] = extDoUseIn->AlgList[algIdx];
                            }
                            extDoUseOut->NumAlg = extDoUseIn->NumAlg;
                        }
                        //--------------------------------------
                    }
                    else if( MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION == in->ExtParam[i]->BufferId )
                    {
                        // No specific checks for Opaque ext buffer at the moment.
                    }
#ifdef MFX_ENABLE_MCTF
                    else if (MFX_EXTBUFF_VPP_MCTF == in->ExtParam[i]->BufferId)
                    {
                        // no specific checks for MCTF control buffer
                        continue;
                    }
#endif
                    else
                    {
                        // Unknown ext buffer: zero its id in 'out' and report unsupported.
                        out->ExtParam[i]->BufferId = 0;
                        mfxSts = MFX_ERR_UNSUPPORTED;
                    }// if( MFX_EXTBUFF_VPP_XXX == in->ExtParam[i]->BufferId )
                } // if(in->ExtParam[i] && out->ExtParam[i])
            } // for (i = 0; i < out->NumExtParam; i++)
        } // if (in->ExtParam && out->ExtParam && (in->NumExtParam == out->NumExtParam) )

        // A2RGB10 output requires a 10-bit (P010/P210) input.
        if ( out->vpp.In.FourCC != MFX_FOURCC_P010 &&
             out->vpp.In.FourCC != MFX_FOURCC_P210 &&
             out->vpp.Out.FourCC == MFX_FOURCC_A2RGB10 ){
            if( out->vpp.In.FourCC )
            {
                out->vpp.In.FourCC = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }

        // P010 input allows only a restricted set of output formats.
        if ( out->vpp.In.FourCC == MFX_FOURCC_P010 &&
             out->vpp.Out.FourCC != MFX_FOURCC_A2RGB10 &&
             out->vpp.Out.FourCC != MFX_FOURCC_NV12 &&
             out->vpp.Out.FourCC != MFX_FOURCC_P010 &&
             out->vpp.Out.FourCC != MFX_FOURCC_P210 &&
             out->vpp.Out.FourCC != MFX_FOURCC_YUY2 &&
#if defined(MFX_VA_LINUX)
             out->vpp.Out.FourCC != MFX_FOURCC_UYVY &&
#endif
             out->vpp.Out.FourCC != MFX_FOURCC_AYUV &&
#if (MFX_VERSION >= 1027)
             out->vpp.Out.FourCC != MFX_FOURCC_Y210 &&
             out->vpp.Out.FourCC != MFX_FOURCC_Y410 &&
#endif
             out->vpp.Out.FourCC != MFX_FOURCC_RGB4){
            if( out->vpp.In.FourCC )
            {
                out->vpp.In.FourCC = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }

        /* [IN VPP] data */
        if( out->vpp.In.FourCC != MFX_FOURCC_YV12 &&
            out->vpp.In.FourCC != MFX_FOURCC_NV12 &&
            out->vpp.In.FourCC != MFX_FOURCC_YUY2 &&
#if defined (MFX_ENABLE_FOURCC_RGB565)
            out->vpp.In.FourCC != MFX_FOURCC_RGB565 &&
#endif // MFX_ENABLE_FOURCC_RGB565
            out->vpp.In.FourCC != MFX_FOURCC_RGB4 &&
            out->vpp.In.FourCC != MFX_FOURCC_P010 &&
            out->vpp.In.FourCC != MFX_FOURCC_UYVY &&
            out->vpp.In.FourCC != MFX_FOURCC_P210 &&
#if (MFX_VERSION >= 1027)
            out->vpp.In.FourCC != MFX_FOURCC_Y210 &&
            out->vpp.In.FourCC != MFX_FOURCC_Y410 &&
#endif
            out->vpp.In.FourCC != MFX_FOURCC_AYUV)
        {
            if( out->vpp.In.FourCC )
            {
                out->vpp.In.FourCC = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }

        // Check for invalid cases
        if (out->vpp.In.PicStruct != MFX_PICSTRUCT_PROGRESSIVE &&
            out->vpp.In.PicStruct != MFX_PICSTRUCT_FIELD_TFF &&
            out->vpp.In.PicStruct != MFX_PICSTRUCT_FIELD_BFF &&
            out->vpp.In.PicStruct != MFX_PICSTRUCT_FIELD_SINGLE &&
            out->vpp.In.PicStruct != MFX_PICSTRUCT_FIELD_TOP &&    // Field pass-through
            out->vpp.In.PicStruct != MFX_PICSTRUCT_FIELD_BOTTOM &&
            out->vpp.In.PicStruct != MFX_PICSTRUCT_UNKNOWN)
        {
            if( out->vpp.In.PicStruct )
            {
                out->vpp.In.PicStruct = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }

        // Frame rate: either both N and D are zero (unset) or both non-zero.
        if ((0 == (out->vpp.In.FrameRateExtN * out->vpp.In.FrameRateExtD)) &&
            (out->vpp.In.FrameRateExtN + out->vpp.In.FrameRateExtD) )
        {
            out->vpp.In.FrameRateExtN = 0;
            out->vpp.In.FrameRateExtD = 0;
            mfxSts = MFX_ERR_UNSUPPORTED;
        }

        // Dimensions must be multiples of 16.
        if( out->vpp.In.Width )
        {
            if ( (out->vpp.In.Width & 15 ) != 0 )
            {
                out->vpp.In.Width = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }

        if (out->vpp.In.Height)
        {
            if ((out->vpp.In.Height & 15) !=0 )
            {
                out->vpp.In.Height = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }

        /* [OUT VPP] data */
        if( out->vpp.Out.FourCC != MFX_FOURCC_YV12 &&
            out->vpp.Out.FourCC != MFX_FOURCC_NV12 &&
            out->vpp.Out.FourCC != MFX_FOURCC_YUY2 &&
            out->vpp.Out.FourCC != MFX_FOURCC_RGB4 &&
            out->vpp.Out.FourCC != MFX_FOURCC_UYVY &&
#ifdef MFX_ENABLE_RGBP
            out->vpp.Out.FourCC != MFX_FOURCC_RGBP &&
#endif
            out->vpp.Out.FourCC != MFX_FOURCC_P010 &&
            out->vpp.Out.FourCC != MFX_FOURCC_P210 &&
#if (MFX_VERSION >= 1027)
            out->vpp.Out.FourCC != MFX_FOURCC_Y210 &&
            out->vpp.Out.FourCC != MFX_FOURCC_Y410 &&
#endif
            out->vpp.Out.FourCC != MFX_FOURCC_AYUV &&
            out->vpp.Out.FourCC != MFX_FOURCC_A2RGB10 )
        {
            out->vpp.Out.FourCC = 0;
            mfxSts = MFX_ERR_UNSUPPORTED;
        }

        if (out->vpp.Out.PicStruct != MFX_PICSTRUCT_PROGRESSIVE &&
            out->vpp.Out.PicStruct != MFX_PICSTRUCT_FIELD_TFF &&
            out->vpp.Out.PicStruct != MFX_PICSTRUCT_FIELD_BFF &&
            out->vpp.Out.PicStruct != MFX_PICSTRUCT_FIELD_SINGLE && // Field pass-through
            out->vpp.Out.PicStruct != MFX_PICSTRUCT_FIELD_TOP &&
            out->vpp.Out.PicStruct != MFX_PICSTRUCT_FIELD_BOTTOM &&
            out->vpp.Out.PicStruct != MFX_PICSTRUCT_UNKNOWN)
        {
            if(out->vpp.Out.PicStruct)
            {
                out->vpp.Out.PicStruct = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }

        if ((0 == (out->vpp.Out.FrameRateExtN * out->vpp.Out.FrameRateExtD)) &&
            (out->vpp.Out.FrameRateExtN + out->vpp.Out.FrameRateExtD))
        {
            out->vpp.Out.FrameRateExtN = 0;
            out->vpp.Out.FrameRateExtD = 0;
            mfxSts = MFX_ERR_UNSUPPORTED;
        }

        if ( out->vpp.Out.Width )
        {
            if ( (out->vpp.Out.Width & 15 ) != 0 )
            {
                out->vpp.Out.Width = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }
        if( out->vpp.Out.Height )
        {
            if ((out->vpp.Out.Height & 15) !=0)
            {
                out->vpp.Out.Height = 0;
                mfxSts = MFX_ERR_UNSUPPORTED;
            }
        }

        MFX_CHECK_STS(mfxSts);

        //-------------------------------------------------
        // FRC, IS and similar enhancement algorithms
        // special "interface" to signal on application level about support/unsupport ones
        //-------------------------------------------------
        bool bCorrectionEnable = true;
        mfxSts = CheckPlatformLimitations(core, *out, bCorrectionEnable);
        //-------------------------------------------------

        mfxStatus hwQuerySts = MFX_ERR_NONE;

        if(MFX_PLATFORM_HARDWARE == core->GetPlatformType())
        {
            // HW VPP checking
            hwQuerySts = VideoVPPHW::Query(core, out);

            // Statuses returned by Init differ in several cases from Query
            if (MFX_ERR_INVALID_VIDEO_PARAM == hwQuerySts || MFX_ERR_UNSUPPORTED == hwQuerySts)
            {
                return MFX_ERR_UNSUPPORTED;
            }

            if (MFX_WRN_INCOMPATIBLE_VIDEO_PARAM == hwQuerySts || MFX_WRN_FILTER_SKIPPED == hwQuerySts)
            {
                return hwQuerySts;
            }

            if(MFX_ERR_NONE == hwQuerySts)
            {
                return mfxSts;
            }
            else
            {
                hwQuerySts = MFX_WRN_PARTIAL_ACCELERATION;
            }
        }
        else
        {
            MFX_RETURN(MFX_ERR_UNSUPPORTED);
        }

        MFX_CHECK_STS(hwQuerySts);

        return mfxSts;
    }//else

} // mfxStatus VideoVPPBase::Query(VideoCORE *core, mfxVideoParam *in, mfxVideoParam *out)

// Re-initialization with restrictions relative to the Init-time state:
// PicStruct, IOPattern and composition mode may not change; AsyncDepth and
// resolutions may not grow; opaque allocation, if in use, must stay identical.
mfxStatus VideoVPPBase::Reset(mfxVideoParam *par)
{
    mfxStatus sts = MFX_ERR_NONE;

    MFX_CHECK_NULL_PTR1( par );

    VPP_CHECK_NOT_INITIALIZED;

    sts = CheckFrameInfo( &(par->vpp.In), VPP_IN, m_core->GetHWType());
    MFX_CHECK_STS( sts );

    sts = CheckFrameInfo( &(par->vpp.Out), VPP_OUT, m_core->GetHWType());
    MFX_CHECK_STS(sts);

    //-----------------------------------------------------
    // specific check for Reset()
    if( m_InitState.In.PicStruct != par->vpp.In.PicStruct || m_InitState.Out.PicStruct != par->vpp.Out.PicStruct)
    {
        return MFX_ERR_INCOMPATIBLE_VIDEO_PARAM;
    }

    /* IOPattern check */
    if( m_InitState.IOPattern != par->IOPattern )
    {
        return MFX_ERR_INCOMPATIBLE_VIDEO_PARAM;
    }

    if (par->Protected)
        return MFX_ERR_INVALID_VIDEO_PARAM;

    /* AsyncDepth */
    if( m_InitState.AsyncDepth < par->AsyncDepth )
    {
        return MFX_ERR_INCOMPATIBLE_VIDEO_PARAM;
    }

    /* in general, in/out resolution should be <= m_initParam.resolution */
    if( (par->vpp.In.Width  > m_InitState.In.Width)  ||
        (par->vpp.In.Height > m_InitState.In.Height) ||
        (par->vpp.Out.Width  > m_InitState.Out.Width) ||
        (par->vpp.Out.Height > m_InitState.Out.Height) )
    {
        return MFX_ERR_INCOMPATIBLE_VIDEO_PARAM;
    }
    //-----------------------------------------------------

    /* Opaque */
    if( m_bOpaqMode[VPP_IN] || m_bOpaqMode[VPP_OUT] )
    {
        bool bLocalOpaqMode[2] = {false, false};
        sts = CheckOpaqMode( par, bLocalOpaqMode );
        MFX_CHECK_STS( sts );

        if( bLocalOpaqMode[VPP_IN] && !m_bOpaqMode[VPP_IN] )
        {
            return MFX_ERR_INCOMPATIBLE_VIDEO_PARAM;
        }
        if( bLocalOpaqMode[VPP_OUT] && !m_bOpaqMode[VPP_OUT] )
        {
            return MFX_ERR_INCOMPATIBLE_VIDEO_PARAM;
        }

        if( bLocalOpaqMode[VPP_IN] || bLocalOpaqMode[VPP_OUT] )
        {
            mfxFrameAllocRequest localOpaqRequest[2];
            sts = GetOpaqRequest( par, bLocalOpaqMode, localOpaqRequest);
            MFX_CHECK_STS( sts );

            // Opaque surface counts must match the Init-time request exactly.
            if( bLocalOpaqMode[VPP_IN] )
            {
                if ( m_requestOpaq[VPP_IN].NumFrameMin != localOpaqRequest[VPP_IN].NumFrameMin ||
                     m_requestOpaq[VPP_IN].NumFrameSuggested != localOpaqRequest[VPP_IN].NumFrameSuggested )
                {
                    return MFX_ERR_INCOMPATIBLE_VIDEO_PARAM;
                }
            }
            if( bLocalOpaqMode[VPP_OUT] )
            {
                if ( m_requestOpaq[VPP_OUT].NumFrameMin != localOpaqRequest[VPP_OUT].NumFrameMin ||
                     m_requestOpaq[VPP_OUT].NumFrameSuggested != localOpaqRequest[VPP_OUT].NumFrameSuggested )
                {
                    return MFX_ERR_INCOMPATIBLE_VIDEO_PARAM;
                }
            }
        }
    }// Opaque

    bool isCompositionModeInNewParams = IsCompositionMode(par);
    // Enabling/disabling composition via Reset() doesn't work currently.
    // This is a workaround to prevent undefined behavior.
    MFX_CHECK(m_errPrtctState.isCompositionModeEnabled == isCompositionModeInNewParams, MFX_ERR_INCOMPATIBLE_VIDEO_PARAM);

    /* save init params to prevent core crash */
    m_errPrtctState.In  = par->vpp.In;
    m_errPrtctState.Out = par->vpp.Out;
    m_errPrtctState.IOPattern  = par->IOPattern;
    m_errPrtctState.AsyncDepth = par->AsyncDepth;
    m_errPrtctState.isCompositionModeEnabled = isCompositionModeInNewParams;

    return sts;

} // mfxStatus VideoVPPBase::Reset(mfxVideoParam *par)

mfxTaskThreadingPolicy VideoVPPBase::GetThreadingPolicy(void)
{
    return MFX_TASK_THREADING_INTRA;

} // mfxTaskThreadingPolicy VideoVPPBase::GetThreadingPolicy(void)

//---------------------------------------------------------
//                            UTILS
//---------------------------------------------------------

// NOTE(review): this function continues past the end of this chunk;
// only its visible beginning is reproduced here.
mfxStatus VideoVPPBase::CheckPlatformLimitations(
    VideoCORE* core,
    mfxVideoParam & param,
    bool /* bCorrectionEnable */)
{
    std::vector<mfxU32> capsList;

    MfxHwVideoProcessing::mfxVppCaps caps;
    QueryCaps(core, caps);
    ConvertCaps2ListDoUse(caps, capsList);

    std::vector<mfxU32> pipelineList;
    bool bExtendedSupport = true;
    mfxStatus sts = GetPipelineList( &param, pipelineList, bExtendedSupport );
    MFX_CHECK_STS(sts);

    std::vector<mfxU32> supportedList;
    std::vector<mfxU32> unsupportedList;

    // compare pipelineList and capsList
    mfxStatus capsSts = GetCrossList(pipelineList, capsList, supportedList, unsupportedList);// this function could return
WRN_FILTER_SKIPPED // check unsupported list if we need to reset ext buffer fields if(!unsupportedList.empty()) { if (IsFilterFound(&unsupportedList[0], (mfxU32)unsupportedList.size(), MFX_EXTBUFF_VPP_IMAGE_STABILIZATION)) { SetMFXISMode(param, 0); } } return (MFX_ERR_NONE != capsSts) ? capsSts : sts; } // mfxStatus CheckPlatformLimitations(...) VideoVPP_HW::VideoVPP_HW(VideoCORE *core, mfxStatus* sts) : VideoVPPBase(core, sts) { } mfxStatus VideoVPP_HW::InternalInit(mfxVideoParam *par) { mfxStatus sts = MFX_ERR_UNDEFINED_BEHAVIOR; CommonCORE* pCommonCore = NULL; bool bIsFilterSkipped = false; bool isFieldProcessing = IsFilterFound(&m_pipelineList[0], (mfxU32)m_pipelineList.size(), MFX_EXTBUFF_VPP_FIELD_PROCESSING) || IsFilterFound(&m_pipelineList[0], (mfxU32)m_pipelineList.size(), MFX_EXTBUFF_VPP_FIELD_WEAVING) || IsFilterFound(&m_pipelineList[0], (mfxU32)m_pipelineList.size(), MFX_EXTBUFF_VPP_FIELD_SPLITTING); pCommonCore = QueryCoreInterface<CommonCORE>(m_core, MFXIVideoCORE_GUID); MFX_CHECK(pCommonCore, MFX_ERR_UNDEFINED_BEHAVIOR); VideoVPPHW::IOMode mode = VideoVPPHW::GetIOMode(par, m_requestOpaq); m_pHWVPP.reset(new VideoVPPHW(mode, m_core)); if (isFieldProcessing) { CmDevice * device = QueryCoreInterface<CmDevice>(m_core, MFXICORECM_GUID); MFX_CHECK(device, MFX_ERR_UNDEFINED_BEHAVIOR); m_pHWVPP.get()->SetCmDevice(device); } sts = m_pHWVPP.get()->Init(par); // OK or ERR only if (MFX_WRN_FILTER_SKIPPED == sts) { bIsFilterSkipped = true; sts = MFX_ERR_NONE; } if (MFX_ERR_NONE != sts) { m_pHWVPP.reset(0); } MFX_CHECK_STS( sts ); return (bIsFilterSkipped) ? 
MFX_WRN_FILTER_SKIPPED : MFX_ERR_NONE; } mfxStatus VideoVPP_HW::Reset(mfxVideoParam *par) { mfxStatus sts = VideoVPPBase::Reset(par); MFX_CHECK_STS( sts ); sts = m_pHWVPP.get()->Reset(par); MFX_CHECK_STS(sts); bool bCorrectionEnable = false; sts = CheckPlatformLimitations(m_core, *par, bCorrectionEnable); return sts; } mfxStatus VideoVPP_HW::Close(void) { mfxStatus sts = VideoVPPBase::Close(); m_pHWVPP.reset(0); return sts; } // mfxStatus VideoVPPBase::Close(void) mfxStatus VideoVPP_HW::GetVideoParam(mfxVideoParam *par) { mfxStatus sts = VideoVPPBase::GetVideoParam(par); MFX_CHECK_STS( sts ); if (m_pHWVPP.get()) { return m_pHWVPP->GetVideoParams(par); } return sts; } mfxStatus VideoVPP_HW::VppFrameCheck(mfxFrameSurface1 *in, mfxFrameSurface1 *out, mfxExtVppAuxData *aux, MFX_ENTRY_POINT pEntryPoints[], mfxU32 &numEntryPoints) { mfxStatus sts = VideoVPPBase::VppFrameCheck(in, out, aux, pEntryPoints, numEntryPoints); MFX_CHECK_STS( sts ); mfxStatus internalSts = m_pHWVPP.get()->VppFrameCheck( in, out, aux, pEntryPoints, numEntryPoints); bool isInverseTelecinedEnabled = false; const DdiTask* pTask = (DdiTask*)pEntryPoints[0].pParam ; isInverseTelecinedEnabled = IsFilterFound(&m_pipelineList[0], (mfxU32)m_pipelineList.size(), MFX_EXTBUFF_VPP_ITC); if (MFX_ERR_MORE_DATA == internalSts && true == isInverseTelecinedEnabled) { //internalSts = (mfxStatus) MFX_ERR_MORE_DATA_SUBMIT_TASK; } if( out && (MFX_ERR_NONE == internalSts || MFX_ERR_MORE_SURFACE == internalSts) ) { sts = PassThrough(NULL != in ? &(in->Info) : NULL, &(out->Info), pTask->taskIndex); //MFX_CHECK_STS( sts ); } return (MFX_ERR_NONE == internalSts) ? 
sts : internalSts; } mfxStatus VideoVPP_HW::PassThrough(mfxFrameInfo* In, mfxFrameInfo* Out, mfxU32 taskIndex) { if( In ) // no delay { mfxStatus sts; Out->AspectRatioH = In->AspectRatioH; Out->AspectRatioW = In->AspectRatioW; Out->PicStruct = UpdatePicStruct( In->PicStruct, Out->PicStruct, m_bDynamicDeinterlace, sts, taskIndex ); m_errPrtctState.Deffered.AspectRatioH = Out->AspectRatioH; m_errPrtctState.Deffered.AspectRatioW = Out->AspectRatioW; m_errPrtctState.Deffered.PicStruct = Out->PicStruct; // not "pass through" process. Frame Rates from Init. Out->FrameRateExtN = m_errPrtctState.Out.FrameRateExtN; Out->FrameRateExtD = m_errPrtctState.Out.FrameRateExtD; //return MFX_ERR_NONE; return sts; } else { if ( MFX_PICSTRUCT_UNKNOWN == Out->PicStruct && m_bDynamicDeinterlace ) { // Fix for case when app retrievs cached frames from ADI3->60 and output surf has unknown picstruct Out->PicStruct = MFX_PICSTRUCT_PROGRESSIVE; } // in case of HW_VPP in==NULL means ERROR due to absence of delayed frames and should be processed before. // here we return OK only return MFX_ERR_NONE; } } // mfxStatus VideoVPPBase::PassThrough(mfxFrameInfo* In, mfxFrameInfo* Out) mfxStatus VideoVPP_HW::RunFrameVPP(mfxFrameSurface1* , mfxFrameSurface1* , mfxExtVppAuxData *) { return MFX_ERR_NONE; } #endif // MFX_ENABLE_VPP /* EOF */
#include <gtest/gtest.h>
#include <gmock/gmock-matchers.h>

#include "meta/metamanager.hpp"
#include "game/skirmish/meta/deploymentzonemetaclass.hpp"
#include "game/deploymentzone.hpp"

using ::testing::NotNull;

// Deserializing a deployment-zone YAML document must restore the player id
// and every square that belongs to the zone.
TEST(DeploymentZoneMetaClass_Deserialize, Then_all_properties_are_initialized)
{
    // Arrange: build the YAML document the meta class expects.
    YAML::Node document;
    document["type"] = qrw::DeploymentZone::typeName.getStringId();
    document["playerId"] = 2;
    document["zone_"][0]["x"] = 11;
    document["zone_"][0]["y"] = 12;
    document["zone_"][1]["x"] = 4;
    document["zone_"][1]["y"] = 5;

    qrw::MetaManager manager;
    qrw::DeploymentZoneMetaClass metaClass(manager);

    // Act: deserialize and downcast to the concrete game object.
    auto* zone = dynamic_cast<qrw::DeploymentZone*>(metaClass.deserialize(document));

    // Assert: the object exists and carries all serialized properties.
    ASSERT_THAT(zone, NotNull());
    EXPECT_EQ(zone->getPlayerId(), 2);
    EXPECT_EQ(zone->getSize(), 2);
    EXPECT_TRUE(zone->containsSquare({11, 12}));
    EXPECT_TRUE(zone->containsSquare({4, 5}));

    delete zone;
}
/* * Copyright (C) 2008 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Read-only access to Zip archives, with minimal heap allocation. */ #include "ZipArchive.h" #include <zlib.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <fcntl.h> #include <errno.h> #include <JNIHelp.h> // TEMP_FAILURE_RETRY may or may not be in unistd #include <utils/Compat.h> // For off64_t and lseek64 on Mac #ifndef O_BINARY #define O_BINARY 0 #endif /* * Zip file constants. 
*/ #define kEOCDSignature 0x06054b50 #define kEOCDLen 22 #define kEOCDDiskNumber 4 // number of the current disk #define kEOCDDiskNumberForCD 6 // disk number with the Central Directory #define kEOCDNumEntries 8 // offset to #of entries in file #define kEOCDTotalNumEntries 10 // offset to total #of entries in spanned archives #define kEOCDSize 12 // size of the central directory #define kEOCDFileOffset 16 // offset to central directory #define kEOCDCommentSize 20 // offset to the length of the file comment #define kMaxCommentLen 65535 // longest possible in ushort #define kMaxEOCDSearch (kMaxCommentLen + kEOCDLen) #define kLFHSignature 0x04034b50 #define kLFHLen 30 // excluding variable-len fields #define kLFHGPBFlags 6 // offset to GPB flags #define kLFHNameLen 26 // offset to filename length #define kLFHExtraLen 28 // offset to extra length #define kCDESignature 0x02014b50 #define kCDELen 46 // excluding variable-len fields #define kCDEGPBFlags 8 // offset to GPB flags #define kCDEMethod 10 // offset to compression method #define kCDEModWhen 12 // offset to modification timestamp #define kCDECRC 16 // offset to entry CRC #define kCDECompLen 20 // offset to compressed length #define kCDEUncompLen 24 // offset to uncompressed length #define kCDENameLen 28 // offset to filename length #define kCDEExtraLen 30 // offset to extra length #define kCDECommentLen 32 // offset to comment length #define kCDELocalOffset 42 // offset to local hdr /* General Purpose Bit Flag */ #define kGPFEncryptedFlag (1 << 0) #define kGPFUnsupportedMask (kGPFEncryptedFlag) /* * The values we return for ZipEntryRO use 0 as an invalid value, so we * want to adjust the hash table index by a fixed amount. Using a large * value helps insure that people don't mix & match arguments, e.g. to * findEntryByIndex(). */ #define kZipEntryAdj 10000 /* * Convert a ZipEntry to a hash table index, verifying that it's in a * valid range. 
 */
static int entryToIndex(const ZipArchive* pArchive, const ZipEntry entry)
{
    /* undo the kZipEntryAdj offset applied when the entry was handed out */
    long ent = ((long) entry) - kZipEntryAdj;
    if (ent < 0 || ent >= pArchive->mHashTableSize ||
        pArchive->mHashTable[ent].name == NULL)
    {
        ALOGW("Zip: invalid ZipEntry %p (%ld)", entry, ent);
        return -1;
    }
    return ent;
}

/*
 * Simple string hash function for non-null-terminated strings.
 * Classic multiply-by-31 polynomial hash over the raw bytes.
 */
static unsigned int computeHash(const char* str, int len)
{
    unsigned int hash = 0;

    while (len--)
        hash = hash * 31 + *str++;

    return hash;
}

/*
 * Add a new entry to the hash table.
 *
 * Open addressing with linear probing; the mask works because
 * mHashTableSize is a power of two.  "str" is not copied — it must stay
 * valid for the lifetime of the archive (it points into the mapped CD).
 */
static void addToHash(ZipArchive* pArchive, const char* str, int strLen,
    unsigned int hash)
{
    const int hashTableSize = pArchive->mHashTableSize;
    int ent = hash & (hashTableSize - 1);

    /*
     * We over-allocated the table, so we're guaranteed to find an empty slot.
     */
    while (pArchive->mHashTable[ent].name != NULL)
        ent = (ent + 1) & (hashTableSize-1);

    pArchive->mHashTable[ent].name = str;
    pArchive->mHashTable[ent].nameLen = strLen;
}

/*
 * Get 2 little-endian bytes.
 */
static u2 get2LE(unsigned char const* pSrc)
{
    return pSrc[0] | (pSrc[1] << 8);
}

/*
 * Get 4 little-endian bytes.
 */
static u4 get4LE(unsigned char const* pSrc)
{
    u4 result;

    result = pSrc[0];
    result |= pSrc[1] << 8;
    result |= pSrc[2] << 16;
    result |= pSrc[3] << 24;

    return result;
}

/*
 * Scan the tail of the file for the EOCD record and map the central
 * directory.  "scanBuf" is a caller-supplied buffer of "readAmount" bytes.
 *
 * NOTE(review): the early-exit paths below "return false" (== 0) from an
 * int-returning function, but the mapCentralDirectory caller chain treats
 * 0 as success — these look like they should return -1.  TODO confirm the
 * error-code convention before changing.
 */
static int mapCentralDirectory0(int fd, const char* debugFileName,
        ZipArchive* pArchive, off64_t fileLength, size_t readAmount, u1* scanBuf)
{
    /*
     * Make sure this is a Zip archive.
     */
    if (lseek64(pArchive->mFd, 0, SEEK_SET) != 0) {
        ALOGW("seek to start failed: %s", strerror(errno));
        return false;
    }

    ssize_t actual = TEMP_FAILURE_RETRY(read(pArchive->mFd, scanBuf, sizeof(int32_t)));
    if (actual != (ssize_t) sizeof(int32_t)) {
        ALOGI("couldn't read first signature from zip archive: %s", strerror(errno));
        return false;
    }

    unsigned int header = get4LE(scanBuf);
    if (header != kLFHSignature) {
        ALOGV("Not a Zip archive (found 0x%08x)\n", header);
        return false;
    }

    /*
     * Perform the traditional EOCD snipe hunt.
* * We're searching for the End of Central Directory magic number, * which appears at the start of the EOCD block. It's followed by * 18 bytes of EOCD stuff and up to 64KB of archive comment. We * need to read the last part of the file into a buffer, dig through * it to find the magic number, parse some values out, and use those * to determine the extent of the CD. * * We start by pulling in the last part of the file. */ off64_t searchStart = fileLength - readAmount; if (lseek64(pArchive->mFd, searchStart, SEEK_SET) != searchStart) { ALOGW("seek %ld failed: %s\n", (long) searchStart, strerror(errno)); return false; } actual = TEMP_FAILURE_RETRY(read(pArchive->mFd, scanBuf, readAmount)); if (actual != (ssize_t) readAmount) { ALOGW("Zip: read %zd, expected %zd. Failed: %s\n", actual, readAmount, strerror(errno)); return false; } /* * Scan backward for the EOCD magic. In an archive without a trailing * comment, we'll find it on the first try. (We may want to consider * doing an initial minimal read; if we don't find it, retry with a * second read as above.) */ int i; for (i = readAmount - kEOCDLen; i >= 0; i--) { if (scanBuf[i] == 0x50 && get4LE(&scanBuf[i]) == kEOCDSignature) { ALOGV("+++ Found EOCD at buf+%d", i); break; } } if (i < 0) { ALOGD("Zip: EOCD not found, %s is not zip", debugFileName); return -1; } off64_t eocdOffset = searchStart + i; const u1* eocdPtr = scanBuf + i; assert(eocdOffset < fileLength); /* * Grab the CD offset and size, and the number of entries in the * archive. Verify that they look reasonable. */ u4 diskNumber = get2LE(eocdPtr + kEOCDDiskNumber); u4 diskWithCentralDir = get2LE(eocdPtr + kEOCDDiskNumberForCD); u4 numEntries = get2LE(eocdPtr + kEOCDNumEntries); u4 totalNumEntries = get2LE(eocdPtr + kEOCDTotalNumEntries); u4 centralDirSize = get4LE(eocdPtr + kEOCDSize); u4 centralDirOffset = get4LE(eocdPtr + kEOCDFileOffset); u4 commentSize = get2LE(eocdPtr + kEOCDCommentSize); // Verify that they look reasonable. 
if ((long long) centralDirOffset + (long long) centralDirSize > (long long) eocdOffset) { ALOGW("bad offsets (dir %ld, size %u, eocd %ld)\n", (long) centralDirOffset, centralDirSize, (long) eocdOffset); return false; } if (numEntries == 0) { ALOGW("empty archive?\n"); return false; } else if (numEntries != totalNumEntries || diskNumber != 0 || diskWithCentralDir != 0) { ALOGW("spanned archives not supported"); return false; } // Check to see if comment is a sane size if (((size_t) commentSize > (fileLength - kEOCDLen)) || (eocdOffset > (fileLength - kEOCDLen) - commentSize)) { ALOGW("comment size runs off end of file"); return false; } ALOGV("+++ numEntries=%d dirSize=%d dirOffset=%d\n", numEntries, centralDirSize, centralDirOffset); /* * It all looks good. Create a mapping for the CD, and set the fields * in pArchive. */ if (sysMapFileSegmentInShmem(fd, centralDirOffset, centralDirSize, &pArchive->mDirectoryMap) != 0) { ALOGW("Zip: cd map failed"); return -1; } pArchive->mNumEntries = numEntries; pArchive->mDirectoryOffset = centralDirOffset; return 0; } /* * Find the zip Central Directory and memory-map it. * * On success, returns 0 after populating fields from the EOCD area: * mDirectoryOffset * mDirectoryMap * mNumEntries */ static int mapCentralDirectory(int fd, const char* debugFileName, ZipArchive* pArchive) { /* * Get and test file length. */ off64_t fileLength = lseek64(fd, 0, SEEK_END); if (fileLength < kEOCDLen) { ALOGV("Zip: length %ld is too small to be zip", (long) fileLength); return -1; } /* * Perform the traditional EOCD snipe hunt. * * We're searching for the End of Central Directory magic number, * which appears at the start of the EOCD block. It's followed by * 18 bytes of EOCD stuff and up to 64KB of archive comment. We * need to read the last part of the file into a buffer, dig through * it to find the magic number, parse some values out, and use those * to determine the extent of the CD. * * We start by pulling in the last part of the file. 
*/ size_t readAmount = kMaxEOCDSearch; if (fileLength < off_t(readAmount)) readAmount = fileLength; u1* scanBuf = (u1*) malloc(readAmount); if (scanBuf == NULL) { return -1; } int result = mapCentralDirectory0(fd, debugFileName, pArchive, fileLength, readAmount, scanBuf); free(scanBuf); return result; } /* * Parses the Zip archive's Central Directory. Allocates and populates the * hash table. * * Returns 0 on success. */ static int parseZipArchive(ZipArchive* pArchive) { int result = -1; const u1* cdPtr = (const u1*)pArchive->mDirectoryMap.addr; size_t cdLength = pArchive->mDirectoryMap.length; int numEntries = pArchive->mNumEntries; /* * Create hash table. We have a minimum 75% load factor, possibly as * low as 50% after we round off to a power of 2. There must be at * least one unused entry to avoid an infinite loop during creation. */ pArchive->mHashTableSize = dexRoundUpPower2(1 + (numEntries * 4) / 3); pArchive->mHashTable = (ZipHashEntry*) calloc(pArchive->mHashTableSize, sizeof(ZipHashEntry)); /* * Walk through the central directory, adding entries to the hash * table and verifying values. 
*/ const u1* ptr = cdPtr; int i; for (i = 0; i < numEntries; i++) { if (get4LE(ptr) != kCDESignature) { ALOGW("Zip: missed a central dir sig (at %d)", i); goto bail; } if (ptr + kCDELen > cdPtr + cdLength) { ALOGW("Zip: ran off the end (at %d)", i); goto bail; } long localHdrOffset = (long) get4LE(ptr + kCDELocalOffset); if (localHdrOffset >= pArchive->mDirectoryOffset) { ALOGW("Zip: bad LFH offset %ld at entry %d", localHdrOffset, i); goto bail; } unsigned int gpbf = get2LE(ptr + kCDEGPBFlags); if ((gpbf & kGPFUnsupportedMask) != 0) { ALOGW("Invalid General Purpose Bit Flag: %d", gpbf); goto bail; } unsigned int nameLen, extraLen, commentLen, hash; nameLen = get2LE(ptr + kCDENameLen); extraLen = get2LE(ptr + kCDEExtraLen); commentLen = get2LE(ptr + kCDECommentLen); const char *name = (const char *) ptr + kCDELen; /* Check name for NULL characters */ if (memchr(name, 0, nameLen) != NULL) { ALOGW("Filename contains NUL byte"); goto bail; } /* add the CDE filename to the hash table */ hash = computeHash(name, nameLen); addToHash(pArchive, name, nameLen, hash); /* We don't care about the comment or extra data. */ ptr += kCDELen + nameLen + extraLen + commentLen; if ((size_t)(ptr - cdPtr) > cdLength) { ALOGW("Zip: bad CD advance (%d vs %zd) at entry %d", (int) (ptr - cdPtr), cdLength, i); goto bail; } } ALOGV("+++ zip good scan %d entries", numEntries); result = 0; bail: return result; } /* * Open the specified file read-only. We examine the contents and verify * that it appears to be a valid zip file. * * This will be called on non-Zip files, especially during VM startup, so * we don't want to be too noisy about certain types of failure. (Do * we want a "quiet" flag?) * * On success, we fill out the contents of "pArchive" and return 0. On * failure we return the errno value. 
 */
int dexZipOpenArchive(const char* fileName, ZipArchive* pArchive)
{
    int fd, err;

    ALOGV("Opening as zip '%s' %p", fileName, pArchive);

    memset(pArchive, 0, sizeof(ZipArchive));

    fd = open(fileName, O_RDONLY | O_BINARY, 0);
    if (fd < 0) {
        /* preserve errno for the caller; fall back to -1 if open left it 0 */
        err = errno ? errno : -1;
        ALOGV("Unable to open '%s': %s", fileName, strerror(err));
        return err;
    }

    /* dexZipPrepArchive takes ownership of fd, even on failure */
    return dexZipPrepArchive(fd, fileName, pArchive);
}

/*
 * Prepare to access a ZipArchive through an open file descriptor.
 *
 * Takes ownership of "fd": on failure the descriptor is closed via
 * dexZipCloseArchive.  "debugFileName" is used only for log messages.
 *
 * On success, we fill out the contents of "pArchive" and return 0.
 */
int dexZipPrepArchive(int fd, const char* debugFileName, ZipArchive* pArchive)
{
    int result = -1;

    memset(pArchive, 0, sizeof(*pArchive));
    pArchive->mFd = fd;

    /* locate and mmap the central directory */
    if (mapCentralDirectory(fd, debugFileName, pArchive) != 0)
        goto bail;

    /* walk the CD and build the name hash table */
    if (parseZipArchive(pArchive) != 0) {
        ALOGV("Zip: parsing '%s' failed", debugFileName);
        goto bail;
    }

    /* success */
    result = 0;

bail:
    if (result != 0)
        dexZipCloseArchive(pArchive);
    return result;
}

/*
 * Close a ZipArchive, closing the file and freeing the contents.
 *
 * NOTE: the ZipArchive may not have been fully created.
 */
void dexZipCloseArchive(ZipArchive* pArchive)
{
    ALOGV("Closing archive %p", pArchive);

    if (pArchive->mFd >= 0)
        close(pArchive->mFd);

    sysReleaseShmem(&pArchive->mDirectoryMap);

    free(pArchive->mHashTable);

    /* ensure nobody tries to use the ZipArchive after it's closed */
    pArchive->mDirectoryOffset = -1;
    pArchive->mFd = -1;
    pArchive->mNumEntries = -1;
    pArchive->mHashTableSize = -1;
    pArchive->mHashTable = NULL;
}

/*
 * Find a matching entry.
 *
 * Returns 0 if not found.
 */
ZipEntry dexZipFindEntry(const ZipArchive* pArchive, const char* entryName)
{
    int nameLen = strlen(entryName);
    unsigned int hash = computeHash(entryName, nameLen);
    const int hashTableSize = pArchive->mHashTableSize;
    int ent = hash & (hashTableSize-1);

    /* linear-probe until we hit the entry or an empty slot (miss) */
    while (pArchive->mHashTable[ent].name != NULL) {
        if (pArchive->mHashTable[ent].nameLen == nameLen &&
            memcmp(pArchive->mHashTable[ent].name, entryName, nameLen) == 0)
        {
            /* match */
            return (ZipEntry)(long)(ent + kZipEntryAdj);
        }

        ent = (ent + 1) & (hashTableSize-1);
    }

    return NULL;
}

#if 0
/*
 * Find the Nth entry.
 *
 * This currently involves walking through the sparse hash table, counting
 * non-empty entries.  If we need to speed this up we can either allocate
 * a parallel lookup table or (perhaps better) provide an iterator interface.
 */
ZipEntry findEntryByIndex(ZipArchive* pArchive, int idx)
{
    if (idx < 0 || idx >= pArchive->mNumEntries) {
        ALOGW("Invalid index %d", idx);
        return NULL;
    }

    int ent;
    for (ent = 0; ent < pArchive->mHashTableSize; ent++) {
        if (pArchive->mHashTable[ent].name != NULL) {
            if (idx-- == 0)
                return (ZipEntry) (ent + kZipEntryAdj);
        }
    }

    return NULL;
}
#endif

/*
 * Get the useful fields from the zip entry.
 *
 * Any of the output pointers may be NULL if the caller doesn't need
 * that field; pOffset is the expensive one (it requires reading the
 * local file header from disk).
 *
 * Returns non-zero if the contents of the fields (particularly the data
 * offset) appear to be bogus.
 */
int dexZipGetEntryInfo(const ZipArchive* pArchive, ZipEntry entry, int* pMethod,
    size_t* pUncompLen, size_t* pCompLen, off_t* pOffset, long* pModWhen,
    long* pCrc32)
{
    int ent = entryToIndex(pArchive, entry);
    if (ent < 0)
        return -1;

    /*
     * Recover the start of the central directory entry from the filename
     * pointer.  The filename is the first entry past the fixed-size data,
     * so we can just subtract back from that.
*/ const unsigned char* basePtr = (const unsigned char*) pArchive->mDirectoryMap.addr; const unsigned char* ptr = (const unsigned char*) pArchive->mHashTable[ent].name; off_t cdOffset = pArchive->mDirectoryOffset; ptr -= kCDELen; int method = get2LE(ptr + kCDEMethod); if (pMethod != NULL) *pMethod = method; if (pModWhen != NULL) *pModWhen = get4LE(ptr + kCDEModWhen); if (pCrc32 != NULL) *pCrc32 = get4LE(ptr + kCDECRC); size_t compLen = get4LE(ptr + kCDECompLen); if (pCompLen != NULL) *pCompLen = compLen; size_t uncompLen = get4LE(ptr + kCDEUncompLen); if (pUncompLen != NULL) *pUncompLen = uncompLen; /* * If requested, determine the offset of the start of the data. All we * have is the offset to the Local File Header, which is variable size, * so we have to read the contents of the struct to figure out where * the actual data starts. * * We also need to make sure that the lengths are not so large that * somebody trying to map the compressed or uncompressed data runs * off the end of the mapped region. * * Note we don't verify compLen/uncompLen if they don't request the * dataOffset, because dataOffset is expensive to determine. However, * if they don't have the file offset, they're not likely to be doing * anything with the contents. 
*/ if (pOffset != NULL) { long localHdrOffset = (long) get4LE(ptr + kCDELocalOffset); if (localHdrOffset + kLFHLen >= cdOffset) { ALOGW("Zip: bad local hdr offset in zip"); return -1; } u1 lfhBuf[kLFHLen]; if (lseek(pArchive->mFd, localHdrOffset, SEEK_SET) != localHdrOffset) { ALOGW("Zip: failed seeking to lfh at offset %ld", localHdrOffset); return -1; } ssize_t actual = TEMP_FAILURE_RETRY(read(pArchive->mFd, lfhBuf, sizeof(lfhBuf))); if (actual != sizeof(lfhBuf)) { ALOGW("Zip: failed reading lfh from offset %ld", localHdrOffset); return -1; } if (get4LE(lfhBuf) != kLFHSignature) { ALOGW("Zip: didn't find signature at start of lfh, offset=%ld", localHdrOffset); return -1; } u4 gpbf = get2LE(lfhBuf + kLFHGPBFlags); if ((gpbf & kGPFUnsupportedMask) != 0) { ALOGW("Invalid General Purpose Bit Flag: %d", gpbf); return -1; } off64_t dataOffset = localHdrOffset + kLFHLen + get2LE(lfhBuf + kLFHNameLen) + get2LE(lfhBuf + kLFHExtraLen); if (dataOffset >= cdOffset) { ALOGW("Zip: bad data offset %ld in zip", (long) dataOffset); return -1; } /* check lengths */ if ((off_t)(dataOffset + compLen) > cdOffset) { ALOGW("Zip: bad compressed length in zip (%ld + %zd > %ld)", (long) dataOffset, compLen, (long) cdOffset); return -1; } if (method == kCompressStored && (off_t)(dataOffset + uncompLen) > cdOffset) { ALOGW("Zip: bad uncompressed length in zip (%ld + %zd > %ld)", (long) dataOffset, uncompLen, (long) cdOffset); return -1; } *pOffset = dataOffset; } return 0; } /* * Uncompress "deflate" data from the archive's file to an open file * descriptor. */ static int inflateToFile(int outFd, int inFd, size_t uncompLen, size_t compLen) { int result = -1; const size_t kBufSize = 32768; unsigned char* readBuf = (unsigned char*) malloc(kBufSize); unsigned char* writeBuf = (unsigned char*) malloc(kBufSize); z_stream zstream; int zerr; if (readBuf == NULL || writeBuf == NULL) goto bail; /* * Initialize the zlib stream struct. 
*/ memset(&zstream, 0, sizeof(zstream)); zstream.zalloc = Z_NULL; zstream.zfree = Z_NULL; zstream.opaque = Z_NULL; zstream.next_in = NULL; zstream.avail_in = 0; zstream.next_out = (Bytef*) writeBuf; zstream.avail_out = kBufSize; zstream.data_type = Z_UNKNOWN; /* * Use the undocumented "negative window bits" feature to tell zlib * that there's no zlib header waiting for it. */ zerr = inflateInit2(&zstream, -MAX_WBITS); if (zerr != Z_OK) { if (zerr == Z_VERSION_ERROR) { ALOGE("Installed zlib is not compatible with linked version (%s)", ZLIB_VERSION); } else { ALOGW("Call to inflateInit2 failed (zerr=%d)", zerr); } goto bail; } /* * Loop while we have more to do. */ do { /* read as much as we can */ if (zstream.avail_in == 0) { size_t getSize = (compLen > kBufSize) ? kBufSize : compLen; ssize_t actual = TEMP_FAILURE_RETRY(read(inFd, readBuf, getSize)); if (actual != (ssize_t) getSize) { ALOGW("Zip: inflate read failed (%d vs %zd)", (int)actual, getSize); goto z_bail; } compLen -= getSize; zstream.next_in = readBuf; zstream.avail_in = getSize; } /* uncompress the data */ zerr = inflate(&zstream, Z_NO_FLUSH); if (zerr != Z_OK && zerr != Z_STREAM_END) { ALOGW("Zip: inflate zerr=%d (nIn=%p aIn=%u nOut=%p aOut=%u)", zerr, zstream.next_in, zstream.avail_in, zstream.next_out, zstream.avail_out); goto z_bail; } /* write when we're full or when we're done */ if (zstream.avail_out == 0 || (zerr == Z_STREAM_END && zstream.avail_out != kBufSize)) { size_t writeSize = zstream.next_out - writeBuf; if (sysWriteFully(outFd, writeBuf, writeSize, "Zip inflate") != 0) goto z_bail; zstream.next_out = writeBuf; zstream.avail_out = kBufSize; } } while (zerr == Z_OK); assert(zerr == Z_STREAM_END); /* other errors should've been caught */ /* paranoia */ if (zstream.total_out != uncompLen) { ALOGW("Zip: size mismatch on inflated file (%ld vs %zd)", zstream.total_out, uncompLen); goto z_bail; } result = 0; z_bail: inflateEnd(&zstream); /* free up any allocated structures */ bail: 
    free(readBuf);
    free(writeBuf);
    return result;
}

/*
 * Uncompress an entry, in its entirety, to an open file descriptor.
 *
 * Writes the entry's data to "fd"; for stored entries the bytes are
 * copied verbatim, for deflated entries they are inflated.  Seeks the
 * archive's own descriptor, so not safe for concurrent extraction from
 * one ZipArchive.  Returns 0 on success, -1 on failure.
 *
 * TODO: this doesn't verify the data's CRC, but probably should (especially
 * for uncompressed data).
 */
int dexZipExtractEntryToFile(const ZipArchive* pArchive,
    const ZipEntry entry, int fd)
{
    int result = -1;
    int ent = entryToIndex(pArchive, entry);
    if (ent < 0) {
        ALOGW("Zip: extract can't find entry %p", entry);
        goto bail;
    }

    int method;
    size_t uncompLen, compLen;
    off_t dataOffset;

    /* fetch method/lengths and the verified start-of-data offset */
    if (dexZipGetEntryInfo(pArchive, entry, &method, &uncompLen, &compLen,
            &dataOffset, NULL, NULL) != 0)
    {
        goto bail;
    }
    if (lseek(pArchive->mFd, dataOffset, SEEK_SET) != dataOffset) {
        ALOGW("Zip: lseek to data at %ld failed", (long) dataOffset);
        goto bail;
    }

    if (method == kCompressStored) {
        if (sysCopyFileToFile(fd, pArchive->mFd, uncompLen) != 0)
            goto bail;
    } else {
        if (inflateToFile(fd, pArchive->mFd, uncompLen, compLen) != 0)
            goto bail;
    }

    result = 0;

bail:
    return result;
}
/**************************************************************************\ * * This file is part of the Coin 3D visualization library. * Copyright (C) by Kongsberg Oil & Gas Technologies. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * ("GPL") version 2 as published by the Free Software Foundation. * See the file LICENSE.GPL at the root directory of this source * distribution for additional information about the GNU GPL. * * For using Coin with software that can not be combined with the GNU * GPL, and for taking advantage of the additional benefits of our * support services, please contact Kongsberg Oil & Gas Technologies * about acquiring a Coin Professional Edition License. * * See http://www.coin3d.org/ for more information. * * Kongsberg Oil & Gas Technologies, Bygdoy Alle 5, 0257 Oslo, NORWAY. * http://www.sim.no/ sales@sim.no coin-support@coin3d.org * \**************************************************************************/ /*! \class SoArray SoArray.h Inventor/nodes/SoArray.h \brief The SoArray class is a group node for setting up regular arrays of subgraphs. \ingroup nodes SoArray presents a convenient way of duplicating a node (typically a shape node) or a complete subgraph in 1 to 3 dimensions. The child node or subgraph can only be translated by regular offsets for all dimensions. For more flexible functionality for duplication of geometry, see the SoMultipleCopy group node, which can do general transformations (including rotation and scaling) for its child. 
<b>FILE FORMAT/DEFAULTS:</b> \code Array { origin FIRST numElements1 1 numElements2 1 numElements3 1 separation1 1 0 0 separation2 0 1 0 separation3 0 0 1 } \endcode \sa SoMultipleCopy */ #include <Inventor/nodes/SoArray.h> #include <Inventor/nodes/SoSwitch.h> // SO_SWITCH_ALL #include <Inventor/actions/SoCallbackAction.h> #include <Inventor/actions/SoSearchAction.h> #include <Inventor/actions/SoGLRenderAction.h> #include <Inventor/actions/SoGetBoundingBoxAction.h> #include <Inventor/elements/SoBBoxModelMatrixElement.h> #include <Inventor/elements/SoSwitchElement.h> #include <Inventor/misc/SoState.h> #include "nodes/SoSubNodeP.h" /*! \enum SoArray::Origin The possible settings for the SoArray::origin field. */ /*! \var SoSFEnum SoArray::origin Where the origin of the array should be set, ie how the array elements will be distributed from the local origo. Default value is SoArray::FIRST. */ /*! \var SoSFShort SoArray::numElements1 Number of duplicates for each X axis row. Default 1. */ /*! \var SoSFShort SoArray::numElements2 Number of duplicates for each Y axis row. Default 1. */ /*! \var SoSFShort SoArray::numElements3 Number of duplicates for each Z axis row. Default 1. */ /*! \var SoSFVec3f SoArray::separation1 Distance in current units between the center point of each element along the X axis. Default [1.0, 0.0, 0.0]. */ /*! \var SoSFVec3f SoArray::separation2 Distance in current units between the center point of each element along the Y axis. Default [0.0, 1.0, 0.0]. */ /*! \var SoSFVec3f SoArray::separation3 Distance in current units between the center point of each element along the Z axis. Default [0.0, 0.0, 1.0]. */ // ************************************************************************* SO_NODE_SOURCE(SoArray); /*! Constructor. 
*/
SoArray::SoArray(void)
{
  // Register the node type and set up fields with their documented
  // defaults (1x1x1 grid, unit separation along each axis).
  SO_NODE_INTERNAL_CONSTRUCTOR(SoArray);

  SO_NODE_ADD_FIELD(origin, (SoArray::FIRST));
  SO_NODE_ADD_FIELD(numElements1, (1));
  SO_NODE_ADD_FIELD(numElements2, (1));
  SO_NODE_ADD_FIELD(numElements3, (1));
  SO_NODE_ADD_FIELD(separation1, (SbVec3f(1, 0, 0)));
  SO_NODE_ADD_FIELD(separation2, (SbVec3f(0, 1, 0)));
  SO_NODE_ADD_FIELD(separation3, (SbVec3f(0, 0, 1)));

  // Legal values for the "origin" enum field.
  SO_NODE_DEFINE_ENUM_VALUE(Origin, FIRST);
  SO_NODE_DEFINE_ENUM_VALUE(Origin, CENTER);
  SO_NODE_DEFINE_ENUM_VALUE(Origin, LAST);
  SO_NODE_SET_SF_ENUM_TYPE(origin, Origin);
}

/*!
  Destructor.
*/
SoArray::~SoArray()
{
}

// Doc in superclass.
void
SoArray::initClass(void)
{
  SO_NODE_INTERNAL_INIT_CLASS(SoArray, SO_FROM_INVENTOR_1);
}

// Doc in superclass.
//
// Accumulates the bounding box of all array instances by traversing the
// children once per grid position, translating the bbox model matrix for
// each instance.
void
SoArray::getBoundingBox(SoGetBoundingBoxAction * action)
{
#if 0 // OBSOLETED: mortene's old (buggy ?) code (removed 19990423, pederb)
  // store incoming modelmatrix
  SbMatrix mat = SoModelMatrixElement::get(action->getState());

  // get reference to the box
  SbXfBox3f & box = action->getXfBoundingBox();

  // store current bbox
  SbXfBox3f incomingbox = box;

  // accumulation variables
  SbVec3f acccenter(0.0f, 0.0f, 0.0f);
  int numCenters = 0;

  SbXfBox3f totalbox;

  for (int i=0; i < numElements3.getValue(); i++) {
    for (int j=0; j < numElements2.getValue(); j++) {
      for (int k=0; k < numElements1.getValue(); k++) {
        float multfactor_i = float(i);
        float multfactor_j = float(j);
        float multfactor_k = float(k);
        switch (origin.getValue()) {
        case SoArray::FIRST:
          break;
        case SoArray::CENTER:
          multfactor_i = -float(numElements3.getValue()-1.0f)/2.0f + float(i);
          multfactor_j = -float(numElements2.getValue()-1.0f)/2.0f + float(j);
          multfactor_k = -float(numElements1.getValue()-1.0f)/2.0f + float(k);
          break;
        case SoArray::LAST:
          multfactor_i = -multfactor_i;
          multfactor_j = -multfactor_j;
          multfactor_k = -multfactor_k;
          break;
        default:
          assert(0);
          break;
        }
        SbVec3f instance_pos =
          separation3.getValue() * multfactor_i +
          separation2.getValue() * multfactor_j +
          separation1.getValue() * multfactor_k;
#if 0 // debug
        SoDebugError::postInfo("SoArray::getBoundingBox",
                               "instance_pos: <%f, %f, %f>",
                               instance_pos[0],
                               instance_pos[1],
                               instance_pos[2]);
#endif // debug

        SbMatrix mat;
        mat.setTranslate(instance_pos);

        action->getState()->push();
        SoSwitchElement::set(action->getState(),
                             i * numElements2.getValue() * numElements1.getValue() +
                             j * numElements1.getValue() + k);

        // make current box empty to calculate bbox of this separator
        box.makeEmpty();
        box.setTransform(SbMatrix::identity());

        // set local matrix to identity
        SoBBoxModelMatrixElement::set(action->getState(), this, mat);

        // traverse all children, calculate the local bbox
        inherited::getBoundingBox(action);

        // If center point is set, accumulate center.
        if (action->isCenterSet()) {
          acccenter += action->getCenter();
          numCenters++;
          action->resetCenter();
        }

        // expand box by stored bbox
        if (!totalbox.isEmpty()) box.extendBy(totalbox);
        totalbox = box;

        action->getState()->pop();
      }
    }
  }

  // transform the local bbox by stored model matrix
  if (!box.isEmpty()) box.transform(mat);

  if (!incomingbox.isEmpty()) box.extendBy(incomingbox);

  if (numCenters != 0) action->setCenter(acccenter / numCenters, FALSE);

#else // "new" code, 19990423, pederb

  // curr{i,j,k} hold the per-axis multipliers for the current grid cell;
  // inc{i,j,k} hold how the multiplier changes along each axis.
  float curri = 0.0f;
  float currj = 0.0f;
  float currk = 0.0f;

  float inci = 1.0f;
  float incj = 1.0f;
  float inck = 1.0f;

  // accumulation variables
  SbVec3f acccenter(0.0f, 0.0f, 0.0f);
  int numCenters = 0;

  // Adjust start multipliers / step direction according to the origin
  // field: FIRST starts at 0, CENTER is symmetric around 0, LAST grows
  // in the negative direction.
  switch (origin.getValue()) {
  case SoArray::FIRST:
    break;
  case SoArray::CENTER:
    curri = -(numElements3.getValue()-1.0f)/2.0f;
    currj = -(numElements2.getValue()-1.0f)/2.0f;
    currk = -(numElements1.getValue()-1.0f)/2.0f;
    break;
  case SoArray::LAST:
    inci = -1.0f;
    incj = -1.0f;
    inck = -1.0f;
    break;
  default:
    assert(0);
    break;
  }

  float initj = currj;
  float initk = currk;

  // N numbers the instances in traversal order; exported through
  // SoSwitchElement so child switches set to SO_SWITCH_INHERIT can pick
  // a different child per instance.
  int N = 0;

  for (int i=0; i < numElements3.getValue(); i++) {
    currj = initj;
    for (int j=0; j < numElements2.getValue(); j++) {
      currk = initk;
      for (int k=0; k < numElements1.getValue(); k++) {
        SbVec3f instance_pos =
          separation3.getValue() * curri +
          separation2.getValue() * currj +
          separation1.getValue() * currk;

        action->getState()->push();

        // translate bbox matrix
        SoBBoxModelMatrixElement::translateBy(action->getState(), this, instance_pos);

        SoSwitchElement::set(action->getState(),N++);

        inherited::getBoundingBox(action);

        // If center point is set, accumulate center.
        if (action->isCenterSet()) {
          acccenter += action->getCenter();
          numCenters++;
          action->resetCenter();
        }

        // pop back to the original bboxmatrix
        action->getState()->pop();
        currk += inck;
      }
      currj += incj;
    }
    curri += inci;
  }

  // Average of all instance centers becomes the reported center point.
  if (numCenters != 0) action->setCenter(acccenter / float(numCenters), FALSE);

#endif // end of new code by pederb
}

// Doc in superclass.
void
SoArray::GLRender(SoGLRenderAction * action)
{
  SoArray::doAction(action);
}

// Doc in superclass.
SbBool
SoArray::affectsState(void) const
{
  return FALSE; // state is pushed/popped for each traversal
}

// Doc in superclass.
//
// Generic traversal: visits the children once per grid position with the
// model matrix translated to that instance's position. Also used by
// GLRender/callback/pick/audioRender/getPrimitiveCount.
void
SoArray::doAction(SoAction *action)
{
  int N = 0;
  for (int i=0; i < numElements3.getValue(); i++) {
    for (int j=0; j < numElements2.getValue(); j++) {
      for (int k=0; k < numElements1.getValue(); k++) {
        // Per-axis position multipliers for this cell, adjusted by the
        // origin field (same scheme as in getBoundingBox()).
        float multfactor_i = float(i);
        float multfactor_j = float(j);
        float multfactor_k = float(k);
        switch (origin.getValue()) {
        case SoArray::FIRST:
          break;
        case SoArray::CENTER:
          multfactor_i = -float(numElements3.getValue()-1.0f)/2.0f + float(i);
          multfactor_j = -float(numElements2.getValue()-1.0f)/2.0f + float(j);
          multfactor_k = -float(numElements1.getValue()-1.0f)/2.0f + float(k);
          break;
        case SoArray::LAST:
          multfactor_i = -multfactor_i;
          multfactor_j = -multfactor_j;
          multfactor_k = -multfactor_k;
          break;
        default:
          assert(0);
          break;
        }
        SbVec3f instance_pos =
          separation3.getValue() * multfactor_i +
          separation2.getValue() * multfactor_j +
          separation1.getValue() * multfactor_k;

        // Push state so the translation and switch index are local to
        // this instance's traversal.
        action->getState()->push();
        SoSwitchElement::set(action->getState(), N++);
        SoModelMatrixElement::translateBy(action->getState(), this, instance_pos);
        inherited::doAction(action);
        action->getState()->pop();
      }
    }
  }
}

// Doc in superclass.
void
SoArray::callback(SoCallbackAction *action)
{
  SoArray::doAction((SoAction*)action);
}

// Doc in superclass.
void
SoArray::pick(SoPickAction *action)
{
  // We came across what we think is a bug in TGS/SGI OIV when
  // implementing picking for this node and testing against the
  // original Inventor library. The SoPickedPoint class can return the
  // object space point, normal and texture coordinates. TGS/SGI OIV
  // do not consider the translation inside this node before returning
  // the object space data from SoPickedPoint, since the path in
  // SoPickedPoint does not say anything about on which copy the pick
  // occurred.
  //
  // We solved this simply by extending SoPickedPoint for storing both
  // world space and object space data.

  SoArray::doAction((SoAction*)action);
}

// Doc in superclass.
void
SoArray::handleEvent(SoHandleEventAction *action)
{
  // NOTE(review): events are NOT dispatched once per array instance --
  // the children are traversed a single time, without the per-instance
  // translation applied in doAction().
  SoNode::handleEvent(action);
  inherited::handleEvent(action);
}

// Doc in superclass
void
SoArray::audioRender(SoAudioRenderAction * action)
{
  SoArray::doAction((SoAction*)action);
}

// Doc in superclass.
void
SoArray::getMatrix(SoGetMatrixAction *action)
{
  // path does not specify which copy to traverse
  inherited::getMatrix(action);
}

// Doc in superclass.
void
SoArray::search(SoSearchAction * action)
{
  SoState * state = action->getState();
  state->push();
  // set Switch element so that subgraphs depending on this element
  // will traverse all children (it's set during normal traversal in
  // doAction()).
  SoSwitchElement::set(action->getState(), SO_SWITCH_ALL);
  // just use SoGroup::search() to traverse all children.
  inherited::search(action);
  state->pop();
}

// Doc in superclass.
void
SoArray::getPrimitiveCount(SoGetPrimitiveCountAction *action)
{
  SoArray::doAction((SoAction*)action);
}
/****************************************** Copyright (c) 2016, Mate Soos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
***********************************************/

#include "completedetachreattacher.h"
#include "solver.h"
#include "varreplacer.h"
#include "clausecleaner.h"
#include "clauseallocator.h"

using namespace CMSat;

// Stores the solver whose watch lists this object will detach/reattach.
CompleteDetachReatacher::CompleteDetachReatacher(Solver* _solver) :
    solver(_solver)
{
}

/**
@brief Completely detach all non-binary clauses

Walks every watch list and drops all watches except binary ones, then
resets the long-clause literal statistics. Binary watch counts are
collected (each binary clause is watched twice, hence the /2).
*/
void CompleteDetachReatacher::detach_nonbins_nontris()
{
    assert(!solver->drat->something_delayed());
    ClausesStay stay;

    for (watch_array::iterator
        it = solver->watches.begin(), end = solver->watches.end()
        ; it != end
        ; ++it
    ) {
        stay += clearWatchNotBinNotTri(*it);
    }

    // Long clauses are gone from the watch lists; their literal counts
    // will be rebuilt when the clauses are reattached.
    solver->litStats.redLits = 0;
    solver->litStats.irredLits = 0;

    // Every binary clause appears in exactly two watch lists.
    assert(stay.redBins % 2 == 0);
    solver->binTri.redBins = stay.redBins/2;
    assert(stay.irredBins % 2 == 0);
    solver->binTri.irredBins = stay.irredBins/2;
}

/**
@brief Helper function for detachPointerUsingClauses()

In-place compaction of one watch list: keeps only binary watches
(counting them by redness) and shrinks the list to the kept prefix.
*/
CompleteDetachReatacher::ClausesStay CompleteDetachReatacher::clearWatchNotBinNotTri(
    watch_subarray ws
) {
    ClausesStay stay;

    Watched* i = ws.begin();
    Watched* j = i;
    for (Watched* end = ws.end(); i != end; i++) {
        if (i->isBin()) {
            if (i->red())
                stay.redBins++;
            else
                stay.irredBins++;
            *j++ = *i;  // keep binary watches, compacting in place
        }
    }
    ws.shrink_(i-j);

    return stay;
}

// Cleans and re-attaches all long (irredundant and redundant) clauses,
// then cleans the implicit (binary) clauses and re-runs propagation.
// Returns the solver's okay() status; may set solver->ok to false.
bool CompleteDetachReatacher::reattachLongs(bool removeStatsFirst)
{
    if (solver->conf.verbosity >= 6) {
        cout << "Cleaning and reattaching clauses" << endl;
    }

    cleanAndAttachClauses(solver->longIrredCls, removeStatsFirst);
    for(auto& lredcls: solver->longRedCls) {
        cleanAndAttachClauses(lredcls, removeStatsFirst);
    }
    solver->clauseCleaner->clean_implicit_clauses();
    assert(!solver->drat->something_delayed());

    // Re-propagate from scratch; a conflict here makes the instance UNSAT.
    if (solver->ok) {
        solver->ok = (solver->propagate<true>().isNULL());
    }

    return solver->okay();
}

// Attaches every clause in 'cs' to the watch lists. Sanity-checks that a
// not-yet-satisfied clause has its two watched literals unassigned.
void CompleteDetachReatacher::attachClauses(
    vector<ClOffset>& cs
) {
    for (ClOffset offs: cs) {
        Clause* cl = solver->cl_alloc.ptr(offs);
        bool satisfied = false;
        for(Lit lit: *cl) {
            if (solver->value(lit) == l_True) {
                satisfied = true;
            }
        }
        if (!satisfied) {
            assert(solver->value((*cl)[0]) == l_Undef);
            assert(solver->value((*cl)[1]) == l_Undef);
        }
        // 'false' presumably means "don't check attachment invariants
        // again" -- TODO confirm against Solver::attachClause().
        solver->attachClause(*cl, false);
    }
}

/**
@brief Cleans clauses from failed literals/removes satisfied clauses from cs

May change solver->ok to FALSE (!)

Clauses that survive cleaning are attached and kept in 'cs' (in-place
compaction); clauses that shrink below size 3 or are satisfied are freed.
*/
void CompleteDetachReatacher::cleanAndAttachClauses(
    vector<ClOffset>& cs
    , bool removeStatsFirst
) {
    vector<ClOffset>::iterator i = cs.begin();
    vector<ClOffset>::iterator j = i;
    for (vector<ClOffset>::iterator end = cs.end(); i != end; i++) {
        assert(!solver->drat->something_delayed());
        Clause* cl = solver->cl_alloc.ptr(*i);

        //Handle stat removal if need be
        if (removeStatsFirst) {
            if (cl->red()) {
                solver->litStats.redLits -= cl->size();
            } else {
                solver->litStats.irredLits -= cl->size();
            }
        }

        if (clean_clause(cl)) {
            solver->attachClause(*cl);
            *j++ = *i;
        } else {
            solver->free_cl(*i);
        }
    }
    cs.resize(cs.size() - (i-j));
}

/**
@brief Not only cleans a clause from false literals, but if clause is satisfied, it reports it

Returns true iff the clause should stay attached as a long clause. A
clause that is satisfied, becomes empty/unit/binary, is handled here
(enqueue/attach-bin/set UNSAT) and false is returned. Emits DRAT
deletions/additions along the way.
*/
bool CompleteDetachReatacher::clean_clause(Clause* cl)
{
    Clause& ps = *cl;
    // Delay the DRAT deletion: it is only flushed (findelay) once we know
    // the clause actually changed or got removed.
    (*solver->drat) << deldelay << ps << fin;
    if (ps.size() <= 2) {
        cout << "ERROR, clause is too small, and linked in: " << *cl << endl;
    }
    assert(ps.size() > 2);

    Lit *i = ps.begin();
    Lit *j = i;
    for (Lit *end = ps.end(); i != end; i++) {
        if (solver->value(*i) == l_True) {
            // Satisfied: flush the delayed deletion and drop the clause.
            (*solver->drat) << findelay;
            return false;
        }
        if (solver->value(*i) == l_Undef) {
            *j++ = *i;  // keep unassigned literals; false ones are dropped
        }
    }
    ps.shrink(i-j);

    //Drat
    if (i != j) {
        // Clause shrank: record the new clause, then the old deletion.
        (*solver->drat) << add << *cl
        #ifdef STATS_NEEDED
        << solver->sumConflicts
        #endif
        << fin << findelay;
    } else {
        solver->drat->forget_delay();
    }

    switch (ps.size()) {
        case 0:
            solver->ok = false;  // empty clause -> UNSAT
            return false;

        case 1:
            solver->enqueue(ps[0]);  // unit clause -> assign immediately
            #ifdef STATS_NEEDED
            solver->propStats.propsUnit++;
            #endif
            return false;

        case 2: {
            // Shrunk to binary: stored implicitly, not as a long clause.
            solver->attach_bin_clause(ps[0], ps[1], ps.red());
            return false;
        }

        default: {
            break;
        }
    }

    return true;
}
/*
 * Copyright (C) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "shared/source/gmm_helper/client_context/gmm_client_context.h"
#include "shared/source/helpers/hw_helper.h"
#include "shared/source/os_interface/hw_info_config.h"
#include "shared/test/common/helpers/blit_commands_helper_tests.inl"
#include "shared/test/common/helpers/debug_manager_state_restore.h"
#include "shared/test/common/mocks/mock_gmm.h"
#include "shared/test/common/mocks/ult_device_factory.h"

#include "gtest/gtest.h"
#include "test_traits_common.h"

// Unit tests for BlitCommandsHelper's fill-buffer / color-depth command
// programming on XE_HP_SDV-and-newer platforms.
using BlitTests = Test<DeviceFixture>;
using BlitPlatforms = IsAtLeastProduct<IGFX_XE_HP_SDV>;

using namespace NEO;

// Test matcher: selects only products that support surface-state
// compression parameters (XE_HP_CORE command set and the per-family trait).
struct CompressionParamsSupportedMatcher {
    template <PRODUCT_FAMILY productFamily>
    static constexpr bool isMatched() {
        if constexpr (HwMapper<productFamily>::GfxProduct::supportsCmdSet(IGFX_XE_HP_CORE)) {
            return TestTraits<NEO::ToGfxCoreFamily<productFamily>::get()>::surfaceStateCompressionParamsSupported;
        }
        return false;
    }
};

// --- Destination compression enable/disable depending on the GMM state ---

// No GMM attached to the allocation -> compression must stay disabled.
HWTEST2_F(BlitTests, givenDeviceWithoutDefaultGmmWhenAppendBlitCommandsForFillBufferThenDstCompressionDisabled, CompressionParamsSupportedMatcher) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationCompressionEnable(), XY_COLOR_BLT::DESTINATION_COMPRESSION_ENABLE::DESTINATION_COMPRESSION_ENABLE_COMPRESSION_DISABLE);
    EXPECT_EQ(blitCmd.getDestinationAuxiliarysurfacemode(), XY_COLOR_BLT::DESTINATION_AUXILIARY_SURFACE_MODE::DESTINATION_AUXILIARY_SURFACE_MODE_AUX_NONE);
}

// GMM attached but compression disabled -> still no compression/aux mode.
HWTEST2_F(BlitTests, givenGmmWithDisabledCompresionWhenAppendBlitCommandsForFillBufferThenDstCompressionDisabled, CompressionParamsSupportedMatcher) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext());
    gmm->isCompressionEnabled = false;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    mockAllocation.setGmm(gmm.get(), 0);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationCompressionEnable(), XY_COLOR_BLT::DESTINATION_COMPRESSION_ENABLE::DESTINATION_COMPRESSION_ENABLE_COMPRESSION_DISABLE);
    EXPECT_EQ(blitCmd.getDestinationAuxiliarysurfacemode(), XY_COLOR_BLT::DESTINATION_AUXILIARY_SURFACE_MODE::DESTINATION_AUXILIARY_SURFACE_MODE_AUX_NONE);
}

// GMM with compression enabled -> compression and CCS_E aux mode selected.
HWTEST2_F(BlitTests, givenGmmWithEnabledCompresionWhenAppendBlitCommandsForFillBufferThenDstCompressionEnabled, CompressionParamsSupportedMatcher) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext());
    gmm->isCompressionEnabled = true;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    mockAllocation.setGmm(gmm.get(), 0);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationCompressionEnable(), XY_COLOR_BLT::DESTINATION_COMPRESSION_ENABLE::DESTINATION_COMPRESSION_ENABLE_COMPRESSION_ENABLE);
    EXPECT_EQ(blitCmd.getDestinationAuxiliarysurfacemode(), XY_COLOR_BLT::DESTINATION_AUXILIARY_SURFACE_MODE::DESTINATION_AUXILIARY_SURFACE_MODE_AUX_CCS_E);
}

// --- Destination compression format programming ---

// Compression format should come from the GMM client context.
HWTEST2_F(BlitTests, givenGmmWithEnabledCompresionWhenAppendBlitCommandsForFillBufferThenSetCompressionFormat, BlitPlatforms) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    auto gmmContext = pDevice->getGmmClientContext();
    auto gmm = std::make_unique<MockGmm>(gmmContext);
    gmm->isCompressionEnabled = true;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::LocalMemory);
    mockAllocation.setGmm(gmm.get(), 0);
    uint32_t compressionFormat = gmmContext->getSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT::GMM_FORMAT_GENERIC_8BIT);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(compressionFormat, blitCmd.getDestinationCompressionFormat());
}

// The ForceBufferCompressionFormat debug flag overrides the GMM format.
HWTEST2_F(BlitTests, givenGmmWithEnabledCompresionAndDebugFlagSetWhenAppendBlitCommandsForFillBufferThenSetCompressionFormat, BlitPlatforms) {
    DebugManagerStateRestore dbgRestore;
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    uint32_t newCompressionFormat = 1;
    DebugManager.flags.ForceBufferCompressionFormat.set(static_cast<int32_t>(newCompressionFormat));
    auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext());
    gmm->isCompressionEnabled = true;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::LocalMemory);
    mockAllocation.setGmm(gmm.get(), 0);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(newCompressionFormat, blitCmd.getDestinationCompressionFormat());
}

// --- Destination target memory selection per stepping / access mode ---
// On XE_HP A0 with CPU access allowed, local-memory allocations are
// blitted as system memory (A0 workaround); otherwise local memory is used.

HWTEST2_F(BlitTests, givenA0StepWhenAppendBlitCommandsForFillBufferWithLocalAccessModeCpuAllowedThenSystemMemoryIsUsed, IsXEHP) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.ForceLocalMemoryAccessMode.set(static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessAllowed));
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::LocalMemory, MemoryManager::maxOsContextCount);
    HardwareInfo *hwInfo = pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->getMutableHardwareInfo();
    const auto &hwInfoConfig = *HwInfoConfig::get(hwInfo->platform.eProductFamily);
    hwInfo->platform.usRevId = hwInfoConfig.getHwRevIdFromStepping(REVISION_A0, *hwInfo);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_SYSTEM_MEM);
}

HWTEST2_F(BlitTests, givenA0StepWhenAppendBlitCommandsForFillBufferWithLocalAccessModeCpuDisallowedThenLocalMemoryIsUsed, IsXEHP) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.ForceLocalMemoryAccessMode.set(static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessDisallowed));
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::LocalMemory, MemoryManager::maxOsContextCount);
    HardwareInfo *hwInfo = pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->getMutableHardwareInfo();
    const auto &hwInfoConfig = *HwInfoConfig::get(hwInfo->platform.eProductFamily);
    hwInfo->platform.usRevId = hwInfoConfig.getHwRevIdFromStepping(REVISION_A0, *hwInfo);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_LOCAL_MEM);
}

// On B step the A0 workaround no longer applies: local stays local.
HWTEST2_F(BlitTests, givenBStepWhenAppendBlitCommandsForFillBufferWithLocalAccessModeCpuAllowedThenLocalIsUsed, IsXEHP) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.ForceLocalMemoryAccessMode.set(static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessAllowed));
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::LocalMemory, MemoryManager::maxOsContextCount);
    HardwareInfo *hwInfo = pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->getMutableHardwareInfo();
    const auto &hwInfoConfig = *HwInfoConfig::get(hwInfo->platform.eProductFamily);
    hwInfo->platform.usRevId = hwInfoConfig.getHwRevIdFromStepping(REVISION_B, *hwInfo);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_LOCAL_MEM);
}

HWTEST2_F(BlitTests, givenBStepWhenAppendBlitCommandsForFillBufferWithLocalAccessModeCpuDisallowedThenLocalIsUsed, IsXEHP) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.ForceLocalMemoryAccessMode.set(static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessDisallowed));
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::LocalMemory, MemoryManager::maxOsContextCount);
    HardwareInfo *hwInfo = pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->getMutableHardwareInfo();
    const auto &hwInfoConfig = *HwInfoConfig::get(hwInfo->platform.eProductFamily);
    hwInfo->platform.usRevId = hwInfoConfig.getHwRevIdFromStepping(REVISION_B, *hwInfo);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_LOCAL_MEM);
}

// System-memory allocations always use the system-memory target,
// regardless of the CPU access mode.
HWTEST2_F(BlitTests, givenAllocationInSystemMemWhenAppendBlitCommandsForFillBufferWithLocalAccessModeCpuAllowedThenSystemMemIsUsed, IsXEHP) {
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.ForceLocalMemoryAccessMode.set(static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessAllowed));
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    HardwareInfo *hwInfo = pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->getMutableHardwareInfo();
    const auto &hwInfoConfig = *HwInfoConfig::get(hwInfo->platform.eProductFamily);
    hwInfo->platform.usRevId = hwInfoConfig.getHwRevIdFromStepping(REVISION_A1, *hwInfo);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_SYSTEM_MEM);
}

HWTEST2_F(BlitTests, givenAllocationInSystemMemWhenAppendBlitCommandsForFillBufferWithLocalAccessModeCpuDisallowedThenSystemMemIsUsed, IsXEHP) {
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.ForceLocalMemoryAccessMode.set(static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessDisallowed));
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    HardwareInfo *hwInfo = pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]->getMutableHardwareInfo();
    const auto &hwInfoConfig = *HwInfoConfig::get(hwInfo->platform.eProductFamily);
    hwInfo->platform.usRevId = hwInfoConfig.getHwRevIdFromStepping(REVISION_A1, *hwInfo);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_SYSTEM_MEM);
}

// --- Debug-flag overrides for MOCS and target memory ---

HWTEST2_F(BlitTests, givenOverridedMocksValueWhenAppendBlitCommandsForFillBufferThenDebugMocksValueIsSet, BlitPlatforms) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    DebugManagerStateRestore dbgRestore;
    uint32_t mockValue = 5;
    DebugManager.flags.OverrideBlitterMocs.set(mockValue);
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationMOCS(), mockValue);
}

// OverrideBlitterTargetMemory: 0 -> system, 1 -> local, 2 -> leave default
// (system for a system-memory allocation).
HWTEST2_F(BlitTests, givenOverridedBliterTargetToZeroWhenAppendBlitCommandsForFillBufferThenUseSystemMem, BlitPlatforms) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.OverrideBlitterTargetMemory.set(0);
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_SYSTEM_MEM);
}

HWTEST2_F(BlitTests, givenOverridedBliterTargetToOneWhenAppendBlitCommandsForFillBufferThenUseLocalMem, BlitPlatforms) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.OverrideBlitterTargetMemory.set(1);
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_LOCAL_MEM);
}

HWTEST2_F(BlitTests, givenOverridedBliterTargetToTwoWhenAppendBlitCommandsForFillBufferThenUseDefaultMem, BlitPlatforms) {
    using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT;
    DebugManagerStateRestore dbgRestore;
    DebugManager.flags.OverrideBlitterTargetMemory.set(2);
    auto blitCmd = FamilyType::cmdInitXyColorBlt;
    MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY,
                                          reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t),
                                          MemoryPool::System4KBPages, MemoryManager::maxOsContextCount);
    BlitCommandsHelper<FamilyType>::appendBlitCommandsForFillBuffer(&mockAllocation, blitCmd, *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]);
    EXPECT_EQ(blitCmd.getDestinationTargetMemory(), XY_COLOR_BLT::DESTINATION_TARGET_MEMORY::DESTINATION_TARGET_MEMORY_SYSTEM_MEM);
}

// --- Max blit size override when CPU access to local memory is allowed ---
HWTEST2_F(BlitTests, GivenCpuAccessToLocalMemoryWhenGettingMaxBlitSizeThenValuesAreOverriden, BlitPlatforms) {
    DebugManagerStateRestore restore{};
    UltDeviceFactory deviceFactory{1, 2};

    int32_t testedLocalMemoryAccessModes[] = {static_cast<int32_t>(LocalMemoryAccessMode::Default),
                                              static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessAllowed),
                                              static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessDisallowed)};
    for (auto localMemoryAccessModeOverride : testedLocalMemoryAccessModes) {
        DebugManager.flags.ForceLocalMemoryAccessMode.set(localMemoryAccessModeOverride);

        // Only CpuAccessAllowed shrinks the max blit size to 1024x1024.
        bool isBlitSizeOverridden = (localMemoryAccessModeOverride == static_cast<int32_t>(LocalMemoryAccessMode::CpuAccessAllowed));

        if (isBlitSizeOverridden) {
            EXPECT_EQ(1024u, BlitCommandsHelper<FamilyType>::getMaxBlitWidth(deviceFactory.rootDevices[0]->getRootDeviceEnvironment()));
            EXPECT_EQ(1024u, BlitCommandsHelper<FamilyType>::getMaxBlitHeight(deviceFactory.rootDevices[0]->getRootDeviceEnvironment()));
        } else {
            EXPECT_EQ(BlitterConstants::maxBlitWidth, BlitCommandsHelper<FamilyType>::getMaxBlitWidth(deviceFactory.rootDevices[0]->getRootDeviceEnvironment()));
            EXPECT_EQ(BlitterConstants::maxBlitHeight, BlitCommandsHelper<FamilyType>::getMaxBlitHeight(deviceFactory.rootDevices[0]->getRootDeviceEnvironment()));
        }
    }
}

// --- Color-depth programming for memory fills (parameterized by pattern size) ---

struct BlitTestsTestXeHP : BlitColorTests {};

// Thin wrapper so the shared test helper can be instantiated per family.
template <typename FamilyType>
class GivenLinearStreamWhenCallDispatchBlitMemoryColorFillThenCorrectDepthIsProgrammedXEHP : public GivenLinearStreamWhenCallDispatchBlitMemoryColorFillThenCorrectDepthIsProgrammed<FamilyType> {
  public:
    GivenLinearStreamWhenCallDispatchBlitMemoryColorFillThenCorrectDepthIsProgrammedXEHP(Device *device) : GivenLinearStreamWhenCallDispatchBlitMemoryColorFillThenCorrectDepthIsProgrammed<FamilyType>(device) {}
};

// Maps a fill-pattern size in bytes to the expected XY_COLOR_BLT depth.
template <typename FamilyType>
typename FamilyType::XY_COLOR_BLT::COLOR_DEPTH getColorDepth(size_t patternSize) {
    using COLOR_DEPTH = typename FamilyType::XY_COLOR_BLT::COLOR_DEPTH;
    COLOR_DEPTH depth = {};
    switch (patternSize) {
    case 1:
        depth = COLOR_DEPTH::COLOR_DEPTH_8_BIT_COLOR;
        break;
    case 2:
        depth = COLOR_DEPTH::COLOR_DEPTH_16_BIT_COLOR;
        break;
    case 4:
        depth = COLOR_DEPTH::COLOR_DEPTH_32_BIT_COLOR;
        break;
    case 8:
        depth = COLOR_DEPTH::COLOR_DEPTH_64_BIT_COLOR;
        break;
    case 16:
        depth = COLOR_DEPTH::COLOR_DEPTH_128_BIT_COLOR;
        break;
    }
    return depth;
}

HWTEST2_P(BlitTestsTestXeHP, givenCommandStreamWhenCallToDispatchMemoryFillThenColorDepthAreProgrammedCorrectly, IsXeHpCore) {
    auto patternSize = GetParam();
    auto expecttedDepth = getColorDepth<FamilyType>(patternSize);
    GivenLinearStreamWhenCallDispatchBlitMemoryColorFillThenCorrectDepthIsProgrammedXEHP<FamilyType> test(pDevice);
    test.TestBodyImpl(patternSize, expecttedDepth);
}

INSTANTIATE_TEST_CASE_P(size_t,
                        BlitTestsTestXeHP,
                        testing::Values(1,
                                        2,
                                        4,
                                        8,
                                        16));

// --- appendColorDepth for XY_COPY_BLT, one test per bytes-per-pixel value ---

HWTEST2_F(BlitTests, givenOneBytePerPixelWhenAppendColrDepthThenCorrectDepthIsSet, IsXeHpCore) {
    using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT;
    auto bltCmd = FamilyType::cmdInitXyCopyBlt;
    BlitProperties properties = {};
    properties.bytesPerPixel = 1;
    BlitCommandsHelper<FamilyType>::appendColorDepth(properties, bltCmd);
    EXPECT_EQ(bltCmd.getColorDepth(), XY_COPY_BLT::COLOR_DEPTH::COLOR_DEPTH_8_BIT_COLOR);
}

HWTEST2_F(BlitTests, givenTwoBytePerPixelWhenAppendColrDepthThenCorrectDepthIsSet, IsXeHpCore) {
    using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT;
    auto bltCmd = FamilyType::cmdInitXyCopyBlt;
    BlitProperties properties = {};
    properties.bytesPerPixel = 2;
    BlitCommandsHelper<FamilyType>::appendColorDepth(properties, bltCmd);
    EXPECT_EQ(bltCmd.getColorDepth(), XY_COPY_BLT::COLOR_DEPTH::COLOR_DEPTH_16_BIT_COLOR);
}

HWTEST2_F(BlitTests, givenFourBytePerPixelWhenAppendColrDepthThenCorrectDepthIsSet, IsXeHpCore) {
    using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT;
    auto bltCmd = FamilyType::cmdInitXyCopyBlt;
    BlitProperties properties = {};
    properties.bytesPerPixel = 4;
    BlitCommandsHelper<FamilyType>::appendColorDepth(properties, bltCmd);
    EXPECT_EQ(bltCmd.getColorDepth(), XY_COPY_BLT::COLOR_DEPTH::COLOR_DEPTH_32_BIT_COLOR);
}

HWTEST2_F(BlitTests, givenEightBytePerPixelWhenAppendColrDepthThenCorrectDepthIsSet, IsXeHpCore) {
    using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT;
    auto bltCmd = FamilyType::cmdInitXyCopyBlt;
    BlitProperties properties = {};
    properties.bytesPerPixel = 8;
    BlitCommandsHelper<FamilyType>::appendColorDepth(properties, bltCmd);
    EXPECT_EQ(bltCmd.getColorDepth(), XY_COPY_BLT::COLOR_DEPTH::COLOR_DEPTH_64_BIT_COLOR);
}

// NOTE(review): this test is truncated at the end of the visible chunk;
// the remainder (its EXPECT_EQ and closing brace) lies outside SOURCE.
HWTEST2_F(BlitTests, givenSixteenBytePerPixelWhenAppendColrDepthThenCorrectDepthIsSet, IsXeHpCore) {
    using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT;
    auto bltCmd = FamilyType::cmdInitXyCopyBlt;
    BlitProperties properties = {};
    properties.bytesPerPixel = 16;
    BlitCommandsHelper<FamilyType>::appendColorDepth(properties, bltCmd);
EXPECT_EQ(bltCmd.getColorDepth(), XY_COPY_BLT::COLOR_DEPTH::COLOR_DEPTH_128_BIT_COLOR); } HWTEST2_F(BlitTests, givenIncorrectBytePerPixelWhenAppendColrDepthThenCorrectDepthIsSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.bytesPerPixel = 48; EXPECT_THROW(BlitCommandsHelper<FamilyType>::appendColorDepth(properties, bltCmd), std::exception); } HWTEST2_F(BlitTests, givenNotTiledSrcAndDestinationWhenAppendTilingTypeThenCorrectTilingIsSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; BlitCommandsHelper<FamilyType>::appendTilingType(GMM_NOT_TILED, GMM_NOT_TILED, bltCmd); EXPECT_EQ(bltCmd.getSourceTiling(), XY_COPY_BLT::TILING::TILING_LINEAR); EXPECT_EQ(bltCmd.getDestinationTiling(), XY_COPY_BLT::TILING::TILING_LINEAR); } HWTEST2_F(BlitTests, givenTiled4SrcAndDestinationAppendTilingTypeThenCorrectTilingIsSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; BlitCommandsHelper<FamilyType>::appendTilingType(GMM_TILED_4, GMM_TILED_4, bltCmd); EXPECT_EQ(bltCmd.getSourceTiling(), XY_COPY_BLT::TILING::TILING_TILE4); EXPECT_EQ(bltCmd.getDestinationTiling(), XY_COPY_BLT::TILING::TILING_TILE4); } HWTEST2_F(BlitTests, givenTiled64SrcAndDestinationAppendTilingTypeThenCorrectTilingIsSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; BlitCommandsHelper<FamilyType>::appendTilingType(GMM_TILED_64, GMM_TILED_64, bltCmd); EXPECT_EQ(bltCmd.getSourceTiling(), XY_COPY_BLT::TILING::TILING_TILE64); EXPECT_EQ(bltCmd.getDestinationTiling(), XY_COPY_BLT::TILING::TILING_TILE64); } HWTEST2_F(BlitTests, givenTiled4SrcAndDestinationAppendImageCommandsThenCorrectTiledIsSet, IsXeHpCore) { using XY_COPY_BLT 
= typename FamilyType::XY_COPY_BLT; auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); auto flags = gmm->gmmResourceInfo->getResourceFlags(); flags->Info.Tile4 = true; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor(reinterpret_cast<void *>(0x1234), sizeof(uint32_t)); mockAllocationSrc.setGmm(gmm.get(), 0); mockAllocationDst.setGmm(gmm.get(), 0); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; properties.clearColorAllocation = &mockClearColor; auto srcSlicePitch = static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getSourceTiling(), XY_COPY_BLT::TILING::TILING_TILE4); EXPECT_EQ(bltCmd.getDestinationTiling(), XY_COPY_BLT::TILING::TILING_TILE4); } HWTEST2_F(BlitTests, givenNotTiled64SrcAndDestinationAppendImageCommandsThenCorrectTiledIsSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); auto flags = gmm->gmmResourceInfo->getResourceFlags(); flags->Info.Tile64 = true; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, 
MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor(reinterpret_cast<void *>(0x1234), sizeof(uint32_t)); mockAllocationSrc.setGmm(gmm.get(), 0); mockAllocationDst.setGmm(gmm.get(), 0); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; properties.clearColorAllocation = &mockClearColor; auto srcSlicePitch = static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getSourceTiling(), XY_COPY_BLT::TILING::TILING_TILE64); EXPECT_EQ(bltCmd.getDestinationTiling(), XY_COPY_BLT::TILING::TILING_TILE64); } HWTEST2_F(BlitTests, givenNotTiledSrcAndDestinationAppendImageCommandsThenCorrectTiledIsSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); auto flags = gmm->gmmResourceInfo->getResourceFlags(); flags->Info.Tile64 = false; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor(reinterpret_cast<void *>(0x1234), sizeof(uint32_t)); mockAllocationSrc.setGmm(gmm.get(), 0); 
mockAllocationDst.setGmm(gmm.get(), 0); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; properties.clearColorAllocation = &mockClearColor; auto srcSlicePitch = static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getSourceTiling(), XY_COPY_BLT::TILING::TILING_LINEAR); EXPECT_EQ(bltCmd.getDestinationTiling(), XY_COPY_BLT::TILING::TILING_LINEAR); } HWTEST2_F(BlitTests, givenGmmParamsWhenAppendSurfaceTypeThenCorrectSurfaceTypeIsSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; std::tuple<GMM_RESOURCE_TYPE_ENUM, typename XY_COPY_BLT::SURFACE_TYPE, uint32_t> testParams[]{ {GMM_RESOURCE_TYPE::RESOURCE_1D, XY_COPY_BLT::SURFACE_TYPE::SURFACE_TYPE_SURFTYPE_1D, 1u}, {GMM_RESOURCE_TYPE::RESOURCE_2D, XY_COPY_BLT::SURFACE_TYPE::SURFACE_TYPE_SURFTYPE_2D, 1u}, {GMM_RESOURCE_TYPE::RESOURCE_3D, XY_COPY_BLT::SURFACE_TYPE::SURFACE_TYPE_SURFTYPE_3D, 1u}, {GMM_RESOURCE_TYPE::RESOURCE_1D, XY_COPY_BLT::SURFACE_TYPE::SURFACE_TYPE_SURFTYPE_2D, 10u}}; for (const auto &[resourceType, expectedSurfaceType, arraySize] : testParams) { auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); auto resourceInfo = static_cast<MockGmmResourceInfo *>(gmm->gmmResourceInfo.get()); resourceInfo->mockResourceCreateParams.Type = resourceType; resourceInfo->mockResourceCreateParams.ArraySize = arraySize; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, 
GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); mockAllocationSrc.setGmm(gmm.get(), 0); mockAllocationDst.setGmm(gmm.get(), 0); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; BlitCommandsHelper<FamilyType>::appendSurfaceType(properties, bltCmd); EXPECT_EQ(bltCmd.getDestinationSurfaceType(), expectedSurfaceType); EXPECT_EQ(bltCmd.getSourceSurfaceType(), expectedSurfaceType); } } HWTEST2_F(BlitTests, givenInvalidResourceWhenAppendSurfaceTypeThenSurfaceTypeDoesNotChange, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); auto resourceInfo = static_cast<MockGmmResourceInfo *>(gmm->gmmResourceInfo.get()); resourceInfo->mockResourceCreateParams.Type = GMM_RESOURCE_TYPE::RESOURCE_INVALID; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); mockAllocationSrc.setGmm(gmm.get(), 0); mockAllocationDst.setGmm(gmm.get(), 0); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; auto srcSurfaceType = bltCmd.getSourceSurfaceType(); auto dstSurfaceType = bltCmd.getDestinationSurfaceType(); BlitCommandsHelper<FamilyType>::appendSurfaceType(properties, bltCmd); EXPECT_EQ(bltCmd.getSourceSurfaceType(), srcSurfaceType); 
EXPECT_EQ(bltCmd.getDestinationSurfaceType(), dstSurfaceType); } HWTEST2_F(BlitTests, givenResourcesWithoutGmmsWhenAppendSurfaceTypeThenSurfaceTypeDoesNotChange, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; auto srcSurfaceType = bltCmd.getSourceSurfaceType(); auto dstSurfaceType = bltCmd.getDestinationSurfaceType(); BlitCommandsHelper<FamilyType>::appendSurfaceType(properties, bltCmd); EXPECT_EQ(bltCmd.getSourceSurfaceType(), srcSurfaceType); EXPECT_EQ(bltCmd.getDestinationSurfaceType(), dstSurfaceType); } HWTEST2_F(BlitTests, givenGmmParamsWhenGetBlitAllocationPropertiesIsCalledThenCompressionFormatIsSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; std::tuple<bool, bool, bool> params[]{ {false, false, false}, {false, true, true}, {true, false, true}}; for (auto &[mediaCompressed, renderCompressed, compressionExpected] : params) { auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); auto resourceInfo = static_cast<MockGmmResourceInfo *>(gmm->gmmResourceInfo.get()); auto &resInfo = resourceInfo->getResourceFlags()->Info; resInfo.MediaCompressed = mediaCompressed; resInfo.RenderCompressed = renderCompressed; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), 
MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); mockAllocationSrc.setGmm(gmm.get(), 0); BlitProperties properties = {}; properties.srcAllocation = &mockAllocationSrc; uint32_t qPitch = static_cast<uint32_t>(properties.copySize.y); GMM_TILE_TYPE tileType = GMM_NOT_TILED; uint32_t mipTailLod = 0; uint32_t compressionFormat = 0; auto rowPitch = static_cast<uint32_t>(properties.srcRowPitch); BlitCommandsHelper<FamilyType>::getBlitAllocationProperties(*properties.srcAllocation, rowPitch, qPitch, tileType, mipTailLod, compressionFormat, pDevice->getRootDeviceEnvironment()); if (compressionExpected) { EXPECT_GT(compressionFormat, 0u); } else { EXPECT_EQ(compressionFormat, 0u); } } } struct MyMockResourecInfo : public GmmResourceInfo { using GmmResourceInfo::resourceInfo; MyMockResourecInfo(GmmClientContext *clientContext, GMM_RESCREATE_PARAMS *inputParams) : GmmResourceInfo(clientContext, inputParams){}; MyMockResourecInfo(GmmClientContext *clientContext, GMM_RESOURCE_INFO *inputGmmResourceInfo) : GmmResourceInfo(clientContext, inputGmmResourceInfo){}; size_t getRenderPitch() override { return pitch; } uint32_t getQPitch() override { return 0; } GMM_RESOURCE_FLAG *getResourceFlags() override { return &flags; } uint32_t getMipTailStartLodSurfaceState() override { return 0; } size_t pitch = 0; GMM_RESOURCE_FLAG flags = {}; }; HWTEST2_F(BlitTests, givenResourceWithoutGmmWhenAppendImageCommandsThenPitchEqualPropertiesValue, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation 
mockClearColor(reinterpret_cast<void *>(0x1234), sizeof(uint32_t)); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.dstRowPitch = 0x100; properties.srcRowPitch = 0x100; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; properties.clearColorAllocation = &mockClearColor; auto srcSlicePitch = static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getDestinationPitch(), properties.dstRowPitch); EXPECT_EQ(bltCmd.getSourcePitch(), properties.srcRowPitch); } HWTEST2_F(BlitTests, givenInputAndDefaultSlicePitchWhenAppendBlitCommandsForImagesIsCalledThenSlicePitchIsCorrect, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor; auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.dstRowPitch = 0x100; properties.srcRowPitch = 0x200; properties.copySize = {10, 20, 1}; properties.srcSize = {20, 18, 1}; properties.dstSize = {18, 20, 1}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; properties.clearColorAllocation = &mockClearColor; { uint32_t inputSlicePitch = 0x4000; uint32_t srcSlicePitch = inputSlicePitch; uint32_t dstSlicePitch = inputSlicePitch; 
BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(inputSlicePitch, srcSlicePitch); EXPECT_EQ(inputSlicePitch, dstSlicePitch); } { uint32_t expectedSrcSlicePitch = static_cast<uint32_t>(properties.srcSize.y * properties.srcRowPitch); uint32_t expectedDstSlicePitch = static_cast<uint32_t>(properties.dstSize.y * properties.dstRowPitch); uint32_t srcSlicePitch = 0; uint32_t dstSlicePitch = 0; BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(expectedSrcSlicePitch, srcSlicePitch); EXPECT_EQ(expectedDstSlicePitch, dstSlicePitch); } } HWTEST2_F(BlitTests, givenResourceInfoWithZeroPitchWhenAppendImageCommandsThenPitchEqualPropertiesValue, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); GMM_RESCREATE_PARAMS gmmParams = {}; gmm->gmmResourceInfo.reset(new MyMockResourecInfo(pDevice->getRootDeviceEnvironment().getGmmClientContext(), &gmmParams)); MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor(reinterpret_cast<void *>(0x1234), sizeof(uint32_t)); mockAllocationSrc.setGmm(gmm.get(), 0); mockAllocationDst.setGmm(gmm.get(), 0); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.dstRowPitch = 0x100; properties.srcRowPitch = 0x100; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = 
&mockAllocationDst; properties.clearColorAllocation = &mockClearColor; auto srcSlicePitch = static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getDestinationPitch(), properties.dstRowPitch); EXPECT_EQ(bltCmd.getSourcePitch(), properties.srcRowPitch); } HWTEST2_F(BlitTests, givenTiledAllocationWhenAppendBlitCommandsForImagesThenBlitCmdIsCorrect, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); GMM_RESCREATE_PARAMS gmmParams = {}; auto myResourecInfo = std::make_unique<MyMockResourecInfo>(pDevice->getRootDeviceEnvironment().getGmmClientContext(), &gmmParams); myResourecInfo->pitch = 0x100; gmm->gmmResourceInfo.reset(myResourecInfo.release()); auto flags = gmm->gmmResourceInfo->getResourceFlags(); flags->Info.Tile64 = true; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor(reinterpret_cast<void *>(0x1234), sizeof(uint32_t)); mockAllocationSrc.setGmm(gmm.get(), 0); mockAllocationDst.setGmm(gmm.get(), 0); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.dstRowPitch = 0x1000; properties.srcRowPitch = 0x1000; properties.srcAllocation = &mockAllocationSrc; properties.clearColorAllocation = &mockClearColor; properties.dstAllocation = &mockAllocationDst; auto srcSlicePitch = 
static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getDestinationPitch(), gmm->gmmResourceInfo->getRenderPitch() / sizeof(uint32_t)); EXPECT_EQ(bltCmd.getSourcePitch(), gmm->gmmResourceInfo->getRenderPitch() / sizeof(uint32_t)); EXPECT_NE(bltCmd.getDestinationPitch(), static_cast<uint32_t>(properties.dstRowPitch)); EXPECT_NE(bltCmd.getSourcePitch(), static_cast<uint32_t>(properties.srcRowPitch)); } HWTEST2_F(BlitTests, givenAlocationsWhenAppendBlitCommandsForImagesThenSurfaceSizesAreProgrammedCorrectly, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor; mockAllocationSrc.setGmm(gmm.get(), 0); mockAllocationDst.setGmm(gmm.get(), 0); auto blitCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.dstRowPitch = 0x100; properties.srcRowPitch = 0x100; properties.srcSize = {8, 10, 12}; properties.dstSize = {12, 8, 10}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; properties.clearColorAllocation = &mockClearColor; auto srcSlicePitch = static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); 
BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, blitCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(blitCmd.getSourceSurfaceWidth(), properties.srcSize.x); EXPECT_EQ(blitCmd.getSourceSurfaceHeight(), properties.srcSize.y); EXPECT_EQ(blitCmd.getSourceSurfaceDepth(), properties.srcSize.z); EXPECT_EQ(blitCmd.getDestinationSurfaceWidth(), properties.dstSize.x); EXPECT_EQ(blitCmd.getDestinationSurfaceHeight(), properties.dstSize.y); EXPECT_EQ(blitCmd.getDestinationSurfaceDepth(), properties.dstSize.z); } HWTEST2_F(BlitTests, givenLinearResourceInfoWithNotZeroPitchWhenAppendImageCommandsThenPitchEqualValueFromProperties, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto gmm = std::make_unique<MockGmm>(pDevice->getGmmClientContext()); GMM_RESCREATE_PARAMS gmmParams = {}; auto myResourecInfo = std::make_unique<MyMockResourecInfo>(pDevice->getRootDeviceEnvironment().getGmmClientContext(), &gmmParams); myResourecInfo->pitch = 0x100; myResourecInfo->flags.Info.Linear = 1; gmm->gmmResourceInfo.reset(myResourecInfo.release()); MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor(reinterpret_cast<void *>(0x1234), sizeof(uint32_t)); mockAllocationSrc.setGmm(gmm.get(), 0); mockAllocationDst.setGmm(gmm.get(), 0); auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; properties.dstRowPitch = 0x1000; properties.srcRowPitch = 0x1000; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; 
properties.clearColorAllocation = &mockClearColor; auto srcSlicePitch = static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendBlitCommandsForImages(properties, bltCmd, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getDestinationPitch(), properties.dstRowPitch); EXPECT_EQ(bltCmd.getSourcePitch(), properties.dstRowPitch); EXPECT_NE(bltCmd.getDestinationPitch(), gmm->gmmResourceInfo->getRenderPitch()); EXPECT_NE(bltCmd.getSourcePitch(), gmm->gmmResourceInfo->getRenderPitch()); } HWTEST2_F(BlitTests, givenLinearResorcesWhenAppendSliceOffsetsThenAdressAreOffsetted, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockClearColor(reinterpret_cast<void *>(0x1234), sizeof(uint32_t)); properties.copySize = {0x10, 0x10, 0x1}; properties.srcAllocation = &mockAllocationSrc; properties.dstAllocation = &mockAllocationDst; properties.srcSlicePitch = 0x4000; properties.dstSlicePitch = 0x8000; properties.srcGpuAddress = mockAllocationSrc.getGpuAddress(); properties.dstGpuAddress = mockAllocationDst.getGpuAddress(); properties.clearColorAllocation = &mockClearColor; properties.bytesPerPixel = 1; bltCmd.setSourceTiling(XY_COPY_BLT::TILING::TILING_LINEAR); bltCmd.setDestinationTiling(XY_COPY_BLT::TILING::TILING_LINEAR); uint32_t index = 1; auto srcSlicePitch = 
static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendSliceOffsets(properties, bltCmd, index, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getDestinationBaseAddress(), ptrOffset(mockAllocationDst.getGpuAddress(), dstSlicePitch)); EXPECT_EQ(bltCmd.getSourceBaseAddress(), ptrOffset(mockAllocationSrc.getGpuAddress(), srcSlicePitch)); } HWTEST2_F(BlitTests, givenTiledResorcesWhenAppendSliceOffsetsThenIndexsAreSet, IsXeHpCore) { using XY_COPY_BLT = typename FamilyType::XY_COPY_BLT; auto bltCmd = FamilyType::cmdInitXyCopyBlt; BlitProperties properties = {}; MockGraphicsAllocation mockAllocationSrc(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); MockGraphicsAllocation mockAllocationDst(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, sizeof(uint32_t), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); bltCmd.setSourceTiling(XY_COPY_BLT::TILING::TILING_TILE64); bltCmd.setDestinationTiling(XY_COPY_BLT::TILING::TILING_TILE64); uint32_t index = 1; auto srcSlicePitch = static_cast<uint32_t>(properties.srcSlicePitch); auto dstSlicePitch = static_cast<uint32_t>(properties.dstSlicePitch); BlitCommandsHelper<FamilyType>::appendSliceOffsets(properties, bltCmd, index, pDevice->getRootDeviceEnvironment(), srcSlicePitch, dstSlicePitch); EXPECT_EQ(bltCmd.getDestinationArrayIndex(), index + 1); EXPECT_EQ(bltCmd.getSourceArrayIndex(), index + 1); } HWTEST2_F(BlitTests, givenMemorySizeTwiceBiggerThanMaxWidthWhenFillPatternWithBlitThenHeightIsTwo, IsXeHpCore) { using XY_COLOR_BLT = typename FamilyType::XY_COLOR_BLT; using COLOR_DEPTH = typename XY_COLOR_BLT::COLOR_DEPTH; uint32_t pattern[4] = {1, 0, 0, 0}; uint32_t streamBuffer[100] = {}; 
LinearStream stream(streamBuffer, sizeof(streamBuffer)); MockGraphicsAllocation mockAllocation(0, GraphicsAllocation::AllocationType::INTERNAL_HOST_MEMORY, reinterpret_cast<void *>(0x1234), 0x1000, 0, (2 * BlitterConstants::maxBlitWidth * sizeof(uint32_t)), MemoryPool::System4KBPages, MemoryManager::maxOsContextCount); BlitCommandsHelper<FamilyType>::dispatchBlitMemoryColorFill(&mockAllocation, 0, pattern, sizeof(uint32_t), stream, mockAllocation.getUnderlyingBufferSize(), *pDevice->getExecutionEnvironment()->rootDeviceEnvironments[pDevice->getRootDeviceIndex()]); GenCmdList cmdList; ASSERT_TRUE(FamilyType::PARSE::parseCommandBuffer( cmdList, ptrOffset(stream.getCpuBase(), 0), stream.getUsed())); auto itor = find<XY_COLOR_BLT *>(cmdList.begin(), cmdList.end()); EXPECT_NE(cmdList.end(), itor); { auto cmd = genCmdCast<XY_COLOR_BLT *>(*itor); EXPECT_EQ(cmd->getDestinationSurfaceType(), XY_COLOR_BLT::DESTINATION_SURFACE_TYPE::DESTINATION_SURFACE_TYPE_2D); } } using IsXeHPOrAbove = IsAtLeastProduct<IGFX_XE_HP_SDV>; HWTEST2_F(BlitTests, givenEnabledGlobalCacheInvalidationWhenProgrammingGlobalSequencerFlushThenCommandsAreProgrammed, IsXeHPOrAbove) { using MI_LOAD_REGISTER_IMM = typename FamilyType::MI_LOAD_REGISTER_IMM; using MI_SEMAPHORE_WAIT = typename FamilyType::MI_SEMAPHORE_WAIT; DebugManagerStateRestore dbgRestore; DebugManager.flags.GlobalSequencerFlushOnCopyEngine.set(true); uint32_t streamBuffer[100] = {}; LinearStream stream(streamBuffer, sizeof(streamBuffer)); size_t expectedSize = sizeof(MI_LOAD_REGISTER_IMM) + sizeof(MI_SEMAPHORE_WAIT); auto val = BlitCommandsHelper<FamilyType>::getSizeForGlobalSequencerFlush(); EXPECT_EQ(expectedSize, val); BlitCommandsHelper<FamilyType>::programGlobalSequencerFlush(stream); EXPECT_EQ(expectedSize, stream.getUsed()); auto lriCmd = reinterpret_cast<MI_LOAD_REGISTER_IMM *>(streamBuffer); EXPECT_EQ(0xB404u, lriCmd->getRegisterOffset()); EXPECT_EQ(1u, lriCmd->getDataDword()); auto semaphoreCmd = 
reinterpret_cast<MI_SEMAPHORE_WAIT *>(++lriCmd); EXPECT_EQ(MI_SEMAPHORE_WAIT::REGISTER_POLL_MODE::REGISTER_POLL_MODE_REGISTER_POLL, semaphoreCmd->getRegisterPollMode()); EXPECT_EQ(MI_SEMAPHORE_WAIT::COMPARE_OPERATION::COMPARE_OPERATION_SAD_EQUAL_SDD, semaphoreCmd->getCompareOperation()); EXPECT_EQ(0xB404u, semaphoreCmd->getSemaphoreGraphicsAddress()); EXPECT_EQ(0u, semaphoreCmd->getSemaphoreDataDword()); } HWTEST2_F(BlitTests, givenDisabledGlobalCacheInvalidationWhenProgrammingGlobalSequencerFlushThenCommandsAreProgrammed, IsXeHPOrAbove) { DebugManagerStateRestore dbgRestore; DebugManager.flags.GlobalSequencerFlushOnCopyEngine.set(false); uint32_t streamBuffer[100] = {}; LinearStream stream(streamBuffer, sizeof(streamBuffer)); size_t expectedSize = 0u; auto val = BlitCommandsHelper<FamilyType>::getSizeForGlobalSequencerFlush(); EXPECT_EQ(expectedSize, val); BlitCommandsHelper<FamilyType>::programGlobalSequencerFlush(stream); EXPECT_EQ(0u, stream.getUsed()); } HWTEST2_F(BlitTests, givenBcsCommandsHelperWhenMiArbCheckWaRequiredThenReturnTrue, IsXeHPOrAbove) { EXPECT_TRUE(BlitCommandsHelper<FamilyType>::miArbCheckWaRequired()); }
#include <qqmlengine.h> #include <qqmlcontext.h> #include <qqml.h> #include <QGuiApplication> #include <QtQuick/qquickitem.h> #include <QtQuick/qquickview.h> /* This example illustrates exposing a Database Connections as a model in QML */ int main(int argc, char ** argv) { QGuiApplication app(argc, argv); QQuickView view; view.setSource(QUrl("qrc:movingBox/movingBox.qml")); view.show(); return app.exec(); }
#include <wx/filedlg.h>
#include <wx/utils.h>
#include <wx/wfstream.h>
#include <wx/datetime.h>
#include <fstream>
#include <iostream>
#include "futils.h"
#include "ConnectionFrame.h"
#include "MyFrame.h"
#include "cApp.h"

// Whether the current setup has been persisted to disk; consulted before
// destructive actions (e.g. loading another configuration over unsaved work).
bool contentSaved = true;

// Closes the application.
void MyFrame::OnExit(wxCommandEvent& event)
{
    Close(true);
}

void MyFrame::OnAbout(wxCommandEvent& event)
{
    // NGL I hope it is!
    wxMessageBox("This is the start of something BIG!",
                 "About CppCall", wxOK | wxICON_INFORMATION);
}

void MyFrame::OnHello(wxCommandEvent& event)
{
    // wxLogMessage("Hello world from wxWidgets!");
}

void MyFrame::ChatSettings(wxCommandEvent& event)
{
    // LMAO there are no settings boi!
    wxLogMessage("Hello this is the chat settings lmao you just got pranked noob. There are no settings because I dont care! HAHA");
}

// Prompts for a .zach configuration file and opens it for reading, warning
// first if the current setup has unsaved changes.
void MyFrame::OnOpen(wxCommandEvent& event)
{
    if (!contentSaved) {
        if (wxMessageBox(_("Current setup is not saved! Are you sure that you want to proceed?"),
                         _("Please Confirm"),
                         wxICON_QUESTION | wxYES_NO, this) == wxNO) {
            return;
        }
    }

    // BUGFIX: the wildcard was "Zach Files (*.zach)|.zach" — the filter part
    // after '|' lacked the '*', so the dialog matched no .zach files. It now
    // mirrors the pattern used by OnSave.
    wxFileDialog openFileDialog(this, ("Open a configuration file!"), "", "",
                                "Zach Files (*.zach)|*.zach",
                                wxFD_OPEN | wxFD_FILE_MUST_EXIST);
    if (openFileDialog.ShowModal() == wxID_CANCEL)
        return; // the user changed idea...

    // proceed loading the file chosen by the user;
    // this can be done with e.g. wxWidgets input streams:
    wxFileInputStream input_stream(openFileDialog.GetPath());
    if (!input_stream.IsOk()) {
        wxLogError("Cannot open file '%s'.", openFileDialog.GetPath());
        return;
    }
}

// Prompts for a destination and saves the current contents as a .zach file,
// updating the contentSaved bookkeeping flag.
void MyFrame::OnSave(wxCommandEvent& WXUNUSED(event))
{
    wxFileDialog saveFileDialog(this, _("Save as a zach file"), "", "",
                                "ZACH files (*.zach)|*.zach",
                                wxFD_SAVE | wxFD_OVERWRITE_PROMPT);
    if (saveFileDialog.ShowModal() == wxID_CANCEL) {
        // NOTE(review): cancelling the save marks the content as unsaved —
        // confirm this is intended rather than leaving contentSaved untouched.
        contentSaved = false;
        return;
    }

    // save the current contents in the file;
    // this can be done with e.g. wxWidgets output streams:
    wxFileOutputStream output_stream(saveFileDialog.GetPath());
    contentSaved = true;
    if (!output_stream.IsOk()) {
        wxLogError("Cannot save current contents in file '%s'.", saveFileDialog.GetPath());
        contentSaved = false;
        return;
    }
}

void MyFrame::OpenDocs(wxCommandEvent& event)
{
    // Opens up my website for now, one day it will open the doxygen docs.
    // BUGFIX: the URL was "https:/zmht25.ddns.net" (single slash) — an invalid
    // scheme separator that the default browser may refuse to open.
    wxLaunchDefaultBrowser("https://zmht25.ddns.net");
}

// Appends the typed message to today's log file and to the on-screen message
// list, then clears the input field.
void MyFrame::OnSend(wxCommandEvent& event)
{
    futils::AppendToDateFile(ToSend->GetValue() + std::string("\n"));
    MyFrame::Messages->AppendString(ToSend->GetValue());
    // Clears the text field
    ToSend->ChangeValue("");
}

// Wipes both the persisted log for today and the on-screen list.
void MyFrame::ClearChat(wxCommandEvent& event)
{
    futils::ClearDateFile();
    Messages->Clear();
}

void MyFrame::OverwriteChat(wxCommandEvent& event)
{
    futils::OverwriteDateFile();
}

// Initializes the dated log file with the entered name and closes the
// connection dialog.
void ConnectionFrame::OnConnect(wxCommandEvent& event)
{
    futils::InitDateFile(NameField->GetValue());
    ConnectionFrame::Close();
    // TODO: Rename smaller frame to something that makes sense.
}
#include "test/integration/server.h"

#include <memory>
#include <string>

#include "envoy/http/header_map.h"

#include "common/common/thread.h"
#include "common/filesystem/filesystem_impl.h"
#include "common/local_info/local_info_impl.h"
#include "common/network/utility.h"
#include "common/stats/thread_local_store.h"
#include "common/thread_local/thread_local_impl.h"

#include "server/hot_restart_nop_impl.h"
#include "server/options_impl.h"

#include "test/integration/integration.h"
#include "test/integration/utility.h"
#include "test/mocks/runtime/mocks.h"
#include "test/mocks/server/mocks.h"
#include "test/test_common/environment.h"

#include "absl/strings/str_replace.h"
#include "gtest/gtest.h"

namespace Envoy {
namespace Server {

// Builds the server options used by integration tests: short drain/shutdown
// windows and a fast access-log flush keep the tests quick.
OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml,
                                  Network::Address::IpVersion ip_version) {
  OptionsImpl test_options("cluster_name", "node_name", "zone_name", spdlog::level::info);
  test_options.setConfigPath(config_path);
  test_options.setConfigYaml(config_yaml);
  test_options.setLocalAddressIpVersion(ip_version);
  test_options.setFileFlushIntervalMsec(std::chrono::milliseconds(50));
  test_options.setDrainTime(std::chrono::seconds(1));
  test_options.setParentShutdownTime(std::chrono::seconds(2));
  test_options.setMaxStats(16384u);
  return test_options;
}

} // namespace Server

// Factory: constructs an IntegrationTestServerImpl and starts it synchronously.
IntegrationTestServerPtr IntegrationTestServer::create(
    const std::string& config_path, const Network::Address::IpVersion version,
    std::function<void()> pre_worker_start_test_steps, bool deterministic,
    Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization) {
  IntegrationTestServerPtr server{
      std::make_unique<IntegrationTestServerImpl>(time_system, api, config_path)};
  server->start(version, pre_worker_start_test_steps, deterministic, defer_listener_finalization);
  return server;
}

// Blocks the calling thread until every initial listener has been added on the
// worker threads (pending_listeners_ is decremented by onWorkerListenerAdded).
void IntegrationTestServer::waitUntilListenersReady() {
  Thread::LockGuard guard(listeners_mutex_);
  while (pending_listeners_ != 0) {
    // If your test is hanging forever here, you may need to create your listener manually,
    // after BaseIntegrationTest::initialize() is done. See cds_integration_test.cc for an example.
    listeners_cv_.wait(listeners_mutex_); // Safe since CondVar::wait won't throw.
  }
  ENVOY_LOG(info, "listener wait complete");
}

// Spawns the server thread, runs optional pre-worker steps, then (unless
// deferred) waits for the initial listeners. Optionally starts tcpdump when
// the TAP_PATH environment variable is set.
void IntegrationTestServer::start(const Network::Address::IpVersion version,
                                  std::function<void()> pre_worker_start_test_steps,
                                  bool deterministic, bool defer_listener_finalization) {
  ENVOY_LOG(info, "starting integration test server");
  ASSERT(!thread_);
  thread_ = api_.threadFactory().createThread(
      [version, deterministic, this]() -> void { threadRoutine(version, deterministic); });

  // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init.
  if (pre_worker_start_test_steps != nullptr) {
    pre_worker_start_test_steps();
  }

  // Wait for the server to be created and the number of initial listeners to wait for to be set.
  server_set_.waitReady();
  if (!defer_listener_finalization) {
    // Now wait for the initial listeners (if any) to actually be listening on the worker.
    // At this point the server is up and ready for testing.
    waitUntilListenersReady();
  }

  // If we are tapping, spin up tcpdump.
  const auto tap_path = TestEnvironment::getOptionalEnvVar("TAP_PATH");
  if (tap_path) {
    std::vector<uint32_t> ports;
    for (auto listener : server().listenerManager().listeners()) {
      const auto listen_addr = listener.get().socket().localAddress();
      if (listen_addr->type() == Network::Address::Type::Ip) {
        ports.push_back(listen_addr->ip()->port());
      }
    }
    // TODO(htuch): Support a different loopback interface as needed.
    const ::testing::TestInfo* const test_info =
        ::testing::UnitTest::GetInstance()->current_test_info();
    const std::string test_id =
        std::string(test_info->name()) + "_" + std::string(test_info->test_case_name());
    const std::string pcap_path =
        tap_path.value() + "_" + absl::StrReplaceAll(test_id, {{"/", "_"}}) + "_server.pcap";
    tcp_dump_ = std::make_unique<TcpDump>(pcap_path, "lo", ports);
  }
}

IntegrationTestServer::~IntegrationTestServer() {
  // Derived class must have shutdown server.
  thread_->join();
}

// Worker callback: decrements the pending-listener count and wakes any waiter
// blocked in waitUntilListenersReady().
void IntegrationTestServer::onWorkerListenerAdded() {
  if (on_worker_listener_added_cb_) {
    on_worker_listener_added_cb_();
  }
  Thread::LockGuard guard(listeners_mutex_);
  if (pending_listeners_ > 0) {
    pending_listeners_--;
    listeners_cv_.notifyOne();
  }
}

void IntegrationTestServer::onWorkerListenerRemoved() {
  if (on_worker_listener_removed_cb_) {
    on_worker_listener_removed_cb_();
  }
}

// Called from the server thread once the server object exists: records how
// many initial listeners start() must wait for, then releases start().
void IntegrationTestServer::serverReady() {
  pending_listeners_ = server().listenerManager().listeners().size();
  server_set_.setReady();
}

// Body of the server thread: build the options, pick the random generator
// (mocked when determinism is requested), then run the Envoy server.
void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion version,
                                          bool deterministic) {
  OptionsImpl options(Server::createTestOptionsImpl(config_path_, "", version));
  Thread::MutexBasicLockable lock;

  Runtime::RandomGeneratorPtr random_generator;
  if (deterministic) {
    random_generator = std::make_unique<testing::NiceMock<Runtime::MockRandomGenerator>>();
  } else {
    random_generator = std::make_unique<Runtime::RandomGeneratorImpl>();
  }
  createAndRunEnvoyServer(options, time_system_, Network::Utility::getLocalAddress(version), *this,
                          lock, *this, std::move(random_generator));
}

// Builds the real Server::InstanceImpl on the server thread, publishes the
// admin address / raw pointers for the test thread, then blocks in run().
void IntegrationTestServerImpl::createAndRunEnvoyServer(
    OptionsImpl& options, Event::TimeSystem& time_system,
    Network::Address::InstanceConstSharedPtr local_address, TestHooks& hooks,
    Thread::BasicLockable& access_log_lock, Server::ComponentFactory& component_factory,
    Runtime::RandomGeneratorPtr&& random_generator) {
  Server::HotRestartNopImpl restarter;
  ThreadLocal::InstanceImpl tls;
  Stats::HeapStatDataAllocator stats_allocator;
  Stats::ThreadLocalStoreImpl stat_store(options.statsOptions(), stats_allocator);

  Server::InstanceImpl server(options, time_system, local_address, hooks, restarter, stat_store,
                              access_log_lock, component_factory, std::move(random_generator), tls,
                              Thread::threadFactoryForTest());
  // This is technically thread unsafe (assigning to a shared_ptr accessed
  // across threads), but because we synchronize below through serverReady(), the only
  // consumer on the main test thread in ~IntegrationTestServerImpl will not race.
  admin_address_ = server.admin().socket().localAddress();
  server_ = &server;
  stat_store_ = &stat_store;
  serverReady();
  server.run(); // Blocks until the server is told to exit.
}

// Shuts the server down via its admin /quitquitquit endpoint (when an admin
// address was published) after clearing the shared pointers/raw pointers.
IntegrationTestServerImpl::~IntegrationTestServerImpl() {
  ENVOY_LOG(info, "stopping integration test server");

  Network::Address::InstanceConstSharedPtr admin_address(admin_address_);
  admin_address_ = nullptr;
  server_ = nullptr;
  stat_store_ = nullptr;

  if (admin_address != nullptr) {
    BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest(
        admin_address, "POST", "/quitquitquit", "", Http::CodecClient::Type::HTTP1);
    EXPECT_TRUE(response->complete());
    EXPECT_STREQ("200", response->headers().Status()->value().c_str());
  }
}

} // namespace Envoy
//===- lib/Support/ErrorHandling.cpp - Callbacks for errors ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an API used to indicate fatal error conditions.  Non-fatal
// errors (most of them) should be handled through LLVMContext.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/ErrorHandling.h"
#include "llvm-c/ErrorHandling.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/WindowsError.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <mutex>
#include <new>

#if defined(HAVE_UNISTD_H)
# include <unistd.h>
#endif
#if defined(_MSC_VER)
# include <io.h>
# include <fcntl.h>
#endif

using namespace llvm;

// Installed fatal-error / bad-alloc handlers plus the opaque user data passed
// back to them; guarded by the mutexes below when threading is enabled.
static fatal_error_handler_t ErrorHandler = nullptr;
static void *ErrorHandlerUserData = nullptr;

static fatal_error_handler_t BadAllocErrorHandler = nullptr;
static void *BadAllocErrorHandlerUserData = nullptr;

#if LLVM_ENABLE_THREADS == 1
// Mutexes to synchronize installing error handlers and calling error handlers.
// Do not use ManagedStatic, or that may allocate memory while attempting to
// report an OOM.
//
// This usage of std::mutex has to be conditionalized behind ifdefs because
// of this script:
// compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
// That script attempts to statically link the LLVM symbolizer library with the
// STL and hide all of its symbols with 'opt -internalize'. To reduce size, it
// cuts out the threading portions of the hermetic copy of libc++ that it
// builds. We can remove these ifdefs if that script goes away.
static std::mutex ErrorHandlerMutex;
static std::mutex BadAllocErrorHandlerMutex;
#endif

// Registers a process-wide fatal error handler; at most one may be installed.
void llvm::install_fatal_error_handler(fatal_error_handler_t handler,
                                       void *user_data) {
#if LLVM_ENABLE_THREADS == 1
  std::lock_guard<std::mutex> Lock(ErrorHandlerMutex);
#endif
  assert(!ErrorHandler && "Error handler already registered!\n");
  ErrorHandler = handler;
  ErrorHandlerUserData = user_data;
}

void llvm::remove_fatal_error_handler() {
#if LLVM_ENABLE_THREADS == 1
  std::lock_guard<std::mutex> Lock(ErrorHandlerMutex);
#endif
  ErrorHandler = nullptr;
  ErrorHandlerUserData = nullptr;
}

// Convenience overloads that funnel into the Twine version below.
void llvm::report_fatal_error(const char *Reason, bool GenCrashDiag) {
  report_fatal_error(Twine(Reason), GenCrashDiag);
}

void llvm::report_fatal_error(const std::string &Reason, bool GenCrashDiag) {
  report_fatal_error(Twine(Reason), GenCrashDiag);
}

void llvm::report_fatal_error(StringRef Reason, bool GenCrashDiag) {
  report_fatal_error(Twine(Reason), GenCrashDiag);
}

// Invokes the installed handler (read under the lock, called outside it); if
// none is installed, writes the message to stderr. Never returns.
void llvm::report_fatal_error(const Twine &Reason, bool GenCrashDiag) {
  llvm::fatal_error_handler_t handler = nullptr;
  void* handlerData = nullptr;
  {
    // Only acquire the mutex while reading the handler, so as not to invoke a
    // user-supplied callback under a lock.
#if LLVM_ENABLE_THREADS == 1
    std::lock_guard<std::mutex> Lock(ErrorHandlerMutex);
#endif
    handler = ErrorHandler;
    handlerData = ErrorHandlerUserData;
  }

  if (handler) {
    handler(handlerData, Reason.str(), GenCrashDiag);
  } else {
    // Blast the result out to stderr.  We don't try hard to make sure this
    // succeeds (e.g. handling EINTR) and we can't use errs() here because
    // raw ostreams can call report_fatal_error.
SmallVector<char, 64> Buffer; raw_svector_ostream OS(Buffer); OS << "LLVM ERROR: " << Reason << "\n"; StringRef MessageStr = OS.str(); ssize_t written = ::write(2, MessageStr.data(), MessageStr.size()); (void)written; // If something went wrong, we deliberately just give up. } // If we reached here, we are failing ungracefully. Run the interrupt handlers // to make sure any special cleanups get done, in particular that we remove // files registered with RemoveFileOnSignal. sys::RunInterruptHandlers(); abort(); } void llvm::install_bad_alloc_error_handler(fatal_error_handler_t handler, void *user_data) { #if LLVM_ENABLE_THREADS == 1 std::lock_guard<std::mutex> Lock(BadAllocErrorHandlerMutex); #endif assert(!ErrorHandler && "Bad alloc error handler already registered!\n"); BadAllocErrorHandler = handler; BadAllocErrorHandlerUserData = user_data; } void llvm::remove_bad_alloc_error_handler() { #if LLVM_ENABLE_THREADS == 1 std::lock_guard<std::mutex> Lock(BadAllocErrorHandlerMutex); #endif BadAllocErrorHandler = nullptr; BadAllocErrorHandlerUserData = nullptr; } void llvm::report_bad_alloc_error(const char *Reason, bool GenCrashDiag) { fatal_error_handler_t Handler = nullptr; void *HandlerData = nullptr; { // Only acquire the mutex while reading the handler, so as not to invoke a // user-supplied callback under a lock. #if LLVM_ENABLE_THREADS == 1 std::lock_guard<std::mutex> Lock(BadAllocErrorHandlerMutex); #endif Handler = BadAllocErrorHandler; HandlerData = BadAllocErrorHandlerUserData; } if (Handler) { Handler(HandlerData, Reason, GenCrashDiag); llvm_unreachable("bad alloc handler should not return"); } #ifdef LLVM_ENABLE_EXCEPTIONS // If exceptions are enabled, make OOM in malloc look like OOM in new. throw std::bad_alloc(); #else // Don't call the normal error handler. It may allocate memory. Directly write // an OOM to stderr and abort. 
  char OOMMessage[] = "LLVM ERROR: out of memory\n";
  ssize_t written = ::write(2, OOMMessage, strlen(OOMMessage));
  (void)written;
  abort();
#endif
}

#ifdef LLVM_ENABLE_EXCEPTIONS
// Do not set custom new handler if exceptions are enabled. In this case OOM
// errors are handled by throwing 'std::bad_alloc'.
void llvm::install_out_of_memory_new_handler() {
}
#else
// Causes crash on allocation failure. It is called prior to the handler set by
// 'install_bad_alloc_error_handler'.
static void out_of_memory_new_handler() {
  llvm::report_bad_alloc_error("Allocation failed");
}

// Installs new handler that causes crash on allocation failure. It is called by
// InitLLVM.
void llvm::install_out_of_memory_new_handler() {
  std::new_handler old = std::set_new_handler(out_of_memory_new_handler);
  (void)old;
  assert(old == nullptr && "new-handler already installed");
}
#endif

// Prints the "impossible situation" diagnostic and aborts; deliberately does
// NOT route through the installed fatal error handler.
void llvm::llvm_unreachable_internal(const char *msg, const char *file,
                                     unsigned line) {
  // This code intentionally doesn't call the ErrorHandler callback, because
  // llvm_unreachable is intended to be used to indicate "impossible"
  // situations, and not legitimate runtime errors.
  if (msg)
    dbgs() << msg << "\n";
  dbgs() << "UNREACHABLE executed";
  if (file)
    dbgs() << " at " << file << ":" << line;
  dbgs() << "!\n";
  abort();
#ifdef LLVM_BUILTIN_UNREACHABLE
  // Windows systems and possibly others don't declare abort() to be noreturn,
  // so use the unreachable builtin to avoid a Clang self-host warning.
  LLVM_BUILTIN_UNREACHABLE;
#endif
}

// C-API shim: the C handler function pointer travels through the user_data
// slot of the C++ handler signature.
static void bindingsErrorHandler(void *user_data, const std::string& reason,
                                 bool gen_crash_diag) {
  LLVMFatalErrorHandler handler =
      LLVM_EXTENSION reinterpret_cast<LLVMFatalErrorHandler>(user_data);
  handler(reason.c_str());
}

void LLVMInstallFatalErrorHandler(LLVMFatalErrorHandler Handler) {
  install_fatal_error_handler(bindingsErrorHandler,
                              LLVM_EXTENSION reinterpret_cast<void *>(Handler));
}

void LLVMResetFatalErrorHandler() {
  remove_fatal_error_handler();
}

#ifdef _WIN32

#include <winerror.h>

// I'd rather not double the line count of the following.
#define MAP_ERR_TO_COND(x, y)                                                  \
  case x:                                                                      \
    return make_error_code(errc::y)

// Maps a Win32/WinSock error code to the closest generic std::errc condition;
// unmapped values fall through to the system category.
std::error_code llvm::mapWindowsError(unsigned EV) {
  switch (EV) {
    MAP_ERR_TO_COND(ERROR_ACCESS_DENIED, permission_denied);
    MAP_ERR_TO_COND(ERROR_ALREADY_EXISTS, file_exists);
    MAP_ERR_TO_COND(ERROR_BAD_UNIT, no_such_device);
    MAP_ERR_TO_COND(ERROR_BUFFER_OVERFLOW, filename_too_long);
    MAP_ERR_TO_COND(ERROR_BUSY, device_or_resource_busy);
    MAP_ERR_TO_COND(ERROR_BUSY_DRIVE, device_or_resource_busy);
    MAP_ERR_TO_COND(ERROR_CANNOT_MAKE, permission_denied);
    MAP_ERR_TO_COND(ERROR_CANTOPEN, io_error);
    MAP_ERR_TO_COND(ERROR_CANTREAD, io_error);
    MAP_ERR_TO_COND(ERROR_CANTWRITE, io_error);
    MAP_ERR_TO_COND(ERROR_CURRENT_DIRECTORY, permission_denied);
    MAP_ERR_TO_COND(ERROR_DEV_NOT_EXIST, no_such_device);
    MAP_ERR_TO_COND(ERROR_DEVICE_IN_USE, device_or_resource_busy);
    MAP_ERR_TO_COND(ERROR_DIR_NOT_EMPTY, directory_not_empty);
    MAP_ERR_TO_COND(ERROR_DIRECTORY, invalid_argument);
    MAP_ERR_TO_COND(ERROR_DISK_FULL, no_space_on_device);
    MAP_ERR_TO_COND(ERROR_FILE_EXISTS, file_exists);
    MAP_ERR_TO_COND(ERROR_FILE_NOT_FOUND, no_such_file_or_directory);
    MAP_ERR_TO_COND(ERROR_HANDLE_DISK_FULL, no_space_on_device);
    MAP_ERR_TO_COND(ERROR_INVALID_ACCESS, permission_denied);
    MAP_ERR_TO_COND(ERROR_INVALID_DRIVE, no_such_device);
    MAP_ERR_TO_COND(ERROR_INVALID_FUNCTION, function_not_supported);
    MAP_ERR_TO_COND(ERROR_INVALID_HANDLE, invalid_argument);
    MAP_ERR_TO_COND(ERROR_INVALID_NAME, invalid_argument);
    MAP_ERR_TO_COND(ERROR_LOCK_VIOLATION, no_lock_available);
    MAP_ERR_TO_COND(ERROR_LOCKED, no_lock_available);
    MAP_ERR_TO_COND(ERROR_NEGATIVE_SEEK, invalid_argument);
    MAP_ERR_TO_COND(ERROR_NOACCESS, permission_denied);
    MAP_ERR_TO_COND(ERROR_NOT_ENOUGH_MEMORY, not_enough_memory);
    MAP_ERR_TO_COND(ERROR_NOT_READY, resource_unavailable_try_again);
    MAP_ERR_TO_COND(ERROR_OPEN_FAILED, io_error);
    MAP_ERR_TO_COND(ERROR_OPEN_FILES, device_or_resource_busy);
    MAP_ERR_TO_COND(ERROR_OUTOFMEMORY, not_enough_memory);
    MAP_ERR_TO_COND(ERROR_PATH_NOT_FOUND, no_such_file_or_directory);
    MAP_ERR_TO_COND(ERROR_BAD_NETPATH, no_such_file_or_directory);
    MAP_ERR_TO_COND(ERROR_READ_FAULT, io_error);
    MAP_ERR_TO_COND(ERROR_RETRY, resource_unavailable_try_again);
    MAP_ERR_TO_COND(ERROR_SEEK, io_error);
    MAP_ERR_TO_COND(ERROR_SHARING_VIOLATION, permission_denied);
    MAP_ERR_TO_COND(ERROR_TOO_MANY_OPEN_FILES, too_many_files_open);
    MAP_ERR_TO_COND(ERROR_WRITE_FAULT, io_error);
    MAP_ERR_TO_COND(ERROR_WRITE_PROTECT, permission_denied);
    MAP_ERR_TO_COND(WSAEACCES, permission_denied);
    MAP_ERR_TO_COND(WSAEBADF, bad_file_descriptor);
    MAP_ERR_TO_COND(WSAEFAULT, bad_address);
    MAP_ERR_TO_COND(WSAEINTR, interrupted);
    MAP_ERR_TO_COND(WSAEINVAL, invalid_argument);
    MAP_ERR_TO_COND(WSAEMFILE, too_many_files_open);
    MAP_ERR_TO_COND(WSAENAMETOOLONG, filename_too_long);
  default:
    return std::error_code(EV, std::system_category());
  }
}

#endif
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/common/common.h>
#include <aws/core/utils/event/EventHeader.h>
#include <aws/core/utils/event/EventMessage.h>
#include <aws/core/utils/event/EventStreamDecoder.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <aws/core/utils/memory/AWSMemory.h>

namespace Aws
{
    namespace Utils
    {
        namespace Event
        {
            static const char EVENT_STREAM_DECODER_CLASS_TAG[] = "Aws::Utils::Event::EventStreamDecoder";

            // Wires the aws-c-event-stream streaming decoder to this class's
            // static callbacks; the handler travels as the callback context.
            EventStreamDecoder::EventStreamDecoder(EventStreamHandler* handler) : m_eventStreamHandler(handler)
            {
                aws_event_stream_streaming_decoder_init(&m_decoder, get_aws_allocator(), onPayloadSegment, onPreludeReceived, onHeaderReceived, onError, (void*)handler);
            }

            EventStreamDecoder::~EventStreamDecoder()
            {
                aws_event_stream_streaming_decoder_clean_up(&m_decoder);
            }

            // Feeds the entire buffer into the streaming decoder.
            void EventStreamDecoder::Pump(const ByteBuffer& data)
            {
                Pump(data, data.GetLength());
            }

            // Feeds the first `length` bytes of `data` into the streaming
            // decoder; the callbacks below fire as messages are parsed.
            void EventStreamDecoder::Pump(const ByteBuffer& data, size_t length)
            {
                aws_byte_buf dataBuf = aws_byte_buf_from_array(static_cast<uint8_t*>(data.GetUnderlyingData()), length);
                aws_event_stream_streaming_decoder_pump(&m_decoder, &dataBuf);
            }

            void EventStreamDecoder::Reset()
            {
                m_eventStreamHandler->Reset();
            }

            // Re-initializes the C decoder with a new handler as context.
            // NOTE(review): this neither calls clean_up on the old decoder
            // state nor updates m_eventStreamHandler, so Reset() would still
            // target the previous handler — verify against callers.
            void EventStreamDecoder::ResetEventStreamHandler(EventStreamHandler* handler)
            {
                aws_event_stream_streaming_decoder_init(&m_decoder, get_aws_allocator(), onPayloadSegment, onPreludeReceived, onHeaderReceived, onError, reinterpret_cast<void *>(handler));
            }

            // Callback: a chunk of a message payload arrived. Appends it to
            // the handler; when the final segment lands the completed message
            // is surfaced via OnEvent() and the handler is reset.
            void EventStreamDecoder::onPayloadSegment(
                aws_event_stream_streaming_decoder* decoder,
                aws_byte_buf* payload,
                int8_t isFinalSegment,
                void* context)
            {
                AWS_UNREFERENCED_PARAM(decoder);
                auto handler = static_cast<EventStreamHandler*>(context);
                assert(handler);
                if (!handler)
                {
                    AWS_LOGSTREAM_ERROR(EVENT_STREAM_DECODER_CLASS_TAG, "Payload received, but decoder encountered internal errors before."
                        "ErrorCode: " << EventStreamErrorsMapper::GetNameForError(handler->GetInternalError()) << ", "
                        "ErrorMessage: " << handler->GetEventPayloadAsString());
                    return;
                }
                handler->WriteMessageEventPayload(static_cast<unsigned char*>(payload->buffer), payload->len);

                // Complete payload received
                if (isFinalSegment == 1)
                {
                    assert(handler->IsMessageCompleted());
                    handler->OnEvent();
                    handler->Reset();
                }
            }

            // Callback: a message prelude (total length + headers length)
            // arrived. Derives the payload length by subtracting the four
            // 4-byte framing fields; empty messages are surfaced immediately.
            void EventStreamDecoder::onPreludeReceived(
                aws_event_stream_streaming_decoder* decoder,
                aws_event_stream_message_prelude* prelude,
                void* context)
            {
                AWS_UNREFERENCED_PARAM(decoder);
                auto handler = static_cast<EventStreamHandler*>(context);
                handler->Reset();

                //Encounter internal error in prelude received.
                //This error will be handled by OnError callback function later.
                if (prelude->total_len < prelude->headers_len + 16)
                {
                    return;
                }

                handler->SetMessageMetadata(prelude->total_len, prelude->headers_len,
                    prelude->total_len - prelude->headers_len - 4/*total byte-length*/ - 4/*headers byte-length*/ - 4/*prelude crc*/ - 4/*message crc*/);
                AWS_LOGSTREAM_TRACE(EVENT_STREAM_DECODER_CLASS_TAG, "Message received, the expected length of the message is: " << prelude->total_len <<
                    " bytes, and the expected length of the header is: " << prelude->headers_len << " bytes");

                //Handle empty message
                //if (handler->m_message.GetHeadersLength() == 0 && handler->m_message.GetPayloadLength() == 0)
                if (handler->IsMessageCompleted())
                {
                    handler->OnEvent();
                    handler->Reset();
                }
            }

            // Callback: one decoded header arrived. Records it with its wire
            // size; header-only messages complete (and fire) here.
            void EventStreamDecoder::onHeaderReceived(
                aws_event_stream_streaming_decoder* decoder,
                aws_event_stream_message_prelude* prelude,
                aws_event_stream_header_value_pair* header,
                void* context)
            {
                AWS_UNREFERENCED_PARAM(decoder);
                AWS_UNREFERENCED_PARAM(prelude);
                auto handler = static_cast<EventStreamHandler*>(context);
                assert(handler);
                if (!handler)
                {
                    AWS_LOGSTREAM_ERROR(EVENT_STREAM_DECODER_CLASS_TAG, "Payload received, but decoder encountered internal errors before."
                        "ErrorCode: " << EventStreamErrorsMapper::GetNameForError(handler->GetInternalError()) << ", "
                        "ErrorMessage: " << handler->GetEventPayloadAsString());
                    return;
                }

                // The length of a header = 1 byte (to represent the length of header name) + length of header name + 1 byte (to represent header type)
                // + 2 bytes (to represent length of header value) + length of header value
                handler->InsertMessageEventHeader(Aws::String(header->header_name, header->header_name_len),
                    1 + header->header_name_len + 1 + 2 + header->header_value_len, EventHeaderValue(header));

                // Handle messages only have headers, but without payload.
                //if (handler->m_message.GetHeadersLength() == handler->m_headersBytesReceived() && handler->m_message.GetPayloadLength() == 0)
                if (handler->IsMessageCompleted())
                {
                    handler->OnEvent();
                    handler->Reset();
                }
            }

            // Callback: the C decoder hit a framing/CRC error. Marks the
            // handler failed, stores the message text as the payload, and
            // surfaces the failure through OnEvent().
            void EventStreamDecoder::onError(
                aws_event_stream_streaming_decoder* decoder,
                aws_event_stream_message_prelude* prelude,
                int error_code,
                const char* message,
                void* context)
            {
                AWS_UNREFERENCED_PARAM(decoder);
                AWS_UNREFERENCED_PARAM(prelude);
                auto handler = static_cast<EventStreamHandler*>(context);
                handler->SetFailure();
                handler->SetInternalError(error_code);
                handler->WriteMessageEventPayload(reinterpret_cast<const unsigned char*>(message), strlen(message));
                handler->OnEvent();
            }

        } // namespace Event
    } // namespace Utils
} // namespace Aws
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/devtools/device/usb/android_usb_socket.h"

#include <stddef.h>

#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "net/base/io_buffer.h"
#include "net/base/ip_address.h"
#include "net/base/net_errors.h"

namespace {

// Upper bound on a single ADB WRTE payload.
const int kMaxPayload = 4096;

}  // namespace

AndroidUsbSocket::AndroidUsbSocket(scoped_refptr<AndroidUsbDevice> device,
                                   uint32_t socket_id,
                                   const std::string& command,
                                   base::Closure delete_callback)
    : device_(device),
      command_(command),
      local_id_(socket_id),
      remote_id_(0),
      is_connected_(false),
      delete_callback_(delete_callback),
      weak_factory_(this) {}

AndroidUsbSocket::~AndroidUsbSocket() {
  DCHECK(CalledOnValidThread());
  if (is_connected_)
    Disconnect();
  if (!delete_callback_.is_null())
    delete_callback_.Run();
}

// Dispatches one inbound ADB message addressed to this socket. Callbacks run
// from here may delete `this` — no member access after they fire.
void AndroidUsbSocket::HandleIncoming(std::unique_ptr<AdbMessage> message) {
  if (!device_.get())
    return;

  CHECK_EQ(message->arg1, local_id_);
  switch (message->command) {
    case AdbMessage::kCommandOKAY:
      if (!is_connected_) {
        // First OKAY acknowledges our OPEN and carries the remote socket id.
        remote_id_ = message->arg0;
        is_connected_ = true;
        if (!connect_callback_.is_null())
          base::ResetAndReturn(&connect_callback_).Run(net::OK);
        // "this" can be deleted.
      } else {
        // Subsequent OKAYs acknowledge a WRTE we sent.
        RespondToWriter(write_length_);
        // "this" can be deleted.
      }
      break;
    case AdbMessage::kCommandWRTE:
      device_->Send(AdbMessage::kCommandOKAY, local_id_, message->arg0, "");
      read_buffer_ += message->body;
      // Allow WRTE over new connection even though OKAY ack was not received.
      if (!is_connected_) {
        remote_id_ = message->arg0;
        is_connected_ = true;
        if (!connect_callback_.is_null())
          base::ResetAndReturn(&connect_callback_).Run(net::OK);
        // "this" can be deleted.
      } else {
        RespondToReader(false);
        // "this" can be deleted.
      }
      break;
    case AdbMessage::kCommandCLSE:
      if (is_connected_)
        device_->Send(AdbMessage::kCommandCLSE, local_id_, 0, "");
      Terminated(true);
      // "this" can be deleted.
      break;
    default:
      break;
  }
}

// Tears down the socket <-> device link and fires the delete callback; when
// the device initiated the close, also fails any pending read/write/connect.
void AndroidUsbSocket::Terminated(bool closed_by_device) {
  is_connected_ = false;

  // Break the socket -> device connection, release the device.
  device_ = nullptr;
  base::ResetAndReturn(&delete_callback_).Run();

  if (!closed_by_device)
    return;

  // Respond to pending callbacks.
  if (!connect_callback_.is_null()) {
    base::ResetAndReturn(&connect_callback_).Run(net::ERR_FAILED);
    // "this" can be deleted.
    return;
  }
  base::WeakPtr<AndroidUsbSocket> weak_this = weak_factory_.GetWeakPtr();
  RespondToReader(true);
  // "this" can be deleted.
  if (weak_this) {
    RespondToWriter(net::ERR_FAILED);
    // "this" can be deleted.
  }
}

// net::StreamSocket::Read: serves buffered bytes synchronously when possible,
// otherwise parks the callback until the next WRTE arrives.
int AndroidUsbSocket::Read(net::IOBuffer* buffer,
                           int length,
                           const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!is_connected_)
    return device_.get() ? net::ERR_SOCKET_NOT_CONNECTED : 0;

  DCHECK(read_callback_.is_null());
  if (read_buffer_.empty()) {
    read_callback_ = callback;
    read_io_buffer_ = buffer;
    read_length_ = length;
    return net::ERR_IO_PENDING;
  }

  size_t bytes_to_copy = static_cast<size_t>(length) > read_buffer_.length()
                             ? read_buffer_.length()
                             : static_cast<size_t>(length);
  memcpy(buffer->data(), read_buffer_.data(), bytes_to_copy);
  if (read_buffer_.length() > bytes_to_copy)
    read_buffer_ = read_buffer_.substr(bytes_to_copy);
  else
    read_buffer_ = std::string();
  return bytes_to_copy;
}

// net::StreamSocket::Write: sends one WRTE (clamped to kMaxPayload) and
// completes asynchronously when the matching OKAY comes back.
int AndroidUsbSocket::Write(net::IOBuffer* buffer,
                            int length,
                            const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!is_connected_)
    return net::ERR_SOCKET_NOT_CONNECTED;

  if (length > kMaxPayload)
    length = kMaxPayload;

  DCHECK(write_callback_.is_null());
  write_callback_ = callback;
  write_length_ = length;
  device_->Send(AdbMessage::kCommandWRTE, local_id_, remote_id_,
                std::string(buffer->data(), length));
  return net::ERR_IO_PENDING;
}

int AndroidUsbSocket::SetReceiveBufferSize(int32_t size) {
  NOTIMPLEMENTED();
  return net::ERR_NOT_IMPLEMENTED;
}

int AndroidUsbSocket::SetSendBufferSize(int32_t size) {
  NOTIMPLEMENTED();
  return net::ERR_NOT_IMPLEMENTED;
}

// Sends the ADB OPEN for `command_`; completes when OKAY (or WRTE) arrives.
int AndroidUsbSocket::Connect(const net::CompletionCallback& callback) {
  DCHECK(CalledOnValidThread());
  DCHECK(!callback.is_null());
  if (!device_.get())
    return net::ERR_FAILED;

  DCHECK(!is_connected_);
  DCHECK(connect_callback_.is_null());
  connect_callback_ = callback;
  device_->Send(AdbMessage::kCommandOPEN, local_id_, 0, command_);
  return net::ERR_IO_PENDING;
}

void AndroidUsbSocket::Disconnect() {
  if (!device_.get())
    return;
  device_->Send(AdbMessage::kCommandCLSE, local_id_, remote_id_, "");
  Terminated(false);
}

bool AndroidUsbSocket::IsConnected() const {
  DCHECK(CalledOnValidThread());
  return is_connected_;
}

bool AndroidUsbSocket::IsConnectedAndIdle() const {
  NOTIMPLEMENTED();
  return false;
}

// Reports a placeholder 0.0.0.0:0 peer — there is no real IP endpoint behind
// a USB transport.
int AndroidUsbSocket::GetPeerAddress(net::IPEndPoint* address) const {
  *address = net::IPEndPoint(net::IPAddress(0, 0, 0, 0), 0);
  return net::OK;
}

int AndroidUsbSocket::GetLocalAddress(net::IPEndPoint* address) const {
  NOTIMPLEMENTED();
  return net::ERR_NOT_IMPLEMENTED;
}

const net::BoundNetLog& AndroidUsbSocket::NetLog() const {
  return net_log_;
}

void AndroidUsbSocket::SetSubresourceSpeculation() {
  NOTIMPLEMENTED();
}

void AndroidUsbSocket::SetOmniboxSpeculation() {
  NOTIMPLEMENTED();
}

bool AndroidUsbSocket::WasEverUsed() const {
  NOTIMPLEMENTED();
  return true;
}

bool AndroidUsbSocket::WasNpnNegotiated() const {
  NOTIMPLEMENTED();
  return true;
}

net::NextProto AndroidUsbSocket::GetNegotiatedProtocol() const {
  NOTIMPLEMENTED();
  return net::kProtoUnknown;
}

bool AndroidUsbSocket::GetSSLInfo(net::SSLInfo* ssl_info) {
  return false;
}

void AndroidUsbSocket::GetConnectionAttempts(
    net::ConnectionAttempts* out) const {
  out->clear();
}

int64_t AndroidUsbSocket::GetTotalReceivedBytes() const {
  NOTIMPLEMENTED();
  return 0;
}

// Completes a pending Read with whatever is buffered (possibly 0 bytes when
// `disconnect` forces completion).
void AndroidUsbSocket::RespondToReader(bool disconnect) {
  if (read_callback_.is_null() || (read_buffer_.empty() && !disconnect))
    return;
  size_t bytes_to_copy =
      static_cast<size_t>(read_length_) > read_buffer_.length()
          ? read_buffer_.length()
          : static_cast<size_t>(read_length_);
  memcpy(read_io_buffer_->data(), read_buffer_.data(), bytes_to_copy);
  if (read_buffer_.length() > bytes_to_copy)
    read_buffer_ = read_buffer_.substr(bytes_to_copy);
  else
    read_buffer_ = std::string();
  base::ResetAndReturn(&read_callback_).Run(bytes_to_copy);
}

// Completes a pending Write with `result` (byte count or net error).
void AndroidUsbSocket::RespondToWriter(int result) {
  if (!write_callback_.is_null())
    base::ResetAndReturn(&write_callback_).Run(result);
}
//
//  ResizeExecution.cpp
//  MNN
//
//  Created by MNN on 2019/02/28.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "backend/opencl/execution/ResizeExecution.hpp"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"

namespace MNN {
namespace OpenCL {

// OpenCL execution for the Resize op: caches the x/y scale factors from the
// serialized op parameters at construction time.
ResizeExecution::ResizeExecution(const std::vector<Tensor *> &inputs, const MNN::Op *op, Backend *backend)
    : Execution(backend) {
#ifdef LOG_VERBOSE
    MNN_PRINT("Start ResizeExecution init !\n");
#endif
    mOpenCLBackend = static_cast<OpenCLBackend *>(backend);
    const auto *scaleParams = op->main_as_Resize();
    mXScale = scaleParams->xScale();
    mYScale = scaleParams->yScale();
#ifdef LOG_VERBOSE
    MNN_PRINT("end ResizeExecution init !\n");
#endif
}

// Called when tensor shapes change: builds the "interp" kernel on first use,
// binds all kernel arguments for the current shapes, and derives the
// global/local work sizes. Does not launch the kernel.
ErrorCode ResizeExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
#ifdef LOG_VERBOSE
    MNN_PRINT("Start ResizeExecution onResize !\n");
#endif
    auto runtime = mOpenCLBackend->getOpenCLRuntime();
    // Lazily compile the kernel once and cache the device work-group limit.
    if (mKernel.get() == nullptr) {
        mKernel = runtime->buildKernel("interp", "interp", {});
        mMaxWorkGroupSize = static_cast<uint32_t>(runtime->getMaxWorkGroupSize(mKernel));
    }
    Tensor *input  = inputs[0];
    Tensor *output = outputs[0];
    std::vector<int> inputShape  = tensorShapeFormat(input);
    std::vector<int> outputShape = tensorShapeFormat(output);
    // The kernel maps output coordinates back onto the input, so it receives
    // the inverse scale factors.
    const float x_scaling_ = 1.0 / mXScale;
    const float y_scaling_ = 1.0 / mYScale;
    const int batch    = outputShape.at(0);
    const int height   = outputShape.at(1);
    const int width    = outputShape.at(2);
    const int channels = outputShape.at(3);
    // Channels are packed four-per-pixel in the OpenCL image layout.
    const int channelBlocks = UP_DIV(channels, 4);
    const int inputHeight = input->height();
    const int inputWidth  = input->width();
    // One work item per (channel block, output x, output y within the batch).
    const std::vector<uint32_t> gws = {static_cast<uint32_t>(channelBlocks),
                                       static_cast<uint32_t>(width),
                                       static_cast<uint32_t>(height * batch)};
    uint32_t idx = 0;
    mKernel.setArg(idx++, gws[0]);
    mKernel.setArg(idx++, gws[1]);
    mKernel.setArg(idx++, gws[2]);
    mKernel.setArg(idx++, openCLImage(input));
    mKernel.setArg(idx++, openCLImage(output));
    mKernel.setArg(idx++, y_scaling_);
    mKernel.setArg(idx++, x_scaling_);
    mKernel.setArg(idx++, static_cast<int32_t>(inputHeight));
    mKernel.setArg(idx++, static_cast<int32_t>(inputWidth));
    mKernel.setArg(idx++, static_cast<int32_t>(height));
    std::string name = "Interp";
    // Tune a local work size for this gws, then round each global dimension
    // up to a multiple of the chosen local size.
    mLWS = localWS3DDefault(gws, mMaxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime(), name, mKernel);
    for (size_t i = 0; i < mLWS.size(); ++i) {
        if (mLWS[i] != 0) {
            mGWS[i] = ROUND_UP(gws[i], std::max((uint32_t)1, mLWS[i]));
        }
    }
#ifdef LOG_VERBOSE
    MNN_PRINT("end ResizeExecution onResize !\n");
#endif
    return NO_ERROR;
}

// Enqueues the prepared kernel on the backend's command queue. When
// ENABLE_OPENCL_TIME_PROFILER is defined, an event is used to report the
// kernel's execution time.
ErrorCode ResizeExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
#ifdef LOG_VERBOSE
    MNN_PRINT("Start ResizeExecution onExecute !\n");
#endif
    auto runtime = mOpenCLBackend->getOpenCLRuntime();

#ifdef ENABLE_OPENCL_TIME_PROFILER
    cl::Event event;
    auto error = runtime->commandQueue().enqueueNDRangeKernel(
        mKernel, cl::NullRange, cl::NDRange(mGWS[0], mGWS[1], mGWS[2]),
        cl::NDRange(mLWS[0], mLWS[1], mLWS[2]), nullptr, &event);
    int costTime = (int)mOpenCLBackend->getOpenCLRuntime()->getCostTime(&event);
    MNN_PRINT("kernel cost:%d us Resize\n",costTime);
#else
    auto error = runtime->commandQueue().enqueueNDRangeKernel(
        mKernel, cl::NullRange, cl::NDRange(mGWS[0], mGWS[1], mGWS[2]),
        cl::NDRange(mLWS[0], mLWS[1], mLWS[2]), nullptr, nullptr);
#endif
    MNN_CHECK_CL_SUCCESS(error);

#ifdef LOG_VERBOSE
    MNN_PRINT("end ResizeExecution onExecute !\n");
#endif
    return NO_ERROR;
}

// Registers this execution as the OpenCL creator for OpType_Resize.
OpenCLCreatorRegister<TypedCreator<ResizeExecution>> __resize_op(OpType_Resize);

} // namespace OpenCL
} // namespace MNN
/*
 * Copyright (c) 2017-2019 THL A29 Limited, a Tencent company. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <tencentcloud/mps/v20190612/model/UserDefineFaceReviewTemplateInfoForUpdate.h>

using TencentCloud::CoreInternalOutcome;
using namespace TencentCloud::Mps::V20190612::Model;
using namespace rapidjson;
using namespace std;

// Generated data model for updating a user-defined face review template.
// Every field is optional; the m_*HasBeenSet flags record which fields were
// populated so serialization only emits what the caller actually set.
UserDefineFaceReviewTemplateInfoForUpdate::UserDefineFaceReviewTemplateInfoForUpdate() :
    m_switchHasBeenSet(false),
    m_labelSetHasBeenSet(false),
    m_blockConfidenceHasBeenSet(false),
    m_reviewConfidenceHasBeenSet(false)
{
}

// Populates this object from a rapidjson value, type-checking each member
// before reading it. Returns an error outcome on the first type mismatch.
CoreInternalOutcome UserDefineFaceReviewTemplateInfoForUpdate::Deserialize(const Value &value)
{
    string requestId = "";

    if (value.HasMember("Switch") && !value["Switch"].IsNull())
    {
        if (!value["Switch"].IsString())
        {
            return CoreInternalOutcome(Error("response `UserDefineFaceReviewTemplateInfoForUpdate.Switch` IsString=false incorrectly").SetRequestId(requestId));
        }
        m_switch = string(value["Switch"].GetString());
        m_switchHasBeenSet = true;
    }

    if (value.HasMember("LabelSet") && !value["LabelSet"].IsNull())
    {
        if (!value["LabelSet"].IsArray())
            return CoreInternalOutcome(Error("response `UserDefineFaceReviewTemplateInfoForUpdate.LabelSet` is not array type"));

        const Value &tmpValue = value["LabelSet"];
        for (Value::ConstValueIterator itr = tmpValue.Begin(); itr != tmpValue.End(); ++itr)
        {
            m_labelSet.push_back((*itr).GetString());
        }
        m_labelSetHasBeenSet = true;
    }

    if (value.HasMember("BlockConfidence") && !value["BlockConfidence"].IsNull())
    {
        if (!value["BlockConfidence"].IsInt64())
        {
            return CoreInternalOutcome(Error("response `UserDefineFaceReviewTemplateInfoForUpdate.BlockConfidence` IsInt64=false incorrectly").SetRequestId(requestId));
        }
        m_blockConfidence = value["BlockConfidence"].GetInt64();
        m_blockConfidenceHasBeenSet = true;
    }

    if (value.HasMember("ReviewConfidence") && !value["ReviewConfidence"].IsNull())
    {
        if (!value["ReviewConfidence"].IsInt64())
        {
            return CoreInternalOutcome(Error("response `UserDefineFaceReviewTemplateInfoForUpdate.ReviewConfidence` IsInt64=false incorrectly").SetRequestId(requestId));
        }
        m_reviewConfidence = value["ReviewConfidence"].GetInt64();
        m_reviewConfidenceHasBeenSet = true;
    }

    return CoreInternalOutcome(true);
}

// Serializes every field whose HasBeenSet flag is true into |value|, using
// |allocator| to own copied strings and array elements.
void UserDefineFaceReviewTemplateInfoForUpdate::ToJsonObject(Value &value, Document::AllocatorType& allocator) const
{
    if (m_switchHasBeenSet)
    {
        Value iKey(kStringType);
        string key = "Switch";
        iKey.SetString(key.c_str(), allocator);
        value.AddMember(iKey, Value(m_switch.c_str(), allocator).Move(), allocator);
    }

    if (m_labelSetHasBeenSet)
    {
        Value iKey(kStringType);
        string key = "LabelSet";
        iKey.SetString(key.c_str(), allocator);
        // Add an empty array first, then push each label into it.
        value.AddMember(iKey, Value(kArrayType).Move(), allocator);

        for (auto itr = m_labelSet.begin(); itr != m_labelSet.end(); ++itr)
        {
            value[key.c_str()].PushBack(Value().SetString((*itr).c_str(), allocator), allocator);
        }
    }

    if (m_blockConfidenceHasBeenSet)
    {
        Value iKey(kStringType);
        string key = "BlockConfidence";
        iKey.SetString(key.c_str(), allocator);
        value.AddMember(iKey, m_blockConfidence, allocator);
    }

    if (m_reviewConfidenceHasBeenSet)
    {
        Value iKey(kStringType);
        string key = "ReviewConfidence";
        iKey.SetString(key.c_str(), allocator);
        value.AddMember(iKey, m_reviewConfidence, allocator);
    }
}

// Accessors below are generated: each setter also marks the corresponding
// HasBeenSet flag so the field participates in serialization.
string UserDefineFaceReviewTemplateInfoForUpdate::GetSwitch() const
{
    return m_switch;
}

void UserDefineFaceReviewTemplateInfoForUpdate::SetSwitch(const string& _switch)
{
    m_switch = _switch;
    m_switchHasBeenSet = true;
}

bool UserDefineFaceReviewTemplateInfoForUpdate::SwitchHasBeenSet() const
{
    return m_switchHasBeenSet;
}

vector<string> UserDefineFaceReviewTemplateInfoForUpdate::GetLabelSet() const
{
    return m_labelSet;
}

void UserDefineFaceReviewTemplateInfoForUpdate::SetLabelSet(const vector<string>& _labelSet)
{
    m_labelSet = _labelSet;
    m_labelSetHasBeenSet = true;
}

bool UserDefineFaceReviewTemplateInfoForUpdate::LabelSetHasBeenSet() const
{
    return m_labelSetHasBeenSet;
}

int64_t UserDefineFaceReviewTemplateInfoForUpdate::GetBlockConfidence() const
{
    return m_blockConfidence;
}

void UserDefineFaceReviewTemplateInfoForUpdate::SetBlockConfidence(const int64_t& _blockConfidence)
{
    m_blockConfidence = _blockConfidence;
    m_blockConfidenceHasBeenSet = true;
}

bool UserDefineFaceReviewTemplateInfoForUpdate::BlockConfidenceHasBeenSet() const
{
    return m_blockConfidenceHasBeenSet;
}

int64_t UserDefineFaceReviewTemplateInfoForUpdate::GetReviewConfidence() const
{
    return m_reviewConfidence;
}

void UserDefineFaceReviewTemplateInfoForUpdate::SetReviewConfidence(const int64_t& _reviewConfidence)
{
    m_reviewConfidence = _reviewConfidence;
    m_reviewConfidenceHasBeenSet = true;
}

bool UserDefineFaceReviewTemplateInfoForUpdate::ReviewConfidenceHasBeenSet() const
{
    return m_reviewConfidenceHasBeenSet;
}
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/trace/service/session.h"

#include "base/atomicops.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/environment.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread.h"
#include "gtest/gtest.h"
#include "syzygy/trace/protocol/call_trace_defs.h"
#include "syzygy/trace/service/service.h"
#include "syzygy/trace/service/service_rpc_impl.h"
#include "syzygy/trace/service/session_trace_file_writer.h"
#include "syzygy/trace/service/session_trace_file_writer_factory.h"

namespace trace {
namespace service {

namespace {

// A trace-file writer that queues consumed buffers instead of recycling them
// immediately, letting tests decide exactly when buffers get written so
// back-pressure can be forced deterministically.
class TestSessionTraceFileWriter : public SessionTraceFileWriter {
 public:
  explicit TestSessionTraceFileWriter(
      base::MessageLoop* message_loop, const base::FilePath& trace_directory)
      : SessionTraceFileWriter(message_loop, trace_directory),
        num_buffers_to_recycle_(0) {
    base::subtle::Barrier_AtomicIncrement(&num_instances_, 1);
  }

  ~TestSessionTraceFileWriter() {
    base::subtle::Barrier_AtomicIncrement(&num_instances_, -1);
  }

  // Drains up to num_buffers_to_recycle_ queued buffers through the real
  // writer. Must be called with queue_lock_ held.
  void RecycleBuffers() {
    queue_lock_.AssertAcquired();

    while (!queue_.empty() && num_buffers_to_recycle_ != 0) {
      Buffer* buffer = queue_.front();
      queue_.pop_front();

      ASSERT_TRUE(buffer != NULL);
      ASSERT_EQ(buffer->session, session_ref_.get());
      ASSERT_TRUE(SessionTraceFileWriter::ConsumeBuffer(buffer));
      --num_buffers_to_recycle_;
    }

    // If we've emptied the queue, release our reference to the session.
    if (queue_.empty())
      session_ref_ = reinterpret_cast<Session*>(NULL);
  }

  // Permits |num_buffers| queued buffers to be recycled (written) now.
  void AllowBuffersToBeRecycled(size_t num_buffers) {
    base::AutoLock auto_lock(queue_lock_);
    num_buffers_to_recycle_ = num_buffers;
    RecycleBuffers();
  }

  // Intercepts buffers bound for the trace file and parks them in queue_
  // until the test releases them via AllowBuffersToBeRecycled().
  virtual bool ConsumeBuffer(Buffer* buffer) override {
    base::AutoLock auto_lock(queue_lock_);
    EXPECT_TRUE(buffer != NULL);
    if (buffer) {
      // While there are buffers in the queue, keep a reference to the session.
      if (queue_.empty()) {
        EXPECT_TRUE(session_ref_.get() == NULL);
        EXPECT_TRUE(buffer->session != NULL);
        session_ref_ = buffer->session;
      }

      // Put the buffer into the consumer queue.
      queue_.push_back(buffer);
    }
    RecycleBuffers();

    return buffer != NULL;
  }

  // Returns the number of live writer instances (atomic acquire read).
  static base::subtle::Atomic32 num_instances() {
    return base::subtle::Acquire_Load(&num_instances_);
  }

 protected:
  // The queue of buffers to be consumed.
  std::deque<Buffer*> queue_;

  // This keeps the session object alive while there are buffers in the queue.
  scoped_refptr<Session> session_ref_;

  // A lock to protect access to the queue and session reference.
  base::Lock queue_lock_;

  // The number of buffers to recycle before pausing.
  size_t num_buffers_to_recycle_;

  // The number of active writer instances.
  // @note All accesses to this member should be via base/atomicops.h
  //     functions.
  static volatile base::subtle::Atomic32 num_instances_;
};

volatile base::subtle::Atomic32 TestSessionTraceFileWriter::num_instances_ = 0;

// Factory that substitutes TestSessionTraceFileWriter for the production
// writer, while still exercising the base factory's checks.
class TestSessionTraceFileWriterFactory : public SessionTraceFileWriterFactory {
 public:
  explicit TestSessionTraceFileWriterFactory(base::MessageLoop* message_loop)
      : SessionTraceFileWriterFactory(message_loop) {
  }

  bool CreateConsumer(scoped_refptr<BufferConsumer>* consumer) override {
    // w00t, somewhat bogus coverage ploy, at least will reuse the DCHECKS.
    EXPECT_TRUE(SessionTraceFileWriterFactory::CreateConsumer(consumer));
    EXPECT_TRUE((*consumer)->HasOneRef());
    *consumer = new TestSessionTraceFileWriter(
        message_loop_, trace_file_directory_);
    return true;
  }
};

// Session subclass that exposes condition variables so tests can observe
// back-pressure waits, singleton-buffer destruction and buffer allocation,
// and that fakes out process-info / handle-duplication for test PIDs.
class TestSession : public Session {
 public:
  explicit TestSession(Service* service)
      : Session(service),
        waiting_for_buffer_to_be_recycled_(&lock_),
        waiting_for_buffer_to_be_recycled_state_(false),
        destroying_singleton_buffer_(&lock_),
        destroying_singleton_buffer_state_(false),
        last_singleton_buffer_destroyed_(NULL),
        singleton_buffers_destroyed_(0),
        allocating_buffers_(&lock_),
        allocating_buffers_state_(false) {
  }

  void AllowBuffersToBeRecycled(size_t num_buffers) {
    static_cast<TestSessionTraceFileWriter*>(
        buffer_consumer())->AllowBuffersToBeRecycled(num_buffers);
  }

  void ClearWaitingForBufferToBeRecycledState() {
    base::AutoLock lock(lock_);
    waiting_for_buffer_to_be_recycled_state_ = false;
  }

  // Blocks until OnWaitingForBufferToBeRecycled() fires, then clears the flag.
  void PauseUntilWaitingForBufferToBeRecycled() {
    base::AutoLock lock(lock_);
    while (!waiting_for_buffer_to_be_recycled_state_)
      waiting_for_buffer_to_be_recycled_.Wait();
    waiting_for_buffer_to_be_recycled_state_ = false;
  }

  void ClearDestroyingSingletonBufferState() {
    base::AutoLock lock(lock_);
    destroying_singleton_buffer_state_ = false;
  }

  void PauseUntilDestroyingSingletonBuffer() {
    base::AutoLock lock(lock_);
    while (!destroying_singleton_buffer_state_)
      destroying_singleton_buffer_.Wait();
    // NOTE(review): sets the flag to true after the wait, unlike the other
    // Pause* helpers which reset their flag to false — confirm intent.
    destroying_singleton_buffer_state_ = true;
  }

  void ClearAllocatingBuffersState() {
    base::AutoLock lock(lock_);
    allocating_buffers_state_ = false;
  }

  void PauseUntilAllocatingBuffers() {
    base::AutoLock lock(lock_);
    while (!allocating_buffers_state_)
      allocating_buffers_.Wait();
    // NOTE(review): resets waiting_for_buffer_to_be_recycled_state_ rather
    // than allocating_buffers_state_ — looks like a copy/paste slip; confirm.
    waiting_for_buffer_to_be_recycled_state_ = false;
  }

  size_t buffer_requests_waiting_for_recycle() {
    base::AutoLock lock(lock_);
    return buffer_requests_waiting_for_recycle_;
  }

  // Signals the test that the session has started waiting on back-pressure.
  virtual void OnWaitingForBufferToBeRecycled() override {
    lock_.AssertAcquired();
    waiting_for_buffer_to_be_recycled_state_ = true;
    waiting_for_buffer_to_be_recycled_.Signal();
  }

  // Records and signals destruction of a singleton (oversized) buffer.
  virtual void OnDestroySingletonBuffer(Buffer* buffer) override {
    lock_.AssertAcquired();
    last_singleton_buffer_destroyed_ = buffer;
    singleton_buffers_destroyed_++;
    destroying_singleton_buffer_state_ = true;
    destroying_singleton_buffer_.Signal();
  }

  bool InitializeProcessInfo(ProcessId process_id,
                             ProcessInfo* client) override {
    DCHECK(client != NULL);

    // Lobotomize the process info initialization to allow using fake PIDs.
    client->process_id = process_id;
    const DWORD kFlags =
        PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
    client->process_handle.Set(
        ::OpenProcess(kFlags, FALSE, ::GetCurrentProcessId()));
    static const wchar_t kEnvironment[] = L"asdf=fofofo\0";
    client->environment.assign(kEnvironment,
                               kEnvironment + arraysize(kEnvironment));

    return true;
  }

  bool CopyBufferHandleToClient(HANDLE client_process_handle,
                                HANDLE local_handle,
                                HANDLE* client_copy) override {
    // Avoid handle leaks by using the same handle for both "ends".
    *client_copy = local_handle;
    return true;
  }

  // Signals the test that an allocation happened, then defers to the base.
  virtual bool AllocateBuffers(size_t count, size_t size) override {
    lock_.AssertAcquired();

    allocating_buffers_state_ = true;
    allocating_buffers_.Signal();

    // Forward this to the original implementation.
    return Session::AllocateBuffers(count, size);
  }

  // Under lock_.
  base::ConditionVariable waiting_for_buffer_to_be_recycled_;
  bool waiting_for_buffer_to_be_recycled_state_;

  // Under lock_.
  base::ConditionVariable destroying_singleton_buffer_;
  bool destroying_singleton_buffer_state_;
  Buffer* last_singleton_buffer_destroyed_;
  size_t singleton_buffers_destroyed_;

  // Under lock_.
  base::ConditionVariable allocating_buffers_;
  bool allocating_buffers_state_;
};

typedef scoped_refptr<TestSession> TestSessionPtr;

// Service subclass that mints TestSession instances keyed by fake PIDs.
class TestService : public Service {
 public:
  explicit TestService(BufferConsumerFactory* factory)
      : Service(factory),
        process_id_(0xfafafa) {
  }

  TestSessionPtr CreateTestSession() {
    scoped_refptr<Session> session;
    if (!GetNewSession(++process_id_, &session))
      return NULL;

    return TestSessionPtr(static_cast<TestSession*>(session.get()));
  }

  size_t num_active_sessions() const { return num_active_sessions_; }

 protected:
  virtual Session* CreateSession() override {
    return new TestSession(this);
  }

 private:
  uint32 process_id_;  // Under lock_;
};

// Fixture: spins up the consumer thread, the test call-trace service with a
// per-process "unique" instance id, a temp trace directory, and two worker
// threads used to issue blocking buffer requests off the main thread.
class SessionTest : public ::testing::Test {
 public:
  SessionTest()
      : consumer_thread_("session-test-consumer-thread"),
        consumer_thread_has_started_(
            consumer_thread_.StartWithOptions(
                base::Thread::Options(base::MessageLoop::TYPE_IO, 0))),
        session_trace_file_writer_factory_(consumer_thread_.message_loop()),
        call_trace_service_(&session_trace_file_writer_factory_),
        rpc_service_instance_manager_(&call_trace_service_),
        worker1_("Worker1"),
        worker2_("Worker2") {
  }

  virtual void SetUp() override {
    testing::Test::SetUp();

    ASSERT_TRUE(consumer_thread_has_started_);
    EXPECT_EQ(0, call_trace_service_.num_active_sessions());
    EXPECT_EQ(0, TestSessionTraceFileWriter::num_instances());

    // Setup the buffer management to make it easy to force buffer contention.
    call_trace_service_.set_num_incremental_buffers(2);
    call_trace_service_.set_buffer_size_in_bytes(8192);

    // Create a temporary directory for the call trace files.
    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
    ASSERT_TRUE(session_trace_file_writer_factory_.SetTraceFileDirectory(
        temp_dir_.path()));

    // We give the service instance a "unique" id so that it does not interfere
    // with any other instances or tests that might be concurrently active.
    std::string instance_id(base::StringPrintf("%d", ::GetCurrentProcessId()));
    call_trace_service_.set_instance_id(base::UTF8ToWide(instance_id));

    // The instance id needs to be in the environment to be picked up by the
    // client library. We prefix the existing environment variable, if any.
    scoped_ptr<base::Environment> env(base::Environment::Create());
    ASSERT_FALSE(env.get() == NULL);
    std::string env_var;
    env->GetVar(::kSyzygyRpcInstanceIdEnvVar, &env_var);
    env_var.insert(0, ";");
    env_var.insert(0, instance_id);
    ASSERT_TRUE(env->SetVar(::kSyzygyRpcInstanceIdEnvVar, env_var));

    // Start our worker threads so we can use them later.
    ASSERT_TRUE(worker1_.Start());
    ASSERT_TRUE(worker2_.Start());
  }

  virtual void TearDown() override {
    // Stop the worker threads.
    worker2_.Stop();
    worker1_.Stop();

    // Stop the call trace service.
    EXPECT_TRUE(call_trace_service_.Stop());
    EXPECT_FALSE(call_trace_service_.is_running());
    EXPECT_EQ(0, call_trace_service_.num_active_sessions());
    EXPECT_EQ(0, TestSessionTraceFileWriter::num_instances());
  }

 protected:
  // The thread on which the trace file writer will consumer buffers and a
  // helper variable whose initialization we use as a trigger to start the
  // thread (ensuring it's message_loop is created). These declarations MUST
  // remain in this order and preceed that of trace_file_writer_factory_;
  base::Thread consumer_thread_;
  bool consumer_thread_has_started_;

  // The call trace service related objects. These declarations MUST be in
  // this order.
  TestSessionTraceFileWriterFactory session_trace_file_writer_factory_;
  TestService call_trace_service_;
  RpcServiceInstanceManager rpc_service_instance_manager_;

  // The directory where trace file output will be written.
  base::ScopedTempDir temp_dir_;

  // A couple of worker threads where we can dispatch closures.
  base::Thread worker1_;
  base::Thread worker2_;
};

// Helper run on a worker thread: requests a buffer from |session| and stores
// the outcome in |*buffer| / |*result|.
void GetNextBuffer(Session* session, Buffer** buffer, bool* result) {
  DCHECK(session != NULL);
  DCHECK(buffer != NULL);
  DCHECK(result != NULL);
  *buffer = NULL;
  *result = session->GetNextBuffer(buffer);
}

}  // namespace

TEST_F(SessionTest, ReturnBufferWorksAfterSessionClose) {
  ASSERT_TRUE(call_trace_service_.Start(true));

  TestSessionPtr session = call_trace_service_.CreateTestSession();
  ASSERT_TRUE(session != NULL);

  Buffer* buffer1 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer1));
  ASSERT_TRUE(buffer1 != NULL);

  ASSERT_TRUE(session->Close());

  // Closing the session should have forced all buffers to be submitted to
  // the write queue.
  ASSERT_EQ(Buffer::kPendingWrite, buffer1->state);

  // A request for another buffer should fail.
  Buffer* buffer2 = NULL;
  ASSERT_FALSE(session->GetNextBuffer(&buffer2));
  ASSERT_TRUE(buffer2 == NULL);

  // Returning the original buffer should be a noop, but it should succeed.
  // Most of all, it shouldn't cause a race condition.
  ASSERT_TRUE(session->ReturnBuffer(buffer1));

  // Let's allow the outstanding buffers to be written.
  session->AllowBuffersToBeRecycled(9999);
}

TEST_F(SessionTest, BackPressureWorks) {
  // Configure things so that back-pressure will be easily forced.
  call_trace_service_.set_max_buffers_pending_write(1);
  ASSERT_TRUE(call_trace_service_.Start(true));

  TestSessionPtr session = call_trace_service_.CreateTestSession();
  ASSERT_TRUE(session != NULL);

  Buffer* buffer1 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer1));
  ASSERT_TRUE(buffer1 != NULL);

  Buffer* buffer2 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer2));
  ASSERT_TRUE(buffer2 != NULL);

  // Return both buffers so we have 2 pending writes. Neither of these will
  // go through because we have not allowed any buffers to be written yet.
  ASSERT_TRUE(session->ReturnBuffer(buffer1));
  ASSERT_TRUE(session->ReturnBuffer(buffer2));

  // We don't care about events up until this point.
  session->ClearWaitingForBufferToBeRecycledState();

  // Start the buffer getter. This launches another thread that will try to
  // get another buffer. This will be blocked because of the pending writes.
  bool result3 = false;
  Buffer* buffer3 = NULL;
  base::Closure buffer_getter3 = base::Bind(
      &GetNextBuffer, session, &buffer3, &result3);
  worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);

  // Wait for the session to start applying back-pressure. This occurs when it
  // has indicated that it is waiting for a buffer to be written.
  session->PauseUntilWaitingForBufferToBeRecycled();

  // Allow a single buffer to be written.
  session->AllowBuffersToBeRecycled(1);

  // Wait for the buffer getter to complete.
  worker1_.Stop();

  // Ensure the buffer was a recycled forced wait.
  ASSERT_TRUE(result3);
  ASSERT_EQ(buffer1, buffer3);

  // Return the last buffer and allow everything to be written.
  ASSERT_TRUE(session->ReturnBuffer(buffer3));
  session->AllowBuffersToBeRecycled(9999);
}

TEST_F(SessionTest, BackPressureIsLimited) {
  // Configure things so that back-pressure will be easily forced.
  call_trace_service_.set_max_buffers_pending_write(1);
  ASSERT_TRUE(call_trace_service_.Start(true));

  TestSessionPtr session = call_trace_service_.CreateTestSession();
  ASSERT_TRUE(session != NULL);

  Buffer* buffer1 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer1));
  ASSERT_TRUE(buffer1 != NULL);

  Buffer* buffer2 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer2));
  ASSERT_TRUE(buffer2 != NULL);

  // Return both buffers so we have 2 pending writes. Neither of these will
  // go through because we have not allowed any buffers to be written yet.
  ASSERT_TRUE(session->ReturnBuffer(buffer1));
  ASSERT_TRUE(session->ReturnBuffer(buffer2));

  // Since the back-pressure threshold is 1 and we have 2 pending buffers
  // if 1 is recycled it will bring us below the back-pressure threshold. Thus
  // if we pile on a lot of buffer requests, only the first one should apply
  // back-pressure, and the next ones should cause an allocation.

  // We don't care about events up until this point.
  session->ClearWaitingForBufferToBeRecycledState();
  session->ClearAllocatingBuffersState();

  bool result3 = false;
  Buffer* buffer3 = NULL;
  base::Closure buffer_getter3 = base::Bind(
      &GetNextBuffer, session, &buffer3, &result3);
  worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);

  // Wait for the session to start applying back-pressure. This occurs when it
  // has indicated that it is waiting for a buffer to be written.
  session->PauseUntilWaitingForBufferToBeRecycled();

  // At this point, there should be only one getter applying back pressure.
  ASSERT_EQ(1u, session->buffer_requests_waiting_for_recycle());

  // Allocate yet another buffer on a new thread, this will force an allocation
  // which in turn will satisfy as many waits as there are buffers allocated.
  bool result4 = false;
  Buffer* buffer4 = NULL;
  base::Closure buffer_getter4 = base::Bind(
      &GetNextBuffer, session, &buffer4, &result4);
  worker2_.message_loop()->PostTask(FROM_HERE, buffer_getter4);

  // Similarly, wait for an allocation. The second buffer getter should cause
  // one to occur.
  session->PauseUntilAllocatingBuffers();

  // Allow a single buffer to be written.
  session->AllowBuffersToBeRecycled(1);

  // Wait for the buffer getters to complete.
  worker1_.Stop();
  worker2_.Stop();
  ASSERT_TRUE(result3);
  ASSERT_TRUE(result4);

  // We can't guarantee where the returned buffers come from (recycled or
  // not), just that they should be returned.
  ASSERT_TRUE(buffer3 != NULL);
  ASSERT_TRUE(buffer4 != NULL);

  // Return the last 2 buffers and allow everything to be written.
  ASSERT_TRUE(session->ReturnBuffer(buffer3));
  ASSERT_TRUE(session->ReturnBuffer(buffer4));
  session->AllowBuffersToBeRecycled(9999);
}

TEST_F(SessionTest, LargeBufferRequestAvoidsBackPressure) {
  // Configure things so that back-pressure will be easily forced.
  call_trace_service_.set_max_buffers_pending_write(1);
  ASSERT_TRUE(call_trace_service_.Start(true));

  TestSessionPtr session = call_trace_service_.CreateTestSession();
  ASSERT_TRUE(session != NULL);

  Buffer* buffer1 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer1));
  ASSERT_TRUE(buffer1 != NULL);

  Buffer* buffer2 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer2));
  ASSERT_TRUE(buffer2 != NULL);

  // Return both buffers so we have 2 pending writes. Neither of these will
  // go through because we have not allowed any buffers to be written yet.
  ASSERT_TRUE(session->ReturnBuffer(buffer1));
  ASSERT_TRUE(session->ReturnBuffer(buffer2));

  // Ask for a big buffer. This should go through immediately and side-step the
  // usual buffer pool. Thus, it is not subject to back-pressure.
  Buffer* buffer3 = NULL;
  ASSERT_TRUE(session->GetBuffer(10 * 1024 * 1024, &buffer3));
  ASSERT_EQ(10u * 1024 * 1024, buffer3->mapping_size);
  ASSERT_EQ(10u * 1024 * 1024, buffer3->buffer_size);
  ASSERT_EQ(0u, buffer3->buffer_offset);

  // Return the buffer and allow them all to be recycled.
  ASSERT_TRUE(session->ReturnBuffer(buffer3));
  session->AllowBuffersToBeRecycled(9999);

  // Wait until the singleton buffer has been destroyed.
  session->PauseUntilDestroyingSingletonBuffer();
  ASSERT_EQ(1, session->singleton_buffers_destroyed_);
  ASSERT_EQ(buffer3, session->last_singleton_buffer_destroyed_);
}

}  // namespace service
}  // namespace trace
// This file is auto-generated, don't edit it. Thanks. #include <alibabacloud/quickbi_public_20220101.hpp> #include <alibabacloud/endpoint_util.hpp> #include <alibabacloud/open_api.hpp> #include <alibabacloud/open_api_util.hpp> #include <boost/any.hpp> #include <boost/throw_exception.hpp> #include <darabonba/core.hpp> #include <darabonba/util.hpp> #include <iostream> #include <map> #include <vector> using namespace std; using namespace Alibabacloud_Quickbi-public20220101; Alibabacloud_Quickbi-public20220101::Client::Client(const shared_ptr<Alibabacloud_OpenApi::Config>& config) : Alibabacloud_OpenApi::Client(config) { _endpointRule = make_shared<string>(""); checkConfig(config); _endpoint = make_shared<string>(getEndpoint(make_shared<string>("quickbi-public"), _regionId, _endpointRule, _network, _suffix, _endpointMap, _endpoint)); }; string Alibabacloud_Quickbi-public20220101::Client::getEndpoint(shared_ptr<string> productId, shared_ptr<string> regionId, shared_ptr<string> endpointRule, shared_ptr<string> network, shared_ptr<string> suffix, shared_ptr<map<string, string>> endpointMap, shared_ptr<string> endpoint) { if (!Darabonba_Util::Client::empty(endpoint)) { return *endpoint; } if (!Darabonba_Util::Client::isUnset<map<string, string>>(endpointMap) && !Darabonba_Util::Client::empty(make_shared<string>((*endpointMap)[regionId]))) { return (*endpointMap)[regionId]; } return Alibabacloud_EndpointUtil::Client::getEndpointRules(productId, regionId, endpointRule, network, suffix); } AddDataLevelPermissionRuleUsersResponse Alibabacloud_Quickbi-public20220101::Client::addDataLevelPermissionRuleUsersWithOptions(shared_ptr<AddDataLevelPermissionRuleUsersRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->addUserModel)) { 
query->insert(pair<string, string>("AddUserModel", *request->addUserModel)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddDataLevelPermissionRuleUsers"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddDataLevelPermissionRuleUsersResponse(callApi(params, req, runtime)); } AddDataLevelPermissionRuleUsersResponse Alibabacloud_Quickbi-public20220101::Client::addDataLevelPermissionRuleUsers(shared_ptr<AddDataLevelPermissionRuleUsersRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addDataLevelPermissionRuleUsersWithOptions(request, runtime); } AddDataLevelPermissionWhiteListResponse Alibabacloud_Quickbi-public20220101::Client::addDataLevelPermissionWhiteListWithOptions(shared_ptr<AddDataLevelPermissionWhiteListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->cubeId)) { query->insert(pair<string, string>("CubeId", *request->cubeId)); } if (!Darabonba_Util::Client::isUnset<string>(request->operateType)) { query->insert(pair<string, string>("OperateType", *request->operateType)); } if (!Darabonba_Util::Client::isUnset<string>(request->ruleType)) { 
query->insert(pair<string, string>("RuleType", *request->ruleType)); } if (!Darabonba_Util::Client::isUnset<string>(request->targetIds)) { query->insert(pair<string, string>("TargetIds", *request->targetIds)); } if (!Darabonba_Util::Client::isUnset<string>(request->targetType)) { query->insert(pair<string, string>("TargetType", *request->targetType)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddDataLevelPermissionWhiteList"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddDataLevelPermissionWhiteListResponse(callApi(params, req, runtime)); } AddDataLevelPermissionWhiteListResponse Alibabacloud_Quickbi-public20220101::Client::addDataLevelPermissionWhiteList(shared_ptr<AddDataLevelPermissionWhiteListRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addDataLevelPermissionWhiteListWithOptions(request, runtime); } AddShareReportResponse Alibabacloud_Quickbi-public20220101::Client::addShareReportWithOptions(shared_ptr<AddShareReportRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<long>(request->authPoint)) { query->insert(pair<string, long>("AuthPoint", 
*request->authPoint)); } if (!Darabonba_Util::Client::isUnset<long>(request->expireDate)) { query->insert(pair<string, long>("ExpireDate", *request->expireDate)); } if (!Darabonba_Util::Client::isUnset<string>(request->shareToId)) { query->insert(pair<string, string>("ShareToId", *request->shareToId)); } if (!Darabonba_Util::Client::isUnset<long>(request->shareToType)) { query->insert(pair<string, long>("ShareToType", *request->shareToType)); } if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) { query->insert(pair<string, string>("WorksId", *request->worksId)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddShareReport"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddShareReportResponse(callApi(params, req, runtime)); } AddShareReportResponse Alibabacloud_Quickbi-public20220101::Client::addShareReport(shared_ptr<AddShareReportRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addShareReportWithOptions(request, runtime); } AddUserResponse Alibabacloud_Quickbi-public20220101::Client::addUserWithOptions(shared_ptr<AddUserRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if 
(!Darabonba_Util::Client::isUnset<string>(request->accountName)) { query->insert(pair<string, string>("AccountName", *request->accountName)); } if (!Darabonba_Util::Client::isUnset<bool>(request->adminUser)) { query->insert(pair<string, bool>("AdminUser", *request->adminUser)); } if (!Darabonba_Util::Client::isUnset<bool>(request->authAdminUser)) { query->insert(pair<string, bool>("AuthAdminUser", *request->authAdminUser)); } if (!Darabonba_Util::Client::isUnset<string>(request->nickName)) { query->insert(pair<string, string>("NickName", *request->nickName)); } if (!Darabonba_Util::Client::isUnset<long>(request->userType)) { query->insert(pair<string, long>("UserType", *request->userType)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddUser"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddUserResponse(callApi(params, req, runtime)); } AddUserResponse Alibabacloud_Quickbi-public20220101::Client::addUser(shared_ptr<AddUserRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addUserWithOptions(request, runtime); } AddUserGroupMemberResponse Alibabacloud_Quickbi-public20220101::Client::addUserGroupMemberWithOptions(shared_ptr<AddUserGroupMemberRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, 
boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->userGroupId)) { query->insert(pair<string, string>("UserGroupId", *request->userGroupId)); } if (!Darabonba_Util::Client::isUnset<string>(request->userIdList)) { query->insert(pair<string, string>("UserIdList", *request->userIdList)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddUserGroupMember"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddUserGroupMemberResponse(callApi(params, req, runtime)); } AddUserGroupMemberResponse Alibabacloud_Quickbi-public20220101::Client::addUserGroupMember(shared_ptr<AddUserGroupMemberRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addUserGroupMemberWithOptions(request, runtime); } AddUserGroupMembersResponse Alibabacloud_Quickbi-public20220101::Client::addUserGroupMembersWithOptions(shared_ptr<AddUserGroupMembersRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->userGroupIds)) { query->insert(pair<string, string>("UserGroupIds", *request->userGroupIds)); } if 
(!Darabonba_Util::Client::isUnset<string>(request->userId)) { query->insert(pair<string, string>("UserId", *request->userId)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddUserGroupMembers"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddUserGroupMembersResponse(callApi(params, req, runtime)); } AddUserGroupMembersResponse Alibabacloud_Quickbi-public20220101::Client::addUserGroupMembers(shared_ptr<AddUserGroupMembersRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addUserGroupMembersWithOptions(request, runtime); } AddUserTagMetaResponse Alibabacloud_Quickbi-public20220101::Client::addUserTagMetaWithOptions(shared_ptr<AddUserTagMetaRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->tagDescription)) { query->insert(pair<string, string>("TagDescription", *request->tagDescription)); } if (!Darabonba_Util::Client::isUnset<string>(request->tagName)) { query->insert(pair<string, string>("TagName", *request->tagName)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ 
{"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddUserTagMeta"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddUserTagMetaResponse(callApi(params, req, runtime)); } AddUserTagMetaResponse Alibabacloud_Quickbi-public20220101::Client::addUserTagMeta(shared_ptr<AddUserTagMetaRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addUserTagMetaWithOptions(request, runtime); } AddUserToWorkspaceResponse Alibabacloud_Quickbi-public20220101::Client::addUserToWorkspaceWithOptions(shared_ptr<AddUserToWorkspaceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<long>(request->roleId)) { query->insert(pair<string, long>("RoleId", *request->roleId)); } if (!Darabonba_Util::Client::isUnset<string>(request->userId)) { query->insert(pair<string, string>("UserId", *request->userId)); } if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) { query->insert(pair<string, string>("WorkspaceId", *request->workspaceId)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = 
make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddUserToWorkspace"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddUserToWorkspaceResponse(callApi(params, req, runtime)); } AddUserToWorkspaceResponse Alibabacloud_Quickbi-public20220101::Client::addUserToWorkspace(shared_ptr<AddUserToWorkspaceRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addUserToWorkspaceWithOptions(request, runtime); } AddWorkspaceUsersResponse Alibabacloud_Quickbi-public20220101::Client::addWorkspaceUsersWithOptions(shared_ptr<AddWorkspaceUsersRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<long>(request->roleId)) { query->insert(pair<string, long>("RoleId", *request->roleId)); } if (!Darabonba_Util::Client::isUnset<string>(request->userIds)) { query->insert(pair<string, string>("UserIds", *request->userIds)); } if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) { query->insert(pair<string, string>("WorkspaceId", *request->workspaceId)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AddWorkspaceUsers"))}, {"version", 
boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AddWorkspaceUsersResponse(callApi(params, req, runtime)); } AddWorkspaceUsersResponse Alibabacloud_Quickbi-public20220101::Client::addWorkspaceUsers(shared_ptr<AddWorkspaceUsersRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return addWorkspaceUsersWithOptions(request, runtime); } AuthorizeMenuResponse Alibabacloud_Quickbi-public20220101::Client::authorizeMenuWithOptions(shared_ptr<AuthorizeMenuRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<long>(request->authPointsValue)) { query->insert(pair<string, long>("AuthPointsValue", *request->authPointsValue)); } if (!Darabonba_Util::Client::isUnset<string>(request->dataPortalId)) { query->insert(pair<string, string>("DataPortalId", *request->dataPortalId)); } if (!Darabonba_Util::Client::isUnset<string>(request->menuIds)) { query->insert(pair<string, string>("MenuIds", *request->menuIds)); } if (!Darabonba_Util::Client::isUnset<string>(request->userGroupIds)) { query->insert(pair<string, string>("UserGroupIds", *request->userGroupIds)); } if (!Darabonba_Util::Client::isUnset<string>(request->userIds)) { query->insert(pair<string, string>("UserIds", *request->userIds)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); 
shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("AuthorizeMenu"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return AuthorizeMenuResponse(callApi(params, req, runtime)); } AuthorizeMenuResponse Alibabacloud_Quickbi-public20220101::Client::authorizeMenu(shared_ptr<AuthorizeMenuRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return authorizeMenuWithOptions(request, runtime); } CancelAuthorizationMenuResponse Alibabacloud_Quickbi-public20220101::Client::cancelAuthorizationMenuWithOptions(shared_ptr<CancelAuthorizationMenuRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->dataPortalId)) { query->insert(pair<string, string>("DataPortalId", *request->dataPortalId)); } if (!Darabonba_Util::Client::isUnset<string>(request->menuIds)) { query->insert(pair<string, string>("MenuIds", *request->menuIds)); } if (!Darabonba_Util::Client::isUnset<string>(request->userGroupIds)) { query->insert(pair<string, string>("UserGroupIds", *request->userGroupIds)); } if (!Darabonba_Util::Client::isUnset<string>(request->userIds)) { query->insert(pair<string, string>("UserIds", *request->userIds)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} 
})); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("CancelAuthorizationMenu"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return CancelAuthorizationMenuResponse(callApi(params, req, runtime)); } CancelAuthorizationMenuResponse Alibabacloud_Quickbi-public20220101::Client::cancelAuthorizationMenu(shared_ptr<CancelAuthorizationMenuRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return cancelAuthorizationMenuWithOptions(request, runtime); } CancelCollectionResponse Alibabacloud_Quickbi-public20220101::Client::cancelCollectionWithOptions(shared_ptr<CancelCollectionRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->userId)) { query->insert(pair<string, string>("UserId", *request->userId)); } if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) { query->insert(pair<string, string>("WorksId", *request->worksId)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("CancelCollection"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", 
boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return CancelCollectionResponse(callApi(params, req, runtime)); } CancelCollectionResponse Alibabacloud_Quickbi-public20220101::Client::cancelCollection(shared_ptr<CancelCollectionRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return cancelCollectionWithOptions(request, runtime); } CancelReportShareResponse Alibabacloud_Quickbi-public20220101::Client::cancelReportShareWithOptions(shared_ptr<CancelReportShareRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->reportId)) { query->insert(pair<string, string>("ReportId", *request->reportId)); } if (!Darabonba_Util::Client::isUnset<string>(request->shareToIds)) { query->insert(pair<string, string>("ShareToIds", *request->shareToIds)); } if (!Darabonba_Util::Client::isUnset<long>(request->shareToType)) { query->insert(pair<string, long>("ShareToType", *request->shareToType)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("CancelReportShare"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", 
boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return CancelReportShareResponse(callApi(params, req, runtime)); } CancelReportShareResponse Alibabacloud_Quickbi-public20220101::Client::cancelReportShare(shared_ptr<CancelReportShareRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return cancelReportShareWithOptions(request, runtime); } ChangeVisibilityModelResponse Alibabacloud_Quickbi-public20220101::Client::changeVisibilityModelWithOptions(shared_ptr<ChangeVisibilityModelRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->dataPortalId)) { query->insert(pair<string, string>("DataPortalId", *request->dataPortalId)); } if (!Darabonba_Util::Client::isUnset<string>(request->menuIds)) { query->insert(pair<string, string>("MenuIds", *request->menuIds)); } if (!Darabonba_Util::Client::isUnset<bool>(request->showOnlyWithAccess)) { query->insert(pair<string, bool>("ShowOnlyWithAccess", *request->showOnlyWithAccess)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("ChangeVisibilityModel"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", 
boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return ChangeVisibilityModelResponse(callApi(params, req, runtime)); } ChangeVisibilityModelResponse Alibabacloud_Quickbi-public20220101::Client::changeVisibilityModel(shared_ptr<ChangeVisibilityModelRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return changeVisibilityModelWithOptions(request, runtime); } CheckReadableResponse Alibabacloud_Quickbi-public20220101::Client::checkReadableWithOptions(shared_ptr<CheckReadableRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->userId)) { query->insert(pair<string, string>("UserId", *request->userId)); } if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) { query->insert(pair<string, string>("WorksId", *request->worksId)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("CheckReadable"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return CheckReadableResponse(callApi(params, req, runtime)); } CheckReadableResponse Alibabacloud_Quickbi-public20220101::Client::checkReadable(shared_ptr<CheckReadableRequest> request) { 
// NOTE(review): the qualified name "Alibabacloud_Quickbi-public20220101"
// contains '-', which is not a legal C++ identifier character -- this looks
// like a generator/extraction artifact; confirm against the rest of the file.

// Tail of the checkReadable(request) convenience overload (its signature is on
// an earlier line, outside this chunk): build default runtime options and
// delegate to checkReadableWithOptions().
shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return checkReadableWithOptions(request, runtime);
}

/**
 * Invokes the Quick BI "CreateTicket" API (version 2022-01-01): HTTPS POST to
 * "/", RPC style, AK auth, form-data request body, JSON response body.
 * Every request field that is set is copied into the query-string map;
 * unset fields are omitted.
 *
 * @param request CreateTicket parameters (validated via validateModel first).
 * @param runtime runtime options forwarded to callApi().
 * @return the parsed CreateTicketResponse.
 */
CreateTicketResponse Alibabacloud_Quickbi-public20220101::Client::createTicketWithOptions(shared_ptr<CreateTicketRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  // Serialize only the fields that are actually set on the request.
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->accountName)) {
    query->insert(pair<string, string>("AccountName", *request->accountName));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->accountType)) {
    query->insert(pair<string, long>("AccountType", *request->accountType));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->cmptId)) {
    query->insert(pair<string, string>("CmptId", *request->cmptId));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->expireTime)) {
    query->insert(pair<string, long>("ExpireTime", *request->expireTime));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->globalParam)) {
    query->insert(pair<string, string>("GlobalParam", *request->globalParam));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->ticketNum)) {
    query->insert(pair<string, long>("TicketNum", *request->ticketNum));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->watermarkParam)) {
    query->insert(pair<string, string>("WatermarkParam", *request->watermarkParam));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) {
    query->insert(pair<string, string>("WorksId", *request->worksId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("CreateTicket"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return CreateTicketResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
CreateTicketResponse Alibabacloud_Quickbi-public20220101::Client::createTicket(shared_ptr<CreateTicketRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createTicketWithOptions(request, runtime);
}

// "CreateUserGroup" API: copies each set request field into the query map and
// dispatches an RPC-style POST via callApi().
CreateUserGroupResponse Alibabacloud_Quickbi-public20220101::Client::createUserGroupWithOptions(shared_ptr<CreateUserGroupRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->parentUserGroupId)) {
    query->insert(pair<string, string>("ParentUserGroupId", *request->parentUserGroupId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupDescription)) {
    query->insert(pair<string, string>("UserGroupDescription", *request->userGroupDescription));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupId)) {
    query->insert(pair<string, string>("UserGroupId", *request->userGroupId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupName)) {
    query->insert(pair<string, string>("UserGroupName", *request->userGroupName));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("CreateUserGroup"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return CreateUserGroupResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
CreateUserGroupResponse Alibabacloud_Quickbi-public20220101::Client::createUserGroup(shared_ptr<CreateUserGroupRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return createUserGroupWithOptions(request, runtime);
}

// "DelayTicketExpireTime" API: same generated pattern as above.
DelayTicketExpireTimeResponse Alibabacloud_Quickbi-public20220101::Client::delayTicketExpireTimeWithOptions(shared_ptr<DelayTicketExpireTimeRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<long>(request->expireTime)) {
    query->insert(pair<string, long>("ExpireTime", *request->expireTime));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->ticket)) {
    query->insert(pair<string, string>("Ticket", *request->ticket));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DelayTicketExpireTime"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DelayTicketExpireTimeResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DelayTicketExpireTimeResponse Alibabacloud_Quickbi-public20220101::Client::delayTicketExpireTime(shared_ptr<DelayTicketExpireTimeRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return delayTicketExpireTimeWithOptions(request, runtime);
}

// "DeleteDataLevelPermissionRuleUsers" API: same generated pattern as above.
DeleteDataLevelPermissionRuleUsersResponse Alibabacloud_Quickbi-public20220101::Client::deleteDataLevelPermissionRuleUsersWithOptions(shared_ptr<DeleteDataLevelPermissionRuleUsersRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->deleteUserModel)) {
    query->insert(pair<string, string>("DeleteUserModel", *request->deleteUserModel));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteDataLevelPermissionRuleUsers"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteDataLevelPermissionRuleUsersResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DeleteDataLevelPermissionRuleUsersResponse Alibabacloud_Quickbi-public20220101::Client::deleteDataLevelPermissionRuleUsers(shared_ptr<DeleteDataLevelPermissionRuleUsersRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteDataLevelPermissionRuleUsersWithOptions(request, runtime);
}

// "DeleteDataLevelRuleConfig" API: same generated pattern as above.
DeleteDataLevelRuleConfigResponse Alibabacloud_Quickbi-public20220101::Client::deleteDataLevelRuleConfigWithOptions(shared_ptr<DeleteDataLevelRuleConfigRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->cubeId)) {
    query->insert(pair<string, string>("CubeId", *request->cubeId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->ruleId)) {
    query->insert(pair<string, string>("RuleId", *request->ruleId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteDataLevelRuleConfig"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteDataLevelRuleConfigResponse(callApi(params, req, runtime));
}

// Return type of deleteDataLevelRuleConfig(); the remainder of that definition
// continues on the next original line.
DeleteDataLevelRuleConfigResponse
// NOTE(review): "Alibabacloud_Quickbi-public20220101" contains '-', which is
// not a legal C++ identifier character -- likely a generator/extraction
// artifact; confirm against the rest of the file.

// Continuation of deleteDataLevelRuleConfig(); its return type
// (DeleteDataLevelRuleConfigResponse) sits at the end of the previous original
// line. Convenience overload: default runtime options, then delegate.
Alibabacloud_Quickbi-public20220101::Client::deleteDataLevelRuleConfig(shared_ptr<DeleteDataLevelRuleConfigRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteDataLevelRuleConfigWithOptions(request, runtime);
}

/**
 * Invokes the Quick BI "DeleteTicket" API (version 2022-01-01): HTTPS POST to
 * "/", RPC style, AK auth, form-data request body, JSON response body.
 * Every request field that is set is copied into the query-string map.
 *
 * @param request DeleteTicket parameters (validated via validateModel first).
 * @param runtime runtime options forwarded to callApi().
 * @return the parsed DeleteTicketResponse.
 */
DeleteTicketResponse Alibabacloud_Quickbi-public20220101::Client::deleteTicketWithOptions(shared_ptr<DeleteTicketRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->ticket)) {
    query->insert(pair<string, string>("Ticket", *request->ticket));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteTicket"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteTicketResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DeleteTicketResponse Alibabacloud_Quickbi-public20220101::Client::deleteTicket(shared_ptr<DeleteTicketRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteTicketWithOptions(request, runtime);
}

// "DeleteUser" API: same generated pattern as above.
DeleteUserResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserWithOptions(shared_ptr<DeleteUserRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->transferUserId)) {
    query->insert(pair<string, string>("TransferUserId", *request->transferUserId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteUser"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteUserResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DeleteUserResponse Alibabacloud_Quickbi-public20220101::Client::deleteUser(shared_ptr<DeleteUserRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteUserWithOptions(request, runtime);
}

// "DeleteUserFromWorkspace" API: same generated pattern as above.
DeleteUserFromWorkspaceResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserFromWorkspaceWithOptions(shared_ptr<DeleteUserFromWorkspaceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) {
    query->insert(pair<string, string>("WorkspaceId", *request->workspaceId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteUserFromWorkspace"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteUserFromWorkspaceResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DeleteUserFromWorkspaceResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserFromWorkspace(shared_ptr<DeleteUserFromWorkspaceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteUserFromWorkspaceWithOptions(request, runtime);
}

// "DeleteUserGroup" API: same generated pattern as above.
DeleteUserGroupResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserGroupWithOptions(shared_ptr<DeleteUserGroupRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupId)) {
    query->insert(pair<string, string>("UserGroupId", *request->userGroupId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteUserGroup"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteUserGroupResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DeleteUserGroupResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserGroup(shared_ptr<DeleteUserGroupRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteUserGroupWithOptions(request, runtime);
}

// "DeleteUserGroupMember" API: same generated pattern as above.
DeleteUserGroupMemberResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserGroupMemberWithOptions(shared_ptr<DeleteUserGroupMemberRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupId)) {
    query->insert(pair<string, string>("UserGroupId", *request->userGroupId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteUserGroupMember"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteUserGroupMemberResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DeleteUserGroupMemberResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserGroupMember(shared_ptr<DeleteUserGroupMemberRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteUserGroupMemberWithOptions(request, runtime);
}

// "DeleteUserGroupMembers" API: same generated pattern as above.
DeleteUserGroupMembersResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserGroupMembersWithOptions(shared_ptr<DeleteUserGroupMembersRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupIds)) {
    query->insert(pair<string, string>("UserGroupIds", *request->userGroupIds));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteUserGroupMembers"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteUserGroupMembersResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DeleteUserGroupMembersResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserGroupMembers(shared_ptr<DeleteUserGroupMembersRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteUserGroupMembersWithOptions(request, runtime);
}

// "DeleteUserTagMeta" API: same generated pattern as above.
DeleteUserTagMetaResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserTagMetaWithOptions(shared_ptr<DeleteUserTagMetaRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->tagId)) {
    query->insert(pair<string, string>("TagId", *request->tagId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("DeleteUserTagMeta"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return DeleteUserTagMetaResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
DeleteUserTagMetaResponse Alibabacloud_Quickbi-public20220101::Client::deleteUserTagMeta(shared_ptr<DeleteUserTagMetaRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return deleteUserTagMetaWithOptions(request, runtime);
}

// Return type of getUserGroupInfoWithOptions(); the remainder of that
// definition continues on the next original line.
GetUserGroupInfoResponse
// NOTE(review): "Alibabacloud_Quickbi-public20220101" contains '-', which is
// not a legal C++ identifier character -- likely a generator/extraction
// artifact; confirm against the rest of the file.

// Continuation of getUserGroupInfoWithOptions(); its return type
// (GetUserGroupInfoResponse) sits at the end of the previous original line.
// "GetUserGroupInfo" API (2022-01-01): validates the request, copies each set
// field into the query map, and POSTs an RPC-style call via callApi().
Alibabacloud_Quickbi-public20220101::Client::getUserGroupInfoWithOptions(shared_ptr<GetUserGroupInfoRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->keyword)) {
    query->insert(pair<string, string>("Keyword", *request->keyword));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("GetUserGroupInfo"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return GetUserGroupInfoResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
GetUserGroupInfoResponse Alibabacloud_Quickbi-public20220101::Client::getUserGroupInfo(shared_ptr<GetUserGroupInfoRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return getUserGroupInfoWithOptions(request, runtime);
}

// "ListByUserGroupId" API: same generated pattern as above.
ListByUserGroupIdResponse Alibabacloud_Quickbi-public20220101::Client::listByUserGroupIdWithOptions(shared_ptr<ListByUserGroupIdRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupIds)) {
    query->insert(pair<string, string>("UserGroupIds", *request->userGroupIds));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListByUserGroupId"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListByUserGroupIdResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
ListByUserGroupIdResponse Alibabacloud_Quickbi-public20220101::Client::listByUserGroupId(shared_ptr<ListByUserGroupIdRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listByUserGroupIdWithOptions(request, runtime);
}

// "ListCollections" API: same generated pattern as above.
ListCollectionsResponse Alibabacloud_Quickbi-public20220101::Client::listCollectionsWithOptions(shared_ptr<ListCollectionsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListCollections"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListCollectionsResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
ListCollectionsResponse Alibabacloud_Quickbi-public20220101::Client::listCollections(shared_ptr<ListCollectionsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listCollectionsWithOptions(request, runtime);
}

// "ListCubeDataLevelPermissionConfig" API: same generated pattern as above.
ListCubeDataLevelPermissionConfigResponse Alibabacloud_Quickbi-public20220101::Client::listCubeDataLevelPermissionConfigWithOptions(shared_ptr<ListCubeDataLevelPermissionConfigRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->cubeId)) {
    query->insert(pair<string, string>("CubeId", *request->cubeId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->ruleType)) {
    query->insert(pair<string, string>("RuleType", *request->ruleType));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListCubeDataLevelPermissionConfig"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListCubeDataLevelPermissionConfigResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
ListCubeDataLevelPermissionConfigResponse Alibabacloud_Quickbi-public20220101::Client::listCubeDataLevelPermissionConfig(shared_ptr<ListCubeDataLevelPermissionConfigRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listCubeDataLevelPermissionConfigWithOptions(request, runtime);
}

// "ListDataLevelPermissionWhiteList" API: same generated pattern as above.
ListDataLevelPermissionWhiteListResponse Alibabacloud_Quickbi-public20220101::Client::listDataLevelPermissionWhiteListWithOptions(shared_ptr<ListDataLevelPermissionWhiteListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->cubeId)) {
    query->insert(pair<string, string>("CubeId", *request->cubeId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->ruleType)) {
    query->insert(pair<string, string>("RuleType", *request->ruleType));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListDataLevelPermissionWhiteList"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListDataLevelPermissionWhiteListResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
ListDataLevelPermissionWhiteListResponse Alibabacloud_Quickbi-public20220101::Client::listDataLevelPermissionWhiteList(shared_ptr<ListDataLevelPermissionWhiteListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listDataLevelPermissionWhiteListWithOptions(request, runtime);
}

// "ListFavoriteReports" API: same generated pattern as above.
ListFavoriteReportsResponse Alibabacloud_Quickbi-public20220101::Client::listFavoriteReportsWithOptions(shared_ptr<ListFavoriteReportsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->keyword)) {
    query->insert(pair<string, string>("Keyword", *request->keyword));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageSize)) {
    query->insert(pair<string, long>("PageSize", *request->pageSize));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->treeType)) {
    query->insert(pair<string, string>("TreeType", *request->treeType));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListFavoriteReports"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListFavoriteReportsResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
ListFavoriteReportsResponse Alibabacloud_Quickbi-public20220101::Client::listFavoriteReports(shared_ptr<ListFavoriteReportsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listFavoriteReportsWithOptions(request, runtime);
}

// "ListPortalMenuAuthorization" API: same generated pattern as above.
ListPortalMenuAuthorizationResponse Alibabacloud_Quickbi-public20220101::Client::listPortalMenuAuthorizationWithOptions(shared_ptr<ListPortalMenuAuthorizationRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->dataPortalId)) {
    query->insert(pair<string, string>("DataPortalId", *request->dataPortalId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListPortalMenuAuthorization"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListPortalMenuAuthorizationResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
ListPortalMenuAuthorizationResponse Alibabacloud_Quickbi-public20220101::Client::listPortalMenuAuthorization(shared_ptr<ListPortalMenuAuthorizationRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listPortalMenuAuthorizationWithOptions(request, runtime);
}

// "ListPortalMenus" API: same generated pattern as above.
ListPortalMenusResponse Alibabacloud_Quickbi-public20220101::Client::listPortalMenusWithOptions(shared_ptr<ListPortalMenusRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->dataPortalId)) {
    query->insert(pair<string, string>("DataPortalId", *request->dataPortalId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListPortalMenus"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListPortalMenusResponse(callApi(params, req, runtime));
}

// Convenience overload: default runtime options, then delegate.
ListPortalMenusResponse Alibabacloud_Quickbi-public20220101::Client::listPortalMenus(shared_ptr<ListPortalMenusRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listPortalMenusWithOptions(request, runtime);
}

// Return type of listRecentViewReportsWithOptions(); the remainder of that
// definition continues on the next original line.
ListRecentViewReportsResponse
Alibabacloud_Quickbi-public20220101::Client::listRecentViewReportsWithOptions(shared_ptr<ListRecentViewReportsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  // Only explicitly-set request fields are serialized into the query string.
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->keyword)) {
    query->insert(pair<string, string>("Keyword", *request->keyword));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->offsetDay)) {
    query->insert(pair<string, long>("OffsetDay", *request->offsetDay));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageSize)) {
    query->insert(pair<string, long>("PageSize", *request->pageSize));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->queryMode)) {
    query->insert(pair<string, string>("QueryMode", *request->queryMode));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->treeType)) {
    query->insert(pair<string, string>("TreeType", *request->treeType));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListRecentViewReports"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListRecentViewReportsResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
ListRecentViewReportsResponse
Alibabacloud_Quickbi-public20220101::Client::listRecentViewReports(shared_ptr<ListRecentViewReportsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listRecentViewReportsWithOptions(request, runtime);
}

// Issues the ListSharedReports RPC (2022-01-01) over HTTPS POST; unset
// request fields are omitted from the query.
ListSharedReportsResponse
Alibabacloud_Quickbi-public20220101::Client::listSharedReportsWithOptions(shared_ptr<ListSharedReportsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->keyword)) {
    query->insert(pair<string, string>("Keyword", *request->keyword));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageSize)) {
    query->insert(pair<string, long>("PageSize", *request->pageSize));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->treeType)) {
    query->insert(pair<string, string>("TreeType", *request->treeType));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListSharedReports"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListSharedReportsResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
ListSharedReportsResponse
Alibabacloud_Quickbi-public20220101::Client::listSharedReports(shared_ptr<ListSharedReportsRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listSharedReportsWithOptions(request, runtime);
}

// Issues the ListUserGroupsByUserId RPC (2022-01-01) over HTTPS POST.
ListUserGroupsByUserIdResponse
Alibabacloud_Quickbi-public20220101::Client::listUserGroupsByUserIdWithOptions(shared_ptr<ListUserGroupsByUserIdRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("ListUserGroupsByUserId"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return ListUserGroupsByUserIdResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
ListUserGroupsByUserIdResponse
Alibabacloud_Quickbi-public20220101::Client::listUserGroupsByUserId(shared_ptr<ListUserGroupsByUserIdRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return listUserGroupsByUserIdWithOptions(request, runtime);
}

QueryDataServiceResponse
Alibabacloud_Quickbi-public20220101::Client::queryDataServiceWithOptions(shared_ptr<QueryDataServiceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  // Only explicitly-set request fields are serialized into the query string.
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->apiId)) {
    query->insert(pair<string, string>("ApiId", *request->apiId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->conditions)) {
    query->insert(pair<string, string>("Conditions", *request->conditions));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->returnFields)) {
    query->insert(pair<string, string>("ReturnFields", *request->returnFields));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryDataService"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryDataServiceResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryDataServiceResponse
Alibabacloud_Quickbi-public20220101::Client::queryDataService(shared_ptr<QueryDataServiceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryDataServiceWithOptions(request, runtime);
}

// Issues the QueryDatasetDetailInfo RPC (2022-01-01) over HTTPS POST.
QueryDatasetDetailInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryDatasetDetailInfoWithOptions(shared_ptr<QueryDatasetDetailInfoRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->datasetId)) {
    query->insert(pair<string, string>("DatasetId", *request->datasetId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryDatasetDetailInfo"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryDatasetDetailInfoResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryDatasetDetailInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryDatasetDetailInfo(shared_ptr<QueryDatasetDetailInfoRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryDatasetDetailInfoWithOptions(request, runtime);
}

// Issues the QueryDatasetInfo RPC (2022-01-01) over HTTPS POST.
QueryDatasetInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryDatasetInfoWithOptions(shared_ptr<QueryDatasetInfoRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->datasetId)) {
    query->insert(pair<string, string>("DatasetId", *request->datasetId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryDatasetInfo"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryDatasetInfoResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryDatasetInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryDatasetInfo(shared_ptr<QueryDatasetInfoRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryDatasetInfoWithOptions(request, runtime);
}

// Issues the QueryDatasetList RPC (2022-01-01) over HTTPS POST; supports
// keyword/paging filters, all optional.
QueryDatasetListResponse
Alibabacloud_Quickbi-public20220101::Client::queryDatasetListWithOptions(shared_ptr<QueryDatasetListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->directoryId)) {
    query->insert(pair<string, string>("DirectoryId", *request->directoryId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->keyword)) {
    query->insert(pair<string, string>("Keyword", *request->keyword));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageNum)) {
    query->insert(pair<string, long>("PageNum", *request->pageNum));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageSize)) {
    query->insert(pair<string, long>("PageSize", *request->pageSize));
  }
  if (!Darabonba_Util::Client::isUnset<bool>(request->withChildren)) {
    query->insert(pair<string, bool>("WithChildren", *request->withChildren));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) {
    query->insert(pair<string, string>("WorkspaceId", *request->workspaceId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryDatasetList"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryDatasetListResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryDatasetListResponse
Alibabacloud_Quickbi-public20220101::Client::queryDatasetList(shared_ptr<QueryDatasetListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryDatasetListWithOptions(request, runtime);
}

// Issues the QueryDatasetSwitchInfo RPC (2022-01-01) over HTTPS POST.
QueryDatasetSwitchInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryDatasetSwitchInfoWithOptions(shared_ptr<QueryDatasetSwitchInfoRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->cubeId)) {
    query->insert(pair<string, string>("CubeId", *request->cubeId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryDatasetSwitchInfo"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryDatasetSwitchInfoResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryDatasetSwitchInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryDatasetSwitchInfo(shared_ptr<QueryDatasetSwitchInfoRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryDatasetSwitchInfoWithOptions(request, runtime);
}

// Issues the QueryEmbeddedInfo RPC (2022-01-01) over HTTPS POST.
// Takes no request model, so the OpenApiRequest is default-constructed.
QueryEmbeddedInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryEmbeddedInfoWithOptions(shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>();
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryEmbeddedInfo"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryEmbeddedInfoResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryEmbeddedInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryEmbeddedInfo() {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime =
make_shared<Darabonba_Util::RuntimeOptions>();
  return queryEmbeddedInfoWithOptions(runtime);
}

// Issues the QueryEmbeddedStaus RPC (2022-01-01) over HTTPS POST.
// NOTE: "Staus" (not "Status") is the literal action name defined by the
// service; do not "correct" the spelling or the call will fail.
QueryEmbeddedStausResponse
Alibabacloud_Quickbi-public20220101::Client::queryEmbeddedStausWithOptions(shared_ptr<QueryEmbeddedStausRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) {
    query->insert(pair<string, string>("WorksId", *request->worksId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryEmbeddedStaus"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryEmbeddedStausResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryEmbeddedStausResponse
Alibabacloud_Quickbi-public20220101::Client::queryEmbeddedStaus(shared_ptr<QueryEmbeddedStausRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryEmbeddedStausWithOptions(request, runtime);
}

// Issues the QueryOrganizationWorkspaceList RPC (2022-01-01) over HTTPS POST.
QueryOrganizationWorkspaceListResponse
Alibabacloud_Quickbi-public20220101::Client::queryOrganizationWorkspaceListWithOptions(shared_ptr<QueryOrganizationWorkspaceListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->keyword)) {
    query->insert(pair<string, string>("Keyword", *request->keyword));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageNum)) {
    query->insert(pair<string, long>("PageNum", *request->pageNum));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageSize)) {
    query->insert(pair<string, long>("PageSize", *request->pageSize));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryOrganizationWorkspaceList"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryOrganizationWorkspaceListResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryOrganizationWorkspaceListResponse
Alibabacloud_Quickbi-public20220101::Client::queryOrganizationWorkspaceList(shared_ptr<QueryOrganizationWorkspaceListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryOrganizationWorkspaceListWithOptions(request, runtime);
}

// Issues the QueryReadableResourcesListByUserId RPC (2022-01-01) over HTTPS POST.
QueryReadableResourcesListByUserIdResponse
Alibabacloud_Quickbi-public20220101::Client::queryReadableResourcesListByUserIdWithOptions(shared_ptr<QueryReadableResourcesListByUserIdRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryReadableResourcesListByUserId"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryReadableResourcesListByUserIdResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryReadableResourcesListByUserIdResponse
Alibabacloud_Quickbi-public20220101::Client::queryReadableResourcesListByUserId(shared_ptr<QueryReadableResourcesListByUserIdRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryReadableResourcesListByUserIdWithOptions(request, runtime);
}

// Issues the QueryShareList RPC (2022-01-01) over HTTPS POST.
QueryShareListResponse
Alibabacloud_Quickbi-public20220101::Client::queryShareListWithOptions(shared_ptr<QueryShareListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->reportId)) {
    query->insert(pair<string, string>("ReportId", *request->reportId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryShareList"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryShareListResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryShareListResponse
Alibabacloud_Quickbi-public20220101::Client::queryShareList(shared_ptr<QueryShareListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryShareListWithOptions(request, runtime);
}

// Issues the QuerySharesToUserList RPC (2022-01-01) over HTTPS POST.
QuerySharesToUserListResponse
Alibabacloud_Quickbi-public20220101::Client::querySharesToUserListWithOptions(shared_ptr<QuerySharesToUserListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QuerySharesToUserList"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QuerySharesToUserListResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QuerySharesToUserListResponse
Alibabacloud_Quickbi-public20220101::Client::querySharesToUserList(shared_ptr<QuerySharesToUserListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return querySharesToUserListWithOptions(request, runtime);
}

// Issues the QueryTicketInfo RPC (2022-01-01) over HTTPS POST.
QueryTicketInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryTicketInfoWithOptions(shared_ptr<QueryTicketInfoRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->ticket)) {
    query->insert(pair<string, string>("Ticket", *request->ticket));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryTicketInfo"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryTicketInfoResponse(callApi(params, req, runtime));
}

QueryTicketInfoResponse
Alibabacloud_Quickbi-public20220101::Client::queryTicketInfo(shared_ptr<QueryTicketInfoRequest> request) {
  // Convenience overload: queryTicketInfoWithOptions with default RuntimeOptions.
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryTicketInfoWithOptions(request, runtime);
}

// Issues the QueryUserGroupListByParentId RPC (2022-01-01) over HTTPS POST.
QueryUserGroupListByParentIdResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserGroupListByParentIdWithOptions(shared_ptr<QueryUserGroupListByParentIdRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->parentUserGroupId)) {
    query->insert(pair<string, string>("ParentUserGroupId", *request->parentUserGroupId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryUserGroupListByParentId"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryUserGroupListByParentIdResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryUserGroupListByParentIdResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserGroupListByParentId(shared_ptr<QueryUserGroupListByParentIdRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryUserGroupListByParentIdWithOptions(request, runtime);
}

// Issues the QueryUserGroupMember RPC (2022-01-01) over HTTPS POST.
QueryUserGroupMemberResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserGroupMemberWithOptions(shared_ptr<QueryUserGroupMemberRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->keyword)) {
    query->insert(pair<string, string>("Keyword", *request->keyword));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupId)) {
    query->insert(pair<string, string>("UserGroupId", *request->userGroupId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryUserGroupMember"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryUserGroupMemberResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryUserGroupMemberResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserGroupMember(shared_ptr<QueryUserGroupMemberRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryUserGroupMemberWithOptions(request, runtime);
}

// Issues the QueryUserInfoByAccount RPC (2022-01-01) over HTTPS POST.
QueryUserInfoByAccountResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserInfoByAccountWithOptions(shared_ptr<QueryUserInfoByAccountRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->account)) {
    query->insert(pair<string, string>("Account", *request->account));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryUserInfoByAccount"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryUserInfoByAccountResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryUserInfoByAccountResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserInfoByAccount(shared_ptr<QueryUserInfoByAccountRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryUserInfoByAccountWithOptions(request, runtime);
}

// Issues the QueryUserInfoByUserId RPC (2022-01-01) over HTTPS POST.
QueryUserInfoByUserIdResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserInfoByUserIdWithOptions(shared_ptr<QueryUserInfoByUserIdRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryUserInfoByUserId"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryUserInfoByUserIdResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryUserInfoByUserIdResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserInfoByUserId(shared_ptr<QueryUserInfoByUserIdRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryUserInfoByUserIdWithOptions(request, runtime);
}

// Issues the QueryUserList RPC (2022-01-01) over HTTPS GET.
// Unlike the POST methods above, the whole request model is flattened into
// the query map via toMap() rather than field-by-field isUnset checks.
QueryUserListResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserListWithOptions(shared_ptr<QueryUserListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, string>> query = make_shared<map<string, string>>(Alibabacloud_OpenApiUtil::Client::query(make_shared<map<string, boost::any>>(Darabonba_Util::Client::toMap(request))));
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryUserList"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("GET"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryUserListResponse(callApi(params, req, runtime));
}

// Convenience overload: same call with default RuntimeOptions.
QueryUserListResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserList(shared_ptr<QueryUserListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryUserListWithOptions(request, runtime);
}

// Issues the QueryUserRoleInfoInWorkspace RPC (2022-01-01) over HTTPS POST.
QueryUserRoleInfoInWorkspaceResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserRoleInfoInWorkspaceWithOptions(shared_ptr<QueryUserRoleInfoInWorkspaceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) {
    query->insert(pair<string, string>("WorkspaceId", *request->workspaceId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({
    {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))}
  }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({
    {"action", boost::any(string("QueryUserRoleInfoInWorkspace"))},
    {"version", boost::any(string("2022-01-01"))},
    {"protocol", boost::any(string("HTTPS"))},
    {"pathname", boost::any(string("/"))},
    {"method", boost::any(string("POST"))},
    {"authType", boost::any(string("AK"))},
    {"style", boost::any(string("RPC"))},
    {"reqBodyType", boost::any(string("formData"))},
    {"bodyType", boost::any(string("json"))}
  }));
  return QueryUserRoleInfoInWorkspaceResponse(callApi(params, req, runtime));
}

QueryUserRoleInfoInWorkspaceResponse
Alibabacloud_Quickbi-public20220101::Client::queryUserRoleInfoInWorkspace(shared_ptr<QueryUserRoleInfoInWorkspaceRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return queryUserRoleInfoInWorkspaceWithOptions(request, runtime); } QueryUserTagMetaListResponse Alibabacloud_Quickbi-public20220101::Client::queryUserTagMetaListWithOptions(shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("QueryUserTagMetaList"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("GET"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return QueryUserTagMetaListResponse(callApi(params, req, runtime)); } QueryUserTagMetaListResponse Alibabacloud_Quickbi-public20220101::Client::queryUserTagMetaList() { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return queryUserTagMetaListWithOptions(runtime); } QueryUserTagValueListResponse Alibabacloud_Quickbi-public20220101::Client::queryUserTagValueListWithOptions(shared_ptr<QueryUserTagValueListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, string>> query = make_shared<map<string, string>>(Alibabacloud_OpenApiUtil::Client::query(make_shared<map<string, boost::any>>(Darabonba_Util::Client::toMap(request)))); shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", 
boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  // Assemble RPC parameters and dispatch the QueryUserTagValueList call.
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("QueryUserTagValueList"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("GET"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return QueryUserTagValueListResponse(callApi(params, req, runtime));
}

// Convenience overload: QueryUserTagValueList with default runtime options.
QueryUserTagValueListResponse Alibabacloud_Quickbi-public20220101::Client::queryUserTagValueList(shared_ptr<QueryUserTagValueListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryUserTagValueListWithOptions(request, runtime);
}

// QueryWorks (HTTP POST): forwards WorksId when set.
QueryWorksResponse Alibabacloud_Quickbi-public20220101::Client::queryWorksWithOptions(shared_ptr<QueryWorksRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) {
    query->insert(pair<string, string>("WorksId", *request->worksId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("QueryWorks"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return QueryWorksResponse(callApi(params, req, runtime));
}

// Convenience overload: QueryWorks with default runtime options.
QueryWorksResponse Alibabacloud_Quickbi-public20220101::Client::queryWorks(shared_ptr<QueryWorksRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryWorksWithOptions(request, runtime);
}

// QueryWorksBloodRelationship (HTTP POST): forwards WorksId when set.
QueryWorksBloodRelationshipResponse Alibabacloud_Quickbi-public20220101::Client::queryWorksBloodRelationshipWithOptions(shared_ptr<QueryWorksBloodRelationshipRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) {
    query->insert(pair<string, string>("WorksId", *request->worksId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("QueryWorksBloodRelationship"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return QueryWorksBloodRelationshipResponse(callApi(params, req, runtime));
}

// Convenience overload: QueryWorksBloodRelationship with default runtime options.
QueryWorksBloodRelationshipResponse Alibabacloud_Quickbi-public20220101::Client::queryWorksBloodRelationship(shared_ptr<QueryWorksBloodRelationshipRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryWorksBloodRelationshipWithOptions(request, runtime);
}

// QueryWorksByOrganization (HTTP POST): optional paging/filter fields
// (PageNum, PageSize, Status, ThirdPartAuthFlag, WorksType) forwarded when set.
QueryWorksByOrganizationResponse Alibabacloud_Quickbi-public20220101::Client::queryWorksByOrganizationWithOptions(shared_ptr<QueryWorksByOrganizationRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<long>(request->pageNum)) {
    query->insert(pair<string, long>("PageNum", *request->pageNum));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageSize)) {
    query->insert(pair<string, long>("PageSize", *request->pageSize));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->status)) {
    query->insert(pair<string, long>("Status", *request->status));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->thirdPartAuthFlag)) {
    query->insert(pair<string, long>("ThirdPartAuthFlag", *request->thirdPartAuthFlag));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->worksType)) {
    query->insert(pair<string, string>("WorksType", *request->worksType));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("QueryWorksByOrganization"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return
QueryWorksByOrganizationResponse(callApi(params, req, runtime));
}

// Convenience overload: QueryWorksByOrganization with default runtime options.
QueryWorksByOrganizationResponse Alibabacloud_Quickbi-public20220101::Client::queryWorksByOrganization(shared_ptr<QueryWorksByOrganizationRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryWorksByOrganizationWithOptions(request, runtime);
}

// QueryWorksByWorkspace (HTTP POST): same optional filters as
// QueryWorksByOrganization plus WorkspaceId.
QueryWorksByWorkspaceResponse Alibabacloud_Quickbi-public20220101::Client::queryWorksByWorkspaceWithOptions(shared_ptr<QueryWorksByWorkspaceRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<long>(request->pageNum)) {
    query->insert(pair<string, long>("PageNum", *request->pageNum));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageSize)) {
    query->insert(pair<string, long>("PageSize", *request->pageSize));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->status)) {
    query->insert(pair<string, long>("Status", *request->status));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->thirdPartAuthFlag)) {
    query->insert(pair<string, long>("ThirdPartAuthFlag", *request->thirdPartAuthFlag));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->worksType)) {
    query->insert(pair<string, string>("WorksType", *request->worksType));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) {
    query->insert(pair<string, string>("WorkspaceId", *request->workspaceId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("QueryWorksByWorkspace"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return QueryWorksByWorkspaceResponse(callApi(params, req, runtime));
}

// Convenience overload: QueryWorksByWorkspace with default runtime options.
QueryWorksByWorkspaceResponse Alibabacloud_Quickbi-public20220101::Client::queryWorksByWorkspace(shared_ptr<QueryWorksByWorkspaceRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryWorksByWorkspaceWithOptions(request, runtime);
}

// QueryWorkspaceUserList (HTTP POST): optional Keyword/PageNum/PageSize/WorkspaceId.
QueryWorkspaceUserListResponse Alibabacloud_Quickbi-public20220101::Client::queryWorkspaceUserListWithOptions(shared_ptr<QueryWorkspaceUserListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->keyword)) {
    query->insert(pair<string, string>("Keyword", *request->keyword));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageNum)) {
    query->insert(pair<string, long>("PageNum", *request->pageNum));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->pageSize)) {
    query->insert(pair<string, long>("PageSize", *request->pageSize));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) {
    query->insert(pair<string, string>("WorkspaceId", *request->workspaceId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("QueryWorkspaceUserList"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return QueryWorkspaceUserListResponse(callApi(params, req, runtime));
}

// Convenience overload: QueryWorkspaceUserList with default runtime options.
QueryWorkspaceUserListResponse Alibabacloud_Quickbi-public20220101::Client::queryWorkspaceUserList(shared_ptr<QueryWorkspaceUserListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return queryWorkspaceUserListWithOptions(request, runtime);
}

// ResultCallback (HTTP POST): optional ApplicationId/HandleReason/Status.
ResultCallbackResponse Alibabacloud_Quickbi-public20220101::Client::resultCallbackWithOptions(shared_ptr<ResultCallbackRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->applicationId)) {
    query->insert(pair<string, string>("ApplicationId", *request->applicationId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->handleReason)) {
    query->insert(pair<string, string>("HandleReason", *request->handleReason));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->status)) {
    query->insert(pair<string, long>("Status", *request->status));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("ResultCallback"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol",
boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return ResultCallbackResponse(callApi(params, req, runtime));
}

// Convenience overload: ResultCallback with default runtime options.
ResultCallbackResponse Alibabacloud_Quickbi-public20220101::Client::resultCallback(shared_ptr<ResultCallbackRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return resultCallbackWithOptions(request, runtime);
}

// SaveFavorites (HTTP POST): optional UserId/WorksId.
SaveFavoritesResponse Alibabacloud_Quickbi-public20220101::Client::saveFavoritesWithOptions(shared_ptr<SaveFavoritesRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) {
    query->insert(pair<string, string>("WorksId", *request->worksId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("SaveFavorites"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return SaveFavoritesResponse(callApi(params,
setDataLevelPermissionRuleConfigWithOptions(request, runtime);
}

// SetDataLevelPermissionWhiteList (HTTP POST): optional WhiteListModel.
SetDataLevelPermissionWhiteListResponse Alibabacloud_Quickbi-public20220101::Client::setDataLevelPermissionWhiteListWithOptions(shared_ptr<SetDataLevelPermissionWhiteListRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->whiteListModel)) {
    query->insert(pair<string, string>("WhiteListModel", *request->whiteListModel));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("SetDataLevelPermissionWhiteList"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return SetDataLevelPermissionWhiteListResponse(callApi(params, req, runtime));
}

// Convenience overload: SetDataLevelPermissionWhiteList with default runtime options.
SetDataLevelPermissionWhiteListResponse Alibabacloud_Quickbi-public20220101::Client::setDataLevelPermissionWhiteList(shared_ptr<SetDataLevelPermissionWhiteListRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return setDataLevelPermissionWhiteListWithOptions(request, runtime);
}

// UpdateDataLevelPermissionStatus (HTTP POST): optional CubeId/IsOpen/RuleType.
UpdateDataLevelPermissionStatusResponse Alibabacloud_Quickbi-public20220101::Client::updateDataLevelPermissionStatusWithOptions(shared_ptr<UpdateDataLevelPermissionStatusRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->cubeId)) {
    query->insert(pair<string, string>("CubeId", *request->cubeId));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->isOpen)) {
    query->insert(pair<string, long>("IsOpen", *request->isOpen));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->ruleType)) {
    query->insert(pair<string, string>("RuleType", *request->ruleType));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("UpdateDataLevelPermissionStatus"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return UpdateDataLevelPermissionStatusResponse(callApi(params, req, runtime));
}

// Convenience overload: UpdateDataLevelPermissionStatus with default runtime options.
UpdateDataLevelPermissionStatusResponse Alibabacloud_Quickbi-public20220101::Client::updateDataLevelPermissionStatus(shared_ptr<UpdateDataLevelPermissionStatusRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return updateDataLevelPermissionStatusWithOptions(request, runtime);
}

// UpdateEmbeddedStatus (HTTP POST): optional ThirdPartAuthFlag (bool here,
// unlike the long-typed flag in the QueryWorksBy* requests) and WorksId.
UpdateEmbeddedStatusResponse Alibabacloud_Quickbi-public20220101::Client::updateEmbeddedStatusWithOptions(shared_ptr<UpdateEmbeddedStatusRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<bool>(request->thirdPartAuthFlag)) {
    query->insert(pair<string, bool>("ThirdPartAuthFlag", *request->thirdPartAuthFlag));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->worksId)) {
    query->insert(pair<string, string>("WorksId", *request->worksId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("UpdateEmbeddedStatus"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return UpdateEmbeddedStatusResponse(callApi(params, req, runtime));
}

// Convenience overload: UpdateEmbeddedStatus with default runtime options.
UpdateEmbeddedStatusResponse Alibabacloud_Quickbi-public20220101::Client::updateEmbeddedStatus(shared_ptr<UpdateEmbeddedStatusRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return updateEmbeddedStatusWithOptions(request, runtime);
}

// UpdateTicketNum (HTTP POST): optional Ticket/TicketNum.
UpdateTicketNumResponse Alibabacloud_Quickbi-public20220101::Client::updateTicketNumWithOptions(shared_ptr<UpdateTicketNumRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->ticket)) {
    query->insert(pair<string,
string>("Ticket", *request->ticket));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->ticketNum)) {
    query->insert(pair<string, long>("TicketNum", *request->ticketNum));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("UpdateTicketNum"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return UpdateTicketNumResponse(callApi(params, req, runtime));
}

// Convenience overload: UpdateTicketNum with default runtime options.
UpdateTicketNumResponse Alibabacloud_Quickbi-public20220101::Client::updateTicketNum(shared_ptr<UpdateTicketNumRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return updateTicketNumWithOptions(request, runtime);
}

// UpdateUser (HTTP POST): optional AdminUser/AuthAdminUser (bool),
// NickName/UserId (string), UserType (long).
UpdateUserResponse Alibabacloud_Quickbi-public20220101::Client::updateUserWithOptions(shared_ptr<UpdateUserRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<bool>(request->adminUser)) {
    query->insert(pair<string, bool>("AdminUser", *request->adminUser));
  }
  if (!Darabonba_Util::Client::isUnset<bool>(request->authAdminUser)) {
    query->insert(pair<string, bool>("AuthAdminUser", *request->authAdminUser));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->nickName)) {
    query->insert(pair<string, string>("NickName", *request->nickName));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  if (!Darabonba_Util::Client::isUnset<long>(request->userType)) {
    query->insert(pair<string, long>("UserType", *request->userType));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("UpdateUser"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return UpdateUserResponse(callApi(params, req, runtime));
}

// Convenience overload: UpdateUser with default runtime options.
UpdateUserResponse Alibabacloud_Quickbi-public20220101::Client::updateUser(shared_ptr<UpdateUserRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return updateUserWithOptions(request, runtime);
}

// UpdateUserGroup (HTTP POST): optional UserGroupDescription/UserGroupId/UserGroupName.
UpdateUserGroupResponse Alibabacloud_Quickbi-public20220101::Client::updateUserGroupWithOptions(shared_ptr<UpdateUserGroupRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupDescription)) {
    query->insert(pair<string, string>("UserGroupDescription", *request->userGroupDescription));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupId)) {
    query->insert(pair<string, string>("UserGroupId", *request->userGroupId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userGroupName)) {
    query->insert(pair<string, string>("UserGroupName", *request->userGroupName));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("UpdateUserGroup"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return UpdateUserGroupResponse(callApi(params, req, runtime));
}

// Convenience overload: UpdateUserGroup with default runtime options.
UpdateUserGroupResponse Alibabacloud_Quickbi-public20220101::Client::updateUserGroup(shared_ptr<UpdateUserGroupRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return updateUserGroupWithOptions(request, runtime);
}

// UpdateUserTagMeta (HTTP POST): optional TagDescription/TagId/TagName.
UpdateUserTagMetaResponse Alibabacloud_Quickbi-public20220101::Client::updateUserTagMetaWithOptions(shared_ptr<UpdateUserTagMetaRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->tagDescription)) {
    query->insert(pair<string, string>("TagDescription", *request->tagDescription));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->tagId)) {
    query->insert(pair<string, string>("TagId", *request->tagId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->tagName)) {
    query->insert(pair<string, string>("TagName", *request->tagName));
  }
shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  // Assemble RPC parameters and dispatch the UpdateUserTagMeta call.
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("UpdateUserTagMeta"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return UpdateUserTagMetaResponse(callApi(params, req, runtime));
}

// Convenience overload: UpdateUserTagMeta with default runtime options.
UpdateUserTagMetaResponse Alibabacloud_Quickbi-public20220101::Client::updateUserTagMeta(shared_ptr<UpdateUserTagMetaRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return updateUserTagMetaWithOptions(request, runtime);
}

// UpdateUserTagValue (HTTP POST): optional TagId/TagValue/UserId.
UpdateUserTagValueResponse Alibabacloud_Quickbi-public20220101::Client::updateUserTagValueWithOptions(shared_ptr<UpdateUserTagValueRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<string>(request->tagId)) {
    query->insert(pair<string, string>("TagId", *request->tagId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->tagValue)) {
    query->insert(pair<string, string>("TagValue", *request->tagValue));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("UpdateUserTagValue"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return UpdateUserTagValueResponse(callApi(params, req, runtime));
}

// Convenience overload: UpdateUserTagValue with default runtime options.
UpdateUserTagValueResponse Alibabacloud_Quickbi-public20220101::Client::updateUserTagValue(shared_ptr<UpdateUserTagValueRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return updateUserTagValueWithOptions(request, runtime);
}

// UpdateWorkspaceUserRole (HTTP POST): optional RoleId (long), UserId, WorkspaceId.
UpdateWorkspaceUserRoleResponse Alibabacloud_Quickbi-public20220101::Client::updateWorkspaceUserRoleWithOptions(shared_ptr<UpdateWorkspaceUserRoleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<long>(request->roleId)) {
    query->insert(pair<string, long>("RoleId", *request->roleId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userId)) {
    query->insert(pair<string, string>("UserId", *request->userId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) {
    query->insert(pair<string, string>("WorkspaceId", *request->workspaceId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("UpdateWorkspaceUserRole"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} }));
  return UpdateWorkspaceUserRoleResponse(callApi(params, req, runtime));
}

// Convenience overload: UpdateWorkspaceUserRole with default runtime options.
UpdateWorkspaceUserRoleResponse Alibabacloud_Quickbi-public20220101::Client::updateWorkspaceUserRole(shared_ptr<UpdateWorkspaceUserRoleRequest> request) {
  shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>();
  return updateWorkspaceUserRoleWithOptions(request, runtime);
}

// UpdateWorkspaceUsersRole (HTTP POST): batch variant — optional RoleId (long),
// UserIds (comma-joined string, presumably — confirm against API docs), WorkspaceId.
// (Definition continues past this view.)
UpdateWorkspaceUsersRoleResponse Alibabacloud_Quickbi-public20220101::Client::updateWorkspaceUsersRoleWithOptions(shared_ptr<UpdateWorkspaceUsersRoleRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) {
  Darabonba_Util::Client::validateModel(request);
  shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>());
  if (!Darabonba_Util::Client::isUnset<long>(request->roleId)) {
    query->insert(pair<string, long>("RoleId", *request->roleId));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->userIds)) {
    query->insert(pair<string, string>("UserIds", *request->userIds));
  }
  if (!Darabonba_Util::Client::isUnset<string>(request->workspaceId)) {
    query->insert(pair<string, string>("WorkspaceId", *request->workspaceId));
  }
  shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} }));
  shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action",
boost::any(string("UpdateWorkspaceUsersRole"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", boost::any(string("json"))} })); return UpdateWorkspaceUsersRoleResponse(callApi(params, req, runtime)); } UpdateWorkspaceUsersRoleResponse Alibabacloud_Quickbi-public20220101::Client::updateWorkspaceUsersRole(shared_ptr<UpdateWorkspaceUsersRoleRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return updateWorkspaceUsersRoleWithOptions(request, runtime); } WithdrawAllUserGroupsResponse Alibabacloud_Quickbi-public20220101::Client::withdrawAllUserGroupsWithOptions(shared_ptr<WithdrawAllUserGroupsRequest> request, shared_ptr<Darabonba_Util::RuntimeOptions> runtime) { Darabonba_Util::Client::validateModel(request); shared_ptr<map<string, boost::any>> query = make_shared<map<string, boost::any>>(map<string, boost::any>()); if (!Darabonba_Util::Client::isUnset<string>(request->userId)) { query->insert(pair<string, string>("UserId", *request->userId)); } shared_ptr<Alibabacloud_OpenApi::OpenApiRequest> req = make_shared<Alibabacloud_OpenApi::OpenApiRequest>(map<string, boost::any>({ {"query", boost::any(Alibabacloud_OpenApiUtil::Client::query(query))} })); shared_ptr<Alibabacloud_OpenApi::Params> params = make_shared<Alibabacloud_OpenApi::Params>(map<string, boost::any>({ {"action", boost::any(string("WithdrawAllUserGroups"))}, {"version", boost::any(string("2022-01-01"))}, {"protocol", boost::any(string("HTTPS"))}, {"pathname", boost::any(string("/"))}, {"method", boost::any(string("POST"))}, {"authType", boost::any(string("AK"))}, {"style", boost::any(string("RPC"))}, {"reqBodyType", boost::any(string("formData"))}, {"bodyType", 
boost::any(string("json"))} })); return WithdrawAllUserGroupsResponse(callApi(params, req, runtime)); } WithdrawAllUserGroupsResponse Alibabacloud_Quickbi-public20220101::Client::withdrawAllUserGroups(shared_ptr<WithdrawAllUserGroupsRequest> request) { shared_ptr<Darabonba_Util::RuntimeOptions> runtime = make_shared<Darabonba_Util::RuntimeOptions>(); return withdrawAllUserGroupsWithOptions(request, runtime); }
#pragma once #include <cstddef> #include <functional> #include <memory> namespace elle { template<typename T, typename... Args> std::unique_ptr<T> make_unique(Args&&... args); template <typename T> class generic_unique_ptr: public std::unique_ptr<T, std::function<void (T*)>> { /*------. | Types | `------*/ public: using Self = generic_unique_ptr<T>; using Super = std::unique_ptr<T, std::function<void (T*)>>; /*-------------. | Construction | `-------------*/ public: /// Construct a null pointer. constexpr generic_unique_ptr(); /// Construct a null pointer. constexpr generic_unique_ptr(std::nullptr_t); /// Construct a pointer to \a p. explicit generic_unique_ptr(typename Self::pointer p); /// Construct a pointer to \a p with specific deleter. explicit generic_unique_ptr(typename Self::pointer p, std::function<void (T*)> const& deleter); /// Construct a pointer to \a p with specific deleter. template <typename D> explicit generic_unique_ptr(typename Self::pointer p, D&& deleter); /// Construct a pointer that steals ownership from \a p. /* implicit */ generic_unique_ptr(Self&& p); /// Construct a pointer that steals ownership from \a p. template <typename P, typename D> /* implicit */ generic_unique_ptr(std::unique_ptr<P, D>&& source); /// Copy is disabled. generic_unique_ptr(Self const& p) = delete; /*-----------. | Assignment | `-----------*/ public: /// Construct a pointer that steals ownership from \a p. template <typename P, typename D> Self& operator = (std::unique_ptr<P, D>&& source); /// Construct a pointer that steals ownership from \a p. template <typename P> Self& operator = (elle::generic_unique_ptr<P>&& source); }; } #include <elle/memory.hxx>
// Jason Brillante "Damdoshi"
// Hanged Bunny Studio 2014-2018
//
// Bibliotheque Lapin

// Global mode flag for sound sprites: true selects "trap" behaviour, false
// selects "sync". (The consumers of this flag live elsewhere in the library.)
bool gl_bunny_sound_sprite_trap = true;

// Select the sound-sprite mode: pass true for "trap", false for "sync".
// Simply records the choice in the global flag above.
void bunny_sound_sprite_trap_or_sync(bool trap)
{
  gl_bunny_sound_sprite_trap = trap;
}
#include <bits/stdc++.h>
#define N 605      // max node count (source 0, jobs/positions, mechanics, sink 601)
#define M 100020   // max directed edge count (forward + reverse arcs)
#define ll long long
using namespace std;

// Fast integer input: skips non-digit characters, tracks a leading '-'
// (the `ch=='-'&&(f=1)` comparison is used for its short-circuit side effect),
// and accumulates digits via x*10 == (x<<3)+(x<<1).
inline int read(){
    int x=0,f=0;char ch=getchar();
    while(ch>'9'||ch<'0'){ch=='-'&&(f=1);ch=getchar();}
    while(ch<='9'&&ch>='0'){x=(x<<3)+(x<<1)+ch-'0';ch=getchar();}
    return f?-x:x;
}

// Min-cost max-flow, zkw/Dinic style: SPFA computes potentials, then a DFS
// pushes blocking flow along zero-reduced-cost arcs.
namespace MCMF{
#ifndef inf
#define inf 707406378
#endif
    // Edge arrays (adjacency-list with head/nxt). Edges are stored in pairs:
    // edge i and i^1 are a forward arc and its residual (cnt starts at 1 so
    // the first pair occupies indices 2 and 3, making i^1 the partner).
    int to[M], nxt[M], head[N], val[M], cst[M];
    int st, ed, cnt = 1, ans;  // st defaults to 0 (the source); ans accumulates total cost

    // Add arc x->y with capacity v and cost c, plus the zero-capacity
    // reverse arc y->x with cost -c.
    void ins(int x, int y, int v, int c){
        to[++cnt] = y; nxt[cnt] = head[x]; head[x] = cnt; val[cnt] = v; cst[cnt] = c;
        to[++cnt] = x; nxt[cnt] = head[y]; head[y] = cnt; val[cnt] = 0; cst[cnt] = -c;
    }

    bool vis[N];
    int dis[N], q[M];

    // SPFA from the sink over residual arcs (val[i^1] tests the forward arc's
    // remaining capacity when scanning from the far end), computing dis[] =
    // cheapest residual cost to reach `ed`. memset with byte 127/3 == 0x2a
    // fills each int with 0x2a2a2a2a == inf (707406378).
    bool spfa(){
        memset(vis, 0, sizeof(bool)*N);
        memset(dis, 127/3, sizeof(int)*N);
        int l = 0, r = 1;
        q[1] = ed; dis[ed] = 0; vis[ed] = 1;
        while(l < r){
            int x = q[++l];
            for(int i = head[x]; i; i = nxt[i])
                if(val[i^1] && dis[to[i]] > dis[x]-cst[i]){
                    dis[to[i]] = dis[x]-cst[i];
                    if(!vis[to[i]]){
                        vis[to[i]] = 1;
                        q[++r] = to[i];
                    }
                }
            vis[x] = 0;
        }
        return dis[st] != inf;  // false once no augmenting path remains
    }

    // Blocking-flow DFS along arcs with exact reduced cost
    // (dis[to] == dis[x] - cst). Adds pushed_flow * cost into `ans`.
    int dfs(int x, int f){
        vis[x] = 1;
        if(x == ed) return f;
        int used = 0;
        for(int i = head[x]; i; i = nxt[i])
            if(!vis[to[i]] && val[i] && dis[to[i]] == dis[x]-cst[i]){
                int w = dfs(to[i], min(val[i], f-used));
                ans += w*cst[i];
                val[i] -= w; val[i^1] += w; used += w;
                if(used == f) return f;
            }
        return used;
    }

    // Repeat: recompute potentials, then push blocking flows until the DFS no
    // longer reaches the sink (vis[ed] stays 0). Returns the total min cost.
    int zkw(){
        while(spfa() && (vis[ed] = 1))
            while(vis[ed]){
                memset(vis, 0, sizeof(bool)*N);
                dfs(st, 1<<30);
            }
        return ans;
    }
};

int t[N][N];  // t[mechanic][customer]: per-unit service time
using namespace MCMF;

// Assignment model (classic "car repair" / minimum average waiting time):
// node (i-1)*m+j = customer i served as j-th-from-last by some mechanic;
// node nm+k = mechanic k; serving j-th-from-last multiplies the time by j.
// Answer is min total cost / m customers... NOTE(review): loops read n
// customers x m mechanics -- confirm which of n/m is which against the
// problem statement; the division by m at the end assumes m is the number
// of averaged entities.
int main(){
    int n = read(), m = read(), nm = n*m;
    ed = 601;
    for(int i = 1; i <= m; i++)
        for(int j = 1; j <= n; j++)
            t[i][j] = read();
    for(int i = 1; i <= nm; i++) ins(st, i, 1, 0);
    for(int i = 1; i <= m; i++) ins(i+nm, ed, 1, 0);
    for(int i = 1; i <= n; i++)
        for(int j = 1; j <= m; j++)
            for(int k = 1; k <= m; k++)
                ins((i-1)*m+j, nm+k, 1, t[k][i]*j);
    printf("%.2lf\n", 1.*zkw()/m);
    return 0;
}
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2018. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: Stephan Aiche $ // -------------------------------------------------------------------------- #include <OpenMS/ANALYSIS/MAPMATCHING/TransformationModelInterpolated.h> // Spline2dInterpolator #include <OpenMS/MATH/MISC/CubicSpline2d.h> #include <numeric> // AkimaInterpolator #include <Wm5IntpAkimaNonuniform1.h> #include <Wm5Math.h> namespace OpenMS { /** * @brief Spline2dInterpolator */ class Spline2dInterpolator : public TransformationModelInterpolated::Interpolator { public: Spline2dInterpolator() : spline_(nullptr) { } void init(std::vector<double>& x, std::vector<double>& y) override { // cleanup before we use a new one if (spline_ != (CubicSpline2d*) nullptr) delete spline_; // initialize spline spline_ = new CubicSpline2d(x, y); } double eval(const double& x) const override { return spline_->eval(x); } ~Spline2dInterpolator() override { delete spline_; } private: CubicSpline2d* spline_; // Spline2d<double>* spline_; }; /** * @brief AkimaInterpolator */ class AkimaInterpolator : public TransformationModelInterpolated::Interpolator { public: AkimaInterpolator() : interpolator_(nullptr) {} void init(std::vector<double>& x, std::vector<double>& y) override { if (interpolator_ != (Wm5::IntpAkimaNonuniform1<double>*) nullptr) delete interpolator_; // re-construct a new interpolator interpolator_ = new Wm5::IntpAkimaNonuniform1<double>(static_cast<int>(x.size()), &x.front(), &y.front()); } double eval(const double& x) const override { return (* interpolator_)(x); } ~AkimaInterpolator() override { delete interpolator_; } private: Wm5::IntpAkimaNonuniform1<double>* interpolator_; }; /** * @brief LinearInterpolator. 
*/ class LinearInterpolator : public TransformationModelInterpolated::Interpolator { public: LinearInterpolator() {} void init(std::vector<double>& x, std::vector<double>& y) override { // clear data x_.clear(); y_.clear(); // copy data // TODO: should we solve this using pointers to the original data? x_.insert(x_.begin(), x.begin(), x.end()); y_.insert(y_.begin(), y.begin(), y.end()); } double eval(const double& x) const override { // find nearest pair of points std::vector<double>::const_iterator it = std::upper_bound(x_.begin(), x_.end(), x); // interpolator is guaranteed to be only evaluated on points x, x_.front() =< x =< x x.back() // see TransformationModelInterpolated::evaluate // compute interpolation // the only point that is > then an element in our series is y_.back() // see call guarantee above if (it == x_.end()) { return y_.back(); } else { // interpolate .. invariant: idx > 0 const SignedSize idx = it - x_.begin(); const double x_0 = x_[idx - 1]; const double x_1 = x_[idx]; const double y_0 = y_[idx - 1]; const double y_1 = y_[idx]; return y_0 + (y_1 - y_0) * (x - x_0) / (x_1 - x_0); } } ~LinearInterpolator() override { } private: /// x values std::vector<double> x_; /// y values std::vector<double> y_; }; void TransformationModelInterpolated::preprocessDataPoints_(const DataPoints& data) { // need monotonically increasing x values (can't have the same value twice): std::map<double, std::vector<double> > mapping; for (TransformationModel::DataPoints::const_iterator it = data.begin(); it != data.end(); ++it) { mapping[it->first].push_back(it->second); } x_.resize(mapping.size()); y_.resize(mapping.size()); size_t i = 0; for (std::map<double, std::vector<double> >::const_iterator it = mapping.begin(); it != mapping.end(); ++it, ++i) { x_[i] = it->first; // use average y value: y_[i] = std::accumulate(it->second.begin(), it->second.end(), 0.0) / it->second.size(); } // ensure that we have enough points for an interpolation if (x_.size() < 3) { throw 
Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cubic spline model needs at least 3 data points (with unique x values)"); } } void TransformationModelInterpolated::preprocessDataPoints_(const std::vector<std::pair<double,double>>& data) { // need monotonically increasing x values (can't have the same value twice): std::map<double, std::vector<double> > mapping; for (std::vector<std::pair<double,double>>::const_iterator it = data.begin(); it != data.end(); ++it) { mapping[it->first].push_back(it->second); } x_.resize(mapping.size()); y_.resize(mapping.size()); size_t i = 0; for (std::map<double, std::vector<double> >::const_iterator it = mapping.begin(); it != mapping.end(); ++it, ++i) { x_[i] = it->first; // use average y value: y_[i] = std::accumulate(it->second.begin(), it->second.end(), 0.0) / it->second.size(); } // ensure that we have enough points for an interpolation if (x_.size() < 3) { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Cubic spline model needs at least 3 data points (with unique x values)"); } } TransformationModelInterpolated::TransformationModelInterpolated(const std::vector<std::pair<double,double>>& data, const Param& params, bool preprocess = true) { params_ = params; Param defaults; getDefaultParameters(defaults); params_.setDefaults(defaults); // convert incoming data to x_ and y_ if (preprocess) { preprocessDataPoints_(data); } else { x_.resize(data.size()); y_.resize(data.size()); for (const std::pair<double,double>& pair : data) { x_.push_back(pair.first); y_.push_back(pair.second); } } // choose the actual interpolation type const String interpolation_type = params_.getValue("interpolation_type"); if (interpolation_type == "linear") { interp_ = new LinearInterpolator(); } else if (interpolation_type == "cspline") { interp_ = new Spline2dInterpolator(); } else if (interpolation_type == "akima") { interp_ = new AkimaInterpolator(); } else { throw 
Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "unknown/unsupported interpolation type '" + interpolation_type + "'"); } // assign data interp_->init(x_, y_); // linear model for extrapolation: const String extrapolation_type = params_.getValue("extrapolation_type"); if (extrapolation_type == "global-linear") { std::vector<TransformationModel::DataPoint> bloated_data{}; bloated_data.resize(x_.size()); //uff... well here we go.. adding an empty string for (Size s = 0; s < x_.size(); ++s) { bloated_data.emplace_back(TransformationModel::DataPoint(x_[s],y_[s])); } lm_front_ = new TransformationModelLinear(bloated_data, Param()); lm_back_ = new TransformationModelLinear(bloated_data, Param()); } else if (extrapolation_type == "two-point-linear") { TransformationModel::DataPoints lm_data(2); lm_data[0] = std::make_pair(x_.front(), y_.front()); lm_data[1] = std::make_pair(x_.back(), y_.back()); // last point lm_front_ = new TransformationModelLinear(lm_data, Param()); lm_back_ = new TransformationModelLinear(lm_data, Param()); } else if (extrapolation_type == "four-point-linear") { TransformationModel::DataPoints lm_data(2); lm_data[0] = std::make_pair(x_[0], y_[0]); lm_data[1] = std::make_pair(x_[1], y_[1]); lm_front_ = new TransformationModelLinear(lm_data, Param()); lm_data[0] = std::make_pair(x_[ x_.size()-2 ], y_[ y_.size()-2] ); // second to last point lm_data[1] = std::make_pair(x_.back(), y_.back()); // last point lm_back_ = new TransformationModelLinear(lm_data, Param()); } else { if (interp_) { delete interp_; } throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "unknown/unsupported extrapolation type '" + extrapolation_type + "'"); } } TransformationModelInterpolated::TransformationModelInterpolated(const TransformationModel::DataPoints& data, const Param& params) { params_ = params; Param defaults; getDefaultParameters(defaults); params_.setDefaults(defaults); // convert incoming data to x_ and y_ 
preprocessDataPoints_(data); // choose the actual interpolation type const String interpolation_type = params_.getValue("interpolation_type"); if (interpolation_type == "linear") { interp_ = new LinearInterpolator(); } else if (interpolation_type == "cspline") { interp_ = new Spline2dInterpolator(); } else if (interpolation_type == "akima") { interp_ = new AkimaInterpolator(); } else { throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "unknown/unsupported interpolation type '" + interpolation_type + "'"); } // assign data interp_->init(x_, y_); // linear model for extrapolation: const String extrapolation_type = params_.getValue("extrapolation_type"); if (extrapolation_type == "global-linear") { lm_front_ = new TransformationModelLinear(data, Param()); lm_back_ = new TransformationModelLinear(data, Param()); } else if (extrapolation_type == "two-point-linear") { TransformationModel::DataPoints lm_data(2); lm_data[0] = std::make_pair(x_.front(), y_.front()); lm_data[1] = std::make_pair(x_.back(), y_.back()); // last point lm_front_ = new TransformationModelLinear(lm_data, Param()); lm_back_ = new TransformationModelLinear(lm_data, Param()); } else if (extrapolation_type == "four-point-linear") { TransformationModel::DataPoints lm_data(2); lm_data[0] = std::make_pair(x_[0], y_[0]); lm_data[1] = std::make_pair(x_[1], y_[1]); lm_front_ = new TransformationModelLinear(lm_data, Param()); lm_data[0] = std::make_pair(x_[ x_.size()-2 ], y_[ y_.size()-2] ); // second to last point lm_data[1] = std::make_pair(x_.back(), y_.back()); // last point lm_back_ = new TransformationModelLinear(lm_data, Param()); } else { if (interp_) { delete interp_; } throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "unknown/unsupported extrapolation type '" + extrapolation_type + "'"); } } TransformationModelInterpolated::~TransformationModelInterpolated() { if (interp_) delete interp_; if (lm_front_) delete lm_front_; if (lm_back_) delete 
lm_back_; } double TransformationModelInterpolated::evaluate(double value) const { if (value < x_.front()) // extrapolate front { return lm_front_->evaluate(value); } else if (value > x_.back()) // extrapolate back { return lm_back_->evaluate(value); } // interpolate: return interp_->eval(value); } void TransformationModelInterpolated::getDefaultParameters(Param& params) { params.clear(); params.setValue("interpolation_type", "cspline", "Type of interpolation to apply."); StringList types = ListUtils::create<String>("linear,cspline,akima"); params.setValidStrings("interpolation_type", types); params.setValue("extrapolation_type", "two-point-linear", "Type of extrapolation to apply: two-point-linear: use the first and last data point to build a single linear model, four-point-linear: build two linear models on both ends using the first two / last two points, global-linear: use all points to build a single linear model. Note that global-linear may not be continuous at the border."); StringList etypes = ListUtils::create<String>("two-point-linear,four-point-linear,global-linear"); params.setValidStrings("extrapolation_type", etypes); } } // namespace
#pragma once #include "es/protoground.hpp" #include "es/math/protoground-glm.hpp" #include <fbxsdk.h> namespace es { namespace fbx { /** * shared_ptrでラップする */ template<typename T> std::shared_ptr<T> wrapFbxSharedPtr(T *ptr) { return std::shared_ptr<T>(ptr, [](T *ptr) { if (ptr) { ptr->Destroy(); } }); } } namespace util { inline vec3 to3dsAxis(const vec3 &v) { return vec3(v.x, v.z, -v.y); } inline void getTransform(vec3 *resultPosition, quat *resultQuat, vec3 *resultRotate, vec3 *resultScale, const FbxMatrix &m) { FbxVector4 translate; FbxQuaternion rotate; FbxVector4 sharing; FbxVector4 scale; double sign; m.GetElements(translate, rotate, sharing, scale, sign); if (resultPosition) { *resultPosition = vec3(translate[0], translate[1], translate[2]); } quat temp; temp.x = rotate[0]; temp.y = rotate[1]; temp.z = rotate[2]; temp.w = rotate[3]; if (resultQuat) { *resultQuat = temp; } if (resultRotate) { *resultRotate = glm::eulerAngles(temp); } if (resultScale) { *resultScale = vec3(scale[0], scale[1], scale[2]); } } inline void getTransform(vec3 *resultPosition, quat *resultQuat, vec3 *resultRotate, vec3 *resultScale, const FbxAMatrix &m) { getTransform(resultPosition, resultQuat, resultRotate, resultScale, FbxMatrix(m)); } } }
// Copyright 2016 PDFium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Original code copyright 2014 Foxit Software Inc. http://www.foxitsoftware.com #include "xfa/fxgraphics/cfx_pattern.h" CFX_Pattern::CFX_Pattern(FX_HatchStyle hatchStyle, const FX_ARGB foreArgb, const FX_ARGB backArgb, CFX_Matrix* matrix) : m_hatchStyle(hatchStyle), m_foreArgb(foreArgb), m_backArgb(backArgb) { ASSERT(m_hatchStyle >= FX_HATCHSTYLE_Horizontal && m_hatchStyle <= FX_HATCHSTYLE_SolidDiamond); if (matrix) { // TODO(dsinclair): Add a Set(const CFX_Matrix&) method. pdfium:436 m_matrix.Set(matrix->a, matrix->b, matrix->c, matrix->d, matrix->e, matrix->f); } else { m_matrix.SetIdentity(); } } CFX_Pattern::~CFX_Pattern() {}
/*=============================================================================
    Copyright (c) 2001-2011 Joel de Guzman

    Distributed under the Boost Software License, Version 1.0. (See accompanying
    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#if !defined(FUSION_INSERT_RANGE_009172005_1147)
#define FUSION_INSERT_RANGE_009172005_1147

#include <boost/fusion/iterator/mpl/convert_iterator.hpp>
#include <boost/fusion/container/vector/vector10.hpp>
#include <boost/fusion/view/joint_view/joint_view.hpp>
#include <boost/fusion/view/iterator_range/iterator_range.hpp>
#include <boost/fusion/support/detail/as_fusion_element.hpp>
#include <boost/fusion/sequence/intrinsic/begin.hpp>
#include <boost/fusion/sequence/intrinsic/end.hpp>
#include <boost/fusion/adapted/mpl/mpl_iterator.hpp>

namespace boost { namespace fusion
{
    namespace result_of
    {
        // Result type of insert_range: the sequence split at Position into
        // [begin, pos) and [pos, end), with Range joined in between --
        // joint_view(joint_view(left, Range), right).
        template <typename Sequence, typename Position, typename Range>
        struct insert_range
        {
            typedef typename convert_iterator<Position>::type pos_type;
            typedef typename result_of::begin<Sequence>::type first_type;
            typedef typename result_of::end<Sequence>::type last_type;

            typedef iterator_range<first_type, pos_type> left_type;
            typedef iterator_range<pos_type, last_type> right_type;
            typedef joint_view<left_type, Range> left_insert_type;
            typedef joint_view<left_insert_type, right_type> type;
        };
    }

    // Returns a lazy view of seq with range inserted before pos; neither
    // seq nor range is copied, so both must outlive the returned view.
    template <typename Sequence, typename Position, typename Range>
    inline typename result_of::insert_range<Sequence const, Position, Range const>::type
    insert_range(Sequence const& seq, Position const& pos, Range const& range)
    {
        // Local typedef deliberately shadows namespace result_of below.
        typedef result_of::insert_range<Sequence const, Position, Range const> result_of;
        typedef typename result_of::left_type left_type;
        typedef typename result_of::right_type right_type;
        typedef typename result_of::left_insert_type left_insert_type;
        typedef typename result_of::type result;

        left_type left(fusion::begin(seq), convert_iterator<Position>::call(pos));
        right_type right(convert_iterator<Position>::call(pos), fusion::end(seq));
        left_insert_type left_insert(left, range);
        return result(left_insert, right);
    }
}}

#endif
// // ssl/impl/context.ipp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_ASIO_SSL_IMPL_CONTEXT_IPP #define BOOST_ASIO_SSL_IMPL_CONTEXT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include <boost/asio/detail/config.hpp> #include <cstring> #include <boost/asio/detail/throw_error.hpp> #include <boost/asio/error.hpp> #include <boost/asio/ssl/context.hpp> #include <boost/asio/ssl/error.hpp> #include <boost/asio/detail/push_options.hpp> namespace boost { namespace asio { namespace ssl { struct context::bio_cleanup { BIO* p; ~bio_cleanup() { if (p) ::BIO_free(p); } }; struct context::x509_cleanup { X509* p; ~x509_cleanup() { if (p) ::X509_free(p); } }; struct context::evp_pkey_cleanup { EVP_PKEY* p; ~evp_pkey_cleanup() { if (p) ::EVP_PKEY_free(p); } }; struct context::rsa_cleanup { RSA* p; ~rsa_cleanup() { if (p) ::RSA_free(p); } }; struct context::dh_cleanup { DH* p; ~dh_cleanup() { if (p) ::DH_free(p); } }; context::context(context::method m) : handle_(0) { ::ERR_clear_error(); switch (m) { // SSL v2. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2) case context::sslv2: case context::sslv2_client: case context::sslv2_server: boost::asio::detail::throw_error( boost::asio::error::invalid_argument, "context"); break; #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2) case context::sslv2: handle_ = ::SSL_CTX_new(::SSLv2_method()); break; case context::sslv2_client: handle_ = ::SSL_CTX_new(::SSLv2_client_method()); break; case context::sslv2_server: handle_ = ::SSL_CTX_new(::SSLv2_server_method()); break; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2) // SSL v3. #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::sslv3: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION); SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION); } break; case context::sslv3_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION); SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION); } break; case context::sslv3_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION); SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION); } break; #elif defined(OPENSSL_NO_SSL3) case context::sslv3: case context::sslv3_client: case context::sslv3_server: boost::asio::detail::throw_error( boost::asio::error::invalid_argument, "context"); break; #else // defined(OPENSSL_NO_SSL3) case context::sslv3: handle_ = ::SSL_CTX_new(::SSLv3_method()); break; case context::sslv3_client: handle_ = ::SSL_CTX_new(::SSLv3_client_method()); break; case context::sslv3_server: handle_ = ::SSL_CTX_new(::SSLv3_server_method()); break; #endif // defined(OPENSSL_NO_SSL3) // TLS v1.0. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv1: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION); } break; case context::tlsv1_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION); } break; case context::tlsv1_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION); } break; #elif defined(SSL_TXT_TLSV1) case context::tlsv1: handle_ = ::SSL_CTX_new(::TLSv1_method()); break; case context::tlsv1_client: handle_ = ::SSL_CTX_new(::TLSv1_client_method()); break; case context::tlsv1_server: handle_ = ::SSL_CTX_new(::TLSv1_server_method()); break; #else // defined(SSL_TXT_TLSV1) case context::tlsv1: case context::tlsv1_client: case context::tlsv1_server: boost::asio::detail::throw_error( boost::asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1) // TLS v1.1. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv11: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION); } break; case context::tlsv11_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION); } break; case context::tlsv11_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION); } break; #elif defined(SSL_TXT_TLSV1_1) case context::tlsv11: handle_ = ::SSL_CTX_new(::TLSv1_1_method()); break; case context::tlsv11_client: handle_ = ::SSL_CTX_new(::TLSv1_1_client_method()); break; case context::tlsv11_server: handle_ = ::SSL_CTX_new(::TLSv1_1_server_method()); break; #else // defined(SSL_TXT_TLSV1_1) case context::tlsv11: case context::tlsv11_client: case context::tlsv11_server: boost::asio::detail::throw_error( boost::asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1_1) // TLS v1.2. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv12: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION); } break; case context::tlsv12_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION); } break; case context::tlsv12_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION); } break; #elif defined(SSL_TXT_TLSV1_2) case context::tlsv12: handle_ = ::SSL_CTX_new(::TLSv1_2_method()); break; case context::tlsv12_client: handle_ = ::SSL_CTX_new(::TLSv1_2_client_method()); break; case context::tlsv12_server: handle_ = ::SSL_CTX_new(::TLSv1_2_server_method()); break; #else // defined(SSL_TXT_TLSV1_2) case context::tlsv12: case context::tlsv12_client: case context::tlsv12_server: boost::asio::detail::throw_error( boost::asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1_2) // TLS v1.3. 
#if (OPENSSL_VERSION_NUMBER >= 0x10101000L) \ && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv13: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION); } break; case context::tlsv13_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION); } break; case context::tlsv13_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION); } break; #else // (OPENSSL_VERSION_NUMBER >= 0x10101000L) // && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv13: case context::tlsv13_client: case context::tlsv13_server: boost::asio::detail::throw_error( boost::asio::error::invalid_argument, "context"); break; #endif // (OPENSSL_VERSION_NUMBER >= 0x10101000L) // && !defined(LIBRESSL_VERSION_NUMBER) // Any supported SSL/TLS version. case context::sslv23: handle_ = ::SSL_CTX_new(::SSLv23_method()); break; case context::sslv23_client: handle_ = ::SSL_CTX_new(::SSLv23_client_method()); break; case context::sslv23_server: handle_ = ::SSL_CTX_new(::SSLv23_server_method()); break; // Any supported TLS version. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::tls: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); break; case context::tls_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); break; case context::tls_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); break; #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) case context::tls: handle_ = ::SSL_CTX_new(::SSLv23_method()); if (handle_) SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); break; case context::tls_client: handle_ = ::SSL_CTX_new(::SSLv23_client_method()); if (handle_) SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); break; case context::tls_server: handle_ = ::SSL_CTX_new(::SSLv23_server_method()); if (handle_) SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); break; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) default: handle_ = ::SSL_CTX_new(0); break; } if (handle_ == 0) { boost::system::error_code ec( static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category()); boost::asio::detail::throw_error(ec, "context"); } set_options(no_compression); } context::context(context::native_handle_type native_handle) : handle_(native_handle) { if (!handle_) { boost::asio::detail::throw_error( boost::asio::error::invalid_argument, "context"); } } #if defined(BOOST_ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::context(context&& other) { handle_ = other.handle_; other.handle_ = 0; } context& context::operator=(context&& other) { context tmp(BOOST_ASIO_MOVE_CAST(context)(*this)); handle_ = other.handle_; other.handle_ = 0; return *this; } #endif // defined(BOOST_ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::~context() { if (handle_) { #if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \ 
&& (!defined(LIBRESSL_VERSION_NUMBER) \ || LIBRESSL_VERSION_NUMBER >= 0x2070000fL)) \ || defined(BOOST_ASIO_USE_WOLFSSL) void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) void* cb_userdata = handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) if (cb_userdata) { detail::password_callback_base* callback = static_cast<detail::password_callback_base*>( cb_userdata); delete callback; #if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \ && (!defined(LIBRESSL_VERSION_NUMBER) \ || LIBRESSL_VERSION_NUMBER >= 0x2070000fL)) \ || defined(BOOST_ASIO_USE_WOLFSSL) ::SSL_CTX_set_default_passwd_cb_userdata(handle_, 0); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) handle_->default_passwd_callback_userdata = 0; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) } if (SSL_CTX_get_app_data(handle_)) { detail::verify_callback_base* callback = static_cast<detail::verify_callback_base*>( SSL_CTX_get_app_data(handle_)); delete callback; SSL_CTX_set_app_data(handle_, 0); } ::SSL_CTX_free(handle_); } } context::native_handle_type context::native_handle() { return handle_; } void context::clear_options(context::options o) { boost::system::error_code ec; clear_options(o, ec); boost::asio::detail::throw_error(ec, "clear_options"); } BOOST_ASIO_SYNC_OP_VOID context::clear_options( context::options o, boost::system::error_code& ec) { #if (OPENSSL_VERSION_NUMBER >= 0x009080DFL) \ && (OPENSSL_VERSION_NUMBER != 0x00909000L) # if !defined(SSL_OP_NO_COMPRESSION) if ((o & context::no_compression) != 0) { # if (OPENSSL_VERSION_NUMBER >= 0x00908000L) handle_->comp_methods = SSL_COMP_get_compression_methods(); # endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L) o ^= context::no_compression; } # endif // !defined(SSL_OP_NO_COMPRESSION) ::SSL_CTX_clear_options(handle_, o); ec = boost::system::error_code(); #else // (OPENSSL_VERSION_NUMBER >= 0x009080DFL) // && (OPENSSL_VERSION_NUMBER != 0x00909000L) 
(void)o; ec = boost::asio::error::operation_not_supported; #endif // (OPENSSL_VERSION_NUMBER >= 0x009080DFL) // && (OPENSSL_VERSION_NUMBER != 0x00909000L) BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void context::set_options(context::options o) { boost::system::error_code ec; set_options(o, ec); boost::asio::detail::throw_error(ec, "set_options"); } BOOST_ASIO_SYNC_OP_VOID context::set_options( context::options o, boost::system::error_code& ec) { #if !defined(SSL_OP_NO_COMPRESSION) if ((o & context::no_compression) != 0) { #if (OPENSSL_VERSION_NUMBER >= 0x00908000L) handle_->comp_methods = boost::asio::ssl::detail::openssl_init<>::get_null_compression_methods(); #endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L) o ^= context::no_compression; } #endif // !defined(SSL_OP_NO_COMPRESSION) ::SSL_CTX_set_options(handle_, o); ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void context::set_verify_mode(verify_mode v) { boost::system::error_code ec; set_verify_mode(v, ec); boost::asio::detail::throw_error(ec, "set_verify_mode"); } BOOST_ASIO_SYNC_OP_VOID context::set_verify_mode( verify_mode v, boost::system::error_code& ec) { ::SSL_CTX_set_verify(handle_, v, ::SSL_CTX_get_verify_callback(handle_)); ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void context::set_verify_depth(int depth) { boost::system::error_code ec; set_verify_depth(depth, ec); boost::asio::detail::throw_error(ec, "set_verify_depth"); } BOOST_ASIO_SYNC_OP_VOID context::set_verify_depth( int depth, boost::system::error_code& ec) { ::SSL_CTX_set_verify_depth(handle_, depth); ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void context::load_verify_file(const std::string& filename) { boost::system::error_code ec; load_verify_file(filename, ec); boost::asio::detail::throw_error(ec, "load_verify_file"); } BOOST_ASIO_SYNC_OP_VOID context::load_verify_file( const std::string& filename, boost::system::error_code& ec) { ::ERR_clear_error(); if 
(::SSL_CTX_load_verify_locations(handle_, filename.c_str(), 0) != 1) { ec = boost::system::error_code( static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category()); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void context::add_certificate_authority(const const_buffer& ca) { boost::system::error_code ec; add_certificate_authority(ca, ec); boost::asio::detail::throw_error(ec, "add_certificate_authority"); } BOOST_ASIO_SYNC_OP_VOID context::add_certificate_authority( const const_buffer& ca, boost::system::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(ca) }; if (bio.p) { if (X509_STORE* store = ::SSL_CTX_get_cert_store(handle_)) { for (bool added = false;; added = true) { x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) }; if (!cert.p) { unsigned long err = ::ERR_get_error(); if (added && ERR_GET_LIB(err) == ERR_LIB_PEM && ERR_GET_REASON(err) == PEM_R_NO_START_LINE) break; ec = boost::system::error_code( static_cast<int>(err), boost::asio::error::get_ssl_category()); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } if (::X509_STORE_add_cert(store, cert.p) != 1) { ec = boost::system::error_code( static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category()); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } } } } ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void context::set_default_verify_paths() { boost::system::error_code ec; set_default_verify_paths(ec); boost::asio::detail::throw_error(ec, "set_default_verify_paths"); } BOOST_ASIO_SYNC_OP_VOID context::set_default_verify_paths( boost::system::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_set_default_verify_paths(handle_) != 1) { ec = boost::system::error_code( static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category()); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void 
context::add_verify_path(const std::string& path) { boost::system::error_code ec; add_verify_path(path, ec); boost::asio::detail::throw_error(ec, "add_verify_path"); } BOOST_ASIO_SYNC_OP_VOID context::add_verify_path( const std::string& path, boost::system::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_load_verify_locations(handle_, 0, path.c_str()) != 1) { ec = boost::system::error_code( static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category()); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_certificate( const const_buffer& certificate, file_format format) { boost::system::error_code ec; use_certificate(certificate, format, ec); boost::asio::detail::throw_error(ec, "use_certificate"); } BOOST_ASIO_SYNC_OP_VOID context::use_certificate( const const_buffer& certificate, file_format format, boost::system::error_code& ec) { ::ERR_clear_error(); if (format == context_base::asn1) { if (::SSL_CTX_use_certificate_ASN1(handle_, static_cast<int>(certificate.size()), static_cast<const unsigned char*>(certificate.data())) == 1) { ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } } else if (format == context_base::pem) { bio_cleanup bio = { make_buffer_bio(certificate) }; if (bio.p) { x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) }; if (cert.p) { if (::SSL_CTX_use_certificate(handle_, cert.p) == 1) { ec = boost::system::error_code(); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } } } } else { ec = boost::asio::error::invalid_argument; BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } ec = boost::system::error_code( static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category()); BOOST_ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_certificate_file( const std::string& filename, file_format format) { boost::system::error_code ec; use_certificate_file(filename, format, ec); boost::asio::detail::throw_error(ec, "use_certificate_file"); } 
// Loads a certificate from a file into the context, reporting failure
// through the supplied error code rather than throwing.
BOOST_ASIO_SYNC_OP_VOID context::use_certificate_file(
    const std::string& filename, file_format format,
    boost::system::error_code& ec)
{
  // Map the portable file_format enumeration onto OpenSSL's file types.
  int file_type;
  switch (format)
  {
  case context_base::asn1:
    file_type = SSL_FILETYPE_ASN1;
    break;
  case context_base::pem:
    file_type = SSL_FILETYPE_PEM;
    break;
  default:
    {
      ec = boost::asio::error::invalid_argument;
      BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
    }
  }

  // Drain any stale errors so ERR_get_error below reflects this call only.
  ::ERR_clear_error();

  if (::SSL_CTX_use_certificate_file(handle_, filename.c_str(), file_type) != 1)
  {
    ec = boost::system::error_code(
        static_cast<int>(::ERR_get_error()),
        boost::asio::error::get_ssl_category());
    BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
  }

  ec = boost::system::error_code();
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Throwing wrapper: loads a PEM certificate chain from an in-memory buffer.
void context::use_certificate_chain(const const_buffer& chain)
{
  boost::system::error_code ec;
  use_certificate_chain(chain, ec);
  boost::asio::detail::throw_error(ec, "use_certificate_chain");
}

// Loads a certificate chain (leaf certificate followed by any CA
// certificates, all PEM) from an in-memory buffer. Mirrors OpenSSL's
// SSL_CTX_use_certificate_chain_file but works on a buffer via a BIO.
BOOST_ASIO_SYNC_OP_VOID context::use_certificate_chain(
    const const_buffer& chain, boost::system::error_code& ec)
{
  ::ERR_clear_error();

  bio_cleanup bio = { make_buffer_bio(chain) };
  if (bio.p)
  {
    // Pick up the context's password callback so encrypted PEM data can be
    // decrypted. Pre-1.1.0 OpenSSL (and old LibreSSL) lacks the accessor
    // functions, so the struct members are read directly.
#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \
      && (!defined(LIBRESSL_VERSION_NUMBER) \
        || LIBRESSL_VERSION_NUMBER >= 0x2070000fL)) \
    || defined(BOOST_ASIO_USE_WOLFSSL)
    pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_);
    void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);
#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)
    pem_password_cb* callback = handle_->default_passwd_callback;
    void* cb_userdata = handle_->default_passwd_callback_userdata;
#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)
    // First entry in the buffer is the leaf certificate (X509_AUX form).
    x509_cleanup cert = {
      ::PEM_read_bio_X509_AUX(bio.p, 0,
          callback,
          cb_userdata) };
    if (!cert.p)
    {
      ec = boost::system::error_code(ERR_R_PEM_LIB,
          boost::asio::error::get_ssl_category());
      BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
    }

    int result = ::SSL_CTX_use_certificate(handle_, cert.p);
    if (result == 0 || ::ERR_peek_error() != 0)
    {
      ec = boost::system::error_code(
          static_cast<int>(::ERR_get_error()),
          boost::asio::error::get_ssl_category());
      BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
    }

    // Discard any chain certificates previously installed on the context
    // before adding the new ones.
#if ((OPENSSL_VERSION_NUMBER >= 0x10002000L) \
      && (!defined(LIBRESSL_VERSION_NUMBER) \
        || LIBRESSL_VERSION_NUMBER >= 0x2090100fL)) \
    || defined(BOOST_ASIO_USE_WOLFSSL)
    ::SSL_CTX_clear_chain_certs(handle_);
#else
    if (handle_->extra_certs)
    {
      ::sk_X509_pop_free(handle_->extra_certs, X509_free);
      handle_->extra_certs = 0;
    }
#endif // (OPENSSL_VERSION_NUMBER >= 0x10002000L)

    // Remaining PEM entries are the chain; SSL_CTX_add_extra_chain_cert
    // takes ownership of each cacert on success, so no cleanup wrapper here.
    while (X509* cacert = ::PEM_read_bio_X509(bio.p, 0,
          callback,
          cb_userdata))
    {
      if (!::SSL_CTX_add_extra_chain_cert(handle_, cacert))
      {
        ec = boost::system::error_code(
            static_cast<int>(::ERR_get_error()),
            boost::asio::error::get_ssl_category());
        BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
      }
    }

    // PEM_R_NO_START_LINE just means we ran off the end of the buffer,
    // which is the expected way for the loop above to terminate.
    result = ::ERR_peek_last_error();
    if ((ERR_GET_LIB(result) == ERR_LIB_PEM)
        && (ERR_GET_REASON(result) == PEM_R_NO_START_LINE))
    {
      ::ERR_clear_error();
      ec = boost::system::error_code();
      BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
    }
  }

  ec = boost::system::error_code(
      static_cast<int>(::ERR_get_error()),
      boost::asio::error::get_ssl_category());
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Throwing wrapper: loads a certificate chain from a PEM file.
void context::use_certificate_chain_file(const std::string& filename)
{
  boost::system::error_code ec;
  use_certificate_chain_file(filename, ec);
  boost::asio::detail::throw_error(ec, "use_certificate_chain_file");
}

// Loads a certificate chain from a PEM file, reporting failure via ec.
BOOST_ASIO_SYNC_OP_VOID context::use_certificate_chain_file(
    const std::string& filename, boost::system::error_code& ec)
{
  ::ERR_clear_error();

  if (::SSL_CTX_use_certificate_chain_file(handle_, filename.c_str()) != 1)
  {
    ec = boost::system::error_code(
        static_cast<int>(::ERR_get_error()),
        boost::asio::error::get_ssl_category());
    BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
  }

  ec = boost::system::error_code();
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Throwing wrapper: loads a private key from an in-memory buffer.
void context::use_private_key(
    const const_buffer& private_key, context::file_format format)
{
  boost::system::error_code ec;
  use_private_key(private_key, format, ec);
  boost::asio::detail::throw_error(ec, "use_private_key");
}

// Loads a private key (ASN.1/DER or PEM) from an in-memory buffer.
BOOST_ASIO_SYNC_OP_VOID context::use_private_key(
    const const_buffer& private_key, context::file_format format,
    boost::system::error_code& ec)
{
  ::ERR_clear_error();

  // Password callback lookup, with direct struct access on pre-1.1.0
  // OpenSSL / old LibreSSL which lack the accessor functions.
#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \
      && (!defined(LIBRESSL_VERSION_NUMBER) \
        || LIBRESSL_VERSION_NUMBER >= 0x2070000fL)) \
    || defined(BOOST_ASIO_USE_WOLFSSL)
  pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_);
  void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);
#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)
  pem_password_cb* callback = handle_->default_passwd_callback;
  void* cb_userdata = handle_->default_passwd_callback_userdata;
#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)

  bio_cleanup bio = { make_buffer_bio(private_key) };
  if (bio.p)
  {
    evp_pkey_cleanup evp_private_key = { 0 };
    switch (format)
    {
    case context_base::asn1:
      evp_private_key.p = ::d2i_PrivateKey_bio(bio.p, 0);
      break;
    case context_base::pem:
      evp_private_key.p = ::PEM_read_bio_PrivateKey(
          bio.p, 0, callback,
          cb_userdata);
      break;
    default:
      {
        ec = boost::asio::error::invalid_argument;
        BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
      }
    }

    if (evp_private_key.p)
    {
      if (::SSL_CTX_use_PrivateKey(handle_, evp_private_key.p) == 1)
      {
        ec = boost::system::error_code();
        BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
      }
    }
  }

  ec = boost::system::error_code(
      static_cast<int>(::ERR_get_error()),
      boost::asio::error::get_ssl_category());
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Throwing wrapper: loads a private key from a file.
void context::use_private_key_file(
    const std::string& filename, context::file_format format)
{
  boost::system::error_code ec;
  use_private_key_file(filename, format, ec);
  boost::asio::detail::throw_error(ec, "use_private_key_file");
}

// Throwing wrapper: loads an RSA private key from an in-memory buffer.
void context::use_rsa_private_key(
    const const_buffer& private_key, context::file_format format)
{
  boost::system::error_code ec;
  use_rsa_private_key(private_key, format, ec);
  boost::asio::detail::throw_error(ec, "use_rsa_private_key");
}

// Loads an RSA private key (ASN.1/DER or PEM) from an in-memory buffer.
BOOST_ASIO_SYNC_OP_VOID context::use_rsa_private_key(
    const const_buffer& private_key, context::file_format format,
    boost::system::error_code& ec)
{
  ::ERR_clear_error();

#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \
      && (!defined(LIBRESSL_VERSION_NUMBER) \
        || LIBRESSL_VERSION_NUMBER >= 0x2070000fL)) \
    || defined(BOOST_ASIO_USE_WOLFSSL)
  pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_);
  void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);
#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)
  pem_password_cb* callback = handle_->default_passwd_callback;
  void* cb_userdata = handle_->default_passwd_callback_userdata;
#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)

  bio_cleanup bio = { make_buffer_bio(private_key) };
  if (bio.p)
  {
    rsa_cleanup rsa_private_key = { 0 };
    switch (format)
    {
    case context_base::asn1:
      rsa_private_key.p = ::d2i_RSAPrivateKey_bio(bio.p, 0);
      break;
    case context_base::pem:
      rsa_private_key.p = ::PEM_read_bio_RSAPrivateKey(
          bio.p, 0, callback,
          cb_userdata);
      break;
    default:
      {
        ec = boost::asio::error::invalid_argument;
        BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
      }
    }

    if (rsa_private_key.p)
    {
      if (::SSL_CTX_use_RSAPrivateKey(handle_, rsa_private_key.p) == 1)
      {
        ec = boost::system::error_code();
        BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
      }
    }
  }

  ec = boost::system::error_code(
      static_cast<int>(::ERR_get_error()),
      boost::asio::error::get_ssl_category());
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Loads a private key from a file, reporting failure via ec.
BOOST_ASIO_SYNC_OP_VOID context::use_private_key_file(
    const std::string& filename, context::file_format format,
    boost::system::error_code& ec)
{
  // Map the portable file_format enumeration onto OpenSSL's file types.
  int file_type;
  switch (format)
  {
  case context_base::asn1:
    file_type = SSL_FILETYPE_ASN1;
    break;
  case context_base::pem:
    file_type = SSL_FILETYPE_PEM;
    break;
  default:
    {
      ec = boost::asio::error::invalid_argument;
      BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
    }
  }

  ::ERR_clear_error();

  if (::SSL_CTX_use_PrivateKey_file(
        handle_, filename.c_str(), file_type) != 1)
  {
    ec = boost::system::error_code(
        static_cast<int>(::ERR_get_error()),
        boost::asio::error::get_ssl_category());
    BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
  }

  ec = boost::system::error_code();
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Throwing wrapper: loads an RSA private key from a file.
void context::use_rsa_private_key_file(
    const std::string& filename, context::file_format format)
{
  boost::system::error_code ec;
  use_rsa_private_key_file(filename, format, ec);
  boost::asio::detail::throw_error(ec, "use_rsa_private_key_file");
}

// Loads an RSA private key from a file, reporting failure via ec.
BOOST_ASIO_SYNC_OP_VOID context::use_rsa_private_key_file(
    const std::string& filename, context::file_format format,
    boost::system::error_code& ec)
{
  int file_type;
  switch (format)
  {
  case context_base::asn1:
    file_type = SSL_FILETYPE_ASN1;
    break;
  case context_base::pem:
    file_type = SSL_FILETYPE_PEM;
    break;
  default:
    {
      ec = boost::asio::error::invalid_argument;
      BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
    }
  }

  ::ERR_clear_error();

  if (::SSL_CTX_use_RSAPrivateKey_file(
        handle_, filename.c_str(), file_type) != 1)
  {
    ec = boost::system::error_code(
        static_cast<int>(::ERR_get_error()),
        boost::asio::error::get_ssl_category());
    BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
  }

  ec = boost::system::error_code();
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Throwing wrapper: installs temporary Diffie-Hellman parameters from a
// PEM buffer.
void context::use_tmp_dh(const const_buffer& dh)
{
  boost::system::error_code ec;
  use_tmp_dh(dh, ec);
  boost::asio::detail::throw_error(ec, "use_tmp_dh");
}

// Installs temporary DH parameters from an in-memory PEM buffer.
BOOST_ASIO_SYNC_OP_VOID context::use_tmp_dh(
    const const_buffer& dh, boost::system::error_code& ec)
{
  ::ERR_clear_error();

  bio_cleanup bio = { make_buffer_bio(dh) };
  if (bio.p)
  {
    return do_use_tmp_dh(bio.p, ec);
  }

  ec = boost::system::error_code(
      static_cast<int>(::ERR_get_error()),
      boost::asio::error::get_ssl_category());
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Throwing wrapper: installs temporary DH parameters from a PEM file.
void context::use_tmp_dh_file(const std::string& filename)
{
  boost::system::error_code ec;
  use_tmp_dh_file(filename, ec);
  boost::asio::detail::throw_error(ec, "use_tmp_dh_file");
}

// Installs temporary DH parameters from a PEM file, via a file BIO.
BOOST_ASIO_SYNC_OP_VOID context::use_tmp_dh_file(
    const std::string& filename, boost::system::error_code& ec)
{
  ::ERR_clear_error();

  bio_cleanup bio = { ::BIO_new_file(filename.c_str(), "r") };
  if (bio.p)
  {
    return do_use_tmp_dh(bio.p, ec);
  }

  ec = boost::system::error_code(
      static_cast<int>(::ERR_get_error()),
      boost::asio::error::get_ssl_category());
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Shared helper: reads DH parameters from the given BIO and applies them
// to the context.
BOOST_ASIO_SYNC_OP_VOID context::do_use_tmp_dh(
    BIO* bio, boost::system::error_code& ec)
{
  ::ERR_clear_error();

  dh_cleanup dh = { ::PEM_read_bio_DHparams(bio, 0, 0, 0) };
  if (dh.p)
  {
    if (::SSL_CTX_set_tmp_dh(handle_, dh.p) == 1)
    {
      ec = boost::system::error_code();
      BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
    }
  }

  ec = boost::system::error_code(
      static_cast<int>(::ERR_get_error()),
      boost::asio::error::get_ssl_category());
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Stores the user's verify callback in the SSL_CTX app data slot (deleting
// any previous one) and routes OpenSSL verification through the static
// trampoline verify_callback_function.
BOOST_ASIO_SYNC_OP_VOID context::do_set_verify_callback(
    detail::verify_callback_base* callback, boost::system::error_code& ec)
{
  if (SSL_CTX_get_app_data(handle_))
  {
    delete static_cast<detail::verify_callback_base*>(
        SSL_CTX_get_app_data(handle_));
  }

  SSL_CTX_set_app_data(handle_, callback);

  ::SSL_CTX_set_verify(handle_,
      ::SSL_CTX_get_verify_mode(handle_),
      &context::verify_callback_function);

  ec = boost::system::error_code();
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Static trampoline invoked by OpenSSL during certificate verification.
// Walks X509_STORE_CTX -> SSL -> SSL_CTX to recover the user callback
// stored in app data; returns 1 to accept, 0 to reject.
int context::verify_callback_function(int preverified, X509_STORE_CTX* ctx)
{
  if (ctx)
  {
    if (SSL* ssl = static_cast<SSL*>(
          ::X509_STORE_CTX_get_ex_data(
            ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx())))
    {
      if (SSL_CTX* handle = ::SSL_get_SSL_CTX(ssl))
      {
        if (SSL_CTX_get_app_data(handle))
        {
          detail::verify_callback_base* callback =
            static_cast<detail::verify_callback_base*>(
                SSL_CTX_get_app_data(handle));

          verify_context verify_ctx(ctx);
          return callback->call(preverified != 0, verify_ctx) ? 1 : 0;
        }
      }
    }
  }

  // No callback installed (or no context available): reject.
  return 0;
}

// Stores the user's password callback as the default passwd cb userdata
// (deleting any previous one) and routes OpenSSL password prompts through
// the static trampoline password_callback_function.
BOOST_ASIO_SYNC_OP_VOID context::do_set_password_callback(
    detail::password_callback_base* callback, boost::system::error_code& ec)
{
#if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \
      && (!defined(LIBRESSL_VERSION_NUMBER) \
        || LIBRESSL_VERSION_NUMBER >= 0x2070000fL)) \
    || defined(BOOST_ASIO_USE_WOLFSSL)
  void* old_callback = ::SSL_CTX_get_default_passwd_cb_userdata(handle_);
  ::SSL_CTX_set_default_passwd_cb_userdata(handle_, callback);
#else // (OPENSSL_VERSION_NUMBER >= 0x10100000L)
  void* old_callback = handle_->default_passwd_callback_userdata;
  handle_->default_passwd_callback_userdata = callback;
#endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L)

  if (old_callback)
    delete static_cast<detail::password_callback_base*>(
        old_callback);

  SSL_CTX_set_default_passwd_cb(handle_,
      &context::password_callback_function);

  ec = boost::system::error_code();
  BOOST_ASIO_SYNC_OP_VOID_RETURN(ec);
}

// Static trampoline invoked by OpenSSL when a pass phrase is needed.
// Copies the user-supplied password into buf (truncated to size - 1) and
// returns its length; returns 0 when no callback data is available.
int context::password_callback_function(
    char* buf, int size, int purpose, void* data)
{
  using namespace std; // For strncat and strlen.

  if (data)
  {
    detail::password_callback_base* callback =
      static_cast<detail::password_callback_base*>(data);

    std::string passwd = callback->call(static_cast<std::size_t>(size),
        purpose ? context_base::for_writing : context_base::for_reading);

#if defined(BOOST_ASIO_HAS_SECURE_RTL)
    strcpy_s(buf, size, passwd.c_str());
#else // defined(BOOST_ASIO_HAS_SECURE_RTL)
    *buf = '\0';
    if (size > 0)
      strncat(buf, passwd.c_str(), size - 1);
#endif // defined(BOOST_ASIO_HAS_SECURE_RTL)

    return static_cast<int>(strlen(buf));
  }

  return 0;
}

// Wraps an asio buffer in a read-only memory BIO for the PEM/DER readers.
BIO* context::make_buffer_bio(const const_buffer& b)
{
  return ::BIO_new_mem_buf(
      const_cast<void*>(b.data()),
      static_cast<int>(b.size()));
}

} // namespace ssl
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // BOOST_ASIO_SSL_IMPL_CONTEXT_IPP
#include "wasgo_state.h"

#include "core/io/marshalls.h"
#include "wasgo_callable.h"
#include "wasgo_runtime.h"
#include "wasm_export.h"

#include <cstdint>

// Instantiates the attached wasm module, creates its execution environment,
// and looks up the optional Godot-style entry points (_ready, _process,
// _input, ...), enabling the corresponding node processing for each one
// that exists. Any previous instance is torn down first via _stop().
void WasGoState::_initialize() {
	_stop();
	reference_object(this);
	ERR_FAIL_COND(!wasm_script.is_valid());
	ERR_FAIL_COND(!is_inside_tree());
	String err_string;
	module_inst = WasGoRuntime::get_singleton()->instantiate_module(wasm_script->get_rid(), stack_size, heap_size, err_string);
	ERR_FAIL_COND_MSG(module_inst == nullptr, vformat("Failed to instantiate wasm module %s: \"%s\".", get_path(), err_string));
	exec_env = wasm_runtime_create_exec_env(module_inst, stack_size);
	// Store this node as user data so native host functions can recover it
	// from the exec_env they are called with.
	wasm_runtime_set_user_data(exec_env, this);

	void *wasgo_func = wasm_runtime_lookup_function(module_inst, "test", "()i");
	// BUGFIX: was printf("found func %x\n", ...) — "%x" expects unsigned int
	// and misprints/truncates a pointer on 64-bit targets; "%p" is the
	// correct conversion for pointer values.
	printf("found func %p\n", wasgo_func);

	// NOTE: the assignments inside the conditions below are intentional —
	// each lookup result is cached and tested in one step. The extra
	// parentheses document that and silence -Wparentheses.
	if ((notification_callback = wasm_runtime_lookup_function(module_inst, "_notification", NULL))) {
		print_line("The notification callback found.");
	}
	if ((ready_callback = wasm_runtime_lookup_function(module_inst, "_ready", NULL))) {
		print_line("The ready callback found.");
	}
	if ((process_callback = wasm_runtime_lookup_function(module_inst, "_process", NULL))) {
		set_process(true);
		print_line("The process callback found.");
	}
	if ((physics_process_callback = wasm_runtime_lookup_function(module_inst, "_physics_process", NULL))) {
		set_physics_process(true);
		print_line("The physics_process callback found.");
	}
	// For the input callbacks the module must export both the user-facing
	// handler (e.g. "_input") and the marshalling wrapper ("_wasgo_input");
	// only the wrapper is actually invoked by the host.
	if (wasm_runtime_lookup_function(module_inst, "_input", NULL) &&
			(input_callback = wasm_runtime_lookup_function(module_inst, "_wasgo_input", NULL))) {
		set_process_input(true);
		print_line("The input callback found.");
	}
	if (wasm_runtime_lookup_function(module_inst, "_unhandled_input", NULL) &&
			(unhandled_input_callback = wasm_runtime_lookup_function(module_inst, "_wasgo_unhandled_input", NULL))) {
		set_process_unhandled_input(true);
		print_line("The unhandled_input callback found.");
	}
	if (wasm_runtime_lookup_function(module_inst, "_unhandled_key_input", NULL) &&
			(unhandled_key_input_callback = wasm_runtime_lookup_function(module_inst, "_wasgo_unhandled_key_input", NULL))) {
		set_process_unhandled_key_input(true);
		print_line("The unhandled_key_input callback found.");
	}
}

// Releases the execution environment, module instance and module-owned
// buffer, then resets all cached lookups and object maps so the state can
// be re-initialized (or safely destroyed).
void WasGoState::_stop() {
	if (exec_env) {
		wasm_runtime_destroy_exec_env(exec_env);
	}
	if (module_inst) {
		if (wasm_buffer) {
			wasm_runtime_module_free(module_inst, wasm_buffer);
			// BUGFIX: reset after freeing — previously the stale handle was
			// kept, so a later _stop() after re-initialization would free an
			// offset belonging to the old instance.
			wasm_buffer = 0;
		}
		wasm_runtime_deinstantiate(module_inst);
	}
	last_id = 0;
	exec_env = nullptr;
	module_inst = nullptr;
	createdObjects.clear();
	createdObjectsReverse.clear();
	referencedObjects.clear();
	referencedObjectsReverse.clear();
	notification_callback = nullptr;
	ready_callback = nullptr;
	process_callback = nullptr;
	physics_process_callback = nullptr;
	input_callback = nullptr;
	unhandled_input_callback = nullptr;
	unhandled_key_input_callback = nullptr;
}

// Registers this node's methods and properties with Godot's ClassDB so
// they are visible to scripts and the editor inspector.
void WasGoState::_bind_methods() {
	ClassDB::bind_method(D_METHOD("set_wasm_script", "wasm_script"), &WasGoState::set_wasm_script);
	ClassDB::bind_method(D_METHOD("get_wasm_script"), &WasGoState::get_wasm_script);
	ClassDB::bind_method(D_METHOD("set_properties", "properties"), &WasGoState::set_properties);
	ClassDB::bind_method(D_METHOD("get_properties"), &WasGoState::get_properties);
	// ClassDB::bind_method(D_METHOD("set_property", "property", "value"), &WasGoState::set_property);
	// ClassDB::bind_method(D_METHOD("get_property", "property"), &WasGoState::get_property);
	ClassDB::bind_method(D_METHOD("set_stack_size", "stack_size"), &WasGoState::set_stack_size);
	ClassDB::bind_method(D_METHOD("get_stack_size"), &WasGoState::get_stack_size);
	ClassDB::bind_method(D_METHOD("set_heap_size", "heap_size"), &WasGoState::set_heap_size);
	ClassDB::bind_method(D_METHOD("get_heap_size"), &WasGoState::get_heap_size);

	// Typed property accessors for the wasm-side wrappers.
	ClassDB::bind_method(D_METHOD("set_property_bool", "property", "value"), &WasGoState::set_property_bool);
	ClassDB::bind_method(D_METHOD("get_property_bool", "property"), &WasGoState::get_property_bool);
	ClassDB::bind_method(D_METHOD("set_property_int", "property", "value"), &WasGoState::set_property_int);
	ClassDB::bind_method(D_METHOD("get_property_int", "property"), &WasGoState::get_property_int);
	ClassDB::bind_method(D_METHOD("set_property_float", "property", "value"), &WasGoState::set_property_float);
	ClassDB::bind_method(D_METHOD("get_property_float", "property"), &WasGoState::get_property_float);
	ClassDB::bind_method(D_METHOD("set_property_string", "property", "value"), &WasGoState::set_property_string);
	ClassDB::bind_method(D_METHOD("get_property_string", "property"), &WasGoState::get_property_string);
	ClassDB::bind_method(D_METHOD("set_property_vector2", "property", "value"), &WasGoState::set_property_vector2);
	ClassDB::bind_method(D_METHOD("get_property_vector2", "property"), &WasGoState::get_property_vector2);
	ClassDB::bind_method(D_METHOD("set_property_rect2", "property", "value"), &WasGoState::set_property_rect2);
	ClassDB::bind_method(D_METHOD("get_property_rect2", "property"), &WasGoState::get_property_rect2);
	ClassDB::bind_method(D_METHOD("set_property_vector3", "property", "value"), &WasGoState::set_property_vector3);
	ClassDB::bind_method(D_METHOD("get_property_vector3", "property"), &WasGoState::get_property_vector3);
	ClassDB::bind_method(D_METHOD("set_property_transform2d", "property", "value"), &WasGoState::set_property_transform2d);
	ClassDB::bind_method(D_METHOD("get_property_transform2d", "property"), &WasGoState::get_property_transform2d);
	ClassDB::bind_method(D_METHOD("set_property_plane", "property", "value"), &WasGoState::set_property_plane);
	ClassDB::bind_method(D_METHOD("get_property_plane", "property"), &WasGoState::get_property_plane);
	ClassDB::bind_method(D_METHOD("set_property_quat", "property", "value"), &WasGoState::set_property_quat);
	ClassDB::bind_method(D_METHOD("get_property_quat", "property"), &WasGoState::get_property_quat);
	ClassDB::bind_method(D_METHOD("set_property_aabb", "property", "value"), &WasGoState::set_property_aabb);
	ClassDB::bind_method(D_METHOD("get_property_aabb", "property"), &WasGoState::get_property_aabb);
	ClassDB::bind_method(D_METHOD("set_property_basis", "property", "value"), &WasGoState::set_property_basis);
	ClassDB::bind_method(D_METHOD("get_property_basis", "property"), &WasGoState::get_property_basis);
	ClassDB::bind_method(D_METHOD("set_property_transform", "property", "value"), &WasGoState::set_property_transform);
	ClassDB::bind_method(D_METHOD("get_property_transform", "property"), &WasGoState::get_property_transform);
	ClassDB::bind_method(D_METHOD("set_property_color", "property", "value"), &WasGoState::set_property_color);
	ClassDB::bind_method(D_METHOD("get_property_color", "property"), &WasGoState::get_property_color);
	ClassDB::bind_method(D_METHOD("set_property_nodepath", "property", "value"), &WasGoState::set_property_nodepath);
	ClassDB::bind_method(D_METHOD("get_property_nodepath", "property"), &WasGoState::get_property_nodepath);

	ClassDB::bind_method(D_METHOD("set_int_property", "value", "key"), &WasGoState::set_int_property);

	// Callbacks forwarded into the wasm module.
	ClassDB::bind_method(D_METHOD("_input", "p_event"), &WasGoState::_input);
	ClassDB::bind_method(D_METHOD("_unhandled_input", "p_event"), &WasGoState::_unhandled_input);
	ClassDB::bind_method(D_METHOD("_unhandled_key_input", "p_event"), &WasGoState::_unhandled_key_input);

	ClassDB::bind_method(D_METHOD("get_callable", "func", "definition"), &WasGoState::get_callable);

	// Inspector-visible properties.
	ADD_GROUP("script", "script_");
	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "script_binary", PROPERTY_HINT_RESOURCE_TYPE, "WasmResource"), "set_wasm_script", "get_wasm_script");
	ADD_PROPERTY(PropertyInfo(Variant::DICTIONARY, "script_properties", PROPERTY_HINT_NONE, ""), "set_properties", "get_properties");
	ADD_GROUP("runtime", "runtime_");
	ADD_PROPERTY(PropertyInfo(Variant::INT, "runtime_stack_size", PROPERTY_HINT_NONE, ""), "set_stack_size", "get_stack_size");
	ADD_PROPERTY(PropertyInfo(Variant::INT, "runtime_heap_size", PROPERTY_HINT_NONE, ""), "set_heap_size", "get_heap_size");
	ADD_GROUP("sync", "sync_");
}
void WasGoState::_validate_property(PropertyInfo &property) const { } void WasGoState::_notification(uint32_t p_what) { // TODO: Uncomment this if (!Engine::get_singleton()->is_editor_hint() || p_what == NOTIFICATION_READY) { // only run in game but use the ready function so that the properties autopopulate switch (p_what) { case NOTIFICATION_READY: { if (ready_callback) { if (!wasm_runtime_call_wasm(exec_env, ready_callback, 0, nullptr)) { printf("Wasm ready callback failed. %s\n", wasm_runtime_get_exception(module_inst)); } } } break; case NOTIFICATION_INTERNAL_PROCESS: case NOTIFICATION_PROCESS: { if (process_callback) { float delta = get_process_delta_time(); uint32_t argv[2]; memcpy(argv, &delta, sizeof(delta)); if (!wasm_runtime_call_wasm(exec_env, process_callback, 1, argv)) { printf("Wasm process callback failed. %s\n", wasm_runtime_get_exception(module_inst)); } } } break; case NOTIFICATION_INTERNAL_PHYSICS_PROCESS: case NOTIFICATION_PHYSICS_PROCESS: { if (physics_process_callback) { float delta = get_physics_process_delta_time(); uint32_t argv[2]; memcpy(argv, &delta, sizeof(delta)); if (!wasm_runtime_call_wasm(exec_env, physics_process_callback, 1, argv)) { printf("Wasm physics process callback failed. %s\n", wasm_runtime_get_exception(module_inst)); } } } break; default: { } break; } if (notification_callback) { uint32_t argv[2] = { 0, p_what }; // argv[0] is the return value if (!wasm_runtime_call_wasm(exec_env, notification_callback, 1, argv)) { printf("wasm notification callback failed. 
%s\n", wasm_runtime_get_exception(module_inst)); } } } } WasGoState::WasGoState() { properties = {}; stack_size = 8192; heap_size = 8192; _stop(); } WasGoState::~WasGoState() { _stop(); } Variant WasGoState::lookup_object(WasGoID id) { if (createdObjectsReverse.has(id)) { return createdObjectsReverse[id]; } else if (referencedObjectsReverse.has(id)) { return referencedObjectsReverse[id]; } Array print_arr; print_arr.append(id); print_error(String("Invalid WasGoID: {0}").format(print_arr)); return Variant(); } Variant WasGoState::lookup_createdObject(WasGoID id) { if (createdObjectsReverse.has(id)) { return createdObjectsReverse[id]; } Array print_arr; print_arr.append(id); print_error(String("Invalid WasGoID: {0}").format(print_arr)); return Variant(); } Variant WasGoState::lookup_referencedObject(WasGoID id) { if (referencedObjectsReverse.has(id)) { return referencedObjectsReverse[id]; } Array print_arr; print_arr.append(id); print_error(String("Invalid WasGoID: {0}").format(print_arr)); return Variant(); } // Variant *WasGoState::lookup_variant(WasGoID id){ // if(createdVariantsReverse.has(id)){ // return &createdVariantsReverse[id]; // } // return nullptr; // } WasGoState::WasGoID WasGoState::lookup_wasgo_object(Variant obj) { if (createdObjects.has(obj)) { return createdObjects[obj]; } else if (referencedObjects.has(obj)) { return referencedObjects[obj]; } print_error("Invalid Object Lookup"); return 0; } WasGoState::WasGoID WasGoState::lookup_wasgo_createdObject(Variant obj) { if (createdObjects.has(obj)) { return createdObjects[obj]; } print_error("Invalid Object Lookup"); return 0; } WasGoState::WasGoID WasGoState::lookup_wasgo_referencedObject(Variant obj) { if (referencedObjects.has(obj)) { return referencedObjects[obj]; } print_error("Invalid Object Lookup"); return 0; } bool WasGoState::is_active() { return module_inst && exec_env; } void WasGoState::set_stack_size(int p_stack_size) { // I don't think you can dynamically change the stack and heap sizes, 
so we're gonna only change it if the wasm module is not active if (!is_active()) { stack_size = p_stack_size; } } int WasGoState::get_stack_size() { return stack_size; } void WasGoState::set_heap_size(int p_heap_size) { // I don't think you can dynamically change the stack and heap sizes, so we're gonna only change it if the wasm module is not active if (!is_active()) { heap_size = p_heap_size; } } int WasGoState::get_heap_size() { return heap_size; } void WasGoState::set_wasm_script(Ref<WasmResource> p_wasm_script) { // Only change it if the wasm module is not active ERR_FAIL_COND(is_active()); // Fail instead of stopping. if (!is_active()) { _stop(); } wasm_script = p_wasm_script; _initialize(); if (is_inside_tree() && ready_callback) { // call the ready callback again because we must have missed the first one if (!wasm_runtime_call_wasm(exec_env, ready_callback, 0, nullptr)) { printf("wasm ready callback failed. %s\n", wasm_runtime_get_exception(module_inst)); } } } Ref<WasmResource> WasGoState::get_wasm_script() { return wasm_script; } void WasGoState::set_properties(Dictionary p_properties) { // I don't think you can dynamically change the stack and heap sizes, so we're gonna only change it if the wasm module is not active // if (!is_active()) { properties = p_properties; // } } Dictionary WasGoState::get_properties() { return properties; } void WasGoState::set_property(String key, Variant value) { // I don't think you can dynamically change the stack and heap sizes, so we're gonna only change it if the wasm module is not active // if (!is_active()) { properties[key] = value; // } } Variant WasGoState::get_property(String key) { return properties[key]; } WasGoState::WasGoID WasGoState::generate_id() { last_id++; return last_id; } WasGoState::WasGoID WasGoState::create_object(Variant obj) { WasGoID wasgo_id = 0; if (obj.get_type() != Variant::OBJECT || !obj.is_zero()) { if (createdObjects.has(obj)) { wasgo_id = createdObjects[obj]; } else if 
(referencedObjects.has(obj)) { wasgo_id = referencedObjects[obj]; } else { wasgo_id = generate_id(); // createdObjects.set(obj, wasgo_id); // createdObjectsReverse.set(wasgo_id, obj); createdObjects[obj] = wasgo_id; createdObjectsReverse[wasgo_id] = obj; } } return wasgo_id; } WasGoState::WasGoID WasGoState::reference_object(Variant obj) { WasGoID wasgo_id = 0; if (obj.get_type() != Variant::OBJECT || !obj.is_zero()) { if (referencedObjects.has(obj)) { wasgo_id = referencedObjects[obj]; } if (createdObjects.has(obj)) { wasgo_id = createdObjects[obj]; } else { wasgo_id = generate_id(); // referencedObjects.set(obj, wasgo_id); // referencedObjectsReverse.set(wasgo_id, obj); referencedObjects[obj] = wasgo_id; referencedObjectsReverse[wasgo_id] = obj; } } return wasgo_id; } // WasGoState::WasGoID WasGoState::create_variant(Variant var){ // WasGoID wasgo_id = generate_id(); // // createdVariants.set(var, wasgo_id); // createdVariantsReverse.set(wasgo_id, var); // return wasgo_id; // } // WasGoState::WasGoID WasGoState::create_object(Object obj) { // WasGoID wasgo_id = 0; // if (createdObjects.has(obj.get_instance_id())) { // wasgo_id = createdObjects[wasgo_id]; // } else { // wasgo_id = generate_id(); // createdObjects.set(obj.get_instance_id(), wasgo_id); // createdObjectsReverse.set(wasgo_id, obj.get_instance_id()); // } // return wasgo_id; // } // WasGoState::WasGoID WasGoState::reference_object(Object obj) { // return reference_object(obj.get_instance_id()); // } // WasGoState::WasGoID WasGoState::reference_object(Ref<Object> ref) { // WasGoID wasgo_id = 0; // //TODO: Figure out how to handle this case // // if (referencedObjects.has(obj_id)) { // // wasgo_id = referencedObjects[wasgo_id]; // // } else { // // wasgo_id = generate_id(); // // referencedObjects.set(obj_id, wasgo_id); // // referencedObjectsReverse.set(wasgo_id, obj_id); // // } // return wasgo_id; // } WasGoState::WasGoID WasGoState::handle_return_variant(Variant var) { // WasGoID id = 0; // 
if(var.is_ref()){ // if(var.get_type() == Variant::OBJECT){ // RefPtr ref = RefPtr(var); // Object *obj = (Object *)ref.get_data(); // id = reference_object(obj); // } else { // //We shouldn't be able to get here // printf("ERROR: unexpected ref type."); // } // } else { // // if (var.get_type() == Variant::OBJECT) { // // Object *obj = (Object *)var; // // if (obj) { // // id = create_object(obj); // // } // // } else { // // id = create_variant(var); // // } // id = create_object(var); // } // return id; return create_object(var); } // Regular Node Callbacks // void WasGoState::_enter_tree(){ // if(is_active()){ // call_wasm_function("_enter_tree"); // } // } // void WasGoState::_exit_tree(){ // if(is_active()){ // call_wasm_function("_exit_tree"); // } // } // String WasGoState::_get_configuration_warning(){ // if(is_active()){ // call_wasm_function("_get_configuration_warning"); // } // } void WasGoState::_input(const Ref<InputEvent> &p_event) { if (input_callback) { uint32_t argv[1]; argv[0] = reference_object(p_event); if (!wasm_runtime_call_wasm(exec_env, input_callback, 1, argv)) { printf("wasm input callback failed. %s\n", wasm_runtime_get_exception(module_inst)); } } } // void WasGoState::_physics_process(float delta){ // if(is_active()){ // call_wasm_function("_physics_process"); // } // } // void WasGoState::_process(float delta){ // if(is_active()){ // call_wasm_function("_process"); // } // } // void WasGoState::_ready(){ // if(is_active()){ // call_wasm_function("_ready"); // } // } void WasGoState::_unhandled_input(Ref<InputEvent> p_event) { if (unhandled_input_callback) { uint32_t argv[1]; argv[0] = reference_object(p_event); if (!wasm_runtime_call_wasm(exec_env, unhandled_input_callback, 1, argv)) { printf("wasm unhandled input callback failed. 
%s\n", wasm_runtime_get_exception(module_inst)); } } } void WasGoState::_unhandled_key_input(Ref<InputEventKey> p_event) { if (unhandled_key_input_callback) { uint32_t argv[1]; WasGoID wasgo_id = reference_object(p_event.ptr()); argv[0] = wasgo_id; if (!wasm_runtime_call_wasm(exec_env, unhandled_key_input_callback, 1, argv)) { printf("wasm unhandled key input callback failed. %s\n", wasm_runtime_get_exception(module_inst)); } } } WasGoState::WasGoID _wasgo_this_node(wasm_exec_env_t p_exec_env) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); if (state) { return state->lookup_wasgo_object(state); } return 0; } int _wasgo_get_property_bool(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); return (int)state->get_property(name); } void _wasgo_set_property_bool(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, int value) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); state->set_property(name, value); } int _wasgo_get_property_int(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); return (int)state->get_property(name); } void _wasgo_set_property_int(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, int value) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); state->set_property(name, value); } float _wasgo_get_property_float(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int 
property_name_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); return (float)state->get_property(name); } void _wasgo_set_property_float(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, float value) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); state->set_property(name, value); } void _wasgo_get_property_string(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); String ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_string(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_vector2(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Vector2 ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_vector2(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, 
property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_rect2(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Rect2 ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_rect2(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_vector3(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Vector3 ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_vector3(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_transform2d(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); 
decode_variant(name, property_name, property_name_size); Transform2D ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_transform2d(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_plane(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Plane ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_plane(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_quat(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Quaternion ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_quat(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); 
decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_basis(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Basis ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_basis(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_aabb(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); AABB ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_aabb(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_transform(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); 
decode_variant(name, property_name, property_name_size); Transform3D ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_transform(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_color(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Color ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_color(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } void _wasgo_get_property_nodepath(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); NodePath ret = state->get_property(name); encode_variant(ret, value, value_size); } void _wasgo_set_property_nodepath(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, uint8_t *value, int value_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); 
decode_variant(name, property_name, property_name_size); Variant value_var; decode_variant(value_var, value, value_size); state->set_property(name, value_var); } WasGoState::WasGoID _wasgo_get_property_object(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Object *ret = state->get_property(name); return state->lookup_wasgo_object(ret); } void _wasgo_set_property_object(wasm_exec_env_t p_exec_env, const uint8_t *property_name, int property_name_size, WasGoState::WasGoID p_wasgo_id) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); Variant name = String(); decode_variant(name, property_name, property_name_size); Object *obj = state->lookup_object(p_wasgo_id); if (obj) { state->set_property(name, obj); } } void _wasgo_set_process(wasm_exec_env_t p_exec_env, bool p_enable) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); state->set_process(p_enable); } void _wasgo_set_physics_process(wasm_exec_env_t p_exec_env, bool p_enable) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); state->set_physics_process(p_enable); } void _wasgo_set_process_internal(wasm_exec_env_t p_exec_env, bool p_enable) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); state->set_process_internal(p_enable); } void _wasgo_set_physics_process_internal(wasm_exec_env_t p_exec_env, bool p_enable) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); state->set_physics_process_internal(p_enable); } void _wasgo_set_process_input(wasm_exec_env_t p_exec_env, bool p_enable) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); state->set_process_input(p_enable); } void _wasgo_set_process_unhandled_input(wasm_exec_env_t p_exec_env, bool p_enable) { WasGoState *state = 
(WasGoState *)wasm_runtime_get_user_data(p_exec_env); state->set_process_unhandled_input(p_enable); } void _wasgo_set_process_unhandled_key_input(wasm_exec_env_t p_exec_env, bool p_enable) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); state->set_process_unhandled_key_input(p_enable); } bool _wasgo_is_processing(wasm_exec_env_t p_exec_env) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); return state->is_processing(); } bool _wasgo_is_physics_processing(wasm_exec_env_t p_exec_env) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); return state->is_physics_processing(); } bool _wasgo_is_processing_internal(wasm_exec_env_t p_exec_env) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); return state->is_processing_internal(); } bool _wasgo_is_physics_processing_internal(wasm_exec_env_t p_exec_env) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); return state->is_physics_processing_internal(); } bool _wasgo_is_processing_input(wasm_exec_env_t p_exec_env) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); return state->is_processing_input(); } bool _wasgo_is_processing_unhandled_input(wasm_exec_env_t p_exec_env) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); return state->is_processing_unhandled_input(); } bool _wasgo_is_processing_unhandled_key_input(wasm_exec_env_t p_exec_env) { WasGoState *state = (WasGoState *)wasm_runtime_get_user_data(p_exec_env); return state->is_processing_unhandled_key_input(); } bool WasGoState::get_property_bool(String key) { return get_property(key); } int WasGoState::get_property_int(String key) { return get_property(key); } float WasGoState::get_property_float(String key) { return get_property(key); } String WasGoState::get_property_string(String key) { return get_property(key); } Vector2 WasGoState::get_property_vector2(String key) { return 
get_property(key); } Rect2 WasGoState::get_property_rect2(String key) { return get_property(key); } Vector3 WasGoState::get_property_vector3(String key) { return get_property(key); } Transform2D WasGoState::get_property_transform2d(String key) { return get_property(key); } Plane WasGoState::get_property_plane(String key) { return get_property(key); } Quaternion WasGoState::get_property_quat(String key) { return get_property(key); } AABB WasGoState::get_property_aabb(String key) { return get_property(key); } Basis WasGoState::get_property_basis(String key) { return get_property(key); } Transform3D WasGoState::get_property_transform(String key) { return get_property(key); } Color WasGoState::get_property_color(String key) { return get_property(key); } NodePath WasGoState::get_property_nodepath(String key) { return get_property(key); } void WasGoState::set_property_bool(String key, bool p_value) { set_property(key, p_value); } void WasGoState::set_property_int(String key, int p_value) { set_property(key, p_value); } void WasGoState::set_property_float(String key, float p_value) { set_property(key, p_value); } void WasGoState::set_property_string(String key, String p_value) { set_property(key, p_value); } void WasGoState::set_property_vector2(String key, Vector2 p_value) { set_property(key, p_value); } void WasGoState::set_property_rect2(String key, Rect2 p_value) { set_property(key, p_value); } void WasGoState::set_property_vector3(String key, Vector3 p_value) { set_property(key, p_value); } void WasGoState::set_property_transform2d(String key, Transform2D p_value) { set_property(key, p_value); } void WasGoState::set_property_plane(String key, Plane p_value) { set_property(key, p_value); } void WasGoState::set_property_quat(String key, Quaternion p_value) { set_property(key, p_value); } void WasGoState::set_property_aabb(String key, AABB p_value) { set_property(key, p_value); } void WasGoState::set_property_basis(String key, Basis p_value) { set_property(key, 
p_value); } void WasGoState::set_property_transform(String key, Transform3D p_value) { set_property(key, p_value); } void WasGoState::set_property_color(String key, Color p_value) { set_property(key, p_value); } void WasGoState::set_property_nodepath(String key, NodePath p_value) { set_property(key, p_value); } void WasGoState::set_int_property(int p_value, String key) { set_property(key, p_value); } Callable WasGoState::get_callable(String p_func, String p_definition) { return (Callable)memnew(WasGoCallable(this, p_func, p_definition)); }
/**
 * @file
 * @brief functions used during combat
**/

#include "AppHdr.h"

#include "fight.h"

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include "art-enum.h"
#include "cloud.h"
#include "coord.h"
#include "coordit.h"
#include "delay.h"
#include "english.h"
#include "env.h"
#include "fineff.h"
#include "fprop.h"
#include "god-abil.h"
#include "god-passive.h" // passive_t::shadow_attacks
#include "hints.h"
#include "invent.h"
#include "item-prop.h"
#include "item-use.h"
#include "melee-attack.h"
#include "message.h"
#include "mgen-data.h"
#include "misc.h"
#include "mon-behv.h"
#include "mon-cast.h"
#include "mon-place.h"
#include "mon-util.h"
#include "ouch.h"
#include "player.h"
#include "prompt.h"
#include "random-var.h"
#include "religion.h"
#include "shopping.h"
#include "spl-miscast.h"
#include "spl-summoning.h"
#include "state.h"
#include "stringutil.h"
#include "target.h"
#include "terrain.h"
#include "transform.h"
#include "traps.h"
#include "travel.h"

/**
 * What are the odds of an HD-checking confusion effect (e.g. Confusing Touch,
 * Fungus Form, SPWPN_CHAOS maybe) to confuse a monster of the given HD?
 *
 * The chance falls off linearly with HD, hitting zero at 24 HD; a 0 HD
 * monster is confused 80% of the time.
 *
 * @param HD The current hit dice (level) of the monster to confuse.
 * @return A percentage chance (0-100) of confusing that monster.
 *         (Except it tops out at 80%.)
 */
int melee_confuse_chance(int HD)
{
    // Linear falloff, clamped at zero for monsters of 24+ HD.
    if (HD >= 24)
        return 0;
    return 80 * (24 - HD) / 24;
}

/**
 * Switch from a bad weapon to melee.
 *
 * This function assumes some weapon is being wielded.
 * @return whether a swap did occur.
 */
// If the wielded weapon is unsuitable for melee, try to swap to the melee
// weapon in slot 'a'/'b' (whichever is not currently wielded). Returns true
// if a weapon swap was performed (which consumes the turn).
static bool _autoswitch_to_melee()
{
    bool penance;
    if (is_melee_weapon(*you.weapon())
        && !needs_handle_warning(*you.weapon(), OPER_ATTACK, penance))
    {
        return false;
    }

    // Only the two conventional weapon slots ('a' and 'b') participate in
    // auto-switching; anything else means no swap.
    int item_slot;
    if (you.equip[EQ_WEAPON] == letter_to_index('a'))
        item_slot = letter_to_index('b');
    else if (you.equip[EQ_WEAPON] == letter_to_index('b'))
        item_slot = letter_to_index('a');
    else
        return false;

    if (!is_melee_weapon(you.inv[item_slot]))
        return false;

    return wield_weapon(true, item_slot);
}

/**
 * Handle melee combat between attacker and defender.
 *
 * Works using the new fight rewrite. For a monster attacking, this method
 * loops through all their available attacks, instantiating a new melee_attack
 * for each attack. Combat effects should not go here, if at all possible. This
 * is merely a wrapper function which is used to start combat.
 *
 * @param[in] attacker,defender The (non-null) participants in the attack.
 *                              Either may be killed as a result of the attack.
 * @param[out] did_hit If non-null, receives true if the attack hit the
 *                     defender, and false otherwise.
 * @param simu Is this a simulated attack?  Disables a few problematic
 *             effects such as blood spatter and distortion teleports.
 *
 * @return Whether the attack took time (i.e. wasn't cancelled).
 */
bool fight_melee(actor *attacker, actor *defender, bool *did_hit, bool simu)
{
    ASSERT(attacker); // XXX: change to actor &attacker
    ASSERT(defender); // XXX: change to actor &defender

    // A dead defender would result in us returning true without actually
    // taking an action.
    ASSERT(defender->alive());

    if (defender->is_player())
    {
        ASSERT(!crawl_state.game_is_arena());
        // Friendly and good neutral monsters won't attack unless confused.
        if (attacker->as_monster()->wont_attack()
            && !mons_is_confused(*attacker->as_monster())
            && !attacker->as_monster()->has_ench(ENCH_INSANE))
        {
            return false;
        }

        // In case the monster hasn't noticed you, bumping into it will
        // change that.
        behaviour_event(attacker->as_monster(), ME_ALERT, defender);
    }
    else if (attacker->is_player())
    {
        ASSERT(!crawl_state.game_is_arena());
        // Can't damage orbs this way.
        if (mons_is_projectile(defender->type) && !you.confused())
        {
            you.turn_is_over = false;
            return false;
        }

        if (!simu && Options.auto_switch && you.weapon()
            && _autoswitch_to_melee())
        {
            return true; // Is this right? We did take time, but we didn't melee
        }

        melee_attack attk(&you, defender);

        if (simu)
            attk.simu = true;

        // We're trying to hit a monster, break out of travel/explore now.
        interrupt_activity(AI_HIT_MONSTER, defender->as_monster());

        // Check if the player is fighting with something unsuitable,
        // or someone unsuitable.
        if (you.can_see(*defender) && !simu
            && !wielded_weapon_check(attk.weapon))
        {
            you.turn_is_over = false;
            return false;
        }

        if (!attk.attack())
        {
            // Attack was cancelled or unsuccessful...
            if (attk.cancel_attack)
                you.turn_is_over = false;
            return !attk.cancel_attack;
        }

        if (did_hit)
            *did_hit = attk.did_hit;

        // A spectral weapon attacks whenever the player does
        if (!simu && you.props.exists("spectral_weapon"))
            trigger_spectral_weapon(&you, defender);

        if (!simu && will_have_passive(passive_t::shadow_attacks))
            dithmenos_shadow_melee(defender);

        return true;
    }

    // If execution gets here, attacker != Player, so we can safely continue
    // with processing the number of attacks a monster has without worrying
    // about unpredictable or weird results from players.

    // If this is a spectral weapon check if it can attack
    if (attacker->type == MONS_SPECTRAL_WEAPON
        && !confirm_attack_spectral_weapon(attacker->as_monster(), defender))
    {
        // Pretend an attack happened,
        // so the weapon doesn't advance unnecessarily.
        return true;
    }

    // Hydra-style attackers get one attack round per head (above the base
    // attack count); everyone else gets the fixed maximum.
    const int nrounds = attacker->as_monster()->has_hydra_multi_attack()
                        ? attacker->heads() + MAX_NUM_ATTACKS - 1
                        : MAX_NUM_ATTACKS;
    coord_def pos = defender->pos();

    // Melee combat, tell attacker to wield its melee weapon.
    attacker->as_monster()->wield_melee_weapon();

    int effective_attack_number = 0;
    int attack_number;
    for (attack_number = 0; attack_number < nrounds && attacker->alive();
         ++attack_number, ++effective_attack_number)
    {
        if (!attacker->alive())
            return false;

        // Monster went away?
        if (!defender->alive()
            || defender->pos() != pos
            || defender->is_banished())
        {
            if (attacker == defender
                || !attacker->as_monster()->has_multitargeting())
            {
                break;
            }

            // Hydras can try and pick up a new monster to attack to
            // finish out their round. -cao
            bool end = true;
            for (adjacent_iterator i(attacker->pos()); i; ++i)
            {
                if (*i == you.pos() && !mons_aligned(attacker, &you))
                {
                    attacker->as_monster()->foe = MHITYOU;
                    attacker->as_monster()->target = you.pos();
                    defender = &you;
                    end = false;
                    break;
                }

                monster* mons = monster_at(*i);
                if (mons && !mons_aligned(attacker, mons))
                {
                    defender = mons;
                    end = false;

                    // Hydras can erratically target different
                    // monsters, so tell the player about it.
                    pos = mons->pos();
                    break;
                }
            }

            // No adjacent hostiles.
            if (end)
                break;
        }

        // AF_KITE attackers with polearm reach sometimes hop backwards
        // while attacking, to keep their distance from the foe.
        if (!simu && attacker->is_monster()
            && mons_attack_spec(*attacker->as_monster(), attack_number, true)
                   .flavour == AF_KITE
            && attacker->as_monster()->foe_distance() == 1
            && attacker->reach_range() == REACH_TWO
            && x_chance_in_y(3, 5))
        {
            monster* mons = attacker->as_monster();
            coord_def foepos = mons->get_foe()->pos();
            coord_def hopspot = mons->pos() - (foepos - mons->pos()).sgn();

            bool found = false;
            if (!monster_habitable_grid(mons, grd(hopspot))
                || actor_at(hopspot))
            {
                for (adjacent_iterator ai(mons->pos()); ai; ++ai)
                {
                    if (ai->distance_from(foepos) != 2)
                        continue;
                    else
                    {
                        if (monster_habitable_grid(mons, grd(*ai))
                            && !actor_at(*ai))
                        {
                            hopspot = *ai;
                            found = true;
                            break;
                        }
                    }
                }
            }
            else
                found = true;

            if (found)
            {
                const bool could_see = you.can_see(*mons);
                if (mons->move_to_pos(hopspot))
                {
                    if (could_see || you.can_see(*mons))
                    {
                        mprf("%s hops backward while attacking.",
                             mons->name(DESC_THE, true).c_str());
                    }
                    mons->speed_increment -= 2; // Add a small extra delay
                }
            }
        }

        melee_attack melee_attk(attacker, defender, attack_number,
                                effective_attack_number);

        if (simu)
            melee_attk.simu = true;

        // If the attack fails out, keep effective_attack_number up to
        // date so that we don't cause excess energy loss in monsters
        if (!melee_attk.attack())
            effective_attack_number = melee_attk.effective_attack_number;
        else if (did_hit && !(*did_hit))
            *did_hit = melee_attk.did_hit;

        fire_final_effects();
    }

    // A spectral weapon attacks whenever the player does
    if (!simu && attacker->props.exists("spectral_weapon"))
        trigger_spectral_weapon(attacker, defender);

    return true;
}

/**
 * If the given attacker attacks the given defender right now, what kind of
 * extra-damage "stab" attack can the attacker perform, if any?
 *
 * @param attacker  The attacker; may be null.
 * @param defender  The defender.
 * @param actual    True if we're actually committing to a stab, false if we're
 *                  just checking for display purposes.
 * @return          The best (most damaging) kind of stab available to the
 *                  attacker against this defender, or STAB_NO_STAB.
 */
stab_type find_stab_type(const actor *attacker,
                         const actor &defender,
                         bool actual)
{
    const monster* def = defender.as_monster();

    // Stabbing intelligent monsters is unchivalric, and disabled under TSO!
    // When just checking for display purposes, still indicate when monsters
    // are sleeping/paralysed etc.
    if (actual && attacker && attacker->is_player()
        && def && have_passive(passive_t::no_stabbing))
    {
        return STAB_NO_STAB;
    }

    // No stabbing monsters that cannot fight (e.g. plants) or monsters
    // the attacker can't see (either due to invisibility or being behind
    // opaque clouds).
    if (def && mons_is_firewood(*def))
        return STAB_NO_STAB;

    if (attacker && !attacker->can_see(defender))
        return STAB_NO_STAB;

    // The checks below are ordered from best stab type to worst; the first
    // condition that holds wins.

    // sleeping
    if (defender.asleep())
        return STAB_SLEEPING;

    // paralysed
    if (defender.paralysed())
        return STAB_PARALYSED;

    // petrified
    if (defender.petrified())
        return STAB_PETRIFIED;

    // petrifying
    if (def && def->petrifying())
        return STAB_PETRIFYING;

    // held in a net
    if (def && def->caught())
        return STAB_HELD_IN_NET;

    // invisible
    if (attacker && !attacker->visible_to(&defender))
        return STAB_INVISIBLE;

    // fleeing
    if (def && mons_is_fleeing(*def))
        return STAB_FLEEING;

    // allies
    if (def && def->friendly())
        return STAB_ALLY;

    // confused (but not perma-confused)
    if (def && mons_is_confused(*def, false))
        return STAB_CONFUSED;

    // Distracted (but not batty); this only applies to players.
    if (attacker && attacker->is_player()
        && def && def->foe != MHITYOU && !mons_is_batty(*def))
    {
        return STAB_DISTRACTED;
    }

    return STAB_NO_STAB;
}

/**
 * What bonus does this type of stab give the player when attacking?
 *
 * @param stab  The type of stab in question; e.g. STAB_SLEEPING.
 * @return      The bonus the stab gives. Note that this is used as a divisor
 *              for damage, so the larger the value we return here, the less
 *              bonus damage will be done.
 */
int stab_bonus_denom(stab_type stab)
{
    // XXX: if we don't get rid of this logic, turn it into a static array.
    switch (stab)
    {
        case STAB_NO_STAB:
        case NUM_STABS:
            return 0;
        case STAB_SLEEPING:
        case STAB_PARALYSED:
        case STAB_PETRIFIED:
            return 1;
        default:
            return 4;
    }
}

// True for damage flavours whose resistance is effectively all-or-nothing
// ("boolean") rather than graded; these get a different divisor in
// resist_adjust_damage().
static bool is_boolean_resist(beam_type flavour)
{
    switch (flavour)
    {
    case BEAM_ELECTRICITY:
    case BEAM_MIASMA: // rotting
    case BEAM_STICKY_FLAME:
    case BEAM_WATER:  // water asphyxiation damage,
                      // bypassed by being water inhabitant.
    case BEAM_POISON:
    case BEAM_POISON_ARROW:
        return true;
    default:
        return false;
    }
}

// Gets the percentage of the total damage of this damage flavour that can
// be resisted.
static inline int get_resistible_fraction(beam_type flavour)
{
    switch (flavour)
    {
    // Drowning damage from water is resistible by being a water thing, or
    // otherwise asphyx resistant.
    case BEAM_WATER:
        return 40;

    // Assume ice storm and throw icicle are mostly solid.
    case BEAM_ICE:
        return 40;

    case BEAM_LAVA:
        return 55;

    case BEAM_POISON_ARROW:
        return 70;

    default:
        return 100;
    }
}

// Maps a damage flavour to the defender's relevant resistance level
// (0 means "no applicable resistance", negative means vulnerability).
static int _beam_to_resist(const actor* defender, beam_type flavour)
{
    switch (flavour)
    {
        case BEAM_FIRE:
        case BEAM_LAVA:
            return defender->res_fire();
        case BEAM_DAMNATION:
            return defender->res_damnation();
        case BEAM_STEAM:
            return defender->res_steam();
        case BEAM_COLD:
        case BEAM_ICE:
            return defender->res_cold();
        case BEAM_WATER:
            return defender->res_water_drowning();
        case BEAM_ELECTRICITY:
            return defender->res_elec();
        case BEAM_NEG:
        case BEAM_PAIN:
        case BEAM_MALIGN_OFFERING:
            return defender->res_negative_energy();
        case BEAM_ACID:
            return defender->res_acid();
        case BEAM_POISON:
        case BEAM_POISON_ARROW:
            return defender->res_poison();
        case BEAM_HOLY:
            return defender->res_holy_energy();
        default:
            return 0;
    }
}

/**
 * Adjusts damage for elemental resists, electricity and poison.
 *
 * For players, damage is reduced to 1/2, 1/3, or 1/5 if res has values 1, 2,
 * or 3, respectively. "Boolean" resists (rElec, rPois) reduce damage to 1/3.
 * rN is a special case that reduces damage to 1/2, 1/4, 0 instead.
 *
 * For monsters, damage is reduced to 1/2, 1/5, and 0 for 1/2/3 resistance.
 * "Boolean" resists give 1/3, 1/6, 0 instead.
 *
 * @param defender      The victim of the attack.
 * @param flavour       The type of attack having its damage adjusted.
 *                      (Does not necessarily imply the attack is a beam.)
 * @param rawdamage     The base damage, to be adjusted by resistance.
 * @return              The amount of damage done, after resists are applied.
 */
int resist_adjust_damage(const actor* defender, beam_type flavour,
                         int rawdamage)
{
    const int res = _beam_to_resist(defender, flavour);
    if (!res)
        return rawdamage;

    const bool is_mon = defender->is_monster();

    // Only a flavour-dependent fraction of the damage is subject to the
    // resist at all; the remainder always gets through.
    const int resistible_fraction = get_resistible_fraction(flavour);

    int resistible = rawdamage * resistible_fraction / 100;
    const int irresistible = rawdamage - resistible;

    if (res > 0)
    {
        const bool immune_at_3_res = is_mon
                                     || flavour == BEAM_NEG
                                     || flavour == BEAM_PAIN
                                     || flavour == BEAM_MALIGN_OFFERING
                                     || flavour == BEAM_HOLY
                                     || flavour == BEAM_POISON // just the resistible part
                                     || flavour == BEAM_POISON_ARROW;

        // NB: precedence is ((immune_at_3_res && res >= 3) || res > 3) —
        // anything above resist level 3 blocks the resistible part entirely.
        if (immune_at_3_res && res >= 3 || res > 3)
            resistible = 0;
        else
        {
            // Is this a resist that claims to be boolean for damage purposes?
            const int bonus_res = (is_boolean_resist(flavour) ? 1 : 0);

            // Monster resistances are stronger than player versions.
            if (is_mon)
                resistible /= 1 + bonus_res + res * res;
            else if (flavour == BEAM_NEG
                     || flavour == BEAM_PAIN
                     || flavour == BEAM_MALIGN_OFFERING)
            {
                resistible /= res * 2;
            }
            else
                resistible /= (3 * res + 1) / 2 + bonus_res;
        }
    }
    else if (res < 0)
        resistible = resistible * 15 / 10; // vulnerability: +50% to the resistible part

    return max(resistible + irresistible, 0);
}

// Reduce damage by AC.
// In most cases, we want AC to mostly stop weak attacks completely but affect
// strong ones less, but the regular formula is too hard to apply well to cases
// when damage is spread into many small chunks.
//
// Every point of damage is processed independently. Every point of AC has
// an independent 1/81 chance of blocking that damage.
//
// AC 20 stops 22% of damage, AC 40 -- 39%, AC 80 -- 63%.
int apply_chunked_AC(int dam, int ac)
{
    // Fixed-point probability in 32 bits: compare each uniform draw from
    // get_uint32() against chance * 2^32.
    double chance = pow(80.0/81, ac);
    uint64_t cr = chance * (((uint64_t)1) << 32);

    int hurt = 0;
    for (int i = 0; i < dam; i++)
        if (get_uint32() < cr)
            hurt++;

    return hurt;
}

///////////////////////////////////////////////////////////////////////////

// Ask the player to confirm attacking with an unsuitable weapon (or bare
// hands, when melee weapons are available). Returns true if the attack
// should proceed.
bool wielded_weapon_check(item_def *weapon)
{
    bool penance = false;
    if (you.received_weapon_warning
        || (weapon
            && !needs_handle_warning(*weapon, OPER_ATTACK, penance)
            && is_melee_weapon(*weapon))
        || you.confused())
    {
        return true;
    }

    // Don't pester the player if they're using UC or if they don't have any
    // melee weapons yet.
    if (!weapon
        && (you.skill(SK_UNARMED_COMBAT) > 0
            || !any_of(you.inv.begin(), you.inv.end(),
                       [](item_def &it)
                       { return is_melee_weapon(it) && can_wield(&it); })))
    {
        return true;
    }

    string prompt;
    if (weapon)
    {
        prompt = "Really attack while wielding " + weapon->name(DESC_YOUR)
                 + "?";
    }
    else
        prompt = "Really attack barehanded?";
    if (penance)
        prompt += " This could place you under penance!";

    const bool result = yesno(prompt.c_str(), true, 'n');

    if (!result)
        canned_msg(MSG_OK);

    learned_something_new(HINT_WIELD_WEAPON); // for hints mode Rangers

    // Don't warn again if you decide to continue your attack.
    if (result)
        you.received_weapon_warning = true;

    return result;
}

/**
 * Should an attack from the given attacker against the given defender be
 * suppressed (used e.g. when selecting cleave targets)?
 *
 * @param attacker  The attacking creature.
 * @param defender  The potential victim.
 * @return          True if the defender should NOT be harmed: the two are
 *                  aligned, or the defender is the player and the attacker
 *                  is peaceful, or the attacker is the player and the
 *                  defender is non-hostile/neutral. False otherwise.
 */
static bool _dont_harm(const actor &attacker, const actor &defender)
{
    if (mons_aligned(&attacker, &defender))
        return true;

    if (defender.is_player())
        return attacker.wont_attack();

    if (attacker.is_player())
    {
        return defender.wont_attack()
               || mons_attitude(*defender.as_monster()) == ATT_NEUTRAL;
    }

    return false;
}

/**
 * List potential cleave targets (adjacent hostile creatures), including the
 * defender itself.
 *
 * @param attacker[in]   The attacking creature.
 * @param def[in]        The location of the targeted defender.
 * @param targets[out]   A list to be populated with targets.
 * @param which_attack   The attack_number (default -1, which uses the
 *                       default weapon).
 */
void get_cleave_targets(const actor &attacker, const coord_def& def,
                        list<actor*> &targets, int which_attack)
{
    // Prevent scanning invalid coordinates if the attacker dies partway through
    // a cleave (due to hitting explosive creatures, or perhaps other things)
    if (!attacker.alive())
        return;

    if (actor_at(def))
        targets.push_back(actor_at(def));

    const item_def* weap = attacker.weapon(which_attack);

    // Axes (and hydra-form/cleave-status players) sweep through the seven
    // other squares around the attacker, in a random rotational direction.
    if (weap && item_attack_skill(*weap) == SK_AXES
        || attacker.is_player()
           && (you.form == transformation::hydra && you.heads() > 1
               || you.duration[DUR_CLEAVE]))
    {
        const coord_def atk = attacker.pos();
        coord_def atk_vector = def - atk;
        const int dir = random_choose(-1, 1);

        for (int i = 0; i < 7; ++i)
        {
            atk_vector = rotate_adjacent(atk_vector, dir);

            actor *target = actor_at(atk + atk_vector);
            if (target && !_dont_harm(attacker, *target))
                targets.push_back(target);
        }
    }

    // The Gyre & Gimble doubles up every target in the list.
    if (weap && is_unrandom_artefact(*weap, UNRAND_GYRE))
    {
        list<actor*> new_targets;
        for (actor* targ : targets)
        {
            new_targets.push_back(targ);
            new_targets.push_back(targ);
        }
        targets = new_targets;
    }
}

/**
 * Attack a provided list of cleave targets.
 *
 * @param attacker                  The attacking creature.
 * @param targets                   The targets to cleave.
 * @param attack_number             ?
 * @param effective_attack_number   ?
 * @param wu_jian_attack            The Wu Jian martial-attack type, if any.
 */
void attack_cleave_targets(actor &attacker, list<actor*> &targets,
                           int attack_number, int effective_attack_number,
                           wu_jian_attack_type wu_jian_attack)
{
    if (attacker.is_player())
    {
        const item_def* weap = attacker.weapon(attack_number);
        if ((wu_jian_attack == WU_JIAN_ATTACK_WHIRLWIND
             || wu_jian_attack == WU_JIAN_ATTACK_WALL_JUMP
             || wu_jian_attack == WU_JIAN_ATTACK_TRIGGERED_AUX)
            && !(weap && is_unrandom_artefact(*weap, UNRAND_GYRE)))
        {
            return; // WJC AOE attacks don't cleave, but G&G use cleaving
                    // XXX: If a player under Xom wrath gets cleaving while
                    // using G&G and worshiping Wu they'll be able to cleave
                    // their Wu attacks.
        }
    }

    while (attacker.alive() && !targets.empty())
    {
        actor* def = targets.front();

        // Targets may have died or moved since the list was built; re-check
        // validity before each swing.
        if (def && def->alive() && !_dont_harm(attacker, *def)
            && adjacent(attacker.pos(), def->pos()))
        {
            melee_attack attck(&attacker, def, attack_number,
                               ++effective_attack_number, true);
            attck.wu_jian_attack = wu_jian_attack;
            attck.attack();
        }
        targets.pop_front();
    }
}

/**
 * What skill is required to reach mindelay with a weapon? May be >27.
 * @param weapon The weapon to be considered.
 * @returns The level of the relevant skill you must reach.
 */
int weapon_min_delay_skill(const item_def &weapon)
{
    const int speed = property(weapon, PWPN_SPEED);
    const int mindelay = weapon_min_delay(weapon, false);
    // Each 2 skill levels shave one point of delay off the base speed.
    return (speed - mindelay) * 2;
}

/**
 * How fast will this weapon get from your skill training?
 *
 * @param weapon        the weapon to be considered.
 * @param check_speed   whether to take it into account if the weapon has the
 *                      speed brand.
 * @return How many aut the fastest possible attack with this weapon would
 *         take.
 */
int weapon_min_delay(const item_def &weapon, bool check_speed)
{
    const int base = property(weapon, PWPN_SPEED);
    int min_delay = base/2;

    // Short blades can get up to at least unarmed speed.
    if (item_attack_skill(weapon) == SK_SHORT_BLADES && min_delay > 5)
        min_delay = 5;

    // All weapons have min delay 7 or better
    if (min_delay > 7)
        min_delay = 7;

    // ...except crossbows...
    if (item_attack_skill(weapon) == SK_CROSSBOWS && min_delay < 10)
        min_delay = 10;

    // ... and unless it would take more than skill 27 to get there.
    // Round up the reduction from skill, so that min delay is rounded down.
    min_delay = max(min_delay, base - (MAX_SKILL_LEVEL + 1)/2);

    if (check_speed && get_weapon_brand(weapon) == SPWPN_SPEED)
    {
        min_delay *= 2;
        min_delay /= 3;
    }

    // never go faster than speed 3 (ie 3.33 attacks per round)
    if (min_delay < 3)
        min_delay = 3;

    return min_delay;
}

// Damage rating of a monster's launcher/weapon: base damage plus enchantment.
int mons_weapon_damage_rating(const item_def &launcher)
{
    return property(launcher, PWPN_DAMAGE) + launcher.plus;
}

// Returns a rough estimate of damage from firing/throwing missile.
int mons_missile_damage(monster* mons, const item_def *launch,
                        const item_def *missile)
{
    if (!missile || (!launch && !is_throwable(mons, *missile)))
        return 0;

    const int missile_damage = property(*missile, PWPN_DAMAGE) / 2 + 1;
    const int launch_damage  = launch ?
                               property(*launch, PWPN_DAMAGE) : 0;
    return max(0, launch_damage + missile_damage);
}

// Finds the best ranged option for the monster: fills *launcher with the
// launcher to use (or nullptr for throwing) and returns the inventory index
// of the missiles, or NON_ITEM if no usable missile attack exists.
int mons_usable_missile(monster* mons, item_def **launcher)
{
    *launcher = nullptr;

    item_def *launch = nullptr;
    for (int i = MSLOT_WEAPON; i <= MSLOT_ALT_WEAPON; ++i)
    {
        if (item_def *item = mons->mslot_item(static_cast<mon_inv_type>(i)))
        {
            if (is_range_weapon(*item))
                launch = item;
        }
    }

    // A launcher only counts if the carried missiles actually fit it.
    const item_def *missiles = mons->missiles();
    if (launch && missiles && !missiles->launched_by(*launch))
        launch = nullptr;

    const int fdam = mons_missile_damage(mons, launch, missiles);

    if (!fdam)
        return NON_ITEM;
    else
    {
        *launcher = launch;
        return missiles->index();
    }
}

// Determines whether attacking the given monster is morally questionable
// (ally, neutral, in sanctuary, would anger your god...). Fills in the
// adjective/suffix used to build a confirmation prompt, and sets
// would_cause_penance when the attack would be punished. Returns true if
// there is anything to warn about.
bool bad_attack(const monster *mon, string& adj, string& suffix,
                bool& would_cause_penance, coord_def attack_pos)
{
    ASSERT(mon); // XXX: change to const monster &mon
    ASSERT(!crawl_state.game_is_arena());

    if (!you.can_see(*mon))
        return false;

    if (attack_pos == coord_def(0, 0))
        attack_pos = you.pos();

    adj.clear();
    suffix.clear();
    would_cause_penance = false;

    if (is_sanctuary(mon->pos()) || is_sanctuary(attack_pos))
        suffix = ", despite your sanctuary";

    if (you.duration[DUR_LIFESAVING]
        && mon->holiness() & (MH_NATURAL | MH_PLANT))
    {
        suffix = " while asking for your life to be spared";
        would_cause_penance = true;
    }

    if (you_worship(GOD_JIYVA) && mons_is_slime(*mon)
        && !(mon->is_shapeshifter() && (mon->flags & MF_KNOWN_SHIFTER)))
    {
        would_cause_penance = true;
        return true;
    }

    if (mon->friendly())
    {
        if (god_hates_attacking_friend(you.religion, *mon))
        {
            adj = "your ally ";

            monster_info mi(mon, MILEV_NAME);
            if (!mi.is(MB_NAME_UNQUALIFIED))
                adj += "the ";

            would_cause_penance = true;
        }
        else
        {
            adj = "your ";

            monster_info mi(mon, MILEV_NAME);
            if (mi.is(MB_NAME_UNQUALIFIED))
                adj += "ally ";
        }

        return true;
    }

    if (mon->neutral() && is_good_god(you.religion))
    {
        adj += "neutral ";
        if (you_worship(GOD_SHINING_ONE) || you_worship(GOD_ELYVILON))
            would_cause_penance = true;
    }
    else if (mon->wont_attack())
    {
        adj += "non-hostile ";
        if (you_worship(GOD_SHINING_ONE) || you_worship(GOD_ELYVILON))
            would_cause_penance = true;
    }

    return !adj.empty() || !suffix.empty();
}

// Prompt the player before a questionable attack on a single monster
// (see bad_attack()). Returns true if the attack should be CANCELLED.
// *prompted (if non-null) is set to true when a prompt was shown.
bool stop_attack_prompt(const monster* mon, bool beam_attack,
                        coord_def beam_target, bool *prompted,
                        coord_def attack_pos)
{
    ASSERT(mon); // XXX: change to const monster &mon

    bool penance = false;

    if (prompted)
        *prompted = false;

    if (crawl_state.disables[DIS_CONFIRMATIONS])
        return false;

    if (you.confused() || !you.can_see(*mon))
        return false;

    string adj, suffix;
    if (!bad_attack(mon, adj, suffix, penance, attack_pos))
        return false;

    // Listed in the form: "your rat", "Blork the orc".
    string mon_name = mon->name(DESC_PLAIN);
    if (starts_with(mon_name, "the ")) // no "your the Royal Jelly" nor "the the RJ"
        mon_name = mon_name.substr(4); // strlen("the ")
    if (!starts_with(adj, "your"))
        adj = "the " + adj;
    mon_name = adj + mon_name;
    string verb;
    if (beam_attack)
    {
        verb = "fire ";
        if (beam_target == mon->pos())
            verb += "at ";
        else
        {
            verb += "in " + apostrophise(mon_name) + " direction";
            mon_name = "";
        }
    }
    else
        verb = "attack ";

    const string prompt = make_stringf("Really %s%s%s?%s",
             verb.c_str(), mon_name.c_str(), suffix.c_str(),
             penance ? " This attack would place you under penance!"
                     : "");

    if (prompted)
        *prompted = true;

    if (yesno(prompt.c_str(), false, 'n'))
        return false;
    else
    {
        canned_msg(MSG_OK);
        return true;
    }
}

// Area-of-effect variant: scans everything the targeter would affect and
// prompts once, listing all questionable victims. Returns true if the
// attack should be CANCELLED.
bool stop_attack_prompt(targeter &hitfunc, const char* verb,
                        function<bool(const actor *victim)> affects,
                        bool *prompted, const monster *defender)
{
    if (crawl_state.disables[DIS_CONFIRMATIONS])
        return false;

    if (crawl_state.which_god_acting() == GOD_XOM)
        return false;

    if (you.confused())
        return false;

    string adj, suffix;
    bool penance = false;
    bool defender_ok = true;
    counted_monster_list victims;
    for (distance_iterator di(hitfunc.origin, false, true, LOS_RADIUS); di;
         ++di)
    {
        if (hitfunc.is_affected(*di) <= AFF_NO)
            continue;

        const monster* mon = monster_at(*di);
        if (!mon || !you.can_see(*mon))
            continue;

        if (affects && !affects(mon))
            continue;

        string adjn, suffixn;
        bool penancen = false;
        if (bad_attack(mon, adjn, suffixn, penancen))
        {
            // record the adjectives for the first listed, or
            // first that would cause penance
            if (victims.empty() || penancen && !penance)
                adj = adjn, suffix = suffixn, penance = penancen;

            victims.add(mon);

            if (defender && defender == mon)
                defender_ok = false;
        }
    }

    if (victims.empty())
        return false;

    // Listed in the form: "your rat", "Blork the orc".
    string mon_name = victims.describe(DESC_PLAIN);
    if (starts_with(mon_name, "the ")) // no "your the Royal Jelly" nor "the the RJ"
        mon_name = mon_name.substr(4); // strlen("the ")
    if (!starts_with(adj, "your"))
        adj = "the " + adj;
    mon_name = adj + mon_name;

    const string prompt = make_stringf("Really %s%s %s%s?%s",
             verb, defender_ok ? " near" : "", mon_name.c_str(),
             suffix.c_str(),
             penance ? " This attack would place you under penance!" : "");

    if (prompted)
        *prompted = true;

    if (yesno(prompt.c_str(), false, 'n'))
        return false;
    else
    {
        canned_msg(MSG_OK);
        return true;
    }
}

// True if the given actor cannot be damaged by collisions.
// Currently only Bodach players in the appropriate aspect qualify.
bool actor_collision_immune(const actor *agent)
{
    if (agent->is_player() && you.species == SP_BODACH
        && you.attribute[ATTR_BODACH_ASPECT] == 4)
    {
        return true;
    }
    return false;
}
// BugReporter.cpp - Generate PathDiagnostics for Bugs ------------*- C++ -*--// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines BugReporter, a utility class for generating // PathDiagnostics. // //===----------------------------------------------------------------------===// #include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ParentMap.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/Analysis/CFG.h" #include "clang/Analysis/ProgramPoint.h" #include "clang/Basic/SourceManager.h" #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h" #include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/Statistic.h" #include "llvm/Support/raw_ostream.h" #include <memory> #include <queue> using namespace clang; using namespace ento; #define DEBUG_TYPE "BugReporter" STATISTIC(MaxBugClassSize, "The maximum number of bug reports in the same equivalence class"); STATISTIC(MaxValidBugClassSize, "The maximum number of bug reports in the same equivalence class " "where at least one report is valid (not suppressed)"); BugReporterVisitor::~BugReporterVisitor() {} void BugReporterContext::anchor() {} //===----------------------------------------------------------------------===// // Helper routines for walking the ExplodedGraph and fetching statements. 
//===----------------------------------------------------------------------===//

// Walks backwards through the node's predecessors and returns the first
// statement found, or null if no predecessor has one.
static const Stmt *GetPreviousStmt(const ExplodedNode *N) {
  for (N = N->getFirstPred(); N; N = N->getFirstPred())
    if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
      return S;

  return nullptr;
}

// Returns the statement at N itself, falling back to the nearest
// predecessor's statement.
static inline const Stmt*
GetCurrentOrPreviousStmt(const ExplodedNode *N) {
  if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
    return S;

  return GetPreviousStmt(N);
}

//===----------------------------------------------------------------------===//
// Diagnostic cleanup.
//===----------------------------------------------------------------------===//

// If X and Y describe the same condition at the same location, returns the
// piece to keep (the ConditionBRVisitor one); otherwise returns null.
static PathDiagnosticEventPiece *
eventsDescribeSameCondition(PathDiagnosticEventPiece *X,
                            PathDiagnosticEventPiece *Y) {
  // Prefer diagnostics that come from ConditionBRVisitor over
  // those that came from TrackConstraintBRVisitor.
  const void *tagPreferred = ConditionBRVisitor::getTag();
  const void *tagLesser = TrackConstraintBRVisitor::getTag();

  if (X->getLocation() != Y->getLocation())
    return nullptr;

  if (X->getTag() == tagPreferred && Y->getTag() == tagLesser)
    return X;

  if (Y->getTag() == tagPreferred && X->getTag() == tagLesser)
    return Y;

  return nullptr;
}

/// An optimization pass over PathPieces that removes redundant diagnostics
/// generated by both ConditionBRVisitor and TrackConstraintBRVisitor.  Both
/// BugReporterVisitors use different methods to generate diagnostics, with
/// one capable of emitting diagnostics in some cases but not in others.  This
/// can lead to redundant diagnostic pieces at the same point in a path.
static void removeRedundantMsgs(PathPieces &path) {
  unsigned N = path.size();
  if (N < 2)
    return;
  // NOTE: this loop intentionally is not using an iterator.
  // Instead, we are streaming the path and modifying it in place.  This is
  // done by grabbing the front, processing it, and if we decide to keep it
  // append it to the end of the path.  The entire path is processed in this
  // way.
  for (unsigned i = 0; i < N; ++i) {
    IntrusiveRefCntPtr<PathDiagnosticPiece> piece(path.front());
    path.pop_front();

    switch (piece->getKind()) {
      case clang::ento::PathDiagnosticPiece::Call:
        // Recurse into the inlined call's sub-path.
        removeRedundantMsgs(cast<PathDiagnosticCallPiece>(piece)->path);
        break;
      case clang::ento::PathDiagnosticPiece::Macro:
        removeRedundantMsgs(cast<PathDiagnosticMacroPiece>(piece)->subPieces);
        break;
      case clang::ento::PathDiagnosticPiece::ControlFlow:
        break;
      case clang::ento::PathDiagnosticPiece::Event: {
        if (i == N-1)
          break;

        if (PathDiagnosticEventPiece *nextEvent =
            dyn_cast<PathDiagnosticEventPiece>(path.front().get())) {
          PathDiagnosticEventPiece *event =
              cast<PathDiagnosticEventPiece>(piece);
          // Check to see if we should keep one of the two pieces.  If we
          // come up with a preference, record which piece to keep, and consume
          // another piece from the path.
          if (PathDiagnosticEventPiece *pieceToKeep =
                  eventsDescribeSameCondition(event, nextEvent)) {
            piece = pieceToKeep;
            path.pop_front();
            ++i;
          }
        }
        break;
      }
    }
    path.push_back(piece);
  }
}

/// A map from PathDiagnosticPiece to the LocationContext of the inlined
/// function call it represents.
typedef llvm::DenseMap<const PathPieces *, const LocationContext *>
        LocationContextMap;

/// Recursively scan through a path and prune out calls and macros pieces
/// that aren't needed.  Return true if afterwards the path contains
/// "interesting stuff" which means it shouldn't be pruned from the parent
/// path.
static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
                                LocationContextMap &LCM) {
  bool containsSomethingInteresting = false;
  const unsigned N = pieces.size();

  for (unsigned i = 0 ; i < N ; ++i) {
    // Remove the front piece from the path.  If it is still something we
    // want to keep once we are done, we will push it back on the end.
    IntrusiveRefCntPtr<PathDiagnosticPiece> piece(pieces.front());
    pieces.pop_front();

    switch (piece->getKind()) {
      case PathDiagnosticPiece::Call: {
        PathDiagnosticCallPiece *call = cast<PathDiagnosticCallPiece>(piece);
        // Check if the location context is interesting.
        assert(LCM.count(&call->path));
        if (R->isInteresting(LCM[&call->path])) {
          containsSomethingInteresting = true;
          break;
        }

        if (!removeUnneededCalls(call->path, R, LCM))
          continue; // drop the whole uninteresting call piece

        containsSomethingInteresting = true;
        break;
      }
      case PathDiagnosticPiece::Macro: {
        PathDiagnosticMacroPiece *macro = cast<PathDiagnosticMacroPiece>(piece);
        if (!removeUnneededCalls(macro->subPieces, R, LCM))
          continue;
        containsSomethingInteresting = true;
        break;
      }
      case PathDiagnosticPiece::Event: {
        PathDiagnosticEventPiece *event = cast<PathDiagnosticEventPiece>(piece);

        // We never throw away the event, but we do throw it away wholesale
        // as part of a path if we throw the entire path away.
        containsSomethingInteresting |= !event->isPrunable();
        break;
      }
      case PathDiagnosticPiece::ControlFlow:
        break;
    }

    pieces.push_back(piece);
  }

  return containsSomethingInteresting;
}

/// Returns true if the given decl has been implicitly given a body, either by
/// the analyzer or by the compiler proper.
static bool hasImplicitBody(const Decl *D) {
  assert(D);
  return D->isImplicit() || !D->hasBody();
}

/// Recursively scan through a path and make sure that all call pieces have
/// valid locations.
static void
adjustCallLocations(PathPieces &Pieces,
                    PathDiagnosticLocation *LastCallLocation = nullptr) {
  for (PathPieces::iterator I = Pieces.begin(), E = Pieces.end(); I != E;
       ++I) {
    PathDiagnosticCallPiece *Call = dyn_cast<PathDiagnosticCallPiece>(*I);

    if (!Call) {
      assert((*I)->getLocation().asLocation().isValid());
      continue;
    }

    if (LastCallLocation) {
      // Patch up invalid (or implicit-caller) enter/return locations with
      // the location of the enclosing call.
      bool CallerIsImplicit = hasImplicitBody(Call->getCaller());
      if (CallerIsImplicit || !Call->callEnter.asLocation().isValid())
        Call->callEnter = *LastCallLocation;
      if (CallerIsImplicit || !Call->callReturn.asLocation().isValid())
        Call->callReturn = *LastCallLocation;
    }

    // Recursively clean out the subclass.  Keep this call around if
    // it contains any informative diagnostics.
    PathDiagnosticLocation *ThisCallLocation;
    if (Call->callEnterWithin.asLocation().isValid() &&
        !hasImplicitBody(Call->getCallee()))
      ThisCallLocation = &Call->callEnterWithin;
    else
      ThisCallLocation = &Call->callEnter;

    assert(ThisCallLocation && "Outermost call has an invalid location");
    adjustCallLocations(Call->path, ThisCallLocation);
  }
}

/// Remove edges in and out of C++ default initializer expressions.  These are
/// for fields that have in-class initializers, as opposed to being
/// initialized explicitly in a constructor or braced list.
static void removeEdgesToDefaultInitializers(PathPieces &Pieces) {
  for (PathPieces::iterator I = Pieces.begin(), E = Pieces.end(); I != E;) {
    if (PathDiagnosticCallPiece *C = dyn_cast<PathDiagnosticCallPiece>(*I))
      removeEdgesToDefaultInitializers(C->path);

    if (PathDiagnosticMacroPiece *M = dyn_cast<PathDiagnosticMacroPiece>(*I))
      removeEdgesToDefaultInitializers(M->subPieces);

    if (PathDiagnosticControlFlowPiece *CF =
          dyn_cast<PathDiagnosticControlFlowPiece>(*I)) {
      const Stmt *Start = CF->getStartLocation().asStmt();
      const Stmt *End = CF->getEndLocation().asStmt();
      if (Start && isa<CXXDefaultInitExpr>(Start)) {
        I = Pieces.erase(I);
        continue;
      } else if (End && isa<CXXDefaultInitExpr>(End)) {
        // Splice the dropped edge's start location onto the following
        // control-flow piece so the path stays connected.
        PathPieces::iterator Next = std::next(I);
        if (Next != E) {
          if (PathDiagnosticControlFlowPiece *NextCF =
                dyn_cast<PathDiagnosticControlFlowPiece>(*Next)) {
            NextCF->setStartLocation(CF->getStartLocation());
          }
        }
        I = Pieces.erase(I);
        continue;
      }
    }

    I++;
  }
}

/// Remove all pieces with invalid locations as these cannot be serialized.
/// We might have pieces with invalid locations as a result of inlining Body
/// Farm generated functions.
static void removePiecesWithInvalidLocations(PathPieces &Pieces) {
  for (PathPieces::iterator I = Pieces.begin(), E = Pieces.end(); I != E;) {
    if (PathDiagnosticCallPiece *C = dyn_cast<PathDiagnosticCallPiece>(*I))
      removePiecesWithInvalidLocations(C->path);

    if (PathDiagnosticMacroPiece *M = dyn_cast<PathDiagnosticMacroPiece>(*I))
      removePiecesWithInvalidLocations(M->subPieces);

    if (!(*I)->getLocation().isValid() ||
        !(*I)->getLocation().asLocation().isValid()) {
      I = Pieces.erase(I);
      continue;
    }
    I++;
  }
}

//===----------------------------------------------------------------------===//
// PathDiagnosticBuilder and its associated routines and helper objects.
//===----------------------------------------------------------------------===//

namespace {
/// Resolves nodes of a trimmed exploded graph back to the corresponding
/// nodes of the original graph via the inter-graph map built during trimming.
class NodeMapClosure : public BugReport::NodeResolver {
  InterExplodedGraphMap &M;
public:
  NodeMapClosure(InterExplodedGraphMap &m) : M(m) {}

  const ExplodedNode *getOriginalNode(const ExplodedNode *N) override {
    return M.lookup(N);
  }
};

/// Context object threaded through path-diagnostic generation. Bundles the
/// report being processed, the consumer (which dictates generation scheme),
/// the node resolver, and the current LocationContext (mutable, updated as
/// the path walk crosses call boundaries).
class PathDiagnosticBuilder : public BugReporterContext {
  BugReport *R;
  PathDiagnosticConsumer *PDC;  // may be null; defaults are used then
  NodeMapClosure NMC;
public:
  const LocationContext *LC;

  PathDiagnosticBuilder(GRBugReporter &br,
                        BugReport *r, InterExplodedGraphMap &Backmap,
                        PathDiagnosticConsumer *pdc)
    : BugReporterContext(br),
      R(r), PDC(pdc), NMC(Backmap), LC(r->getErrorNode()->getLocationContext())
  {}

  PathDiagnosticLocation ExecutionContinues(const ExplodedNode *N);

  PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream &os,
                                            const ExplodedNode *N);

  BugReport *getBugReport() { return R; }

  Decl const &getCodeDecl() { return R->getErrorNode()->getCodeDecl(); }

  ParentMap& getParentMap() { return LC->getParentMap(); }

  const Stmt *getParent(const Stmt *S) {
    return getParentMap().getParent(S);
  }

  NodeMapClosure& getNodeResolver() override { return NMC; }

  PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);

  // With no consumer, fall back to the most detailed scheme.
  PathDiagnosticConsumer::PathGenerationScheme getGenerationScheme() const {
    return PDC ? PDC->getGenerationScheme() : PathDiagnosticConsumer::Extensive;
  }

  bool supportsLogicalOpControlFlow() const {
    return PDC ? PDC->supportsLogicalOpControlFlow() : true;
  }
};
} // end anonymous namespace

/// Return the location where execution continues after node N: the next
/// statement if there is one, otherwise the end of the enclosing declaration.
PathDiagnosticLocation
PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode *N) {
  if (const Stmt *S = PathDiagnosticLocation::getNextStmt(N))
    return PathDiagnosticLocation(S, getSourceManager(), LC);

  return PathDiagnosticLocation::createDeclEnd(N->getLocationContext(),
                                               getSourceManager());
}

/// Same as above, but also appends a human-readable description of the
/// continuation point to \p os (used to build diagnostic message text).
PathDiagnosticLocation
PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream &os,
                                          const ExplodedNode *N) {
  // Slow, but probably doesn't matter.
  if (os.str().empty())
    os << ' ';

  const PathDiagnosticLocation &Loc = ExecutionContinues(N);

  if (Loc.asStmt())
    os << "Execution continues on line "
       << getSourceManager().getExpansionLineNumber(Loc.asLocation())
       << '.';
  else {
    os << "Execution jumps to the end of the ";
    const Decl *D = N->getLocationContext()->getDecl();
    if (isa<ObjCMethodDecl>(D))
      os << "method";
    else if (isa<FunctionDecl>(D))
      os << "function";
    else {
      assert(isa<BlockDecl>(D));
      os << "anonymous block";
    }
    os << '.';
  }

  return Loc;
}

/// Step one level up the parent map, but only return parents that are
/// interesting for diagnostics: consumed-expression parents, or loop
/// statements. Returns null otherwise.
static const Stmt *getEnclosingParent(const Stmt *S, const ParentMap &PM) {
  if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S)))
    return PM.getParentIgnoreParens(S);

  const Stmt *Parent = PM.getParentIgnoreParens(S);
  if (!Parent)
    return nullptr;

  switch (Parent->getStmtClass()) {
  case Stmt::ForStmtClass:
  case Stmt::DoStmtClass:
  case Stmt::WhileStmtClass:
  case Stmt::ObjCForCollectionStmtClass:
  case Stmt::CXXForRangeStmtClass:
    return Parent;
  default:
    break;
  }

  return nullptr;
}

/// Walk up from S to find the statement a diagnostic edge should point at.
/// Stops at statement-level constructs (compound statements, loop bodies,
/// conditions) so edges attach to readable source positions. When
/// \p allowNestedContexts is true, prefers the enclosing construct (e.g. the
/// whole '?:' expression) over the subexpression itself.
static PathDiagnosticLocation
getEnclosingStmtLocation(const Stmt *S, SourceManager &SMgr, const ParentMap &P,
                         const LocationContext *LC, bool allowNestedContexts) {
  if (!S)
    return PathDiagnosticLocation();

  while (const Stmt *Parent = getEnclosingParent(S, P)) {
    switch (Parent->getStmtClass()) {
      case Stmt::BinaryOperatorClass: {
        const BinaryOperator *B = cast<BinaryOperator>(Parent);
        if (B->isLogicalOp())
          return PathDiagnosticLocation(allowNestedContexts ? B : S, SMgr, LC);
        break;
      }
      case Stmt::CompoundStmtClass:
      case Stmt::StmtExprClass:
        return PathDiagnosticLocation(S, SMgr, LC);
      case Stmt::ChooseExprClass:
        // Similar to '?' if we are referring to condition, just have the edge
        // point to the entire choose expression.
        if (allowNestedContexts || cast<ChooseExpr>(Parent)->getCond() == S)
          return PathDiagnosticLocation(Parent, SMgr, LC);
        else
          return PathDiagnosticLocation(S, SMgr, LC);
      case Stmt::BinaryConditionalOperatorClass:
      case Stmt::ConditionalOperatorClass:
        // For '?', if we are referring to condition, just have the edge point
        // to the entire '?' expression.
        if (allowNestedContexts ||
            cast<AbstractConditionalOperator>(Parent)->getCond() == S)
          return PathDiagnosticLocation(Parent, SMgr, LC);
        else
          return PathDiagnosticLocation(S, SMgr, LC);
      case Stmt::CXXForRangeStmtClass:
        if (cast<CXXForRangeStmt>(Parent)->getBody() == S)
          return PathDiagnosticLocation(S, SMgr, LC);
        break;
      case Stmt::DoStmtClass:
          return PathDiagnosticLocation(S, SMgr, LC);
      case Stmt::ForStmtClass:
        if (cast<ForStmt>(Parent)->getBody() == S)
          return PathDiagnosticLocation(S, SMgr, LC);
        break;
      case Stmt::IfStmtClass:
        if (cast<IfStmt>(Parent)->getCond() != S)
          return PathDiagnosticLocation(S, SMgr, LC);
        break;
      case Stmt::ObjCForCollectionStmtClass:
        if (cast<ObjCForCollectionStmt>(Parent)->getBody() == S)
          return PathDiagnosticLocation(S, SMgr, LC);
        break;
      case Stmt::WhileStmtClass:
        if (cast<WhileStmt>(Parent)->getCond() != S)
          return PathDiagnosticLocation(S, SMgr, LC);
        break;
      default:
        break;
    }

    // No terminating construct found at this level; keep climbing.
    S = Parent;
  }

  assert(S && "Cannot have null Stmt for PathDiagnosticLocation");

  return PathDiagnosticLocation(S, SMgr, LC);
}

PathDiagnosticLocation
PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
  assert(S && "Null Stmt passed to getEnclosingStmtLocation");
  return ::getEnclosingStmtLocation(S, getSourceManager(), getParentMap(), LC,
                                    /*allowNestedContexts=*/false);
}

//===----------------------------------------------------------------------===//
// "Visitors only" path diagnostic generation algorithm.
//===----------------------------------------------------------------------===//

/// Run all visitors over every node pair on the path, discarding the pieces
/// they produce. This is used when no path is wanted: the visitors may still
/// invalidate the report (e.g. suppression visitors), and that verdict is
/// what we return.
static bool GenerateVisitorsOnlyPathDiagnostic(
    PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
    ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
  // All path generation skips the very first node (the error node).
  // This is because there is special handling for the end-of-path note.
  N = N->getFirstPred();
  if (!N)
    return true;

  BugReport *R = PDB.getBugReport();
  while (const ExplodedNode *Pred = N->getFirstPred()) {
    for (auto &V : visitors) {
      // Visit all the node pairs, but throw the path pieces away.
      PathDiagnosticPiece *Piece = V->VisitNode(N, Pred, PDB, *R);
      delete Piece;
    }

    N = Pred;
  }

  return R->isValid();
}

//===----------------------------------------------------------------------===//
// "Minimal" path diagnostic generation algorithm.
//===----------------------------------------------------------------------===//
// (call piece, node at which the call was entered) — tracks the stack of
// calls currently open while walking the path backwards.
typedef std::pair<PathDiagnosticCallPiece*, const ExplodedNode*> StackDiagPair;
typedef SmallVector<StackDiagPair, 6> StackDiagVector;

/// If \p P carries a call-stack hint message, propagate that message to every
/// call piece currently on the active call stack (first writer wins).
static void updateStackPiecesWithMessage(PathDiagnosticPiece *P,
                                         StackDiagVector &CallStack) {
  // If the piece contains a special message, add it to all the call
  // pieces on the active stack.
  if (PathDiagnosticEventPiece *ep = dyn_cast<PathDiagnosticEventPiece>(P)) {

    if (ep->hasCallStackHint())
      for (StackDiagVector::iterator I = CallStack.begin(),
                                     E = CallStack.end(); I != E; ++I) {
        PathDiagnosticCallPiece *CP = I->first;
        const ExplodedNode *N = I->second;
        std::string stackMsg = ep->getCallStackMessage(N);

        // The last message on the path to final bug is the most important
        // one. Since we traverse the path backwards, do not add the message
        // if one has been previously added.
        if (!CP->hasCallStackMessage())
          CP->setCallStackMessage(stackMsg);
      }
  }
}

static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);

/// Build a "minimal" path diagnostic: walk the exploded-graph path backwards
/// from the error node, emitting control-flow pieces for interesting program
/// points (call enters/exits, branch terminators) and collecting pieces from
/// custom visitors. Returns false if a visitor invalidated the report.
static bool GenerateMinimalPathDiagnostic(
    PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
    LocationContextMap &LCM,
    ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {

  SourceManager& SMgr = PDB.getSourceManager();
  const LocationContext *LC = PDB.LC;
  const ExplodedNode *NextNode = N->pred_empty()
                                        ? nullptr : *(N->pred_begin());

  StackDiagVector CallStack;

  while (NextNode) {
    N = NextNode;
    PDB.LC = N->getLocationContext();
    NextNode = N->getFirstPred();
    ProgramPoint P = N->getLocation();

    // do/while(0) so each program-point handler can 'break' out early.
    do {
      // Walking backwards: a CallExitEnd is where we *enter* the callee's
      // portion of the path, so push a new call piece and descend into it.
      if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
        PathDiagnosticCallPiece *C =
            PathDiagnosticCallPiece::construct(N, *CE, SMgr);
        // Record the mapping from call piece to LocationContext.
        LCM[&C->path] = CE->getCalleeContext();
        PD.getActivePath().push_front(C);
        PD.pushActivePath(&C->path);
        CallStack.push_back(StackDiagPair(C, N));
        break;
      }

      if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
        // Flush all locations, and pop the active path.
        bool VisitedEntireCall = PD.isWithinCall();
        PD.popActivePath();

        // Either we just added a bunch of stuff to the top-level path, or
        // we have a previous CallExitEnd.  If the former, it means that the
        // path terminated within a function call.  We must then take the
        // current contents of the active path and place it within
        // a new PathDiagnosticCallPiece.
        PathDiagnosticCallPiece *C;
        if (VisitedEntireCall) {
          C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
        } else {
          const Decl *Caller = CE->getLocationContext()->getDecl();
          C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
          // Record the mapping from call piece to LocationContext.
          LCM[&C->path] = CE->getCalleeContext();
        }

        C->setCallee(*CE, SMgr);
        if (!CallStack.empty()) {
          assert(CallStack.back().first == C);
          CallStack.pop_back();
        }
        break;
      }

      // Branch taken: describe which way control flowed, keyed on the
      // source block's terminator statement.
      if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
        const CFGBlock *Src = BE->getSrc();
        const CFGBlock *Dst = BE->getDst();
        const Stmt *T = Src->getTerminator();

        if (!T)
          break;

        PathDiagnosticLocation Start =
            PathDiagnosticLocation::createBegin(T, SMgr,
                N->getLocationContext());

        switch (T->getStmtClass()) {
        default:
          break;

        case Stmt::GotoStmtClass:
        case Stmt::IndirectGotoStmtClass: {
          const Stmt *S = PathDiagnosticLocation::getNextStmt(N);

          if (!S)
            break;

          std::string sbuf;
          llvm::raw_string_ostream os(sbuf);
          const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);

          os << "Control jumps to line "
             << End.asLocation().getExpansionLineNumber();
          PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
              Start, End, os.str()));
          break;
        }

        case Stmt::SwitchStmtClass: {
          // Figure out what case arm we took.
          std::string sbuf;
          llvm::raw_string_ostream os(sbuf);

          if (const Stmt *S = Dst->getLabel()) {
            PathDiagnosticLocation End(S, SMgr, LC);

            switch (S->getStmtClass()) {
            default:
              os << "No cases match in the switch statement. "
                    "Control jumps to line "
                 << End.asLocation().getExpansionLineNumber();
              break;
            case Stmt::DefaultStmtClass:
              os << "Control jumps to the 'default' case at line "
                 << End.asLocation().getExpansionLineNumber();
              break;

            case Stmt::CaseStmtClass: {
              os << "Control jumps to 'case ";
              const CaseStmt *Case = cast<CaseStmt>(S);
              const Expr *LHS = Case->getLHS()->IgnoreParenCasts();

              // Determine if it is an enum.
              bool GetRawInt = true;

              if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
                // FIXME: Maybe this should be an assertion.  Are there cases
                // were it is not an EnumConstantDecl?
                const EnumConstantDecl *D =
                    dyn_cast<EnumConstantDecl>(DR->getDecl());

                if (D) {
                  GetRawInt = false;
                  os << *D;
                }
              }

              if (GetRawInt)
                os << LHS->EvaluateKnownConstInt(PDB.getASTContext());

              os << ":' at line "
                 << End.asLocation().getExpansionLineNumber();
              break;
            }
            }
            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                Start, End, os.str()));
          } else {
            // No label on the destination block: implicit default branch.
            os << "'Default' branch taken. ";
            const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                Start, End, os.str()));
          }

          break;
        }

        case Stmt::BreakStmtClass:
        case Stmt::ContinueStmtClass: {
          std::string sbuf;
          llvm::raw_string_ostream os(sbuf);
          PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
          PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
              Start, End, os.str()));
          break;
        }

        // Determine control-flow for ternary '?'.
        case Stmt::BinaryConditionalOperatorClass:
        case Stmt::ConditionalOperatorClass: {
          std::string sbuf;
          llvm::raw_string_ostream os(sbuf);
          os << "'?' condition is ";

          // By CFG convention the second successor is the false branch.
          if (*(Src->succ_begin()+1) == Dst)
            os << "false";
          else
            os << "true";

          PathDiagnosticLocation End = PDB.ExecutionContinues(N);

          if (const Stmt *S = End.asStmt())
            End = PDB.getEnclosingStmtLocation(S);

          PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
              Start, End, os.str()));
          break;
        }

        // Determine control-flow for short-circuited '&&' and '||'.
        case Stmt::BinaryOperatorClass: {
          if (!PDB.supportsLogicalOpControlFlow())
            break;

          const BinaryOperator *B = cast<BinaryOperator>(T);
          std::string sbuf;
          llvm::raw_string_ostream os(sbuf);
          os << "Left side of '";

          if (B->getOpcode() == BO_LAnd) {
            os << "&&" << "' is ";

            if (*(Src->succ_begin()+1) == Dst) {
              os << "false";
              PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
              PathDiagnosticLocation Start =
                  PathDiagnosticLocation::createOperatorLoc(B, SMgr);
              PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                  Start, End, os.str()));
            } else {
              os << "true";
              PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
              PathDiagnosticLocation End = PDB.ExecutionContinues(N);
              PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                  Start, End, os.str()));
            }
          } else {
            assert(B->getOpcode() == BO_LOr);
            os << "||" << "' is ";

            if (*(Src->succ_begin()+1) == Dst) {
              os << "false";
              PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
              PathDiagnosticLocation End = PDB.ExecutionContinues(N);
              PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                  Start, End, os.str()));
            } else {
              os << "true";
              PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
              PathDiagnosticLocation Start =
                  PathDiagnosticLocation::createOperatorLoc(B, SMgr);
              PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                  Start, End, os.str()));
            }
          }

          break;
        }

        case Stmt::DoStmtClass: {
          // For do..while the *first* successor is the back-edge (condition
          // true), unlike for/while below.
          if (*(Src->succ_begin()) == Dst) {
            std::string sbuf;
            llvm::raw_string_ostream os(sbuf);

            os << "Loop condition is true. ";
            PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);

            if (const Stmt *S = End.asStmt())
              End = PDB.getEnclosingStmtLocation(S);

            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                Start, End, os.str()));
          } else {
            PathDiagnosticLocation End = PDB.ExecutionContinues(N);

            if (const Stmt *S = End.asStmt())
              End = PDB.getEnclosingStmtLocation(S);

            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                Start, End, "Loop condition is false. Exiting loop"));
          }
          break;
        }

        case Stmt::WhileStmtClass:
        case Stmt::ForStmtClass: {
          if (*(Src->succ_begin()+1) == Dst) {
            std::string sbuf;
            llvm::raw_string_ostream os(sbuf);

            os << "Loop condition is false. ";
            PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
            if (const Stmt *S = End.asStmt())
              End = PDB.getEnclosingStmtLocation(S);

            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                Start, End, os.str()));
          } else {
            PathDiagnosticLocation End = PDB.ExecutionContinues(N);
            if (const Stmt *S = End.asStmt())
              End = PDB.getEnclosingStmtLocation(S);

            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                Start, End, "Loop condition is true. Entering loop body"));
          }

          break;
        }

        case Stmt::IfStmtClass: {
          PathDiagnosticLocation End = PDB.ExecutionContinues(N);

          if (const Stmt *S = End.asStmt())
            End = PDB.getEnclosingStmtLocation(S);

          if (*(Src->succ_begin()+1) == Dst)
            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                Start, End, "Taking false branch"));
          else
            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
                Start, End, "Taking true branch"));

          break;
        }
        }
      }
    } while(0);

    if (NextNode) {
      // Add diagnostic pieces from custom visitors.
      BugReport *R = PDB.getBugReport();
      for (auto &V : visitors) {
        if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *R)) {
          PD.getActivePath().push_front(p);
          updateStackPiecesWithMessage(p, CallStack);
        }
      }
    }
  }

  if (!PDB.getBugReport()->isValid())
    return false;

  // After constructing the full PathDiagnostic, do a pass over it to compact
  // PathDiagnosticPieces that occur within a macro.
  CompactPathDiagnostic(PD.getMutablePieces(), PDB.getSourceManager());
  return true;
}

//===----------------------------------------------------------------------===//
// "Extensive" PathDiagnostic generation.
//===----------------------------------------------------------------------===//

/// Returns true if S (after stripping parens/casts) is a short-circuiting
/// construct: a conditional operator or a logical '&&'/'||'.
static bool IsControlFlowExpr(const Stmt *S) {
  const Expr *E = dyn_cast<Expr>(S);

  if (!E)
    return false;

  E = E->IgnoreParenCasts();

  if (isa<AbstractConditionalOperator>(E))
    return true;

  if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
    if (B->isLogicalOp())
      return true;

  return false;
}

namespace {
/// A PathDiagnosticLocation plus a "dead" bit. Dead contexts are popped
/// from the EdgeBuilder context stack without emitting an edge.
class ContextLocation : public PathDiagnosticLocation {
  bool IsDead;
public:
  ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
    : PathDiagnosticLocation(L), IsDead(isdead) {}

  void markDead() { IsDead = true; }
  bool isDead() const { return IsDead; }
};

/// Retarget L at a more readable subexpression: parens, '?:', choose, and
/// binary operators are best referenced by (the first character of) their
/// condition/LHS. If \p firstCharOnly is set (or becomes set during the
/// descent), the result is collapsed to a single source location.
static PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
                                              const LocationContext *LC,
                                              bool firstCharOnly = false) {
  if (const Stmt *S = L.asStmt()) {
    const Stmt *Original = S;
    while (1) {
      // Adjust the location for some expressions that are best referenced
      // by one of their subexpressions.
      switch (S->getStmtClass()) {
        default:
          break;
        case Stmt::ParenExprClass:
        case Stmt::GenericSelectionExprClass:
          S = cast<Expr>(S)->IgnoreParens();
          firstCharOnly = true;
          continue;
        case Stmt::BinaryConditionalOperatorClass:
        case Stmt::ConditionalOperatorClass:
          S = cast<AbstractConditionalOperator>(S)->getCond();
          firstCharOnly = true;
          continue;
        case Stmt::ChooseExprClass:
          S = cast<ChooseExpr>(S)->getCond();
          firstCharOnly = true;
          continue;
        case Stmt::BinaryOperatorClass:
          S = cast<BinaryOperator>(S)->getLHS();
          firstCharOnly = true;
          continue;
      }

      break;
    }

    if (S != Original)
      L = PathDiagnosticLocation(S, L.getManager(), LC);
  }

  if (firstCharOnly)
    L = PathDiagnosticLocation::createSingleLocation(L);

  return L;
}

/// Builds control-flow edges for the "extensive" diagnostic scheme. Keeps a
/// stack of enclosing context locations (CLocs); edges are only emitted
/// between cleaned-up locations, and contexts are flushed as the walk leaves
/// them. PrevLoc is the most recent edge endpoint (edges are built backwards).
class EdgeBuilder {
  std::vector<ContextLocation> CLocs;
  typedef std::vector<ContextLocation>::iterator iterator;
  PathDiagnostic &PD;
  PathDiagnosticBuilder &PDB;
  PathDiagnosticLocation PrevLoc;

  bool IsConsumedExpr(const PathDiagnosticLocation &L);

  bool containsLocation(const PathDiagnosticLocation &Container,
                        const PathDiagnosticLocation &Containee);

  PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);

  // Pop the innermost context; emit an edge to it unless it is dead or
  // inside a macro.
  void popLocation() {
    if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
      // For contexts, we only want the first character as the range.
      rawAddEdge(cleanUpLocation(CLocs.back(), PDB.LC, true));
    }
    CLocs.pop_back();
  }

public:
  EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
    : PD(pd), PDB(pdb) {

      // If the PathDiagnostic already has pieces, add the enclosing statement
      // of the first piece as a context as well.
      if (!PD.path.empty()) {
        PrevLoc = (*PD.path.begin())->getLocation();

        if (const Stmt *S = PrevLoc.asStmt())
          addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
      }
  }

  ~EdgeBuilder() {
    while (!CLocs.empty()) popLocation();

    // Finally, add an initial edge from the start location of the first
    // statement (if it doesn't already exist).
    PathDiagnosticLocation L = PathDiagnosticLocation::createDeclBegin(
        PDB.LC, PDB.getSourceManager());
    if (L.isValid())
      rawAddEdge(L);
  }

  void flushLocations() {
    while (!CLocs.empty())
      popLocation();
    PrevLoc = PathDiagnosticLocation();
  }

  void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false,
               bool IsPostJump = false);

  void rawAddEdge(PathDiagnosticLocation NewLoc);

  void addContext(const Stmt *S);
  void addContext(const PathDiagnosticLocation &L);
  void addExtendedContext(const Stmt *S);
};
} // end anonymous namespace

/// Map a location to the context it should be grouped under: control-flow
/// expressions are their own context, everything else is grouped under its
/// enclosing statement.
PathDiagnosticLocation
EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
  if (const Stmt *S = L.asStmt()) {
    if (IsControlFlowExpr(S))
      return L;

    return PDB.getEnclosingStmtLocation(S);
  }

  return L;
}

/// Returns true if Container lexically contains Containee. Uses the parent
/// map when both are statements; otherwise falls back to comparing expansion
/// line/column ranges.
bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
                                   const PathDiagnosticLocation &Containee) {
  if (Container == Containee)
    return true;

  // A declaration context contains everything within it.
  if (Container.asDecl())
    return true;

  if (const Stmt *S = Containee.asStmt())
    if (const Stmt *ContainerS = Container.asStmt()) {
      while (S) {
        if (S == ContainerS)
          return true;
        S = PDB.getParent(S);
      }
      return false;
    }

  // Less accurate: compare using source ranges.
  SourceRange ContainerR = Container.asRange();
  SourceRange ContaineeR = Containee.asRange();

  SourceManager &SM = PDB.getSourceManager();
  SourceLocation ContainerRBeg = SM.getExpansionLoc(ContainerR.getBegin());
  SourceLocation ContainerREnd = SM.getExpansionLoc(ContainerR.getEnd());
  SourceLocation ContaineeRBeg = SM.getExpansionLoc(ContaineeR.getBegin());
  SourceLocation ContaineeREnd = SM.getExpansionLoc(ContaineeR.getEnd());

  unsigned ContainerBegLine = SM.getExpansionLineNumber(ContainerRBeg);
  unsigned ContainerEndLine = SM.getExpansionLineNumber(ContainerREnd);
  unsigned ContaineeBegLine = SM.getExpansionLineNumber(ContaineeRBeg);
  unsigned ContaineeEndLine = SM.getExpansionLineNumber(ContaineeREnd);

  assert(ContainerBegLine <= ContainerEndLine);
  assert(ContaineeBegLine <= ContaineeEndLine);

  // Containment holds when the container's (line, column) range brackets the
  // containee's on both ends.
  return (ContainerBegLine <= ContaineeBegLine &&
          ContainerEndLine >= ContaineeEndLine &&
          (ContainerBegLine != ContaineeBegLine ||
           SM.getExpansionColumnNumber(ContainerRBeg) <=
           SM.getExpansionColumnNumber(ContaineeRBeg)) &&
          (ContainerEndLine != ContaineeEndLine ||
           SM.getExpansionColumnNumber(ContainerREnd) >=
           SM.getExpansionColumnNumber(ContaineeREnd)));
}

/// Emit an edge from the cleaned-up NewLoc to the cleaned-up PrevLoc
/// (remember: the path is built back-to-front), skipping degenerate and
/// intra-macro edges. Always advances PrevLoc to NewLoc.
void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
  if (!PrevLoc.isValid()) {
    PrevLoc = NewLoc;
    return;
  }

  const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc, PDB.LC);
  const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc, PDB.LC);

  if (PrevLocClean.asLocation().isInvalid()) {
    PrevLoc = NewLoc;
    return;
  }

  if (NewLocClean.asLocation() == PrevLocClean.asLocation())
    return;

  // FIXME: Ignore intra-macro edges for now.
  if (NewLocClean.asLocation().getExpansionLoc() ==
      PrevLocClean.asLocation().getExpansionLoc())
    return;

  PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(NewLocClean,
                                                                   PrevLocClean));
  PrevLoc = NewLoc;
}

/// Add an edge to NewLoc, maintaining the context stack: pop contexts that
/// do not contain NewLoc (emitting edges for them), push NewLoc's context
/// when entering a new one. \p alwaysAdd forces an edge even for locations
/// we would normally coalesce; \p IsPostJump marks the target context dead.
void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd,
                          bool IsPostJump) {

  if (!alwaysAdd && NewLoc.asLocation().isMacroID())
    return;

  const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);

  while (!CLocs.empty()) {
    ContextLocation &TopContextLoc = CLocs.back();

    // Is the top location context the same as the one for the new location?
    if (TopContextLoc == CLoc) {
      if (alwaysAdd) {
        if (IsConsumedExpr(TopContextLoc))
          TopContextLoc.markDead();

        rawAddEdge(NewLoc);
      }

      if (IsPostJump)
        TopContextLoc.markDead();
      return;
    }

    if (containsLocation(TopContextLoc, CLoc)) {
      if (alwaysAdd) {
        rawAddEdge(NewLoc);

        if (IsConsumedExpr(CLoc)) {
          CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/true));
          return;
        }
      }

      CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/IsPostJump));
      return;
    }

    // Context does not contain the location.  Flush it.
    popLocation();
  }

  // If we reach here, there is no enclosing context.  Just add the edge.
  rawAddEdge(NewLoc);
}

/// True if L is an expression whose value is consumed by its parent and that
/// is not itself a control-flow expression.
bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
  if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
    return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);

  return false;
}

/// Like addContext(S), but first adds the nearest non-compound ancestor when
/// it is a do-loop or @synchronized statement, so the edge nesting reflects it.
void EdgeBuilder::addExtendedContext(const Stmt *S) {
  if (!S)
    return;

  // Skip over compound statements to find the interesting ancestor.
  const Stmt *Parent = PDB.getParent(S);
  while (Parent) {
    if (isa<CompoundStmt>(Parent))
      Parent = PDB.getParent(Parent);
    else
      break;
  }

  if (Parent) {
    switch (Parent->getStmtClass()) {
      case Stmt::DoStmtClass:
      case Stmt::ObjCAtSynchronizedStmtClass:
        addContext(Parent);
      default:
        break;
    }
  }

  addContext(S);
}

void EdgeBuilder::addContext(const Stmt *S) {
  if (!S)
    return;

  PathDiagnosticLocation L(S, PDB.getSourceManager(), PDB.LC);
  addContext(L);
}

/// Push L onto the context stack, first popping (and emitting edges for)
/// any contexts that do not contain it.
void EdgeBuilder::addContext(const PathDiagnosticLocation &L) {
  while (!CLocs.empty()) {
    const PathDiagnosticLocation &TopContextLoc = CLocs.back();

    // Is the top location context the same as the one for the new location?
    if (TopContextLoc == L)
      return;

    if (containsLocation(TopContextLoc, L)) {
      CLocs.push_back(L);
      return;
    }

    // Context does not contain the location.  Flush it.
    popLocation();
  }

  CLocs.push_back(L);
}

// Cone-of-influence: support the reverse propagation of "interesting" symbols
// and values by tracing interesting calculations backwards through evaluated
// expressions along a path.  This is probably overly complicated, but the idea
// is that if an expression computed an "interesting" value, the child
// expressions are also likely to be "interesting" as well (which then
// propagates to the values they in turn compute).  This reverse propagation
// is needed to track interesting correlations across function call boundaries,
// where formal arguments bind to actual arguments, etc.  This is also needed
// because the constraint solver sometimes simplifies certain symbolic values
// into constants when appropriate, and this complicates reasoning about
// interesting values.
typedef llvm::DenseSet<const Expr *> InterestingExprs;

// NOTE(review): this name is misspelled upstream ("Intererst"); it is kept
// as-is because the call sites elsewhere in this file use the same spelling.
/// If Ex (or its value) is interesting to the report, mark its value and the
/// values of its immediate subexpressions interesting, and remember the
/// subexpressions in IE so the propagation continues as the walk visits them.
static void reversePropagateIntererstingSymbols(BugReport &R,
                                                InterestingExprs &IE,
                                                const ProgramState *State,
                                                const Expr *Ex,
                                                const LocationContext *LCtx) {
  SVal V = State->getSVal(Ex, LCtx);
  // Only propagate when either the value or the expression itself was
  // previously flagged as interesting.
  if (!(R.isInteresting(V) || IE.count(Ex)))
    return;

  switch (Ex->getStmtClass()) {
    default:
      if (!isa<CastExpr>(Ex))
        break;
      // Fall through.
    case Stmt::BinaryOperatorClass:
    case Stmt::UnaryOperatorClass: {
      for (const Stmt *SubStmt : Ex->children()) {
        if (const Expr *child = dyn_cast_or_null<Expr>(SubStmt)) {
          IE.insert(child);
          SVal ChildV = State->getSVal(child, LCtx);
          R.markInteresting(ChildV);
        }
      }
      break;
    }
  }

  R.markInteresting(V);
}

/// At a call boundary, mark the actual argument expressions whose bound
/// parameter values are interesting, so interest flows from callee formals
/// back to caller actuals.
static void reversePropagateInterestingSymbols(BugReport &R,
                                               InterestingExprs &IE,
                                               const ProgramState *State,
                                               const LocationContext *CalleeCtx,
                                               const LocationContext *CallerCtx)
{
  // FIXME: Handle non-CallExpr-based CallEvents.
  const StackFrameContext *Callee = CalleeCtx->getCurrentStackFrame();
  const Stmt *CallSite = Callee->getCallSite();
  if (const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite)) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeCtx->getDecl())) {
      FunctionDecl::param_const_iterator PI = FD->param_begin(),
                                         PE = FD->param_end();
      CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
      // Pair up actuals with formals (stops at the shorter list, e.g. for
      // variadic calls).
      for (; AI != AE && PI != PE; ++AI, ++PI) {
        if (const Expr *ArgE = *AI) {
          if (const ParmVarDecl *PD = *PI) {
            Loc LV = State->getLValue(PD, CalleeCtx);
            if (R.isInteresting(LV) || R.isInteresting(State->getRawSVal(LV)))
              IE.insert(ArgE);
          }
        }
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Functions for determining if a loop was executed 0 times.
//===----------------------------------------------------------------------===//

/// Returns true if \p Term is a loop statement whose false branch skips the
/// body entirely (for / while / ObjC for-in / C++11 range-for).
/// Note that we intentionally do not include do..while here: its body always
/// executes at least once, so the "skipped body" diagnostics never apply.
static bool isLoop(const Stmt *Term) {
  switch (Term->getStmtClass()) {
    case Stmt::ForStmtClass:
    case Stmt::WhileStmtClass:
    case Stmt::ObjCForCollectionStmtClass:
    case Stmt::CXXForRangeStmtClass:
      return true;
    default:
      // Note that we intentionally do not include do..while here.
      return false;
  }
}

/// Returns true if the edge \p BE leaves its source block through the
/// second (false) successor. Only meaningful for two-way terminators,
/// hence the assert.
static bool isJumpToFalseBranch(const BlockEdge *BE) {
  const CFGBlock *Src = BE->getSrc();
  assert(Src->succ_size() == 2);
  // Successor 0 is the true branch; successor 1 is the false branch.
  return (*(Src->succ_begin()+1) == BE->getDst());
}

/// Return true if the terminator is a loop and the destination is the
/// false branch.
static bool isLoopJumpPastBody(const Stmt *Term, const BlockEdge *BE) {
  if (!isLoop(Term))
    return false;

  // Did we take the false branch?
  return isJumpToFalseBranch(BE);
}

/// Returns true if \p SubS is \p S or is lexically nested inside \p S,
/// walking up the ParentMap chain.
static bool isContainedByStmt(ParentMap &PM, const Stmt *S, const Stmt *SubS) {
  while (SubS) {
    if (SubS == S)
      return true;
    SubS = PM.getParent(SubS);
  }
  return false;
}

/// Walks backwards from \p N and returns the most recent statement that is
/// NOT lexically contained in \p Term (i.e. the last statement executed
/// before entering the loop's condition), or null if none is found.
static const Stmt *getStmtBeforeCond(ParentMap &PM, const Stmt *Term,
                                     const ExplodedNode *N) {
  while (N) {
    Optional<StmtPoint> SP = N->getLocation().getAs<StmtPoint>();
    if (SP) {
      const Stmt *S = SP->getStmt();
      if (!isContainedByStmt(PM, Term, S))
        return S;
    }
    N = N->getFirstPred();
  }
  return nullptr;
}

/// Returns true if \p S is inside the body of the loop statement \p Term.
/// The loop increment (and for range-for, the loop-variable statement) are
/// treated as part of the body, since they execute once per iteration.
static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
  const Stmt *LoopBody = nullptr;
  switch (Term->getStmtClass()) {
    case Stmt::CXXForRangeStmtClass: {
      const CXXForRangeStmt *FR = cast<CXXForRangeStmt>(Term);
      if (isContainedByStmt(PM, FR->getInc(), S))
        return true;
      if (isContainedByStmt(PM, FR->getLoopVarStmt(), S))
        return true;
      LoopBody = FR->getBody();
      break;
    }
    case Stmt::ForStmtClass: {
      const ForStmt *FS = cast<ForStmt>(Term);
      if (isContainedByStmt(PM, FS->getInc(), S))
        return true;
      LoopBody = FS->getBody();
      break;
    }
    case Stmt::ObjCForCollectionStmtClass: {
      const ObjCForCollectionStmt *FC = cast<ObjCForCollectionStmt>(Term);
      LoopBody = FC->getBody();
      break;
    }
    case Stmt::WhileStmtClass:
      LoopBody = cast<WhileStmt>(Term)->getBody();
      break;
    default:
      return false;
  }
  return isContainedByStmt(PM, LoopBody, S);
}

//===----------------------------------------------------------------------===//
// Top-level logic for generating extensive path diagnostics.
//===----------------------------------------------------------------------===//

/// Builds an "extensive" path diagnostic by walking the ExplodedGraph
/// backwards from the error node \p N, using an EdgeBuilder to emit
/// control-flow edges and call pieces as program points are encountered.
/// Returns whether the report is still considered valid after all visitors
/// have run.
static bool GenerateExtensivePathDiagnostic(
    PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
    LocationContextMap &LCM,
    ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
  EdgeBuilder EB(PD, PDB);
  const SourceManager& SM = PDB.getSourceManager();
  StackDiagVector CallStack;
  InterestingExprs IE;

  const ExplodedNode *NextNode = N->pred_empty() ? nullptr : *(N->pred_begin());
  while (NextNode) {
    N = NextNode;
    NextNode = N->getFirstPred();
    ProgramPoint P = N->getLocation();

    // The do/while(0) lets each program-point case 'break' out after
    // handling P exactly once.
    do {
      if (Optional<PostStmt> PS = P.getAs<PostStmt>()) {
        // Propagate interestingness backwards through subexpressions.
        if (const Expr *Ex = PS->getStmtAs<Expr>())
          reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
                                              N->getState().get(), Ex,
                                              N->getLocationContext());
      }

      if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
        // We are descending into a call (walking backwards): open a new
        // call piece and make its path the active one.
        const Stmt *S = CE->getCalleeContext()->getCallSite();
        if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
          reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
                                              N->getState().get(), Ex,
                                              N->getLocationContext());
        }

        PathDiagnosticCallPiece *C =
          PathDiagnosticCallPiece::construct(N, *CE, SM);
        LCM[&C->path] = CE->getCalleeContext();

        EB.addEdge(C->callReturn, /*AlwaysAdd=*/true, /*IsPostJump=*/true);
        EB.flushLocations();

        PD.getActivePath().push_front(C);
        PD.pushActivePath(&C->path);
        CallStack.push_back(StackDiagPair(C, N));
        break;
      }

      // Pop the call hierarchy if we are done walking the contents
      // of a function call.
      if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
        // Add an edge to the start of the function.
        const Decl *D = CE->getCalleeContext()->getDecl();
        PathDiagnosticLocation pos =
          PathDiagnosticLocation::createBegin(D, SM);
        EB.addEdge(pos);

        // Flush all locations, and pop the active path.
        bool VisitedEntireCall = PD.isWithinCall();
        EB.flushLocations();
        PD.popActivePath();
        PDB.LC = N->getLocationContext();

        // Either we just added a bunch of stuff to the top-level path, or
        // we have a previous CallExitEnd.  If the former, it means that the
        // path terminated within a function call.  We must then take the
        // current contents of the active path and place it within
        // a new PathDiagnosticCallPiece.
        PathDiagnosticCallPiece *C;
        if (VisitedEntireCall) {
          C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
        } else {
          const Decl *Caller = CE->getLocationContext()->getDecl();
          C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
          LCM[&C->path] = CE->getCalleeContext();
        }

        C->setCallee(*CE, SM);
        EB.addContext(C->getLocation());

        if (!CallStack.empty()) {
          assert(CallStack.back().first == C);
          CallStack.pop_back();
        }
        break;
      }

      // Note that it is important that we update the LocationContext
      // after looking at CallExits.  CallExit basically adds an
      // edge in the *caller*, so we don't want to update the LocationContext
      // too soon.
      PDB.LC = N->getLocationContext();

      // Block edges.
      if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
        // Does this represent entering a call?  If so, look at propagating
        // interesting symbols across call boundaries.
        if (NextNode) {
          const LocationContext *CallerCtx = NextNode->getLocationContext();
          const LocationContext *CalleeCtx = PDB.LC;
          if (CallerCtx != CalleeCtx) {
            reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
                                               N->getState().get(),
                                               CalleeCtx, CallerCtx);
          }
        }

        // Are we jumping to the head of a loop?  Add a special diagnostic.
        if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
          PathDiagnosticLocation L(Loop, SM, PDB.LC);
          const CompoundStmt *CS = nullptr;

          if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
            CS = dyn_cast<CompoundStmt>(FS->getBody());
          else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
            CS = dyn_cast<CompoundStmt>(WS->getBody());

          PathDiagnosticEventPiece *p =
            new PathDiagnosticEventPiece(L,
                                        "Looping back to the head of the loop");
          p->setPrunable(true);

          EB.addEdge(p->getLocation(), true);
          PD.getActivePath().push_front(p);

          // If the body is a compound statement, also anchor an edge at its
          // closing brace so the back-edge reads naturally.
          if (CS) {
            PathDiagnosticLocation BL =
              PathDiagnosticLocation::createEndBrace(CS, SM);
            EB.addEdge(BL);
          }
        }

        const CFGBlock *BSrc = BE->getSrc();
        ParentMap &PM = PDB.getParentMap();

        if (const Stmt *Term = BSrc->getTerminator()) {
          // Are we jumping past the loop body without ever executing the
          // loop (because the condition was false)?
          if (isLoopJumpPastBody(Term, &*BE) &&
              !isInLoopBody(PM,
                            getStmtBeforeCond(PM,
                                              BSrc->getTerminatorCondition(),
                                              N),
                            Term)) {
            PathDiagnosticLocation L(Term, SM, PDB.LC);
            PathDiagnosticEventPiece *PE =
                new PathDiagnosticEventPiece(L, "Loop body executed 0 times");
            PE->setPrunable(true);

            EB.addEdge(PE->getLocation(), true);
            PD.getActivePath().push_front(PE);
          }

          // In any case, add the terminator as the current statement
          // context for control edges.
          EB.addContext(Term);
        }
        break;
      }

      if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
        Optional<CFGElement> First = BE->getFirstElement();
        if (Optional<CFGStmt> S = First ? First->getAs<CFGStmt>() : None) {
          const Stmt *stmt = S->getStmt();
          if (IsControlFlowExpr(stmt)) {
            // Add the proper context for '&&', '||', and '?'.
            EB.addContext(stmt);
          }
          else
            EB.addExtendedContext(PDB.getEnclosingStmtLocation(stmt).asStmt());
        }
        break;
      }


    } while (0);

    if (!NextNode)
      continue;

    // Add pieces from custom visitors.
    BugReport *R = PDB.getBugReport();
    for (auto &V : visitors) {
      if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *R)) {
        const PathDiagnosticLocation &Loc = p->getLocation();
        EB.addEdge(Loc, true);
        PD.getActivePath().push_front(p);
        updateStackPiecesWithMessage(p, CallStack);

        if (const Stmt *S = Loc.asStmt())
          EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
      }
    }
  }

  return PDB.getBugReport()->isValid();
}

/// \brief Adds a sanitized control-flow diagnostic edge to a path.
///
/// Skips invalid locations, establishes PrevLoc on first use, and drops
/// self-edges (which occur when there are multiple nodes at the same
/// statement). On success, pushes a control-flow piece from NewLoc back to
/// PrevLoc (the path is built in reverse) and advances PrevLoc.
static void addEdgeToPath(PathPieces &path,
                          PathDiagnosticLocation &PrevLoc,
                          PathDiagnosticLocation NewLoc,
                          const LocationContext *LC) {
  if (!NewLoc.isValid())
    return;

  SourceLocation NewLocL = NewLoc.asLocation();
  if (NewLocL.isInvalid())
    return;

  // No previous anchor yet: just record this location and wait for the
  // next edge.
  if (!PrevLoc.isValid() || !PrevLoc.asLocation().isValid()) {
    PrevLoc = NewLoc;
    return;
  }

  // Ignore self-edges, which occur when there are multiple nodes at the same
  // statement.
  if (NewLoc.asStmt() && NewLoc.asStmt() == PrevLoc.asStmt())
    return;

  path.push_front(new PathDiagnosticControlFlowPiece(NewLoc, PrevLoc));
  PrevLoc = NewLoc;
}

/// A customized wrapper for CFGBlock::getTerminatorCondition()
/// which returns the element for ObjCForCollectionStmts.
static const Stmt *getTerminatorCondition(const CFGBlock *B) { const Stmt *S = B->getTerminatorCondition(); if (const ObjCForCollectionStmt *FS = dyn_cast_or_null<ObjCForCollectionStmt>(S)) return FS->getElement(); return S; } static const char StrEnteringLoop[] = "Entering loop body"; static const char StrLoopBodyZero[] = "Loop body executed 0 times"; static const char StrLoopRangeEmpty[] = "Loop body skipped when range is empty"; static const char StrLoopCollectionEmpty[] = "Loop body skipped when collection is empty"; static bool GenerateAlternateExtensivePathDiagnostic( PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N, LocationContextMap &LCM, ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) { BugReport *report = PDB.getBugReport(); const SourceManager& SM = PDB.getSourceManager(); StackDiagVector CallStack; InterestingExprs IE; PathDiagnosticLocation PrevLoc = PD.getLocation(); const ExplodedNode *NextNode = N->getFirstPred(); while (NextNode) { N = NextNode; NextNode = N->getFirstPred(); ProgramPoint P = N->getLocation(); do { // Have we encountered an entrance to a call? It may be // the case that we have not encountered a matching // call exit before this point. This means that the path // terminated within the call itself. if (Optional<CallEnter> CE = P.getAs<CallEnter>()) { // Add an edge to the start of the function. const StackFrameContext *CalleeLC = CE->getCalleeContext(); const Decl *D = CalleeLC->getDecl(); addEdgeToPath(PD.getActivePath(), PrevLoc, PathDiagnosticLocation::createBegin(D, SM), CalleeLC); // Did we visit an entire call? 
bool VisitedEntireCall = PD.isWithinCall(); PD.popActivePath(); PathDiagnosticCallPiece *C; if (VisitedEntireCall) { PathDiagnosticPiece *P = PD.getActivePath().front().get(); C = cast<PathDiagnosticCallPiece>(P); } else { const Decl *Caller = CE->getLocationContext()->getDecl(); C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller); // Since we just transferred the path over to the call piece, // reset the mapping from active to location context. assert(PD.getActivePath().size() == 1 && PD.getActivePath().front() == C); LCM[&PD.getActivePath()] = nullptr; // Record the location context mapping for the path within // the call. assert(LCM[&C->path] == nullptr || LCM[&C->path] == CE->getCalleeContext()); LCM[&C->path] = CE->getCalleeContext(); // If this is the first item in the active path, record // the new mapping from active path to location context. const LocationContext *&NewLC = LCM[&PD.getActivePath()]; if (!NewLC) NewLC = N->getLocationContext(); PDB.LC = NewLC; } C->setCallee(*CE, SM); // Update the previous location in the active path. PrevLoc = C->getLocation(); if (!CallStack.empty()) { assert(CallStack.back().first == C); CallStack.pop_back(); } break; } // Query the location context here and the previous location // as processing CallEnter may change the active path. PDB.LC = N->getLocationContext(); // Record the mapping from the active path to the location // context. assert(!LCM[&PD.getActivePath()] || LCM[&PD.getActivePath()] == PDB.LC); LCM[&PD.getActivePath()] = PDB.LC; // Have we encountered an exit from a function call? if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) { const Stmt *S = CE->getCalleeContext()->getCallSite(); // Propagate the interesting symbols accordingly. if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) { reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE, N->getState().get(), Ex, N->getLocationContext()); } // We are descending into a call (backwards). 
Construct // a new call piece to contain the path pieces for that call. PathDiagnosticCallPiece *C = PathDiagnosticCallPiece::construct(N, *CE, SM); // Record the location context for this call piece. LCM[&C->path] = CE->getCalleeContext(); // Add the edge to the return site. addEdgeToPath(PD.getActivePath(), PrevLoc, C->callReturn, PDB.LC); PD.getActivePath().push_front(C); PrevLoc.invalidate(); // Make the contents of the call the active path for now. PD.pushActivePath(&C->path); CallStack.push_back(StackDiagPair(C, N)); break; } if (Optional<PostStmt> PS = P.getAs<PostStmt>()) { // For expressions, make sure we propagate the // interesting symbols correctly. if (const Expr *Ex = PS->getStmtAs<Expr>()) reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE, N->getState().get(), Ex, N->getLocationContext()); // Add an edge. If this is an ObjCForCollectionStmt do // not add an edge here as it appears in the CFG both // as a terminator and as a terminator condition. if (!isa<ObjCForCollectionStmt>(PS->getStmt())) { PathDiagnosticLocation L = PathDiagnosticLocation(PS->getStmt(), SM, PDB.LC); addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC); } break; } // Block edges. if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) { // Does this represent entering a call? If so, look at propagating // interesting symbols across call boundaries. if (NextNode) { const LocationContext *CallerCtx = NextNode->getLocationContext(); const LocationContext *CalleeCtx = PDB.LC; if (CallerCtx != CalleeCtx) { reversePropagateInterestingSymbols(*PDB.getBugReport(), IE, N->getState().get(), CalleeCtx, CallerCtx); } } // Are we jumping to the head of a loop? Add a special diagnostic. 
if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) { PathDiagnosticLocation L(Loop, SM, PDB.LC); const Stmt *Body = nullptr; if (const ForStmt *FS = dyn_cast<ForStmt>(Loop)) Body = FS->getBody(); else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop)) Body = WS->getBody(); else if (const ObjCForCollectionStmt *OFS = dyn_cast<ObjCForCollectionStmt>(Loop)) { Body = OFS->getBody(); } else if (const CXXForRangeStmt *FRS = dyn_cast<CXXForRangeStmt>(Loop)) { Body = FRS->getBody(); } // do-while statements are explicitly excluded here PathDiagnosticEventPiece *p = new PathDiagnosticEventPiece(L, "Looping back to the head " "of the loop"); p->setPrunable(true); addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC); PD.getActivePath().push_front(p); if (const CompoundStmt *CS = dyn_cast_or_null<CompoundStmt>(Body)) { addEdgeToPath(PD.getActivePath(), PrevLoc, PathDiagnosticLocation::createEndBrace(CS, SM), PDB.LC); } } const CFGBlock *BSrc = BE->getSrc(); ParentMap &PM = PDB.getParentMap(); if (const Stmt *Term = BSrc->getTerminator()) { // Are we jumping past the loop body without ever executing the // loop (because the condition was false)? if (isLoop(Term)) { const Stmt *TermCond = getTerminatorCondition(BSrc); bool IsInLoopBody = isInLoopBody(PM, getStmtBeforeCond(PM, TermCond, N), Term); const char *str = nullptr; if (isJumpToFalseBranch(&*BE)) { if (!IsInLoopBody) { if (isa<ObjCForCollectionStmt>(Term)) { str = StrLoopCollectionEmpty; } else if (isa<CXXForRangeStmt>(Term)) { str = StrLoopRangeEmpty; } else { str = StrLoopBodyZero; } } } else { str = StrEnteringLoop; } if (str) { PathDiagnosticLocation L(TermCond ? 
TermCond : Term, SM, PDB.LC); PathDiagnosticEventPiece *PE = new PathDiagnosticEventPiece(L, str); PE->setPrunable(true); addEdgeToPath(PD.getActivePath(), PrevLoc, PE->getLocation(), PDB.LC); PD.getActivePath().push_front(PE); } } else if (isa<BreakStmt>(Term) || isa<ContinueStmt>(Term) || isa<GotoStmt>(Term)) { PathDiagnosticLocation L(Term, SM, PDB.LC); addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC); } } break; } } while (0); if (!NextNode) continue; // Add pieces from custom visitors. for (auto &V : visitors) { if (PathDiagnosticPiece *p = V->VisitNode(N, NextNode, PDB, *report)) { addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC); PD.getActivePath().push_front(p); updateStackPiecesWithMessage(p, CallStack); } } } // Add an edge to the start of the function. // We'll prune it out later, but it helps make diagnostics more uniform. const StackFrameContext *CalleeLC = PDB.LC->getCurrentStackFrame(); const Decl *D = CalleeLC->getDecl(); addEdgeToPath(PD.getActivePath(), PrevLoc, PathDiagnosticLocation::createBegin(D, SM), CalleeLC); return report->isValid(); } static const Stmt *getLocStmt(PathDiagnosticLocation L) { if (!L.isValid()) return nullptr; return L.asStmt(); } static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) { if (!S) return nullptr; while (true) { S = PM.getParentIgnoreParens(S); if (!S) break; if (isa<ExprWithCleanups>(S) || isa<CXXBindTemporaryExpr>(S) || isa<SubstNonTypeTemplateParmExpr>(S)) continue; break; } return S; } static bool isConditionForTerminator(const Stmt *S, const Stmt *Cond) { switch (S->getStmtClass()) { case Stmt::BinaryOperatorClass: { const BinaryOperator *BO = cast<BinaryOperator>(S); if (!BO->isLogicalOp()) return false; return BO->getLHS() == Cond || BO->getRHS() == Cond; } case Stmt::IfStmtClass: return cast<IfStmt>(S)->getCond() == Cond; case Stmt::ForStmtClass: return cast<ForStmt>(S)->getCond() == Cond; case Stmt::WhileStmtClass: return cast<WhileStmt>(S)->getCond() == Cond; 
case Stmt::DoStmtClass: return cast<DoStmt>(S)->getCond() == Cond; case Stmt::ChooseExprClass: return cast<ChooseExpr>(S)->getCond() == Cond; case Stmt::IndirectGotoStmtClass: return cast<IndirectGotoStmt>(S)->getTarget() == Cond; case Stmt::SwitchStmtClass: return cast<SwitchStmt>(S)->getCond() == Cond; case Stmt::BinaryConditionalOperatorClass: return cast<BinaryConditionalOperator>(S)->getCond() == Cond; case Stmt::ConditionalOperatorClass: { const ConditionalOperator *CO = cast<ConditionalOperator>(S); return CO->getCond() == Cond || CO->getLHS() == Cond || CO->getRHS() == Cond; } case Stmt::ObjCForCollectionStmtClass: return cast<ObjCForCollectionStmt>(S)->getElement() == Cond; case Stmt::CXXForRangeStmtClass: { const CXXForRangeStmt *FRS = cast<CXXForRangeStmt>(S); return FRS->getCond() == Cond || FRS->getRangeInit() == Cond; } default: return false; } } static bool isIncrementOrInitInForLoop(const Stmt *S, const Stmt *FL) { if (const ForStmt *FS = dyn_cast<ForStmt>(FL)) return FS->getInc() == S || FS->getInit() == S; if (const CXXForRangeStmt *FRS = dyn_cast<CXXForRangeStmt>(FL)) return FRS->getInc() == S || FRS->getRangeStmt() == S || FRS->getLoopVarStmt() || FRS->getRangeInit() == S; return false; } typedef llvm::DenseSet<const PathDiagnosticCallPiece *> OptimizedCallsSet; /// Adds synthetic edges from top-level statements to their subexpressions. /// /// This avoids a "swoosh" effect, where an edge from a top-level statement A /// points to a sub-expression B.1 that's not at the start of B. In these cases, /// we'd like to see an edge from A to B, then another one from B to B.1. 
/// Inserts the synthetic context edges described above.
///
/// For every control-flow piece, collects the chain of enclosing statement
/// locations of its source, then repeatedly hoists the piece's destination
/// to its enclosing context — splitting the edge in two or merging with the
/// previous edge — until source and destination sit at the same level.
static void addContextEdges(PathPieces &pieces, SourceManager &SM,
                            const ParentMap &PM, const LocationContext *LCtx) {
  PathPieces::iterator Prev = pieces.end();
  for (PathPieces::iterator I = pieces.begin(), E = Prev; I != E;
       Prev = I, ++I) {
    PathDiagnosticControlFlowPiece *Piece =
      dyn_cast<PathDiagnosticControlFlowPiece>(*I);

    if (!Piece)
      continue;

    PathDiagnosticLocation SrcLoc = Piece->getStartLocation();
    SmallVector<PathDiagnosticLocation, 4> SrcContexts;

    // Collect the chain of contexts enclosing the source location, from
    // innermost outwards; the loop stops once getEnclosingStmtLocation
    // returns the same statement again (fixed point).
    PathDiagnosticLocation NextSrcContext = SrcLoc;
    const Stmt *InnerStmt = nullptr;
    while (NextSrcContext.isValid() && NextSrcContext.asStmt() != InnerStmt) {
      SrcContexts.push_back(NextSrcContext);
      InnerStmt = NextSrcContext.asStmt();
      NextSrcContext = getEnclosingStmtLocation(InnerStmt, SM, PM, LCtx,
                                                /*allowNested=*/true);
    }

    // Repeatedly split the edge as necessary.
    // This is important for nested logical expressions (||, &&, ?:) where we
    // want to show all the levels of context.
    while (true) {
      const Stmt *Dst = getLocStmt(Piece->getEndLocation());

      // We are looking at an edge. Is the destination within a larger
      // expression?
      PathDiagnosticLocation DstContext =
        getEnclosingStmtLocation(Dst, SM, PM, LCtx, /*allowNested=*/true);
      if (!DstContext.isValid() || DstContext.asStmt() == Dst)
        break;

      // If the source is in the same context, we're already good.
      if (std::find(SrcContexts.begin(), SrcContexts.end(), DstContext) !=
          SrcContexts.end())
        break;

      // Update the subexpression node to point to the context edge.
      Piece->setStartLocation(DstContext);

      // Try to extend the previous edge if it's at the same level as the source
      // context.
      if (Prev != E) {
        PathDiagnosticControlFlowPiece *PrevPiece =
          dyn_cast<PathDiagnosticControlFlowPiece>(*Prev);

        if (PrevPiece) {
          if (const Stmt *PrevSrc = getLocStmt(PrevPiece->getStartLocation())) {
            const Stmt *PrevSrcParent = getStmtParent(PrevSrc, PM);
            if (PrevSrcParent == getStmtParent(getLocStmt(DstContext), PM)) {
              PrevPiece->setEndLocation(DstContext);
              break;
            }
          }
        }
      }

      // Otherwise, split the current edge into a context edge and a
      // subexpression edge. Note that the context statement may itself have
      // context. The new piece is inserted before I and becomes the piece we
      // continue hoisting on the next iteration.
      Piece = new PathDiagnosticControlFlowPiece(SrcLoc, DstContext);
      I = pieces.insert(I, Piece);
    }
  }
}

/// \brief Move edges from a branch condition to a branch target
///        when the condition is simple.
///
/// This restructures some of the work of addContextEdges.  That function
/// creates edges this may destroy, but they work together to create a more
/// aesthetically set of edges around branches.  After the call to
/// addContextEdges, we may have (1) an edge to the branch, (2) an edge from
/// the branch to the branch condition, and (3) an edge from the branch
/// condition to the branch target.  We keep (1), but may wish to remove (2)
/// and move the source of (3) to the branch if the branch condition is simple.
/// static void simplifySimpleBranches(PathPieces &pieces) { for (PathPieces::iterator I = pieces.begin(), E = pieces.end(); I != E; ++I) { PathDiagnosticControlFlowPiece *PieceI = dyn_cast<PathDiagnosticControlFlowPiece>(*I); if (!PieceI) continue; const Stmt *s1Start = getLocStmt(PieceI->getStartLocation()); const Stmt *s1End = getLocStmt(PieceI->getEndLocation()); if (!s1Start || !s1End) continue; PathPieces::iterator NextI = I; ++NextI; if (NextI == E) break; PathDiagnosticControlFlowPiece *PieceNextI = nullptr; while (true) { if (NextI == E) break; PathDiagnosticEventPiece *EV = dyn_cast<PathDiagnosticEventPiece>(*NextI); if (EV) { StringRef S = EV->getString(); if (S == StrEnteringLoop || S == StrLoopBodyZero || S == StrLoopCollectionEmpty || S == StrLoopRangeEmpty) { ++NextI; continue; } break; } PieceNextI = dyn_cast<PathDiagnosticControlFlowPiece>(*NextI); break; } if (!PieceNextI) continue; const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation()); const Stmt *s2End = getLocStmt(PieceNextI->getEndLocation()); if (!s2Start || !s2End || s1End != s2Start) continue; // We only perform this transformation for specific branch kinds. // We don't want to do this for do..while, for example. if (!(isa<ForStmt>(s1Start) || isa<WhileStmt>(s1Start) || isa<IfStmt>(s1Start) || isa<ObjCForCollectionStmt>(s1Start) || isa<CXXForRangeStmt>(s1Start))) continue; // Is s1End the branch condition? if (!isConditionForTerminator(s1Start, s1End)) continue; // Perform the hoisting by eliminating (2) and changing the start // location of (3). PieceNextI->setStartLocation(PieceI->getStartLocation()); I = pieces.erase(I); } } /// Returns the number of bytes in the given (character-based) SourceRange. /// /// If the locations in the range are not on the same line, returns None. /// /// Note that this does not do a precise user-visible character or column count. 
static Optional<size_t> getLengthOnSingleLine(SourceManager &SM, SourceRange Range) { SourceRange ExpansionRange(SM.getExpansionLoc(Range.getBegin()), SM.getExpansionRange(Range.getEnd()).second); FileID FID = SM.getFileID(ExpansionRange.getBegin()); if (FID != SM.getFileID(ExpansionRange.getEnd())) return None; bool Invalid; const llvm::MemoryBuffer *Buffer = SM.getBuffer(FID, &Invalid); if (Invalid) return None; unsigned BeginOffset = SM.getFileOffset(ExpansionRange.getBegin()); unsigned EndOffset = SM.getFileOffset(ExpansionRange.getEnd()); StringRef Snippet = Buffer->getBuffer().slice(BeginOffset, EndOffset); // We're searching the raw bytes of the buffer here, which might include // escaped newlines and such. That's okay; we're trying to decide whether the // SourceRange is covering a large or small amount of space in the user's // editor. if (Snippet.find_first_of("\r\n") != StringRef::npos) return None; // This isn't Unicode-aware, but it doesn't need to be. return Snippet.size(); } /// \sa getLengthOnSingleLine(SourceManager, SourceRange) static Optional<size_t> getLengthOnSingleLine(SourceManager &SM, const Stmt *S) { return getLengthOnSingleLine(SM, S->getSourceRange()); } /// Eliminate two-edge cycles created by addContextEdges(). /// /// Once all the context edges are in place, there are plenty of cases where /// there's a single edge from a top-level statement to a subexpression, /// followed by a single path note, and then a reverse edge to get back out to /// the top level. If the statement is simple enough, the subexpression edges /// just add noise and make it harder to understand what's going on. /// /// This function only removes edges in pairs, because removing only one edge /// might leave other edges dangling. /// /// This will not remove edges in more complicated situations: /// - if there is more than one "hop" leading to or from a subexpression. /// - if there is an inlined call between the edges instead of a single event. 
/// - if the whole statement is large enough that having subexpression arrows
///   might be helpful.
static void removeContextCycles(PathPieces &Path, SourceManager &SM,
                                ParentMap &PM) {
  for (PathPieces::iterator I = Path.begin(), E = Path.end(); I != E; ) {
    // Pattern match the current piece and its successor.
    PathDiagnosticControlFlowPiece *PieceI =
      dyn_cast<PathDiagnosticControlFlowPiece>(*I);

    if (!PieceI) {
      ++I;
      continue;
    }

    const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
    const Stmt *s1End   = getLocStmt(PieceI->getEndLocation());

    PathPieces::iterator NextI = I; ++NextI;
    if (NextI == E)
      break;

    PathDiagnosticControlFlowPiece *PieceNextI =
      dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);

    if (!PieceNextI) {
      // Allow exactly one event piece between the two edges of the cycle.
      if (isa<PathDiagnosticEventPiece>(*NextI)) {
        ++NextI;
        if (NextI == E)
          break;
        PieceNextI = dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);
      }

      if (!PieceNextI) {
        ++I;
        continue;
      }
    }

    const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
    const Stmt *s2End   = getLocStmt(PieceNextI->getEndLocation());

    // A two-edge cycle: A -> B followed by B -> A. Remove both edges only
    // when each statement is short enough that the subexpression arrows add
    // no real information.
    if (s1Start && s2Start && s1Start == s2End && s2Start == s1End) {
      const size_t MAX_SHORT_LINE_LENGTH = 80;
      Optional<size_t> s1Length = getLengthOnSingleLine(SM, s1Start);
      if (s1Length && *s1Length <= MAX_SHORT_LINE_LENGTH) {
        Optional<size_t> s2Length = getLengthOnSingleLine(SM, s2Start);
        if (s2Length && *s2Length <= MAX_SHORT_LINE_LENGTH) {
          Path.erase(I);
          I = Path.erase(NextI);
          continue;
        }
      }
    }

    ++I;
  }
}

/// \brief Return true if X is contained by Y.
static bool lexicalContains(ParentMap &PM,
                            const Stmt *X,
                            const Stmt *Y) {
  while (X) {
    if (X == Y)
      return true;
    X = PM.getParent(X);
  }
  return false;
}

// Remove short edges on the same line less than 3 columns in difference.
static void removePunyEdges(PathPieces &path,
                            SourceManager &SM,
                            ParentMap &PM) {

  bool erased = false;

  // The third for-clause only advances I when the current piece was NOT
  // erased; path.erase(I) already yields the next iterator.
  for (PathPieces::iterator I = path.begin(), E = path.end(); I != E;
       erased ? I : ++I) {

    erased = false;

    PathDiagnosticControlFlowPiece *PieceI =
      dyn_cast<PathDiagnosticControlFlowPiece>(*I);

    if (!PieceI)
      continue;

    const Stmt *start = getLocStmt(PieceI->getStartLocation());
    const Stmt *end   = getLocStmt(PieceI->getEndLocation());

    if (!start || !end)
      continue;

    const Stmt *endParent = PM.getParent(end);
    if (!endParent)
      continue;

    // Never drop an edge that lands on a branch condition, no matter how
    // short it is — those carry control-flow meaning.
    if (isConditionForTerminator(end, endParent))
      continue;

    SourceLocation FirstLoc = start->getLocStart();
    SourceLocation SecondLoc = end->getLocStart();

    if (!SM.isWrittenInSameFile(FirstLoc, SecondLoc))
      continue;
    if (SM.isBeforeInTranslationUnit(SecondLoc, FirstLoc))
      std::swap(SecondLoc, FirstLoc);

    SourceRange EdgeRange(FirstLoc, SecondLoc);
    Optional<size_t> ByteWidth = getLengthOnSingleLine(SM, EdgeRange);

    // If the statements are on different lines, continue.
    if (!ByteWidth)
      continue;

    const size_t MAX_PUNY_EDGE_LENGTH = 2;
    if (*ByteWidth <= MAX_PUNY_EDGE_LENGTH) {
      // FIXME: There are enough /bytes/ between the endpoints of the edge, but
      // there might not be enough /columns/. A proper user-visible column count
      // is probably too expensive, though.
      I = path.erase(I);
      erased = true;
      continue;
    }
  }
}

/// Collapses immediately adjacent event pieces that carry the same message.
static void removeIdenticalEvents(PathPieces &path) {
  for (PathPieces::iterator I = path.begin(), E = path.end(); I != E; ++I) {
    PathDiagnosticEventPiece *PieceI =
      dyn_cast<PathDiagnosticEventPiece>(*I);

    if (!PieceI)
      continue;

    PathPieces::iterator NextI = I; ++NextI;
    if (NextI == E)
      return;

    PathDiagnosticEventPiece *PieceNextI =
      dyn_cast<PathDiagnosticEventPiece>(*NextI);

    if (!PieceNextI)
      continue;

    // Erase the second piece if it has the same exact message text.
    if (PieceI->getString() == PieceNextI->getString()) {
      path.erase(NextI);
    }
  }
}

/// One pass of edge optimization over \p path (recursing into call pieces).
/// Returns true if any edge was merged/removed; callers run it to a fixed
/// point, after which the cleanup passes at the bottom run exactly once.
static bool optimizeEdges(PathPieces &path, SourceManager &SM,
                          OptimizedCallsSet &OCS,
                          LocationContextMap &LCM) {
  bool hasChanges = false;
  const LocationContext *LC = LCM[&path];
  assert(LC);
  ParentMap &PM = LC->getParentMap();

  for (PathPieces::iterator I = path.begin(), E = path.end(); I != E; ) {
    // Optimize subpaths.
    if (PathDiagnosticCallPiece *CallI = dyn_cast<PathDiagnosticCallPiece>(*I)){
      // Record the fact that a call has been optimized so we only do the
      // effort once.
      if (!OCS.count(CallI)) {
        while (optimizeEdges(CallI->path, SM, OCS, LCM)) {}
        OCS.insert(CallI);
      }
      ++I;
      continue;
    }

    // Pattern match the current piece and its successor.
    PathDiagnosticControlFlowPiece *PieceI =
      dyn_cast<PathDiagnosticControlFlowPiece>(*I);

    if (!PieceI) {
      ++I;
      continue;
    }

    const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
    const Stmt *s1End   = getLocStmt(PieceI->getEndLocation());
    const Stmt *level1 = getStmtParent(s1Start, PM);
    const Stmt *level2 = getStmtParent(s1End, PM);

    PathPieces::iterator NextI = I; ++NextI;
    if (NextI == E)
      break;

    PathDiagnosticControlFlowPiece *PieceNextI =
      dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);

    if (!PieceNextI) {
      ++I;
      continue;
    }

    const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
    const Stmt *s2End   = getLocStmt(PieceNextI->getEndLocation());
    const Stmt *level3 = getStmtParent(s2Start, PM);
    const Stmt *level4 = getStmtParent(s2End, PM);

    // Rule I.
    //
    // If we have two consecutive control edges whose end/begin locations
    // are at the same level (e.g. statements or top-level expressions within
    // a compound statement, or siblings share a single ancestor expression),
    // then merge them if they have no interesting intermediate event.
    //
    // For example:
    //
    // (1.1 -> 1.2) -> (1.2 -> 1.3) becomes (1.1 -> 1.3) because the common
    // parent is '1'.  Here 'x.y.z' represents the hierarchy of statements.
    //
    // NOTE: this will be limited later in cases where we add barriers
    // to prevent this optimization.
    //
    if (level1 && level1 == level2 && level1 == level3 && level1 == level4) {
      PieceI->setEndLocation(PieceNextI->getEndLocation());
      path.erase(NextI);
      hasChanges = true;
      continue;
    }

    // Rule II.
    //
    // Eliminate edges between subexpressions and parent expressions
    // when the subexpression is consumed.
    //
    // NOTE: this will be limited later in cases where we add barriers
    // to prevent this optimization.
    //
    if (s1End && s1End == s2Start && level2) {
      bool removeEdge = false;
      // Remove edges into the increment or initialization of a
      // loop that have no interleaving event.  This means that
      // they aren't interesting.
      if (isIncrementOrInitInForLoop(s1End, level2))
        removeEdge = true;
      // Next only consider edges that are not anchored on
      // the condition of a terminator.  This are intermediate edges
      // that we might want to trim.
      else if (!isConditionForTerminator(level2, s1End)) {
        // Trim edges on expressions that are consumed by
        // the parent expression.
        if (isa<Expr>(s1End) && PM.isConsumedExpr(cast<Expr>(s1End))) {
          removeEdge = true;
        }
        // Trim edges where a lexical containment doesn't exist.
        // For example:
        //
        //  X -> Y -> Z
        //
        // If 'Z' lexically contains Y (it is an ancestor) and
        // 'X' does not lexically contain Y (it is a descendant OR
        // it has no lexical relationship at all) then trim.
        //
        // This can eliminate edges where we dive into a subexpression
        // and then pop back out, etc.
        else if (s1Start && s2End &&
                 lexicalContains(PM, s2Start, s2End) &&
                 !lexicalContains(PM, s1End, s1Start)) {
          removeEdge = true;
        }
        // Trim edges from a subexpression back to the top level if the
        // subexpression is on a different line.
        //
        // A.1 -> A -> B
        // becomes
        // A.1 -> B
        //
        // These edges just look ugly and don't usually add anything.
        else if (s1Start && s2End &&
                 lexicalContains(PM, s1Start, s1End)) {
          SourceRange EdgeRange(PieceI->getEndLocation().asLocation(),
                                PieceI->getStartLocation().asLocation());
          if (!getLengthOnSingleLine(SM, EdgeRange).hasValue())
            removeEdge = true;
        }
      }

      if (removeEdge) {
        PieceI->setEndLocation(PieceNextI->getEndLocation());
        path.erase(NextI);
        hasChanges = true;
        continue;
      }
    }

    // Optimize edges for ObjC fast-enumeration loops.
    //
    // (X -> collection) -> (collection -> element)
    //
    // becomes:
    //
    // (X -> element)
    if (s1End == s2Start) {
      const ObjCForCollectionStmt *FS =
        dyn_cast_or_null<ObjCForCollectionStmt>(level3);
      if (FS && FS->getCollection()->IgnoreParens() == s2Start &&
          s2End == FS->getElement()) {
        PieceI->setEndLocation(PieceNextI->getEndLocation());
        path.erase(NextI);
        hasChanges = true;
        continue;
      }
    }

    // No changes at this index?  Move to the next one.
    ++I;
  }

  if (!hasChanges) {
    // Adjust edges into subexpressions to make them more uniform
    // and aesthetically pleasing.
    addContextEdges(path, SM, PM, LC);
    // Remove "cyclical" edges that include one or more context edges.
    removeContextCycles(path, SM, PM);
    // Hoist edges originating from branch conditions to branches
    // for simple branches.
    simplifySimpleBranches(path);
    // Remove any puny edges left over after primary optimization pass.
    removePunyEdges(path, SM, PM);
    // Remove identical events.
    removeIdenticalEvents(path);
  }

  return hasChanges;
}

/// Drop the very first edge in a path, which should be a function entry edge.
///
/// If the first edge is not a function entry edge (say, because the first
/// statement had an invalid source location), this function does nothing.
// FIXME: We should just generate invalid edges anyway and have the optimizer
// deal with them.
static void dropFunctionEntryEdge(PathPieces &Path, LocationContextMap &LCM, SourceManager &SM) { const PathDiagnosticControlFlowPiece *FirstEdge = dyn_cast<PathDiagnosticControlFlowPiece>(Path.front()); if (!FirstEdge) return; const Decl *D = LCM[&Path]->getDecl(); PathDiagnosticLocation EntryLoc = PathDiagnosticLocation::createBegin(D, SM); if (FirstEdge->getStartLocation() != EntryLoc) return; Path.pop_front(); } //===----------------------------------------------------------------------===// // Methods for BugType and subclasses. //===----------------------------------------------------------------------===// void BugType::anchor() { } void BugType::FlushReports(BugReporter &BR) {} void BuiltinBug::anchor() {} //===----------------------------------------------------------------------===// // Methods for BugReport and subclasses. //===----------------------------------------------------------------------===// void BugReport::NodeResolver::anchor() {} void BugReport::addVisitor(std::unique_ptr<BugReporterVisitor> visitor) { if (!visitor) return; llvm::FoldingSetNodeID ID; visitor->Profile(ID); void *InsertPos; if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos)) return; CallbacksSet.InsertNode(visitor.get(), InsertPos); Callbacks.push_back(std::move(visitor)); ++ConfigurationChangeToken; } BugReport::~BugReport() { while (!interestingSymbols.empty()) { popInterestingSymbolsAndRegions(); } } const Decl *BugReport::getDeclWithIssue() const { if (DeclWithIssue) return DeclWithIssue; const ExplodedNode *N = getErrorNode(); if (!N) return nullptr; const LocationContext *LC = N->getLocationContext(); return LC->getCurrentStackFrame()->getDecl(); } void BugReport::Profile(llvm::FoldingSetNodeID& hash) const { hash.AddPointer(&BT); hash.AddString(Description); PathDiagnosticLocation UL = getUniqueingLocation(); if (UL.isValid()) { UL.Profile(hash); } else if (Location.isValid()) { Location.Profile(hash); } else { assert(ErrorNode); 
hash.AddPointer(GetCurrentOrPreviousStmt(ErrorNode)); } for (SmallVectorImpl<SourceRange>::const_iterator I = Ranges.begin(), E = Ranges.end(); I != E; ++I) { const SourceRange range = *I; if (!range.isValid()) continue; hash.AddInteger(range.getBegin().getRawEncoding()); hash.AddInteger(range.getEnd().getRawEncoding()); } } void BugReport::markInteresting(SymbolRef sym) { if (!sym) return; // If the symbol wasn't already in our set, note a configuration change. if (getInterestingSymbols().insert(sym).second) ++ConfigurationChangeToken; if (const SymbolMetadata *meta = dyn_cast<SymbolMetadata>(sym)) getInterestingRegions().insert(meta->getRegion()); } void BugReport::markInteresting(const MemRegion *R) { if (!R) return; // If the base region wasn't already in our set, note a configuration change. R = R->getBaseRegion(); if (getInterestingRegions().insert(R).second) ++ConfigurationChangeToken; if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) getInterestingSymbols().insert(SR->getSymbol()); } void BugReport::markInteresting(SVal V) { markInteresting(V.getAsRegion()); markInteresting(V.getAsSymbol()); } void BugReport::markInteresting(const LocationContext *LC) { if (!LC) return; InterestingLocationContexts.insert(LC); } bool BugReport::isInteresting(SVal V) { return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol()); } bool BugReport::isInteresting(SymbolRef sym) { if (!sym) return false; // We don't currently consider metadata symbols to be interesting // even if we know their region is interesting. Is that correct behavior? 
return getInterestingSymbols().count(sym); } bool BugReport::isInteresting(const MemRegion *R) { if (!R) return false; R = R->getBaseRegion(); bool b = getInterestingRegions().count(R); if (b) return true; if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) return getInterestingSymbols().count(SR->getSymbol()); return false; } bool BugReport::isInteresting(const LocationContext *LC) { if (!LC) return false; return InterestingLocationContexts.count(LC); } void BugReport::lazyInitializeInterestingSets() { if (interestingSymbols.empty()) { interestingSymbols.push_back(new Symbols()); interestingRegions.push_back(new Regions()); } } BugReport::Symbols &BugReport::getInterestingSymbols() { lazyInitializeInterestingSets(); return *interestingSymbols.back(); } BugReport::Regions &BugReport::getInterestingRegions() { lazyInitializeInterestingSets(); return *interestingRegions.back(); } void BugReport::pushInterestingSymbolsAndRegions() { interestingSymbols.push_back(new Symbols(getInterestingSymbols())); interestingRegions.push_back(new Regions(getInterestingRegions())); } void BugReport::popInterestingSymbolsAndRegions() { delete interestingSymbols.pop_back_val(); delete interestingRegions.pop_back_val(); } const Stmt *BugReport::getStmt() const { if (!ErrorNode) return nullptr; ProgramPoint ProgP = ErrorNode->getLocation(); const Stmt *S = nullptr; if (Optional<BlockEntrance> BE = ProgP.getAs<BlockEntrance>()) { CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit(); if (BE->getBlock() == &Exit) S = GetPreviousStmt(ErrorNode); } if (!S) S = PathDiagnosticLocation::getStmt(ErrorNode); return S; } llvm::iterator_range<BugReport::ranges_iterator> BugReport::getRanges() { // If no custom ranges, add the range of the statement corresponding to // the error node. 
if (Ranges.empty()) { if (const Expr *E = dyn_cast_or_null<Expr>(getStmt())) addRange(E->getSourceRange()); else return llvm::make_range(ranges_iterator(), ranges_iterator()); } // User-specified absence of range info. if (Ranges.size() == 1 && !Ranges.begin()->isValid()) return llvm::make_range(ranges_iterator(), ranges_iterator()); return llvm::iterator_range<BugReport::ranges_iterator>(Ranges.begin(), Ranges.end()); } PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const { if (ErrorNode) { assert(!Location.isValid() && "Either Location or ErrorNode should be specified but not both."); return PathDiagnosticLocation::createEndOfPath(ErrorNode, SM); } assert(Location.isValid()); return Location; } //===----------------------------------------------------------------------===// // Methods for BugReporter and subclasses. //===----------------------------------------------------------------------===// BugReportEquivClass::~BugReportEquivClass() { } GRBugReporter::~GRBugReporter() { } BugReporterData::~BugReporterData() {} ExplodedGraph &GRBugReporter::getGraph() { return Eng.getGraph(); } ProgramStateManager& GRBugReporter::getStateManager() { return Eng.getStateManager(); } BugReporter::~BugReporter() { FlushReports(); // Free the bug reports we are tracking. typedef std::vector<BugReportEquivClass *> ContTy; for (ContTy::iterator I = EQClassesVector.begin(), E = EQClassesVector.end(); I != E; ++I) { delete *I; } } void BugReporter::FlushReports() { if (BugTypes.isEmpty()) return; // First flush the warnings for each BugType. This may end up creating new // warnings and new BugTypes. // FIXME: Only NSErrorChecker needs BugType's FlushReports. // Turn NSErrorChecker into a proper checker and remove this. 
SmallVector<const BugType *, 16> bugTypes(BugTypes.begin(), BugTypes.end()); for (SmallVectorImpl<const BugType *>::iterator I = bugTypes.begin(), E = bugTypes.end(); I != E; ++I) const_cast<BugType*>(*I)->FlushReports(*this); // We need to flush reports in deterministic order to ensure the order // of the reports is consistent between runs. typedef std::vector<BugReportEquivClass *> ContVecTy; for (ContVecTy::iterator EI=EQClassesVector.begin(), EE=EQClassesVector.end(); EI != EE; ++EI){ BugReportEquivClass& EQ = **EI; FlushReport(EQ); } // BugReporter owns and deletes only BugTypes created implicitly through // EmitBasicReport. // FIXME: There are leaks from checkers that assume that the BugTypes they // create will be destroyed by the BugReporter. llvm::DeleteContainerSeconds(StrBugTypes); // Remove all references to the BugType objects. BugTypes = F.getEmptySet(); } //===----------------------------------------------------------------------===// // PathDiagnostics generation. //===----------------------------------------------------------------------===// namespace { /// A wrapper around a report graph, which contains only a single path, and its /// node maps. class ReportGraph { public: InterExplodedGraphMap BackMap; std::unique_ptr<ExplodedGraph> Graph; const ExplodedNode *ErrorNode; size_t Index; }; /// A wrapper around a trimmed graph and its node maps. class TrimmedGraph { InterExplodedGraphMap InverseMap; typedef llvm::DenseMap<const ExplodedNode *, unsigned> PriorityMapTy; PriorityMapTy PriorityMap; typedef std::pair<const ExplodedNode *, size_t> NodeIndexPair; SmallVector<NodeIndexPair, 32> ReportNodes; std::unique_ptr<ExplodedGraph> G; /// A helper class for sorting ExplodedNodes by priority. 
template <bool Descending> class PriorityCompare { const PriorityMapTy &PriorityMap; public: PriorityCompare(const PriorityMapTy &M) : PriorityMap(M) {} bool operator()(const ExplodedNode *LHS, const ExplodedNode *RHS) const { PriorityMapTy::const_iterator LI = PriorityMap.find(LHS); PriorityMapTy::const_iterator RI = PriorityMap.find(RHS); PriorityMapTy::const_iterator E = PriorityMap.end(); if (LI == E) return Descending; if (RI == E) return !Descending; return Descending ? LI->second > RI->second : LI->second < RI->second; } bool operator()(const NodeIndexPair &LHS, const NodeIndexPair &RHS) const { return (*this)(LHS.first, RHS.first); } }; public: TrimmedGraph(const ExplodedGraph *OriginalGraph, ArrayRef<const ExplodedNode *> Nodes); bool popNextReportGraph(ReportGraph &GraphWrapper); }; } TrimmedGraph::TrimmedGraph(const ExplodedGraph *OriginalGraph, ArrayRef<const ExplodedNode *> Nodes) { // The trimmed graph is created in the body of the constructor to ensure // that the DenseMaps have been initialized already. InterExplodedGraphMap ForwardMap; G = OriginalGraph->trim(Nodes, &ForwardMap, &InverseMap); // Find the (first) error node in the trimmed graph. We just need to consult // the node map which maps from nodes in the original graph to nodes // in the new graph. llvm::SmallPtrSet<const ExplodedNode *, 32> RemainingNodes; for (unsigned i = 0, count = Nodes.size(); i < count; ++i) { if (const ExplodedNode *NewNode = ForwardMap.lookup(Nodes[i])) { ReportNodes.push_back(std::make_pair(NewNode, i)); RemainingNodes.insert(NewNode); } } assert(!RemainingNodes.empty() && "No error node found in the trimmed graph"); // Perform a forward BFS to find all the shortest paths. 
std::queue<const ExplodedNode *> WS; assert(G->num_roots() == 1); WS.push(*G->roots_begin()); unsigned Priority = 0; while (!WS.empty()) { const ExplodedNode *Node = WS.front(); WS.pop(); PriorityMapTy::iterator PriorityEntry; bool IsNew; std::tie(PriorityEntry, IsNew) = PriorityMap.insert(std::make_pair(Node, Priority)); ++Priority; if (!IsNew) { assert(PriorityEntry->second <= Priority); continue; } if (RemainingNodes.erase(Node)) if (RemainingNodes.empty()) break; for (ExplodedNode::const_pred_iterator I = Node->succ_begin(), E = Node->succ_end(); I != E; ++I) WS.push(*I); } // Sort the error paths from longest to shortest. std::sort(ReportNodes.begin(), ReportNodes.end(), PriorityCompare<true>(PriorityMap)); } bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) { if (ReportNodes.empty()) return false; const ExplodedNode *OrigN; std::tie(OrigN, GraphWrapper.Index) = ReportNodes.pop_back_val(); assert(PriorityMap.find(OrigN) != PriorityMap.end() && "error node not accessible from root"); // Create a new graph with a single path. This is the graph // that will be returned to the caller. auto GNew = llvm::make_unique<ExplodedGraph>(); GraphWrapper.BackMap.clear(); // Now walk from the error node up the BFS path, always taking the // predeccessor with the lowest number. ExplodedNode *Succ = nullptr; while (true) { // Create the equivalent node in the new graph with the same state // and location. ExplodedNode *NewN = GNew->getNode(OrigN->getLocation(), OrigN->getState(), OrigN->isSink()); // Store the mapping to the original node. InterExplodedGraphMap::const_iterator IMitr = InverseMap.find(OrigN); assert(IMitr != InverseMap.end() && "No mapping to original node."); GraphWrapper.BackMap[NewN] = IMitr->second; // Link up the new node with the previous node. if (Succ) Succ->addPredecessor(NewN, *GNew); else GraphWrapper.ErrorNode = NewN; Succ = NewN; // Are we at the final node? 
if (OrigN->pred_empty()) { GNew->addRoot(NewN); break; } // Find the next predeccessor node. We choose the node that is marked // with the lowest BFS number. OrigN = *std::min_element(OrigN->pred_begin(), OrigN->pred_end(), PriorityCompare<false>(PriorityMap)); } GraphWrapper.Graph = std::move(GNew); return true; } /// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object /// and collapses PathDiagosticPieces that are expanded by macros. static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) { typedef std::vector<std::pair<IntrusiveRefCntPtr<PathDiagnosticMacroPiece>, SourceLocation> > MacroStackTy; typedef std::vector<IntrusiveRefCntPtr<PathDiagnosticPiece> > PiecesTy; MacroStackTy MacroStack; PiecesTy Pieces; for (PathPieces::const_iterator I = path.begin(), E = path.end(); I!=E; ++I) { PathDiagnosticPiece *piece = I->get(); // Recursively compact calls. if (PathDiagnosticCallPiece *call=dyn_cast<PathDiagnosticCallPiece>(piece)){ CompactPathDiagnostic(call->path, SM); } // Get the location of the PathDiagnosticPiece. const FullSourceLoc Loc = piece->getLocation().asLocation(); // Determine the instantiation location, which is the location we group // related PathDiagnosticPieces. SourceLocation InstantiationLoc = Loc.isMacroID() ? SM.getExpansionLoc(Loc) : SourceLocation(); if (Loc.isFileID()) { MacroStack.clear(); Pieces.push_back(piece); continue; } assert(Loc.isMacroID()); // Is the PathDiagnosticPiece within the same macro group? if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) { MacroStack.back().first->subPieces.push_back(piece); continue; } // We aren't in the same group. Are we descending into a new macro // or are part of an old one? IntrusiveRefCntPtr<PathDiagnosticMacroPiece> MacroGroup; SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ? SM.getExpansionLoc(Loc) : SourceLocation(); // Walk the entire macro stack. 
while (!MacroStack.empty()) { if (InstantiationLoc == MacroStack.back().second) { MacroGroup = MacroStack.back().first; break; } if (ParentInstantiationLoc == MacroStack.back().second) { MacroGroup = MacroStack.back().first; break; } MacroStack.pop_back(); } if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) { // Create a new macro group and add it to the stack. PathDiagnosticMacroPiece *NewGroup = new PathDiagnosticMacroPiece( PathDiagnosticLocation::createSingleLocation(piece->getLocation())); if (MacroGroup) MacroGroup->subPieces.push_back(NewGroup); else { assert(InstantiationLoc.isFileID()); Pieces.push_back(NewGroup); } MacroGroup = NewGroup; MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc)); } // Finally, add the PathDiagnosticPiece to the group. MacroGroup->subPieces.push_back(piece); } // Now take the pieces and construct a new PathDiagnostic. path.clear(); path.insert(path.end(), Pieces.begin(), Pieces.end()); } bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD, PathDiagnosticConsumer &PC, ArrayRef<BugReport *> &bugReports) { assert(!bugReports.empty()); bool HasValid = false; bool HasInvalid = false; SmallVector<const ExplodedNode *, 32> errorNodes; for (ArrayRef<BugReport*>::iterator I = bugReports.begin(), E = bugReports.end(); I != E; ++I) { if ((*I)->isValid()) { HasValid = true; errorNodes.push_back((*I)->getErrorNode()); } else { // Keep the errorNodes list in sync with the bugReports list. HasInvalid = true; errorNodes.push_back(nullptr); } } // If all the reports have been marked invalid by a previous path generation, // we're done. 
if (!HasValid) return false; typedef PathDiagnosticConsumer::PathGenerationScheme PathGenerationScheme; PathGenerationScheme ActiveScheme = PC.getGenerationScheme(); if (ActiveScheme == PathDiagnosticConsumer::Extensive) { AnalyzerOptions &options = getAnalyzerOptions(); if (options.getBooleanOption("path-diagnostics-alternate", true)) { ActiveScheme = PathDiagnosticConsumer::AlternateExtensive; } } TrimmedGraph TrimG(&getGraph(), errorNodes); ReportGraph ErrorGraph; while (TrimG.popNextReportGraph(ErrorGraph)) { // Find the BugReport with the original location. assert(ErrorGraph.Index < bugReports.size()); BugReport *R = bugReports[ErrorGraph.Index]; assert(R && "No original report found for sliced graph."); assert(R->isValid() && "Report selected by trimmed graph marked invalid."); // Start building the path diagnostic... PathDiagnosticBuilder PDB(*this, R, ErrorGraph.BackMap, &PC); const ExplodedNode *N = ErrorGraph.ErrorNode; // Register additional node visitors. R->addVisitor(llvm::make_unique<NilReceiverBRVisitor>()); R->addVisitor(llvm::make_unique<ConditionBRVisitor>()); R->addVisitor(llvm::make_unique<LikelyFalsePositiveSuppressionBRVisitor>()); BugReport::VisitorList visitors; unsigned origReportConfigToken, finalReportConfigToken; LocationContextMap LCM; // While generating diagnostics, it's possible the visitors will decide // new symbols and regions are interesting, or add other visitors based on // the information they find. If they do, we need to regenerate the path // based on our new report configuration. do { // Get a clean copy of all the visitors. for (BugReport::visitor_iterator I = R->visitor_begin(), E = R->visitor_end(); I != E; ++I) visitors.push_back((*I)->clone()); // Clear out the active path from any previous work. PD.resetPath(); origReportConfigToken = R->getConfigurationChangeToken(); // Generate the very last diagnostic piece - the piece is visible before // the trace is expanded. 
std::unique_ptr<PathDiagnosticPiece> LastPiece; for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end(); I != E; ++I) { if (std::unique_ptr<PathDiagnosticPiece> Piece = (*I)->getEndPath(PDB, N, *R)) { assert (!LastPiece && "There can only be one final piece in a diagnostic."); LastPiece = std::move(Piece); } } if (ActiveScheme != PathDiagnosticConsumer::None) { if (!LastPiece) LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R); assert(LastPiece); PD.setEndOfPath(std::move(LastPiece)); } // Make sure we get a clean location context map so we don't // hold onto old mappings. LCM.clear(); switch (ActiveScheme) { case PathDiagnosticConsumer::AlternateExtensive: GenerateAlternateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors); break; case PathDiagnosticConsumer::Extensive: GenerateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors); break; case PathDiagnosticConsumer::Minimal: GenerateMinimalPathDiagnostic(PD, PDB, N, LCM, visitors); break; case PathDiagnosticConsumer::None: GenerateVisitorsOnlyPathDiagnostic(PD, PDB, N, visitors); break; } // Clean up the visitors we used. visitors.clear(); // Did anything change while generating this path? finalReportConfigToken = R->getConfigurationChangeToken(); } while (finalReportConfigToken != origReportConfigToken); if (!R->isValid()) continue; // Finally, prune the diagnostic path of uninteresting stuff. if (!PD.path.empty()) { if (R->shouldPrunePath() && getAnalyzerOptions().shouldPrunePaths()) { bool stillHasNotes = removeUnneededCalls(PD.getMutablePieces(), R, LCM); assert(stillHasNotes); (void)stillHasNotes; } // Redirect all call pieces to have valid locations. 
adjustCallLocations(PD.getMutablePieces()); removePiecesWithInvalidLocations(PD.getMutablePieces()); if (ActiveScheme == PathDiagnosticConsumer::AlternateExtensive) { SourceManager &SM = getSourceManager(); // Reduce the number of edges from a very conservative set // to an aesthetically pleasing subset that conveys the // necessary information. OptimizedCallsSet OCS; while (optimizeEdges(PD.getMutablePieces(), SM, OCS, LCM)) {} // Drop the very first function-entry edge. It's not really necessary // for top-level functions. dropFunctionEntryEdge(PD.getMutablePieces(), LCM, SM); } // Remove messages that are basically the same, and edges that may not // make sense. // We have to do this after edge optimization in the Extensive mode. removeRedundantMsgs(PD.getMutablePieces()); removeEdgesToDefaultInitializers(PD.getMutablePieces()); } // We found a report and didn't suppress it. return true; } // We suppressed all the reports in this equivalence class. assert(!HasInvalid && "Inconsistent suppression"); (void)HasInvalid; return false; } void BugReporter::Register(BugType *BT) { BugTypes = F.add(BugTypes, BT); } void BugReporter::emitReport(std::unique_ptr<BugReport> R) { if (const ExplodedNode *E = R->getErrorNode()) { // An error node must either be a sink or have a tag, otherwise // it could get reclaimed before the path diagnostic is created. assert((E->isSink() || E->getLocation().getTag()) && "Error node must either be a sink or have a tag"); const AnalysisDeclContext *DeclCtx = E->getLocationContext()->getAnalysisDeclContext(); // The source of autosynthesized body can be handcrafted AST or a model // file. The locations from handcrafted ASTs have no valid source locations // and have to be discarded. Locations from model files should be preserved // for processing and reporting. 
if (DeclCtx->isBodyAutosynthesized() && !DeclCtx->isBodyAutosynthesizedFromModelFile()) return; } bool ValidSourceLoc = R->getLocation(getSourceManager()).isValid(); assert(ValidSourceLoc); // If we mess up in a release build, we'd still prefer to just drop the bug // instead of trying to go on. if (!ValidSourceLoc) return; // Compute the bug report's hash to determine its equivalence class. llvm::FoldingSetNodeID ID; R->Profile(ID); // Lookup the equivance class. If there isn't one, create it. BugType& BT = R->getBugType(); Register(&BT); void *InsertPos; BugReportEquivClass* EQ = EQClasses.FindNodeOrInsertPos(ID, InsertPos); if (!EQ) { EQ = new BugReportEquivClass(std::move(R)); EQClasses.InsertNode(EQ, InsertPos); EQClassesVector.push_back(EQ); } else EQ->AddReport(std::move(R)); } //===----------------------------------------------------------------------===// // Emitting reports in equivalence classes. //===----------------------------------------------------------------------===// namespace { struct FRIEC_WLItem { const ExplodedNode *N; ExplodedNode::const_succ_iterator I, E; FRIEC_WLItem(const ExplodedNode *n) : N(n), I(N->succ_begin()), E(N->succ_end()) {} }; } static BugReport * FindReportInEquivalenceClass(BugReportEquivClass& EQ, SmallVectorImpl<BugReport*> &bugReports) { BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end(); assert(I != E); BugType& BT = I->getBugType(); // If we don't need to suppress any of the nodes because they are // post-dominated by a sink, simply add all the nodes in the equivalence class // to 'Nodes'. Any of the reports will serve as a "representative" report. 
if (!BT.isSuppressOnSink()) { BugReport *R = I; for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) { const ExplodedNode *N = I->getErrorNode(); if (N) { R = I; bugReports.push_back(R); } } return R; } // For bug reports that should be suppressed when all paths are post-dominated // by a sink node, iterate through the reports in the equivalence class // until we find one that isn't post-dominated (if one exists). We use a // DFS traversal of the ExplodedGraph to find a non-sink node. We could write // this as a recursive function, but we don't want to risk blowing out the // stack for very long paths. BugReport *exampleReport = nullptr; for (; I != E; ++I) { const ExplodedNode *errorNode = I->getErrorNode(); if (!errorNode) continue; if (errorNode->isSink()) { llvm_unreachable( "BugType::isSuppressSink() should not be 'true' for sink end nodes"); } // No successors? By definition this nodes isn't post-dominated by a sink. if (errorNode->succ_empty()) { bugReports.push_back(I); if (!exampleReport) exampleReport = I; continue; } // At this point we know that 'N' is not a sink and it has at least one // successor. Use a DFS worklist to find a non-sink end-of-path node. typedef FRIEC_WLItem WLItem; typedef SmallVector<WLItem, 10> DFSWorkList; llvm::DenseMap<const ExplodedNode *, unsigned> Visited; DFSWorkList WL; WL.push_back(errorNode); Visited[errorNode] = 1; while (!WL.empty()) { WLItem &WI = WL.back(); assert(!WI.N->succ_empty()); for (; WI.I != WI.E; ++WI.I) { const ExplodedNode *Succ = *WI.I; // End-of-path node? if (Succ->succ_empty()) { // If we found an end-of-path node that is not a sink. if (!Succ->isSink()) { bugReports.push_back(I); if (!exampleReport) exampleReport = I; WL.clear(); break; } // Found a sink? Continue on to the next successor. continue; } // Mark the successor as visited. If it hasn't been explored, // enqueue it to the DFS worklist. 
unsigned &mark = Visited[Succ]; if (!mark) { mark = 1; WL.push_back(Succ); break; } } // The worklist may have been cleared at this point. First // check if it is empty before checking the last item. if (!WL.empty() && &WL.back() == &WI) WL.pop_back(); } } // ExampleReport will be NULL if all the nodes in the equivalence class // were post-dominated by sinks. return exampleReport; } void BugReporter::FlushReport(BugReportEquivClass& EQ) { SmallVector<BugReport*, 10> bugReports; BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports); if (exampleReport) { for (PathDiagnosticConsumer *PDC : getPathDiagnosticConsumers()) { FlushReport(exampleReport, *PDC, bugReports); } } } void BugReporter::FlushReport(BugReport *exampleReport, PathDiagnosticConsumer &PD, ArrayRef<BugReport*> bugReports) { // FIXME: Make sure we use the 'R' for the path that was actually used. // Probably doesn't make a difference in practice. BugType& BT = exampleReport->getBugType(); std::unique_ptr<PathDiagnostic> D(new PathDiagnostic( exampleReport->getBugType().getCheckName(), exampleReport->getDeclWithIssue(), exampleReport->getBugType().getName(), exampleReport->getDescription(), exampleReport->getShortDescription(/*Fallback=*/false), BT.getCategory(), exampleReport->getUniqueingLocation(), exampleReport->getUniqueingDecl())); MaxBugClassSize = std::max(bugReports.size(), static_cast<size_t>(MaxBugClassSize)); // Generate the full path diagnostic, using the generation scheme // specified by the PathDiagnosticConsumer. Note that we have to generate // path diagnostics even for consumers which do not support paths, because // the BugReporterVisitors may mark this bug as a false positive. if (!bugReports.empty()) if (!generatePathDiagnostic(*D.get(), PD, bugReports)) return; MaxValidBugClassSize = std::max(bugReports.size(), static_cast<size_t>(MaxValidBugClassSize)); // Examine the report and see if the last piece is in a header. 
Reset the // report location to the last piece in the main source file. AnalyzerOptions& Opts = getAnalyzerOptions(); if (Opts.shouldReportIssuesInMainSourceFile() && !Opts.AnalyzeAll) D->resetDiagnosticLocationToMainFile(); // If the path is empty, generate a single step path with the location // of the issue. if (D->path.empty()) { PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager()); auto piece = llvm::make_unique<PathDiagnosticEventPiece>( L, exampleReport->getDescription()); for (const SourceRange &Range : exampleReport->getRanges()) piece->addRange(Range); D->setEndOfPath(std::move(piece)); } // Get the meta data. const BugReport::ExtraTextList &Meta = exampleReport->getExtraText(); for (BugReport::ExtraTextList::const_iterator i = Meta.begin(), e = Meta.end(); i != e; ++i) { D->addMeta(*i); } PD.HandlePathDiagnostic(std::move(D)); } void BugReporter::EmitBasicReport(const Decl *DeclWithIssue, const CheckerBase *Checker, StringRef Name, StringRef Category, StringRef Str, PathDiagnosticLocation Loc, ArrayRef<SourceRange> Ranges) { EmitBasicReport(DeclWithIssue, Checker->getCheckName(), Name, Category, Str, Loc, Ranges); } void BugReporter::EmitBasicReport(const Decl *DeclWithIssue, CheckName CheckName, StringRef name, StringRef category, StringRef str, PathDiagnosticLocation Loc, ArrayRef<SourceRange> Ranges) { // 'BT' is owned by BugReporter. 
BugType *BT = getBugTypeForName(CheckName, name, category); auto R = llvm::make_unique<BugReport>(*BT, str, Loc); R->setDeclWithIssue(DeclWithIssue); for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end(); I != E; ++I) R->addRange(*I); emitReport(std::move(R)); } BugType *BugReporter::getBugTypeForName(CheckName CheckName, StringRef name, StringRef category) { SmallString<136> fullDesc; llvm::raw_svector_ostream(fullDesc) << CheckName.getName() << ":" << name << ":" << category; BugType *&BT = StrBugTypes[fullDesc]; if (!BT) BT = new BugType(CheckName, name, category); return BT; } LLVM_DUMP_METHOD void PathPieces::dump() const { unsigned index = 0; for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) { llvm::errs() << "[" << index++ << "] "; (*I)->dump(); llvm::errs() << "\n"; } } void PathDiagnosticCallPiece::dump() const { llvm::errs() << "CALL\n--------------\n"; if (const Stmt *SLoc = getLocStmt(getLocation())) SLoc->dump(); else if (const NamedDecl *ND = dyn_cast<NamedDecl>(getCallee())) llvm::errs() << *ND << "\n"; else getLocation().dump(); } void PathDiagnosticEventPiece::dump() const { llvm::errs() << "EVENT\n--------------\n"; llvm::errs() << getString() << "\n"; llvm::errs() << " ---- at ----\n"; getLocation().dump(); } void PathDiagnosticControlFlowPiece::dump() const { llvm::errs() << "CONTROL\n--------------\n"; getStartLocation().dump(); llvm::errs() << " ---- to ----\n"; getEndLocation().dump(); } void PathDiagnosticMacroPiece::dump() const { llvm::errs() << "MACRO\n--------------\n"; // FIXME: Print which macro is being invoked. } void PathDiagnosticLocation::dump() const { if (!isValid()) { llvm::errs() << "<INVALID>\n"; return; } switch (K) { case RangeK: // FIXME: actually print the range. 
llvm::errs() << "<range>\n"; break; case SingleLocK: asLocation().dump(); llvm::errs() << "\n"; break; case StmtK: if (S) S->dump(); else llvm::errs() << "<NULL STMT>\n"; break; case DeclK: if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(D)) llvm::errs() << *ND << "\n"; else if (isa<BlockDecl>(D)) // FIXME: Make this nicer. llvm::errs() << "<block>\n"; else if (D) llvm::errs() << "<unknown decl>\n"; else llvm::errs() << "<NULL DECL>\n"; break; } }
/*========================================================================= * * Copyright Insight Software Consortium * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * *=========================================================================*/ #ifndef __itkSpringColormapFunction_hxx #define __itkSpringColormapFunction_hxx #include "itkSpringColormapFunction.h" namespace itk { namespace Function { template< typename TScalar, typename TRGBPixel > typename SpringColormapFunction< TScalar, TRGBPixel >::RGBPixelType SpringColormapFunction< TScalar, TRGBPixel > ::operator()(const TScalar & v) const { // Map the input scalar between [0, 1]. RealType value = this->RescaleInputValue(v); // Apply the color mapping. RealType red = 1.0; RealType green = value; RealType blue = 1.0 - value; // Set the rgb components after rescaling the values. RGBPixelType pixel; NumericTraits<TRGBPixel>::SetLength(pixel, 3); pixel[0] = this->RescaleRGBComponentValue(red); pixel[1] = this->RescaleRGBComponentValue(green); pixel[2] = this->RescaleRGBComponentValue(blue); return pixel; } } // end namespace Function } // end namespace itk #endif
/*! file insert.hpp
 *
 * @copyright Licensed under MIT license by hyperQ – Ewa Hendzel
 *
 */

#ifndef INSERT_HPP_
#define INSERT_HPP_

#include <unordered_map>

namespace helpers {

/**
 * @brief A helper function that inserts linear or quadratic
 * coefficients into a QUBO model.
 *
 * If the key is absent it is inserted with the given value; if it is
 * already present its value is overwritten.
 *
 * @tparam NodeType An integer number type used to index nodes of the
 * QUBO model.
 * @tparam CoefType A real type used to store values of the QUBO model.
 * @tparam Hash Hash functor type for the map keys (a variable address
 * or an address pair of the QUBO model).
 * @param um Linear or quadratic coefficients store.
 * @param key Address of a variable or addresses pair of the variables
 * to add to the model.
 * @param val Coefficient value.
 */
template <class NodeType, class CoefType, class Hash>
void insert_model(std::unordered_map<NodeType, CoefType, Hash> &um,
                  const NodeType &key, const CoefType &val) {
  // insert_or_assign (C++17) has exactly the insert-or-overwrite semantics
  // of the former count()-then-insert/operator[] pattern, but performs a
  // single hash lookup instead of two.
  um.insert_or_assign(key, val);
}

} // namespace helpers

#endif
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/cryptauth/foreground_eid_generator.h"

#include <memory>

#include "base/logging.h"
#include "base/strings/string_util.h"
#include "base/test/simple_test_clock.h"
#include "base/time/time.h"
#include "components/cryptauth/proto/cryptauth_api.pb.h"
#include "components/cryptauth/raw_eid_generator_impl.h"
#include "components/cryptauth/remote_device_ref.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using testing::_;
using testing::AtLeast;
using testing::NiceMock;
using testing::Return;
using testing::StrictMock;
using testing::SaveArg;

namespace cryptauth {

namespace {

// These constants could be made as integer amounts of milliseconds by calling
// .InMilliseconds(), but this would create a static initializer because there
// is no constexpr implementation of TimeDelta::InMilliseconds() yet. Static
// initializers are not a big problem in tests, but it is preferable to avoid
// them here for consistency with similar definitions going into release
// binaries.
constexpr base::TimeDelta kEidPeriod = base::TimeDelta::FromHours(8);
constexpr base::TimeDelta kEidSeedPeriod = base::TimeDelta::FromDays(14);

const int32_t kNumBytesInEidValue = 2;

// Midnight on 1/1/2020.
const int64_t kDefaultCurrentPeriodStart = 1577836800000L;

// 1:43am on 1/1/2020.
const int64_t kDefaultCurrentTime = 1577843000000L;

// The Base64 encoded values of these raw data strings are, respectively:
// "Zmlyc3RTZWVk", "c2Vjb25kU2VlZA==", "dGhpcmRTZWVk","Zm91cnRoU2VlZA==".
const std::string kFirstSeed = "firstSeed"; const std::string kSecondSeed = "secondSeed"; const std::string kThirdSeed = "thirdSeed"; const std::string kFourthSeed = "fourthSeed"; const std::string kDefaultAdvertisingDevicePublicKey = "publicKey"; BeaconSeed CreateBeaconSeed(const std::string& data, int64_t start_timestamp_ms, int64_t end_timestamp_ms) { BeaconSeed seed; seed.set_data(data); seed.set_start_time_millis(start_timestamp_ms); seed.set_end_time_millis(end_timestamp_ms); return seed; } std::string GenerateFakeEidData(const std::string& eid_seed, int64_t start_of_period_timestamp_ms, const std::string* extra_entropy) { std::hash<std::string> string_hash; int64_t seed_hash = string_hash(eid_seed); int64_t extra_hash = extra_entropy ? string_hash(*extra_entropy) : 0; int64_t fake_data_xor = seed_hash ^ start_of_period_timestamp_ms ^ extra_hash; std::string fake_data(reinterpret_cast<const char*>(&fake_data_xor), sizeof(fake_data_xor)); fake_data.resize(kNumBytesInEidValue); return fake_data; } std::string GenerateFakeAdvertisement( const std::string& scanning_device_eid_seed, int64_t start_of_period_timestamp_ms, const std::string& advertising_device_public_key) { std::string fake_scanning_eid = GenerateFakeEidData( scanning_device_eid_seed, start_of_period_timestamp_ms, nullptr); std::string fake_advertising_id = GenerateFakeEidData( scanning_device_eid_seed, start_of_period_timestamp_ms, &advertising_device_public_key); std::string fake_advertisement; fake_advertisement.append(fake_scanning_eid); fake_advertisement.append(fake_advertising_id); return fake_advertisement; } } // namespace class CryptAuthForegroundEidGeneratorTest : public testing::Test { protected: CryptAuthForegroundEidGeneratorTest() { scanning_device_beacon_seeds_.push_back(CreateBeaconSeed( kFirstSeed, kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds(), kDefaultCurrentPeriodStart)); scanning_device_beacon_seeds_.push_back(CreateBeaconSeed( kSecondSeed, 
kDefaultCurrentPeriodStart, kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds())); scanning_device_beacon_seeds_.push_back(CreateBeaconSeed( kThirdSeed, kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds(), kDefaultCurrentPeriodStart + 2 * kEidSeedPeriod.InMilliseconds())); scanning_device_beacon_seeds_.push_back(CreateBeaconSeed( kFourthSeed, kDefaultCurrentPeriodStart + 2 * kEidSeedPeriod.InMilliseconds(), kDefaultCurrentPeriodStart + 3 * kEidSeedPeriod.InMilliseconds())); } class TestRawEidGenerator : public RawEidGenerator { public: TestRawEidGenerator() {} ~TestRawEidGenerator() override {} // RawEidGenerator: std::string GenerateEid(const std::string& eid_seed, int64_t start_of_period_timestamp_ms, std::string const* extra_entropy) override { return GenerateFakeEidData(eid_seed, start_of_period_timestamp_ms, extra_entropy); } }; void SetUp() override { SetTestTime(kDefaultCurrentTime); eid_generator_.reset(new ForegroundEidGenerator( std::make_unique<TestRawEidGenerator>(), &test_clock_)); } // TODO(khorimoto): Is there an easier way to do this? 
void SetTestTime(int64_t timestamp_ms) { base::Time time = base::Time::UnixEpoch() + base::TimeDelta::FromMilliseconds(timestamp_ms); test_clock_.SetNow(time); } std::unique_ptr<ForegroundEidGenerator> eid_generator_; base::SimpleTestClock test_clock_; std::vector<BeaconSeed> scanning_device_beacon_seeds_; }; TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_StartOfPeriod_AnotherSeedInPreviousPeriod) { SetTestTime(kDefaultCurrentTime); std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter( scanning_device_beacon_seeds_); ASSERT_TRUE(data); EXPECT_EQ(data->GetAdjacentDataType(), ForegroundEidGenerator::EidData::AdjacentDataType::PAST); EXPECT_EQ(kDefaultCurrentPeriodStart, data->current_data.start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidPeriod.InMilliseconds(), data->current_data.end_timestamp_ms); EXPECT_EQ( GenerateFakeEidData(kSecondSeed, kDefaultCurrentPeriodStart, nullptr), data->current_data.data); ASSERT_TRUE(data->adjacent_data); EXPECT_EQ(kDefaultCurrentPeriodStart - kEidPeriod.InMilliseconds(), data->adjacent_data->start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart, data->adjacent_data->end_timestamp_ms); EXPECT_EQ( GenerateFakeEidData( kFirstSeed, kDefaultCurrentPeriodStart - kEidPeriod.InMilliseconds(), nullptr), data->adjacent_data->data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_StartOfPeriod_NoSeedBefore) { SetTestTime(kDefaultCurrentTime - kEidSeedPeriod.InMilliseconds()); std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter( scanning_device_beacon_seeds_); ASSERT_TRUE(data); EXPECT_EQ(data->GetAdjacentDataType(), ForegroundEidGenerator::EidData::AdjacentDataType::NONE); EXPECT_EQ(kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds(), data->current_data.start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds() + 
kEidPeriod.InMilliseconds(), data->current_data.end_timestamp_ms); EXPECT_EQ(GenerateFakeEidData( kFirstSeed, kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds(), nullptr), data->current_data.data); EXPECT_FALSE(data->adjacent_data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_PastStartOfPeriod) { SetTestTime(kDefaultCurrentTime + base::TimeDelta::FromHours(3).InMilliseconds()); std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter( scanning_device_beacon_seeds_); ASSERT_TRUE(data); EXPECT_EQ(data->GetAdjacentDataType(), ForegroundEidGenerator::EidData::AdjacentDataType::FUTURE); EXPECT_EQ(kDefaultCurrentPeriodStart, data->current_data.start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidPeriod.InMilliseconds(), data->current_data.end_timestamp_ms); EXPECT_EQ( GenerateFakeEidData(kSecondSeed, kDefaultCurrentPeriodStart, nullptr), data->current_data.data); ASSERT_TRUE(data->adjacent_data); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidPeriod.InMilliseconds(), data->adjacent_data->start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart + 2 * kEidPeriod.InMilliseconds(), data->adjacent_data->end_timestamp_ms); EXPECT_EQ( GenerateFakeEidData( kSecondSeed, kDefaultCurrentPeriodStart + kEidPeriod.InMilliseconds(), nullptr), data->adjacent_data->data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_EndOfPeriod) { SetTestTime(kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds() - 1); std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter( scanning_device_beacon_seeds_); ASSERT_TRUE(data); EXPECT_EQ(data->GetAdjacentDataType(), ForegroundEidGenerator::EidData::AdjacentDataType::FUTURE); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds() - kEidPeriod.InMilliseconds(), data->current_data.start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds(), 
data->current_data.end_timestamp_ms); EXPECT_EQ(GenerateFakeEidData(kSecondSeed, kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds() - kEidPeriod.InMilliseconds(), nullptr), data->current_data.data); ASSERT_TRUE(data->adjacent_data); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds(), data->adjacent_data->start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds() + kEidPeriod.InMilliseconds(), data->adjacent_data->end_timestamp_ms); EXPECT_EQ(GenerateFakeEidData( kThirdSeed, kDefaultCurrentPeriodStart + kEidSeedPeriod.InMilliseconds(), nullptr), data->adjacent_data->data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_EndOfPeriod_NoSeedAfter) { SetTestTime(kDefaultCurrentPeriodStart + 3 * kEidSeedPeriod.InMilliseconds() - 1); std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter( scanning_device_beacon_seeds_); ASSERT_TRUE(data); EXPECT_EQ(data->GetAdjacentDataType(), ForegroundEidGenerator::EidData::AdjacentDataType::NONE); EXPECT_EQ(kDefaultCurrentPeriodStart + 3 * kEidSeedPeriod.InMilliseconds() - kEidPeriod.InMilliseconds(), data->current_data.start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart + 3 * kEidSeedPeriod.InMilliseconds(), data->current_data.end_timestamp_ms); EXPECT_EQ(GenerateFakeEidData(kFourthSeed, kDefaultCurrentPeriodStart + 3 * kEidSeedPeriod.InMilliseconds() - kEidPeriod.InMilliseconds(), nullptr), data->current_data.data); EXPECT_FALSE(data->adjacent_data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_NoCurrentPeriodSeed) { SetTestTime(kDefaultCurrentPeriodStart + 4 * kEidSeedPeriod.InMilliseconds() - 1); std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter( scanning_device_beacon_seeds_); EXPECT_FALSE(data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_EmptySeeds) { 
SetTestTime(kDefaultCurrentTime); std::vector<BeaconSeed> empty; std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter(empty); EXPECT_FALSE(data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_InvalidSeed_PeriodNotMultipleOf8Hours) { SetTestTime(kDefaultCurrentTime); // Seed has a period of 1ms, but it should have a period of 8 hours. std::vector<BeaconSeed> invalid_seed_vector = {CreateBeaconSeed( kFirstSeed, kDefaultCurrentPeriodStart, kDefaultCurrentPeriodStart + 1)}; std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter(invalid_seed_vector); EXPECT_FALSE(data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateBackgroundScanFilter_UsingRealEids) { SetTestTime(kDefaultCurrentTime); // Use real RawEidGenerator implementation instead of test version. eid_generator_.reset(new ForegroundEidGenerator( std::make_unique<RawEidGeneratorImpl>(), &test_clock_)); std::unique_ptr<ForegroundEidGenerator::EidData> data = eid_generator_->GenerateBackgroundScanFilter( scanning_device_beacon_seeds_); ASSERT_TRUE(data); EXPECT_EQ(data->GetAdjacentDataType(), ForegroundEidGenerator::EidData::AdjacentDataType::PAST); EXPECT_EQ(kDefaultCurrentPeriodStart, data->current_data.start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidPeriod.InMilliseconds(), data->current_data.end_timestamp_ms); // Since this uses the real RawEidGenerator, just make sure the data // exists and has the proper length. EXPECT_EQ((size_t)kNumBytesInEidValue, data->current_data.data.length()); ASSERT_TRUE(data->adjacent_data); EXPECT_EQ(kDefaultCurrentPeriodStart - kEidPeriod.InMilliseconds(), data->adjacent_data->start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart, data->adjacent_data->end_timestamp_ms); // Since this uses the real RawEidGenerator, just make sure the data // exists and has the proper length. 
EXPECT_EQ((size_t)kNumBytesInEidValue, data->adjacent_data->data.length()); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateAdvertisementData) { SetTestTime(kDefaultCurrentTime); std::unique_ptr<DataWithTimestamp> data = eid_generator_->GenerateAdvertisement(kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); ASSERT_TRUE(data); EXPECT_EQ(kDefaultCurrentPeriodStart, data->start_timestamp_ms); EXPECT_EQ(kDefaultCurrentPeriodStart + kEidPeriod.InMilliseconds(), data->end_timestamp_ms); EXPECT_EQ(GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey), data->data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateAdvertisementData_NoSeedForPeriod) { SetTestTime(kDefaultCurrentTime + 4 * kEidSeedPeriod.InMilliseconds()); std::unique_ptr<DataWithTimestamp> data = eid_generator_->GenerateAdvertisement(kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); EXPECT_FALSE(data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GenerateAdvertisementData_EmptySeeds) { SetTestTime(kDefaultCurrentTime + 4 * kEidSeedPeriod.InMilliseconds()); std::vector<BeaconSeed> empty; std::unique_ptr<DataWithTimestamp> data = eid_generator_->GenerateAdvertisement(kDefaultAdvertisingDevicePublicKey, empty); EXPECT_FALSE(data); } TEST_F(CryptAuthForegroundEidGeneratorTest, GeneratePossibleAdvertisements_CurrentAndPastAdjacentPeriods) { SetTestTime(kDefaultCurrentPeriodStart); std::vector<std::string> possible_advertisements = eid_generator_->GeneratePossibleAdvertisements( kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); EXPECT_EQ((size_t)2, possible_advertisements.size()); EXPECT_EQ(GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey), possible_advertisements[0]); EXPECT_EQ( GenerateFakeAdvertisement( kFirstSeed, kDefaultCurrentPeriodStart - kEidPeriod.InMilliseconds(), kDefaultAdvertisingDevicePublicKey), possible_advertisements[1]); } 
TEST_F(CryptAuthForegroundEidGeneratorTest, testGeneratePossibleAdvertisements_CurrentAndFutureAdjacentPeriods) { SetTestTime(kDefaultCurrentPeriodStart + base::TimeDelta::FromHours(3).InMilliseconds()); std::vector<std::string> possible_advertisements = eid_generator_->GeneratePossibleAdvertisements( kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); EXPECT_EQ((size_t)2, possible_advertisements.size()); EXPECT_EQ(GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey), possible_advertisements[0]); EXPECT_EQ( GenerateFakeAdvertisement( kSecondSeed, kDefaultCurrentPeriodStart + kEidPeriod.InMilliseconds(), kDefaultAdvertisingDevicePublicKey), possible_advertisements[1]); } TEST_F(CryptAuthForegroundEidGeneratorTest, GeneratePossibleAdvertisements_OnlyCurrentPeriod) { SetTestTime(kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds()); std::vector<std::string> possible_advertisements = eid_generator_->GeneratePossibleAdvertisements( kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); EXPECT_EQ((size_t)1, possible_advertisements.size()); EXPECT_EQ(GenerateFakeAdvertisement( kFirstSeed, kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds(), kDefaultAdvertisingDevicePublicKey), possible_advertisements[0]); } TEST_F(CryptAuthForegroundEidGeneratorTest, GeneratePossibleAdvertisements_OnlyFuturePeriod) { SetTestTime(kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds() - kEidPeriod.InMilliseconds()); std::vector<std::string> possible_advertisements = eid_generator_->GeneratePossibleAdvertisements( kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); EXPECT_EQ((size_t)1, possible_advertisements.size()); EXPECT_EQ(GenerateFakeAdvertisement( kFirstSeed, kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds(), kDefaultAdvertisingDevicePublicKey), possible_advertisements[0]); } TEST_F(CryptAuthForegroundEidGeneratorTest, 
GeneratePossibleAdvertisements_NoAdvertisements_SeedsTooFarInFuture) { SetTestTime(kDefaultCurrentPeriodStart - kEidSeedPeriod.InMilliseconds() - kEidPeriod.InMilliseconds() - 1); std::vector<std::string> possible_advertisements = eid_generator_->GeneratePossibleAdvertisements( kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); EXPECT_TRUE(possible_advertisements.empty()); } TEST_F(CryptAuthForegroundEidGeneratorTest, GeneratePossibleAdvertisements_OnlyPastPeriod) { SetTestTime(kDefaultCurrentPeriodStart + 3 * kEidSeedPeriod.InMilliseconds() + kEidPeriod.InMilliseconds()); std::vector<std::string> possible_advertisements = eid_generator_->GeneratePossibleAdvertisements( kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); EXPECT_EQ((size_t)1, possible_advertisements.size()); EXPECT_EQ(GenerateFakeAdvertisement(kFourthSeed, kDefaultCurrentPeriodStart + 3 * kEidSeedPeriod.InMilliseconds() - kEidPeriod.InMilliseconds(), kDefaultAdvertisingDevicePublicKey), possible_advertisements[0]); } TEST_F(CryptAuthForegroundEidGeneratorTest, GeneratePossibleAdvertisements_NoAdvertisements_SeedsTooFarInPast) { SetTestTime(kDefaultCurrentPeriodStart + 3 * kEidSeedPeriod.InMilliseconds() + kEidPeriod.InMilliseconds() + 1); std::vector<std::string> possible_advertisements = eid_generator_->GeneratePossibleAdvertisements( kDefaultAdvertisingDevicePublicKey, scanning_device_beacon_seeds_); EXPECT_TRUE(possible_advertisements.empty()); } TEST_F(CryptAuthForegroundEidGeneratorTest, GeneratePossibleAdvertisements_NoAdvertisements_EmptySeeds) { SetTestTime(kDefaultCurrentPeriodStart); std::vector<BeaconSeed> empty; std::vector<std::string> possible_advertisements = eid_generator_->GeneratePossibleAdvertisements( kDefaultAdvertisingDevicePublicKey, empty); EXPECT_TRUE(possible_advertisements.empty()); } TEST_F(CryptAuthForegroundEidGeneratorTest, IdentifyRemoteDevice_NoDevices) { SetTestTime(kDefaultCurrentPeriodStart); std::string service_data = 
GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey); std::vector<std::string> device_id_list; const std::string identified_device_id = eid_generator_->IdentifyRemoteDeviceByAdvertisement( service_data, device_id_list, scanning_device_beacon_seeds_); EXPECT_TRUE(identified_device_id.empty()); } TEST_F(CryptAuthForegroundEidGeneratorTest, IdentifyRemoteDevice_OneDevice_Success) { SetTestTime(kDefaultCurrentPeriodStart); std::string service_data = GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey); std::string device_id = RemoteDeviceRef::GenerateDeviceId(kDefaultAdvertisingDevicePublicKey); std::vector<std::string> device_id_list = {device_id}; std::string identified_device_id = eid_generator_->IdentifyRemoteDeviceByAdvertisement( service_data, device_id_list, scanning_device_beacon_seeds_); EXPECT_EQ(device_id, identified_device_id); } TEST_F(CryptAuthForegroundEidGeneratorTest, IdentifyRemoteDevice_OneDevice_ServiceDataWithOneByteFlag_Success) { SetTestTime(kDefaultCurrentPeriodStart); std::string service_data = GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey); // Identifying device should still succeed if there is an extra "flag" byte // after the first 4 bytes. 
service_data.append( 1, static_cast<char>(ForegroundEidGenerator::kBluetooth4Flag)); std::string device_id = RemoteDeviceRef::GenerateDeviceId(kDefaultAdvertisingDevicePublicKey); std::vector<std::string> device_id_list = {device_id}; std::string identified_device_id = eid_generator_->IdentifyRemoteDeviceByAdvertisement( service_data, device_id_list, scanning_device_beacon_seeds_); EXPECT_EQ(device_id, identified_device_id); } TEST_F(CryptAuthForegroundEidGeneratorTest, IdentifyRemoteDevice_OneDevice_ServiceDataWithLongerFlag_Success) { SetTestTime(kDefaultCurrentPeriodStart); std::string service_data = GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey); // Identifying device should still succeed if there are extra "flag" bytes // after the first 4 bytes. service_data.append("extra_flag_bytes"); std::string device_id = RemoteDeviceRef::GenerateDeviceId(kDefaultAdvertisingDevicePublicKey); std::vector<std::string> device_id_list = {device_id}; std::string identified_device_id = eid_generator_->IdentifyRemoteDeviceByAdvertisement( service_data, device_id_list, scanning_device_beacon_seeds_); EXPECT_EQ(device_id, identified_device_id); } TEST_F(CryptAuthForegroundEidGeneratorTest, IdentifyRemoteDevice_OneDevice_Failure) { SetTestTime(kDefaultCurrentPeriodStart); std::string service_data = GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey); std::vector<std::string> device_id_list = {"wrongDeviceId"}; std::string identified_device_id = eid_generator_->IdentifyRemoteDeviceByAdvertisement( service_data, device_id_list, scanning_device_beacon_seeds_); EXPECT_TRUE(identified_device_id.empty()); } TEST_F(CryptAuthForegroundEidGeneratorTest, IdentifyRemoteDevice_MultipleDevices_Success) { SetTestTime(kDefaultCurrentPeriodStart); std::string service_data = GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey); std::string 
device_id = RemoteDeviceRef::GenerateDeviceId(kDefaultAdvertisingDevicePublicKey); std::vector<std::string> device_id_list = {device_id, "wrongDeviceId"}; std::string identified_device_id = eid_generator_->IdentifyRemoteDeviceByAdvertisement( service_data, device_id_list, scanning_device_beacon_seeds_); EXPECT_EQ(device_id, identified_device_id); } TEST_F(CryptAuthForegroundEidGeneratorTest, IdentifyRemoteDevice_MultipleDevices_Failure) { SetTestTime(kDefaultCurrentPeriodStart); std::string service_data = GenerateFakeAdvertisement(kSecondSeed, kDefaultCurrentPeriodStart, kDefaultAdvertisingDevicePublicKey); std::vector<std::string> device_id_list = {"wrongDeviceId", "wrongDeviceId"}; std::string identified_device_id = eid_generator_->IdentifyRemoteDeviceByAdvertisement( service_data, device_id_list, scanning_device_beacon_seeds_); EXPECT_TRUE(identified_device_id.empty()); } TEST_F(CryptAuthForegroundEidGeneratorTest, DataWithTimestamp_ContainsTime) { DataWithTimestamp data_with_timestamp("data", /* start */ 1000L, /* end */ 2000L); EXPECT_FALSE(data_with_timestamp.ContainsTime(999L)); EXPECT_TRUE(data_with_timestamp.ContainsTime(1000L)); EXPECT_TRUE(data_with_timestamp.ContainsTime(1500L)); EXPECT_TRUE(data_with_timestamp.ContainsTime(1999L)); EXPECT_FALSE(data_with_timestamp.ContainsTime(2000L)); } } // namespace cryptauth
// // Created by Jiang Lu on 6/3/15. // #include <cassert> #include "dukv8/function.h" #include "dukv8/isolate.h" #include "dukv8/string.h" #include "dukv8/integer.h" #include "dukv8/function_template.h" #include "dukv8/duk_stack_scope.h" #include "dukv8/local.h" #include "dukv8/arguments.h" namespace v8 { RTTI_IMPLEMENT(v8::Function, v8::Object); Function *Function::Init(DukContextRef duk_ctx, void *heap_ptr) { Object::Init(duk_ctx); DUK_STACK_SCOPE(duk_ctx); if (heap_ptr) { duk_obj_heapptr_ = heap_ptr; duk_obj_index_ = DukObjectRetain(duk_obj_heapptr_); } return this; } Function *Function::Cast(Value *value) { if (RTTI_IsKindOf(Function, value)) { return RTTI_StaticCast(Function, value); } if (value) { if (value->IsFunction()) { DukContextRef ctx = value->GetDukContext(); DUK_STACK_SCOPE(ctx); value->Push(); return (new Function)->Init(ctx, duk_get_heapptr(ctx, -1)); } } return NULL; } Local<Object> Function::NewInstance() const { TODO(); return Local<Object>(); } Local<Object> Function::NewInstance(int argc, Handle<Value> argv[]) const { TODO(); return Local<Object>(); } Local<Value> Function::Call(Handle<Object> that, int argc, Handle<Value> argv[]) { if (!function_template_.IsEmpty() && function_template_->callback_) { // C++ 函数 Arguments args(Isolate::GetCurrent(), Handle<Function>(this), that, that, function_template_->data_, argc, argv, false); // TODO: holder? Handle<Value> result = function_template_->callback_(args); return Local<Value>::New(result); } else { // JavaScript 函数 Push(); assert(duk_is_function(duk_ctx_, -1)); for (int i = 0; i < argc; i++) { argv[i]->Push(); } duk_int_t rc = duk_pcall(duk_ctx_, argc); if (rc == DUK_EXEC_SUCCESS) { return Local<Value>::New(Value::FromStack(duk_ctx_, -1)); } else { printf("error: %s\n", duk_to_string(duk_ctx_, -1)); return Local<Value>(); } } } void Function::SetName(Handle<String> name) { name_ = name; } Handle<Value> Function::GetName() const { return name_; } }
//Name: Aria Avazkhani //Student ID: 134465160 #include <iostream> #include "tools.h" using namespace std; namespace sict { // displays the user interface menu int menu() { cout << "1- Number of Samples" << endl; cout << "2- Sample Entry" << endl; cout << "3- Draw Graph" << endl; cout << "0- Exit" << endl; cout << "> "; return getInt(0, 3); } // Performs a fool-proof integer entry int getInt(int min, int max) { int val; bool done = false; while (!done) { cin >> val; if (cin.fail()) { cin.clear(); cout << "Invalid Integer, try again: "; } else { if (val < min || val > max) { cout << "Invalid value!" << endl << "Enter a value between " << min << " and " << max << ": "; } else { done = true; } } cin.ignore(1000, '\n'); } return val; } }
//===-------------------- ONNXOps.hpp - ONNX Operations -------------------===// // // Copyright 2019 The IBM Research Authors. // // ============================================================================= // // This file defines ONNX operations in the MLIR operation set. // //===----------------------------------------------------------------------===// #pragma once #include <map> #include <string> #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Builders.h" #include "mlir/IR/Dialect.h" #include "mlir/IR/OpDefinition.h" #include "mlir/IR/StandardTypes.h" #include "src/Interface/PromotableConstOperandsOpInterface.hpp" #include "src/Interface/ResultTypeInferenceOpInterface.hpp" #include "src/Interface/ShapeInferenceInterface.hpp" #include "ONNXOpsHelper.hpp" namespace mlir { class ONNXOpsDialect : public Dialect { public: ONNXOpsDialect(MLIRContext *context); /// Parse an instance of a type registered to the onnx dialect. mlir::Type parseType(mlir::DialectAsmParser &parser) const override; /// Print an instance of a type registered to the onnx dialect. void printType( mlir::Type type, mlir::DialectAsmPrinter &printer) const override; /// Provide a utility accessor to the dialect namespace. This is used by /// several utilities for casting between dialects. static StringRef getDialectNamespace() { return "onnx"; } }; /// Include the auto-generated header file containing the declarations of the /// ONNX operations. #define GET_OP_CLASSES #include "src/Dialect/ONNX/ONNXOps.hpp.inc" // The namespace onnxmlir is experimental. // onnx_mlir has been used in KRNL. Other candidates are onnxops, onnxdialect. // Should this namesapce for onnx mlir project or ONNXOp dialect? // Or we need two namespace? 
// Will put all the ONNXOps into this namespace namespace onnxmlir { class StringType : public mlir::Type::TypeBase<StringType, mlir::Type, mlir::TypeStorage> { public: using Base::Base; static StringType get(MLIRContext *ctx) { return Base::get(ctx); } }; namespace detail { struct SeqTypeStorage; } // namespace detail class SeqType : public mlir::Type::TypeBase<SeqType, mlir::Type, detail::SeqTypeStorage> { public: using Base::Base; static SeqType get(llvm::ArrayRef<mlir::Type> elementTypes); llvm::ArrayRef<mlir::Type> getElementTypes(); mlir::Type getElementType(); size_t getNumElementTypes() { return getElementTypes().size(); } }; } // end namespace onnxmlir } // end namespace mlir namespace onnx_mlir {}
/*
 * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Apple Inc. All
 * rights reserved.
 * Copyright (C) 2010 Google Inc. All rights reserved.
 * Copyright (C) 2012 Samsung Electronics. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include "third_party/blink/renderer/core/html/forms/image_input_type.h"

#include "third_party/blink/renderer/core/css/style_change_reason.h"
#include "third_party/blink/renderer/core/dom/shadow_root.h"
#include "third_party/blink/renderer/core/events/mouse_event.h"
#include "third_party/blink/renderer/core/frame/web_feature.h"
#include "third_party/blink/renderer/core/html/forms/form_data.h"
#include "third_party/blink/renderer/core/html/forms/html_form_element.h"
#include "third_party/blink/renderer/core/html/forms/html_input_element.h"
#include "third_party/blink/renderer/core/html/html_image_fallback_helper.h"
#include "third_party/blink/renderer/core/html/html_image_loader.h"
#include "third_party/blink/renderer/core/html/parser/html_parser_idioms.h"
#include "third_party/blink/renderer/core/html_names.h"
#include "third_party/blink/renderer/core/input_type_names.h"
#include "third_party/blink/renderer/core/layout/adjust_for_absolute_zoom.h"
#include "third_party/blink/renderer/core/layout/layout_block_flow.h"
#include "third_party/blink/renderer/core/layout/layout_image.h"
#include "third_party/blink/renderer/core/layout/layout_inline.h"
#include "third_party/blink/renderer/core/layout/layout_object_factory.h"
#include "third_party/blink/renderer/platform/wtf/text/string_builder.h"

namespace blink {

// Implementation of <input type=image>: an image that acts as a submit
// button. Starts in primary (image) content mode; use_fallback_content_
// flips to alt-text fallback rendering when requested (see
// EnsureFallbackContent below).
ImageInputType::ImageInputType(HTMLInputElement& element)
    : BaseButtonInputType(element), use_fallback_content_(false) {}

// Records a use-counter hit for this input type (only when visible).
void ImageInputType::CountUsage() {
  CountUsageIfVisible(WebFeature::kInputTypeImage);
}

const AtomicString& ImageInputType::FormControlType() const {
  return input_type_names::kImage;
}

bool ImageInputType::IsFormDataAppendable() const {
  return true;
}

// Appends the click coordinates to the submitted form data: "x"/"y" when the
// control has no name, otherwise "<name>.x"/"<name>.y". Only the control
// that actually triggered submission contributes.
void ImageInputType::AppendToFormData(FormData& form_data) const {
  if (!GetElement().IsActivatedSubmit())
    return;
  const AtomicString& name = GetElement().GetName();
  if (name.IsEmpty()) {
    form_data.AppendFromElement("x", click_location_.X());
    form_data.AppendFromElement("y", click_location_.Y());
    return;
  }

  DEFINE_STATIC_LOCAL(String, dot_x_string, (".x"));
  DEFINE_STATIC_LOCAL(String, dot_y_string, (".y"));
  form_data.AppendFromElement(name + dot_x_string, click_location_.X());
  form_data.AppendFromElement(name + dot_y_string, click_location_.Y());
}

// Dialog form submission result: the click location serialized as "x,y".
String ImageInputType::ResultForDialogSubmit() const {
  StringBuilder result;
  result.AppendNumber(click_location_.X());
  result.Append(',');
  result.AppendNumber(click_location_.Y());
  return result.ToString();
}

bool ImageInputType::SupportsValidation() const {
  return false;
}

// Pulls the click offset out of the underlying mouse event, or (0,0) when
// the activation did not come from a positioned mouse event (e.g. keyboard).
static IntPoint ExtractClickLocation(const Event& event) {
  const auto* mouse_event = DynamicTo<MouseEvent>(event.UnderlyingEvent());
  if (!event.UnderlyingEvent() || !mouse_event)
    return IntPoint();
  if (!mouse_event->HasPosition())
    return IntPoint();
  return IntPoint(mouse_event->offsetX(), mouse_event->offsetY());
}

// Activation (click) handler: records the click location and submits the
// owning form. Does nothing when disabled or form-less.
void ImageInputType::HandleDOMActivateEvent(Event& event) {
  if (GetElement().IsDisabledFormControl() || !GetElement().Form())
    return;
  click_location_ = ExtractClickLocation(event);
  // Event handlers can run.
  GetElement().Form()->PrepareForSubmission(&event, &GetElement());
  event.SetDefaultHandled();
}

// Chooses the layout object: a LayoutImage normally; in fallback mode, an
// inline or block-flow object depending on the computed display value.
LayoutObject* ImageInputType::CreateLayoutObject(const ComputedStyle& style,
                                                LegacyLayout legacy) const {
  if (use_fallback_content_) {
    if (style.Display() == EDisplay::kInline)
      return new LayoutInline(&GetElement());
    return LayoutObjectFactory::CreateBlockFlow(GetElement(), style, legacy);
  }
  LayoutImage* image = new LayoutImage(&GetElement());
  image->SetImageResource(MakeGarbageCollected<LayoutImageResource>());
  return image;
}

// Keeps the "alttext" element inside the UA shadow tree in sync with the
// alt attribute (only relevant when the fallback shadow tree exists).
void ImageInputType::AltAttributeChanged() {
  if (GetElement().UserAgentShadowRoot()) {
    Element* text =
        GetElement().UserAgentShadowRoot()->getElementById("alttext");
    String value = GetElement().AltText();
    if (text && text->textContent() != value)
      text->setTextContent(GetElement().AltText());
  }
}

// Re-triggers the image load when src changes; skipped when there is no
// layout object yet (the load happens on attach instead).
void ImageInputType::SrcAttributeChanged() {
  if (!GetElement().GetLayoutObject())
    return;
  GetElement().EnsureImageLoader().UpdateFromElement(
      ImageLoader::kUpdateIgnorePreviousError);
}

void ImageInputType::ValueAttributeChanged() {
  // In fallback mode the value attribute is not reflected in the UI.
  if (use_fallback_content_)
    return;
  BaseButtonInputType::ValueAttributeChanged();
}

// On attach, kicks off the image load and wires the loaded content into the
// LayoutImage's image resource.
void ImageInputType::OnAttachWithLayoutObject() {
  LayoutObject* layout_object = GetElement().GetLayoutObject();
  DCHECK(layout_object);
  if (!layout_object->IsLayoutImage())
    return;
  HTMLImageLoader& image_loader = GetElement().EnsureImageLoader();
  image_loader.UpdateFromElement();
  LayoutImageResource* image_resource =
      To<LayoutImage>(layout_object)->ImageResource();
  image_resource->SetImageResource(image_loader.GetContent());
}

bool ImageInputType::ShouldRespectAlignAttribute() {
  return true;
}

bool ImageInputType::CanBeSuccessfulSubmitButton() {
  return true;
}

bool ImageInputType::IsEnumeratable() {
  return false;
}

bool ImageInputType::ShouldRespectHeightAndWidthAttributes() {
  return true;
}

// Reports the rendered height. Before layout exists, falls back to the
// height attribute or the image's intrinsic height; otherwise forces a
// layout update and measures the layout box (zoom-adjusted).
unsigned ImageInputType::Height() const {
  if (!GetElement().GetLayoutObject()) {
    // Check the attribute first for an explicit pixel value.
    unsigned height;
    if (ParseHTMLNonNegativeInteger(
            GetElement().FastGetAttribute(html_names::kHeightAttr), height))
      return height;

    // If the image is available, use its height.
    HTMLImageLoader* image_loader = GetElement().ImageLoader();
    if (image_loader && image_loader->GetContent()) {
      return image_loader->GetContent()
          ->IntrinsicSize(kRespectImageOrientation)
          .Height();
    }
  }

  GetElement().GetDocument().UpdateStyleAndLayout(
      DocumentUpdateReason::kJavaScript);
  LayoutBox* box = GetElement().GetLayoutBox();
  return box ? AdjustForAbsoluteZoom::AdjustInt(box->ContentHeight().ToInt(),
                                                box)
             : 0;
}

// Mirror of Height() for the horizontal axis.
unsigned ImageInputType::Width() const {
  if (!GetElement().GetLayoutObject()) {
    // Check the attribute first for an explicit pixel value.
    unsigned width;
    if (ParseHTMLNonNegativeInteger(
            GetElement().FastGetAttribute(html_names::kWidthAttr), width))
      return width;

    // If the image is available, use its width.
    HTMLImageLoader* image_loader = GetElement().ImageLoader();
    if (image_loader && image_loader->GetContent()) {
      return image_loader->GetContent()
          ->IntrinsicSize(kRespectImageOrientation)
          .Width();
    }
  }

  GetElement().GetDocument().UpdateStyleAndLayout(
      DocumentUpdateReason::kJavaScript);
  LayoutBox* box = GetElement().GetLayoutBox();
  return box ? AdjustForAbsoluteZoom::AdjustInt(box->ContentWidth().ToInt(),
                                                box)
             : 0;
}

bool ImageInputType::HasLegalLinkAttribute(const QualifiedName& name) const {
  return name == html_names::kSrcAttr ||
         BaseButtonInputType::HasLegalLinkAttribute(name);
}

const QualifiedName& ImageInputType::SubResourceAttributeName() const {
  return html_names::kSrcAttr;
}

// Switches to alt-text fallback content (idempotent) and re-attaches the
// layout tree so the new content mode takes effect.
void ImageInputType::EnsureFallbackContent() {
  if (use_fallback_content_)
    return;
  SetUseFallbackContent();
  ReattachFallbackContent();
}

// Marks fallback mode and rebuilds the UA shadow tree, unless a style
// recalc is in progress (rebuilding mid-recalc is unsafe here).
void ImageInputType::SetUseFallbackContent() {
  if (use_fallback_content_)
    return;
  use_fallback_content_ = true;
  if (GetElement().GetDocument().InStyleRecalc())
    return;
  if (ShadowRoot* root = GetElement().UserAgentShadowRoot())
    root->RemoveChildren();
  CreateShadowSubtree();
}

// Switches back to primary (image) content, rebuilding the shadow tree and
// re-attaching the layout tree.
void ImageInputType::EnsurePrimaryContent() {
  if (!use_fallback_content_)
    return;
  use_fallback_content_ = false;
  if (ShadowRoot* root = GetElement().UserAgentShadowRoot())
    root->RemoveChildren();
  CreateShadowSubtree();
  ReattachFallbackContent();
}

void ImageInputType::ReattachFallbackContent() {
  if (!GetElement().GetDocument().InStyleRecalc()) {
    // ComputedStyle depends on use_fallback_content_. Trigger recalc.
    GetElement().SetNeedsStyleRecalc(
        kLocalStyleChange,
        StyleChangeReasonForTracing::Create(style_change_reason::kUseFallback));
    // LayoutObject type depends on use_fallback_content_. Trigger re-attach.
    GetElement().SetForceReattachLayoutTree();
  }
}

// Builds either the standard button shadow tree or the alt-text fallback
// tree, depending on the current content mode.
void ImageInputType::CreateShadowSubtree() {
  if (!use_fallback_content_) {
    BaseButtonInputType::CreateShadowSubtree();
    return;
  }
  HTMLImageFallbackHelper::CreateAltTextShadowTree(GetElement());
}

void ImageInputType::CustomStyleForLayoutObject(ComputedStyle& style) {
  if (use_fallback_content_)
    HTMLImageFallbackHelper::CustomStyleForAltText(GetElement(), style);
}

}  // namespace blink
#include <drivers/Joystick.h>
#include <hardware/HwJoystick.h>

Joystick::Joystick() :
    _buttonStateOld(0x00),
    _buttonPressed(0x00) {
}

Joystick::~Joystick() {
}

// One-time initialisation of the underlying joystick hardware.
void Joystick::setup() {
    hwJoystickSetup();
}

/* Must be called at the beginning of every frame */
void Joystick::update() {
    /* Sample button state once so the whole frame works from one
     * consistent snapshot and to prevent abnormalities */
    ButtonStateType buttonStateCurrent = hwJoystickState();

    /* Rising-edge detection: a button is "pressed" this frame iff its bit
     * is set now and was clear last frame.  (old ^ cur) & cur keeps exactly
     * those bits; the original code additionally XOR-ed the current state
     * with 0x00, which is the identity and has been removed. */
    _buttonPressed = (_buttonStateOld ^ buttonStateCurrent) & buttonStateCurrent;

    /* Remember this frame's state for next frame's edge comparison */
    _buttonStateOld = buttonStateCurrent;
}

// Level query: true while the button is currently down.
// NOTE(review): this re-samples the hardware directly instead of using the
// snapshot taken in update(), so held() and pressed() can disagree within a
// frame — confirm whether that is intended.
bool Joystick::held(JoystickButton button) {
    return (hwJoystickState() & (1 << button));
}

// Edge query: true only on the frame the button transitioned up -> down,
// as computed by the most recent update().
bool Joystick::pressed(JoystickButton button) {
    return (_buttonPressed & (1 << button));
}
#include <ros/ros.h>
#include <move_base_msgs/MoveBaseAction.h>
#include <actionlib/client/simple_action_client.h>
#include <tf/transform_broadcaster.h>
#include <sstream>

typedef actionlib::SimpleActionClient<move_base_msgs::MoveBaseAction> MoveBaseClient;

// Sends a single navigation goal at (1, 1) in the "map" frame to the
// move_base action server, waits for the result, and reports whether the
// base reached the goal.
int main(int argc, char** argv) {
  ros::init(argc, argv, "navigation_goals");

  // Spin up the action client (true = run its own spin thread) and block
  // until the server is available, logging every 5 seconds while waiting.
  MoveBaseClient ac("move_base", true);
  for (;;) {
    if (ac.waitForServer(ros::Duration(5.0)))
      break;
    ROS_INFO("Waiting for the move_base action server");
  }

  // Build the goal pose: position (1, 1), identity orientation.
  move_base_msgs::MoveBaseGoal goal;
  goal.target_pose.header.frame_id = "map";
  goal.target_pose.header.stamp = ros::Time::now();
  goal.target_pose.pose.position.x = 1.0;
  goal.target_pose.pose.position.y = 1.0;
  goal.target_pose.pose.orientation.w = 1.0;

  ROS_INFO("Sending goal");
  ac.sendGoal(goal);

  // Block until the action completes, then report the terminal state.
  ac.waitForResult();
  const bool reached =
      ac.getState() == actionlib::SimpleClientGoalState::SUCCEEDED;
  if (reached)
    ROS_INFO("You have arrived to the goal position");
  else
    ROS_INFO("The base failed for some reason");

  return 0;
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __POSIX_ISOLATOR_HPP__
#define __POSIX_ISOLATOR_HPP__

#include <process/future.hpp>
#include <process/id.hpp>

#include <stout/hashmap.hpp>
#include <stout/os.hpp>
#include <stout/os/pstree.hpp>

#include "slave/flags.hpp"
#include "slave/containerizer/mesos/isolator.hpp"

#include "usage/usage.hpp"

namespace mesos {
namespace internal {
namespace slave {

// A basic MesosIsolatorProcess that keeps track of the pid but
// doesn't do any resource isolation. Subclasses must implement
// usage() for their appropriate resource(s).
class PosixIsolatorProcess : public MesosIsolatorProcess {
public:
  // Re-registers already-running containers after a slave restart:
  // records each container's pid and creates a fresh (never-satisfied
  // here) limitation promise for it.
  virtual process::Future<Nothing> recover(
      const std::list<mesos::slave::ContainerState>& state,
      const hashset<ContainerID>& orphans)
  {
    foreach (const mesos::slave::ContainerState& run, state) {
      // This should (almost) never occur: see comment in
      // SubprocessLauncher::recover().
      if (pids.contains(run.container_id())) {
        return process::Failure("Container already recovered");
      }

      pids.put(run.container_id(), static_cast<pid_t>(run.pid()));

      process::Owned<process::Promise<mesos::slave::ContainerLimitation>>
        promise(new process::Promise<mesos::slave::ContainerLimitation>());
      promises.put(run.container_id(), promise);
    }

    return Nothing();
  }

  // Registers a new container before launch. No launch-time setup is
  // needed for POSIX "isolation", so this only creates the limitation
  // promise and returns None() (no ContainerLaunchInfo).
  virtual process::Future<Option<mesos::slave::ContainerLaunchInfo>> prepare(
      const ContainerID& containerId,
      const mesos::slave::ContainerConfig& containerConfig)
  {
    if (promises.contains(containerId)) {
      return process::Failure("Container " + stringify(containerId) +
                              " has already been prepared");
    }

    process::Owned<process::Promise<mesos::slave::ContainerLimitation>> promise(
        new process::Promise<mesos::slave::ContainerLimitation>());
    promises.put(containerId, promise);

    return None();
  }

  // Associates the launched pid with a previously prepared container.
  // No actual isolation is applied.
  virtual process::Future<Nothing> isolate(
      const ContainerID& containerId,
      pid_t pid)
  {
    if (!promises.contains(containerId)) {
      return process::Failure("Unknown container: " + stringify(containerId));
    }

    pids.put(containerId, pid);

    return Nothing();
  }

  // Returns a future that would be satisfied if the container ever hit a
  // resource limitation. This isolator never sets it, so the future only
  // completes via discard/cleanup.
  virtual process::Future<mesos::slave::ContainerLimitation> watch(
      const ContainerID& containerId)
  {
    if (!promises.contains(containerId)) {
      return process::Failure("Unknown container: " + stringify(containerId));
    }

    return promises[containerId]->future();
  }

  virtual process::Future<Nothing> update(
      const ContainerID& containerId,
      const Resources& resources)
  {
    if (!promises.contains(containerId)) {
      return process::Failure("Unknown container: " + stringify(containerId));
    }

    // No resources are actually isolated so nothing to do.
    return Nothing();
  }

  // Drops all bookkeeping for the container. Unknown containers are
  // tolerated (logged) so cleanup stays idempotent.
  virtual process::Future<Nothing> cleanup(const ContainerID& containerId)
  {
    if (!promises.contains(containerId)) {
      VLOG(1) << "Ignoring cleanup request for unknown container "
              << containerId;

      return Nothing();
    }

    // TODO(idownes): We should discard the container's promise here to signal
    // to anyone that holds the future from watch().
    promises.erase(containerId);

    pids.erase(containerId);

    return Nothing();
  }

protected:
  // ContainerID -> pid of the container's executor process.
  hashmap<ContainerID, pid_t> pids;
  // ContainerID -> promise completed on a resource limitation (never
  // satisfied by this base class; see watch()).
  hashmap<ContainerID,
          process::Owned<process::Promise<mesos::slave::ContainerLimitation>>>
    promises;
};


// POSIX isolator that reports CPU usage (via mesos-usage) but enforces
// nothing.
class PosixCpuIsolatorProcess : public PosixIsolatorProcess {
public:
  static Try<mesos::slave::Isolator*> create(const Flags& flags)
  {
    process::Owned<MesosIsolatorProcess> process(
        new PosixCpuIsolatorProcess());

    return new MesosIsolator(process);
  }

  virtual process::Future<ResourceStatistics> usage(
      const ContainerID& containerId)
  {
    if (!pids.contains(containerId)) {
      LOG(WARNING) << "No resource usage for unknown container '"
                   << containerId << "'";
      return ResourceStatistics();
    }

    // Use 'mesos-usage' but only request 'cpus_' values.
    Try<ResourceStatistics> usage =
      mesos::internal::usage(pids.get(containerId).get(), false, true);
    if (usage.isError()) {
      return process::Failure(usage.error());
    }
    return usage.get();
  }

protected:
  PosixCpuIsolatorProcess()
    : ProcessBase(process::ID::generate("posix-cpu-isolator")) {}
};


// POSIX isolator that reports memory usage (via mesos-usage) but enforces
// nothing.
class PosixMemIsolatorProcess : public PosixIsolatorProcess {
public:
  static Try<mesos::slave::Isolator*> create(const Flags& flags)
  {
    process::Owned<MesosIsolatorProcess> process(
        new PosixMemIsolatorProcess());

    return new MesosIsolator(process);
  }

  virtual process::Future<ResourceStatistics> usage(
      const ContainerID& containerId)
  {
    if (!pids.contains(containerId)) {
      LOG(WARNING) << "No resource usage for unknown container '"
                   << containerId << "'";
      return ResourceStatistics();
    }

    // Use 'mesos-usage' but only request 'mem_' values.
    Try<ResourceStatistics> usage =
      mesos::internal::usage(pids.get(containerId).get(), true, false);
    if (usage.isError()) {
      return process::Failure(usage.error());
    }
    return usage.get();
  }

protected:
  PosixMemIsolatorProcess()
    : ProcessBase(process::ID::generate("posix-mem-isolator")) {}
};

} // namespace slave {
} // namespace internal {
} // namespace mesos {

#endif // __POSIX_ISOLATOR_HPP__
#pragma once
#include <Register/Utility.hpp>

// Memory-mapped register description of an I2C controller peripheral at
// base address 0x40020000 (control, data, status, clock-divider, time-out,
// four slave-address registers and four address-mask registers).  Each
// nested namespace models one register; the constexpr FieldLocation
// constants model its bit fields.  The long field descriptions below come
// from the device's reference documentation.
namespace Kvasir {
//Registers group

namespace I2cI2con{    ///<I2C Control Register
    using Addr = Register::Address<0x40020000,0xffffff0b,0x00000000,unsigned>;
    ///Assert Acknowledge control bit. When AA=1 prior to address or data
    ///received, an acknowledged (low level to SDA) will be returned during
    ///the acknowledge clock pulse on the SCL line when 1.) A slave is
    ///acknowledging the address sent from master, 2.) The receiver devices
    ///are acknowledging the data sent by transmitter. When AA=0 prior to
    ///address or data received, a Not acknowledged (high level to SDA) will
    ///be returned during the acknowledge clock pulse on the SCL line.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,unsigned> aa{};
    ///I2C Interrupt Flag. When a new SIO state is present in the I2CSTATUS
    ///register, the SI flag is set by hardware, and if bit EI (I2CON [7])
    ///is set, the I2C interrupt is requested. SI must be cleared by
    ///software. Clear SI is by writing one to this bit.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::Access<Register::AccessType::readWrite,Register::ReadActionType::normal,Register::ModifiedWriteValueType::oneToClear>,unsigned> si{};
    ///I2C STOP Flag. In master mode, setting STO to transmit a STOP
    ///condition to bus then I2C hardware will check the bus condition if a
    ///STOP condition is detected this flag will be cleared by hardware
    ///automatically. In a slave mode, setting STO resets I2C hardware to
    ///the defined "not addressed" slave mode. This means it is NO LONGER in
    ///the slave receiver mode to receive data from the master transmit
    ///device.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(4,4),Register::ReadWriteAccess,unsigned> sto{};
    ///I2C START Flag. Setting STA to logic 1 to enter master mode, the I2C
    ///hardware sends a START or repeat START condition to bus when the bus
    ///is free.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(5,5),Register::ReadWriteAccess,unsigned> sta{};
    ///I2C controller is enabled/disable. 1 = Enable, 0 = Disable. Set to
    ///enable I2C serial function block. When ENS=1 the I2C serial function
    ///enables. The multi-function pin function of SDA and SCL must set to
    ///I2C function first.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,6),Register::ReadWriteAccess,unsigned> ensi{};
    ///Enable interrupt. 1 = Enable I2C interrupt. 0 = Disable I2C interrupt.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> ei{};
}

namespace I2cI2caddr0{    ///<I2C slave Address Register0
    using Addr = Register::Address<0x40020004,0xffffff00,0x00000000,unsigned>;
    ///General Call Function. 0 = Disable General Call Function. 1 = Enable
    ///General Call Function.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> gc{};
    ///I2C Address Register. The content of this register is irrelevant when
    ///I2C is in master mode. In the slave mode, the seven most significant
    ///bits must be loaded with the MCU's own address. The I2C hardware will
    ///react if either of the address is matched.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,1),Register::ReadWriteAccess,unsigned> i2caddr{};
}

namespace I2cI2cdat{    ///<I2C DATA Register
    using Addr = Register::Address<0x40020008,0xffffff00,0x00000000,unsigned>;
    ///I2C Data Register. Bit[7:0] is located with the 8-bit transferred
    ///data of I2C serial port.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,0),Register::ReadWriteAccess,unsigned> i2cdat{};
}

namespace I2cI2cstatus{    ///<I2C Status Register
    using Addr = Register::Address<0x4002000c,0xffffff00,0x00000000,unsigned>;
    ///I2C Status Register. The status register of I2C: The three least
    ///significant bits are always 0. The five most significant bits contain
    ///the status code. There are 26 possible status codes. When I2STATUS
    ///contains F8H, no serial interrupt is requested. All other I2STATUS
    ///values correspond to defined I2C states. When each of these states is
    ///entered, a status interrupt is requested (SI = 1). A valid status
    ///code is present in I2STATUS one machine cycle after SI is set by
    ///hardware and is still present one machine cycle after SI has been
    ///reset by software. In addition, states 00H stands for a Bus Error. A
    ///Bus Error occurs when a START or STOP condition is present at an
    ///illegal position in the formation frame. Example of illegal position
    ///are during the serial transfer of an address byte, a data byte or an
    ///acknowledge bit.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,0),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> i2cstatus{};
}

namespace I2cI2clk{    ///<I2C clock divided Register
    using Addr = Register::Address<0x40020010,0xffffff00,0x00000000,unsigned>;
    ///I2C clock divided Register. The I2C clock rate bits: Data Baud Rate
    ///of I2C = PCLK /(4x(I2CLK+1)).
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,0),Register::ReadWriteAccess,unsigned> i2clk{};
}

namespace I2cI2ctoc{    ///<I2C Time out control Register
    using Addr = Register::Address<0x40020014,0xfffffff9,0x00000000,unsigned>;
    ///Time-Out flag. 1 = Time-Out flag is set by H/W. It can interrupt CPU.
    ///0 = S/W can clear the flag.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::Access<Register::AccessType::readWrite,Register::ReadActionType::normal,Register::ModifiedWriteValueType::oneToClear>,unsigned> tif{};
    ///Time-Out counter input clock is divided by 4. 1 = Enable, 0 =
    ///Disable. When Enable, the time-out period is prolonged 4 times.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::ReadWriteAccess,unsigned> div4{};
    ///Time-out counter is enabled/disable. 1 = Enable, 0 = Disable. When
    ///Enable, the 14 bit time-out counter will start counting when SI is
    ///clear. Setting flag SI to high will reset counter and re-start up
    ///counting after SI is cleared.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,unsigned> enti{};
}

namespace I2cI2caddr1{    ///<I2C slave Address Register1
    using Addr = Register::Address<0x40020018,0xffffff00,0x00000000,unsigned>;
    ///General Call Function. 0 = Disable General Call Function. 1 = Enable
    ///General Call Function.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> gc{};
    ///I2C Address Register. The content of this register is irrelevant when
    ///I2C is in master mode. In the slave mode, the seven most significant
    ///bits must be loaded with the MCU's own address. The I2C hardware will
    ///react if either of the address is matched.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,1),Register::ReadWriteAccess,unsigned> i2caddr{};
}

namespace I2cI2caddr2{    ///<I2C slave Address Register2
    using Addr = Register::Address<0x4002001c,0xffffff00,0x00000000,unsigned>;
    ///General Call Function. 0 = Disable General Call Function. 1 = Enable
    ///General Call Function.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> gc{};
    ///I2C Address Register. The content of this register is irrelevant when
    ///I2C is in master mode. In the slave mode, the seven most significant
    ///bits must be loaded with the MCU's own address. The I2C hardware will
    ///react if either of the address is matched.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,1),Register::ReadWriteAccess,unsigned> i2caddr{};
}

namespace I2cI2caddr3{    ///<I2C slave Address Register3
    using Addr = Register::Address<0x40020020,0xffffff00,0x00000000,unsigned>;
    ///General Call Function. 0 = Disable General Call Function. 1 = Enable
    ///General Call Function.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> gc{};
    ///I2C Address Register. The content of this register is irrelevant when
    ///I2C is in master mode. In the slave mode, the seven most significant
    ///bits must be loaded with the MCU's own address. The I2C hardware will
    ///react if either of the address is matched.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,1),Register::ReadWriteAccess,unsigned> i2caddr{};
}

namespace I2cI2cadm0{    ///<I2C Slave address Mask Register0
    using Addr = Register::Address<0x40020024,0xffffff01,0x00000000,unsigned>;
    ///I2C Address Mask register. 1 = Mask enable (the received
    ///corresponding address bit is don't care.) 0 = Mask disable (the
    ///received corresponding register bit should be exact the same as
    ///address register.) I2C bus controllers support multiple address
    ///recognition with four address mask register. When the bit in the
    ///address mask register is set to one, it means the received
    ///corresponding address bit is don't-care. If the bit is set to zero,
    ///that means the received corresponding register bit should be exact
    ///the same as address register.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,1),Register::ReadWriteAccess,unsigned> i2admx{};
}

namespace I2cI2cadm1{    ///<I2C Slave address Mask Register1
    using Addr = Register::Address<0x40020028,0xffffff01,0x00000000,unsigned>;
    ///I2C Address Mask register. 1 = Mask enable (the received
    ///corresponding address bit is don't care.) 0 = Mask disable (the
    ///received corresponding register bit should be exact the same as
    ///address register.) I2C bus controllers support multiple address
    ///recognition with four address mask register. When the bit in the
    ///address mask register is set to one, it means the received
    ///corresponding address bit is don't-care. If the bit is set to zero,
    ///that means the received corresponding register bit should be exact
    ///the same as address register.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,1),Register::ReadWriteAccess,unsigned> i2admx{};
}

namespace I2cI2cadm2{    ///<I2C Slave address Mask Register2
    using Addr = Register::Address<0x4002002c,0xffffff01,0x00000000,unsigned>;
    ///I2C Address Mask register. 1 = Mask enable (the received
    ///corresponding address bit is don't care.) 0 = Mask disable (the
    ///received corresponding register bit should be exact the same as
    ///address register.) I2C bus controllers support multiple address
    ///recognition with four address mask register. When the bit in the
    ///address mask register is set to one, it means the received
    ///corresponding address bit is don't-care. If the bit is set to zero,
    ///that means the received corresponding register bit should be exact
    ///the same as address register.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,1),Register::ReadWriteAccess,unsigned> i2admx{};
}

namespace I2cI2cadm3{    ///<I2C Slave address Mask Register3
    using Addr = Register::Address<0x40020030,0xffffff01,0x00000000,unsigned>;
    ///I2C Address Mask register. 1 = Mask enable (the received
    ///corresponding address bit is don't care.) 0 = Mask disable (the
    ///received corresponding register bit should be exact the same as
    ///address register.) I2C bus controllers support multiple address
    ///recognition with four address mask register. When the bit in the
    ///address mask register is set to one, it means the received
    ///corresponding address bit is don't-care. If the bit is set to zero,
    ///that means the received corresponding register bit should be exact
    ///the same as address register.
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,1),Register::ReadWriteAccess,unsigned> i2admx{};
}
}
#include <iostream>
#include <SDL2/SDL_image.h>
#include "Window.h"

/*
Window constructor
uint w: window width
uint h: window height
std::string title: window title
bool resize: is the window resizable

Creates the SDL window (resizable or fixed), loads the window icon,
creates an accelerated renderer with alpha blending enabled, and caches
the current global theme state (darkMode / accent are globals declared
elsewhere) so handleEvents() can detect theme changes later.
*/
Window::Window(uint w, uint h, std::string title, bool resize){
    window = NULL;
    renderer = NULL;
    if(resize){
        window = SDL_CreateWindow(title.c_str(), SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, SDL_WINDOW_RESIZABLE);
    }else{
        window = SDL_CreateWindow(title.c_str(), SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, SDL_WINDOW_SHOWN);
    }
    if(window){
        // NOTE(review): IMG_Load may return NULL; SDL tolerates NULL here and
        // SDL_FreeSurface(NULL) is a no-op, but the failure is silent — confirm intended.
        SDL_Surface *icon = IMG_Load(iconLocation.c_str());
        SDL_SetWindowIcon(window, icon);
        SDL_FreeSurface(icon);
        renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_TARGETTEXTURE);
        if(renderer){
            SDL_SetRenderDrawColor(renderer, 0x00, 0x00, 0x00, 0xFF);
            SDL_SetRenderDrawBlendMode(renderer, SDL_BLENDMODE_BLEND);
        }else{
            std::cout << "Failed to create renderer. Given SDL error: " << SDL_GetError() << std::endl;
        }
    }else{
        std::cout << "Failed to create window. Given SDL error: " << SDL_GetError() << std::endl;
    }
    width = w;
    height = h;
    // Cache the global theme so a later change can be detected in handleEvents().
    windowDark = darkMode;
    windowAccent = *accent;
    event = NULL;
    activeControl = NULL;
}

/*get the width of the window (queried live from SDL, not the cached member)*/
int Window::getWidth(){
    int w;
    SDL_GetWindowSize(window, &w, NULL);
    return w;
}

/*get the height of the window (queried live from SDL, not the cached member)*/
int Window::getHeight(){
    int h;
    SDL_GetWindowSize(window, NULL, &h);
    return h;
}

/*get the title of the window*/
std::string Window::getTitle(){
    return SDL_GetWindowTitle(window);
}

/*set the width of the window (height preserved)*/
void Window::setWidth(uint w){
    SDL_SetWindowSize(window, w, getHeight());
}

/*set the height of the window (width preserved)*/
void Window::setHeight(uint h){
    SDL_SetWindowSize(window, getWidth(), h);
}

/*set the title of the window; returns true if SDL reports the new title back*/
bool Window::setTitle(std::string s){
    SDL_SetWindowTitle(window, s.c_str());
    return getTitle() == s;
}

/*tells if the window is ready to be used*/
bool Window::isReady(){
    return window != NULL;
}

/*closes the window*/
void Window::close(){
    SDL_DestroyWindow(window);
    window = NULL;
}

/*handle events generated by the window.

Polls the SDL event queue and translates SDL events into this framework's
own Event linked list (head pointer: member 'event'; nodes chained via
getTail()/setTail()). Many branches first detach/delete stale nodes from
the list before pushing a new head node, which is why statement order in
this function is significant.
*/
void Window::handleEvents(){
    /*if the window's theme doesn't match the program theme*/
    if(windowDark != darkMode || windowAccent.getUint() != accent->getUint()){
        for(int i=0;i < controls.size();i++){
            controls.at(i)->updateTheme();
        }
        windowDark = darkMode;
        windowAccent = *accent;
    }
    bool eventNeeded = true;
    if(event != NULL){
        /*if the recent event can simple be cleared from the linked-list*/
        // (&& binds tighter than ||: a released keypress, a scroll, or a text entry is transient)
        if(event->getType() == e_KEYPRESS && event->getKeyState() == s_UP || event->getType() == e_SCROLL || event->getType() == e_TEXTENTRY){
            Event *tail = event->getTail();
            delete event;
            event = tail;
        }
    }
    /*poll events from the queue*/
    SDL_Event e;
    while(SDL_PollEvent(&e)){
        /*close the window if the quit event comes up*/
        if(e.type == SDL_QUIT){
            close();
        }else if(e.type == SDL_WINDOWEVENT){
            // 'sys', 'WIN' and 'fullRedraw' are globals declared elsewhere;
            // presumably a Windows-specific full-redraw workaround — confirm.
            if((e.window.event == SDL_WINDOWEVENT_RESIZED || e.window.event == SDL_WINDOWEVENT_FOCUS_GAINED) && sys == WIN){
                fullRedraw = true;
            }
            /*if window resize event*/
            if(e.window.event == SDL_WINDOWEVENT_RESIZED){
                // Reposition/resize every control according to its edge locks.
                // data1/data2 are the new window width/height.
                for(int i=0;i<controls.size();i++){
                    Control *control = controls.at(i);
                    if(control->getLeftLock() && control->getRightLock()){
                        bool noResize = true;
                        if(control->getLockResize()){
                            noResize = !control->setWidth(control->getWidth() + (e.window.data1 - (int)width));
                        }
                        if(noResize){
                            // Keep the control's horizontal center at the same relative position.
                            double ratio = (control->getX() + (double)(control->getWidth()/2)) / (double)width;
                            control->setX((double)e.window.data1*ratio - (double)(control->getWidth()/2));
                        }
                    }else if(control->getRightLock()){
                        control->setX(control->getX() + (e.window.data1 - (int)width));
                    }
                    if(control->getTopLock() && control->getBottomLock()){
                        bool noResize = true;
                        if(control->getLockResize()){
                            noResize = !control->setHeight(control->getHeight() + (e.window.data2 - (int)height));
                        }
                        if(noResize){
                            // Keep the control's vertical center at the same relative position.
                            double ratio = (control->getY() + (double)(control->getHeight()/2)) / (double)height;
                            control->setY((double)e.window.data2*ratio - (double)(control->getHeight()/2));
                        }
                    }else if(control->getBottomLock()){
                        control->setY(control->getY() + (e.window.data2 - (int)height));
                    }
                }
                width = e.window.data1;
                height = e.window.data2;
            }
        }
        if(event != NULL){
            /*if the recent event can simple be cleared from the linked-list*/
            if(event->getType() == e_KEYPRESS && event->getKeyState() == s_UP || event->getType() == e_SCROLL || event->getType() == e_TEXTENTRY){
                Event *tail = event->getTail();
                delete event;
                event = tail;
            }
        }
        /*Mouse Button Down Event*/
        if(e.type == SDL_MOUSEBUTTONDOWN){
            // Scan the list for stale pointer events (move/hover/old press/old
            // click) to remove before pushing the new s_DOWN press at the head.
            Event *hold = NULL;
            Event *holdPrev = NULL;
            Event *temp = event;
            Event *prev = NULL;
            while(temp){
                if(temp->getType() == e_MOVE || temp->getType() == e_HOVER){
                    // Replace a pending move/hover with the button press.
                    if(prev != NULL){
                        prev->setTail(temp->getTail());
                        delete temp;
                    }else{
                        event = temp->getTail();
                        delete temp;
                    }
                    if(e.button.button == SDL_BUTTON_LEFT){
                        event = new Event(e.button.x, e.button.y, b_LEFT, s_DOWN, event);
                    }else if(e.button.button == SDL_BUTTON_RIGHT){
                        event = new Event(e.button.x, e.button.y, b_RIGHT, s_DOWN, event);
                    }else if(e.button.button == SDL_BUTTON_MIDDLE){
                        event = new Event(e.button.x, e.button.y, b_MIDDLE, s_DOWN, event);
                    }
                    break;
                }else if(temp->getType() == e_DRAG){
                    // A drag is already in progress; leave the list untouched.
                    break;
                }else if(temp->getType() == e_HOLD){
                    // Remember the hold node (and its predecessor) for possible removal below.
                    hold = temp;
                    holdPrev = prev;
                }else if(temp->getType() == e_MOUSEPRESS && temp->getMouseState() == s_DOWN){
                    if(temp->getControl() == NULL){
                        // Unclaimed press: remove it (and any remembered hold) before pushing the new press.
                        if(prev == hold && prev != NULL){
                            // hold directly precedes the press: unlink both with one splice.
                            if(holdPrev != NULL){
                                holdPrev->setTail(temp->getTail());
                                delete hold;
                                delete temp;
                            }else{
                                event = temp->getTail();
                                delete hold;
                                delete temp;
                            }
                        }else{
                            if(hold != NULL){
                                if(holdPrev != NULL){
                                    holdPrev->setTail(hold->getTail());
                                    delete hold;
                                }else{
                                    event = hold->getTail();
                                    delete hold;
                                }
                            }
                            if(prev != NULL){
                                prev->setTail(temp->getTail());
                                delete temp;
                            }else{
                                event = temp->getTail();
                                delete temp;
                            }
                        }
                        if(e.button.button == SDL_BUTTON_LEFT){
                            event = new Event(e.button.x, e.button.y, b_LEFT, s_DOWN, event);
                        }else if(e.button.button == SDL_BUTTON_RIGHT){
                            event = new Event(e.button.x, e.button.y, b_RIGHT, s_DOWN, event);
                        }else if(e.button.button == SDL_BUTTON_MIDDLE){
                            event = new Event(e.button.x, e.button.y, b_MIDDLE, s_DOWN, event);
                        }
                        break;
                    }else{
                        // Press already owned by a control: stop scanning.
                        break;
                    }
                }else if(temp->getType() == e_CLICK){
                    // Expire the click if >500ms old or a different button was pressed
                    // (&& binds tighter than || in these conditions).
                    if(e.button.timestamp - temp->getTimeStamp() > 500 || temp->getButton() == b_LEFT && e.button.button != SDL_BUTTON_LEFT || temp->getButton() == b_RIGHT && e.button.button != SDL_BUTTON_RIGHT || temp->getButton() == b_MIDDLE && e.button.button != SDL_BUTTON_MIDDLE){
                        if(prev != NULL){
                            prev->setTail(temp->getTail());
                            delete temp;
                        }else{
                            event = temp->getTail();
                            delete temp;
                        }
                        if(e.button.button == SDL_BUTTON_LEFT){
                            event = new Event(e.button.x, e.button.y, b_LEFT, s_DOWN, event);
                        }else if(e.button.button == SDL_BUTTON_RIGHT){
                            event = new Event(e.button.x, e.button.y, b_RIGHT, s_DOWN, event);
                        }else if(e.button.button == SDL_BUTTON_MIDDLE){
                            event = new Event(e.button.x, e.button.y, b_MIDDLE, s_DOWN, event);
                        }
                        break;
                    }
                }
                prev = temp;
                temp = temp->getTail();
            }
        /*Mouse Button Up Event*/
        }else if(e.type == SDL_MOUSEBUTTONUP){
            // Find the matching s_DOWN press (and possibly a prior click) so the
            // release can be promoted to a single/double/triple click event.
            Event *press = NULL;
            Event *click = NULL;
            Event *temp = event;
            Event *prev = NULL;
            while(temp){
                if(temp->getType() == e_MOUSEPRESS && temp->getMouseState() == s_DOWN){
                    if(temp->getButton() == b_LEFT && e.button.button == SDL_BUTTON_LEFT || temp->getButton() == b_RIGHT && e.button.button == SDL_BUTTON_RIGHT || temp->getButton() == b_MIDDLE && e.button.button == SDL_BUTTON_MIDDLE){
                        // Detach (but keep) the matching press; deleted below once resolved.
                        press = temp;
                        if(prev != NULL){
                            prev->setTail(temp->getTail());
                        }else{
                            event = temp->getTail();
                        }
                        temp = temp->getTail();
                        continue;
                    }
                }else if(temp->getType() == e_CLICK){
                    if(e.button.timestamp - temp->getTimeStamp() > 500){
                        // Stale click: drop it from the list.
                        if(prev != NULL){
                            prev->setTail(temp->getTail());
                        }else{
                            event = temp->getTail();
                        }
                        Event *holder = temp->getTail();
                        delete temp;
                        temp = holder;
                        break;
                    }
                    if(press != NULL){
                        if(temp->getButton() == b_LEFT && e.button.button == SDL_BUTTON_LEFT || temp->getButton() == b_RIGHT && e.button.button == SDL_BUTTON_RIGHT || temp->getButton() == b_MIDDLE && e.button.button == SDL_BUTTON_MIDDLE){
                            // Detach (but keep) the prior click for multi-click promotion below.
                            click = temp;
                            if(prev != NULL){
                                prev->setTail(temp->getTail());
                            }else{
                                event = temp->getTail();
                            }
                            break;
                        }
                    }else{
                        break;
                    }
                }else if(temp->getType() == e_HOLD || temp->getType() == e_DRAG){
                    // A release cancels any pending hold/drag.
                    if(prev != NULL){
                        prev->setTail(temp->getTail());
                    }else{
                        event = temp->getTail();
                    }
                    Event *holder = temp->getTail();
                    delete temp;
                    temp = holder;
                    continue;
                }
                prev = temp;
                temp = temp->getTail();
            }
            if(click){
                // Promote single->double->triple; triple wraps back to single.
                if(click->getClick() == c_SINGLE){
                    event = new Event(e.button.x, e.button.y, click->getButton(), c_DOUBLE, event);
                }else if(click->getClick() == c_DOUBLE){
                    event = new Event(e.button.x, e.button.y, click->getButton(), c_TRIPLE, event);
                }else if(click->getClick() == c_TRIPLE){
                    event = new Event(e.button.x, e.button.y, click->getButton(), c_SINGLE, event);
                }
                delete click;
                delete press;
            }else if(press){
                // Press+release with no prior click: emit a single click.
                event = new Event(e.button.x, e.button.y, press->getButton(), c_SINGLE, event);
                delete press;
            }else{
                // No matching press found: emit a plain button-up event.
                if(e.button.button == SDL_BUTTON_LEFT){
                    event = new Event(e.button.x, e.button.y, b_LEFT, s_UP, event);
                }else if(e.button.button == SDL_BUTTON_RIGHT){
                    event = new Event(e.button.x, e.button.y, b_RIGHT, s_UP, event);
                }else if(e.button.button == SDL_BUTTON_MIDDLE){
                    event = new Event(e.button.x, e.button.y, b_MIDDLE, s_UP, event);
                }
            }
        /*Mouse Motion Event*/
        }else if(e.type == SDL_MOUSEMOTION){
            // Fold the motion into an existing move/hover/drag/press node,
            // replacing it with a fresh node carrying the previous position.
            Event *temp = event;
            Event *prev = NULL;
            while(temp){
                if(temp->getType() == e_MOVE || temp->getType() == e_HOVER){
                    if(prev != NULL){
                        prev->setTail(temp->getTail());
                    }else{
                        event = temp->getTail();
                    }
                    event = new Event(e.motion.x, e.motion.y, temp->getX(), temp->getY(), event);
                    delete temp;
                    break;
                }else if(temp->getType() == e_DRAG){
                    if(prev != NULL){
                        prev->setTail(temp->getTail());
                    }else{
                        event = temp->getTail();
                    }
                    event = new Event(e.motion.x, e.motion.y, temp->getX(), temp->getY(), temp->getButton(), event);
                    delete temp;
                    break;
                }else if(temp->getType() == e_HOLD){
                    // Motion cancels a pending hold.
                    if(prev != NULL){
                        prev->setTail(temp->getTail());
                    }else{
                        event = temp->getTail();
                    }
                    Event *holder = temp->getTail();
                    delete temp;
                    temp = holder;
                    continue;
                }else if(temp->getType() == e_MOUSEPRESS && temp->getMouseState() == s_DOWN){
                    if(prev != NULL){
                        prev->setTail(temp->getTail());
                    }else{
                        event = temp->getTail();
                    }
                    // Unclaimed press becomes a move; claimed press becomes a drag.
                    if(temp->getControl() == NULL){
                        event = new Event(e.motion.x, e.motion.y, temp->getX(), temp->getY(), event);
                    }else{
                        event = new Event(e.motion.x, e.motion.y, temp->getX(), temp->getY(), temp->getButton(), event);
                    }
                    delete temp;
                    break;
                }
                prev = temp;
                temp = temp->getTail();
            }
        /*Mouse Wheel Event*/
        }else if(e.type == SDL_MOUSEWHEEL){
            int x;
            int y;
            // NOTE(review): 'state' is never read — only the cursor position is used.
            uint state = SDL_GetMouseState(&x, &y);
            EVENT_Modifier mod = m_NONE;
            if(SDL_GetModState() & KMOD_SHIFT){
                mod = m_SHIFT;
            }
            if(e.wheel.y > 0){
                event = new Event(x, y, w_UP, mod, event);
            }else if(e.wheel.y < 0){
                event = new Event(x, y, w_DOWN, mod, event);
            }else if(e.wheel.x > 0){
                event = new Event(x, y, w_LEFT, mod, event);
            }else if(e.wheel.x < 0){
                event = new Event(x, y, w_RIGHT, mod, event);
            }
        /*Text Input Event*/
        }else if(e.type == SDL_TEXTINPUT){
            event = new Event(e.text.text, event);
        /*Key Down Event*/
        }else if(e.type == SDL_KEYDOWN){
            // Only one modifier is recorded; shift wins over ctrl, ctrl over alt.
            if(SDL_GetModState() & KMOD_SHIFT){
                event = new Event(e.key.keysym.sym, s_DOWN, m_SHIFT, event);
            }else if(SDL_GetModState() & KMOD_CTRL){
                event = new Event(e.key.keysym.sym, s_DOWN, m_CONTROL, event);
            }else if(SDL_GetModState() & KMOD_ALT){
                event = new Event(e.key.keysym.sym, s_DOWN, m_ALT, event);
            }else{
                event = new Event(e.key.keysym.sym, s_DOWN, m_NONE, event);
            }
        /*Key Up Event*/
        }else if(e.type == SDL_KEYUP){
            // Remove the matching pending key-down before pushing the key-up.
            Event *temp = event;
            Event *prev = NULL;
            while(temp){
                if(temp->getType() == e_KEYPRESS && temp->getKeyState() == s_DOWN && temp->getKey() == e.key.keysym.sym){
                    if(prev != NULL){
                        prev->setTail(temp->getTail());
                        delete temp;
                        break;
                    }else{
                        event = temp->getTail();
                        delete temp;
                        break;
                    }
                }
                prev = temp;
                temp = temp->getTail();
            }
            if(SDL_GetModState() & KMOD_SHIFT){
                event = new Event(e.key.keysym.sym, s_UP, m_SHIFT, event);
            }else if(SDL_GetModState() & KMOD_CTRL){
                event = new Event(e.key.keysym.sym, s_UP, m_CONTROL, event);
            }else if(SDL_GetModState() & KMOD_ALT){
                event = new Event(e.key.keysym.sym, s_UP, m_ALT, event);
            }else{
                event = new Event(e.key.keysym.sym, s_UP, m_NONE, event);
            }
        }else{
            // Unhandled SDL event: skip dispatch for this iteration.
            continue;
        }
        if(event != NULL){
            eventNeeded = false;
            handleControls(event);
        }
    }
    /*if an event still needs to be generated*/
    if(eventNeeded){
        // No SDL event produced a dispatchable node; synthesize one (hover/hold/
        // move/drag) from the most recent list state, or from the cursor position.
        Event *temp = event;
        Event *prev = NULL;
        while(temp){
            // NOTE(review): these first two branches test event->getType() while the
            // later branches test temp->getType(); event == temp only on the first
            // iteration, so this looks like an unintended mix-up — confirm.
            if(event->getType() == e_HOVER){
                if(prev != NULL){
                    prev->setTail(temp->getTail());
                }else{
                    event = temp->getTail();
                }
                event = new Event(temp->getX(), temp->getY(), event);
                delete temp;
                break;
            }else if(event->getType() == e_HOLD){
                if(prev != NULL){
                    prev->setTail(temp->getTail());
                }else{
                    event = temp->getTail();
                }
                event = new Event(temp->getX(), temp->getY(), temp->getButton(), event);
                delete temp;
                break;
            }else if(temp->getType() == e_MOVE){
                event = new Event(temp->getX(), temp->getY(), event);
                break;
            }else if(temp->getType() == e_DRAG){
                event = new Event(temp->getX(), temp->getY(), temp->getButton(), event);
                break;
            }else if(temp->getType() == e_MOUSEPRESS && temp->getMouseState() == s_DOWN){
                event = new Event(temp->getX(), temp->getY(), temp->getButton(), event);
                break;
            }else if(temp->getType() == e_MOUSEPRESS && temp->getMouseState() == s_UP){
                if(prev != NULL){
                    prev->setTail(temp->getTail());
                }else{
                    event = temp->getTail();
                }
                event = new Event(temp->getX(), temp->getY(), event);
                delete temp;
                break;
            }
            prev = temp;
            temp = temp->getTail();
        }
        if(temp == NULL){
            int x;
            int y;
            // NOTE(review): 'state' is never read — only the cursor position is used.
            uint state = SDL_GetMouseState(&x, &y);
            event = new Event(x, y, event);
        }
        handleControls(event);
    }
}

/*see which controls have a use for the given event.
The active control gets first refusal; otherwise controls are offered the
event in reverse order (topmost drawn first), and whichever claims it
(event->getControl() == control) becomes the new active control.*/
void Window::handleControls(Event *event){
    if(activeControl != NULL && activeControl->getVisible()){
        activeControl->handleEvent(event);
        if(event->getControl() == activeControl){
            return;
        }
    }
    for(int i=controls.size()-1;i>=0;i--){
        if(controls.at(i) != activeControl && controls.at(i)->getVisible()){
            controls.at(i)->handleEvent(event);
            if(event->getControl() == controls.at(i)){
                activeControl = controls.at(i);
                return;
            }
        }
    }
}

/*clear the window*/
void Window::clear(Color color){
    SDL_SetRenderDrawColor(renderer, color.getRed(), color.getGreen(), color.getBlue(), color.getAlpha());
    SDL_RenderClear(renderer);
}

/*post the prepared frame to the window*/
void Window::postFrame(){
    SDL_RenderPresent(renderer);
    fullRedraw = false;
}

/*draw the given object*/
void Window::draw(DrawObject *object){
    object->draw(renderer);
}

/*add the given control (the window does not take ownership)*/
void Window::addControl(Control *control){
    control->setWindow(this);
    controls.push_back(control);
}

/*remove the given control (first match only; the control is not deleted)*/
void Window::removeControl(Control *control){
    for(int i=0;i < controls.size();i++){
        if(controls.at(i) == control){
            controls.erase(controls.begin() + i);
            return;
        }
    }
}

/*draw all controls; the active control is drawn last so it appears on top*/
void Window::drawControls(){
    for(int i=0;i<controls.size();i++){
        if(controls.at(i) != activeControl && controls.at(i)->getVisible()){
            controls.at(i)->draw(renderer);
        }
    }
    if(activeControl != NULL && activeControl->getVisible()){
        activeControl->draw(renderer);
    }
}

/*update the theme of the controls in the window; returns true only if every
control reports a successful update*/
bool Window::updateTheme(){
    bool ret = true;
    for(int i=0;i<controls.size();i++){
        ret = ret && controls.at(i)->updateTheme();
    }
    return ret;
}
#ifndef LSMS_DEVICE_STORAGE_HPP
#define LSMS_DEVICE_STORAGE_HPP

#include "Real.hpp"
#include "Complex.hpp"
#include <cublas_v2.h>

// When OpenMP is unavailable, provide single-thread stand-ins for the three
// omp_* queries used below so the accessors still compile.
#ifdef _OPENMP
#include <omp.h>
#else
#ifndef LSMS_DUMMY_OPENMP
#define LSMS_DUMMY_OPENMP
inline int omp_get_max_threads() {return 1;}
inline int omp_get_num_threads() {return 1;}
inline int omp_get_thread_num() {return 0;}
#endif
#endif

// #include "DeviceMatrix.hpp"
// Forward declaration only; the full DeviceMatrix definition is not needed here.
template <class T> class DeviceMatrix;

// C-linkage accessors (presumably for Fortran interop, given the trailing
// underscores and by-reference int arguments — confirm against callers).
extern "C" Complex* get_dev_m_();
extern "C" Complex* get_dev_bgij_();
extern "C" Complex* get_dev_tmat_n_();
extern "C" int* get_dev_ipvt_();
extern "C" cudaStream_t get_stream_(const int &id);
extern "C" cublasHandle_t get_cublas_handle_();
extern "C" cudaEvent_t get_cuda_event_();
extern "C" Complex* get_host_m_(const int &max_nrmat_ns);

// Upper bound on concurrently served OpenMP threads; arrays below are indexed
// by omp_get_thread_num(), which must stay below this.
static const int MAX_THREADS=16;

// Process-wide (static) pool of per-thread GPU work buffers, cuBLAS handles,
// streams and events. Each accessor returns the slot belonging to the calling
// OpenMP thread. allocate() must run before the buffers are used; free()
// releases them.
class DeviceStorage {
private:
  static int nThreads;
  // Per-thread device buffers for the scattering matrices.
  static Complex *dev_m[MAX_THREADS], *dev_bgij[MAX_THREADS], *dev_tmat_n[MAX_THREADS];
  // Per-thread pivot arrays (LU factorization workspace).
  static int *dev_ipvt[MAX_THREADS];
  static cublasHandle_t cublas_h[MAX_THREADS];
  static cudaEvent_t event[MAX_THREADS];
  // Two CUDA streams per thread (see getStream(i)).
  static cudaStream_t stream[MAX_THREADS][2];
  static DeviceMatrix<Complex> dev_tmat_store;
  static bool initialized;
public:
  static int allocate(int kkrsz_max,int nspin, int numLIZ, int _nThreads);
  static void free();
  static Complex* getDevM() { return dev_m[omp_get_thread_num()]; }
  // NOTE(review): only this accessor guards against use before allocate();
  // the sibling getters silently return uninitialized pointers — confirm
  // whether the check should be applied uniformly.
  static Complex* getDevBGij() { if(!initialized) {printf("DeviceStorage not initialized\n"); exit(1);} return dev_bgij[omp_get_thread_num()]; }
  static Complex* getDevTmatN() { return dev_tmat_n[omp_get_thread_num()]; }
  static int* getDevIpvt() { return dev_ipvt[omp_get_thread_num()]; }
  static cudaStream_t getStream(int i) { return stream[omp_get_thread_num()][i]; }
  static cudaEvent_t getEvent() { return event[omp_get_thread_num()]; }
  static cublasHandle_t getCublasHandle() { return cublas_h[omp_get_thread_num()]; }
  static DeviceMatrix<Complex>* getDevTmatStore() { return &dev_tmat_store; }
};

DeviceMatrix<Complex>* get_dev_tmat_store();

// Opaque-pointer lifecycle wrappers around DeviceStorage (C-style API).
void *allocateDStore(void);
void freeDStore(void * d_store);
int initDStore(void * d_store,int kkrsz_max, int nspin, int numLIZ, int nthreads);

#endif
/** * Copyright 2014-2017 Andreas Schäfer * Copyright 2018 Google * * Distributed under the Boost Software License, Version 1.0. (See accompanying * file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) */ #ifndef FLAT_ARRAY_SHORT_VEC_HPP #define FLAT_ARRAY_SHORT_VEC_HPP // disable certain warnings from system headers when compiling with // Microsoft Visual Studio: #ifdef _MSC_BUILD #pragma warning( push ) #pragma warning( disable : 4514 4710 ) #endif #include <cstdlib> #include <sstream> #ifdef _MSC_BUILD #pragma warning( pop ) #endif namespace LibFlatArray { template<typename CARGO, std::size_t ARITY> class short_vec; template<typename CARGO, std::size_t ARITY> inline short_vec<CARGO, ARITY> operator+(CARGO a, const short_vec<CARGO, ARITY>& b) { return short_vec<CARGO, ARITY>(a) + b; } template<typename CARGO, std::size_t ARITY> inline short_vec<CARGO, ARITY> operator-(CARGO a, const short_vec<CARGO, ARITY>& b) { return short_vec<CARGO, ARITY>(a) - b; } template<typename CARGO, std::size_t ARITY> inline short_vec<CARGO, ARITY> operator*(CARGO a, const short_vec<CARGO, ARITY>& b) { return short_vec<CARGO, ARITY>(a) * b; } template<typename CARGO, std::size_t ARITY> inline short_vec<CARGO, ARITY> operator/(CARGO a, const short_vec<CARGO, ARITY>& b) { return short_vec<CARGO, ARITY>(a) / b; } // Don't warn about these functions being stripped from an executable // as they're not being used, that's actually expected behavior. 
#ifdef _MSC_BUILD
#pragma warning( push )
#pragma warning( disable : 4514 )
#endif

// any(): reduce a vector or bit-mask to a truth value ("is any lane set?").
// For the integer mask overloads the mask itself is its own truth value.
template<typename CARGO, std::size_t ARITY >
inline
bool any(const short_vec<CARGO, ARITY>& vec)
{
    return vec.any();
}

inline
unsigned any(unsigned mask)
{
    return mask;
}

inline
unsigned short any(unsigned short mask)
{
    return mask;
}

inline
unsigned char any(unsigned char mask)
{
    return mask;
}

// get(): extract lane i — from a vector via operator[], or from an integer
// bit-mask by testing bit i.
template<typename CARGO, std::size_t ARITY >
inline
CARGO get(const short_vec<CARGO, ARITY>& vec, const int i)
{
    return vec[i];
}

inline
bool get(unsigned mask, const int i)
{
    return (mask >> i) & 1;
}

inline
bool get(unsigned short mask, const int i)
{
    return (mask >> i) & 1;
}

inline
bool get(unsigned char mask, const int i)
{
    return (mask >> i) & 1;
}

#ifdef _MSC_BUILD
#pragma warning( pop )
#endif

// not inlining is ok:
#ifdef _MSC_BUILD
#pragma warning( push )
#pragma warning( disable : 4710 )
#endif

// blend(): non-mutating select — returns v1 with the mask-selected lanes
// replaced by the corresponding lanes of v2.
template<typename SHORT_VEC1, typename SHORT_VEC2>
inline
SHORT_VEC1 blend(const SHORT_VEC1& v1, const SHORT_VEC2& v2, const typename SHORT_VEC1::mask_type& mask)
{
    SHORT_VEC1 ret = v1;
    ret.blend(mask, v2);
    return ret;
}

#ifdef _MSC_BUILD
#pragma warning( pop )
#endif

// count_mask(): number of set lanes in a mask, computed by blending a 0/1
// vector and summing lane by lane.
// fixme: this is slow
// fixme: replace by horizontal sum, get rid of get() alltoggether
template<typename T, std::size_t ARITY>
inline
std::size_t count_mask(const typename short_vec<T, ARITY>::mask_type& mask)
{
    if (!any(mask)) {
        return 0;
    }

    short_vec<T, ARITY> v(T(0));
    v.blend(mask, short_vec<T, ARITY>(T(1)));
    std::size_t sum = 0;
    for (int i = 0; i < static_cast<int>(ARITY); ++i) {
        sum += static_cast<std::size_t>(get(v, i));
    }

    return sum;
}

// Tag types naming the SIMD backend a short_vec specialization was built on;
// alignment<CARGO>::ALIGNMENT gives the required byte alignment per backend.
class short_vec_strategy
{
public:
    class scalar
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = sizeof(CARGO);
        };
    };

    class avx
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 32;
        };
    };

    class avx2
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 32;
        };
    };

    class avx512f
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 64;
        };
    };

    class cuda
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = sizeof(CARGO);
        };
    };

    class qpx
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 32;
        };
    };

    class sse
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 16;
        };
    };

    class sse2
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 16;
        };
    };

    class sse4_1
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 16;
        };
    };

    class mic
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 32;
        };
    };

    class neon
    {
    public:
        template<typename CARGO>
        class alignment
        {
        public:
            const static int ALIGNMENT = 16;
        };
    };
};

}

// Numeric identifiers for the ISA selection ladder below.
#define LIBFLATARRAY_SCALAR 10
#define LIBFLATARRAY_QPX 11
#define LIBFLATARRAY_ARM_NEON 12
#define LIBFLATARRAY_MIC 13
#define LIBFLATARRAY_AVX512F 14
#define LIBFLATARRAY_AVX 15
#define LIBFLATARRAY_AVX2 16
#define LIBFLATARRAY_SSE 17
#define LIBFLATARRAY_SSE2 18
#define LIBFLATARRAY_SSE4_1 19

#ifdef __CUDA_ARCH__
// Use only scalar short_vec implementations on CUDA devices:
#define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_SCALAR
#else

// for IBM Blue Gene/Q's QPX, which is mutually exclusive to
// Intel/AMD's AVX/SSE or ARM's NEON ISAs:
# ifdef __VECTOR4DOUBLE__
#  define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_QPX
# endif

// Dito for ARM NEON:
# ifdef __ARM_NEON__
#  define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_ARM_NEON
# endif

# ifndef LIBFLATARRAY_WIDEST_VECTOR_ISA

// Only the case of the IBM PC is complicated. No thanks to you,
// history!
# ifdef __MIC__ # define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_MIC # else # ifdef __AVX512F__ # define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_AVX512F # else # ifdef __AVX2__ # define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_AVX2 # else # ifdef __AVX__ # define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_AVX # else # ifdef __SSE4_1__ # define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_SSE4_1 # else # ifdef __SSE2__ # define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_SSE2 # else # ifdef __SSE__ # define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_SSE # else // fallback: scalar implementation always works and is still yields // code that's easy to vectorize for the compiler: # define LIBFLATARRAY_WIDEST_VECTOR_ISA LIBFLATARRAY_SCALAR # endif # endif # endif # endif # endif # endif # endif # endif #endif // Dirty workaround for using GCC 6.4.0 with CUDA related to GCC // changing the data type in the signatured of intrinsic function, but // not updating the built-in functions as well. 
Without this, // compilation would fail with a lots of error messages a la // // avx512vlintrin.h(11169): error: argument of type "void *" is incompatible with parameter of type "int *" // #if defined(__GNUC__) && defined(__CUDACC__) # if ((__GNUC__ == 6) && (__GNUC_MINOR__ == 4)) # define _AVX512BWINTRIN_H_INCLUDED # define _AVX512CDINTRIN_H_INCLUDED # define _AVX512DQINTRIN_H_INCLUDED # define _AVX512ERINTRIN_H_INCLUDED # define _AVX512FINTRIN_H_INCLUDED # define _AVX512IFMAINTRIN_H_INCLUDED # define _AVX512IFMAVLINTRIN_H_INCLUDED # define _AVX512PFINTRIN_H_INCLUDED # define _AVX512VBMIINTRIN_H_INCLUDED # define _AVX512VBMIVLINTRIN_H_INCLUDED # define _AVX512VLBWINTRIN_H_INCLUDED # define _AVX512VLDQINTRIN_H_INCLUDED # define _AVX512VLINTRIN_H_INCLUDED # endif #endif #include <libflatarray/detail/short_vec_avx512_double_8.hpp> #include <libflatarray/detail/short_vec_avx512_double_16.hpp> #include <libflatarray/detail/short_vec_avx512_double_32.hpp> #include <libflatarray/detail/short_vec_avx512_float_16.hpp> #include <libflatarray/detail/short_vec_avx512_float_32.hpp> #include <libflatarray/detail/short_vec_avx_double_4.hpp> #include <libflatarray/detail/short_vec_avx_double_8.hpp> #include <libflatarray/detail/short_vec_avx_double_16.hpp> #include <libflatarray/detail/short_vec_avx_double_32.hpp> #include <libflatarray/detail/short_vec_avx_float_8.hpp> #include <libflatarray/detail/short_vec_avx_float_16.hpp> #include <libflatarray/detail/short_vec_avx_float_32.hpp> #include <libflatarray/detail/short_vec_scalar_double_1.hpp> #include <libflatarray/detail/short_vec_scalar_double_2.hpp> #include <libflatarray/detail/short_vec_scalar_double_4.hpp> #include <libflatarray/detail/short_vec_scalar_double_8.hpp> #include <libflatarray/detail/short_vec_scalar_double_16.hpp> #include <libflatarray/detail/short_vec_scalar_double_32.hpp> #include <libflatarray/detail/short_vec_scalar_float_1.hpp> #include <libflatarray/detail/short_vec_scalar_float_2.hpp> #include 
<libflatarray/detail/short_vec_scalar_float_4.hpp> #include <libflatarray/detail/short_vec_scalar_float_8.hpp> #include <libflatarray/detail/short_vec_scalar_float_16.hpp> #include <libflatarray/detail/short_vec_scalar_float_32.hpp> #include <libflatarray/detail/short_vec_scalar_int_1.hpp> #include <libflatarray/detail/short_vec_scalar_int_2.hpp> #include <libflatarray/detail/short_vec_scalar_int_4.hpp> #include <libflatarray/detail/short_vec_scalar_int_8.hpp> #include <libflatarray/detail/short_vec_scalar_int_16.hpp> #include <libflatarray/detail/short_vec_scalar_int_32.hpp> #include <libflatarray/detail/short_vec_sse_int_4.hpp> #include <libflatarray/detail/short_vec_sse_int_8.hpp> #include <libflatarray/detail/short_vec_sse_int_16.hpp> #include <libflatarray/detail/short_vec_sse_int_32.hpp> #include <libflatarray/detail/short_vec_avx_int_8.hpp> #include <libflatarray/detail/short_vec_avx_int_16.hpp> #include <libflatarray/detail/short_vec_avx_int_32.hpp> #include <libflatarray/detail/short_vec_avx512_int_16.hpp> #include <libflatarray/detail/short_vec_avx512_int_32.hpp> #include <libflatarray/detail/short_vec_sse_double_2.hpp> #include <libflatarray/detail/short_vec_sse_double_4.hpp> #include <libflatarray/detail/short_vec_sse_double_8.hpp> #include <libflatarray/detail/short_vec_sse_double_16.hpp> #include <libflatarray/detail/short_vec_sse_double_32.hpp> #include <libflatarray/detail/short_vec_sse_float_4.hpp> #include <libflatarray/detail/short_vec_sse_float_8.hpp> #include <libflatarray/detail/short_vec_sse_float_16.hpp> #include <libflatarray/detail/short_vec_sse_float_32.hpp> #include <libflatarray/detail/short_vec_qpx_double_4.hpp> #include <libflatarray/detail/short_vec_qpx_double_8.hpp> #include <libflatarray/detail/short_vec_qpx_double_16.hpp> #include <libflatarray/detail/short_vec_qpx_double_32.hpp> #include <libflatarray/detail/short_vec_neon_float_4.hpp> #include <libflatarray/detail/short_vec_neon_float_8.hpp> #include 
<libflatarray/detail/short_vec_neon_float_16.hpp> #include <libflatarray/detail/short_vec_neon_float_32.hpp> #include <libflatarray/detail/short_vec_mic_double_8.hpp> #include <libflatarray/detail/short_vec_mic_double_16.hpp> #include <libflatarray/detail/short_vec_mic_double_32.hpp> #include <libflatarray/detail/short_vec_mic_float_16.hpp> #include <libflatarray/detail/short_vec_mic_float_32.hpp> #endif
/* * Copyright 2017 WebAssembly Community Group participants * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // // Convert the AST to a CFG, and optimize+convert it back to the AST // using the relooper. // // This pass depends on flatten being run before it. // #include <memory> #include "cfg/Relooper.h" #include "ir/flat.h" #include "ir/utils.h" #include "pass.h" #include "wasm-builder.h" #include "wasm-traversal.h" #include "wasm.h" namespace wasm { struct ReReloop final : public Pass { bool isFunctionParallel() override { return true; } Pass* create() override { return new ReReloop; } std::unique_ptr<CFG::Relooper> relooper; std::unique_ptr<Builder> builder; // block handling CFG::Block* currCFGBlock = nullptr; CFG::Block* makeCFGBlock() { return relooper->AddBlock(builder->makeBlock()); } CFG::Block* setCurrCFGBlock(CFG::Block* curr) { if (currCFGBlock) { finishBlock(); } return currCFGBlock = curr; } CFG::Block* startCFGBlock() { return setCurrCFGBlock(makeCFGBlock()); } CFG::Block* getCurrCFGBlock() { return currCFGBlock; } Block* getCurrBlock() { return currCFGBlock->Code->cast<Block>(); } void finishBlock() { getCurrBlock()->finalize(); } // break handling std::map<Name, CFG::Block*> breakTargets; void addBreakTarget(Name name, CFG::Block* target) { breakTargets[name] = target; } CFG::Block* getBreakTarget(Name name) { return breakTargets[name]; } // branch handling void addBranch(CFG::Block* from, CFG::Block* to, Expression* condition = nullptr) { 
  // (tail of addBranch — the declaration begins above this chunk)
  from->AddBranchTo(to, condition);
}

// Adds a switch-table branch from one CFG block to another, taken for any of
// the given table indices (the relooper wants them as a flat list).
void addSwitchBranch(CFG::Block* from,
                     CFG::Block* to,
                     const std::set<Index>& values) {
  std::vector<Index> list;
  for (auto i : values) {
    list.push_back(i);
  }
  from->AddSwitchBranchTo(to, std::move(list));
}

// we work using a stack of control flow tasks

// Base class for deferred work items. A task is pushed when we enter a control
// flow structure and run when the traversal reaches the matching point again
// (e.g. the join after an if arm).
struct Task {
  ReReloop& parent;
  Task(ReReloop& parent) : parent(parent) {}
  virtual void run() { WASM_UNREACHABLE("unimpl"); }
};

typedef std::shared_ptr<Task> TaskPtr;

// Pending tasks; processed LIFO by runOnFunction's main loop.
std::vector<TaskPtr> stack;

// Dispatches one expression to the appropriate handler via triage().
struct TriageTask final : public Task {
  Expression* curr;
  TriageTask(ReReloop& parent, Expression* curr) : Task(parent), curr(curr) {}
  void run() override { parent.triage(curr); }
};

// Handles a wasm Block: children are queued for triage, and if the block is a
// named break target, a join CFG block ("later") is created for branches to it.
struct BlockTask final : public Task {
  Block* curr;
  CFG::Block* later;
  BlockTask(ReReloop& parent, Block* curr) : Task(parent), curr(curr) {}
  static void handle(ReReloop& parent, Block* curr) {
    if (curr->name.is()) {
      // we may be branched to. create a target, and
      // ensure we are called at the join point
      auto task = std::make_shared<BlockTask>(parent, curr);
      task->curr = curr;
      task->later = parent.makeCFGBlock();
      parent.addBreakTarget(curr->name, task->later);
      parent.stack.push_back(task);
    }
    // push children in reverse so they pop (and run) in source order
    auto& list = curr->list;
    for (int i = int(list.size()) - 1; i >= 0; i--) {
      parent.stack.push_back(std::make_shared<TriageTask>(parent, list[i]));
    }
  }
  void run() override {
    // add fallthrough
    parent.addBranch(parent.getCurrCFGBlock(), later);
    parent.setCurrCFGBlock(later);
  }
};

// Handles a wasm Loop: the loop top becomes a break target so br-s to the loop
// name re-enter it; the body is simply triaged after it.
struct LoopTask final : public Task {
  static void handle(ReReloop& parent, Loop* curr) {
    parent.stack.push_back(std::make_shared<TriageTask>(parent, curr->body));
    if (curr->name.is()) {
      // we may be branched to. create a target
      auto* before = parent.getCurrCFGBlock();
      auto* top = parent.startCFGBlock();
      parent.addBreakTarget(curr->name, top);
      parent.addBranch(before, top);
    }
  }
};

// Handles a wasm If. Runs in up to two phases (the task is pushed once per
// arm): phase 0 fires at the end of ifTrue, phase 1 at the end of ifFalse.
struct IfTask final : public Task {
  If* curr;
  CFG::Block* condition; // the block that ends with the condition test
  CFG::Block* ifTrueEnd; // recorded in phase 0, consumed in phase 1
  int phase = 0;
  IfTask(ReReloop& parent, If* curr) : Task(parent), curr(curr) {}
  static void handle(ReReloop& parent, If* curr) {
    auto task = std::make_shared<IfTask>(parent, curr);
    task->curr = curr;
    task->condition = parent.getCurrCFGBlock();
    auto* ifTrueBegin = parent.startCFGBlock();
    parent.addBranch(task->condition, ifTrueBegin, curr->condition);
    if (curr->ifFalse) {
      parent.stack.push_back(task);
      parent.stack.push_back(
        std::make_shared<TriageTask>(parent, curr->ifFalse));
    }
    parent.stack.push_back(task);
    parent.stack.push_back(std::make_shared<TriageTask>(parent, curr->ifTrue));
  }
  void run() override {
    if (phase == 0) {
      // end of ifTrue
      ifTrueEnd = parent.getCurrCFGBlock();
      auto* after = parent.startCFGBlock();
      // if condition was false, go after the ifTrue, to ifFalse or outside
      parent.addBranch(condition, after);
      if (!curr->ifFalse) {
        parent.addBranch(ifTrueEnd, after);
      }
      phase++;
    } else if (phase == 1) {
      // end of ifFalse
      auto* ifFalseEnd = parent.getCurrCFGBlock();
      auto* after = parent.startCFGBlock();
      parent.addBranch(ifTrueEnd, after);
      parent.addBranch(ifFalseEnd, after);
    } else {
      WASM_UNREACHABLE("invalid phase");
    }
  }
};

// Handles a wasm Break (br / br_if): adds a branch to the named target; a
// conditional break also falls through to a fresh block, an unconditional one
// ends the current control flow.
struct BreakTask : public Task {
  static void handle(ReReloop& parent, Break* curr) {
    // add the branch. note how if the condition is false, it is the right
    // value there as well
    auto* before = parent.getCurrCFGBlock();
    parent.addBranch(
      before, parent.getBreakTarget(curr->name), curr->condition);
    if (curr->condition) {
      auto* after = parent.startCFGBlock();
      parent.addBranch(before, after);
    } else {
      parent.stopControlFlow();
    }
  }
};

// Handles a wasm Switch (br_table): groups table indices per target name and
// emits one switch branch per distinct target, plus the default.
struct SwitchTask : public Task {
  static void handle(ReReloop& parent, Switch* curr) {
    // set the switch condition for the block ending now
    auto* before = parent.getCurrCFGBlock();
    assert(!before->SwitchCondition);
    before->SwitchCondition = curr->condition;
    std::map<Name, std::set<Index>> targetValues;
    auto& targets = curr->targets;
    auto num = targets.size();
    for (Index i = 0; i < num; i++) {
      targetValues[targets[i]].insert(i);
    }
    for (auto& iter : targetValues) {
      parent.addSwitchBranch(
        before, parent.getBreakTarget(iter.first), iter.second);
    }
    // the default may be among the targets, in which case, we can't add it
    // simply as it would be a duplicate, so create a temp block
    if (targetValues.count(curr->default_) == 0) {
      parent.addSwitchBranch(
        before, parent.getBreakTarget(curr->default_), std::set<Index>());
    } else {
      auto* temp = parent.startCFGBlock();
      parent.addSwitchBranch(before, temp, std::set<Index>());
      parent.addBranch(temp, parent.getBreakTarget(curr->default_));
    }
    parent.stopControlFlow();
  }
};

// Handles a wasm Return: kept as-is in the current block; nothing follows it.
struct ReturnTask : public Task {
  static void handle(ReReloop& parent, Return* curr) {
    // reuse the return
    parent.getCurrBlock()->list.push_back(curr);
    parent.stopControlFlow();
  }
};

// Handles a wasm Unreachable: kept as-is; nothing follows it.
struct UnreachableTask : public Task {
  static void handle(ReReloop& parent, Unreachable* curr) {
    // reuse the unreachable
    parent.getCurrBlock()->list.push_back(curr);
    parent.stopControlFlow();
  }
};

// handle an element we encounter

// Routes one expression to its control-flow handler; non-control-flow
// expressions are appended to the current CFG block's code list unchanged.
void triage(Expression* curr) {
  if (auto* block = curr->dynCast<Block>()) {
    BlockTask::handle(*this, block);
  } else if (auto* loop = curr->dynCast<Loop>()) {
    LoopTask::handle(*this, loop);
  } else if (auto* iff = curr->dynCast<If>()) {
    IfTask::handle(*this, iff);
  } else if (auto* br = curr->dynCast<Break>()) {
    BreakTask::handle(*this, br);
  } else if (auto* sw = curr->dynCast<Switch>()) {
    SwitchTask::handle(*this, sw);
  } else if (auto* ret = curr->dynCast<Return>()) {
    ReturnTask::handle(*this, ret);
  } else if (auto* un = curr->dynCast<Unreachable>()) {
    UnreachableTask::handle(*this, un);
  } else if (curr->is<Try>() || curr->is<Throw>() || curr->is<Rethrow>()) {
    Fatal() << "ReReloop does not support EH instructions yet";
  } else {
    // not control flow, so just a simple element
    getCurrBlock()->list.push_back(curr);
  }
}

// Ends the current control flow by switching to a fresh CFG block; any block
// left with no exits is patched up later in runOnFunction.
void stopControlFlow() {
  startCFGBlock();
  // TODO: optimize with this?
}

// Pass entry point: walks the (flattened) function body building a relooper
// CFG, runs the relooper, and replaces the body with the re-structured code.
void runOnFunction(PassRunner* runner, Module* module, Function* function) override {
  Flat::verifyFlatness(function);
  // since control flow is flattened, this is pretty simple
  // first, traverse the function body. note how we don't need to traverse
  // into expressions, as we know they contain no control flow
  builder = make_unique<Builder>(*module);
  relooper = make_unique<CFG::Relooper>(module);
  auto* entry = startCFGBlock();
  stack.push_back(TaskPtr(new TriageTask(*this, function->body)));
  // main loop
  while (stack.size() > 0) {
    TaskPtr curr = stack.back();
    stack.pop_back();
    curr->run();
  }
  // finish the current block
  finishBlock();
  // blocks that do not have any exits are dead ends in the relooper. we need
  // to make sure that are in fact dead ends, and do not flow control
  // anywhere. add a return as needed
  for (auto& cfgBlock : relooper->Blocks) {
    auto* block = cfgBlock->Code->cast<Block>();
    if (cfgBlock->BranchesOut.empty() && block->type != Type::unreachable) {
      block->list.push_back(function->sig.results == Type::none
                              ? (Expression*)builder->makeReturn()
                              : (Expression*)builder->makeUnreachable());
      block->finalize();
    }
  }
#ifdef RERELOOP_DEBUG
  std::cout << "rerelooping " << function->name << '\n';
  for (auto* block : relooper->Blocks) {
    std::cout << block << " block:\n" << block->Code << '\n';
    for (auto& pair : block->BranchesOut) {
      auto* target = pair.first;
      auto* branch = pair.second;
      std::cout << "branch to " << target << "\n";
      if (branch->Condition) {
        std::cout << " with condition\n" << branch->Condition << '\n';
      }
    }
  }
#endif
  // run the relooper to recreate control flow
  relooper->Calculate(entry);
  // render
  {
    // the relooper needs a scratch i32 local for its label variable
    auto temp = builder->addVar(function, Type::i32);
    CFG::RelooperBuilder builder(*module, temp);
    function->body = relooper->Render(builder);
    // if the function has a result, and the relooper emitted
    // something that seems like it flows out without a value
    // (but that path is never reached; it just has a br to it
    // because of the relooper's boilerplate switch-handling
    // code, for example, which could be optimized out later
    // but isn't yet), then make sure it has a proper type
    if (function->sig.results != Type::none &&
        function->body->type == Type::none) {
      function->body =
        builder.makeSequence(function->body, builder.makeUnreachable());
    }
  }
  // TODO: should this be in the relooper itself?
  ReFinalize().walkFunctionInModule(function, module);
}
};

// Factory for the pass registry.
Pass* createReReloopPass() { return new ReReloop(); }

} // namespace wasm
// (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000. // Use, modification and distribution are subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt). // // See http://www.boost.org/libs/type_traits for most recent version including documentation. #ifndef BOOST_TT_HAS_NOTHROW_ASSIGN_HPP_INCLUDED #define BOOST_TT_HAS_NOTHROW_ASSIGN_HPP_INCLUDED #include "boost/type_traits/has_trivial_assign.hpp" // should be the last #include #include "boost/type_traits/detail/bool_trait_def.hpp" namespace boost { BOOST_TT_AUX_BOOL_TRAIT_DEF1(has_nothrow_assign,T,::boost::has_trivial_assign<T>::value) } // namespace boost #include "boost/type_traits/detail/bool_trait_undef.hpp" #endif // BOOST_TT_HAS_NOTHROW_ASSIGN_HPP_INCLUDED
/*
 * Copyright (C) 2019-2020 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include <level_zero/zet_api.h>

#include "sysman/sysman.h"

// Exported entry points for the Level Zero Sysman (zes) API. Each function is
// a thin trampoline: it recovers the implementation object from the opaque
// handle via fromHandle() and forwards the call. Features this driver does not
// implement return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE directly.

// --- Device ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceGetProperties(
    zes_device_handle_t hDevice,
    zes_device_properties_t *pProperties) {
    return L0::SysmanDevice::fromHandle(hDevice)->deviceGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceGetState(
    zes_device_handle_t hDevice,
    zes_device_state_t *pState) {
    return L0::SysmanDevice::fromHandle(hDevice)->deviceGetState(pState);
}

// --- Scheduler ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumSchedulers(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_sched_handle_t *phScheduler) {
    return L0::SysmanDevice::fromHandle(hDevice)->schedulerGet(pCount, phScheduler);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesSchedulerGetProperties(
    zes_sched_handle_t hScheduler,
    zes_sched_properties_t *pProperties) {
    return L0::Scheduler::fromHandle(hScheduler)->schedulerGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesSchedulerGetCurrentMode(
    zes_sched_handle_t hScheduler,
    zes_sched_mode_t *pMode) {
    return L0::Scheduler::fromHandle(hScheduler)->getCurrentMode(pMode);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesSchedulerGetTimeoutModeProperties(
    zes_sched_handle_t hScheduler,
    ze_bool_t getDefaults,
    zes_sched_timeout_properties_t *pConfig) {
    return L0::Scheduler::fromHandle(hScheduler)->getTimeoutModeProperties(getDefaults, pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesSchedulerGetTimesliceModeProperties(
    zes_sched_handle_t hScheduler,
    ze_bool_t getDefaults,
    zes_sched_timeslice_properties_t *pConfig) {
    return L0::Scheduler::fromHandle(hScheduler)->getTimesliceModeProperties(getDefaults, pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesSchedulerSetTimeoutMode(
    zes_sched_handle_t hScheduler,
    zes_sched_timeout_properties_t *pProperties,
    ze_bool_t *pNeedReload) {
    return L0::Scheduler::fromHandle(hScheduler)->setTimeoutMode(pProperties, pNeedReload);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesSchedulerSetTimesliceMode(
    zes_sched_handle_t hScheduler,
    zes_sched_timeslice_properties_t *pProperties,
    ze_bool_t *pNeedReload) {
    return L0::Scheduler::fromHandle(hScheduler)->setTimesliceMode(pProperties, pNeedReload);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesSchedulerSetExclusiveMode(
    zes_sched_handle_t hScheduler,
    ze_bool_t *pNeedReload) {
    return L0::Scheduler::fromHandle(hScheduler)->setExclusiveMode(pNeedReload);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesSchedulerSetComputeUnitDebugMode(
    zes_sched_handle_t hScheduler,
    ze_bool_t *pNeedReload) {
    return L0::Scheduler::fromHandle(hScheduler)->setComputeUnitDebugMode(pNeedReload);
}

// --- Processes / reset ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceProcessesGetState(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_process_state_t *pProcesses) {
    return L0::SysmanDevice::fromHandle(hDevice)->processesGetState(pCount, pProcesses);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceReset(
    zes_device_handle_t hDevice,
    ze_bool_t force) {
    return L0::SysmanDevice::fromHandle(hDevice)->deviceReset(force);
}

// --- PCI ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDevicePciGetProperties(
    zes_device_handle_t hDevice,
    zes_pci_properties_t *pProperties) {
    return L0::SysmanDevice::fromHandle(hDevice)->pciGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDevicePciGetState(
    zes_device_handle_t hDevice,
    zes_pci_state_t *pState) {
    return L0::SysmanDevice::fromHandle(hDevice)->pciGetState(pState);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDevicePciGetBars(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_pci_bar_properties_t *pProperties) {
    return L0::SysmanDevice::fromHandle(hDevice)->pciGetBars(pCount, pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDevicePciGetStats(
    zes_device_handle_t hDevice,
    zes_pci_stats_t *pStats) {
    return L0::SysmanDevice::fromHandle(hDevice)->pciGetStats(pStats);
}

// --- Power ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumPowerDomains(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_pwr_handle_t *phPower) {
    return L0::SysmanDevice::fromHandle(hDevice)->powerGet(pCount, phPower);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPowerGetProperties(
    zes_pwr_handle_t hPower,
    zes_power_properties_t *pProperties) {
    return L0::Power::fromHandle(hPower)->powerGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPowerGetEnergyCounter(
    zes_pwr_handle_t hPower,
    zes_power_energy_counter_t *pEnergy) {
    return L0::Power::fromHandle(hPower)->powerGetEnergyCounter(pEnergy);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPowerGetLimits(
    zes_pwr_handle_t hPower,
    zes_power_sustained_limit_t *pSustained,
    zes_power_burst_limit_t *pBurst,
    zes_power_peak_limit_t *pPeak) {
    return L0::Power::fromHandle(hPower)->powerGetLimits(pSustained, pBurst, pPeak);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPowerSetLimits(
    zes_pwr_handle_t hPower,
    const zes_power_sustained_limit_t *pSustained,
    const zes_power_burst_limit_t *pBurst,
    const zes_power_peak_limit_t *pPeak) {
    return L0::Power::fromHandle(hPower)->powerSetLimits(pSustained, pBurst, pPeak);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPowerGetEnergyThreshold(
    zes_pwr_handle_t hPower,
    zes_energy_threshold_t *pThreshold) {
    return L0::Power::fromHandle(hPower)->powerGetEnergyThreshold(pThreshold);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPowerSetEnergyThreshold(
    zes_pwr_handle_t hPower,
    double threshold) {
    return L0::Power::fromHandle(hPower)->powerSetEnergyThreshold(threshold);
}

// --- Frequency / overclocking ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumFrequencyDomains(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_freq_handle_t *phFrequency) {
    return L0::SysmanDevice::fromHandle(hDevice)->frequencyGet(pCount, phFrequency);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyGetProperties(
    zes_freq_handle_t hFrequency,
    zes_freq_properties_t *pProperties) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyGetAvailableClocks(
    zes_freq_handle_t hFrequency,
    uint32_t *pCount,
    double *phFrequency) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyGetAvailableClocks(pCount, phFrequency);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyGetRange(
    zes_freq_handle_t hFrequency,
    zes_freq_range_t *pLimits) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyGetRange(pLimits);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencySetRange(
    zes_freq_handle_t hFrequency,
    const zes_freq_range_t *pLimits) {
    return L0::Frequency::fromHandle(hFrequency)->frequencySetRange(pLimits);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyGetState(
    zes_freq_handle_t hFrequency,
    zes_freq_state_t *pState) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyGetState(pState);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyGetThrottleTime(
    zes_freq_handle_t hFrequency,
    zes_freq_throttle_time_t *pThrottleTime) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyGetThrottleTime(pThrottleTime);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcGetFrequencyTarget(
    zes_freq_handle_t hFrequency,
    double *pCurrentOcFrequency) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcGetFrequencyTarget(pCurrentOcFrequency);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcSetFrequencyTarget(
    zes_freq_handle_t hFrequency,
    double currentOcFrequency) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcSetFrequencyTarget(currentOcFrequency);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcGetVoltageTarget(
    zes_freq_handle_t hFrequency,
    double *pCurrentVoltageTarget,
    double *pCurrentVoltageOffset) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcGetVoltageTarget(pCurrentVoltageTarget, pCurrentVoltageOffset);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcSetVoltageTarget(
    zes_freq_handle_t hFrequency,
    double currentVoltageTarget,
    double currentVoltageOffset) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcSetVoltageTarget(currentVoltageTarget, currentVoltageOffset);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcSetMode(
    zes_freq_handle_t hFrequency,
    zes_oc_mode_t currentOcMode) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcSetMode(currentOcMode);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcGetMode(
    zes_freq_handle_t hFrequency,
    zes_oc_mode_t *pCurrentOcMode) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcGetMode(pCurrentOcMode);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcGetCapabilities(
    zes_freq_handle_t hFrequency,
    zes_oc_capabilities_t *pOcCapabilities) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcGetCapabilities(pOcCapabilities);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcGetIccMax(
    zes_freq_handle_t hFrequency,
    double *pOcIccMax) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcGetIccMax(pOcIccMax);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcSetIccMax(
    zes_freq_handle_t hFrequency,
    double ocIccMax) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcSetIccMax(ocIccMax);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcGetTjMax(
    zes_freq_handle_t hFrequency,
    double *pOcTjMax) {
    // NOTE(review): implementation method is spelled "frequencyOcGeTjMax"
    // (missing 't') — confirm this matches the Frequency interface
    // declaration before renaming either side.
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcGeTjMax(pOcTjMax);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFrequencyOcSetTjMax(
    zes_freq_handle_t hFrequency,
    double ocTjMax) {
    return L0::Frequency::fromHandle(hFrequency)->frequencyOcSetTjMax(ocTjMax);
}

// --- Engines ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumEngineGroups(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_engine_handle_t *phEngine) {
    return L0::SysmanDevice::fromHandle(hDevice)->engineGet(pCount, phEngine);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesEngineGetProperties(
    zes_engine_handle_t hEngine,
    zes_engine_properties_t *pProperties) {
    return L0::Engine::fromHandle(hEngine)->engineGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesEngineGetActivity(
    zes_engine_handle_t hEngine,
    zes_engine_stats_t *pStats) {
    return L0::Engine::fromHandle(hEngine)->engineGetActivity(pStats);
}

// --- Standby ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumStandbyDomains(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_standby_handle_t *phStandby) {
    return L0::SysmanDevice::fromHandle(hDevice)->standbyGet(pCount, phStandby);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesStandbyGetProperties(
    zes_standby_handle_t hStandby,
    zes_standby_properties_t *pProperties) {
    return L0::Standby::fromHandle(hStandby)->standbyGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesStandbyGetMode(
    zes_standby_handle_t hStandby,
    zes_standby_promo_mode_t *pMode) {
    return L0::Standby::fromHandle(hStandby)->standbyGetMode(pMode);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesStandbySetMode(
    zes_standby_handle_t hStandby,
    zes_standby_promo_mode_t mode) {
    return L0::Standby::fromHandle(hStandby)->standbySetMode(mode);
}

// --- Firmware ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumFirmwares(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_firmware_handle_t *phFirmware) {
    return L0::SysmanDevice::fromHandle(hDevice)->firmwareGet(pCount, phFirmware);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFirmwareGetProperties(
    zes_firmware_handle_t hFirmware,
    zes_firmware_properties_t *pProperties) {
    return L0::Firmware::fromHandle(hFirmware)->firmwareGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFirmwareFlash(
    zes_firmware_handle_t hFirmware,
    void *pImage,
    uint32_t size) {
    // flashing is not implemented by this driver; all parameters are ignored
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

// --- Memory ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumMemoryModules(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_mem_handle_t *phMemory) {
    return L0::SysmanDevice::fromHandle(hDevice)->memoryGet(pCount, phMemory);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesMemoryGetProperties(
    zes_mem_handle_t hMemory,
    zes_mem_properties_t *pProperties) {
    return L0::Memory::fromHandle(hMemory)->memoryGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesMemoryGetState(
    zes_mem_handle_t hMemory,
    zes_mem_state_t *pState) {
    return L0::Memory::fromHandle(hMemory)->memoryGetState(pState);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesMemoryGetBandwidth(
    zes_mem_handle_t hMemory,
    zes_mem_bandwidth_t *pBandwidth) {
    return L0::Memory::fromHandle(hMemory)->memoryGetBandwidth(pBandwidth);
}

// --- Fabric ports ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumFabricPorts(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_fabric_port_handle_t *phPort) {
    return L0::SysmanDevice::fromHandle(hDevice)->fabricPortGet(pCount, phPort);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFabricPortGetProperties(
    zes_fabric_port_handle_t hPort,
    zes_fabric_port_properties_t *pProperties) {
    return L0::FabricPort::fromHandle(hPort)->fabricPortGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFabricPortGetLinkType(
    zes_fabric_port_handle_t hPort,
    zes_fabric_link_type_t *pLinkType) {
    return L0::FabricPort::fromHandle(hPort)->fabricPortGetLinkType(pLinkType);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFabricPortGetConfig(
    zes_fabric_port_handle_t hPort,
    zes_fabric_port_config_t *pConfig) {
    return L0::FabricPort::fromHandle(hPort)->fabricPortGetConfig(pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFabricPortSetConfig(
    zes_fabric_port_handle_t hPort,
    const zes_fabric_port_config_t *pConfig) {
    return L0::FabricPort::fromHandle(hPort)->fabricPortSetConfig(pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFabricPortGetState(
    zes_fabric_port_handle_t hPort,
    zes_fabric_port_state_t *pState) {
    return L0::FabricPort::fromHandle(hPort)->fabricPortGetState(pState);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFabricPortGetThroughput(
    zes_fabric_port_handle_t hPort,
    zes_fabric_port_throughput_t *pThroughput) {
    return L0::FabricPort::fromHandle(hPort)->fabricPortGetThroughput(pThroughput);
}

// --- Temperature ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumTemperatureSensors(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_temp_handle_t *phTemperature) {
    return L0::SysmanDevice::fromHandle(hDevice)->temperatureGet(pCount, phTemperature);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesTemperatureGetProperties(
    zes_temp_handle_t hTemperature,
    zes_temp_properties_t *pProperties) {
    return L0::Temperature::fromHandle(hTemperature)->temperatureGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesTemperatureGetConfig(
    zes_temp_handle_t hTemperature,
    zes_temp_config_t *pConfig) {
    return L0::Temperature::fromHandle(hTemperature)->temperatureGetConfig(pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesTemperatureSetConfig(
    zes_temp_handle_t hTemperature,
    const zes_temp_config_t *pConfig) {
    return L0::Temperature::fromHandle(hTemperature)->temperatureSetConfig(pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesTemperatureGetState(
    zes_temp_handle_t hTemperature,
    double *pTemperature) {
    return L0::Temperature::fromHandle(hTemperature)->temperatureGetState(pTemperature);
}

// --- PSU (not supported by this driver) ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumPsus(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_psu_handle_t *phPsu) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPsuGetProperties(
    zes_psu_handle_t hPsu,
    zes_psu_properties_t *pProperties) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPsuGetState(
    zes_psu_handle_t hPsu,
    zes_psu_state_t *pState) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

// --- Fans ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumFans(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_fan_handle_t *phFan) {
    return L0::SysmanDevice::fromHandle(hDevice)->fanGet(pCount, phFan);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFanGetProperties(
    zes_fan_handle_t hFan,
    zes_fan_properties_t *pProperties) {
    return L0::Fan::fromHandle(hFan)->fanGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFanGetConfig(
    zes_fan_handle_t hFan,
    zes_fan_config_t *pConfig) {
    return L0::Fan::fromHandle(hFan)->fanGetConfig(pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFanSetDefaultMode(
    zes_fan_handle_t hFan) {
    return L0::Fan::fromHandle(hFan)->fanSetDefaultMode();
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFanSetFixedSpeedMode(
    zes_fan_handle_t hFan,
    const zes_fan_speed_t *speed) {
    return L0::Fan::fromHandle(hFan)->fanSetFixedSpeedMode(speed);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFanSetSpeedTableMode(
    zes_fan_handle_t hFan,
    const zes_fan_speed_table_t *speedTable) {
    return L0::Fan::fromHandle(hFan)->fanSetSpeedTableMode(speedTable);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesFanGetState(
    zes_fan_handle_t hFan,
    zes_fan_speed_units_t units,
    int32_t *pSpeed) {
    return L0::Fan::fromHandle(hFan)->fanGetState(units, pSpeed);
}

// --- LEDs (not supported by this driver) ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumLeds(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_led_handle_t *phLed) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesLedGetProperties(
    zes_led_handle_t hLed,
    zes_led_properties_t *pProperties) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesLedGetState(
    zes_led_handle_t hLed,
    zes_led_state_t *pState) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesLedSetState(
    zes_led_handle_t hLed,
    ze_bool_t enable) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesLedSetColor(
    zes_led_handle_t hLed,
    const zes_led_color_t *pColor) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

// --- RAS ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumRasErrorSets(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_ras_handle_t *phRas) {
    return L0::SysmanDevice::fromHandle(hDevice)->rasGet(pCount, phRas);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesRasGetProperties(
    zes_ras_handle_t hRas,
    zes_ras_properties_t *pProperties) {
    return L0::Ras::fromHandle(hRas)->rasGetProperties(pProperties);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesRasGetConfig(
    zes_ras_handle_t hRas,
    zes_ras_config_t *pConfig) {
    return L0::Ras::fromHandle(hRas)->rasGetConfig(pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesRasSetConfig(
    zes_ras_handle_t hRas,
    const zes_ras_config_t *pConfig) {
    return L0::Ras::fromHandle(hRas)->rasSetConfig(pConfig);
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesRasGetState(
    zes_ras_handle_t hRas,
    ze_bool_t clear,
    zes_ras_state_t *pState) {
    // NOTE(review): `clear` is accepted but not forwarded, so callers cannot
    // reset error counters through this entry point — confirm whether
    // rasGetState should take it.
    return L0::Ras::fromHandle(hRas)->rasGetState(pState);
}

// --- Events (not supported by this driver) ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEventRegister(
    zes_device_handle_t hDevice,
    zes_event_type_flags_t events) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDriverEventListen(
    ze_driver_handle_t hDriver,
    uint32_t timeout,
    uint32_t count,
    zes_device_handle_t *phDevices,
    uint32_t *pNumDeviceEvents,
    zes_event_type_flags_t *pEvents) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

// --- Diagnostics (not supported by this driver) ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumDiagnosticTestSuites(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_diag_handle_t *phDiagnostics) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDiagnosticsGetProperties(
    zes_diag_handle_t hDiagnostics,
    zes_diag_properties_t *pProperties) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDiagnosticsGetTests(
    zes_diag_handle_t hDiagnostics,
    uint32_t *pCount,
    zes_diag_test_t *pTests) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDiagnosticsRunTests(
    zes_diag_handle_t hDiagnostics,
    uint32_t start,
    uint32_t end,
    zes_diag_result_t *pResult) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

// --- Performance factor (not supported by this driver) ---

ZE_APIEXPORT ze_result_t ZE_APICALL
zesDeviceEnumPerformanceFactorDomains(
    zes_device_handle_t hDevice,
    uint32_t *pCount,
    zes_perf_handle_t *phPerf) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPerformanceFactorGetProperties(
    zes_perf_handle_t hPerf,
    zes_perf_properties_t *pProperties) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPerformanceFactorGetConfig(
    zes_perf_handle_t hPerf,
    double *pFactor) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}

ZE_APIEXPORT ze_result_t ZE_APICALL
zesPerformanceFactorSetConfig(
    zes_perf_handle_t hPerf,
    double factor) {
    return ZE_RESULT_ERROR_UNSUPPORTED_FEATURE;
}
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/medialive/model/AacProfile.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>

using namespace Aws::Utils;

namespace Aws
{
  namespace MediaLive
  {
    namespace Model
    {
      namespace AacProfileMapper
      {
        // Precomputed hashes of the serialized enum names; an incoming
        // string is hashed once and compared against these.
        static const int HEV1_HASH = HashingUtils::HashString("HEV1");
        static const int HEV2_HASH = HashingUtils::HashString("HEV2");
        static const int LC_HASH = HashingUtils::HashString("LC");

        // Parses a serialized name into an AacProfile value. Names that do
        // not match any known value are remembered in the process-wide
        // overflow container (keyed by hash) so the original string can be
        // recovered when serializing back.
        AacProfile GetAacProfileForName(const Aws::String& name)
        {
          const int nameHash = HashingUtils::HashString(name.c_str());
          if (nameHash == HEV1_HASH)
          {
            return AacProfile::HEV1;
          }
          if (nameHash == HEV2_HASH)
          {
            return AacProfile::HEV2;
          }
          if (nameHash == LC_HASH)
          {
            return AacProfile::LC;
          }
          // Unknown name: stash the raw string and smuggle its hash through
          // the enum type so round-tripping preserves it.
          EnumParseOverflowContainer* overflow = Aws::GetEnumOverflowContainer();
          if (overflow)
          {
            overflow->StoreOverflow(nameHash, name);
            return static_cast<AacProfile>(nameHash);
          }
          return AacProfile::NOT_SET;
        }

        // Serializes an AacProfile back to its wire name. Values produced by
        // the overflow path above are looked up by hash; otherwise an empty
        // string is returned.
        Aws::String GetNameForAacProfile(AacProfile enumValue)
        {
          switch (enumValue)
          {
          case AacProfile::HEV1:
            return "HEV1";
          case AacProfile::HEV2:
            return "HEV2";
          case AacProfile::LC:
            return "LC";
          default:
            EnumParseOverflowContainer* overflow = Aws::GetEnumOverflowContainer();
            if (overflow)
            {
              return overflow->RetrieveOverflow(static_cast<int>(enumValue));
            }
            return {};
          }
        }

      } // namespace AacProfileMapper
    } // namespace Model
  } // namespace MediaLive
} // namespace Aws
#include "external_source_ui.h"
#include "printing.h"
#include "config.h"
#include "util.h"

#include <iostream>

// Parses a textual move by round-tripping: candidate Move values are
// generated and their toString() output compared against the input. Returns a
// default-constructed Move (type M_NONE — per the inputMove retry loop below)
// when nothing matches.
Move stringToMove(const std::string& moveStr)
{
    Move move;
    move.type = M_CLOSE;
    if (stringMatch(moveStr, move.toString())) return move;
    move.type = M_EXCHANGE;
    if (stringMatch(moveStr, move.toString())) return move;
    move.type = M_PLAY;
    // try every card, both as a plain play (score 0) and, for marriage
    // ranks, as a marriage declaration (score 1)
    for (int code = 0; code < NUM_SUITS * NUM_RANKS; ++code)
    {
        move.score = 0;
        move.card = Card(code);
        if (stringMatch(moveStr, move.toString())) return move;
        if (!isMarriageRank(move.card.rank)) continue;
        move.score = 1;
        if (stringMatch(moveStr, move.toString())) return move;
    }
    return Move();
}

// Reads whitespace-delimited tokens from stdin until one parses as a move.
Move inputMove()
{
    std::string moveStr;
    Move move;
    do
    {
        std::cin >> moveStr;
        move = stringToMove(moveStr);
    }
    while (move.type == M_NONE);
    return move;
}

// Reads moves until a plain card play (M_PLAY with score 0) arrives, and
// returns just its card — used wherever the protocol expects a bare card.
Card inputCard()
{
    Move cardMove;
    do
    {
        cardMove = inputMove();
    }
    while (cardMove.type != M_PLAY || cardMove.score);
    return cardMove.card;
}

// `printing` controls whether each prompt is replaced by an echo of the value
// read, or erased entirely (via moveUpOneLine — presumably a console-cursor
// helper from the project headers; confirm).
ExternalSourceUI::ExternalSourceUI(bool printing):
    printing(printing) {}

// Asks whether this player leads the first trick (0/1 on stdin).
bool ExternalSourceUI::getLeading()
{
    std::cout << std::endl;
    std::cout << "Leading (0/1): ";
    // NOTE(review): if extraction fails, `leading` is read uninitialized —
    // input is assumed to always be a valid 0/1 token.
    bool leading;
    std::cin >> leading;
    moveUpOneLine();
    if (printing)
    {
        if (leading) std::cout << "You are leading" << std::endl;
        else std::cout << "Opponent is leading" << std::endl;
    }
    else moveUpOneLine();
    return leading;
}

// Reads the face-up trump card from stdin.
Card ExternalSourceUI::getTrumpCard()
{
    if (!printing) std::cout << std::endl;
    std::cout << "Trump card: ";
    Card trumpCard = inputCard();
    moveUpOneLine();
    if (printing)
    {
        std::cout << "Trump card: ";
        printCard(trumpCard);
        std::cout << std::endl;
    }
    else moveUpOneLine();
    return trumpCard;
}

// Reads the initial hand (HAND_SIZE cards) from stdin.
std::vector<Card> ExternalSourceUI::getHand()
{
    if (!printing) std::cout << std::endl;
    std::cout << "Hand: ";
    std::vector<Card> hand(HAND_SIZE);
    for (Card& card : hand)
    {
        card = inputCard();
    }
    moveUpOneLine();
    if (printing)
    {
        std::cout << "Hand:";
        for (Card& card : hand)
        {
            std::cout << " ";
            printCard(card);
        }
        std::cout << std::endl;
    }
    else moveUpOneLine();
    return hand;
}

// Reads the card drawn from the stock after a trick.
Card ExternalSourceUI::getDrawnCard()
{
    if (!printing) std::cout << std::endl;
    std::cout << "Drawn card: ";
    Card drawnCard = inputCard();
    moveUpOneLine();
    if (printing)
    {
        std::cout << "Drawn card: ";
        printCard(drawnCard);
        std::cout << std::endl;
    }
    else moveUpOneLine();
    return drawnCard;
}

// Reads the opponent's move from stdin.
Move ExternalSourceUI::getMove()
{
    if (!printing) std::cout << std::endl;
    std::cout << "Opponent's move: ";
    Move move = inputMove();
    moveUpOneLine();
    if (printing)
    {
        std::cout << "Opponent's move: ";
        printMove(move);
        std::cout << std::endl;
    }
    else moveUpOneLine();
    return move;
}

// Announces our own move (output only; no-op unless printing is enabled).
void ExternalSourceUI::giveMove(Move move)
{
    if (!printing) return;
    std::cout << "Your move: ";
    printMove(move);
    std::cout << std::endl;
}
// Copyright 2021 The Tint Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "src/tint/fuzzers/tint_ast_fuzzer/mutator.h"

#include <cassert>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

#include "src/tint/fuzzers/tint_ast_fuzzer/mutation_finders/change_binary_operators.h"
#include "src/tint/fuzzers/tint_ast_fuzzer/mutation_finders/change_unary_operators.h"
#include "src/tint/fuzzers/tint_ast_fuzzer/mutation_finders/replace_identifiers.h"
#include "src/tint/fuzzers/tint_ast_fuzzer/mutation_finders/wrap_unary_operators.h"
#include "src/tint/fuzzers/tint_ast_fuzzer/node_id_map.h"
#include "src/tint/program_builder.h"

namespace tint::fuzzers::ast_fuzzer {
namespace {

// Appends a new finder of type `T` (constructed from `args`) to `finders`,
// either unconditionally (`enable_all_mutations`) or with a coin flip from
// `probability_context`.
template <typename T, typename... Args>
void MaybeAddFinder(bool enable_all_mutations,
                    ProbabilityContext* probability_context,
                    MutationFinderList* finders,
                    Args&&... args) {
    if (enable_all_mutations || probability_context->RandomBool()) {
        finders->push_back(std::make_unique<T>(std::forward<Args>(args)...));
    }
}

// Builds the list of mutation finders for this session. Each known finder is
// included probabilistically (or always, when `enable_all_mutations`); the
// do/while loop guarantees the returned list is never empty.
MutationFinderList CreateMutationFinders(ProbabilityContext* probability_context,
                                         bool enable_all_mutations) {
    MutationFinderList result;
    do {
        MaybeAddFinder<MutationFinderChangeBinaryOperators>(enable_all_mutations, probability_context,
                                                            &result);
        MaybeAddFinder<MutationFinderChangeUnaryOperators>(enable_all_mutations, probability_context,
                                                           &result);
        MaybeAddFinder<MutationFinderReplaceIdentifiers>(enable_all_mutations, probability_context,
                                                         &result);
        MaybeAddFinder<MutationFinderWrapUnaryOperators>(enable_all_mutations, probability_context,
                                                         &result);
    } while (result.empty());
    return result;
}

}  // namespace

// Applies `mutation` to `program` if it is applicable, producing the mutated
// program in `*out_program` and an updated node-id mapping in
// `*out_node_id_map`. When `mutation_sequence` is non-null, the applied
// mutation is also recorded there. Returns false (and leaves the outputs
// untouched) when the mutation is not applicable.
bool MaybeApplyMutation(const tint::Program& program,
                        const Mutation& mutation,
                        const NodeIdMap& node_id_map,
                        tint::Program* out_program,
                        NodeIdMap* out_node_id_map,
                        protobufs::MutationSequence* mutation_sequence) {
    assert(out_program && "`out_program` may not be a nullptr");
    assert(out_node_id_map && "`out_node_id_map` may not be a nullptr");

    if (!mutation.IsApplicable(program, node_id_map)) {
        return false;
    }

    // The mutated `program` will be copied into the `mutated` program builder.
    tint::ProgramBuilder mutated;
    tint::CloneContext clone_context(&mutated, &program);
    NodeIdMap new_node_id_map;
    clone_context.ReplaceAll(
        [&node_id_map, &new_node_id_map, &clone_context](const ast::Node* node) {
            // Make sure all `tint::ast::` nodes' ids are preserved.
            auto* cloned = tint::As<ast::Node>(node->Clone(&clone_context));
            new_node_id_map.Add(cloned, node_id_map.GetId(node));
            return cloned;
        });

    // Register the mutation's replacements on the clone context *before*
    // Clone() runs, so they take effect during the clone below.
    mutation.Apply(node_id_map, &clone_context, &new_node_id_map);
    if (mutation_sequence) {
        *mutation_sequence->add_mutation() = mutation.ToMessage();
    }

    clone_context.Clone();
    *out_program = tint::Program(std::move(mutated));
    *out_node_id_map = std::move(new_node_id_map);
    return true;
}

// Re-applies a previously recorded `mutation_sequence` to `program`, in order.
// Every recorded mutation is expected to be applicable; stops early if a
// mutation produces an invalid program (which indicates a bug in that
// mutation).
tint::Program Replay(tint::Program program, const protobufs::MutationSequence& mutation_sequence) {
    assert(program.IsValid() && "Initial program is invalid");

    NodeIdMap node_id_map(program);
    for (const auto& mutation_message : mutation_sequence.mutation()) {
        auto mutation = Mutation::FromMessage(mutation_message);
        auto status = MaybeApplyMutation(program, *mutation, node_id_map, &program, &node_id_map,
                                         nullptr);
        (void)status;  // `status` will be unused in release mode.
        assert(status && "`mutation` is inapplicable - it's most likely a bug");
        if (!program.IsValid()) {
            // `mutation` has a bug.
            break;
        }
    }
    return program;
}

// Randomly mutates `program` up to `max_applied_mutations` times, optionally
// recording each applied mutation in `mutation_sequence`. Stops early if the
// program becomes invalid or no mutation could be applied for several
// consecutive rounds.
tint::Program Mutate(tint::Program program,
                     ProbabilityContext* probability_context,
                     bool enable_all_mutations,
                     uint32_t max_applied_mutations,
                     protobufs::MutationSequence* mutation_sequence) {
    assert(max_applied_mutations != 0 && "Maximum number of mutations is invalid");
    assert(program.IsValid() && "Initial program is invalid");

    // The number of allowed failed attempts to apply mutations. If this number is
    // exceeded, the mutator is considered stuck and the mutation session is
    // stopped.
    const uint32_t kMaxFailureToApply = 10;

    auto finders = CreateMutationFinders(probability_context, enable_all_mutations);
    NodeIdMap node_id_map(program);

    // Total number of applied mutations during this call to `Mutate`.
    uint32_t applied_mutations = 0;

    // The number of consecutively failed attempts to apply mutations.
    uint32_t failure_to_apply = 0;

    // Apply mutations as long as the `program` is valid, the limit on the number
    // of mutations is not reached and the mutator is not stuck (i.e. unable to
    // apply any mutations for some time).
    while (program.IsValid() && applied_mutations < max_applied_mutations &&
           failure_to_apply < kMaxFailureToApply) {
        // Get all applicable mutations from some mutation finder.
        const auto& mutation_finder = finders[probability_context->GetRandomIndex(finders)];
        auto mutations = mutation_finder->FindMutations(program, &node_id_map, probability_context);

        const auto old_applied_mutations = applied_mutations;
        for (const auto& mutation : mutations) {
            if (!probability_context->ChoosePercentage(
                    mutation_finder->GetChanceOfApplyingMutation(probability_context))) {
                // Skip this `mutation` probabilistically.
                continue;
            }

            if (!MaybeApplyMutation(program, *mutation, node_id_map, &program, &node_id_map,
                                    mutation_sequence)) {
                // This `mutation` is inapplicable. This may happen if some of the
                // earlier mutations cancelled this one.
                continue;
            }

            applied_mutations++;
            if (!program.IsValid()) {
                // This `mutation` has a bug.
                return program;
            }
        }

        if (old_applied_mutations == applied_mutations) {
            // No mutation was applied. Increase the counter to prevent an infinite
            // loop.
            failure_to_apply++;
        } else {
            failure_to_apply = 0;
        }
    }

    return program;
}

}  // namespace tint::fuzzers::ast_fuzzer
#pragma once #include "Game/EntityDefinition.hpp" #include "Engine/Core/XmlUtils.hpp" #include <map> #include <vector> class Texture; class RenderContext; class EnvironmentEntityDefinition; class CutsceneDefinition; class MapDefinition { public: static std::map< std::string, MapDefinition* > s_definitionMap; static void LoadDefinitions( RenderContext* context, const std::string& deinitionsXmlFilePath ); static MapDefinition* GetDefinitions( const std::string& mapName ); static Strings GetAllMapNames(); static const std::vector<MapDefinition*> GetAllMapDefs(); public: MapDefinition( RenderContext* context, const XmlElement& mapDefElement, const std::map<char, std::string>& legendMap ); ~MapDefinition(); static void InitialLegend( std::map<char, std::string>& out, const XmlElement& legendElement ); public: std::string m_name = "UNNAMED"; std::string m_levelName = "UNNAMED"; std::string m_nextLevelName = ""; IntVec2 m_dimensions = IntVec2( 10, 10 ); IntVec2 m_playerStartIndex = IntVec2::ZERO; IntVec2 m_petStartIndex = IntVec2(2,2); Vec2 m_mapStartPoint = Vec2::ZERO; Texture* m_backgroundImage = nullptr; int m_startAbilityLimitNumber = 5; std::vector<EnvironmentEntityDefinition*> m_environmentEntities; CutsceneDefinition* m_beforeLevelCutscene = nullptr; CutsceneDefinition* m_afterLevelCutscene = nullptr; };
// // Author: Vladimir Migashko <migashko@gmail.com>, (C) 2013-2015 // // Copyright: See COPYING file that comes with this distribution // #include "logger_singleton.hpp" #include "logger_config_json.hpp" #include "logger.hpp" #include <wfc/module/singleton.hpp> #include <wfc/module/instance.hpp> #include <wfc/name.hpp> namespace wfc{ namespace core{ namespace { WFC_NAME2(singleton_name, "logger") class impl: public ::wfc::singleton < singleton_name, ::wfc::instance<logger>, logger_config_json, component_features::DisabledWorkflow | component_features::DisabledSuspend > { public: virtual std::string interface_name() const override { return std::string("wfc::ilogger"); } virtual std::string description() const override { return std::string("Logging system"); } }; } logger_singleton::logger_singleton() : component( std::make_shared<impl>() ) {} }}
#include <iostream>
#include <optional>
#include <vector>

namespace {

// Moves the last element of `vec` into `item` and shrinks `vec`.
// Returns false (leaving `item` untouched) when `vec` is empty.
// Improvements over the original: empty() instead of size() == 0, and a
// qualified std::move (the original's bare `move` relied on a file-scope
// `using namespace std;`, now removed).
template <class T>
bool pop_if_exist(std::vector<T>& vec, T& item)
{
    if (vec.empty())
        return false;
    item = std::move(vec.back());
    vec.pop_back();
    return true;
}

// Overload returning the popped element directly, or std::nullopt when
// `vec` is empty.
template <class T>
std::optional<T> pop_if_exist(std::vector<T>& vec)
{
    if (vec.empty())
        return std::nullopt;
    std::optional<T> popped(std::move(vec.back()));
    vec.pop_back();
    return popped;
}

}  // namespace

int main()
{
    // Out-parameter style: prints 1, 2, 3 (back to front).
    std::vector<int> vec1({3, 2, 1});
    for (int item; pop_if_exist(vec1, item);)
        std::cout << item << '\n';  // '\n' avoids the per-line flush of endl

    // optional-returning style: same output.
    std::vector<int> vec2({3, 2, 1});
    while (auto item = pop_if_exist(vec2))
        std::cout << item.value() << '\n';
}