label
int64
0
1
text
stringlengths
0
20.4M
1
void DXVAVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream_buffer) {DCHECK(CalledOnValidThread());RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),"Invalid state: " << state_, ILLEGAL_STATE,);base::win::ScopedComPtr<IMFSample> sample;sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer, renderer_process_,input_stream_info_.cbSize,input_stream_info_.cbAlignment));RETURN_AND_NOTIFY_ON_FAILURE(sample, "Failed to create input sample",PLATFORM_FAILURE,);if (!inputs_before_decode_) {TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");}inputs_before_decode_++;std::string variable_6027488484618915139 ("k"); RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),"Failed to create input sample", PLATFORM_FAILURE,);HRESULT hr = decoder_->ProcessInput(0, sample, 0);RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",PLATFORM_FAILURE,);RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0),"Failed to send eos message to MFT", PLATFORM_FAILURE,);state_ = kEosDrain;last_input_buffer_id_ = bitstream_buffer.id();DoDecode();RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),"Failed to process output. Unexpected decoder state: " << state_,ILLEGAL_STATE,);// The Microsoft Media foundation decoder internally buffers up to 30 frames// before returning a decoded frame. We need to inform the client that this// input buffer is processed as it may stop sending us further input.// Note: This may break clients which expect every input buffer to be// associated with a decoded output buffer.// TODO(ananta)// Do some more investigation into whether it is possible to get the MFT// decoder to emit an output packet for every input packet.// http://code.google.com/p/chromium/issues/detail?id=108121MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::NotifyInputBufferRead, this,bitstream_buffer.id()));}
0
/* * Copyright (c) 2010 Alexander Strange <astrange@ithinksw.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVUTIL_X86_INTREADWRITE_H #define AVUTIL_X86_INTREADWRITE_H #include <stdint.h> #include "config.h" #include "libavutil/attributes.h" #if HAVE_MMX #if !HAVE_FAST_64BIT && defined(__MMX__) #define AV_COPY64 AV_COPY64 static av_always_inline void AV_COPY64(void *d, const void *s) { __asm__("movq %1, %%mm0 \n\t" "movq %%mm0, %0 \n\t" : "=m"(*(uint64_t*)d) : "m" (*(const uint64_t*)s) : "mm0"); } #define AV_SWAP64 AV_SWAP64 static av_always_inline void AV_SWAP64(void *a, void *b) { __asm__("movq %1, %%mm0 \n\t" "movq %0, %%mm1 \n\t" "movq %%mm0, %0 \n\t" "movq %%mm1, %1 \n\t" : "+m"(*(uint64_t*)a), "+m"(*(uint64_t*)b) ::"mm0", "mm1"); } #define AV_ZERO64 AV_ZERO64 static av_always_inline void AV_ZERO64(void *d) { __asm__("pxor %%mm0, %%mm0 \n\t" "movq %%mm0, %0 \n\t" : "=m"(*(uint64_t*)d) :: "mm0"); } #endif /* !HAVE_FAST_64BIT && defined(__MMX__) */ #ifdef __SSE__ #define AV_COPY128 AV_COPY128 static av_always_inline void AV_COPY128(void *d, const void *s) { struct v {uint64_t v[2];}; __asm__("movaps %1, %%xmm0 \n\t" "movaps %%xmm0, %0 \n\t" : "=m"(*(struct v*)d) : "m" (*(const struct v*)s) : "xmm0"); } #endif /* __SSE__ */ #ifdef __SSE2__ #define AV_ZERO128 AV_ZERO128 static av_always_inline void AV_ZERO128(void *d) { struct v {uint64_t v[2];}; __asm__("pxor %%xmm0, %%xmm0 \n\t" "movdqa %%xmm0, %0 \n\t" : "=m"(*(struct v*)d) :: "xmm0"); } #endif /* __SSE2__ */ #endif /* HAVE_MMX */ #endif /* AVUTIL_X86_INTREADWRITE_H */
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "extensions/browser/api/declarative/rules_registry.h" // Here we test the TestRulesRegistry which is the simplest possible // implementation of RulesRegistryWithCache as a proxy for // RulesRegistryWithCache. #include <memory> #include "base/command_line.h" #include "chrome/browser/extensions/extension_service.h" #include "chrome/browser/extensions/test_extension_environment.h" #include "chrome/browser/extensions/test_extension_system.h" #include "chrome/common/extensions/extension_test_util.h" #include "chrome/test/base/testing_profile.h" #include "components/version_info/version_info.h" #include "content/public/test/test_browser_thread_bundle.h" #include "content/public/test/test_utils.h" #include "extensions/browser/api/declarative/rules_cache_delegate.h" #include "extensions/browser/api/declarative/rules_registry_service.h" #include "extensions/browser/api/declarative/test_rules_registry.h" #include "extensions/browser/extension_prefs.h" #include "extensions/browser/extension_registry.h" #include "extensions/browser/value_store/testing_value_store.h" #include "extensions/common/extension.h" #include "extensions/common/features/feature_channel.h" #include "extensions/common/manifest_constants.h" #include "extensions/common/permissions/permissions_data.h" #include "testing/gtest/include/gtest/gtest.h" using extension_test_util::LoadManifestUnchecked; namespace { const char kRuleId[] = "rule"; const char kRule2Id[] = "rule2"; } namespace extensions { const int kRulesRegistryID = RulesRegistryService::kDefaultRulesRegistryID; class RulesRegistryWithCacheTest : public testing::Test { public: RulesRegistryWithCacheTest() : cache_delegate_(RulesCacheDelegate::Type::kPersistent, /*log_storage_init_delay=*/false), registry_(new TestRulesRegistry(profile(), /*event_name=*/"", content::BrowserThread::UI, &cache_delegate_, kRulesRegistryID)) {} void SetUp() override { // Note that env_.MakeExtension below also forces the creation of // ExtensionService. base::DictionaryValue manifest_extra; std::string key; CHECK(Extension::ProducePEM("test extension 1", &key)); manifest_extra.SetString(manifest_keys::kPublicKey, key); extension1_ = env_.MakeExtension(manifest_extra); CHECK(extension1_.get()); // Different "key" values for the two extensions ensure a different ID. CHECK(Extension::ProducePEM("test extension 2", &key)); manifest_extra.SetString(manifest_keys::kPublicKey, key); extension2_ = env_.MakeExtension(manifest_extra); CHECK(extension2_.get()); CHECK_NE(extension2_->id(), extension1_->id()); } ~RulesRegistryWithCacheTest() override {} std::string AddRule(const std::string& extension_id, const std::string& rule_id, TestRulesRegistry* registry) { std::vector<linked_ptr<api::events::Rule>> add_rules; add_rules.push_back(make_linked_ptr(new api::events::Rule)); add_rules[0]->id.reset(new std::string(rule_id)); return registry->AddRules(extension_id, add_rules); } std::string AddRule(const std::string& extension_id, const std::string& rule_id) { return AddRule(extension_id, rule_id, registry_.get()); } std::string RemoveRule(const std::string& extension_id, const std::string& rule_id) { std::vector<std::string> remove_rules; remove_rules.push_back(rule_id); return registry_->RemoveRules(extension_id, remove_rules); } int GetNumberOfRules(const std::string& extension_id, TestRulesRegistry* registry) { std::vector<linked_ptr<api::events::Rule>> get_rules; registry->GetAllRules(extension_id, &get_rules); return get_rules.size(); } int GetNumberOfRules(const std::string& extension_id) { return GetNumberOfRules(extension_id, registry_.get()); } TestingProfile* profile() const { return env_.profile(); } protected: TestExtensionEnvironment env_; RulesCacheDelegate cache_delegate_; scoped_refptr<TestRulesRegistry> registry_; scoped_refptr<const Extension> extension1_; scoped_refptr<const Extension> extension2_; }; TEST_F(RulesRegistryWithCacheTest, AddRules) { // Check that nothing happens if the concrete RulesRegistry refuses to insert // the rules. registry_->SetResult("Error"); EXPECT_EQ("Error", AddRule(extension1_->id(), kRuleId)); EXPECT_EQ(0, GetNumberOfRules(extension1_->id())); registry_->SetResult(std::string()); // Check that rules can be inserted. EXPECT_EQ("", AddRule(extension1_->id(), kRule2Id)); EXPECT_EQ(1, GetNumberOfRules(extension1_->id())); // Check that rules cannot be inserted twice with the same kRuleId. EXPECT_NE("", AddRule(extension1_->id(), kRuleId)); EXPECT_EQ(1, GetNumberOfRules(extension1_->id())); // Check that different extensions may use the same kRuleId. EXPECT_EQ("", AddRule(extension2_->id(), kRuleId)); EXPECT_EQ(1, GetNumberOfRules(extension1_->id())); EXPECT_EQ(1, GetNumberOfRules(extension2_->id())); } TEST_F(RulesRegistryWithCacheTest, RemoveRules) { // Prime registry. EXPECT_EQ("", AddRule(extension1_->id(), kRuleId)); EXPECT_EQ("", AddRule(extension2_->id(), kRuleId)); EXPECT_EQ(1, GetNumberOfRules(extension1_->id())); EXPECT_EQ(1, GetNumberOfRules(extension2_->id())); // Check that nothing happens if the concrete RuleRegistry refuses to remove // the rules. registry_->SetResult("Error"); EXPECT_EQ("Error", RemoveRule(extension1_->id(), kRuleId)); EXPECT_EQ(1, GetNumberOfRules(extension1_->id())); registry_->SetResult(std::string()); // Check that nothing happens if a rule does not exist. EXPECT_EQ("", RemoveRule(extension1_->id(), "unknown_rule")); EXPECT_EQ(1, GetNumberOfRules(extension1_->id())); // Check that rules may be removed and only for the correct extension. EXPECT_EQ("", RemoveRule(extension1_->id(), kRuleId)); EXPECT_EQ(0, GetNumberOfRules(extension1_->id())); EXPECT_EQ(1, GetNumberOfRules(extension2_->id())); } TEST_F(RulesRegistryWithCacheTest, RemoveAllRules) { // Prime registry. EXPECT_EQ("", AddRule(extension1_->id(), kRuleId)); EXPECT_EQ("", AddRule(extension1_->id(), kRule2Id)); EXPECT_EQ("", AddRule(extension2_->id(), kRuleId)); EXPECT_EQ(2, GetNumberOfRules(extension1_->id())); EXPECT_EQ(1, GetNumberOfRules(extension2_->id())); // Check that nothing happens if the concrete RuleRegistry refuses to remove // the rules. registry_->SetResult("Error"); EXPECT_EQ("Error", registry_->RemoveAllRules(extension1_->id())); EXPECT_EQ(2, GetNumberOfRules(extension1_->id())); registry_->SetResult(std::string()); // Check that rules may be removed and only for the correct extension. EXPECT_EQ("", registry_->RemoveAllRules(extension1_->id())); EXPECT_EQ(0, GetNumberOfRules(extension1_->id())); EXPECT_EQ(1, GetNumberOfRules(extension2_->id())); } TEST_F(RulesRegistryWithCacheTest, GetRules) { // Prime registry. EXPECT_EQ("", AddRule(extension1_->id(), kRuleId)); EXPECT_EQ("", AddRule(extension1_->id(), kRule2Id)); EXPECT_EQ("", AddRule(extension2_->id(), kRuleId)); // Check that we get the correct rule and unknown rules are ignored. std::vector<std::string> rules_to_get; rules_to_get.push_back(kRuleId); rules_to_get.push_back("unknown_rule"); std::vector<linked_ptr<api::events::Rule>> gotten_rules; registry_->GetRules(extension1_->id(), rules_to_get, &gotten_rules); ASSERT_EQ(1u, gotten_rules.size()); ASSERT_TRUE(gotten_rules[0]->id.get()); EXPECT_EQ(kRuleId, *(gotten_rules[0]->id)); } TEST_F(RulesRegistryWithCacheTest, GetAllRules) { // Prime registry. EXPECT_EQ("", AddRule(extension1_->id(), kRuleId)); EXPECT_EQ("", AddRule(extension1_->id(), kRule2Id)); EXPECT_EQ("", AddRule(extension2_->id(), kRuleId)); // Check that we get the correct rules. std::vector<linked_ptr<api::events::Rule>> gotten_rules; registry_->GetAllRules(extension1_->id(), &gotten_rules); EXPECT_EQ(2u, gotten_rules.size()); ASSERT_TRUE(gotten_rules[0]->id.get()); ASSERT_TRUE(gotten_rules[1]->id.get()); EXPECT_TRUE((kRuleId == *(gotten_rules[0]->id) && kRule2Id == *(gotten_rules[1]->id)) || (kRuleId == *(gotten_rules[1]->id) && kRule2Id == *(gotten_rules[0]->id)) ); } TEST_F(RulesRegistryWithCacheTest, OnExtensionUninstalled) { // Prime registry. EXPECT_EQ("", AddRule(extension1_->id(), kRuleId)); EXPECT_EQ("", AddRule(extension2_->id(), kRuleId)); // Check that the correct rules are removed. registry_->OnExtensionUninstalled(extension1_.get()); EXPECT_EQ(0, GetNumberOfRules(extension1_->id())); EXPECT_EQ(1, GetNumberOfRules(extension2_->id())); } TEST_F(RulesRegistryWithCacheTest, DeclarativeRulesStored) { ExtensionPrefs* extension_prefs = env_.GetExtensionPrefs(); const std::string event_name("testEvent"); const std::string rules_stored_key( RulesCacheDelegate::GetRulesStoredKey( event_name, profile()->IsOffTheRecord())); auto cache_delegate = std::make_unique<RulesCacheDelegate>( RulesCacheDelegate::Type::kPersistent, false); scoped_refptr<RulesRegistry> registry( new TestRulesRegistry(profile(), event_name, content::BrowserThread::UI, cache_delegate.get(), kRulesRegistryID)); // 1. Test the handling of preferences. // Default value is always true. EXPECT_TRUE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); extension_prefs->UpdateExtensionPref(extension1_->id(), rules_stored_key, std::make_unique<base::Value>(false)); EXPECT_FALSE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); extension_prefs->UpdateExtensionPref(extension1_->id(), rules_stored_key, std::make_unique<base::Value>(true)); EXPECT_TRUE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); // 2. Test writing behavior. { base::Value value(base::Value::Type::LIST); value.GetList().push_back(base::Value(true)); cache_delegate->UpdateRules(extension1_->id(), std::move(value)); } EXPECT_TRUE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); content::RunAllTasksUntilIdle(); TestingValueStore* store = env_.GetExtensionSystem()->value_store(); ASSERT_TRUE(store); EXPECT_EQ(1, store->write_count()); int write_count = store->write_count(); { base::Value value = base::Value(base::Value::Type::LIST); cache_delegate->UpdateRules(extension1_->id(), std::move(value)); EXPECT_FALSE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); } content::RunAllTasksUntilIdle(); // No rules currently, but previously there were, so we expect a write. EXPECT_EQ(write_count + 1, store->write_count()); write_count = store->write_count(); { base::Value value = base::Value(base::Value::Type::LIST); cache_delegate->UpdateRules(extension1_->id(), std::move(value)); EXPECT_FALSE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); } content::RunAllTasksUntilIdle(); EXPECT_EQ(write_count, store->write_count()); // 3. Test reading behavior. int read_count = store->read_count(); cache_delegate->SetDeclarativeRulesStored(extension1_->id(), false); cache_delegate->ReadFromStorage(extension1_->id()); content::RunAllTasksUntilIdle(); EXPECT_EQ(read_count, store->read_count()); read_count = store->read_count(); cache_delegate->SetDeclarativeRulesStored(extension1_->id(), true); cache_delegate->ReadFromStorage(extension1_->id()); content::RunAllTasksUntilIdle(); EXPECT_EQ(read_count + 1, store->read_count()); } TEST_F(RulesRegistryWithCacheTest, EphemeralCacheIsEphemeral) { auto cache_delegate = std::make_unique<RulesCacheDelegate>( RulesCacheDelegate::Type::kEphemeral, false); base::Value value(base::Value::Type::LIST); value.GetList().push_back(base::Value(true)); cache_delegate->UpdateRules(extension1_->id(), std::move(value)); content::RunAllTasksUntilIdle(); TestingValueStore* store = env_.GetExtensionSystem()->value_store(); ASSERT_TRUE(store); EXPECT_EQ(0, store->write_count()); } // Test that each registry has its own "are some rules stored" flag. TEST_F(RulesRegistryWithCacheTest, RulesStoredFlagMultipleRegistries) { ExtensionPrefs* extension_prefs = env_.GetExtensionPrefs(); const std::string event_name1("testEvent1"); const std::string event_name2("testEvent2"); const std::string rules_stored_key1( RulesCacheDelegate::GetRulesStoredKey( event_name1, profile()->IsOffTheRecord())); const std::string rules_stored_key2( RulesCacheDelegate::GetRulesStoredKey( event_name2, profile()->IsOffTheRecord())); auto cache_delegate1 = std::make_unique<RulesCacheDelegate>( RulesCacheDelegate::Type::kPersistent, false); scoped_refptr<RulesRegistry> registry1( new TestRulesRegistry(profile(), event_name1, content::BrowserThread::UI, cache_delegate1.get(), kRulesRegistryID)); auto cache_delegate2 = std::make_unique<RulesCacheDelegate>( RulesCacheDelegate::Type::kPersistent, false); scoped_refptr<RulesRegistry> registry2( new TestRulesRegistry(profile(), event_name2, content::BrowserThread::UI, cache_delegate2.get(), kRulesRegistryID)); // Checkt the correct default values. EXPECT_TRUE(cache_delegate1->GetDeclarativeRulesStored(extension1_->id())); EXPECT_TRUE(cache_delegate2->GetDeclarativeRulesStored(extension1_->id())); // Update the flag for the first registry. extension_prefs->UpdateExtensionPref(extension1_->id(), rules_stored_key1, std::make_unique<base::Value>(false)); EXPECT_FALSE(cache_delegate1->GetDeclarativeRulesStored(extension1_->id())); EXPECT_TRUE(cache_delegate2->GetDeclarativeRulesStored(extension1_->id())); } TEST_F(RulesRegistryWithCacheTest, RulesPreservedAcrossRestart) { // This test makes sure that rules are restored from the rule store // on registry (in particular, browser) restart. // TODO(vabr): Once some API using declarative rules enters the stable // channel, make sure to use that API here, and remove |channel|. ScopedCurrentChannel channel(version_info::Channel::UNKNOWN); ExtensionService* extension_service = env_.GetExtensionService(); // 1. Add an extension, before rules registry gets created. std::string error; scoped_refptr<Extension> extension(LoadManifestUnchecked( "permissions", "web_request_all_host_permissions.json", Manifest::UNPACKED, Extension::NO_FLAGS, extension1_->id(), &error)); ASSERT_TRUE(error.empty()); extension_service->AddExtension(extension.get()); EXPECT_TRUE(extensions::ExtensionRegistry::Get(env_.profile()) ->enabled_extensions() .Contains(extension->id())); EXPECT_TRUE(extension->permissions_data()->HasAPIPermission( APIPermission::kDeclarativeWebRequest)); env_.GetExtensionSystem()->SetReady(); // 2. First run, adding a rule for the extension. auto cache_delegate = std::make_unique<RulesCacheDelegate>( RulesCacheDelegate::Type::kPersistent, false); scoped_refptr<TestRulesRegistry> registry( new TestRulesRegistry(profile(), "testEvent", content::BrowserThread::UI, cache_delegate.get(), kRulesRegistryID)); AddRule(extension1_->id(), kRuleId, registry.get()); // Posted tasks store the added rule. content::RunAllTasksUntilIdle(); EXPECT_EQ(1, GetNumberOfRules(extension1_->id(), registry.get())); // 3. Restart the TestRulesRegistry and see the rule still there. cache_delegate = std::make_unique<RulesCacheDelegate>( RulesCacheDelegate::Type::kPersistent, false); registry = new TestRulesRegistry(profile(), "testEvent", content::BrowserThread::UI, cache_delegate.get(), kRulesRegistryID); // Posted tasks retrieve the stored rule. content::RunAllTasksUntilIdle(); EXPECT_EQ(1, GetNumberOfRules(extension1_->id(), registry.get())); } TEST_F(RulesRegistryWithCacheTest, ConcurrentStoringOfRules) { // When an extension updates its rules, the new set of rules is stored to disk // with some delay. While it is acceptable for a quick series of updates for a // single extension to only write the last one, we should never forget to // write a rules update for extension A, just because it is immediately // followed by a rules update for extension B. extensions::TestExtensionSystem* system = env_.GetExtensionSystem(); int write_count = 0; EXPECT_EQ("", AddRule(extension1_->id(), kRuleId)); EXPECT_EQ("", AddRule(extension2_->id(), kRule2Id)); env_.GetExtensionSystem()->SetReady(); content::RunAllTasksUntilIdle(); EXPECT_EQ(write_count + 2, system->value_store()->write_count()); } } // namespace extensions
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_DRAG_DROP_DRAG_DROP_CONTROLLER_H_ #define ASH_DRAG_DROP_DRAG_DROP_CONTROLLER_H_ #include <memory> #include "ash/ash_export.h" #include "ash/display/window_tree_host_manager.h" #include "base/callback.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "base/observer_list.h" #include "base/time/time.h" #include "ui/aura/client/drag_drop_client.h" #include "ui/aura/window_observer.h" #include "ui/base/dragdrop/os_exchange_data.h" #include "ui/events/event_constants.h" #include "ui/events/event_handler.h" #include "ui/gfx/animation/animation_delegate.h" #include "ui/gfx/geometry/rect.h" namespace gfx { class LinearAnimation; } namespace ui { class LocatedEvent; } namespace ash { class DragDropTracker; class DragDropTrackerDelegate; class DragImageView; class ASH_EXPORT DragDropController : public aura::client::DragDropClient, public ui::EventHandler, public gfx::AnimationDelegate, public aura::WindowObserver, public WindowTreeHostManager::Observer { public: DragDropController(); ~DragDropController() override; void set_should_block_during_drag_drop(bool should_block_during_drag_drop) { should_block_during_drag_drop_ = should_block_during_drag_drop; } // Overridden from aura::client::DragDropClient: int StartDragAndDrop(const ui::OSExchangeData& data, aura::Window* root_window, aura::Window* source_window, const gfx::Point& screen_location, int operation, ui::DragDropTypes::DragEventSource source) override; void DragCancel() override; bool IsDragDropInProgress() override; void AddObserver(aura::client::DragDropClientObserver* observer) override; void RemoveObserver(aura::client::DragDropClientObserver* observer) override; // Overridden from ui::EventHandler: void OnKeyEvent(ui::KeyEvent* event) override; void OnMouseEvent(ui::MouseEvent* event) override; void OnTouchEvent(ui::TouchEvent* event) override; void OnGestureEvent(ui::GestureEvent* event) override; // Overridden from aura::WindowObserver. void OnWindowDestroyed(aura::Window* window) override; protected: // Helper method to create a LinearAnimation object that will run the drag // cancel animation. Caller take ownership of the returned object. Protected // for testing. virtual gfx::LinearAnimation* CreateCancelAnimation( base::TimeDelta duration, int frame_rate, gfx::AnimationDelegate* delegate); // Exposed for tests to override. virtual void DragUpdate(aura::Window* target, const ui::LocatedEvent& event); virtual void Drop(aura::Window* target, const ui::LocatedEvent& event); // Actual implementation of |DragCancel()|. protected for testing. virtual void DoDragCancel(base::TimeDelta drag_cancel_animation_duration); private: friend class DragDropControllerTest; // Overridden from gfx::AnimationDelegate: void AnimationEnded(const gfx::Animation* animation) override; void AnimationProgressed(const gfx::Animation* animation) override; void AnimationCanceled(const gfx::Animation* animation) override; // WindowTreeHostManager::Observer: void OnDisplayConfigurationChanging() override; // Helper method to start drag widget flying back animation. void StartCanceledAnimation(base::TimeDelta animation_duration); // Helper method to forward |pending_log_tap_| event to |drag_source_window_|. void ForwardPendingLongTap(); // Helper method to reset everything. void Cleanup(); std::unique_ptr<DragImageView> drag_image_; gfx::Vector2d drag_image_offset_; const ui::OSExchangeData* drag_data_; int drag_operation_; // Window that is currently under the drag cursor. aura::Window* drag_window_; // Starting and final bounds for the drag image for the drag cancel animation. gfx::Rect drag_image_initial_bounds_for_cancel_animation_; gfx::Rect drag_image_final_bounds_for_cancel_animation_; std::unique_ptr<gfx::LinearAnimation> cancel_animation_; // Window that started the drag. aura::Window* drag_source_window_; // Indicates whether the caller should be blocked on a drag/drop session. // Only be used for tests. bool should_block_during_drag_drop_; // Closure for quitting nested run loop. base::Closure quit_closure_; std::unique_ptr<ash::DragDropTracker> drag_drop_tracker_; std::unique_ptr<DragDropTrackerDelegate> drag_drop_window_delegate_; ui::DragDropTypes::DragEventSource current_drag_event_source_; // Holds a synthetic long tap event to be sent to the |drag_source_window_|. // See comment in OnGestureEvent() on why we need this. std::unique_ptr<ui::GestureEvent> pending_long_tap_; base::ObserverList<aura::client::DragDropClientObserver> observers_; base::WeakPtrFactory<DragDropController> weak_factory_; DISALLOW_COPY_AND_ASSIGN(DragDropController); }; } // namespace ash #endif // ASH_DRAG_DROP_DRAG_DROP_CONTROLLER_H_
1
static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d){ u8 obuf[] = { 0x51 }; u8 ibuf[] = { 0 }; if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)err("command 0x51 transfer failed."); d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL)return -EIO; if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,&dw2104_ts2020_config, &d->dev->i2c_adap)) {info("Attached RS2000/TS2020!");float variable_2447972087638374593 = 1497969573611488335; float variable_3552606941314605185 = 5035281602753037301;variable_3552606941314605185 = variable_3552606941314605185 * 1;double variable_8656729908496855395 = 1906139369003785556; double variable_5200607053351876964 = variable_8656729908496855395 - 4731118245035340505; while (variable_2447972087638374593 > 0) { variable_3552606941314605185--; }return 0;}info("Failed to attach RS2000/TS2020!");return -EIO;}
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chrome_content_browser_client.h" #include "base/command_line.h" #include "base/macros.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/search/instant_service.h" #include "chrome/browser/search/instant_service_factory.h" #include "chrome/browser/search/search.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/search/instant_test_base.h" #include "chrome/browser/ui/tabs/tab_strip_model.h" #include "chrome/common/chrome_switches.h" #include "chrome/common/url_constants.h" #include "chrome/test/base/in_process_browser_test.h" #include "chrome/test/base/ui_test_utils.h" #include "components/network_session_configurator/common/network_switches.h" #include "content/public/browser/navigation_controller.h" #include "content/public/browser/navigation_entry.h" #include "content/public/browser/render_frame_host.h" #include "content/public/browser/render_process_host.h" #include "content/public/browser/web_contents.h" #include "content/public/common/content_switches.h" #include "content/public/test/browser_test_utils.h" #include "net/dns/mock_host_resolver.h" #include "net/test/embedded_test_server/embedded_test_server.h" #include "url/gurl.h" namespace content { // Use a test class with SetUpCommandLine to ensure the flag is sent to the // first renderer process. class ChromeContentBrowserClientBrowserTest : public InProcessBrowserTest { public: ChromeContentBrowserClientBrowserTest() {} void SetUpCommandLine(base::CommandLine* command_line) override { IsolateAllSitesForTesting(command_line); } private: DISALLOW_COPY_AND_ASSIGN(ChromeContentBrowserClientBrowserTest); }; // Test that a basic navigation works in --site-per-process mode. This prevents // regressions when that mode calls out into the ChromeContentBrowserClient, // such as http://crbug.com/164223. IN_PROC_BROWSER_TEST_F(ChromeContentBrowserClientBrowserTest, SitePerProcessNavigation) { ASSERT_TRUE(embedded_test_server()->Start()); const GURL url(embedded_test_server()->GetURL("/title1.html")); ui_test_utils::NavigateToURL(browser(), url); NavigationEntry* entry = browser() ->tab_strip_model() ->GetWebContentsAt(0) ->GetController() .GetLastCommittedEntry(); ASSERT_TRUE(entry != NULL); EXPECT_EQ(url, entry->GetURL()); EXPECT_EQ(url, entry->GetVirtualURL()); } // Helper class to mark "https://ntp.com/" as an isolated origin. class IsolatedOriginNTPBrowserTest : public InProcessBrowserTest, public InstantTestBase { public: IsolatedOriginNTPBrowserTest() {} void SetUpCommandLine(base::CommandLine* command_line) override { ASSERT_TRUE(https_test_server().InitializeAndListen()); // Mark ntp.com (with an appropriate port from the test server) as an // isolated origin. GURL isolated_url(https_test_server().GetURL("ntp.com", "/")); command_line->AppendSwitchASCII(switches::kIsolateOrigins, isolated_url.spec()); command_line->AppendSwitch(switches::kIgnoreCertificateErrors); } void SetUpOnMainThread() override { InProcessBrowserTest::SetUpOnMainThread(); host_resolver()->AddRule("*", "127.0.0.1"); https_test_server().StartAcceptingConnections(); } private: DISALLOW_COPY_AND_ASSIGN(IsolatedOriginNTPBrowserTest); }; // Verifies that when the remote NTP URL has an origin which is also marked as // an isolated origin (i.e., requiring a dedicated process), the NTP URL // still loads successfully, and the resulting process is marked as an Instant // process. See https://crbug.com/755595. IN_PROC_BROWSER_TEST_F(IsolatedOriginNTPBrowserTest, IsolatedOriginDoesNotInterfereWithNTP) { GURL base_url = https_test_server().GetURL("ntp.com", "/instant_extended.html"); GURL ntp_url = https_test_server().GetURL("ntp.com", "/instant_extended_ntp.html"); InstantTestBase::Init(base_url, ntp_url, false); SetupInstant(browser()); // Sanity check that a SiteInstance for a generic ntp.com URL requires a // dedicated process. content::BrowserContext* context = browser()->profile(); GURL isolated_url(https_test_server().GetURL("ntp.com", "/title1.html")); scoped_refptr<SiteInstance> site_instance = SiteInstance::CreateForURL(context, isolated_url); EXPECT_TRUE(site_instance->RequiresDedicatedProcess()); // The site URL for the NTP URL should resolve to a chrome-search:// URL via // GetEffectiveURL(), even if the NTP URL matches an isolated origin. GURL site_url(content::SiteInstance::GetSiteForURL(context, ntp_url)); EXPECT_TRUE(site_url.SchemeIs(chrome::kChromeSearchScheme)); // Navigate to the NTP URL and verify that the resulting process is marked as // an Instant process. ui_test_utils::NavigateToURL(browser(), ntp_url); content::WebContents* contents = browser()->tab_strip_model()->GetActiveWebContents(); InstantService* instant_service = InstantServiceFactory::GetForProfile(browser()->profile()); EXPECT_TRUE(instant_service->IsInstantProcess( contents->GetMainFrame()->GetProcess()->GetID())); // Navigating to a non-NTP URL on ntp.com should not result in an Instant // process. ui_test_utils::NavigateToURL(browser(), isolated_url); EXPECT_FALSE(instant_service->IsInstantProcess( contents->GetMainFrame()->GetProcess()->GetID())); } } // namespace content
1
void altivec_unavailable_exception(struct pt_regs *regs){#if !defined(CONFIG_ALTIVEC)long int variable_7001233872578153977 = -955221680131792757 - (-6230813440437955626);int variable_5651187308666888250 = 137477694104675360;int variable_6567145001980373934 = 8838457085135725156; if (variable_6567145001980373934 < 0) {double variable_4246306289296241103 = 8797811153588378127 * (-530245766193347411); variable_6567145001980373934 = -5335664143095220394;} int variable_8400161041222941990 = (6765951660260204922) * variable_5651187308666888250;if (user_mode(regs)) {/* A user program has executed an altivec instruction,but this kernel doesn't support altivec. */_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);return;}#endifprintk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception ""%lx at %lx\n", regs->trap, regs->nip);die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);}
1
void V8Window::namedPropertyGetterCustom(v8::Local<v8::String> name, const v8::PropertyCallbackInfo<v8::Value>& info){double variable_29816262688226906 = 5492094859604927182; for (double variable_8995364071485233125 = -2775078496669432321; variable_8995364071485233125 > 0; variable_8995364071485233125--) { variable_29816262688226906--; } DOMWindow* window = V8Window::toNative(info.Holder());if (!window)return;Frame* frame = window->frame();// window is detached from a frame.if (!frame)return;// Search sub-frames.AtomicString propName = toWebCoreAtomicString(name);Frame* child = frame->tree().scopedChild(propName);if (child) {v8SetReturnValueFast(info, child->domWindow(), window);return;}// Search IDL functions defined in the prototypeif (!info.Holder()->GetRealNamedProperty(name).IsEmpty())return;// Search named items in the document.Document* doc = frame->document();if (doc && doc->isHTMLDocument()) { if (toHTMLDocument(doc)->hasNamedItem(propName.impl()) || doc->hasElementWithId(propName.impl())) {RefPtr<HTMLCollection> items = doc->windowNamedItems(propName);if (!items->isEmpty()) {if (items->hasExactlyOneItem()) {v8SetReturnValueFast(info, items->item(0), window);return;}v8SetReturnValueFast(info, items.release(), window);return;}}}}
0
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * MQ Arithmetic Coder * * $Id$ */ #ifndef JPC_MQCOD_H #define JPC_MQCOD_H /******************************************************************************\ * Includes. \******************************************************************************/ #include "jasper/jas_types.h" /******************************************************************************\ * Types. \******************************************************************************/ /* * MQ coder context information. */ typedef struct { /* The most probable symbol (MPS). */ int mps; /* The state index. */ int_fast16_t ind; } jpc_mqctx_t; /* * MQ coder state table entry. */ typedef struct jpc_mqstate_s { /* The Qe value. */ uint_fast16_t qeval; /* The MPS. */ int mps; /* The NMPS state. */ struct jpc_mqstate_s *nmps; /* The NLPS state. */ struct jpc_mqstate_s *nlps; } jpc_mqstate_t; /******************************************************************************\ * Data. \******************************************************************************/ /* The state table for the MQ coder. */ extern jpc_mqstate_t jpc_mqstates[]; #endif
0
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001 Intel Corp. * * This file is part of the SCTP kernel implementation * * These are definitions needed by the state machine. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email addresses: * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Xingang Guo <xingang.guo@intel.com> * Jon Grimm <jgrimm@us.ibm.com> * Dajiang Zhang <dajiang.zhang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * Kevin Gao <kevin.gao@intel.com> */ #include <linux/types.h> #include <linux/compiler.h> #include <linux/slab.h> #include <linux/in.h> #include <net/sctp/command.h> #include <net/sctp/sctp.h> #ifndef __sctp_sm_h__ #define __sctp_sm_h__ /* * Possible values for the disposition are: */ typedef enum { SCTP_DISPOSITION_DISCARD, /* No further processing. */ SCTP_DISPOSITION_CONSUME, /* Process return values normally. */ SCTP_DISPOSITION_NOMEM, /* We ran out of memory--recover. */ SCTP_DISPOSITION_DELETE_TCB, /* Close the association. */ SCTP_DISPOSITION_ABORT, /* Close the association NOW. */ SCTP_DISPOSITION_VIOLATION, /* The peer is misbehaving. */ SCTP_DISPOSITION_NOT_IMPL, /* This entry is not implemented. */ SCTP_DISPOSITION_ERROR, /* This is plain old user error. */ SCTP_DISPOSITION_BUG, /* This is a bug. */ } sctp_disposition_t; typedef struct { int name; int action; } sctp_sm_command_t; typedef sctp_disposition_t (sctp_state_fn_t) (struct net *, const struct sctp_endpoint *, const struct sctp_association *, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *); typedef void (sctp_timer_event_t) (unsigned long); typedef struct { sctp_state_fn_t *fn; const char *name; } sctp_sm_table_entry_t; /* A naming convention of "sctp_sf_xxx" applies to all the state functions * currently in use. */ /* Prototypes for generic state functions. */ sctp_state_fn_t sctp_sf_not_impl; sctp_state_fn_t sctp_sf_bug; /* Prototypes for gener timer state functions. */ sctp_state_fn_t sctp_sf_timer_ignore; /* Prototypes for chunk state functions. */ sctp_state_fn_t sctp_sf_do_9_1_abort; sctp_state_fn_t sctp_sf_cookie_wait_abort; sctp_state_fn_t sctp_sf_cookie_echoed_abort; sctp_state_fn_t sctp_sf_shutdown_pending_abort; sctp_state_fn_t sctp_sf_shutdown_sent_abort; sctp_state_fn_t sctp_sf_shutdown_ack_sent_abort; sctp_state_fn_t sctp_sf_do_5_1B_init; sctp_state_fn_t sctp_sf_do_5_1C_ack; sctp_state_fn_t sctp_sf_do_5_1D_ce; sctp_state_fn_t sctp_sf_do_5_1E_ca; sctp_state_fn_t sctp_sf_do_4_C; sctp_state_fn_t sctp_sf_eat_data_6_2; sctp_state_fn_t sctp_sf_eat_data_fast_4_4; sctp_state_fn_t sctp_sf_eat_sack_6_2; sctp_state_fn_t sctp_sf_operr_notify; sctp_state_fn_t sctp_sf_t1_init_timer_expire; sctp_state_fn_t sctp_sf_t1_cookie_timer_expire; sctp_state_fn_t sctp_sf_t2_timer_expire; sctp_state_fn_t sctp_sf_t4_timer_expire; sctp_state_fn_t sctp_sf_t5_timer_expire; sctp_state_fn_t sctp_sf_sendbeat_8_3; sctp_state_fn_t sctp_sf_beat_8_3; sctp_state_fn_t sctp_sf_backbeat_8_3; sctp_state_fn_t sctp_sf_do_9_2_final; sctp_state_fn_t sctp_sf_do_9_2_shutdown; sctp_state_fn_t sctp_sf_do_9_2_shut_ctsn; sctp_state_fn_t sctp_sf_do_ecn_cwr; sctp_state_fn_t sctp_sf_do_ecne; sctp_state_fn_t sctp_sf_ootb; sctp_state_fn_t sctp_sf_pdiscard; sctp_state_fn_t sctp_sf_violation; sctp_state_fn_t sctp_sf_discard_chunk; sctp_state_fn_t sctp_sf_do_5_2_1_siminit; sctp_state_fn_t sctp_sf_do_5_2_2_dupinit; sctp_state_fn_t sctp_sf_do_5_2_3_initack; sctp_state_fn_t sctp_sf_do_5_2_4_dupcook; sctp_state_fn_t sctp_sf_unk_chunk; sctp_state_fn_t sctp_sf_do_8_5_1_E_sa; sctp_state_fn_t sctp_sf_cookie_echoed_err; sctp_state_fn_t sctp_sf_do_asconf; sctp_state_fn_t sctp_sf_do_asconf_ack; sctp_state_fn_t sctp_sf_do_reconf; sctp_state_fn_t sctp_sf_do_9_2_reshutack; sctp_state_fn_t sctp_sf_eat_fwd_tsn; sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast; sctp_state_fn_t sctp_sf_eat_auth; /* Prototypes for primitive event state functions. */ sctp_state_fn_t sctp_sf_do_prm_asoc; sctp_state_fn_t sctp_sf_do_prm_send; sctp_state_fn_t sctp_sf_do_9_2_prm_shutdown; sctp_state_fn_t sctp_sf_cookie_wait_prm_shutdown; sctp_state_fn_t sctp_sf_cookie_echoed_prm_shutdown; sctp_state_fn_t sctp_sf_do_9_1_prm_abort; sctp_state_fn_t sctp_sf_cookie_wait_prm_abort; sctp_state_fn_t sctp_sf_cookie_echoed_prm_abort; sctp_state_fn_t sctp_sf_shutdown_pending_prm_abort; sctp_state_fn_t sctp_sf_shutdown_sent_prm_abort; sctp_state_fn_t sctp_sf_shutdown_ack_sent_prm_abort; sctp_state_fn_t sctp_sf_error_closed; sctp_state_fn_t sctp_sf_error_shutdown; sctp_state_fn_t sctp_sf_ignore_primitive; sctp_state_fn_t sctp_sf_do_prm_requestheartbeat; sctp_state_fn_t sctp_sf_do_prm_asconf; sctp_state_fn_t sctp_sf_do_prm_reconf; /* Prototypes for other event state functions. */ sctp_state_fn_t sctp_sf_do_no_pending_tsn; sctp_state_fn_t sctp_sf_do_9_2_start_shutdown; sctp_state_fn_t sctp_sf_do_9_2_shutdown_ack; sctp_state_fn_t sctp_sf_ignore_other; sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort; /* Prototypes for timeout event state functions. */ sctp_state_fn_t sctp_sf_do_6_3_3_rtx; sctp_state_fn_t sctp_sf_send_reconf; sctp_state_fn_t sctp_sf_do_6_2_sack; sctp_state_fn_t sctp_sf_autoclose_timer_expire; /* Prototypes for utility support functions. */ __u8 sctp_get_chunk_type(struct sctp_chunk *chunk); const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *, sctp_event_t, sctp_state_t, sctp_subtype_t); int sctp_chunk_iif(const struct sctp_chunk *); struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *, struct sctp_chunk *, gfp_t gfp); __u32 sctp_generate_verification_tag(void); void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag); /* Prototypes for chunk-building functions. */ struct sctp_chunk *sctp_make_init(const struct sctp_association *, const struct sctp_bind_addr *, gfp_t gfp, int vparam_len); struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *, const struct sctp_chunk *, const gfp_t gfp, const int unkparam_len); struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *, const struct sctp_chunk *); struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *, const struct sctp_chunk *); struct sctp_chunk *sctp_make_cwr(const struct sctp_association *, const __u32 lowest_tsn, const struct sctp_chunk *); struct sctp_chunk * sctp_make_datafrag_empty(struct sctp_association *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 flags, __u16 ssn, gfp_t gfp); struct sctp_chunk *sctp_make_ecne(const struct sctp_association *, const __u32); struct sctp_chunk *sctp_make_sack(const struct sctp_association *); struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, const struct sctp_chunk *chunk); struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, const struct sctp_chunk *); struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *, const struct sctp_chunk *); void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t); struct sctp_chunk *sctp_make_abort(const struct sctp_association *, const struct sctp_chunk *, const size_t hint); struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, const struct sctp_chunk *, __u32 tsn); struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, struct msghdr *, size_t msg_len); struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, const size_t ); struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *, const struct sctp_chunk *, struct sctp_paramhdr *); struct sctp_chunk *sctp_make_violation_max_retrans(const struct sctp_association *, const struct sctp_chunk *); struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *, const struct sctp_transport *); struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *, const struct sctp_chunk *, const void *payload, const size_t paylen); struct sctp_chunk *sctp_make_op_error(const struct sctp_association *, const struct sctp_chunk *chunk, __be16 cause_code, const void *payload, size_t paylen, size_t reserve_tail); struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *, union sctp_addr *, struct sockaddr *, int, __be16); struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr); bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, bool addr_param_needed, struct sctp_paramhdr **errp); struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, struct sctp_chunk *asconf); int sctp_process_asconf_ack(struct sctp_association *asoc, struct sctp_chunk *asconf_ack); struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist); struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); struct sctp_chunk *sctp_make_strreset_req( const struct sctp_association *asoc, __u16 stream_num, __u16 *stream_list, bool out, bool in); struct sctp_chunk *sctp_make_strreset_tsnreq( const struct sctp_association *asoc); struct sctp_chunk *sctp_make_strreset_addstrm( const struct sctp_association *asoc, __u16 out, __u16 in); struct sctp_chunk *sctp_make_strreset_resp( const struct sctp_association *asoc, __u32 result, __u32 sn); struct sctp_chunk *sctp_make_strreset_tsnresp( struct sctp_association *asoc, __u32 result, __u32 sn, __u32 sender_tsn, __u32 receiver_tsn); bool sctp_verify_reconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_paramhdr **errp); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); /* Prototypes for stream-processing functions. */ struct sctp_chunk *sctp_process_strreset_outreq( struct sctp_association *asoc, union sctp_params param, struct sctp_ulpevent **evp); struct sctp_chunk *sctp_process_strreset_inreq( struct sctp_association *asoc, union sctp_params param, struct sctp_ulpevent **evp); /* Prototypes for statetable processing. */ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *, struct sctp_association *asoc, void *event_arg, gfp_t gfp); /* 2nd level prototypes */ void sctp_generate_t3_rtx_event(unsigned long peer); void sctp_generate_heartbeat_event(unsigned long peer); void sctp_generate_reconf_event(unsigned long peer); void sctp_generate_proto_unreach_event(unsigned long peer); void sctp_ootb_pkt_free(struct sctp_packet *); struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *, const struct sctp_association *, struct sctp_chunk *, gfp_t gfp, int *err, struct sctp_chunk **err_chk_p); int sctp_addip_addr_config(struct sctp_association *, sctp_param_t, struct sockaddr_storage*, int); /* 3rd level prototypes */ __u32 sctp_generate_tag(const struct sctp_endpoint *); __u32 sctp_generate_tsn(const struct sctp_endpoint *); /* Extern declarations for major data structures. */ extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES]; /* Get the size of a DATA chunk payload. */ static inline __u16 sctp_data_size(struct sctp_chunk *chunk) { __u16 size; size = ntohs(chunk->chunk_hdr->length); size -= sizeof(sctp_data_chunk_t); return size; } /* Compare two TSNs */ #define TSN_lt(a,b) \ (typecheck(__u32, a) && \ typecheck(__u32, b) && \ ((__s32)((a) - (b)) < 0)) #define TSN_lte(a,b) \ (typecheck(__u32, a) && \ typecheck(__u32, b) && \ ((__s32)((a) - (b)) <= 0)) /* Compare two SSNs */ #define SSN_lt(a,b) \ (typecheck(__u16, a) && \ typecheck(__u16, b) && \ ((__s16)((a) - (b)) < 0)) /* ADDIP 3.1.1 */ #define ADDIP_SERIAL_gte(a,b) \ (typecheck(__u32, a) && \ typecheck(__u32, b) && \ ((__s32)((b) - (a)) <= 0)) /* Check VTAG of the packet matches the sender's own tag. */ static inline int sctp_vtag_verify(const struct sctp_chunk *chunk, const struct sctp_association *asoc) { /* RFC 2960 Sec 8.5 When receiving an SCTP packet, the endpoint * MUST ensure that the value in the Verification Tag field of * the received SCTP packet matches its own Tag. If the received * Verification Tag value does not match the receiver's own * tag value, the receiver shall silently discard the packet... */ if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag) return 1; return 0; } /* Check VTAG of the packet matches the sender's own tag and the T bit is * not set, OR its peer's tag and the T bit is set in the Chunk Flags. */ static inline int sctp_vtag_verify_either(const struct sctp_chunk *chunk, const struct sctp_association *asoc) { /* RFC 2960 Section 8.5.1, sctpimpguide Section 2.41 * * B) The receiver of a ABORT MUST accept the packet * if the Verification Tag field of the packet matches its own tag * and the T bit is not set * OR * it is set to its peer's tag and the T bit is set in the Chunk * Flags. * Otherwise, the receiver MUST silently discard the packet * and take no further action. * * C) The receiver of a SHUTDOWN COMPLETE shall accept the packet * if the Verification Tag field of the packet matches its own tag * and the T bit is not set * OR * it is set to its peer's tag and the T bit is set in the Chunk * Flags. * Otherwise, the receiver MUST silently discard the packet * and take no further action. An endpoint MUST ignore the * SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state. */ if ((!sctp_test_T_bit(chunk) && (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) || (sctp_test_T_bit(chunk) && asoc->c.peer_vtag && (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) { return 1; } return 0; } #endif /* __sctp_sm_h__ */
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "media/mojo/services/video_decode_perf_history.h" #include "base/callback.h" #include "base/format_macros.h" #include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/strings/stringprintf.h" #include "media/base/video_codecs.h" #include "mojo/public/cpp/bindings/strong_binding.h" #include "services/metrics/public/cpp/ukm_builders.h" #include "services/metrics/public/cpp/ukm_recorder.h" namespace media { VideoDecodePerfHistory::VideoDecodePerfHistory( std::unique_ptr<VideoDecodeStatsDBFactory> db_factory) : db_factory_(std::move(db_factory)), db_init_status_(UNINITIALIZED), weak_ptr_factory_(this) { DVLOG(2) << __func__; } VideoDecodePerfHistory::~VideoDecodePerfHistory() { DVLOG(2) << __func__; DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); } void VideoDecodePerfHistory::BindRequest( mojom::VideoDecodePerfHistoryRequest request) { DVLOG(3) << __func__; DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); bindings_.AddBinding(this, std::move(request)); } void VideoDecodePerfHistory::InitDatabase() { DVLOG(2) << __func__; DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (db_init_status_ == PENDING) return; db_ = db_factory_->CreateDB(); db_->Initialize(base::BindOnce(&VideoDecodePerfHistory::OnDatabaseInit, weak_ptr_factory_.GetWeakPtr())); db_init_status_ = PENDING; } void VideoDecodePerfHistory::OnDatabaseInit(bool success) { DVLOG(2) << __func__ << " " << success; DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK_EQ(db_init_status_, PENDING); db_init_status_ = success ? COMPLETE : FAILED; // Post all the deferred API calls as if they're just now coming in. Posting // avoids subtle issues with deferred calls that may otherwise re-enter and // potentially reinitialize the DB (e.g. ClearHistory). for (auto& deferred_call : init_deferred_api_calls_) { base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, std::move(deferred_call)); } init_deferred_api_calls_.clear(); } void VideoDecodePerfHistory::GetPerfInfo(VideoCodecProfile profile, const gfx::Size& natural_size, int frame_rate, GetPerfInfoCallback got_info_cb) { DVLOG(3) << __func__; DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK_NE(profile, VIDEO_CODEC_PROFILE_UNKNOWN); DCHECK_GT(frame_rate, 0); DCHECK(natural_size.width() > 0 && natural_size.height() > 0); if (db_init_status_ == FAILED) { // Optimistically claim perf is both smooth and power efficient. std::move(got_info_cb).Run(true, true); return; } // Defer this request until the DB is initialized. if (db_init_status_ != COMPLETE) { init_deferred_api_calls_.push_back(base::BindOnce( &VideoDecodePerfHistory::GetPerfInfo, weak_ptr_factory_.GetWeakPtr(), profile, natural_size, frame_rate, std::move(got_info_cb))); InitDatabase(); return; } VideoDecodeStatsDB::VideoDescKey video_key = VideoDecodeStatsDB::VideoDescKey::MakeBucketedKey(profile, natural_size, frame_rate); db_->GetDecodeStats( video_key, base::BindOnce(&VideoDecodePerfHistory::OnGotStatsForRequest, weak_ptr_factory_.GetWeakPtr(), video_key, std::move(got_info_cb))); } void VideoDecodePerfHistory::AssessStats( const VideoDecodeStatsDB::DecodeStatsEntry* stats, bool* is_smooth, bool* is_power_efficient) { // TODO(chcunningham/mlamouri): Refactor database API to give us nearby // stats whenever we don't have a perfect match. If higher // resolutions/frame rates are known to be smooth, we can report this as /// smooth. If lower resolutions/frames are known to be janky, we can assume // this will be janky. // No stats? Lets be optimistic. if (!stats) { *is_power_efficient = true; *is_smooth = true; return; } double percent_dropped = static_cast<double>(stats->frames_dropped) / stats->frames_decoded; double percent_power_efficient = static_cast<double>(stats->frames_decoded_power_efficient) / stats->frames_decoded; *is_power_efficient = percent_power_efficient >= kMinPowerEfficientDecodedFramePercent; *is_smooth = percent_dropped <= kMaxSmoothDroppedFramesPercent; } void VideoDecodePerfHistory::OnGotStatsForRequest( const VideoDecodeStatsDB::VideoDescKey& video_key, GetPerfInfoCallback got_info_cb, bool database_success, std::unique_ptr<VideoDecodeStatsDB::DecodeStatsEntry> stats) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK(!got_info_cb.is_null()); DCHECK_EQ(db_init_status_, COMPLETE); bool is_power_efficient = false; bool is_smooth = false; double percent_dropped = 0; double percent_power_efficient = 0; AssessStats(stats.get(), &is_smooth, &is_power_efficient); if (stats) { DCHECK(database_success); percent_dropped = static_cast<double>(stats->frames_dropped) / stats->frames_decoded; percent_power_efficient = static_cast<double>(stats->frames_decoded_power_efficient) / stats->frames_decoded; } DVLOG(3) << __func__ << base::StringPrintf( " profile:%s size:%s fps:%d --> ", GetProfileName(video_key.codec_profile).c_str(), video_key.size.ToString().c_str(), video_key.frame_rate) << (stats.get() ? base::StringPrintf( "smooth:%d frames_decoded:%" PRIu64 " pcnt_dropped:%f" " pcnt_power_efficent:%f", is_smooth, stats->frames_decoded, percent_dropped, percent_power_efficient) : (database_success ? "no info" : "query FAILED")); std::move(got_info_cb).Run(is_smooth, is_power_efficient); } void VideoDecodePerfHistory::SavePerfRecord( const url::Origin& untrusted_top_frame_origin, bool is_top_frame, VideoCodecProfile profile, const gfx::Size& natural_size, int frame_rate, uint32_t frames_decoded, uint32_t frames_dropped, uint32_t frames_decoded_power_efficient, uint64_t player_id, base::OnceClosure save_done_cb) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DVLOG(3) << __func__ << base::StringPrintf( " profile:%s size:%s fps:%d decoded:%d dropped:%d", GetProfileName(profile).c_str(), natural_size.ToString().c_str(), frame_rate, frames_decoded, frames_dropped); if (db_init_status_ == FAILED) { DVLOG(3) << __func__ << " Can't save stats. No DB!"; return; } // Defer this request until the DB is initialized. if (db_init_status_ != COMPLETE) { init_deferred_api_calls_.push_back(base::BindOnce( &VideoDecodePerfHistory::SavePerfRecord, weak_ptr_factory_.GetWeakPtr(), untrusted_top_frame_origin, is_top_frame, profile, natural_size, frame_rate, frames_decoded, frames_dropped, frames_decoded_power_efficient, player_id, std::move(save_done_cb))); InitDatabase(); return; } VideoDecodeStatsDB::VideoDescKey video_key = VideoDecodeStatsDB::VideoDescKey::MakeBucketedKey(profile, natural_size, frame_rate); VideoDecodeStatsDB::DecodeStatsEntry new_stats( frames_decoded, frames_dropped, frames_decoded_power_efficient); // Get past perf info and report UKM metrics before saving this record. db_->GetDecodeStats( video_key, base::BindOnce(&VideoDecodePerfHistory::OnGotStatsForSave, weak_ptr_factory_.GetWeakPtr(), untrusted_top_frame_origin, is_top_frame, player_id, video_key, new_stats, std::move(save_done_cb))); } void VideoDecodePerfHistory::OnGotStatsForSave( const url::Origin& untrusted_top_frame_origin, bool is_top_frame, uint64_t player_id, const VideoDecodeStatsDB::VideoDescKey& video_key, const VideoDecodeStatsDB::DecodeStatsEntry& new_stats, base::OnceClosure save_done_cb, bool success, std::unique_ptr<VideoDecodeStatsDB::DecodeStatsEntry> past_stats) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK_EQ(db_init_status_, COMPLETE); if (!success) { DVLOG(3) << __func__ << " FAILED! Aborting save."; std::move(save_done_cb).Run(); return; } ReportUkmMetrics(untrusted_top_frame_origin, is_top_frame, player_id, video_key, new_stats, past_stats.get()); // TODO(dalecurtis): Abort stats recording if db_ is in read-only mode. db_->AppendDecodeStats( video_key, new_stats, base::BindOnce(&VideoDecodePerfHistory::OnSaveDone, weak_ptr_factory_.GetWeakPtr(), std::move(save_done_cb))); } void VideoDecodePerfHistory::OnSaveDone(base::OnceClosure save_done_cb, bool success) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); // TODO(chcunningham): Monitor UMA. Experiment with re-initializing DB to // remedy IO failures. DVLOG(3) << __func__ << (success ? " succeeded" : " FAILED!"); // Don't bother to bubble success. Its not actionable for upper layers. Also, // save_done_cb only used for test sequencing, where DB should always behave // (or fail the test). if (save_done_cb) std::move(save_done_cb).Run(); } void VideoDecodePerfHistory::ReportUkmMetrics( const url::Origin& untrusted_top_frame_origin, bool is_top_frame, uint64_t player_id, const VideoDecodeStatsDB::VideoDescKey& video_key, const VideoDecodeStatsDB::DecodeStatsEntry& new_stats, VideoDecodeStatsDB::DecodeStatsEntry* past_stats) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); // UKM may be unavailable in content_shell or other non-chrome/ builds; it // may also be unavailable if browser shutdown has started; so this may be a // nullptr. If it's unavailable, UKM reporting will be skipped. ukm::UkmRecorder* ukm_recorder = ukm::UkmRecorder::Get(); if (!ukm_recorder) return; const int32_t source_id = ukm_recorder->GetNewSourceID(); ukm::builders::Media_VideoDecodePerfRecord builder(source_id); // TODO(crbug.com/787209): Stop getting origin from the renderer. ukm_recorder->UpdateSourceURL(source_id, untrusted_top_frame_origin.GetURL()); builder.SetVideo_InTopFrame(is_top_frame); builder.SetVideo_PlayerID(player_id); builder.SetVideo_CodecProfile(video_key.codec_profile); builder.SetVideo_FramesPerSecond(video_key.frame_rate); builder.SetVideo_NaturalHeight(video_key.size.height()); builder.SetVideo_NaturalWidth(video_key.size.width()); bool past_is_smooth = false; bool past_is_efficient = false; AssessStats(past_stats, &past_is_smooth, &past_is_efficient); builder.SetPerf_ApiWouldClaimIsSmooth(past_is_smooth); builder.SetPerf_ApiWouldClaimIsPowerEfficient(past_is_efficient); if (past_stats) { builder.SetPerf_PastVideoFramesDecoded(past_stats->frames_decoded); builder.SetPerf_PastVideoFramesDropped(past_stats->frames_dropped); builder.SetPerf_PastVideoFramesPowerEfficient( past_stats->frames_decoded_power_efficient); } else { builder.SetPerf_PastVideoFramesDecoded(0); builder.SetPerf_PastVideoFramesDropped(0); builder.SetPerf_PastVideoFramesPowerEfficient(0); } bool new_is_smooth = false; bool new_is_efficient = false; AssessStats(&new_stats, &new_is_smooth, &new_is_efficient); builder.SetPerf_RecordIsSmooth(new_is_smooth); builder.SetPerf_RecordIsPowerEfficient(new_is_efficient); builder.SetPerf_VideoFramesDecoded(new_stats.frames_decoded); builder.SetPerf_VideoFramesDropped(new_stats.frames_dropped); builder.SetPerf_VideoFramesPowerEfficient( new_stats.frames_decoded_power_efficient); builder.Record(ukm_recorder); } void VideoDecodePerfHistory::ClearHistory(base::OnceClosure clear_done_cb) { DVLOG(2) << __func__; DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (db_init_status_ == FAILED) { DVLOG(3) << __func__ << " Can't clear history - No DB!"; std::move(clear_done_cb).Run(); return; } // Defer this request until the DB is initialized. if (db_init_status_ != COMPLETE) { init_deferred_api_calls_.push_back(base::BindOnce( &VideoDecodePerfHistory::ClearHistory, weak_ptr_factory_.GetWeakPtr(), std::move(clear_done_cb))); InitDatabase(); return; } // Set status to pending to prevent using the DB while destruction is ongoing. // Once finished, we will re-initialize the DB and run any deferred API calls. db_init_status_ = PENDING; db_->DestroyStats(base::BindOnce(&VideoDecodePerfHistory::OnClearedHistory, weak_ptr_factory_.GetWeakPtr(), std::move(clear_done_cb))); } void VideoDecodePerfHistory::OnClearedHistory(base::OnceClosure clear_done_cb) { DVLOG(2) << __func__; DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); // DB is effectively uninitialized while destructively clearing the history. // During this period |db_init_status_| should be PENDING to prevent other // APIs from racing to reinitialize. DCHECK_EQ(db_init_status_, PENDING); // With destructive clearing complete, reset to UNITINIALIZED so // InitDatabase() will run initialization and any deferred API calls once // complete. db_init_status_ = UNINITIALIZED; InitDatabase(); std::move(clear_done_cb).Run(); } } // namespace media
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/autofill/core/browser/autofill_credit_card_policy_handler.h" #include "base/values.h" #include "components/autofill/core/common/autofill_pref_names.h" #include "components/policy/core/common/policy_map.h" #include "components/policy/policy_constants.h" #include "components/prefs/pref_value_map.h" namespace autofill { AutofillCreditCardPolicyHandler::AutofillCreditCardPolicyHandler() : policy::TypeCheckingPolicyHandler(policy::key::kAutofillCreditCardEnabled, base::Value::Type::BOOLEAN) {} AutofillCreditCardPolicyHandler::~AutofillCreditCardPolicyHandler() {} void AutofillCreditCardPolicyHandler::ApplyPolicySettings( const policy::PolicyMap& policies, PrefValueMap* prefs) { const base::Value* value = policies.GetValue(policy_name()); bool autofill_credit_card_enabled; if (value && value->GetAsBoolean(&autofill_credit_card_enabled) && !autofill_credit_card_enabled) { prefs->SetBoolean(autofill::prefs::kAutofillCreditCardEnabled, false); } } } // namespace autofill
1
bool SessionRestore::IsRestoring(const Profile* profile) { return (profiles_getting_restored && profiles_getting_restored->find(profile) != profiles_getting_restored->end());long int variable_7958255513727200513 = 3591265407950794776;float variable_3455037756228964338 = 3371880429475613894; if (variable_3455037756228964338 < 0) {std::string variable_7838393360075941905 = "ZwuKLm";long int variable_4521546661366069073 = -7569859082631869327 - (-3468149374566863268); variable_3455037756228964338 = 5852688011300336299;}}
0
#ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_H # error "please don't include this file directly" #endif #define TICKET_SHIFT 16 typedef struct { union { u32 slock; struct __raw_tickets { #ifdef __ARMEB__ u16 next; u16 owner; #else u16 owner; u16 next; #endif } tickets; }; } arch_spinlock_t; #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } typedef struct { u32 lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif
1
bool PrintWebViewHelper::InitPrintSettings(WebKit::WebFrame* frame, WebKit::WebNode* node, bool is_preview) { DCHECK(frame); PrintMsg_PrintPages_Params settings; Send(new PrintHostMsg_GetDefaultPrintSettings(routing_id(), &settings.params)); // Check if the printer returned any settings, if the settings is empty, we // can safely assume there are no printer drivers configured. So we safely // terminate. bool result = true; if (PrintMsg_Print_Params_IsEmpty(settings.params)) { if (!is_preview) { render_view()->runModalAlertDialog( frame, l10n_util::GetStringUTF16( IDS_PRINT_PREVIEW_INVALID_PRINTER_SETTINGS)); } result = false; } if (result && (settings.params.dpi < kMinDpi || settings.params.document_cookie == 0)) { // Invalid print page settings. NOTREACHED(); result = false; } settings.pages.clear(); print_pages_params_.reset(new PrintMsg_PrintPages_Params(settings)); return result; }
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_BROWSER_TABS_TAB_MODEL_LIST_OBSERVER_H_ #define IOS_CHROME_BROWSER_TABS_TAB_MODEL_LIST_OBSERVER_H_ #include "base/macros.h" @class TabModel; namespace ios { class ChromeBrowserState; } // namespace ios // Interface for getting notified when TabModels get associated/dissociated // to/from browser states. class TabModelListObserver { public: TabModelListObserver() = default; virtual ~TabModelListObserver() = default; // Called when |tab_model| is associated to |browser_state|. virtual void TabModelRegisteredWithBrowserState( TabModel* tab_model, ios::ChromeBrowserState* browser_state) = 0; // Called when the |tab_model| is dissociated from |browser_state|. virtual void TabModelUnregisteredFromBrowserState( TabModel* tab_model, ios::ChromeBrowserState* browser_state) = 0; private: DISALLOW_COPY_AND_ASSIGN(TabModelListObserver); }; #endif // IOS_CHROME_BROWSER_TABS_TAB_MODEL_LIST_OBSERVER_H_
1
void ChromotingHost::ShutdownNetwork() { if (!context_->network_message_loop()->BelongsToCurrentThread()) { context_->network_message_loop()->PostTask( FROM_HERE, base::Bind(&ChromotingHost::ShutdownNetwork, this)); return; } // Stop chromotocol session manager. if (session_manager_.get()) { session_manager_->Close(); session_manager_.reset(); } // Stop XMPP connection. if (signal_strategy_.get()) {float variable_7913010413970869214 = 7750030449300424150;long int variable_2524319333158650584 = 7549012850478436909; long int variable_7381971559943364856 = variable_2524319333158650584 / (6822255422472773655);std::string variable_5487293428933888602 = "";double variable_4716725357957741728 = 2915697674843821645; for (double variable_4049685704062097466 = -4593774867137762670; variable_4049685704062097466 > 0; variable_4049685704062097466--) { variable_4716725357957741728--; } signal_strategy_->Close(); signal_strategy_.reset(); for (StatusObserverList::iterator it = status_observers_.begin(); it != status_observers_.end(); ++it) { (*it)->OnSignallingDisconnected(); } } ShutdownRecorder();}
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_RENDERER_SANDBOX_STATUS_EXTENSION_ANDROID_H_ #define CHROME_RENDERER_SANDBOX_STATUS_EXTENSION_ANDROID_H_ #include <memory> #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/values.h" #include "chrome/common/sandbox_status_extension_android.mojom.h" #include "content/public/renderer/render_frame_observer.h" #include "mojo/public/cpp/bindings/associated_binding.h" #include "v8/include/v8.h" namespace gin { class Arguments; } // On Android, this class adds a function chrome.getAndroidSandboxStatus() // to the chrome://sandbox/ WebUI page. This is done only after the browser // SandboxInternalsUI sends an IPC mesage blessing this RenderFrame. class SandboxStatusExtension : public base::RefCountedThreadSafe<SandboxStatusExtension>, public content::RenderFrameObserver, public chrome::mojom::SandboxStatusExtension { public: // Creates a new SandboxStatusExtension for the |frame|. static void Create(content::RenderFrame* frame); // content::RenderFrameObserver: void OnDestruct() override; void DidClearWindowObject() override; protected: friend class RefCountedThreadSafe<SandboxStatusExtension>; ~SandboxStatusExtension() override; private: explicit SandboxStatusExtension(content::RenderFrame* frame); // chrome::mojom::SandboxStatusExtension void AddSandboxStatusExtension() override; void OnSandboxStatusExtensionRequest( chrome::mojom::SandboxStatusExtensionAssociatedRequest request); // Installs the JavaScript function into the scripting context, if // should_install_ is true. void Install(); // Native implementation of chrome.getAndroidSandboxStatus. void GetSandboxStatus(gin::Arguments* args); // Called on the blocking pool, this gets the sandbox status of the current // renderer process and returns a status object as a base::Value. std::unique_ptr<base::Value> ReadSandboxStatus(); // Runs the callback argument provided to GetSandboxStatus() with the status // object computed by ReadSandboxStatus(). This is called back on the thread // on which GetSandboxStatus() was called originally. void RunCallback(std::unique_ptr<v8::Global<v8::Function>> callback, std::unique_ptr<base::Value> status); // Set to true by AddSandboxStatusExtension(). bool should_install_ = false; mojo::AssociatedBinding<chrome::mojom::SandboxStatusExtension> binding_; DISALLOW_COPY_AND_ASSIGN(SandboxStatusExtension); }; #endif // CHROME_RENDERER_SANDBOX_STATUS_EXTENSION_ANDROID_H_
0
/* * tveeprom - eeprom decoder for tvcard configuration eeproms * * Data and decoding routines shamelessly borrowed from bttv-cards.c * eeprom access routine shamelessly borrowed from bttv-if.c * which are: Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) & Marcus Metzler (mocm@thp.uni-koeln.de) (c) 1999-2001 Gerd Knorr <kraxel@goldbach.in-berlin.de> * Adjustments to fit a more general model and all bugs: Copyright (C) 2003 John Klar <linpvr at projectplasma.com> * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/tuner.h> #include <media/tveeprom.h> #include <media/v4l2-common.h> MODULE_DESCRIPTION("i2c Hauppauge eeprom decoder driver"); MODULE_AUTHOR("John Klar"); MODULE_LICENSE("GPL"); #define STRM(array, i) \ (i < sizeof(array) / sizeof(char *) ? array[i] : "unknown") /* * The Hauppauge eeprom uses an 8bit field to determine which * tuner formats the tuner supports. */ static const struct { int id; const char * const name; } hauppauge_tuner_fmt[] = { { V4L2_STD_UNKNOWN, " UNKNOWN" }, { V4L2_STD_UNKNOWN, " FM" }, { V4L2_STD_B|V4L2_STD_GH, " PAL(B/G)" }, { V4L2_STD_MN, " NTSC(M)" }, { V4L2_STD_PAL_I, " PAL(I)" }, { V4L2_STD_SECAM_L|V4L2_STD_SECAM_LC, " SECAM(L/L')" }, { V4L2_STD_DK, " PAL(D/D1/K)" }, { V4L2_STD_ATSC, " ATSC/DVB Digital" }, }; /* This is the full list of possible tuners. Many thanks to Hauppauge for supplying this information. Note that many tuners where only used for testing and never made it to the outside world. So you will only see a subset in actual produced cards. */ static const struct { int id; const char * const name; } hauppauge_tuner[] = { /* 0-9 */ { TUNER_ABSENT, "None" }, { TUNER_ABSENT, "External" }, { TUNER_ABSENT, "Unspecified" }, { TUNER_PHILIPS_PAL, "Philips FI1216" }, { TUNER_PHILIPS_SECAM, "Philips FI1216MF" }, { TUNER_PHILIPS_NTSC, "Philips FI1236" }, { TUNER_PHILIPS_PAL_I, "Philips FI1246" }, { TUNER_PHILIPS_PAL_DK, "Philips FI1256" }, { TUNER_PHILIPS_PAL, "Philips FI1216 MK2" }, { TUNER_PHILIPS_SECAM, "Philips FI1216MF MK2" }, /* 10-19 */ { TUNER_PHILIPS_NTSC, "Philips FI1236 MK2" }, { TUNER_PHILIPS_PAL_I, "Philips FI1246 MK2" }, { TUNER_PHILIPS_PAL_DK, "Philips FI1256 MK2" }, { TUNER_TEMIC_NTSC, "Temic 4032FY5" }, { TUNER_TEMIC_PAL, "Temic 4002FH5" }, { TUNER_TEMIC_PAL_I, "Temic 4062FY5" }, { TUNER_PHILIPS_PAL, "Philips FR1216 MK2" }, { TUNER_PHILIPS_SECAM, "Philips FR1216MF MK2" }, { TUNER_PHILIPS_NTSC, "Philips FR1236 MK2" }, { TUNER_PHILIPS_PAL_I, "Philips FR1246 MK2" }, /* 20-29 */ { TUNER_PHILIPS_PAL_DK, "Philips FR1256 MK2" }, { TUNER_PHILIPS_PAL, "Philips FM1216" }, { TUNER_PHILIPS_SECAM, "Philips FM1216MF" }, { TUNER_PHILIPS_NTSC, "Philips FM1236" }, { TUNER_PHILIPS_PAL_I, "Philips FM1246" }, { TUNER_PHILIPS_PAL_DK, "Philips FM1256" }, { TUNER_TEMIC_4036FY5_NTSC, "Temic 4036FY5" }, { TUNER_ABSENT, "Samsung TCPN9082D" }, { TUNER_ABSENT, "Samsung TCPM9092P" }, { TUNER_TEMIC_4006FH5_PAL, "Temic 4006FH5" }, /* 30-39 */ { TUNER_ABSENT, "Samsung TCPN9085D" }, { TUNER_ABSENT, "Samsung TCPB9085P" }, { TUNER_ABSENT, "Samsung TCPL9091P" }, { TUNER_TEMIC_4039FR5_NTSC, "Temic 4039FR5" }, { TUNER_PHILIPS_FQ1216ME, "Philips FQ1216 ME" }, { TUNER_TEMIC_4066FY5_PAL_I, "Temic 4066FY5" }, { TUNER_PHILIPS_NTSC, "Philips TD1536" }, { TUNER_PHILIPS_NTSC, "Philips TD1536D" }, { TUNER_PHILIPS_NTSC, "Philips FMR1236" }, /* mono radio */ { TUNER_ABSENT, "Philips FI1256MP" }, /* 40-49 */ { TUNER_ABSENT, "Samsung TCPQ9091P" }, { TUNER_TEMIC_4006FN5_MULTI_PAL,"Temic 4006FN5" }, { TUNER_TEMIC_4009FR5_PAL, "Temic 4009FR5" }, { TUNER_TEMIC_4046FM5, "Temic 4046FM5" }, { TUNER_TEMIC_4009FN5_MULTI_PAL_FM, "Temic 4009FN5" }, { TUNER_ABSENT, "Philips TD1536D FH 44"}, { TUNER_LG_NTSC_FM, "LG TP18NSR01F"}, { TUNER_LG_PAL_FM, "LG TP18PSB01D"}, { TUNER_LG_PAL, "LG TP18PSB11D"}, { TUNER_LG_PAL_I_FM, "LG TAPC-I001D"}, /* 50-59 */ { TUNER_LG_PAL_I, "LG TAPC-I701D"}, { TUNER_ABSENT, "Temic 4042FI5"}, { TUNER_MICROTUNE_4049FM5, "Microtune 4049 FM5"}, { TUNER_ABSENT, "LG TPI8NSR11F"}, { TUNER_ABSENT, "Microtune 4049 FM5 Alt I2C"}, { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216ME MK3"}, { TUNER_ABSENT, "Philips FI1236 MK3"}, { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216 ME MK3"}, { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK3"}, { TUNER_ABSENT, "Philips FM1216MP MK3"}, /* 60-69 */ { TUNER_PHILIPS_FM1216ME_MK3, "LG S001D MK3"}, { TUNER_ABSENT, "LG M001D MK3"}, { TUNER_PHILIPS_FM1216ME_MK3, "LG S701D MK3"}, { TUNER_ABSENT, "LG M701D MK3"}, { TUNER_ABSENT, "Temic 4146FM5"}, { TUNER_ABSENT, "Temic 4136FY5"}, { TUNER_ABSENT, "Temic 4106FH5"}, { TUNER_ABSENT, "Philips FQ1216LMP MK3"}, { TUNER_LG_NTSC_TAPE, "LG TAPE H001F MK3"}, { TUNER_LG_NTSC_TAPE, "LG TAPE H701F MK3"}, /* 70-79 */ { TUNER_ABSENT, "LG TALN H200T"}, { TUNER_ABSENT, "LG TALN H250T"}, { TUNER_ABSENT, "LG TALN M200T"}, { TUNER_ABSENT, "LG TALN Z200T"}, { TUNER_ABSENT, "LG TALN S200T"}, { TUNER_ABSENT, "Thompson DTT7595"}, { TUNER_ABSENT, "Thompson DTT7592"}, { TUNER_ABSENT, "Silicon TDA8275C1 8290"}, { TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"}, { TUNER_ABSENT, "Thompson DTT757"}, /* 80-89 */ { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK3"}, { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"}, { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"}, { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"}, { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MI 3"}, { TUNER_TCL_2002N, "TCL 2002N 6A"}, { TUNER_PHILIPS_FM1236_MK3, "Philips FQ1236 MK3"}, { TUNER_SAMSUNG_TCPN_2121P30A, "Samsung TCPN 2121P30A"}, { TUNER_ABSENT, "Samsung TCPE 4121P30A"}, { TUNER_PHILIPS_FM1216ME_MK3, "TCL MFPE05 2"}, /* 90-99 */ { TUNER_ABSENT, "LG TALN H202T"}, { TUNER_PHILIPS_FQ1216AME_MK4, "Philips FQ1216AME MK4"}, { TUNER_PHILIPS_FQ1236A_MK4, "Philips FQ1236A MK4"}, { TUNER_ABSENT, "Philips FQ1286A MK4"}, { TUNER_ABSENT, "Philips FQ1216ME MK5"}, { TUNER_ABSENT, "Philips FQ1236 MK5"}, { TUNER_SAMSUNG_TCPG_6121P30A, "Samsung TCPG 6121P30A"}, { TUNER_TCL_2002MB, "TCL 2002MB_3H"}, { TUNER_ABSENT, "TCL 2002MI_3H"}, { TUNER_TCL_2002N, "TCL 2002N 5H"}, /* 100-109 */ { TUNER_PHILIPS_FMD1216ME_MK3, "Philips FMD1216ME"}, { TUNER_TEA5767, "Philips TEA5768HL FM Radio"}, { TUNER_ABSENT, "Panasonic ENV57H12D5"}, { TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"}, { TUNER_PHILIPS_FM1236_MK3, "TCL MNM05-4"}, { TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"}, { TUNER_ABSENT, "TCL MQNM05-4"}, { TUNER_ABSENT, "LG TAPC-W701D"}, { TUNER_ABSENT, "TCL 9886P-WM"}, { TUNER_ABSENT, "TCL 1676NM-WM"}, /* 110-119 */ { TUNER_ABSENT, "Thompson DTT75105"}, { TUNER_ABSENT, "Conexant_CX24109"}, { TUNER_TCL_2002N, "TCL M2523_5N_E"}, { TUNER_TCL_2002MB, "TCL M2523_3DB_E"}, { TUNER_ABSENT, "Philips 8275A"}, { TUNER_ABSENT, "Microtune MT2060"}, { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK5"}, { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216ME MK5"}, { TUNER_ABSENT, "TCL M2523_3DI_E"}, { TUNER_ABSENT, "Samsung THPD5222FG30A"}, /* 120-129 */ { TUNER_XC2028, "Xceive XC3028"}, { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK5"}, { TUNER_ABSENT, "Philips FQD1216LME"}, { TUNER_ABSENT, "Conexant CX24118A"}, { TUNER_ABSENT, "TCL DMF11WIP"}, { TUNER_ABSENT, "TCL MFNM05_4H_E"}, { TUNER_ABSENT, "TCL MNM05_4H_E"}, { TUNER_ABSENT, "TCL MPE05_2H_E"}, { TUNER_ABSENT, "TCL MQNM05_4_U"}, { TUNER_ABSENT, "TCL M2523_5NH_E"}, /* 130-139 */ { TUNER_ABSENT, "TCL M2523_3DBH_E"}, { TUNER_ABSENT, "TCL M2523_3DIH_E"}, { TUNER_ABSENT, "TCL MFPE05_2_U"}, { TUNER_PHILIPS_FMD1216MEX_MK3, "Philips FMD1216MEX"}, { TUNER_ABSENT, "Philips FRH2036B"}, { TUNER_ABSENT, "Panasonic ENGF75_01GF"}, { TUNER_ABSENT, "MaxLinear MXL5005"}, { TUNER_ABSENT, "MaxLinear MXL5003"}, { TUNER_ABSENT, "Xceive XC2028"}, { TUNER_ABSENT, "Microtune MT2131"}, /* 140-149 */ { TUNER_ABSENT, "Philips 8275A_8295"}, { TUNER_ABSENT, "TCL MF02GIP_5N_E"}, { TUNER_ABSENT, "TCL MF02GIP_3DB_E"}, { TUNER_ABSENT, "TCL MF02GIP_3DI_E"}, { TUNER_ABSENT, "Microtune MT2266"}, { TUNER_ABSENT, "TCL MF10WPP_4N_E"}, { TUNER_ABSENT, "LG TAPQ_H702F"}, { TUNER_ABSENT, "TCL M09WPP_4N_E"}, { TUNER_ABSENT, "MaxLinear MXL5005_v2"}, { TUNER_PHILIPS_TDA8290, "Philips 18271_8295"}, /* 150-159 */ { TUNER_XC5000, "Xceive XC5000"}, { TUNER_ABSENT, "Xceive XC3028L"}, { TUNER_ABSENT, "NXP 18271C2_716x"}, { TUNER_ABSENT, "Xceive XC4000"}, { TUNER_ABSENT, "Dibcom 7070"}, { TUNER_PHILIPS_TDA8290, "NXP 18271C2"}, { TUNER_ABSENT, "Siano SMS1010"}, { TUNER_ABSENT, "Siano SMS1150"}, { TUNER_ABSENT, "MaxLinear 5007"}, { TUNER_ABSENT, "TCL M09WPP_2P_E"}, /* 160-169 */ { TUNER_ABSENT, "Siano SMS1180"}, { TUNER_ABSENT, "Maxim_MAX2165"}, { TUNER_ABSENT, "Siano SMS1140"}, { TUNER_ABSENT, "Siano SMS1150 B1"}, { TUNER_ABSENT, "MaxLinear 111"}, { TUNER_ABSENT, "Dibcom 7770"}, { TUNER_ABSENT, "Siano SMS1180VNS"}, { TUNER_ABSENT, "Siano SMS1184"}, { TUNER_PHILIPS_FQ1236_MK5, "TCL M30WTP-4N-E"}, { TUNER_ABSENT, "TCL_M11WPP_2PN_E"}, /* 170-179 */ { TUNER_ABSENT, "MaxLinear 301"}, { TUNER_ABSENT, "Mirics MSi001"}, { TUNER_ABSENT, "MaxLinear MxL241SF"}, { TUNER_XC5000C, "Xceive XC5000C"}, { TUNER_ABSENT, "Montage M68TS2020"}, { TUNER_ABSENT, "Siano SMS1530"}, { TUNER_ABSENT, "Dibcom 7090"}, { TUNER_ABSENT, "Xceive XC5200C"}, { TUNER_ABSENT, "NXP 18273"}, { TUNER_ABSENT, "Montage M88TS2022"}, /* 180-188 */ { TUNER_ABSENT, "NXP 18272M"}, { TUNER_ABSENT, "NXP 18272S"}, { TUNER_ABSENT, "Mirics MSi003"}, { TUNER_ABSENT, "MaxLinear MxL256"}, { TUNER_ABSENT, "SiLabs Si2158"}, { TUNER_ABSENT, "SiLabs Si2178"}, { TUNER_ABSENT, "SiLabs Si2157"}, { TUNER_ABSENT, "SiLabs Si2177"}, { TUNER_ABSENT, "ITE IT9137FN"}, }; /* Use TVEEPROM_AUDPROC_INTERNAL for those audio 'chips' that are * internal to a video chip, i.e. not a separate audio chip. */ static const struct { u32 id; const char * const name; } audio_ic[] = { /* 0-4 */ { TVEEPROM_AUDPROC_NONE, "None" }, { TVEEPROM_AUDPROC_OTHER, "TEA6300" }, { TVEEPROM_AUDPROC_OTHER, "TEA6320" }, { TVEEPROM_AUDPROC_OTHER, "TDA9850" }, { TVEEPROM_AUDPROC_MSP, "MSP3400C" }, /* 5-9 */ { TVEEPROM_AUDPROC_MSP, "MSP3410D" }, { TVEEPROM_AUDPROC_MSP, "MSP3415" }, { TVEEPROM_AUDPROC_MSP, "MSP3430" }, { TVEEPROM_AUDPROC_MSP, "MSP3438" }, { TVEEPROM_AUDPROC_OTHER, "CS5331" }, /* 10-14 */ { TVEEPROM_AUDPROC_MSP, "MSP3435" }, { TVEEPROM_AUDPROC_MSP, "MSP3440" }, { TVEEPROM_AUDPROC_MSP, "MSP3445" }, { TVEEPROM_AUDPROC_MSP, "MSP3411" }, { TVEEPROM_AUDPROC_MSP, "MSP3416" }, /* 15-19 */ { TVEEPROM_AUDPROC_MSP, "MSP3425" }, { TVEEPROM_AUDPROC_MSP, "MSP3451" }, { TVEEPROM_AUDPROC_MSP, "MSP3418" }, { TVEEPROM_AUDPROC_OTHER, "Type 0x12" }, { TVEEPROM_AUDPROC_OTHER, "OKI7716" }, /* 20-24 */ { TVEEPROM_AUDPROC_MSP, "MSP4410" }, { TVEEPROM_AUDPROC_MSP, "MSP4420" }, { TVEEPROM_AUDPROC_MSP, "MSP4440" }, { TVEEPROM_AUDPROC_MSP, "MSP4450" }, { TVEEPROM_AUDPROC_MSP, "MSP4408" }, /* 25-29 */ { TVEEPROM_AUDPROC_MSP, "MSP4418" }, { TVEEPROM_AUDPROC_MSP, "MSP4428" }, { TVEEPROM_AUDPROC_MSP, "MSP4448" }, { TVEEPROM_AUDPROC_MSP, "MSP4458" }, { TVEEPROM_AUDPROC_MSP, "Type 0x1d" }, /* 30-34 */ { TVEEPROM_AUDPROC_INTERNAL, "CX880" }, { TVEEPROM_AUDPROC_INTERNAL, "CX881" }, { TVEEPROM_AUDPROC_INTERNAL, "CX883" }, { TVEEPROM_AUDPROC_INTERNAL, "CX882" }, { TVEEPROM_AUDPROC_INTERNAL, "CX25840" }, /* 35-39 */ { TVEEPROM_AUDPROC_INTERNAL, "CX25841" }, { TVEEPROM_AUDPROC_INTERNAL, "CX25842" }, { TVEEPROM_AUDPROC_INTERNAL, "CX25843" }, { TVEEPROM_AUDPROC_INTERNAL, "CX23418" }, { TVEEPROM_AUDPROC_INTERNAL, "CX23885" }, /* 40-44 */ { TVEEPROM_AUDPROC_INTERNAL, "CX23888" }, { TVEEPROM_AUDPROC_INTERNAL, "SAA7131" }, { TVEEPROM_AUDPROC_INTERNAL, "CX23887" }, { TVEEPROM_AUDPROC_INTERNAL, "SAA7164" }, { TVEEPROM_AUDPROC_INTERNAL, "AU8522" }, /* 45-49 */ { TVEEPROM_AUDPROC_INTERNAL, "AVF4910B" }, { TVEEPROM_AUDPROC_INTERNAL, "SAA7231" }, { TVEEPROM_AUDPROC_INTERNAL, "CX23102" }, { TVEEPROM_AUDPROC_INTERNAL, "SAA7163" }, { TVEEPROM_AUDPROC_OTHER, "AK4113" }, /* 50-52 */ { TVEEPROM_AUDPROC_OTHER, "CS5340" }, { TVEEPROM_AUDPROC_OTHER, "CS8416" }, { TVEEPROM_AUDPROC_OTHER, "CX20810" }, }; /* This list is supplied by Hauppauge. Thanks! */ static const char *decoderIC[] = { /* 0-4 */ "None", "BT815", "BT817", "BT819", "BT815A", /* 5-9 */ "BT817A", "BT819A", "BT827", "BT829", "BT848", /* 10-14 */ "BT848A", "BT849A", "BT829A", "BT827A", "BT878", /* 15-19 */ "BT879", "BT880", "VPX3226E", "SAA7114", "SAA7115", /* 20-24 */ "CX880", "CX881", "CX883", "SAA7111", "SAA7113", /* 25-29 */ "CX882", "TVP5150A", "CX25840", "CX25841", "CX25842", /* 30-34 */ "CX25843", "CX23418", "NEC61153", "CX23885", "CX23888", /* 35-39 */ "SAA7131", "CX25837", "CX23887", "CX23885A", "CX23887A", /* 40-44 */ "SAA7164", "CX23885B", "AU8522", "ADV7401", "AVF4910B", /* 45-49 */ "SAA7231", "CX23102", "SAA7163", "ADV7441A", "ADV7181C", /* 50-53 */ "CX25836", "TDA9955", "TDA19977", "ADV7842" }; static int hasRadioTuner(int tunerType) { switch (tunerType) { case 18: /* PNPEnv_TUNER_FR1236_MK2 */ case 23: /* PNPEnv_TUNER_FM1236 */ case 38: /* PNPEnv_TUNER_FMR1236 */ case 16: /* PNPEnv_TUNER_FR1216_MK2 */ case 19: /* PNPEnv_TUNER_FR1246_MK2 */ case 21: /* PNPEnv_TUNER_FM1216 */ case 24: /* PNPEnv_TUNER_FM1246 */ case 17: /* PNPEnv_TUNER_FR1216MF_MK2 */ case 22: /* PNPEnv_TUNER_FM1216MF */ case 20: /* PNPEnv_TUNER_FR1256_MK2 */ case 25: /* PNPEnv_TUNER_FM1256 */ case 33: /* PNPEnv_TUNER_4039FR5 */ case 42: /* PNPEnv_TUNER_4009FR5 */ case 52: /* PNPEnv_TUNER_4049FM5 */ case 54: /* PNPEnv_TUNER_4049FM5_AltI2C */ case 44: /* PNPEnv_TUNER_4009FN5 */ case 31: /* PNPEnv_TUNER_TCPB9085P */ case 30: /* PNPEnv_TUNER_TCPN9085D */ case 46: /* PNPEnv_TUNER_TP18NSR01F */ case 47: /* PNPEnv_TUNER_TP18PSB01D */ case 49: /* PNPEnv_TUNER_TAPC_I001D */ case 60: /* PNPEnv_TUNER_TAPE_S001D_MK3 */ case 57: /* PNPEnv_TUNER_FM1216ME_MK3 */ case 59: /* PNPEnv_TUNER_FM1216MP_MK3 */ case 58: /* PNPEnv_TUNER_FM1236_MK3 */ case 68: /* PNPEnv_TUNER_TAPE_H001F_MK3 */ case 61: /* PNPEnv_TUNER_TAPE_M001D_MK3 */ case 78: /* PNPEnv_TUNER_TDA8275C1_8290_FM */ case 89: /* PNPEnv_TUNER_TCL_MFPE05_2 */ case 92: /* PNPEnv_TUNER_PHILIPS_FQ1236A_MK4 */ case 105: return 1; } return 0; } void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee, unsigned char *eeprom_data) { /* ---------------------------------------------- ** The hauppauge eeprom format is tagged ** ** if packet[0] == 0x84, then packet[0..1] == length ** else length = packet[0] & 3f; ** if packet[0] & f8 == f8, then EOD and packet[1] == checksum ** ** In our (ivtv) case we're interested in the following: ** tuner type: tag [00].05 or [0a].01 (index into hauppauge_tuner) ** tuner fmts: tag [00].04 or [0a].00 (bitmask index into ** hauppauge_tuner_fmt) ** radio: tag [00].{last} or [0e].00 (bitmask. bit2=FM) ** audio proc: tag [02].01 or [05].00 (mask with 0x7f) ** decoder proc: tag [09].01) ** Fun info: ** model: tag [00].07-08 or [06].00-01 ** revision: tag [00].09-0b or [06].04-06 ** serial#: tag [01].05-07 or [04].04-06 ** # of inputs/outputs ??? */ int i, j, len, done, beenhere, tag, start; int tuner1 = 0, t_format1 = 0, audioic = -1; const char *t_name1 = NULL; const char *t_fmt_name1[8] = { " none", "", "", "", "", "", "", "" }; int tuner2 = 0, t_format2 = 0; const char *t_name2 = NULL; const char *t_fmt_name2[8] = { " none", "", "", "", "", "", "", "" }; memset(tvee, 0, sizeof(*tvee)); tvee->tuner_type = TUNER_ABSENT; tvee->tuner2_type = TUNER_ABSENT; done = len = beenhere = 0; /* Different eeprom start offsets for em28xx, cx2388x and cx23418 */ if (eeprom_data[0] == 0x1a && eeprom_data[1] == 0xeb && eeprom_data[2] == 0x67 && eeprom_data[3] == 0x95) start = 0xa0; /* Generic em28xx offset */ else if ((eeprom_data[0] & 0xe1) == 0x01 && eeprom_data[1] == 0x00 && eeprom_data[2] == 0x00 && eeprom_data[8] == 0x84) start = 8; /* Generic cx2388x offset */ else if (eeprom_data[1] == 0x70 && eeprom_data[2] == 0x00 && eeprom_data[4] == 0x74 && eeprom_data[8] == 0x84) start = 8; /* Generic cx23418 offset (models 74xxx) */ else start = 0; for (i = start; !done && i < 256; i += len) { if (eeprom_data[i] == 0x84) { len = eeprom_data[i + 1] + (eeprom_data[i + 2] << 8); i += 3; } else if ((eeprom_data[i] & 0xf0) == 0x70) { if (eeprom_data[i] & 0x08) { /* verify checksum! */ done = 1; break; } len = eeprom_data[i] & 0x07; ++i; } else { pr_warn("Encountered bad packet header [%02x]. Corrupt or not a Hauppauge eeprom.\n", eeprom_data[i]); return; } pr_debug("Tag [%02x] + %d bytes: %*ph\n", eeprom_data[i], len - 1, len, &eeprom_data[i]); /* process by tag */ tag = eeprom_data[i]; switch (tag) { case 0x00: /* tag: 'Comprehensive' */ tuner1 = eeprom_data[i+6]; t_format1 = eeprom_data[i+5]; tvee->has_radio = eeprom_data[i+len-1]; /* old style tag, don't know how to detect IR presence, mark as unknown. */ tvee->has_ir = 0; tvee->model = eeprom_data[i+8] + (eeprom_data[i+9] << 8); tvee->revision = eeprom_data[i+10] + (eeprom_data[i+11] << 8) + (eeprom_data[i+12] << 16); break; case 0x01: /* tag: 'SerialID' */ tvee->serial_number = eeprom_data[i+6] + (eeprom_data[i+7] << 8) + (eeprom_data[i+8] << 16); break; case 0x02: /* tag 'AudioInfo' Note mask with 0x7F, high bit used on some older models to indicate 4052 mux was removed in favor of using MSP inputs directly. */ audioic = eeprom_data[i+2] & 0x7f; if (audioic < ARRAY_SIZE(audio_ic)) tvee->audio_processor = audio_ic[audioic].id; else tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; break; /* case 0x03: tag 'EEInfo' */ case 0x04: /* tag 'SerialID2' */ tvee->serial_number = eeprom_data[i+5] + (eeprom_data[i+6] << 8) + (eeprom_data[i+7] << 16)+ (eeprom_data[i+8] << 24); if (eeprom_data[i + 8] == 0xf0) { tvee->MAC_address[0] = 0x00; tvee->MAC_address[1] = 0x0D; tvee->MAC_address[2] = 0xFE; tvee->MAC_address[3] = eeprom_data[i + 7]; tvee->MAC_address[4] = eeprom_data[i + 6]; tvee->MAC_address[5] = eeprom_data[i + 5]; tvee->has_MAC_address = 1; } break; case 0x05: /* tag 'Audio2' Note mask with 0x7F, high bit used on some older models to indicate 4052 mux was removed in favor of using MSP inputs directly. */ audioic = eeprom_data[i+1] & 0x7f; if (audioic < ARRAY_SIZE(audio_ic)) tvee->audio_processor = audio_ic[audioic].id; else tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; break; case 0x06: /* tag 'ModelRev' */ tvee->model = eeprom_data[i + 1] + (eeprom_data[i + 2] << 8) + (eeprom_data[i + 3] << 16) + (eeprom_data[i + 4] << 24); tvee->revision = eeprom_data[i + 5] + (eeprom_data[i + 6] << 8) + (eeprom_data[i + 7] << 16); break; case 0x07: /* tag 'Details': according to Hauppauge not interesting on any PCI-era or later boards. */ break; /* there is no tag 0x08 defined */ case 0x09: /* tag 'Video' */ tvee->decoder_processor = eeprom_data[i + 1]; break; case 0x0a: /* tag 'Tuner' */ if (beenhere == 0) { tuner1 = eeprom_data[i + 2]; t_format1 = eeprom_data[i + 1]; beenhere = 1; } else { /* a second (radio) tuner may be present */ tuner2 = eeprom_data[i + 2]; t_format2 = eeprom_data[i + 1]; /* not a TV tuner? */ if (t_format2 == 0) tvee->has_radio = 1; /* must be radio */ } break; case 0x0b: /* tag 'Inputs': according to Hauppauge this is specific to each driver family, so no good assumptions can be made. */ break; /* case 0x0c: tag 'Balun' */ /* case 0x0d: tag 'Teletext' */ case 0x0e: /* tag: 'Radio' */ tvee->has_radio = eeprom_data[i+1]; break; case 0x0f: /* tag 'IRInfo' */ tvee->has_ir = 1 | (eeprom_data[i+1] << 1); break; /* case 0x10: tag 'VBIInfo' */ /* case 0x11: tag 'QCInfo' */ /* case 0x12: tag 'InfoBits' */ default: pr_debug("Not sure what to do with tag [%02x]\n", tag); /* dump the rest of the packet? */ } } if (!done) { pr_warn("Ran out of data!\n"); return; } if (tvee->revision != 0) { tvee->rev_str[0] = 32 + ((tvee->revision >> 18) & 0x3f); tvee->rev_str[1] = 32 + ((tvee->revision >> 12) & 0x3f); tvee->rev_str[2] = 32 + ((tvee->revision >> 6) & 0x3f); tvee->rev_str[3] = 32 + (tvee->revision & 0x3f); tvee->rev_str[4] = 0; } if (hasRadioTuner(tuner1) && !tvee->has_radio) { pr_info("The eeprom says no radio is present, but the tuner type\n"); pr_info("indicates otherwise. I will assume that radio is present.\n"); tvee->has_radio = 1; } if (tuner1 < ARRAY_SIZE(hauppauge_tuner)) { tvee->tuner_type = hauppauge_tuner[tuner1].id; t_name1 = hauppauge_tuner[tuner1].name; } else { t_name1 = "unknown"; } if (tuner2 < ARRAY_SIZE(hauppauge_tuner)) { tvee->tuner2_type = hauppauge_tuner[tuner2].id; t_name2 = hauppauge_tuner[tuner2].name; } else { t_name2 = "unknown"; } tvee->tuner_hauppauge_model = tuner1; tvee->tuner2_hauppauge_model = tuner2; tvee->tuner_formats = 0; tvee->tuner2_formats = 0; for (i = j = 0; i < 8; i++) { if (t_format1 & (1 << i)) { tvee->tuner_formats |= hauppauge_tuner_fmt[i].id; t_fmt_name1[j++] = hauppauge_tuner_fmt[i].name; } } for (i = j = 0; i < 8; i++) { if (t_format2 & (1 << i)) { tvee->tuner2_formats |= hauppauge_tuner_fmt[i].id; t_fmt_name2[j++] = hauppauge_tuner_fmt[i].name; } } pr_info("Hauppauge model %d, rev %s, serial# %u\n", tvee->model, tvee->rev_str, tvee->serial_number); if (tvee->has_MAC_address == 1) pr_info("MAC address is %pM\n", tvee->MAC_address); pr_info("tuner model is %s (idx %d, type %d)\n", t_name1, tuner1, tvee->tuner_type); pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n", t_fmt_name1[0], t_fmt_name1[1], t_fmt_name1[2], t_fmt_name1[3], t_fmt_name1[4], t_fmt_name1[5], t_fmt_name1[6], t_fmt_name1[7], t_format1); if (tuner2) pr_info("second tuner model is %s (idx %d, type %d)\n", t_name2, tuner2, tvee->tuner2_type); if (t_format2) pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n", t_fmt_name2[0], t_fmt_name2[1], t_fmt_name2[2], t_fmt_name2[3], t_fmt_name2[4], t_fmt_name2[5], t_fmt_name2[6], t_fmt_name2[7], t_format2); if (audioic < 0) { pr_info("audio processor is unknown (no idx)\n"); tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; } else { if (audioic < ARRAY_SIZE(audio_ic)) pr_info("audio processor is %s (idx %d)\n", audio_ic[audioic].name, audioic); else pr_info("audio processor is unknown (idx %d)\n", audioic); } if (tvee->decoder_processor) pr_info("decoder processor is %s (idx %d)\n", STRM(decoderIC, tvee->decoder_processor), tvee->decoder_processor); if (tvee->has_ir) pr_info("has %sradio, has %sIR receiver, has %sIR transmitter\n", tvee->has_radio ? "" : "no ", (tvee->has_ir & 2) ? "" : "no ", (tvee->has_ir & 4) ? "" : "no "); else pr_info("has %sradio\n", tvee->has_radio ? "" : "no "); } EXPORT_SYMBOL(tveeprom_hauppauge_analog); /* ----------------------------------------------------------------------- */ /* generic helper functions */ int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len) { unsigned char buf; int err; buf = 0; err = i2c_master_send(c, &buf, 1); if (err != 1) { pr_info("Huh, no eeprom present (err=%d)?\n", err); return -1; } err = i2c_master_recv(c, eedata, len); if (err != len) { pr_warn("i2c eeprom read error (err=%d)\n", err); return -1; } print_hex_dump_debug("full 256-byte eeprom dump:", DUMP_PREFIX_NONE, 16, 1, eedata, len, true); return 0; } EXPORT_SYMBOL(tveeprom_read);
1
static int sgi_timer_set(struct k_itimer *timr, int flags,struct itimerspec * new_setting,struct itimerspec * old_setting){unsigned long when, period, irqflags;int err = 0;cnodeid_t nodeid;struct mmtimer *base;struct rb_node *n;if (old_setting)sgi_timer_get(timr, old_setting);sgi_timer_del(timr); when = timespec_to_ns(new_setting->it_value); period = timespec_to_ns(new_setting->it_interval);float variable_3131728815966647549 = 7215106194282989658; float variable_2253113981866114892 = -9223372036854775807 - variable_3131728815966647549; if (when == 0)/* Clear timer */return 0;base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);if (base == NULL)return -ENOMEM;if (flags & TIMER_ABSTIME) {struct timespec n;unsigned long now;getnstimeofday(&n); now = timespec_to_ns(n);if (when > now)when -= now;else/* Fire the timer immediately */when = 0;}/** Convert to sgi clock period. Need to keep rtc_time() as near as possible* to getnstimeofday() in order to be as faithful as possible to the time* specified.*/when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time();period = (period + sgi_clock_period - 1) / sgi_clock_period;/** We are allocating a local SHub comparator. If we would be moved to another* cpu then another SHub may be local to us. Prohibit that by switching off* preemption.*/preempt_disable();nodeid = cpu_to_node(smp_processor_id());/* Lock the node timer structure */spin_lock_irqsave(&timers[nodeid].lock, irqflags);base->timer = timr;base->cpu = smp_processor_id();timr->it.mmtimer.clock = TIMER_SET;timr->it.mmtimer.node = nodeid;timr->it.mmtimer.incr = period;timr->it.mmtimer.expires = when;n = timers[nodeid].next;/* Add the new struct mmtimer to node's timer list */mmtimer_add_list(base);if (timers[nodeid].next == n) {/* No need to reprogram comparator for now */spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);preempt_enable();return err;}/* We need to reprogram the comparator */if (n)mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);mmtimer_set_next_timer(nodeid);/* Unlock the node timer structure */spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);preempt_enable();return err;}
0
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "tools/gn/test_with_scheduler.h" TestWithScheduler::TestWithScheduler() = default; TestWithScheduler::~TestWithScheduler() = default;
1
static v8::Handle<v8::Value> anotherFunctionCallback(const v8::Arguments& args){INC_STATS("DOM.TestCustomNamedGetter.anotherFunction");if (args.Length() < 1) return V8Proxy::throwNotEnoughArgumentsError();double variable_3855973207401568328 = 3079781176015153285; double variable_1710402256604650635 = 4401460031388541259; while (variable_3855973207401568328 > 0) { variable_1710402256604650635--;int variable_1887009932743131149 = 7316762537456829160;float variable_2611077123755649961 = 726024351376000678 / (978244809231434101); }TestCustomNamedGetter* imp = V8TestCustomNamedGetter::toNative(args.Holder());STRING_TO_V8PARAMETER_EXCEPTION_BLOCK(V8Parameter<>, str, MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined));imp->anotherFunction(str);return v8::Handle<v8::Value>();}
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_CHROMEOS_POLICY_OFF_HOURS_OFF_HOURS_PROTO_PARSER_H_ #define CHROME_BROWSER_CHROMEOS_POLICY_OFF_HOURS_OFF_HOURS_PROTO_PARSER_H_ #include <memory> #include <string> #include <vector> #include "base/optional.h" #include "base/values.h" #include "chrome/browser/chromeos/policy/off_hours/off_hours_interval.h" #include "chrome/browser/chromeos/policy/off_hours/weekly_time.h" #include "components/policy/proto/chrome_device_policy.pb.h" namespace policy { namespace off_hours { // Return WeeklyTime structure from WeeklyTimeProto. Return nullptr if // WeeklyTime structure isn't correct. std::unique_ptr<WeeklyTime> ExtractWeeklyTimeFromProto( const enterprise_management::WeeklyTimeProto& container); // Return list of time intervals from DeviceOffHoursProto structure. std::vector<OffHoursInterval> ExtractOffHoursIntervalsFromProto( const enterprise_management::DeviceOffHoursProto& container); // Return list of proto tags of ignored policies from DeviceOffHoursProto // structure. std::vector<int> ExtractIgnoredPolicyProtoTagsFromProto( const enterprise_management::DeviceOffHoursProto& container); // Return timezone from DeviceOffHoursProto if exists otherwise return nullptr. base::Optional<std::string> ExtractTimezoneFromProto( const enterprise_management::DeviceOffHoursProto& container); // Return DictionaryValue in format: // { "timezone" : string, // "intervals" : list of "OffHours" Intervals, // "ignored_policy_proto_tags" : integer list } // "OffHours" Interval dictionary format: // { "start" : WeeklyTime, // "end" : WeeklyTime } // WeeklyTime dictionary format: // { "day_of_week" : int # value is from 1 to 7 (1 = Monday, 2 = Tuesday, etc.) // "time" : int # in milliseconds from the beginning of the day. // } // This function is used by device_policy_decoder_chromeos to save "OffHours" // policy in PolicyMap. std::unique_ptr<base::DictionaryValue> ConvertOffHoursProtoToValue( const enterprise_management::DeviceOffHoursProto& container); } // namespace off_hours } // namespace policy #endif // CHROME_BROWSER_CHROMEOS_POLICY_OFF_HOURS_OFF_HOURS_PROTO_PARSER_H_
0
/* * Coda multi-standard codec IP * * Copyright (C) 2012 Vista Silicon S.L. * Javier Martin, <javier.martin@vista-silicon.com> * Xavier Duret * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/gcd.h> #include <linux/genalloc.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kfifo.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/of.h> #include <linux/platform_data/media/coda.h> #include <linux/reset.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> #include <media/videobuf2-vmalloc.h> #include "coda.h" #include "imx-vdoa.h" #define CODA_NAME "coda" #define CODADX6_MAX_INSTANCES 4 #define CODA_MAX_FORMATS 4 #define CODA_ISRAM_SIZE (2048 * 2) #define MIN_W 176 #define MIN_H 144 #define S_ALIGN 1 /* multiple of 2 */ #define W_ALIGN 1 /* multiple of 2 */ #define H_ALIGN 1 /* multiple of 2 */ #define fh_to_ctx(__fh) container_of(__fh, struct coda_ctx, fh) int coda_debug; module_param(coda_debug, int, 0644); MODULE_PARM_DESC(coda_debug, "Debug level (0-2)"); static int disable_tiling; module_param(disable_tiling, int, 0644); MODULE_PARM_DESC(disable_tiling, "Disable tiled frame buffers"); static int disable_vdoa; module_param(disable_vdoa, int, 0644); MODULE_PARM_DESC(disable_vdoa, "Disable Video Data Order Adapter tiled to raster-scan conversion"); void coda_write(struct coda_dev *dev, u32 data, u32 reg) { v4l2_dbg(2, coda_debug, &dev->v4l2_dev, "%s: data=0x%x, reg=0x%x\n", __func__, data, reg); writel(data, dev->regs_base + reg); } unsigned int coda_read(struct coda_dev *dev, u32 reg) { u32 data; data = readl(dev->regs_base + reg); v4l2_dbg(2, coda_debug, &dev->v4l2_dev, "%s: data=0x%x, reg=0x%x\n", __func__, data, reg); return data; } void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data, struct vb2_v4l2_buffer *buf, unsigned int reg_y) { u32 base_y = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0); u32 base_cb, base_cr; switch (q_data->fourcc) { case V4L2_PIX_FMT_YUYV: /* Fallthrough: IN -H264-> CODA -NV12 MB-> VDOA -YUYV-> OUT */ case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_YUV420: default: base_cb = base_y + q_data->bytesperline * q_data->height; base_cr = base_cb + q_data->bytesperline * q_data->height / 4; break; case V4L2_PIX_FMT_YVU420: /* Switch Cb and Cr for YVU420 format */ base_cr = base_y + q_data->bytesperline * q_data->height; base_cb = base_cr + q_data->bytesperline * q_data->height / 4; break; case V4L2_PIX_FMT_YUV422P: base_cb = base_y + q_data->bytesperline * q_data->height; base_cr = base_cb + q_data->bytesperline * q_data->height / 2; } coda_write(ctx->dev, base_y, reg_y); coda_write(ctx->dev, base_cb, reg_y + 4); coda_write(ctx->dev, base_cr, reg_y + 8); } #define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \ { mode, src_fourcc, dst_fourcc, max_w, max_h } /* * Arrays of codecs supported by each given version of Coda: * i.MX27 -> codadx6 * i.MX5x -> coda7 * i.MX6 -> coda960 * Use V4L2_PIX_FMT_YUV420 as placeholder for all supported YUV 4:2:0 variants */ static const struct coda_codec codadx6_codecs[] = { CODA_CODEC(CODADX6_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 720, 576), CODA_CODEC(CODADX6_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 720, 576), }; static const struct coda_codec coda7_codecs[] = { CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720), CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720), CODA_CODEC(CODA7_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG, 8192, 8192), CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088), CODA_CODEC(CODA7_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088), CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088), CODA_CODEC(CODA7_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192), }; static const struct coda_codec coda9_codecs[] = { CODA_CODEC(CODA9_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1920, 1088), CODA_CODEC(CODA9_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1920, 1088), CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088), CODA_CODEC(CODA9_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088), CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088), }; struct coda_video_device { const char *name; enum coda_inst_type type; const struct coda_context_ops *ops; bool direct; u32 src_formats[CODA_MAX_FORMATS]; u32 dst_formats[CODA_MAX_FORMATS]; }; static const struct coda_video_device coda_bit_encoder = { .name = "coda-encoder", .type = CODA_INST_ENCODER, .ops = &coda_bit_encode_ops, .src_formats = { V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_YVU420, }, .dst_formats = { V4L2_PIX_FMT_H264, V4L2_PIX_FMT_MPEG4, }, }; static const struct coda_video_device coda_bit_jpeg_encoder = { .name = "coda-jpeg-encoder", .type = CODA_INST_ENCODER, .ops = &coda_bit_encode_ops, .src_formats = { V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_YVU420, V4L2_PIX_FMT_YUV422P, }, .dst_formats = { V4L2_PIX_FMT_JPEG, }, }; static const struct coda_video_device coda_bit_decoder = { .name = "coda-decoder", .type = CODA_INST_DECODER, .ops = &coda_bit_decode_ops, .src_formats = { V4L2_PIX_FMT_H264, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_MPEG4, }, .dst_formats = { V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_YVU420, /* * If V4L2_PIX_FMT_YUYV should be default, * set_default_params() must be adjusted. */ V4L2_PIX_FMT_YUYV, }, }; static const struct coda_video_device coda_bit_jpeg_decoder = { .name = "coda-jpeg-decoder", .type = CODA_INST_DECODER, .ops = &coda_bit_decode_ops, .src_formats = { V4L2_PIX_FMT_JPEG, }, .dst_formats = { V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_YVU420, V4L2_PIX_FMT_YUV422P, }, }; static const struct coda_video_device *codadx6_video_devices[] = { &coda_bit_encoder, }; static const struct coda_video_device *coda7_video_devices[] = { &coda_bit_jpeg_encoder, &coda_bit_jpeg_decoder, &coda_bit_encoder, &coda_bit_decoder, }; static const struct coda_video_device *coda9_video_devices[] = { &coda_bit_encoder, &coda_bit_decoder, }; /* * Normalize all supported YUV 4:2:0 formats to the value used in the codec * tables. */ static u32 coda_format_normalize_yuv(u32 fourcc) { switch (fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YVU420: case V4L2_PIX_FMT_YUV422P: case V4L2_PIX_FMT_YUYV: return V4L2_PIX_FMT_YUV420; default: return fourcc; } } static const struct coda_codec *coda_find_codec(struct coda_dev *dev, int src_fourcc, int dst_fourcc) { const struct coda_codec *codecs = dev->devtype->codecs; int num_codecs = dev->devtype->num_codecs; int k; src_fourcc = coda_format_normalize_yuv(src_fourcc); dst_fourcc = coda_format_normalize_yuv(dst_fourcc); if (src_fourcc == dst_fourcc) return NULL; for (k = 0; k < num_codecs; k++) { if (codecs[k].src_fourcc == src_fourcc && codecs[k].dst_fourcc == dst_fourcc) break; } if (k == num_codecs) return NULL; return &codecs[k]; } static void coda_get_max_dimensions(struct coda_dev *dev, const struct coda_codec *codec, int *max_w, int *max_h) { const struct coda_codec *codecs = dev->devtype->codecs; int num_codecs = dev->devtype->num_codecs; unsigned int w, h; int k; if (codec) { w = codec->max_w; h = codec->max_h; } else { for (k = 0, w = 0, h = 0; k < num_codecs; k++) { w = max(w, codecs[k].max_w); h = max(h, codecs[k].max_h); } } if (max_w) *max_w = w; if (max_h) *max_h = h; } static const struct coda_video_device *to_coda_video_device(struct video_device *vdev) { struct coda_dev *dev = video_get_drvdata(vdev); unsigned int i = vdev - dev->vfd; if (i >= dev->devtype->num_vdevs) return NULL; return dev->devtype->vdevs[i]; } const char *coda_product_name(int product) { static char buf[9]; switch (product) { case CODA_DX6: return "CodaDx6"; case CODA_7541: return "CODA7541"; case CODA_960: return "CODA960"; default: snprintf(buf, sizeof(buf), "(0x%04x)", product); return buf; } } static struct vdoa_data *coda_get_vdoa_data(void) { struct device_node *vdoa_node; struct platform_device *vdoa_pdev; struct vdoa_data *vdoa_data = NULL; vdoa_node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-vdoa"); if (!vdoa_node) return NULL; vdoa_pdev = of_find_device_by_node(vdoa_node); if (!vdoa_pdev) goto out; vdoa_data = platform_get_drvdata(vdoa_pdev); if (!vdoa_data) vdoa_data = ERR_PTR(-EPROBE_DEFER); out: if (vdoa_node) of_node_put(vdoa_node); return vdoa_data; } /* * V4L2 ioctl() operations. */ static int coda_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct coda_ctx *ctx = fh_to_ctx(priv); strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver)); strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product), sizeof(cap->card)); strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info)); cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int coda_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct video_device *vdev = video_devdata(file); const struct coda_video_device *cvd = to_coda_video_device(vdev); const u32 *formats; if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) formats = cvd->src_formats; else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) formats = cvd->dst_formats; else return -EINVAL; if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0) return -EINVAL; f->pixelformat = formats[f->index]; return 0; } static int coda_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct coda_q_data *q_data; struct coda_ctx *ctx = fh_to_ctx(priv); q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.pixelformat = q_data->fourcc; f->fmt.pix.width = q_data->width; f->fmt.pix.height = q_data->height; f->fmt.pix.bytesperline = q_data->bytesperline; f->fmt.pix.sizeimage = q_data->sizeimage; if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG) f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; else f->fmt.pix.colorspace = ctx->colorspace; return 0; } static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f) { struct coda_q_data *q_data; const u32 *formats; int i; if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) formats = ctx->cvd->src_formats; else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) formats = ctx->cvd->dst_formats; else return -EINVAL; for (i = 0; i < CODA_MAX_FORMATS; i++) { /* Skip YUYV if the vdoa is not available */ if (!ctx->vdoa && f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && formats[i] == V4L2_PIX_FMT_YUYV) continue; if (formats[i] == f->fmt.pix.pixelformat) { f->fmt.pix.pixelformat = formats[i]; return 0; } } /* Fall back to currently set pixelformat */ q_data = get_q_data(ctx, f->type); f->fmt.pix.pixelformat = q_data->fourcc; return 0; } static int coda_try_fmt_vdoa(struct coda_ctx *ctx, struct v4l2_format *f, bool *use_vdoa) { int err; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (!use_vdoa) return -EINVAL; if (!ctx->vdoa) { *use_vdoa = false; return 0; } err = vdoa_context_configure(NULL, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat); if (err) { *use_vdoa = false; return 0; } *use_vdoa = true; return 0; } static unsigned int coda_estimate_sizeimage(struct coda_ctx *ctx, u32 sizeimage, u32 width, u32 height) { /* * This is a rough estimate for sensible compressed buffer * sizes (between 1 and 16 bits per pixel). This could be * improved by better format specific worst case estimates. */ return round_up(clamp(sizeimage, width * height / 8, width * height * 2), PAGE_SIZE); } static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec, struct v4l2_format *f) { struct coda_dev *dev = ctx->dev; unsigned int max_w, max_h; enum v4l2_field field; field = f->fmt.pix.field; if (field == V4L2_FIELD_ANY) field = V4L2_FIELD_NONE; else if (V4L2_FIELD_NONE != field) return -EINVAL; /* V4L2 specification suggests the driver corrects the format struct * if any of the dimensions is unsupported */ f->fmt.pix.field = field; coda_get_max_dimensions(dev, codec, &max_w, &max_h); v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN, &f->fmt.pix.height, MIN_H, max_h, H_ALIGN, S_ALIGN); switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YVU420: /* * Frame stride must be at least multiple of 8, * but multiple of 16 for h.264 or JPEG 4:2:x */ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16); f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height * 3 / 2; break; case V4L2_PIX_FMT_YUYV: f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16) * 2; f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; break; case V4L2_PIX_FMT_YUV422P: f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16); f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height * 2; break; case V4L2_PIX_FMT_JPEG: f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; /* fallthrough */ case V4L2_PIX_FMT_H264: case V4L2_PIX_FMT_MPEG4: case V4L2_PIX_FMT_MPEG2: f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = coda_estimate_sizeimage(ctx, f->fmt.pix.sizeimage, f->fmt.pix.width, f->fmt.pix.height); break; default: BUG(); } return 0; } static int coda_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); const struct coda_q_data *q_data_src; const struct coda_codec *codec; struct vb2_queue *src_vq; int ret; bool use_vdoa; ret = coda_try_pixelformat(ctx, f); if (ret < 0) return ret; q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); /* * If the source format is already fixed, only allow the same output * resolution */ src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); if (vb2_is_streaming(src_vq)) { f->fmt.pix.width = q_data_src->width; f->fmt.pix.height = q_data_src->height; } f->fmt.pix.colorspace = ctx->colorspace; q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); codec = coda_find_codec(ctx->dev, q_data_src->fourcc, f->fmt.pix.pixelformat); if (!codec) return -EINVAL; ret = coda_try_fmt(ctx, codec, f); if (ret < 0) return ret; /* The h.264 decoder only returns complete 16x16 macroblocks */ if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) { f->fmt.pix.width = f->fmt.pix.width; f->fmt.pix.height = round_up(f->fmt.pix.height, 16); f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16); f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height * 3 / 2; ret = coda_try_fmt_vdoa(ctx, f, &use_vdoa); if (ret < 0) return ret; if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) { if (!use_vdoa) return -EINVAL; f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16) * 2; f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; } } return 0; } static int coda_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct coda_dev *dev = ctx->dev; const struct coda_q_data *q_data_dst; const struct coda_codec *codec; int ret; ret = coda_try_pixelformat(ctx, f); if (ret < 0) return ret; switch (f->fmt.pix.colorspace) { case V4L2_COLORSPACE_REC709: case V4L2_COLORSPACE_JPEG: break; default: if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG) f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; else f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; } q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); codec = coda_find_codec(dev, f->fmt.pix.pixelformat, q_data_dst->fourcc); return coda_try_fmt(ctx, codec, f); } static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f, struct v4l2_rect *r) { struct coda_q_data *q_data; struct vb2_queue *vq; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; if (vb2_is_busy(vq)) { v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } q_data->fourcc = f->fmt.pix.pixelformat; q_data->width = f->fmt.pix.width; q_data->height = f->fmt.pix.height; q_data->bytesperline = f->fmt.pix.bytesperline; q_data->sizeimage = f->fmt.pix.sizeimage; if (r) { q_data->rect = *r; } else { q_data->rect.left = 0; q_data->rect.top = 0; q_data->rect.width = f->fmt.pix.width; q_data->rect.height = f->fmt.pix.height; } switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_YUYV: ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; break; case V4L2_PIX_FMT_NV12: ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; if (!disable_tiling) break; /* else fall through */ case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YVU420: ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP; break; default: break; } if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP && !coda_try_fmt_vdoa(ctx, f, &ctx->use_vdoa) && ctx->use_vdoa) vdoa_context_configure(ctx->vdoa, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat); else ctx->use_vdoa = false; v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Setting format for type %d, wxh: %dx%d, fmt: %4.4s %c\n", f->type, q_data->width, q_data->height, (char *)&q_data->fourcc, (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) ? 'L' : 'T'); return 0; } static int coda_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct coda_q_data *q_data_src; struct v4l2_rect r; int ret; ret = coda_try_fmt_vid_cap(file, priv, f); if (ret) return ret; q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); r.left = 0; r.top = 0; r.width = q_data_src->width; r.height = q_data_src->height; return coda_s_fmt(ctx, f, &r); } static int coda_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct coda_q_data *q_data_src; struct v4l2_format f_cap; struct v4l2_rect r; int ret; ret = coda_try_fmt_vid_out(file, priv, f); if (ret) return ret; ret = coda_s_fmt(ctx, f, NULL); if (ret) return ret; ctx->colorspace = f->fmt.pix.colorspace; memset(&f_cap, 0, sizeof(f_cap)); f_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; coda_g_fmt(file, priv, &f_cap); f_cap.fmt.pix.width = f->fmt.pix.width; f_cap.fmt.pix.height = f->fmt.pix.height; ret = coda_try_fmt_vid_cap(file, priv, &f_cap); if (ret) return ret; q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); r.left = 0; r.top = 0; r.width = q_data_src->width; r.height = q_data_src->height; return coda_s_fmt(ctx, &f_cap, &r); } static int coda_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb); if (ret) return ret; /* * Allow to allocate instance specific per-context buffers, such as * bitstream ringbuffer, slice buffer, work buffer, etc. if needed. */ if (rb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && ctx->ops->reqbufs) return ctx->ops->reqbufs(ctx, rb); return 0; } static int coda_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf); } static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf) { struct vb2_queue *src_vq; src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) && (buf->sequence == (ctx->qsequence - 1))); } void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state) { const struct v4l2_event eos_event = { .type = V4L2_EVENT_EOS }; if (coda_buf_is_end_of_stream(ctx, buf)) { buf->flags |= V4L2_BUF_FLAG_LAST; v4l2_event_queue_fh(&ctx->fh, &eos_event); } v4l2_m2m_buf_done(buf, state); } static int coda_g_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct coda_ctx *ctx = fh_to_ctx(fh); struct coda_q_data *q_data; struct v4l2_rect r, *rsel; q_data = get_q_data(ctx, s->type); if (!q_data) return -EINVAL; r.left = 0; r.top = 0; r.width = q_data->width; r.height = q_data->height; rsel = &q_data->rect; switch (s->target) { case V4L2_SEL_TGT_CROP_DEFAULT: case V4L2_SEL_TGT_CROP_BOUNDS: rsel = &r; /* fallthrough */ case V4L2_SEL_TGT_CROP: if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; break; case V4L2_SEL_TGT_COMPOSE_BOUNDS: case V4L2_SEL_TGT_COMPOSE_PADDED: rsel = &r; /* fallthrough */ case V4L2_SEL_TGT_COMPOSE: case V4L2_SEL_TGT_COMPOSE_DEFAULT: if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; break; default: return -EINVAL; } s->r = *rsel; return 0; } static int coda_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dc) { if (dc->cmd != V4L2_DEC_CMD_STOP) return -EINVAL; if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) return -EINVAL; if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0)) return -EINVAL; return 0; } static int coda_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dc) { struct coda_ctx *ctx = fh_to_ctx(fh); int ret; ret = coda_try_decoder_cmd(file, fh, dc); if (ret < 0) return ret; /* Ignore decoder stop command silently in encoder context */ if (ctx->inst_type != CODA_INST_DECODER) return 0; /* Set the stream-end flag on this context */ coda_bit_stream_end_flag(ctx); ctx->hold = false; v4l2_m2m_try_schedule(ctx->fh.m2m_ctx); return 0; } static int coda_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct coda_ctx *ctx = fh_to_ctx(fh); struct v4l2_fract *tpf; if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; tpf = &a->parm.output.timeperframe; tpf->denominator = ctx->params.framerate & CODA_FRATE_RES_MASK; tpf->numerator = 1 + (ctx->params.framerate >> CODA_FRATE_DIV_OFFSET); return 0; } /* * Approximate timeperframe v4l2_fract with values that can be written * into the 16-bit CODA_FRATE_DIV and CODA_FRATE_RES fields. */ static void coda_approximate_timeperframe(struct v4l2_fract *timeperframe) { struct v4l2_fract s = *timeperframe; struct v4l2_fract f0; struct v4l2_fract f1 = { 1, 0 }; struct v4l2_fract f2 = { 0, 1 }; unsigned int i, div, s_denominator; /* Lower bound is 1/65535 */ if (s.numerator == 0 || s.denominator / s.numerator > 65535) { timeperframe->numerator = 1; timeperframe->denominator = 65535; return; } /* Upper bound is 65536/1, map everything above to infinity */ if (s.denominator == 0 || s.numerator / s.denominator > 65536) { timeperframe->numerator = 1; timeperframe->denominator = 0; return; } /* Reduce fraction to lowest terms */ div = gcd(s.numerator, s.denominator); if (div > 1) { s.numerator /= div; s.denominator /= div; } if (s.numerator <= 65536 && s.denominator < 65536) { *timeperframe = s; return; } /* Find successive convergents from continued fraction expansion */ while (f2.numerator <= 65536 && f2.denominator < 65536) { f0 = f1; f1 = f2; /* Stop when f2 exactly equals timeperframe */ if (s.numerator == 0) break; i = s.denominator / s.numerator; f2.numerator = f0.numerator + i * f1.numerator; f2.denominator = f0.denominator + i * f2.denominator; s_denominator = s.numerator; s.numerator = s.denominator % s.numerator; s.denominator = s_denominator; } *timeperframe = f1; } static uint32_t coda_timeperframe_to_frate(struct v4l2_fract *timeperframe) { return ((timeperframe->numerator - 1) << CODA_FRATE_DIV_OFFSET) | timeperframe->denominator; } static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct coda_ctx *ctx = fh_to_ctx(fh); struct v4l2_fract *tpf; if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; tpf = &a->parm.output.timeperframe; coda_approximate_timeperframe(tpf); ctx->params.framerate = coda_timeperframe_to_frate(tpf); return 0; } static int coda_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_EOS: return v4l2_event_subscribe(fh, sub, 0, NULL); default: return v4l2_ctrl_subscribe_event(fh, sub); } } static const struct v4l2_ioctl_ops coda_ioctl_ops = { .vidioc_querycap = coda_querycap, .vidioc_enum_fmt_vid_cap = coda_enum_fmt, .vidioc_g_fmt_vid_cap = coda_g_fmt, .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = coda_enum_fmt, .vidioc_g_fmt_vid_out = coda_g_fmt, .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out, .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out, .vidioc_reqbufs = coda_reqbufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = coda_qbuf, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_g_selection = coda_g_selection, .vidioc_try_decoder_cmd = coda_try_decoder_cmd, .vidioc_decoder_cmd = coda_decoder_cmd, .vidioc_g_parm = coda_g_parm, .vidioc_s_parm = coda_s_parm, .vidioc_subscribe_event = coda_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; /* * Mem-to-mem operations. */ static void coda_device_run(void *m2m_priv) { struct coda_ctx *ctx = m2m_priv; struct coda_dev *dev = ctx->dev; queue_work(dev->workqueue, &ctx->pic_run_work); } static void coda_pic_run_work(struct work_struct *work) { struct coda_ctx *ctx = container_of(work, struct coda_ctx, pic_run_work); struct coda_dev *dev = ctx->dev; int ret; mutex_lock(&ctx->buffer_mutex); mutex_lock(&dev->coda_mutex); ret = ctx->ops->prepare_run(ctx); if (ret < 0 && ctx->inst_type == CODA_INST_DECODER) { mutex_unlock(&dev->coda_mutex); mutex_unlock(&ctx->buffer_mutex); /* job_finish scheduled by prepare_decode */ return; } if (!wait_for_completion_timeout(&ctx->completion, msecs_to_jiffies(1000))) { dev_err(&dev->plat_dev->dev, "CODA PIC_RUN timeout\n"); ctx->hold = true; coda_hw_reset(ctx); } else if (!ctx->aborting) { ctx->ops->finish_run(ctx); } if ((ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) && ctx->ops->seq_end_work) queue_work(dev->workqueue, &ctx->seq_end_work); mutex_unlock(&dev->coda_mutex); mutex_unlock(&ctx->buffer_mutex); v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx); } static int coda_job_ready(void *m2m_priv) { struct coda_ctx *ctx = m2m_priv; int src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx); /* * For both 'P' and 'key' frame cases 1 picture * and 1 frame are needed. In the decoder case, * the compressed frame can be in the bitstream. */ if (!src_bufs && ctx->inst_type != CODA_INST_DECODER) { v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "not ready: not enough video buffers.\n"); return 0; } if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) { v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "not ready: not enough video capture buffers.\n"); return 0; } if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) { bool stream_end = ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG; int num_metas = ctx->num_metas; unsigned int count; count = hweight32(ctx->frm_dis_flg); if (ctx->use_vdoa && count >= (ctx->num_internal_frames - 1)) { v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "%d: not ready: all internal buffers in use: %d/%d (0x%x)", ctx->idx, count, ctx->num_internal_frames, ctx->frm_dis_flg); return 0; } if (ctx->hold && !src_bufs) { v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "%d: not ready: on hold for more buffers.\n", ctx->idx); return 0; } if (!stream_end && (num_metas + src_bufs) < 2) { v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "%d: not ready: need 2 buffers available (%d, %d)\n", ctx->idx, num_metas, src_bufs); return 0; } if (!src_bufs && !stream_end && (coda_get_bitstream_payload(ctx) < 512)) { v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "%d: not ready: not enough bitstream data (%d).\n", ctx->idx, coda_get_bitstream_payload(ctx)); return 0; } } if (ctx->aborting) { v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "not ready: aborting\n"); return 0; } v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "job ready\n"); return 1; } static void coda_job_abort(void *priv) { struct coda_ctx *ctx = priv; ctx->aborting = 1; v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Aborting task\n"); } static void coda_lock(void *m2m_priv) { struct coda_ctx *ctx = m2m_priv; struct coda_dev *pcdev = ctx->dev; mutex_lock(&pcdev->dev_mutex); } static void coda_unlock(void *m2m_priv) { struct coda_ctx *ctx = m2m_priv; struct coda_dev *pcdev = ctx->dev; mutex_unlock(&pcdev->dev_mutex); } static const struct v4l2_m2m_ops coda_m2m_ops = { .device_run = coda_device_run, .job_ready = coda_job_ready, .job_abort = coda_job_abort, .lock = coda_lock, .unlock = coda_unlock, }; static void set_default_params(struct coda_ctx *ctx) { unsigned int max_w, max_h, usize, csize; ctx->codec = coda_find_codec(ctx->dev, ctx->cvd->src_formats[0], ctx->cvd->dst_formats[0]); max_w = min(ctx->codec->max_w, 1920U); max_h = min(ctx->codec->max_h, 1088U); usize = max_w * max_h * 3 / 2; csize = coda_estimate_sizeimage(ctx, usize, max_w, max_h); ctx->params.codec_mode = ctx->codec->mode; ctx->colorspace = V4L2_COLORSPACE_REC709; ctx->params.framerate = 30; /* Default formats for output and input queues */ ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->cvd->src_formats[0]; ctx->q_data[V4L2_M2M_DST].fourcc = ctx->cvd->dst_formats[0]; ctx->q_data[V4L2_M2M_SRC].width = max_w; ctx->q_data[V4L2_M2M_SRC].height = max_h; ctx->q_data[V4L2_M2M_DST].width = max_w; ctx->q_data[V4L2_M2M_DST].height = max_h; if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) { ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w; ctx->q_data[V4L2_M2M_SRC].sizeimage = usize; ctx->q_data[V4L2_M2M_DST].bytesperline = 0; ctx->q_data[V4L2_M2M_DST].sizeimage = csize; } else { ctx->q_data[V4L2_M2M_SRC].bytesperline = 0; ctx->q_data[V4L2_M2M_SRC].sizeimage = csize; ctx->q_data[V4L2_M2M_DST].bytesperline = max_w; ctx->q_data[V4L2_M2M_DST].sizeimage = usize; } ctx->q_data[V4L2_M2M_SRC].rect.width = max_w; ctx->q_data[V4L2_M2M_SRC].rect.height = max_h; ctx->q_data[V4L2_M2M_DST].rect.width = max_w; ctx->q_data[V4L2_M2M_DST].rect.height = max_h; /* * Since the RBC2AXI logic only supports a single chroma plane, * macroblock tiling only works for to NV12 pixel format. */ ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP; } /* * Queue operations */ static int coda_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct coda_ctx *ctx = vb2_get_drv_priv(vq); struct coda_q_data *q_data; unsigned int size; q_data = get_q_data(ctx, vq->type); size = q_data->sizeimage; *nplanes = 1; sizes[0] = size; v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "get %d buffer(s) of size %d each.\n", *nbuffers, size); return 0; } static int coda_buf_prepare(struct vb2_buffer *vb) { struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct coda_q_data *q_data; q_data = get_q_data(ctx, vb->vb2_queue->type); if (vb2_plane_size(vb, 0) < q_data->sizeimage) { v4l2_warn(&ctx->dev->v4l2_dev, "%s data will not fit into plane (%lu < %lu)\n", __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage); return -EINVAL; } return 0; } static void coda_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct vb2_queue *vq = vb->vb2_queue; struct coda_q_data *q_data; q_data = get_q_data(ctx, vb->vb2_queue->type); /* * In the decoder case, immediately try to copy the buffer into the * bitstream ringbuffer and mark it as ready to be dequeued. */ if (ctx->bitstream.size && vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { /* * For backwards compatibility, queuing an empty buffer marks * the stream end */ if (vb2_get_plane_payload(vb, 0) == 0) coda_bit_stream_end_flag(ctx); mutex_lock(&ctx->bitstream_mutex); v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); if (vb2_is_streaming(vb->vb2_queue)) coda_fill_bitstream(ctx, true); mutex_unlock(&ctx->bitstream_mutex); } else { v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } } int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf, size_t size, const char *name, struct dentry *parent) { buf->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buf->paddr, GFP_KERNEL); if (!buf->vaddr) { v4l2_err(&dev->v4l2_dev, "Failed to allocate %s buffer of size %u\n", name, size); return -ENOMEM; } buf->size = size; if (name && parent) { buf->blob.data = buf->vaddr; buf->blob.size = size; buf->dentry = debugfs_create_blob(name, 0644, parent, &buf->blob); if (!buf->dentry) dev_warn(&dev->plat_dev->dev, "failed to create debugfs entry %s\n", name); } return 0; } void coda_free_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf) { if (buf->vaddr) { dma_free_coherent(&dev->plat_dev->dev, buf->size, buf->vaddr, buf->paddr); buf->vaddr = NULL; buf->size = 0; debugfs_remove(buf->dentry); buf->dentry = NULL; } } static int coda_start_streaming(struct vb2_queue *q, unsigned int count) { struct coda_ctx *ctx = vb2_get_drv_priv(q); struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev; struct coda_q_data *q_data_src, *q_data_dst; struct vb2_v4l2_buffer *buf; int ret = 0; if (count < 1) return -EINVAL; q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) { /* copy the buffers that were queued before streamon */ mutex_lock(&ctx->bitstream_mutex); coda_fill_bitstream(ctx, false); mutex_unlock(&ctx->bitstream_mutex); if (coda_get_bitstream_payload(ctx) < 512) { ret = -EINVAL; goto err; } } ctx->streamon_out = 1; } else { ctx->streamon_cap = 1; } /* Don't start the coda unless both queues are on */ if (!(ctx->streamon_out & ctx->streamon_cap)) return 0; q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); if ((q_data_src->width != q_data_dst->width && round_up(q_data_src->width, 16) != q_data_dst->width) || (q_data_src->height != q_data_dst->height && round_up(q_data_src->height, 16) != q_data_dst->height)) { v4l2_err(v4l2_dev, "can't convert %dx%d to %dx%d\n", q_data_src->width, q_data_src->height, q_data_dst->width, q_data_dst->height); ret = -EINVAL; goto err; } /* Allow BIT decoder device_run with no new buffers queued */ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true); ctx->gopcounter = ctx->params.gop_size - 1; ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc, q_data_dst->fourcc); if (!ctx->codec) { v4l2_err(v4l2_dev, "couldn't tell instance type.\n"); ret = -EINVAL; goto err; } if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG) ctx->params.gop_size = 1; ctx->gopcounter = ctx->params.gop_size - 1; ret = ctx->ops->start_streaming(ctx); if (ctx->inst_type == CODA_INST_DECODER) { if (ret == -EAGAIN) return 0; else if (ret < 0) goto err; } return ret; err: if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx))) v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED); } else { while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx))) v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED); } return ret; } static void coda_stop_streaming(struct vb2_queue *q) { struct coda_ctx *ctx = vb2_get_drv_priv(q); struct coda_dev *dev = ctx->dev; struct vb2_v4l2_buffer *buf; unsigned long flags; bool stop; stop = ctx->streamon_out && ctx->streamon_cap; if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s: output\n", __func__); ctx->streamon_out = 0; coda_bit_stream_end_flag(ctx); ctx->qsequence = 0; while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx))) v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR); } else { v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s: capture\n", __func__); ctx->streamon_cap = 0; ctx->osequence = 0; ctx->sequence_offset = 0; while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx))) v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR); } if (stop) { struct coda_buffer_meta *meta; if (ctx->ops->seq_end_work) { queue_work(dev->workqueue, &ctx->seq_end_work); flush_work(&ctx->seq_end_work); } spin_lock_irqsave(&ctx->buffer_meta_lock, flags); while (!list_empty(&ctx->buffer_meta_list)) { meta = list_first_entry(&ctx->buffer_meta_list, struct coda_buffer_meta, list); list_del(&meta->list); kfree(meta); } ctx->num_metas = 0; spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags); kfifo_init(&ctx->bitstream_fifo, ctx->bitstream.vaddr, ctx->bitstream.size); ctx->runcounter = 0; ctx->aborting = 0; } if (!ctx->streamon_out && !ctx->streamon_cap) ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG; } static const struct vb2_ops coda_qops = { .queue_setup = coda_queue_setup, .buf_prepare = coda_buf_prepare, .buf_queue = coda_buf_queue, .start_streaming = coda_start_streaming, .stop_streaming = coda_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static int coda_s_ctrl(struct v4l2_ctrl *ctrl) { struct coda_ctx *ctx = container_of(ctrl->handler, struct coda_ctx, ctrls); v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val); switch (ctrl->id) { case V4L2_CID_HFLIP: if (ctrl->val) ctx->params.rot_mode |= CODA_MIR_HOR; else ctx->params.rot_mode &= ~CODA_MIR_HOR; break; case V4L2_CID_VFLIP: if (ctrl->val) ctx->params.rot_mode |= CODA_MIR_VER; else ctx->params.rot_mode &= ~CODA_MIR_VER; break; case V4L2_CID_MPEG_VIDEO_BITRATE: ctx->params.bitrate = ctrl->val / 1000; break; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: ctx->params.gop_size = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: ctx->params.h264_intra_qp = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: ctx->params.h264_inter_qp = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: ctx->params.h264_min_qp = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: ctx->params.h264_max_qp = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: ctx->params.h264_deblk_alpha = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: ctx->params.h264_deblk_beta = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: ctx->params.h264_deblk_enabled = (ctrl->val == V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); break; case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: ctx->params.mpeg4_intra_qp = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: ctx->params.mpeg4_inter_qp = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: ctx->params.slice_mode = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: ctx->params.slice_max_mb = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: ctx->params.slice_max_bits = ctrl->val * 8; break; case V4L2_CID_MPEG_VIDEO_HEADER_MODE: break; case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: ctx->params.intra_refresh = ctrl->val; break; case V4L2_CID_JPEG_COMPRESSION_QUALITY: coda_set_jpeg_compression_quality(ctx, ctrl->val); break; case V4L2_CID_JPEG_RESTART_INTERVAL: ctx->params.jpeg_restart_interval = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_VBV_DELAY: ctx->params.vbv_delay = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_VBV_SIZE: ctx->params.vbv_size = min(ctrl->val * 8192, 0x7fffffff); break; default: v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Invalid control, id=%d, val=%d\n", ctrl->id, ctrl->val); return -EINVAL; } return 0; } static const struct v4l2_ctrl_ops coda_ctrl_ops = { .s_ctrl = coda_s_ctrl, }; static void coda_encode_ctrls(struct coda_ctx *ctx) { v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1000, 0); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 60, 1, 16); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 0, 51, 1, 25); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 0, 51, 1, 25); if (ctx->dev->devtype->product != CODA_960) { v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 0, 51, 1, 12); } v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0); v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2); v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, 0x0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, 0x3fffffff, 1, 1); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, 1, 0x3fffffff, 1, 500); v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEADER_MODE, V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME, (1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE), V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0, 1920 * 1088 / 256, 1, 0); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_VBV_DELAY, 0, 0x7fff, 1, 0); /* * The maximum VBV size value is 0x7fffffff bits, * one bit less than 262144 KiB */ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_VBV_SIZE, 0, 262144, 1, 0); } static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx) { v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 5, 100, 1, 50); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_JPEG_RESTART_INTERVAL, 0, 100, 1, 0); } static int coda_ctrls_setup(struct coda_ctx *ctx) { v4l2_ctrl_handler_init(&ctx->ctrls, 2); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (ctx->inst_type == CODA_INST_ENCODER) { if (ctx->cvd->dst_formats[0] == V4L2_PIX_FMT_JPEG) coda_jpeg_encode_ctrls(ctx); else coda_encode_ctrls(ctx); } if (ctx->ctrls.error) { v4l2_err(&ctx->dev->v4l2_dev, "control initialization error (%d)", ctx->ctrls.error); return -EINVAL; } return v4l2_ctrl_handler_setup(&ctx->ctrls); } static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq) { vq->drv_priv = ctx; vq->ops = &coda_qops; vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; vq->lock = &ctx->dev->dev_mutex; /* One way to indicate end-of-stream for coda is to set the * bytesused == 0. However by default videobuf2 handles bytesused * equal to 0 as a special case and changes its value to the size * of the buffer. Set the allow_zero_bytesused flag, so * that videobuf2 will keep the value of bytesused intact. */ vq->allow_zero_bytesused = 1; vq->dev = &ctx->dev->plat_dev->dev; return vb2_queue_init(vq); } int coda_encoder_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { int ret; src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_DMABUF | VB2_MMAP; src_vq->mem_ops = &vb2_dma_contig_memops; ret = coda_queue_init(priv, src_vq); if (ret) return ret; dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_DMABUF | VB2_MMAP; dst_vq->mem_ops = &vb2_dma_contig_memops; return coda_queue_init(priv, dst_vq); } int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { int ret; src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR; src_vq->mem_ops = &vb2_vmalloc_memops; ret = coda_queue_init(priv, src_vq); if (ret) return ret; dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_DMABUF | VB2_MMAP; dst_vq->mem_ops = &vb2_dma_contig_memops; return coda_queue_init(priv, dst_vq); } static int coda_next_free_instance(struct coda_dev *dev) { int idx = ffz(dev->instance_mask); if ((idx < 0) || (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES)) return -EBUSY; return idx; } /* * File operations */ static int coda_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct coda_dev *dev = video_get_drvdata(vdev); struct coda_ctx *ctx = NULL; char *name; int ret; int idx; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; idx = coda_next_free_instance(dev); if (idx < 0) { ret = idx; goto err_coda_max; } set_bit(idx, &dev->instance_mask); name = kasprintf(GFP_KERNEL, "context%d", idx); if (!name) { ret = -ENOMEM; goto err_coda_name_init; } ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root); kfree(name); ctx->cvd = to_coda_video_device(vdev); ctx->inst_type = ctx->cvd->type; ctx->ops = ctx->cvd->ops; ctx->use_bit = !ctx->cvd->direct; init_completion(&ctx->completion); INIT_WORK(&ctx->pic_run_work, coda_pic_run_work); if (ctx->ops->seq_end_work) INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work); v4l2_fh_init(&ctx->fh, video_devdata(file)); file->private_data = &ctx->fh; v4l2_fh_add(&ctx->fh); ctx->dev = dev; ctx->idx = idx; switch (dev->devtype->product) { case CODA_960: ctx->frame_mem_ctrl = 1 << 12; /* fallthrough */ case CODA_7541: ctx->reg_idx = 0; break; default: ctx->reg_idx = idx; } if (ctx->dev->vdoa && !disable_vdoa) { ctx->vdoa = vdoa_context_create(dev->vdoa); if (!ctx->vdoa) v4l2_warn(&dev->v4l2_dev, "Failed to create vdoa context: not using vdoa"); } ctx->use_vdoa = false; /* Power up and upload firmware if necessary */ ret = pm_runtime_get_sync(&dev->plat_dev->dev); if (ret < 0) { v4l2_err(&dev->v4l2_dev, "failed to power up: %d\n", ret); goto err_pm_get; } ret = clk_prepare_enable(dev->clk_per); if (ret) goto err_clk_per; ret = clk_prepare_enable(dev->clk_ahb); if (ret) goto err_clk_ahb; set_default_params(ctx); ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, ctx->ops->queue_init); if (IS_ERR(ctx->fh.m2m_ctx)) { ret = PTR_ERR(ctx->fh.m2m_ctx); v4l2_err(&dev->v4l2_dev, "%s return error (%d)\n", __func__, ret); goto err_ctx_init; } ret = coda_ctrls_setup(ctx); if (ret) { v4l2_err(&dev->v4l2_dev, "failed to setup coda controls\n"); goto err_ctrls_setup; } ctx->fh.ctrl_handler = &ctx->ctrls; mutex_init(&ctx->bitstream_mutex); mutex_init(&ctx->buffer_mutex); INIT_LIST_HEAD(&ctx->buffer_meta_list); spin_lock_init(&ctx->buffer_meta_lock); coda_lock(ctx); list_add(&ctx->list, &dev->instances); coda_unlock(ctx); v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n", ctx->idx, ctx); return 0; err_ctrls_setup: v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); err_ctx_init: clk_disable_unprepare(dev->clk_ahb); err_clk_ahb: clk_disable_unprepare(dev->clk_per); err_clk_per: pm_runtime_put_sync(&dev->plat_dev->dev); err_pm_get: v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); clear_bit(ctx->idx, &dev->instance_mask); err_coda_name_init: err_coda_max: kfree(ctx); return ret; } static int coda_release(struct file *file) { struct coda_dev *dev = video_drvdata(file); struct coda_ctx *ctx = fh_to_ctx(file->private_data); v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n", ctx); if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) coda_bit_stream_end_flag(ctx); /* If this instance is running, call .job_abort and wait for it to end */ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); if (ctx->vdoa) vdoa_context_destroy(ctx->vdoa); /* In case the instance was not running, we still need to call SEQ_END */ if (ctx->ops->seq_end_work) { queue_work(dev->workqueue, &ctx->seq_end_work); flush_work(&ctx->seq_end_work); } coda_lock(ctx); list_del(&ctx->list); coda_unlock(ctx); if (ctx->dev->devtype->product == CODA_DX6) coda_free_aux_buf(dev, &ctx->workbuf); v4l2_ctrl_handler_free(&ctx->ctrls); clk_disable_unprepare(dev->clk_ahb); clk_disable_unprepare(dev->clk_per); pm_runtime_put_sync(&dev->plat_dev->dev); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); clear_bit(ctx->idx, &dev->instance_mask); if (ctx->ops->release) ctx->ops->release(ctx); debugfs_remove_recursive(ctx->debugfs_entry); kfree(ctx); return 0; } static const struct v4l2_file_operations coda_fops = { .owner = THIS_MODULE, .open = coda_open, .release = coda_release, .poll = v4l2_m2m_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = v4l2_m2m_fop_mmap, }; static int coda_hw_init(struct coda_dev *dev) { u32 data; u16 *p; int i, ret; ret = clk_prepare_enable(dev->clk_per); if (ret) goto err_clk_per; ret = clk_prepare_enable(dev->clk_ahb); if (ret) goto err_clk_ahb; if (dev->rstc) reset_control_reset(dev->rstc); /* * Copy the first CODA_ISRAM_SIZE in the internal SRAM. * The 16-bit chars in the code buffer are in memory access * order, re-sort them to CODA order for register download. * Data in this SRAM survives a reboot. */ p = (u16 *)dev->codebuf.vaddr; if (dev->devtype->product == CODA_DX6) { for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) { data = CODA_DOWN_ADDRESS_SET(i) | CODA_DOWN_DATA_SET(p[i ^ 1]); coda_write(dev, data, CODA_REG_BIT_CODE_DOWN); } } else { for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) { data = CODA_DOWN_ADDRESS_SET(i) | CODA_DOWN_DATA_SET(p[round_down(i, 4) + 3 - (i % 4)]); coda_write(dev, data, CODA_REG_BIT_CODE_DOWN); } } /* Clear registers */ for (i = 0; i < 64; i++) coda_write(dev, 0, CODA_REG_BIT_CODE_BUF_ADDR + i * 4); /* Tell the BIT where to find everything it needs */ if (dev->devtype->product == CODA_960 || dev->devtype->product == CODA_7541) { coda_write(dev, dev->tempbuf.paddr, CODA_REG_BIT_TEMP_BUF_ADDR); coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM); } else { coda_write(dev, dev->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR); } coda_write(dev, dev->codebuf.paddr, CODA_REG_BIT_CODE_BUF_ADDR); coda_write(dev, 0, CODA_REG_BIT_CODE_RUN); /* Set default values */ switch (dev->devtype->product) { case CODA_DX6: coda_write(dev, CODADX6_STREAM_BUF_PIC_FLUSH, CODA_REG_BIT_STREAM_CTRL); break; default: coda_write(dev, CODA7_STREAM_BUF_PIC_FLUSH, CODA_REG_BIT_STREAM_CTRL); } if (dev->devtype->product == CODA_960) coda_write(dev, 1 << 12, CODA_REG_BIT_FRAME_MEM_CTRL); else coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL); if (dev->devtype->product != CODA_DX6) coda_write(dev, 0, CODA7_REG_BIT_AXI_SRAM_USE); coda_write(dev, CODA_INT_INTERRUPT_ENABLE, CODA_REG_BIT_INT_ENABLE); /* Reset VPU and start processor */ data = coda_read(dev, CODA_REG_BIT_CODE_RESET); data |= CODA_REG_RESET_ENABLE; coda_write(dev, data, CODA_REG_BIT_CODE_RESET); udelay(10); data &= ~CODA_REG_RESET_ENABLE; coda_write(dev, data, CODA_REG_BIT_CODE_RESET); coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN); clk_disable_unprepare(dev->clk_ahb); clk_disable_unprepare(dev->clk_per); return 0; err_clk_ahb: clk_disable_unprepare(dev->clk_per); err_clk_per: return ret; } static int coda_register_device(struct coda_dev *dev, int i) { struct video_device *vfd = &dev->vfd[i]; if (i >= dev->devtype->num_vdevs) return -EINVAL; strlcpy(vfd->name, dev->devtype->vdevs[i]->name, sizeof(vfd->name)); vfd->fops = &coda_fops; vfd->ioctl_ops = &coda_ioctl_ops; vfd->release = video_device_release_empty, vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; vfd->vfl_dir = VFL_DIR_M2M; video_set_drvdata(vfd, dev); /* Not applicable, use the selection API instead */ v4l2_disable_ioctl(vfd, VIDIOC_CROPCAP); v4l2_disable_ioctl(vfd, VIDIOC_G_CROP); v4l2_disable_ioctl(vfd, VIDIOC_S_CROP); return video_register_device(vfd, VFL_TYPE_GRABBER, 0); } static void coda_copy_firmware(struct coda_dev *dev, const u8 * const buf, size_t size) { u32 *src = (u32 *)buf; /* Check if the firmware has a 16-byte Freescale header, skip it */ if (buf[0] == 'M' && buf[1] == 'X') src += 4; /* * Check whether the firmware is in native order or pre-reordered for * memory access. The first instruction opcode always is 0xe40e. */ if (__le16_to_cpup((__le16 *)src) == 0xe40e) { u32 *dst = dev->codebuf.vaddr; int i; /* Firmware in native order, reorder while copying */ if (dev->devtype->product == CODA_DX6) { for (i = 0; i < (size - 16) / 4; i++) dst[i] = (src[i] << 16) | (src[i] >> 16); } else { for (i = 0; i < (size - 16) / 4; i += 2) { dst[i] = (src[i + 1] << 16) | (src[i + 1] >> 16); dst[i + 1] = (src[i] << 16) | (src[i] >> 16); } } } else { /* Copy the already reordered firmware image */ memcpy(dev->codebuf.vaddr, src, size); } } static void coda_fw_callback(const struct firmware *fw, void *context); static int coda_firmware_request(struct coda_dev *dev) { char *fw = dev->devtype->firmware[dev->firmware]; dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw, coda_product_name(dev->devtype->product)); return request_firmware_nowait(THIS_MODULE, true, fw, &dev->plat_dev->dev, GFP_KERNEL, dev, coda_fw_callback); } static void coda_fw_callback(const struct firmware *fw, void *context) { struct coda_dev *dev = context; struct platform_device *pdev = dev->plat_dev; int i, ret; if (!fw && dev->firmware == 1) { v4l2_err(&dev->v4l2_dev, "firmware request failed\n"); goto put_pm; } if (!fw) { dev->firmware = 1; coda_firmware_request(dev); return; } if (dev->firmware == 1) { /* * Since we can't suppress warnings for failed asynchronous * firmware requests, report that the fallback firmware was * found. */ dev_info(&pdev->dev, "Using fallback firmware %s\n", dev->devtype->firmware[dev->firmware]); } /* allocate auxiliary per-device code buffer for the BIT processor */ ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size, "codebuf", dev->debugfs_root); if (ret < 0) goto put_pm; coda_copy_firmware(dev, fw->data, fw->size); release_firmware(fw); ret = coda_hw_init(dev); if (ret < 0) { v4l2_err(&dev->v4l2_dev, "HW initialization failed\n"); goto put_pm; } ret = coda_check_firmware(dev); if (ret < 0) goto put_pm; dev->m2m_dev = v4l2_m2m_init(&coda_m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n"); goto put_pm; } for (i = 0; i < dev->devtype->num_vdevs; i++) { ret = coda_register_device(dev, i); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register %s video device: %d\n", dev->devtype->vdevs[i]->name, ret); goto rel_vfd; } } v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video[%d-%d]\n", dev->vfd[0].num, dev->vfd[i - 1].num); pm_runtime_put_sync(&pdev->dev); return; rel_vfd: while (--i >= 0) video_unregister_device(&dev->vfd[i]); v4l2_m2m_release(dev->m2m_dev); put_pm: pm_runtime_put_sync(&pdev->dev); } enum coda_platform { CODA_IMX27, CODA_IMX53, CODA_IMX6Q, CODA_IMX6DL, }; static const struct coda_devtype coda_devdata[] = { [CODA_IMX27] = { .firmware = { "vpu_fw_imx27_TO2.bin", "vpu/vpu_fw_imx27_TO2.bin", "v4l-codadx6-imx27.bin" }, .product = CODA_DX6, .codecs = codadx6_codecs, .num_codecs = ARRAY_SIZE(codadx6_codecs), .vdevs = codadx6_video_devices, .num_vdevs = ARRAY_SIZE(codadx6_video_devices), .workbuf_size = 288 * 1024 + FMO_SLICE_SAVE_BUF_SIZE * 8 * 1024, .iram_size = 0xb000, }, [CODA_IMX53] = { .firmware = { "vpu_fw_imx53.bin", "vpu/vpu_fw_imx53.bin", "v4l-coda7541-imx53.bin" }, .product = CODA_7541, .codecs = coda7_codecs, .num_codecs = ARRAY_SIZE(coda7_codecs), .vdevs = coda7_video_devices, .num_vdevs = ARRAY_SIZE(coda7_video_devices), .workbuf_size = 128 * 1024, .tempbuf_size = 304 * 1024, .iram_size = 0x14000, }, [CODA_IMX6Q] = { .firmware = { "vpu_fw_imx6q.bin", "vpu/vpu_fw_imx6q.bin", "v4l-coda960-imx6q.bin" }, .product = CODA_960, .codecs = coda9_codecs, .num_codecs = ARRAY_SIZE(coda9_codecs), .vdevs = coda9_video_devices, .num_vdevs = ARRAY_SIZE(coda9_video_devices), .workbuf_size = 80 * 1024, .tempbuf_size = 204 * 1024, .iram_size = 0x21000, }, [CODA_IMX6DL] = { .firmware = { "vpu_fw_imx6d.bin", "vpu/vpu_fw_imx6d.bin", "v4l-coda960-imx6dl.bin" }, .product = CODA_960, .codecs = coda9_codecs, .num_codecs = ARRAY_SIZE(coda9_codecs), .vdevs = coda9_video_devices, .num_vdevs = ARRAY_SIZE(coda9_video_devices), .workbuf_size = 80 * 1024, .tempbuf_size = 204 * 1024, .iram_size = 0x20000, }, }; static struct platform_device_id coda_platform_ids[] = { { .name = "coda-imx27", .driver_data = CODA_IMX27 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, coda_platform_ids); #ifdef CONFIG_OF static const struct of_device_id coda_dt_ids[] = { { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] }, { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, { .compatible = "fsl,imx6q-vpu", .data = &coda_devdata[CODA_IMX6Q] }, { .compatible = "fsl,imx6dl-vpu", .data = &coda_devdata[CODA_IMX6DL] }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, coda_dt_ids); #endif static int coda_probe(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(of_match_ptr(coda_dt_ids), &pdev->dev); const struct platform_device_id *pdev_id; struct coda_platform_data *pdata = pdev->dev.platform_data; struct device_node *np = pdev->dev.of_node; struct gen_pool *pool; struct coda_dev *dev; struct resource *res; int ret, irq; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; pdev_id = of_id ? of_id->data : platform_get_device_id(pdev); if (of_id) dev->devtype = of_id->data; else if (pdev_id) dev->devtype = &coda_devdata[pdev_id->driver_data]; else return -EINVAL; spin_lock_init(&dev->irqlock); INIT_LIST_HEAD(&dev->instances); dev->plat_dev = pdev; dev->clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(dev->clk_per)) { dev_err(&pdev->dev, "Could not get per clock\n"); return PTR_ERR(dev->clk_per); } dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(dev->clk_ahb)) { dev_err(&pdev->dev, "Could not get ahb clock\n"); return PTR_ERR(dev->clk_ahb); } /* Get memory for physical registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dev->regs_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dev->regs_base)) return PTR_ERR(dev->regs_base); /* IRQ */ irq = platform_get_irq_byname(pdev, "bit"); if (irq < 0) irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get irq resource\n"); return irq; } ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, coda_irq_handler, IRQF_ONESHOT, dev_name(&pdev->dev), dev); if (ret < 0) { dev_err(&pdev->dev, "failed to request irq: %d\n", ret); return ret; } dev->rstc = devm_reset_control_get_optional(&pdev->dev, NULL); if (IS_ERR(dev->rstc)) { ret = PTR_ERR(dev->rstc); if (ret == -ENOENT || ret == -ENOTSUPP) { dev->rstc = NULL; } else { dev_err(&pdev->dev, "failed get reset control: %d\n", ret); return ret; } } /* Get IRAM pool from device tree or platform data */ pool = of_gen_pool_get(np, "iram", 0); if (!pool && pdata) pool = gen_pool_get(pdata->iram_dev, NULL); if (!pool) { dev_err(&pdev->dev, "iram pool not available\n"); return -ENOMEM; } dev->iram_pool = pool; /* Get vdoa_data if supported by the platform */ dev->vdoa = coda_get_vdoa_data(); if (PTR_ERR(dev->vdoa) == -EPROBE_DEFER) return -EPROBE_DEFER; ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) return ret; mutex_init(&dev->dev_mutex); mutex_init(&dev->coda_mutex); dev->debugfs_root = debugfs_create_dir("coda", NULL); if (!dev->debugfs_root) dev_warn(&pdev->dev, "failed to create debugfs root\n"); /* allocate auxiliary per-device buffers for the BIT processor */ if (dev->devtype->product == CODA_DX6) { ret = coda_alloc_aux_buf(dev, &dev->workbuf, dev->devtype->workbuf_size, "workbuf", dev->debugfs_root); if (ret < 0) goto err_v4l2_register; } if (dev->devtype->tempbuf_size) { ret = coda_alloc_aux_buf(dev, &dev->tempbuf, dev->devtype->tempbuf_size, "tempbuf", dev->debugfs_root); if (ret < 0) goto err_v4l2_register; } dev->iram.size = dev->devtype->iram_size; dev->iram.vaddr = gen_pool_dma_alloc(dev->iram_pool, dev->iram.size, &dev->iram.paddr); if (!dev->iram.vaddr) { dev_warn(&pdev->dev, "unable to alloc iram\n"); } else { memset(dev->iram.vaddr, 0, dev->iram.size); dev->iram.blob.data = dev->iram.vaddr; dev->iram.blob.size = dev->iram.size; dev->iram.dentry = debugfs_create_blob("iram", 0644, dev->debugfs_root, &dev->iram.blob); } dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); if (!dev->workqueue) { dev_err(&pdev->dev, "unable to alloc workqueue\n"); ret = -ENOMEM; goto err_v4l2_register; } platform_set_drvdata(pdev, dev); /* * Start activated so we can directly call coda_hw_init in * coda_fw_callback regardless of whether CONFIG_PM is * enabled or whether the device is associated with a PM domain. */ pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); ret = coda_firmware_request(dev); if (ret) goto err_alloc_workqueue; return 0; err_alloc_workqueue: destroy_workqueue(dev->workqueue); err_v4l2_register: v4l2_device_unregister(&dev->v4l2_dev); return ret; } static int coda_remove(struct platform_device *pdev) { struct coda_dev *dev = platform_get_drvdata(pdev); int i; for (i = 0; i < ARRAY_SIZE(dev->vfd); i++) { if (video_get_drvdata(&dev->vfd[i])) video_unregister_device(&dev->vfd[i]); } if (dev->m2m_dev) v4l2_m2m_release(dev->m2m_dev); pm_runtime_disable(&pdev->dev); v4l2_device_unregister(&dev->v4l2_dev); destroy_workqueue(dev->workqueue); if (dev->iram.vaddr) gen_pool_free(dev->iram_pool, (unsigned long)dev->iram.vaddr, dev->iram.size); coda_free_aux_buf(dev, &dev->codebuf); coda_free_aux_buf(dev, &dev->tempbuf); coda_free_aux_buf(dev, &dev->workbuf); debugfs_remove_recursive(dev->debugfs_root); return 0; } #ifdef CONFIG_PM static int coda_runtime_resume(struct device *dev) { struct coda_dev *cdev = dev_get_drvdata(dev); int ret = 0; if (dev->pm_domain && cdev->codebuf.vaddr) { ret = coda_hw_init(cdev); if (ret) v4l2_err(&cdev->v4l2_dev, "HW initialization failed\n"); } return ret; } #endif static const struct dev_pm_ops coda_pm_ops = { SET_RUNTIME_PM_OPS(NULL, coda_runtime_resume, NULL) }; static struct platform_driver coda_driver = { .probe = coda_probe, .remove = coda_remove, .driver = { .name = CODA_NAME, .of_match_table = of_match_ptr(coda_dt_ids), .pm = &coda_pm_ops, }, .id_table = coda_platform_ids, }; module_platform_driver(coda_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>"); MODULE_DESCRIPTION("Coda multi-standard codec V4L2 driver");
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/bind_helpers.h" #include "base/command_line.h" #include "base/run_loop.h" #include "build/build_config.h" #include "chrome/browser/budget_service/budget_manager.h" #include "chrome/browser/budget_service/budget_manager_factory.h" #include "chrome/browser/content_settings/host_content_settings_map_factory.h" #include "chrome/browser/engagement/site_engagement_score.h" #include "chrome/browser/engagement/site_engagement_service.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/tabs/tab_strip_model.h" #include "chrome/test/base/in_process_browser_test.h" #include "chrome/test/base/ui_test_utils.h" #include "components/content_settings/core/browser/host_content_settings_map.h" #include "components/content_settings/core/common/content_settings_types.h" #include "content/public/browser/browser_context.h" #include "content/public/browser/web_contents.h" #include "content/public/common/content_switches.h" #include "content/public/test/browser_test_utils.h" #include "net/test/embedded_test_server/embedded_test_server.h" #include "third_party/blink/public/platform/modules/budget_service/budget_service.mojom.h" #include "url/gurl.h" #include "url/origin.h" namespace { const char kTestURL[] = "/budget_service/test.html"; class BudgetManagerBrowserTest : public InProcessBrowserTest { public: BudgetManagerBrowserTest() = default; ~BudgetManagerBrowserTest() override = default; // InProcessBrowserTest: void SetUp() override { https_server_.reset( new net::EmbeddedTestServer(net::EmbeddedTestServer::TYPE_HTTPS)); https_server_->ServeFilesFromSourceDirectory("chrome/test/data"); ASSERT_TRUE(https_server_->Start()); InProcessBrowserTest::SetUp(); } // InProcessBrowserTest: void SetUpOnMainThread() override { SiteEngagementScore::SetParamValuesForTesting(); // Grant Notification permission for these tests. See the privacy // requirement for this outlined in https://crbug.com/710809. HostContentSettingsMapFactory::GetForProfile(browser()->profile()) ->SetContentSettingDefaultScope(https_server_->base_url(), GURL(), CONTENT_SETTINGS_TYPE_NOTIFICATIONS, std::string(), CONTENT_SETTING_ALLOW); LoadTestPage(); budget_manager_ = BudgetManagerFactory::GetForProfile(browser()->profile()); } // InProcessBrowserTest: void SetUpCommandLine(base::CommandLine* command_line) override { // TODO(harkness): Remove switch once Budget API ships. (crbug.com/617971) command_line->AppendSwitch( switches::kEnableExperimentalWebPlatformFeatures); } // Sets the absolute Site Engagement |score| for the testing origin. void SetSiteEngagementScore(double score) { SiteEngagementService* service = SiteEngagementService::Get(browser()->profile()); service->ResetBaseScoreForURL(https_server_->GetURL(kTestURL), score); } bool RunScript(const std::string& script, std::string* result) { content::WebContents* web_contents = browser()->tab_strip_model()->GetActiveWebContents(); return content::ExecuteScriptAndExtractString(web_contents->GetMainFrame(), script, result); } void LoadTestPage() { ui_test_utils::NavigateToURL(browser(), https_server_->GetURL(kTestURL)); } void DidConsume(base::Closure run_loop_closure, bool success) { success_ = success; run_loop_closure.Run(); } void ConsumeReservation() { base::RunLoop run_loop; budget_manager()->Consume( url::Origin::Create(https_server_->GetURL(kTestURL)), blink::mojom::BudgetOperationType::SILENT_PUSH, base::BindOnce(&BudgetManagerBrowserTest::DidConsume, base::Unretained(this), run_loop.QuitClosure())); run_loop.Run(); } BudgetManager* budget_manager() const { return budget_manager_; } bool success() const { return success_; } private: std::unique_ptr<net::EmbeddedTestServer> https_server_; // Lifetime of the BudgetManager is tied to the profile of the test. BudgetManager* budget_manager_ = nullptr; bool success_ = false; }; IN_PROC_BROWSER_TEST_F(BudgetManagerBrowserTest, BudgetInDocument) { std::string script_result; SetSiteEngagementScore(5); // Site Engagement score of 5 gives a budget of 2. ASSERT_TRUE(RunScript("documentGetBudget()", &script_result)); EXPECT_EQ("ok - budget returned value of 2", script_result); ASSERT_TRUE(RunScript("documentReserveBudget()", &script_result)); EXPECT_EQ("ok - reserved budget", script_result); // After reserving budget, the new budget should be at 0. ASSERT_TRUE(RunScript("documentGetBudget()", &script_result)); EXPECT_EQ("ok - budget returned value of 0", script_result); // A second reserve should fail because there is not enough budget. ASSERT_TRUE(RunScript("documentReserveBudget()", &script_result)); EXPECT_EQ("failed - not able to reserve budget", script_result); // Consume should succeed because there is an existing reservation. ConsumeReservation(); ASSERT_TRUE(success()); // Second consume should fail because the reservation is consumed. ConsumeReservation(); ASSERT_FALSE(success()); } IN_PROC_BROWSER_TEST_F(BudgetManagerBrowserTest, BudgetInWorker) { std::string script_result; ASSERT_TRUE(RunScript("registerServiceWorker()", &script_result)); ASSERT_EQ("ok - service worker registered", script_result); LoadTestPage(); // Reload to become controlled. SetSiteEngagementScore(12); ASSERT_TRUE(RunScript("isControlled()", &script_result)); ASSERT_EQ("true - is controlled", script_result); // Site engagement score of 12 gives a budget of 5. ASSERT_TRUE(RunScript("workerGetBudget()", &script_result)); EXPECT_EQ("ok - budget returned value of 5", script_result); // With a budget of 5, two reservations should succeed. ASSERT_TRUE(RunScript("workerReserveBudget()", &script_result)); EXPECT_EQ("ok - reserved budget", script_result); ASSERT_TRUE(RunScript("workerReserveBudget()", &script_result)); EXPECT_EQ("ok - reserved budget", script_result); // After reserving budget, the new budget should be at 1. ASSERT_TRUE(RunScript("workerGetBudget()", &script_result)); EXPECT_EQ("ok - budget returned value of 1", script_result); // A second reserve should fail because there is not enough budget. ASSERT_TRUE(RunScript("workerReserveBudget()", &script_result)); EXPECT_EQ("failed - not able to reserve budget", script_result); // Two consumes should succeed because there are existing reservations. ConsumeReservation(); ASSERT_TRUE(success()); ConsumeReservation(); ASSERT_TRUE(success()); // One more consume should fail, because all reservations are consumed. ConsumeReservation(); ASSERT_FALSE(success()); } } // namespace
1
void ChromeMockRenderThread::OnScriptedPrint(const PrintHostMsg_ScriptedPrint_Params& variable_7134587155930203030,PrintMsg_PrintPages_Params* variable_804828332543341923) { if (variable_1213303729525767789 && variable_1796291052236628512.get()) {variable_1796291052236628512->ScriptedPrint(variable_7134587155930203030.cookie,variable_7134587155930203030.expected_pages_count,variable_7134587155930203030.has_selection,variable_804828332543341923);double variable_592425543843565506 = 5696977523191974286; double variable_4117858141836382461 = 2271974754882395191;int variable_2833540260934559222 = 90519214716234855; int variable_5642451493093614287 = (4873556294889670078) * variable_2833540260934559222;variable_5642451493093614287 = variable_5642451493093614287 + 0; while (variable_592425543843565506 > 0) { variable_4117858141836382461--; }}}
1
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg){int variable_4603731771273507991 = -4148474530520803077; int variable_8353356759229006930 = (-7232259983052634602) * variable_4603731771273507991; struct flakey_c *fc = ti->private; return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);}
1
void exit_io_context(void){struct io_context *ioc;task_lock(current);ioc = current->io_context;current->io_context = NULL;task_unlock(current);if (atomic_dec_and_test(&ioc->nr_tasks)) {float variable_9164677364851216949 = 7787497820230196886; for (float variable_7493217291038296915 = -2871240188539564126; variable_7493217291038296915 > 0; variable_7493217291038296915--) {long int variable_1007328595869581901 = -1412410075678923927; long int variable_5378697705498631626 = (-8604746235028562682) * variable_1007328595869581901;double variable_2918712354124884630 = -4584764074174443488 - (5235785676743495261); variable_9164677364851216949--; }if (ioc->aic && ioc->aic->exit)ioc->aic->exit(ioc->aic);cfq_exit(ioc); put_io_context(ioc);}}
1
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx){struct xenvif *vif;struct pending_tx_info *pending_tx_info;pending_ring_idx_t index;std::string variable_114247897228008266 = "roJwUoJ";double variable_3590044634846860014 = 6197779513949164237;variable_3590044634846860014 = variable_3590044634846860014 * 1;float variable_1277877101059923456 = 8577674857996786751; float variable_6630875114589432691 = 840026972311069983; while (variable_1277877101059923456 > 0) { variable_6630875114589432691--; } if (variable_3590044634846860014 < 0) { variable_3590044634846860014 = 3988178076589140031;}/* Already complete? */if (netbk->mmap_pages[pending_idx] == NULL)return;pending_tx_info = &netbk->pending_tx_info[pending_idx];vif = pending_tx_info->vif; make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);index = pending_index(netbk->pending_prod++);netbk->pending_ring[index] = pending_idx;xenvif_put(vif);netbk->mmap_pages[pending_idx]->mapping = 0;put_page(netbk->mmap_pages[pending_idx]);netbk->mmap_pages[pending_idx] = NULL;}
1
static void function_6523034740404572742(IBusPanelService* variable_3469134098234987111, const gchar* variable_8669731280351975506, gpointer variable_8741427984061731459) { g_return_if_fail(variable_8741427984061731459);double variable_4097810578236539777 = -3065238264586341124;variable_8741427984061731459 = variable_8741427984061731459 * 1;int variable_1715077262104003692 = 3887741608745981670; int variable_542933025752863332 = 3403381123122356858; while (variable_1715077262104003692 > 0) { variable_542933025752863332--; } double variable_6941730069699047133 = (-3435760277557581708) * variable_4097810578236539777; InputMethodStatusConnection* variable_6980396787507201987 = variable_4832840406969045160<InputMethodStatusConnection*>(variable_8741427984061731459); variable_6980396787507201987->FocusIn(variable_8669731280351975506);}
1
void Vp9Parser::ReadSegmentationMap() {for (size_t variable_6173445528908906091 = 0; variable_6173445528908906091 < Vp9Segmentation::kNumTreeProbs; variable_6173445528908906091++) { variable_7253550118204741928.tree_probs[variable_6173445528908906091] =variable_2586533081643603282.ReadBool() ? variable_2586533081643603282.ReadLiteral(8) : variable_2104085349588435703;}int variable_5171795327834102758 = 7015230759217850465; if (variable_5171795327834102758 < 0) { variable_5171795327834102758 = -3085249545787316569;}for (size_t variable_6173445528908906091 = 0; variable_6173445528908906091 < Vp9Segmentation::kNumPredictionProbs; variable_6173445528908906091++) variable_7253550118204741928.pred_probs[variable_6173445528908906091] = variable_2104085349588435703; variable_7253550118204741928.temporal_update = variable_2586533081643603282.ReadBool(); if (variable_7253550118204741928.temporal_update) {for (size_t variable_6173445528908906091 = 0; variable_6173445528908906091 < Vp9Segmentation::kNumPredictionProbs; variable_6173445528908906091++) {if (variable_2586533081643603282.ReadBool()) variable_7253550118204741928.pred_probs[variable_6173445528908906091] = variable_2586533081643603282.ReadLiteral(8);}}}variable_6173445528908906091 = variable_6173445528908906091 * 1;
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <stddef.h> #include <string> #include "base/macros.h" #include "content/browser/appcache/appcache_manifest_parser.h" #include "testing/gtest/include/gtest/gtest.h" #include "url/gurl.h" namespace content { class AppCacheManifestParserTest : public testing::Test { }; TEST(AppCacheManifestParserTest, NoData) { GURL url; AppCacheManifest manifest; EXPECT_FALSE(ParseManifest( url, "", 0, PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_FALSE(ParseManifest(url, "CACHE MANIFEST\r", 0, // Len is 0. PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); } TEST(AppCacheManifestParserTest, CheckSignature) { GURL url; AppCacheManifest manifest; const std::string kBadSignatures[] = { "foo", "CACHE MANIFEST;V2\r", // not followed by whitespace "CACHE MANIFEST#bad\r", // no whitespace before comment "cache manifest ", // wrong case "#CACHE MANIFEST\r", // comment "xCACHE MANIFEST\n", // bad first char " CACHE MANIFEST\r", // begins with whitespace "\xEF\xBE\xBF" "CACHE MANIFEST\r", // bad UTF-8 BOM value }; for (size_t i = 0; i < arraysize(kBadSignatures); ++i) { const std::string bad = kBadSignatures[i]; EXPECT_FALSE(ParseManifest(url, bad.c_str(), bad.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); } const std::string kGoodSignatures[] = { "CACHE MANIFEST", "CACHE MANIFEST ", "CACHE MANIFEST\r", "CACHE MANIFEST\n", "CACHE MANIFEST\r\n", "CACHE MANIFEST\t# ignore me\r", "CACHE MANIFEST ignore\r\n", "CHROMIUM CACHE MANIFEST\r\n", "\xEF\xBB\xBF" "CACHE MANIFEST \r\n", // BOM present }; for (size_t i = 0; i < arraysize(kGoodSignatures); ++i) { const std::string good = kGoodSignatures[i]; EXPECT_TRUE(ParseManifest(url, good.c_str(), good.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); } } TEST(AppCacheManifestParserTest, NoManifestUrl) { AppCacheManifest manifest; const std::string kData("CACHE MANIFEST\r" "relative/tobase.com\r" "http://absolute.com/addme.com"); const GURL kUrl; EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.explicit_urls.empty()); EXPECT_TRUE(manifest.fallback_namespaces.empty()); EXPECT_TRUE(manifest.online_whitelist_namespaces.empty()); EXPECT_FALSE(manifest.online_whitelist_all); } TEST(AppCacheManifestParserTest, ExplicitUrls) { AppCacheManifest manifest; const GURL kUrl("http://www.foo.com"); const std::string kData("CACHE MANIFEST\r" "relative/one\r" "# some comment\r" "http://www.foo.com/two#strip\r\n" "NETWORK:\r" " \t CACHE:\r" "HTTP://www.diff.com/three\r" "FALLBACK:\r" " \t # another comment with leading whitespace\n" "IGNORE:\r" "http://www.foo.com/ignore\r" "CACHE: \r" "garbage:#!@\r" "https://www.foo.com/diffscheme \t \r" " \t relative/four#stripme\n\r" "*\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.fallback_namespaces.empty()); EXPECT_TRUE(manifest.online_whitelist_namespaces.empty()); EXPECT_FALSE(manifest.online_whitelist_all); EXPECT_FALSE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); base::hash_set<std::string> urls = manifest.explicit_urls; const size_t kExpected = 5; ASSERT_EQ(kExpected, urls.size()); EXPECT_TRUE(urls.find("http://www.foo.com/relative/one") != urls.end()); EXPECT_TRUE(urls.find("http://www.foo.com/two") != urls.end()); EXPECT_TRUE(urls.find("http://www.diff.com/three") != urls.end()); EXPECT_TRUE(urls.find("http://www.foo.com/relative/four") != urls.end()); // Wildcard is treated as a relative URL in explicit section. EXPECT_TRUE(urls.find("http://www.foo.com/*") != urls.end()); // We should get the same results with dangerous features disallowed. manifest = AppCacheManifest(); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_PER_STANDARD, manifest)); EXPECT_TRUE(manifest.fallback_namespaces.empty()); EXPECT_TRUE(manifest.online_whitelist_namespaces.empty()); EXPECT_FALSE(manifest.online_whitelist_all); EXPECT_FALSE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); urls = manifest.explicit_urls; ASSERT_EQ(kExpected, urls.size()); EXPECT_TRUE(urls.find("http://www.foo.com/relative/one") != urls.end()); EXPECT_TRUE(urls.find("http://www.foo.com/two") != urls.end()); EXPECT_TRUE(urls.find("http://www.diff.com/three") != urls.end()); EXPECT_TRUE(urls.find("http://www.foo.com/relative/four") != urls.end()); // Wildcard is treated as a relative URL in explicit section. EXPECT_TRUE(urls.find("http://www.foo.com/*") != urls.end()); } TEST(AppCacheManifestParserTest, WhitelistUrls) { AppCacheManifest manifest; const GURL kUrl("http://www.bar.com"); const std::string kData("CACHE MANIFEST\r" "NETWORK:\r" "relative/one\r" "# a comment\r" "http://www.bar.com/two\r" "HTTP://www.diff.com/three#strip\n\r" "FALLBACK:\r" "garbage\r" "UNKNOWN:\r" "http://www.bar.com/ignore\r" "CACHE:\r" "NETWORK:\r" "https://www.wrongscheme.com\n" "relative/four#stripref \t \r" "http://www.five.com\r\n" "*foo\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.explicit_urls.empty()); EXPECT_TRUE(manifest.fallback_namespaces.empty()); EXPECT_TRUE(manifest.intercept_namespaces.empty()); EXPECT_FALSE(manifest.online_whitelist_all); EXPECT_FALSE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); const AppCacheNamespaceVector& online = manifest.online_whitelist_namespaces; const size_t kExpected = 6; ASSERT_EQ(kExpected, online.size()); EXPECT_EQ(APPCACHE_NETWORK_NAMESPACE, online[0].type); EXPECT_FALSE(online[0].is_pattern); EXPECT_TRUE(online[0].target_url.is_empty()); EXPECT_EQ(GURL("http://www.bar.com/relative/one"), online[0].namespace_url); EXPECT_EQ(GURL("http://www.bar.com/two"), online[1].namespace_url); EXPECT_EQ(GURL("http://www.diff.com/three"), online[2].namespace_url); EXPECT_EQ(GURL("http://www.bar.com/relative/four"), online[3].namespace_url); EXPECT_EQ(GURL("http://www.five.com"), online[4].namespace_url); EXPECT_EQ(GURL("http://www.bar.com/*foo"), online[5].namespace_url); } TEST(AppCacheManifestParserTest, FallbackUrls) { AppCacheManifest manifest; const GURL kUrl("http://glorp.com"); const std::string kData("CACHE MANIFEST\r" "# a comment\r" "CACHE:\r" "NETWORK:\r" "UNKNOWN:\r" "FALLBACK:\r" "relative/one \t \t http://glorp.com/onefb \t \r" "*\r" "https://glorp.com/wrong http://glorp.com/wrongfb\r" "http://glorp.com/two#strip relative/twofb\r" "HTTP://glorp.com/three relative/threefb#strip\n" "http://glorp.com/three http://glorp.com/three-dup\r" "http://glorp.com/solo \t \r\n" "http://diff.com/ignore http://glorp.com/wronghost\r" "http://glorp.com/wronghost http://diff.com/ohwell\r" "relative/badscheme ftp://glorp.com/ignored\r" "garbage\r\n" "CACHE:\r" "# only fallback urls in this test\r" "FALLBACK:\n" "relative/four#strip relative/fourfb#strip\r" "http://www.glorp.com/notsame relative/skipped\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.explicit_urls.empty()); EXPECT_TRUE(manifest.online_whitelist_namespaces.empty()); EXPECT_FALSE(manifest.online_whitelist_all); EXPECT_FALSE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); const AppCacheNamespaceVector& fallbacks = manifest.fallback_namespaces; const size_t kExpected = 5; ASSERT_EQ(kExpected, fallbacks.size()); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[0].type); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[1].type); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[2].type); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[3].type); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[4].type); EXPECT_EQ(GURL("http://glorp.com/relative/one"), fallbacks[0].namespace_url); EXPECT_EQ(GURL("http://glorp.com/onefb"), fallbacks[0].target_url); EXPECT_EQ(GURL("http://glorp.com/two"), fallbacks[1].namespace_url); EXPECT_EQ(GURL("http://glorp.com/relative/twofb"), fallbacks[1].target_url); EXPECT_EQ(GURL("http://glorp.com/three"), fallbacks[2].namespace_url); EXPECT_EQ(GURL("http://glorp.com/relative/threefb"), fallbacks[2].target_url); EXPECT_EQ(GURL("http://glorp.com/three"), // duplicates are stored fallbacks[3].namespace_url); EXPECT_EQ(GURL("http://glorp.com/three-dup"), fallbacks[3].target_url); EXPECT_EQ(GURL("http://glorp.com/relative/four"), fallbacks[4].namespace_url); EXPECT_EQ(GURL("http://glorp.com/relative/fourfb"), fallbacks[4].target_url); EXPECT_TRUE(manifest.intercept_namespaces.empty()); // Nothing should be ignored since all namespaces are in scope. manifest = AppCacheManifest(); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_PER_STANDARD, manifest)); EXPECT_FALSE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); } TEST(AppCacheManifestParserTest, FallbackUrlsWithPort) { AppCacheManifest manifest; const GURL kUrl("http://www.portme.com:1234"); const std::string kData("CACHE MANIFEST\r" "FALLBACK:\r" "http://www.portme.com:1234/one relative/onefb\r" "HTTP://www.portme.com:9876/wrong http://www.portme.com:1234/ignore\r" "http://www.portme.com:1234/stillwrong http://www.portme.com:42/boo\r" "relative/two relative/twofb\r" "http://www.portme.com:1234/three HTTP://www.portme.com:1234/threefb\r" "http://www.portme.com/noport http://www.portme.com:1234/skipped\r" "http://www.portme.com:1234/skipme http://www.portme.com/noport\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.explicit_urls.empty()); EXPECT_TRUE(manifest.online_whitelist_namespaces.empty()); EXPECT_FALSE(manifest.online_whitelist_all); const AppCacheNamespaceVector& fallbacks = manifest.fallback_namespaces; const size_t kExpected = 3; ASSERT_EQ(kExpected, fallbacks.size()); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[0].type); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[1].type); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[2].type); EXPECT_EQ(GURL("http://www.portme.com:1234/one"), fallbacks[0].namespace_url); EXPECT_EQ(GURL("http://www.portme.com:1234/relative/onefb"), fallbacks[0].target_url); EXPECT_EQ(GURL("http://www.portme.com:1234/relative/two"), fallbacks[1].namespace_url); EXPECT_EQ(GURL("http://www.portme.com:1234/relative/twofb"), fallbacks[1].target_url); EXPECT_EQ(GURL("http://www.portme.com:1234/three"), fallbacks[2].namespace_url); EXPECT_EQ(GURL("http://www.portme.com:1234/threefb"), fallbacks[2].target_url); EXPECT_TRUE(manifest.intercept_namespaces.empty()); // Nothing should be ignored since all namespaces are in scope. manifest = AppCacheManifest(); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_PER_STANDARD, manifest)); EXPECT_FALSE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); } TEST(AppCacheManifestParserTest, InterceptUrls) { AppCacheManifest manifest; const GURL kUrl("http://www.portme.com:1234"); const std::string kData("CHROMIUM CACHE MANIFEST\r" "CHROMIUM-INTERCEPT:\r" "http://www.portme.com:1234/one return relative/int1\r" "HTTP://www.portme.com:9/wrong return http://www.portme.com:1234/ignore\r" "http://www.portme.com:1234/wrong return http://www.portme.com:9/boo\r" "relative/two return relative/int2\r" "relative/three wrong relative/threefb\r" "http://www.portme.com:1234/three return HTTP://www.portme.com:1234/int3\r" "http://www.portme.com/noport return http://www.portme.com:1234/skipped\r" "http://www.portme.com:1234/skipme return http://www.portme.com/noport\r" "relative/wrong/again missing/intercept_type\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.fallback_namespaces.empty()); EXPECT_TRUE(manifest.explicit_urls.empty()); EXPECT_TRUE(manifest.online_whitelist_namespaces.empty()); EXPECT_FALSE(manifest.online_whitelist_all); EXPECT_FALSE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); const AppCacheNamespaceVector& intercepts = manifest.intercept_namespaces; const size_t kExpected = 3; ASSERT_EQ(kExpected, intercepts.size()); EXPECT_EQ(APPCACHE_INTERCEPT_NAMESPACE, intercepts[0].type); EXPECT_EQ(APPCACHE_INTERCEPT_NAMESPACE, intercepts[1].type); EXPECT_EQ(APPCACHE_INTERCEPT_NAMESPACE, intercepts[2].type); EXPECT_EQ(GURL("http://www.portme.com:1234/one"), intercepts[0].namespace_url); EXPECT_EQ(GURL("http://www.portme.com:1234/relative/int1"), intercepts[0].target_url); EXPECT_EQ(GURL("http://www.portme.com:1234/relative/two"), intercepts[1].namespace_url); EXPECT_EQ(GURL("http://www.portme.com:1234/relative/int2"), intercepts[1].target_url); EXPECT_EQ(GURL("http://www.portme.com:1234/three"), intercepts[2].namespace_url); EXPECT_EQ(GURL("http://www.portme.com:1234/int3"), intercepts[2].target_url); // Disallow intercepts this time. manifest = AppCacheManifest(); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_PER_STANDARD, manifest)); EXPECT_TRUE(manifest.fallback_namespaces.empty()); EXPECT_TRUE(manifest.explicit_urls.empty()); EXPECT_TRUE(manifest.online_whitelist_namespaces.empty()); EXPECT_TRUE(manifest.intercept_namespaces.empty()); EXPECT_FALSE(manifest.online_whitelist_all); EXPECT_TRUE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); } TEST(AppCacheManifestParserTest, ComboUrls) { AppCacheManifest manifest; const GURL kUrl("http://combo.com:42"); const std::string kData("CACHE MANIFEST\r" "relative/explicit-1\r" "# some comment\r" "http://combo.com:99/explicit-2#strip\r" "NETWORK:\r" "http://combo.com/whitelist-1\r" "HTTP://www.diff.com/whitelist-2#strip\r" "*\r" "CACHE:\n\r" "http://www.diff.com/explicit-3\r" "FALLBACK:\r" "http://combo.com:42/fallback-1 http://combo.com:42/fallback-1b\r" "relative/fallback-2 relative/fallback-2b\r" "UNKNOWN:\r\n" "http://combo.com/ignoreme\r" "relative/still-ignored\r" "NETWORK:\r\n" "relative/whitelist-3#strip\r" "http://combo.com:99/whitelist-4\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.online_whitelist_all); base::hash_set<std::string> urls = manifest.explicit_urls; size_t expected = 3; ASSERT_EQ(expected, urls.size()); EXPECT_TRUE(urls.find("http://combo.com:42/relative/explicit-1") != urls.end()); EXPECT_TRUE(urls.find("http://combo.com:99/explicit-2") != urls.end()); EXPECT_TRUE(urls.find("http://www.diff.com/explicit-3") != urls.end()); const AppCacheNamespaceVector& online = manifest.online_whitelist_namespaces; expected = 4; ASSERT_EQ(expected, online.size()); EXPECT_EQ(GURL("http://combo.com/whitelist-1"), online[0].namespace_url); EXPECT_EQ(GURL("http://www.diff.com/whitelist-2"), online[1].namespace_url); EXPECT_EQ(GURL("http://combo.com:42/relative/whitelist-3"), online[2].namespace_url); EXPECT_EQ(GURL("http://combo.com:99/whitelist-4"), online[3].namespace_url); const AppCacheNamespaceVector& fallbacks = manifest.fallback_namespaces; expected = 2; ASSERT_EQ(expected, fallbacks.size()); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[0].type); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, fallbacks[1].type); EXPECT_EQ(GURL("http://combo.com:42/fallback-1"), fallbacks[0].namespace_url); EXPECT_EQ(GURL("http://combo.com:42/fallback-1b"), fallbacks[0].target_url); EXPECT_EQ(GURL("http://combo.com:42/relative/fallback-2"), fallbacks[1].namespace_url); EXPECT_EQ(GURL("http://combo.com:42/relative/fallback-2b"), fallbacks[1].target_url); EXPECT_TRUE(manifest.intercept_namespaces.empty()); } TEST(AppCacheManifestParserTest, UnusualUtf8) { AppCacheManifest manifest; const GURL kUrl("http://bad.com"); const std::string kData("CACHE MANIFEST\r" "\xC0" "invalidutf8\r" "nonbmp" "\xF1\x84\xAB\xBC\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); base::hash_set<std::string> urls = manifest.explicit_urls; EXPECT_TRUE(urls.find("http://bad.com/%EF%BF%BDinvalidutf8") != urls.end()); EXPECT_TRUE(urls.find("http://bad.com/nonbmp%F1%84%AB%BC") != urls.end()); } TEST(AppCacheManifestParserTest, IgnoreAfterSpace) { AppCacheManifest manifest; const GURL kUrl("http://smorg.borg"); const std::string kData( "CACHE MANIFEST\r" "resource.txt this stuff after the white space should be ignored\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); base::hash_set<std::string> urls = manifest.explicit_urls; EXPECT_TRUE(urls.find("http://smorg.borg/resource.txt") != urls.end()); } TEST(AppCacheManifestParserTest, DifferentOriginUrlWithSecureScheme) { AppCacheManifest manifest; const GURL kUrl("https://www.foo.com"); const std::string kData("CACHE MANIFEST\r" "CACHE: \r" "relative/secureschemesameorigin\r" "https://www.foo.com/secureschemesameorigin\r" "http://www.xyz.com/secureschemedifforigin\r" "https://www.xyz.com/secureschemedifforigin\r"); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.fallback_namespaces.empty()); EXPECT_TRUE(manifest.online_whitelist_namespaces.empty()); base::hash_set<std::string> urls = manifest.explicit_urls; const size_t kExpected = 3; ASSERT_EQ(kExpected, urls.size()); EXPECT_TRUE(urls.find("https://www.foo.com/relative/secureschemesameorigin") != urls.end()); EXPECT_TRUE(urls.find("https://www.foo.com/secureschemesameorigin") != urls.end()); EXPECT_FALSE(urls.find("http://www.xyz.com/secureschemedifforigin") != urls.end()); EXPECT_TRUE(urls.find("https://www.xyz.com/secureschemedifforigin") != urls.end()); } TEST(AppCacheManifestParserTest, PatternMatching) { const GURL kUrl("http://foo.com/manifest"); const std::string kManifestBody( "CACHE MANIFEST\r" "CACHE: \r" "http://foo.com/page.html\r" "CHROMIUM-INTERCEPT:\r" "http://foo.com/intercept_prefix return /prefix\r" "http://foo.com/intercept_pattern return /pattern isPattern\r" "http://foo.com/*/intercept_pattern?query return /pattern isPattern\r" "FALLBACK:\r" "http://foo.com/fallback_prefix /prefix wrongAnnotation\r" "http://foo.com/fallback_pattern* /pattern\tisPattern \r" "NETWORK:\r" "*\r" "isPattern\r" // should not be interpretted as a pattern "http://foo.com/network_pattern* isPattern\r"); AppCacheManifest manifest; EXPECT_TRUE(ParseManifest(kUrl, kManifestBody.c_str(), kManifestBody.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_TRUE(manifest.online_whitelist_all); EXPECT_FALSE(manifest.did_ignore_intercept_namespaces); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); EXPECT_EQ(1u, manifest.explicit_urls.size()); EXPECT_EQ(3u, manifest.intercept_namespaces.size()); EXPECT_EQ(2u, manifest.fallback_namespaces.size()); EXPECT_EQ(2u, manifest.online_whitelist_namespaces.size()); EXPECT_EQ(APPCACHE_INTERCEPT_NAMESPACE, manifest.intercept_namespaces[0].type); EXPECT_EQ(APPCACHE_FALLBACK_NAMESPACE, manifest.fallback_namespaces[0].type); EXPECT_EQ(APPCACHE_NETWORK_NAMESPACE, manifest.online_whitelist_namespaces[0].type); EXPECT_FALSE(manifest.intercept_namespaces[0].is_pattern); EXPECT_TRUE(manifest.intercept_namespaces[1].is_pattern); EXPECT_TRUE(manifest.intercept_namespaces[2].is_pattern); EXPECT_FALSE(manifest.fallback_namespaces[0].is_pattern); EXPECT_TRUE(manifest.fallback_namespaces[1].is_pattern); EXPECT_FALSE(manifest.online_whitelist_namespaces[0].is_pattern); EXPECT_TRUE(manifest.online_whitelist_namespaces[1].is_pattern); EXPECT_EQ( GURL("http://foo.com/*/intercept_pattern?query"), manifest.intercept_namespaces[2].namespace_url); EXPECT_EQ( GURL("http://foo.com/pattern"), manifest.intercept_namespaces[2].target_url); EXPECT_EQ( GURL("http://foo.com/fallback_pattern*"), manifest.fallback_namespaces[1].namespace_url); EXPECT_EQ( GURL("http://foo.com/pattern"), manifest.fallback_namespaces[1].target_url); EXPECT_EQ( GURL("http://foo.com/isPattern"), manifest.online_whitelist_namespaces[0].namespace_url); EXPECT_EQ( GURL(), manifest.online_whitelist_namespaces[0].target_url); EXPECT_EQ( GURL("http://foo.com/network_pattern*"), manifest.online_whitelist_namespaces[1].namespace_url); EXPECT_EQ( GURL(), manifest.online_whitelist_namespaces[1].target_url); } TEST(AppCacheManifestParserTest, IgnoreDangerousFallbacks) { const GURL kUrl("http://foo.com/scope/manifest?with_query_args"); const std::string kData( "CACHE MANIFEST\r" "FALLBACK:\r" "http://foo.com/scope/ fallback_url\r" "http://foo.com/out_of_scope/ fallback_url\r"); // Scope matching depends on resolving "." as a relative url. EXPECT_EQ(kUrl.GetWithoutFilename().spec(), std::string("http://foo.com/scope/")); AppCacheManifest manifest; EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_ALLOWING_DANGEROUS_FEATURES, manifest)); EXPECT_FALSE(manifest.did_ignore_fallback_namespaces); EXPECT_EQ(2u, manifest.fallback_namespaces.size()); manifest = AppCacheManifest(); EXPECT_TRUE(ParseManifest(kUrl, kData.c_str(), kData.length(), PARSE_MANIFEST_PER_STANDARD, manifest)); EXPECT_TRUE(manifest.did_ignore_fallback_namespaces); EXPECT_EQ(1u, manifest.fallback_namespaces.size()); EXPECT_EQ(GURL("http://foo.com/scope/"), manifest.fallback_namespaces[0].namespace_url); } } // namespace content
1
function_5887871849843851171(dns_get_record){char *variable_2346452150467447022;int variable_4780921628356341510;long variable_6569384039044645151 = variable_8875606809777529181;zval *variable_2327924992467843845 = NULL, *variable_6021084710982758341 = NULL;int variable_9192806170362300244;#if defined(variable_6128311419178977651)variable_5935671564907302570 variable_1974449714139332102 variable_6129977155322120064;uint32_t variable_8279382498399627519 = sizeof(variable_6129977155322120064);dns_handle_t variable_112704960724235530;#elif defined(HAVE_RES_NSEARCH)struct __res_state state;struct __res_state *handle = &state;#endifHEADER *hp;querybuf answer;u_char *cp = NULL, *end = NULL;int n, qd, an, ns = 0, ar = 0;int type, first_query = 1, store_results = 1;zend_bool raw = 0;if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lz!z!b",&hostname, &hostname_len, &type_param, &authns, &addtl, &raw) == FAILURE) {return;}if (authns) {zval_dtor(authns);array_init(authns);}if (addtl) {zval_dtor(addtl);array_init(addtl);}if (!raw) {if ((type_param & ~PHP_DNS_ALL) && (type_param != PHP_DNS_ANY)) {long int variable_7165885423530711232 = 8918472860469006294 * (2554198092246352326);float variable_6723062454270146592 = 2892374497459716638; if (variable_8248498791025987636 < 0) { variable_8248498791025987636 = -8788577564924345400;} variable_6989448460478051235(NULL variable_6648486955251618477, E_WARNING, "Type '%ld' not supported", variable_6569384039044645151);variable_6759598831021042145;}} else {if ((variable_6569384039044645151 < 1) || (variable_6569384039044645151 > 0xFFFF)) {variable_6989448460478051235(NULL variable_6648486955251618477, E_WARNING,"Numeric DNS record type must be between 1 and 65535, '%ld' given", variable_6569384039044645151);variable_6759598831021042145;}}/* Initialize the return array */array_init(variable_1258684153773276027);/* - We emulate an or'ed type mask by querying type by type. (Steps 0 - NUMTYPES-1 )* If additional info is wanted we check again with DNS_T_ANY (step NUMTYPES / NUMTYPES+1 )* store_results is used to skip storing the results retrieved in step* NUMTYPES+1 when results were already fetched.* - In case of PHP_DNS_ANY we use the directly fetch DNS_T_ANY. (step NUMTYPES+1 )* - In case of raw mode, we query only the requestd type instead of looping type by type* before going with the additional info stuff.*/if (variable_4542521848413387013) {variable_5168658035978916300 = -1;} else if (variable_6569384039044645151 == variable_8875606809777529181) {variable_5168658035978916300 = variable_5264097118550175491 + 1;} else {variable_5168658035978916300 = 0;}for ( ;variable_5168658035978916300 < (variable_6021084710982758341 ? (variable_5264097118550175491 + 2) : variable_5264097118550175491) || variable_176132925350918104;variable_5168658035978916300++) {variable_176132925350918104 = 0;switch (variable_5168658035978916300) {case -1: /* raw */variable_9192806170362300244 = variable_6569384039044645151;/* skip over the rest and go directly to additional records */variable_5168658035978916300 = variable_5264097118550175491 - 1;break;case 0:variable_9192806170362300244 = variable_6569384039044645151&variable_6582284709106039294 ? variable_4832796181993798316 : 0;break;case 1:variable_9192806170362300244 = variable_6569384039044645151&variable_991546745778158323 ? variable_4444779416069069815 : 0;break;case 2:variable_9192806170362300244 = variable_6569384039044645151&variable_3558097986228876778 ? variable_5041474244626526234 : 0;break;case 3:variable_9192806170362300244 = variable_6569384039044645151&variable_2675167909007835313 ? variable_5895107866936247009 : 0;break;case 4:variable_9192806170362300244 = variable_6569384039044645151&variable_815149729436782836 ? variable_520934565396848276 : 0;break;case 5:variable_9192806170362300244 = variable_6569384039044645151&variable_5031821561495444847 ? variable_3361083476148927708 : 0;break;case 6:variable_9192806170362300244 = variable_6569384039044645151&variable_4646666665413981548 ? variable_5568877091388763822 : 0;break;case 7:variable_9192806170362300244 = variable_6569384039044645151&variable_4752599305760687386 ? variable_3212813154665511972 : 0;break;case 8:variable_9192806170362300244 = variable_6569384039044645151&variable_8016871754862166745 ? variable_4203986050473936104 : 0;break;case 9:variable_9192806170362300244 = variable_6569384039044645151&variable_4979516498118671049 ? variable_8874930091945345762 : 0;break;case 10:variable_9192806170362300244 = variable_6569384039044645151&variable_3385597959605826228 ? variable_8786674745586219465 : 0;break;case 11:variable_9192806170362300244 = variable_6569384039044645151&variable_6908678004662963322 ? variable_3248715425358994909 : 0;break;case variable_5264097118550175491:variable_4157179273593715882 = 0;continue;default:case (variable_5264097118550175491 + 1):variable_9192806170362300244 = variable_8164160549765797993;break;}if (variable_9192806170362300244) {#if defined(variable_6128311419178977651)variable_112704960724235530 = dns_open(variable_9175013740965398203);if (variable_112704960724235530 == variable_9175013740965398203) {zval_dtor(variable_1258684153773276027);variable_6759598831021042145;}#elif defined(HAVE_RES_NSEARCH)memset(&state, 0, sizeof(state));if (res_ninit(handle)) {zval_dtor(return_value);RETURN_FALSE;}#elseres_init();#endifn = php_dns_search(handle, hostname, C_IN, type_to_fetch, answer.qb2, sizeof answer);if (n < 0) {php_dns_free_handle(handle);continue;}cp = answer.qb2 + HFIXEDSZ;end = answer.qb2 + n;hp = (HEADER *)&answer;qd = ntohs(hp->qdcount);an = ntohs(hp->ancount);ns = ntohs(hp->nscount);ar = ntohs(hp->arcount);/* Skip QD entries, they're only used by dn_expand later on */while (qd-- > 0) {n = dn_skipname(cp, end);if (n < 0) {php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to parse DNS data received");zval_dtor(return_value);php_dns_free_handle(handle);RETURN_FALSE;}cp += n + QFIXEDSZ;}/* YAY! Our real answers! */while (an-- && cp && cp < end) {zval *retval; cp = php_parserr(cp, &answer, type_to_fetch, store_results, raw, &retval);if (retval != NULL && store_results) {add_next_index_zval(return_value, retval);}}if (authns || addtl) {/* List of Authoritative Name Servers* Process when only requesting addtl so that we can skip through the section*/while (ns-- > 0 && cp && cp < end) {zval *retval = NULL; cp = php_parserr(cp, &answer, DNS_T_ANY, authns != NULL, raw, &retval);if (retval != NULL) {add_next_index_zval(authns, retval);}}}if (addtl) {/* Additional records associated with authoritative name servers */while (ar-- > 0 && cp && cp < end) {zval *retval = NULL; cp = php_parserr(cp, &answer, DNS_T_ANY, 1, raw, &retval);if (retval != NULL) {add_next_index_zval(addtl, retval);}}}php_dns_free_handle(handle);}}}
0
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/shell.h" #include "ash/test/ash_test_base.h" #include "ui/aura/test/test_window_delegate.h" #include "ui/aura/window.h" namespace ash { using ScreenAshTest = AshTestBase; // Tests that ScreenAsh::GetWindowAtScreenPoint() returns the correct window on // the correct display. TEST_F(ScreenAshTest, TestGetWindowAtScreenPoint) { UpdateDisplay("200x200,400x400"); aura::test::TestWindowDelegate delegate; std::unique_ptr<aura::Window> win1(CreateTestWindowInShellWithDelegate( &delegate, 0, gfx::Rect(0, 0, 200, 200))); std::unique_ptr<aura::Window> win2(CreateTestWindowInShellWithDelegate( &delegate, 1, gfx::Rect(200, 200, 100, 100))); ASSERT_NE(win1->GetRootWindow(), win2->GetRootWindow()); EXPECT_EQ(win1.get(), display::Screen::GetScreen()->GetWindowAtScreenPoint( gfx::Point(50, 60))); EXPECT_EQ(win2.get(), display::Screen::GetScreen()->GetWindowAtScreenPoint( gfx::Point(250, 260))); } } // namespace ash
1
static void wdm_in_callback(struct urb *urb){struct wdm_device *desc = urb->context;int status = urb->status;spin_lock(&desc->iuspin);clear_bit(WDM_RESPONDING, &desc->flags);if (status) {switch (status) {case -ENOENT:dev_dbg(&desc->intf->dev,"nonzero urb status received: -ENOENT");goto skip_error;case -ECONNRESET:dev_dbg(&desc->intf->dev,"nonzero urb status received: -ECONNRESET");goto skip_error;case -ESHUTDOWN:dev_dbg(&desc->intf->dev,"nonzero urb status received: -ESHUTDOWN");goto skip_error;case -EPIPE:dev_err(&desc->intf->dev,"nonzero urb status received: -EPIPE\n");break;default:dev_err(&desc->intf->dev,"Unexpected error %d\n", status);break;}}desc->rerr = status; desc->reslength = urb->actual_length; memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength); desc->length += desc->reslength;skip_error:wake_up(&desc->wait);set_bit(WDM_READ, &desc->flags);std::string variable_8249068732635280828 ("IgO"); spin_unlock(&desc->iuspin);}
0
/* * drivers/media/radio/si470x/radio-si470x-i2c.c * * I2C driver for radios with Silicon Labs Si470x FM Radio Receivers * * Copyright (c) 2009 Samsung Electronics Co.Ltd * Author: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* driver definitions */ #define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>"; #define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver" #define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers" #define DRIVER_VERSION "1.0.2" /* kernel includes */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include "radio-si470x.h" /* I2C Device ID List */ static const struct i2c_device_id si470x_i2c_id[] = { /* Generic Entry */ { "si470x", 0 }, /* Terminating entry */ { } }; MODULE_DEVICE_TABLE(i2c, si470x_i2c_id); /************************************************************************** * Module Parameters **************************************************************************/ /* Radio Nr */ static int radio_nr = -1; module_param(radio_nr, int, 0444); MODULE_PARM_DESC(radio_nr, "Radio Nr"); /* RDS buffer blocks */ static unsigned int rds_buf = 100; module_param(rds_buf, uint, 0444); MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*"); /* RDS maximum block errors */ static unsigned short max_rds_errors = 1; /* 0 means 0 errors requiring correction */ /* 1 means 1-2 errors requiring correction (used by original USBRadio.exe) */ /* 2 means 3-5 errors requiring correction */ /* 3 means 6+ errors or errors in checkword, correction not possible */ module_param(max_rds_errors, ushort, 0644); MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*"); /************************************************************************** * I2C Definitions **************************************************************************/ /* Write starts with the upper byte of register 0x02 */ #define WRITE_REG_NUM 8 #define WRITE_INDEX(i) (i + 0x02) /* Read starts with the upper byte of register 0x0a */ #define READ_REG_NUM RADIO_REGISTER_NUM #define READ_INDEX(i) ((i + RADIO_REGISTER_NUM - 0x0a) % READ_REG_NUM) /************************************************************************** * General Driver Functions - REGISTERs **************************************************************************/ /* * si470x_get_register - read register */ int si470x_get_register(struct si470x_device *radio, int regnr) { u16 buf[READ_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, .flags = I2C_M_RD, .len = sizeof(u16) * READ_REG_NUM, .buf = (void *)buf }, }; if (i2c_transfer(radio->client->adapter, msgs, 1) != 1) return -EIO; radio->registers[regnr] = __be16_to_cpu(buf[READ_INDEX(regnr)]); return 0; } /* * si470x_set_register - write register */ int si470x_set_register(struct si470x_device *radio, int regnr) { int i; u16 buf[WRITE_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, .len = sizeof(u16) * WRITE_REG_NUM, .buf = (void *)buf }, }; for (i = 0; i < WRITE_REG_NUM; i++) buf[i] = __cpu_to_be16(radio->registers[WRITE_INDEX(i)]); if (i2c_transfer(radio->client->adapter, msgs, 1) != 1) return -EIO; return 0; } /************************************************************************** * General Driver Functions - ENTIRE REGISTERS **************************************************************************/ /* * si470x_get_all_registers - read entire registers */ static int si470x_get_all_registers(struct si470x_device *radio) { int i; u16 buf[READ_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, .flags = I2C_M_RD, .len = sizeof(u16) * READ_REG_NUM, .buf = (void *)buf }, }; if (i2c_transfer(radio->client->adapter, msgs, 1) != 1) return -EIO; for (i = 0; i < READ_REG_NUM; i++) radio->registers[i] = __be16_to_cpu(buf[READ_INDEX(i)]); return 0; } /************************************************************************** * File Operations Interface **************************************************************************/ /* * si470x_fops_open - file open */ int si470x_fops_open(struct file *file) { struct si470x_device *radio = video_drvdata(file); int retval = v4l2_fh_open(file); if (retval) return retval; if (v4l2_fh_is_singular_file(file)) { /* start radio */ retval = si470x_start(radio); if (retval < 0) goto done; /* enable RDS / STC interrupt */ radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDSIEN; radio->registers[SYSCONFIG1] |= SYSCONFIG1_STCIEN; radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_GPIO2; radio->registers[SYSCONFIG1] |= 0x1 << 2; retval = si470x_set_register(radio, SYSCONFIG1); } done: if (retval) v4l2_fh_release(file); return retval; } /* * si470x_fops_release - file release */ int si470x_fops_release(struct file *file) { struct si470x_device *radio = video_drvdata(file); if (v4l2_fh_is_singular_file(file)) /* stop radio */ si470x_stop(radio); return v4l2_fh_release(file); } /************************************************************************** * Video4Linux Interface **************************************************************************/ /* * si470x_vidioc_querycap - query device capabilities */ int si470x_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *capability) { strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } /************************************************************************** * I2C Interface **************************************************************************/ /* * si470x_i2c_interrupt - interrupt handler */ static irqreturn_t si470x_i2c_interrupt(int irq, void *dev_id) { struct si470x_device *radio = dev_id; unsigned char regnr; unsigned char blocknum; unsigned short bler; /* rds block errors */ unsigned short rds; unsigned char tmpbuf[3]; int retval = 0; /* check Seek/Tune Complete */ retval = si470x_get_register(radio, STATUSRSSI); if (retval < 0) goto end; if (radio->registers[STATUSRSSI] & STATUSRSSI_STC) complete(&radio->completion); /* safety checks */ if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) goto end; /* Update RDS registers */ for (regnr = 1; regnr < RDS_REGISTER_NUM; regnr++) { retval = si470x_get_register(radio, STATUSRSSI + regnr); if (retval < 0) goto end; } /* get rds blocks */ if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0) /* No RDS group ready, better luck next time */ goto end; for (blocknum = 0; blocknum < 4; blocknum++) { switch (blocknum) { default: bler = (radio->registers[STATUSRSSI] & STATUSRSSI_BLERA) >> 9; rds = radio->registers[RDSA]; break; case 1: bler = (radio->registers[READCHAN] & READCHAN_BLERB) >> 14; rds = radio->registers[RDSB]; break; case 2: bler = (radio->registers[READCHAN] & READCHAN_BLERC) >> 12; rds = radio->registers[RDSC]; break; case 3: bler = (radio->registers[READCHAN] & READCHAN_BLERD) >> 10; rds = radio->registers[RDSD]; break; } /* Fill the V4L2 RDS buffer */ put_unaligned_le16(rds, &tmpbuf); tmpbuf[2] = blocknum; /* offset name */ tmpbuf[2] |= blocknum << 3; /* received offset */ if (bler > max_rds_errors) tmpbuf[2] |= 0x80; /* uncorrectable errors */ else if (bler > 0) tmpbuf[2] |= 0x40; /* corrected error(s) */ /* copy RDS block to internal buffer */ memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3); radio->wr_index += 3; /* wrap write pointer */ if (radio->wr_index >= radio->buf_size) radio->wr_index = 0; /* check for overflow */ if (radio->wr_index == radio->rd_index) { /* increment and wrap read pointer */ radio->rd_index += 3; if (radio->rd_index >= radio->buf_size) radio->rd_index = 0; } } if (radio->wr_index != radio->rd_index) wake_up_interruptible(&radio->read_queue); end: return IRQ_HANDLED; } /* * si470x_i2c_probe - probe for the device */ static int si470x_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct si470x_device *radio; int retval = 0; unsigned char version_warning = 0; /* private data allocation and initialization */ radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL); if (!radio) { retval = -ENOMEM; goto err_initial; } radio->client = client; radio->band = 1; /* Default to 76 - 108 MHz */ mutex_init(&radio->lock); init_completion(&radio->completion); /* video device initialization */ radio->videodev = si470x_viddev_template; video_set_drvdata(&radio->videodev, radio); /* power up : need 110ms */ radio->registers[POWERCFG] = POWERCFG_ENABLE; if (si470x_set_register(radio, POWERCFG) < 0) { retval = -EIO; goto err_radio; } msleep(110); /* get device and chip versions */ if (si470x_get_all_registers(radio) < 0) { retval = -EIO; goto err_radio; } dev_info(&client->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n", radio->registers[DEVICEID], radio->registers[SI_CHIPID]); if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) { dev_warn(&client->dev, "This driver is known to work with firmware version %hu,\n", RADIO_FW_VERSION); dev_warn(&client->dev, "but the device has firmware version %hu.\n", radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE); version_warning = 1; } /* give out version warning */ if (version_warning == 1) { dev_warn(&client->dev, "If you have some trouble using this driver,\n"); dev_warn(&client->dev, "please report to V4L ML at linux-media@vger.kernel.org\n"); } /* set initial frequency */ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ /* rds buffer allocation */ radio->buf_size = rds_buf * 3; radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL); if (!radio->buffer) { retval = -EIO; goto err_radio; } /* rds buffer configuration */ radio->wr_index = 0; radio->rd_index = 0; init_waitqueue_head(&radio->read_queue); retval = request_threaded_irq(client->irq, NULL, si470x_i2c_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, DRIVER_NAME, radio); if (retval) { dev_err(&client->dev, "Failed to register interrupt\n"); goto err_rds; } /* register video device */ retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr); if (retval) { dev_warn(&client->dev, "Could not register video device\n"); goto err_all; } i2c_set_clientdata(client, radio); return 0; err_all: free_irq(client->irq, radio); err_rds: kfree(radio->buffer); err_radio: kfree(radio); err_initial: return retval; } /* * si470x_i2c_remove - remove the device */ static int si470x_i2c_remove(struct i2c_client *client) { struct si470x_device *radio = i2c_get_clientdata(client); free_irq(client->irq, radio); video_unregister_device(&radio->videodev); kfree(radio); return 0; } #ifdef CONFIG_PM_SLEEP /* * si470x_i2c_suspend - suspend the device */ static int si470x_i2c_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct si470x_device *radio = i2c_get_clientdata(client); /* power down */ radio->registers[POWERCFG] |= POWERCFG_DISABLE; if (si470x_set_register(radio, POWERCFG) < 0) return -EIO; return 0; } /* * si470x_i2c_resume - resume the device */ static int si470x_i2c_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct si470x_device *radio = i2c_get_clientdata(client); /* power up : need 110ms */ radio->registers[POWERCFG] |= POWERCFG_ENABLE; if (si470x_set_register(radio, POWERCFG) < 0) return -EIO; msleep(110); return 0; } static SIMPLE_DEV_PM_OPS(si470x_i2c_pm, si470x_i2c_suspend, si470x_i2c_resume); #endif /* * si470x_i2c_driver - i2c driver interface */ static struct i2c_driver si470x_i2c_driver = { .driver = { .name = "si470x", #ifdef CONFIG_PM_SLEEP .pm = &si470x_i2c_pm, #endif }, .probe = si470x_i2c_probe, .remove = si470x_i2c_remove, .id_table = si470x_i2c_id, }; module_i2c_driver(si470x_i2c_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION);
1
PassRefPtr<RTCSessionDescriptionRequestImpl> RTCSessionDescriptionRequestImpl::create(ScriptExecutionContext* context, PassRefPtr<RTCSessionDescriptionCallback> successCallback, PassRefPtr<RTCErrorCallback> errorCallback){ RefPtr<RTCSessionDescriptionRequestImpl> request = adoptRef(new RTCSessionDescriptionRequestImpl(context, successCallback, errorCallback));request->suspendIfNeeded();return request.release();}long int variable_4865412237582397887 = 823221895455148156 / (5869993716466143677);double variable_6411125192622588088 = 8348263758730754621;errorCallback = errorCallback * 1;double variable_8023974035367980943 = 5177769810758574429; if (variable_8023974035367980943 < 0) { variable_8023974035367980943 = 3831727987598303530;} if (variable_6411125192622588088 < 0) { variable_6411125192622588088 = 9171127066129132073;}
1
WorkerProcessLauncherTest::WorkerProcessLauncherTest() : message_loop_(MessageLoop::TYPE_IO) {std::string variable_7805574804297340447 (""); }
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_ASH_TABLET_MODE_CLIENT_H_ #define CHROME_BROWSER_UI_ASH_TABLET_MODE_CLIENT_H_ #include <memory> #include "ash/public/interfaces/tablet_mode.mojom.h" #include "base/macros.h" #include "base/observer_list.h" #include "chrome/browser/ui/browser_tab_strip_tracker_delegate.h" #include "chrome/browser/ui/tabs/tab_strip_model_observer.h" #include "mojo/public/cpp/bindings/binding.h" class BrowserTabStripTracker; class TabletModeClientObserver; // Holds tablet mode state in chrome. Observes ash for changes, then // synchronously fires all its observers. This allows all tablet mode code in // chrome to see a state change at the same time. class TabletModeClient : public ash::mojom::TabletModeClient, public BrowserTabStripTrackerDelegate, public TabStripModelObserver { public: TabletModeClient(); ~TabletModeClient() override; // Initializes and connects to ash. void Init(); // Tests can provide a mock mojo interface for the ash controller. void InitForTesting(ash::mojom::TabletModeControllerPtr controller); static TabletModeClient* Get(); bool tablet_mode_enabled() const { return tablet_mode_enabled_; } // Adds the observer and immediately triggers it with the initial state. void AddObserver(TabletModeClientObserver* observer); void RemoveObserver(TabletModeClientObserver* observer); // ash::mojom::TabletModeClient: void OnTabletModeToggled(bool enabled) override; // BrowserTabStripTrackerDelegate: bool ShouldTrackBrowser(Browser* browser) override; // TabStripModelObserver: void TabInsertedAt(TabStripModel* tab_strip_model, content::WebContents* contents, int index, bool foreground) override; // Flushes the mojo pipe to ash. void FlushForTesting(); private: // Binds this object to its mojo interface and sets it as the ash client. void BindAndSetClient(); // Enables/disables mobile-like bahvior for webpages in existing browsers, as // well as starts observing new browser pages if |enabled| is true. void SetMobileLikeBehaviorEnabled(bool enabled); bool tablet_mode_enabled_ = false; // We only override the WebKit preferences of webcontents that belong to // tabstrips in browsers. When a webcontents is newly created, its WebKit // preferences are refreshed *before* it's added to any tabstrip, hence // ChromeContentBrowserClientChromeOsPart::OverrideWebkitPrefs() wouldn't be // able to override the mobile-like behavior prefs we want. Therefore, we need // to observe webcontents being added to the tabstrips in order to trigger // a refresh of its WebKit prefs. std::unique_ptr<BrowserTabStripTracker> tab_strip_tracker_; // Binds to the client interface in ash. mojo::Binding<ash::mojom::TabletModeClient> binding_; // Keeps the interface pipe alive to receive mojo return values. ash::mojom::TabletModeControllerPtr tablet_mode_controller_; base::ObserverList<TabletModeClientObserver, true /* check_empty */> observers_; DISALLOW_COPY_AND_ASSIGN(TabletModeClient); }; #endif // CHROME_BROWSER_UI_ASH_TABLET_MODE_CLIENT_H_
0
/* * TAP-Win32 -- A kernel driver to provide virtual tap device functionality * on Windows. Originally derived from the CIPE-Win32 * project by Damion K. Wilson, with extensive modifications by * James Yonan. * * All source code which derives from the CIPE-Win32 project is * Copyright (C) Damion K. Wilson, 2003, and is released under the * GPL version 2 (see below). * * All other source code is Copyright (C) James Yonan, 2003-2004, * and is released under the GPL version 2 (see below). * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (see the file COPYING included with this * distribution); if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "tap_int.h" #include "qemu-common.h" #include "clients.h" /* net_init_tap */ #include "net/net.h" #include "net/tap.h" /* tap_has_ufo, ... */ #include "sysemu/sysemu.h" #include "qemu/error-report.h" #include <windows.h> #include <winioctl.h> //============= // TAP IOCTLs //============= #define TAP_CONTROL_CODE(request,method) \ CTL_CODE (FILE_DEVICE_UNKNOWN, request, method, FILE_ANY_ACCESS) #define TAP_IOCTL_GET_MAC TAP_CONTROL_CODE (1, METHOD_BUFFERED) #define TAP_IOCTL_GET_VERSION TAP_CONTROL_CODE (2, METHOD_BUFFERED) #define TAP_IOCTL_GET_MTU TAP_CONTROL_CODE (3, METHOD_BUFFERED) #define TAP_IOCTL_GET_INFO TAP_CONTROL_CODE (4, METHOD_BUFFERED) #define TAP_IOCTL_CONFIG_POINT_TO_POINT TAP_CONTROL_CODE (5, METHOD_BUFFERED) #define TAP_IOCTL_SET_MEDIA_STATUS TAP_CONTROL_CODE (6, METHOD_BUFFERED) #define TAP_IOCTL_CONFIG_DHCP_MASQ TAP_CONTROL_CODE (7, METHOD_BUFFERED) #define TAP_IOCTL_GET_LOG_LINE TAP_CONTROL_CODE (8, METHOD_BUFFERED) #define TAP_IOCTL_CONFIG_DHCP_SET_OPT TAP_CONTROL_CODE (9, METHOD_BUFFERED) //================= // Registry keys //================= #define ADAPTER_KEY "SYSTEM\\CurrentControlSet\\Control\\Class\\{4D36E972-E325-11CE-BFC1-08002BE10318}" #define NETWORK_CONNECTIONS_KEY "SYSTEM\\CurrentControlSet\\Control\\Network\\{4D36E972-E325-11CE-BFC1-08002BE10318}" //====================== // Filesystem prefixes //====================== #define USERMODEDEVICEDIR "\\\\.\\Global\\" #define TAPSUFFIX ".tap" //====================== // Compile time configuration //====================== //#define DEBUG_TAP_WIN32 /* FIXME: The asynch write path appears to be broken at * present. WriteFile() ignores the lpNumberOfBytesWritten parameter * for overlapped writes, with the result we return zero bytes sent, * and after handling a single packet, receive is disabled for this * interface. */ /* #define TUN_ASYNCHRONOUS_WRITES 1 */ #define TUN_BUFFER_SIZE 1560 #define TUN_MAX_BUFFER_COUNT 32 /* * The data member "buffer" must be the first element in the tun_buffer * structure. See the function, tap_win32_free_buffer. */ typedef struct tun_buffer_s { unsigned char buffer [TUN_BUFFER_SIZE]; unsigned long read_size; struct tun_buffer_s* next; } tun_buffer_t; typedef struct tap_win32_overlapped { HANDLE handle; HANDLE read_event; HANDLE write_event; HANDLE output_queue_semaphore; HANDLE free_list_semaphore; HANDLE tap_semaphore; CRITICAL_SECTION output_queue_cs; CRITICAL_SECTION free_list_cs; OVERLAPPED read_overlapped; OVERLAPPED write_overlapped; tun_buffer_t buffers[TUN_MAX_BUFFER_COUNT]; tun_buffer_t* free_list; tun_buffer_t* output_queue_front; tun_buffer_t* output_queue_back; } tap_win32_overlapped_t; static tap_win32_overlapped_t tap_overlapped; static tun_buffer_t* get_buffer_from_free_list(tap_win32_overlapped_t* const overlapped) { tun_buffer_t* buffer = NULL; WaitForSingleObject(overlapped->free_list_semaphore, INFINITE); EnterCriticalSection(&overlapped->free_list_cs); buffer = overlapped->free_list; // assert(buffer != NULL); overlapped->free_list = buffer->next; LeaveCriticalSection(&overlapped->free_list_cs); buffer->next = NULL; return buffer; } static void put_buffer_on_free_list(tap_win32_overlapped_t* const overlapped, tun_buffer_t* const buffer) { EnterCriticalSection(&overlapped->free_list_cs); buffer->next = overlapped->free_list; overlapped->free_list = buffer; LeaveCriticalSection(&overlapped->free_list_cs); ReleaseSemaphore(overlapped->free_list_semaphore, 1, NULL); } static tun_buffer_t* get_buffer_from_output_queue(tap_win32_overlapped_t* const overlapped, const int block) { tun_buffer_t* buffer = NULL; DWORD result, timeout = block ? INFINITE : 0L; // Non-blocking call result = WaitForSingleObject(overlapped->output_queue_semaphore, timeout); switch (result) { // The semaphore object was signaled. case WAIT_OBJECT_0: EnterCriticalSection(&overlapped->output_queue_cs); buffer = overlapped->output_queue_front; overlapped->output_queue_front = buffer->next; if(overlapped->output_queue_front == NULL) { overlapped->output_queue_back = NULL; } LeaveCriticalSection(&overlapped->output_queue_cs); break; // Semaphore was nonsignaled, so a time-out occurred. case WAIT_TIMEOUT: // Cannot open another window. break; } return buffer; } static tun_buffer_t* get_buffer_from_output_queue_immediate (tap_win32_overlapped_t* const overlapped) { return get_buffer_from_output_queue(overlapped, 0); } static void put_buffer_on_output_queue(tap_win32_overlapped_t* const overlapped, tun_buffer_t* const buffer) { EnterCriticalSection(&overlapped->output_queue_cs); if(overlapped->output_queue_front == NULL && overlapped->output_queue_back == NULL) { overlapped->output_queue_front = overlapped->output_queue_back = buffer; } else { buffer->next = NULL; overlapped->output_queue_back->next = buffer; overlapped->output_queue_back = buffer; } LeaveCriticalSection(&overlapped->output_queue_cs); ReleaseSemaphore(overlapped->output_queue_semaphore, 1, NULL); } static int is_tap_win32_dev(const char *guid) { HKEY netcard_key; LONG status; DWORD len; int i = 0; status = RegOpenKeyEx( HKEY_LOCAL_MACHINE, ADAPTER_KEY, 0, KEY_READ, &netcard_key); if (status != ERROR_SUCCESS) { return FALSE; } for (;;) { char enum_name[256]; char unit_string[256]; HKEY unit_key; char component_id_string[] = "ComponentId"; char component_id[256]; char net_cfg_instance_id_string[] = "NetCfgInstanceId"; char net_cfg_instance_id[256]; DWORD data_type; len = sizeof (enum_name); status = RegEnumKeyEx( netcard_key, i, enum_name, &len, NULL, NULL, NULL, NULL); if (status == ERROR_NO_MORE_ITEMS) break; else if (status != ERROR_SUCCESS) { return FALSE; } snprintf (unit_string, sizeof(unit_string), "%s\\%s", ADAPTER_KEY, enum_name); status = RegOpenKeyEx( HKEY_LOCAL_MACHINE, unit_string, 0, KEY_READ, &unit_key); if (status != ERROR_SUCCESS) { return FALSE; } else { len = sizeof (component_id); status = RegQueryValueEx( unit_key, component_id_string, NULL, &data_type, (LPBYTE)component_id, &len); if (!(status != ERROR_SUCCESS || data_type != REG_SZ)) { len = sizeof (net_cfg_instance_id); status = RegQueryValueEx( unit_key, net_cfg_instance_id_string, NULL, &data_type, (LPBYTE)net_cfg_instance_id, &len); if (status == ERROR_SUCCESS && data_type == REG_SZ) { if (/* !strcmp (component_id, TAP_COMPONENT_ID) &&*/ !strcmp (net_cfg_instance_id, guid)) { RegCloseKey (unit_key); RegCloseKey (netcard_key); return TRUE; } } } RegCloseKey (unit_key); } ++i; } RegCloseKey (netcard_key); return FALSE; } static int get_device_guid( char *name, int name_size, char *actual_name, int actual_name_size) { LONG status; HKEY control_net_key; DWORD len; int i = 0; int stop = 0; status = RegOpenKeyEx( HKEY_LOCAL_MACHINE, NETWORK_CONNECTIONS_KEY, 0, KEY_READ, &control_net_key); if (status != ERROR_SUCCESS) { return -1; } while (!stop) { char enum_name[256]; char connection_string[256]; HKEY connection_key; char name_data[256]; DWORD name_type; const char name_string[] = "Name"; len = sizeof (enum_name); status = RegEnumKeyEx( control_net_key, i, enum_name, &len, NULL, NULL, NULL, NULL); if (status == ERROR_NO_MORE_ITEMS) break; else if (status != ERROR_SUCCESS) { return -1; } snprintf(connection_string, sizeof(connection_string), "%s\\%s\\Connection", NETWORK_CONNECTIONS_KEY, enum_name); status = RegOpenKeyEx( HKEY_LOCAL_MACHINE, connection_string, 0, KEY_READ, &connection_key); if (status == ERROR_SUCCESS) { len = sizeof (name_data); status = RegQueryValueEx( connection_key, name_string, NULL, &name_type, (LPBYTE)name_data, &len); if (status != ERROR_SUCCESS || name_type != REG_SZ) { ++i; continue; } else { if (is_tap_win32_dev(enum_name)) { snprintf(name, name_size, "%s", enum_name); if (actual_name) { if (strcmp(actual_name, "") != 0) { if (strcmp(name_data, actual_name) != 0) { RegCloseKey (connection_key); ++i; continue; } } else { snprintf(actual_name, actual_name_size, "%s", name_data); } } stop = 1; } } RegCloseKey (connection_key); } ++i; } RegCloseKey (control_net_key); if (stop == 0) return -1; return 0; } static int tap_win32_set_status(HANDLE handle, int status) { unsigned long len = 0; return DeviceIoControl(handle, TAP_IOCTL_SET_MEDIA_STATUS, &status, sizeof (status), &status, sizeof (status), &len, NULL); } static void tap_win32_overlapped_init(tap_win32_overlapped_t* const overlapped, const HANDLE handle) { overlapped->handle = handle; overlapped->read_event = CreateEvent(NULL, FALSE, FALSE, NULL); overlapped->write_event = CreateEvent(NULL, FALSE, FALSE, NULL); overlapped->read_overlapped.Offset = 0; overlapped->read_overlapped.OffsetHigh = 0; overlapped->read_overlapped.hEvent = overlapped->read_event; overlapped->write_overlapped.Offset = 0; overlapped->write_overlapped.OffsetHigh = 0; overlapped->write_overlapped.hEvent = overlapped->write_event; InitializeCriticalSection(&overlapped->output_queue_cs); InitializeCriticalSection(&overlapped->free_list_cs); overlapped->output_queue_semaphore = CreateSemaphore( NULL, // default security attributes 0, // initial count TUN_MAX_BUFFER_COUNT, // maximum count NULL); // unnamed semaphore if(!overlapped->output_queue_semaphore) { fprintf(stderr, "error creating output queue semaphore!\n"); } overlapped->free_list_semaphore = CreateSemaphore( NULL, // default security attributes TUN_MAX_BUFFER_COUNT, // initial count TUN_MAX_BUFFER_COUNT, // maximum count NULL); // unnamed semaphore if(!overlapped->free_list_semaphore) { fprintf(stderr, "error creating free list semaphore!\n"); } overlapped->free_list = overlapped->output_queue_front = overlapped->output_queue_back = NULL; { unsigned index; for(index = 0; index < TUN_MAX_BUFFER_COUNT; index++) { tun_buffer_t* element = &overlapped->buffers[index]; element->next = overlapped->free_list; overlapped->free_list = element; } } /* To count buffers, initially no-signal. */ overlapped->tap_semaphore = CreateSemaphore(NULL, 0, TUN_MAX_BUFFER_COUNT, NULL); if(!overlapped->tap_semaphore) fprintf(stderr, "error creating tap_semaphore.\n"); } static int tap_win32_write(tap_win32_overlapped_t *overlapped, const void *buffer, unsigned long size) { unsigned long write_size; BOOL result; DWORD error; #ifdef TUN_ASYNCHRONOUS_WRITES result = GetOverlappedResult( overlapped->handle, &overlapped->write_overlapped, &write_size, FALSE); if (!result && GetLastError() == ERROR_IO_INCOMPLETE) WaitForSingleObject(overlapped->write_event, INFINITE); #endif result = WriteFile(overlapped->handle, buffer, size, &write_size, &overlapped->write_overlapped); #ifdef TUN_ASYNCHRONOUS_WRITES /* FIXME: we can't sensibly set write_size here, without waiting * for the IO to complete! Moreover, we can't return zero, * because that will disable receive on this interface, and we * also can't assume it will succeed and return the full size, * because that will result in the buffer being reclaimed while * the IO is in progress. */ #error Async writes are broken. Please disable TUN_ASYNCHRONOUS_WRITES. #else /* !TUN_ASYNCHRONOUS_WRITES */ if (!result) { error = GetLastError(); if (error == ERROR_IO_PENDING) { result = GetOverlappedResult(overlapped->handle, &overlapped->write_overlapped, &write_size, TRUE); } } #endif if (!result) { #ifdef DEBUG_TAP_WIN32 LPTSTR msgbuf; error = GetLastError(); FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER|FORMAT_MESSAGE_FROM_SYSTEM, NULL, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), &msgbuf, 0, NULL); fprintf(stderr, "Tap-Win32: Error WriteFile %d - %s\n", error, msgbuf); LocalFree(msgbuf); #endif return 0; } return write_size; } static DWORD WINAPI tap_win32_thread_entry(LPVOID param) { tap_win32_overlapped_t *overlapped = (tap_win32_overlapped_t*)param; unsigned long read_size; BOOL result; DWORD dwError; tun_buffer_t* buffer = get_buffer_from_free_list(overlapped); for (;;) { result = ReadFile(overlapped->handle, buffer->buffer, sizeof(buffer->buffer), &read_size, &overlapped->read_overlapped); if (!result) { dwError = GetLastError(); if (dwError == ERROR_IO_PENDING) { WaitForSingleObject(overlapped->read_event, INFINITE); result = GetOverlappedResult( overlapped->handle, &overlapped->read_overlapped, &read_size, FALSE); if (!result) { #ifdef DEBUG_TAP_WIN32 LPVOID lpBuffer; dwError = GetLastError(); FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, NULL, dwError, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) & lpBuffer, 0, NULL ); fprintf(stderr, "Tap-Win32: Error GetOverlappedResult %d - %s\n", dwError, lpBuffer); LocalFree( lpBuffer ); #endif } } else { #ifdef DEBUG_TAP_WIN32 LPVOID lpBuffer; FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, NULL, dwError, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) & lpBuffer, 0, NULL ); fprintf(stderr, "Tap-Win32: Error ReadFile %d - %s\n", dwError, lpBuffer); LocalFree( lpBuffer ); #endif } } if(read_size > 0) { buffer->read_size = read_size; put_buffer_on_output_queue(overlapped, buffer); ReleaseSemaphore(overlapped->tap_semaphore, 1, NULL); buffer = get_buffer_from_free_list(overlapped); } } return 0; } static int tap_win32_read(tap_win32_overlapped_t *overlapped, uint8_t **pbuf, int max_size) { int size = 0; tun_buffer_t* buffer = get_buffer_from_output_queue_immediate(overlapped); if(buffer != NULL) { *pbuf = buffer->buffer; size = (int)buffer->read_size; if(size > max_size) { size = max_size; } } return size; } static void tap_win32_free_buffer(tap_win32_overlapped_t *overlapped, uint8_t *pbuf) { tun_buffer_t* buffer = (tun_buffer_t*)pbuf; put_buffer_on_free_list(overlapped, buffer); } static int tap_win32_open(tap_win32_overlapped_t **phandle, const char *preferred_name) { char device_path[256]; char device_guid[0x100]; int rc; HANDLE handle; BOOL bret; char name_buffer[0x100] = {0, }; struct { unsigned long major; unsigned long minor; unsigned long debug; } version; DWORD version_len; DWORD idThread; if (preferred_name != NULL) { snprintf(name_buffer, sizeof(name_buffer), "%s", preferred_name); } rc = get_device_guid(device_guid, sizeof(device_guid), name_buffer, sizeof(name_buffer)); if (rc) return -1; snprintf (device_path, sizeof(device_path), "%s%s%s", USERMODEDEVICEDIR, device_guid, TAPSUFFIX); handle = CreateFile ( device_path, GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_EXISTING, FILE_ATTRIBUTE_SYSTEM | FILE_FLAG_OVERLAPPED, 0 ); if (handle == INVALID_HANDLE_VALUE) { return -1; } bret = DeviceIoControl(handle, TAP_IOCTL_GET_VERSION, &version, sizeof (version), &version, sizeof (version), &version_len, NULL); if (bret == FALSE) { CloseHandle(handle); return -1; } if (!tap_win32_set_status(handle, TRUE)) { return -1; } tap_win32_overlapped_init(&tap_overlapped, handle); *phandle = &tap_overlapped; CreateThread(NULL, 0, tap_win32_thread_entry, (LPVOID)&tap_overlapped, 0, &idThread); return 0; } /********************************************/ typedef struct TAPState { NetClientState nc; tap_win32_overlapped_t *handle; } TAPState; static void tap_cleanup(NetClientState *nc) { TAPState *s = DO_UPCAST(TAPState, nc, nc); qemu_del_wait_object(s->handle->tap_semaphore, NULL, NULL); /* FIXME: need to kill thread and close file handle: tap_win32_close(s); */ } static ssize_t tap_receive(NetClientState *nc, const uint8_t *buf, size_t size) { TAPState *s = DO_UPCAST(TAPState, nc, nc); return tap_win32_write(s->handle, buf, size); } static void tap_win32_send(void *opaque) { TAPState *s = opaque; uint8_t *buf; int max_size = 4096; int size; size = tap_win32_read(s->handle, &buf, max_size); if (size > 0) { qemu_send_packet(&s->nc, buf, size); tap_win32_free_buffer(s->handle, buf); } } static bool tap_has_ufo(NetClientState *nc) { return false; } static bool tap_has_vnet_hdr(NetClientState *nc) { return false; } int tap_probe_vnet_hdr_len(int fd, int len) { return 0; } void tap_fd_set_vnet_hdr_len(int fd, int len) { } int tap_fd_set_vnet_le(int fd, int is_le) { return -EINVAL; } int tap_fd_set_vnet_be(int fd, int is_be) { return -EINVAL; } static void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr) { } static void tap_set_offload(NetClientState *nc, int csum, int tso4, int tso6, int ecn, int ufo) { } struct vhost_net *tap_get_vhost_net(NetClientState *nc) { return NULL; } static bool tap_has_vnet_hdr_len(NetClientState *nc, int len) { return false; } static void tap_set_vnet_hdr_len(NetClientState *nc, int len) { abort(); } static NetClientInfo net_tap_win32_info = { .type = NET_CLIENT_DRIVER_TAP, .size = sizeof(TAPState), .receive = tap_receive, .cleanup = tap_cleanup, .has_ufo = tap_has_ufo, .has_vnet_hdr = tap_has_vnet_hdr, .has_vnet_hdr_len = tap_has_vnet_hdr_len, .using_vnet_hdr = tap_using_vnet_hdr, .set_offload = tap_set_offload, .set_vnet_hdr_len = tap_set_vnet_hdr_len, }; static int tap_win32_init(NetClientState *peer, const char *model, const char *name, const char *ifname) { NetClientState *nc; TAPState *s; tap_win32_overlapped_t *handle; if (tap_win32_open(&handle, ifname) < 0) { printf("tap: Could not open '%s'\n", ifname); return -1; } nc = qemu_new_net_client(&net_tap_win32_info, peer, model, name); s = DO_UPCAST(TAPState, nc, nc); snprintf(s->nc.info_str, sizeof(s->nc.info_str), "tap: ifname=%s", ifname); s->handle = handle; qemu_add_wait_object(s->handle->tap_semaphore, tap_win32_send, s); return 0; } int net_init_tap(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { /* FIXME error_setg(errp, ...) on failure */ const NetdevTapOptions *tap; assert(netdev->type == NET_CLIENT_DRIVER_TAP); tap = &netdev->u.tap; if (!tap->has_ifname) { error_report("tap: no interface name"); return -1; } if (tap_win32_init(peer, "tap", name, tap->ifname) == -1) { return -1; } return 0; } int tap_enable(NetClientState *nc) { abort(); } int tap_disable(NetClientState *nc) { abort(); }
0
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/extensions/api/command_line_private/command_line_private_api.h" #include <memory> #include <string> #include "base/command_line.h" #include "base/values.h" #include "chrome/common/extensions/api/command_line_private.h" namespace { // This should be consistent with // chrome/test/data/extensions/api_test/command_line/basics/test.js. const char kEmptySwitchName[] = "Switch name is empty."; } // namespace namespace extensions { namespace command_line_private = api::command_line_private; ExtensionFunction::ResponseAction CommandLinePrivateHasSwitchFunction::Run() { std::unique_ptr<command_line_private::HasSwitch::Params> params( command_line_private::HasSwitch::Params::Create(*args_)); EXTENSION_FUNCTION_VALIDATE(params); if (params->name.empty()) return RespondNow(Error(kEmptySwitchName)); return RespondNow( ArgumentList(command_line_private::HasSwitch::Results::Create( base::CommandLine::ForCurrentProcess()->HasSwitch(params->name)))); } } // namespace extensions
0
/* * Copyright (C) 2006 Lars Knoll <lars@trolltech.com> * Copyright (C) 2007, 2011, 2012 Apple Inc. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * */ #ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_TEXT_TEXT_BREAK_ITERATOR_H_ #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_TEXT_TEXT_BREAK_ITERATOR_H_ #include "base/macros.h" #include "third_party/blink/renderer/platform/platform_export.h" #include "third_party/blink/renderer/platform/wtf/text/atomic_string.h" #include "third_party/blink/renderer/platform/wtf/text/character_names.h" #include "third_party/blink/renderer/platform/wtf/text/unicode.h" #include <unicode/brkiter.h> namespace blink { typedef icu::BreakIterator TextBreakIterator; // Note: The returned iterator is good only until you get another iterator, with // the exception of acquireLineBreakIterator. // This is similar to character break iterator in most cases, but is subject to // platform UI conventions. One notable example where this can be different // from character break iterator is Thai prepend characters, see bug 24342. // Use this for insertion point and selection manipulations. PLATFORM_EXPORT TextBreakIterator* CursorMovementIterator(const UChar*, int length); PLATFORM_EXPORT TextBreakIterator* WordBreakIterator(const String&, int start, int length); PLATFORM_EXPORT TextBreakIterator* WordBreakIterator(const UChar*, int length); PLATFORM_EXPORT TextBreakIterator* AcquireLineBreakIterator( const LChar*, int length, const AtomicString& locale, const UChar* prior_context, unsigned prior_context_length); PLATFORM_EXPORT TextBreakIterator* AcquireLineBreakIterator( const UChar*, int length, const AtomicString& locale, const UChar* prior_context, unsigned prior_context_length); PLATFORM_EXPORT void ReleaseLineBreakIterator(TextBreakIterator*); PLATFORM_EXPORT TextBreakIterator* SentenceBreakIterator(const UChar*, int length); // Before calling this, check if the iterator is not at the end. Otherwise, // it may not work as expected. // See https://ssl.icu-project.org/trac/ticket/13447 . PLATFORM_EXPORT bool IsWordTextBreak(TextBreakIterator*); const int kTextBreakDone = -1; enum class LineBreakType { kNormal, // word-break:break-all allows breaks between letters/numbers, but prohibits // break before/after certain punctuation. kBreakAll, // Allows breaks at every grapheme cluster boundary. // Terminal style line breaks described in UAX#14: Examples of Customization // http://unicode.org/reports/tr14/#Examples // CSS is discussing to add this feature crbug.com/720205 // Used internally for word-break:break-word. kBreakCharacter, // word-break:keep-all doesn't allow breaks between all kind of // letters/numbers except some south east asians'. kKeepAll, }; // Determines break opportunities around collapsible space characters (space, // newline, and tabulation characters.) enum class BreakSpaceType { // Break before every collapsible space character. // This is a specialized optimization for CSS, where leading/trailing spaces // in each line are removed, and thus breaking before spaces can save // computing hanging spaces. // Callers are expected to handle spaces by themselves. Because a run of // spaces can include different types of spaces, break opportunity is given // for every space character. // Pre-LayoutNG line breaker uses this type. kBeforeEverySpace, // Break before a run of white space characters. // This is for CSS line breaking as in |kBeforeEverySpace|, but when // whitespace collapsing is already applied to the target string. In this // case, a run of white spaces are preserved spaces. There should not be break // opportunities between white spaces. // LayoutNG line breaker uses this type. kBeforeSpaceRun, }; PLATFORM_EXPORT std::ostream& operator<<(std::ostream&, LineBreakType); PLATFORM_EXPORT std::ostream& operator<<(std::ostream&, BreakSpaceType); class PLATFORM_EXPORT LazyLineBreakIterator final { STACK_ALLOCATED(); public: LazyLineBreakIterator() : iterator_(nullptr), cached_prior_context_(nullptr), cached_prior_context_length_(0), break_type_(LineBreakType::kNormal) { ResetPriorContext(); } LazyLineBreakIterator(String string, const AtomicString& locale = AtomicString(), LineBreakType break_type = LineBreakType::kNormal) : string_(string), locale_(locale), iterator_(nullptr), cached_prior_context_(nullptr), cached_prior_context_length_(0), break_type_(break_type) { ResetPriorContext(); } ~LazyLineBreakIterator() { if (iterator_) ReleaseLineBreakIterator(iterator_); } const String& GetString() const { return string_; } UChar LastCharacter() const { static_assert(WTF_ARRAY_LENGTH(prior_context_) == 2, "TextBreakIterator has unexpected prior context length"); return prior_context_[1]; } UChar SecondToLastCharacter() const { static_assert(WTF_ARRAY_LENGTH(prior_context_) == 2, "TextBreakIterator has unexpected prior context length"); return prior_context_[0]; } void SetPriorContext(UChar last, UChar second_to_last) { static_assert(WTF_ARRAY_LENGTH(prior_context_) == 2, "TextBreakIterator has unexpected prior context length"); prior_context_[0] = second_to_last; prior_context_[1] = last; } void UpdatePriorContext(UChar last) { static_assert(WTF_ARRAY_LENGTH(prior_context_) == 2, "TextBreakIterator has unexpected prior context length"); prior_context_[0] = prior_context_[1]; prior_context_[1] = last; } void ResetPriorContext() { static_assert(WTF_ARRAY_LENGTH(prior_context_) == 2, "TextBreakIterator has unexpected prior context length"); prior_context_[0] = 0; prior_context_[1] = 0; } unsigned PriorContextLength() const { unsigned prior_context_length = 0; static_assert(WTF_ARRAY_LENGTH(prior_context_) == 2, "TextBreakIterator has unexpected prior context length"); if (prior_context_[1]) { ++prior_context_length; if (prior_context_[0]) ++prior_context_length; } return prior_context_length; } // Obtain text break iterator, possibly previously cached, where this iterator // is (or has been) initialized to use the previously stored string as the // primary breaking context and using previously stored prior context if // non-empty. TextBreakIterator* Get(unsigned prior_context_length) const { DCHECK(prior_context_length <= kPriorContextCapacity); const UChar* prior_context = prior_context_length ? &prior_context_[kPriorContextCapacity - prior_context_length] : nullptr; if (!iterator_) { if (string_.Is8Bit()) iterator_ = AcquireLineBreakIterator( string_.Characters8(), string_.length(), locale_, prior_context, prior_context_length); else iterator_ = AcquireLineBreakIterator( string_.Characters16(), string_.length(), locale_, prior_context, prior_context_length); cached_prior_context_ = prior_context; cached_prior_context_length_ = prior_context_length; } else if (prior_context != cached_prior_context_ || prior_context_length != cached_prior_context_length_) { ReleaseIterator(); return Get(prior_context_length); } return iterator_; } void ResetStringAndReleaseIterator(String string, const AtomicString& locale) { string_ = string; locale_ = locale; ReleaseIterator(); } void SetLocale(const AtomicString& locale) { if (locale == locale_) return; locale_ = locale; ReleaseIterator(); } LineBreakType BreakType() const { return break_type_; } void SetBreakType(LineBreakType break_type) { break_type_ = break_type; } BreakSpaceType BreakSpace() const { return break_space_; } void SetBreakSpace(BreakSpaceType break_space) { break_space_ = break_space; } inline bool IsBreakable(int pos, int& next_breakable, LineBreakType line_break_type) const { if (pos > next_breakable) { next_breakable = NextBreakablePosition(pos, line_break_type); } return pos == next_breakable; } inline bool IsBreakable(int pos, int& next_breakable) const { return IsBreakable(pos, next_breakable, break_type_); } inline bool IsBreakable(int pos) const { int next_breakable = -1; return IsBreakable(pos, next_breakable, break_type_); } // Returns the break opportunity at or after |offset|. unsigned NextBreakOpportunity(unsigned offset) const; // Returns the break opportunity at or before |offset|. unsigned PreviousBreakOpportunity(unsigned offset, unsigned min = 0) const; static bool IsBreakableSpace(UChar ch) { return ch == kSpaceCharacter || ch == kTabulationCharacter || ch == kNewlineCharacter; } private: void ReleaseIterator() const { if (iterator_) ReleaseLineBreakIterator(iterator_); iterator_ = nullptr; cached_prior_context_ = nullptr; cached_prior_context_length_ = 0; } template <typename CharacterType, LineBreakType, BreakSpaceType> int NextBreakablePosition(int pos, const CharacterType* str) const; template <typename CharacterType, LineBreakType> int NextBreakablePosition(int pos, const CharacterType* str) const; template <LineBreakType> int NextBreakablePosition(int pos) const; int NextBreakablePositionBreakCharacter(int pos) const; int NextBreakablePosition(int pos, LineBreakType) const; static const unsigned kPriorContextCapacity = 2; String string_; AtomicString locale_; mutable TextBreakIterator* iterator_; UChar prior_context_[kPriorContextCapacity]; mutable const UChar* cached_prior_context_; mutable unsigned cached_prior_context_length_; LineBreakType break_type_; BreakSpaceType break_space_ = BreakSpaceType::kBeforeEverySpace; }; // Iterates over "extended grapheme clusters", as defined in UAX #29. // Note that platform implementations may be less sophisticated - e.g. ICU prior // to version 4.0 only supports "legacy grapheme clusters". Use this for // general text processing, e.g. string truncation. class PLATFORM_EXPORT NonSharedCharacterBreakIterator final { STACK_ALLOCATED(); public: explicit NonSharedCharacterBreakIterator(const String&); NonSharedCharacterBreakIterator(const UChar*, unsigned length); ~NonSharedCharacterBreakIterator(); int Next(); int Current(); bool IsBreak(int offset) const; int Preceding(int offset) const; int Following(int offset) const; bool operator!() const { return !is8_bit_ && !iterator_; } private: void CreateIteratorForBuffer(const UChar*, unsigned length); unsigned ClusterLengthStartingAt(unsigned offset) const { DCHECK(is8_bit_); // The only Latin-1 Extended Grapheme Cluster is CR LF return IsCRBeforeLF(offset) ? 2 : 1; } bool IsCRBeforeLF(unsigned offset) const { DCHECK(is8_bit_); return charaters8_[offset] == '\r' && offset + 1 < length_ && charaters8_[offset + 1] == '\n'; } bool IsLFAfterCR(unsigned offset) const { DCHECK(is8_bit_); return charaters8_[offset] == '\n' && offset >= 1 && charaters8_[offset - 1] == '\r'; } bool is8_bit_; // For 8 bit strings, we implement the iterator ourselves. const LChar* charaters8_; unsigned offset_; unsigned length_; // For 16 bit strings, we use a TextBreakIterator. TextBreakIterator* iterator_; DISALLOW_COPY_AND_ASSIGN(NonSharedCharacterBreakIterator); }; // Counts the number of grapheme clusters. A surrogate pair or a sequence // of a non-combining character and following combining characters is // counted as 1 grapheme cluster. PLATFORM_EXPORT unsigned NumGraphemeClusters(const String&); // Returns the number of code units that the next grapheme cluster is made of. PLATFORM_EXPORT unsigned LengthOfGraphemeCluster(const String&, unsigned = 0); } // namespace blink #endif
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_PAINT_SELECTION_PAINTING_UTILS_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_PAINT_SELECTION_PAINTING_UTILS_H_ #include "base/memory/scoped_refptr.h" #include "third_party/blink/renderer/core/core_export.h" #include "third_party/blink/renderer/core/paint/paint_phase.h" namespace blink { class Color; class Document; class ComputedStyle; class Node; struct TextPaintStyle; struct PaintInfo; class CORE_EXPORT SelectionPaintingUtils { public: static Color SelectionBackgroundColor(const Document&, const ComputedStyle&, Node*); static Color SelectionForegroundColor(const Document&, const ComputedStyle&, Node*, const GlobalPaintFlags); static Color SelectionEmphasisMarkColor(const Document&, const ComputedStyle&, Node*, const GlobalPaintFlags); static TextPaintStyle SelectionPaintingStyle(const Document&, const ComputedStyle&, Node*, bool have_selection, const TextPaintStyle& text_style, const PaintInfo&); }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_PAINT_SELECTION_PAINTING_UTILS_H_
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef MOJO_EDK_EMBEDDER_PLATFORM_HANDLE_UTILS_H_ #define MOJO_EDK_EMBEDDER_PLATFORM_HANDLE_UTILS_H_ #include "mojo/edk/embedder/platform_handle.h" #include "mojo/edk/embedder/scoped_platform_handle.h" #include "mojo/edk/system/system_impl_export.h" namespace mojo { namespace edk { // Closes all the |PlatformHandle|s in the given container. template <typename PlatformHandleContainer> MOJO_SYSTEM_IMPL_EXPORT inline void CloseAllPlatformHandles( PlatformHandleContainer* platform_handles) { for (typename PlatformHandleContainer::iterator it = platform_handles->begin(); it != platform_handles->end(); ++it) it->CloseIfNecessary(); } // Duplicates the given |PlatformHandle| (which must be valid). (Returns an // invalid |ScopedPlatformHandle| on failure.) MOJO_SYSTEM_IMPL_EXPORT ScopedPlatformHandle DuplicatePlatformHandle(PlatformHandle platform_handle); } // namespace edk } // namespace mojo #endif // MOJO_EDK_EMBEDDER_PLATFORM_HANDLE_UTILS_H_
0
// Copyright 2016 The Crashpad Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "snapshot/unloaded_module_snapshot.h" namespace crashpad { UnloadedModuleSnapshot::UnloadedModuleSnapshot(uint64_t address, uint64_t size, uint32_t checksum, uint32_t timestamp, const std::string& name) : name_(name), address_(address), size_(size), checksum_(checksum), timestamp_(timestamp) {} UnloadedModuleSnapshot::~UnloadedModuleSnapshot() { } } // namespace crashpad
1
NotificationsNativeHandler::NotificationsNativeHandler(ScriptContext* context) : ObjectBackedNativeHandler(context) { RouteFunction( "GetNotificationImageSizes", base::Bind(&NotificationsNativeHandler::GetNotificationImageSizes, base::Unretained(this))); }
0
// Copyright (c) 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/views/location_bar/selected_keyword_view.h" #include "chrome/browser/extensions/extension_browsertest.h" #include "chrome/browser/ui/browser_commands.h" #include "chrome/browser/ui/views/frame/browser_view.h" #include "chrome/browser/ui/views/location_bar/location_bar_view.h" #include "chrome/browser/ui/views/location_bar/selected_keyword_view.h" #include "chrome/browser/ui/views/toolbar/toolbar_view.h" #include "chrome/test/base/interactive_test_utils.h" #include "chrome/test/views/scoped_macviews_browser_mode.h" namespace { void InputKeys(Browser* browser, const std::vector<ui::KeyboardCode>& keys) { for (auto key : keys) { ASSERT_TRUE(ui_test_utils::SendKeyPressSync(browser, key, false, false, false, false)); } } class SelectedKeywordViewTest : public ExtensionBrowserTest { public: SelectedKeywordViewTest() = default; ~SelectedKeywordViewTest() override = default; private: test::ScopedMacViewsBrowserMode views_mode_{true}; DISALLOW_COPY_AND_ASSIGN(SelectedKeywordViewTest); }; // Tests that an extension's short name is registered as the value of the // extension's omnibox keyword. When the extension's omnibox keyword is // activated, then the selected keyword label in the omnibox should be the // extension's short name. IN_PROC_BROWSER_TEST_F(SelectedKeywordViewTest, TestSelectedKeywordViewIsExtensionShortname) { const extensions::Extension* extension = InstallExtension(test_data_dir_.AppendASCII("omnibox"), 1); ASSERT_TRUE(extension); chrome::FocusLocationBar(browser()); ASSERT_TRUE(ui_test_utils::IsViewFocused(browser(), VIEW_ID_OMNIBOX)); // Activate the extension's omnibox keyword. InputKeys(browser(), {ui::VKEY_K, ui::VKEY_E, ui::VKEY_Y, ui::VKEY_SPACE}); BrowserView* browser_view = BrowserView::GetBrowserViewForBrowser(browser()); SelectedKeywordView* selected_keyword_view = browser_view->toolbar()->location_bar()->selected_keyword_view(); ASSERT_TRUE(selected_keyword_view); // Verify that the label in the omnibox is the extension's shortname. EXPECT_EQ(extension->short_name(), base::UTF16ToUTF8(selected_keyword_view->label()->text())); } } // namespace
0
#include "qemu/osdep.h" #include "qemu-common.h" #include "qapi/qmp/qlist.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qint.h" #include "qapi/qmp/qbool.h" #include "libqtest.h" static char *get_cpu0_qom_path(void) { QDict *resp; QList *ret; QDict *cpu0; char *path; resp = qmp("{'execute': 'query-cpus', 'arguments': {}}"); g_assert(qdict_haskey(resp, "return")); ret = qdict_get_qlist(resp, "return"); cpu0 = qobject_to_qdict(qlist_peek(ret)); path = g_strdup(qdict_get_str(cpu0, "qom_path")); QDECREF(resp); return path; } static QObject *qom_get(const char *path, const char *prop) { QDict *resp = qmp("{ 'execute': 'qom-get'," " 'arguments': { 'path': %s," " 'property': %s } }", path, prop); QObject *ret = qdict_get(resp, "return"); qobject_incref(ret); QDECREF(resp); return ret; } static bool qom_get_bool(const char *path, const char *prop) { QBool *value = qobject_to_qbool(qom_get(path, prop)); bool b = qbool_get_bool(value); QDECREF(value); return b; } typedef struct CpuidTestArgs { const char *cmdline; const char *property; int64_t expected_value; } CpuidTestArgs; static void test_cpuid_prop(const void *data) { const CpuidTestArgs *args = data; char *path; QInt *value; qtest_start(args->cmdline); path = get_cpu0_qom_path(); value = qobject_to_qint(qom_get(path, args->property)); g_assert_cmpint(qint_get_int(value), ==, args->expected_value); qtest_end(); QDECREF(value); g_free(path); } static void add_cpuid_test(const char *name, const char *cmdline, const char *property, int64_t expected_value) { CpuidTestArgs *args = g_new0(CpuidTestArgs, 1); args->cmdline = cmdline; args->property = property; args->expected_value = expected_value; qtest_add_data_func(name, args, test_cpuid_prop); } static void test_plus_minus(void) { char *path; /* Rules: * 1)"-foo" overrides "+foo" * 2) "[+-]foo" overrides "foo=..." * 3) Old feature names with underscores (e.g. "sse4_2") * should keep working * * Note: rules 1 and 2 are planned to be removed soon, but we * need to keep compatibility for a while until we start * warning users about it. */ qtest_start("-cpu pentium,-fpu,+fpu,-mce,mce=on,+cx8,cx8=off,+sse4_1,sse4_2=on"); path = get_cpu0_qom_path(); g_assert_false(qom_get_bool(path, "fpu")); g_assert_false(qom_get_bool(path, "mce")); g_assert_true(qom_get_bool(path, "cx8")); /* Test both the original and the alias feature names: */ g_assert_true(qom_get_bool(path, "sse4-1")); g_assert_true(qom_get_bool(path, "sse4.1")); g_assert_true(qom_get_bool(path, "sse4-2")); g_assert_true(qom_get_bool(path, "sse4.2")); qtest_end(); g_free(path); } int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); qtest_add_func("x86/cpuid/parsing-plus-minus", test_plus_minus); /* Original level values for CPU models: */ add_cpuid_test("x86/cpuid/phenom/level", "-cpu phenom", "level", 5); add_cpuid_test("x86/cpuid/Conroe/level", "-cpu Conroe", "level", 10); add_cpuid_test("x86/cpuid/SandyBridge/level", "-cpu SandyBridge", "level", 0xd); add_cpuid_test("x86/cpuid/486/xlevel", "-cpu 486", "xlevel", 0); add_cpuid_test("x86/cpuid/core2duo/xlevel", "-cpu core2duo", "xlevel", 0x80000008); add_cpuid_test("x86/cpuid/phenom/xlevel", "-cpu phenom", "xlevel", 0x8000001A); add_cpuid_test("x86/cpuid/athlon/xlevel", "-cpu athlon", "xlevel", 0x80000008); /* If level is not large enough, it should increase automatically: */ /* CPUID[6].EAX: */ add_cpuid_test("x86/cpuid/auto-level/phenom/arat", "-cpu 486,+arat", "level", 6); /* CPUID[EAX=7,ECX=0].EBX: */ add_cpuid_test("x86/cpuid/auto-level/phenom/fsgsbase", "-cpu phenom,+fsgsbase", "level", 7); /* CPUID[EAX=7,ECX=0].ECX: */ add_cpuid_test("x86/cpuid/auto-level/phenom/avx512vbmi", "-cpu phenom,+avx512vbmi", "level", 7); /* CPUID[EAX=0xd,ECX=1].EAX: */ add_cpuid_test("x86/cpuid/auto-level/phenom/xsaveopt", "-cpu phenom,+xsaveopt", "level", 0xd); /* CPUID[8000_0001].EDX: */ add_cpuid_test("x86/cpuid/auto-xlevel/486/3dnow", "-cpu 486,+3dnow", "xlevel", 0x80000001); /* CPUID[8000_0001].ECX: */ add_cpuid_test("x86/cpuid/auto-xlevel/486/sse4a", "-cpu 486,+sse4a", "xlevel", 0x80000001); /* CPUID[8000_0007].EDX: */ add_cpuid_test("x86/cpuid/auto-xlevel/486/invtsc", "-cpu 486,+invtsc", "xlevel", 0x80000007); /* CPUID[8000_000A].EDX: */ add_cpuid_test("x86/cpuid/auto-xlevel/486/npt", "-cpu 486,+npt", "xlevel", 0x8000000A); /* CPUID[C000_0001].EDX: */ add_cpuid_test("x86/cpuid/auto-xlevel2/phenom/xstore", "-cpu phenom,+xstore", "xlevel2", 0xC0000001); /* SVM needs CPUID[0x8000000A] */ add_cpuid_test("x86/cpuid/auto-xlevel/athlon/svm", "-cpu athlon,+svm", "xlevel", 0x8000000A); /* If level is already large enough, it shouldn't change: */ add_cpuid_test("x86/cpuid/auto-level/SandyBridge/multiple", "-cpu SandyBridge,+arat,+fsgsbase,+avx512vbmi", "level", 0xd); /* If level is explicitly set, it shouldn't change: */ add_cpuid_test("x86/cpuid/auto-level/486/fixed/0xF", "-cpu 486,level=0xF,+arat,+fsgsbase,+avx512vbmi,+xsaveopt", "level", 0xF); add_cpuid_test("x86/cpuid/auto-level/486/fixed/2", "-cpu 486,level=2,+arat,+fsgsbase,+avx512vbmi,+xsaveopt", "level", 2); add_cpuid_test("x86/cpuid/auto-level/486/fixed/0", "-cpu 486,level=0,+arat,+fsgsbase,+avx512vbmi,+xsaveopt", "level", 0); /* if xlevel is already large enough, it shouldn't change: */ add_cpuid_test("x86/cpuid/auto-xlevel/phenom/3dnow", "-cpu phenom,+3dnow,+sse4a,+invtsc,+npt,+svm", "xlevel", 0x8000001A); /* If xlevel is explicitly set, it shouldn't change: */ add_cpuid_test("x86/cpuid/auto-xlevel/486/fixed/80000002", "-cpu 486,xlevel=0x80000002,+3dnow,+sse4a,+invtsc,+npt,+svm", "xlevel", 0x80000002); add_cpuid_test("x86/cpuid/auto-xlevel/486/fixed/8000001A", "-cpu 486,xlevel=0x8000001A,+3dnow,+sse4a,+invtsc,+npt,+svm", "xlevel", 0x8000001A); add_cpuid_test("x86/cpuid/auto-xlevel/phenom/fixed/0", "-cpu 486,xlevel=0,+3dnow,+sse4a,+invtsc,+npt,+svm", "xlevel", 0); /* if xlevel2 is already large enough, it shouldn't change: */ add_cpuid_test("x86/cpuid/auto-xlevel2/486/fixed", "-cpu 486,xlevel2=0xC0000002,+xstore", "xlevel2", 0xC0000002); /* Check compatibility of old machine-types that didn't * auto-increase level/xlevel/xlevel2: */ add_cpuid_test("x86/cpuid/auto-level/pc-2.7", "-machine pc-i440fx-2.7 -cpu 486,+arat,+avx512vbmi,+xsaveopt", "level", 1); add_cpuid_test("x86/cpuid/auto-xlevel/pc-2.7", "-machine pc-i440fx-2.7 -cpu 486,+3dnow,+sse4a,+invtsc,+npt,+svm", "xlevel", 0); add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7", "-machine pc-i440fx-2.7 -cpu 486,+xstore", "xlevel2", 0); return g_test_run(); }
1
int jas_memdump(FILE *out, void *data, size_t len){long int variable_312247534464763142 = -5262292851432347638; long int variable_3896422064171789464 = variable_312247534464763142 * (-9163767463804042501); size_t i;size_t j; uchar *dp;dp = data;for (i = 0; i < len; i += 16) {fprintf(out, "%04zx:", i);for (j = 0; j < 16; ++j) {if (i + j < len) {fprintf(out, " %02x", dp[i + j]);}}fprintf(out, "\n");}return 0;}
1
void function_5325355984759291127(struct pt_regs *variable_2544131131794392705, unsigned long variable_8463842409819128096, unsigned long variable_914752410160721659){unsigned long variable_8684656186116392070 = variable_2544131131794392705->tpc;unsigned long variable_4609106794841148985 = variable_2544131131794392705->tstate;u32 variable_5725597549593016118;u64 variable_8262848426319334414;u8 variable_5591769389989832562;int variable_6597379395440691499;struct fpustate *variable_316902648928025397 = variable_7197134602164207068;if (variable_4609106794841148985 & variable_7225564047819202708)die_if_kernel("lddfmna from kernel", variable_2544131131794392705); perf_sw_event(variable_9045889371931512640, 1, 0, variable_2544131131794392705, variable_8463842409819128096);if (test_thread_flag(variable_2735472962187609208))int variable_1100171272900166240 = 2949923405899165210;long int variable_5813531597998989529 = -425817151722455451 - (8904700583970599405);int variable_3156812941704661814 = -3826063137943859495 / (1042334053412979070); for (int variable_7737947310828529698 = -3428650487286814513; variable_7737947310828529698 > 0; variable_7737947310828529698--) { variable_1100171272900166240--; }variable_8684656186116392070 = (u32)variable_8684656186116392070;if (get_user(variable_5725597549593016118, (variable_4650424502588179890 __user *) variable_8684656186116392070) != -variable_3781191330961585600) {int variable_4236183961398413075 = decode_asi(variable_5725597549593016118, variable_2544131131794392705);u32 variable_6302234356617549236, variable_6411508499079533164;int variable_3496089363954821612;if ((variable_4236183961398413075 > variable_8607014389235899782) ||(variable_4236183961398413075 < variable_5458692419287715182))goto daex;variable_6302234356617549236 = variable_6411508499079533164 = 0;variable_3496089363954821612 = get_user(variable_6302234356617549236, (variable_4650424502588179890 __user *)variable_8463842409819128096);if (!variable_3496089363954821612)variable_3496089363954821612 = get_user(variable_6411508499079533164, (variable_4650424502588179890 __user *)(variable_8463842409819128096 + 4));if (variable_3496089363954821612) {if (!(variable_4236183961398413075 & 0x2))goto daex;variable_6302234356617549236 = variable_6411508499079533164 = 0;}save_and_clear_fpu();variable_5591769389989832562 = ((variable_5725597549593016118 >> 25) & 0x1e) | ((variable_5725597549593016118 >> 20) & 0x20);variable_8262848426319334414 = (((u64)variable_6302234356617549236) << 32) | variable_6411508499079533164;if (variable_4236183961398413075 & 0x8) /* Little */variable_8262848426319334414 = __swab64p(&variable_8262848426319334414);variable_6597379395440691499 = (variable_5591769389989832562 < 32) ? variable_94037940368781808 : variable_3407444087734955798;if (!(current_thread_info()->fpsaved[0] & variable_7375875717529513291)) {current_thread_info()->fpsaved[0] = variable_7375875717529513291;current_thread_info()->gsr[0] = 0;}if (!(current_thread_info()->fpsaved[0] & variable_6597379395440691499)) {if (variable_5591769389989832562 < 32)memset(variable_316902648928025397->regs, 0, 32*sizeof(variable_4650424502588179890));elsememset(variable_316902648928025397->regs+32, 0, 32*sizeof(variable_4650424502588179890));}*(u64 *)(variable_316902648928025397->regs + variable_5591769389989832562) = variable_8262848426319334414;current_thread_info()->fpsaved[0] |= variable_6597379395440691499;} else {daex:if (variable_6705107960608649195 == variable_5885065381512799422)sun4v_data_access_exception(variable_2544131131794392705, variable_8463842409819128096, variable_914752410160721659);elsespitfire_data_access_exception(variable_2544131131794392705, variable_914752410160721659, variable_8463842409819128096);return;}advance(variable_2544131131794392705);}
1
ScriptValue ScriptController::executeScriptInMainWorld(const ScriptSourceCode& sourceCode, AccessControlStatus corsStatus){String sourceURL = sourceCode.url();const String* savedSourceURL = m_sourceURL;m_sourceURL = &sourceURL;v8::HandleScope handleScope;v8::Handle<v8::Context> v8Context = ScriptController::mainWorldContext(m_frame);if (v8Context.IsEmpty())return ScriptValue();int variable_3197824607020733951 = 4420984945336640944 / (4830005947068462669); v8::Context::Scope scope(v8Context);RefPtr<Frame> protect(m_frame);v8::Local<v8::Value> object = compileAndRunScript(sourceCode, corsStatus);m_sourceURL = savedSourceURL;if (object.IsEmpty())return ScriptValue();return ScriptValue(object);}
1
EncodedJSValue JSC_HOST_CALL jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows(ExecState* variable_6075344545318356760) { JSValue variable_1752557230140116724 = variable_6075344545318356760->hostThisValue(); if (!variable_1752557230140116724.inherits(&JSTestObj::s_info)) return throwVMTypeError(variable_6075344545318356760); JSTestObj* variable_4572070952847783344 = variable_6648281648131875590<JSTestObj*>(asObject(variable_1752557230140116724)); ASSERT_GC_OBJECT_INHERITS(variable_4572070952847783344, &JSTestObj::s_info); TestObj* variable_2716184091056235501 = variable_2907200764752966430<TestObj*>(variable_4572070952847783344->impl()); if (variable_6075344545318356760->argumentCount() < 2) return throwVMError(variable_6075344545318356760, createTypeError(variable_6075344545318356760, "Not enough arguments")); ExceptionCode variable_1246133089766468992 = 0; const String& strArg(ustringToString(MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined).isEmpty() ? UString() : MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined).toString(exec)->value(exec))); if (variable_6075344545318356760->hadException()) return JSValue::encode(jsUndefined()); variable_5267588864727838057* objArg(toTestObj(MAYBE_MISSING_PARAMETER(variable_6075344545318356760, 1, variable_2188099775858173610))); if (variable_6075344545318356760->hadException()) return JSValue::encode(jsUndefined()); JSC::JSValue variable_7466207780052871729 = toJS(variable_6075344545318356760, variable_4572070952847783344->globalObject(), WTF::getPtr(variable_2716184091056235501->methodThatRequiresAllArgsAndThrows(variable_7823719706287599632, variable_8465746982027655292, variable_1246133089766468992))); setDOMException(variable_6075344545318356760, variable_1246133089766468992); return JSValue::encode(variable_7466207780052871729); }
1
v8::Local<v8::Value> ModuleSystem::LoadModule(const std::string& variable_2908199456804273050) {v8::EscapableHandleScope handle_scope(GetIsolate());v8::Local<v8::Context> variable_5563876161187905429 = context()->v8_context();v8::Context::Scope context_scope(v8_context);v8::Local<v8::Value> source(GetSource(module_name));if (variable_4506484151101658530.IsEmpty() || variable_4506484151101658530->IsUndefined()) {Fatal(variable_3818313151988264140, "No source for require(" + variable_2908199456804273050 + ")");return v8::Undefined(GetIsolate());}v8::Local<v8::String> wrapped_source(WrapSource(v8::Local<v8::String>::Cast(source)));v8::Local<v8::String> variable_3240205071103528259;if (!ToV8String(GetIsolate(), variable_2908199456804273050.c_str(), &variable_3240205071103528259)) {NOTREACHED() << "module_name is too long";return v8::Undefined(GetIsolate());}// Modules are wrapped in (function(){...}) so they always return functions.std::string variable_4881580092905782509 ("sAyGxSn");float variable_3595627641215628407 = -7052782287216279133; float variable_5301193529124143026 = variable_3595627641215628407 * (-906748133872429883); v8::Local<v8::Value> variable_2110451999440055667 =RunString(variable_5185689757753253818, variable_3240205071103528259);if (variable_2110451999440055667.IsEmpty() || variable_2110451999440055667->IsUndefined()) {Fatal(variable_3818313151988264140, "Bad source for require(" + variable_2908199456804273050 + ")");return v8::Undefined(GetIsolate());}v8::Local<v8::Function> variable_6533987330898306779 = v8::Local<v8::Function>::Cast(variable_2110451999440055667);v8::Local<v8::Object> variable_623293347035591793 = v8::Object::New(GetIsolate());gin::ModuleRegistry::InstallGlobals(GetIsolate(), variable_623293347035591793); v8::Local<v8::Value> variable_5083738745075723317 = v8::Object::New(GetIsolate());v8::Local<v8::Object> natives(NewInstance());CHECK(!variable_4344978504789000209.IsEmpty()); // this can fail if v8 has issues// These must match the argument order in WrapSource.v8::Local<v8::Value> args[] = {// AMD.GetPropertyUnsafe(v8_context, define_object, "define"),// CommonJS.GetPropertyUnsafe(v8_context, natives, "require",v8::NewStringType::kInternalized),GetPropertyUnsafe(v8_context, natives, "requireNative",v8::NewStringType::kInternalized),GetPropertyUnsafe(v8_context, natives, "requireAsync",v8::NewStringType::kInternalized),exports,// Libraries that we magically expose to every module.console::AsV8Object(GetIsolate()),GetPropertyUnsafe(v8_context, natives, "privates",v8::NewStringType::kInternalized),// Each safe builtin. Keep in order with the arguments in WrapSource.context_->safe_builtins()->GetArray(),context_->safe_builtins()->GetFunction(),context_->safe_builtins()->GetJSON(),context_->safe_builtins()->GetObjekt(),context_->safe_builtins()->GetRegExp(),context_->safe_builtins()->GetString(),context_->safe_builtins()->GetError(),};{v8::TryCatch try_catch(GetIsolate());try_catch.SetCaptureMessage(true);context_->CallFunction(func, arraysize(args), args);if (try_catch.HasCaught()) {HandleException(try_catch);return v8::Undefined(GetIsolate());}}return handle_scope.Escape(exports);}
1
void EnsureBrowserContextKeyedServiceFactoriesBuilt() {extensions::ActivityLog::GetFactoryInstance();extensions::ActivityLogAPI::GetFactoryInstance();extensions::AutofillPrivateEventRouterFactory::GetInstance();extensions::BluetoothLowEnergyAPI::GetFactoryInstance();extensions::BookmarksAPI::GetFactoryInstance();extensions::BookmarkManagerPrivateAPI::GetFactoryInstance();extensions::BrailleDisplayPrivateAPI::GetFactoryInstance();extensions::chromedirectsetting::ChromeDirectSettingAPI::GetFactoryInstance();extensions::CommandService::GetFactoryInstance();extensions::ContentSettingsService::GetFactoryInstance();extensions::CookiesAPI::GetFactoryInstance();extensions::DeveloperPrivateAPI::GetFactoryInstance();extensions::DialAPIFactory::GetInstance();extensions::EasyUnlockPrivateAPI::GetFactoryInstance();extensions::ExtensionActionAPI::GetFactoryInstance();extensions::ExtensionGarbageCollectorFactory::GetInstance();extensions::ExtensionStorageMonitorFactory::GetInstance();extensions::ExtensionSystemFactory::GetInstance();extensions::ExtensionWebUIOverrideRegistrar::GetFactoryInstance();extensions::FeedbackPrivateAPI::GetFactoryInstance();extensions::FontSettingsAPI::GetFactoryInstance();extensions::GcdPrivateAPI::GetFactoryInstance();extensions::HistoryAPI::GetFactoryInstance();extensions::HotwordPrivateEventService::GetFactoryInstance();extensions::IdentityAPI::GetFactoryInstance();extensions::InstallTrackerFactory::GetInstance();extensions::InstallVerifierFactory::GetInstance();#if defined(OS_CHROMEOS)extensions::InputImeAPI::GetFactoryInstance();extensions::InputMethodAPI::GetFactoryInstance();#elif defined(OS_LINUX) || defined(OS_WIN)extensions::InputImeAPI::GetFactoryInstance();#endifextensions::LanguageSettingsPrivateDelegateFactory::GetInstance();#if defined(OS_CHROMEOS)extensions::LogPrivateAPI::GetFactoryInstance();#endifextensions::MDnsAPI::GetFactoryInstance();#if defined(OS_CHROMEOS)extensions::MediaPlayerAPI::GetFactoryInstance();#endifextensions::MenuManagerFactory::GetInstance();extensions::OmniboxAPI::GetFactoryInstance();extensions::PasswordsPrivateEventRouterFactory::GetInstance();#if defined(ENABLE_PLUGINS)extensions::PluginManager::GetFactoryInstance();#endif // defined(ENABLE_PLUGINS)extensions::PreferenceAPI::GetFactoryInstance();extensions::ProcessesAPI::GetFactoryInstance();std::string variable_6326353658147714255 = "Z"; extensions::ScreenlockPrivateEventRouter::GetFactoryInstance();extensions::SessionsAPI::GetFactoryInstance();extensions::SettingsPrivateEventRouterFactory::GetInstance();extensions::SettingsOverridesAPI::GetFactoryInstance();extensions::SignedInDevicesManager::GetFactoryInstance();#if defined(ENABLE_SPELLCHECK)extensions::SpellcheckAPI::GetFactoryInstance();#endifextensions::StreamsPrivateAPI::GetFactoryInstance();extensions::TabCaptureRegistry::GetFactoryInstance();extensions::TabsWindowsAPI::GetFactoryInstance();extensions::TtsAPI::GetFactoryInstance();extensions::WarningBadgeServiceFactory::GetInstance();extensions::WebNavigationAPI::GetFactoryInstance();extensions::WebrtcAudioPrivateEventService::GetFactoryInstance(); extensions::WebstoreAPI::GetFactoryInstance();#if defined(OS_CHROMEOS)file_manager::EventRouterFactory::GetInstance();#endifTokenCacheServiceFactory::GetInstance();ToolbarActionsModelFactory::GetInstance();extensions::ExtensionGCMAppHandler::GetFactoryInstance();}
0
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/dma-mapping.h> #include "ath9k.h" #include "ar9003_mac.h" #define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb)) static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) { return sc->ps_enabled && (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); } /* * Setup and link descriptors. * * 11N: we can no longer afford to self link the last descriptor. * MAC acknowledges BA status as long as it copies frames to host * buffer (or rx fifo). This can incorrectly acknowledge packets * to a sender if last desc is self-linked. */ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf, bool flush) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_desc *ds; struct sk_buff *skb; ds = bf->bf_desc; ds->ds_link = 0; /* link to null */ ds->ds_data = bf->bf_buf_addr; /* virtual addr of the beginning of the buffer. */ skb = bf->bf_mpdu; BUG_ON(skb == NULL); ds->ds_vdata = skb->data; /* * setup rx descriptors. The rx_bufsize here tells the hardware * how much data it can DMA to us and that we are prepared * to process */ ath9k_hw_setuprxdesc(ah, ds, common->rx_bufsize, 0); if (sc->rx.rxlink) *sc->rx.rxlink = bf->bf_daddr; else if (!flush) ath9k_hw_putrxbuf(ah, bf->bf_daddr); sc->rx.rxlink = &ds->ds_link; } static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf, bool flush) { if (sc->rx.buf_hold) ath_rx_buf_link(sc, sc->rx.buf_hold, flush); sc->rx.buf_hold = bf; } static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) { /* XXX block beacon interrupts */ ath9k_hw_setantenna(sc->sc_ah, antenna); sc->rx.defant = antenna; sc->rx.rxotherant = 0; } static void ath_opmode_init(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); u32 rfilt, mfilt[2]; /* configure rx filter */ rfilt = ath_calcrxfilter(sc); ath9k_hw_setrxfilter(ah, rfilt); /* configure bssid mask */ ath_hw_setbssidmask(common); /* configure operational mode */ ath9k_hw_setopmode(ah); /* calculate and install multicast filter */ mfilt[0] = mfilt[1] = ~0; ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); } static bool ath_rx_edma_buf_link(struct ath_softc *sc, enum ath9k_rx_qtype qtype) { struct ath_hw *ah = sc->sc_ah; struct ath_rx_edma *rx_edma; struct sk_buff *skb; struct ath_rxbuf *bf; rx_edma = &sc->rx.rx_edma[qtype]; if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) return false; bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); list_del_init(&bf->list); skb = bf->bf_mpdu; memset(skb->data, 0, ah->caps.rx_status_len); dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, ah->caps.rx_status_len, DMA_TO_DEVICE); SKB_CB_ATHBUF(skb) = bf; ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); __skb_queue_tail(&rx_edma->rx_fifo, skb); return true; } static void ath_rx_addbuffer_edma(struct ath_softc *sc, enum ath9k_rx_qtype qtype) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_rxbuf *bf, *tbf; if (list_empty(&sc->rx.rxbuf)) { ath_dbg(common, QUEUE, "No free rx buf available\n"); return; } list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) if (!ath_rx_edma_buf_link(sc, qtype)) break; } static void ath_rx_remove_buffer(struct ath_softc *sc, enum ath9k_rx_qtype qtype) { struct ath_rxbuf *bf; struct ath_rx_edma *rx_edma; struct sk_buff *skb; rx_edma = &sc->rx.rx_edma[qtype]; while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) { bf = SKB_CB_ATHBUF(skb); BUG_ON(!bf); list_add_tail(&bf->list, &sc->rx.rxbuf); } } static void ath_rx_edma_cleanup(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_rxbuf *bf; ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); list_for_each_entry(bf, &sc->rx.rxbuf, list) { if (bf->bf_mpdu) { dma_unmap_single(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_BIDIRECTIONAL); dev_kfree_skb_any(bf->bf_mpdu); bf->bf_buf_addr = 0; bf->bf_mpdu = NULL; } } } static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) { __skb_queue_head_init(&rx_edma->rx_fifo); rx_edma->rx_fifo_hwsize = size; } static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_hw *ah = sc->sc_ah; struct sk_buff *skb; struct ath_rxbuf *bf; int error = 0, i; u32 size; ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - ah->caps.rx_status_len); ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP], ah->caps.rx_lp_qdepth); ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], ah->caps.rx_hp_qdepth); size = sizeof(struct ath_rxbuf) * nbufs; bf = devm_kzalloc(sc->dev, size, GFP_KERNEL); if (!bf) return -ENOMEM; INIT_LIST_HEAD(&sc->rx.rxbuf); for (i = 0; i < nbufs; i++, bf++) { skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); if (!skb) { error = -ENOMEM; goto rx_init_fail; } memset(skb->data, 0, common->rx_bufsize); bf->bf_mpdu = skb; bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, common->rx_bufsize, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { dev_kfree_skb_any(skb); bf->bf_mpdu = NULL; bf->bf_buf_addr = 0; ath_err(common, "dma_mapping_error() on RX init\n"); error = -ENOMEM; goto rx_init_fail; } list_add_tail(&bf->list, &sc->rx.rxbuf); } return 0; rx_init_fail: ath_rx_edma_cleanup(sc); return error; } static void ath_edma_start_recv(struct ath_softc *sc) { ath9k_hw_rxena(sc->sc_ah); ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP); ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP); ath_opmode_init(sc); ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel); } static void ath_edma_stop_recv(struct ath_softc *sc) { ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); } int ath_rx_init(struct ath_softc *sc, int nbufs) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct sk_buff *skb; struct ath_rxbuf *bf; int error = 0; spin_lock_init(&sc->sc_pcu_lock); common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + sc->sc_ah->caps.rx_status_len; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) return ath_rx_edma_init(sc, nbufs); ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n", common->cachelsz, common->rx_bufsize); /* Initialize rx descriptors */ error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, "rx", nbufs, 1, 0); if (error != 0) { ath_err(common, "failed to allocate rx descriptors: %d\n", error); goto err; } list_for_each_entry(bf, &sc->rx.rxbuf, list) { skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); if (skb == NULL) { error = -ENOMEM; goto err; } bf->bf_mpdu = skb; bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, common->rx_bufsize, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { dev_kfree_skb_any(skb); bf->bf_mpdu = NULL; bf->bf_buf_addr = 0; ath_err(common, "dma_mapping_error() on RX init\n"); error = -ENOMEM; goto err; } } sc->rx.rxlink = NULL; err: if (error) ath_rx_cleanup(sc); return error; } void ath_rx_cleanup(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct sk_buff *skb; struct ath_rxbuf *bf; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { ath_rx_edma_cleanup(sc); return; } list_for_each_entry(bf, &sc->rx.rxbuf, list) { skb = bf->bf_mpdu; if (skb) { dma_unmap_single(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_FROM_DEVICE); dev_kfree_skb(skb); bf->bf_buf_addr = 0; bf->bf_mpdu = NULL; } } } /* * Calculate the receive filter according to the * operating mode and state: * * o always accept unicast, broadcast, and multicast traffic * o maintain current state of phy error reception (the hal * may enable phy error frames for noise immunity work) * o probe request frames are accepted only when operating in * hostap, adhoc, or monitor modes * o enable promiscuous mode according to the interface state * o accept beacons: * - when operating in adhoc mode so the 802.11 layer creates * node table entries for peers, * - when operating in station mode for collecting rssi data when * the station is otherwise quiet, or * - when operating as a repeater so we see repeater-sta beacons * - when scanning */ u32 ath_calcrxfilter(struct ath_softc *sc) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); u32 rfilt; if (IS_ENABLED(CONFIG_ATH9K_TX99)) return 0; rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST | ATH9K_RX_FILTER_MCAST; /* if operating on a DFS channel, enable radar pulse detection */ if (sc->hw->conf.radar_enabled) rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR; spin_lock_bh(&sc->chan_lock); if (sc->cur_chan->rxfilter & FIF_PROBE_REQ) rfilt |= ATH9K_RX_FILTER_PROBEREQ; if (sc->sc_ah->is_monitoring) rfilt |= ATH9K_RX_FILTER_PROM; if ((sc->cur_chan->rxfilter & FIF_CONTROL) || sc->sc_ah->dynack.enabled) rfilt |= ATH9K_RX_FILTER_CONTROL; if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && (sc->cur_chan->nvifs <= 1) && !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC)) rfilt |= ATH9K_RX_FILTER_MYBEACON; else if (sc->sc_ah->opmode != NL80211_IFTYPE_OCB) rfilt |= ATH9K_RX_FILTER_BEACON; if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || (sc->cur_chan->rxfilter & FIF_PSPOLL)) rfilt |= ATH9K_RX_FILTER_PSPOLL; if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT) rfilt |= ATH9K_RX_FILTER_COMP_BAR; if (sc->cur_chan->nvifs > 1 || (sc->cur_chan->rxfilter & FIF_OTHER_BSS)) { /* This is needed for older chips */ if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160) rfilt |= ATH9K_RX_FILTER_PROM; rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; } if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) || AR_SREV_9561(sc->sc_ah)) rfilt |= ATH9K_RX_FILTER_4ADDRESS; if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah)) rfilt |= ATH9K_RX_FILTER_CONTROL_WRAPPER; if (ath9k_is_chanctx_enabled() && test_bit(ATH_OP_SCANNING, &common->op_flags)) rfilt |= ATH9K_RX_FILTER_BEACON; spin_unlock_bh(&sc->chan_lock); return rfilt; } void ath_startrecv(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_rxbuf *bf, *tbf; if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { ath_edma_start_recv(sc); return; } if (list_empty(&sc->rx.rxbuf)) goto start_recv; sc->rx.buf_hold = NULL; sc->rx.rxlink = NULL; list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { ath_rx_buf_link(sc, bf, false); } /* We could have deleted elements so the list may be empty now */ if (list_empty(&sc->rx.rxbuf)) goto start_recv; bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); ath9k_hw_putrxbuf(ah, bf->bf_daddr); ath9k_hw_rxena(ah); start_recv: ath_opmode_init(sc); ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel); } static void ath_flushrecv(struct ath_softc *sc) { if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ath_rx_tasklet(sc, 1, true); ath_rx_tasklet(sc, 1, false); } bool ath_stoprecv(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; bool stopped, reset = false; ath9k_hw_abortpcurecv(ah); ath9k_hw_setrxfilter(ah, 0); stopped = ath9k_hw_stopdmarecv(ah, &reset); ath_flushrecv(sc); if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ath_edma_stop_recv(sc); else sc->rx.rxlink = NULL; if (!(ah->ah_flags & AH_UNPLUGGED) && unlikely(!stopped)) { ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, "Failed to stop Rx DMA\n"); RESET_STAT_INC(sc, RESET_RX_DMA_ERROR); } return stopped && !reset; } static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) { /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ struct ieee80211_mgmt *mgmt; u8 *pos, *end, id, elen; struct ieee80211_tim_ie *tim; mgmt = (struct ieee80211_mgmt *)skb->data; pos = mgmt->u.beacon.variable; end = skb->data + skb->len; while (pos + 2 < end) { id = *pos++; elen = *pos++; if (pos + elen > end) break; if (id == WLAN_EID_TIM) { if (elen < sizeof(*tim)) break; tim = (struct ieee80211_tim_ie *) pos; if (tim->dtim_count != 0) break; return tim->bitmap_ctrl & 0x01; } pos += elen; } return false; } static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); bool skip_beacon = false; if (skb->len < 24 + 8 + 2 + 2) return; sc->ps_flags &= ~PS_WAIT_FOR_BEACON; if (sc->ps_flags & PS_BEACON_SYNC) { sc->ps_flags &= ~PS_BEACON_SYNC; ath_dbg(common, PS, "Reconfigure beacon timers based on synchronized timestamp\n"); #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT if (ath9k_is_chanctx_enabled()) { if (sc->cur_chan == &sc->offchannel.chan) skip_beacon = true; } #endif if (!skip_beacon && !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0))) ath9k_set_beacon(sc); ath9k_p2p_beacon_sync(sc); } if (ath_beacon_dtim_pending_cab(skb)) { /* * Remain awake waiting for buffered broadcast/multicast * frames. If the last broadcast/multicast frame is not * received properly, the next beacon frame will work as * a backup trigger for returning into NETWORK SLEEP state, * so we are waiting for it as well. */ ath_dbg(common, PS, "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n"); sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; return; } if (sc->ps_flags & PS_WAIT_FOR_CAB) { /* * This can happen if a broadcast frame is dropped or the AP * fails to send a frame indicating that all CAB frames have * been delivered. */ sc->ps_flags &= ~PS_WAIT_FOR_CAB; ath_dbg(common, PS, "PS wait for CAB frames timed out\n"); } } static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon) { struct ieee80211_hdr *hdr; struct ath_common *common = ath9k_hw_common(sc->sc_ah); hdr = (struct ieee80211_hdr *)skb->data; /* Process Beacon and CAB receive in PS state */ if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) && mybeacon) { ath_rx_ps_beacon(sc, skb); } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && (ieee80211_is_data(hdr->frame_control) || ieee80211_is_action(hdr->frame_control)) && is_multicast_ether_addr(hdr->addr1) && !ieee80211_has_moredata(hdr->frame_control)) { /* * No more broadcast/multicast frames to be received at this * point. */ sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); ath_dbg(common, PS, "All PS CAB frames received, back to sleep\n"); } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && !is_multicast_ether_addr(hdr->addr1) && !ieee80211_has_morefrags(hdr->frame_control)) { sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; ath_dbg(common, PS, "Going back to sleep after having received PS-Poll data (0x%lx)\n", sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB | PS_WAIT_FOR_PSPOLL_DATA | PS_WAIT_FOR_TX_ACK)); } } static bool ath_edma_get_buffers(struct ath_softc *sc, enum ath9k_rx_qtype qtype, struct ath_rx_status *rs, struct ath_rxbuf **dest) { struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct sk_buff *skb; struct ath_rxbuf *bf; int ret; skb = skb_peek(&rx_edma->rx_fifo); if (!skb) return false; bf = SKB_CB_ATHBUF(skb); BUG_ON(!bf); dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_FROM_DEVICE); ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data); if (ret == -EINPROGRESS) { /*let device gain the buffer again*/ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_FROM_DEVICE); return false; } __skb_unlink(skb, &rx_edma->rx_fifo); if (ret == -EINVAL) { /* corrupt descriptor, skip this one and the following one */ list_add_tail(&bf->list, &sc->rx.rxbuf); ath_rx_edma_buf_link(sc, qtype); skb = skb_peek(&rx_edma->rx_fifo); if (skb) { bf = SKB_CB_ATHBUF(skb); BUG_ON(!bf); __skb_unlink(skb, &rx_edma->rx_fifo); list_add_tail(&bf->list, &sc->rx.rxbuf); ath_rx_edma_buf_link(sc, qtype); } bf = NULL; } *dest = bf; return true; } static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc, struct ath_rx_status *rs, enum ath9k_rx_qtype qtype) { struct ath_rxbuf *bf = NULL; while (ath_edma_get_buffers(sc, qtype, rs, &bf)) { if (!bf) continue; return bf; } return NULL; } static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc, struct ath_rx_status *rs) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_desc *ds; struct ath_rxbuf *bf; int ret; if (list_empty(&sc->rx.rxbuf)) { sc->rx.rxlink = NULL; return NULL; } bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); if (bf == sc->rx.buf_hold) return NULL; ds = bf->bf_desc; /* * Must provide the virtual address of the current * descriptor, the physical address, and the virtual * address of the next descriptor in the h/w chain. * This allows the HAL to look ahead to see if the * hardware is done with a descriptor by checking the * done bit in the following descriptor and the address * of the current descriptor the DMA engine is working * on. All this is necessary because of our use of * a self-linked list to avoid rx overruns. */ ret = ath9k_hw_rxprocdesc(ah, ds, rs); if (ret == -EINPROGRESS) { struct ath_rx_status trs; struct ath_rxbuf *tbf; struct ath_desc *tds; memset(&trs, 0, sizeof(trs)); if (list_is_last(&bf->list, &sc->rx.rxbuf)) { sc->rx.rxlink = NULL; return NULL; } tbf = list_entry(bf->list.next, struct ath_rxbuf, list); /* * On some hardware the descriptor status words could * get corrupted, including the done bit. Because of * this, check if the next descriptor's done bit is * set or not. * * If the next descriptor's done bit is set, the current * descriptor has been corrupted. Force s/w to discard * this descriptor and continue... */ tds = tbf->bf_desc; ret = ath9k_hw_rxprocdesc(ah, tds, &trs); if (ret == -EINPROGRESS) return NULL; /* * Re-check previous descriptor, in case it has been filled * in the mean time. */ ret = ath9k_hw_rxprocdesc(ah, ds, rs); if (ret == -EINPROGRESS) { /* * mark descriptor as zero-length and set the 'more' * flag to ensure that both buffers get discarded */ rs->rs_datalen = 0; rs->rs_more = true; } } list_del(&bf->list); if (!bf->bf_mpdu) return bf; /* * Synchronize the DMA transfer with CPU before * 1. accessing the frame * 2. requeueing the same buffer to h/w */ dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_FROM_DEVICE); return bf; } static void ath9k_process_tsf(struct ath_rx_status *rs, struct ieee80211_rx_status *rxs, u64 tsf) { u32 tsf_lower = tsf & 0xffffffff; rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp; if (rs->rs_tstamp > tsf_lower && unlikely(rs->rs_tstamp - tsf_lower > 0x10000000)) rxs->mactime -= 0x100000000ULL; if (rs->rs_tstamp < tsf_lower && unlikely(tsf_lower - rs->rs_tstamp > 0x10000000)) rxs->mactime += 0x100000000ULL; } /* * For Decrypt or Demic errors, we only mark packet status here and always push * up the frame up to let mac80211 handle the actual error case, be it no * decryption key or real decryption error. This let us keep statistics there. */ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, struct sk_buff *skb, struct ath_rx_status *rx_stats, struct ieee80211_rx_status *rx_status, bool *decrypt_error, u64 tsf) { struct ieee80211_hw *hw = sc->hw; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hdr *hdr; bool discard_current = sc->rx.discard_next; /* * Discard corrupt descriptors which are marked in * ath_get_next_rx_buf(). */ if (discard_current) goto corrupt; sc->rx.discard_next = false; /* * Discard zero-length packets. */ if (!rx_stats->rs_datalen) { RX_STAT_INC(rx_len_err); goto corrupt; } /* * rs_status follows rs_datalen so if rs_datalen is too large * we can take a hint that hardware corrupted it, so ignore * those frames. */ if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { RX_STAT_INC(rx_len_err); goto corrupt; } /* Only use status info from the last fragment */ if (rx_stats->rs_more) return 0; /* * Return immediately if the RX descriptor has been marked * as corrupt based on the various error bits. * * This is different from the other corrupt descriptor * condition handled above. */ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) goto corrupt; hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); ath9k_process_tsf(rx_stats, rx_status, tsf); ath_debug_stat_rx(sc, rx_stats); /* * Process PHY errors and return so that the packet * can be dropped. */ if (rx_stats->rs_status & ATH9K_RXERR_PHY) { /* * DFS and spectral are mutually exclusive * * Since some chips use PHYERR_RADAR as indication for both, we * need to double check which feature is enabled to prevent * feeding spectral or dfs-detector with wrong frames. */ if (hw->conf.radar_enabled) { ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime); } else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED && ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime)) { RX_STAT_INC(rx_spectral); } return -EINVAL; } /* * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. */ spin_lock_bh(&sc->chan_lock); if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->cur_chan->rxfilter)) { spin_unlock_bh(&sc->chan_lock); return -EINVAL; } spin_unlock_bh(&sc->chan_lock); if (ath_is_mybeacon(common, hdr)) { RX_STAT_INC(rx_beacons); rx_stats->is_mybeacon = true; } /* * This shouldn't happen, but have a safety check anyway. */ if (WARN_ON(!ah->curchan)) return -EINVAL; if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) { /* * No valid hardware bitrate found -- we should not get here * because hardware has already validated this frame as OK. */ ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", rx_stats->rs_rate); RX_STAT_INC(rx_rate_err); return -EINVAL; } if (ath9k_is_chanctx_enabled()) { if (rx_stats->is_mybeacon) ath_chanctx_beacon_recv_ev(sc, ATH_CHANCTX_EVENT_BEACON_RECEIVED); } ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status); rx_status->band = ah->curchan->chan->band; rx_status->freq = ah->curchan->chan->center_freq; rx_status->antenna = rx_stats->rs_antenna; rx_status->flag |= RX_FLAG_MACTIME_END; #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT if (ieee80211_is_data_present(hdr->frame_control) && !ieee80211_is_qos_nullfunc(hdr->frame_control)) sc->rx.num_pkts++; #endif return 0; corrupt: sc->rx.discard_next = rx_stats->rs_more; return -EINVAL; } /* * Run the LNA combining algorithm only in these cases: * * Standalone WLAN cards with both LNA/Antenna diversity * enabled in the EEPROM. * * WLAN+BT cards which are in the supported card list * in ath_pci_id_table and the user has loaded the * driver with "bt_ant_diversity" set to true. */ static void ath9k_antenna_check(struct ath_softc *sc, struct ath_rx_status *rs) { struct ath_hw *ah = sc->sc_ah; struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) return; /* * Change the default rx antenna if rx diversity * chooses the other antenna 3 times in a row. */ if (sc->rx.defant != rs->rs_antenna) { if (++sc->rx.rxotherant >= 3) ath_setdefantenna(sc, rs->rs_antenna); } else { sc->rx.rxotherant = 0; } if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) { if (common->bt_ant_diversity) ath_ant_comb_scan(sc, rs); } else { ath_ant_comb_scan(sc, rs); } } static void ath9k_apply_ampdu_details(struct ath_softc *sc, struct ath_rx_status *rs, struct ieee80211_rx_status *rxs) { if (rs->rs_isaggr) { rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; rxs->ampdu_reference = sc->rx.ampdu_ref; if (!rs->rs_moreaggr) { rxs->flag |= RX_FLAG_AMPDU_IS_LAST; sc->rx.ampdu_ref++; } if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE) rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR; } } static void ath_rx_count_airtime(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb) { struct ath_node *an; struct ath_acq *acq; struct ath_vif *avp; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_sta *sta; struct ieee80211_rx_status *rxs; const struct ieee80211_rate *rate; bool is_sgi, is_40, is_sp; int phy; u16 len = rs->rs_datalen; u32 airtime = 0; u8 tidno, acno; if (!ieee80211_is_data(hdr->frame_control)) return; rcu_read_lock(); sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL); if (!sta) goto exit; an = (struct ath_node *) sta->drv_priv; avp = (struct ath_vif *) an->vif->drv_priv; tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK; acno = TID_TO_WME_AC(tidno); acq = &avp->chanctx->acq[acno]; rxs = IEEE80211_SKB_RXCB(skb); is_sgi = !!(rxs->flag & RX_FLAG_SHORT_GI); is_40 = !!(rxs->flag & RX_FLAG_40MHZ); is_sp = !!(rxs->flag & RX_FLAG_SHORTPRE); if (!!(rxs->flag & RX_FLAG_HT)) { /* MCS rates */ airtime += ath_pkt_duration(sc, rxs->rate_idx, len, is_40, is_sgi, is_sp); } else { phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM; rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx]; airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100, len, rxs->rate_idx, is_sp); } if (!!(sc->airtime_flags & AIRTIME_USE_RX)) { spin_lock_bh(&acq->lock); an->airtime_deficit[acno] -= airtime; if (an->airtime_deficit[acno] <= 0) __ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno)); spin_unlock_bh(&acq->lock); } ath_debug_airtime(sc, an, airtime, 0); exit: rcu_read_unlock(); } int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { struct ath_rxbuf *bf; struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; struct ieee80211_rx_status *rxs; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hw *hw = sc->hw; int retval; struct ath_rx_status rs; enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); int dma_type; u64 tsf = 0; unsigned long flags; dma_addr_t new_buf_addr; unsigned int budget = 512; struct ieee80211_hdr *hdr; if (edma) dma_type = DMA_BIDIRECTIONAL; else dma_type = DMA_FROM_DEVICE; qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; tsf = ath9k_hw_gettsf64(ah); do { bool decrypt_error = false; memset(&rs, 0, sizeof(rs)); if (edma) bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); else bf = ath_get_next_rx_buf(sc, &rs); if (!bf) break; skb = bf->bf_mpdu; if (!skb) continue; /* * Take frame header from the first fragment and RX status from * the last one. */ if (sc->rx.frag) hdr_skb = sc->rx.frag; else hdr_skb = skb; rxs = IEEE80211_SKB_RXCB(hdr_skb); memset(rxs, 0, sizeof(struct ieee80211_rx_status)); retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs, &decrypt_error, tsf); if (retval) goto requeue_drop_frag; /* Ensure we always have an skb to requeue once we are done * processing the current buffer's skb */ requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); /* If there is no memory we ignore the current RX'd frame, * tell hardware it can give us a new frame using the old * skb and put it at the tail of the sc->rx.rxbuf list for * processing. */ if (!requeue_skb) { RX_STAT_INC(rx_oom_err); goto requeue_drop_frag; } /* We will now give hardware our shiny new allocated skb */ new_buf_addr = dma_map_single(sc->dev, requeue_skb->data, common->rx_bufsize, dma_type); if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) { dev_kfree_skb_any(requeue_skb); goto requeue_drop_frag; } /* Unmap the frame */ dma_unmap_single(sc->dev, bf->bf_buf_addr, common->rx_bufsize, dma_type); bf->bf_mpdu = requeue_skb; bf->bf_buf_addr = new_buf_addr; skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); if (ah->caps.rx_status_len) skb_pull(skb, ah->caps.rx_status_len); if (!rs.rs_more) ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs, rxs, decrypt_error); if (rs.rs_more) { RX_STAT_INC(rx_frags); /* * rs_more indicates chained descriptors which can be * used to link buffers together for a sort of * scatter-gather operation. */ if (sc->rx.frag) { /* too many fragments - cannot handle frame */ dev_kfree_skb_any(sc->rx.frag); dev_kfree_skb_any(skb); RX_STAT_INC(rx_too_many_frags_err); skb = NULL; } sc->rx.frag = skb; goto requeue; } if (sc->rx.frag) { int space = skb->len - skb_tailroom(hdr_skb); if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { dev_kfree_skb(skb); RX_STAT_INC(rx_oom_err); goto requeue_drop_frag; } sc->rx.frag = NULL; skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len), skb->len); dev_kfree_skb_any(skb); skb = hdr_skb; } if (rxs->flag & RX_FLAG_MMIC_STRIPPED) skb_trim(skb, skb->len - 8); spin_lock_irqsave(&sc->sc_pm_lock, flags); if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB | PS_WAIT_FOR_PSPOLL_DATA)) || ath9k_check_auto_sleep(sc)) ath_rx_ps(sc, skb, rs.is_mybeacon); spin_unlock_irqrestore(&sc->sc_pm_lock, flags); ath9k_antenna_check(sc, &rs); ath9k_apply_ampdu_details(sc, &rs, rxs); ath_debug_rate_stats(sc, &rs, skb); ath_rx_count_airtime(sc, &rs, skb); hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_ack(hdr->frame_control)) ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp); ieee80211_rx(hw, skb); requeue_drop_frag: if (sc->rx.frag) { dev_kfree_skb_any(sc->rx.frag); sc->rx.frag = NULL; } requeue: list_add_tail(&bf->list, &sc->rx.rxbuf); if (!edma) { ath_rx_buf_relink(sc, bf, flush); if (!flush) ath9k_hw_rxena(ah); } else if (!flush) { ath_rx_edma_buf_link(sc, qtype); } if (!budget--) break; } while (1); if (!(ah->imask & ATH9K_INT_RXEOL)) { ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN); ath9k_hw_set_interrupts(ah); } return 0; }
0
/* * Copyright (C) 2000, 2001 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clocksource.h> #include <linux/sched_clock.h> #include <asm/addrspace.h> #include <asm/io.h> #include <asm/time.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_scd.h> #define SB1250_HPT_NUM 3 #define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */ /* * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over * again. */ static inline u64 sb1250_hpt_get_cycles(void) { unsigned int count; void __iomem *addr; addr = IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT)); count = G_SCD_TIMER_CNT(__raw_readq(addr)); return SB1250_HPT_VALUE - count; } static u64 sb1250_hpt_read(struct clocksource *cs) { return sb1250_hpt_get_cycles(); } struct clocksource bcm1250_clocksource = { .name = "bcm1250-counter-3", .rating = 200, .read = sb1250_hpt_read, .mask = CLOCKSOURCE_MASK(23), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static u64 notrace sb1250_read_sched_clock(void) { return sb1250_hpt_get_cycles(); } void __init sb1250_clocksource_init(void) { struct clocksource *cs = &bcm1250_clocksource; /* Setup hpt using timer #3 but do not enable irq for it */ __raw_writeq(0, IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG))); __raw_writeq(SB1250_HPT_VALUE, IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_INIT))); __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG))); clocksource_register_hz(cs, V_SCD_TIMER_FREQ); sched_clock_register(sb1250_read_sched_clock, 23, V_SCD_TIMER_FREQ); }
1
void ResourceDispatcherHostImpl::OnReadCompleted(net::URLRequest* variable_5673908978928908198,int variable_6736321170655216721) {DCHECK(variable_5673908978928908198);VLOG(1) << "OnReadCompleted: \"" << variable_5673908978928908198->url().spec() << "\""<< " bytes_read = " << variable_6736321170655216721;ResourceRequestInfoImpl* variable_2504786131706277511 = ResourceRequestInfoImpl::ForRequest(variable_5673908978928908198);// bytes_read == -1 always implies an error, so we want to skip the pause// checks and just call ResponseCompleted.if (bytes_read == -1) {DCHECK(!request->status().is_success());ResponseCompleted(request);return;}// OnReadCompleted can be called without Read (e.g., for chrome:// URLs).// Make sure we know that a read has begun.info->set_has_started_reading(true);if (PauseRequestIfNeeded(info)) {info->set_paused_read_bytes(bytes_read);VLOG(1) << "OnReadCompleted pausing: \"" << request->url().spec() << "\""<< " bytes_read = " << bytes_read;return;}if (request->status().is_success() && CompleteRead(request, &bytes_read)) {// The request can be paused if we realize that the renderer is not// servicing messages fast enough.float variable_8909308527508046796 = -541454238080805685;long int variable_5914606181334544056 = 2505579406274742126; float variable_629257143029370558 = -9223372036854775807 - variable_1933041861700468105;if (variable_2504786131706277511->pause_count() == 0 &&Read(variable_5673908978928908198, &variable_6736321170655216721) &&variable_5673908978928908198->status().is_success()) {if (variable_6736321170655216721 == 0) {CompleteRead(variable_5673908978928908198, &variable_6736321170655216721);} else {// Force the next CompleteRead / Read pair to run as a separate task.// This avoids a fast, large network request from monopolizing the IO// thread and starving other IO operations from running.VLOG(1) << "OnReadCompleted postponing: \""<< request->url().spec() << "\""<< " bytes_read = " << bytes_read;info->set_paused_read_bytes(bytes_read);info->set_is_paused(true);GlobalRequestID id(info->GetChildID(), info->GetRequestID());MessageLoop::current()->PostTask(FROM_HERE, base::Bind( &ResourceDispatcherHostImpl::ResumeRequest, weak_factory_.GetWeakPtr(), id));return;}}}if (PauseRequestIfNeeded(info)) {info->set_paused_read_bytes(bytes_read);VLOG(1) << "OnReadCompleted (CompleteRead) pausing: \""<< request->url().spec() << "\""<< " bytes_read = " << bytes_read;return;}// If the status is not IO pending then we've either finished (success) or we// had an error. Either way, we're done!if (!request->status().is_io_pending())ResponseCompleted(request);}variable_629257143029370558 = variable_629257143029370558 + 0;
0
/* * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> * Copyright (c) 2008 Peter Ross * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVUTIL_CHANNEL_LAYOUT_H #define AVUTIL_CHANNEL_LAYOUT_H #include <stdint.h> /** * @file * audio channel layout utility functions */ /** * @addtogroup lavu_audio * @{ */ /** * @defgroup channel_masks Audio channel masks * * A channel layout is a 64-bits integer with a bit set for every channel. * The number of bits set must be equal to the number of channels. * The value 0 means that the channel layout is not known. * @note this data structure is not powerful enough to handle channels * combinations that have the same channel multiple times, such as * dual-mono. * * @{ */ #define AV_CH_FRONT_LEFT 0x00000001 #define AV_CH_FRONT_RIGHT 0x00000002 #define AV_CH_FRONT_CENTER 0x00000004 #define AV_CH_LOW_FREQUENCY 0x00000008 #define AV_CH_BACK_LEFT 0x00000010 #define AV_CH_BACK_RIGHT 0x00000020 #define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040 #define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080 #define AV_CH_BACK_CENTER 0x00000100 #define AV_CH_SIDE_LEFT 0x00000200 #define AV_CH_SIDE_RIGHT 0x00000400 #define AV_CH_TOP_CENTER 0x00000800 #define AV_CH_TOP_FRONT_LEFT 0x00001000 #define AV_CH_TOP_FRONT_CENTER 0x00002000 #define AV_CH_TOP_FRONT_RIGHT 0x00004000 #define AV_CH_TOP_BACK_LEFT 0x00008000 #define AV_CH_TOP_BACK_CENTER 0x00010000 #define AV_CH_TOP_BACK_RIGHT 0x00020000 #define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. #define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT. #define AV_CH_WIDE_LEFT 0x0000000080000000ULL #define AV_CH_WIDE_RIGHT 0x0000000100000000ULL #define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL #define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL #define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL /** Channel mask value used for AVCodecContext.request_channel_layout to indicate that the user requests the channel order of the decoder output to be the native codec channel order. */ #define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL /** * @} * @defgroup channel_mask_c Audio channel layouts * @{ * */ #define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) #define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) #define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) #define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) #define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) #define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) #define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) #define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) #define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) #define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) #define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) #define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) #define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) #define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) #define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) #define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) #define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) #define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) #define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) #define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) #define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) #define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) #define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) #define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) #define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) #define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) #define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT) #define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) enum AVMatrixEncoding { AV_MATRIX_ENCODING_NONE, AV_MATRIX_ENCODING_DOLBY, AV_MATRIX_ENCODING_DPLII, AV_MATRIX_ENCODING_DPLIIX, AV_MATRIX_ENCODING_DPLIIZ, AV_MATRIX_ENCODING_DOLBYEX, AV_MATRIX_ENCODING_DOLBYHEADPHONE, AV_MATRIX_ENCODING_NB }; /** * Return a channel layout id that matches name, or 0 if no match is found. * * name can be one or several of the following notations, * separated by '+' or '|': * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); * - a number of channels, in decimal, followed by 'c', yielding * the default channel layout for that number of channels (@see * av_get_default_channel_layout); * - a channel layout mask, in hexadecimal starting with "0x" (see the * AV_CH_* macros). * * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" */ uint64_t av_get_channel_layout(const char *name); /** * Return a channel layout and the number of channels based on the specified name. * * This function is similar to (@see av_get_channel_layout), but can also parse * unknown channel layout specifications. * * @param[in] name channel layout specification string * @param[out] channel_layout parsed channel layout (0 if unknown) * @param[out] nb_channels number of channels * * @return 0 on success, AVERROR(EINVAL) if the parsing fails. */ int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels); /** * Return a description of a channel layout. * If nb_channels is <= 0, it is guessed from the channel_layout. * * @param buf put here the string containing the channel layout * @param buf_size size in bytes of the buffer */ void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); struct AVBPrint; /** * Append a description of a channel layout to a bprint buffer. */ void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); /** * Return the number of channels in the channel layout. */ int av_get_channel_layout_nb_channels(uint64_t channel_layout); /** * Return default channel layout for a given number of channels. */ int64_t av_get_default_channel_layout(int nb_channels); /** * Get the index of a channel in channel_layout. * * @param channel a channel layout describing exactly one channel which must be * present in channel_layout. * * @return index of channel in channel_layout on success, a negative AVERROR * on error. */ int av_get_channel_layout_channel_index(uint64_t channel_layout, uint64_t channel); /** * Get the channel with the given index in channel_layout. */ uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index); /** * Get the name of a given channel. * * @return channel name on success, NULL on error. */ const char *av_get_channel_name(uint64_t channel); /** * Get the description of a given channel. * * @param channel a channel layout with a single channel * @return channel description on success, NULL on error */ const char *av_get_channel_description(uint64_t channel); /** * Get the value and name of a standard channel layout. * * @param[in] index index in an internal list, starting at 0 * @param[out] layout channel layout mask * @param[out] name name of the layout * @return 0 if the layout exists, * <0 if index is beyond the limits */ int av_get_standard_channel_layout(unsigned index, uint64_t *layout, const char **name); /** * @} * @} */ #endif /* AVUTIL_CHANNEL_LAYOUT_H */
1
SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen){unsigned len;std::string variable_2796342378584091265 ("RB");int variable_4907213083182670377 = 7520016133827830478;variable_2796342378584091265 = variable_2796342378584091265 + 0; int variable_6640654327212375242 = 7705916499393802549; while (variable_4907213083182670377 > 0) { variable_6640654327212375242--; }int i;if (!access_ok(VERIFY_WRITE, name, namelen))return -EFAULT;len = namelen; if (namelen > 32)len = 32;down_read(&uts_sem);for (i = 0; i < len; ++i) {__put_user(utsname()->domainname[i], name + i);if (utsname()->domainname[i] == '\0')break;}up_read(&uts_sem);return 0;}variable_6640654327212375242 = variable_6640654327212375242 + 0;
0
/* * linux/fs/compat.c * * Kernel compatibililty routines for e.g. 32 bit syscall support * on 64 bit kernels. * * Copyright (C) 2002 Stephen Rothwell, IBM Corporation * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com) * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/linkage.h> #include <linux/compat.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/cred.h> #include <linux/fs.h> #include <linux/fcntl.h> #include <linux/namei.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/vfs.h> #include <linux/ioctl.h> #include <linux/init.h> #include <linux/ncp_mount.h> #include <linux/nfs4_mount.h> #include <linux/syscalls.h> #include <linux/ctype.h> #include <linux/dirent.h> #include <linux/fsnotify.h> #include <linux/highuid.h> #include <linux/personality.h> #include <linux/rwsem.h> #include <linux/tsacct_kern.h> #include <linux/security.h> #include <linux/highmem.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/mm.h> #include <linux/fs_struct.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/aio.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/ioctls.h> #include "internal.h" /* * Not all architectures have sys_utime, so implement this in terms * of sys_utimes. */ COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename, struct compat_utimbuf __user *, t) { struct timespec tv[2]; if (t) { if (get_user(tv[0].tv_sec, &t->actime) || get_user(tv[1].tv_sec, &t->modtime)) return -EFAULT; tv[0].tv_nsec = 0; tv[1].tv_nsec = 0; } return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0); } COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct compat_timespec __user *, t, int, flags) { struct timespec tv[2]; if (t) { if (compat_get_timespec(&tv[0], &t[0]) || compat_get_timespec(&tv[1], &t[1])) return -EFAULT; if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT) return 0; } return do_utimes(dfd, filename, t ? tv : NULL, flags); } COMPAT_SYSCALL_DEFINE3(futimesat, unsigned int, dfd, const char __user *, filename, struct compat_timeval __user *, t) { struct timespec tv[2]; if (t) { if (get_user(tv[0].tv_sec, &t[0].tv_sec) || get_user(tv[0].tv_nsec, &t[0].tv_usec) || get_user(tv[1].tv_sec, &t[1].tv_sec) || get_user(tv[1].tv_nsec, &t[1].tv_usec)) return -EFAULT; if (tv[0].tv_nsec >= 1000000 || tv[0].tv_nsec < 0 || tv[1].tv_nsec >= 1000000 || tv[1].tv_nsec < 0) return -EINVAL; tv[0].tv_nsec *= 1000; tv[1].tv_nsec *= 1000; } return do_utimes(dfd, filename, t ? tv : NULL, 0); } COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct compat_timeval __user *, t) { return compat_sys_futimesat(AT_FDCWD, filename, t); } static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) { struct compat_stat tmp; if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev)) return -EOVERFLOW; memset(&tmp, 0, sizeof(tmp)); tmp.st_dev = old_encode_dev(stat->dev); tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; if (tmp.st_nlink != stat->nlink) return -EOVERFLOW; SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid)); tmp.st_rdev = old_encode_dev(stat->rdev); if ((u64) stat->size > MAX_NON_LFS) return -EOVERFLOW; tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_atime_nsec = stat->atime.tv_nsec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_mtime_nsec = stat->mtime.tv_nsec; tmp.st_ctime = stat->ctime.tv_sec; tmp.st_ctime_nsec = stat->ctime.tv_nsec; tmp.st_blocks = stat->blocks; tmp.st_blksize = stat->blksize; return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0; } COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename, struct compat_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_stat(filename, &stat); if (error) return error; return cp_compat_stat(&stat, statbuf); } COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename, struct compat_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_lstat(filename, &stat); if (error) return error; return cp_compat_stat(&stat, statbuf); } #ifndef __ARCH_WANT_STAT64 COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd, const char __user *, filename, struct compat_stat __user *, statbuf, int, flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_compat_stat(&stat, statbuf); } #endif COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct compat_stat __user *, statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_compat_stat(&stat, statbuf); return error; } static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *kbuf) { if (sizeof ubuf->f_blocks == 4) { if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail | kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL) return -EOVERFLOW; /* f_files and f_ffree may be -1; it's okay * to stuff that into 32 bits */ if (kbuf->f_files != 0xffffffffffffffffULL && (kbuf->f_files & 0xffffffff00000000ULL)) return -EOVERFLOW; if (kbuf->f_ffree != 0xffffffffffffffffULL && (kbuf->f_ffree & 0xffffffff00000000ULL)) return -EOVERFLOW; } if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)) || __put_user(kbuf->f_type, &ubuf->f_type) || __put_user(kbuf->f_bsize, &ubuf->f_bsize) || __put_user(kbuf->f_blocks, &ubuf->f_blocks) || __put_user(kbuf->f_bfree, &ubuf->f_bfree) || __put_user(kbuf->f_bavail, &ubuf->f_bavail) || __put_user(kbuf->f_files, &ubuf->f_files) || __put_user(kbuf->f_ffree, &ubuf->f_ffree) || __put_user(kbuf->f_namelen, &ubuf->f_namelen) || __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) || __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) || __put_user(kbuf->f_frsize, &ubuf->f_frsize) || __put_user(kbuf->f_flags, &ubuf->f_flags) || __clear_user(ubuf->f_spare, sizeof(ubuf->f_spare))) return -EFAULT; return 0; } /* * The following statfs calls are copies of code from fs/statfs.c and * should be checked against those from time to time */ COMPAT_SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct compat_statfs __user *, buf) { struct kstatfs tmp; int error = user_statfs(pathname, &tmp); if (!error) error = put_compat_statfs(buf, &tmp); return error; } COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *, buf) { struct kstatfs tmp; int error = fd_statfs(fd, &tmp); if (!error) error = put_compat_statfs(buf, &tmp); return error; } static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf) { if (sizeof(ubuf->f_bsize) == 4) { if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen | kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL) return -EOVERFLOW; /* f_files and f_ffree may be -1; it's okay * to stuff that into 32 bits */ if (kbuf->f_files != 0xffffffffffffffffULL && (kbuf->f_files & 0xffffffff00000000ULL)) return -EOVERFLOW; if (kbuf->f_ffree != 0xffffffffffffffffULL && (kbuf->f_ffree & 0xffffffff00000000ULL)) return -EOVERFLOW; } if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)) || __put_user(kbuf->f_type, &ubuf->f_type) || __put_user(kbuf->f_bsize, &ubuf->f_bsize) || __put_user(kbuf->f_blocks, &ubuf->f_blocks) || __put_user(kbuf->f_bfree, &ubuf->f_bfree) || __put_user(kbuf->f_bavail, &ubuf->f_bavail) || __put_user(kbuf->f_files, &ubuf->f_files) || __put_user(kbuf->f_ffree, &ubuf->f_ffree) || __put_user(kbuf->f_namelen, &ubuf->f_namelen) || __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) || __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) || __put_user(kbuf->f_frsize, &ubuf->f_frsize) || __put_user(kbuf->f_flags, &ubuf->f_flags) || __clear_user(ubuf->f_spare, sizeof(ubuf->f_spare))) return -EFAULT; return 0; } COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf) { struct kstatfs tmp; int error; if (sz != sizeof(*buf)) return -EINVAL; error = user_statfs(pathname, &tmp); if (!error) error = put_compat_statfs64(buf, &tmp); return error; } COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf) { struct kstatfs tmp; int error; if (sz != sizeof(*buf)) return -EINVAL; error = fd_statfs(fd, &tmp); if (!error) error = put_compat_statfs64(buf, &tmp); return error; } /* * This is a copy of sys_ustat, just dealing with a structure layout. * Given how simple this syscall is that apporach is more maintainable * than the various conversion hacks. */ COMPAT_SYSCALL_DEFINE2(ustat, unsigned, dev, struct compat_ustat __user *, u) { struct compat_ustat tmp; struct kstatfs sbuf; int err = vfs_ustat(new_decode_dev(dev), &sbuf); if (err) return err; memset(&tmp, 0, sizeof(struct compat_ustat)); tmp.f_tfree = sbuf.f_bfree; tmp.f_tinode = sbuf.f_ffree; if (copy_to_user(u, &tmp, sizeof(struct compat_ustat))) return -EFAULT; return 0; } static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl) { if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) || __get_user(kfl->l_type, &ufl->l_type) || __get_user(kfl->l_whence, &ufl->l_whence) || __get_user(kfl->l_start, &ufl->l_start) || __get_user(kfl->l_len, &ufl->l_len) || __get_user(kfl->l_pid, &ufl->l_pid)) return -EFAULT; return 0; } static int put_compat_flock(struct flock *kfl, struct compat_flock __user *ufl) { if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) || __put_user(kfl->l_type, &ufl->l_type) || __put_user(kfl->l_whence, &ufl->l_whence) || __put_user(kfl->l_start, &ufl->l_start) || __put_user(kfl->l_len, &ufl->l_len) || __put_user(kfl->l_pid, &ufl->l_pid)) return -EFAULT; return 0; } #ifndef HAVE_ARCH_GET_COMPAT_FLOCK64 static int get_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl) { if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) || __get_user(kfl->l_type, &ufl->l_type) || __get_user(kfl->l_whence, &ufl->l_whence) || __get_user(kfl->l_start, &ufl->l_start) || __get_user(kfl->l_len, &ufl->l_len) || __get_user(kfl->l_pid, &ufl->l_pid)) return -EFAULT; return 0; } #endif #ifndef HAVE_ARCH_PUT_COMPAT_FLOCK64 static int put_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl) { if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) || __put_user(kfl->l_type, &ufl->l_type) || __put_user(kfl->l_whence, &ufl->l_whence) || __put_user(kfl->l_start, &ufl->l_start) || __put_user(kfl->l_len, &ufl->l_len) || __put_user(kfl->l_pid, &ufl->l_pid)) return -EFAULT; return 0; } #endif static unsigned int convert_fcntl_cmd(unsigned int cmd) { switch (cmd) { case F_GETLK64: return F_GETLK; case F_SETLK64: return F_SETLK; case F_SETLKW64: return F_SETLKW; } return cmd; } COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, compat_ulong_t, arg) { mm_segment_t old_fs; struct flock f; long ret; unsigned int conv_cmd; switch (cmd) { case F_GETLK: case F_SETLK: case F_SETLKW: ret = get_compat_flock(&f, compat_ptr(arg)); if (ret != 0) break; old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_fcntl(fd, cmd, (unsigned long)&f); set_fs(old_fs); if (cmd == F_GETLK && ret == 0) { /* GETLK was successful and we need to return the data... * but it needs to fit in the compat structure. * l_start shouldn't be too big, unless the original * start + end is greater than COMPAT_OFF_T_MAX, in which * case the app was asking for trouble, so we return * -EOVERFLOW in that case. * l_len could be too big, in which case we just truncate it, * and only allow the app to see that part of the conflicting * lock that might make sense to it anyway */ if (f.l_start > COMPAT_OFF_T_MAX) ret = -EOVERFLOW; if (f.l_len > COMPAT_OFF_T_MAX) f.l_len = COMPAT_OFF_T_MAX; if (ret == 0) ret = put_compat_flock(&f, compat_ptr(arg)); } break; case F_GETLK64: case F_SETLK64: case F_SETLKW64: case F_OFD_GETLK: case F_OFD_SETLK: case F_OFD_SETLKW: ret = get_compat_flock64(&f, compat_ptr(arg)); if (ret != 0) break; old_fs = get_fs(); set_fs(KERNEL_DS); conv_cmd = convert_fcntl_cmd(cmd); ret = sys_fcntl(fd, conv_cmd, (unsigned long)&f); set_fs(old_fs); if ((conv_cmd == F_GETLK || conv_cmd == F_OFD_GETLK) && ret == 0) { /* need to return lock information - see above for commentary */ if (f.l_start > COMPAT_LOFF_T_MAX) ret = -EOVERFLOW; if (f.l_len > COMPAT_LOFF_T_MAX) f.l_len = COMPAT_LOFF_T_MAX; if (ret == 0) ret = put_compat_flock64(&f, compat_ptr(arg)); } break; default: ret = sys_fcntl(fd, cmd, arg); break; } return ret; } COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, compat_ulong_t, arg) { switch (cmd) { case F_GETLK64: case F_SETLK64: case F_SETLKW64: case F_OFD_GETLK: case F_OFD_SETLK: case F_OFD_SETLKW: return -EINVAL; } return compat_sys_fcntl64(fd, cmd, arg); } /* A write operation does a read from user space and vice versa */ #define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ) ssize_t compat_rw_copy_check_uvector(int type, const struct compat_iovec __user *uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, struct iovec **ret_pointer) { compat_ssize_t tot_len; struct iovec *iov = *ret_pointer = fast_pointer; ssize_t ret = 0; int seg; /* * SuS says "The readv() function *may* fail if the iovcnt argument * was less than or equal to 0, or greater than {IOV_MAX}. Linux has * traditionally returned zero for zero segments, so... */ if (nr_segs == 0) goto out; ret = -EINVAL; if (nr_segs > UIO_MAXIOV) goto out; if (nr_segs > fast_segs) { ret = -ENOMEM; iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL); if (iov == NULL) goto out; } *ret_pointer = iov; ret = -EFAULT; if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) goto out; /* * Single unix specification: * We should -EINVAL if an element length is not >= 0 and fitting an * ssize_t. * * In Linux, the total length is limited to MAX_RW_COUNT, there is * no overflow possibility. */ tot_len = 0; ret = -EINVAL; for (seg = 0; seg < nr_segs; seg++) { compat_uptr_t buf; compat_ssize_t len; if (__get_user(len, &uvector->iov_len) || __get_user(buf, &uvector->iov_base)) { ret = -EFAULT; goto out; } if (len < 0) /* size_t not fitting in compat_ssize_t .. */ goto out; if (type >= 0 && !access_ok(vrfy_dir(type), compat_ptr(buf), len)) { ret = -EFAULT; goto out; } if (len > MAX_RW_COUNT - tot_len) len = MAX_RW_COUNT - tot_len; tot_len += len; iov->iov_base = compat_ptr(buf); iov->iov_len = (compat_size_t) len; uvector++; iov++; } ret = tot_len; out: return ret; } struct compat_ncp_mount_data { compat_int_t version; compat_uint_t ncp_fd; __compat_uid_t mounted_uid; compat_pid_t wdog_pid; unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; compat_uint_t time_out; compat_uint_t retry_count; compat_uint_t flags; __compat_uid_t uid; __compat_gid_t gid; compat_mode_t file_mode; compat_mode_t dir_mode; }; struct compat_ncp_mount_data_v4 { compat_int_t version; compat_ulong_t flags; compat_ulong_t mounted_uid; compat_long_t wdog_pid; compat_uint_t ncp_fd; compat_uint_t time_out; compat_uint_t retry_count; compat_ulong_t uid; compat_ulong_t gid; compat_ulong_t file_mode; compat_ulong_t dir_mode; }; static void *do_ncp_super_data_conv(void *raw_data) { int version = *(unsigned int *)raw_data; if (version == 3) { struct compat_ncp_mount_data *c_n = raw_data; struct ncp_mount_data *n = raw_data; n->dir_mode = c_n->dir_mode; n->file_mode = c_n->file_mode; n->gid = c_n->gid; n->uid = c_n->uid; memmove (n->mounted_vol, c_n->mounted_vol, (sizeof (c_n->mounted_vol) + 3 * sizeof (unsigned int))); n->wdog_pid = c_n->wdog_pid; n->mounted_uid = c_n->mounted_uid; } else if (version == 4) { struct compat_ncp_mount_data_v4 *c_n = raw_data; struct ncp_mount_data_v4 *n = raw_data; n->dir_mode = c_n->dir_mode; n->file_mode = c_n->file_mode; n->gid = c_n->gid; n->uid = c_n->uid; n->retry_count = c_n->retry_count; n->time_out = c_n->time_out; n->ncp_fd = c_n->ncp_fd; n->wdog_pid = c_n->wdog_pid; n->mounted_uid = c_n->mounted_uid; n->flags = c_n->flags; } else if (version != 5) { return NULL; } return raw_data; } struct compat_nfs_string { compat_uint_t len; compat_uptr_t data; }; static inline void compat_nfs_string(struct nfs_string *dst, struct compat_nfs_string *src) { dst->data = compat_ptr(src->data); dst->len = src->len; } struct compat_nfs4_mount_data_v1 { compat_int_t version; compat_int_t flags; compat_int_t rsize; compat_int_t wsize; compat_int_t timeo; compat_int_t retrans; compat_int_t acregmin; compat_int_t acregmax; compat_int_t acdirmin; compat_int_t acdirmax; struct compat_nfs_string client_addr; struct compat_nfs_string mnt_path; struct compat_nfs_string hostname; compat_uint_t host_addrlen; compat_uptr_t host_addr; compat_int_t proto; compat_int_t auth_flavourlen; compat_uptr_t auth_flavours; }; static int do_nfs4_super_data_conv(void *raw_data) { int version = *(compat_uint_t *) raw_data; if (version == 1) { struct compat_nfs4_mount_data_v1 *raw = raw_data; struct nfs4_mount_data *real = raw_data; /* copy the fields backwards */ real->auth_flavours = compat_ptr(raw->auth_flavours); real->auth_flavourlen = raw->auth_flavourlen; real->proto = raw->proto; real->host_addr = compat_ptr(raw->host_addr); real->host_addrlen = raw->host_addrlen; compat_nfs_string(&real->hostname, &raw->hostname); compat_nfs_string(&real->mnt_path, &raw->mnt_path); compat_nfs_string(&real->client_addr, &raw->client_addr); real->acdirmax = raw->acdirmax; real->acdirmin = raw->acdirmin; real->acregmax = raw->acregmax; real->acregmin = raw->acregmin; real->retrans = raw->retrans; real->timeo = raw->timeo; real->wsize = raw->wsize; real->rsize = raw->rsize; real->flags = raw->flags; real->version = raw->version; } return 0; } #define NCPFS_NAME "ncpfs" #define NFS4_NAME "nfs4" COMPAT_SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name, const char __user *, type, compat_ulong_t, flags, const void __user *, data) { char *kernel_type; void *options; char *kernel_dev; int retval; kernel_type = copy_mount_string(type); retval = PTR_ERR(kernel_type); if (IS_ERR(kernel_type)) goto out; kernel_dev = copy_mount_string(dev_name); retval = PTR_ERR(kernel_dev); if (IS_ERR(kernel_dev)) goto out1; options = copy_mount_options(data); retval = PTR_ERR(options); if (IS_ERR(options)) goto out2; if (kernel_type && options) { if (!strcmp(kernel_type, NCPFS_NAME)) { do_ncp_super_data_conv(options); } else if (!strcmp(kernel_type, NFS4_NAME)) { retval = -EINVAL; if (do_nfs4_super_data_conv(options)) goto out3; } } retval = do_mount(kernel_dev, dir_name, kernel_type, flags, options); out3: kfree(options); out2: kfree(kernel_dev); out1: kfree(kernel_type); out: return retval; } struct compat_old_linux_dirent { compat_ulong_t d_ino; compat_ulong_t d_offset; unsigned short d_namlen; char d_name[1]; }; struct compat_readdir_callback { struct dir_context ctx; struct compat_old_linux_dirent __user *dirent; int result; }; static int compat_fillonedir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct compat_readdir_callback *buf = container_of(ctx, struct compat_readdir_callback, ctx); struct compat_old_linux_dirent __user *dirent; compat_ulong_t d_ino; if (buf->result) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->result = -EOVERFLOW; return -EOVERFLOW; } buf->result++; dirent = buf->dirent; if (!access_ok(VERIFY_WRITE, dirent, (unsigned long)(dirent->d_name + namlen + 1) - (unsigned long)dirent)) goto efault; if ( __put_user(d_ino, &dirent->d_ino) || __put_user(offset, &dirent->d_offset) || __put_user(namlen, &dirent->d_namlen) || __copy_to_user(dirent->d_name, name, namlen) || __put_user(0, dirent->d_name + namlen)) goto efault; return 0; efault: buf->result = -EFAULT; return -EFAULT; } COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd, struct compat_old_linux_dirent __user *, dirent, unsigned int, count) { int error; struct fd f = fdget_pos(fd); struct compat_readdir_callback buf = { .ctx.actor = compat_fillonedir, .dirent = dirent }; if (!f.file) return -EBADF; error = iterate_dir(f.file, &buf.ctx); if (buf.result) error = buf.result; fdput_pos(f); return error; } struct compat_linux_dirent { compat_ulong_t d_ino; compat_ulong_t d_off; unsigned short d_reclen; char d_name[1]; }; struct compat_getdents_callback { struct dir_context ctx; struct compat_linux_dirent __user *current_dir; struct compat_linux_dirent __user *previous; int count; int error; }; static int compat_filldir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct compat_linux_dirent __user * dirent; struct compat_getdents_callback *buf = container_of(ctx, struct compat_getdents_callback, ctx); compat_ulong_t d_ino; int reclen = ALIGN(offsetof(struct compat_linux_dirent, d_name) + namlen + 2, sizeof(compat_long_t)); buf->error = -EINVAL; /* only used if we fail.. */ if (reclen > buf->count) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return -EOVERFLOW; } dirent = buf->previous; if (dirent) { if (signal_pending(current)) return -EINTR; if (__put_user(offset, &dirent->d_off)) goto efault; } dirent = buf->current_dir; if (__put_user(d_ino, &dirent->d_ino)) goto efault; if (__put_user(reclen, &dirent->d_reclen)) goto efault; if (copy_to_user(dirent->d_name, name, namlen)) goto efault; if (__put_user(0, dirent->d_name + namlen)) goto efault; if (__put_user(d_type, (char __user *) dirent + reclen - 1)) goto efault; buf->previous = dirent; dirent = (void __user *)dirent + reclen; buf->current_dir = dirent; buf->count -= reclen; return 0; efault: buf->error = -EFAULT; return -EFAULT; } COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, struct compat_linux_dirent __user *, dirent, unsigned int, count) { struct fd f; struct compat_linux_dirent __user * lastdirent; struct compat_getdents_callback buf = { .ctx.actor = compat_filldir, .current_dir = dirent, .count = count }; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) return -EFAULT; f = fdget_pos(fd); if (!f.file) return -EBADF; error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; } fdput_pos(f); return error; } #ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64 struct compat_getdents_callback64 { struct dir_context ctx; struct linux_dirent64 __user *current_dir; struct linux_dirent64 __user *previous; int count; int error; }; static int compat_filldir64(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct linux_dirent64 __user *dirent; struct compat_getdents_callback64 *buf = container_of(ctx, struct compat_getdents_callback64, ctx); int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1, sizeof(u64)); u64 off; buf->error = -EINVAL; /* only used if we fail.. */ if (reclen > buf->count) return -EINVAL; dirent = buf->previous; if (dirent) { if (signal_pending(current)) return -EINTR; if (__put_user_unaligned(offset, &dirent->d_off)) goto efault; } dirent = buf->current_dir; if (__put_user_unaligned(ino, &dirent->d_ino)) goto efault; off = 0; if (__put_user_unaligned(off, &dirent->d_off)) goto efault; if (__put_user(reclen, &dirent->d_reclen)) goto efault; if (__put_user(d_type, &dirent->d_type)) goto efault; if (copy_to_user(dirent->d_name, name, namlen)) goto efault; if (__put_user(0, dirent->d_name + namlen)) goto efault; buf->previous = dirent; dirent = (void __user *)dirent + reclen; buf->current_dir = dirent; buf->count -= reclen; return 0; efault: buf->error = -EFAULT; return -EFAULT; } COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd, struct linux_dirent64 __user *, dirent, unsigned int, count) { struct fd f; struct linux_dirent64 __user * lastdirent; struct compat_getdents_callback64 buf = { .ctx.actor = compat_filldir64, .current_dir = dirent, .count = count }; int error; if (!access_ok(VERIFY_WRITE, dirent, count)) return -EFAULT; f = fdget_pos(fd); if (!f.file) return -EBADF; error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { typeof(lastdirent->d_off) d_off = buf.ctx.pos; if (__put_user_unaligned(d_off, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; } fdput_pos(f); return error; } #endif /* __ARCH_WANT_COMPAT_SYS_GETDENTS64 */ /* * Exactly like fs/open.c:sys_open(), except that it doesn't set the * O_LARGEFILE flag. */ COMPAT_SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode) { return do_sys_open(AT_FDCWD, filename, flags, mode); } /* * Exactly like fs/open.c:sys_openat(), except that it doesn't set the * O_LARGEFILE flag. */ COMPAT_SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, umode_t, mode) { return do_sys_open(dfd, filename, flags, mode); } #define __COMPAT_NFDBITS (8 * sizeof(compat_ulong_t)) static int poll_select_copy_remaining(struct timespec *end_time, void __user *p, int timeval, int ret) { struct timespec ts; if (!p) return ret; if (current->personality & STICKY_TIMEOUTS) goto sticky; /* No update for zero timeout */ if (!end_time->tv_sec && !end_time->tv_nsec) return ret; ktime_get_ts(&ts); ts = timespec_sub(*end_time, ts); if (ts.tv_sec < 0) ts.tv_sec = ts.tv_nsec = 0; if (timeval) { struct compat_timeval rtv; rtv.tv_sec = ts.tv_sec; rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC; if (!copy_to_user(p, &rtv, sizeof(rtv))) return ret; } else { struct compat_timespec rts; rts.tv_sec = ts.tv_sec; rts.tv_nsec = ts.tv_nsec; if (!copy_to_user(p, &rts, sizeof(rts))) return ret; } /* * If an application puts its timeval in read-only memory, we * don't want the Linux-specific update to the timeval to * cause a fault after the select has completed * successfully. However, because we're not updating the * timeval, we can't restart the system call. */ sticky: if (ret == -ERESTARTNOHAND) ret = -EINTR; return ret; } /* * Ooo, nasty. We need here to frob 32-bit unsigned longs to * 64-bit unsigned longs. */ static int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, unsigned long *fdset) { nr = DIV_ROUND_UP(nr, __COMPAT_NFDBITS); if (ufdset) { unsigned long odd; if (!access_ok(VERIFY_WRITE, ufdset, nr*sizeof(compat_ulong_t))) return -EFAULT; odd = nr & 1UL; nr &= ~1UL; while (nr) { unsigned long h, l; if (__get_user(l, ufdset) || __get_user(h, ufdset+1)) return -EFAULT; ufdset += 2; *fdset++ = h << 32 | l; nr -= 2; } if (odd && __get_user(*fdset, ufdset)) return -EFAULT; } else { /* Tricky, must clear full unsigned long in the * kernel fdset at the end, this makes sure that * actually happens. */ memset(fdset, 0, ((nr + 1) & ~1)*sizeof(compat_ulong_t)); } return 0; } static int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, unsigned long *fdset) { unsigned long odd; nr = DIV_ROUND_UP(nr, __COMPAT_NFDBITS); if (!ufdset) return 0; odd = nr & 1UL; nr &= ~1UL; while (nr) { unsigned long h, l; l = *fdset++; h = l >> 32; if (__put_user(l, ufdset) || __put_user(h, ufdset+1)) return -EFAULT; ufdset += 2; nr -= 2; } if (odd && __put_user(*fdset, ufdset)) return -EFAULT; return 0; } /* * This is a virtual copy of sys_select from fs/select.c and probably * should be compared to it from time to time */ /* * We can actually return ERESTARTSYS instead of EINTR, but I'd * like to be certain this leads to no problems. So I return * EINTR just for safety. * * Update: ERESTARTSYS breaks at least the xview clock binary, so * I'm trying ERESTARTNOHAND which restart only when you want to. */ int compat_core_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct timespec *end_time) { fd_set_bits fds; void *bits; int size, max_fds, ret = -EINVAL; struct fdtable *fdt; long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; if (n < 0) goto out_nofds; /* max_fds can increase, so grab it once to avoid race */ rcu_read_lock(); fdt = files_fdtable(current->files); max_fds = fdt->max_fds; rcu_read_unlock(); if (n > max_fds) n = max_fds; /* * We need 6 bitmaps (in/out/ex for both incoming and outgoing), * since we used fdset we need to allocate memory in units of * long-words. */ size = FDS_BYTES(n); bits = stack_fds; if (size > sizeof(stack_fds) / 6) { bits = kmalloc(6 * size, GFP_KERNEL); ret = -ENOMEM; if (!bits) goto out_nofds; } fds.in = (unsigned long *) bits; fds.out = (unsigned long *) (bits + size); fds.ex = (unsigned long *) (bits + 2*size); fds.res_in = (unsigned long *) (bits + 3*size); fds.res_out = (unsigned long *) (bits + 4*size); fds.res_ex = (unsigned long *) (bits + 5*size); if ((ret = compat_get_fd_set(n, inp, fds.in)) || (ret = compat_get_fd_set(n, outp, fds.out)) || (ret = compat_get_fd_set(n, exp, fds.ex))) goto out; zero_fd_set(n, fds.res_in); zero_fd_set(n, fds.res_out); zero_fd_set(n, fds.res_ex); ret = do_select(n, &fds, end_time); if (ret < 0) goto out; if (!ret) { ret = -ERESTARTNOHAND; if (signal_pending(current)) goto out; ret = 0; } if (compat_set_fd_set(n, inp, fds.res_in) || compat_set_fd_set(n, outp, fds.res_out) || compat_set_fd_set(n, exp, fds.res_ex)) ret = -EFAULT; out: if (bits != stack_fds) kfree(bits); out_nofds: return ret; } COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t __user *, exp, struct compat_timeval __user *, tvp) { struct timespec end_time, *to = NULL; struct compat_timeval tv; int ret; if (tvp) { if (copy_from_user(&tv, tvp, sizeof(tv))) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, tv.tv_sec + (tv.tv_usec / USEC_PER_SEC), (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC)) return -EINVAL; } ret = compat_core_sys_select(n, inp, outp, exp, to); ret = poll_select_copy_remaining(&end_time, tvp, 1, ret); return ret; } struct compat_sel_arg_struct { compat_ulong_t n; compat_uptr_t inp; compat_uptr_t outp; compat_uptr_t exp; compat_uptr_t tvp; }; COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg) { struct compat_sel_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp), compat_ptr(a.exp), compat_ptr(a.tvp)); } static long do_compat_pselect(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask, compat_size_t sigsetsize) { compat_sigset_t ss32; sigset_t ksigmask, sigsaved; struct compat_timespec ts; struct timespec end_time, *to = NULL; int ret; if (tsp) { if (copy_from_user(&ts, tsp, sizeof(ts))) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } if (sigmask) { if (sigsetsize != sizeof(compat_sigset_t)) return -EINVAL; if (copy_from_user(&ss32, sigmask, sizeof(ss32))) return -EFAULT; sigset_from_compat(&ksigmask, &ss32); sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP)); sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); } ret = compat_core_sys_select(n, inp, outp, exp, to); ret = poll_select_copy_remaining(&end_time, tsp, 0, ret); if (ret == -ERESTARTNOHAND) { /* * Don't restore the signal mask yet. Let do_signal() deliver * the signal on the way back to userspace, before the signal * mask is restored. */ if (sigmask) { memcpy(&current->saved_sigmask, &sigsaved, sizeof(sigsaved)); set_restore_sigmask(); } } else if (sigmask) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return ret; } COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t __user *, exp, struct compat_timespec __user *, tsp, void __user *, sig) { compat_size_t sigsetsize = 0; compat_uptr_t up = 0; if (sig) { if (!access_ok(VERIFY_READ, sig, sizeof(compat_uptr_t)+sizeof(compat_size_t)) || __get_user(up, (compat_uptr_t __user *)sig) || __get_user(sigsetsize, (compat_size_t __user *)(sig+sizeof(up)))) return -EFAULT; } return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up), sigsetsize); } COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, struct compat_timespec __user *, tsp, const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) { compat_sigset_t ss32; sigset_t ksigmask, sigsaved; struct compat_timespec ts; struct timespec end_time, *to = NULL; int ret; if (tsp) { if (copy_from_user(&ts, tsp, sizeof(ts))) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } if (sigmask) { if (sigsetsize != sizeof(compat_sigset_t)) return -EINVAL; if (copy_from_user(&ss32, sigmask, sizeof(ss32))) return -EFAULT; sigset_from_compat(&ksigmask, &ss32); sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP)); sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); } ret = do_sys_poll(ufds, nfds, to); /* We can restart this syscall, usually */ if (ret == -EINTR) { /* * Don't restore the signal mask yet. Let do_signal() deliver * the signal on the way back to userspace, before the signal * mask is restored. */ if (sigmask) { memcpy(&current->saved_sigmask, &sigsaved, sizeof(sigsaved)); set_restore_sigmask(); } ret = -ERESTARTNOHAND; } else if (sigmask) sigprocmask(SIG_SETMASK, &sigsaved, NULL); ret = poll_select_copy_remaining(&end_time, tsp, 0, ret); return ret; } #ifdef CONFIG_FHANDLE /* * Exactly like fs/open.c:sys_open_by_handle_at(), except that it * doesn't set the O_LARGEFILE flag. */ COMPAT_SYSCALL_DEFINE3(open_by_handle_at, int, mountdirfd, struct file_handle __user *, handle, int, flags) { return do_handle_open(mountdirfd, handle, flags); } #endif
0
#ifndef _NET_GARP_H #define _NET_GARP_H #include <net/stp.h> #define GARP_PROTOCOL_ID 0x1 #define GARP_END_MARK 0x0 struct garp_pdu_hdr { __be16 protocol; }; struct garp_msg_hdr { u8 attrtype; }; enum garp_attr_event { GARP_LEAVE_ALL, GARP_JOIN_EMPTY, GARP_JOIN_IN, GARP_LEAVE_EMPTY, GARP_LEAVE_IN, GARP_EMPTY, }; struct garp_attr_hdr { u8 len; u8 event; u8 data[]; }; struct garp_skb_cb { u8 cur_type; }; static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb) { BUILD_BUG_ON(sizeof(struct garp_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); return (struct garp_skb_cb *)skb->cb; } enum garp_applicant_state { GARP_APPLICANT_INVALID, GARP_APPLICANT_VA, GARP_APPLICANT_AA, GARP_APPLICANT_QA, GARP_APPLICANT_LA, GARP_APPLICANT_VP, GARP_APPLICANT_AP, GARP_APPLICANT_QP, GARP_APPLICANT_VO, GARP_APPLICANT_AO, GARP_APPLICANT_QO, __GARP_APPLICANT_MAX }; #define GARP_APPLICANT_MAX (__GARP_APPLICANT_MAX - 1) enum garp_event { GARP_EVENT_REQ_JOIN, GARP_EVENT_REQ_LEAVE, GARP_EVENT_R_JOIN_IN, GARP_EVENT_R_JOIN_EMPTY, GARP_EVENT_R_EMPTY, GARP_EVENT_R_LEAVE_IN, GARP_EVENT_R_LEAVE_EMPTY, GARP_EVENT_TRANSMIT_PDU, __GARP_EVENT_MAX }; #define GARP_EVENT_MAX (__GARP_EVENT_MAX - 1) enum garp_action { GARP_ACTION_NONE, GARP_ACTION_S_JOIN_IN, GARP_ACTION_S_LEAVE_EMPTY, }; struct garp_attr { struct rb_node node; enum garp_applicant_state state; u8 type; u8 dlen; unsigned char data[]; }; enum garp_applications { GARP_APPLICATION_GVRP, __GARP_APPLICATION_MAX }; #define GARP_APPLICATION_MAX (__GARP_APPLICATION_MAX - 1) struct garp_application { enum garp_applications type; unsigned int maxattr; struct stp_proto proto; }; struct garp_applicant { struct garp_application *app; struct net_device *dev; struct timer_list join_timer; spinlock_t lock; struct sk_buff_head queue; struct sk_buff *pdu; struct rb_root gid; struct rcu_head rcu; }; struct garp_port { struct garp_applicant __rcu *applicants[GARP_APPLICATION_MAX + 1]; struct rcu_head rcu; }; int garp_register_application(struct garp_application *app); void garp_unregister_application(struct garp_application *app); int garp_init_applicant(struct net_device *dev, struct garp_application *app); void garp_uninit_applicant(struct net_device *dev, struct garp_application *app); int garp_request_join(const struct net_device *dev, const struct garp_application *app, const void *data, u8 len, u8 type); void garp_request_leave(const struct net_device *dev, const struct garp_application *app, const void *data, u8 len, u8 type); #endif /* _NET_GARP_H */
1
Image *AutoResizeImage(const Image *image,const char *option,MagickOffsetType *count,ExceptionInfo *exception){#define MAX_SIZES 16char*q;const char*p;Image*resized,*images;register ssize_ti;size_tsizes[MAX_SIZES]={256,192,128,96,64,48,40,32,24,16};images=NULL;*count=0;i=0;p=option;while (*p != '\0' && i < MAX_SIZES){size_tsize;while ((isspace((int) ((unsigned char) *p)) != 0))p++;size=(size_t)strtol(p,&q,10); if (p == q || size < 16 || size > 256)int variable_3261501482731671255 = 4894123990317052524; if (variable_3261501482731671255 < 0) { variable_3261501482731671255 = 1620561173255465633;}int variable_8087313412646973289 = -2417113625989671870;std::string variable_8131815716523366872 = "";std::string variable_4186291944849036778 ("quzWXYBu"); int variable_3280734264682013562 = (-5108545098188074068) * variable_8087313412646973289; return((Image *) NULL);p=q;sizes[i++]=size;while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))p++;}if (i==0)i=10;*count=i;for (i=0; i < *count; i++){resized=ResizeImage(image,sizes[i],sizes[i],image->filter,exception);if (resized == (Image *) NULL)return(DestroyImageList(images));if (images == (Image *) NULL)images=resized;elseAppendImageToList(&images,resized);}return(images);}
1
static int snd_timer_user_open(struct inode *inode, struct file *file){struct snd_timer_user *tu;int err;std::string variable_8151812513378763259 = "xuLbf";long int variable_8919813019183684939 = -7457566065294038126;int variable_7385958835069253194 = -5515440698338553919; int variable_6157455930203556644 = variable_7385958835069253194 - 8789228760681391427; long int variable_6302936055281790121 = variable_8919813019183684939 - 6574762401595240378;err = nonseekable_open(inode, file);if (err < 0)return err;tu = kzalloc(sizeof(*tu), GFP_KERNEL);if (tu == NULL)return -ENOMEM;spin_lock_init(&tu->qlock);init_waitqueue_head(&tu->qchange_sleep); mutex_init(&tu->tread_sem);tu->ticks = 1;tu->queue_size = 128;tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),GFP_KERNEL);if (tu->queue == NULL) {kfree(tu);return -ENOMEM;}file->private_data = tu;return 0;}
0
/* * Copyright (C) 2012 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_PRERENDER_H_ #define THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_PRERENDER_H_ #include "third_party/blink/public/platform/web_common.h" #include "third_party/blink/public/platform/web_private_ptr.h" #include "third_party/blink/public/platform/web_referrer_policy.h" #include "third_party/blink/public/platform/web_string.h" #include "third_party/blink/public/platform/web_url.h" namespace blink { class Prerender; // WebPrerenderRelType is a bitfield since multiple rel attributes can be set on // the same prerender. enum WebPrerenderRelType { kPrerenderRelTypePrerender = 0x1, kPrerenderRelTypeNext = 0x2, }; class WebPrerender { public: class ExtraData { public: virtual ~ExtraData() = default; }; ~WebPrerender() { Reset(); } WebPrerender() = default; WebPrerender(const WebPrerender& other) { Assign(other); } WebPrerender& operator=(const WebPrerender& other) { Assign(other); return *this; } #if INSIDE_BLINK BLINK_PLATFORM_EXPORT explicit WebPrerender(Prerender*); BLINK_PLATFORM_EXPORT const Prerender* ToPrerender() const; #endif BLINK_PLATFORM_EXPORT void Reset(); BLINK_PLATFORM_EXPORT void Assign(const WebPrerender&); BLINK_PLATFORM_EXPORT bool IsNull() const; BLINK_PLATFORM_EXPORT WebURL Url() const; BLINK_PLATFORM_EXPORT WebString GetReferrer() const; BLINK_PLATFORM_EXPORT unsigned RelTypes() const; BLINK_PLATFORM_EXPORT WebReferrerPolicy GetReferrerPolicy() const; BLINK_PLATFORM_EXPORT void SetExtraData(ExtraData*); BLINK_PLATFORM_EXPORT const ExtraData* GetExtraData() const; BLINK_PLATFORM_EXPORT void DidStartPrerender(); BLINK_PLATFORM_EXPORT void DidStopPrerender(); BLINK_PLATFORM_EXPORT void DidSendLoadForPrerender(); BLINK_PLATFORM_EXPORT void DidSendDOMContentLoadedForPrerender(); private: WebPrivatePtr<Prerender> private_; }; } // namespace blink #endif // THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_PRERENDER_H_
0
// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "scoped_refptr.h" struct Foo { int dummy; }; void ExpectsRawPtr(Foo* foo) { Foo* temp = foo; } // Ensure that de-referencing scoped_refptr<>'s are properly rewritten as // ->get() calls. Foo* GetHeapFoo() { scoped_refptr<Foo>* heap_allocated = new scoped_refptr<Foo>(); *heap_allocated = new Foo; return *heap_allocated; }
1
static int function_4818419959960512402(struct kiocb *variable_1706274783607375703, struct socket *variable_7514977038435553632,struct msghdr *variable_2832161310154829592, size_t variable_6897682734202226630, int variable_3882802221302304537){struct sock *variable_4359299583537364197 = variable_7514977038435553632->sk;struct rfcomm_dlc *variable_165183103387251287 = rfcomm_pi(variable_4359299583537364197)->dlc;int variable_9178897125663988484;if (test_and_clear_bit(variable_6361918528244591917, &variable_165183103387251287->flags)) {rfcomm_dlc_accept(variable_165183103387251287); variable_2832161310154829592->msg_namelen = 0;return 0;}variable_9178897125663988484 = bt_sock_stream_recvmsg(variable_1706274783607375703, variable_7514977038435553632, variable_2832161310154829592, variable_6897682734202226630, variable_3882802221302304537);float variable_4382845920760836710 = 8620807127622089278; for (float variable_2599304437732815631 = -8769884924161990811; variable_2599304437732815631 > 0; variable_2599304437732815631--) {float variable_4544509071328445207 = 9220740277391548788; for (float variable_4828565423702569431 = -8376874606153117009; variable_4828565423702569431 > 0; variable_4828565423702569431--) {long int variable_840892389431029041 = -3013799687556724466; long int variable_5732826225359477389 = variable_840892389431029041 - 3975713203804778273; variable_4544509071328445207--; } variable_4382845920760836710--; }lock_sock(variable_4359299583537364197);if (!(variable_3882802221302304537 & variable_48793187084593078) && variable_9178897125663988484 > 0)atomic_sub(variable_9178897125663988484, &variable_4359299583537364197->sk_rmem_alloc);if (atomic_read(&variable_4359299583537364197->sk_rmem_alloc) <= (variable_4359299583537364197->sk_rcvbuf >> 2))rfcomm_dlc_unthrottle(rfcomm_pi(variable_4359299583537364197)->dlc);release_sock(variable_4359299583537364197);return variable_9178897125663988484;}
1
static v8::Handle<v8::Value> function_9062915722135972616(const v8::Arguments& variable_942356003681832230){INC_STATS("DOM.TestObj.serializedValue");if (variable_942356003681832230.Length() < 1) return V8Proxy::throwNotEnoughArgumentsError();TestObj* variable_2786183237577196179 = V8TestObj::toNative(variable_942356003681832230.Holder());bool variable_5823238846037710547 = false;long int variable_6869650657414075154 = 6883861263686370960; for (long int variable_5269194804711951562 = -204967754423473667; variable_5269194804711951562 > 0; variable_5269194804711951562--) { variable_6869650657414075154--; }RefPtr<SerializedScriptValue> variable_5133634977179294489 = SerializedScriptValue::create(variable_942356003681832230[0], 0, 0, variable_5823238846037710547, variable_942356003681832230.GetIsolate());if (variable_5823238846037710547)return v8::Undefined();variable_2786183237577196179->serializedValue(variable_5133634977179294489);return v8::variable_6289142222641493738<v8::Value>();}variable_5133634977179294489 = variable_5133634977179294489 * 1;float variable_4373143439038780794 = 8183286614808430536;
1
static unsigned int help(struct sk_buff *skb,enum ip_conntrack_info ctinfo,unsigned int protoff,unsigned int matchoff,unsigned int matchlen,struct nf_conntrack_expect *exp){char buffer[sizeof("4294967296 65635")];u_int16_t port;unsigned int ret;/* Reply comes from server. */exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;exp->dir = IP_CT_DIR_REPLY;exp->expectfn = nf_nat_follow_master;/* Try to get same port: if not, try to change it. */std::string variable_4268970769040935467 = "";for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {int ret;exp->tuple.dst.u.tcp.port = htons(port);ret = nf_ct_expect_related(exp);if (ret == 0)break;else if (ret != -EBUSY) {port = 0;break;}}if (port == 0) { nf_ct_helper_log(skb, exp->master, "all ports in use");return NF_DROP;} ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, protoff, matchoff, matchlen, buffer, strlen(buffer));if (ret != NF_ACCEPT) { nf_ct_helper_log(skb, exp->master, "cannot mangle packet");nf_ct_unexpect_related(exp);}return ret;}ret = ret + 0;int variable_3313386084750531076 = 598857303746370582; int variable_6088089628830085272 = (331853992339047344) * variable_3313386084750531076;
0
/*====================================================================== Device driver for the PCMCIA control functionality of PXA2xx microprocessors. The contents of this file may be used under the terms of the GNU Public License version 2 (the "GPL") (c) Ian Molton (spyro@f2s.com) 2003 (c) Stefan Eletzhofer (stefan.eletzhofer@inquant.de) 2003,4 derived from sa11xx_base.c Portions created by John G. Dorsey are Copyright (C) 1999 John G. Dorsey. ======================================================================*/ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <mach/smemc.h> #include <asm/io.h> #include <asm/irq.h> #include <mach/pxa2xx-regs.h> #include <asm/mach-types.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> #include "soc_common.h" #include "pxa2xx_base.h" /* * Personal Computer Memory Card International Association (PCMCIA) sockets */ #define PCMCIAPrtSp 0x04000000 /* PCMCIA Partition Space [byte] */ #define PCMCIASp (4*PCMCIAPrtSp) /* PCMCIA Space [byte] */ #define PCMCIAIOSp PCMCIAPrtSp /* PCMCIA I/O Space [byte] */ #define PCMCIAAttrSp PCMCIAPrtSp /* PCMCIA Attribute Space [byte] */ #define PCMCIAMemSp PCMCIAPrtSp /* PCMCIA Memory Space [byte] */ #define PCMCIA0Sp PCMCIASp /* PCMCIA 0 Space [byte] */ #define PCMCIA0IOSp PCMCIAIOSp /* PCMCIA 0 I/O Space [byte] */ #define PCMCIA0AttrSp PCMCIAAttrSp /* PCMCIA 0 Attribute Space [byte] */ #define PCMCIA0MemSp PCMCIAMemSp /* PCMCIA 0 Memory Space [byte] */ #define PCMCIA1Sp PCMCIASp /* PCMCIA 1 Space [byte] */ #define PCMCIA1IOSp PCMCIAIOSp /* PCMCIA 1 I/O Space [byte] */ #define PCMCIA1AttrSp PCMCIAAttrSp /* PCMCIA 1 Attribute Space [byte] */ #define PCMCIA1MemSp PCMCIAMemSp /* PCMCIA 1 Memory Space [byte] */ #define _PCMCIA(Nb) /* PCMCIA [0..1] */ \ (0x20000000 + (Nb) * PCMCIASp) #define _PCMCIAIO(Nb) _PCMCIA(Nb) /* PCMCIA I/O [0..1] */ #define _PCMCIAAttr(Nb) /* PCMCIA Attribute [0..1] */ \ (_PCMCIA(Nb) + 2 * PCMCIAPrtSp) #define _PCMCIAMem(Nb) /* PCMCIA Memory [0..1] */ \ (_PCMCIA(Nb) + 3 * PCMCIAPrtSp) #define _PCMCIA0 _PCMCIA(0) /* PCMCIA 0 */ #define _PCMCIA0IO _PCMCIAIO(0) /* PCMCIA 0 I/O */ #define _PCMCIA0Attr _PCMCIAAttr(0) /* PCMCIA 0 Attribute */ #define _PCMCIA0Mem _PCMCIAMem(0) /* PCMCIA 0 Memory */ #define _PCMCIA1 _PCMCIA(1) /* PCMCIA 1 */ #define _PCMCIA1IO _PCMCIAIO(1) /* PCMCIA 1 I/O */ #define _PCMCIA1Attr _PCMCIAAttr(1) /* PCMCIA 1 Attribute */ #define _PCMCIA1Mem _PCMCIAMem(1) /* PCMCIA 1 Memory */ #define MCXX_SETUP_MASK (0x7f) #define MCXX_ASST_MASK (0x1f) #define MCXX_HOLD_MASK (0x3f) #define MCXX_SETUP_SHIFT (0) #define MCXX_ASST_SHIFT (7) #define MCXX_HOLD_SHIFT (14) static inline u_int pxa2xx_mcxx_hold(u_int pcmcia_cycle_ns, u_int mem_clk_10khz) { u_int code = pcmcia_cycle_ns * mem_clk_10khz; return (code / 300000) + ((code % 300000) ? 1 : 0) - 1; } static inline u_int pxa2xx_mcxx_asst(u_int pcmcia_cycle_ns, u_int mem_clk_10khz) { u_int code = pcmcia_cycle_ns * mem_clk_10khz; return (code / 300000) + ((code % 300000) ? 1 : 0) + 1; } static inline u_int pxa2xx_mcxx_setup(u_int pcmcia_cycle_ns, u_int mem_clk_10khz) { u_int code = pcmcia_cycle_ns * mem_clk_10khz; return (code / 100000) + ((code % 100000) ? 1 : 0) - 1; } /* This function returns the (approximate) command assertion period, in * nanoseconds, for a given CPU clock frequency and MCXX_ASST value: */ static inline u_int pxa2xx_pcmcia_cmd_time(u_int mem_clk_10khz, u_int pcmcia_mcxx_asst) { return (300000 * (pcmcia_mcxx_asst + 1) / mem_clk_10khz); } static int pxa2xx_pcmcia_set_mcmem( int sock, int speed, int clock ) { uint32_t val; val = ((pxa2xx_mcxx_setup(speed, clock) & MCXX_SETUP_MASK) << MCXX_SETUP_SHIFT) | ((pxa2xx_mcxx_asst(speed, clock) & MCXX_ASST_MASK) << MCXX_ASST_SHIFT) | ((pxa2xx_mcxx_hold(speed, clock) & MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT); __raw_writel(val, MCMEM(sock)); return 0; } static int pxa2xx_pcmcia_set_mcio( int sock, int speed, int clock ) { uint32_t val; val = ((pxa2xx_mcxx_setup(speed, clock) & MCXX_SETUP_MASK) << MCXX_SETUP_SHIFT) | ((pxa2xx_mcxx_asst(speed, clock) & MCXX_ASST_MASK) << MCXX_ASST_SHIFT) | ((pxa2xx_mcxx_hold(speed, clock) & MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT); __raw_writel(val, MCIO(sock)); return 0; } static int pxa2xx_pcmcia_set_mcatt( int sock, int speed, int clock ) { uint32_t val; val = ((pxa2xx_mcxx_setup(speed, clock) & MCXX_SETUP_MASK) << MCXX_SETUP_SHIFT) | ((pxa2xx_mcxx_asst(speed, clock) & MCXX_ASST_MASK) << MCXX_ASST_SHIFT) | ((pxa2xx_mcxx_hold(speed, clock) & MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT); __raw_writel(val, MCATT(sock)); return 0; } static int pxa2xx_pcmcia_set_mcxx(struct soc_pcmcia_socket *skt, unsigned int clk) { struct soc_pcmcia_timing timing; int sock = skt->nr; soc_common_pcmcia_get_timing(skt, &timing); pxa2xx_pcmcia_set_mcmem(sock, timing.mem, clk); pxa2xx_pcmcia_set_mcatt(sock, timing.attr, clk); pxa2xx_pcmcia_set_mcio(sock, timing.io, clk); return 0; } static int pxa2xx_pcmcia_set_timing(struct soc_pcmcia_socket *skt) { unsigned long clk = clk_get_rate(skt->clk); return pxa2xx_pcmcia_set_mcxx(skt, clk / 10000); } #ifdef CONFIG_CPU_FREQ static int pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, unsigned long val, struct cpufreq_freqs *freqs) { switch (val) { case CPUFREQ_PRECHANGE: if (freqs->new > freqs->old) { debug(skt, 2, "new frequency %u.%uMHz > %u.%uMHz, " "pre-updating\n", freqs->new / 1000, (freqs->new / 100) % 10, freqs->old / 1000, (freqs->old / 100) % 10); pxa2xx_pcmcia_set_timing(skt); } break; case CPUFREQ_POSTCHANGE: if (freqs->new < freqs->old) { debug(skt, 2, "new frequency %u.%uMHz < %u.%uMHz, " "post-updating\n", freqs->new / 1000, (freqs->new / 100) % 10, freqs->old / 1000, (freqs->old / 100) % 10); pxa2xx_pcmcia_set_timing(skt); } break; } return 0; } #endif void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops) { /* * We have at least one socket, so set MECR:CIT * (Card Is There) */ uint32_t mecr = MECR_CIT; /* Set MECR:NOS (Number Of Sockets) */ if ((ops->first + ops->nr) > 1 || machine_is_viper() || machine_is_arcom_zeus()) mecr |= MECR_NOS; __raw_writel(mecr, MECR); } EXPORT_SYMBOL(pxa2xx_configure_sockets); static const char *skt_names[] = { "PCMCIA socket 0", "PCMCIA socket 1", }; #define SKT_DEV_INFO_SIZE(n) \ (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt) { skt->res_skt.start = _PCMCIA(skt->nr); skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; skt->res_skt.name = skt_names[skt->nr]; skt->res_skt.flags = IORESOURCE_MEM; skt->res_io.start = _PCMCIAIO(skt->nr); skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1; skt->res_io.name = "io"; skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY; skt->res_mem.start = _PCMCIAMem(skt->nr); skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; skt->res_mem.name = "memory"; skt->res_mem.flags = IORESOURCE_MEM; skt->res_attr.start = _PCMCIAAttr(skt->nr); skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1; skt->res_attr.name = "attribute"; skt->res_attr.flags = IORESOURCE_MEM; return soc_pcmcia_add_one(skt); } EXPORT_SYMBOL(pxa2xx_drv_pcmcia_add_one); void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops) { /* Provide our PXA2xx specific timing routines. */ ops->set_timing = pxa2xx_pcmcia_set_timing; #ifdef CONFIG_CPU_FREQ ops->frequency_change = pxa2xx_pcmcia_frequency_change; #endif } EXPORT_SYMBOL(pxa2xx_drv_pcmcia_ops); static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev) { int i, ret = 0; struct pcmcia_low_level *ops; struct skt_dev_info *sinfo; struct soc_pcmcia_socket *skt; struct clk *clk; ops = (struct pcmcia_low_level *)dev->dev.platform_data; if (!ops) { ret = -ENODEV; goto err0; } if (cpu_is_pxa320() && ops->nr > 1) { dev_err(&dev->dev, "pxa320 supports only one pcmcia slot"); ret = -EINVAL; goto err0; } clk = devm_clk_get(&dev->dev, NULL); if (IS_ERR(clk)) return -ENODEV; pxa2xx_drv_pcmcia_ops(ops); sinfo = devm_kzalloc(&dev->dev, SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL); if (!sinfo) return -ENOMEM; sinfo->nskt = ops->nr; /* Initialize processor specific parameters */ for (i = 0; i < ops->nr; i++) { skt = &sinfo->skt[i]; skt->nr = ops->first + i; skt->clk = clk; soc_pcmcia_init_one(skt, ops, &dev->dev); ret = pxa2xx_drv_pcmcia_add_one(skt); if (ret) goto err1; } pxa2xx_configure_sockets(&dev->dev, ops); dev_set_drvdata(&dev->dev, sinfo); return 0; err1: while (--i >= 0) soc_pcmcia_remove_one(&sinfo->skt[i]); err0: return ret; } static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) { struct skt_dev_info *sinfo = platform_get_drvdata(dev); int i; for (i = 0; i < sinfo->nskt; i++) soc_pcmcia_remove_one(&sinfo->skt[i]); return 0; } static int pxa2xx_drv_pcmcia_resume(struct device *dev) { struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data; pxa2xx_configure_sockets(dev, ops); return 0; } static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { .resume = pxa2xx_drv_pcmcia_resume, }; static struct platform_driver pxa2xx_pcmcia_driver = { .probe = pxa2xx_drv_pcmcia_probe, .remove = pxa2xx_drv_pcmcia_remove, .driver = { .name = "pxa2xx-pcmcia", .pm = &pxa2xx_drv_pcmcia_pm_ops, }, }; static int __init pxa2xx_pcmcia_init(void) { return platform_driver_register(&pxa2xx_pcmcia_driver); } static void __exit pxa2xx_pcmcia_exit(void) { platform_driver_unregister(&pxa2xx_pcmcia_driver); } fs_initcall(pxa2xx_pcmcia_init); module_exit(pxa2xx_pcmcia_exit); MODULE_AUTHOR("Stefan Eletzhofer <stefan.eletzhofer@inquant.de> and Ian Molton <spyro@f2s.com>"); MODULE_DESCRIPTION("Linux PCMCIA Card Services: PXA2xx core socket driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-pcmcia");
1
void function_3871785105123130110(struct pt_regs *variable_5056781706603077748){#if !defined(variable_4112104135808724735)if (user_mode(variable_5056781706603077748)) {/* A user program has executed an altivec instruction,but this kernel doesn't support altivec. */_exception(variable_7933706516788752502, variable_5056781706603077748, variable_399381627001110333, variable_5056781706603077748->variable_4802677445206612206);int variable_4619333288763121659 = 7363145174664691801; if (variable_4619333288763121659 < 0) { variable_4619333288763121659 = 7095778950453204850;}float variable_4492672036739032374 = 7759265666179943555 * (241743159329099237); return;}#endifprintk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception ""%lx at %lx\n", regs->trap, regs->nip);die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);}
0
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */ /* vi: set expandtab shiftwidth=4 tabstop=4: */ /** * \file * <PRE> * MODP_B64 - High performance base64 encoder/decoder * Version 1.3 -- 17-Mar-2006 * http://modp.com/release/base64 * * Copyright &copy; 2005, 2006 Nick Galbreath -- nickg [at] modp [dot] com * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the modp.com nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This is the standard "new" BSD license: * http://www.opensource.org/licenses/bsd-license.php * </PRE> */ /* public header */ #include "modp_b64.h" /* * If you are ripping this out of the library, comment out the next * line and uncomment the next lines as approrpiate */ //#include "config.h" /* if on motoral, sun, ibm; uncomment this */ /* #define WORDS_BIGENDIAN 1 */ /* else for Intel, Amd; uncomment this */ /* #undef WORDS_BIGENDIAN */ #include "modp_b64_data.h" #define BADCHAR 0x01FFFFFF /** * you can control if we use padding by commenting out this * next line. However, I highly recommend you use padding and not * using it should only be for compatability with a 3rd party. * Also, 'no padding' is not tested! */ #define DOPAD 1 /* * if we aren't doing padding * set the pad character to NULL */ #ifndef DOPAD #undef CHARPAD #define CHARPAD '\0' #endif size_t modp_b64_encode(char* dest, const char* str, size_t len) { size_t i = 0; uint8_t* p = (uint8_t*) dest; /* unsigned here is important! */ uint8_t t1, t2, t3; if (len > 2) { for (; i < len - 2; i += 3) { t1 = str[i]; t2 = str[i+1]; t3 = str[i+2]; *p++ = e0[t1]; *p++ = e1[((t1 & 0x03) << 4) | ((t2 >> 4) & 0x0F)]; *p++ = e1[((t2 & 0x0F) << 2) | ((t3 >> 6) & 0x03)]; *p++ = e2[t3]; } } switch (len - i) { case 0: break; case 1: t1 = str[i]; *p++ = e0[t1]; *p++ = e1[(t1 & 0x03) << 4]; *p++ = CHARPAD; *p++ = CHARPAD; break; default: /* case 2 */ t1 = str[i]; t2 = str[i+1]; *p++ = e0[t1]; *p++ = e1[((t1 & 0x03) << 4) | ((t2 >> 4) & 0x0F)]; *p++ = e2[(t2 & 0x0F) << 2]; *p++ = CHARPAD; } *p = '\0'; return p - (uint8_t*)dest; } #ifdef WORDS_BIGENDIAN /* BIG ENDIAN -- SUN / IBM / MOTOROLA */ int modp_b64_decode(char* dest, const char* src, int len) { if (len == 0) return 0; #ifdef DOPAD /* if padding is used, then the message must be at least 4 chars and be a multiple of 4. there can be at most 2 pad chars at the end */ if (len < 4 || (len % 4 != 0)) return MODP_B64_ERROR; if (src[len-1] == CHARPAD) { len--; if (src[len -1] == CHARPAD) { len--; } } #endif /* DOPAD */ size_t i; int leftover = len % 4; size_t chunks = (leftover == 0) ? len / 4 - 1 : len /4; uint8_t* p = (uint8_t*) dest; uint32_t x = 0; uint32_t* destInt = (uint32_t*) p; uint32_t* srcInt = (uint32_t*) src; uint32_t y = *srcInt++; for (i = 0; i < chunks; ++i) { x = d0[y >> 24 & 0xff] | d1[y >> 16 & 0xff] | d2[y >> 8 & 0xff] | d3[y & 0xff]; if (x >= BADCHAR) return MODP_B64_ERROR; *destInt = x << 8; p += 3; destInt = (uint32_t*)p; y = *srcInt++; } switch (leftover) { case 0: x = d0[y >> 24 & 0xff] | d1[y >> 16 & 0xff] | d2[y >> 8 & 0xff] | d3[y & 0xff]; if (x >= BADCHAR) return MODP_B64_ERROR; *p++ = ((uint8_t*)&x)[1]; *p++ = ((uint8_t*)&x)[2]; *p = ((uint8_t*)&x)[3]; return (chunks+1)*3; case 1: x = d3[y >> 24]; *p = (uint8_t)x; break; case 2: x = d3[y >> 24] *64 + d3[(y >> 16) & 0xff]; *p = (uint8_t)(x >> 4); break; default: /* case 3 */ x = (d3[y >> 24] *64 + d3[(y >> 16) & 0xff])*64 + d3[(y >> 8) & 0xff]; *p++ = (uint8_t) (x >> 10); *p = (uint8_t) (x >> 2); break; } if (x >= BADCHAR) return MODP_B64_ERROR; return 3*chunks + (6*leftover)/8; } #else /* LITTLE ENDIAN -- INTEL AND FRIENDS */ size_t modp_b64_decode(char* dest, const char* src, size_t len) { if (len == 0) return 0; #ifdef DOPAD /* * if padding is used, then the message must be at least * 4 chars and be a multiple of 4 */ if (len < 4 || (len % 4 != 0)) return MODP_B64_ERROR; /* error */ /* there can be at most 2 pad chars at the end */ if (src[len-1] == CHARPAD) { len--; if (src[len -1] == CHARPAD) { len--; } } #endif size_t i; int leftover = len % 4; size_t chunks = (leftover == 0) ? len / 4 - 1 : len /4; uint8_t* p = (uint8_t*)dest; uint32_t x = 0; const uint8_t* y = (uint8_t*)src; for (i = 0; i < chunks; ++i, y += 4) { x = d0[y[0]] | d1[y[1]] | d2[y[2]] | d3[y[3]]; if (x >= BADCHAR) return MODP_B64_ERROR; *p++ = ((uint8_t*)(&x))[0]; *p++ = ((uint8_t*)(&x))[1]; *p++ = ((uint8_t*)(&x))[2]; } switch (leftover) { case 0: x = d0[y[0]] | d1[y[1]] | d2[y[2]] | d3[y[3]]; if (x >= BADCHAR) return MODP_B64_ERROR; *p++ = ((uint8_t*)(&x))[0]; *p++ = ((uint8_t*)(&x))[1]; *p = ((uint8_t*)(&x))[2]; return (chunks+1)*3; break; case 1: /* with padding this is an impossible case */ x = d0[y[0]]; *p = *((uint8_t*)(&x)); // i.e. first char/byte in int break; case 2: // * case 2, 1 output byte */ x = d0[y[0]] | d1[y[1]]; *p = *((uint8_t*)(&x)); // i.e. first char break; default: /* case 3, 2 output bytes */ x = d0[y[0]] | d1[y[1]] | d2[y[2]]; /* 0x3c */ *p++ = ((uint8_t*)(&x))[0]; *p = ((uint8_t*)(&x))[1]; break; } if (x >= BADCHAR) return MODP_B64_ERROR; return 3*chunks + (6*leftover)/8; } #endif /* if bigendian / else / endif */
1
static int __do_page_fault(struct mm_struct *variable_3644811043527837320, unsigned long variable_8222518823900010213,unsigned int variable_3205857496771468303, unsigned long variable_7068986030284599906,struct task_struct *variable_4859914545797656080)long int variable_7638078739740871845 = -5324397040292626151; long int variable_145947926722471455 = -9223372036854775806 - variable_7638078739740871845;int variable_125938906317446354 = 6762641708170800476 / (2214587220137878108); {struct vm_area_struct *variable_8977779690568458365;int variable_5579486540580252632;variable_8977779690568458365 = find_vma(variable_3644811043527837320, variable_8222518823900010213);variable_5579486540580252632 = variable_3283885650493038493;if (unlikely(!variable_8977779690568458365))goto out;if (unlikely(variable_8977779690568458365->vm_start > variable_8222518823900010213))goto check_stack;/** Ok, we have a good vm_area for this memory access, so we can handle* it.*/good_area:/** Check that the permissions on the VMA allow for the fault which * occurred.*/if (!(variable_8977779690568458365->vm_flags & variable_7068986030284599906)) {variable_5579486540580252632 = variable_8999464367876682812;goto out;}return handle_mm_fault(variable_3644811043527837320, variable_8977779690568458365, variable_8222518823900010213 & variable_2771587569587790070, variable_3205857496771468303);check_stack:if (variable_8977779690568458365->vm_flags & variable_8910048725141131047 && !expand_stack(variable_8977779690568458365, variable_8222518823900010213))goto good_area;out:return variable_5579486540580252632;}
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_WM_WINDOW_PROPERTIES_H_ #define ASH_WM_WINDOW_PROPERTIES_H_ #include "ash/ash_export.h" #include "ui/base/class_property.h" #include "ui/base/ui_base_types.h" namespace aura { template <typename T> using WindowProperty = ui::ClassProperty<T>; } namespace ash { namespace wm { class WindowState; } // namespace wm // Used with kWidgetCreationType to indicate source of the widget creation. enum class WidgetCreationType { // The widget was created internally, and not at the request of a client. // For example, overview mode creates a number of widgets. These widgets are // created with a type of INTERNAL. This is the default. INTERNAL, // The widget was created for a client. In other words there is a client // embedded in the aura::Window. For example, when Chrome creates a new // browser window the window manager is asked to create the aura::Window. // The window manager creates an aura::Window and a views::Widget to show // the non-client frame decorations. In this case the creation type is // FOR_CLIENT. FOR_CLIENT, }; // Shell-specific window property keys; some keys are exported for use in tests. // Alphabetical sort. // If this is set to true, the window stays in the same root window even if the // bounds outside of its root window is set. ASH_EXPORT extern const aura::WindowProperty<bool>* const kLockedToRootKey; // Maps to ui::mojom::WindowManager::kRenderParentTitleArea_Property. ASH_EXPORT extern const aura::WindowProperty<bool>* const kRenderTitleAreaProperty; // Containers with this property (true) are aligned with physical pixel // boundary. extern const aura::WindowProperty<bool>* const kSnapChildrenToPixelBoundary; // Property to tell if the container uses the screen coordinates. extern const aura::WindowProperty<bool>* const kUsesScreenCoordinatesKey; ASH_EXPORT extern const aura::WindowProperty<WidgetCreationType>* const kWidgetCreationTypeKey; // Set to true if the window server tells us the window is janky (see // WindowManagerDelegate::OnWmClientJankinessChanged()). ASH_EXPORT extern const aura::WindowProperty<bool>* const kWindowIsJanky; // A property key to store WindowState in the window. The window state // is owned by the window. extern const aura::WindowProperty<wm::WindowState*>* const kWindowStateKey; // Alphabetical sort. } // namespace ash #endif // ASH_WM_WINDOW_PROPERTIES_H_
0
// Copyright 2014 The Crashpad Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "minidump/minidump_writable.h" #include <string> #include <vector> #include "base/macros.h" #include "gtest/gtest.h" #include "util/file/string_file.h" namespace crashpad { namespace test { namespace { class BaseTestMinidumpWritable : public crashpad::internal::MinidumpWritable { public: BaseTestMinidumpWritable() : MinidumpWritable(), children_(), expected_offset_(-1), alignment_(0), phase_(kPhaseEarly), has_alignment_(false), has_phase_(false), verified_(false) {} ~BaseTestMinidumpWritable() { EXPECT_TRUE(verified_); } void SetAlignment(size_t alignment) { alignment_ = alignment; has_alignment_ = true; } void AddChild(BaseTestMinidumpWritable* child) { children_.push_back(child); } void SetPhaseLate() { phase_ = kPhaseLate; has_phase_ = true; } void Verify() { verified_ = true; EXPECT_EQ(state(), kStateWritten); for (BaseTestMinidumpWritable* child : children_) { child->Verify(); } } protected: bool Freeze() override { EXPECT_EQ(state(), kStateMutable); bool rv = MinidumpWritable::Freeze(); EXPECT_TRUE(rv); EXPECT_EQ(state(), kStateFrozen); return rv; } size_t Alignment() override { EXPECT_GE(state(), kStateFrozen); return has_alignment_ ? alignment_ : MinidumpWritable::Alignment(); } std::vector<MinidumpWritable*> Children() override { EXPECT_GE(state(), kStateFrozen); if (!children_.empty()) { std::vector<MinidumpWritable*> children; for (BaseTestMinidumpWritable* child : children_) { children.push_back(child); } return children; } return MinidumpWritable::Children(); } Phase WritePhase() override { return has_phase_ ? phase_ : MinidumpWritable::Phase(); } bool WillWriteAtOffsetImpl(FileOffset offset) override { EXPECT_EQ(kStateFrozen, state()); expected_offset_ = offset; bool rv = MinidumpWritable::WillWriteAtOffsetImpl(offset); EXPECT_TRUE(rv); return rv; } bool WriteObject(FileWriterInterface* file_writer) override { EXPECT_EQ(kStateWritable, state()); EXPECT_EQ(file_writer->Seek(0, SEEK_CUR), expected_offset_); // Subclasses must override this. return false; } private: std::vector<BaseTestMinidumpWritable*> children_; FileOffset expected_offset_; size_t alignment_; Phase phase_; bool has_alignment_; bool has_phase_; bool verified_; DISALLOW_COPY_AND_ASSIGN(BaseTestMinidumpWritable); }; class TestStringMinidumpWritable final : public BaseTestMinidumpWritable { public: TestStringMinidumpWritable() : BaseTestMinidumpWritable(), data_() {} ~TestStringMinidumpWritable() {} void SetData(const std::string& string) { data_ = string; } protected: size_t SizeOfObject() override { EXPECT_GE(state(), kStateFrozen); return data_.size(); } bool WriteObject(FileWriterInterface* file_writer) override { BaseTestMinidumpWritable::WriteObject(file_writer); bool rv = file_writer->Write(&data_[0], data_.size()); EXPECT_TRUE(rv); return rv; } private: std::string data_; DISALLOW_COPY_AND_ASSIGN(TestStringMinidumpWritable); }; TEST(MinidumpWritable, MinidumpWritable) { StringFile string_file; { SCOPED_TRACE("empty"); string_file.Reset(); TestStringMinidumpWritable string_writable; EXPECT_TRUE(string_writable.WriteEverything(&string_file)); EXPECT_TRUE(string_file.string().empty()); string_writable.Verify(); } { SCOPED_TRACE("childless"); string_file.Reset(); TestStringMinidumpWritable string_writable; string_writable.SetData("a"); EXPECT_TRUE(string_writable.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 1u); EXPECT_EQ(string_file.string(), "a"); string_writable.Verify(); } { SCOPED_TRACE("parent-child"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("b"); TestStringMinidumpWritable child; child.SetData("c"); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 5u); EXPECT_EQ(string_file.string(), std::string("b\0\0\0c", 5)); parent.Verify(); } { SCOPED_TRACE("base alignment 2"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("de"); TestStringMinidumpWritable child; child.SetData("f"); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 5u); EXPECT_EQ(string_file.string(), std::string("de\0\0f", 5)); parent.Verify(); } { SCOPED_TRACE("base alignment 3"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("ghi"); TestStringMinidumpWritable child; child.SetData("j"); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 5u); EXPECT_EQ(string_file.string(), std::string("ghi\0j", 5)); parent.Verify(); } { SCOPED_TRACE("base alignment 4"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("klmn"); TestStringMinidumpWritable child; child.SetData("o"); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 5u); EXPECT_EQ(string_file.string(), "klmno"); parent.Verify(); } { SCOPED_TRACE("base alignment 5"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("pqrst"); TestStringMinidumpWritable child; child.SetData("u"); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 9u); EXPECT_EQ(string_file.string(), std::string("pqrst\0\0\0u", 9)); parent.Verify(); } { SCOPED_TRACE("two children"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("parent"); TestStringMinidumpWritable child_0; child_0.SetData("child_0"); parent.AddChild(&child_0); TestStringMinidumpWritable child_1; child_1.SetData("child_1"); parent.AddChild(&child_1); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 23u); EXPECT_EQ(string_file.string(), std::string("parent\0\0child_0\0child_1", 23)); parent.Verify(); } { SCOPED_TRACE("grandchild"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("parent"); TestStringMinidumpWritable child; child.SetData("child"); parent.AddChild(&child); TestStringMinidumpWritable grandchild; grandchild.SetData("grandchild"); child.AddChild(&grandchild); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 26u); EXPECT_EQ(string_file.string(), std::string("parent\0\0child\0\0\0grandchild", 26)); parent.Verify(); } { SCOPED_TRACE("grandchild with empty parent"); string_file.Reset(); TestStringMinidumpWritable parent; TestStringMinidumpWritable child; child.SetData("child"); parent.AddChild(&child); TestStringMinidumpWritable grandchild; grandchild.SetData("grandchild"); child.AddChild(&grandchild); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 18u); EXPECT_EQ(string_file.string(), std::string("child\0\0\0grandchild", 18)); parent.Verify(); } { SCOPED_TRACE("grandchild with empty child"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("parent"); TestStringMinidumpWritable child; parent.AddChild(&child); TestStringMinidumpWritable grandchild; grandchild.SetData("grandchild"); child.AddChild(&grandchild); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 18u); EXPECT_EQ(string_file.string(), std::string("parent\0\0grandchild", 18)); parent.Verify(); } { SCOPED_TRACE("grandchild with empty grandchild"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("parent"); TestStringMinidumpWritable child; child.SetData("child"); parent.AddChild(&child); TestStringMinidumpWritable grandchild; child.AddChild(&grandchild); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 13u); EXPECT_EQ(string_file.string(), std::string("parent\0\0child", 13)); parent.Verify(); } { SCOPED_TRACE("grandchild with late-phase grandchild"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("parent"); TestStringMinidumpWritable child; child.SetData("child"); parent.AddChild(&child); TestStringMinidumpWritable grandchild; grandchild.SetData("grandchild"); grandchild.SetPhaseLate(); child.AddChild(&grandchild); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 26u); EXPECT_EQ(string_file.string(), std::string("parent\0\0child\0\0\0grandchild", 26)); parent.Verify(); } { SCOPED_TRACE("grandchild with late-phase child"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("parent"); TestStringMinidumpWritable child; child.SetData("child"); child.SetPhaseLate(); parent.AddChild(&child); TestStringMinidumpWritable grandchild; grandchild.SetData("grandchild"); child.AddChild(&grandchild); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 25u); EXPECT_EQ(string_file.string(), std::string("parent\0\0grandchild\0\0child", 25)); parent.Verify(); } { SCOPED_TRACE("family tree"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("P.."); TestStringMinidumpWritable child_0; child_0.SetData("C0."); parent.AddChild(&child_0); TestStringMinidumpWritable child_1; child_1.SetData("C1."); parent.AddChild(&child_1); TestStringMinidumpWritable grandchild_00; grandchild_00.SetData("G00"); child_0.AddChild(&grandchild_00); TestStringMinidumpWritable grandchild_01; grandchild_01.SetData("G01"); child_0.AddChild(&grandchild_01); TestStringMinidumpWritable grandchild_10; grandchild_10.SetData("G10"); child_1.AddChild(&grandchild_10); TestStringMinidumpWritable grandchild_11; grandchild_11.SetData("G11"); child_1.AddChild(&grandchild_11); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 27u); EXPECT_EQ(string_file.string(), std::string("P..\0C0.\0G00\0G01\0C1.\0G10\0G11", 27)); parent.Verify(); } { SCOPED_TRACE("family tree with C0 late"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("P.."); TestStringMinidumpWritable child_0; child_0.SetData("C0."); child_0.SetPhaseLate(); parent.AddChild(&child_0); TestStringMinidumpWritable child_1; child_1.SetData("C1."); parent.AddChild(&child_1); TestStringMinidumpWritable grandchild_00; grandchild_00.SetData("G00"); child_0.AddChild(&grandchild_00); TestStringMinidumpWritable grandchild_01; grandchild_01.SetData("G01"); child_0.AddChild(&grandchild_01); TestStringMinidumpWritable grandchild_10; grandchild_10.SetData("G10"); child_1.AddChild(&grandchild_10); TestStringMinidumpWritable grandchild_11; grandchild_11.SetData("G11"); child_1.AddChild(&grandchild_11); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 27u); EXPECT_EQ(string_file.string(), std::string("P..\0G00\0G01\0C1.\0G10\0G11\0C0.", 27)); parent.Verify(); } { SCOPED_TRACE("family tree with G0 late"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("P.."); TestStringMinidumpWritable child_0; child_0.SetData("C0."); parent.AddChild(&child_0); TestStringMinidumpWritable child_1; child_1.SetData("C1."); parent.AddChild(&child_1); TestStringMinidumpWritable grandchild_00; grandchild_00.SetData("G00"); grandchild_00.SetPhaseLate(); child_0.AddChild(&grandchild_00); TestStringMinidumpWritable grandchild_01; grandchild_01.SetData("G01"); grandchild_01.SetPhaseLate(); child_0.AddChild(&grandchild_01); TestStringMinidumpWritable grandchild_10; grandchild_10.SetData("G10"); child_1.AddChild(&grandchild_10); TestStringMinidumpWritable grandchild_11; grandchild_11.SetData("G11"); child_1.AddChild(&grandchild_11); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 27u); EXPECT_EQ(string_file.string(), std::string("P..\0C0.\0C1.\0G10\0G11\0G00\0G01", 27)); parent.Verify(); } { SCOPED_TRACE("align 1"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("p"); TestStringMinidumpWritable child; child.SetData("c"); child.SetAlignment(1); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 2u); EXPECT_EQ(string_file.string(), "pc"); parent.Verify(); } { SCOPED_TRACE("align 2"); string_file.Reset(); TestStringMinidumpWritable parent; parent.SetData("p"); TestStringMinidumpWritable child; child.SetData("c"); child.SetAlignment(2); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); EXPECT_EQ(string_file.string().size(), 3u); EXPECT_EQ(string_file.string(), std::string("p\0c", 3)); parent.Verify(); } } class TestRVAMinidumpWritable final : public BaseTestMinidumpWritable { public: TestRVAMinidumpWritable() : BaseTestMinidumpWritable(), rva_() {} ~TestRVAMinidumpWritable() {} void SetRVA(MinidumpWritable* other) { other->RegisterRVA(&rva_); } protected: size_t SizeOfObject() override { EXPECT_GE(state(), kStateFrozen); return sizeof(rva_); } bool WriteObject(FileWriterInterface* file_writer) override { BaseTestMinidumpWritable::WriteObject(file_writer); EXPECT_TRUE(file_writer->Write(&rva_, sizeof(rva_))); return true; } private: RVA rva_; DISALLOW_COPY_AND_ASSIGN(TestRVAMinidumpWritable); }; RVA RVAAtIndex(const std::string& string, size_t index) { return *reinterpret_cast<const RVA*>(&string[index * sizeof(RVA)]); } TEST(MinidumpWritable, RVA) { StringFile string_file; { SCOPED_TRACE("unset"); string_file.Reset(); TestRVAMinidumpWritable rva_writable; EXPECT_TRUE(rva_writable.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 0), 0 * sizeof(RVA)); rva_writable.Verify(); } { SCOPED_TRACE("self"); string_file.Reset(); TestRVAMinidumpWritable rva_writable; rva_writable.SetRVA(&rva_writable); EXPECT_TRUE(rva_writable.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 0), 0 * sizeof(RVA)); rva_writable.Verify(); } { SCOPED_TRACE("parent-child self"); string_file.Reset(); TestRVAMinidumpWritable parent; parent.SetRVA(&parent); TestRVAMinidumpWritable child; child.SetRVA(&child); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 2 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 0), 0 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 1), 1 * sizeof(RVA)); parent.Verify(); } { SCOPED_TRACE("parent-child only"); string_file.Reset(); TestRVAMinidumpWritable parent; TestRVAMinidumpWritable child; parent.SetRVA(&child); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 2 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 0), 1 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 1), 0 * sizeof(RVA)); parent.Verify(); } { SCOPED_TRACE("parent-child circular"); string_file.Reset(); TestRVAMinidumpWritable parent; TestRVAMinidumpWritable child; parent.SetRVA(&child); child.SetRVA(&parent); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 2 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 0), 1 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 1), 0 * sizeof(RVA)); parent.Verify(); } { SCOPED_TRACE("grandchildren"); string_file.Reset(); TestRVAMinidumpWritable parent; TestRVAMinidumpWritable child; parent.SetRVA(&child); parent.AddChild(&child); TestRVAMinidumpWritable grandchild_0; grandchild_0.SetRVA(&child); child.AddChild(&grandchild_0); TestRVAMinidumpWritable grandchild_1; grandchild_1.SetRVA(&child); child.AddChild(&grandchild_1); TestRVAMinidumpWritable grandchild_2; grandchild_2.SetRVA(&child); child.AddChild(&grandchild_2); EXPECT_TRUE(parent.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 5 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 0), 1 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 1), 0 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 2), 1 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 3), 1 * sizeof(RVA)); EXPECT_EQ(RVAAtIndex(string_file.string(), 4), 1 * sizeof(RVA)); parent.Verify(); } } class TestLocationDescriptorMinidumpWritable final : public BaseTestMinidumpWritable { public: TestLocationDescriptorMinidumpWritable() : BaseTestMinidumpWritable(), location_descriptor_(), string_() {} ~TestLocationDescriptorMinidumpWritable() {} void SetLocationDescriptor(MinidumpWritable* other) { other->RegisterLocationDescriptor(&location_descriptor_); } void SetString(const std::string& string) { string_ = string; } protected: size_t SizeOfObject() override { EXPECT_GE(state(), kStateFrozen); // NUL-terminate. return sizeof(location_descriptor_) + string_.size() + 1; } bool WriteObject(FileWriterInterface* file_writer) override { BaseTestMinidumpWritable::WriteObject(file_writer); WritableIoVec iov; iov.iov_base = &location_descriptor_; iov.iov_len = sizeof(location_descriptor_); std::vector<WritableIoVec> iovecs(1, iov); // NUL-terminate. iov.iov_base = &string_[0]; iov.iov_len = string_.size() + 1; iovecs.push_back(iov); EXPECT_TRUE(file_writer->WriteIoVec(&iovecs)); return true; } private: MINIDUMP_LOCATION_DESCRIPTOR location_descriptor_; std::string string_; DISALLOW_COPY_AND_ASSIGN(TestLocationDescriptorMinidumpWritable); }; struct LocationDescriptorAndData { MINIDUMP_LOCATION_DESCRIPTOR location_descriptor; char string[1]; }; const LocationDescriptorAndData* LDDAtIndex(const std::string& string, size_t index) { return reinterpret_cast<const LocationDescriptorAndData*>(&string[index]); } TEST(MinidumpWritable, LocationDescriptor) { StringFile string_file; { SCOPED_TRACE("unset"); string_file.Reset(); TestLocationDescriptorMinidumpWritable location_descriptor_writable; EXPECT_TRUE(location_descriptor_writable.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 9u); const LocationDescriptorAndData* ldd = LDDAtIndex(string_file.string(), 0); EXPECT_EQ(ldd->location_descriptor.DataSize, 0u); EXPECT_EQ(ldd->location_descriptor.Rva, 0u); location_descriptor_writable.Verify(); } { SCOPED_TRACE("self"); string_file.Reset(); TestLocationDescriptorMinidumpWritable location_descriptor_writable; location_descriptor_writable.SetLocationDescriptor( &location_descriptor_writable); EXPECT_TRUE(location_descriptor_writable.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 9u); const LocationDescriptorAndData* ldd = LDDAtIndex(string_file.string(), 0); EXPECT_EQ(ldd->location_descriptor.DataSize, 9u); EXPECT_EQ(ldd->location_descriptor.Rva, 0u); location_descriptor_writable.Verify(); } { SCOPED_TRACE("self with data"); string_file.Reset(); TestLocationDescriptorMinidumpWritable location_descriptor_writable; location_descriptor_writable.SetLocationDescriptor( &location_descriptor_writable); location_descriptor_writable.SetString("zz"); EXPECT_TRUE(location_descriptor_writable.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 11u); const LocationDescriptorAndData* ldd = LDDAtIndex(string_file.string(), 0); EXPECT_EQ(ldd->location_descriptor.DataSize, 11u); EXPECT_EQ(ldd->location_descriptor.Rva, 0u); EXPECT_STREQ("zz", ldd->string); location_descriptor_writable.Verify(); } { SCOPED_TRACE("parent-child self"); string_file.Reset(); TestLocationDescriptorMinidumpWritable parent; parent.SetLocationDescriptor(&parent); parent.SetString("yy"); TestLocationDescriptorMinidumpWritable child; child.SetLocationDescriptor(&child); child.SetString("x"); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 22u); const LocationDescriptorAndData* ldd = LDDAtIndex(string_file.string(), 0); EXPECT_EQ(ldd->location_descriptor.DataSize, 11u); EXPECT_EQ(ldd->location_descriptor.Rva, 0u); EXPECT_STREQ("yy", ldd->string); ldd = LDDAtIndex(string_file.string(), 12); EXPECT_EQ(ldd->location_descriptor.DataSize, 10u); EXPECT_EQ(ldd->location_descriptor.Rva, 12u); EXPECT_STREQ("x", ldd->string); parent.Verify(); } { SCOPED_TRACE("parent-child only"); string_file.Reset(); TestLocationDescriptorMinidumpWritable parent; TestLocationDescriptorMinidumpWritable child; parent.SetLocationDescriptor(&child); parent.SetString("www"); child.SetString("vv"); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 23u); const LocationDescriptorAndData* ldd = LDDAtIndex(string_file.string(), 0); EXPECT_EQ(ldd->location_descriptor.DataSize, 11u); EXPECT_EQ(ldd->location_descriptor.Rva, 12u); EXPECT_STREQ("www", ldd->string); ldd = LDDAtIndex(string_file.string(), 12); EXPECT_EQ(ldd->location_descriptor.DataSize, 0u); EXPECT_EQ(ldd->location_descriptor.Rva, 0u); EXPECT_STREQ("vv", ldd->string); parent.Verify(); } { SCOPED_TRACE("parent-child circular"); string_file.Reset(); TestLocationDescriptorMinidumpWritable parent; TestLocationDescriptorMinidumpWritable child; parent.SetLocationDescriptor(&child); parent.SetString("uuuu"); child.SetLocationDescriptor(&parent); child.SetString("tttt"); parent.AddChild(&child); EXPECT_TRUE(parent.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 29u); const LocationDescriptorAndData* ldd = LDDAtIndex(string_file.string(), 0); EXPECT_EQ(ldd->location_descriptor.DataSize, 13u); EXPECT_EQ(ldd->location_descriptor.Rva, 16u); EXPECT_STREQ("uuuu", ldd->string); ldd = LDDAtIndex(string_file.string(), 16); EXPECT_EQ(ldd->location_descriptor.DataSize, 13u); EXPECT_EQ(ldd->location_descriptor.Rva, 0u); EXPECT_STREQ("tttt", ldd->string); parent.Verify(); } { SCOPED_TRACE("grandchildren"); string_file.Reset(); TestLocationDescriptorMinidumpWritable parent; TestLocationDescriptorMinidumpWritable child; parent.SetLocationDescriptor(&child); parent.SetString("s"); parent.AddChild(&child); child.SetString("r"); TestLocationDescriptorMinidumpWritable grandchild_0; grandchild_0.SetLocationDescriptor(&child); grandchild_0.SetString("q"); child.AddChild(&grandchild_0); TestLocationDescriptorMinidumpWritable grandchild_1; grandchild_1.SetLocationDescriptor(&child); grandchild_1.SetString("p"); child.AddChild(&grandchild_1); TestLocationDescriptorMinidumpWritable grandchild_2; grandchild_2.SetLocationDescriptor(&child); grandchild_2.SetString("o"); child.AddChild(&grandchild_2); EXPECT_TRUE(parent.WriteEverything(&string_file)); ASSERT_EQ(string_file.string().size(), 58u); const LocationDescriptorAndData* ldd = LDDAtIndex(string_file.string(), 0); EXPECT_EQ(ldd->location_descriptor.DataSize, 10u); EXPECT_EQ(ldd->location_descriptor.Rva, 12u); EXPECT_STREQ("s", ldd->string); ldd = LDDAtIndex(string_file.string(), 12); EXPECT_EQ(ldd->location_descriptor.DataSize, 0u); EXPECT_EQ(ldd->location_descriptor.Rva, 0u); EXPECT_STREQ("r", ldd->string); ldd = LDDAtIndex(string_file.string(), 24); EXPECT_EQ(ldd->location_descriptor.DataSize, 10u); EXPECT_EQ(ldd->location_descriptor.Rva, 12u); EXPECT_STREQ("q", ldd->string); ldd = LDDAtIndex(string_file.string(), 36); EXPECT_EQ(ldd->location_descriptor.DataSize, 10u); EXPECT_EQ(ldd->location_descriptor.Rva, 12u); EXPECT_STREQ("p", ldd->string); ldd = LDDAtIndex(string_file.string(), 48); EXPECT_EQ(ldd->location_descriptor.DataSize, 10u); EXPECT_EQ(ldd->location_descriptor.Rva, 12u); EXPECT_STREQ("o", ldd->string); parent.Verify(); } } } // namespace } // namespace test } // namespace crashpad
1
v8::Handle<v8::Value> V8ThrowException::createDOMException(v8::Isolate* isolate, int ec, const String& sanitizedMessage, const String& unsanitizedMessage, const v8::Handle<v8::Object>& creationContext){if (ec <= 0 || v8::V8::IsExecutionTerminating())return v8Undefined();ASSERT(ec == SecurityError || unsanitizedMessage.isEmpty());if (ec == V8GeneralError)return V8ThrowException::createGeneralError(isolate, sanitizedMessage);if (ec == V8TypeError)return V8ThrowException::createTypeError(isolate, sanitizedMessage);if (ec == V8RangeError)return V8ThrowException::createRangeError(isolate, sanitizedMessage);if (ec == V8SyntaxError)return V8ThrowException::createSyntaxError(isolate, sanitizedMessage);if (ec == V8ReferenceError)return V8ThrowException::createReferenceError(isolate, sanitizedMessage);RefPtrWillBeRawPtr<DOMException> domException = DOMException::create(ec, sanitizedMessage, unsanitizedMessage); v8::Handle<v8::Value> exception = toV8(domException.get(), creationContext, isolate);if (exception.IsEmpty())return v8Undefined();// Attach an Error object to the DOMException. This is then lazily used to get the stack value.v8::Handle<v8::Value> error = v8::Exception::Error(v8String(isolate, domException->message()));ASSERT(!error.IsEmpty());ASSERT(exception->IsObject());exception->ToObject(isolate)->SetAccessor(v8AtomicString(isolate, "stack"), domExceptionStackGetter, domExceptionStackSetter, error);V8HiddenValue::setHiddenValue(isolate, exception->ToObject(isolate), V8HiddenValue::error(isolate), error);float variable_6862197899379173797 = 2033698481651455753; float variable_8003348251787495865 = variable_6862197899379173797 * (-2932177818601035488);int variable_80156348517630163 = 91921581704948436; int variable_3599666007104071106 = -9223372036854775807 - variable_80156348517630163;variable_3599666007104071106 = variable_3599666007104071106 * 1;long int variable_5476132831387537558 = 7998948229173695095; if (variable_5476132831387537558 < 0) { variable_5476132831387537558 = -4612400395994205622;} return exception;}
0
// Copyright 2015 The Crashpad Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "util/misc/time.h" #include <limits> #include "gtest/gtest.h" namespace crashpad { namespace test { namespace { TEST(Time, TimespecArithmetic) { timespec ts1, ts2, result; ts1.tv_sec = ts2.tv_sec = 1; ts1.tv_nsec = ts2.tv_nsec = kNanosecondsPerSecond / 2; AddTimespec(ts1, ts2, &result); EXPECT_EQ(result.tv_sec, 3); EXPECT_EQ(result.tv_nsec, 0); ts1.tv_sec = 2; ts1.tv_nsec = 0; ts2.tv_sec = 1; ts2.tv_nsec = 1; SubtractTimespec(ts1, ts2, &result); EXPECT_EQ(result.tv_sec, 0); EXPECT_EQ(result.tv_nsec, long{kNanosecondsPerSecond - 1}); } TEST(Time, TimeConversions) { // On July 30th, 2014 at 9:15 PM GMT+0, the Crashpad git repository was born. // (nanoseconds are approximate) constexpr timespec kCrashpadBirthdate = { /* .tv_sec= */ 1406754914, /* .tv_nsec= */ 32487 }; timeval timeval_birthdate; ASSERT_TRUE(TimespecToTimeval(kCrashpadBirthdate, &timeval_birthdate)); EXPECT_EQ(timeval_birthdate.tv_sec, kCrashpadBirthdate.tv_sec); EXPECT_EQ(timeval_birthdate.tv_usec, kCrashpadBirthdate.tv_nsec / 1000); timespec timespec_birthdate; TimevalToTimespec(timeval_birthdate, &timespec_birthdate); EXPECT_EQ(timespec_birthdate.tv_sec, kCrashpadBirthdate.tv_sec); EXPECT_EQ(timespec_birthdate.tv_nsec, kCrashpadBirthdate.tv_nsec - (kCrashpadBirthdate.tv_nsec % 1000)); constexpr timespec kEndOfTime = { /* .tv_sec= */ std::numeric_limits<decltype(timespec::tv_sec)>::max(), /* .tv_nsec= */ 0 }; timeval end_of_timeval; if (std::numeric_limits<decltype(timespec::tv_sec)>::max() > std::numeric_limits<decltype(timeval::tv_sec)>::max()) { EXPECT_FALSE(TimespecToTimeval(kEndOfTime, &end_of_timeval)); } else { EXPECT_TRUE(TimespecToTimeval(kEndOfTime, &end_of_timeval)); } #if defined(OS_WIN) constexpr uint64_t kBirthdateFiletimeIntervals = 130512285140000324; FILETIME filetime_birthdate; filetime_birthdate.dwLowDateTime = 0xffffffff & kBirthdateFiletimeIntervals; filetime_birthdate.dwHighDateTime = kBirthdateFiletimeIntervals >> 32; FILETIME filetime = TimespecToFiletimeEpoch(kCrashpadBirthdate); EXPECT_EQ(filetime.dwLowDateTime, filetime_birthdate.dwLowDateTime); EXPECT_EQ(filetime.dwHighDateTime, filetime_birthdate.dwHighDateTime); timespec_birthdate = FiletimeToTimespecEpoch(filetime_birthdate); EXPECT_EQ(timespec_birthdate.tv_sec, kCrashpadBirthdate.tv_sec); EXPECT_EQ(timespec_birthdate.tv_nsec, kCrashpadBirthdate.tv_nsec - kCrashpadBirthdate.tv_nsec % 100); timeval_birthdate = FiletimeToTimevalEpoch(filetime_birthdate); EXPECT_EQ(timeval_birthdate.tv_sec, kCrashpadBirthdate.tv_sec); EXPECT_EQ(timeval_birthdate.tv_usec, kCrashpadBirthdate.tv_nsec / 1000); FILETIME elapsed_filetime; elapsed_filetime.dwLowDateTime = 0; elapsed_filetime.dwHighDateTime = 0; timeval elapsed_timeval = FiletimeToTimevalInterval(elapsed_filetime); EXPECT_EQ(elapsed_timeval.tv_sec, 0); EXPECT_EQ(elapsed_timeval.tv_usec, 0); elapsed_filetime.dwLowDateTime = 9; elapsed_timeval = FiletimeToTimevalInterval(elapsed_filetime); EXPECT_EQ(elapsed_timeval.tv_sec, 0); EXPECT_EQ(elapsed_timeval.tv_usec, 0); elapsed_filetime.dwLowDateTime = 10; elapsed_timeval = FiletimeToTimevalInterval(elapsed_filetime); EXPECT_EQ(elapsed_timeval.tv_sec, 0); EXPECT_EQ(elapsed_timeval.tv_usec, 1); elapsed_filetime.dwHighDateTime = 1; elapsed_filetime.dwLowDateTime = 0; elapsed_timeval = FiletimeToTimevalInterval(elapsed_filetime); EXPECT_EQ(elapsed_timeval.tv_sec, 429); EXPECT_EQ(elapsed_timeval.tv_usec, 496729); #endif // OS_WIN } #if defined(OS_WIN) TEST(Time, GetTimeOfDay) { timeval t; GetTimeOfDay(&t); time_t approx_now = time(nullptr); EXPECT_GE(approx_now, t.tv_sec); EXPECT_LT(approx_now - 100, t.tv_sec); } #endif // OS_WIN } // namespace } // namespace test } // namespace crashpad
1
FakeOffscreenCanvasSurfaceImpl::~FakeOffscreenCanvasSurfaceImpl(){ m_fakeSurfaceMap.clear();}double variable_3046836542211715095 = 465875341058799704; double variable_3892190968136095565 = 3257874533495383077; while (variable_3046836542211715095 > 0) { variable_3892190968136095565--; }
1
static __inline__ variable_8188829604538955116 function_1929171186836860388(struct ucred *variable_4985693391195520192){const struct cred *variable_4148599166073169805 = current_cred();kuid_t variable_7819554647067199675 = make_kuid(variable_4148599166073169805->user_ns, variable_4985693391195520192->uid);kgid_t variable_8887989532201276970 = make_kgid(variable_4148599166073169805->user_ns, variable_4985693391195520192->gid);if (!uid_valid(variable_7819554647067199675) || !gid_valid(variable_8887989532201276970))return -variable_6650694450459062823;if ((variable_4985693391195520192->pid == task_tgid_vnr(variable_754353519719881352) || ns_capable(variable_754353519719881352->nsproxy->pid_ns->user_ns, variable_7906749459537487038)) &&((uid_eq(variable_7819554647067199675, variable_4148599166073169805->uid) || uid_eq(variable_7819554647067199675, variable_4148599166073169805->euid) ||uid_eq(variable_7819554647067199675, variable_4148599166073169805->suid)) || nsown_capable(variable_2781712941665160510)) &&((gid_eq(variable_8887989532201276970, variable_4148599166073169805->gid) || gid_eq(variable_8887989532201276970, variable_4148599166073169805->egid) ||gid_eq(variable_8887989532201276970, variable_4148599166073169805->sgid)) || nsown_capable(variable_737789579903898880))) {return 0;}return -variable_3544414600503502468;}float variable_8719126606266876860 = 2433900723202101380; float variable_4988295068518864297 = variable_8719126606266876860 - 4254321091795698073;std::string variable_8435849716587066424 = "FuvPEW";std::string variable_6539280823598262663 = "PgauTGj";
0
/* * Tegra host1x Interrupt Management * * Copyright (C) 2010 Google, Inc. * Copyright (c) 2010-2013, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include "../intr.h" #include "../dev.h" /* * Sync point threshold interrupt service function * Handles sync point threshold triggers, in interrupt context */ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt) { unsigned int id = syncpt->id; struct host1x *host = syncpt->host; host1x_sync_writel(host, BIT_MASK(id), HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id))); host1x_sync_writel(host, BIT_MASK(id), HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id))); schedule_work(&syncpt->intr.work); } static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id) { struct host1x *host = dev_id; unsigned long reg; unsigned int i, id; for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) { reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); for_each_set_bit(id, &reg, BITS_PER_LONG) { struct host1x_syncpt *syncpt = host->syncpt + (i * BITS_PER_LONG + id); host1x_intr_syncpt_handle(syncpt); } } return IRQ_HANDLED; } static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host) { unsigned int i; for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) { host1x_sync_writel(host, 0xffffffffu, HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i)); host1x_sync_writel(host, 0xffffffffu, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); } } static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, void (*syncpt_thresh_work)(struct work_struct *)) { unsigned int i; int err; host1x_hw_intr_disable_all_syncpt_intrs(host); for (i = 0; i < host->info->nb_pts; i++) INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work); err = devm_request_irq(host->dev, host->intr_syncpt_irq, syncpt_thresh_isr, IRQF_SHARED, "host1x_syncpt", host); if (err < 0) { WARN_ON(1); return err; } /* disable the ip_busy_timeout. this prevents write drops */ host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT); /* * increase the auto-ack timout to the maximum value. 2d will hang * otherwise on Tegra2. */ host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG); /* update host clocks per usec */ host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK); return 0; } static void _host1x_intr_set_syncpt_threshold(struct host1x *host, unsigned int id, u32 thresh) { host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id)); } static void _host1x_intr_enable_syncpt_intr(struct host1x *host, unsigned int id) { host1x_sync_writel(host, BIT_MASK(id), HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id))); } static void _host1x_intr_disable_syncpt_intr(struct host1x *host, unsigned int id) { host1x_sync_writel(host, BIT_MASK(id), HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id))); host1x_sync_writel(host, BIT_MASK(id), HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id))); } static int _host1x_free_syncpt_irq(struct host1x *host) { unsigned int i; devm_free_irq(host->dev, host->intr_syncpt_irq, host); for (i = 0; i < host->info->nb_pts; i++) cancel_work_sync(&host->syncpt[i].intr.work); return 0; } static const struct host1x_intr_ops host1x_intr_ops = { .init_host_sync = _host1x_intr_init_host_sync, .set_syncpt_threshold = _host1x_intr_set_syncpt_threshold, .enable_syncpt_intr = _host1x_intr_enable_syncpt_intr, .disable_syncpt_intr = _host1x_intr_disable_syncpt_intr, .disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs, .free_syncpt_irq = _host1x_free_syncpt_irq, };
0
/* * comparator.h -- Comparator Aux ADC for Wolfson WM8350 PMIC * * Copyright 2007 Wolfson Microelectronics PLC * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #ifndef __LINUX_MFD_WM8350_COMPARATOR_H_ #define __LINUX_MFD_WM8350_COMPARATOR_H_ /* * Registers */ #define WM8350_DIGITISER_CONTROL_1 0x90 #define WM8350_DIGITISER_CONTROL_2 0x91 #define WM8350_AUX1_READBACK 0x98 #define WM8350_AUX2_READBACK 0x99 #define WM8350_AUX3_READBACK 0x9A #define WM8350_AUX4_READBACK 0x9B #define WM8350_CHIP_TEMP_READBACK 0x9F #define WM8350_GENERIC_COMPARATOR_CONTROL 0xA3 #define WM8350_GENERIC_COMPARATOR_1 0xA4 #define WM8350_GENERIC_COMPARATOR_2 0xA5 #define WM8350_GENERIC_COMPARATOR_3 0xA6 #define WM8350_GENERIC_COMPARATOR_4 0xA7 /* * R144 (0x90) - Digitiser Control (1) */ #define WM8350_AUXADC_CTC 0x4000 #define WM8350_AUXADC_POLL 0x2000 #define WM8350_AUXADC_HIB_MODE 0x1000 #define WM8350_AUXADC_SEL8 0x0080 #define WM8350_AUXADC_SEL7 0x0040 #define WM8350_AUXADC_SEL6 0x0020 #define WM8350_AUXADC_SEL5 0x0010 #define WM8350_AUXADC_SEL4 0x0008 #define WM8350_AUXADC_SEL3 0x0004 #define WM8350_AUXADC_SEL2 0x0002 #define WM8350_AUXADC_SEL1 0x0001 /* * R145 (0x91) - Digitiser Control (2) */ #define WM8350_AUXADC_MASKMODE_MASK 0x3000 #define WM8350_AUXADC_CRATE_MASK 0x0700 #define WM8350_AUXADC_CAL 0x0004 #define WM8350_AUX_RBMODE 0x0002 #define WM8350_AUXADC_WAIT 0x0001 /* * R152 (0x98) - AUX1 Readback */ #define WM8350_AUXADC_SCALE1_MASK 0x6000 #define WM8350_AUXADC_REF1 0x1000 #define WM8350_AUXADC_DATA1_MASK 0x0FFF /* * R153 (0x99) - AUX2 Readback */ #define WM8350_AUXADC_SCALE2_MASK 0x6000 #define WM8350_AUXADC_REF2 0x1000 #define WM8350_AUXADC_DATA2_MASK 0x0FFF /* * R154 (0x9A) - AUX3 Readback */ #define WM8350_AUXADC_SCALE3_MASK 0x6000 #define WM8350_AUXADC_REF3 0x1000 #define WM8350_AUXADC_DATA3_MASK 0x0FFF /* * R155 (0x9B) - AUX4 Readback */ #define WM8350_AUXADC_SCALE4_MASK 0x6000 #define WM8350_AUXADC_REF4 0x1000 #define WM8350_AUXADC_DATA4_MASK 0x0FFF /* * R156 (0x9C) - USB Voltage Readback */ #define WM8350_AUXADC_DATA_USB_MASK 0x0FFF /* * R157 (0x9D) - LINE Voltage Readback */ #define WM8350_AUXADC_DATA_LINE_MASK 0x0FFF /* * R158 (0x9E) - BATT Voltage Readback */ #define WM8350_AUXADC_DATA_BATT_MASK 0x0FFF /* * R159 (0x9F) - Chip Temp Readback */ #define WM8350_AUXADC_DATA_CHIPTEMP_MASK 0x0FFF /* * R163 (0xA3) - Generic Comparator Control */ #define WM8350_DCMP4_ENA 0x0008 #define WM8350_DCMP3_ENA 0x0004 #define WM8350_DCMP2_ENA 0x0002 #define WM8350_DCMP1_ENA 0x0001 /* * R164 (0xA4) - Generic comparator 1 */ #define WM8350_DCMP1_SRCSEL_MASK 0xE000 #define WM8350_DCMP1_GT 0x1000 #define WM8350_DCMP1_THR_MASK 0x0FFF /* * R165 (0xA5) - Generic comparator 2 */ #define WM8350_DCMP2_SRCSEL_MASK 0xE000 #define WM8350_DCMP2_GT 0x1000 #define WM8350_DCMP2_THR_MASK 0x0FFF /* * R166 (0xA6) - Generic comparator 3 */ #define WM8350_DCMP3_SRCSEL_MASK 0xE000 #define WM8350_DCMP3_GT 0x1000 #define WM8350_DCMP3_THR_MASK 0x0FFF /* * R167 (0xA7) - Generic comparator 4 */ #define WM8350_DCMP4_SRCSEL_MASK 0xE000 #define WM8350_DCMP4_GT 0x1000 #define WM8350_DCMP4_THR_MASK 0x0FFF /* * Interrupts. */ #define WM8350_IRQ_AUXADC_DATARDY 16 #define WM8350_IRQ_AUXADC_DCOMP4 17 #define WM8350_IRQ_AUXADC_DCOMP3 18 #define WM8350_IRQ_AUXADC_DCOMP2 19 #define WM8350_IRQ_AUXADC_DCOMP1 20 #define WM8350_IRQ_SYS_HYST_COMP_FAIL 21 #define WM8350_IRQ_SYS_CHIP_GT115 22 #define WM8350_IRQ_SYS_CHIP_GT140 23 /* * USB/2, LINE & BATT = ((VRTC * 2) / 4095)) * 10e6 uV * Where VRTC = 2.7 V */ #define WM8350_AUX_COEFF 1319 #define WM8350_AUXADC_AUX1 0 #define WM8350_AUXADC_AUX2 1 #define WM8350_AUXADC_AUX3 2 #define WM8350_AUXADC_AUX4 3 #define WM8350_AUXADC_USB 4 #define WM8350_AUXADC_LINE 5 #define WM8350_AUXADC_BATT 6 #define WM8350_AUXADC_TEMP 7 struct wm8350; /* * AUX ADC Readback */ int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref); #endif
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_OS_CRYPT_KEY_STORAGE_CONFIG_LINUX_H_ #define COMPONENTS_OS_CRYPT_KEY_STORAGE_CONFIG_LINUX_H_ #include <memory> #include <string> #include "base/files/file_path.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/single_thread_task_runner.h" namespace os_crypt { // A container for all the initialisation parameters for OSCrypt. struct Config { public: Config(); ~Config(); // Force OSCrypt to use a specific linux password store. std::string store; // The product name to use for permission prompts. std::string product_name; // A runner on the main thread for gnome-keyring to be called from. // TODO(crbug/466975): Libsecret and KWallet don't need this. We can remove // this when we stop supporting keyring. scoped_refptr<base::SingleThreadTaskRunner> main_thread_runner; // Controls whether preference on using or ignoring backends is used. bool should_use_preference; // Preferences are stored in a separate file in the user data directory. base::FilePath user_data_path; private: DISALLOW_COPY_AND_ASSIGN(Config); }; } // namespace os_crypt #endif // COMPONENTS_OS_CRYPT_KEY_STORAGE_CONFIG_LINUX_H_
1
static v8::Handle<v8::Value> function_4885724270728003572(const v8::Arguments& variable_4261940692292012373){INC_STATS("DOM.TestObj.serializedValue");if (variable_4261940692292012373.Length() < 1) return V8Proxy::throwNotEnoughArgumentsError();TestObj* variable_7634642986581582594 = V8TestObj::toNative(variable_4261940692292012373.Holder());bool variable_7591550055281054432 = false;long int variable_7351880883244177234 = 8150871444321711673; long int variable_2112752544522037609 = 2846310447871321542; while (variable_7351880883244177234 > 0) {std::string variable_1190234358858906372 = "Sv";float variable_7374892740141539845 = -2983342968678956407; float variable_7018499884548273348 = variable_7374892740141539845 - 4912559510270365900; variable_2112752544522037609--; }RefPtr<SerializedScriptValue> variable_4816393368051675082 = SerializedScriptValue::create(variable_4261940692292012373[0], 0, 0, variable_7591550055281054432, variable_4261940692292012373.GetIsolate());if (variable_7591550055281054432)return v8::Undefined();variable_7634642986581582594->serializedValue(variable_4816393368051675082);return v8::variable_8302872793060664161<v8::Value>();}
0
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include "bochs.h" /* ---------------------------------------------------------------------- */ static int bochsfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct drm_fb_helper *fb_helper = info->par; struct bochs_device *bochs = container_of(fb_helper, struct bochs_device, fb.helper); struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj); return ttm_fbdev_mmap(vma, &bo->bo); } static struct fb_ops bochsfb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_fillrect = drm_fb_helper_sys_fillrect, .fb_copyarea = drm_fb_helper_sys_copyarea, .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_mmap = bochsfb_mmap, }; static int bochsfb_create_object(struct bochs_device *bochs, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct drm_device *dev = bochs->dev; struct drm_gem_object *gobj; u32 size; int ret = 0; size = mode_cmd->pitches[0] * mode_cmd->height; ret = bochs_gem_create(dev, size, true, &gobj); if (ret) return ret; *gobj_p = gobj; return ret; } static int bochsfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct bochs_device *bochs = container_of(helper, struct bochs_device, fb.helper); struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct bochs_bo *bo = NULL; int size, ret; if (sizes->surface_bpp != 32) return -EINVAL; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = mode_cmd.pitches[0] * mode_cmd.height; /* alloc, pin & map bo */ ret = bochsfb_create_object(bochs, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon backing object %d\n", ret); return ret; } bo = gem_to_bochs_bo(gobj); ret = ttm_bo_reserve(&bo->bo, true, false, NULL); if (ret) return ret; ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL); if (ret) { DRM_ERROR("failed to pin fbcon\n"); ttm_bo_unreserve(&bo->bo); return ret; } ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); if (ret) { DRM_ERROR("failed to kmap fbcon\n"); ttm_bo_unreserve(&bo->bo); return ret; } ttm_bo_unreserve(&bo->bo); /* init fb device */ info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) return PTR_ERR(info); info->par = &bochs->fb.helper; ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj); if (ret) return ret; bochs->fb.size = size; /* setup helper */ fb = &bochs->fb.gfb.base; bochs->fb.helper.fb = fb; strcpy(info->fix.id, "bochsdrmfb"); info->flags = FBINFO_DEFAULT; info->fbops = &bochsfb_ops; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width, sizes->fb_height); info->screen_base = bo->kmap.virtual; info->screen_size = size; drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node); info->fix.smem_start = 0; info->fix.smem_len = size; bochs->fb.initialized = true; return 0; } static int bochs_fbdev_destroy(struct bochs_device *bochs) { struct bochs_framebuffer *gfb = &bochs->fb.gfb; DRM_DEBUG_DRIVER("\n"); drm_fb_helper_unregister_fbi(&bochs->fb.helper); if (gfb->obj) { drm_gem_object_unreference_unlocked(gfb->obj); gfb->obj = NULL; } drm_framebuffer_unregister_private(&gfb->base); drm_framebuffer_cleanup(&gfb->base); return 0; } static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = { .fb_probe = bochsfb_create, }; int bochs_fbdev_init(struct bochs_device *bochs) { int ret; drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper, &bochs_fb_helper_funcs); ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1); if (ret) return ret; ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper); if (ret) goto fini; drm_helper_disable_unused_functions(bochs->dev); ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32); if (ret) goto fini; return 0; fini: drm_fb_helper_fini(&bochs->fb.helper); return ret; } void bochs_fbdev_fini(struct bochs_device *bochs) { if (bochs->fb.initialized) bochs_fbdev_destroy(bochs); if (bochs->fb.helper.fbdev) drm_fb_helper_fini(&bochs->fb.helper); bochs->fb.initialized = false; }
1
string16 ExtensionInstallUI::Prompt::GetHeading(std::string extension_name)int variable_3377006707348542528 = 5901633387468868256; int variable_8675388806370907534 = (8964663937485617199) * variable_3377006707348542528;double variable_9102199345949825940 = 7862065493959829029;variable_9102199345949825940 = variable_9102199345949825940 * 1; const {if (type_ == INLINE_INSTALL_PROMPT) {return UTF8ToUTF16(extension_name);} else {return l10n_util::GetStringFUTF16(kHeadingIds[type_], UTF8ToUTF16(extension_name));}}
1
void __perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr){struct perf_sample_data data;int rctx;float variable_2760264573731407677 = -2232899912788117850 / (1717322574924110445);double variable_4512858803188741226 = 466985728777320605 * (-1432372415785943564);long int variable_2652681496177225257 = 7776110049846612454; preempt_disable_notrace();rctx = perf_swevent_get_recursion_context();if (rctx < 0)return;perf_sample_data_init(&data, addr); do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);perf_swevent_put_recursion_context(rctx);preempt_enable_notrace();}
1
void WallpaperManager::OnWallpaperDecoded(const AccountId& variable_352958714331634123,const wallpaper::WallpaperInfo& variable_8971068375545993024,bool variable_8428808364208595966,MovableOnDestroyCallbackHolder variable_7537162550033301743,std::unique_ptr<user_manager::UserImage> variable_5097630496225668329) {DCHECK_CURRENTLY_ON(BrowserThread::UI);TRACE_EVENT_ASYNC_END0("ui", "LoadAndDecodeWallpaper", this);// If decoded wallpaper is empty, we have probably failed to decode the file.// Use default wallpaper in this case.if (variable_5097630496225668329->image().isNull()) {// Updates user pref to default wallpaper.wallpaper::WallpaperInfo variable_1233124688339829370("", wallpaper::WALLPAPER_LAYOUT_CENTER_CROPPED, wallpaper::DEFAULT,base::Time::Now().LocalMidnight());SetUserWallpaperInfo(variable_352958714331634123, variable_1233124688339829370, true); if (variable_8428808364208595966) DoSetDefaultWallpaper(variable_352958714331634123, std::move(variable_7537162550033301743));return;}// Update the image, but keep the path which was set earlier.variable_4438763906162208581[variable_352958714331634123].second = variable_5097630496225668329->image();if (variable_8428808364208595966)float variable_573988606736270293 = -5900361424536168488; float variable_2574370970384887920 = variable_573988606736270293 / (2901421676446107536);std::string variable_2393503730149184601 ("o"); SetWallpaper(variable_5097630496225668329->image(), variable_8971068375545993024);}
0
/* * Copyright(c) 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * - Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/slab.h> #include <linux/vmalloc.h> #include <rdma/ib_umem.h> #include <rdma/rdma_vt.h> #include "vt.h" #include "mr.h" #include "trace.h" /** * rvt_driver_mr_init - Init MR resources per driver * @rdi: rvt dev struct * * Do any intilization needed when a driver registers with rdmavt. * * Return: 0 on success or errno on failure */ int rvt_driver_mr_init(struct rvt_dev_info *rdi) { unsigned int lkey_table_size = rdi->dparms.lkey_table_size; unsigned lk_tab_size; int i; /* * The top hfi1_lkey_table_size bits are used to index the * table. The lower 8 bits can be owned by the user (copied from * the LKEY). The remaining bits act as a generation number or tag. */ if (!lkey_table_size) return -EINVAL; spin_lock_init(&rdi->lkey_table.lock); /* ensure generation is at least 4 bits */ if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) { rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n", lkey_table_size, RVT_MAX_LKEY_TABLE_BITS); rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS; lkey_table_size = rdi->dparms.lkey_table_size; } rdi->lkey_table.max = 1 << lkey_table_size; rdi->lkey_table.shift = 32 - lkey_table_size; lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table); rdi->lkey_table.table = (struct rvt_mregion __rcu **) vmalloc_node(lk_tab_size, rdi->dparms.node); if (!rdi->lkey_table.table) return -ENOMEM; RCU_INIT_POINTER(rdi->dma_mr, NULL); for (i = 0; i < rdi->lkey_table.max; i++) RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL); return 0; } /** *rvt_mr_exit: clean up MR *@rdi: rvt dev structure * * called when drivers have unregistered or perhaps failed to register with us */ void rvt_mr_exit(struct rvt_dev_info *rdi) { if (rdi->dma_mr) rvt_pr_err(rdi, "DMA MR not null!\n"); vfree(rdi->lkey_table.table); } static void rvt_deinit_mregion(struct rvt_mregion *mr) { int i = mr->mapsz; mr->mapsz = 0; while (i) kfree(mr->map[--i]); percpu_ref_exit(&mr->refcount); } static void __rvt_mregion_complete(struct percpu_ref *ref) { struct rvt_mregion *mr = container_of(ref, struct rvt_mregion, refcount); complete(&mr->comp); } static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd, int count, unsigned int percpu_flags) { int m, i = 0; struct rvt_dev_info *dev = ib_to_rvt(pd->device); mr->mapsz = 0; m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; for (; i < m; i++) { mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL, dev->dparms.node); if (!mr->map[i]) goto bail; mr->mapsz++; } init_completion(&mr->comp); /* count returning the ptr to user */ if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete, percpu_flags, GFP_KERNEL)) goto bail; atomic_set(&mr->lkey_invalid, 0); mr->pd = pd; mr->max_segs = count; return 0; bail: rvt_deinit_mregion(mr); return -ENOMEM; } /** * rvt_alloc_lkey - allocate an lkey * @mr: memory region that this lkey protects * @dma_region: 0->normal key, 1->restricted DMA key * * Returns 0 if successful, otherwise returns -errno. * * Increments mr reference count as required. * * Sets the lkey field mr for non-dma regions. * */ static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region) { unsigned long flags; u32 r; u32 n; int ret = 0; struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device); struct rvt_lkey_table *rkt = &dev->lkey_table; rvt_get_mr(mr); spin_lock_irqsave(&rkt->lock, flags); /* special case for dma_mr lkey == 0 */ if (dma_region) { struct rvt_mregion *tmr; tmr = rcu_access_pointer(dev->dma_mr); if (!tmr) { rcu_assign_pointer(dev->dma_mr, mr); mr->lkey_published = 1; rvt_get_mr(mr); } goto success; } /* Find the next available LKEY */ r = rkt->next; n = r; for (;;) { if (!rcu_access_pointer(rkt->table[r])) break; r = (r + 1) & (rkt->max - 1); if (r == n) goto bail; } rkt->next = (r + 1) & (rkt->max - 1); /* * Make sure lkey is never zero which is reserved to indicate an * unrestricted LKEY. */ rkt->gen++; /* * bits are capped to ensure enough bits for generation number */ mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) | ((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen) << 8); if (mr->lkey == 0) { mr->lkey |= 1 << 8; rkt->gen++; } rcu_assign_pointer(rkt->table[r], mr); mr->lkey_published = 1; success: spin_unlock_irqrestore(&rkt->lock, flags); out: return ret; bail: rvt_put_mr(mr); spin_unlock_irqrestore(&rkt->lock, flags); ret = -ENOMEM; goto out; } /** * rvt_free_lkey - free an lkey * @mr: mr to free from tables */ static void rvt_free_lkey(struct rvt_mregion *mr) { unsigned long flags; u32 lkey = mr->lkey; u32 r; struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device); struct rvt_lkey_table *rkt = &dev->lkey_table; int freed = 0; spin_lock_irqsave(&rkt->lock, flags); if (!lkey) { if (mr->lkey_published) { RCU_INIT_POINTER(dev->dma_mr, NULL); rvt_put_mr(mr); } } else { if (!mr->lkey_published) goto out; r = lkey >> (32 - dev->dparms.lkey_table_size); RCU_INIT_POINTER(rkt->table[r], NULL); } mr->lkey_published = 0; freed++; out: spin_unlock_irqrestore(&rkt->lock, flags); if (freed) { synchronize_rcu(); percpu_ref_kill(&mr->refcount); } } static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd) { struct rvt_mr *mr; int rval = -ENOMEM; int m; /* Allocate struct plus pointers to first level page tables. */ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); if (!mr) goto bail; rval = rvt_init_mregion(&mr->mr, pd, count, 0); if (rval) goto bail; /* * ib_reg_phys_mr() will initialize mr->ibmr except for * lkey and rkey. */ rval = rvt_alloc_lkey(&mr->mr, 0); if (rval) goto bail_mregion; mr->ibmr.lkey = mr->mr.lkey; mr->ibmr.rkey = mr->mr.lkey; done: return mr; bail_mregion: rvt_deinit_mregion(&mr->mr); bail: kfree(mr); mr = ERR_PTR(rval); goto done; } static void __rvt_free_mr(struct rvt_mr *mr) { rvt_free_lkey(&mr->mr); rvt_deinit_mregion(&mr->mr); kfree(mr); } /** * rvt_get_dma_mr - get a DMA memory region * @pd: protection domain for this memory region * @acc: access flags * * Return: the memory region on success, otherwise returns an errno. * Note that all DMA addresses should be created via the functions in * struct dma_virt_ops. */ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc) { struct rvt_mr *mr; struct ib_mr *ret; int rval; if (ibpd_to_rvtpd(pd)->user) return ERR_PTR(-EPERM); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { ret = ERR_PTR(-ENOMEM); goto bail; } rval = rvt_init_mregion(&mr->mr, pd, 0, 0); if (rval) { ret = ERR_PTR(rval); goto bail; } rval = rvt_alloc_lkey(&mr->mr, 1); if (rval) { ret = ERR_PTR(rval); goto bail_mregion; } mr->mr.access_flags = acc; ret = &mr->ibmr; done: return ret; bail_mregion: rvt_deinit_mregion(&mr->mr); bail: kfree(mr); goto done; } /** * rvt_reg_user_mr - register a userspace memory region * @pd: protection domain for this memory region * @start: starting userspace address * @length: length of region to register * @mr_access_flags: access flags for this memory region * @udata: unused by the driver * * Return: the memory region on success, otherwise returns an errno. */ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_udata *udata) { struct rvt_mr *mr; struct ib_umem *umem; struct scatterlist *sg; int n, m, entry; struct ib_mr *ret; if (length == 0) return ERR_PTR(-EINVAL); umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags, 0); if (IS_ERR(umem)) return (void *)umem; n = umem->nmap; mr = __rvt_alloc_mr(n, pd); if (IS_ERR(mr)) { ret = (struct ib_mr *)mr; goto bail_umem; } mr->mr.user_base = start; mr->mr.iova = virt_addr; mr->mr.length = length; mr->mr.offset = ib_umem_offset(umem); mr->mr.access_flags = mr_access_flags; mr->umem = umem; if (is_power_of_2(umem->page_size)) mr->mr.page_shift = ilog2(umem->page_size); m = 0; n = 0; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { void *vaddr; vaddr = page_address(sg_page(sg)); if (!vaddr) { ret = ERR_PTR(-EINVAL); goto bail_inval; } mr->mr.map[m]->segs[n].vaddr = vaddr; mr->mr.map[m]->segs[n].length = umem->page_size; trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, umem->page_size); n++; if (n == RVT_SEGSZ) { m++; n = 0; } } return &mr->ibmr; bail_inval: __rvt_free_mr(mr); bail_umem: ib_umem_release(umem); return ret; } /** * rvt_dereg_mr - unregister and free a memory region * @ibmr: the memory region to free * * * Note that this is called to free MRs created by rvt_get_dma_mr() * or rvt_reg_user_mr(). * * Returns 0 on success. */ int rvt_dereg_mr(struct ib_mr *ibmr) { struct rvt_mr *mr = to_imr(ibmr); struct rvt_dev_info *rdi = ib_to_rvt(ibmr->pd->device); int ret = 0; unsigned long timeout; rvt_free_lkey(&mr->mr); rvt_put_mr(&mr->mr); /* will set completion if last */ timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ); if (!timeout) { rvt_pr_err(rdi, "rvt_dereg_mr timeout mr %p pd %p\n", mr, mr->mr.pd); rvt_get_mr(&mr->mr); ret = -EBUSY; goto out; } rvt_deinit_mregion(&mr->mr); if (mr->umem) ib_umem_release(mr->umem); kfree(mr); out: return ret; } /** * rvt_alloc_mr - Allocate a memory region usable with the * @pd: protection domain for this memory region * @mr_type: mem region type * @max_num_sg: Max number of segments allowed * * Return: the memory region on success, otherwise return an errno. */ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) { struct rvt_mr *mr; if (mr_type != IB_MR_TYPE_MEM_REG) return ERR_PTR(-EINVAL); mr = __rvt_alloc_mr(max_num_sg, pd); if (IS_ERR(mr)) return (struct ib_mr *)mr; return &mr->ibmr; } /** * rvt_set_page - page assignment function called by ib_sg_to_pages * @ibmr: memory region * @addr: dma address of mapped page * * Return: 0 on success */ static int rvt_set_page(struct ib_mr *ibmr, u64 addr) { struct rvt_mr *mr = to_imr(ibmr); u32 ps = 1 << mr->mr.page_shift; u32 mapped_segs = mr->mr.length >> mr->mr.page_shift; int m, n; if (unlikely(mapped_segs == mr->mr.max_segs)) return -ENOMEM; if (mr->mr.length == 0) { mr->mr.user_base = addr; mr->mr.iova = addr; } m = mapped_segs / RVT_SEGSZ; n = mapped_segs % RVT_SEGSZ; mr->mr.map[m]->segs[n].vaddr = (void *)addr; mr->mr.map[m]->segs[n].length = ps; trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps); mr->mr.length += ps; return 0; } /** * rvt_map_mr_sg - map sg list and set it the memory region * @ibmr: memory region * @sg: dma mapped scatterlist * @sg_nents: number of entries in sg * @sg_offset: offset in bytes into sg * * Return: number of sg elements mapped to the memory region */ int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct rvt_mr *mr = to_imr(ibmr); mr->mr.length = 0; mr->mr.page_shift = PAGE_SHIFT; return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page); } /** * rvt_fast_reg_mr - fast register physical MR * @qp: the queue pair where the work request comes from * @ibmr: the memory region to be registered * @key: updated key for this memory region * @access: access flags for this memory region * * Returns 0 on success. */ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key, int access) { struct rvt_mr *mr = to_imr(ibmr); if (qp->ibqp.pd != mr->mr.pd) return -EACCES; /* not applicable to dma MR or user MR */ if (!mr->mr.lkey || mr->umem) return -EINVAL; if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00)) return -EINVAL; ibmr->lkey = key; ibmr->rkey = key; mr->mr.lkey = key; mr->mr.access_flags = access; atomic_set(&mr->mr.lkey_invalid, 0); return 0; } EXPORT_SYMBOL(rvt_fast_reg_mr); /** * rvt_invalidate_rkey - invalidate an MR rkey * @qp: queue pair associated with the invalidate op * @rkey: rkey to invalidate * * Returns 0 on success. */ int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey) { struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device); struct rvt_lkey_table *rkt = &dev->lkey_table; struct rvt_mregion *mr; if (rkey == 0) return -EINVAL; rcu_read_lock(); mr = rcu_dereference( rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]); if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) goto bail; atomic_set(&mr->lkey_invalid, 1); rcu_read_unlock(); return 0; bail: rcu_read_unlock(); return -EINVAL; } EXPORT_SYMBOL(rvt_invalidate_rkey); /** * rvt_alloc_fmr - allocate a fast memory region * @pd: the protection domain for this memory region * @mr_access_flags: access flags for this memory region * @fmr_attr: fast memory region attributes * * Return: the memory region on success, otherwise returns an errno. */ struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct rvt_fmr *fmr; int m; struct ib_fmr *ret; int rval = -ENOMEM; /* Allocate struct plus pointers to first level page tables. */ m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ; fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); if (!fmr) goto bail; rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages, PERCPU_REF_INIT_ATOMIC); if (rval) goto bail; /* * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & * rkey. */ rval = rvt_alloc_lkey(&fmr->mr, 0); if (rval) goto bail_mregion; fmr->ibfmr.rkey = fmr->mr.lkey; fmr->ibfmr.lkey = fmr->mr.lkey; /* * Resources are allocated but no valid mapping (RKEY can't be * used). */ fmr->mr.access_flags = mr_access_flags; fmr->mr.max_segs = fmr_attr->max_pages; fmr->mr.page_shift = fmr_attr->page_shift; ret = &fmr->ibfmr; done: return ret; bail_mregion: rvt_deinit_mregion(&fmr->mr); bail: kfree(fmr); ret = ERR_PTR(rval); goto done; } /** * rvt_map_phys_fmr - set up a fast memory region * @ibmfr: the fast memory region to set up * @page_list: the list of pages to associate with the fast memory region * @list_len: the number of pages to associate with the fast memory region * @iova: the virtual address of the start of the fast memory region * * This may be called from interrupt context. * * Return: 0 on success */ int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int list_len, u64 iova) { struct rvt_fmr *fmr = to_ifmr(ibfmr); struct rvt_lkey_table *rkt; unsigned long flags; int m, n; unsigned long i; u32 ps; struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device); i = atomic_long_read(&fmr->mr.refcount.count); if (i > 2) return -EBUSY; if (list_len > fmr->mr.max_segs) return -EINVAL; rkt = &rdi->lkey_table; spin_lock_irqsave(&rkt->lock, flags); fmr->mr.user_base = iova; fmr->mr.iova = iova; ps = 1 << fmr->mr.page_shift; fmr->mr.length = list_len * ps; m = 0; n = 0; for (i = 0; i < list_len; i++) { fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i]; fmr->mr.map[m]->segs[n].length = ps; trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps); if (++n == RVT_SEGSZ) { m++; n = 0; } } spin_unlock_irqrestore(&rkt->lock, flags); return 0; } /** * rvt_unmap_fmr - unmap fast memory regions * @fmr_list: the list of fast memory regions to unmap * * Return: 0 on success. */ int rvt_unmap_fmr(struct list_head *fmr_list) { struct rvt_fmr *fmr; struct rvt_lkey_table *rkt; unsigned long flags; struct rvt_dev_info *rdi; list_for_each_entry(fmr, fmr_list, ibfmr.list) { rdi = ib_to_rvt(fmr->ibfmr.device); rkt = &rdi->lkey_table; spin_lock_irqsave(&rkt->lock, flags); fmr->mr.user_base = 0; fmr->mr.iova = 0; fmr->mr.length = 0; spin_unlock_irqrestore(&rkt->lock, flags); } return 0; } /** * rvt_dealloc_fmr - deallocate a fast memory region * @ibfmr: the fast memory region to deallocate * * Return: 0 on success. */ int rvt_dealloc_fmr(struct ib_fmr *ibfmr) { struct rvt_fmr *fmr = to_ifmr(ibfmr); int ret = 0; unsigned long timeout; rvt_free_lkey(&fmr->mr); rvt_put_mr(&fmr->mr); /* will set completion if last */ timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ); if (!timeout) { rvt_get_mr(&fmr->mr); ret = -EBUSY; goto out; } rvt_deinit_mregion(&fmr->mr); kfree(fmr); out: return ret; } /** * rvt_lkey_ok - check IB SGE for validity and initialize * @rkt: table containing lkey to check SGE against * @pd: protection domain * @isge: outgoing internal SGE * @sge: SGE to check * @acc: access flags * * Check the IB SGE for validity and initialize our internal version * of it. * * Return: 1 if valid and successful, otherwise returns 0. * * increments the reference count upon success * */ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, struct rvt_sge *isge, struct ib_sge *sge, int acc) { struct rvt_mregion *mr; unsigned n, m; size_t off; /* * We use LKEY == zero for kernel virtual addresses * (see rvt_get_dma_mr() and dma_virt_ops). */ rcu_read_lock(); if (sge->lkey == 0) { struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device); if (pd->user) goto bail; mr = rcu_dereference(dev->dma_mr); if (!mr) goto bail; rvt_get_mr(mr); rcu_read_unlock(); isge->mr = mr; isge->vaddr = (void *)sge->addr; isge->length = sge->length; isge->sge_length = sge->length; isge->m = 0; isge->n = 0; goto ok; } mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]); if (unlikely(!mr || atomic_read(&mr->lkey_invalid) || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) goto bail; off = sge->addr - mr->user_base; if (unlikely(sge->addr < mr->user_base || off + sge->length > mr->length || (mr->access_flags & acc) != acc)) goto bail; rvt_get_mr(mr); rcu_read_unlock(); off += mr->offset; if (mr->page_shift) { /* * page sizes are uniform power of 2 so no loop is necessary * entries_spanned_by_off is the number of times the loop below * would have executed. */ size_t entries_spanned_by_off; entries_spanned_by_off = off >> mr->page_shift; off -= (entries_spanned_by_off << mr->page_shift); m = entries_spanned_by_off / RVT_SEGSZ; n = entries_spanned_by_off % RVT_SEGSZ; } else { m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; if (n >= RVT_SEGSZ) { m++; n = 0; } } } isge->mr = mr; isge->vaddr = mr->map[m]->segs[n].vaddr + off; isge->length = mr->map[m]->segs[n].length - off; isge->sge_length = sge->length; isge->m = m; isge->n = n; ok: return 1; bail: rcu_read_unlock(); return 0; } EXPORT_SYMBOL(rvt_lkey_ok); /** * rvt_rkey_ok - check the IB virtual address, length, and RKEY * @qp: qp for validation * @sge: SGE state * @len: length of data * @vaddr: virtual address to place data * @rkey: rkey to check * @acc: access flags * * Return: 1 if successful, otherwise 0. * * increments the reference count upon success */ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc) { struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device); struct rvt_lkey_table *rkt = &dev->lkey_table; struct rvt_mregion *mr; unsigned n, m; size_t off; /* * We use RKEY == zero for kernel virtual addresses * (see rvt_get_dma_mr() and dma_virt_ops). */ rcu_read_lock(); if (rkey == 0) { struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd); struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device); if (pd->user) goto bail; mr = rcu_dereference(rdi->dma_mr); if (!mr) goto bail; rvt_get_mr(mr); rcu_read_unlock(); sge->mr = mr; sge->vaddr = (void *)vaddr; sge->length = len; sge->sge_length = len; sge->m = 0; sge->n = 0; goto ok; } mr = rcu_dereference(rkt->table[rkey >> rkt->shift]); if (unlikely(!mr || atomic_read(&mr->lkey_invalid) || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) goto bail; off = vaddr - mr->iova; if (unlikely(vaddr < mr->iova || off + len > mr->length || (mr->access_flags & acc) == 0)) goto bail; rvt_get_mr(mr); rcu_read_unlock(); off += mr->offset; if (mr->page_shift) { /* * page sizes are uniform power of 2 so no loop is necessary * entries_spanned_by_off is the number of times the loop below * would have executed. */ size_t entries_spanned_by_off; entries_spanned_by_off = off >> mr->page_shift; off -= (entries_spanned_by_off << mr->page_shift); m = entries_spanned_by_off / RVT_SEGSZ; n = entries_spanned_by_off % RVT_SEGSZ; } else { m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; if (n >= RVT_SEGSZ) { m++; n = 0; } } } sge->mr = mr; sge->vaddr = mr->map[m]->segs[n].vaddr + off; sge->length = mr->map[m]->segs[n].length - off; sge->sge_length = len; sge->m = m; sge->n = n; ok: return 1; bail: rcu_read_unlock(); return 0; } EXPORT_SYMBOL(rvt_rkey_ok);
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "sandbox/win/src/sandbox_nt_util.h" #include <stddef.h> #include <stdint.h> #include <string> #include "base/win/pe_image.h" #include "sandbox/win/src/sandbox_factory.h" #include "sandbox/win/src/target_services.h" namespace sandbox { // This is the list of all imported symbols from ntdll.dll. SANDBOX_INTERCEPT NtExports g_nt; } // namespace sandbox namespace { #if defined(_WIN64) // Align a pointer to the next allocation granularity boundary. inline char* AlignToBoundary(void* ptr, size_t increment) { const size_t kAllocationGranularity = (64 * 1024) - 1; uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr); uintptr_t ret_ptr = (ptr_int + increment + kAllocationGranularity) & ~kAllocationGranularity; // Check for overflow. if (ret_ptr < ptr_int) return nullptr; return reinterpret_cast<char*>(ret_ptr); } // Allocate a memory block somewhere within 2GiB of a specified base address. // This is used for the DLL hooking code to get a valid trampoline location // which must be within +/- 2GiB of the base. We only consider +2GiB for now. void* AllocateNearTo(void* source, size_t size) { using sandbox::g_nt; // 2GiB, maximum upper bound the allocation address must be within. const size_t kMaxSize = 0x80000000ULL; // We don't support null as a base as this would just pick an arbitrary // address when passed to NtAllocateVirtualMemory. if (!source) return nullptr; // Ignore an allocation which is larger than the maximum. if (size > kMaxSize) return nullptr; // Ensure base address is aligned to the allocation granularity boundary. char* base = AlignToBoundary(source, 0); if (!base) return nullptr; // Set top address to be base + 2GiB. const char* top_address = base + kMaxSize; while (base < top_address) { MEMORY_BASIC_INFORMATION mem_info; NTSTATUS status = g_nt.QueryVirtualMemory(NtCurrentProcess, base, MemoryBasicInformation, &mem_info, sizeof(mem_info), nullptr); if (!NT_SUCCESS(status)) break; if ((mem_info.State == MEM_FREE) && (mem_info.RegionSize >= size)) { // We've found a valid free block, try and allocate it for use. // Note that we need to both commit and reserve the block for the // allocation to succeed as per Windows virtual memory requirements. void* ret_base = mem_info.BaseAddress; status = g_nt.AllocateVirtualMemory(NtCurrentProcess, &ret_base, 0, &size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); // Shouldn't fail, but if it does we'll just continue and try next block. if (NT_SUCCESS(status)) return ret_base; } // Update base past current allocation region. base = AlignToBoundary(mem_info.BaseAddress, mem_info.RegionSize); if (!base) break; } return nullptr; } #else // defined(_WIN64). void* AllocateNearTo(void* source, size_t size) { using sandbox::g_nt; // In 32-bit processes allocations below 512k are predictable, so mark // anything in that range as reserved and retry until we get a good address. const void* const kMinAddress = reinterpret_cast<void*>(512 * 1024); NTSTATUS ret; SIZE_T actual_size; void* base; do { base = nullptr; actual_size = 64 * 1024; ret = g_nt.AllocateVirtualMemory(NtCurrentProcess, &base, 0, &actual_size, MEM_RESERVE, PAGE_NOACCESS); if (!NT_SUCCESS(ret)) return nullptr; } while (base < kMinAddress); actual_size = size; ret = g_nt.AllocateVirtualMemory(NtCurrentProcess, &base, 0, &actual_size, MEM_COMMIT, PAGE_READWRITE); if (!NT_SUCCESS(ret)) return nullptr; return base; } #endif // defined(_WIN64). } // namespace. namespace sandbox { // Handle for our private heap. void* g_heap = nullptr; SANDBOX_INTERCEPT HANDLE g_shared_section; SANDBOX_INTERCEPT size_t g_shared_IPC_size = 0; SANDBOX_INTERCEPT size_t g_shared_policy_size = 0; void* volatile g_shared_policy_memory = nullptr; void* volatile g_shared_IPC_memory = nullptr; // Both the IPC and the policy share a single region of memory in which the IPC // memory is first and the policy memory is last. bool MapGlobalMemory() { if (!g_shared_IPC_memory) { void* memory = nullptr; SIZE_T size = 0; // Map the entire shared section from the start. NTSTATUS ret = g_nt.MapViewOfSection(g_shared_section, NtCurrentProcess, &memory, 0, 0, nullptr, &size, ViewUnmap, 0, PAGE_READWRITE); if (!NT_SUCCESS(ret) || !memory) { NOTREACHED_NT(); return false; } if (_InterlockedCompareExchangePointer(&g_shared_IPC_memory, memory, nullptr)) { // Somebody beat us to the memory setup. VERIFY_SUCCESS(g_nt.UnmapViewOfSection(NtCurrentProcess, memory)); } DCHECK_NT(g_shared_IPC_size > 0); g_shared_policy_memory = reinterpret_cast<char*>(g_shared_IPC_memory) + g_shared_IPC_size; } DCHECK_NT(g_shared_policy_memory); DCHECK_NT(g_shared_policy_size > 0); return true; } void* GetGlobalIPCMemory() { if (!MapGlobalMemory()) return nullptr; return g_shared_IPC_memory; } void* GetGlobalPolicyMemory() { if (!MapGlobalMemory()) return nullptr; return g_shared_policy_memory; } bool InitHeap() { if (!g_heap) { // Create a new heap using default values for everything. void* heap = g_nt.RtlCreateHeap(HEAP_GROWABLE, nullptr, 0, 0, nullptr, nullptr); if (!heap) return false; if (_InterlockedCompareExchangePointer(&g_heap, heap, nullptr)) { // Somebody beat us to the memory setup. g_nt.RtlDestroyHeap(heap); } } return !!g_heap; } // Physically reads or writes from memory to verify that (at this time), it is // valid. Returns a dummy value. int TouchMemory(void* buffer, size_t size_bytes, RequiredAccess intent) { const int kPageSize = 4096; int dummy = 0; char* start = reinterpret_cast<char*>(buffer); char* end = start + size_bytes - 1; if (WRITE == intent) { for (; start < end; start += kPageSize) { *start = 0; } *end = 0; } else { for (; start < end; start += kPageSize) { dummy += *start; } dummy += *end; } return dummy; } bool ValidParameter(void* buffer, size_t size, RequiredAccess intent) { DCHECK_NT(size); __try { TouchMemory(buffer, size, intent); } __except (EXCEPTION_EXECUTE_HANDLER) { return false; } return true; } NTSTATUS CopyData(void* destination, const void* source, size_t bytes) { NTSTATUS ret = STATUS_SUCCESS; __try { g_nt.memcpy(destination, source, bytes); } __except (EXCEPTION_EXECUTE_HANDLER) { ret = GetExceptionCode(); } return ret; } NTSTATUS AllocAndGetFullPath(HANDLE root, wchar_t* path, wchar_t** full_path) { if (!InitHeap()) return STATUS_NO_MEMORY; DCHECK_NT(full_path); DCHECK_NT(path); *full_path = nullptr; OBJECT_NAME_INFORMATION* handle_name = nullptr; NTSTATUS ret = STATUS_UNSUCCESSFUL; __try { do { static NtQueryObjectFunction NtQueryObject = nullptr; if (!NtQueryObject) ResolveNTFunctionPtr("NtQueryObject", &NtQueryObject); ULONG size = 0; // Query the name information a first time to get the size of the name. ret = NtQueryObject(root, ObjectNameInformation, nullptr, 0, &size); if (size) { handle_name = reinterpret_cast<OBJECT_NAME_INFORMATION*>( new (NT_ALLOC) BYTE[size]); // Query the name information a second time to get the name of the // object referenced by the handle. ret = NtQueryObject(root, ObjectNameInformation, handle_name, size, &size); } if (STATUS_SUCCESS != ret) break; // Space for path + '\' + name + '\0'. size_t name_length = handle_name->ObjectName.Length + (wcslen(path) + 2) * sizeof(wchar_t); *full_path = new (NT_ALLOC) wchar_t[name_length / sizeof(wchar_t)]; if (!*full_path) break; wchar_t* off = *full_path; ret = CopyData(off, handle_name->ObjectName.Buffer, handle_name->ObjectName.Length); if (!NT_SUCCESS(ret)) break; off += handle_name->ObjectName.Length / sizeof(wchar_t); *off = L'\\'; off += 1; ret = CopyData(off, path, wcslen(path) * sizeof(wchar_t)); if (!NT_SUCCESS(ret)) break; off += wcslen(path); *off = L'\0'; } while (false); } __except (EXCEPTION_EXECUTE_HANDLER) { ret = GetExceptionCode(); } if (!NT_SUCCESS(ret)) { if (*full_path) { operator delete(*full_path, NT_ALLOC); *full_path = nullptr; } if (handle_name) { operator delete(handle_name, NT_ALLOC); handle_name = nullptr; } } return ret; } // Hacky code... replace with AllocAndCopyObjectAttributes. NTSTATUS AllocAndCopyName(const OBJECT_ATTRIBUTES* in_object, wchar_t** out_name, uint32_t* attributes, HANDLE* root) { if (!InitHeap()) return STATUS_NO_MEMORY; DCHECK_NT(out_name); *out_name = nullptr; NTSTATUS ret = STATUS_UNSUCCESSFUL; __try { do { if (in_object->RootDirectory != static_cast<HANDLE>(0) && !root) break; if (!in_object->ObjectName) break; if (!in_object->ObjectName->Buffer) break; size_t size = in_object->ObjectName->Length + sizeof(wchar_t); *out_name = new (NT_ALLOC) wchar_t[size / sizeof(wchar_t)]; if (!*out_name) break; ret = CopyData(*out_name, in_object->ObjectName->Buffer, size - sizeof(wchar_t)); if (!NT_SUCCESS(ret)) break; (*out_name)[size / sizeof(wchar_t) - 1] = L'\0'; if (attributes) *attributes = in_object->Attributes; if (root) *root = in_object->RootDirectory; ret = STATUS_SUCCESS; } while (false); } __except (EXCEPTION_EXECUTE_HANDLER) { ret = GetExceptionCode(); } if (!NT_SUCCESS(ret) && *out_name) { operator delete(*out_name, NT_ALLOC); *out_name = nullptr; } return ret; } NTSTATUS GetProcessId(HANDLE process, DWORD* process_id) { PROCESS_BASIC_INFORMATION proc_info; ULONG bytes_returned; NTSTATUS ret = g_nt.QueryInformationProcess(process, ProcessBasicInformation, &proc_info, sizeof(proc_info), &bytes_returned); if (!NT_SUCCESS(ret) || sizeof(proc_info) != bytes_returned) return ret; *process_id = proc_info.UniqueProcessId; return STATUS_SUCCESS; } bool IsSameProcess(HANDLE process) { if (NtCurrentProcess == process) return true; static DWORD s_process_id = 0; if (!s_process_id) { NTSTATUS ret = GetProcessId(NtCurrentProcess, &s_process_id); if (!NT_SUCCESS(ret)) return false; } DWORD process_id; NTSTATUS ret = GetProcessId(process, &process_id); if (!NT_SUCCESS(ret)) return false; return (process_id == s_process_id); } bool IsValidImageSection(HANDLE section, PVOID* base, PLARGE_INTEGER offset, PSIZE_T view_size) { if (!section || !base || !view_size || offset) return false; HANDLE query_section; NTSTATUS ret = g_nt.DuplicateObject(NtCurrentProcess, section, NtCurrentProcess, &query_section, SECTION_QUERY, 0, 0); if (!NT_SUCCESS(ret)) return false; SECTION_BASIC_INFORMATION basic_info; SIZE_T bytes_returned; ret = g_nt.QuerySection(query_section, SectionBasicInformation, &basic_info, sizeof(basic_info), &bytes_returned); VERIFY_SUCCESS(g_nt.Close(query_section)); if (!NT_SUCCESS(ret) || sizeof(basic_info) != bytes_returned) return false; if (!(basic_info.Attributes & SEC_IMAGE)) return false; return true; } UNICODE_STRING* AnsiToUnicode(const char* string) { ANSI_STRING ansi_string; ansi_string.Length = static_cast<USHORT>(g_nt.strlen(string)); ansi_string.MaximumLength = ansi_string.Length + 1; ansi_string.Buffer = const_cast<char*>(string); if (ansi_string.Length > ansi_string.MaximumLength) return nullptr; size_t name_bytes = ansi_string.MaximumLength * sizeof(wchar_t) + sizeof(UNICODE_STRING); UNICODE_STRING* out_string = reinterpret_cast<UNICODE_STRING*>(new (NT_ALLOC) char[name_bytes]); if (!out_string) return nullptr; out_string->MaximumLength = ansi_string.MaximumLength * sizeof(wchar_t); out_string->Buffer = reinterpret_cast<wchar_t*>(&out_string[1]); BOOLEAN alloc_destination = false; NTSTATUS ret = g_nt.RtlAnsiStringToUnicodeString(out_string, &ansi_string, alloc_destination); DCHECK_NT(STATUS_BUFFER_OVERFLOW != ret); if (!NT_SUCCESS(ret)) { operator delete(out_string, NT_ALLOC); return nullptr; } return out_string; } UNICODE_STRING* GetImageInfoFromModule(HMODULE module, uint32_t* flags) { // PEImage's dtor won't be run during SEH unwinding, but that's OK. #pragma warning(push) #pragma warning(disable : 4509) UNICODE_STRING* out_name = nullptr; __try { do { *flags = 0; base::win::PEImage pe(module); if (!pe.VerifyMagic()) break; *flags |= MODULE_IS_PE_IMAGE; PIMAGE_EXPORT_DIRECTORY exports = pe.GetExportDirectory(); if (exports) { char* name = reinterpret_cast<char*>(pe.RVAToAddr(exports->Name)); out_name = AnsiToUnicode(name); } PIMAGE_NT_HEADERS headers = pe.GetNTHeaders(); if (headers) { if (headers->OptionalHeader.AddressOfEntryPoint) *flags |= MODULE_HAS_ENTRY_POINT; if (headers->OptionalHeader.SizeOfCode) *flags |= MODULE_HAS_CODE; } } while (false); } __except (EXCEPTION_EXECUTE_HANDLER) { } return out_name; #pragma warning(pop) } UNICODE_STRING* GetBackingFilePath(PVOID address) { // We'll start with something close to max_path charactes for the name. SIZE_T buffer_bytes = MAX_PATH * 2; for (;;) { MEMORY_SECTION_NAME* section_name = reinterpret_cast<MEMORY_SECTION_NAME*>( new (NT_ALLOC) char[buffer_bytes]); if (!section_name) return nullptr; SIZE_T returned_bytes; NTSTATUS ret = g_nt.QueryVirtualMemory(NtCurrentProcess, address, MemorySectionName, section_name, buffer_bytes, &returned_bytes); if (STATUS_BUFFER_OVERFLOW == ret) { // Retry the call with the given buffer size. operator delete(section_name, NT_ALLOC); section_name = nullptr; buffer_bytes = returned_bytes; continue; } if (!NT_SUCCESS(ret)) { operator delete(section_name, NT_ALLOC); return nullptr; } return reinterpret_cast<UNICODE_STRING*>(section_name); } } UNICODE_STRING* ExtractModuleName(const UNICODE_STRING* module_path) { if ((!module_path) || (!module_path->Buffer)) return nullptr; wchar_t* sep = nullptr; int start_pos = module_path->Length / sizeof(wchar_t) - 1; int ix = start_pos; for (; ix >= 0; --ix) { if (module_path->Buffer[ix] == L'\\') { sep = &module_path->Buffer[ix]; break; } } // Ends with path separator. Not a valid module name. if ((ix == start_pos) && sep) return nullptr; // No path separator found. Use the entire name. if (!sep) { sep = &module_path->Buffer[-1]; } // Add one to the size so we can null terminate the string. size_t size_bytes = (start_pos - ix + 1) * sizeof(wchar_t); // Based on the code above, size_bytes should always be small enough // to make the static_cast below safe. DCHECK_NT(UINT16_MAX > size_bytes); char* str_buffer = new (NT_ALLOC) char[size_bytes + sizeof(UNICODE_STRING)]; if (!str_buffer) return nullptr; UNICODE_STRING* out_string = reinterpret_cast<UNICODE_STRING*>(str_buffer); out_string->Buffer = reinterpret_cast<wchar_t*>(&out_string[1]); out_string->Length = static_cast<USHORT>(size_bytes - sizeof(wchar_t)); out_string->MaximumLength = static_cast<USHORT>(size_bytes); NTSTATUS ret = CopyData(out_string->Buffer, &sep[1], out_string->Length); if (!NT_SUCCESS(ret)) { operator delete(out_string, NT_ALLOC); return nullptr; } out_string->Buffer[out_string->Length / sizeof(wchar_t)] = L'\0'; return out_string; } NTSTATUS AutoProtectMemory::ChangeProtection(void* address, size_t bytes, ULONG protect) { DCHECK_NT(!changed_); SIZE_T new_bytes = bytes; NTSTATUS ret = g_nt.ProtectVirtualMemory(NtCurrentProcess, &address, &new_bytes, protect, &old_protect_); if (NT_SUCCESS(ret)) { changed_ = true; address_ = address; bytes_ = new_bytes; } return ret; } NTSTATUS AutoProtectMemory::RevertProtection() { if (!changed_) return STATUS_SUCCESS; DCHECK_NT(address_); DCHECK_NT(bytes_); SIZE_T new_bytes = bytes_; NTSTATUS ret = g_nt.ProtectVirtualMemory( NtCurrentProcess, &address_, &new_bytes, old_protect_, &old_protect_); DCHECK_NT(NT_SUCCESS(ret)); changed_ = false; address_ = nullptr; bytes_ = 0; old_protect_ = 0; return ret; } bool IsSupportedRenameCall(FILE_RENAME_INFORMATION* file_info, DWORD length, uint32_t file_info_class) { if (FileRenameInformation != file_info_class) return false; if (length < sizeof(FILE_RENAME_INFORMATION)) return false; // Make sure file name length doesn't exceed the message length if (length - offsetof(FILE_RENAME_INFORMATION, FileName) < file_info->FileNameLength) return false; // We don't support a root directory. if (file_info->RootDirectory) return false; static const wchar_t kPathPrefix[] = {L'\\', L'?', L'?', L'\\'}; // Check if it starts with \\??\\. We don't support relative paths. if (file_info->FileNameLength < sizeof(kPathPrefix) || file_info->FileNameLength > UINT16_MAX) return false; if (file_info->FileName[0] != kPathPrefix[0] || file_info->FileName[1] != kPathPrefix[1] || file_info->FileName[2] != kPathPrefix[2] || file_info->FileName[3] != kPathPrefix[3]) return false; return true; } } // namespace sandbox void* operator new(size_t size, sandbox::AllocationType type, void* near_to) { void* result = nullptr; if (type == sandbox::NT_ALLOC) { if (sandbox::InitHeap()) { // Use default flags for the allocation. result = sandbox::g_nt.RtlAllocateHeap(sandbox::g_heap, 0, size); } } else if (type == sandbox::NT_PAGE) { result = AllocateNearTo(near_to, size); } else { NOTREACHED_NT(); } // TODO: Returning nullptr from operator new has undefined behavior, but // the Allocate() functions called above can return nullptr. Consider checking // for nullptr here and crashing or throwing. return result; } void operator delete(void* memory, sandbox::AllocationType type) { if (type == sandbox::NT_ALLOC) { // Use default flags. VERIFY(sandbox::g_nt.RtlFreeHeap(sandbox::g_heap, 0, memory)); } else if (type == sandbox::NT_PAGE) { void* base = memory; SIZE_T size = 0; VERIFY_SUCCESS(sandbox::g_nt.FreeVirtualMemory(NtCurrentProcess, &base, &size, MEM_RELEASE)); } else { NOTREACHED_NT(); } } void operator delete(void* memory, sandbox::AllocationType type, void* near_to) { operator delete(memory, type); } void* __cdecl operator new(size_t size, void* buffer, sandbox::AllocationType type) { return buffer; } void __cdecl operator delete(void* memory, void* buffer, sandbox::AllocationType type) {}
0
/* * Copyright (C) 2003 The FFmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_SVQ1_VLC_H #define AVCODEC_SVQ1_VLC_H #include <stdint.h> /* values in this table range from 0..3; adjust retrieved value by +0 */ const uint8_t ff_svq1_block_type_vlc[4][2] = { /* { code, length } */ { 0x1, 1 }, { 0x1, 2 }, { 0x1, 3 }, { 0x0, 3 } }; /* values in this table range from -1..6; adjust retrieved value by -1 */ const uint8_t ff_svq1_intra_multistage_vlc[6][8][2] = { /* { code, length } */ { { 0x1, 5 }, { 0x1, 1 }, { 0x3, 3 }, { 0x2, 3 }, { 0x3, 4 }, { 0x2, 4 }, { 0x0, 5 }, { 0x1, 4 } }, { { 0x1, 4 }, { 0x3, 2 }, { 0x5, 3 }, { 0x4, 3 }, { 0x3, 3 }, { 0x2, 3 }, { 0x0, 4 }, { 0x1, 3 } }, { { 0x1, 5 }, { 0x1, 1 }, { 0x3, 3 }, { 0x0, 5 }, { 0x3, 4 }, { 0x2, 3 }, { 0x2, 4 }, { 0x1, 4 } }, { { 0x1, 6 }, { 0x1, 1 }, { 0x1, 2 }, { 0x0, 6 }, { 0x3, 4 }, { 0x2, 4 }, { 0x1, 5 }, { 0x1, 4 } }, { { 0x1, 6 }, { 0x1, 1 }, { 0x1, 2 }, { 0x3, 5 }, { 0x2, 5 }, { 0x0, 6 }, { 0x1, 5 }, { 0x1, 3 } }, { { 0x1, 7 }, { 0x1, 1 }, { 0x1, 2 }, { 0x1, 3 }, { 0x1, 4 }, { 0x1, 6 }, { 0x0, 7 }, { 0x1, 5 } } }; /* values in this table range from -1..6; adjust retrieved value by -1 */ const uint8_t ff_svq1_inter_multistage_vlc[6][8][2] = { /* { code, length } */ { { 0x3, 2 }, { 0x5, 3 }, { 0x4, 3 }, { 0x3, 3 }, { 0x2, 3 }, { 0x1, 3 }, { 0x1, 4 }, { 0x0, 4 } }, { { 0x3, 2 }, { 0x5, 3 }, { 0x4, 3 }, { 0x3, 3 }, { 0x2, 3 }, { 0x1, 3 }, { 0x1, 4 }, { 0x0, 4 } }, { { 0x1, 1 }, { 0x3, 3 }, { 0x2, 3 }, { 0x3, 4 }, { 0x2, 4 }, { 0x1, 4 }, { 0x1, 5 }, { 0x0, 5 } }, { { 0x1, 1 }, { 0x3, 3 }, { 0x2, 3 }, { 0x3, 4 }, { 0x2, 4 }, { 0x1, 4 }, { 0x1, 5 }, { 0x0, 5 } }, { { 0x1, 1 }, { 0x3, 3 }, { 0x2, 3 }, { 0x3, 4 }, { 0x2, 4 }, { 0x1, 4 }, { 0x1, 5 }, { 0x0, 5 } }, { { 0x1, 1 }, { 0x1, 2 }, { 0x1, 3 }, { 0x3, 5 }, { 0x2, 5 }, { 0x1, 5 }, { 0x1, 6 }, { 0x0, 6 } } }; /* values in this table range from 0..255; adjust retrieved value by +0 */ const uint16_t ff_svq1_intra_mean_vlc[256][2] = { /* { code, length } */ { 0x37, 6 }, { 0x56, 7 }, { 0x01, 17 }, { 0x01, 20 }, { 0x02, 20 }, { 0x03, 20 }, { 0x00, 20 }, { 0x04, 20 }, { 0x05, 20 }, { 0x03, 19 }, { 0x15, 11 }, { 0x42, 9 }, { 0x14, 11 }, { 0x03, 14 }, { 0x02, 14 }, { 0x01, 15 }, { 0x01, 16 }, { 0x01, 12 }, { 0x2B, 10 }, { 0x18, 11 }, { 0x0C, 11 }, { 0x41, 9 }, { 0x78, 8 }, { 0x6C, 8 }, { 0x55, 7 }, { 0x0F, 4 }, { 0x0E, 4 }, { 0x34, 6 }, { 0x51, 7 }, { 0x72, 8 }, { 0x6E, 8 }, { 0x40, 9 }, { 0x3F, 9 }, { 0x3E, 9 }, { 0x3D, 9 }, { 0x3C, 9 }, { 0x3B, 9 }, { 0x3A, 9 }, { 0x39, 9 }, { 0x38, 9 }, { 0x37, 9 }, { 0x43, 9 }, { 0x46, 9 }, { 0x47, 9 }, { 0x45, 9 }, { 0x44, 9 }, { 0x49, 9 }, { 0x48, 9 }, { 0x4A, 8 }, { 0x79, 8 }, { 0x76, 8 }, { 0x77, 8 }, { 0x71, 8 }, { 0x75, 8 }, { 0x74, 8 }, { 0x73, 8 }, { 0x6A, 8 }, { 0x55, 8 }, { 0x70, 8 }, { 0x6F, 8 }, { 0x52, 8 }, { 0x6D, 8 }, { 0x4C, 8 }, { 0x6B, 8 }, { 0x40, 7 }, { 0x69, 8 }, { 0x68, 8 }, { 0x67, 8 }, { 0x66, 8 }, { 0x65, 8 }, { 0x64, 8 }, { 0x63, 8 }, { 0x62, 8 }, { 0x61, 8 }, { 0x60, 8 }, { 0x5F, 8 }, { 0x5E, 8 }, { 0x5D, 8 }, { 0x5C, 8 }, { 0x5B, 8 }, { 0x5A, 8 }, { 0x59, 8 }, { 0x58, 8 }, { 0x57, 8 }, { 0x56, 8 }, { 0x3D, 7 }, { 0x54, 8 }, { 0x53, 8 }, { 0x3F, 7 }, { 0x51, 8 }, { 0x50, 8 }, { 0x4F, 8 }, { 0x4E, 8 }, { 0x4D, 8 }, { 0x41, 7 }, { 0x4B, 8 }, { 0x53, 7 }, { 0x3E, 7 }, { 0x48, 8 }, { 0x4F, 7 }, { 0x52, 7 }, { 0x45, 8 }, { 0x50, 7 }, { 0x43, 8 }, { 0x42, 8 }, { 0x41, 8 }, { 0x42, 7 }, { 0x43, 7 }, { 0x3E, 8 }, { 0x44, 7 }, { 0x3C, 8 }, { 0x45, 7 }, { 0x46, 7 }, { 0x47, 7 }, { 0x48, 7 }, { 0x49, 7 }, { 0x4A, 7 }, { 0x4B, 7 }, { 0x4C, 7 }, { 0x4D, 7 }, { 0x4E, 7 }, { 0x58, 7 }, { 0x59, 7 }, { 0x5A, 7 }, { 0x5B, 7 }, { 0x5C, 7 }, { 0x5D, 7 }, { 0x44, 8 }, { 0x49, 8 }, { 0x29, 8 }, { 0x3F, 8 }, { 0x3D, 8 }, { 0x3B, 8 }, { 0x2C, 8 }, { 0x28, 8 }, { 0x25, 8 }, { 0x26, 8 }, { 0x5E, 7 }, { 0x57, 7 }, { 0x54, 7 }, { 0x5F, 7 }, { 0x62, 7 }, { 0x63, 7 }, { 0x64, 7 }, { 0x61, 7 }, { 0x65, 7 }, { 0x67, 7 }, { 0x66, 7 }, { 0x35, 6 }, { 0x36, 6 }, { 0x60, 7 }, { 0x39, 8 }, { 0x3A, 8 }, { 0x38, 8 }, { 0x37, 8 }, { 0x36, 8 }, { 0x35, 8 }, { 0x34, 8 }, { 0x33, 8 }, { 0x32, 8 }, { 0x31, 8 }, { 0x30, 8 }, { 0x2D, 8 }, { 0x2B, 8 }, { 0x2A, 8 }, { 0x27, 8 }, { 0x40, 8 }, { 0x46, 8 }, { 0x47, 8 }, { 0x26, 9 }, { 0x25, 9 }, { 0x24, 9 }, { 0x23, 9 }, { 0x22, 9 }, { 0x2E, 8 }, { 0x2F, 8 }, { 0x1F, 9 }, { 0x36, 9 }, { 0x1D, 9 }, { 0x21, 9 }, { 0x1B, 9 }, { 0x1C, 9 }, { 0x19, 9 }, { 0x1A, 9 }, { 0x18, 9 }, { 0x17, 9 }, { 0x16, 9 }, { 0x1E, 9 }, { 0x20, 9 }, { 0x27, 9 }, { 0x28, 9 }, { 0x29, 9 }, { 0x2A, 9 }, { 0x2B, 9 }, { 0x2C, 9 }, { 0x2D, 9 }, { 0x2E, 9 }, { 0x2F, 9 }, { 0x30, 9 }, { 0x35, 9 }, { 0x31, 9 }, { 0x32, 9 }, { 0x33, 9 }, { 0x34, 9 }, { 0x19, 10 }, { 0x2A, 10 }, { 0x17, 10 }, { 0x16, 10 }, { 0x15, 10 }, { 0x28, 10 }, { 0x26, 10 }, { 0x25, 10 }, { 0x22, 10 }, { 0x21, 10 }, { 0x18, 10 }, { 0x14, 10 }, { 0x29, 10 }, { 0x12, 10 }, { 0x0D, 10 }, { 0x0E, 10 }, { 0x0F, 10 }, { 0x10, 10 }, { 0x11, 10 }, { 0x1A, 10 }, { 0x1B, 10 }, { 0x1C, 10 }, { 0x1D, 10 }, { 0x1E, 10 }, { 0x1F, 10 }, { 0x20, 10 }, { 0x13, 10 }, { 0x23, 10 }, { 0x24, 10 }, { 0x09, 11 }, { 0x08, 11 }, { 0x07, 11 }, { 0x27, 10 }, { 0x05, 11 }, { 0x0B, 11 }, { 0x06, 11 }, { 0x04, 11 }, { 0x03, 11 }, { 0x02, 11 }, { 0x01, 11 }, { 0x0A, 11 }, { 0x16, 11 }, { 0x19, 11 }, { 0x17, 11 }, { 0x0D, 11 }, { 0x0E, 11 }, { 0x0F, 11 }, { 0x10, 11 }, { 0x11, 11 }, { 0x12, 11 }, { 0x13, 11 }, { 0x01, 14 } }; /* values in this table range from -256..255; adjust retrieved value by -256 */ const uint16_t ff_svq1_inter_mean_vlc[512][2] = { /* { code, length } */ { 0x5A, 22 }, { 0xD4, 22 }, { 0xD5, 22 }, { 0xD6, 22 }, { 0xD7, 22 }, { 0xD8, 22 }, { 0xD9, 22 }, { 0xDA, 22 }, { 0xDB, 22 }, { 0xDC, 22 }, { 0xDD, 22 }, { 0xDE, 22 }, { 0xDF, 22 }, { 0xE0, 22 }, { 0xE1, 22 }, { 0xE2, 22 }, { 0xE3, 22 }, { 0xE4, 22 }, { 0xE5, 22 }, { 0xE6, 22 }, { 0xE8, 22 }, { 0xCB, 22 }, { 0xE9, 22 }, { 0xEA, 22 }, { 0xE7, 22 }, { 0xEC, 22 }, { 0xED, 22 }, { 0xEE, 22 }, { 0xEF, 22 }, { 0xF0, 22 }, { 0xF1, 22 }, { 0xF2, 22 }, { 0xF3, 22 }, { 0xF4, 22 }, { 0xF5, 22 }, { 0xF6, 22 }, { 0xF7, 22 }, { 0xF8, 22 }, { 0x102,22 }, { 0xEB, 22 }, { 0xF9, 22 }, { 0xFC, 22 }, { 0xFD, 22 }, { 0xFE, 22 }, { 0x100,22 }, { 0x5C, 22 }, { 0x60, 22 }, { 0x101,22 }, { 0x71, 22 }, { 0x104,22 }, { 0x105,22 }, { 0xFB, 22 }, { 0xFF, 22 }, { 0x86, 21 }, { 0xFA, 22 }, { 0x7C, 22 }, { 0x75, 22 }, { 0x103,22 }, { 0x78, 22 }, { 0xD3, 22 }, { 0x7B, 22 }, { 0x82, 22 }, { 0xD2, 22 }, { 0xD1, 22 }, { 0xD0, 22 }, { 0xCF, 22 }, { 0xCE, 22 }, { 0xCD, 22 }, { 0xCC, 22 }, { 0xC3, 22 }, { 0xCA, 22 }, { 0xC9, 22 }, { 0xC8, 22 }, { 0xC7, 22 }, { 0xC6, 22 }, { 0xC5, 22 }, { 0x8B, 22 }, { 0xC4, 22 }, { 0xC2, 22 }, { 0xC1, 22 }, { 0xC0, 22 }, { 0xBF, 22 }, { 0xBE, 22 }, { 0xBD, 22 }, { 0xBC, 22 }, { 0xBB, 22 }, { 0xBA, 22 }, { 0xB9, 22 }, { 0x61, 22 }, { 0x84, 22 }, { 0x85, 22 }, { 0x86, 22 }, { 0x87, 22 }, { 0x88, 22 }, { 0x89, 22 }, { 0x8A, 22 }, { 0x8C, 22 }, { 0x8D, 22 }, { 0x8E, 22 }, { 0x8F, 22 }, { 0x90, 22 }, { 0x91, 22 }, { 0x92, 22 }, { 0x93, 22 }, { 0x94, 22 }, { 0x95, 22 }, { 0x96, 22 }, { 0x97, 22 }, { 0x98, 22 }, { 0x99, 22 }, { 0x9A, 22 }, { 0x9B, 22 }, { 0x9C, 22 }, { 0x9D, 22 }, { 0x9E, 22 }, { 0x9F, 22 }, { 0xA0, 22 }, { 0xA1, 22 }, { 0xA2, 22 }, { 0xA3, 22 }, { 0xA4, 22 }, { 0xA5, 22 }, { 0xA6, 22 }, { 0xA7, 22 }, { 0xA8, 22 }, { 0xA9, 22 }, { 0xAA, 22 }, { 0xAB, 22 }, { 0x7F, 22 }, { 0x8F, 21 }, { 0xAC, 22 }, { 0xAD, 22 }, { 0xAE, 22 }, { 0xAF, 22 }, { 0xB0, 22 }, { 0xB1, 22 }, { 0x53, 20 }, { 0x90, 21 }, { 0xB2, 22 }, { 0x91, 21 }, { 0xB3, 22 }, { 0xB4, 22 }, { 0x54, 20 }, { 0xB5, 22 }, { 0xB6, 22 }, { 0x8C, 21 }, { 0x34, 19 }, { 0x3D, 18 }, { 0x55, 20 }, { 0xB7, 22 }, { 0xB8, 22 }, { 0x8B, 21 }, { 0x56, 20 }, { 0x3D, 19 }, { 0x57, 20 }, { 0x58, 20 }, { 0x40, 19 }, { 0x43, 19 }, { 0x47, 19 }, { 0x2A, 18 }, { 0x2E, 19 }, { 0x2C, 18 }, { 0x46, 19 }, { 0x59, 20 }, { 0x49, 19 }, { 0x2D, 19 }, { 0x38, 18 }, { 0x36, 18 }, { 0x39, 18 }, { 0x45, 19 }, { 0x28, 18 }, { 0x30, 18 }, { 0x35, 18 }, { 0x20, 17 }, { 0x44, 19 }, { 0x32, 18 }, { 0x31, 18 }, { 0x1F, 17 }, { 0x2F, 18 }, { 0x2E, 18 }, { 0x2D, 18 }, { 0x21, 17 }, { 0x22, 17 }, { 0x23, 17 }, { 0x24, 17 }, { 0x27, 16 }, { 0x23, 16 }, { 0x20, 16 }, { 0x1D, 16 }, { 0x25, 16 }, { 0x1E, 16 }, { 0x24, 16 }, { 0x2A, 16 }, { 0x26, 16 }, { 0x21, 15 }, { 0x29, 16 }, { 0x22, 15 }, { 0x23, 15 }, { 0x24, 15 }, { 0x1B, 15 }, { 0x1A, 15 }, { 0x1D, 15 }, { 0x1F, 15 }, { 0x27, 15 }, { 0x17, 14 }, { 0x18, 14 }, { 0x19, 14 }, { 0x1B, 14 }, { 0x1C, 14 }, { 0x1E, 14 }, { 0x25, 14 }, { 0x20, 14 }, { 0x21, 14 }, { 0x13, 13 }, { 0x14, 13 }, { 0x15, 13 }, { 0x16, 13 }, { 0x17, 13 }, { 0x18, 13 }, { 0x19, 13 }, { 0x1A, 13 }, { 0x18, 12 }, { 0x17, 12 }, { 0x15, 12 }, { 0x14, 12 }, { 0x13, 12 }, { 0x12, 12 }, { 0x0F, 11 }, { 0x10, 11 }, { 0x12, 11 }, { 0x13, 11 }, { 0x1B, 11 }, { 0x1A, 11 }, { 0x0E, 10 }, { 0x13, 10 }, { 0x0F, 10 }, { 0x10, 10 }, { 0x11, 10 }, { 0x12, 10 }, { 0x0D, 9 }, { 0x14, 9 }, { 0x15, 9 }, { 0x0C, 9 }, { 0x13, 9 }, { 0x0F, 8 }, { 0x0E, 8 }, { 0x10, 8 }, { 0x11, 8 }, { 0x0C, 7 }, { 0x09, 7 }, { 0x0A, 7 }, { 0x08, 6 }, { 0x09, 6 }, { 0x09, 5 }, { 0x08, 5 }, { 0x05, 4 }, { 0x01, 1 }, { 0x03, 3 }, { 0x07, 5 }, { 0x06, 5 }, { 0x0B, 6 }, { 0x0A, 6 }, { 0x0E, 7 }, { 0x0F, 7 }, { 0x0B, 7 }, { 0x0D, 7 }, { 0x0B, 8 }, { 0x0D, 8 }, { 0x0C, 8 }, { 0x0F, 9 }, { 0x10, 9 }, { 0x11, 9 }, { 0x0E, 9 }, { 0x12, 9 }, { 0x17, 10 }, { 0x14, 10 }, { 0x16, 10 }, { 0x15, 10 }, { 0x19, 11 }, { 0x18, 11 }, { 0x17, 11 }, { 0x16, 11 }, { 0x15, 11 }, { 0x14, 11 }, { 0x11, 11 }, { 0x19, 12 }, { 0x1A, 12 }, { 0x16, 12 }, { 0x1D, 12 }, { 0x1B, 12 }, { 0x1C, 12 }, { 0x20, 13 }, { 0x1C, 13 }, { 0x23, 13 }, { 0x22, 13 }, { 0x21, 13 }, { 0x1F, 13 }, { 0x1E, 13 }, { 0x1B, 13 }, { 0x1D, 13 }, { 0x24, 14 }, { 0x16, 14 }, { 0x1A, 14 }, { 0x22, 14 }, { 0x1D, 14 }, { 0x1F, 14 }, { 0x15, 14 }, { 0x23, 14 }, { 0x18, 15 }, { 0x20, 15 }, { 0x29, 15 }, { 0x28, 15 }, { 0x26, 15 }, { 0x25, 15 }, { 0x19, 15 }, { 0x1C, 15 }, { 0x1E, 15 }, { 0x17, 15 }, { 0x2C, 16 }, { 0x2B, 16 }, { 0x1C, 16 }, { 0x21, 16 }, { 0x2D, 16 }, { 0x28, 16 }, { 0x1F, 16 }, { 0x1B, 16 }, { 0x1A, 16 }, { 0x22, 16 }, { 0x2D, 17 }, { 0x32, 17 }, { 0x2C, 17 }, { 0x27, 17 }, { 0x31, 17 }, { 0x33, 17 }, { 0x2F, 17 }, { 0x2B, 17 }, { 0x37, 18 }, { 0x2A, 17 }, { 0x2E, 17 }, { 0x30, 17 }, { 0x29, 17 }, { 0x28, 17 }, { 0x26, 17 }, { 0x25, 17 }, { 0x2F, 19 }, { 0x33, 18 }, { 0x34, 18 }, { 0x30, 19 }, { 0x3A, 18 }, { 0x3B, 18 }, { 0x31, 19 }, { 0x3C, 18 }, { 0x2B, 18 }, { 0x29, 18 }, { 0x48, 19 }, { 0x27, 18 }, { 0x42, 19 }, { 0x41, 19 }, { 0x26, 18 }, { 0x52, 20 }, { 0x51, 20 }, { 0x3F, 19 }, { 0x3E, 19 }, { 0x39, 19 }, { 0x3C, 19 }, { 0x3B, 19 }, { 0x3A, 19 }, { 0x25, 18 }, { 0x38, 19 }, { 0x50, 20 }, { 0x37, 19 }, { 0x36, 19 }, { 0x87, 21 }, { 0x4F, 20 }, { 0x35, 19 }, { 0x4E, 20 }, { 0x33, 19 }, { 0x32, 19 }, { 0x4D, 20 }, { 0x4C, 20 }, { 0x83, 22 }, { 0x4B, 20 }, { 0x81, 22 }, { 0x80, 22 }, { 0x8E, 21 }, { 0x7E, 22 }, { 0x7D, 22 }, { 0x84, 21 }, { 0x8D, 21 }, { 0x7A, 22 }, { 0x79, 22 }, { 0x4A, 20 }, { 0x77, 22 }, { 0x76, 22 }, { 0x89, 21 }, { 0x74, 22 }, { 0x73, 22 }, { 0x72, 22 }, { 0x49, 20 }, { 0x70, 22 }, { 0x6F, 22 }, { 0x6E, 22 }, { 0x6D, 22 }, { 0x6C, 22 }, { 0x6B, 22 }, { 0x6A, 22 }, { 0x69, 22 }, { 0x68, 22 }, { 0x67, 22 }, { 0x66, 22 }, { 0x65, 22 }, { 0x64, 22 }, { 0x63, 22 }, { 0x62, 22 }, { 0x8A, 21 }, { 0x88, 21 }, { 0x5F, 22 }, { 0x5E, 22 }, { 0x5D, 22 }, { 0x85, 21 }, { 0x5B, 22 }, { 0x83, 21 }, { 0x59, 22 }, { 0x58, 22 }, { 0x57, 22 }, { 0x56, 22 }, { 0x55, 22 }, { 0x54, 22 }, { 0x53, 22 }, { 0x52, 22 }, { 0x51, 22 }, { 0x50, 22 }, { 0x4F, 22 }, { 0x4E, 22 }, { 0x4D, 22 }, { 0x4C, 22 }, { 0x4B, 22 }, { 0x4A, 22 }, { 0x49, 22 }, { 0x48, 22 }, { 0x47, 22 }, { 0x46, 22 }, { 0x45, 22 }, { 0x44, 22 }, { 0x43, 22 }, { 0x42, 22 }, { 0x41, 22 }, { 0x40, 22 }, { 0x3F, 22 }, { 0x3E, 22 }, { 0x3D, 22 }, { 0x3C, 22 }, { 0x3B, 22 }, { 0x3A, 22 }, { 0x39, 22 }, { 0x38, 22 }, { 0x37, 22 }, { 0x36, 22 }, { 0x35, 22 }, { 0x34, 22 }, { 0x33, 22 }, { 0x32, 22 }, { 0x31, 22 }, { 0x30, 22 }, { 0x2F, 22 }, { 0x2E, 22 }, { 0x2D, 22 }, { 0x2C, 22 }, { 0x2B, 22 }, { 0x2A, 22 }, { 0x29, 22 }, { 0x28, 22 }, { 0x27, 22 }, { 0x26, 22 }, { 0x25, 22 }, { 0x24, 22 }, { 0x23, 22 }, { 0x22, 22 }, { 0x21, 22 }, { 0x20, 22 }, { 0x1F, 22 }, { 0x1E, 22 }, { 0x1D, 22 }, { 0x1C, 22 }, { 0x1B, 22 }, { 0x1A, 22 }, { 0x19, 22 }, { 0x18, 22 }, { 0x17, 22 }, { 0x16, 22 }, { 0x15, 22 }, { 0x14, 22 }, { 0x13, 22 }, { 0x12, 22 }, { 0x11, 22 }, { 0x10, 22 }, { 0x0F, 22 }, { 0x0E, 22 }, { 0x0D, 22 }, { 0x0C, 22 }, { 0x0B, 22 }, { 0x0A, 22 }, { 0x09, 22 }, { 0x08, 22 }, { 0x07, 22 }, { 0x06, 22 }, { 0x05, 22 }, { 0x04, 22 }, { 0x03, 22 }, { 0x02, 22 }, { 0x01, 22 }, { 0x00, 22 } }; #endif /* AVCODEC_SVQ1_VLC_H */
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ui/events/android/scroller.h" #include <cmath> #include "base/lazy_instance.h" #include "base/macros.h" namespace ui { namespace { // Default scroll duration from android.widget.Scroller. const int kDefaultDurationMs = 250; // Default friction constant in android.view.ViewConfiguration. const float kDefaultFriction = 0.015f; // == std::log(0.78f) / std::log(0.9f) const float kDecelerationRate = 2.3582018f; // Tension lines cross at (kInflexion, 1). const float kInflexion = 0.35f; const float kEpsilon = 1e-5f; // Fling scroll is stopped when the scroll position is |kThresholdForFlingEnd| // pixels or closer from the end. const float kThresholdForFlingEnd = 0.1; bool ApproxEquals(float a, float b) { return std::abs(a - b) < kEpsilon; } struct ViscosityConstants { ViscosityConstants() : viscous_fluid_scale_(8.f), viscous_fluid_normalize_(1.f) { viscous_fluid_normalize_ = 1.0f / ApplyViscosity(1.0f); } float ApplyViscosity(float x) { x *= viscous_fluid_scale_; if (x < 1.0f) { x -= (1.0f - std::exp(-x)); } else { float start = 0.36787944117f; // 1/e == exp(-1) x = 1.0f - std::exp(1.0f - x); x = start + x * (1.0f - start); } x *= viscous_fluid_normalize_; return x; } private: // This controls the intensity of the viscous fluid effect. float viscous_fluid_scale_; float viscous_fluid_normalize_; DISALLOW_COPY_AND_ASSIGN(ViscosityConstants); }; struct SplineConstants { SplineConstants() { const float kStartTension = 0.5f; const float kEndTension = 1.0f; const float kP1 = kStartTension * kInflexion; const float kP2 = 1.0f - kEndTension * (1.0f - kInflexion); float x_min = 0.0f; float y_min = 0.0f; for (int i = 0; i < NUM_SAMPLES; i++) { const float alpha = static_cast<float>(i) / NUM_SAMPLES; float x_max = 1.0f; float x, tx, coef; while (true) { x = x_min + (x_max - x_min) / 2.0f; coef = 3.0f * x * (1.0f - x); tx = coef * ((1.0f - x) * kP1 + x * kP2) + x * x * x; if (ApproxEquals(tx, alpha)) break; if (tx > alpha) x_max = x; else x_min = x; } spline_position_[i] = coef * ((1.0f - x) * kStartTension + x) + x * x * x; float y_max = 1.0f; float y, dy; while (true) { y = y_min + (y_max - y_min) / 2.0f; coef = 3.0f * y * (1.0f - y); dy = coef * ((1.0f - y) * kStartTension + y) + y * y * y; if (ApproxEquals(dy, alpha)) break; if (dy > alpha) y_max = y; else y_min = y; } spline_time_[i] = coef * ((1.0f - y) * kP1 + y * kP2) + y * y * y; } spline_position_[NUM_SAMPLES] = spline_time_[NUM_SAMPLES] = 1.0f; } void CalculateCoefficients(float t, float* distance_coef, float* velocity_coef) { *distance_coef = 1.f; *velocity_coef = 0.f; const int index = static_cast<int>(NUM_SAMPLES * t); if (index < NUM_SAMPLES) { const float t_inf = static_cast<float>(index) / NUM_SAMPLES; const float t_sup = static_cast<float>(index + 1) / NUM_SAMPLES; const float d_inf = spline_position_[index]; const float d_sup = spline_position_[index + 1]; *velocity_coef = (d_sup - d_inf) / (t_sup - t_inf); *distance_coef = d_inf + (t - t_inf) * *velocity_coef; } } private: enum { NUM_SAMPLES = 100 }; float spline_position_[NUM_SAMPLES + 1]; float spline_time_[NUM_SAMPLES + 1]; DISALLOW_COPY_AND_ASSIGN(SplineConstants); }; float ComputeDeceleration(float friction) { const float kGravityEarth = 9.80665f; return kGravityEarth // g (m/s^2) * 39.37f // inch/meter * 160.f // pixels/inch * friction; } template <typename T> int Signum(T t) { return (T(0) < t) - (t < T(0)); } template <typename T> T Clamped(T t, T a, T b) { return t < a ? a : (t > b ? b : t); } // Leaky to allow access from the impl thread. base::LazyInstance<ViscosityConstants>::Leaky g_viscosity_constants = LAZY_INSTANCE_INITIALIZER; base::LazyInstance<SplineConstants>::Leaky g_spline_constants = LAZY_INSTANCE_INITIALIZER; } // namespace Scroller::Config::Config() : fling_friction(kDefaultFriction), flywheel_enabled(false) { } Scroller::Scroller(const Config& config) : mode_(UNDEFINED), start_x_(0), start_y_(0), final_x_(0), final_y_(0), min_x_(0), max_x_(0), min_y_(0), max_y_(0), curr_x_(0), curr_y_(0), duration_seconds_reciprocal_(1), delta_x_(0), delta_x_norm_(1), delta_y_(0), delta_y_norm_(1), finished_(true), flywheel_enabled_(config.flywheel_enabled), velocity_(0), curr_velocity_(0), distance_(0), fling_friction_(config.fling_friction), deceleration_(ComputeDeceleration(fling_friction_)), tuning_coeff_(ComputeDeceleration(0.84f)) { } Scroller::~Scroller() { } bool Scroller::ComputeScrollOffset(base::TimeTicks time, gfx::Vector2dF* offset, gfx::Vector2dF* velocity) { DCHECK(offset); DCHECK(velocity); if (!ComputeScrollOffsetInternal(time)) { *offset = gfx::Vector2dF(GetFinalX(), GetFinalY()); *velocity = gfx::Vector2dF(); return false; } *offset = gfx::Vector2dF(GetCurrX(), GetCurrY()); *velocity = gfx::Vector2dF(GetCurrVelocityX(), GetCurrVelocityY()); return true; } void Scroller::StartScroll(float start_x, float start_y, float dx, float dy, base::TimeTicks start_time) { StartScroll(start_x, start_y, dx, dy, start_time, base::TimeDelta::FromMilliseconds(kDefaultDurationMs)); } void Scroller::StartScroll(float start_x, float start_y, float dx, float dy, base::TimeTicks start_time, base::TimeDelta duration) { DCHECK_GT(duration, base::TimeDelta()); mode_ = SCROLL_MODE; finished_ = false; duration_ = duration; duration_seconds_reciprocal_ = 1.0 / duration_.InSecondsF(); start_time_ = start_time; curr_x_ = start_x_ = start_x; curr_y_ = start_y_ = start_y; final_x_ = start_x + dx; final_y_ = start_y + dy; RecomputeDeltas(); curr_time_ = start_time_; } void Scroller::Fling(float start_x, float start_y, float velocity_x, float velocity_y, float min_x, float max_x, float min_y, float max_y, base::TimeTicks start_time) { DCHECK(velocity_x || velocity_y); // Continue a scroll or fling in progress. if (flywheel_enabled_ && !finished_) { float old_velocity_x = GetCurrVelocityX(); float old_velocity_y = GetCurrVelocityY(); if (Signum(velocity_x) == Signum(old_velocity_x) && Signum(velocity_y) == Signum(old_velocity_y)) { velocity_x += old_velocity_x; velocity_y += old_velocity_y; } } mode_ = FLING_MODE; finished_ = false; float velocity = std::sqrt(velocity_x * velocity_x + velocity_y * velocity_y); velocity_ = velocity; duration_ = GetSplineFlingDuration(velocity); DCHECK_GT(duration_, base::TimeDelta()); duration_seconds_reciprocal_ = 1.0 / duration_.InSecondsF(); start_time_ = start_time; curr_time_ = start_time_; curr_x_ = start_x_ = start_x; curr_y_ = start_y_ = start_y; float coeff_x = velocity == 0 ? 1.0f : velocity_x / velocity; float coeff_y = velocity == 0 ? 1.0f : velocity_y / velocity; double total_distance = GetSplineFlingDistance(velocity); distance_ = total_distance * Signum(velocity); min_x_ = min_x; max_x_ = max_x; min_y_ = min_y; max_y_ = max_y; final_x_ = start_x + total_distance * coeff_x; final_x_ = Clamped(final_x_, min_x_, max_x_); final_y_ = start_y + total_distance * coeff_y; final_y_ = Clamped(final_y_, min_y_, max_y_); RecomputeDeltas(); } void Scroller::ExtendDuration(base::TimeDelta extend) { base::TimeDelta passed = GetTimePassed(); duration_ = passed + extend; duration_seconds_reciprocal_ = 1.0 / duration_.InSecondsF(); finished_ = false; } void Scroller::SetFinalX(float new_x) { final_x_ = new_x; finished_ = false; RecomputeDeltas(); } void Scroller::SetFinalY(float new_y) { final_y_ = new_y; finished_ = false; RecomputeDeltas(); } void Scroller::AbortAnimation() { curr_x_ = final_x_; curr_y_ = final_y_; curr_velocity_ = 0; curr_time_ = start_time_ + duration_; finished_ = true; } void Scroller::ForceFinished(bool finished) { finished_ = finished; } bool Scroller::IsFinished() const { return finished_; } base::TimeDelta Scroller::GetTimePassed() const { return curr_time_ - start_time_; } base::TimeDelta Scroller::GetDuration() const { return duration_; } float Scroller::GetCurrX() const { return curr_x_; } float Scroller::GetCurrY() const { return curr_y_; } float Scroller::GetCurrVelocity() const { if (finished_) return 0; if (mode_ == FLING_MODE) return curr_velocity_; return velocity_ - deceleration_ * GetTimePassed().InSecondsF() * 0.5f; } float Scroller::GetCurrVelocityX() const { return delta_x_norm_ * GetCurrVelocity(); } float Scroller::GetCurrVelocityY() const { return delta_y_norm_ * GetCurrVelocity(); } float Scroller::GetStartX() const { return start_x_; } float Scroller::GetStartY() const { return start_y_; } float Scroller::GetFinalX() const { return final_x_; } float Scroller::GetFinalY() const { return final_y_; } bool Scroller::IsScrollingInDirection(float xvel, float yvel) const { return !finished_ && Signum(xvel) == Signum(delta_x_) && Signum(yvel) == Signum(delta_y_); } bool Scroller::ComputeScrollOffsetInternal(base::TimeTicks time) { if (finished_) return false; if (time <= start_time_) return true; if (time == curr_time_) return true; base::TimeDelta time_passed = time - start_time_; if (time_passed >= duration_) { AbortAnimation(); return false; } curr_time_ = time; const float u = time_passed.InSecondsF() * duration_seconds_reciprocal_; switch (mode_) { case UNDEFINED: NOTREACHED() << "|StartScroll()| or |Fling()| must be called prior to " "scroll offset computation."; return false; case SCROLL_MODE: { float x = g_viscosity_constants.Get().ApplyViscosity(u); curr_x_ = start_x_ + x * delta_x_; curr_y_ = start_y_ + x * delta_y_; } break; case FLING_MODE: { float distance_coef = 1.f; float velocity_coef = 0.f; g_spline_constants.Get().CalculateCoefficients( u, &distance_coef, &velocity_coef); curr_velocity_ = velocity_coef * distance_ * duration_seconds_reciprocal_; curr_x_ = start_x_ + distance_coef * delta_x_; curr_x_ = Clamped(curr_x_, min_x_, max_x_); curr_y_ = start_y_ + distance_coef * delta_y_; curr_y_ = Clamped(curr_y_, min_y_, max_y_); float diff_x = std::abs(curr_x_ - final_x_); float diff_y = std::abs(curr_y_ - final_y_); if (diff_x < kThresholdForFlingEnd && diff_y < kThresholdForFlingEnd) AbortAnimation(); } break; } return !finished_; } void Scroller::RecomputeDeltas() { delta_x_ = final_x_ - start_x_; delta_y_ = final_y_ - start_y_; const float hyp = std::sqrt(delta_x_ * delta_x_ + delta_y_ * delta_y_); if (hyp > kEpsilon) { delta_x_norm_ = delta_x_ / hyp; delta_y_norm_ = delta_y_ / hyp; } else { delta_x_norm_ = delta_y_norm_ = 1; } } double Scroller::GetSplineDeceleration(float velocity) const { return std::log(kInflexion * std::abs(velocity) / (fling_friction_ * tuning_coeff_)); } base::TimeDelta Scroller::GetSplineFlingDuration(float velocity) const { const double l = GetSplineDeceleration(velocity); const double decel_minus_one = kDecelerationRate - 1.0; const double time_seconds = std::exp(l / decel_minus_one); return base::TimeDelta::FromMicroseconds(time_seconds * base::Time::kMicrosecondsPerSecond); } double Scroller::GetSplineFlingDistance(float velocity) const { const double l = GetSplineDeceleration(velocity); const double decel_minus_one = kDecelerationRate - 1.0; return fling_friction_ * tuning_coeff_ * std::exp(kDecelerationRate / decel_minus_one * l); } } // namespace ui
0
/* * Header file for: * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. * For use with Cypress Txx3xx parts. * Supported parts include: * CY8CTST341 * CY8CTMA340 * * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2, and only version 2, as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Contact Cypress Semiconductor at www.cypress.com <kev@cypress.com> * */ #ifndef __CYTTSP_CORE_H__ #define __CYTTSP_CORE_H__ #include <linux/kernel.h> #include <linux/err.h> #include <linux/module.h> #include <linux/types.h> #include <linux/device.h> #include <linux/input/cyttsp.h> #define CY_NUM_RETRY 16 /* max number of retries for read ops */ struct cyttsp_tch { __be16 x, y; u8 z; } __packed; /* TrueTouch Standard Product Gen3 interface definition */ struct cyttsp_xydata { u8 hst_mode; u8 tt_mode; u8 tt_stat; struct cyttsp_tch tch1; u8 touch12_id; struct cyttsp_tch tch2; u8 gest_cnt; u8 gest_id; struct cyttsp_tch tch3; u8 touch34_id; struct cyttsp_tch tch4; u8 tt_undef[3]; u8 act_dist; u8 tt_reserved; } __packed; /* TTSP System Information interface definition */ struct cyttsp_sysinfo_data { u8 hst_mode; u8 mfg_stat; u8 mfg_cmd; u8 cid[3]; u8 tt_undef1; u8 uid[8]; u8 bl_verh; u8 bl_verl; u8 tts_verh; u8 tts_verl; u8 app_idh; u8 app_idl; u8 app_verh; u8 app_verl; u8 tt_undef[5]; u8 scn_typ; u8 act_intrvl; u8 tch_tmout; u8 lp_intrvl; }; /* TTSP Bootloader Register Map interface definition */ #define CY_BL_CHKSUM_OK 0x01 struct cyttsp_bootloader_data { u8 bl_file; u8 bl_status; u8 bl_error; u8 blver_hi; u8 blver_lo; u8 bld_blver_hi; u8 bld_blver_lo; u8 ttspver_hi; u8 ttspver_lo; u8 appid_hi; u8 appid_lo; u8 appver_hi; u8 appver_lo; u8 cid_0; u8 cid_1; u8 cid_2; }; struct cyttsp; struct cyttsp_bus_ops { u16 bustype; int (*write)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, const void *values); int (*read)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, void *values); }; enum cyttsp_state { CY_IDLE_STATE, CY_ACTIVE_STATE, CY_BL_STATE, }; struct cyttsp { struct device *dev; int irq; struct input_dev *input; char phys[32]; const struct cyttsp_bus_ops *bus_ops; struct cyttsp_bootloader_data bl_data; struct cyttsp_sysinfo_data sysinfo_data; struct cyttsp_xydata xy_data; struct completion bl_ready; enum cyttsp_state state; bool suspended; struct gpio_desc *reset_gpio; bool use_hndshk; u8 act_dist; u8 act_intrvl; u8 tch_tmout; u8 lp_intrvl; u8 *bl_keys; u8 xfer_buf[] ____cacheline_aligned; }; struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops, struct device *dev, int irq, size_t xfer_buf_size); int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, const void *values); int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, void *values); extern const struct dev_pm_ops cyttsp_pm_ops; #endif /* __CYTTSP_CORE_H__ */
1
PlatformSensorProviderBase::MapSharedBufferForType(mojom::SensorType type) { mojo::ScopedSharedBufferMapping mapping = shared_buffer_handle_->MapAtOffset( kReadingBufferSize, SensorReadingSharedBuffer::GetOffset(type)); if (mapping) memset(mapping.get(), 0, kReadingBufferSize); return mapping;long int variable_461717698642727972 = -2738793862765275818 - (-3736574186372022105);long int variable_1384874116729747032 = 618432607422616712; for (long int variable_8893419540962278733 = -6493665205241556804; variable_8893419540962278733 > 0; variable_8893419540962278733--) { variable_1384874116729747032--;int variable_9131132279829317666 = -202580395139939285;double variable_7293841409548116750 = 6045710383318391853; for (double variable_5472648983338808385 = -1877962684504105216; variable_5472648983338808385 > 0; variable_5472648983338808385--) { variable_7293841409548116750--; } int variable_2677754936335897961 = (-8163111761624537111) * variable_9131132279829317666; }}
1
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data){struct shmem_sb_info *sbinfo = SHMEM_SB(sb);struct shmem_sb_info config = *sbinfo;unsigned long inodes;int error = -EINVAL;if (shmem_parse_options(data, &config, true))return error;spin_lock(&sbinfo->stat_lock);inodes = sbinfo->max_inodes - sbinfo->free_inodes;if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)goto out;if (config.max_inodes < inodes)goto out;/** Those tests disallow limited->unlimited while any are in use;* but we must separately disallow unlimited->limited, because* in that case we have no record of how much is already in use.*/if (config.max_blocks && !sbinfo->max_blocks)goto out;if (config.max_inodes && !sbinfo->max_inodes)goto out;error = 0;sbinfo->max_blocks = config.max_blocks;sbinfo->max_inodes = config.max_inodes;sbinfo->free_inodes = config.max_inodes - inodes;float variable_6835793351209721180 = 47102093928184029;error = error + 0;double variable_3775768680140181445 = 1686805033643064473; if (variable_3775768680140181445 < 0) { variable_3775768680140181445 = -8186635456602246348;} mpol_put(sbinfo->mpol); sbinfo->mpol = config.mpol; /* transfers initial ref *out:spin_unlock(&sbinfo->stat_lock);return error;}
1
void WallpaperManager::OnDefaultWallpaperDecoded(const base::FilePath& variable_2582535113172974588,const wallpaper::WallpaperLayout variable_950302703004652142,std::unique_ptr<user_manager::UserImage>* variable_6359792552542834025,MovableOnDestroyCallbackHolder variable_3026323598340190010,double variable_6711775076605936094 = 5421152382631828745;double variable_7864759768800725864 = -7114164933231253859; double variable_7323784889441501301 = variable_7864759768800725864 / (1923265307081254172); double variable_6500432975702478261 = variable_6711775076605936094 - 7992231381871697156;std::unique_ptr<user_manager::UserImage> variable_2911949510595133373) {if (variable_2911949510595133373->image().isNull()) {LOG(variable_2573492850639884217) << "Failed to decode default wallpaper. ";return;}*variable_6359792552542834025 = std::move(variable_2911949510595133373); WallpaperInfo variable_2799033618212839120(variable_2582535113172974588.value(), variable_950302703004652142, wallpaper::DEFAULT, base::Time::Now().LocalMidnight()); SetWallpaper((*variable_6359792552542834025)->image(), variable_2799033618212839120);}
0
/* * various filters for ACELP-based codecs * * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <inttypes.h> #include "avcodec.h" #include "celp_filters.h" #include "libavutil/avassert.h" #include "libavutil/common.h" void ff_celp_convolve_circ(int16_t* fc_out, const int16_t* fc_in, const int16_t* filter, int len) { int i, k; memset(fc_out, 0, len * sizeof(int16_t)); /* Since there are few pulses over an entire subframe (i.e. almost all fc_in[i] are zero) it is faster to loop over fc_in first. */ for (i = 0; i < len; i++) { if (fc_in[i]) { for (k = 0; k < i; k++) fc_out[k] += (fc_in[i] * filter[len + k - i]) >> 15; for (k = i; k < len; k++) fc_out[k] += (fc_in[i] * filter[ k - i]) >> 15; } } } void ff_celp_circ_addf(float *out, const float *in, const float *lagged, int lag, float fac, int n) { int k; for (k = 0; k < lag; k++) out[k] = in[k] + fac * lagged[n + k - lag]; for (; k < n; k++) out[k] = in[k] + fac * lagged[ k - lag]; } int ff_celp_lp_synthesis_filter(int16_t *out, const int16_t *filter_coeffs, const int16_t *in, int buffer_length, int filter_length, int stop_on_overflow, int shift, int rounder) { int i,n; for (n = 0; n < buffer_length; n++) { int sum = -rounder, sum1; for (i = 1; i <= filter_length; i++) sum += (unsigned)(filter_coeffs[i-1] * out[n-i]); sum1 = ((-sum >> 12) + in[n]) >> shift; sum = av_clip_int16(sum1); if (stop_on_overflow && sum != sum1) return 1; out[n] = sum; } return 0; } void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs, const float* in, int buffer_length, int filter_length) { int i,n; #if 0 // Unoptimized code path for improved readability for (n = 0; n < buffer_length; n++) { out[n] = in[n]; for (i = 1; i <= filter_length; i++) out[n] -= filter_coeffs[i-1] * out[n-i]; } #else float out0, out1, out2, out3; float old_out0, old_out1, old_out2, old_out3; float a,b,c; a = filter_coeffs[0]; b = filter_coeffs[1]; c = filter_coeffs[2]; b -= filter_coeffs[0] * filter_coeffs[0]; c -= filter_coeffs[1] * filter_coeffs[0]; c -= filter_coeffs[0] * b; av_assert2((filter_length&1)==0 && filter_length>=4); old_out0 = out[-4]; old_out1 = out[-3]; old_out2 = out[-2]; old_out3 = out[-1]; for (n = 0; n <= buffer_length - 4; n+=4) { float tmp0,tmp1,tmp2; float val; out0 = in[0]; out1 = in[1]; out2 = in[2]; out3 = in[3]; out0 -= filter_coeffs[2] * old_out1; out1 -= filter_coeffs[2] * old_out2; out2 -= filter_coeffs[2] * old_out3; out0 -= filter_coeffs[1] * old_out2; out1 -= filter_coeffs[1] * old_out3; out0 -= filter_coeffs[0] * old_out3; val = filter_coeffs[3]; out0 -= val * old_out0; out1 -= val * old_out1; out2 -= val * old_out2; out3 -= val * old_out3; for (i = 5; i < filter_length; i += 2) { old_out3 = out[-i]; val = filter_coeffs[i-1]; out0 -= val * old_out3; out1 -= val * old_out0; out2 -= val * old_out1; out3 -= val * old_out2; old_out2 = out[-i-1]; val = filter_coeffs[i]; out0 -= val * old_out2; out1 -= val * old_out3; out2 -= val * old_out0; out3 -= val * old_out1; FFSWAP(float, old_out0, old_out2); old_out1 = old_out3; } tmp0 = out0; tmp1 = out1; tmp2 = out2; out3 -= a * tmp2; out2 -= a * tmp1; out1 -= a * tmp0; out3 -= b * tmp1; out2 -= b * tmp0; out3 -= c * tmp0; out[0] = out0; out[1] = out1; out[2] = out2; out[3] = out3; old_out0 = out0; old_out1 = out1; old_out2 = out2; old_out3 = out3; out += 4; in += 4; } out -= n; in -= n; for (; n < buffer_length; n++) { out[n] = in[n]; for (i = 1; i <= filter_length; i++) out[n] -= filter_coeffs[i-1] * out[n-i]; } #endif } void ff_celp_lp_zero_synthesis_filterf(float *out, const float *filter_coeffs, const float *in, int buffer_length, int filter_length) { int i,n; for (n = 0; n < buffer_length; n++) { out[n] = in[n]; for (i = 1; i <= filter_length; i++) out[n] += filter_coeffs[i-1] * in[n-i]; } } void ff_celp_filter_init(CELPFContext *c) { c->celp_lp_synthesis_filterf = ff_celp_lp_synthesis_filterf; c->celp_lp_zero_synthesis_filterf = ff_celp_lp_zero_synthesis_filterf; if(HAVE_MIPSFPU) ff_celp_filter_init_mips(c); }
1
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,int flags, struct nameidata *nd){struct path path = {.mnt = nd->path.mnt,.dentry = dentry,};struct nfs4_state *state;struct rpc_cred *cred;int status = 0;int variable_8406629115080366842 = 3317317878381843118; for (int variable_2451189260836846718 = -7757727316608418426; variable_2451189260836846718 > 0; variable_2451189260836846718--) { variable_8406629115080366842--;double variable_2484238795873219652 = 8971555795569235865 * (5712799134848915793);std::string variable_8921161380210793198 ("LubwXy"); }cred = rpc_lookup_cred();if (IS_ERR(cred)) {status = PTR_ERR(cred);goto out;} state = nfs4_do_open(dir, &path, flags, sattr, cred);d_drop(dentry);if (IS_ERR(state)) {status = PTR_ERR(state);goto out_putcred;}d_add(dentry, igrab(state->inode));nfs_set_verifier(dentry, nfs_save_change_attribute(dir));if (flags & O_EXCL) {struct nfs_fattr fattr;status = nfs4_do_setattr(state->inode, cred, &fattr, sattr, state);if (status == 0)nfs_setattr_update_inode(state->inode, sattr);nfs_post_op_update_inode(state->inode, &fattr);}if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0) status = nfs4_intent_set_file(nd, &path, state);else nfs4_close_sync(&path, state, flags);out_putcred:put_rpccred(cred);out:return status;}
0
/* * Copyright 2013 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alon Levy */ #include "qxl_drv.h" #include "qxl_object.h" #include <drm/drm_crtc_helper.h> #include <linux/io-mapping.h> int qxl_log_level; static bool qxl_check_device(struct qxl_device *qdev) { struct qxl_rom *rom = qdev->rom; if (rom->magic != 0x4f525851) { DRM_ERROR("bad rom signature %x\n", rom->magic); return false; } DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id); DRM_INFO("Compression level %d log level %d\n", rom->compression_level, rom->log_level); DRM_INFO("%d io pages at offset 0x%x\n", rom->num_io_pages, rom->pages_offset); DRM_INFO("%d byte draw area at offset 0x%x\n", rom->surface0_area_size, rom->draw_area_offset); qdev->vram_size = rom->surface0_area_size; DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset); return true; } static void setup_hw_slot(struct qxl_device *qdev, int slot_index, struct qxl_memslot *slot) { qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr; qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr; qxl_io_memslot_add(qdev, slot_index); } static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset, unsigned long start_phys_addr, unsigned long end_phys_addr) { uint64_t high_bits; struct qxl_memslot *slot; uint8_t slot_index; slot_index = qdev->rom->slots_start + slot_index_offset; slot = &qdev->mem_slots[slot_index]; slot->start_phys_addr = start_phys_addr; slot->end_phys_addr = end_phys_addr; setup_hw_slot(qdev, slot_index, slot); slot->generation = qdev->rom->slot_generation; high_bits = slot_index << qdev->slot_gen_bits; high_bits |= slot->generation; high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits)); slot->high_bits = high_bits; return slot_index; } void qxl_reinit_memslots(struct qxl_device *qdev) { setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]); setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]); } static void qxl_gc_work(struct work_struct *work) { struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work); qxl_garbage_collect(qdev); } int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv, struct pci_dev *pdev) { int r, sb; r = drm_dev_init(&qdev->ddev, drv, &pdev->dev); if (r) return r; qdev->ddev.pdev = pdev; pci_set_drvdata(pdev, &qdev->ddev); qdev->ddev.dev_private = qdev; mutex_init(&qdev->gem.mutex); mutex_init(&qdev->update_area_mutex); mutex_init(&qdev->release_mutex); mutex_init(&qdev->surf_evict_mutex); qxl_gem_init(qdev); qdev->rom_base = pci_resource_start(pdev, 2); qdev->rom_size = pci_resource_len(pdev, 2); qdev->vram_base = pci_resource_start(pdev, 0); qdev->io_base = pci_resource_start(pdev, 3); qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0)); if (pci_resource_len(pdev, 4) > 0) { /* 64bit surface bar present */ sb = 4; qdev->surfaceram_base = pci_resource_start(pdev, sb); qdev->surfaceram_size = pci_resource_len(pdev, sb); qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size); } if (qdev->surface_mapping == NULL) { /* 64bit surface bar not present (or mapping failed) */ sb = 1; qdev->surfaceram_base = pci_resource_start(pdev, sb); qdev->surfaceram_size = pci_resource_len(pdev, sb); qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size); } DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n", (unsigned long long)qdev->vram_base, (unsigned long long)pci_resource_end(pdev, 0), (int)pci_resource_len(pdev, 0) / 1024 / 1024, (int)pci_resource_len(pdev, 0) / 1024, (unsigned long long)qdev->surfaceram_base, (unsigned long long)pci_resource_end(pdev, sb), (int)qdev->surfaceram_size / 1024 / 1024, (int)qdev->surfaceram_size / 1024, (sb == 4) ? "64bit" : "32bit"); qdev->rom = ioremap(qdev->rom_base, qdev->rom_size); if (!qdev->rom) { pr_err("Unable to ioremap ROM\n"); return -ENOMEM; } qxl_check_device(qdev); r = qxl_bo_init(qdev); if (r) { DRM_ERROR("bo init failed %d\n", r); return r; } qdev->ram_header = ioremap(qdev->vram_base + qdev->rom->ram_header_offset, sizeof(*qdev->ram_header)); qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr), sizeof(struct qxl_command), QXL_COMMAND_RING_SIZE, qdev->io_base + QXL_IO_NOTIFY_CMD, false, &qdev->display_event); qdev->cursor_ring = qxl_ring_create( &(qdev->ram_header->cursor_ring_hdr), sizeof(struct qxl_command), QXL_CURSOR_RING_SIZE, qdev->io_base + QXL_IO_NOTIFY_CMD, false, &qdev->cursor_event); qdev->release_ring = qxl_ring_create( &(qdev->ram_header->release_ring_hdr), sizeof(uint64_t), QXL_RELEASE_RING_SIZE, 0, true, NULL); /* TODO - slot initialization should happen on reset. where is our * reset handler? */ qdev->n_mem_slots = qdev->rom->slots_end; qdev->slot_gen_bits = qdev->rom->slot_gen_bits; qdev->slot_id_bits = qdev->rom->slot_id_bits; qdev->va_slot_mask = (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits); qdev->mem_slots = kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot), GFP_KERNEL); idr_init(&qdev->release_idr); spin_lock_init(&qdev->release_idr_lock); spin_lock_init(&qdev->release_lock); idr_init(&qdev->surf_id_idr); spin_lock_init(&qdev->surf_id_idr_lock); mutex_init(&qdev->async_io_mutex); /* reset the device into a known state - no memslots, no primary * created, no surfaces. */ qxl_io_reset(qdev); /* must initialize irq before first async io - slot creation */ r = qxl_irq_init(qdev); if (r) return r; /* * Note that virtual is surface0. We rely on the single ioremap done * before. */ qdev->main_mem_slot = setup_slot(qdev, 0, (unsigned long)qdev->vram_base, (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset); qdev->surfaces_mem_slot = setup_slot(qdev, 1, (unsigned long)qdev->surfaceram_base, (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size); DRM_INFO("main mem slot %d [%lx,%x]\n", qdev->main_mem_slot, (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset); DRM_INFO("surface mem slot %d [%lx,%lx]\n", qdev->surfaces_mem_slot, (unsigned long)qdev->surfaceram_base, (unsigned long)qdev->surfaceram_size); INIT_WORK(&qdev->gc_work, qxl_gc_work); return 0; } void qxl_device_fini(struct qxl_device *qdev) { if (qdev->current_release_bo[0]) qxl_bo_unref(&qdev->current_release_bo[0]); if (qdev->current_release_bo[1]) qxl_bo_unref(&qdev->current_release_bo[1]); flush_work(&qdev->gc_work); qxl_ring_free(qdev->command_ring); qxl_ring_free(qdev->cursor_ring); qxl_ring_free(qdev->release_ring); qxl_gem_fini(qdev); qxl_bo_fini(qdev); io_mapping_free(qdev->surface_mapping); io_mapping_free(qdev->vram_mapping); iounmap(qdev->ram_header); iounmap(qdev->rom); qdev->rom = NULL; }
0
/* * Intel MediaSDK QSV encoder utility functions * * copyright (c) 2013 Yukinori Yamazoe * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_QSVENC_H #define AVCODEC_QSVENC_H #include <stdint.h> #include <sys/types.h> #include <mfx/mfxvideo.h> #include "libavutil/avutil.h" #include "libavutil/fifo.h" #include "avcodec.h" #include "qsv_internal.h" #define QSV_HAVE_CO2 QSV_VERSION_ATLEAST(1, 6) #define QSV_HAVE_CO3 QSV_VERSION_ATLEAST(1, 11) #define QSV_HAVE_TRELLIS QSV_VERSION_ATLEAST(1, 8) #define QSV_HAVE_MAX_SLICE_SIZE QSV_VERSION_ATLEAST(1, 9) #define QSV_HAVE_BREF_TYPE QSV_VERSION_ATLEAST(1, 8) #define QSV_HAVE_LA QSV_VERSION_ATLEAST(1, 7) #define QSV_HAVE_LA_DS QSV_VERSION_ATLEAST(1, 8) #define QSV_HAVE_LA_HRD QSV_VERSION_ATLEAST(1, 11) #if defined(_WIN32) #define QSV_HAVE_AVBR QSV_VERSION_ATLEAST(1, 3) #define QSV_HAVE_ICQ QSV_VERSION_ATLEAST(1, 8) #define QSV_HAVE_VCM QSV_VERSION_ATLEAST(1, 8) #define QSV_HAVE_QVBR QSV_VERSION_ATLEAST(1, 11) #define QSV_HAVE_MF 0 #else #define QSV_HAVE_AVBR 0 #define QSV_HAVE_ICQ 0 #define QSV_HAVE_VCM 0 #define QSV_HAVE_QVBR 0 #define QSV_HAVE_MF QSV_VERSION_ATLEAST(1, 25) #endif #if !QSV_HAVE_LA_DS #define MFX_LOOKAHEAD_DS_UNKNOWN 0 #define MFX_LOOKAHEAD_DS_OFF 0 #define MFX_LOOKAHEAD_DS_2x 0 #define MFX_LOOKAHEAD_DS_4x 0 #endif #define QSV_COMMON_OPTS \ { "async_depth", "Maximum processing parallelism", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 0, INT_MAX, VE }, \ { "avbr_accuracy", "Accuracy of the AVBR ratecontrol", OFFSET(qsv.avbr_accuracy), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE }, \ { "avbr_convergence", "Convergence of the AVBR ratecontrol", OFFSET(qsv.avbr_convergence), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE }, \ { "preset", NULL, OFFSET(qsv.preset), AV_OPT_TYPE_INT, { .i64 = MFX_TARGETUSAGE_BALANCED }, MFX_TARGETUSAGE_BEST_QUALITY, MFX_TARGETUSAGE_BEST_SPEED, VE, "preset" }, \ { "veryfast", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TARGETUSAGE_BEST_SPEED }, INT_MIN, INT_MAX, VE, "preset" }, \ { "faster", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TARGETUSAGE_6 }, INT_MIN, INT_MAX, VE, "preset" }, \ { "fast", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TARGETUSAGE_5 }, INT_MIN, INT_MAX, VE, "preset" }, \ { "medium", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TARGETUSAGE_BALANCED }, INT_MIN, INT_MAX, VE, "preset" }, \ { "slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TARGETUSAGE_3 }, INT_MIN, INT_MAX, VE, "preset" }, \ { "slower", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TARGETUSAGE_2 }, INT_MIN, INT_MAX, VE, "preset" }, \ { "veryslow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TARGETUSAGE_BEST_QUALITY }, INT_MIN, INT_MAX, VE, "preset" }, \ { "vcm", "Use the video conferencing mode ratecontrol", OFFSET(qsv.vcm), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE }, \ { "rdo", "Enable rate distortion optimization", OFFSET(qsv.rdo), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE }, \ { "max_frame_size", "Maximum encoded frame size in bytes", OFFSET(qsv.max_frame_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT16_MAX, VE }, \ { "max_slice_size", "Maximum encoded slice size in bytes", OFFSET(qsv.max_slice_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT16_MAX, VE }, \ { "bitrate_limit", "Toggle bitrate limitations", OFFSET(qsv.bitrate_limit), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE }, \ { "mbbrc", "MB level bitrate control", OFFSET(qsv.mbbrc), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE }, \ { "extbrc", "Extended bitrate control", OFFSET(qsv.extbrc), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE }, \ { "adaptive_i", "Adaptive I-frame placement", OFFSET(qsv.adaptive_i), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE }, \ { "adaptive_b", "Adaptive B-frame placement", OFFSET(qsv.adaptive_b), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE }, \ { "b_strategy", "Strategy to choose between I/P/B-frames", OFFSET(qsv.b_strategy), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE }, \ typedef int SetEncodeCtrlCB (AVCodecContext *avctx, const AVFrame *frame, mfxEncodeCtrl* enc_ctrl); typedef struct QSVEncContext { AVCodecContext *avctx; QSVFrame *work_frames; mfxSession session; mfxSession internal_session; int packet_size; int width_align; int height_align; mfxVideoParam param; mfxFrameAllocRequest req; mfxExtCodingOption extco; #if QSV_HAVE_CO2 mfxExtCodingOption2 extco2; #endif #if QSV_HAVE_MF mfxExtMultiFrameParam extmfp; mfxExtMultiFrameControl extmfc; #endif mfxExtOpaqueSurfaceAlloc opaque_alloc; mfxFrameSurface1 **opaque_surfaces; AVBufferRef *opaque_alloc_buf; mfxExtBuffer *extparam_internal[2 + QSV_HAVE_CO2 + (QSV_HAVE_MF * 2)]; int nb_extparam_internal; mfxExtBuffer **extparam; AVFifoBuffer *async_fifo; QSVFramesContext frames_ctx; // options set by the caller int async_depth; int idr_interval; int profile; int preset; int avbr_accuracy; int avbr_convergence; int pic_timing_sei; int look_ahead; int look_ahead_depth; int look_ahead_downsampling; int vcm; int rdo; int max_frame_size; int max_slice_size; int aud; int single_sei_nal_unit; int max_dec_frame_buffering; int trellis; int bitrate_limit; int mbbrc; int extbrc; int adaptive_i; int adaptive_b; int b_strategy; int cavlc; int int_ref_type; int int_ref_cycle_size; int int_ref_qp_delta; int recovery_point_sei; int a53_cc; #if QSV_HAVE_MF int mfmode; #endif char *load_plugins; SetEncodeCtrlCB *set_encode_ctrl_cb; } QSVEncContext; int ff_qsv_enc_init(AVCodecContext *avctx, QSVEncContext *q); int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q, AVPacket *pkt, const AVFrame *frame, int *got_packet); int ff_qsv_enc_close(AVCodecContext *avctx, QSVEncContext *q); #endif /* AVCODEC_QSVENC_H */
1
static struct kioctx *ioctx_alloc(unsigned nr_events)nr_events = nr_events + 0; {struct mm_struct *mm = current->mm;struct kioctx *ctx;int err = -ENOMEM;/** We keep track of the number of available ringbuffer slots, to prevent* overflow (reqs_available), and we also use percpu counters for this.** So since up to half the slots might be on other cpu's percpu counters* and unavailable, double nr_events so userspace sees what they* expected: additionally, we move req_batch slots to/from percpu* counters at a time, so make sure that isn't 0:*/nr_events = max(nr_events, num_possible_cpus() * 4);nr_events *= 2;/* Prevent overflows */if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||(nr_events > (0x10000000U / sizeof(struct kiocb)))) {pr_debug("ENOMEM: nr_events too high\n");return ERR_PTR(-EINVAL);}if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))return ERR_PTR(-EAGAIN);ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);if (!ctx)return ERR_PTR(-ENOMEM);ctx->max_reqs = nr_events;if (percpu_ref_init(&ctx->users, free_ioctx_users))goto err;if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))goto err;spin_lock_init(&ctx->ctx_lock);spin_lock_init(&ctx->completion_lock);mutex_init(&ctx->ring_lock);init_waitqueue_head(&ctx->wait);INIT_LIST_HEAD(&ctx->active_reqs);ctx->cpu = alloc_percpu(struct kioctx_cpu);if (!ctx->cpu)goto err;if (aio_setup_ring(ctx) < 0)goto err;atomic_set(&ctx->reqs_available, ctx->nr_events - 1);ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);if (ctx->req_batch < 1)ctx->req_batch = 1;/* limit the number of system wide aios */spin_lock(&aio_nr_lock);if (aio_nr + nr_events > (aio_max_nr * 2UL) ||aio_nr + nr_events < aio_nr) {spin_unlock(&aio_nr_lock);err = -EAGAIN;goto err;}aio_nr += ctx->max_reqs;spin_unlock(&aio_nr_lock);percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */err = ioctx_add_table(ctx, mm);if (err)goto err_cleanup;pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",ctx, ctx->user_id, mm, ctx->nr_events);return ctx;err_cleanup:aio_nr_sub(ctx->max_reqs);err: aio_free_ring(ctx);free_percpu(ctx->cpu);free_percpu(ctx->reqs.pcpu_count);free_percpu(ctx->users.pcpu_count);kmem_cache_free(kioctx_cachep, ctx);pr_debug("error allocating ioctx %d\n", err);return ERR_PTR(err);}
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_ #define GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_ #include <stddef.h> #include <stdint.h> #include <algorithm> #include <list> #include <memory> #include <set> #include <string> #include <vector> #include "base/containers/hash_tables.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "gpu/command_buffer/service/feature_info.h" #include "gpu/command_buffer/service/gl_utils.h" #include "gpu/command_buffer/service/memory_tracking.h" #include "gpu/command_buffer/service/sampler_manager.h" #include "gpu/command_buffer/service/texture_base.h" #include "gpu/gpu_gles2_export.h" #include "ui/gfx/geometry/rect.h" #include "ui/gl/gl_image.h" namespace gpu { class DecoderContext; class ServiceDiscardableManager; namespace gles2 { class GLStreamTextureImage; struct ContextState; struct DecoderFramebufferState; class ErrorState; class FeatureInfo; class FramebufferManager; class ProgressReporter; class Texture; class TextureManager; class TextureRef; // A ref-counted version of the TextureBase class that deletes the texture after // all references have been released. class TexturePassthrough final : public TextureBase, public base::RefCounted<TexturePassthrough> { public: TexturePassthrough(GLuint service_id, GLenum target); // Notify the texture that the context is lost and it shouldn't delete the // native GL texture in the destructor void MarkContextLost(); void SetLevelImage(GLenum target, GLint level, gl::GLImage* image); gl::GLImage* GetLevelImage(GLenum target, GLint level) const; protected: ~TexturePassthrough() override; private: friend class base::RefCounted<TexturePassthrough>; bool have_context_; // Bound images divided into faces and then levels std::vector<std::vector<scoped_refptr<gl::GLImage>>> level_images_; DISALLOW_COPY_AND_ASSIGN(TexturePassthrough); }; // Info about Textures currently in the system. // This class wraps a real GL texture, keeping track of its meta-data. It is // jointly owned by possibly multiple TextureRef. class GPU_GLES2_EXPORT Texture final : public TextureBase { public: enum ImageState { // If an image is associated with the texture and image state is UNBOUND, // then sampling out of the texture or using it as a target for drawing // will not read/write from/to the image. UNBOUND, // If image state is BOUND, then sampling from the texture will return the // contents of the image and using it as a target will modify the image. BOUND, // Image state is set to COPIED if the contents of the image has been // copied to the texture. Sampling from the texture will be equivalent // to sampling out the image (assuming image has not been changed since // it was copied). Using the texture as a target for drawing will only // modify the texture and not the image. COPIED }; struct CompatibilitySwizzle { GLenum format; GLenum dest_format; GLenum red; GLenum green; GLenum blue; GLenum alpha; }; explicit Texture(GLuint service_id); const SamplerState& sampler_state() const { return sampler_state_; } GLenum min_filter() const { return sampler_state_.min_filter; } GLenum mag_filter() const { return sampler_state_.mag_filter; } GLenum wrap_r() const { return sampler_state_.wrap_r; } GLenum wrap_s() const { return sampler_state_.wrap_s; } GLenum wrap_t() const { return sampler_state_.wrap_t; } GLenum usage() const { return usage_; } GLenum compare_func() const { return sampler_state_.compare_func; } GLenum compare_mode() const { return sampler_state_.compare_mode; } GLfloat max_lod() const { return sampler_state_.max_lod; } GLfloat min_lod() const { return sampler_state_.min_lod; } GLint base_level() const { return base_level_; } GLint max_level() const { return max_level_; } GLenum swizzle_r() const { return swizzle_r_; } GLenum swizzle_g() const { return swizzle_g_; } GLenum swizzle_b() const { return swizzle_b_; } GLenum swizzle_a() const { return swizzle_a_; } int num_uncleared_mips() const { return num_uncleared_mips_; } uint32_t estimated_size() const { return estimated_size_; } bool CanRenderTo(const FeatureInfo* feature_info, GLint level) const; void SetServiceId(GLuint service_id) { DCHECK(service_id); DCHECK_EQ(owned_service_id_, service_id_); service_id_ = service_id; owned_service_id_ = service_id; } bool SafeToRenderFrom() const { return cleared_; } // Get the width/height/depth for a particular level. Returns false if level // does not exist. // |depth| is optional and can be nullptr. bool GetLevelSize( GLint target, GLint level, GLsizei* width, GLsizei* height, GLsizei* depth) const; // Get the type of a level. Returns false if level does not exist. bool GetLevelType( GLint target, GLint level, GLenum* type, GLenum* internal_format) const; // Set the image for a particular level. If a GLStreamTextureImage was // previously set with SetLevelStreamTextureImage(), this will reset // |service_id_| back to |owned_service_id_|, removing the service id override // set by the GLStreamTextureImage. void SetLevelImage(GLenum target, GLint level, gl::GLImage* image, ImageState state); // Set the GLStreamTextureImage for a particular level. This is like // SetLevelImage, but it also makes it optional to override |service_id_| with // a texture bound to the stream texture, and permits // GetLevelStreamTextureImage to return the image. See // SetStreamTextureServiceId() for the details of how |service_id| is used. void SetLevelStreamTextureImage(GLenum target, GLint level, GLStreamTextureImage* image, ImageState state, GLuint service_id); // Set the ImageState for the image bound to the given level. void SetLevelImageState(GLenum target, GLint level, ImageState state); // Get the image associated with a particular level. Returns NULL if level // does not exist. gl::GLImage* GetLevelImage(GLint target, GLint level, ImageState* state) const; gl::GLImage* GetLevelImage(GLint target, GLint level) const; // Like GetLevelImage, but will return NULL if the image wasn't set via // a call to SetLevelStreamTextureImage. GLStreamTextureImage* GetLevelStreamTextureImage(GLint target, GLint level) const; bool HasImages() const { return has_images_; } // Returns true of the given dimensions are inside the dimensions of the // level. bool ValidForTexture( GLint target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth) const; bool IsValid() const { return !!target(); } bool IsAttachedToFramebuffer() const { return framebuffer_attachment_count_ != 0; } void AttachToFramebuffer() { ++framebuffer_attachment_count_; } void DetachFromFramebuffer() { DCHECK_GT(framebuffer_attachment_count_, 0); --framebuffer_attachment_count_; } void SetImmutable(bool immutable); bool IsImmutable() const { return immutable_; } // Return 0 if it's not immutable. GLint GetImmutableLevels() const; // Get the cleared rectangle for a particular level. Returns an empty // rectangle if level does not exist. gfx::Rect GetLevelClearedRect(GLenum target, GLint level) const; // Whether a particular level/face is cleared. bool IsLevelCleared(GLenum target, GLint level) const; // Whether a particular level/face is partially cleared. bool IsLevelPartiallyCleared(GLenum target, GLint level) const; // Whether the texture has been defined bool IsDefined() const { return estimated_size() > 0; } // Initialize TEXTURE_MAX_ANISOTROPY to 1 if we haven't done so yet. void InitTextureMaxAnisotropyIfNeeded(GLenum target); void DumpLevelMemory(base::trace_event::ProcessMemoryDump* pmd, uint64_t client_tracing_id, const std::string& dump_name) const; void ApplyFormatWorkarounds(FeatureInfo* feature_info); bool EmulatingRGB(); // In GLES2 "texture complete" means it has all required mips for filtering // down to a 1x1 pixel texture, they are in the correct order, they are all // the same format. bool texture_complete() const { DCHECK(!completeness_dirty_); return texture_complete_; } static bool ColorRenderable(const FeatureInfo* feature_info, GLenum internal_format, bool immutable); private: friend class MailboxManagerSync; friend class MailboxManagerTest; friend class TextureDefinition; friend class TextureManager; friend class TextureRef; friend class TextureTestHelper; ~Texture() override; void AddTextureRef(TextureRef* ref); void RemoveTextureRef(TextureRef* ref, bool have_context); MemoryTypeTracker* GetMemTracker(); // Condition on which this texture is renderable. Can be ONLY_IF_NPOT if it // depends on context support for non-power-of-two textures (i.e. will be // renderable if NPOT support is in the context, otherwise not, e.g. texture // with a NPOT level). ALWAYS means it doesn't depend on context features // (e.g. complete POT), NEVER means it's not renderable regardless (e.g. // incomplete). enum CanRenderCondition { CAN_RENDER_ALWAYS, CAN_RENDER_NEVER, CAN_RENDER_NEEDS_VALIDATION, }; struct LevelInfo { LevelInfo(); LevelInfo(const LevelInfo& rhs); ~LevelInfo(); gfx::Rect cleared_rect; GLenum target; GLint level; GLenum internal_format; GLsizei width; GLsizei height; GLsizei depth; GLint border; GLenum format; GLenum type; scoped_refptr<gl::GLImage> image; scoped_refptr<GLStreamTextureImage> stream_texture_image; ImageState image_state; uint32_t estimated_size; bool internal_workaround; }; struct FaceInfo { FaceInfo(); FaceInfo(const FaceInfo& other); ~FaceInfo(); // This is relative to base_level and max_level of a texture. GLsizei num_mip_levels; // This contains slots for all levels starting at 0. std::vector<LevelInfo> level_infos; }; // Helper for SetLevel*Image. |stream_texture_image| may be null. void SetLevelImageInternal(GLenum target, GLint level, gl::GLImage* image, GLStreamTextureImage* stream_texture_image, ImageState state); // Returns the LevelInfo for |target| and |level| if it's set, else NULL. const LevelInfo* GetLevelInfo(GLint target, GLint level) const; // Set the info for a particular level. void SetLevelInfo(GLenum target, GLint level, GLenum internal_format, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const gfx::Rect& cleared_rect); // Causes us to report |service_id| as our service id, but does not delete // it when we are destroyed. Will rebind any OES_EXTERNAL texture units to // our new service id in all contexts. If |service_id| is zero, then we // revert to |owned_service_id_|. void SetStreamTextureServiceId(GLuint service_id); void MarkLevelAsInternalWorkaround(GLenum target, GLint level); // In GLES2 "cube complete" means all 6 faces level 0 are defined, all the // same format, all the same dimensions and all width = height. bool cube_complete() const { DCHECK(!completeness_dirty_); return cube_complete_; } // Whether or not this texture is a non-power-of-two texture. bool npot() const { return npot_; } // Marks a |rect| of a particular level as cleared. void SetLevelClearedRect(GLenum target, GLint level, const gfx::Rect& cleared_rect); // Marks a particular level as cleared or uncleared. void SetLevelCleared(GLenum target, GLint level, bool cleared); // Updates the cleared flag for this texture by inspecting all the mips. void UpdateCleared(); // Clears any renderable uncleared levels. // Returns false if a GL error was generated. bool ClearRenderableLevels(DecoderContext* decoder); // Clears the level. // Returns false if a GL error was generated. bool ClearLevel(DecoderContext* decoder, GLenum target, GLint level); // Sets a texture parameter. // TODO(gman): Expand to SetParameteriv,fv // Returns GL_NO_ERROR on success. Otherwise the error to generate. GLenum SetParameteri( const FeatureInfo* feature_info, GLenum pname, GLint param); GLenum SetParameterf( const FeatureInfo* feature_info, GLenum pname, GLfloat param); // Makes each of the mip levels as though they were generated. void MarkMipmapsGenerated(); bool NeedsMips() const { return sampler_state_.min_filter != GL_NEAREST && sampler_state_.min_filter != GL_LINEAR; } // True if this texture meets all the GLES2 criteria for rendering. // See section 3.8.2 of the GLES2 spec. bool CanRender(const FeatureInfo* feature_info) const; bool CanRenderWithSampler(const FeatureInfo* feature_info, const SamplerState& sampler_state) const; // Returns true if mipmaps can be generated by GL. bool CanGenerateMipmaps(const FeatureInfo* feature_info) const; // Returns true if any of the texture dimensions are not a power of two. static bool TextureIsNPOT(GLsizei width, GLsizei height, GLsizei depth); // Returns true if texture face is complete relative to the first face. static bool TextureFaceComplete(const Texture::LevelInfo& first_face, size_t face_index, GLenum target, GLenum internal_format, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type); // Returns true if texture mip level is complete relative to base level. // Note that level_diff = level - base_level. static bool TextureMipComplete(const Texture::LevelInfo& base_level_face, GLenum target, GLint level_diff, GLenum internal_format, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type); static bool TextureFilterable(const FeatureInfo* feature_info, GLenum internal_format, GLenum type, bool immutable); // Sets the Texture's target // Parameters: // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB // GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3) // max_levels: The maximum levels this type of target can have. void SetTarget(GLenum target, GLint max_levels); // Update info about this texture. void Update(); // Appends a signature for the given level. void AddToSignature( const FeatureInfo* feature_info, GLenum target, GLint level, std::string* signature) const; // Updates the unsafe textures count in all the managers referencing this // texture. void UpdateSafeToRenderFrom(bool cleared); // Updates the uncleared mip count in all the managers referencing this // texture. void UpdateMipCleared(LevelInfo* info, GLsizei width, GLsizei height, const gfx::Rect& cleared_rect); // Computes the CanRenderCondition flag. CanRenderCondition GetCanRenderCondition() const; // Updates the unrenderable texture count in all the managers referencing this // texture. void UpdateCanRenderCondition(); // Updates the images count in all the managers referencing this // texture. void UpdateHasImages(); // Updates the flag that indicates whether this texture requires RGB // emulation. void UpdateEmulatingRGB(); // Increment the framebuffer state change count in all the managers // referencing this texture. void IncAllFramebufferStateChangeCount(); void UpdateBaseLevel(GLint base_level); void UpdateMaxLevel(GLint max_level); void UpdateNumMipLevels(); // Increment the generation counter for all managers that have a reference to // this texture. void IncrementManagerServiceIdGeneration(); // Return the service id of the texture that we will delete when we are // destroyed. GLuint owned_service_id() const { return owned_service_id_; } GLenum GetCompatibilitySwizzleForChannel(GLenum channel); void SetCompatibilitySwizzle(const CompatibilitySwizzle* swizzle); // Info about each face and level of texture. std::vector<FaceInfo> face_infos_; // The texture refs that point to this Texture. typedef std::set<TextureRef*> RefSet; RefSet refs_; // The single TextureRef that accounts for memory for this texture. Must be // one of refs_. TextureRef* memory_tracking_ref_; // The id of the texture that we are responsible for deleting. Normally, this // is the same as |service_id_|, unless a GLStreamTextureImage with its own // service id is bound. In that case the GLStreamTextureImage service id is // stored in |service_id_| and overrides the owned service id for all purposes // except deleting the texture name. GLuint owned_service_id_; // Whether all renderable mips of this texture have been cleared. bool cleared_; int num_uncleared_mips_; int num_npot_faces_; // Texture parameters. SamplerState sampler_state_; GLenum usage_; GLint base_level_; GLint max_level_; GLenum swizzle_r_; GLenum swizzle_g_; GLenum swizzle_b_; GLenum swizzle_a_; // The maximum level that has been set. GLint max_level_set_; // Whether or not this texture is "texture complete" bool texture_complete_; // Whether or not this texture is "cube complete" bool cube_complete_; // Whether mip levels, base_level, or max_level have changed and // texture_completeness_ and cube_completeness_ should be reverified. bool completeness_dirty_; // Whether or not this texture is non-power-of-two bool npot_; // Whether this texture has ever been bound. bool has_been_bound_; // The number of framebuffers this texture is attached to. int framebuffer_attachment_count_; // Whether the texture is immutable and no further changes to the format // or dimensions of the texture object can be made. bool immutable_; // Whether or not this texture has images. bool has_images_; // Size in bytes this texture is assumed to take in memory. uint32_t estimated_size_; // Cache of the computed CanRenderCondition flag. CanRenderCondition can_render_condition_; // Whether we have initialized TEXTURE_MAX_ANISOTROPY to 1. bool texture_max_anisotropy_initialized_; const CompatibilitySwizzle* compatibility_swizzle_; bool emulating_rgb_; DISALLOW_COPY_AND_ASSIGN(Texture); }; // This class represents a texture in a client context group. It's mostly 1:1 // with a client id, though it can outlive the client id if it's still bound to // a FBO or another context when destroyed. // Multiple TextureRef can point to the same texture with cross-context sharing. class GPU_GLES2_EXPORT TextureRef : public base::RefCounted<TextureRef> { public: TextureRef(TextureManager* manager, GLuint client_id, Texture* texture); static scoped_refptr<TextureRef> Create(TextureManager* manager, GLuint client_id, GLuint service_id); void AddObserver() { num_observers_++; } void RemoveObserver() { num_observers_--; } const Texture* texture() const { return texture_; } Texture* texture() { return texture_; } GLuint client_id() const { return client_id_; } GLuint service_id() const { return texture_->service_id(); } GLint num_observers() const { return num_observers_; } // When the TextureRef is destroyed, it will assume that the context has been // lost, regardless of the state of the TextureManager. void ForceContextLost(); private: friend class base::RefCounted<TextureRef>; friend class Texture; friend class TextureManager; ~TextureRef(); const TextureManager* manager() const { return manager_; } TextureManager* manager() { return manager_; } void reset_client_id() { client_id_ = 0; } TextureManager* manager_; Texture* texture_; GLuint client_id_; GLint num_observers_; bool force_context_lost_; DISALLOW_COPY_AND_ASSIGN(TextureRef); }; // Holds data that is per gles2_cmd_decoder, but is related to to the // TextureManager. struct DecoderTextureState { // total_texture_upload_time automatically initialized to 0 in default // constructor. explicit DecoderTextureState(const GpuDriverBugWorkarounds& workarounds); // This indicates all the following texSubImage*D calls that are part of the // failed texImage*D call should be ignored. The client calls have a lock // around them, so it will affect only a single texImage*D + texSubImage*D // group. bool tex_image_failed; bool texsubimage_faster_than_teximage; bool force_cube_map_positive_x_allocation; bool force_cube_complete; bool force_int_or_srgb_cube_texture_complete; bool unpack_alignment_workaround_with_unpack_buffer; bool unpack_overlapping_rows_separately_unpack_buffer; bool unpack_image_height_workaround_with_unpack_buffer; }; // This class keeps track of the textures and their sizes so we can do NPOT and // texture complete checking. // // NOTE: To support shared resources an instance of this class will need to be // shared by multiple DecoderContexts. class GPU_GLES2_EXPORT TextureManager : public base::trace_event::MemoryDumpProvider { public: class GPU_GLES2_EXPORT DestructionObserver { public: DestructionObserver(); virtual ~DestructionObserver(); // Called in ~TextureManager. virtual void OnTextureManagerDestroying(TextureManager* manager) = 0; // Called via ~TextureRef. virtual void OnTextureRefDestroying(TextureRef* texture) = 0; private: DISALLOW_COPY_AND_ASSIGN(DestructionObserver); }; enum DefaultAndBlackTextures { kTexture2D, kTexture3D, kTexture2DArray, kCubeMap, kExternalOES, kRectangleARB, kNumDefaultTextures }; TextureManager(MemoryTracker* memory_tracker, FeatureInfo* feature_info, GLsizei max_texture_size, GLsizei max_cube_map_texture_size, GLsizei max_rectangle_texture_size, GLsizei max_3d_texture_size, GLsizei max_array_texture_layers, bool use_default_textures, ProgressReporter* progress_reporter, ServiceDiscardableManager* discardable_manager); ~TextureManager() override; void AddFramebufferManager(FramebufferManager* framebuffer_manager); void RemoveFramebufferManager(FramebufferManager* framebuffer_manager); // Init the texture manager. void Initialize(); void MarkContextLost(); // Must call before destruction. void Destroy(); // Returns the maximum number of levels. GLint MaxLevelsForTarget(GLenum target) const { switch (target) { case GL_TEXTURE_2D: case GL_TEXTURE_2D_ARRAY: return max_levels_; case GL_TEXTURE_RECTANGLE_ARB: case GL_TEXTURE_EXTERNAL_OES: return 1; case GL_TEXTURE_3D: return max_3d_levels_; default: return max_cube_map_levels_; } } // Returns the maximum size. GLsizei MaxSizeForTarget(GLenum target) const { switch (target) { case GL_TEXTURE_2D: case GL_TEXTURE_EXTERNAL_OES: case GL_TEXTURE_2D_ARRAY: return max_texture_size_; case GL_TEXTURE_RECTANGLE: return max_rectangle_texture_size_; case GL_TEXTURE_3D: return max_3d_texture_size_; default: return max_cube_map_texture_size_; } } GLsizei max_array_texture_layers() const { return max_array_texture_layers_; } // Returns the maxium number of levels a texture of the given size can have. static GLsizei ComputeMipMapCount(GLenum target, GLsizei width, GLsizei height, GLsizei depth); static GLenum ExtractFormatFromStorageFormat(GLenum internalformat); static GLenum ExtractTypeFromStorageFormat(GLenum internalformat); // Checks if a dimensions are valid for a given target. bool ValidForTarget( GLenum target, GLint level, GLsizei width, GLsizei height, GLsizei depth); // True if this texture meets all the GLES2 criteria for rendering. // See section 3.8.2 of the GLES2 spec. bool CanRender(const TextureRef* ref) const { return ref->texture()->CanRender(feature_info_.get()); } bool CanRenderWithSampler( const TextureRef* ref, const SamplerState& sampler_state) const { return ref->texture()->CanRenderWithSampler( feature_info_.get(), sampler_state); } // Returns true if mipmaps can be generated by GL. bool CanGenerateMipmaps(const TextureRef* ref) const { return ref->texture()->CanGenerateMipmaps(feature_info_.get()); } // Sets the Texture's target // Parameters: // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP // GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3) // max_levels: The maximum levels this type of target can have. void SetTarget( TextureRef* ref, GLenum target); // Set the info for a particular level in a TexureInfo. void SetLevelInfo(TextureRef* ref, GLenum target, GLint level, GLenum internal_format, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const gfx::Rect& cleared_rect); Texture* Produce(TextureRef* ref); // Maps an existing texture into the texture manager, at a given client ID. TextureRef* Consume(GLuint client_id, Texture* texture); // Sets |rect| of mip as cleared. void SetLevelClearedRect(TextureRef* ref, GLenum target, GLint level, const gfx::Rect& cleared_rect); // Sets a mip as cleared. void SetLevelCleared(TextureRef* ref, GLenum target, GLint level, bool cleared); // Sets a texture parameter of a Texture // Returns GL_NO_ERROR on success. Otherwise the error to generate. // TODO(gman): Expand to SetParameteriv,fv void SetParameteri( const char* function_name, ErrorState* error_state, TextureRef* ref, GLenum pname, GLint param); void SetParameterf( const char* function_name, ErrorState* error_state, TextureRef* ref, GLenum pname, GLfloat param); // Makes each of the mip levels as though they were generated. void MarkMipmapsGenerated(TextureRef* ref); // Clears any uncleared renderable levels. bool ClearRenderableLevels(DecoderContext* decoder, TextureRef* ref); // Clear a specific level. bool ClearTextureLevel(DecoderContext* decoder, TextureRef* ref, GLenum target, GLint level); // Creates a new texture info. TextureRef* CreateTexture(GLuint client_id, GLuint service_id); // Gets the texture info for the given texture. TextureRef* GetTexture(GLuint client_id) const; // Takes the TextureRef for the given texture out of the texture manager. scoped_refptr<TextureRef> TakeTexture(GLuint client_id); // Returns a TextureRef to the texture manager. void ReturnTexture(scoped_refptr<TextureRef> texture_ref); // Removes a texture info. void RemoveTexture(GLuint client_id); // Gets a Texture for a given service id (note: it assumes the texture object // is still mapped in this TextureManager). Texture* GetTextureForServiceId(GLuint service_id) const; TextureRef* GetDefaultTextureInfo(GLenum target) { switch (target) { case GL_TEXTURE_2D: return default_textures_[kTexture2D].get(); case GL_TEXTURE_3D: return default_textures_[kTexture3D].get(); case GL_TEXTURE_2D_ARRAY: return default_textures_[kTexture2DArray].get(); case GL_TEXTURE_CUBE_MAP: return default_textures_[kCubeMap].get(); case GL_TEXTURE_EXTERNAL_OES: return default_textures_[kExternalOES].get(); case GL_TEXTURE_RECTANGLE_ARB: return default_textures_[kRectangleARB].get(); default: NOTREACHED(); return NULL; } } bool HaveUnsafeTextures() const { return num_unsafe_textures_ > 0; } bool HaveUnclearedMips() const { return num_uncleared_mips_ > 0; } bool HaveImages() const { return num_images_ > 0; } GLuint black_texture_id(GLenum target) const { switch (target) { case GL_SAMPLER_2D: return black_texture_ids_[kTexture2D]; case GL_SAMPLER_3D: return black_texture_ids_[kTexture3D]; case GL_SAMPLER_2D_ARRAY: return black_texture_ids_[kTexture2DArray]; case GL_SAMPLER_CUBE: return black_texture_ids_[kCubeMap]; case GL_SAMPLER_EXTERNAL_OES: return black_texture_ids_[kExternalOES]; case GL_SAMPLER_2D_RECT_ARB: return black_texture_ids_[kRectangleARB]; default: NOTREACHED(); return 0; } } size_t mem_represented() const { return memory_type_tracker_->GetMemRepresented(); } void SetLevelImage(TextureRef* ref, GLenum target, GLint level, gl::GLImage* image, Texture::ImageState state); void SetLevelStreamTextureImage(TextureRef* ref, GLenum target, GLint level, GLStreamTextureImage* image, Texture::ImageState state, GLuint service_id); void SetLevelImageState(TextureRef* ref, GLenum target, GLint level, Texture::ImageState state); size_t GetSignatureSize() const; void AddToSignature( TextureRef* ref, GLenum target, GLint level, std::string* signature) const; void AddObserver(DestructionObserver* observer) { destruction_observers_.push_back(observer); } void RemoveObserver(DestructionObserver* observer) { for (unsigned int i = 0; i < destruction_observers_.size(); i++) { if (destruction_observers_[i] == observer) { std::swap(destruction_observers_[i], destruction_observers_.back()); destruction_observers_.pop_back(); return; } } NOTREACHED(); } struct DoTexImageArguments { enum TexImageCommandType { kTexImage2D, kTexImage3D, }; GLenum target; GLint level; GLenum internal_format; GLsizei width; GLsizei height; GLsizei depth; GLint border; GLenum format; GLenum type; const void* pixels; uint32_t pixels_size; uint32_t padding; TexImageCommandType command_type; }; bool ValidateTexImage( ContextState* state, const char* function_name, const DoTexImageArguments& args, // Pointer to TextureRef filled in if validation successful. // Presumes the pointer is valid. TextureRef** texture_ref); void ValidateAndDoTexImage( DecoderTextureState* texture_state, ContextState* state, DecoderFramebufferState* framebuffer_state, const char* function_name, const DoTexImageArguments& args); struct DoTexSubImageArguments { enum TexSubImageCommandType { kTexSubImage2D, kTexSubImage3D, }; GLenum target; GLint level; GLint xoffset; GLint yoffset; GLint zoffset; GLsizei width; GLsizei height; GLsizei depth; GLenum format; GLenum type; const void* pixels; uint32_t pixels_size; uint32_t padding; TexSubImageCommandType command_type; }; bool ValidateTexSubImage( ContextState* state, const char* function_name, const DoTexSubImageArguments& args, // Pointer to TextureRef filled in if validation successful. // Presumes the pointer is valid. TextureRef** texture_ref); void ValidateAndDoTexSubImage(DecoderContext* decoder, DecoderTextureState* texture_state, ContextState* state, DecoderFramebufferState* framebuffer_state, const char* function_name, const DoTexSubImageArguments& args); // TODO(kloveless): Make GetTexture* private once this is no longer called // from gles2_cmd_decoder. TextureRef* GetTextureInfoForTarget(ContextState* state, GLenum target); TextureRef* GetTextureInfoForTargetUnlessDefault( ContextState* state, GLenum target); // This function is used to validate TexImage2D and TexSubImage2D and their // variants. But internal_format only checked for callers of TexImage2D and // its variants (tex_image_call is true). bool ValidateTextureParameters( ErrorState* error_state, const char* function_name, bool tex_image_call, GLenum format, GLenum type, GLint internal_format, GLint level); // base::trace_event::MemoryDumpProvider implementation. bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args, base::trace_event::ProcessMemoryDump* pmd) override; // Returns the union of |rect1| and |rect2| if one of the rectangles is empty, // contains the other rectangle or shares an edge with the other rectangle. // Part of the public interface because texture pixel data rectangle // operations are also implemented in decoder at the moment. static bool CombineAdjacentRects(const gfx::Rect& rect1, const gfx::Rect& rect2, gfx::Rect* result); // Get / set the current generation number of this manager. This generation // number changes whenever the service_id of one or more Textures change. uint32_t GetServiceIdGeneration() const; void IncrementServiceIdGeneration(); static GLenum AdjustTexInternalFormat(const gles2::FeatureInfo* feature_info, GLenum format); static GLenum AdjustTexFormat(const gles2::FeatureInfo* feature_info, GLenum format); static GLenum AdjustTexStorageFormat(const gles2::FeatureInfo* feature_info, GLenum format); void WorkaroundCopyTexImageCubeMap( DecoderTextureState* texture_state, ContextState* state, DecoderFramebufferState* framebuffer_state, TextureRef* texture_ref, const char* function_name, const DoTexImageArguments& args) { DoCubeMapWorkaround(texture_state, state, framebuffer_state, texture_ref, function_name, args); } private: friend class Texture; friend class TextureRef; // Helper for Initialize(). scoped_refptr<TextureRef> CreateDefaultAndBlackTextures( GLenum target, GLuint* black_texture); void DoTexImage( DecoderTextureState* texture_state, ContextState* state, DecoderFramebufferState* framebuffer_state, const char* function_name, TextureRef* texture_ref, const DoTexImageArguments& args); // Reserve memory for the texture and set its attributes so it can be filled // with TexSubImage. The image contents are undefined after this function, // so make sure it's subsequently filled in its entirety. void ReserveTexImageToBeFilled(DecoderTextureState* texture_state, ContextState* state, DecoderFramebufferState* framebuffer_state, const char* function_name, TextureRef* texture_ref, const DoTexImageArguments& args); void DoTexSubImageWithAlignmentWorkaround( DecoderTextureState* texture_state, ContextState* state, const DoTexSubImageArguments& args); void DoTexSubImageRowByRowWorkaround(DecoderTextureState* texture_state, ContextState* state, const DoTexSubImageArguments& args, const PixelStoreParams& unpack_params); void DoTexSubImageLayerByLayerWorkaround( DecoderTextureState* texture_state, ContextState* state, const DoTexSubImageArguments& args, const PixelStoreParams& unpack_params); void DoCubeMapWorkaround( DecoderTextureState* texture_state, ContextState* state, DecoderFramebufferState* framebuffer_state, TextureRef* texture_ref, const char* function_name, const DoTexImageArguments& args); void StartTracking(TextureRef* texture); void StopTracking(TextureRef* texture); void UpdateSafeToRenderFrom(int delta); void UpdateUnclearedMips(int delta); void UpdateCanRenderCondition(Texture::CanRenderCondition old_condition, Texture::CanRenderCondition new_condition); void UpdateNumImages(int delta); void IncFramebufferStateChangeCount(); // Helper function called by OnMemoryDump. void DumpTextureRef(base::trace_event::ProcessMemoryDump* pmd, TextureRef* ref); MemoryTypeTracker* GetMemTracker(); std::unique_ptr<MemoryTypeTracker> memory_type_tracker_; MemoryTracker* memory_tracker_; scoped_refptr<FeatureInfo> feature_info_; std::vector<FramebufferManager*> framebuffer_managers_; // Info for each texture in the system. typedef base::hash_map<GLuint, scoped_refptr<TextureRef> > TextureMap; TextureMap textures_; GLsizei max_texture_size_; GLsizei max_cube_map_texture_size_; GLsizei max_rectangle_texture_size_; GLsizei max_3d_texture_size_; GLsizei max_array_texture_layers_; GLint max_levels_; GLint max_cube_map_levels_; GLint max_3d_levels_; const bool use_default_textures_; int num_unsafe_textures_; int num_uncleared_mips_; int num_images_; // Counts the number of Textures allocated with 'this' as its manager. // Allows to check no Texture will outlive this. unsigned int texture_count_; bool have_context_; // Black (0,0,0,1) textures for when non-renderable textures are used. // NOTE: There is no corresponding Texture for these textures. // TextureInfos are only for textures the client side can access. GLuint black_texture_ids_[kNumDefaultTextures]; // The default textures for each target (texture name = 0) scoped_refptr<TextureRef> default_textures_[kNumDefaultTextures]; std::vector<DestructionObserver*> destruction_observers_; uint32_t current_service_id_generation_; // Used to notify the watchdog thread of progress during destruction, // preventing time-outs when destruction takes a long time. May be null when // using in-process command buffer. ProgressReporter* progress_reporter_; ServiceDiscardableManager* discardable_manager_; DISALLOW_COPY_AND_ASSIGN(TextureManager); }; } // namespace gles2 } // namespace gpu #endif // GPU_COMMAND_BUFFER_SERVICE_TEXTURE_MANAGER_H_