code
stringlengths 3
1.01M
| repo_name
stringlengths 5
116
| path
stringlengths 3
311
| language
stringclasses 30
values | license
stringclasses 15
values | size
int64 3
1.01M
|
|---|---|---|---|---|---|
/*
 * This node publishes the /monitoring/batteryInfo.
 * To do so it parses the messages from the /diagnostics topic. The problem is
 * that not all the data in the message are now available. Therefore the
 * status will be always set to 'Unknown' and the batteryTimeLeft will be
 * always 0.
 *
 * @author Jan Brabec
 */
#include <ros/ros.h>
#include <monitoring_msgs/Battery.h>
#include <diagnostic_msgs/DiagnosticArray.h>
#include <boost/lexical_cast.hpp>
// Iterator over the statuses contained in a DiagnosticArray message.
typedef std::vector<diagnostic_msgs::DiagnosticStatus>::const_iterator DiagnosticArrayConstIterator;
// Prefix expected in front of ": Battery" in the diagnostic status name.
std::string namePrefix;
// Global handle shared by all helpers; created in main().
ros::NodeHandlePtr nodeHandle;
// Cached index of the battery status inside the diagnostics array;
// std::string::npos acts as the "not found yet" sentinel.
size_t correctIndexInArray = std::string::npos;
/**
 * Returns true when |statusName| matches "<namePrefix>: Battery", i.e. it is
 * the diagnostic status entry that carries the battery data.
 */
bool isBatteryStatus(const std::string& statusName) {
    const std::string expectedName = namePrefix + ": Battery";
    return statusName == expectedName;
}
/**
 * Resolves the diagnostic name prefix from the parameter server; falls back
 * to "nifti_robot_driver" when the parameter is not set.
 * NOTE(review): the parameter key "batteryDiagnosticsPublisherNode" reads
 * like a node name rather than a prefix parameter -- confirm this is the
 * intended key.
 */
void getNamePrefix() {
    if (!nodeHandle->getParam("batteryDiagnosticsPublisherNode", namePrefix)) {
        ROS_INFO_STREAM("No prefix parameter specified. using: nifti_robot_driver");
        namePrefix = "nifti_robot_driver";
    }
}
/**
 * Scans the diagnostics array for the battery status and caches its index in
 * correctIndexInArray. Different kinds of arrays are published and some do
 * not contain the battery entry, so presence must not be assumed.
 *
 * @return true when the battery status was found, false otherwise.
 */
bool tryToFindIndexInArray(const diagnostic_msgs::DiagnosticArrayConstPtr& msg) {
    for (size_t i = 0; i < msg->status.size(); ++i) {
        if (isBatteryStatus(msg->status[i].name)) {
            correctIndexInArray = i;
            ROS_INFO_STREAM("Correct status in the DiagnosticArray has been found.");
            return true;
        }
    }
    ROS_WARN_STREAM("Couldn't find the status in the DiagnosticArray. This may be normal in the" <<
        " first few seconds.");
    return false;
}
/**
 * Looks up the battery status inside the diagnostics array, using the cached
 * index when available. Returns the end iterator when the array does not
 * contain the battery status.
 */
DiagnosticArrayConstIterator getBatteryStatus(const diagnostic_msgs::DiagnosticArrayConstPtr& msg) {
    const DiagnosticArrayConstIterator notFound = msg->status.end();
    // Lazily discover the index on first use (npos == not cached yet).
    if (correctIndexInArray == std::string::npos && !tryToFindIndexInArray(msg)) {
        return notFound; // Couldn't find the correct index.
    }
    // A shorter array cannot contain the cached index.
    if (msg->status.size() <= correctIndexInArray) {
        return notFound; // This array is smaller than the correct one.
    }
    DiagnosticArrayConstIterator candidate = msg->status.begin() + correctIndexInArray;
    // Re-validate the name: a different array layout may reuse the index.
    return isBatteryStatus(candidate->name) ? candidate : notFound;
}
/**
* Publishes the /monitoring/batteryInfo topic. Takes care of the whole creation of the message.
*/
void publishBatteryInfo(double batteryLevel) {
static ros::Publisher batteryPub =
nodeHandle->advertise<monitoring_msgs::Battery> ("/monitoring/batteryInfo", 10);
monitoring_msgs::Battery bat;
bat.header.stamp = ros::Time::now();
bat.header.frame_id = "base_link";
bat.batteryTimeLeft = 0;
bat.currentBatteryLevel = batteryLevel;
bat.status = "Unknown";
batteryPub.publish(bat);
}
/**
 * This callback is invoked every time a new diagnostics message arrives on the
 * /diagnostics topic. Extracts the battery level from the first key/value pair
 * of the battery status and republishes it on /monitoring/batteryInfo.
 */
void diagnosticsCallback(const diagnostic_msgs::DiagnosticArrayConstPtr& msg) {
    DiagnosticArrayConstIterator status = getBatteryStatus(msg);
    if (status == msg->status.end()) {
        return; // This array does not carry the battery status.
    }
    // Guard against a status without any key/value pairs: indexing values[0]
    // unchecked would be undefined behavior.
    if (status->values.empty()) {
        ROS_WARN_STREAM("Battery status contains no values; skipping message.");
        return;
    }
    try {
        publishBatteryInfo(boost::lexical_cast<double>(status->values[0].value));
    } catch (const boost::bad_lexical_cast&) {
        // A malformed value must not crash the node.
        ROS_WARN_STREAM("Battery level '" << status->values[0].value
            << "' is not a valid number; skipping message.");
    }
}
/**
 * Entry point: initializes the node, resolves the name prefix and subscribes
 * to /diagnostics, republishing battery data until shutdown.
 */
int main(int argc, char** argv) {
    ros::init(argc, argv, "batteryInfo");
    // Private handle ("~") so parameters resolve in this node's namespace.
    nodeHandle = ros::NodeHandlePtr(new ros::NodeHandle("~"));
    getNamePrefix();
    ROS_INFO("Subscribing to /diagnostics ...");
    ros::Subscriber diagnosticsSub = nodeHandle->subscribe("/diagnostics", 1, diagnosticsCallback);
    ros::spin();
    return 0;
}
|
NIFTi-Fraunhofer/nifti_uav
|
mosquito/monitoring/src/batteryInfo2.cpp
|
C++
|
bsd-3-clause
| 4,064
|
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "extensions/browser/api/declarative_net_request/rules_monitor_service.h"
#include <algorithm>
#include <utility>
#include "base/bind.h"
#include "base/callback.h"
#include "base/check_op.h"
#include "base/containers/contains.h"
#include "base/containers/cxx20_erase.h"
#include "base/containers/queue.h"
#include "base/files/file_util.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/task/post_task.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "content/public/browser/browser_task_traits.h"
#include "content/public/browser/browser_thread.h"
#include "extensions/browser/api/declarative_net_request/composite_matcher.h"
#include "extensions/browser/api/declarative_net_request/constants.h"
#include "extensions/browser/api/declarative_net_request/file_backed_ruleset_source.h"
#include "extensions/browser/api/declarative_net_request/file_sequence_helper.h"
#include "extensions/browser/api/declarative_net_request/parse_info.h"
#include "extensions/browser/api/declarative_net_request/rules_count_pair.h"
#include "extensions/browser/api/declarative_net_request/ruleset_manager.h"
#include "extensions/browser/api/declarative_net_request/ruleset_matcher.h"
#include "extensions/browser/api/web_request/permission_helper.h"
#include "extensions/browser/api/web_request/web_request_api.h"
#include "extensions/browser/disable_reason.h"
#include "extensions/browser/extension_file_task_runner.h"
#include "extensions/browser/extension_prefs.h"
#include "extensions/browser/extension_prefs_factory.h"
#include "extensions/browser/extension_registry_factory.h"
#include "extensions/browser/unloaded_extension_reason.h"
#include "extensions/browser/warning_service.h"
#include "extensions/browser/warning_service_factory.h"
#include "extensions/browser/warning_set.h"
#include "extensions/common/api/declarative_net_request.h"
#include "extensions/common/api/declarative_net_request/constants.h"
#include "extensions/common/api/declarative_net_request/dnr_manifest_data.h"
#include "extensions/common/error_utils.h"
#include "extensions/common/extension_id.h"
#include "extensions/common/permissions/api_permission.h"
#include "tools/json_schema_compiler/util.h"
namespace extensions {
namespace declarative_net_request {
namespace {
// Shorthand for the generated declarativeNetRequest API types.
namespace dnr_api = api::declarative_net_request;
// Lazily-created factory for the per-BrowserContext RulesMonitorService.
// Leaky: intentionally never destroyed.
static base::LazyInstance<
    BrowserContextKeyedAPIFactory<RulesMonitorService>>::Leaky g_factory =
    LAZY_INSTANCE_INITIALIZER;
// Strict weak ordering of rulesets by their source's ruleset ID; used to
// restore manifest order before building a CompositeMatcher.
bool RulesetInfoCompareByID(const RulesetInfo& lhs, const RulesetInfo& rhs) {
  const auto& left = lhs.source();
  const auto& right = rhs.source();
  return left.id() < right.id();
}
// Records the result of a ruleset load attempt to UMA.
void LogLoadRulesetResult(LoadRulesetResult result) {
  UMA_HISTOGRAM_ENUMERATION(kLoadRulesetResultHistogram, result);
}
// Whether the `extension` has the permission to use the declarativeNetRequest
// API, via either of the two permission variants.
bool HasAPIPermission(const Extension& extension) {
  const PermissionsData* permissions = extension.permissions_data();
  if (permissions->HasAPIPermission(
          mojom::APIPermissionID::kDeclarativeNetRequest)) {
    return true;
  }
  return permissions->HasAPIPermission(
      mojom::APIPermissionID::kDeclarativeNetRequestWithHostAccess);
}
// Returns whether the extension's allocation should be released. This would
// return true for cases where we expect the extension to be unloaded for a
// while or if the extension directory's contents changed in a reload.
bool ShouldReleaseAllocationOnUnload(const ExtensionPrefs* prefs,
                                     const Extension& extension,
                                     UnloadedExtensionReason reason) {
  // Anything other than DISABLE releases only for blocklisting.
  if (reason != UnloadedExtensionReason::DISABLE)
    return reason == UnloadedExtensionReason::BLOCKLIST;
  // Reload of an unpacked extension is treated as a new install since the
  // extension directory's contents may have changed.
  const bool is_unpacked_reload =
      Manifest::IsUnpackedLocation(extension.location()) &&
      prefs->HasDisableReason(extension.id(), disable_reason::DISABLE_RELOAD);
  if (is_unpacked_reload)
    return true;
  static constexpr int kReleaseAllocationDisableReasons =
      disable_reason::DISABLE_BLOCKED_BY_POLICY;
  return (prefs->GetDisableReasons(extension.id()) &
          kReleaseAllocationDisableReasons) != 0;
}
// Helper to create a RulesetMatcher for the session-scoped ruleset
// corresponding to the given |rules|. On failure, null is returned and |error|
// is populated.
std::unique_ptr<RulesetMatcher> CreateSessionScopedMatcher(
    const ExtensionId& extension_id,
    std::vector<api::declarative_net_request::Rule> rules,
    std::string* error) {
  DCHECK(error);
  // Session rules share their quota with dynamic rules, hence the combined
  // dynamic-and-session limit below.
  RulesetSource source(kSessionRulesetID, GetDynamicAndSessionRuleLimit(),
                       extension_id, true /* enabled */);
  // Raise hard errors for invalid or overly-large regex rules instead of
  // silently skipping them; the error is propagated to the caller.
  auto parse_flags = RulesetSource::kRaiseErrorOnInvalidRules |
                     RulesetSource::kRaiseErrorOnLargeRegexRules;
  ParseInfo info = source.IndexRules(std::move(rules), parse_flags);
  if (info.has_error()) {
    *error = info.error();
    return nullptr;
  }
  base::span<const uint8_t> buffer = info.GetBuffer();
  std::unique_ptr<RulesetMatcher> matcher;
  LoadRulesetResult result = source.CreateVerifiedMatcher(
      std::string(reinterpret_cast<const char*>(buffer.data()), buffer.size()),
      &matcher);
  // Creating a verified matcher for session scoped rules should never result in
  // an error, since these are not persisted to disk and are not affected by
  // related corruption and verification issues.
  DCHECK_EQ(LoadRulesetResult::kSuccess, result)
      << "Loading session scoped ruleset failed unexpectedly "
      << static_cast<int>(result);
  return matcher;
}
// Maps the extension's DNR permission variant to whether host permissions are
// always required: kDeclarativeNetRequest alone does not force them, while the
// kDeclarativeNetRequestWithHostAccess variant does.
HostPermissionsAlwaysRequired GetHostPermissionsAlwaysRequired(
    const Extension& extension) {
  DCHECK(HasAPIPermission(extension));
  const bool has_full_permission =
      extension.permissions_data()->HasAPIPermission(
          mojom::APIPermissionID::kDeclarativeNetRequest);
  return has_full_permission ? HostPermissionsAlwaysRequired::kFalse
                             : HostPermissionsAlwaysRequired::kTrue;
}
} // namespace
// Helper to bridge tasks to FileSequenceHelper. Lives on the UI thread.
class RulesMonitorService::FileSequenceBridge {
 public:
  FileSequenceBridge()
      : file_task_runner_(GetExtensionFileTaskRunner()),
        file_sequence_helper_(std::make_unique<FileSequenceHelper>()) {}
  FileSequenceBridge(const FileSequenceBridge&) = delete;
  FileSequenceBridge& operator=(const FileSequenceBridge&) = delete;
  // Destroys the helper on the file task runner, so tasks posted before
  // destruction still find it alive (see base::Unretained notes below).
  ~FileSequenceBridge() {
    file_task_runner_->DeleteSoon(FROM_HERE, std::move(file_sequence_helper_));
  }
  // Posts a ruleset load to the file task runner; |ui_callback| receives the
  // result back on the UI thread.
  void LoadRulesets(
      LoadRequestData load_data,
      FileSequenceHelper::LoadRulesetsUICallback ui_callback) const {
    // base::Unretained is safe here because we trigger the destruction of
    // |file_sequence_helper_| on |file_task_runner_| from our destructor. Hence
    // it is guaranteed to be alive when |load_ruleset_task| is run.
    base::OnceClosure load_ruleset_task =
        base::BindOnce(&FileSequenceHelper::LoadRulesets,
                       base::Unretained(file_sequence_helper_.get()),
                       std::move(load_data), std::move(ui_callback));
    file_task_runner_->PostTask(FROM_HERE, std::move(load_ruleset_task));
  }
  // Posts a dynamic rules update to the file task runner; |ui_callback|
  // receives the result back on the UI thread.
  void UpdateDynamicRules(
      LoadRequestData load_data,
      std::vector<int> rule_ids_to_remove,
      std::vector<dnr_api::Rule> rules_to_add,
      const RulesCountPair& rule_limit,
      FileSequenceHelper::UpdateDynamicRulesUICallback ui_callback) const {
    // base::Unretained is safe here because we trigger the destruction of
    // |file_sequence_state_| on |file_task_runner_| from our destructor. Hence
    // it is guaranteed to be alive when |update_dynamic_rules_task| is run.
    base::OnceClosure update_dynamic_rules_task = base::BindOnce(
        &FileSequenceHelper::UpdateDynamicRules,
        base::Unretained(file_sequence_helper_.get()), std::move(load_data),
        std::move(rule_ids_to_remove), std::move(rules_to_add), rule_limit,
        std::move(ui_callback));
    file_task_runner_->PostTask(FROM_HERE,
                                std::move(update_dynamic_rules_task));
  }
 private:
  scoped_refptr<base::SequencedTaskRunner> file_task_runner_;
  // Created on the UI thread. Accessed and destroyed on |file_task_runner_|.
  // Maintains state needed on |file_task_runner_|.
  std::unique_ptr<FileSequenceHelper> file_sequence_helper_;
};
// Helps to ensure FIFO ordering of api calls and that only a single api call
// proceeds at a time.
class RulesMonitorService::ApiCallQueue {
 public:
  ApiCallQueue() = default;
  ~ApiCallQueue() = default;
  ApiCallQueue(const ApiCallQueue&) = delete;
  ApiCallQueue& operator=(const ApiCallQueue&) = delete;
  ApiCallQueue(ApiCallQueue&&) = delete;
  ApiCallQueue& operator=(ApiCallQueue&&) = delete;
  // Signals to start executing API calls. Unless signaled so, the ApiCallQueue
  // will queue api calls for future execution.
  // Note that this can start running a queued api call synchronously.
  void SetReadyToExecuteApiCalls() {
    DCHECK(!ready_to_execute_api_calls_);
    DCHECK(!executing_api_call_);
    ready_to_execute_api_calls_ = true;
    ExecuteApiCallIfNecessary();
  }
  // Executes the api call or queues it for execution if the ApiCallQueue is not
  // ready or there is an existing api call in progress.
  // `unbound_api_call` will be invoked when the queue is ready, and is
  // responsible for invoking `api_callback` upon its completion. Following
  // this, `ApiCallQueue::OnApiCallCompleted()` will be called in the next event
  // cycle, triggering the next call (if any).
  void ExecuteOrQueueApiCall(
      base::OnceCallback<void(ApiCallback)> unbound_api_call,
      ApiCallback api_callback) {
    // Wrap the `api_callback` in a synthetic callback to ensure
    // `OnApiCallCompleted()` is run after each api call. Note we schedule
    // `OnApiCallCompleted()` to run in the next event cycle to ensure any
    // side-effects from the last run api call are "committed" by the time the
    // next api call executes.
    auto post_async = [](base::OnceClosure async_task) {
      base::SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                       std::move(async_task));
    };
    // Bound to a weak pointer: if this queue is destroyed before the api call
    // finishes, the completion hop is simply dropped.
    base::OnceClosure async_task = base::BindOnce(
        &ApiCallQueue::OnApiCallCompleted, weak_factory_.GetWeakPtr());
    ApiCallback wrapped_callback =
        std::move(api_callback)
            .Then(base::BindOnce(post_async, std::move(async_task)));
    base::OnceClosure api_call = base::BindOnce(std::move(unbound_api_call),
                                                std::move(wrapped_callback));
    api_call_queue_.push(std::move(api_call));
    if (!ready_to_execute_api_calls_ || executing_api_call_)
      return;
    // The just-pushed call must be the only queued one; run it immediately.
    DCHECK_EQ(1u, api_call_queue_.size());
    ExecuteApiCallIfNecessary();
  }
 private:
  // Signals that the last posted api call has completed.
  void OnApiCallCompleted() {
    DCHECK(executing_api_call_);
    executing_api_call_ = false;
    ExecuteApiCallIfNecessary();
  }
  // Executes the api call at the front of the queue if there is one.
  void ExecuteApiCallIfNecessary() {
    DCHECK(!executing_api_call_);
    DCHECK(ready_to_execute_api_calls_);
    if (api_call_queue_.empty())
      return;
    executing_api_call_ = true;
    base::OnceClosure api_call = std::move(api_call_queue_.front());
    api_call_queue_.pop();
    std::move(api_call).Run();
  }
  // True while an api call is running (until its completion hop fires).
  bool executing_api_call_ = false;
  // Set once by SetReadyToExecuteApiCalls().
  bool ready_to_execute_api_calls_ = false;
  base::queue<base::OnceClosure> api_call_queue_;
  // Must be the last member variable. See WeakPtrFactory documentation for
  // details.
  base::WeakPtrFactory<ApiCallQueue> weak_factory_{this};
};
// static
// Returns the lazily-created keyed-service factory singleton.
BrowserContextKeyedAPIFactory<RulesMonitorService>*
RulesMonitorService::GetFactoryInstance() {
  return g_factory.Pointer();
}
// static
// Constructs a service instance directly (bypassing the keyed-service
// factory) for use in tests.
std::unique_ptr<RulesMonitorService>
RulesMonitorService::CreateInstanceForTesting(
    content::BrowserContext* context) {
  return base::WrapUnique(new RulesMonitorService(context));
}
// static
// Returns the RulesMonitorService associated with |browser_context|.
RulesMonitorService* RulesMonitorService::Get(
    content::BrowserContext* browser_context) {
  return BrowserContextKeyedAPIFactory<RulesMonitorService>::Get(
      browser_context);
}
// Queues an updateDynamicRules API call for |extension|. Dynamic and session
// rule updates share one per-extension queue so their combined rule limit is
// computed without races (see UpdateDynamicRulesInternal).
void RulesMonitorService::UpdateDynamicRules(
    const Extension& extension,
    std::vector<int> rule_ids_to_remove,
    std::vector<api::declarative_net_request::Rule> rules_to_add,
    ApiCallback callback) {
  // Sanity check that this is only called for an enabled extension.
  DCHECK(extension_registry_->enabled_extensions().Contains(extension.id()));
  update_dynamic_or_session_rules_queue_map_[extension.id()]
      .ExecuteOrQueueApiCall(
          base::BindOnce(&RulesMonitorService::UpdateDynamicRulesInternal,
                         weak_factory_.GetWeakPtr(), extension.id(),
                         std::move(rule_ids_to_remove),
                         std::move(rules_to_add)),
          std::move(callback));
}
// Queues an updateEnabledRulesets API call for |extension|. Uses a separate
// per-extension queue from dynamic/session rule updates.
void RulesMonitorService::UpdateEnabledStaticRulesets(
    const Extension& extension,
    std::set<RulesetID> ids_to_disable,
    std::set<RulesetID> ids_to_enable,
    ApiCallback callback) {
  // Sanity check that this is only called for an enabled extension.
  DCHECK(extension_registry_->enabled_extensions().Contains(extension.id()));
  update_enabled_rulesets_queue_map_[extension.id()].ExecuteOrQueueApiCall(
      base::BindOnce(&RulesMonitorService::UpdateEnabledStaticRulesetsInternal,
                     weak_factory_.GetWeakPtr(), extension.id(),
                     std::move(ids_to_disable), std::move(ids_to_enable)),
      std::move(callback));
}
// Returns the stored session rules for |extension_id| as a base::ListValue,
// or a shared empty list when the extension has no session rules.
const base::ListValue& RulesMonitorService::GetSessionRulesValue(
    const ExtensionId& extension_id) const {
  static const base::NoDestructor<base::ListValue> empty_rules;
  auto iter = session_rules_.find(extension_id);
  if (iter == session_rules_.end())
    return *empty_rules;
  return iter->second;
}
// Deserializes the stored session rules for |extension_id| back into API Rule
// structs. Conversion is expected to succeed (DCHECKed) since the stored
// value was produced from valid rules in UpdateSessionRulesInternal.
std::vector<api::declarative_net_request::Rule>
RulesMonitorService::GetSessionRules(const ExtensionId& extension_id) const {
  std::vector<api::declarative_net_request::Rule> result;
  std::u16string error;
  bool populate_result = json_schema_compiler::util::PopulateArrayFromList(
      GetSessionRulesValue(extension_id).GetList(), &result, &error);
  DCHECK(populate_result);
  DCHECK(error.empty());
  return result;
}
// Queues an updateSessionRules API call for |extension| on the queue shared
// with dynamic rule updates (the two rule types share a combined limit).
void RulesMonitorService::UpdateSessionRules(
    const Extension& extension,
    std::vector<int> rule_ids_to_remove,
    std::vector<api::declarative_net_request::Rule> rules_to_add,
    ApiCallback callback) {
  // Sanity check that this is only called for an enabled extension.
  DCHECK(extension_registry_->enabled_extensions().Contains(extension.id()));
  update_dynamic_or_session_rules_queue_map_[extension.id()]
      .ExecuteOrQueueApiCall(
          base::BindOnce(&RulesMonitorService::UpdateSessionRulesInternal,
                         weak_factory_.GetWeakPtr(), extension.id(),
                         std::move(rule_ids_to_remove),
                         std::move(rules_to_add)),
          std::move(callback));
}
// Returns the rule counts for ruleset |id| of |extension_id|, or a
// default-constructed (zero) pair when the extension has no matcher or the
// matcher does not contain the requested ruleset.
RulesCountPair RulesMonitorService::GetRulesCountPair(
    const ExtensionId& extension_id,
    RulesetID id) const {
  if (const CompositeMatcher* composite =
          ruleset_manager_.GetMatcherForExtension(extension_id)) {
    if (const RulesetMatcher* ruleset = composite->GetMatcherWithID(id))
      return ruleset->GetRulesCountPair();
  }
  return RulesCountPair();
}
// Wires the service to its per-context dependencies and starts observing the
// ExtensionRegistry for load/unload/install/uninstall events.
RulesMonitorService::RulesMonitorService(
    content::BrowserContext* browser_context)
    : file_sequence_bridge_(std::make_unique<FileSequenceBridge>()),
      prefs_(ExtensionPrefs::Get(browser_context)),
      extension_registry_(ExtensionRegistry::Get(browser_context)),
      warning_service_(WarningService::Get(browser_context)),
      context_(browser_context),
      ruleset_manager_(browser_context),
      action_tracker_(browser_context),
      global_rules_tracker_(prefs_, extension_registry_) {
  registry_observation_.Observe(extension_registry_);
}
RulesMonitorService::~RulesMonitorService() = default;
/* Description of thread hops for various scenarios:
On ruleset load success:
- UI -> File -> UI.
- The File sequence might reindex the ruleset while parsing JSON OOP.
On ruleset load failure:
- UI -> File -> UI.
- The File sequence might reindex the ruleset while parsing JSON OOP.
On ruleset unload:
- UI.
On dynamic rules update:
- UI -> File -> UI -> IPC to extension
*/
// Called when an extension is about to be installed. For updates of packed
// DNR extensions, records a pref so the extension keeps its pre-update rule
// allocation on the next load.
void RulesMonitorService::OnExtensionWillBeInstalled(
    content::BrowserContext* browser_context,
    const Extension* extension,
    bool is_update,
    const std::string& old_name) {
  if (!HasAPIPermission(*extension))
    return;
  // Fresh installs and unpacked reloads don't retain any allocation.
  if (!is_update || Manifest::IsUnpackedLocation(extension->location()))
    return;
  // Allow the extension to retain its pre-update allocation during the next
  // extension load. This can allow the extension to enable some
  // non-manifest-enabled rulesets and to retain much of its pre-update
  // behavior. The preference is set in OnExtensionWillBeInstalled instead of
  // OnExtensionInstalled because OnExtensionInstalled is called after
  // OnExtensionLoaded.
  prefs_->SetDNRKeepExcessAllocation(extension->id(), true);
}
// Kicks off the initial ruleset load for |extension|: gathers the enabled
// static rulesets plus the dynamic ruleset (when present) and loads them on
// the file sequence, finishing in OnInitialRulesetsLoadedFromDisk().
void RulesMonitorService::OnExtensionLoaded(
    content::BrowserContext* browser_context,
    const Extension* extension) {
  DCHECK_EQ(context_, browser_context);
  if (!HasAPIPermission(*extension))
    return;
  LoadRequestData load_data(extension->id());
  // Out-param reused for each checksum lookup below; only read when the
  // corresponding lookup succeeds.
  int expected_ruleset_checksum;
  // Static rulesets.
  {
    std::vector<FileBackedRulesetSource> sources =
        FileBackedRulesetSource::CreateStatic(
            *extension, FileBackedRulesetSource::RulesetFilter::kIncludeAll);
    // Prefs override the manifest defaults once rulesets have been toggled;
    // otherwise fall back to enabled_by_default().
    absl::optional<std::set<RulesetID>> prefs_enabled_rulesets =
        prefs_->GetDNREnabledStaticRulesets(extension->id());
    bool ruleset_failed_to_load = false;
    for (auto& source : sources) {
      bool enabled = prefs_enabled_rulesets
                         ? base::Contains(*prefs_enabled_rulesets, source.id())
                         : source.enabled_by_default();
      bool ignored =
          prefs_->ShouldIgnoreDNRRuleset(extension->id(), source.id());
      if (!enabled || ignored)
        continue;
      if (!prefs_->GetDNRStaticRulesetChecksum(extension->id(), source.id(),
                                               &expected_ruleset_checksum)) {
        // This might happen on prefs corruption.
        LogLoadRulesetResult(LoadRulesetResult::kErrorChecksumNotFound);
        ruleset_failed_to_load = true;
        continue;
      }
      RulesetInfo static_ruleset(std::move(source));
      static_ruleset.set_expected_checksum(expected_ruleset_checksum);
      load_data.rulesets.push_back(std::move(static_ruleset));
    }
    if (ruleset_failed_to_load) {
      warning_service_->AddWarnings(
          {Warning::CreateRulesetFailedToLoadWarning(load_data.extension_id)});
    }
  }
  // Dynamic ruleset
  if (prefs_->GetDNRDynamicRulesetChecksum(extension->id(),
                                           &expected_ruleset_checksum)) {
    RulesetInfo dynamic_ruleset(FileBackedRulesetSource::CreateDynamic(
        browser_context, extension->id()));
    dynamic_ruleset.set_expected_checksum(expected_ruleset_checksum);
    load_data.rulesets.push_back(std::move(dynamic_ruleset));
  }
  // Nothing to read from disk; finish synchronously so the api call queues
  // still get unblocked.
  if (load_data.rulesets.empty()) {
    OnInitialRulesetsLoadedFromDisk(std::move(load_data));
    return;
  }
  auto load_ruleset_callback =
      base::BindOnce(&RulesMonitorService::OnInitialRulesetsLoadedFromDisk,
                     weak_factory_.GetWeakPtr());
  file_sequence_bridge_->LoadRulesets(std::move(load_data),
                                      std::move(load_ruleset_callback));
}
// Tears down per-extension state on unload: clears allocation bookkeeping
// where appropriate, drops pending api calls and removes the extension's
// CompositeMatcher.
void RulesMonitorService::OnExtensionUnloaded(
    content::BrowserContext* browser_context,
    const Extension* extension,
    UnloadedExtensionReason reason) {
  DCHECK_EQ(context_, browser_context);
  if (!HasAPIPermission(*extension))
    return;
  // If the extension is unloaded for any reason other than an update, the
  // unused rule allocation should not be kept for this extension the next
  // time its rulesets are loaded, as it is no longer "the first load after an
  // update".
  if (reason != UnloadedExtensionReason::UPDATE)
    prefs_->SetDNRKeepExcessAllocation(extension->id(), false);
  if (ShouldReleaseAllocationOnUnload(prefs_, *extension, reason))
    global_rules_tracker_.ClearExtensionAllocation(extension->id());
  // Erase the api call queues for the extension. Any un-executed api calls
  // should just be ignored now given the extension is being unloaded.
  update_enabled_rulesets_queue_map_.erase(extension->id());
  update_dynamic_or_session_rules_queue_map_.erase(extension->id());
  // Return early if the extension does not have an active indexed ruleset.
  if (!ruleset_manager_.GetMatcherForExtension(extension->id()))
    return;
  RemoveCompositeMatcher(extension->id());
}
// Cleans up persistent state on uninstall: session rules, global rule
// allocation and the on-disk dynamic ruleset directory.
void RulesMonitorService::OnExtensionUninstalled(
    content::BrowserContext* browser_context,
    const Extension* extension,
    UninstallReason reason) {
  DCHECK_EQ(context_, browser_context);
  if (!HasAPIPermission(*extension))
    return;
  session_rules_.erase(extension->id());
  // Skip if the extension will be reinstalled soon.
  if (reason == UNINSTALL_REASON_REINSTALL)
    return;
  global_rules_tracker_.ClearExtensionAllocation(extension->id());
  // Skip if the extension doesn't have a dynamic ruleset.
  int dynamic_checksum;
  if (!prefs_->GetDNRDynamicRulesetChecksum(extension->id(),
                                            &dynamic_checksum)) {
    return;
  }
  // Cleanup the dynamic rules directory for the extension.
  // TODO(karandeepb): It's possible that this task fails, e.g. during shutdown.
  // Make this more robust.
  FileBackedRulesetSource source =
      FileBackedRulesetSource::CreateDynamic(browser_context, extension->id());
  // Both files live in the same directory, so deleting that directory removes
  // the JSON and the indexed ruleset together.
  DCHECK_EQ(source.json_path().DirName(), source.indexed_path().DirName());
  GetExtensionFileTaskRunner()->PostTask(
      FROM_HERE, base::BindOnce(base::GetDeleteFileCallback(),
                                source.json_path().DirName()));
}
// Performs the actual dynamic rules update for UpdateDynamicRules(). Runs
// from the per-extension ApiCallQueue, so at most one dynamic/session update
// is in flight per extension.
void RulesMonitorService::UpdateDynamicRulesInternal(
    const ExtensionId& extension_id,
    std::vector<int> rule_ids_to_remove,
    std::vector<api::declarative_net_request::Rule> rules_to_add,
    ApiCallback callback) {
  if (!extension_registry_->enabled_extensions().Contains(extension_id)) {
    // There is no enabled extension to respond to. While this is probably a
    // no-op, still dispatch the callback to ensure any related bookkeeping is
    // done.
    std::move(callback).Run(absl::nullopt /* error */);
    return;
  }
  LoadRequestData data(extension_id);
  // Calculate available shared rule limits. These limits won't be affected by
  // another simultaneous api call since we ensure that for a given extension,
  // only up to 1 updateDynamicRules/updateSessionRules call is in progress. See
  // the usage of `ApiCallQueue`.
  RulesCountPair shared_rules_limit(GetDynamicAndSessionRuleLimit(),
                                    GetRegexRuleLimit());
  RulesCountPair session_rules_count =
      GetRulesCountPair(extension_id, kSessionRulesetID);
  RulesCountPair available_limit = shared_rules_limit - session_rules_count;
  // We are updating the indexed ruleset. Don't set the expected checksum since
  // it'll change.
  data.rulesets.emplace_back(
      FileBackedRulesetSource::CreateDynamic(context_, extension_id));
  auto update_rules_callback =
      base::BindOnce(&RulesMonitorService::OnDynamicRulesUpdated,
                     weak_factory_.GetWeakPtr(), std::move(callback));
  file_sequence_bridge_->UpdateDynamicRules(
      std::move(data), std::move(rule_ids_to_remove), std::move(rules_to_add),
      available_limit, std::move(update_rules_callback));
}
// Applies a session rules update in memory: computes the new rule list,
// enforces the limits shared with dynamic rules, rebuilds the session-scoped
// matcher and stores the serialized rules.
void RulesMonitorService::UpdateSessionRulesInternal(
    const ExtensionId& extension_id,
    std::vector<int> rule_ids_to_remove,
    std::vector<api::declarative_net_request::Rule> rules_to_add,
    ApiCallback callback) {
  const Extension* extension =
      extension_registry_->enabled_extensions().GetByID(extension_id);
  if (!extension) {
    // There is no enabled extension to respond to. While this is probably a
    // no-op, still dispatch the callback to ensure any related bookkeeping is
    // done.
    std::move(callback).Run(absl::nullopt /* error */);
    return;
  }
  // Start from the current session rules, drop the removed IDs, then append
  // the additions.
  std::vector<api::declarative_net_request::Rule> new_rules =
      GetSessionRules(extension_id);
  std::set<int> ids_to_remove(rule_ids_to_remove.begin(),
                              rule_ids_to_remove.end());
  base::EraseIf(new_rules, [&ids_to_remove](const dnr_api::Rule& rule) {
    return base::Contains(ids_to_remove, rule.id);
  });
  new_rules.insert(new_rules.end(),
                   std::make_move_iterator(rules_to_add.begin()),
                   std::make_move_iterator(rules_to_add.end()));
  // Check if the update would exceed shared rule limits.
  {
    RulesCountPair dynamic_rule_count =
        GetRulesCountPair(extension_id, kDynamicRulesetID);
    RulesCountPair shared_rule_limit(GetDynamicAndSessionRuleLimit(),
                                     GetRegexRuleLimit());
    RulesCountPair available_limit = shared_rule_limit - dynamic_rule_count;
    if (new_rules.size() > available_limit.rule_count) {
      std::move(callback).Run(kSessionRuleCountExceeded);
      return;
    }
    size_t regex_rule_count = std::count_if(
        new_rules.begin(), new_rules.end(), [](const dnr_api::Rule& rule) {
          return !!rule.condition.regex_filter;
        });
    if (regex_rule_count > available_limit.regex_rule_count) {
      std::move(callback).Run(kSessionRegexRuleCountExceeded);
      return;
    }
  }
  // Serialize before |new_rules| is moved into the matcher below; this value
  // becomes the stored representation in |session_rules_|.
  std::unique_ptr<base::ListValue> new_rules_value = base::ListValue::From(
      json_schema_compiler::util::CreateValueFromArray(new_rules));
  DCHECK(new_rules_value);
  std::string error;
  std::unique_ptr<RulesetMatcher> matcher =
      CreateSessionScopedMatcher(extension_id, std::move(new_rules), &error);
  if (!matcher) {
    std::move(callback).Run(std::move(error));
    return;
  }
  session_rules_[extension_id] = std::move(*new_rules_value);
  UpdateRulesetMatcher(*extension, std::move(matcher));
  std::move(callback).Run(absl::nullopt /* error */);
}
// Loads the to-be-enabled static rulesets from disk; the actual swap of
// enabled rulesets happens in OnNewStaticRulesetsLoaded().
void RulesMonitorService::UpdateEnabledStaticRulesetsInternal(
    const ExtensionId& extension_id,
    std::set<RulesetID> ids_to_disable,
    std::set<RulesetID> ids_to_enable,
    ApiCallback callback) {
  const Extension* extension =
      extension_registry_->enabled_extensions().GetByID(extension_id);
  if (!extension) {
    // There is no enabled extension to respond to. While this is probably a
    // no-op, still dispatch the callback to ensure any related bookkeeping is
    // done.
    std::move(callback).Run(absl::nullopt /* error */);
    return;
  }
  LoadRequestData load_data(extension_id);
  int expected_ruleset_checksum = -1;
  for (const RulesetID& id_to_enable : ids_to_enable) {
    const DNRManifestData::RulesetInfo& info =
        DNRManifestData::GetRuleset(*extension, id_to_enable);
    RulesetInfo static_ruleset(
        FileBackedRulesetSource::CreateStatic(*extension, info));
    // Take note of the expected checksum if this ruleset has been indexed in
    // the past.
    if (prefs_->GetDNRStaticRulesetChecksum(extension_id, id_to_enable,
                                            &expected_ruleset_checksum)) {
      static_ruleset.set_expected_checksum(expected_ruleset_checksum);
    }
    load_data.rulesets.push_back(std::move(static_ruleset));
  }
  auto load_ruleset_callback =
      base::BindOnce(&RulesMonitorService::OnNewStaticRulesetsLoaded,
                     weak_factory_.GetWeakPtr(), std::move(callback),
                     std::move(ids_to_disable), std::move(ids_to_enable));
  file_sequence_bridge_->LoadRulesets(std::move(load_data),
                                      std::move(load_ruleset_callback));
}
// Completion handler for the initial ruleset load. Builds the extension's
// CompositeMatcher (session + dynamic + static rulesets) while enforcing the
// global static rule limit, then unblocks the extension's api call queues.
void RulesMonitorService::OnInitialRulesetsLoadedFromDisk(
    LoadRequestData load_data) {
  if (test_observer_)
    test_observer_->OnRulesetLoadComplete(load_data.extension_id);
  LogMetricsAndUpdateChecksumsIfNeeded(load_data);
  // It's possible that the extension has been disabled since the initial load
  // ruleset request. If it's disabled, do nothing.
  const Extension* extension =
      extension_registry_->enabled_extensions().GetByID(load_data.extension_id);
  if (!extension)
    return;
  // Load session-scoped ruleset.
  std::vector<api::declarative_net_request::Rule> session_rules =
      GetSessionRules(load_data.extension_id);
  // Allocate one additional space for the session-scoped ruleset if needed.
  CompositeMatcher::MatcherList matchers;
  matchers.reserve(load_data.rulesets.size() + (session_rules.empty() ? 0 : 1));
  if (!session_rules.empty()) {
    std::string error;
    std::unique_ptr<RulesetMatcher> session_matcher =
        CreateSessionScopedMatcher(load_data.extension_id,
                                   std::move(session_rules), &error);
    DCHECK(session_matcher)
        << "Loading session scoped ruleset failed unexpectedly: " << error;
    matchers.push_back(std::move(session_matcher));
  }
  // Sort by ruleset IDs. This will ensure that the static rulesets are in the
  // order in which they are defined in the manifest.
  std::sort(load_data.rulesets.begin(), load_data.rulesets.end(),
            &RulesetInfoCompareByID);
  // Build the CompositeMatcher for the extension. Also enforce rules limit
  // across the enabled static rulesets. Note: we don't enforce the rules limit
  // at install time (by raising a hard error) to maintain forwards
  // compatibility. Since we iterate based on the order of ruleset ID, we'll
  // give more preference to rulesets occurring first in the manifest.
  RulesCountPair static_rule_count;
  bool notify_ruleset_failed_to_load = false;
  bool global_rule_limit_exceeded = false;
  // Static limit = guaranteed minimum plus whatever extra this extension has
  // been allocated from the global pool.
  RulesCountPair static_rule_limit(
      global_rules_tracker_.GetAvailableAllocation(load_data.extension_id) +
          GetStaticGuaranteedMinimumRuleCount(),
      GetRegexRuleLimit());
  for (RulesetInfo& ruleset : load_data.rulesets) {
    if (!ruleset.did_load_successfully()) {
      notify_ruleset_failed_to_load = true;
      continue;
    }
    std::unique_ptr<RulesetMatcher> matcher = ruleset.TakeMatcher();
    RulesCountPair matcher_count = matcher->GetRulesCountPair();
    // Per-ruleset limits should have been enforced during
    // indexing/installation.
    DCHECK_LE(matcher_count.regex_rule_count,
              static_cast<size_t>(GetRegexRuleLimit()));
    DCHECK_LE(matcher_count.rule_count, ruleset.source().rule_count_limit());
    // The dynamic ruleset does not count towards the static limits.
    if (ruleset.source().is_dynamic_ruleset()) {
      matchers.push_back(std::move(matcher));
      continue;
    }
    RulesCountPair new_ruleset_count = static_rule_count + matcher_count;
    if (new_ruleset_count.rule_count > static_rule_limit.rule_count) {
      global_rule_limit_exceeded = true;
      continue;
    }
    // Rulesets exceeding the regex limit are skipped without a user warning.
    if (new_ruleset_count.regex_rule_count > static_rule_limit.regex_rule_count)
      continue;
    static_rule_count = new_ruleset_count;
    matchers.push_back(std::move(matcher));
  }
  if (notify_ruleset_failed_to_load) {
    warning_service_->AddWarnings(
        {Warning::CreateRulesetFailedToLoadWarning(load_data.extension_id)});
  }
  if (global_rule_limit_exceeded) {
    warning_service_->AddWarnings(
        {Warning::CreateEnabledRuleCountExceededWarning(
            load_data.extension_id)});
  }
  bool allocation_updated = global_rules_tracker_.OnExtensionRuleCountUpdated(
      load_data.extension_id, static_rule_count.rule_count);
  DCHECK(allocation_updated);
  AddCompositeMatcher(*extension, std::move(matchers));
  // Start processing api calls now that the initial ruleset load has completed.
  update_enabled_rulesets_queue_map_[load_data.extension_id]
      .SetReadyToExecuteApiCalls();
  update_dynamic_or_session_rules_queue_map_[load_data.extension_id]
      .SetReadyToExecuteApiCalls();
}
// Completion callback for an updateEnabledRulesets() API call.
// |ids_to_disable|/|ids_to_enable| describe the requested change and
// |load_data| carries the freshly loaded static rulesets. Validates the
// ruleset-count, regex and global rule limits before mutating the extension's
// CompositeMatcher. |callback| is invoked exactly once on every path.
void RulesMonitorService::OnNewStaticRulesetsLoaded(
    ApiCallback callback,
    std::set<RulesetID> ids_to_disable,
    std::set<RulesetID> ids_to_enable,
    LoadRequestData load_data) {
  LogMetricsAndUpdateChecksumsIfNeeded(load_data);
  // It's possible that the extension has been disabled since the initial
  // request. If it's disabled, return early.
  const Extension* extension = extension_registry_->GetExtensionById(
      load_data.extension_id, ExtensionRegistry::ENABLED);
  if (!extension) {
    // Still dispatch the |callback|, even though it's probably a no-op.
    std::move(callback).Run(absl::nullopt /* error */);
    return;
  }
  int static_ruleset_count = 0;
  RulesCountPair static_rule_count;
  CompositeMatcher* matcher =
      ruleset_manager_.GetMatcherForExtension(load_data.extension_id);
  if (matcher) {
    // Iterate over the existing matchers to compute `static_rule_count` and
    // `static_ruleset_count`.
    for (const std::unique_ptr<RulesetMatcher>& ruleset_matcher :
         matcher->matchers()) {
      // Exclude since we are only including static rulesets.
      if (ruleset_matcher->id() == kDynamicRulesetID)
        continue;
      // Exclude since we'll be removing this |matcher|.
      if (base::Contains(ids_to_disable, ruleset_matcher->id()))
        continue;
      // Exclude to prevent double counting. This will be a part of
      // |new_matchers| below.
      if (base::Contains(ids_to_enable, ruleset_matcher->id()))
        continue;
      static_ruleset_count += 1;
      static_rule_count += ruleset_matcher->GetRulesCountPair();
    }
  }
  CompositeMatcher::MatcherList new_matchers;
  new_matchers.reserve(load_data.rulesets.size());
  for (RulesetInfo& ruleset : load_data.rulesets) {
    // Any single failed load fails the whole API call.
    if (!ruleset.did_load_successfully()) {
      std::move(callback).Run(kInternalErrorUpdatingEnabledRulesets);
      return;
    }
    std::unique_ptr<RulesetMatcher> ruleset_matcher = ruleset.TakeMatcher();
    RulesCountPair matcher_count = ruleset_matcher->GetRulesCountPair();
    // Per-ruleset limits should have been enforced during
    // indexing/installation.
    DCHECK_LE(matcher_count.regex_rule_count,
              static_cast<size_t>(GetRegexRuleLimit()));
    DCHECK_LE(matcher_count.rule_count, ruleset.source().rule_count_limit());
    static_ruleset_count += 1;
    static_rule_count += matcher_count;
    new_matchers.push_back(std::move(ruleset_matcher));
  }
  if (static_ruleset_count > dnr_api::MAX_NUMBER_OF_ENABLED_STATIC_RULESETS) {
    std::move(callback).Run(
        declarative_net_request::kEnabledRulesetCountExceeded);
    return;
  }
  if (static_rule_count.regex_rule_count >
      static_cast<size_t>(GetRegexRuleLimit())) {
    std::move(callback).Run(kEnabledRulesetsRegexRuleCountExceeded);
    return;
  }
  // Attempt to update the extension's extra rule count. If this update cannot
  // be completed without exceeding the global limit, then the update is not
  // applied and an error is returned.
  if (!global_rules_tracker_.OnExtensionRuleCountUpdated(
          load_data.extension_id, static_rule_count.rule_count)) {
    std::move(callback).Run(kEnabledRulesetsRuleCountExceeded);
    return;
  }
  if (!matcher) {
    // The extension didn't have any existing rulesets. Hence just add a new
    // CompositeMatcher with |new_matchers|.
    AddCompositeMatcher(*extension, std::move(new_matchers));
    std::move(callback).Run(absl::nullopt);
    return;
  }
  bool had_extra_headers_matcher = ruleset_manager_.HasAnyExtraHeadersMatcher();
  matcher->RemoveRulesetsWithIDs(ids_to_disable);
  matcher->AddOrUpdateRulesets(std::move(new_matchers));
  // Persist the new enabled-static-ruleset set so it survives restarts.
  prefs_->SetDNREnabledStaticRulesets(load_data.extension_id,
                                      matcher->ComputeStaticRulesetIDs());
  AdjustExtraHeaderListenerCountIfNeeded(had_extra_headers_matcher);
  std::move(callback).Run(absl::nullopt);
}
// Completion callback for a dynamic-rules update. Replies to the extension
// first, then — if the extension is still enabled and the ruleset reloaded
// cleanly — swaps the rebuilt dynamic matcher into place.
void RulesMonitorService::OnDynamicRulesUpdated(
    ApiCallback callback,
    LoadRequestData load_data,
    absl::optional<std::string> error) {
  // A dynamic-rules update always operates on exactly one ruleset.
  DCHECK_EQ(1u, load_data.rulesets.size());
  const bool has_error = error.has_value();
  LogMetricsAndUpdateChecksumsIfNeeded(load_data);
  // Respond to the extension.
  std::move(callback).Run(std::move(error));
  // It's possible that the extension has been disabled since the initial update
  // rule request. If it's disabled, do nothing.
  const Extension* extension =
      extension_registry_->enabled_extensions().GetByID(load_data.extension_id);
  if (!extension)
    return;
  RulesetInfo& dynamic_ruleset = load_data.rulesets[0];
  // A load failure and a reported |error| must coincide.
  DCHECK_EQ(dynamic_ruleset.did_load_successfully(), !has_error);
  if (!dynamic_ruleset.did_load_successfully())
    return;
  DCHECK(dynamic_ruleset.new_checksum());
  // Update the dynamic ruleset.
  UpdateRulesetMatcher(*extension, dynamic_ruleset.TakeMatcher());
}
// Drops the CompositeMatcher and any tracked action data for |extension_id|,
// then reconciles the extra-headers listener count if removal changed whether
// any matcher still requires extra headers.
void RulesMonitorService::RemoveCompositeMatcher(
    const ExtensionId& extension_id) {
  // Snapshot the extra-headers state before mutating the ruleset manager.
  const bool had_any_extra_headers =
      ruleset_manager_.HasAnyExtraHeadersMatcher();
  ruleset_manager_.RemoveRuleset(extension_id);
  action_tracker_.ClearExtensionData(extension_id);
  AdjustExtraHeaderListenerCountIfNeeded(had_any_extra_headers);
}
void RulesMonitorService::AddCompositeMatcher(
const Extension& extension,
CompositeMatcher::MatcherList matchers) {
if (matchers.empty())
return;
auto matcher = std::make_unique<CompositeMatcher>(
std::move(matchers), GetHostPermissionsAlwaysRequired(extension));
bool had_extra_headers_matcher = ruleset_manager_.HasAnyExtraHeadersMatcher();
ruleset_manager_.AddRuleset(extension.id(), std::move(matcher));
AdjustExtraHeaderListenerCountIfNeeded(had_extra_headers_matcher);
}
void RulesMonitorService::UpdateRulesetMatcher(
const Extension& extension,
std::unique_ptr<RulesetMatcher> ruleset_matcher) {
CompositeMatcher* matcher =
ruleset_manager_.GetMatcherForExtension(extension.id());
// The extension didn't have a corresponding CompositeMatcher.
if (!matcher) {
CompositeMatcher::MatcherList matchers;
matchers.push_back(std::move(ruleset_matcher));
AddCompositeMatcher(extension, std::move(matchers));
return;
}
bool had_extra_headers_matcher = ruleset_manager_.HasAnyExtraHeadersMatcher();
matcher->AddOrUpdateRuleset(std::move(ruleset_matcher));
AdjustExtraHeaderListenerCountIfNeeded(had_extra_headers_matcher);
}
// Increments or decrements the browser-context-wide extra-headers listener
// count when the "any matcher needs extra headers" bit has flipped relative
// to |had_extra_headers_matcher|. No-op when the bit is unchanged.
void RulesMonitorService::AdjustExtraHeaderListenerCountIfNeeded(
    bool had_extra_headers_matcher) {
  const bool has_extra_headers_matcher =
      ruleset_manager_.HasAnyExtraHeadersMatcher();
  if (had_extra_headers_matcher == has_extra_headers_matcher)
    return;
  auto* router = ExtensionWebRequestEventRouter::GetInstance();
  if (has_extra_headers_matcher)
    router->IncrementExtraHeadersListenerCount(context_);
  else
    router->DecrementExtraHeadersListenerCount(context_);
}
// Logs a load-result metric for each ruleset in |load_data| and persists any
// recomputed ruleset checksums to extension prefs (skipped entirely when the
// extension is no longer installed).
void RulesMonitorService::LogMetricsAndUpdateChecksumsIfNeeded(
    const LoadRequestData& load_data) {
  for (const RulesetInfo& ruleset : load_data.rulesets) {
    // The |load_ruleset_result()| might be empty if CreateVerifiedMatcher
    // wasn't called on the ruleset.
    if (ruleset.load_ruleset_result())
      LogLoadRulesetResult(*ruleset.load_ruleset_result());
  }
  // The extension may have been uninstalled by this point. Return early if
  // that's the case.
  if (!extension_registry_->GetInstalledExtension(load_data.extension_id))
    return;
  // Update checksums for all rulesets.
  // Note: We also do this for a non-enabled extension. The ruleset on the disk
  // has already been modified at this point. So we do want to update the
  // checksum for it to be in sync with what's on disk.
  for (const RulesetInfo& ruleset : load_data.rulesets) {
    if (!ruleset.new_checksum())
      continue;
    // Dynamic and static rulesets are checksummed under separate pref keys;
    // static ones are additionally keyed by ruleset ID.
    if (ruleset.source().is_dynamic_ruleset()) {
      prefs_->SetDNRDynamicRulesetChecksum(load_data.extension_id,
                                           *(ruleset.new_checksum()));
    } else {
      prefs_->SetDNRStaticRulesetChecksum(load_data.extension_id,
                                          ruleset.source().id(),
                                          *(ruleset.new_checksum()));
    }
  }
}
} // namespace declarative_net_request
// Declares the KeyedService factories RulesMonitorService reads from
// (extension registry, prefs, warning service, permission helper) so the
// dependency graph creates/destroys them in the right order.
template <>
void BrowserContextKeyedAPIFactory<
    declarative_net_request::RulesMonitorService>::
    DeclareFactoryDependencies() {
  DependsOn(ExtensionRegistryFactory::GetInstance());
  DependsOn(ExtensionPrefsFactory::GetInstance());
  DependsOn(WarningServiceFactory::GetInstance());
  DependsOn(PermissionHelper::GetFactoryInstance());
}
} // namespace extensions
|
nwjs/chromium.src
|
extensions/browser/api/declarative_net_request/rules_monitor_service.cc
|
C++
|
bsd-3-clause
| 41,686
|
/*
* Copyright 2012 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkMorphologyImageFilter.h"
#include "SkBitmap.h"
#include "SkColorPriv.h"
#include "SkReadBuffer.h"
#include "SkWriteBuffer.h"
#include "SkRect.h"
#include "SkMorphology_opts.h"
#if SK_SUPPORT_GPU
#include "GrContext.h"
#include "GrInvariantOutput.h"
#include "GrTexture.h"
#include "effects/Gr1DKernelEffect.h"
#include "gl/GrGLProcessor.h"
#include "gl/builders/GrGLProgramBuilder.h"
#endif
// Stores the X/Y morphology radii; values are not validated here (negative
// mapped radii are rejected later in filterImageGeneric()).
SkMorphologyImageFilter::SkMorphologyImageFilter(int radiusX,
                                                 int radiusY,
                                                 SkImageFilter* input,
                                                 const CropRect* cropRect,
                                                 uint32_t uniqueID)
    : INHERITED(1, &input, cropRect, uniqueID), fRadius(SkISize::Make(radiusX, radiusY)) {
}
// Serialization. The write order (width, then height) must match the read
// order in SkErodeImageFilter::CreateProc / SkDilateImageFilter::CreateProc.
void SkMorphologyImageFilter::flatten(SkWriteBuffer& buffer) const {
    this->INHERITED::flatten(buffer);
    buffer.writeInt(fRadius.fWidth);
    buffer.writeInt(fRadius.fHeight);
}
// Axis along which a single 1-D morphology pass walks.
enum MorphDirection {
    kX, kY
};
// The CPU erode and dilate passes are identical except for two details: the
// per-channel identity value each accumulator starts from (255 vs 0) and the
// comparison used (min vs max). Both are therefore expressed in terms of a
// single shared template, morph(), below; the compile-time |type| parameter
// lets the compiler fold the per-pixel branch away.
enum MorphType {
    kErode_MorphType,   // per-channel minimum; accumulators start at 255
    kDilate_MorphType   // per-channel maximum; accumulators start at 0
};

// Slides a 1-D window of (2 * radius + 1) pixels along |direction| and writes,
// for every destination pixel, the per-channel extremum of the window.
// |width| is the extent along the direction of travel; |height| is the
// perpendicular extent (see callProcY, which passes them swapped).
template<MorphDirection direction, MorphType type>
static void morph(const SkPMColor* src, SkPMColor* dst,
                  int radius, int width, int height,
                  int srcStride, int dstStride)
{
    // Map the logical X/Y walk onto pointer strides so one implementation
    // serves both directions.
    const int srcStrideX = direction == kX ? 1 : srcStride;
    const int dstStrideX = direction == kX ? 1 : dstStride;
    const int srcStrideY = direction == kX ? srcStride : 1;
    const int dstStrideY = direction == kX ? dstStride : 1;
    radius = SkMin32(radius, width - 1);
    const SkPMColor* upperSrc = src + radius * srcStrideX;
    // Identity element for the extremum: every sample compares "better".
    const int kInit = (kErode_MorphType == type) ? 255 : 0;
    for (int x = 0; x < width; ++x) {
        const SkPMColor* lp = src;
        const SkPMColor* up = upperSrc;
        SkPMColor* dptr = dst;
        for (int y = 0; y < height; ++y) {
            int extB = kInit, extG = kInit, extR = kInit, extA = kInit;
            for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
                int b = SkGetPackedB32(*p);
                int g = SkGetPackedG32(*p);
                int r = SkGetPackedR32(*p);
                int a = SkGetPackedA32(*p);
                // |type| is a template parameter, so this branch is resolved
                // at compile time.
                if (kErode_MorphType == type) {
                    if (b < extB) extB = b;
                    if (g < extG) extG = g;
                    if (r < extR) extR = r;
                    if (a < extA) extA = a;
                } else {
                    if (b > extB) extB = b;
                    if (g > extG) extG = g;
                    if (r > extR) extR = r;
                    if (a > extA) extA = a;
                }
            }
            *dptr = SkPackARGB32(extA, extR, extG, extB);
            dptr += dstStrideY;
            lp += srcStrideY;
            up += srcStrideY;
        }
        // Grow the window until it is full near the leading edge, slide it
        // through the middle, and shrink it near the trailing edge.
        if (x >= radius) src += srcStrideX;
        if (x + radius < width - 1) upperSrc += srcStrideX;
        dst += dstStrideX;
    }
}

// Per-channel minimum filter. Signature preserved for the Proc fallbacks in
// SkErodeImageFilter::onFilterImage().
template<MorphDirection direction>
static void erode(const SkPMColor* src, SkPMColor* dst,
                  int radius, int width, int height,
                  int srcStride, int dstStride)
{
    morph<direction, kErode_MorphType>(src, dst, radius, width, height,
                                       srcStride, dstStride);
}

// Per-channel maximum filter. Signature preserved for the Proc fallbacks in
// SkDilateImageFilter::onFilterImage().
template<MorphDirection direction>
static void dilate(const SkPMColor* src, SkPMColor* dst,
                   int radius, int width, int height,
                   int srcStride, int dstStride)
{
    morph<direction, kDilate_MorphType>(src, dst, radius, width, height,
                                        srcStride, dstStride);
}
// Runs a direction-X morphology proc over the |bounds| subrect of |src|,
// writing into the origin of |dst|.
static void callProcX(SkMorphologyImageFilter::Proc procX, const SkBitmap& src, SkBitmap* dst, int radiusX, const SkIRect& bounds)
{
    procX(src.getAddr32(bounds.left(), bounds.top()), dst->getAddr32(0, 0),
          radiusX, bounds.width(), bounds.height(),
          src.rowBytesAsPixels(), dst->rowBytesAsPixels());
}
// Runs a direction-Y morphology proc. Note that width/height are passed
// swapped relative to callProcX: the proc's "width" parameter is the extent
// along its direction of travel, which here is Y.
static void callProcY(SkMorphologyImageFilter::Proc procY, const SkBitmap& src, SkBitmap* dst, int radiusY, const SkIRect& bounds)
{
    procY(src.getAddr32(bounds.left(), bounds.top()), dst->getAddr32(0, 0),
          radiusY, bounds.height(), bounds.width(),
          src.rowBytesAsPixels(), dst->rowBytesAsPixels());
}
// Shared CPU implementation for erode and dilate. Evaluates the input filter,
// applies the crop rect, maps the radius through the CTM, and runs a
// separable X pass and/or Y pass (via |procX| / |procY|) into |dst|.
// Returns false for unsupported color types, failed allocations, or a
// flipping CTM that yields a negative mapped radius.
bool SkMorphologyImageFilter::filterImageGeneric(SkMorphologyImageFilter::Proc procX,
                                                 SkMorphologyImageFilter::Proc procY,
                                                 Proxy* proxy,
                                                 const SkBitmap& source,
                                                 const Context& ctx,
                                                 SkBitmap* dst,
                                                 SkIPoint* offset) const {
    SkBitmap src = source;
    SkIPoint srcOffset = SkIPoint::Make(0, 0);
    if (getInput(0) && !getInput(0)->filterImage(proxy, source, ctx, &src, &srcOffset)) {
        return false;
    }
    // The CPU procs only understand native 32-bit pixels.
    if (src.colorType() != kN32_SkColorType) {
        return false;
    }
    SkIRect bounds;
    if (!this->applyCropRect(ctx, proxy, src, &srcOffset, &bounds, &src)) {
        return false;
    }
    SkAutoLockPixels alp(src);
    if (!src.getPixels()) {
        return false;
    }
    if (!dst->tryAllocPixels(src.info().makeWH(bounds.width(), bounds.height()))) {
        return false;
    }
    // Map the radius into device space; floor to integer pixel counts.
    SkVector radius = SkVector::Make(SkIntToScalar(this->radius().width()),
                                     SkIntToScalar(this->radius().height()));
    ctx.ctm().mapVectors(&radius, 1);
    int width = SkScalarFloorToInt(radius.fX);
    int height = SkScalarFloorToInt(radius.fY);
    if (width < 0 || height < 0) {
        return false;
    }
    SkIRect srcBounds = bounds;
    srcBounds.offset(-srcOffset);
    // Zero radius in both directions: a plain copy of the cropped region.
    if (width == 0 && height == 0) {
        src.extractSubset(dst, srcBounds);
        offset->fX = bounds.left();
        offset->fY = bounds.top();
        return true;
    }
    SkBitmap temp;
    if (!temp.tryAllocPixels(dst->info())) {
        return false;
    }
    // Separable filter: X pass into |temp| then Y pass into |dst|, or a
    // single pass when one of the radii is zero.
    if (width > 0 && height > 0) {
        callProcX(procX, src, &temp, width, srcBounds);
        SkIRect tmpBounds = SkIRect::MakeWH(srcBounds.width(), srcBounds.height());
        callProcY(procY, temp, dst, height, tmpBounds);
    } else if (width > 0) {
        callProcX(procX, src, dst, width, srcBounds);
    } else if (height > 0) {
        callProcY(procY, src, dst, height, srcBounds);
    }
    offset->fX = bounds.left();
    offset->fY = bounds.top();
    return true;
}
// CPU entry point for erosion. Prefers platform-optimized (e.g. SIMD) procs
// and falls back to the portable templates when none is registered.
bool SkErodeImageFilter::onFilterImage(Proxy* proxy,
                                       const SkBitmap& source, const Context& ctx,
                                       SkBitmap* dst, SkIPoint* offset) const {
    Proc procX = SkMorphologyGetPlatformProc(kErodeX_SkMorphologyProcType);
    Proc procY = SkMorphologyGetPlatformProc(kErodeY_SkMorphologyProcType);
    if (NULL == procX) {
        procX = erode<kX>;
    }
    if (NULL == procY) {
        procY = erode<kY>;
    }
    return this->filterImageGeneric(procX, procY, proxy, source, ctx, dst, offset);
}
// CPU entry point for dilation. Prefers platform-optimized (e.g. SIMD) procs
// and falls back to the portable templates when none is registered.
bool SkDilateImageFilter::onFilterImage(Proxy* proxy,
                                        const SkBitmap& source, const Context& ctx,
                                        SkBitmap* dst, SkIPoint* offset) const {
    Proc procX = SkMorphologyGetPlatformProc(kDilateX_SkMorphologyProcType);
    Proc procY = SkMorphologyGetPlatformProc(kDilateY_SkMorphologyProcType);
    if (NULL == procX) {
        procX = dilate<kX>;
    }
    if (NULL == procY) {
        procY = dilate<kY>;
    }
    return this->filterImageGeneric(procX, procY, proxy, source, ctx, dst, offset);
}
void SkMorphologyImageFilter::computeFastBounds(const SkRect& src, SkRect* dst) const {
if (getInput(0)) {
getInput(0)->computeFastBounds(src, dst);
} else {
*dst = src;
}
dst->outset(SkIntToScalar(fRadius.width()), SkIntToScalar(fRadius.height()));
}
bool SkMorphologyImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
SkIRect* dst) const {
SkIRect bounds = src;
SkVector radius = SkVector::Make(SkIntToScalar(this->radius().width()),
SkIntToScalar(this->radius().height()));
ctm.mapVectors(&radius, 1);
bounds.outset(SkScalarCeilToInt(radius.x()), SkScalarCeilToInt(radius.y()));
if (getInput(0) && !getInput(0)->filterBounds(bounds, ctm, &bounds)) {
return false;
}
*dst = bounds;
return true;
}
// Deserializes an erode filter. Reads the radii in the order
// SkMorphologyImageFilter::flatten() wrote them (width, then height).
SkFlattenable* SkErodeImageFilter::CreateProc(SkReadBuffer& buffer) {
    SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
    const int width = buffer.readInt();
    const int height = buffer.readInt();
    return Create(width, height, common.getInput(0), &common.cropRect(), common.uniqueID());
}
// Deserializes a dilate filter; same wire format as the erode variant.
SkFlattenable* SkDilateImageFilter::CreateProc(SkReadBuffer& buffer) {
    SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
    const int width = buffer.readInt();
    const int height = buffer.readInt();
    return Create(width, height, common.getInput(0), &common.cropRect(), common.uniqueID());
}
#ifndef SK_IGNORE_TO_STRING
// Debug description, e.g. "SkErodeImageFilter: (radius: (2,3))".
void SkErodeImageFilter::toString(SkString* str) const {
    str->appendf("SkErodeImageFilter: (");
    str->appendf("radius: (%d,%d)", this->radius().fWidth, this->radius().fHeight);
    str->append(")");
}
#endif
#ifndef SK_IGNORE_TO_STRING
// Debug description, e.g. "SkDilateImageFilter: (radius: (2,3))".
void SkDilateImageFilter::toString(SkString* str) const {
    str->appendf("SkDilateImageFilter: (");
    str->appendf("radius: (%d,%d)", this->radius().fWidth, this->radius().fHeight);
    str->append(")");
}
#endif
#if SK_SUPPORT_GPU
///////////////////////////////////////////////////////////////////////////////
/**
* Morphology effects. Depending upon the type of morphology, either the
* component-wise min (Erode_Type) or max (Dilate_Type) of all pixels in the
* kernel is selected as the new color. The new color is modulated by the input
* color.
*/
// GPU fragment processor for one 1-D morphology pass. The optional range
// ([low, high] in texture coordinates along the filter direction) clamps
// sample coordinates; it is used when a pass must not read outside the
// source rect.
class GrMorphologyEffect : public Gr1DKernelEffect {
public:
    enum MorphologyType {
        kErode_MorphologyType,
        kDilate_MorphologyType,
    };
    // Unbounded variant: samples are not clamped along the filter direction.
    static GrFragmentProcessor* Create(GrTexture* tex, Direction dir, int radius,
                                       MorphologyType type) {
        return SkNEW_ARGS(GrMorphologyEffect, (tex, dir, radius, type));
    }
    // Bounded variant: |bounds| supplies the [low, high] clamp range.
    static GrFragmentProcessor* Create(GrTexture* tex, Direction dir, int radius,
                                       MorphologyType type, float bounds[2]) {
        return SkNEW_ARGS(GrMorphologyEffect, (tex, dir, radius, type, bounds));
    }
    virtual ~GrMorphologyEffect();
    MorphologyType type() const { return fType; }
    bool useRange() const { return fUseRange; }
    const float* range() const { return fRange; }
    const char* name() const SK_OVERRIDE { return "Morphology"; }
    void getGLProcessorKey(const GrGLCaps&, GrProcessorKeyBuilder*) const SK_OVERRIDE;
    GrGLFragmentProcessor* createGLInstance() const SK_OVERRIDE;
protected:
    MorphologyType fType;   // erode (min) or dilate (max)
    bool fUseRange;         // whether fRange is valid and should be applied
    float fRange[2];        // [low, high] clamp in texture coordinates
private:
    bool onIsEqual(const GrFragmentProcessor&) const SK_OVERRIDE;
    void onComputeInvariantOutput(GrInvariantOutput* inout) const SK_OVERRIDE;
    GrMorphologyEffect(GrTexture*, Direction, int radius, MorphologyType);
    GrMorphologyEffect(GrTexture*, Direction, int radius, MorphologyType, float bounds[2]);
    GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
    typedef Gr1DKernelEffect INHERITED;
};
///////////////////////////////////////////////////////////////////////////////
// GLSL backend for GrMorphologyEffect. The emitted program is specialized on
// radius, direction, morphology type and range usage, so those are folded
// into the processor key (GenKey) and re-asserted in setData().
class GrGLMorphologyEffect : public GrGLFragmentProcessor {
public:
    GrGLMorphologyEffect(const GrProcessor&);
    virtual void emitCode(GrGLFPBuilder*,
                          const GrFragmentProcessor&,
                          const char* outputColor,
                          const char* inputColor,
                          const TransformedCoordsArray&,
                          const TextureSamplerArray&) SK_OVERRIDE;
    static inline void GenKey(const GrProcessor&, const GrGLCaps&, GrProcessorKeyBuilder* b);
    void setData(const GrGLProgramDataManager&, const GrProcessor&) SK_OVERRIDE;
private:
    // Kernel width in samples, derived from fRadius.
    int width() const { return GrMorphologyEffect::WidthFromRadius(fRadius); }
    int fRadius;
    Gr1DKernelEffect::Direction fDirection;
    bool fUseRange;
    GrMorphologyEffect::MorphologyType fType;
    GrGLProgramDataManager::UniformHandle fPixelSizeUni;  // one texel step
    GrGLProgramDataManager::UniformHandle fRangeUni;      // [low, high] clamp
    typedef GrGLFragmentProcessor INHERITED;
};
// Caches the parameters the generated shader is specialized on.
GrGLMorphologyEffect::GrGLMorphologyEffect(const GrProcessor& proc) {
    const GrMorphologyEffect& morphology = proc.cast<GrMorphologyEffect>();
    fType = morphology.type();
    fUseRange = morphology.useRange();
    fDirection = morphology.direction();
    fRadius = morphology.radius();
}
// Emits the GLSL for one morphology pass: initialize the output to the
// operation's identity color, walk width() samples along the filter
// direction accumulating min() (erode) or max() (dilate), optionally clamping
// the sample coordinate to the range uniform, and finally modulate by the
// input color.
void GrGLMorphologyEffect::emitCode(GrGLFPBuilder* builder,
                                    const GrFragmentProcessor&,
                                    const char* outputColor,
                                    const char* inputColor,
                                    const TransformedCoordsArray& coords,
                                    const TextureSamplerArray& samplers) {
    fPixelSizeUni = builder->addUniform(GrGLProgramBuilder::kFragment_Visibility,
                                        kFloat_GrSLType, kDefault_GrSLPrecision,
                                        "PixelSize");
    const char* pixelSizeInc = builder->getUniformCStr(fPixelSizeUni);
    fRangeUni = builder->addUniform(GrGLProgramBuilder::kFragment_Visibility,
                                    kVec2f_GrSLType, kDefault_GrSLPrecision,
                                    "Range");
    const char* range = builder->getUniformCStr(fRangeUni);
    GrGLFPFragmentBuilder* fsBuilder = builder->getFragmentShaderBuilder();
    SkString coords2D = fsBuilder->ensureFSCoords2D(coords, 0);
    // Pick the identity color and accumulation function for the operation.
    const char* func;
    switch (fType) {
        case GrMorphologyEffect::kErode_MorphologyType:
            fsBuilder->codeAppendf("\t\t%s = vec4(1, 1, 1, 1);\n", outputColor);
            func = "min";
            break;
        case GrMorphologyEffect::kDilate_MorphologyType:
            fsBuilder->codeAppendf("\t\t%s = vec4(0, 0, 0, 0);\n", outputColor);
            func = "max";
            break;
        default:
            SkFAIL("Unexpected type");
            func = ""; // suppress warning
            break;
    }
    const char* dir;
    switch (fDirection) {
        case Gr1DKernelEffect::kX_Direction:
            dir = "x";
            break;
        case Gr1DKernelEffect::kY_Direction:
            dir = "y";
            break;
        default:
            SkFAIL("Unknown filter direction.");
            dir = ""; // suppress warning
    }
    // vec2 coord = coord2D;
    fsBuilder->codeAppendf("\t\tvec2 coord = %s;\n", coords2D.c_str());
    // coord.x -= radius * pixelSize;
    fsBuilder->codeAppendf("\t\tcoord.%s -= %d.0 * %s; \n", dir, fRadius, pixelSizeInc);
    if (fUseRange) {
        // highBound = min(highBound, coord.x + (width-1) * pixelSize);
        fsBuilder->codeAppendf("\t\tfloat highBound = min(%s.y, coord.%s + %f * %s);",
                               range, dir, float(width() - 1), pixelSizeInc);
        // coord.x = max(lowBound, coord.x);
        fsBuilder->codeAppendf("\t\tcoord.%s = max(%s.x, coord.%s);", dir, range, dir);
    }
    // Unrolled-count loop over the kernel samples.
    fsBuilder->codeAppendf("\t\tfor (int i = 0; i < %d; i++) {\n", width());
    fsBuilder->codeAppendf("\t\t\t%s = %s(%s, ", outputColor, func, outputColor);
    fsBuilder->appendTextureLookup(samplers[0], "coord");
    fsBuilder->codeAppend(");\n");
    // coord.x += pixelSize;
    fsBuilder->codeAppendf("\t\t\tcoord.%s += %s;\n", dir, pixelSizeInc);
    if (fUseRange) {
        // coord.x = min(highBound, coord.x);
        fsBuilder->codeAppendf("\t\t\tcoord.%s = min(highBound, coord.%s);", dir, dir);
    }
    fsBuilder->codeAppend("\t\t}\n");
    SkString modulate;
    GrGLSLMulVarBy4f(&modulate, outputColor, inputColor);
    fsBuilder->codeAppend(modulate.c_str());
}
void GrGLMorphologyEffect::GenKey(const GrProcessor& proc,
const GrGLCaps&, GrProcessorKeyBuilder* b) {
const GrMorphologyEffect& m = proc.cast<GrMorphologyEffect>();
uint32_t key = static_cast<uint32_t>(m.radius());
key |= (m.type() << 8);
key |= (m.direction() << 9);
if (m.useRange()) key |= 1 << 10;
b->add32(key);
}
// Uploads the per-draw uniform values. The program was specialized on radius,
// direction and range usage at key-generation time, so those must match the
// incoming processor (asserted below); only the texel step and the range
// bounds vary per texture.
void GrGLMorphologyEffect::setData(const GrGLProgramDataManager& pdman,
                                   const GrProcessor& proc) {
    const GrMorphologyEffect& m = proc.cast<GrMorphologyEffect>();
    GrTexture& texture = *m.texture(0);
    // the code we generated was for a specific kernel radius, direction and bound usage
    SkASSERT(m.radius() == fRadius);
    SkASSERT(m.direction() == fDirection);
    SkASSERT(m.useRange() == fUseRange);
    // One texel step along the filter axis, in normalized texture coords.
    float pixelSize = 0.0f;
    switch (fDirection) {
        case Gr1DKernelEffect::kX_Direction:
            pixelSize = 1.0f / texture.width();
            break;
        case Gr1DKernelEffect::kY_Direction:
            pixelSize = 1.0f / texture.height();
            break;
        default:
            SkFAIL("Unknown filter direction.");
    }
    pdman.set1f(fPixelSizeUni, pixelSize);
    if (fUseRange) {
        const float* range = m.range();
        // NOTE(review): this truth-tests fDirection, so it relies on
        // kY_Direction being non-zero — confirm against Gr1DKernelEffect.
        // For a Y pass on a bottom-up texture the clamp range is mirrored.
        if (fDirection && texture.origin() == kBottomLeft_GrSurfaceOrigin) {
            pdman.set2f(fRangeUni, 1.0f - range[1], 1.0f - range[0]);
        } else {
            pdman.set2f(fRangeUni, range[0], range[1]);
        }
    }
}
///////////////////////////////////////////////////////////////////////////////
// Unbounded constructor: sample coordinates are never clamped (fRange unset).
GrMorphologyEffect::GrMorphologyEffect(GrTexture* texture,
                                       Direction direction,
                                       int radius,
                                       MorphologyType type)
    : Gr1DKernelEffect(texture, direction, radius)
    , fType(type), fUseRange(false) {
    this->initClassID<GrMorphologyEffect>();
}
// Bounded constructor: |range| holds the [low, high] texture-coordinate clamp
// applied along the filter direction.
GrMorphologyEffect::GrMorphologyEffect(GrTexture* texture,
                                       Direction direction,
                                       int radius,
                                       MorphologyType type,
                                       float range[2])
    : Gr1DKernelEffect(texture, direction, radius)
    , fType(type), fUseRange(true) {
    this->initClassID<GrMorphologyEffect>();
    fRange[0] = range[0];
    fRange[1] = range[1];
}
GrMorphologyEffect::~GrMorphologyEffect() {
}
// Key generation is delegated to the GL implementation class.
void GrMorphologyEffect::getGLProcessorKey(const GrGLCaps& caps, GrProcessorKeyBuilder* b) const {
    GrGLMorphologyEffect::GenKey(*this, caps, b);
}
// Instantiates the GLSL backend for this effect.
GrGLFragmentProcessor* GrMorphologyEffect::createGLInstance() const {
    return SkNEW_ARGS(GrGLMorphologyEffect, (*this));
}
bool GrMorphologyEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
const GrMorphologyEffect& s = sBase.cast<GrMorphologyEffect>();
return (this->radius() == s.radius() &&
this->direction() == s.direction() &&
this->useRange() == s.useRange() &&
this->type() == s.type());
}
// Invariant-output bookkeeping for the Gr pipeline optimizer.
void GrMorphologyEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
    // This is valid because the color components of the result of the kernel all come
    // exactly from existing values in the source texture.
    this->updateInvariantOutputForModulation(inout);
}
///////////////////////////////////////////////////////////////////////////////
GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMorphologyEffect);
// Builds a randomized (unbounded) effect instance for the processor
// unit-test harness: random texture, direction, radius in [1, 10] and type.
GrFragmentProcessor* GrMorphologyEffect::TestCreate(SkRandom* random,
                                                    GrContext*,
                                                    const GrDrawTargetCaps&,
                                                    GrTexture* textures[]) {
    int texIdx = random->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
                                      GrProcessorUnitTest::kAlphaTextureIdx;
    Direction dir = random->nextBool() ? kX_Direction : kY_Direction;
    static const int kMaxRadius = 10;
    int radius = random->nextRangeU(1, kMaxRadius);
    MorphologyType type = random->nextBool() ? GrMorphologyEffect::kErode_MorphologyType :
                                               GrMorphologyEffect::kDilate_MorphologyType;
    return GrMorphologyEffect::Create(textures[texIdx], dir, radius, type);
}
namespace {
// Draws |srcRect| into |dstRect| applying one morphology effect whose sample
// coordinates are clamped to |bounds| along the filter direction.
void apply_morphology_rect(GrContext* context,
                           GrRenderTarget* rt,
                           GrTexture* texture,
                           const SkIRect& srcRect,
                           const SkIRect& dstRect,
                           int radius,
                           GrMorphologyEffect::MorphologyType morphType,
                           float bounds[2],
                           Gr1DKernelEffect::Direction direction) {
    GrPaint paint;
    paint.addColorProcessor(GrMorphologyEffect::Create(texture,
                                                       direction,
                                                       radius,
                                                       morphType,
                                                       bounds))->unref();
    context->drawNonAARectToRect(rt, paint, SkMatrix::I(), SkRect::Make(dstRect),
                                 SkRect::Make(srcRect));
}
// Same as apply_morphology_rect() but without range clamping; used by
// apply_morphology_pass() for the interior region, where the kernel cannot
// sample outside the source rect.
void apply_morphology_rect_no_bounds(GrContext* context,
                                     GrRenderTarget* rt,
                                     GrTexture* texture,
                                     const SkIRect& srcRect,
                                     const SkIRect& dstRect,
                                     int radius,
                                     GrMorphologyEffect::MorphologyType morphType,
                                     Gr1DKernelEffect::Direction direction) {
    GrPaint paint;
    paint.addColorProcessor(GrMorphologyEffect::Create(texture,
                                                       direction,
                                                       radius,
                                                       morphType))->unref();
    context->drawNonAARectToRect(rt, paint, SkMatrix::I(), SkRect::Make(dstRect),
                                 SkRect::Make(srcRect));
}
// Runs one GPU morphology pass over |srcRect| -> |dstRect|. The rect is split
// into two margin strips (which could sample outside the source and are drawn
// with the half-texel-inset |bounds| clamp) and an interior inset by |radius|
// (drawn unclamped). If the radius covers the whole rect, a single clamped
// draw is used.
void apply_morphology_pass(GrContext* context,
                           GrRenderTarget* rt,
                           GrTexture* texture,
                           const SkIRect& srcRect,
                           const SkIRect& dstRect,
                           int radius,
                           GrMorphologyEffect::MorphologyType morphType,
                           Gr1DKernelEffect::Direction direction) {
    float bounds[2] = { 0.0f, 1.0f };
    SkIRect lowerSrcRect = srcRect, lowerDstRect = dstRect;
    SkIRect middleSrcRect = srcRect, middleDstRect = dstRect;
    SkIRect upperSrcRect = srcRect, upperDstRect = dstRect;
    if (direction == Gr1DKernelEffect::kX_Direction) {
        // Clamp range in normalized coords, inset half a texel from each edge.
        bounds[0] = (SkIntToScalar(srcRect.left()) + 0.5f) / texture->width();
        bounds[1] = (SkIntToScalar(srcRect.right()) - 0.5f) / texture->width();
        lowerSrcRect.fRight = srcRect.left() + radius;
        lowerDstRect.fRight = dstRect.left() + radius;
        upperSrcRect.fLeft = srcRect.right() - radius;
        upperDstRect.fLeft = dstRect.right() - radius;
        middleSrcRect.inset(radius, 0);
        middleDstRect.inset(radius, 0);
    } else {
        bounds[0] = (SkIntToScalar(srcRect.top()) + 0.5f) / texture->height();
        bounds[1] = (SkIntToScalar(srcRect.bottom()) - 0.5f) / texture->height();
        lowerSrcRect.fBottom = srcRect.top() + radius;
        lowerDstRect.fBottom = dstRect.top() + radius;
        upperSrcRect.fTop = srcRect.bottom() - radius;
        upperDstRect.fTop = dstRect.bottom() - radius;
        middleSrcRect.inset(0, radius);
        middleDstRect.inset(0, radius);
    }
    // Equivalent to "middle width <= 0" after the insets above.
    if (middleSrcRect.fLeft - middleSrcRect.fRight >= 0) {
        // radius covers srcRect; use bounds over entire draw
        apply_morphology_rect(context, rt, texture, srcRect, dstRect, radius,
                              morphType, bounds, direction);
    } else {
        // Draw upper and lower margins with bounds; middle without.
        apply_morphology_rect(context, rt,texture, lowerSrcRect, lowerDstRect, radius,
                              morphType, bounds, direction);
        apply_morphology_rect(context, rt, texture, upperSrcRect, upperDstRect, radius,
                              morphType, bounds, direction);
        apply_morphology_rect_no_bounds(context, rt, texture, middleSrcRect, middleDstRect, radius,
                                        morphType, direction);
    }
}
// Full GPU morphology: an X pass and/or a Y pass, each rendered into a
// scratch texture that then becomes the source for the next stage. Returns
// false if a scratch texture cannot be allocated.
bool apply_morphology(const SkBitmap& input,
                      const SkIRect& rect,
                      GrMorphologyEffect::MorphologyType morphType,
                      SkISize radius,
                      SkBitmap* dst) {
    SkAutoTUnref<GrTexture> srcTexture(SkRef(input.getTexture()));
    SkASSERT(srcTexture);
    GrContext* context = srcTexture->getContext();
    GrContext::AutoClip acs(context, SkRect::MakeWH(SkIntToScalar(srcTexture->width()),
                                                    SkIntToScalar(srcTexture->height())));
    SkIRect dstRect = SkIRect::MakeWH(rect.width(), rect.height());
    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
    desc.fWidth = rect.width();
    desc.fHeight = rect.height();
    desc.fConfig = kSkia8888_GrPixelConfig;
    SkIRect srcRect = rect;
    if (radius.fWidth > 0) {
        GrTexture* texture = context->refScratchTexture(desc, GrContext::kApprox_ScratchTexMatch);
        if (NULL == texture) {
            return false;
        }
        apply_morphology_pass(context, texture->asRenderTarget(), srcTexture, srcRect, dstRect,
                              radius.fWidth, morphType, Gr1DKernelEffect::kX_Direction);
        // Clear the radius.fHeight rows just below dstRect to the operation's
        // identity color (white for erode, transparent for dilate) —
        // apparently so the subsequent Y pass reads neutral samples there.
        SkIRect clearRect = SkIRect::MakeXYWH(dstRect.fLeft, dstRect.fBottom,
                                              dstRect.width(), radius.fHeight);
        GrColor clearColor = GrMorphologyEffect::kErode_MorphologyType == morphType ?
                                SK_ColorWHITE :
                                SK_ColorTRANSPARENT;
        context->clear(&clearRect, clearColor, false, texture->asRenderTarget());
        srcTexture.reset(texture);
        srcRect = dstRect;
    }
    if (radius.fHeight > 0) {
        GrTexture* texture = context->refScratchTexture(desc, GrContext::kApprox_ScratchTexMatch);
        if (NULL == texture) {
            return false;
        }
        apply_morphology_pass(context, texture->asRenderTarget(), srcTexture, srcRect, dstRect,
                              radius.fHeight, morphType, Gr1DKernelEffect::kY_Direction);
        srcTexture.reset(texture);
    }
    // Wrap the final texture in the destination bitmap.
    SkImageFilter::WrapTexture(srcTexture, rect.width(), rect.height(), dst);
    return true;
}
};
// Shared GPU implementation for erode/dilate. Mirrors filterImageGeneric():
// evaluates the input on the GPU, applies the crop rect, maps the radius
// through the CTM, then delegates to apply_morphology(). A zero mapped radius
// degenerates to a plain subset copy.
bool SkMorphologyImageFilter::filterImageGPUGeneric(bool dilate,
                                                    Proxy* proxy,
                                                    const SkBitmap& src,
                                                    const Context& ctx,
                                                    SkBitmap* result,
                                                    SkIPoint* offset) const {
    SkBitmap input = src;
    SkIPoint srcOffset = SkIPoint::Make(0, 0);
    if (getInput(0) && !getInput(0)->getInputResultGPU(proxy, src, ctx, &input, &srcOffset)) {
        return false;
    }
    SkIRect bounds;
    if (!this->applyCropRect(ctx, proxy, input, &srcOffset, &bounds, &input)) {
        return false;
    }
    // Map the radius into device space; a negative mapped radius is rejected.
    SkVector radius = SkVector::Make(SkIntToScalar(this->radius().width()),
                                     SkIntToScalar(this->radius().height()));
    ctx.ctm().mapVectors(&radius, 1);
    int width = SkScalarFloorToInt(radius.fX);
    int height = SkScalarFloorToInt(radius.fY);
    if (width < 0 || height < 0) {
        return false;
    }
    SkIRect srcBounds = bounds;
    srcBounds.offset(-srcOffset);
    if (width == 0 && height == 0) {
        input.extractSubset(result, srcBounds);
        offset->fX = bounds.left();
        offset->fY = bounds.top();
        return true;
    }
    GrMorphologyEffect::MorphologyType type = dilate ? GrMorphologyEffect::kDilate_MorphologyType : GrMorphologyEffect::kErode_MorphologyType;
    if (!apply_morphology(input, srcBounds, type,
                          SkISize::Make(width, height), result)) {
        return false;
    }
    offset->fX = bounds.left();
    offset->fY = bounds.top();
    return true;
}
// Dilate = per-channel maximum over the structuring element; delegates to the
// shared morphology implementation with dilate == true.
bool SkDilateImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, const Context& ctx,
                                         SkBitmap* result, SkIPoint* offset) const {
    return this->filterImageGPUGeneric(true, proxy, src, ctx, result, offset);
}
// Erode = per-channel minimum over the structuring element; delegates to the
// shared morphology implementation with dilate == false.
bool SkErodeImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, const Context& ctx,
                                        SkBitmap* result, SkIPoint* offset) const {
    return this->filterImageGPUGeneric(false, proxy, src, ctx, result, offset);
}
#endif
|
CTSRD-SOAAP/chromium-42.0.2311.135
|
third_party/skia/src/effects/SkMorphologyImageFilter.cpp
|
C++
|
bsd-3-clause
| 30,143
|
// Copyright (c) 2013 Intel Corporation. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "xwalk/runtime/browser/xwalk_runner_tizen.h"
#include "base/command_line.h"
#include "content/public/browser/browser_thread.h"
#include "crypto/nss_util.h"
#include "xwalk/application/browser/application_service.h"
#include "xwalk/application/browser/application_system.h"
#include "xwalk/application/browser/application_tizen.h"
#include "xwalk/application/common/id_util.h"
#include "xwalk/runtime/browser/sysapps_component.h"
#include "xwalk/runtime/browser/xwalk_component.h"
#include "xwalk/runtime/common/xwalk_runtime_features.h"
#include "xwalk/runtime/common/xwalk_switches.h"
namespace xwalk {
// No Tizen-specific construction/teardown work is needed beyond the base
// XWalkRunner behavior.
XWalkRunnerTizen::XWalkRunnerTizen() {
}
XWalkRunnerTizen::~XWalkRunnerTizen() {}
// static
XWalkRunnerTizen* XWalkRunnerTizen::GetInstance() {
  // Downcast of the process-wide singleton. Assumes Tizen builds always
  // create an XWalkRunnerTizen as the runner instance.
  return static_cast<XWalkRunnerTizen*>(XWalkRunner::GetInstance());
}
// Runs base-class setup, then kicks off NSS initialization off the UI thread.
void XWalkRunnerTizen::PreMainMessageLoopRun() {
  XWalkRunner::PreMainMessageLoopRun();
  // NSSInitSingleton is a costly operation (up to 100ms on VTC-1010),
  // resulting in postponing the parsing and composition steps of the render
  // process at cold start. Therefore, move the initialization logic here.
#if defined(USE_NSS)
  // Post to the IO thread so browser startup is not blocked on NSS init.
  content::BrowserThread::PostTask(
      content::BrowserThread::IO,
      FROM_HERE,
      base::Bind(&crypto::EnsureNSSInit));
#endif
}
// Extends the base set of runtime variables handed to the extension process
// with Tizen-specific entries for the application owning 'host'.
void XWalkRunnerTizen::InitializeRuntimeVariablesForExtensions(
    const content::RenderProcessHost* host,
    base::ValueMap* variables) {
  XWalkRunner::InitializeRuntimeVariablesForExtensions(host, variables);
  // Look up the running application associated with this render process
  // (may be NULL, e.g. for non-application pages).
  application::ApplicationTizen* app =
      static_cast<application::ApplicationTizen*>(
          app_system()->application_service()->
              GetApplicationByRenderHostID(host->GetID()));
  if (app) {
    // NOTE(review): raw 'new' — presumably the ValueMap takes ownership of
    // the StringValue; confirm against base::ValueMap's contract.
    (*variables)["encoded_bundle"] =
        new base::StringValue(app->data()->bundle());
  }
}
} // namespace xwalk
|
crosswalk-project/crosswalk-efl
|
runtime/browser/xwalk_runner_tizen.cc
|
C++
|
bsd-3-clause
| 2,022
|
<?php
/**
 * Blank-page layout: renders only the bare HTML shell (doctype, <head>,
 * <body>) around the view's $content, without any dashboard chrome
 * (header, sidebar, footer).
 *
 * @var $this \yii\web\View
 * @var $content string
 */
use backend\assets\DashboardAsset;
use yii\helpers\Html;
use yii\widgets\Breadcrumbs;
use yii\helpers\Url;
use common\widgets\Alert;
// Register the dashboard CSS/JS asset bundle with this view.
// NOTE(review): Breadcrumbs, Url and Alert appear unused in this layout —
// confirm before removing the imports.
DashboardAsset::register($this);
?>
<?php $this->beginPage() ?>
<!DOCTYPE html>
<html lang="<?= Yii::$app->language ?>">
<head>
    <meta charset="<?= Yii::$app->charset ?>">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <?= Html::csrfMetaTags() ?>
    <title><?= Html::encode($this->title) ?></title>
    <?php $this->head() ?>
</head>
<body class="hold-transition skin-blue sidebar-mini">
<?php $this->beginBody() ?>
<?= $content ?>
<?php $this->endBody() ?>
</body>
</html>
<?php $this->endPage() ?>
|
ariandi/ktmavia
|
backend/views/layouts/blankpage.php
|
PHP
|
bsd-3-clause
| 737
|
/*
* Copyright (c) 2015 Goldman Sachs.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Eclipse Distribution License v. 1.0 which accompany this distribution.
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*/
package org.eclipse.collections.api;
import java.util.function.Consumer;
import org.eclipse.collections.api.block.procedure.Procedure;
import org.eclipse.collections.api.block.procedure.Procedure2;
import org.eclipse.collections.api.block.procedure.primitive.ObjectIntProcedure;
import org.eclipse.collections.api.ordered.OrderedIterable;
/**
 * The base interface for all Eclipse Collections. All Eclipse Collections are internally iterable, and this interface provides
 * the base set of internal iterators that every Eclipse collection should implement.
 */
public interface InternalIterable<T>
        extends Iterable<T>
{
    /**
     * The procedure is executed for each element in the iterable.
     * <p>
     * Example using a Java 8 lambda:
     * <pre>
     * people.forEach(Procedures.cast(person -> LOGGER.info(person.getName())));
     * </pre>
     * <p>
     * Example using an anonymous inner class:
     * <pre>
     * people.forEach(new Procedure<Person>()
     * {
     *     public void value(Person person)
     *     {
     *         LOGGER.info(person.getName());
     *     }
     * });
     * </pre>
     * NOTE: This method started to conflict with {@link Iterable#forEach(java.util.function.Consumer)}
     * since Java 1.8. It is recommended to use {@link RichIterable#each(Procedure)} instead to avoid casting to Procedure.
     *
     * @see RichIterable#each(Procedure)
     * @see Iterable#forEach(java.util.function.Consumer)
     */
    @SuppressWarnings("UnnecessaryFullyQualifiedName")
    void forEach(Procedure<? super T> procedure);

    /**
     * Default implementation bridging {@link Iterable#forEach(java.util.function.Consumer)}
     * to the Eclipse Collections {@link #forEach(Procedure)} by adapting the consumer
     * via a method reference.
     */
    @Override
    default void forEach(Consumer<? super T> consumer)
    {
        Procedure<? super T> procedure = consumer::accept;
        this.forEach(procedure);
    }

    /**
     * Iterates over the iterable passing each element and the current relative int index to the specified instance of
     * ObjectIntProcedure.
     * <p>
     * Example using a Java 8 lambda:
     * <pre>
     * people.forEachWithIndex((Person person, int index) -> LOGGER.info("Index: " + index + " person: " + person.getName()));
     * </pre>
     * <p>
     * Example using an anonymous inner class:
     * <pre>
     * people.forEachWithIndex(new ObjectIntProcedure<Person>()
     * {
     *     public void value(Person person, int index)
     *     {
     *         LOGGER.info("Index: " + index + " person: " + person.getName());
     *     }
     * });
     * </pre>
     *
     * @deprecated in 6.0. Use {@link OrderedIterable#forEachWithIndex(ObjectIntProcedure)} instead.
     */
    @Deprecated
    void forEachWithIndex(ObjectIntProcedure<? super T> objectIntProcedure);

    /**
     * The procedure2 is evaluated for each element in the iterable with the specified parameter provided
     * as the second argument.
     * <p>
     * Example using a Java 8 lambda:
     * <pre>
     * people.forEachWith((Person person, Person other) ->
     *  {
     *      if (person.isRelatedTo(other))
     *      {
     *          LOGGER.info(person.getName());
     *      }
     *  }, fred);
     * </pre>
     * <p>
     * Example using an anonymous inner class:
     * <pre>
     * people.forEachWith(new Procedure2<Person, Person>()
     * {
     *     public void value(Person person, Person other)
     *     {
     *         if (person.isRelatedTo(other))
     *         {
     *             LOGGER.info(person.getName());
     *         }
     *     }
     * }, fred);
     * </pre>
     */
    <P> void forEachWith(Procedure2<? super T, ? super P> procedure, P parameter);
}
|
bhav0904/eclipse-collections
|
eclipse-collections-api/src/main/java/org/eclipse/collections/api/InternalIterable.java
|
Java
|
bsd-3-clause
| 4,052
|
// Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_COCOA_TOOLBAR_VIEW_H_
#define CHROME_BROWSER_COCOA_TOOLBAR_VIEW_H_
#import <Cocoa/Cocoa.h>
#import "chrome/browser/cocoa/background_gradient_view.h"
// A view that handles any special rendering of the toolbar bar. At
// this time it only draws a gradient — all behavior is inherited from
// BackgroundGradientView; no members or methods are added here yet.
// Future changes (e.g. themes) may require new functionality here.
@interface ToolbarView : BackgroundGradientView {
}
@end
#endif // CHROME_BROWSER_COCOA_TOOLBAR_VIEW_H_
|
kuiche/chromium
|
chrome/browser/cocoa/toolbar_view.h
|
C
|
bsd-3-clause
| 628
|
/*
* Copyright 2020 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrD3DCaps_DEFINED
#define GrD3DCaps_DEFINED
#include "src/gpu/GrCaps.h"
#include "include/gpu/d3d/GrD3DTypes.h"
#include "src/gpu/d3d/GrD3DAttachment.h"
/**
* Stores some capabilities of a D3D backend.
*/
class GrD3DCaps : public GrCaps {
public:
    /**
     * Creates a GrD3DCaps that is set such that nothing is supported. The init function should
     * be called to fill out the caps.
     */
    GrD3DCaps(const GrContextOptions& contextOptions, IDXGIAdapter1*, ID3D12Device*);
    bool isFormatSRGB(const GrBackendFormat&) const override;
    bool isFormatTexturable(const GrBackendFormat&, GrTextureType) const override;
    bool isFormatTexturable(DXGI_FORMAT) const;
    // Copying is unconditionally supported for the formats this backend uses.
    bool isFormatCopyable(const GrBackendFormat&) const override { return true; }
    bool isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
                                       int sampleCount = 1) const override;
    bool isFormatRenderable(const GrBackendFormat& format, int sampleCount) const override;
    bool isFormatRenderable(DXGI_FORMAT, int sampleCount) const;
    bool isFormatUnorderedAccessible(DXGI_FORMAT) const;
    int getRenderTargetSampleCount(int requestedCount, const GrBackendFormat&) const override;
    int getRenderTargetSampleCount(int requestedCount, DXGI_FORMAT) const;
    int maxRenderTargetSampleCount(const GrBackendFormat&) const override;
    int maxRenderTargetSampleCount(DXGI_FORMAT) const;
    GrColorType getFormatColorType(DXGI_FORMAT) const;
    SupportedWrite supportedWritePixelsColorType(GrColorType surfaceColorType,
                                                 const GrBackendFormat& surfaceFormat,
                                                 GrColorType srcColorType) const override;
    SurfaceReadPixelsSupport surfaceSupportsReadPixels(const GrSurface*) const override;
    /**
     * Returns both a supported and most preferred stencil format to use in draws.
     */
    DXGI_FORMAT preferredStencilFormat() const {
        return fPreferredStencilFormat;
    }
    // Total allocated bits (depth + stencil + padding) for a stencil format.
    static int GetStencilFormatTotalBitCount(DXGI_FORMAT format) {
        switch (format) {
            case DXGI_FORMAT_D24_UNORM_S8_UINT:
                return 32;
            case DXGI_FORMAT_D32_FLOAT_S8X24_UINT:
                // DXGI_FORMAT_D32_FLOAT_S8X24_UINT has 24 unused bits at the end so total bits is 64.
                return 64;
            default:
                SkASSERT(false);
                return 0;
        }
    }
    /**
     * Helpers used by canCopySurface. In all cases if the SampleCnt parameter is zero that means
     * the surface is not a render target, otherwise it is the number of samples in the render
     * target.
     */
    bool canCopyTexture(DXGI_FORMAT dstFormat, int dstSampleCnt,
                        DXGI_FORMAT srcFormat, int srcSamplecnt) const;
    bool canCopyAsResolve(DXGI_FORMAT dstFormat, int dstSampleCnt,
                          DXGI_FORMAT srcFormat, int srcSamplecnt) const;
    GrBackendFormat getBackendFormatFromCompressionType(SkImage::CompressionType) const override;
    // Maps a GrColorType to the DXGI format chosen for it during init.
    DXGI_FORMAT getFormatFromColorType(GrColorType colorType) const {
        int idx = static_cast<int>(colorType);
        return fColorTypeToFormatTable[idx];
    }
    skgpu::Swizzle getWriteSwizzle(const GrBackendFormat&, GrColorType) const override;
    uint64_t computeFormatKey(const GrBackendFormat&) const override;
    void addExtraSamplerKey(skgpu::KeyBuilder*,
                            GrSamplerState,
                            const GrBackendFormat&) const override;
    GrProgramDesc makeDesc(GrRenderTarget*,
                           const GrProgramInfo&,
                           ProgramDescOverrideFlags) const override;
    bool resolveSubresourceRegionSupport() const { return fResolveSubresourceRegionSupport; }
    bool standardSwizzleLayoutSupport() const { return fStandardSwizzleLayoutSupport; }
#if GR_TEST_UTILS
    std::vector<TestFormatColorTypeCombination> getTestingCombinations() const override;
#endif
private:
    // PCI vendor IDs, used to apply vendor-specific workarounds.
    enum D3DVendor {
        kAMD_D3DVendor = 0x1002,
        kARM_D3DVendor = 0x13B5,
        kImagination_D3DVendor = 0x1010,
        kIntel_D3DVendor = 0x8086,
        kNVIDIA_D3DVendor = 0x10DE,
        kQualcomm_D3DVendor = 0x5143,
    };
    void init(const GrContextOptions& contextOptions, IDXGIAdapter1*, ID3D12Device*);
    void initGrCaps(const D3D12_FEATURE_DATA_D3D12_OPTIONS&,
                    ID3D12Device*);
    void initShaderCaps(int vendorID, const D3D12_FEATURE_DATA_D3D12_OPTIONS& optionsDesc);
    void initFormatTable(const DXGI_ADAPTER_DESC&, ID3D12Device*);
    void initStencilFormat(ID3D12Device*);
    void applyDriverCorrectnessWorkarounds(int vendorID);
    bool onSurfaceSupportsWritePixels(const GrSurface*) const override;
    bool onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
                          const SkIRect& srcRect, const SkIPoint& dstPoint) const override;
    GrBackendFormat onGetDefaultBackendFormat(GrColorType) const override;
    bool onAreColorTypeAndFormatCompatible(GrColorType, const GrBackendFormat&) const override;
    SupportedRead onSupportedReadPixelsColorType(GrColorType, const GrBackendFormat&,
                                                 GrColorType) const override;
    skgpu::Swizzle onGetReadSwizzle(const GrBackendFormat&, GrColorType) const override;
    // ColorTypeInfo for a specific format
    struct ColorTypeInfo {
        GrColorType fColorType = GrColorType::kUnknown;
        enum {
            kUploadData_Flag = 0x1,
            // Does Ganesh itself support rendering to this colorType & format pair. Renderability
            // still additionally depends on if the format itself is renderable.
            kRenderable_Flag = 0x2,
            // Indicates that this colorType is supported only if we are wrapping a texture with
            // the given format and colorType. We do not allow creation with this pair.
            kWrappedOnly_Flag = 0x4,
        };
        uint32_t fFlags = 0;
        skgpu::Swizzle fReadSwizzle;
        skgpu::Swizzle fWriteSwizzle;
    };
    // Per-DXGI-format capability record, including per-colorType sub-entries.
    struct FormatInfo {
        uint32_t colorTypeFlags(GrColorType colorType) const {
            for (int i = 0; i < fColorTypeInfoCount; ++i) {
                if (fColorTypeInfos[i].fColorType == colorType) {
                    return fColorTypeInfos[i].fFlags;
                }
            }
            return 0;
        }
        void init(const DXGI_ADAPTER_DESC&, ID3D12Device*, DXGI_FORMAT);
        static void InitFormatFlags(const D3D12_FEATURE_DATA_FORMAT_SUPPORT&, uint16_t* flags);
        void initSampleCounts(const DXGI_ADAPTER_DESC& adapterDesc, ID3D12Device*, DXGI_FORMAT);
        enum {
            kTexturable_Flag = 0x1, // Can be sampled in a shader
            kRenderable_Flag = 0x2, // Rendertarget and blendable
            kMSAA_Flag = 0x4,
            kResolve_Flag = 0x8,
            kUnorderedAccess_Flag = 0x10,
        };
        uint16_t fFlags = 0;
        SkTDArray<int> fColorSampleCounts;
        // This GrColorType represents how the actually GPU format lays out its memory. This is used
        // for uploading data to backend textures to make sure we've arranged the memory in the
        // correct order.
        GrColorType fFormatColorType = GrColorType::kUnknown;
        std::unique_ptr<ColorTypeInfo[]> fColorTypeInfos;
        int fColorTypeInfoCount = 0;
    };
    // NOTE(review): count is maintained by hand — presumably it matches the
    // format list in the .cpp; verify when adding formats.
    static const size_t kNumDxgiFormats = 15;
    FormatInfo fFormatTable[kNumDxgiFormats];
    FormatInfo& getFormatInfo(DXGI_FORMAT);
    const FormatInfo& getFormatInfo(DXGI_FORMAT) const;
    DXGI_FORMAT fColorTypeToFormatTable[kGrColorTypeCnt];
    void setColorType(GrColorType, std::initializer_list<DXGI_FORMAT> formats);
    int fMaxPerStageShaderResourceViews;
    int fMaxPerStageUnorderedAccessViews;
    DXGI_FORMAT fPreferredStencilFormat;
    bool fResolveSubresourceRegionSupport : 1;
    bool fStandardSwizzleLayoutSupport : 1;
    using INHERITED = GrCaps;
};
#endif
|
google/skia
|
src/gpu/d3d/GrD3DCaps.h
|
C
|
bsd-3-clause
| 8,196
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE190_Integer_Overflow__char_fscanf_add_22b.c
Label Definition File: CWE190_Integer_Overflow.label.xml
Template File: sources-sinks-22b.tmpl.c
*/
/*
* @description
* CWE: 190 Integer Overflow
* BadSource: fscanf Read data from the console using fscanf()
* GoodSource: Set data to a small, non-zero number (two)
* Sinks: add
* GoodSink: Ensure there will not be an overflow before adding 1 to data
* BadSink : Add 1 to data, which can cause an overflow
* Flow Variant: 22 Control flow: Flow controlled by value of a global variable. Sink functions are in a separate file from sources.
*
* */
#include "std_testcase.h"
#ifndef OMITBAD
/* The global variable below is used to drive control flow in the sink function */
extern int CWE190_Integer_Overflow__char_fscanf_add_22_badGlobal;
/* Bad sink: gated by a global flag defined in the companion 22a file. */
void CWE190_Integer_Overflow__char_fscanf_add_22_badSink(char data)
{
    if (CWE190_Integer_Overflow__char_fscanf_add_22_badGlobal != 0)
    {
        /* POTENTIAL FLAW: Adding 1 to data could cause an overflow */
        char incremented = data + 1;
        printHexCharLine(incremented);
    }
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* The global variables below are used to drive control flow in the sink functions. */
extern int CWE190_Integer_Overflow__char_fscanf_add_22_goodB2G1Global;
extern int CWE190_Integer_Overflow__char_fscanf_add_22_goodB2G2Global;
extern int CWE190_Integer_Overflow__char_fscanf_add_22_goodG2BGlobal;
/* goodB2G1() - use badsource and goodsink by setting the static variable to false instead of true */
void CWE190_Integer_Overflow__char_fscanf_add_22_goodB2G1Sink(char data)
{
    if (!CWE190_Integer_Overflow__char_fscanf_add_22_goodB2G1Global)
    {
        /* FIX: Add a check to prevent an overflow from occurring */
        if (data < CHAR_MAX)
        {
            char incremented = data + 1;
            printHexCharLine(incremented);
        }
        else
        {
            printLine("data value is too large to perform arithmetic safely.");
        }
    }
    else
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        printLine("Benign, fixed string");
    }
}
/* goodB2G2() - use badsource and goodsink by reversing the blocks in the if in the sink function */
void CWE190_Integer_Overflow__char_fscanf_add_22_goodB2G2Sink(char data)
{
    if (!CWE190_Integer_Overflow__char_fscanf_add_22_goodB2G2Global)
    {
        return;
    }
    /* FIX: Add a check to prevent an overflow from occurring */
    if (data >= CHAR_MAX)
    {
        printLine("data value is too large to perform arithmetic safely.");
        return;
    }
    printHexCharLine((char)(data + 1));
}
/* goodG2B() - use goodsource and badsink */
void CWE190_Integer_Overflow__char_fscanf_add_22_goodG2BSink(char data)
{
    if (CWE190_Integer_Overflow__char_fscanf_add_22_goodG2BGlobal != 0)
    {
        /* POTENTIAL FLAW: Adding 1 to data could cause an overflow */
        char incremented = data + 1;
        printHexCharLine(incremented);
    }
}
#endif /* OMITGOOD */
|
JianpingZeng/xcc
|
xcc/test/juliet/testcases/CWE190_Integer_Overflow/s01/CWE190_Integer_Overflow__char_fscanf_add_22b.c
|
C
|
bsd-3-clause
| 3,262
|
/**************************************************************************\
* Copyright (c) Kongsberg Oil & Gas Technologies AS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\**************************************************************************/
/*!
\class SoGLProjectionMatrixElement Inventor/elements/SoGLProjectionMatrixElement.h
\brief The SoGLProjectionMatrixElement class is yet to be documented.
\ingroup elements
FIXME: write doc.
*/
#include <Inventor/elements/SoGLProjectionMatrixElement.h>
#include "coindefs.h"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif // HAVE_CONFIG_H
#include <Inventor/system/gl.h>
#include <cassert>
SO_ELEMENT_SOURCE(SoGLProjectionMatrixElement);
/*!
  This static method initializes static data for the
  SoGLProjectionMatrixElement class.
*/
void
SoGLProjectionMatrixElement::initClass(void)
{
  // Registers the element type with the Inventor runtime type system.
  SO_ELEMENT_INIT_CLASS(SoGLProjectionMatrixElement, inherited);
}
/*!
  The destructor. No resources to release.
*/
SoGLProjectionMatrixElement::~SoGLProjectionMatrixElement(void)
{
}
/*!
  Pops the element and re-sends the restored projection matrix to OpenGL.
*/
void
SoGLProjectionMatrixElement::pop(SoState * state,
                                 const SoElement * COIN_UNUSED_ARG(prevTopElement))
{
  // BUG FIX: 'state' was previously wrapped in COIN_UNUSED_ARG even though
  // the body passes it to capture(); COIN_UNUSED_ARG drops the parameter
  // name, leaving 'state' undeclared. prevTopElement is the parameter that
  // is genuinely unused here.
  this->capture(state);
  this->updategl();
}
/*!
  Stores the new projection matrix in the element and immediately mirrors
  the value into OpenGL via updategl().
*/
void
SoGLProjectionMatrixElement::setElt(const SbMatrix & matrix)
{
  inherited::setElt(matrix);
  this->updategl();
}
/*!
  Loads the element's current projection matrix into the OpenGL
  GL_PROJECTION matrix, then restores GL_MODELVIEW as the active
  matrix mode (the mode the rest of the render traversal expects).
*/
void
SoGLProjectionMatrixElement::updategl(void)
{
#if 0 // debug
  SoDebugError::postInfo("SoGLProjectionMatrixElement::updategl", "");
#endif // debug
  glMatrixMode(GL_PROJECTION);
  glLoadMatrixf((float*)this->projectionMatrix);
  glMatrixMode(GL_MODELVIEW);
}
|
Alexpux/Coin3D
|
src/elements/GL/SoGLProjectionMatrixElement.cpp
|
C++
|
bsd-3-clause
| 3,145
|
from statsmodels.compat.python import lzip
import numpy as np
from scipy import stats
from statsmodels.distributions import ECDF
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import add_constant
from . import utils
__all__ = ["qqplot", "qqplot_2samples", "qqline", "ProbPlot"]
class ProbPlot(object):
"""
Q-Q and P-P Probability Plots
Can take arguments specifying the parameters for dist or fit them
automatically. (See fit under kwargs.)
Parameters
----------
data : array_like
A 1d data array
dist : callable
Compare x against dist. A scipy.stats or statsmodels distribution. The
default is scipy.stats.distributions.norm (a standard normal). Can be
a SciPy frozen distribution.
fit : bool
If fit is false, loc, scale, and distargs are passed to the
distribution. If fit is True then the parameters for dist are fit
automatically using dist.fit. The quantiles are formed from the
standardized data, after subtracting the fitted loc and dividing by
the fitted scale. fit cannot be used if dist is a SciPy frozen
distribution.
distargs : tuple
A tuple of arguments passed to dist to specify it fully so dist.ppf
may be called. distargs must not contain loc or scale. These values
must be passed using the loc or scale inputs. distargs cannot be used
if dist is a SciPy frozen distribution.
a : float
Offset for the plotting position of an expected order statistic, for
example. The plotting positions are given by
(i - a)/(nobs - 2*a + 1) for i in range(0,nobs+1)
loc : float
Location parameter for dist. Cannot be used if dist is a SciPy frozen
distribution.
scale : float
Scale parameter for dist. Cannot be used if dist is a SciPy frozen
distribution.
See Also
--------
scipy.stats.probplot
Notes
-----
1) Depends on matplotlib.
2) If `fit` is True then the parameters are fit using the
distribution's `fit()` method.
3) The call signatures for the `qqplot`, `ppplot`, and `probplot`
methods are similar, so examples 1 through 4 apply to all
three methods.
4) The three plotting methods are summarized below:
ppplot : Probability-Probability plot
Compares the sample and theoretical probabilities (percentiles).
qqplot : Quantile-Quantile plot
Compares the sample and theoretical quantiles
probplot : Probability plot
Same as a Q-Q plot, however probabilities are shown in the scale of
the theoretical distribution (x-axis) and the y-axis contains
unscaled quantiles of the sample data.
Examples
--------
The first example shows a Q-Q plot for regression residuals
>>> # example 1
>>> import statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> model = sm.OLS(data.endog, data.exog)
>>> mod_fit = model.fit()
>>> res = mod_fit.resid # residuals
>>> pplot = sm.ProbPlot(res)
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 1 - qqplot - residuals of OLS fit")
>>> plt.show()
qqplot of the residuals against quantiles of t-distribution with 4
degrees of freedom:
>>> # example 2
>>> import scipy.stats as stats
>>> pplot = sm.ProbPlot(res, stats.t, distargs=(4,))
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 2 - qqplot - residuals against quantiles of t-dist")
>>> plt.show()
qqplot against same as above, but with mean 3 and std 10:
>>> # example 3
>>> pplot = sm.ProbPlot(res, stats.t, distargs=(4,), loc=3, scale=10)
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 3 - qqplot - resids vs quantiles of t-dist")
>>> plt.show()
Automatically determine parameters for t distribution including the
loc and scale:
>>> # example 4
>>> pplot = sm.ProbPlot(res, stats.t, fit=True)
>>> fig = pplot.qqplot(line="45")
>>> h = plt.title("Ex. 4 - qqplot - resids vs. quantiles of fitted t-dist")
>>> plt.show()
A second `ProbPlot` object can be used to compare two separate sample
sets by using the `other` kwarg in the `qqplot` and `ppplot` methods.
>>> # example 5
>>> import numpy as np
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=37)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> fig = pp_x.qqplot(line="45", other=pp_y)
>>> h = plt.title("Ex. 5 - qqplot - compare two sample sets")
>>> plt.show()
In qqplot, sample size of `other` can be equal or larger than the first.
In case of larger, size of `other` samples will be reduced to match the
size of the first by interpolation
>>> # example 6
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=57)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> fig = pp_x.qqplot(line="45", other=pp_y)
>>> title = "Ex. 6 - qqplot - compare different sample sizes"
>>> h = plt.title(title)
>>> plt.show()
In ppplot, sample size of `other` and the first can be different. `other`
will be used to estimate an empirical cumulative distribution function
(ECDF). ECDF(x) will be plotted against p(x)=0.5/n, 1.5/n, ..., (n-0.5)/n
where x are sorted samples from the first.
>>> # example 7
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=57)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> pp_y.ppplot(line="45", other=pp_x)
>>> plt.title("Ex. 7A- ppplot - compare two sample sets, other=pp_x")
>>> pp_x.ppplot(line="45", other=pp_y)
>>> plt.title("Ex. 7B- ppplot - compare two sample sets, other=pp_y")
>>> plt.show()
The following plot displays some options, follow the link to see the
code.
.. plot:: plots/graphics_gofplots_qqplot.py
"""
    def __init__(
        self,
        data,
        dist=stats.norm,
        fit=False,
        distargs=(),
        a=0,
        loc=0,
        scale=1,
    ):
        self.data = data
        self.a = a
        self.nobs = data.shape[0]
        self.distargs = distargs
        self.fit = fit
        # A frozen SciPy distribution carries its own shape/loc/scale, so it
        # cannot be combined with explicit parameters or with refitting.
        self._is_frozen = isinstance(dist, stats.distributions.rv_frozen)
        if self._is_frozen and (
            fit or loc != 0 or scale != 1 or distargs != ()
        ):
            raise ValueError(
                "Frozen distributions cannot be combined with fit, loc, scale"
                " or distargs."
            )
        # properties
        self._cache = {}
        if self._is_frozen:
            # Recover shape args, loc and scale from the frozen distribution:
            # positional args fill the shape parameters first, then loc, then
            # scale; keywords (or this function's defaults) fill the rest.
            self.dist = dist
            dist_gen = dist.dist
            shapes = dist_gen.shapes
            if shapes is not None:
                shape_args = tuple(map(str.strip, shapes.split(",")))
            else:
                shape_args = ()
            numargs = len(shape_args)
            args = dist.args
            if len(args) >= numargs + 1:
                self.loc = args[numargs]
            else:
                self.loc = dist.kwds.get("loc", loc)
            if len(args) >= numargs + 2:
                self.scale = args[numargs + 1]
            else:
                self.scale = dist.kwds.get("scale", scale)
            fit_params = []
            for i, arg in enumerate(shape_args):
                if arg in dist.kwds:
                    value = dist.kwds[arg]
                else:
                    value = dist.args[i]
                fit_params.append(value)
            self.fit_params = np.r_[fit_params, self.loc, self.scale]
        elif fit:
            # Let the distribution estimate its own parameters; keep a
            # loc=0/scale=1 instance so quantiles are computed on
            # standardized data.
            self.fit_params = dist.fit(data)
            self.loc = self.fit_params[-2]
            self.scale = self.fit_params[-1]
            if len(self.fit_params) > 2:
                self.dist = dist(*self.fit_params[:-2], **dict(loc=0, scale=1))
            else:
                self.dist = dist(loc=0, scale=1)
        elif distargs or loc != 0 or scale != 1:
            try:
                self.dist = dist(*distargs, **dict(loc=loc, scale=scale))
            except Exception:
                distargs = ", ".join([str(da) for da in distargs])
                cmd = "dist({distargs}, loc={loc}, scale={scale})"
                cmd = cmd.format(distargs=distargs, loc=loc, scale=scale)
                raise TypeError(
                    "Initializing the distribution failed. This "
                    "can occur if distargs contains loc or scale. "
                    "The distribution initialization command "
                    "is:\n{cmd}".format(cmd=cmd)
                )
            self.loc = loc
            self.scale = scale
            self.fit_params = np.r_[distargs, loc, scale]
        else:
            # Unfrozen distribution used with all-default parameters.
            self.dist = dist
            self.loc = loc
            self.scale = scale
            self.fit_params = np.r_[loc, scale]
    @cache_readonly
    def theoretical_percentiles(self):
        """Plotting positions (i - a)/(nobs - 2*a + 1) used as percentiles."""
        return plotting_pos(self.nobs, self.a)
@cache_readonly
def theoretical_quantiles(self):
"""Theoretical quantiles"""
try:
return self.dist.ppf(self.theoretical_percentiles)
except TypeError:
msg = "%s requires more parameters to compute ppf".format(
self.dist.name,
)
raise TypeError(msg)
except Exception as exc:
msg = "failed to compute the ppf of {0}".format(self.dist.name)
raise type(exc)(msg)
@cache_readonly
def sorted_data(self):
"""sorted data"""
sorted_data = np.array(self.data, copy=True)
sorted_data.sort()
return sorted_data
    @cache_readonly
    def sample_quantiles(self):
        """Sorted data, standardized by the fitted loc/scale when `fit`.

        NOTE(review): the guard uses ``and`` — standardization is skipped
        whenever the fitted ``loc`` is exactly 0 *or* ``scale`` is exactly 1,
        even if the other parameter differs from its default. If the intent
        is "standardize whenever a fit was performed", the condition should
        likely be ``self.fit and (self.loc != 0 or self.scale != 1)`` —
        confirm against callers before changing.
        """
        if self.fit and self.loc != 0 and self.scale != 1:
            return (self.sorted_data - self.loc) / self.scale
        else:
            return self.sorted_data
    @cache_readonly
    def sample_percentiles(self):
        """Sample percentiles: the cdf evaluated at the (standardized) data."""
        _check_for(self.dist, "cdf")
        if self._is_frozen:
            # A frozen distribution already embeds loc/scale.
            return self.dist.cdf(self.sorted_data)
        # Standardize with loc (fit_params[-2]) and scale (fit_params[-1])
        # before evaluating the cdf of the loc=0/scale=1 distribution stored
        # in self.dist.
        quantiles = (self.sorted_data - self.fit_params[-2]) / self.fit_params[
            -1
        ]
        return self.dist.cdf(quantiles)
def ppplot(
self,
xlabel=None,
ylabel=None,
line=None,
other=None,
ax=None,
**plotkwargs,
):
"""
Plot of the percentiles of x versus the percentiles of a distribution.
Parameters
----------
xlabel : str or None, optional
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : str or None, optional
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
line : {None, "45", "s", "r", q"}, optional
Options for the reference line to which the data is compared:
- "45": 45-degree line
- "s": standardized line, the expected order statistics are
scaled by the standard deviation of the given sample and have
the mean added to them
- "r": A regression line is fit
- "q": A line is fit through the quartiles.
- None: by default no reference line is added to the plot.
other : ProbPlot, array_like, or None, optional
If provided, ECDF(x) will be plotted against p(x) where x are
sorted samples from `self`. ECDF is an empirical cumulative
distribution function estimated from `other` and
p(x) = 0.5/n, 1.5/n, ..., (n-0.5)/n where n is the number of
samples in `self`. If an array-object is provided, it will be
turned into a `ProbPlot` instance default parameters. If not
provided (default), `self.dist(x)` is be plotted against p(x).
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if other is not None:
check_other = isinstance(other, ProbPlot)
if not check_other:
other = ProbPlot(other)
p_x = self.theoretical_percentiles
ecdf_x = ECDF(other.sample_quantiles)(self.sample_quantiles)
fig, ax = _do_plot(
p_x, ecdf_x, self.dist, ax=ax, line=line, **plotkwargs
)
if xlabel is None:
xlabel = "Probabilities of 2nd Sample"
if ylabel is None:
ylabel = "Probabilities of 1st Sample"
else:
fig, ax = _do_plot(
self.theoretical_percentiles,
self.sample_percentiles,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Theoretical Probabilities"
if ylabel is None:
ylabel = "Sample Probabilities"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
return fig
def qqplot(
self,
xlabel=None,
ylabel=None,
line=None,
other=None,
ax=None,
swap: bool = False,
**plotkwargs,
):
"""
Plot of the quantiles of x versus the quantiles/ppf of a distribution.
Can also be used to plot against the quantiles of another `ProbPlot`
instance.
Parameters
----------
xlabel : {None, str}
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : {None, str}
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
line : {None, "45", "s", "r", q"}, optional
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
other : {ProbPlot, array_like, None}, optional
If provided, the sample quantiles of this `ProbPlot` instance are
plotted against the sample quantiles of the `other` `ProbPlot`
instance. Sample size of `other` must be equal or larger than
this `ProbPlot` instance. If the sample size is larger, sample
quantiles of `other` will be interpolated to match the sample size
of this `ProbPlot` instance. If an array-like object is provided,
it will be turned into a `ProbPlot` instance using default
parameters. If not provided (default), the theoretical quantiles
are used.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
swap : bool, optional
Flag indicating to swap the x and y labels.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if other is not None:
check_other = isinstance(other, ProbPlot)
if not check_other:
other = ProbPlot(other)
s_self = self.sample_quantiles
s_other = other.sample_quantiles
if len(s_self) > len(s_other):
raise ValueError(
"Sample size of `other` must be equal or "
+ "larger than this `ProbPlot` instance"
)
elif len(s_self) < len(s_other):
# Use quantiles of the smaller set and interpolate quantiles of
# the larger data set
p = plotting_pos(self.nobs, self.a)
s_other = stats.mstats.mquantiles(s_other, p)
fig, ax = _do_plot(
s_other, s_self, self.dist, ax=ax, line=line, **plotkwargs
)
if xlabel is None:
xlabel = "Quantiles of 2nd Sample"
if ylabel is None:
ylabel = "Quantiles of 1st Sample"
if swap:
xlabel, ylabel = ylabel, xlabel
else:
fig, ax = _do_plot(
self.theoretical_quantiles,
self.sample_quantiles,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Theoretical Quantiles"
if ylabel is None:
ylabel = "Sample Quantiles"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return fig
def probplot(
self,
xlabel=None,
ylabel=None,
line=None,
exceed=False,
ax=None,
**plotkwargs,
):
"""
Plot of unscaled quantiles of x against the prob of a distribution.
The x-axis is scaled linearly with the quantiles, but the probabilities
are used to label the axis.
Parameters
----------
xlabel : {None, str}, optional
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : {None, str}, optional
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
line : {None, "45", "s", "r", q"}, optional
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
exceed : bool, optional
If False (default) the raw sample quantiles are plotted against
the theoretical quantiles, show the probability that a sample will
not exceed a given value. If True, the theoretical quantiles are
flipped such that the figure displays the probability that a
sample will exceed a given value.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if exceed:
fig, ax = _do_plot(
self.theoretical_quantiles[::-1],
self.sorted_data,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Probability of Exceedance (%)"
else:
fig, ax = _do_plot(
self.theoretical_quantiles,
self.sorted_data,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Non-exceedance Probability (%)"
if ylabel is None:
ylabel = "Sample Quantiles"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
_fmt_probplot_axis(ax, self.dist, self.nobs)
return fig
def qqplot(
    data,
    dist=stats.norm,
    distargs=(),
    a=0,
    loc=0,
    scale=1,
    fit=False,
    line=None,
    ax=None,
    **plotkwargs,
):
    """
    Q-Q plot of the quantiles of x versus the quantiles/ppf of a distribution.

    Can take arguments specifying the parameters for dist or fit them
    automatically. (See fit under Parameters.)

    Parameters
    ----------
    data : array_like
        A 1d data array.
    dist : callable
        Comparison distribution. The default is
        scipy.stats.distributions.norm (a standard normal).
    distargs : tuple
        A tuple of arguments passed to dist to specify it fully
        so dist.ppf may be called.
    a : float
        Offset for the plotting position of an expected order statistic, for
        example. The plotting positions are given by
        (i - a)/(nobs - 2*a + 1) for i in range(0, nobs + 1)
    loc : float
        Location parameter for dist
    scale : float
        Scale parameter for dist
    fit : bool
        If fit is false, loc, scale, and distargs are passed to the
        distribution. If fit is True then the parameters for dist
        are fit automatically using dist.fit. The quantiles are formed
        from the standardized data, after subtracting the fitted loc
        and dividing by the fitted scale.
    line : {None, "45", "s", "r", "q"}
        Options for the reference line to which the data is compared:

        - "45" - 45-degree line
        - "s" - standardized line, the expected order statistics are scaled
          by the standard deviation of the given sample and have the mean
          added to them
        - "r" - A regression line is fit
        - "q" - A line is fit through the quartiles.
        - None - by default no reference line is added to the plot.
    ax : AxesSubplot, optional
        If given, this subplot is used to plot in instead of a new figure
        being created.
    **plotkwargs
        Additional matplotlib arguments to be passed to the `plot` command.

    Returns
    -------
    Figure
        If `ax` is None, the created figure. Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    scipy.stats.probplot

    Notes
    -----
    Depends on matplotlib. If `fit` is True then the parameters are fit using
    the distribution's fit() method.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from matplotlib import pyplot as plt
    >>> data = sm.datasets.longley.load()
    >>> exog = sm.add_constant(data.exog)
    >>> mod_fit = sm.OLS(data.endog, exog).fit()
    >>> res = mod_fit.resid # residuals
    >>> fig = sm.qqplot(res)
    >>> plt.show()

    qqplot of the residuals against quantiles of t-distribution with 4
    degrees of freedom:

    >>> import scipy.stats as stats
    >>> fig = sm.qqplot(res, stats.t, distargs=(4,))
    >>> plt.show()

    qqplot against same as above, but with mean 3 and std 10:

    >>> fig = sm.qqplot(res, stats.t, distargs=(4,), loc=3, scale=10)
    >>> plt.show()

    Automatically determine parameters for t distribution including the
    loc and scale:

    >>> fig = sm.qqplot(res, stats.t, fit=True, line="45")
    >>> plt.show()

    The following plot displays some options, follow the link to see the
    code.

    .. plot:: plots/graphics_gofplots_qqplot.py
    """
    # All quantile computation lives in ProbPlot; this is a thin wrapper.
    pp = ProbPlot(
        data, dist=dist, distargs=distargs, fit=fit, a=a, loc=loc, scale=scale
    )
    return pp.qqplot(ax=ax, line=line, **plotkwargs)
def qqplot_2samples(
    data1, data2, xlabel=None, ylabel=None, line=None, ax=None
):
    """
    Q-Q Plot of two samples' quantiles.

    Can take either two `ProbPlot` instances or two array-like objects. In
    the case of the latter, both inputs will be converted to `ProbPlot`
    instances using only the default values - so use `ProbPlot` instances if
    finer-grained control of the quantile computations is required.

    Parameters
    ----------
    data1 : {array_like, ProbPlot}
        Data to plot along x axis. If the sample sizes are unequal, the
        longer series is always plotted along the x-axis.
    data2 : {array_like, ProbPlot}
        Data to plot along y axis. Does not need to have the same number of
        observations as data 1. If the sample sizes are unequal, the longer
        series is always plotted along the x-axis.
    xlabel : {None, str}
        User-provided labels for the x-axis. If None (default),
        other values are used.
    ylabel : {None, str}
        User-provided labels for the y-axis. If None (default),
        other values are used.
    line : {None, "45", "s", "r", "q"}
        Options for the reference line to which the data is compared:

        - "45" - 45-degree line
        - "s" - standardized line, the expected order statistics are scaled
          by the standard deviation of the given sample and have the mean
          added to them
        - "r" - A regression line is fit
        - "q" - A line is fit through the quartiles.
        - None - by default no reference line is added to the plot.
    ax : AxesSubplot, optional
        If given, this subplot is used to plot in instead of a new figure
        being created.

    Returns
    -------
    Figure
        If `ax` is None, the created figure. Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    scipy.stats.probplot

    Notes
    -----
    1) Depends on matplotlib.
    2) If `data1` and `data2` are not `ProbPlot` instances, instances will be
       created using the default parameters. Therefore, it is recommended to
       use `ProbPlot` instance if fine-grained control is needed in the
       computation of the quantiles.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from statsmodels.graphics.gofplots import qqplot_2samples
    >>> x = np.random.normal(loc=8.5, scale=2.5, size=37)
    >>> y = np.random.normal(loc=8.0, scale=3.0, size=37)
    >>> pp_x = sm.ProbPlot(x)
    >>> pp_y = sm.ProbPlot(y)
    >>> qqplot_2samples(pp_x, pp_y)
    >>> plt.show()

    .. plot:: plots/graphics_gofplots_qqplot_2samples.py

    >>> fig = qqplot_2samples(pp_x, pp_y, xlabel=None, ylabel=None,
    ...                       line=None, ax=None)
    """
    # Coerce array-like inputs to ProbPlot instances with default settings.
    if not isinstance(data1, ProbPlot):
        data1 = ProbPlot(data1)
    if not isinstance(data2, ProbPlot):
        data2 = ProbPlot(data2)
    # qqplot requires `other` to be the (weakly) larger sample, so call the
    # method on the smaller instance; the longer series ends up on the
    # x-axis either way.
    if data1.data.shape[0] >= data2.data.shape[0]:
        fig = data2.qqplot(
            xlabel=ylabel,
            ylabel=xlabel,
            line=line,
            other=data1,
            ax=ax,
            swap=True,
        )
    else:
        fig = data1.qqplot(
            xlabel=ylabel, ylabel=xlabel, line=line, other=data2, ax=ax
        )
    return fig
def qqline(ax, line, x=None, y=None, dist=None, fmt="r-", **lineoptions):
    """
    Plot a reference line for a qqplot.

    Parameters
    ----------
    ax : matplotlib axes instance
        The axes on which to plot the line
    line : str {"45","r","s","q"}
        Options for the reference line to which the data is compared.:

        - "45" - 45-degree line
        - "s" - standardized line, the expected order statistics are scaled
          by the standard deviation of the given sample and have the mean
          added to them
        - "r" - A regression line is fit
        - "q" - A line is fit through the quartiles.
        - None - By default no reference line is added to the plot.
    x : ndarray
        X data for plot. Not needed if line is "45".
    y : ndarray
        Y data for plot. Not needed if line is "45".
    dist : scipy.stats.distribution
        A scipy.stats distribution, needed if line is "q".
    fmt : str, optional
        Line format string passed to `plot`.
    **lineoptions
        Additional arguments to be passed to the `plot` command.

    Notes
    -----
    There is no return value. The line is plotted on the given `ax`.

    Examples
    --------
    Import the food expenditure dataset. Plot annual food expenditure on
    x-axis and household income on y-axis. Use qqline to add regression line
    into the plot.

    >>> import statsmodels.api as sm
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from statsmodels.graphics.gofplots import qqline
    >>> foodexp = sm.datasets.engel.load()
    >>> x = foodexp.exog
    >>> y = foodexp.endog
    >>> ax = plt.subplot(111)
    >>> plt.scatter(x, y)
    >>> ax.set_xlabel(foodexp.exog_name[0])
    >>> ax.set_ylabel(foodexp.endog_name)
    >>> qqline(ax, "r", x, y)
    >>> plt.show()

    .. plot:: plots/graphics_gofplots_qqplot_qqline.py
    """
    lineoptions = lineoptions.copy()
    # Check multi-character linestyles ("--", "-.") before the bare "-";
    # otherwise "-" always matches first and e.g. fmt="r--" would be parsed
    # as linestyle "-" with both dashes stripped from the format string.
    for ls in ("--", "-.", "-", ":"):
        if ls in fmt:
            lineoptions.setdefault("linestyle", ls)
            fmt = fmt.replace(ls, "")
            break
    for marker in (
        ".",
        ",",
        "o",
        "v",
        "^",
        "<",
        ">",
        "1",
        "2",
        "3",
        "4",
        "8",
        "s",
        "p",
        "P",
        "*",
        "h",
        "H",
        "+",
        "x",
        "X",
        "D",
        "d",
        "|",
        "_",
    ):
        if marker in fmt:
            lineoptions.setdefault("marker", marker)
            fmt = fmt.replace(marker, "")
            break
    if fmt:
        # Whatever remains of the format string is treated as a color.
        lineoptions.setdefault("color", fmt)

    if line == "45":
        # Square the limits so the 45-degree line is the identity line.
        end_pts = lzip(ax.get_xlim(), ax.get_ylim())
        end_pts[0] = min(end_pts[0])
        end_pts[1] = max(end_pts[1])
        ax.plot(end_pts, end_pts, **lineoptions)
        ax.set_xlim(end_pts)
        ax.set_ylim(end_pts)
        return  # does this have any side effects?
    if x is None or y is None:
        raise ValueError("If line is not 45, x and y cannot be None.")
    x = np.array(x)
    y = np.array(y)
    if line == "r":
        # could use ax.lines[0].get_xdata(), get_ydata(),
        # but don't know axes are "clean"
        y = OLS(y, add_constant(x)).fit().fittedvalues
        ax.plot(x, y, **lineoptions)
    elif line == "s":
        m, b = np.std(y), np.mean(y)
        ref_line = x * m + b
        ax.plot(x, ref_line, **lineoptions)
    elif line == "q":
        _check_for(dist, "ppf")
        # Line through the sample quartiles mapped onto the theoretical
        # quartiles of `dist`.
        q25 = stats.scoreatpercentile(y, 25)
        q75 = stats.scoreatpercentile(y, 75)
        theoretical_quartiles = dist.ppf([0.25, 0.75])
        m = (q75 - q25) / np.diff(theoretical_quartiles)
        b = q25 - m * theoretical_quartiles[0]
        ax.plot(x, m * x + b, **lineoptions)
# about 10x faster than plotting_position in sandbox and mstats
def plotting_pos(nobs, a=0.0, b=None):
    """
    Generates sequence of plotting positions

    Parameters
    ----------
    nobs : int
        Number of probability points to plot
    a : float, default 0.0
        alpha parameter for the plotting position of an expected order
        statistic
    b : float, default None
        beta parameter for the plotting position of an expected order
        statistic. If None, then b is set to a.

    Returns
    -------
    ndarray
        The plotting positions

    Notes
    -----
    The plotting positions are given by (i - a)/(nobs + 1 - a - b) for i in
    range(1, nobs+1)

    See Also
    --------
    scipy.stats.mstats.plotting_positions
        Additional information on alpha and beta
    """
    if b is None:
        b = a
    # Ranks 1..nobs as floats, shifted by alpha and scaled by the
    # (alpha, beta)-adjusted sample size.
    ranks = np.arange(1, nobs + 1, dtype=float)
    return (ranks - a) / (nobs + 1 - a - b)
def _fmt_probplot_axis(ax, dist, nobs):
    """
    Formats a theoretical quantile axis to display the corresponding
    probabilities on the quantiles' scale.

    Parameters
    ----------
    ax : AxesSubplot, optional
        The axis to be formatted
    dist : scipy.stats.distribution
        A scipy.stats distribution sufficiently specified to implement its
        ppf() method.
    nobs : scalar
        Number of observations in the sample

    Returns
    -------
    There is no return value. This operates on `ax` in place
    """
    _check_for(dist, "ppf")
    edge = np.array([1.0, 2, 5])
    # Central grid 10%..90% plus the 1/2/5 and 95/98/99 tail points.
    probs = np.r_[edge, np.linspace(10, 90, 9, dtype=float), 100 - edge[::-1]]
    # Larger samples can resolve more extreme tail probabilities.
    if nobs >= 50:
        probs = np.r_[edge / 10, probs, 100 - edge[::-1] / 10]
    if nobs >= 500:
        probs = np.r_[edge / 100, probs, 100 - edge[::-1] / 100]
    probs = probs / 100.0
    # Place the ticks at the distribution's quantiles but label them with
    # the percentage probabilities.
    quantiles = dist.ppf(probs)
    ax.set_xticks(quantiles)
    ax.set_xticklabels(
        probs * 100,
        rotation=45,
        rotation_mode="anchor",
        horizontalalignment="right",
        verticalalignment="center",
    )
    ax.set_xlim([quantiles.min(), quantiles.max()])
def _do_plot(
    x, y, dist=None, line=None, ax=None, fmt="b", step=False, **kwargs
):
    """
    Boiler plate plotting function for the `ppplot`, `qqplot`, and
    `probplot` methods of the `ProbPlot` class

    Parameters
    ----------
    x : array_like
        X-axis data to be plotted
    y : array_like
        Y-axis data to be plotted
    dist : scipy.stats.distribution
        A scipy.stats distribution, needed if `line` is "q".
    line : {"45", "s", "r", "q", None}, default None
        Options for the reference line to which the data is compared.
    ax : AxesSubplot, optional
        If given, this subplot is used to plot in instead of a new figure
        being created.
    fmt : str, optional
        matplotlib-compatible formatting string for the data markers
    step : bool, optional
        If True, draw a step plot instead of a marker plot.
    kwargs : keywords
        These are passed to matplotlib.plot

    Returns
    -------
    fig : Figure
        The figure containing `ax`.
    ax : AxesSubplot
        The original axes if provided. Otherwise a new instance.
    """
    # Marker-only defaults; caller kwargs override them.
    style = dict(
        marker="o",
        markerfacecolor="C0",
        markeredgecolor="C0",
        linestyle="none",
    )
    style.update(**kwargs)
    # "where" belongs to ax.step only; remove it before ax.plot sees it.
    where = style.pop("where", "pre")
    fig, ax = utils.create_mpl_ax(ax)
    ax.set_xmargin(0.02)
    if step:
        ax.step(x, y, fmt, where=where, **style)
    else:
        ax.plot(x, y, fmt, **style)
    if line:
        if line not in ("r", "q", "45", "s"):
            raise ValueError("%s option for line not understood" % line)
        qqline(ax, line, x=x, y=y, dist=dist)
    return fig, ax
def _check_for(dist, attr="ppf"):
if not hasattr(dist, attr):
raise AttributeError(f"distribution must have a {attr} method")
|
statsmodels/statsmodels
|
statsmodels/graphics/gofplots.py
|
Python
|
bsd-3-clause
| 35,607
|
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Define some categories (used below as the categorical x-axis labels)
categories = [
    'ousia', 'poson', 'poion', 'pros ti', 'pou',
    'pote', 'keisthai', 'echein', 'poiein', 'paschein',
]
# Create data: N random integers in [10, 100) for each category
N = 10
data = { cat : np.random.randint(10, 100, size=N) for cat in categories }
# Define a little function to stack series together to make polygons. Soon
# this will be built into Bokeh.
def stacked(data, categories):
    """Stack the series in ``data`` into polygon y-coordinates.

    Parameters
    ----------
    data : dict
        Maps each category name to a 1d array of values; all arrays must
        have the same length.
    categories : list of str
        Order in which the series are stacked.

    Returns
    -------
    list of ndarray
        One array per category: the previous running total reversed,
        concatenated with the new running total, forming a closed band.
    """
    ys = []
    # dict.values() is a view in Python 3 and cannot be indexed; grab any
    # one series via an iterator to size the initial baseline.
    first_series = next(iter(data.values()))
    cumulative = np.zeros(len(first_series))
    for cat in categories:
        # `total` avoids shadowing the builtin `next` as the original did.
        total = cumulative + data[cat]
        ys.append(np.hstack((cumulative[::-1], total)))
        cumulative = total
    return ys
# Get the y coordinates of the stacked data
ys = stacked(data, categories)
# The x coordinates for each polygon are simply the series concatenated
# with its reverse.
xs = [np.hstack((categories[::-1], categories))] * len(ys)
# Pick out a color palette (one color per stacked band)
colors = brewer["Spectral"][len(ys)]
# EXERCISE: output static HTML file
# EXERCISE: play around with parameters like:
#   - line_color
#   - line_alpha
#   - line_width
#   - line_dash   (e.g., [2,4])
#   - fill_color
#   - fill_alpha
#   - background_fill
patches(xs, ys, x_range=categories, y_range=[0, 800],
        color=colors, alpha=0.8, line_color=None, background_fill="lightgrey",
        title="Categories of Brewering")
# EXERCISE: configure all of the following plot properties
# NOTE: the assignments below are intentionally incomplete exercise
# placeholders -- the script will not run until each right-hand side has
# been filled in by the student.
ygrid().grid_line_color = # color, or None, to suppress the line
ygrid().grid_line_width = # line width for grid lines
axis().major_label_text_font_size = # "12pt", "1.5em", "10px", etc
axis().major_label_text_font_style = # "bold", "normal", "italic"
axis().major_label_standoff = # distance of tick labels from ticks
axis().axis_line_color = # color, or None, to suppress the line
xaxis().major_label_orientation = # radians, "horizontal", "vertical", "normal"
xaxis().major_tick_in = # distance ticks extends into the plot
xaxis().major_tick_out = # and distance they extend out
xaxis().major_tick_line_color = # color, or None, to suppress the line
show()
|
jakevdp/bokeh
|
sphinx/source/tutorial/exercises/style.py
|
Python
|
bsd-3-clause
| 2,134
|
{% extends "survey/base.html" %}{% load i18n %}
{# Confirmation page shown before a survey question is deleted. #}
{# {# #} comments are stripped by the Django template engine, so they never reach the rendered HTML. #}
{% block styles %}
<style type="text/css">
input[type="radio"] { vertical-align: top; }
</style>
{% endblock %}
{% block content %}
<h1>{{ title }}</h1>
{# Posts back to the current URL; presumably the view keys on the "__delete" submit name -- confirm against the view. #}
{# NOTE(review): no csrf_token tag here -- verify against the project's CSRF middleware configuration. #}
<form method="post" class="focus-input" action="">
{% trans 'Are you sure you want to delete the following question ?' %}
<ul>
<li>{{question}}</li>
</ul>
<div class="submit-row"><input type="submit" value="{% trans 'Delete' %}" class="default" name="__delete" /></div>
</form>
{% endblock %}
|
aawsolutions/bslcks
|
templates/survey/question_confirm_delete.html
|
HTML
|
bsd-3-clause
| 532
|
"""A module which implements the time-frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Clement Moutard <clement.moutard@polytechnique.org>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License : BSD (3-clause)
from copy import deepcopy
from functools import partial
from math import sqrt
import numpy as np
from scipy import linalg
from scipy.fftpack import fft, ifft
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask, check_fname, sizeof_fmt
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..channels.layout import _pair_grad_sensors
from ..io.pick import pick_info, pick_types
from ..io.meas_info import Info
from ..utils import SizeMixin
from .multitaper import dpss_windows
from ..viz.utils import figure_nobar, plt_show, _setup_cmap
from ..externals.h5io import write_hdf5, read_hdf5
from ..externals.six import string_types
# Make wavelet
def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
    """Compute Morlet wavelets for the given frequency range.

    Parameters
    ----------
    sfreq : float
        The sampling Frequency.
    freqs : array
        frequency range of interest (1 x Frequencies)
    n_cycles : float | array of float, defaults to 7.0
        Number of cycles. Fixed number or one per frequency.
    sigma : float, defaults to None
        Controls the width of the wavelet, i.e. its temporal resolution. If
        None the temporal resolution adapts with the frequency, as for any
        wavelet transform: the higher the frequency, the shorter the
        wavelet. If sigma is fixed, the temporal resolution is constant (as
        in the short-time Fourier transform) and the number of oscillations
        increases with the frequency.
    zero_mean : bool, defaults to False
        Make sure the wavelet has a mean of zero.

    Returns
    -------
    Ws : list of array
        The wavelets time series.
    """
    cycles = np.atleast_1d(n_cycles)
    if cycles.size not in (1, len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    Ws = list()
    for idx, freq in enumerate(freqs):
        n_cyc = cycles[idx] if cycles.size > 1 else cycles[0]
        # Scale-dependent (sigma is None) or fixed-width window.
        width = n_cyc / (2.0 * np.pi * (freq if sigma is None else sigma))
        # Symmetric time axis covering +/- 5 standard deviations.
        # This scaling factor is proportional to (Tallon-Baudry 98):
        # (sigma_t*sqrt(pi))^(-1/2);
        half_t = np.arange(0., 5. * width, 1.0 / sfreq)
        t = np.r_[-half_t[::-1], half_t[1:]]
        carrier = np.exp(2.0 * 1j * np.pi * freq * t)
        envelope = np.exp(-t ** 2 / (2.0 * width ** 2))
        if zero_mean:
            # Subtract the DC component so the wavelet has zero mean.
            carrier = carrier - np.exp(- 2 * (np.pi * freq * width) ** 2)
        W = carrier * envelope
        # Normalize so that sqrt(0.5) * ||W|| == 1.
        W /= sqrt(0.5) * linalg.norm(W.ravel())
        Ws.append(W)
    return Ws
def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False):
    """Compute DPSS tapers for the given frequency range.

    Parameters
    ----------
    sfreq : float
        The sampling frequency.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,), defaults to 7.
        The number of cycles globally or for each frequency.
    time_bandwidth : float, defaults to 4.0
        Time x Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1).
        Default is 4.0, giving 3 good tapers.
    zero_mean : bool, defaults to False
        Make sure the wavelet has a mean of zero.

    Returns
    -------
    Ws : list of list of array
        One entry per taper; each entry holds one wavelet per frequency.
    """
    Ws = list()
    if time_bandwidth < 2.0:
        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
    # floor(TBW - 1) low-bias tapers, e.g. 3 tapers for the default TBW of 4.
    n_taps = int(np.floor(time_bandwidth - 1))
    n_cycles = np.atleast_1d(n_cycles)
    if n_cycles.size != 1 and n_cycles.size != len(freqs):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    for m in range(n_taps):
        Wm = list()
        for k, f in enumerate(freqs):
            if len(n_cycles) != 1:
                this_n_cycles = n_cycles[k]
            else:
                this_n_cycles = n_cycles[0]
            # Window length in seconds for this frequency.
            t_win = this_n_cycles / float(f)
            t = np.arange(0., t_win, 1.0 / sfreq)
            # Making sure wavelets are centered before tapering
            oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
            # Get dpss tapers
            # NOTE(review): the tapers depend on t (i.e. on f) but not on m,
            # so this call is repeated for every taper index; it could be
            # cached per frequency -- confirm before changing.
            tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
                                        n_taps)
            Wk = oscillation * tapers[m]
            if zero_mean:  # to make it zero mean
                real_offset = Wk.mean()
                Wk -= real_offset
            # Same normalization as in `morlet`: sqrt(0.5) * ||Wk|| == 1.
            Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
            Wm.append(Wk)
        Ws.append(Wm)
    return Ws
# Low level convolution
def _cwt(X, Ws, mode="same", decim=1, use_fft=True):
    """Compute cwt with fft based convolutions or temporal convolutions.

    Generator: yields the time-frequency transform of one signal per
    iteration. The yielded array is the same buffer on every iteration (see
    note below), so consumers must copy it if they need to retain it.

    Parameters
    ----------
    X : array of shape (n_signals, n_times)
        The data.
    Ws : list of array
        Wavelets time series.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : int | slice, defaults to 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.
    use_fft : bool, defaults to True
        Use the FFT for convolutions or not.

    Yields
    ------
    tfr : array, shape (n_freqs, n_time_decim)
        The time-frequency transform of one signal.
    """
    if mode not in ['same', 'valid', 'full']:
        raise ValueError("`mode` must be 'same', 'valid' or 'full', "
                         "got %s instead." % mode)
    if mode == 'full' and (not use_fft):
        # XXX JRK: full wavelet decomposition needs to be implemented
        raise ValueError('`full` decomposition with convolution is currently' +
                         ' not supported.')
    decim = _check_decim(decim)
    X = np.asarray(X)
    # Precompute wavelets for given frequency range to save time
    n_signals, n_times = X.shape
    # Number of output samples after decimation.
    n_times_out = X[:, decim].shape[1]
    n_freqs = len(Ws)
    Ws_max_size = max(W.size for W in Ws)
    size = n_times + Ws_max_size - 1
    # Always use 2**n-sized FFT
    fsize = 2 ** int(np.ceil(np.log2(size)))
    # precompute FFTs of Ws
    if use_fft:
        fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
    for i, W in enumerate(Ws):
        if len(W) > n_times:
            raise ValueError('At least one of the wavelets is longer than the '
                             'signal. Use a longer signal or shorter '
                             'wavelets.')
        if use_fft:
            fft_Ws[i] = fft(W, fsize)
    # Make generator looping across signals
    # NOTE: `tfr` is allocated once and re-used for every yielded signal.
    tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
    for x in X:
        if use_fft:
            fft_x = fft(x, fsize)
        # Loop across wavelets
        for ii, W in enumerate(Ws):
            if use_fft:
                # Inverse FFT of the product, truncated to the length of the
                # 'full' convolution of x and W.
                ret = ifft(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
            else:
                ret = np.convolve(x, W, mode=mode)
            # Center and decimate decomposition
            if mode == "valid":
                sz = int(abs(W.size - n_times)) + 1
                offset = (n_times - sz) // 2
                this_slice = slice(offset // decim.step,
                                   (offset + sz) // decim.step)
                if use_fft:
                    ret = _centered(ret, sz)
                tfr[ii, this_slice] = ret[decim]
            else:
                if use_fft:
                    ret = _centered(ret, n_times)
                tfr[ii, :] = ret[decim]
        yield tfr
# Loop of convolution: single trial
def _compute_tfr(epoch_data, frequencies, sfreq=1.0, method='morlet',
                 n_cycles=7.0, zero_mean=None, time_bandwidth=None,
                 use_fft=True, decim=1, output='complex', n_jobs=1,
                 verbose=None):
    """Compute time-frequency transforms.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    frequencies : array-like of floats, shape (n_freqs)
        The frequencies.
    sfreq : float | int, defaults to 1.0
        Sampling frequency of the data.
    method : 'multitaper' | 'morlet', defaults to 'morlet'
        The time-frequency method. 'morlet' convolves a Morlet wavelet.
        'multitaper' uses Morlet wavelets windowed with multiple DPSS
        multitapers.
    n_cycles : float | array of float, defaults to 7.0
        Number of cycles in the Morlet wavelet. Fixed number
        or one per frequency.
    zero_mean : bool | None, defaults to None
        None means True for method='multitaper' and False for method='morlet'.
        If True, make sure the wavelets have a mean of zero.
    time_bandwidth : float, defaults to None
        If None and method=multitaper, will be set to 4.0 (3 tapers).
        Time x (Full) Bandwidth product. Only applies if
        method == 'multitaper'. The number of good tapers (low-bias) is
        chosen automatically based on this to equal floor(time_bandwidth - 1).
    use_fft : bool, defaults to True
        Use the FFT for convolutions or not.
    decim : int | slice, defaults to 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.
    output : str, defaults to 'complex'

        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    n_jobs : int, defaults to 1
        The number of epochs to process at the same time. The parallelization
        is implemented across channels.
    verbose : bool, str, int, or None, defaults to None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
        n_times), else it is (n_chans, n_freqs, n_times). If output is
        'avg_power_itc', the real values code for 'avg_power' and the
        imaginary values code for the 'itc': out = avg_power + i * itc
    """
    # Check data
    epoch_data = np.asarray(epoch_data)
    if epoch_data.ndim != 3:
        raise ValueError('epoch_data must be of shape '
                         '(n_epochs, n_chans, n_times)')
    # Check params
    frequencies, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
        _check_tfr_param(frequencies, sfreq, method, zero_mean, n_cycles,
                         time_bandwidth, use_fft, decim, output)
    # Setup wavelet
    if method == 'morlet':
        W = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
        Ws = [W]  # to have same dimensionality as the 'multitaper' case
    elif method == 'multitaper':
        Ws = _make_dpss(sfreq, frequencies, n_cycles=n_cycles,
                        time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    # Check wavelets
    if len(Ws[0][0]) > epoch_data.shape[2]:
        raise ValueError('At least one of the wavelets is longer than the '
                         'signal. Use a longer signal or shorter wavelets.')
    # Initialize output
    decim = _check_decim(decim)
    n_freqs = len(frequencies)
    n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
    # Use the builtin scalar types here: the `np.float` / `np.complex`
    # aliases were deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # types are what the aliases pointed to, so behavior is unchanged.
    if output in ('power', 'phase', 'avg_power', 'itc'):
        dtype = float
    elif output in ('complex', 'avg_power_itc'):
        # avg_power_itc is stored as power + 1i * itc to keep a
        # simple dimensionality
        dtype = complex
    if ('avg_' in output) or ('itc' in output):
        out = np.empty((n_chans, n_freqs, n_times), dtype)
    else:
        out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
    # Parallel computation
    parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs)
    # Parallelization is applied across channels.
    tfrs = parallel(
        my_cwt(channel, Ws, output, use_fft, 'same', decim)
        for channel in epoch_data.transpose(1, 0, 2))
    # FIXME: to avoid overheads we should use np.array_split()
    for channel_idx, tfr in enumerate(tfrs):
        out[channel_idx] = tfr
    if ('avg_' not in output) and ('itc' not in output):
        # This is to enforce that the first dimension is for epochs
        out = out.transpose(1, 0, 2, 3)
    return out
def _check_tfr_param(frequencies, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output):
"""Aux. function to _compute_tfr to check the params validity."""
# Check frequencies
if not isinstance(frequencies, (list, np.ndarray)):
raise ValueError('frequencies must be an array-like, got %s '
'instead.' % type(frequencies))
frequencies = np.asarray(frequencies, dtype=float)
if frequencies.ndim != 1:
raise ValueError('frequencies must be of shape (n_freqs,), got %s '
'instead.' % np.array(frequencies.shape))
# Check sfreq
if not isinstance(sfreq, (float, int)):
raise ValueError('sfreq must be a float or an int, got %s '
'instead.' % type(sfreq))
sfreq = float(sfreq)
# Default zero_mean = True if multitaper else False
zero_mean = method == 'multitaper' if zero_mean is None else zero_mean
if not isinstance(zero_mean, bool):
raise ValueError('zero_mean should be of type bool, got %s. instead'
% type(zero_mean))
frequencies = np.asarray(frequencies)
if (method == 'multitaper') and (output == 'phase'):
raise NotImplementedError(
'This function is not optimized to compute the phase using the '
'multitaper method. Use np.angle of the complex output instead.')
# Check n_cycles
if isinstance(n_cycles, (int, float)):
n_cycles = float(n_cycles)
elif isinstance(n_cycles, (list, np.ndarray)):
n_cycles = np.array(n_cycles)
if len(n_cycles) != len(frequencies):
raise ValueError('n_cycles must be a float or an array of length '
'%i frequencies, got %i cycles instead.' %
(len(frequencies), len(n_cycles)))
else:
raise ValueError('n_cycles must be a float or an array, got %s '
'instead.' % type(n_cycles))
# Check time_bandwidth
if (method == 'morlet') and (time_bandwidth is not None):
raise ValueError('time_bandwidth only applies to "multitaper" method.')
elif method == 'multitaper':
time_bandwidth = (4.0 if time_bandwidth is None
else float(time_bandwidth))
# Check use_fft
if not isinstance(use_fft, bool):
raise ValueError('use_fft must be a boolean, got %s '
'instead.' % type(use_fft))
# Check decim
if isinstance(decim, int):
decim = slice(None, None, decim)
if not isinstance(decim, slice):
raise ValueError('decim must be an integer or a slice, '
'got %s instead.' % type(decim))
# Check output
allowed_ouput = ('complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc')
if output not in allowed_ouput:
raise ValueError("Unknown output type. Allowed are %s but "
"got %s." % (allowed_ouput, output))
if method not in ('multitaper', 'morlet'):
raise ValueError('method must be "morlet" or "multitaper", got %s '
'instead.' % type(method))
return frequencies, sfreq, zero_mean, n_cycles, time_bandwidth, decim
def _time_frequency_loop(X, Ws, output, use_fft, mode, decim):
    """Aux. function to _compute_tfr.

    Loops time-frequency transform across wavelets and epochs.

    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        The epochs data of a single channel.
    Ws : list, shape (n_tapers, n_wavelets, n_times)
        The wavelets.
    output : str
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    use_fft : bool
        Use the FFT for convolutions or not.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : slice
        The decimation slice: e.g. power[:, decim]

    Returns
    -------
    tfrs : array
        Shape (n_freqs, n_times) for the averaged outputs
        ('avg_power', 'itc', 'avg_power_itc'), else
        (n_epochs, n_freqs, n_times).
    """
    # Set output type. NOTE: use the explicit sized dtypes -- the bare
    # np.float / np.complex aliases are deprecated since NumPy 1.20 and
    # removed in NumPy 1.24, where they raise AttributeError.
    dtype = np.float64
    if output in ['complex', 'avg_power_itc']:
        dtype = np.complex128
    # Init outputs
    decim = _check_decim(decim)
    n_epochs, n_times = X[:, decim].shape
    n_freqs = len(Ws[0])
    if ('avg_' in output) or ('itc' in output):
        tfrs = np.zeros((n_freqs, n_times), dtype=dtype)
    else:
        tfrs = np.zeros((n_epochs, n_freqs, n_times), dtype=dtype)
    # Loops across tapers.
    for W in Ws:
        coefs = _cwt(X, W, mode, decim=decim, use_fft=use_fft)
        # Inter-trial phase locking is apparently computed per taper...
        if 'itc' in output:
            plf = np.zeros((n_freqs, n_times), dtype=np.complex128)
        # Loop across epochs
        for epoch_idx, tfr in enumerate(coefs):
            # Transform complex values
            if output in ['power', 'avg_power']:
                tfr = (tfr * tfr.conj()).real  # power
            elif output == 'phase':
                tfr = np.angle(tfr)
            elif output == 'avg_power_itc':
                tfr_abs = np.abs(tfr)
                plf += tfr / tfr_abs  # phase
                tfr = tfr_abs ** 2  # power
            elif output == 'itc':
                plf += tfr / np.abs(tfr)  # phase
                continue  # no need to stack anything else than plf
            # Stack or add
            if ('avg_' in output) or ('itc' in output):
                tfrs += tfr
            else:
                tfrs[epoch_idx] += tfr
        # Compute inter trial coherence
        if output == 'avg_power_itc':
            tfrs += 1j * np.abs(plf)
        elif output == 'itc':
            tfrs += np.abs(plf)
    # Normalization of average metrics
    if ('avg_' in output) or ('itc' in output):
        tfrs /= n_epochs
    # Normalization by number of taper
    tfrs /= len(Ws)
    return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
    """Compute time freq decomposition with continuous wavelet transform.

    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        The signals.
    Ws : list of array
        Wavelets time series.
    use_fft : bool
        Use FFT for convolutions. Defaults to True.
    mode : 'same' | 'valid' | 'full'
        Convention for convolution. 'full' is currently not implemented with
        `use_fft=False`. Defaults to 'same'.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.

        Defaults to 1.

    Returns
    -------
    tfr : array, shape (n_signals, n_frequencies, n_times)
        The time-frequency decompositions.

    See Also
    --------
    mne.time_frequency.tfr_morlet : Compute time-frequency decomposition
        with Morlet wavelets
    """
    decim = _check_decim(decim)
    # Number of output time samples after decimation.
    n_signals, n_times = X[:, decim].shape
    coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft)
    # np.complex is deprecated (removed in NumPy >= 1.24); use the explicit
    # complex128 dtype instead.
    tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128)
    for k, tfr in enumerate(coefs):
        tfrs[k] = tfr
    return tfrs
def _tfr_aux(method, inst, freqs, decim, return_itc, picks, average,
             **tfr_params):
    """Help reduce redundancy between tfr_morlet and tfr_multitaper."""
    decim = _check_decim(decim)
    data = _get_data(inst, return_itc)
    info, data, picks = _prepare_picks(inst.info, data, picks)
    data = data[:, picks, :]
    # ITC only makes sense once power is averaged over epochs.
    if not average and return_itc:
        raise ValueError('Inter-trial coherence is not supported'
                         ' with average=False')
    if average:
        output = 'avg_power_itc' if return_itc else 'avg_power'
    else:
        output = 'power'
    out = _compute_tfr(data, freqs, info['sfreq'], method=method,
                       output=output, decim=decim, **tfr_params)
    times = inst.times[decim].copy()
    if not average:
        # Single-trial power goes into an EpochsTFR container.
        return EpochsTFR(info, out, times, freqs, method='%s-power' % method)
    # 'avg_power_itc' packs power into the real part and ITC into the
    # imaginary part of a single complex array.
    if return_itc:
        power, itc = out.real, out.imag
    else:
        power = out
    nave = len(data)
    result = AverageTFR(info, power, times, freqs, nave,
                        method='%s-power' % method)
    if return_itc:
        return (result, AverageTFR(info, itc, times, freqs, nave,
                                   method='%s-itc' % method))
    return result
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
               n_jobs=1, picks=None, zero_mean=True, average=True,
               verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object to decompose.
    freqs : ndarray, shape (n_freqs,)
        Frequencies of interest in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        Number of wavelet cycles, fixed or one value per frequency.
    use_fft : bool, defaults to False
        Whether convolutions are computed with the FFT.
    return_itc : bool, defaults to True
        If True, also return the inter-trial coherence (ITC). Must be
        ``False`` for evoked data.
    decim : int | slice, defaults to 1
        Decimation applied after the time-frequency decomposition to reduce
        memory usage. ``int`` keeps every ``decim``-th sample
        (tfr[..., ::decim]); a ``slice`` is applied to the time axis
        directly (tfr[..., decim]).

        .. note:: Decimation may create aliasing artifacts.
    n_jobs : int, defaults to 1
        Number of parallel jobs.
    picks : array-like of int | None, defaults to None
        Channel indices to decompose; all available channels if None.
    zero_mean : bool, defaults to True
        If True, the wavelets are forced to have a mean of zero.

        .. versionadded:: 0.13.0
    average : bool, defaults to True
        If True, average the power estimate across epochs.

        .. versionadded:: 0.13.0
    verbose : bool, str, int, or None, defaults to None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    mne.time_frequency.tfr_array_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    """
    # Delegate to the shared helper; Morlet-specific options are forwarded
    # as keyword arguments.
    return _tfr_aux('morlet', inst, freqs, decim, return_itc, picks, average,
                    n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
                    zero_mean=zero_mean)
@verbose
def tfr_array_morlet(epoch_data, sfreq, frequencies, n_cycles=7.0,
                     zero_mean=False, use_fft=True, decim=1, output='complex',
                     n_jobs=1, verbose=None):
    """Compute a time-frequency transform using Morlet wavelets.

    Convolves the epoch data with the selected Morlet wavelets.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    sfreq : float | int
        Sampling frequency of the data.
    frequencies : array-like of floats, shape (n_freqs)
        The frequencies of interest.
    n_cycles : float | array of float, defaults to 7.0
        Number of cycles in the Morlet wavelet, fixed or one value per
        frequency.
    zero_mean : bool | False
        If True, make sure the wavelets have a mean of zero.
        Defaults to False.
    use_fft : bool
        Whether convolutions are computed with the FFT. Defaults to True.
    decim : int | slice
        Decimation applied after the decomposition to reduce memory usage.
        ``int`` keeps every ``decim``-th sample (tfr[..., ::decim]); a
        ``slice`` is applied to the time axis directly (tfr[..., decim]).
        Defaults to 1.

        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.
    output : str, defaults to 'complex'
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    n_jobs : int
        Number of jobs run in parallel (parallelization is implemented
        across channels). Defaults to 1.
    verbose : bool, str, int, or None, defaults to None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. Shape is
        (n_epochs, n_chans, n_freqs, n_times) for the single-trial outputs
        ('complex', 'phase', 'power'), and (n_chans, n_freqs, n_times)
        otherwise. For 'avg_power_itc' the real part codes for 'avg_power'
        and the imaginary part for 'itc': out = avg_power + i * itc.

    See Also
    --------
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell

    Notes
    -----
    .. versionadded:: 0.14.0
    """
    # The Morlet method has no time-bandwidth product, hence None.
    return _compute_tfr(epoch_data=epoch_data, frequencies=frequencies,
                        sfreq=sfreq, method='morlet', n_cycles=n_cycles,
                        zero_mean=zero_mean, time_bandwidth=None,
                        use_fft=use_fft, decim=decim, output=output,
                        n_jobs=n_jobs, verbose=verbose)
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
                   use_fft=True, return_itc=True, decim=1,
                   n_jobs=1, picks=None, average=True, verbose=None):
    """Compute Time-Frequency Representation (TFR) using DPSS tapers.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object to decompose.
    freqs : ndarray, shape (n_freqs,)
        Frequencies of interest in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        Number of cycles, fixed or one value per frequency. The
        time-window length is thus T = n_cycles / freq.
    time_bandwidth : float, (optional), defaults to 4.0 (3 good tapers).
        Time x (Full) Bandwidth product. Should be >= 2.0.
        Choose this along with n_cycles to get desired frequency resolution.
        The number of good tapers (least leakage from far away frequencies)
        is chosen automatically based on this to floor(time_bandwidth - 1).
        E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
    use_fft : bool, defaults to True
        Whether convolutions are computed with the FFT.
    return_itc : bool, defaults to True
        If True, also return the inter-trial coherence (ITC) alongside the
        averaged (or single-trial) power.
    decim : int | slice, defaults to 1
        Decimation applied after the time-frequency decomposition to reduce
        memory usage. ``int`` keeps every ``decim``-th sample
        (tfr[..., ::decim]); a ``slice`` is applied to the time axis
        directly (tfr[..., decim]).

        .. note:: Decimation may create aliasing artifacts.
    n_jobs : int, defaults to 1
        Number of parallel jobs.
    picks : array-like of int | None, defaults to None
        Channel indices to decompose; all available channels if None.
    average : bool, defaults to True
        If True, average the power estimate across epochs.

        .. versionadded:: 0.13.0
    verbose : bool, str, int, or None, defaults to None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Multitaper decomposition always uses zero-mean wavelets.
    return _tfr_aux('multitaper', inst, freqs, decim, return_itc, picks,
                    average, n_cycles=n_cycles, n_jobs=n_jobs,
                    use_fft=use_fft, zero_mean=True,
                    time_bandwidth=time_bandwidth)
# TFR(s) class
class _BaseTFR(ContainsMixin, UpdateChannelsMixin, SizeMixin):
    """Base class shared by the time-frequency containers."""

    @property
    def data(self):
        """The time-frequency data array."""
        return self._data

    @data.setter
    def data(self, data):
        self._data = data

    @property
    def ch_names(self):
        """Channel names."""
        return self.info['ch_names']

    def crop(self, tmin=None, tmax=None):
        """Crop data to a given time interval in place.

        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.

        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """
        # Boolean mask of the time samples to keep.
        keep = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'])
        self.times = self.times[keep]
        self.data = self.data[..., keep]
        return self

    def copy(self):
        """Return a deep copy of the instance."""
        return deepcopy(self)

    @verbose
    def apply_baseline(self, baseline, mode='mean', verbose=None):
        """Baseline correct the data.

        Parameters
        ----------
        baseline : tuple or list of length 2
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : None | 'ratio' | 'zscore' | 'mean' | 'percent' | 'logratio' | 'zlogratio'
            The rescaling mode: 'mean' subtracts the mean baseline power,
            'ratio' divides by it, 'zscore' additionally divides by the
            baseline standard deviation, 'percent' applies ratio then
            mean, and 'logratio' / 'zlogratio' are the log-scaled
            variants. If None no baseline correction is applied.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose`).

        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """  # noqa: E501
        # rescale() modifies the array in place when copy=False.
        self.data = rescale(self.data, self.times, baseline, mode,
                            copy=False)
        return self
class AverageTFR(_BaseTFR):
"""Container for Time-Frequency data.
Can for example store induced power at sensor level or inter-trial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None, defaults to None
Comment on the data, e.g., the experimental condition.
method : str | None, defaults to None
Comment on the method used to compute the data, e.g., morlet wavelet.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None): # noqa: D102
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.array(times, dtype=float)
self.freqs = np.array(freqs, dtype=float)
self.nave = nave
self.comment = comment
self.method = method
self.preload = True
    @verbose
    def plot(self, picks, baseline=None, mode='mean', tmin=None, tmax=None,
             fmin=None, fmax=None, vmin=None, vmax=None, cmap='RdBu_r',
             dB=False, colorbar=True, show=True, title=None, axes=None,
             layout=None, yscale='auto', verbose=None):
        """Plot TFRs as a two-dimensional image(s).
        Parameters
        ----------
        picks : array-like of int
            The indices of the channels to plot, one figure per channel.
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal ot (None, None) all the time
            interval is used.
        mode : None | 'ratio' | 'zscore' | 'mean' | 'percent' | 'logratio' | 'zlogratio'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or zscore (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)),
            mean simply subtracts the mean power, percent is the same as
            applying ratio then mean, logratio is the same as mean but then
            rendered in log-scale, zlogratio is the same as zscore but data
            is rendered in log-scale first.
            If None no baseline correction is applied.
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The mininum value an the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maxinum value an the color scale. If vmax is None, the data
            maximum value is used.
        cmap : matplotlib colormap | 'interactive' | (colormap, bool)
            The colormap to use. If tuple, the first value indicates the
            colormap to use and the second value is a boolean defining
            interactivity. In interactive mode the colors are adjustable by
            clicking and dragging the colorbar with left and right mouse
            button. Left mouse button moves the scale up and down and right
            mouse button adjusts the range. Hitting space bar resets the range.
            Up and down arrows can be used to change the colormap. If
            'interactive', translates to ('RdBu_r', True). Defaults to
            'RdBu_r'.
            .. warning:: Interactive mode works smoothly only for a small
                amount of images.
        dB : bool
            If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot. For user defined axes,
            the colorbar cannot be drawn. Defaults to True.
        show : bool
            Call pyplot.show() at the end.
        title : str | None
            String for title. Defaults to None (blank/no title).
        axes : instance of Axes | list | None
            The axes to plot to. If list, the list must be a list of Axes of
            the same length as the number of channels. If instance of Axes,
            there must be only one channel plotted.
        layout : Layout | None
            Layout instance specifying sensor positions. Used for interactive
            plotting of topographies on rectangle selection. If possible, the
            correct layout is inferred from the data.
        yscale : 'auto' (default) | 'linear' | 'log'
            The scale of y (frequency) axis. 'linear' gives linear y axis,
            'log' leads to log-spaced y axis and 'auto' detects if frequencies
            are log-spaced and only then sets the y axis to 'log'.
            .. versionadded:: 0.14.0
        verbose : bool, str, int, or None
            If not None, override default verbose level (see :func:`mne.verbose`).
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        from ..viz.topo import _imshow_tfr
        import matplotlib.pyplot as plt
        # Work on copies so the preprocessing below does not mutate the
        # data stored on the instance.
        times, freqs = self.times.copy(), self.freqs.copy()
        info = self.info
        data = self.data
        n_picks = len(picks)
        info, data, picks = _prepare_picks(info, data, picks)
        data = data[picks]
        # Baseline-correct / crop / dB-scale and resolve the color limits.
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                         baseline, vmin, vmax, dB, info['sfreq'])
        tmin, tmax = times[0], times[-1]
        # Normalize `axes` to a list so it can be indexed per channel.
        if isinstance(axes, plt.Axes):
            axes = [axes]
        if isinstance(axes, list) or isinstance(axes, np.ndarray):
            if len(axes) != n_picks:
                raise RuntimeError('There must be an axes for each picked '
                                   'channel.')
        cmap = _setup_cmap(cmap)
        # One image per picked channel, either on fresh figures or on the
        # user-supplied axes.
        for idx in range(len(data)):
            if axes is None:
                fig = plt.figure()
                ax = fig.add_subplot(111)
            else:
                ax = axes[idx]
                fig = ax.get_figure()
            # Rectangle selection on the image triggers a topomap of the
            # selected time-frequency window (see _onselect).
            onselect_callback = partial(self._onselect, baseline=baseline,
                                        mode=mode, layout=layout)
            _imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
                        ylim=None, tfr=data[idx: idx + 1], freq=freqs,
                        x_label='Time (ms)', y_label='Frequency (Hz)',
                        colorbar=colorbar, cmap=cmap, yscale=yscale)
            if title:
                fig.suptitle(title)
        plt_show(show)
        return fig
    def _onselect(self, eclick, erelease, baseline, mode, layout):
        """Handle rubber band selector in channel tfr."""
        import matplotlib.pyplot as plt
        from ..viz import plot_tfr_topomap
        # Ignore selections that are too small to be meaningful.
        # NOTE(review): .x/.y are display coordinates while .xdata/.ydata
        # below are data coordinates -- confirm the 0.1 threshold is
        # intentionally in display units.
        if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
            return
        plt.ion()  # turn interactive mode on
        # The image x axis is in ms; convert the selection to seconds.
        tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5)  # ms to s
        tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
        fmin = round(min(eclick.ydata, erelease.ydata), 5)  # Hz
        fmax = round(max(eclick.ydata, erelease.ydata), 5)
        # Snap the selection to the nearest available sample / frequency.
        tmin = min(self.times, key=lambda x: abs(x - tmin))  # find closest
        tmax = min(self.times, key=lambda x: abs(x - tmax))
        fmin = min(self.freqs, key=lambda x: abs(x - fmin))
        fmax = min(self.freqs, key=lambda x: abs(x - fmax))
        if tmin == tmax or fmin == fmax:
            logger.info('The selected area is too small. '
                        'Select a larger time-frequency window.')
            return
        # Collect the channel types available in the data; gradiometers
        # are only usable when at least one pair can be formed.
        types = list()
        if 'eeg' in self:
            types.append('eeg')
        if 'mag' in self:
            types.append('mag')
        if 'grad' in self:
            if len(_pair_grad_sensors(self.info, topomap_coords=False,
                                      raise_error=False)) >= 2:
                types.append('grad')
        elif len(types) == 0:
            return  # Don't draw a figure for nothing.
        fig = figure_nobar()
        fig.suptitle('{0:.2f} s - {1:.2f} s, {2:.2f} Hz - {3:.2f} Hz'.format(
            tmin, tmax, fmin, fmax), y=0.04)
        # One topomap per available channel type, side by side.
        for idx, ch_type in enumerate(types):
            ax = plt.subplot(1, len(types), idx + 1)
            plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
                             fmin=fmin, fmax=fmax, layout=layout,
                             baseline=baseline, mode=mode, cmap=None,
                             title=ch_type, vmin=None, vmax=None,
                             axes=ax)
    def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
                  tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
                  layout=None, cmap='RdBu_r', title=None, dB=False,
                  colorbar=True, layout_scale=0.945, show=True,
                  border='none', fig_facecolor='k', fig_background=None,
                  font_color='w', yscale='auto'):
        """Plot TFRs in a topography with images.
        Parameters
        ----------
        picks : array-like of int | None
            The indices of the channels to plot. If None, all available
            channels are displayed.
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal ot (None, None) all the time
            interval is used.
        mode : None | 'ratio' | 'zscore' | 'mean' | 'percent' | 'logratio' | 'zlogratio'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or zscore (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)),
            mean simply subtracts the mean power, percent is the same as
            applying ratio then mean, logratio is the same as mean but then
            rendered in log-scale, zlogratio is the same as zscore but data
            is rendered in log-scale first.
            If None no baseline correction is applied.
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The mininum value an the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maxinum value an the color scale. If vmax is None, the data
            maximum value is used.
        layout : Layout | None
            Layout instance specifying sensor positions. If possible, the
            correct layout is inferred from the data.
        cmap : matplotlib colormap | str
            The colormap to use. Defaults to 'RdBu_r'.
        title : str
            Title of the figure.
        dB : bool
            If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot
        layout_scale : float
            Scaling factor for adjusting the relative size of the layout
            on the canvas.
        show : bool
            Call pyplot.show() at the end.
        border : str
            matplotlib borders style to be used for each sensor plot.
        fig_facecolor : str | obj
            The figure face color. Defaults to black.
        fig_background : None | array
            A background image for the figure. This must be a valid input to
            `matplotlib.pyplot.imshow`. Defaults to None.
        font_color: str | obj
            The color of tick labels in the colorbar. Defaults to white.
        yscale : 'auto' (default) | 'linear' | 'log'
            The scale of y (frequency) axis. 'linear' gives linear y axis,
            'log' leads to log-spaced y axis and 'auto' detects if frequencies
            are log-spaced and only then sets the y axis to 'log'.
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
        from ..viz import add_background_image
        # Copy times: _preproc_tfr may crop it to [tmin, tmax].
        times = self.times.copy()
        freqs = self.freqs
        data = self.data
        info = self.info
        info, data, picks = _prepare_picks(info, data, picks)
        data = data[picks]
        # Baseline-correct / crop / dB-scale and resolve the color limits.
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
                         mode, baseline, vmin, vmax, dB, info['sfreq'])
        if layout is None:
            from mne import find_layout
            layout = find_layout(self.info)
        onselect_callback = partial(self._onselect, baseline=baseline,
                                    mode=mode, layout=layout)
        # click_fun draws the enlarged interactive image when a sensor is
        # clicked; imshow draws the small per-sensor images.
        click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, yscale=yscale,
                            cmap=(cmap, True), onselect=onselect_callback)
        imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
                         onselect=onselect_callback)
        fig = _plot_topo(info=info, times=times, show_func=imshow,
                         click_func=click_fun, layout=layout,
                         colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
                         layout_scale=layout_scale, title=title, border=border,
                         x_label='Time (ms)', y_label='Frequency (Hz)',
                         fig_facecolor=fig_facecolor, font_color=font_color,
                         unified=True, img=True)
        add_background_image(fig, fig_background)
        plt_show(show)
        return fig
    def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
                     ch_type=None, baseline=None, mode='mean',
                     layout=None, vmin=None, vmax=None, cmap=None,
                     sensors=True, colorbar=True, unit=None, res=64, size=2,
                     cbar_fmt='%1.1e', show_names=False, title=None,
                     axes=None, show=True, outlines='head', head_pos=None):
        """Plot topographic maps of time-frequency intervals of TFR data.
        Parameters
        ----------
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted.
            If None, then first available channel type from order given
            above is used. Defaults to None.
        baseline : tuple or list of length 2
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : None | 'ratio' | 'zscore' | 'mean' | 'percent' | 'logratio' | 'zlogratio'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or zscore (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)),
            mean simply subtracts the mean power, percent is the same as
            applying ratio then mean, logratio is the same as mean but then
            rendered in log-scale, zlogratio is the same as zscore but data
            is rendered in log-scale first.
            If None no baseline correction is applied.
        layout : None | Layout
            Layout instance specifying sensor positions (does not need to
            be specified for Neuromag data). If possible, the correct layout
            file is inferred from the data; if no appropriate layout file was
            found, the layout is automatically generated from the sensor
            locations.
        vmin : float | callable | None
            The value specifying the lower bound of the color range. If None,
            and vmax is None, -vmax is used. Else np.min(data) or in case
            data contains only positive values 0. If callable, the output
            equals vmin(data). Defaults to None.
        vmax : float | callable | None
            The value specifying the upper bound of the color range. If None,
            the maximum value is used. If callable, the output equals
            vmax(data). Defaults to None.
        cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
            Colormap to use. If tuple, the first value indicates the colormap
            to use and the second value is a boolean defining interactivity. In
            interactive mode the colors are adjustable by clicking and dragging
            the colorbar with left and right mouse button. Left mouse button
            moves the scale up and down and right mouse button adjusts the
            range. Hitting space bar resets the range. Up and down arrows can
            be used to change the colormap. If None (default), 'Reds' is used
            for all positive data, otherwise defaults to 'RdBu_r'. If
            'interactive', translates to (None, True).
        sensors : bool | str
            Add markers for sensor locations to the plot. Accepts matplotlib
            plot format string (e.g., 'r+' for red plusses). If True, a circle
            will be used (via .add_artist). Defaults to True.
        colorbar : bool
            Plot a colorbar.
        unit : dict | str | None
            The unit of the channel type used for colorbar label. If
            scale is None the unit is automatically determined.
        res : int
            The resolution of the topomap image (n pixels along each side).
        size : float
            Side length per topomap in inches.
        cbar_fmt : str
            String format for colorbar values.
        show_names : bool | callable
            If True, show channel names on top of the map. If a callable is
            passed, channel names will be formatted using the callable; e.g.,
            to delete the prefix 'MEG ' from all channel names, pass the
            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
            only significant sensors will be shown.
        title : str | None
            Title. If None (default), no title is displayed.
        axes : instance of Axes | None
            The axes to plot to. If None the axes is defined automatically.
        show : bool
            Call pyplot.show() at the end.
        outlines : 'head' | 'skirt' | dict | None
            The outlines to be drawn. If 'head', the default head scheme will
            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
            allowed to be plotted outside of the head circle. If dict, each key
            refers to a tuple of x and y positions, the values in 'mask_pos'
            will serve as image mask, and the 'autoshrink' (bool) field will
            trigger automated shrinking of the positions due to points outside
            the outline. Alternatively, a matplotlib patch object can be passed
            for advanced masking options, either directly or as a function that
            returns patches (required for multi-axis plots). If None, nothing
            will be drawn. Defaults to 'head'.
        head_pos : dict | None
            If None (default), the sensors are positioned such that they span
            the head circle. If dict, can have entries 'center' (tuple) and
            'scale' (tuple) for what the center and scale of the head should be
            relative to the electrode locations.
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        from ..viz import plot_tfr_topomap
        # Thin wrapper: all the work happens in mne.viz.plot_tfr_topomap.
        return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
                                fmax=fmax, ch_type=ch_type, baseline=baseline,
                                mode=mode, layout=layout, vmin=vmin, vmax=vmax,
                                cmap=cmap, sensors=sensors, colorbar=colorbar,
                                unit=unit, res=res, size=size,
                                cbar_fmt=cbar_fmt, show_names=show_names,
                                title=title, axes=axes, show=show,
                                outlines=outlines, head_pos=head_pos)
def _check_compat(self, tfr):
"""Check that self and tfr have the same time-frequency ranges."""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr): # noqa: D105
"""Add instances."""
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
    def __iadd__(self, tfr):  # noqa: D105
        # In-place addition: accumulates tfr.data into this instance's own
        # data buffer after validating matching time/frequency axes.
        self._check_compat(tfr)
        self.data += tfr.data
        return self
def __sub__(self, tfr): # noqa: D105
"""Subtract instances."""
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
    def __isub__(self, tfr):  # noqa: D105
        # In-place subtraction counterpart of __sub__; mutates self.data.
        self._check_compat(tfr)
        self.data -= tfr.data
        return self
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<AverageTFR | %s>" % s
    def save(self, fname, overwrite=False):
        """Save TFR object to hdf5 file.

        Thin wrapper around :func:`write_tfrs` for this single instance.

        Parameters
        ----------
        fname : str
            The file name, which should end with -tfr.h5 .
        overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.

        See Also
        --------
        write_tfrs
        """
        write_tfrs(fname, self, overwrite=overwrite)
class EpochsTFR(_BaseTFR):
    """Container for Time-Frequency data on epochs.

    Can for example store induced power at sensor level.

    Parameters
    ----------
    info : Info
        The measurement info.
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : str | None, defaults to None
        Comment on the data, e.g., the experimental condition.
    method : str | None, defaults to None
        Comment on the method used to compute the data, e.g., morlet wavelet.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Attributes
    ----------
    ch_names : list
        The names of the channels.

    Notes
    -----
    .. versionadded:: 0.13.0
    """

    @verbose
    def __init__(self, info, data, times, freqs, comment=None,
                 method=None, verbose=None):  # noqa: D102
        self.info = info
        if data.ndim != 4:
            raise ValueError('data should be 4d. Got %d.' % data.ndim)
        n_epochs, n_channels, n_freqs, n_times = data.shape
        # Every non-epoch axis of the data must agree with its metadata.
        for actual, expected, kind in (
                (n_channels, len(info['chs']), 'channels'),
                (n_freqs, len(freqs), 'frequencies'),
                (n_times, len(times), 'times')):
            if actual != expected:
                raise ValueError("Number of %s and data size don't match"
                                 " (%d != %d)." % (kind, actual, expected))
        self.data = data
        self.times = np.array(times, dtype=float)
        self.freqs = np.array(freqs, dtype=float)
        self.comment = comment
        self.method = method
        self.preload = True

    def __repr__(self):  # noqa: D105
        fields = ["time : [%f, %f]" % (self.times[0], self.times[-1]),
                  "freq : [%f, %f]" % (self.freqs[0], self.freqs[-1]),
                  "epochs : %d" % self.data.shape[0],
                  'channels : %d' % self.data.shape[1],
                  '~%s' % (sizeof_fmt(self._size),)]
        return "<EpochsTFR | %s>" % ", ".join(fields)

    def average(self):
        """Average the data across epochs.

        Returns
        -------
        ave : instance of AverageTFR
            The averaged data.
        """
        data = np.mean(self.data, axis=0)
        return AverageTFR(info=self.info.copy(), data=data,
                          times=self.times.copy(), freqs=self.freqs.copy(),
                          nave=self.data.shape[0],
                          method=self.method)
def combine_tfr(all_tfr, weights='nave'):
    """Merge AverageTFR data by weighted addition.

    Create a new AverageTFR instance, using a combination of the supplied
    instances as its data. By default, the mean (weighted by trials) is used.
    Subtraction can be performed by passing negative weights (e.g., [1, -1]).
    Data must have the same channels and the same time instants.

    Parameters
    ----------
    all_tfr : list of AverageTFR
        The tfr datasets.
    weights : list of float | str
        The weights to apply to the data of each AverageTFR instance.
        Can also be ``'nave'`` to weight according to tfr.nave,
        or ``'equal'`` to use equal weighting (each weighted as ``1/N``).

    Returns
    -------
    tfr : AverageTFR
        The new TFR data.

    Raises
    ------
    ValueError
        If ``weights`` is an unknown string / wrong size, or if the
        instances differ in channels or time instants.

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    tfr = all_tfr[0].copy()
    if isinstance(weights, string_types):
        if weights not in ('nave', 'equal'):
            raise ValueError('Weights must be a list of float, or "nave" or '
                             '"equal"')
        if weights == 'nave':
            weights = np.array([e.nave for e in all_tfr], float)
            weights /= weights.sum()
        else:  # == 'equal'
            weights = [1. / len(all_tfr)] * len(all_tfr)
    weights = np.array(weights, float)
    if weights.ndim != 1 or weights.size != len(all_tfr):
        raise ValueError('Weights must be the same size as all_tfr')
    ch_names = tfr.ch_names
    for t_ in all_tfr[1:]:
        # Raise explicitly instead of `assert cond, ValueError(...)`:
        # the old form never raised the ValueError it constructed and the
        # whole check disappeared when running under `python -O`.
        if t_.ch_names != ch_names:
            raise ValueError("%s and %s do not contain "
                             "the same channels" % (tfr, t_))
        if np.max(np.abs(t_.times - tfr.times)) >= 1e-7:
            raise ValueError("%s and %s do not contain the same time instants"
                             % (tfr, t_))
    # use union of bad channels
    bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
                                              for t_ in all_tfr[1:])))
    tfr.info['bads'] = bads
    # XXX : should be refactored with combined_evoked function
    tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
    # Effective nave of a weighted sum of independent averages (clamped >= 1).
    tfr.nave = max(int(1. / sum(w ** 2 / e.nave
                                for w, e in zip(weights, all_tfr))), 1)
    return tfr
# Utils
def _get_data(inst, return_itc):
    """Get data from Epochs or Evoked instance as epochs x ch x time."""
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    if not isinstance(inst, (BaseEpochs, Evoked)):
        raise TypeError('inst must be Epochs or Evoked')
    if isinstance(inst, BaseEpochs):
        return inst.get_data()
    # Evoked: inter-trial coherence is undefined for a single average.
    if return_itc:
        raise ValueError('return_itc must be False for evoked data')
    return inst.data[np.newaxis].copy()
def _prepare_picks(info, data, picks):
    """Resolve ``picks``; subset ``info`` only when a real subset is taken."""
    if picks is None:
        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
                           exclude='bads')
    everything_selected = np.array_equal(picks, np.arange(len(data)))
    if everything_selected:
        # Trivial selection: a full slice avoids copying the info.
        picks = slice(None)
    else:
        info = pick_info(info, picks)
    return info, data, picks
def _centered(arr, newsize):
"""Aux Function to center data."""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                 baseline, vmin, vmax, dB, sfreq):
    """Aux Function to prepare tfr computation.

    Rescales to the baseline, crops in time and frequency, converts times
    to milliseconds, optionally converts power to dB, and resolves the
    color limits. Returns (data, times, freqs, vmin, vmax).
    """
    from ..viz.utils import _setup_vmin_vmax
    # Only copy when a baseline is given, i.e. when rescale will modify data.
    copy = baseline is not None
    data = rescale(data, times, baseline, mode, copy=copy)
    # crop time
    itmin, itmax = None, None
    idx = np.where(_time_mask(times, tmin, tmax, sfreq=sfreq))[0]
    if tmin is not None:
        itmin = idx[0]
    if tmax is not None:
        itmax = idx[-1] + 1
    times = times[itmin:itmax]
    # crop freqs
    ifmin, ifmax = None, None
    idx = np.where(_time_mask(freqs, fmin, fmax, sfreq=sfreq))[0]
    if fmin is not None:
        ifmin = idx[0]
    if fmax is not None:
        ifmax = idx[-1] + 1
    freqs = freqs[ifmin:ifmax]
    # crop data
    data = data[:, ifmin:ifmax, itmin:itmax]
    # NOTE(review): `times` is a view into the caller's array here, so this
    # in-place seconds->ms scaling mutates the caller's data -- confirm that
    # callers pass a disposable copy.
    times *= 1e3
    if dB:
        # Power in dB: 10*log10(|x|^2); conj() keeps this valid for complex.
        data = 10 * np.log10((data * data.conj()).real)
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    return data, times, freqs, vmin, vmax
def _check_decim(decim):
"""Aux function checking the decim parameter."""
if isinstance(decim, int):
decim = slice(None, None, decim)
elif not isinstance(decim, slice):
raise(TypeError, '`decim` must be int or slice, got %s instead'
% type(decim))
return decim
# i/o
def write_tfrs(fname, tfr, overwrite=False):
    """Write a TFR dataset to hdf5.

    Parameters
    ----------
    fname : string
        The file name, which should end with -tfr.h5
    tfr : AverageTFR instance, or list of AverageTFR instances
        The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is not None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed
    overwrite : bool
        If True, overwrite file (if it exists). Defaults to False.

    See Also
    --------
    read_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    tfrs = [tfr] if not isinstance(tfr, (list, tuple)) else tfr
    out = []
    for index, one_tfr in enumerate(tfrs):
        # Fall back to the positional index when no comment names the data.
        condition = index if one_tfr.comment is None else one_tfr.comment
        out.append(_prepare_write_tfr(one_tfr, condition=condition))
    write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
def _prepare_write_tfr(tfr, condition):
"""Aux function."""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info,
nave=tfr.nave, comment=tfr.comment,
method=tfr.method))
def read_tfrs(fname, condition=None):
    """Read TFR datasets from hdf5 file.

    Parameters
    ----------
    fname : string
        The file name, which should end with -tfr.h5 .
    condition : int or str | list of int or str | None
        The condition to load. If None, all conditions will be returned.
        Defaults to None.

    Returns
    -------
    tfrs : list of instances of AverageTFR | instance of AverageTFR
        Depending on `condition` either the TFR object or a list of multiple
        TFR objects.

    Raises
    ------
    ValueError
        If ``condition`` is given but not present in the file.

    See Also
    --------
    write_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    check_fname(fname, 'tfr', ('-tfr.h5',))
    logger.info('Reading %s ...' % fname)
    tfr_data = read_hdf5(fname, title='mnepython')
    # Re-wrap the plain dict that hdf5 stored into a proper Info object.
    for k, tfr in tfr_data:
        tfr['info'] = Info(tfr['info'])
    if condition is not None:
        tfr_dict = dict(tfr_data)
        if condition not in tfr_dict:
            keys = ['%s' % k for k in tfr_dict]
            # Fixed a stray trailing quote in the message ('"{1}""').
            raise ValueError('Cannot find condition ("{0}") in this file. '
                             'The file contains "{1}"'
                             .format(condition, " or ".join(keys)))
        out = AverageTFR(**tfr_dict[condition])
    else:
        # zip(*pairs) transposes [(cond, state), ...] into (conds, states).
        out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
    return out
|
nicproulx/mne-python
|
mne/time_frequency/tfr.py
|
Python
|
bsd-3-clause
| 69,456
|
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <cmath>
#include <cstdlib>
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
#include "./vpx_config.h"
#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
#include "vpx/vpx_integer.h"
using libvpx_test::ACMRandom;
namespace {
// Horizontally and Vertically need 32x32: 8 Coeffs preceeding filtered section
// 16 Coefs within filtered section
// 8 Coeffs following filtered section
// Total working buffer: 32x32 samples.
const int kNumCoeffs = 1024;
// Number of random trials per parameterized test case.
const int number_of_iterations = 10000;
#if CONFIG_VP9_HIGHBITDEPTH
// Single loop-filter kernel: filters one edge of a high-bit-depth
// (uint16_t) buffer with pitch `p` and thresholds blimit/limit/thresh;
// `bd` is the bit depth.
typedef void (*loop_op_t)(uint16_t *s, int p, const uint8_t *blimit,
                          const uint8_t *limit, const uint8_t *thresh,
                          int count, int bd);
// Dual loop-filter kernel: filters two adjacent edges in one call, each
// edge with its own blimit/limit/thresh triple.
typedef void (*dual_loop_op_t)(uint16_t *s, int p, const uint8_t *blimit0,
                               const uint8_t *limit0, const uint8_t *thresh0,
                               const uint8_t *blimit1, const uint8_t *limit1,
                               const uint8_t *thresh1, int bd);
#else
// 8-bit variants of the same kernel signatures (no bit-depth argument).
typedef void (*loop_op_t)(uint8_t *s, int p, const uint8_t *blimit,
                          const uint8_t *limit, const uint8_t *thresh,
                          int count);
typedef void (*dual_loop_op_t)(uint8_t *s, int p, const uint8_t *blimit0,
                               const uint8_t *limit0, const uint8_t *thresh0,
                               const uint8_t *blimit1, const uint8_t *limit1,
                               const uint8_t *thresh1);
#endif // CONFIG_VP9_HIGHBITDEPTH
// Test parameters: (kernel under test, C reference kernel, bit depth).
typedef std::tr1::tuple<loop_op_t, loop_op_t, int> loop8_param_t;
typedef std::tr1::tuple<dual_loop_op_t, dual_loop_op_t, int> dualloop8_param_t;
#if HAVE_SSE2
#if CONFIG_VP9_HIGHBITDEPTH
// Adapters: the vertical-16 kernels do not take a `count` parameter, so
// these wrappers drop it to match the loop_op_t signature the tests use.
void wrapper_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
                              const uint8_t *limit, const uint8_t *thresh,
                              int count, int bd) {
  vp9_highbd_lpf_vertical_16_sse2(s, p, blimit, limit, thresh, bd);
}
void wrapper_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit,
                           const uint8_t *limit, const uint8_t *thresh,
                           int count, int bd) {
  vp9_highbd_lpf_vertical_16_c(s, p, blimit, limit, thresh, bd);
}
void wrapper_vertical_16_dual_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                   const uint8_t *limit, const uint8_t *thresh,
                                   int count, int bd) {
  vp9_highbd_lpf_vertical_16_dual_sse2(s, p, blimit, limit, thresh, bd);
}
void wrapper_vertical_16_dual_c(uint16_t *s, int p, const uint8_t *blimit,
                                const uint8_t *limit, const uint8_t *thresh,
                                int count, int bd) {
  vp9_highbd_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh, bd);
}
#else
// 8-bit adapters; see the high-bit-depth versions above.
void wrapper_vertical_16_sse2(uint8_t *s, int p, const uint8_t *blimit,
                              const uint8_t *limit, const uint8_t *thresh,
                              int count) {
  vp9_lpf_vertical_16_sse2(s, p, blimit, limit, thresh);
}
void wrapper_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
                           const uint8_t *limit, const uint8_t *thresh,
                           int count) {
  vp9_lpf_vertical_16_c(s, p, blimit, limit, thresh);
}
void wrapper_vertical_16_dual_sse2(uint8_t *s, int p, const uint8_t *blimit,
                                   const uint8_t *limit, const uint8_t *thresh,
                                   int count) {
  vp9_lpf_vertical_16_dual_sse2(s, p, blimit, limit, thresh);
}
void wrapper_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
                                const uint8_t *limit, const uint8_t *thresh,
                                int count) {
  vp9_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // HAVE_SSE2
// Fixture comparing a single-edge loop-filter implementation (param 0)
// against its C reference (param 1) at the bit depth given by param 2.
class Loop8Test6Param : public ::testing::TestWithParam<loop8_param_t> {
 public:
  virtual ~Loop8Test6Param() {}
  virtual void SetUp() {
    loopfilter_op_ = GET_PARAM(0);
    ref_loopfilter_op_ = GET_PARAM(1);
    bit_depth_ = GET_PARAM(2);
    // Pixel values are masked to the active bit depth before filtering.
    mask_ = (1 << bit_depth_) - 1;
  }
  virtual void TearDown() { libvpx_test::ClearSystemState(); }
 protected:
  int bit_depth_;
  int mask_;
  loop_op_t loopfilter_op_;
  loop_op_t ref_loopfilter_op_;
};
// Fixture comparing a dual-edge loop-filter implementation (param 0)
// against its C reference (param 1) at the bit depth given by param 2.
class Loop8Test9Param : public ::testing::TestWithParam<dualloop8_param_t> {
 public:
  virtual ~Loop8Test9Param() {}
  virtual void SetUp() {
    loopfilter_op_ = GET_PARAM(0);
    ref_loopfilter_op_ = GET_PARAM(1);
    bit_depth_ = GET_PARAM(2);
    // Pixel values are masked to the active bit depth before filtering.
    mask_ = (1 << bit_depth_) - 1;
  }
  virtual void TearDown() { libvpx_test::ClearSystemState(); }
 protected:
  int bit_depth_;
  int mask_;
  dual_loop_op_t loopfilter_op_;
  dual_loop_op_t ref_loopfilter_op_;
};
// Feeds piecewise-smooth random data (runs that drift by +/-(limit-1))
// through the kernel under test and its C reference; every output sample
// must match exactly.
TEST_P(Loop8Test6Param, OperationCheck) {
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  const int count_test_block = number_of_iterations;
#if CONFIG_VP9_HIGHBITDEPTH
  int32_t bd = bit_depth_;
  DECLARE_ALIGNED_ARRAY(16, uint16_t, s, kNumCoeffs);
  DECLARE_ALIGNED_ARRAY(16, uint16_t, ref_s, kNumCoeffs);
#else
  DECLARE_ALIGNED_ARRAY(8, uint8_t, s, kNumCoeffs);
  DECLARE_ALIGNED_ARRAY(8, uint8_t, ref_s, kNumCoeffs);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  int err_count_total = 0;
  int first_failure = -1;
  for (int i = 0; i < count_test_block; ++i) {
    int err_count = 0;
    // Thresholds are replicated across 16 bytes as the kernels expect.
    uint8_t tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, limit[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    int32_t p = kNumCoeffs/32;
    int count = 1;
    uint16_t tmp_s[kNumCoeffs];
    int j = 0;
    while (j < kNumCoeffs) {
      uint8_t val = rnd.Rand8();
      if (val & 0x80) {  // 50% chance to choose a new value.
        tmp_s[j] = rnd.Rand16();
        j++;
      } else {  // 50% chance to repeat previous value in row X times
        int k = 0;
        while (k++ < ((val & 0x1f) + 1) && j < kNumCoeffs) {
          if (j < 1) {
            tmp_s[j] = rnd.Rand16();
          } else if (val & 0x20) {  // Increment by an value within the limit
            tmp_s[j] = (tmp_s[j - 1] + (*limit - 1));
          } else {  // Decrement by an value within the limit
            tmp_s[j] = (tmp_s[j - 1] - (*limit - 1));
          }
          j++;
        }
      }
    }
    for (j = 0; j < kNumCoeffs; j++) {
      // Odd iterations keep the row-major layout; even iterations
      // transpose the pattern so the vertical filters see runs too.
      if (i % 2) {
        s[j] = tmp_s[j] & mask_;
      } else {
        s[j] = tmp_s[p * (j % p) + j / p] & mask_;
      }
      ref_s[j] = s[j];
    }
#if CONFIG_VP9_HIGHBITDEPTH
    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count, bd);
    ASM_REGISTER_STATE_CHECK(
        loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count, bd));
#else
    ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh, count);
    ASM_REGISTER_STATE_CHECK(
        loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count));
#endif  // CONFIG_VP9_HIGHBITDEPTH
    for (int j = 0; j < kNumCoeffs; ++j) {
      err_count += ref_s[j] != s[j];
    }
    if (err_count && !err_count_total) {
      first_failure = i;
    }
    err_count_total += err_count;
  }
  EXPECT_EQ(0, err_count_total)
      << "Error: Loop8Test6Param, C output doesn't match SSE2 "
         "loopfilter output. "
      << "First failed at test case " << first_failure;
}
// Same comparison as OperationCheck but with fully random (uncorrelated)
// input samples, exercising the filters on worst-case data.
TEST_P(Loop8Test6Param, ValueCheck) {
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  const int count_test_block = number_of_iterations;
#if CONFIG_VP9_HIGHBITDEPTH
  const int32_t bd = bit_depth_;
  DECLARE_ALIGNED_ARRAY(16, uint16_t, s, kNumCoeffs);
  DECLARE_ALIGNED_ARRAY(16, uint16_t, ref_s, kNumCoeffs);
#else
  DECLARE_ALIGNED_ARRAY(8, uint8_t, s, kNumCoeffs);
  DECLARE_ALIGNED_ARRAY(8, uint8_t, ref_s, kNumCoeffs);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  int err_count_total = 0;
  int first_failure = -1;
  for (int i = 0; i < count_test_block; ++i) {
    int err_count = 0;
    uint8_t tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, limit[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    int32_t p = kNumCoeffs / 32;
    int count = 1;
    for (int j = 0; j < kNumCoeffs; ++j) {
      s[j] = rnd.Rand16() & mask_;
      ref_s[j] = s[j];
    }
#if CONFIG_VP9_HIGHBITDEPTH
    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count, bd);
    ASM_REGISTER_STATE_CHECK(
        loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count, bd));
#else
    ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh, count);
    ASM_REGISTER_STATE_CHECK(
        loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count));
#endif  // CONFIG_VP9_HIGHBITDEPTH
    for (int j = 0; j < kNumCoeffs; ++j) {
      err_count += ref_s[j] != s[j];
    }
    if (err_count && !err_count_total) {
      first_failure = i;
    }
    err_count_total += err_count;
  }
  EXPECT_EQ(0, err_count_total)
      << "Error: Loop8Test6Param, C output doesn't match SSE2 "
         "loopfilter output. "
      << "First failed at test case " << first_failure;
}
// Dual-edge variant of OperationCheck: two independent threshold triples,
// piecewise-smooth input whose drift is bounded by the smaller limit.
TEST_P(Loop8Test9Param, OperationCheck) {
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  const int count_test_block = number_of_iterations;
#if CONFIG_VP9_HIGHBITDEPTH
  const int32_t bd = bit_depth_;
  DECLARE_ALIGNED_ARRAY(16, uint16_t, s, kNumCoeffs);
  DECLARE_ALIGNED_ARRAY(16, uint16_t, ref_s, kNumCoeffs);
#else
  DECLARE_ALIGNED_ARRAY(8, uint8_t, s, kNumCoeffs);
  DECLARE_ALIGNED_ARRAY(8, uint8_t, ref_s, kNumCoeffs);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  int err_count_total = 0;
  int first_failure = -1;
  for (int i = 0; i < count_test_block; ++i) {
    int err_count = 0;
    uint8_t tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, limit0[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, limit1[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    int32_t p = kNumCoeffs / 32;
    uint16_t tmp_s[kNumCoeffs];
    int j = 0;
    // Bound the run-to-run drift by the tighter of the two limits.
    const uint8_t limit = *limit0 < *limit1 ? *limit0 : *limit1;
    while (j < kNumCoeffs) {
      uint8_t val = rnd.Rand8();
      if (val & 0x80) {  // 50% chance to choose a new value.
        tmp_s[j] = rnd.Rand16();
        j++;
      } else {  // 50% chance to repeat previous value in row X times.
        int k = 0;
        while (k++ < ((val & 0x1f) + 1) && j < kNumCoeffs) {
          if (j < 1) {
            tmp_s[j] = rnd.Rand16();
          } else if (val & 0x20) {  // Increment by a value within the limit.
            tmp_s[j] = (tmp_s[j - 1] + (limit - 1));
          } else {  // Decrement by an value within the limit.
            tmp_s[j] = (tmp_s[j - 1] - (limit - 1));
          }
          j++;
        }
      }
    }
    for (j = 0; j < kNumCoeffs; j++) {
      // Alternate between row-major and transposed layouts per iteration.
      if (i % 2) {
        s[j] = tmp_s[j] & mask_;
      } else {
        s[j] = tmp_s[p * (j % p) + j / p] & mask_;
      }
      ref_s[j] = s[j];
    }
#if CONFIG_VP9_HIGHBITDEPTH
    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
                       blimit1, limit1, thresh1, bd);
    ASM_REGISTER_STATE_CHECK(
        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
                       blimit1, limit1, thresh1, bd));
#else
    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
                       blimit1, limit1, thresh1);
    ASM_REGISTER_STATE_CHECK(
        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
                       blimit1, limit1, thresh1));
#endif  // CONFIG_VP9_HIGHBITDEPTH
    for (int j = 0; j < kNumCoeffs; ++j) {
      err_count += ref_s[j] != s[j];
    }
    if (err_count && !err_count_total) {
      first_failure = i;
    }
    err_count_total += err_count;
  }
  EXPECT_EQ(0, err_count_total)
      << "Error: Loop8Test9Param, C output doesn't match SSE2 "
         "loopfilter output. "
      << "First failed at test case " << first_failure;
}
// Dual-edge variant of ValueCheck: fully random input, two independent
// threshold triples; every output sample must match the C reference.
TEST_P(Loop8Test9Param, ValueCheck) {
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  const int count_test_block = number_of_iterations;
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED_ARRAY(16, uint16_t, s, kNumCoeffs);
  DECLARE_ALIGNED_ARRAY(16, uint16_t, ref_s, kNumCoeffs);
#else
  DECLARE_ALIGNED_ARRAY(8, uint8_t, s, kNumCoeffs);
  DECLARE_ALIGNED_ARRAY(8, uint8_t, ref_s, kNumCoeffs);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  int err_count_total = 0;
  int first_failure = -1;
  for (int i = 0; i < count_test_block; ++i) {
    int err_count = 0;
    uint8_t tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, limit0[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, limit1[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    tmp = rnd.Rand8();
    DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = {
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
    };
    int32_t p = kNumCoeffs / 32;  // TODO(pdlf) can we have non-square here?
    for (int j = 0; j < kNumCoeffs; ++j) {
      s[j] = rnd.Rand16() & mask_;
      ref_s[j] = s[j];
    }
#if CONFIG_VP9_HIGHBITDEPTH
    const int32_t bd = bit_depth_;
    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
                       blimit1, limit1, thresh1, bd);
    ASM_REGISTER_STATE_CHECK(
        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
                       thresh0, blimit1, limit1, thresh1, bd));
#else
    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
                       blimit1, limit1, thresh1);
    ASM_REGISTER_STATE_CHECK(
        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
                       blimit1, limit1, thresh1));
#endif  // CONFIG_VP9_HIGHBITDEPTH
    for (int j = 0; j < kNumCoeffs; ++j) {
      err_count += ref_s[j] != s[j];
    }
    if (err_count && !err_count_total) {
      first_failure = i;
    }
    err_count_total += err_count;
  }
  // Fix: the two adjacent string literals previously concatenated to
  // "...SSE2loopfilter..." (missing space), unlike the sibling tests.
  EXPECT_EQ(0, err_count_total)
      << "Error: Loop8Test9Param, C output doesn't match SSE2 "
         "loopfilter output. "
      << "First failed at test case " << first_failure;
}
using std::tr1::make_tuple;
#if HAVE_SSE2
#if CONFIG_VP9_HIGHBITDEPTH
// Single-edge kernels vs. their C references at bit depths 8, 10 and 12.
INSTANTIATE_TEST_CASE_P(
    SSE2_C_COMPARE_SINGLE, Loop8Test6Param,
    ::testing::Values(
        make_tuple(&vp9_highbd_lpf_horizontal_4_sse2,
                   &vp9_highbd_lpf_horizontal_4_c, 8),
        make_tuple(&vp9_highbd_lpf_vertical_4_sse2,
                   &vp9_highbd_lpf_vertical_4_c, 8),
        make_tuple(&vp9_highbd_lpf_horizontal_8_sse2,
                   &vp9_highbd_lpf_horizontal_8_c, 8),
        make_tuple(&vp9_highbd_lpf_horizontal_16_sse2,
                   &vp9_highbd_lpf_horizontal_16_c, 8),
        make_tuple(&vp9_highbd_lpf_vertical_8_sse2,
                   &vp9_highbd_lpf_vertical_8_c, 8),
        make_tuple(&wrapper_vertical_16_sse2,
                   &wrapper_vertical_16_c, 8),
        make_tuple(&vp9_highbd_lpf_horizontal_4_sse2,
                   &vp9_highbd_lpf_horizontal_4_c, 10),
        make_tuple(&vp9_highbd_lpf_vertical_4_sse2,
                   &vp9_highbd_lpf_vertical_4_c, 10),
        make_tuple(&vp9_highbd_lpf_horizontal_8_sse2,
                   &vp9_highbd_lpf_horizontal_8_c, 10),
        make_tuple(&vp9_highbd_lpf_horizontal_16_sse2,
                   &vp9_highbd_lpf_horizontal_16_c, 10),
        make_tuple(&vp9_highbd_lpf_vertical_8_sse2,
                   &vp9_highbd_lpf_vertical_8_c, 10),
        make_tuple(&wrapper_vertical_16_sse2,
                   &wrapper_vertical_16_c, 10),
        make_tuple(&vp9_highbd_lpf_horizontal_4_sse2,
                   &vp9_highbd_lpf_horizontal_4_c, 12),
        make_tuple(&vp9_highbd_lpf_vertical_4_sse2,
                   &vp9_highbd_lpf_vertical_4_c, 12),
        make_tuple(&vp9_highbd_lpf_horizontal_8_sse2,
                   &vp9_highbd_lpf_horizontal_8_c, 12),
        make_tuple(&vp9_highbd_lpf_horizontal_16_sse2,
                   &vp9_highbd_lpf_horizontal_16_c, 12),
        make_tuple(&vp9_highbd_lpf_vertical_8_sse2,
                   &vp9_highbd_lpf_vertical_8_c, 12),
        make_tuple(&wrapper_vertical_16_sse2,
                   &wrapper_vertical_16_c, 12)));
#else
// 8-bit-only build: only the kernels with SSE2 implementations are listed.
INSTANTIATE_TEST_CASE_P(
    SSE2_C_COMPARE_SINGLE, Loop8Test6Param,
    ::testing::Values(
        make_tuple(&vp9_lpf_horizontal_8_sse2, &vp9_lpf_horizontal_8_c, 8),
        make_tuple(&vp9_lpf_horizontal_16_sse2, &vp9_lpf_horizontal_16_c, 8),
        make_tuple(&vp9_lpf_vertical_8_sse2, &vp9_lpf_vertical_8_c, 8)));
#endif  // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_SSE2
#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSE2_C_COMPARE_DUAL, Loop8Test6Param,
::testing::Values(
make_tuple(&wrapper_vertical_16_dual_sse2,
&wrapper_vertical_16_dual_c, 8),
make_tuple(&wrapper_vertical_16_dual_sse2,
&wrapper_vertical_16_dual_c, 10),
make_tuple(&wrapper_vertical_16_dual_sse2,
&wrapper_vertical_16_dual_c, 12)));
#else
INSTANTIATE_TEST_CASE_P(
SSE2_C_COMPARE_DUAL, Loop8Test6Param,
::testing::Values(
make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // HAVE_SSE2
#if HAVE_SSE2
#if CONFIG_VP9_HIGHBITDEPTH
// Dual-edge (two threshold triples) kernels vs. their C references.
// NOTE(review): prefix says "SSE_" while the siblings use "SSE2_" --
// presumably a historical naming slip; renaming would change the
// registered test names, so it is left as-is.
INSTANTIATE_TEST_CASE_P(
    SSE_C_COMPARE_DUAL, Loop8Test9Param,
    ::testing::Values(
        make_tuple(&vp9_highbd_lpf_horizontal_4_dual_sse2,
                   &vp9_highbd_lpf_horizontal_4_dual_c, 8),
        make_tuple(&vp9_highbd_lpf_horizontal_8_dual_sse2,
                   &vp9_highbd_lpf_horizontal_8_dual_c, 8),
        make_tuple(&vp9_highbd_lpf_vertical_4_dual_sse2,
                   &vp9_highbd_lpf_vertical_4_dual_c, 8),
        make_tuple(&vp9_highbd_lpf_vertical_8_dual_sse2,
                   &vp9_highbd_lpf_vertical_8_dual_c, 8),
        make_tuple(&vp9_highbd_lpf_horizontal_4_dual_sse2,
                   &vp9_highbd_lpf_horizontal_4_dual_c, 10),
        make_tuple(&vp9_highbd_lpf_horizontal_8_dual_sse2,
                   &vp9_highbd_lpf_horizontal_8_dual_c, 10),
        make_tuple(&vp9_highbd_lpf_vertical_4_dual_sse2,
                   &vp9_highbd_lpf_vertical_4_dual_c, 10),
        make_tuple(&vp9_highbd_lpf_vertical_8_dual_sse2,
                   &vp9_highbd_lpf_vertical_8_dual_c, 10),
        make_tuple(&vp9_highbd_lpf_horizontal_4_dual_sse2,
                   &vp9_highbd_lpf_horizontal_4_dual_c, 12),
        make_tuple(&vp9_highbd_lpf_horizontal_8_dual_sse2,
                   &vp9_highbd_lpf_horizontal_8_dual_c, 12),
        make_tuple(&vp9_highbd_lpf_vertical_4_dual_sse2,
                   &vp9_highbd_lpf_vertical_4_dual_c, 12),
        make_tuple(&vp9_highbd_lpf_vertical_8_dual_sse2,
                   &vp9_highbd_lpf_vertical_8_dual_c, 12)));
#else
INSTANTIATE_TEST_CASE_P(
    SSE_C_COMPARE_DUAL, Loop8Test9Param,
    ::testing::Values(
        make_tuple(&vp9_lpf_horizontal_4_dual_sse2,
                   &vp9_lpf_horizontal_4_dual_c, 8),
        make_tuple(&vp9_lpf_horizontal_8_dual_sse2,
                   &vp9_lpf_horizontal_8_dual_c, 8),
        make_tuple(&vp9_lpf_vertical_4_dual_sse2,
                   &vp9_lpf_vertical_4_dual_c, 8),
        make_tuple(&vp9_lpf_vertical_8_dual_sse2,
                   &vp9_lpf_vertical_8_dual_c, 8)));
#endif  // CONFIG_VP9_HIGHBITDEPTH
#endif
} // namespace
|
n4t/libvpx
|
test/lpf_8_test.cc
|
C++
|
bsd-3-clause
| 22,233
|
/********************************************************************************/
/* */
/* TPM Key Handling Routines */
/* Written by J. Kravitz */
/* IBM Thomas J. Watson Research Center */
/* $Id: keys.c 4702 2013-01-03 21:26:29Z kgoldman $ */
/* */
/* (c) Copyright IBM Corporation 2006, 2010. */
/* */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions are */
/* met: */
/* */
/* Redistributions of source code must retain the above copyright notice, */
/* this list of conditions and the following disclaimer. */
/* */
/* Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution. */
/* */
/* Neither the names of the IBM Corporation nor the names of its */
/* contributors may be used to endorse or promote products derived from */
/* this software without specific prior written permission. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR */
/* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT */
/* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */
/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */
/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY */
/* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE */
/* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef TPM_POSIX
#include <netinet/in.h>
#endif
#ifdef TPM_WINDOWS
#include <winsock2.h>
#endif
#include <tpm.h>
#include <oiaposap.h>
#include <tpmfunc.h>
#include <tpmutil.h>
#include <tpmkeys.h>
#include <tpm_constants.h>
#include "tpm_error.h"
#include <hmac.h>
#include <newserialize.h>
#include <openssl/rsa.h>
#include <openssl/sha.h>
#include <openssl/bn.h>
/****************************************************************************/
/*                                                                          */
/* Creates an endorsement key pair                                          */
/*                                                                          */
/* uses the following standard parameters in its request:                   */
/*                                                                          */
/* algorithm:  RSA                                                          */
/* encScheme:  TPM_ES_RSAESOAEP_SHA1_MGF1                                   */
/* sigScheme:  TPM_SS_RSASSAPKCS1v15_SHA1                                   */
/* numprimes:  2                                                            */
/* keybitlen:  2048                                                         */
/*                                                                          */
/* The arguments are ...                                                    */
/*                                                                          */
/* pubkeybuff   A pointer to an area that will hold the public key blob,    */
/*              or NULL if the caller only wants the checksum verified      */
/* pubkeybuflen is the size of the pubkeybuff as given by the caller and    */
/*              on return the number of bytes copied into that buffer       */
/*                                                                          */
/* Returns 0 on success, ERR_CHECKSUM if the anti-replay digest in the      */
/* response does not verify, or a TPM/driver error code otherwise.          */
/****************************************************************************/
uint32_t TPM_CreateEndorsementKeyPair(unsigned char * pubkeybuff,
                                      uint32_t * pubkeybuflen) {
	unsigned char nonce[TPM_HASH_SIZE];   /* anti-replay nonce, echoed in checksum */
	STACK_TPM_BUFFER(tpmdata)
	keydata k;
	uint32_t ret;
	uint32_t ordinal_no = htonl(TPM_ORD_CreateEndorsementKeyPair);
	int serkeylen;
	STACK_TPM_BUFFER(serkey)
	uint32_t size;
	/* fill in the TPM_KEY_PARMS template for a 2048 bit RSA EK */
	memset(&k, 0x0, sizeof(k));
	k.pub.algorithmParms.algorithmID = TPM_ALG_RSA;
	/* Should be ignored, but a certain HW TPM requires the correct encScheme */
	k.pub.algorithmParms.encScheme = TPM_ES_RSAESOAEP_SHA1_MGF1;
	k.pub.algorithmParms.sigScheme = TPM_SS_RSASSAPKCS1v15_SHA1;
	k.pub.algorithmParms.u.rsaKeyParms.keyLength = 2048;
	k.pub.algorithmParms.u.rsaKeyParms.numPrimes = 2;
	/* 0 = default public exponent 2^16+1 (cf. TSS_convpubkey) */
	k.pub.algorithmParms.u.rsaKeyParms.exponentSize = 0;
	TSS_gennonce(nonce);
	serkeylen = TPM_WriteKeyInfo(&serkey, &k);
	if (serkeylen < 0) {
		return serkeylen;
	}
	/* request = tag | length | ordinal | antiReplay nonce | keyInfo */
	ret = TSS_buildbuff("00 c1 T l % %",&tpmdata,
	                    ordinal_no,
	                    TPM_HASH_SIZE, nonce,
	                    serkeylen, serkey.buffer);
	if ((ret & ERR_MASK)) {
		return ret;
	}
	ret = TPM_Transmit(&tpmdata,"CreateEndorsementKeyPair");
	if (0 != ret) {
		return ret;
	}
	/* response carries a TPM_PUBKEY followed by a SHA-1 checksum digest */
	size = TSS_PubKeySize(&tpmdata, TPM_DATA_OFFSET ,0);
	if ((size & ERR_MASK))
		return size;
	/* copy out at most as many bytes as the caller's buffer can hold */
	*pubkeybuflen = MIN(*pubkeybuflen, size);
	if (NULL != pubkeybuff) {
		memcpy(pubkeybuff,
		       &tpmdata.buffer[TPM_DATA_OFFSET],
		       *pubkeybuflen);
	}
	/*
	 * Verify the checksum: SHA1(pubkey blob || antiReplay nonce) must
	 * match the digest the TPM appended right after the pubkey.
	 */
	{
		SHA_CTX sha;
		unsigned char digest[TPM_DIGEST_SIZE];
		SHA1_Init(&sha);
		SHA1_Update(&sha,
		            &tpmdata.buffer[TPM_DATA_OFFSET],
		            size);
		SHA1_Update(&sha,
		            nonce,
		            TPM_HASH_SIZE);
		SHA1_Final(digest,&sha);
		if (0 != memcmp(digest,
		                &tpmdata.buffer[TPM_DATA_OFFSET+size],
		                TPM_DIGEST_SIZE)) {
			ret = ERR_CHECKSUM;
		}
	}
	return ret;
}
/****************************************************************************/
/*                                                                          */
/* Creates a revocable endorsement key pair                                 */
/*                                                                          */
/* uses the following standard parameters in its request:                   */
/*                                                                          */
/* algorithm:  RSA                                                          */
/* encScheme:  TPM_ES_RSAESOAEP_SHA1_MGF1                                   */
/* sigScheme:  TPM_SS_RSASSAPKCS1v15_SHA1                                   */
/* numPrimes:  2                                                            */
/* keybitlen:  2048                                                         */
/*                                                                          */
/* The arguments are ...                                                    */
/*                                                                          */
/* genreset     a boolean that determines whether the TPM generates the     */
/*              ekreset value itself                                        */
/* inputekreset A pointer to a hash that is used as ekreset if genreset is  */
/*              FALSE (ignored otherwise)                                   */
/* pubkey       receives the parsed public part of the new EK               */
/*                                                                          */
/* Returns 0 on success, ERR_CHECKSUM if the anti-replay digest in the      */
/* response does not verify, or a TPM/driver error code otherwise.          */
/****************************************************************************/
uint32_t TPM_CreateRevocableEK(TPM_BOOL genreset,
                               unsigned char * inputekreset,
                               pubkeydata * pubkey) {
	unsigned char nonce[TPM_HASH_SIZE];   /* anti-replay nonce */
	STACK_TPM_BUFFER( tpmdata)
	keydata k;
	uint32_t ret;
	uint32_t ordinal_no = htonl(TPM_ORD_CreateRevocableEK);
	int serkeylen;
	STACK_TPM_BUFFER(serkey)
	uint32_t size;
	unsigned char zeroes[TPM_HASH_SIZE];
	unsigned char *ekreset;
	/* fill in the TPM_KEY_PARMS template for a 2048 bit RSA EK */
	memset(&k, 0x0, sizeof(k));
	k.pub.algorithmParms.algorithmID = TPM_ALG_RSA;
	k.pub.algorithmParms.encScheme = TPM_ES_RSAESOAEP_SHA1_MGF1;
	k.pub.algorithmParms.sigScheme = TPM_SS_RSASSAPKCS1v15_SHA1;
	k.pub.algorithmParms.u.rsaKeyParms.keyLength = 2048;
	k.pub.algorithmParms.u.rsaKeyParms.numPrimes = 2;
	k.pub.algorithmParms.u.rsaKeyParms.exponentSize = 0;
	TSS_gennonce(nonce);
	serkeylen = TPM_WriteKeyInfo(&serkey, &k);
	if ( (serkeylen & ERR_MASK) != 0 ) {
		return serkeylen;
	}
	/* when the TPM generates the reset value itself, the inputEKreset
	   parameter is ignored; send all zeroes in that case.  This unifies
	   the two formerly duplicated request-building branches. */
	if (FALSE == genreset) {
		ekreset = inputekreset;
	} else {
		memset(zeroes, 0x0, TPM_HASH_SIZE);
		ekreset = zeroes;
	}
	/* request = tag | len | ordinal | nonce | keyInfo | genreset | ekreset */
	ret = TSS_buildbuff("00 c1 T l % % o %",&tpmdata,
	                    ordinal_no,
	                    TPM_HASH_SIZE, nonce,
	                    serkeylen, serkey.buffer,
	                    genreset,
	                    TPM_HASH_SIZE, ekreset);
	if ((ret & ERR_MASK)) {
		return ret;
	}
	ret = TPM_Transmit(&tpmdata,"CreateRevocableEK");
	if (0 != ret) {
		return ret;
	}
	/* parse the returned TPM_PUBKEY; bail out before using 'size' as an
	   offset below if the extraction failed (previously unchecked) */
	size = TSS_PubKeyExtract(&tpmdata, TPM_DATA_OFFSET, pubkey);
	if ((size & ERR_MASK)) {
		return size;
	}
	/*
	 * Verify the checksum: SHA1(pubkey blob || antiReplay nonce) must
	 * match the digest appended to the response.
	 */
	{
		SHA_CTX sha;
		unsigned char digest[TPM_DIGEST_SIZE];
		SHA1_Init(&sha);
		SHA1_Update(&sha,
		            &tpmdata.buffer[TPM_DATA_OFFSET],
		            size);
		SHA1_Update(&sha,
		            nonce,
		            TPM_HASH_SIZE);
		SHA1_Final(digest,&sha);
		if (0 != memcmp(digest,
		                &tpmdata.buffer[TPM_DATA_OFFSET+size],
		                TPM_DIGEST_SIZE)) {
			/* library error code instead of the former bare -1, for
			   consistency with TPM_CreateEndorsementKeyPair */
			ret = ERR_CHECKSUM;
		}
	}
	return ret;
}
/****************************************************************************/
/* */
/* Clear the EK and reset the TPM to default state */
/* */
/* The arguments are ... */
/* */
/* inputekreset A pointer to a hash that is used as ekreset */
/* It must match the parameter passed to CreateRevocableEK */
/****************************************************************************/
uint32_t TPM_RevokeTrust(unsigned char *ekreset)
{
STACK_TPM_BUFFER( tpmdata)
uint32_t ordinal_no;
uint32_t ret;
/* check input arguments */
if (NULL == ekreset) return ERR_NULL_ARG;
/* move Network byte order data to variable for hmac calculation */
ordinal_no = htonl(TPM_ORD_RevokeTrust);
/* build the request buffer */
ret = TSS_buildbuff("00 c1 T l %", &tpmdata,
ordinal_no,
TPM_NONCE_SIZE,ekreset);
/* transmit the request buffer to the TPM device and read the reply */
ret = TPM_Transmit(&tpmdata,"RevokeTrust");
return ret;
}
/****************************************************************************/
/*                                                                          */
/* Read the TPM Endorsement public key                                      */
/*                                                                          */
/* k receives the parsed public key.  Returns 0 on success, ERR_CHECKSUM    */
/* if the anti-replay digest in the response does not verify, or an error   */
/* code otherwise.                                                          */
/****************************************************************************/
uint32_t TPM_ReadPubek(pubkeydata *k)
{
	STACK_TPM_BUFFER(tpmdata)
	uint32_t ret;
	uint32_t len;
	unsigned char antiReplay[TPM_NONCE_SIZE];
	/* check input argument */
	if (k == NULL)
		return ERR_NULL_ARG;
	ret = TSS_gennonce(antiReplay);
	if (ret == 0)
		return ERR_CRYPT_ERR;
	/* copy Read PubKey request template to buffer (ordinal 0x7c) */
	ret = TSS_buildbuff("00 c1 T 00 00 00 7c %",&tpmdata,
	                    TPM_HASH_SIZE, antiReplay);
	if ((ret & ERR_MASK) != 0) return ret;
	ret = TPM_Transmit(&tpmdata,"ReadPubek");
	if (ret)
		return ret;
	len = TSS_PubKeyExtract(&tpmdata, TPM_DATA_OFFSET ,k);
	/* previously unchecked: an error code would have been used as an
	   offset into the response buffer below */
	if ((len & ERR_MASK))
		return len;
	/*
	 * Verify the checksum: SHA1(pubkey blob || antiReplay nonce)
	 */
	{
		SHA_CTX sha;
		unsigned char digest[TPM_DIGEST_SIZE];
		SHA1_Init(&sha);
		SHA1_Update(&sha,
		            &tpmdata.buffer[TPM_DATA_OFFSET],
		            len);
		SHA1_Update(&sha,
		            antiReplay,
		            TPM_HASH_SIZE);
		SHA1_Final(digest,&sha);
		if (0 != memcmp(digest,
		                &tpmdata.buffer[TPM_DATA_OFFSET+len],
		                TPM_DIGEST_SIZE)) {
			/* ERR_CHECKSUM for consistency with
			   TPM_CreateEndorsementKeyPair (was a bare -1) */
			ret = ERR_CHECKSUM;
		}
	}
	/* previously 'return 0' unconditionally, which silently swallowed a
	   checksum mismatch detected just above */
	return ret;
}
/****************************************************************************/
/*                                                                          */
/* Owner Read the TPM Endorsement Key                                       */
/*                                                                          */
/* ownauth is the sha'ed owner password of the TPM; k receives the parsed   */
/* public key on success.                                                   */
/****************************************************************************/
uint32_t TPM_OwnerReadPubek(unsigned char *ownauth,pubkeydata *k)
{
	uint32_t ret;
	STACK_TPM_BUFFER(tpmdata)
	unsigned char nonceodd[TPM_NONCE_SIZE];
	unsigned char authdata[TPM_NONCE_SIZE];
	unsigned char c = 0;              /* continueAuthSession = FALSE */
	uint32_t ordinal = htonl(0x7D);   /* TPM_ORD_OwnerReadPubek */
	uint32_t len;
	int size;
	session sess;
	/* generate odd nonce */
	TSS_gennonce(nonceodd);
	/* Open owner authorization session */
	ret = TSS_SessionOpen(SESSION_DSAP|SESSION_OSAP|SESSION_OIAP,
	                      &sess,
	                      ownauth, TPM_ET_OWNER, 0);
	if (ret != 0) return ret;
	/* calculate authorization HMAC value (ordinal only, no parameters) */
	ret = TSS_authhmac(authdata,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
	                   TPM_U32_SIZE,&ordinal,
	                   0,0);
	if ((ret & ERR_MASK))
	{
		TSS_SessionClose(&sess);
		return ret;
	}
	/* build the request buffer */
	ret = TSS_buildbuff("00 c2 T l L % o %",&tpmdata,
	                    ordinal,
	                    TSS_Session_GetHandle(&sess),
	                    TPM_NONCE_SIZE,nonceodd,
	                    c,
	                    TPM_HASH_SIZE,authdata);
	if ((ret & ERR_MASK) != 0)
	{
		TSS_SessionClose(&sess);
		return ret;
	}
	/* transmit the request buffer to the TPM device and read the reply */
	ret = TPM_Transmit(&tpmdata,"OwnerReadEkey");
	TSS_SessionClose(&sess);
	if (ret != 0)
	{
		return ret;
	}
	size = TSS_PubKeySize(&tpmdata, TPM_DATA_OFFSET, 0);
	/* previously unchecked: an error code would have been used as the
	   HMAC'ed data length below */
	if ((size & ERR_MASK))
		return size;
	/* response HMAC covers the returned public key blob; the session
	   struct is still readable after TSS_SessionClose */
	ret = TSS_checkhmac1(&tpmdata,ordinal,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
	                     size,TPM_DATA_OFFSET,
	                     0,0);
	if (ret != 0) return ret;
	len = TSS_PubKeyExtract(&tpmdata, TPM_DATA_OFFSET, k);
	if ((len & ERR_MASK))
		return len;
	return 0;
}
/****************************************************************************/
/* */
/* Disable Reading of the Public Endorsement Key */
/* */
/****************************************************************************/
uint32_t TPM_DisablePubekRead(unsigned char *ownauth)
{
uint32_t ret;
STACK_TPM_BUFFER(tpmdata)
unsigned char nonceodd[TPM_NONCE_SIZE];
unsigned char authdata[TPM_NONCE_SIZE];
unsigned char c = 0;
uint32_t ordinal = htonl(TPM_ORD_DisablePubekRead);
session sess;
/* generate odd nonce */
TSS_gennonce(nonceodd);
/* Open OIAP Session */
ret = TSS_SessionOpen(SESSION_DSAP | SESSION_OSAP|SESSION_OIAP,
&sess,
ownauth, TPM_ET_OWNER, 0);
if (ret != 0) return ret;
/* move Network byte order data to variables for hmac calculation */
/* calculate authorization HMAC value */
ret = TSS_authhmac(authdata,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
TPM_U32_SIZE,&ordinal,
0,0);
if ((ret & ERR_MASK))
{
TSS_SessionClose(&sess);
return ret;
}
/* build the request buffer */
ret = TSS_buildbuff("00 c2 T l L % o %",&tpmdata,
ordinal,
TSS_Session_GetHandle(&sess),
TPM_NONCE_SIZE,nonceodd,
c,
TPM_HASH_SIZE,authdata);
if ((ret & ERR_MASK) != 0)
{
TSS_SessionClose(&sess);
return ret;
}
/* transmit the request buffer to the TPM device and read the reply */
ret = TPM_Transmit(&tpmdata,"DisablePubekRead");
TSS_SessionClose(&sess);
if (ret != 0)
{
return ret;
}
ret = TSS_checkhmac1(&tpmdata,ordinal,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
0,0);
if (ret != 0) return ret;
return 0;
}
/****************************************************************************/
/*                                                                          */
/* Return the public portion of the EK or SRK                               */
/*                                                                          */
/* The arguments are...                                                     */
/*                                                                          */
/* keyhandle is the handle of the key whose public part is requested,       */
/*           which may only be PUBEK or 0x40000000 for the SRK              */
/* ownerauth The sha'ed owner password of the TPM                           */
/* k         points to a pubkeydata structure that receives the parsed      */
/*           public key on success                                          */
/*                                                                          */
/****************************************************************************/
uint32_t TPM_OwnerReadInternalPub(uint32_t keyhandle,
                                  unsigned char * ownerauth,
                                  pubkeydata *k)
{
	STACK_TPM_BUFFER(tpmdata)
	unsigned char nonceodd[TPM_NONCE_SIZE];
	unsigned char authdata[TPM_NONCE_SIZE];
	unsigned char c = 0;    /* continueAuthSession = FALSE */
	uint32_t ordinal_no = htonl(TPM_ORD_OwnerReadInternalPub);
	uint32_t keyhandle_no = htonl(keyhandle);
	uint32_t ret;
	uint32_t keylen;
	session sess;
	/* generate odd nonce */
	ret = TSS_gennonce(nonceodd);
	if (0 == ret) return ERR_CRYPT_ERR;
	/* Open owner authorization session */
	ret = TSS_SessionOpen(SESSION_DSAP|SESSION_OSAP|SESSION_OIAP,
	                      &sess,
	                      ownerauth, TPM_ET_OWNER, 0);
	if (ret != 0) return ret;
	/* input HMAC covers the ordinal and the key handle */
	ret = TSS_authhmac(authdata,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
	                   TPM_U32_SIZE, &ordinal_no,
	                   TPM_U32_SIZE, &keyhandle_no,
	                   0,0);
	if (0 != ret) {
		TSS_SessionClose(&sess);
		return ret;
	}
	/* build the request buffer */
	ret = TSS_buildbuff("00 c2 T l l L % o %", &tpmdata,
	                    ordinal_no,
	                    keyhandle_no,
	                    TSS_Session_GetHandle(&sess),
	                    TPM_NONCE_SIZE,nonceodd,
	                    c,
	                    TPM_HASH_SIZE,authdata);
	if ((ret & ERR_MASK) != 0) {
		TSS_SessionClose(&sess);
		return ret;
	}
	ret = TPM_Transmit(&tpmdata,"OwnerReadInternalPub");
	/* the session struct lives on this stack frame, so its auth data is
	   still readable after the close (pattern used file-wide) */
	TSS_SessionClose(&sess);
	if (0 != ret) {
		return ret;
	}
	/* determine the size of the returned TPM_PUBKEY blob */
	keylen = TSS_PubKeySize(&tpmdata, TPM_DATA_OFFSET, 0);
	if ((keylen & ERR_MASK)) {
		return keylen;
	}
	/* response HMAC covers the returned public key blob */
	ret = TSS_checkhmac1(&tpmdata,ordinal_no,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
	                     keylen, TPM_DATA_OFFSET,
	                     0,0);
	if (0 != ret) {
		return ret;
	}
	/* parse the blob into the caller's pubkeydata */
	keylen = TSS_PubKeyExtract(&tpmdata, TPM_DATA_OFFSET, k);
	if ((keylen & ERR_MASK)) {
		ret = keylen;
	}
	return ret;
}
/****************************************************************************/
/*                                                                          */
/* Create and Wrap a Key                                                    */
/*                                                                          */
/* The arguments are...                                                     */
/*                                                                          */
/* keyhandle is the handle of the parent key of the new key                 */
/*           0x40000000 for the SRK                                         */
/* parauth   is the authorization data (password) for the parent key        */
/*           if NULL, the default auth data of all zeros is assumed         */
/* newauth   is the authorization data (password) for the new key           */
/* migauth   is the authorization data (password) for migration of the new  */
/*           key, or NULL if the new key is not migratable                  */
/*           all authorization values must be 20 bytes long                 */
/* keyparms  is a pointer to a keydata structure with parms set for the new */
/*           key                                                            */
/* key       is a pointer to a keydata structure returned filled in         */
/*           with the public key data for the new key, or NULL if no        */
/*           keydata is to be returned                                      */
/* keyblob   is a pointer to an area which will receive a copy of the       */
/*           encrypted key blob.  If NULL no copy is returned               */
/* bloblen   is a pointer to an integer which will receive the length of    */
/*           the key blob, or NULL if no length is to be returned           */
/*                                                                          */
/****************************************************************************/
uint32_t TPM_CreateWrapKey(uint32_t keyhandle,
                           unsigned char *parauth,
                           unsigned char *newauth,
                           unsigned char *migauth,
                           keydata *keyparms,
                           keydata *key,
                           unsigned char *keyblob,
                           unsigned int *bloblen)
{
	uint32_t ret;
	STACK_TPM_BUFFER( tpmdata)
	STACK_TPM_BUFFER(kparmbuf)
	session sess;
	unsigned char encauth1[TPM_HASH_SIZE];   /* encrypted usage auth for the new key */
	unsigned char encauth2[TPM_HASH_SIZE];   /* encrypted migration auth */
	unsigned char nonceodd[TPM_NONCE_SIZE];
	unsigned char pubauth[TPM_HASH_SIZE];
	unsigned char dummyauth[TPM_HASH_SIZE];  /* all-zero default secret */
	unsigned char *cparauth;
	unsigned char *cnewauth;
	unsigned char c = 0;                     /* continueAuthSession = FALSE */
	uint32_t ordinal = htonl(TPM_ORD_CreateWrapKey);
	uint32_t keyhndl = htonl(keyhandle);
	uint16_t keytype;
	int kparmbufsize;
	STACK_TPM_BUFFER(response);
	memset(dummyauth,0,sizeof dummyauth);
	/* check input arguments */
	if (keyparms == NULL) return ERR_NULL_ARG;
	/* NULL auth secrets fall back to the all-zero default */
	if (parauth == NULL) cparauth = dummyauth;
	else cparauth = parauth;
	if (newauth == NULL) cnewauth = dummyauth;
	else cnewauth = newauth;
	/* entity type for the OSAP session below: 0x0004 for the SRK handle,
	   0x0001 (key handle) for any other parent -- TODO confirm these
	   match TPM_ET_SRK / TPM_ET_KEYHANDLE */
	if (keyhandle == 0x40000000) keytype = 0x0004;
	else keytype = 0x0001;
	/* make sure the parent key is present in the TPM */
	ret = needKeysRoom(keyhandle, 0, 0, 0);
	if (ret != 0) {
		return ret;
	}
	if (keyparms->v.tag != TPM_TAG_KEY12) {
		/* pre-1.2 key structure: get the TPM version and put it into
		   the keyparms structure */
		ret = TPM_GetCapability(TPM_CAP_VERSION,
		                        NULL,
		                        &response);
		if (ret != 0)
			return ret;
		/* NOTE(review): copies response.used bytes into v.ver without
		   checking it fits sizeof(v.ver) -- confirm the capability
		   response size */
		memcpy(&(keyparms->v.ver), response.buffer, response.used);
	}
	/* generate odd nonce */
	TSS_gennonce(nonceodd);
	/* Open OSAP Session keyed on the parent's auth secret */
	ret = TSS_SessionOpen(SESSION_OSAP|SESSION_DSAP,&sess,cparauth,keytype,keyhandle);
	if (ret != 0)
		return ret;
	/* encrypt the new key's usage auth under the session secret */
	TPM_CreateEncAuth(&sess, cnewauth, encauth1, NULL);
	/* calculate encrypted authorization value for migration of new key */
	if (migauth != NULL) {
		TPM_CreateEncAuth(&sess, migauth, encauth2, nonceodd);
	} else {
		memset(encauth2,0,TPM_HASH_SIZE);
	}
	/* move Network byte order data to variables for hmac calculation */
	/* convert keyparm structure to buffer */
	ret = TPM_WriteKey(&kparmbuf,keyparms);
	if ((ret & ERR_MASK) != 0) {
		TSS_SessionClose(&sess);
		return ret;
	}
	kparmbufsize = ret;
	/* input HMAC covers ordinal, both encrypted auths and the key parms */
	ret = TSS_authhmac(pubauth,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
	                   TPM_U32_SIZE,&ordinal,
	                   TPM_HASH_SIZE,encauth1,
	                   TPM_HASH_SIZE,encauth2,
	                   kparmbufsize,kparmbuf.buffer,
	                   0,0);
	if (ret != 0) {
		TSS_SessionClose(&sess);
		return ret;
	}
	/* build the request buffer */
	ret = TSS_buildbuff("00 c2 T l l % % % L % o %",&tpmdata,
	                    ordinal,
	                    keyhndl,
	                    TPM_HASH_SIZE,encauth1,
	                    TPM_HASH_SIZE,encauth2,
	                    kparmbufsize,kparmbuf.buffer,
	                    TSS_Session_GetHandle(&sess),
	                    TPM_NONCE_SIZE,nonceodd,
	                    c,
	                    TPM_HASH_SIZE,pubauth);
	if ((ret & ERR_MASK) != 0) {
		TSS_SessionClose(&sess);
		return ret;
	}
	/* transmit the request buffer to the TPM device and read the reply */
	ret = TPM_Transmit(&tpmdata,"CreateWrapKey - AUTH1");
	TSS_SessionClose(&sess);
	if (ret != 0) {
		return ret;
	}
	/* size of the wrapped key blob the TPM returned */
	kparmbufsize = TSS_KeySize(&tpmdata, TPM_DATA_OFFSET);
	/* response HMAC covers the returned key blob; the session struct is
	   still readable after TSS_SessionClose */
	ret = TSS_checkhmac1(&tpmdata,ordinal,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
	                     kparmbufsize,TPM_DATA_OFFSET,
	                     0,0);
	if (ret != 0)
		return ret;
	/* convert the returned key to a structure */
	if (key != NULL)
		TSS_KeyExtract(&tpmdata, TPM_DATA_OFFSET ,key);
	/* copy the key blob to caller */
	if (keyblob != NULL) {
		memcpy(keyblob,&tpmdata.buffer[TPM_DATA_OFFSET],kparmbufsize);
		if (bloblen != NULL) *bloblen = kparmbufsize;
	}
	return 0;
}
/****************************************************************************/
/*                                                                          */
/* Load a new Key into the TPM (pre-1.2 TPM_ORD_LoadKey ordinal)            */
/*                                                                          */
/* The arguments are...                                                     */
/*                                                                          */
/* keyhandle is the handle of parent key for the new key                    */
/*           0x40000000 for the SRK                                         */
/* keyauth   is the authorization data (password) for the parent key        */
/*           if null, it is assumed that the parent requires no auth        */
/* keyparms  is a pointer to a keydata structure with all data for the new  */
/*           key                                                            */
/* newhandle is a pointer to a 32bit word which will receive the handle     */
/*           of the new key                                                 */
/*                                                                          */
/****************************************************************************/
uint32_t TPM_LoadKey(uint32_t keyhandle, unsigned char *keyauth,
                     keydata *keyparms,uint32_t *newhandle)
{
	uint32_t ret;
	STACK_TPM_BUFFER(tpmdata)
	STACK_TPM_BUFFER(kparmbuf)
	unsigned char nonceodd[TPM_NONCE_SIZE];
	unsigned char pubauth[TPM_HASH_SIZE];
	unsigned char c = 0;    /* continueAuthSession = FALSE */
	uint32_t ordinal = htonl(TPM_ORD_LoadKey);
	uint32_t keyhndl;
	int kparmbufsize;
	/* make sure the parent key is present in the TPM */
	ret = needKeysRoom(keyhandle, 0, 0, 0);
	if (ret != 0) {
		return ret;
	}
	/* check input arguments */
	if (keyparms == NULL || newhandle == NULL) return ERR_NULL_ARG;
	if (keyauth != NULL) /* parent requires authorization */
	{
		session sess;
		/* generate odd nonce */
		TSS_gennonce(nonceodd);
		/* open an authorization session on the parent key */
		ret = TSS_SessionOpen(SESSION_OSAP|SESSION_OIAP|SESSION_DSAP,
		                      &sess,
		                      keyauth, TPM_ET_KEYHANDLE, keyhandle);
		if (ret != 0) return ret;
		/* move Network byte order data to variables for hmac calculation */
		keyhndl = htonl(keyhandle);
		/* convert keyparm structure to buffer */
		ret = TPM_WriteKey(&kparmbuf,keyparms);
		if ((ret & ERR_MASK) != 0)
		{
			TSS_SessionClose(&sess);
			return ret;
		}
		kparmbufsize = ret;
		/* input HMAC covers the ordinal and the serialized key */
		ret = TSS_authhmac(pubauth,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
		                   TPM_U32_SIZE,&ordinal,
		                   kparmbufsize,kparmbuf.buffer,
		                   0,0);
		if ((ret & ERR_MASK))
		{
			TSS_SessionClose(&sess);
			return ret;
		}
		/* build the request buffer */
		ret = TSS_buildbuff("00 c2 T l l % L % o %",&tpmdata,
		                    ordinal,
		                    keyhndl,
		                    kparmbufsize,kparmbuf.buffer,
		                    TSS_Session_GetHandle(&sess),
		                    TPM_NONCE_SIZE,nonceodd,
		                    c,
		                    TPM_HASH_SIZE,pubauth);
		if ((ret & ERR_MASK) != 0)
		{
			TSS_SessionClose(&sess);
			return ret;
		}
		/* transmit the request buffer to the TPM device and read the reply */
		ret = TPM_Transmit(&tpmdata,"LoadKey - AUTH1");
		TSS_SessionClose(&sess);
		if (ret != 0)
		{
			return ret;
		}
		/* response HMAC covers the returned 32-bit key handle; the
		   session struct is still readable after the close */
		ret = TSS_checkhmac1(&tpmdata,ordinal,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
		                     TPM_U32_SIZE,TPM_DATA_OFFSET,
		                     0,0);
		if (ret != 0) return ret;
		ret = tpm_buffer_load32(&tpmdata,TPM_DATA_OFFSET, newhandle);
		if ((ret & ERR_MASK)) {
			return ret;
		}
	}
	else /* parent requires NO authorization */
	{
		/* move Network byte order data to variables for hmac calculation */
		keyhndl = htonl(keyhandle);
		/* convert keyparm structure to buffer */
		ret = TPM_WriteKey(&kparmbuf,keyparms);
		if ((ret & ERR_MASK) != 0) return ret;
		kparmbufsize = ret;
		/* build the (unauthorized, tag 0x00c1) request buffer */
		ret = TSS_buildbuff("00 c1 T l l %",&tpmdata,
		                    ordinal,
		                    keyhndl,
		                    kparmbufsize,kparmbuf.buffer);
		if ((ret & ERR_MASK) != 0) return ret;
		/* transmit the request buffer to the TPM device and read the reply */
		ret = TPM_Transmit(&tpmdata,"LoadKey");
		if (ret != 0) return ret;
		/* extract the handle of the freshly loaded key */
		ret = tpm_buffer_load32(&tpmdata,TPM_DATA_OFFSET,newhandle);
		if ((ret & ERR_MASK)) {
			return ret;
		}
	}
	return 0;
}
/****************************************************************************/
/*                                                                          */
/* Load a new Key into the TPM (1.2 TPM_ORD_LoadKey2 ordinal)               */
/*                                                                          */
/* The arguments are...                                                     */
/*                                                                          */
/* keyhandle is the handle of parent key for the new key                    */
/*           0x40000000 for the SRK                                         */
/* keyauth   is the authorization data (password) for the parent key        */
/*           if null, it is assumed that the parent requires no auth        */
/* keyparms  is a pointer to a keydata structure with all data for the new  */
/*           key                                                            */
/* newhandle is a pointer to a 32bit word which will receive the handle     */
/*           of the new key                                                 */
/*                                                                          */
/****************************************************************************/
uint32_t TPM_LoadKey2(uint32_t keyhandle, unsigned char *keyauth,
                      keydata *keyparms, uint32_t *newhandle)
{
	uint32_t ret;
	STACK_TPM_BUFFER(tpmdata)
	STACK_TPM_BUFFER(kparmbuf)
	unsigned char nonceodd[TPM_NONCE_SIZE];
	unsigned char pubauth[TPM_HASH_SIZE];
	unsigned char c = 0;    /* continueAuthSession = FALSE */
	uint32_t ordinal = htonl(TPM_ORD_LoadKey2);
	uint32_t keyhndl = htonl(keyhandle);
	int kparmbufsize;
	/* check input arguments */
	if (keyparms == NULL || newhandle == NULL)
		return ERR_NULL_ARG;
	/* make sure the parent key is present in the TPM */
	ret = needKeysRoom(keyhandle, 0, 0, 0);
	if (ret != 0) {
		return ret;
	}
	if (keyauth != NULL) /* parent requires authorization */ {
		session sess;
		/* generate odd nonce */
		TSS_gennonce(nonceodd);
		/* open an authorization session on the parent key */
		ret = TSS_SessionOpen(SESSION_OSAP|SESSION_OIAP|SESSION_DSAP,
		                      &sess,
		                      keyauth, TPM_ET_KEYHANDLE, keyhandle);
		if (ret != 0)
			return ret;
		/* move Network byte order data to variables for hmac calculation */
		/* convert keyparm structure to buffer */
		ret = TPM_WriteKey(&kparmbuf,keyparms);
		if ((ret & ERR_MASK) != 0) {
			TSS_SessionClose(&sess);
			return ret;
		}
		kparmbufsize = ret;
		/* input HMAC covers the ordinal and the serialized key */
		ret = TSS_authhmac(pubauth,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
		                   TPM_U32_SIZE,&ordinal,
		                   kparmbufsize,kparmbuf.buffer,
		                   0,0);
		if ((ret & ERR_MASK)) {
			TSS_SessionClose(&sess);
			return ret;
		}
		/* build the request buffer */
		ret = TSS_buildbuff("00 c2 T l l % L % o %",&tpmdata,
		                    ordinal,
		                    keyhndl,
		                    kparmbufsize,kparmbuf.buffer,
		                    TSS_Session_GetHandle(&sess),
		                    TPM_NONCE_SIZE,nonceodd,
		                    c,
		                    TPM_HASH_SIZE,pubauth);
		if ((ret & ERR_MASK) != 0) {
			TSS_SessionClose(&sess);
			return ret;
		}
		/* transmit the request buffer to the TPM device and read the reply */
		ret = TPM_Transmit(&tpmdata,"LoadKey2 - AUTH1");
		TSS_SessionClose(&sess);
		if (ret != 0) {
			return ret;
		}
		/* response HMAC carries no parameter data here (unlike the old
		   TPM_LoadKey, which HMACs the returned handle) -- presumably
		   per the LoadKey2 command spec; confirm against TPM 1.2 Part 3 */
		ret = TSS_checkhmac1(&tpmdata,ordinal,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
		                     0,0);
		if (ret != 0)
			return ret;
		/* extract the handle of the freshly loaded key */
		ret = tpm_buffer_load32(&tpmdata,TPM_DATA_OFFSET,newhandle);
		if ((ret & ERR_MASK)) {
			return ret;
		}
	} else /* parent requires NO authorization */ {
		/* convert keyparm structure to buffer */
		ret = TPM_WriteKey(&kparmbuf,keyparms);
		if ((ret & ERR_MASK) != 0)
			return ret;
		kparmbufsize = ret;
		/* build the (unauthorized, tag 0x00c1) request buffer */
		ret = TSS_buildbuff("00 c1 T l l %",&tpmdata,
		                    ordinal,
		                    keyhndl,
		                    kparmbufsize,kparmbuf.buffer);
		if ((ret & ERR_MASK) != 0)
			return ret;
		/* transmit the request buffer to the TPM device and read the reply */
		ret = TPM_Transmit(&tpmdata,"LoadKey2");
		if (ret != 0)
			return ret;
		/* extract the handle of the freshly loaded key */
		ret = tpm_buffer_load32(&tpmdata,TPM_DATA_OFFSET,newhandle);
		if ((ret & ERR_MASK)) {
			return ret;
		}
	}
	return ret;
}
/****************************************************************************/
/*                                                                          */
/* Get a Public Key from the TPM                                            */
/*                                                                          */
/* The arguments are...                                                     */
/*                                                                          */
/* keyhandle is the handle of the key to be read                            */
/*           0x40000000 for the SRK                                         */
/* keyauth   is the authorization data (password) for the key               */
/*           if null, it is assumed that the key requires no authorization  */
/* pk        points to a pubkeydata structure that receives the parsed      */
/*           public key                                                     */
/*                                                                          */
/* Note: callers must ensure the key is loaded (see the _UseRoom and        */
/* TPM_GetPubKey wrappers below).                                           */
/****************************************************************************/
static uint32_t TPM_GetPubKey_Internal(uint32_t keyhandle,
                                       unsigned char *keyauth,
                                       pubkeydata *pk)
{
	uint32_t ret;
	STACK_TPM_BUFFER(tpmdata)
	unsigned char nonceodd[TPM_NONCE_SIZE];
	unsigned char pubauth[TPM_HASH_SIZE];
	unsigned char c = 0;                /* continueAuthSession = FALSE */
	uint32_t ordinal = htonl(0x21);     /* TPM_ORD_GetPubKey */
	uint32_t keyhndl = htonl(keyhandle);
	int size;
	/* check input arguments */
	if (pk == NULL) return ERR_NULL_ARG;
	if (keyauth != NULL) /* key requires authorization */
	{
		session sess;
		/* generate odd nonce */
		TSS_gennonce(nonceodd);
		/* open an authorization session on the key */
		ret = TSS_SessionOpen(SESSION_OSAP|SESSION_OIAP|SESSION_DSAP,
		                      &sess,
		                      keyauth, TPM_ET_KEYHANDLE, keyhandle);
		if (ret != 0) return ret;
		/* input HMAC covers only the ordinal */
		ret = TSS_authhmac(pubauth,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
		                   TPM_U32_SIZE,&ordinal,
		                   0,0);
		if (ret != 0)
		{
			TSS_SessionClose(&sess);
			return ret;
		}
		/* build the request buffer */
		ret = TSS_buildbuff("00 c2 T l l L % o %",&tpmdata,
		                    ordinal,
		                    keyhndl,
		                    TSS_Session_GetHandle(&sess),
		                    TPM_NONCE_SIZE,nonceodd,
		                    c,
		                    TPM_HASH_SIZE,pubauth);
		if ((ret & ERR_MASK) != 0)
		{
			TSS_SessionClose(&sess);
			return ret;
		}
		/* transmit the request buffer to the TPM device and read the reply */
		ret = TPM_Transmit(&tpmdata,"GetPubKey - AUTH1");
		TSS_SessionClose(&sess);
		if (ret != 0)
		{
			return ret;
		}
		/* parse the pubkey (its size is needed for the response HMAC) */
		ret = TSS_PubKeyExtract(&tpmdata, TPM_DATA_OFFSET, pk);
		if ((ret & ERR_MASK))
			return ret;
		size = ret;
		/* response HMAC covers the returned public key blob; the session
		   struct is still readable after the close */
		ret = TSS_checkhmac1(&tpmdata,ordinal,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
		                     size,TPM_DATA_OFFSET,
		                     0,0);
		if (ret != 0) return ret;
	} else /* key requires NO authorization */ {
		/* build the (unauthorized, tag 0x00c1) request buffer */
		ret = TSS_buildbuff("00 c1 T l l",&tpmdata,
		                    ordinal,
		                    keyhndl);
		if ((ret & ERR_MASK) != 0) return ret;
		/* transmit the request buffer to the TPM device and read the reply */
		ret = TPM_Transmit(&tpmdata,"GetPubKey - NO AUTH");
		if (ret != 0) return ret;
		ret = TSS_PubKeyExtract(&tpmdata, TPM_DATA_OFFSET, pk);
		if ((ret & ERR_MASK))
			return ret;
	}
	return 0;
}
/* Fetch a key's public part, temporarily swapping the key into the TPM if
 * necessary and restoring whichever key the swap displaced afterwards. */
uint32_t TPM_GetPubKey_UseRoom(uint32_t keyhandle,
                               unsigned char *keyauth,
                               pubkeydata *pk)
{
	uint32_t displaced;
	uint32_t rc;

	/* swap in keyhandle, remembering what had to make room for it */
	rc = needKeysRoom_Stacked(keyhandle, &displaced);
	if (rc != 0)
		return rc;
	rc = TPM_GetPubKey_Internal(keyhandle, keyauth, pk);
	/* put the displaced key back regardless of the read's outcome */
	needKeysRoom_Stacked_Undo(keyhandle, displaced);
	return rc;
}
/* Fetch a key's public part, first making sure the key is loaded. */
uint32_t TPM_GetPubKey(uint32_t keyhandle,
                       unsigned char *keyauth,
                       pubkeydata *pk)
{
	uint32_t rc = needKeysRoom(keyhandle, 0, 0, 0);
	return (rc != 0) ? rc : TPM_GetPubKey_Internal(keyhandle, keyauth, pk);
}
/****************************************************************************/
/*                                                                          */
/* Evict (delete) a Key from the TPM                                        */
/*                                                                          */
/* The arguments are...                                                     */
/*                                                                          */
/* keyhandle      is the handle of the key to be evicted                    */
/* allowTransport selects transmission through a transport session          */
/*                                                                          */
/****************************************************************************/
static uint32_t TPM_EvictKey_Internal(uint32_t keyhandle, int allowTransport)
{
	uint32_t rc;
	STACK_TPM_BUFFER( tpmdata)
	char *tpmver = getenv("TPM_VERSION");

	if (tpmver == NULL || strcmp("11", tpmver) == 0) {
		/* TPM 1.1 (or unspecified): legacy EvictKey ordinal 0x22 */
		rc = TSS_buildbuff("00 c1 T 00 00 00 22 L",&tpmdata, keyhandle);
		if ((rc & ERR_MASK) != 0)
			return rc;
		/* transmit the request buffer to the TPM device and read the reply */
		rc = allowTransport
		     ? TPM_Transmit(&tpmdata, "EvictKey")
		     : TPM_Transmit_NoTransport(&tpmdata, "EvictKey");
		/* a 1.2 TPM rejects the old ordinal -- retry via FlushSpecific */
		if (rc == TPM_BAD_ORDINAL) {
			rc = TPM_FlushSpecific(keyhandle, TPM_RT_KEY);
		}
	} else {
		/* TPM 1.2: keys are flushed through TPM_FlushSpecific */
		rc = TPM_FlushSpecific(keyhandle, TPM_RT_KEY);
	}
	return rc;
}
uint32_t TPM_EvictKey_UseRoom(uint32_t keyhandle)
{
	/*
	 * Deliberately no key-room swapping here: eviction is part of the
	 * key-room machinery itself, so calling into it again would recurse.
	 * We therefore assume the handle already resides in the TPM.
	 *
	 * The stacked variant would otherwise look like:
	 *   uint32_t replaced_keyhandle;
	 *   if (needKeysRoom_Stacked(keyhandle, &replaced_keyhandle) != 0)
	 *       return 0;
	 *   ...evict...
	 *   needKeysRoom_Stacked_Undo(0, replaced_keyhandle);
	 */
	return TPM_EvictKey_Internal(keyhandle, 0);
}
/* Evict a key after making sure its handle can be swapped into the TPM. */
uint32_t TPM_EvictKey(uint32_t keyhandle)
{
	uint32_t ret;
	ret = needKeysRoom(keyhandle, 0, 0, 0);
	if (ret != 0)
		/* NOTE(review): reports success when the key cannot be swapped
		   in, whereas sibling TPM_GetPubKey propagates ret here --
		   confirm this asymmetry is intentional */
		return 0;
	return TPM_EvictKey_Internal(keyhandle, 1);
}
/****************************************************************************/
/*                                                                          */
/* Extract a Pubkey Blob from a Key Blob                                    */
/*                                                                          */
/* keybuff points to a serialized key blob; pkey receives the serialized    */
/* public-key blob assembled from it and *plen the number of bytes written. */
/* No bounds checking is performed -- the caller must supply a well-formed  */
/* key blob and a sufficiently large output buffer.                         */
/****************************************************************************/
void TSS_Key2Pub(unsigned char *keybuff, unsigned char *pkey, unsigned int *plen)
{
	int srcoff1;
	int srcoff2;
	int srcoff3;
	int dstoff1;
	int dstoff2;
	int dstoff3;
	int len1;
	int len2;
	int len3;
	int pointer;
	int parmsize;
	int pcrisize;
	int pubksize;
	/* skip the fixed key header (4+2+4+1 = 11 bytes -- presumably
	   version, keyUsage, keyFlags, authDataUsage; confirm against the
	   TPM_KEY layout) to reach the key parameters */
	srcoff1 = TPM_U32_SIZE + TPM_U16_SIZE + TPM_U32_SIZE + 1;
	dstoff1 = 0;
	/* fixed-size part of the key parms: algorithmID, encScheme,
	   sigScheme, parmSize */
	len1 = TPM_U32_SIZE + TPM_U16_SIZE + TPM_U16_SIZE + TPM_U32_SIZE;
	memcpy(pkey+dstoff1,keybuff+srcoff1,len1);
	dstoff2 = dstoff1 + len1;
	srcoff2 = srcoff1 + len1;
	/* 'pointer' indexes the 32-bit parmSize field inside the header */
	pointer = srcoff1 + TPM_U32_SIZE + TPM_U16_SIZE + TPM_U16_SIZE;
	parmsize = LOAD32(keybuff,pointer);
	/* copy the variable-length algorithm parameter blob */
	len2 = parmsize;
	memcpy(pkey+dstoff2,keybuff+srcoff2,len2);
	/* skip over the pcrInfo section (length field + data) -- it is not
	   part of the pubkey blob */
	pointer = pointer + TPM_U32_SIZE + parmsize;
	pcrisize = LOAD32(keybuff,pointer);
	pointer = pointer + TPM_U32_SIZE + pcrisize;
	/* copy the public key store: 32-bit length field plus key bytes */
	pubksize = LOAD32(keybuff,pointer);
	dstoff3 = dstoff2 + len2;
	srcoff3 = pointer;
	len3 = pubksize + TPM_U32_SIZE;
	memcpy(pkey+dstoff3,keybuff+srcoff3,len3);
	*plen = len1 + len2 + len3;
}
/****************************************************************************/
/*                                                                          */
/* Calculate the size of a Key Blob                                         */
/*                                                                          */
/* Returns the total serialized size of the key at 'offset' inside 'tb',   */
/* or an ERR_MASK error code if the public part cannot be sized.            */
/****************************************************************************/
int TSS_KeySize(const struct tpm_buffer *tb, unsigned int offset)
{
	const unsigned char *blob = tb->buffer;
	unsigned int start = offset;
	unsigned int publen;
	int privlen;

	/* fixed-size header fields preceding the key parameters */
	offset += 0 + 4 + TPM_U16_SIZE + TPM_U32_SIZE + 1;
	/* key parms + pcrInfo + public key store */
	publen = TSS_PubKeySize(tb, offset, 1);
	if ((publen & ERR_MASK)) {
		return publen;
	}
	offset += publen;
	/* encrypted private part: 32-bit length field followed by data */
	privlen = LOAD32(blob, offset);
	offset += TPM_U32_SIZE + privlen;
	return (offset - start);
}
/****************************************************************************/
/*                                                                          */
/* Calculate the size of a Public Key Blob                                  */
/*                                                                          */
/* tb/offset locate a serialized public key; when pcrpresent is non-zero a  */
/* pcrInfo length field and its data follow the algorithm parameters.       */
/* Returns the blob size in bytes, or ERR_STRUCTURE if the buffer is too    */
/* short to contain the structure it describes.                             */
/****************************************************************************/
int TSS_PubKeySize(const struct tpm_buffer *tb, unsigned int offset, int pcrpresent)
{
	uint32_t parmsize;
	uint32_t pcrisize;
	uint32_t keylength;
	const unsigned char *keybuff = tb->buffer;
	uint32_t offset_in = offset;
	/* skip the fixed fields: algorithmID (4), encScheme (2), sigScheme (2) */
	offset += TPM_U32_SIZE + TPM_U16_SIZE + TPM_U16_SIZE;
	/* bounds checks use '>' (the former '>=' was off by one: reading a
	   4-byte length field is still in range when offset+4 == tb->used) */
	if (offset + 4 > tb->used) {
		return ERR_STRUCTURE;
	}
	parmsize = LOAD32(keybuff,offset);
	offset += TPM_U32_SIZE;
	offset += parmsize;
	if (pcrpresent) {
		if (offset + 4 > tb->used) {
			return ERR_STRUCTURE;
		}
		pcrisize = LOAD32(keybuff,offset);
		offset += TPM_U32_SIZE;
		offset += pcrisize;
	}
	if (offset + 4 > tb->used) {
		return ERR_STRUCTURE;
	}
	keylength = LOAD32(keybuff,offset);
	offset += TPM_U32_SIZE;
	offset += keylength;
	/* final sanity check: the key data itself must fit in the buffer */
	if (offset > tb->used) {
		return ERR_STRUCTURE;
	}
	return (offset - offset_in);
}
/****************************************************************************/
/*                                                                          */
/* Calculate the size of a Asymmetric Key Blob                              */
/*                                                                          */
/* Blob layout: algorithm id | enc scheme | 16-bit data size | data.        */
/* No bounds checking -- the caller guarantees a well-formed blob.          */
/****************************************************************************/
int TSS_AsymKeySize(const unsigned char * keybuff)
{
	/* the 16-bit payload length sits right after the two header fields */
	const int szoff = sizeof(TPM_ALGORITHM_ID) + sizeof(TPM_ENC_SCHEME);
	int payload = LOAD16(keybuff, szoff);
	/* total = header fields + the length field itself + payload bytes */
	return szoff + TPM_U16_SIZE + payload;
}
/****************************************************************************/
/* */
/* Calculate the size of a Symmetric Key Blob */
/* */
/****************************************************************************/
/* Symmetric key blobs use the same wire layout as asymmetric ones
 * (algorithm id, encryption scheme, 16-bit length, payload), so the
 * size computation is shared. keybuff must point at a complete blob. */
int TSS_SymKeySize(const unsigned char * keybuff) {
	return TSS_AsymKeySize(keybuff);
}
/****************************************************************************/
/* */
/* Convert a TPM public key to an OpenSSL RSA public key */
/* */
/****************************************************************************/
/*
 * Build a newly allocated OpenSSL RSA public key from the modulus and
 * exponent stored in a pubkeydata structure. Returns NULL on allocation
 * failure; on success the caller owns the returned RSA object and must
 * free it with RSA_free().
 *
 * NOTE(review): this assigns rsa->n / rsa->e directly, which requires an
 * OpenSSL version with a non-opaque RSA struct (pre-1.1.0). Newer OpenSSL
 * would need RSA_set0_key() instead — confirm which versions this file is
 * built against before changing.
 */
RSA *TSS_convpubkey(pubkeydata *k)
{
	RSA *rsa;
	BIGNUM *mod;
	BIGNUM *exp;
	/* create the necessary structures */
	rsa = RSA_new();
	mod = BN_new();
	exp = BN_new();
	if (rsa == NULL || mod == NULL || exp == NULL) {
		/* release whichever allocations succeeded before the failure */
		if (rsa) {
			RSA_free(rsa);
		}
		if (mod) {
			BN_free(mod);
		}
		if (exp) {
			BN_free(exp);
		}
		return NULL;
	}
	/* convert the raw public key values to BIGNUMS */
	BN_bin2bn(k->pubKey.modulus,k->pubKey.keyLength,mod);
	if (0 == k->algorithmParms.u.rsaKeyParms.exponentSize) {
		/* exponentSize == 0 means the TPM default exponent 2^16+1 (65537) */
		unsigned char exponent[3] = {0x1,0x0,0x1};
		BN_bin2bn(exponent,3,exp);
	} else {
		BN_bin2bn(k->algorithmParms.u.rsaKeyParms.exponent,
		          k->algorithmParms.u.rsaKeyParms.exponentSize,
		          exp);
	}
	/* set up the RSA public key structure; ownership of mod/exp passes to rsa */
	rsa->n = mod;
	rsa->e = exp;
	return rsa;
}
/****************************************************************************/
/* */
/* Get the Fingerprint of a Key given a pubkeydata structure */
/* */
/****************************************************************************/
/* The fingerprint of a key is the SHA-1 digest of its public modulus.
 * fprint must point to a buffer large enough for a SHA-1 digest (20 bytes). */
void TSS_pkeyprint(pubkeydata *key, unsigned char *fprint)
{
	TSS_sha1(key->pubKey.modulus,key->pubKey.keyLength,fprint);
}
/****************************************************************************/
/* */
/* Get the Fingerprint of a Key given a key blob */
/* */
/****************************************************************************/
/* Compute the fingerprint of a key given its serialized key blob.
 * keybuff must point at a well-formed key blob of at least
 * sizeof(TPM_KEY_EMB) accessible bytes; fprint receives the SHA-1
 * digest of the extracted public modulus. */
void TSS_keyprint(unsigned char *keybuff, unsigned char *fprint)
{
	keydata k;
	STACK_TPM_BUFFER(buffer);
	/* wrap the raw blob in a tpm_buffer so the extractor can bounds-check */
	SET_TPM_BUFFER(&buffer, keybuff, sizeof(TPM_KEY_EMB));
	/* NOTE(review): the return code of TSS_KeyExtract is ignored; on a
	 * malformed blob 'k' may be only partially filled and the fingerprint
	 * garbage — confirm all callers pass validated blobs */
	TSS_KeyExtract(&buffer, 0,&k);
	TSS_pkeyprint(&(k.pub),fprint);
}
/****************************************************************************/
/* */
/* Get the Fingerprint of a Key given a loaded key handle and authdata */
/* */
/****************************************************************************/
/*
 * Compute the fingerprint of a key that is already loaded in the TPM.
 * The public portion is fetched with TPM_GetPubKey (authorized by
 * keyauth) and its modulus is hashed into fprint.
 * Returns 0 on success or the error code from TPM_GetPubKey.
 */
uint32_t TSS_lkeyprint(uint32_t keyhandle, unsigned char *keyauth, unsigned char *fprint)
{
	pubkeydata pubkey;
	uint32_t rc = TPM_GetPubKey(keyhandle, keyauth, &pubkey);
	if (rc == 0) {
		TSS_pkeyprint(&pubkey, fprint);
	}
	return rc;
}
/****************************************************************************/
/*                                                                          */
/* Certify a key                                                            */
/*                                                                          */
/* The arguments are ...                                                    */
/*                                                                          */
/* certhandle      is the handle of the key used to certify the key        */
/* keyhandle       is the handle of the key to be certified                */
/* certKeyAuth     is a pointer to the usage password of the certifying    */
/*                 key (may be NULL)                                       */
/* usageAuth       is a pointer to the usage password of the key to be     */
/*                 certified (must not be NULL)                            */
/* certifyInfo_ser if not NULL, receives the serialized certifyInfo blob   */
/*                 returned by the TPM                                     */
/* signature       if not NULL, receives the signature over the            */
/*                 certifyInfo blob                                        */
/*                                                                          */
/* The anti-replay nonce is generated internally by this function.          */
/****************************************************************************/
/*
 * Issue the TPM_CertifyKey ordinal. Two command variants are built:
 * AUTH2 (certKeyAuth given) authorizes both the certifying key and the
 * key being certified; AUTH1 (certKeyAuth NULL) authorizes only the key
 * being certified. On success the serialized certifyInfo and the
 * signature from the response are copied into the caller's buffers.
 */
uint32_t TPM_CertifyKey(uint32_t certhandle,
                        uint32_t keyhandle,
                        unsigned char *certKeyAuth,
                        unsigned char *usageAuth,
                        struct tpm_buffer *certifyInfo_ser,
                        struct tpm_buffer *signature)
{
	uint32_t ret = 0;
	uint32_t ordinal_no = htonl(TPM_ORD_CertifyKey);
	unsigned char c = 0;
	unsigned char nonceodd[TPM_NONCE_SIZE];
	unsigned char authdata1[TPM_NONCE_SIZE];
	unsigned char antiReplay[TPM_HASH_SIZE];
	STACK_TPM_BUFFER(tpmdata)
	uint32_t certHandle_no = htonl(certhandle);
	uint32_t keyHandle_no = htonl(keyhandle);
	uint32_t ci_size;
	uint32_t len;
	session sess;
	/* the usage password of the key being certified is always required */
	if (NULL == usageAuth) {
		return ERR_NULL_ARG;
	}
	ret = needKeysRoom(certhandle, keyhandle, 0, 0);
	if (ret != 0) {
		return ret;
	}
	TSS_gennonce(antiReplay);
	TSS_gennonce(nonceodd);
	if (NULL != certKeyAuth) {
		/* AUTH2: first session authorizes the certifying key,
		 * second (OIAP) session authorizes the key being certified */
		session sess2;
		unsigned char authdata2[TPM_NONCE_SIZE];
		unsigned char nonceodd2[TPM_NONCE_SIZE];
		TSS_gennonce(nonceodd2);
		ret = TSS_SessionOpen(SESSION_OSAP|SESSION_OIAP|SESSION_DSAP,
		                      &sess,
		                      certKeyAuth, TPM_ET_KEYHANDLE, certhandle);
		if (0 != ret) {
			return ret;
		}
		ret = TSS_SessionOpen(SESSION_OIAP,
		                      &sess2,
		                      usageAuth,0,0);
		if (0 != ret) {
			TSS_SessionClose(&sess);
			return ret;
		}
		/* authorization HMAC over the ordinal and anti-replay nonce,
		 * one per session */
		ret = TSS_authhmac(authdata1,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
		                   TPM_U32_SIZE, &ordinal_no,
		                   TPM_NONCE_SIZE, antiReplay,
		                   0,0);
		if (0 != ret) {
			TSS_SessionClose(&sess);
			TSS_SessionClose(&sess2);
			return ret;
		}
		ret = TSS_authhmac(authdata2,TSS_Session_GetAuth(&sess2),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess2),nonceodd2,c,
		                   TPM_U32_SIZE, &ordinal_no,
		                   TPM_NONCE_SIZE, antiReplay,
		                   0,0);
		if (0 != ret) {
			TSS_SessionClose(&sess);
			TSS_SessionClose(&sess2);
			return ret;
		}
		/* serialize the AUTH2 command: tag 00c3, length, ordinal,
		 * handles, nonce, then both authorization trailers */
		ret = TSS_buildbuff("00 c3 T l l l % L % o % L % o %", &tpmdata,
		                    ordinal_no,
		                    certHandle_no,
		                    keyHandle_no,
		                    TPM_HASH_SIZE, antiReplay,
		                    TSS_Session_GetHandle(&sess),
		                    TPM_NONCE_SIZE,nonceodd,
		                    c,
		                    TPM_HASH_SIZE, authdata1,
		                    TSS_Session_GetHandle(&sess2),
		                    TPM_NONCE_SIZE,nonceodd2,
		                    c,
		                    TPM_HASH_SIZE,authdata2);
		if (( ret & ERR_MASK ) != 0) {
			TSS_SessionClose(&sess);
			TSS_SessionClose(&sess2);
			return ret;
		}
		ret = TPM_Transmit(&tpmdata,"CertifyKey - AUTH2");
		TSS_SessionClose(&sess);
		TSS_SessionClose(&sess2);
		if (0 != ret) {
			return ret;
		}
		/* response layout: certifyInfo || sigSize (4) || signature */
		ci_size = TPM_GetCertifyInfoSize(&tpmdata.buffer[TPM_DATA_OFFSET]);
		ret = tpm_buffer_load32(&tpmdata, TPM_DATA_OFFSET + ci_size, &len);
		if ((ret & ERR_MASK)) {
			return ret;
		}
		/* verify the response HMACs over the whole payload */
		ret = TSS_checkhmac2(&tpmdata,ordinal_no,nonceodd,
		                     TSS_Session_GetAuth(&sess) , TPM_HASH_SIZE,
		                     nonceodd2,
		                     TSS_Session_GetAuth(&sess2) , TPM_HASH_SIZE,
		                     ci_size + TPM_U32_SIZE + len , TPM_DATA_OFFSET,
		                     0,0);
		if (0 != ret) {
			return ret;
		}
		if (NULL != certifyInfo_ser) {
			SET_TPM_BUFFER(certifyInfo_ser,
			               &tpmdata.buffer[TPM_DATA_OFFSET],
			               ci_size)
		}
		if (NULL != signature) {
			SET_TPM_BUFFER(signature,
			               &tpmdata.buffer[TPM_DATA_OFFSET + ci_size + TPM_U32_SIZE],
			               len);
		}
	} else {
		/* AUTH1: only the key being certified is authorized */
		ret = TSS_SessionOpen(SESSION_OIAP,
		                      &sess,
		                      usageAuth, 0, 0);
		if (0 != ret) {
			return ret;
		}
		ret = TSS_authhmac(authdata1,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
		                   TPM_U32_SIZE, &ordinal_no,
		                   TPM_NONCE_SIZE, antiReplay,
		                   0,0);
		if (0 != ret) {
			TSS_SessionClose(&sess);
			return ret;
		}
		ret = TSS_buildbuff("00 c2 T l l l % L % o %", &tpmdata,
		                    ordinal_no,
		                    certHandle_no,
		                    keyHandle_no,
		                    TPM_HASH_SIZE, antiReplay,
		                    TSS_Session_GetHandle(&sess),
		                    TPM_NONCE_SIZE,nonceodd,
		                    c,
		                    TPM_HASH_SIZE, authdata1);
		if ((ret & ERR_MASK)) {
			TSS_SessionClose(&sess);
			return ret;
		}
		ret = TPM_Transmit(&tpmdata,"CertifyKey - AUTH1");
		TSS_SessionClose(&sess);
		if (0 != ret) {
			return ret;
		}
		ci_size = TPM_GetCertifyInfoSize(&tpmdata.buffer[TPM_DATA_OFFSET]);
		ret = tpm_buffer_load32(&tpmdata, TPM_DATA_OFFSET + ci_size, &len);
		if ((ret & ERR_MASK)) {
			return ret;
		}
		ret = TSS_checkhmac1(&tpmdata,ordinal_no,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
		                     ci_size + TPM_U32_SIZE + len , TPM_DATA_OFFSET,
		                     0,0);
		if (0 != ret) {
			return ret;
		}
		if (NULL != certifyInfo_ser) {
			SET_TPM_BUFFER(certifyInfo_ser,
			               &tpmdata.buffer[TPM_DATA_OFFSET],
			               ci_size)
		}
		if (NULL != signature) {
			SET_TPM_BUFFER(signature,
			               &tpmdata.buffer[TPM_DATA_OFFSET + ci_size + TPM_U32_SIZE],
			               len);
		}
	}
	return ret;
}
/****************************************************************************/
/*                                                                          */
/* Certify a key (TPM_CertifyKey2, with migration public key digest)        */
/*                                                                          */
/* The arguments are ...                                                    */
/*                                                                          */
/* certhandle         is the handle of the key used to certify the key     */
/* keyhandle          is the handle of the key to be certified             */
/* migrationPubDigest points to a TPM_HASH_SIZE (20) bytes digest of the   */
/*                    migration public key (must not be NULL)              */
/* certKeyAuth        is a pointer to the usage password of the            */
/*                    certifying key (must not be NULL)                    */
/* usageAuth          is a pointer to the usage password of the key to be  */
/*                    certified (may be NULL)                              */
/* certifyInfo_ser    if not NULL, receives the serialized certifyInfo     */
/*                    blob returned by the TPM                             */
/* signature          if not NULL, receives the signature over the         */
/*                    certifyInfo blob                                     */
/*                                                                          */
/* The anti-replay nonce is generated internally by this function.          */
/****************************************************************************/
/*
 * Issue the TPM_CertifyKey2 ordinal. Two command variants are built:
 * AUTH2 (usageAuth given) authorizes both the key being certified and
 * the certifying key; AUTH1 (usageAuth NULL) authorizes only the
 * certifying key. On success the serialized certifyInfo and the
 * signature from the response are copied into the caller's buffers.
 *
 * Bug fix (AUTH1 branch): the request previously serialized
 * TSS_Session_GetAuth(&sess) with the 'l' format where the session
 * HANDLE belongs, so the auth-data pointer was sent instead of the
 * handle. It now uses 'L' with TSS_Session_GetHandle(&sess), matching
 * every other command builder in this file.
 */
uint32_t TPM_CertifyKey2(uint32_t certhandle,
                         uint32_t keyhandle,
                         unsigned char * migrationPubDigest,
                         unsigned char * certKeyAuth,
                         unsigned char * usageAuth,
                         struct tpm_buffer *certifyInfo_ser,
                         struct tpm_buffer *signature)
{
	uint32_t ret = 0;
	uint32_t ordinal_no = htonl(TPM_ORD_CertifyKey2);
	unsigned char c = 0;
	unsigned char authdata1[TPM_NONCE_SIZE];
	unsigned char antiReplay[TPM_HASH_SIZE];
	unsigned char nonceodd[TPM_NONCE_SIZE];
	STACK_TPM_BUFFER( tpmdata )
	uint32_t certHandle_no = htonl(certhandle);
	uint32_t keyHandle_no = htonl(keyhandle);
	uint32_t ci_size;
	uint32_t len;
	session sess;
	/* the certifying key's password and the digest are always required */
	if (NULL == certKeyAuth ||
	    NULL == migrationPubDigest) {
		return ERR_NULL_ARG;
	}
	ret = needKeysRoom(certhandle, keyhandle, 0, 0);
	if (ret != 0) {
		return ret;
	}
	TSS_gennonce(antiReplay);
	TSS_gennonce(nonceodd);
	if (NULL != usageAuth) {
		/* AUTH2: first session authorizes the key being certified,
		 * second session authorizes the certifying key */
		unsigned char authdata2[TPM_NONCE_SIZE];
		unsigned char nonceodd2[TPM_NONCE_SIZE];
		session sess2;
		TSS_gennonce(nonceodd2);
		ret = TSS_SessionOpen(SESSION_OIAP,
		                      &sess,
		                      usageAuth,0,0);
		if (0 != ret) {
			return ret;
		}
		ret = TSS_SessionOpen(SESSION_OIAP,
		                      &sess2,
		                      certKeyAuth, 0,0);
		if (0 != ret) {
			TSS_SessionClose(&sess);
			return ret;
		}
		/* authorization HMAC over ordinal, digest and anti-replay nonce,
		 * one per session */
		ret = TSS_authhmac(authdata1,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
		                   TPM_U32_SIZE, &ordinal_no,
		                   TPM_HASH_SIZE, migrationPubDigest,
		                   TPM_NONCE_SIZE, antiReplay,
		                   0,0);
		if (0 != ret) {
			TSS_SessionClose(&sess);
			TSS_SessionClose(&sess2);
			return ret;
		}
		ret = TSS_authhmac(authdata2,TSS_Session_GetAuth(&sess2),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess2),nonceodd2,c,
		                   TPM_U32_SIZE, &ordinal_no,
		                   TPM_HASH_SIZE, migrationPubDigest,
		                   TPM_NONCE_SIZE, antiReplay,
		                   0,0);
		if (0 != ret) {
			TSS_SessionClose(&sess);
			TSS_SessionClose(&sess2);
			return ret;
		}
		/* serialize the AUTH2 command: tag 00c3, length, ordinal,
		 * handles, digest, nonce, then both authorization trailers */
		ret = TSS_buildbuff("00 c3 T l l l % % L % o % L % o %", &tpmdata,
		                    ordinal_no,
		                    keyHandle_no,
		                    certHandle_no,
		                    TPM_DIGEST_SIZE, migrationPubDigest,
		                    TPM_HASH_SIZE, antiReplay,
		                    TSS_Session_GetHandle(&sess),
		                    TPM_NONCE_SIZE,nonceodd,
		                    c,
		                    TPM_HASH_SIZE, authdata1,
		                    TSS_Session_GetHandle(&sess2),
		                    TPM_NONCE_SIZE,nonceodd2,
		                    c,
		                    TPM_HASH_SIZE,authdata2);
		if ((ret & ERR_MASK)) {
			TSS_SessionClose(&sess);
			TSS_SessionClose(&sess2);
			return ret;
		}
		ret = TPM_Transmit(&tpmdata,"CertifyKey2 - AUTH2");
		TSS_SessionClose(&sess);
		TSS_SessionClose(&sess2);
		if (0 != ret) {
			return ret;
		}
		/* response layout: certifyInfo || sigSize (4) || signature */
		ci_size = TPM_GetCertifyInfoSize(&tpmdata.buffer[TPM_DATA_OFFSET]);
		ret = tpm_buffer_load32(&tpmdata, TPM_DATA_OFFSET + ci_size, &len);
		if ((ret & ERR_MASK)) {
			return ret;
		}
		ret = TSS_checkhmac2(&tpmdata,ordinal_no,nonceodd,
		                     TSS_Session_GetAuth(&sess) , TPM_HASH_SIZE,
		                     nonceodd2,
		                     TSS_Session_GetAuth(&sess2) , TPM_HASH_SIZE,
		                     ci_size + TPM_U32_SIZE + len, TPM_DATA_OFFSET,
		                     0,0);
		if (0 != ret) {
			return ret;
		}
		if (NULL != certifyInfo_ser) {
			SET_TPM_BUFFER(certifyInfo_ser,
			               &tpmdata.buffer[TPM_DATA_OFFSET],
			               ci_size)
		}
		if (NULL != signature) {
			SET_TPM_BUFFER(signature,
			               &tpmdata.buffer[TPM_DATA_OFFSET + ci_size + TPM_U32_SIZE],
			               len);
		}
	} else {
		/* AUTH1: only the certifying key is authorized */
		TSS_gennonce(nonceodd);
		ret = TSS_SessionOpen(SESSION_OIAP,
		                      &sess,
		                      certKeyAuth, 0,0);
		if (0 != ret) {
			return ret;
		}
		ret = TSS_authhmac(authdata1,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,TSS_Session_GetENonce(&sess),nonceodd,c,
		                   TPM_U32_SIZE, &ordinal_no,
		                   TPM_HASH_SIZE, migrationPubDigest,
		                   TPM_NONCE_SIZE, antiReplay,
		                   0,0);
		if (0 != ret) {
			TSS_SessionClose(&sess);
			return ret;
		}
		/* fixed: was "... % % l % o %" with TSS_Session_GetAuth(&sess);
		 * the session handle must be serialized here ('L'), not the
		 * authorization data pointer */
		ret = TSS_buildbuff("00 c2 T l l l % % L % o %", &tpmdata,
		                    ordinal_no,
		                    keyHandle_no,
		                    certHandle_no,
		                    TPM_DIGEST_SIZE, migrationPubDigest,
		                    TPM_HASH_SIZE, antiReplay,
		                    TSS_Session_GetHandle(&sess),
		                    TPM_NONCE_SIZE,nonceodd,
		                    c,
		                    TPM_HASH_SIZE, authdata1);
		if ((ret & ERR_MASK)) {
			TSS_SessionClose(&sess);
			return ret;
		}
		ret = TPM_Transmit(&tpmdata,"CertifyKey2 - AUTH1");
		TSS_SessionClose(&sess);
		if (0 != ret) {
			return ret;
		}
		ci_size = TPM_GetCertifyInfoSize(&tpmdata.buffer[TPM_DATA_OFFSET]);
		ret = tpm_buffer_load32(&tpmdata, TPM_DATA_OFFSET + ci_size, &len);
		if ((ret & ERR_MASK)) {
			return ret;
		}
		ret = TSS_checkhmac1(&tpmdata,ordinal_no,nonceodd,TSS_Session_GetAuth(&sess),TPM_HASH_SIZE,
		                     ci_size + TPM_U32_SIZE + len , TPM_DATA_OFFSET,
		                     0,0);
		if (0 != ret) {
			return ret;
		}
		if (NULL != certifyInfo_ser) {
			SET_TPM_BUFFER(certifyInfo_ser,
			               &tpmdata.buffer[TPM_DATA_OFFSET],
			               ci_size)
		}
		if (NULL != signature) {
			SET_TPM_BUFFER(signature,
			               &tpmdata.buffer[TPM_DATA_OFFSET + ci_size + TPM_U32_SIZE],
			               len);
		}
	}
	return ret;
}
/*
 * Fetch the public portion of a loaded key and compute its digest.
 *
 * keyhandle   handle of the loaded key
 * keyPassHash hashed usage password for the key
 * digest      receives the hash of the public key
 *
 * Returns 0 on success or an error code from the underlying calls.
 */
uint32_t TPM_GetPubKeyDigest(uint32_t keyhandle, unsigned char *keyPassHash,
                             unsigned char *digest)
{
	keydata key;
	uint32_t rc;

	rc = needKeysRoom(keyhandle, 0, 0, 0);
	if (rc != 0) {
		return rc;
	}
	/* only the public portion of 'key' is filled in and hashed */
	rc = TPM_GetPubKey(keyhandle, keyPassHash, &key.pub);
	if (rc != 0) {
		return rc;
	}
	rc = TPM_HashPubKey(&key, digest);
	return (rc & ERR_MASK) ? rc : 0;
}
|
louissobel/828-ibmswtpm-fork
|
libtpm/lib/keys.c
|
C
|
bsd-3-clause
| 67,340
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE194_Unexpected_Sign_Extension__fscanf_memmove_73b.cpp
Label Definition File: CWE194_Unexpected_Sign_Extension.label.xml
Template File: sources-sink-73b.tmpl.cpp
*/
/*
* @description
* CWE: 194 Unexpected Sign Extension
* BadSource: fscanf Read data from the console using fscanf()
* GoodSource: Positive integer
* Sinks: memmove
* BadSink : Copy strings using memmove() with the length of data
* Flow Variant: 73 Data flow: data passed in a list from one function to another in different source files
*
* */
#include "std_testcase.h"
#include <list>
using namespace std;
namespace CWE194_Unexpected_Sign_Extension__fscanf_memmove_73
{

#ifndef OMITBAD

/* Bad sink for flow variant 73: 'data' arrives via a list from the
 * companion source file, where it was read with fscanf() and may be
 * negative. Passing it to memmove() sign-extends it to a huge size,
 * which is the CWE-194 flaw this testcase intentionally demonstrates
 * (do not "fix" it). */
void badSink(list<short> dataList)
{
    /* copy data out of dataList */
    short data = dataList.back();
    {
        char source[100];
        char dest[100] = "";
        memset(source, 'A', 100-1);
        source[100-1] = '\0';
        /* note: this guard rejects values >= 100 but NOT negative ones */
        if (data < 100)
        {
            /* POTENTIAL FLAW: data is interpreted as an unsigned int - if its value is negative,
             * the sign extension could result in a very large number */
            memmove(dest, source, data);
            dest[data] = '\0'; /* NULL terminate */
        }
        printLine(dest);
    }
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink: the companion source
 * supplies a fixed positive value, so the same sink code is benign. */
void goodG2BSink(list<short> dataList)
{
    short data = dataList.back();
    {
        char source[100];
        char dest[100] = "";
        memset(source, 'A', 100-1);
        source[100-1] = '\0';
        if (data < 100)
        {
            /* POTENTIAL FLAW: data is interpreted as an unsigned int - if its value is negative,
             * the sign extension could result in a very large number */
            memmove(dest, source, data);
            dest[data] = '\0'; /* NULL terminate */
        }
        printLine(dest);
    }
}

#endif /* OMITGOOD */

} /* close namespace */
|
JianpingZeng/xcc
|
xcc/test/juliet/testcases/CWE194_Unexpected_Sign_Extension/s01/CWE194_Unexpected_Sign_Extension__fscanf_memmove_73b.cpp
|
C++
|
bsd-3-clause
| 2,048
|
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrShaderCaps.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/SurfaceFillContext.h"
#include "src/gpu/gl/GrGLGpu.h"
#include "src/gpu/gl/GrGLUtil.h"
#include "tests/Test.h"
#include "tests/TestUtils.h"
#include "tools/gpu/GrContextFactory.h"
#include "tools/gpu/ManagedBackendTexture.h"
#include "tools/gpu/gl/GLTestContext.h"
#ifdef SK_GL
using sk_gpu_test::GLTestContext;
// Shared teardown for the EGLImage test: destroys the EGL image owned by
// the second context (if any), deletes the texture owned by the first,
// and leaves glctx0 current. dContext is accepted by value so the
// GrDirectContext reference stays alive for the duration of the teardown.
static void cleanup(GLTestContext* glctx0,
                    GrGLuint texID0,
                    GLTestContext* glctx1,
                    sk_sp<GrDirectContext> dContext,
                    GrEGLImage image1) {
    if (glctx1) {
        glctx1->makeCurrent();
        if (image1 != GR_EGL_NO_IMAGE) {
            glctx1->destroyEGLImage(image1);
        }
    }
    glctx0->makeCurrent();
    if (texID0 != 0) {
        GR_GL_CALL(glctx0->gl(), DeleteTextures(1, &texID0));
    }
}
// Creates a texture in a second GL context, wraps it in an EGLImage,
// re-imports it into the primary context as a GL_TEXTURE_EXTERNAL, and
// verifies the wrapped proxy's properties and read/copy behavior.
DEF_GPUTEST_FOR_GL_RENDERING_CONTEXTS(EGLImageTest, reporter, ctxInfo) {
    auto context0 = ctxInfo.directContext();
    sk_gpu_test::GLTestContext* glCtx0 = ctxInfo.glContext();
    // Try to create a second GL context and then check if the contexts have necessary
    // extensions to run this test.
    if (kGLES_GrGLStandard != glCtx0->gl()->fStandard) {
        return;
    }
    GrGLGpu* gpu0 = static_cast<GrGLGpu*>(context0->priv().getGpu());
    if (!gpu0->glCaps().shaderCaps()->externalTextureSupport()) {
        return;
    }
    std::unique_ptr<GLTestContext> glCtx1 = glCtx0->makeNew();
    if (!glCtx1) {
        return;
    }
    sk_sp<GrDirectContext> context1 = GrDirectContext::MakeGL(sk_ref_sp(glCtx1->gl()));
    GrEGLImage image = GR_EGL_NO_IMAGE;
    GrGLTextureInfo externalTexture;
    externalTexture.fID = 0;
    if (!context1) {
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    if (!glCtx1->gl()->hasExtension("EGL_KHR_image") ||
        !glCtx1->gl()->hasExtension("EGL_KHR_gl_texture_2D_image")) {
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    ///////////////////////////////// CONTEXT 1 ///////////////////////////////////
    // Use GL Context 1 to create a texture unknown to context 0.
    context1->flushAndSubmit();
    static const int kSize = 100;
    auto mbet = sk_gpu_test::ManagedBackendTexture::MakeWithoutData(
            context1.get(), kSize, kSize, kRGBA_8888_SkColorType, GrMipmapped::kNo,
            GrRenderable::kNo, GrProtected::kNo);
    if (!mbet) {
        ERRORF(reporter, "Error creating texture for EGL Image");
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    GrGLTextureInfo texInfo;
    if (!mbet->texture().getGLTextureInfo(&texInfo)) {
        ERRORF(reporter, "Failed to get GrGLTextureInfo");
        // Fix: run the common teardown here too; this early return previously
        // skipped cleanup(), unlike every other failure path in this test.
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    if (GR_GL_TEXTURE_2D != texInfo.fTarget) {
        ERRORF(reporter, "Expected backend texture to be 2D");
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    // Wrap the texture in an EGLImage
    image = glCtx1->texture2DToEGLImage(texInfo.fID);
    if (GR_EGL_NO_IMAGE == image) {
        ERRORF(reporter, "Error creating EGL Image from texture");
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    // Since we are dealing with two different GL contexts here, we need to call finish so that the
    // clearing of the texture that happens in createTextingOnlyBackendTexture occurs before we call
    // TexSubImage below on the other context. Otherwise, it is possible the calls get reordered and
    // the clearing overwrites the TexSubImage writes.
    GR_GL_CALL(glCtx1->gl(), Finish());
    // Populate the texture using GL context 1. Important to use TexSubImage as TexImage orphans
    // the EGL image. Also, this must be done after creating the EGLImage as the texture
    // contents may not be preserved when the image is created.
    SkAutoTMalloc<uint32_t> pixels(kSize * kSize);
    for (int i = 0; i < kSize*kSize; ++i) {
        pixels.get()[i] = 0xDDAABBCC;
    }
    GR_GL_CALL(glCtx1->gl(), ActiveTexture(GR_GL_TEXTURE0));
    GR_GL_CALL(glCtx1->gl(), BindTexture(texInfo.fTarget, texInfo.fID));
    GR_GL_CALL(glCtx1->gl(), TexSubImage2D(texInfo.fTarget, 0, 0, 0, kSize, kSize,
                                           GR_GL_RGBA, GR_GL_UNSIGNED_BYTE, pixels.get()));
    GR_GL_CALL(glCtx1->gl(), Finish());
    // We've been making direct GL calls in GL context 1, let GrDirectContext 1 know its internal
    // state is invalid.
    context1->resetContext();
    ///////////////////////////////// CONTEXT 0 ///////////////////////////////////
    // Make a new texture ID in GL Context 0 from the EGL Image
    glCtx0->makeCurrent();
    externalTexture.fTarget = GR_GL_TEXTURE_EXTERNAL;
    externalTexture.fID = glCtx0->eglImageToExternalTexture(image);
    externalTexture.fFormat = GR_GL_RGBA8;
    if (0 == externalTexture.fID) {
        ERRORF(reporter, "Error converting EGL Image back to texture");
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    // Wrap this texture ID in a GrTexture
    GrBackendTexture backendTex(kSize, kSize, GrMipmapped::kNo, externalTexture);
    GrColorInfo colorInfo(GrColorType::kRGBA_8888, kPremul_SkAlphaType, nullptr);
    // TODO: If I make this TopLeft origin to match resolve_origin calls for kDefault, this test
    // fails on the Nexus5. Why?
    GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin;
    sk_sp<GrSurfaceProxy> texProxy = context0->priv().proxyProvider()->wrapBackendTexture(
            backendTex, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo, kRW_GrIOType);
    if (!texProxy) {
        ERRORF(reporter, "Error wrapping external texture in GrTextureProxy.");
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    skgpu::Swizzle swizzle = context0->priv().caps()->getReadSwizzle(texProxy->backendFormat(),
                                                                    colorInfo.colorType());
    GrSurfaceProxyView view(std::move(texProxy), origin, swizzle);
    auto surfaceContext = context0->priv().makeSC(std::move(view), colorInfo);
    if (!surfaceContext) {
        ERRORF(reporter, "Error wrapping external texture in SurfaceContext.");
        cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
        return;
    }
    GrTextureProxy* proxy = surfaceContext->asTextureProxy();
    REPORTER_ASSERT(reporter, proxy->mipmapped() == GrMipmapped::kNo);
    REPORTER_ASSERT(reporter, proxy->peekTexture()->mipmapped() == GrMipmapped::kNo);
    REPORTER_ASSERT(reporter, proxy->textureType() == GrTextureType::kExternal);
    REPORTER_ASSERT(reporter, proxy->peekTexture()->textureType() == GrTextureType::kExternal);
    REPORTER_ASSERT(reporter, proxy->hasRestrictedSampling());
    REPORTER_ASSERT(reporter, proxy->peekTexture()->hasRestrictedSampling());
    // Should not be able to wrap as a RT
    {
        auto temp = context0->priv().makeSFCFromBackendTexture(colorInfo,
                                                               backendTex,
                                                               1,
                                                               origin,
                                                               /*release helper*/ nullptr);
        if (temp) {
            ERRORF(reporter, "Should not be able to wrap an EXTERNAL texture as a RT.");
        }
    }
    //TestReadPixels(reporter, context0, surfaceContext.get(), pixels.get(), "EGLImageTest-read");
    SkDebugf("type: %d\n", (int)surfaceContext->asTextureProxy()->textureType());
    // We should not be able to write to an EXTERNAL texture
    TestWritePixels(reporter, context0, surfaceContext.get(), false, "EGLImageTest-write");
    // Only test RT-config
    // TODO: why do we always need to draw to copy from an external texture?
    TestCopyFromSurface(reporter,
                        context0,
                        surfaceContext->asSurfaceProxyRef(),
                        surfaceContext->origin(),
                        colorInfo.colorType(),
                        pixels.get(),
                        "EGLImageTest-copy");
    cleanup(glCtx0, externalTexture.fID, glCtx1.get(), context1, image);
}
#endif // SK_GL
|
google/skia
|
tests/EGLImageTest.cpp
|
C++
|
bsd-3-clause
| 8,743
|
import React from 'react';
import { createShallow } from '@material-ui/core/test-utils';
import { CustomTableRow } from '../components/common/CustomTableRow';
// Tests for the CustomTableRow presentational component.
// Fix: the describe label previously said 'CustomTextField component',
// a copy-paste leftover — this suite exercises CustomTableRow.
describe('CustomTableRow component', () => {
    let shallow;

    beforeAll(() => {
        shallow = createShallow();
    });

    // Minimal props: a title cell, child content, and the shared global
    // test props the project injects for every component spec.
    const props = {
        title: 'test title',
        children: 'test data',
        classes: {},
        ...(global as any).eventkit_test_props,
    };

    // Shallow-renders the component with the supplied props.
    const getWrapper = prop => (
        shallow(<CustomTableRow {...prop} />)
    );

    it('should render a title and data', () => {
        const wrapper = getWrapper(props);
        expect(wrapper.find('.qa-CustomTableRow')).toHaveLength(1);
        expect(wrapper.find('.qa-CustomTableRow').find('div').at(1)
            .text()).toEqual('test title');
        expect(wrapper.find('.qa-CustomTableRow').find('div').at(2)
            .text()).toEqual('test data');
    });
});
|
venicegeo/eventkit-cloud
|
eventkit_cloud/ui/static/ui/app/tests/CustomTableRow.spec.tsx
|
TypeScript
|
bsd-3-clause
| 929
|
<?xml version="1.0" encoding="ascii"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>clickmodels.inference.ClickModel</title>
<link rel="stylesheet" href="epydoc.css" type="text/css" />
<script type="text/javascript" src="epydoc.js"></script>
</head>
<body bgcolor="white" text="black" link="blue" vlink="#204080"
alink="#204080">
<!-- ==================== NAVIGATION BAR ==================== -->
<table class="navbar" border="0" width="100%" cellpadding="0"
bgcolor="#a0c0ff" cellspacing="0">
<tr valign="middle">
<!-- Home link -->
<th> <a
href="clickmodels-module.html">Home</a> </th>
<!-- Tree link -->
<th> <a
href="module-tree.html">Trees</a> </th>
<!-- Index link -->
<th> <a
href="identifier-index.html">Indices</a> </th>
<!-- Help link -->
<th> <a
href="help.html">Help</a> </th>
<th class="navbar" width="100%"></th>
</tr>
</table>
<table width="100%" cellpadding="0" cellspacing="0">
<tr valign="top">
<td width="100%">
<span class="breadcrumbs">
<a href="clickmodels-module.html">Package clickmodels</a> ::
<a href="clickmodels.inference-module.html">Module inference</a> ::
Class ClickModel
</span>
</td>
<td>
<table cellpadding="0" cellspacing="0">
<!-- hide/show private -->
<tr><td align="right"><span class="options">[<a href="javascript:void(0);" class="privatelink"
onclick="toggle_private();">hide private</a>]</span></td></tr>
<tr><td align="right"><span class="options"
>[<a href="frames.html" target="_top">frames</a
>] | <a href="clickmodels.inference.ClickModel-class.html"
target="_top">no frames</a>]</span></td></tr>
</table>
</td>
</tr>
</table>
<!-- ==================== CLASS DESCRIPTION ==================== -->
<h1 class="epydoc">Class ClickModel</h1><p class="nomargin-top"><span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel">source code</a></span></p>
<center>
<center> <map id="class_hierarchy_for_clickmodel" name="class_hierarchy_for_clickmodel">
<area shape="rect" id="node1" href="clickmodels.inference.ClickModel-class.html" title="ClickModel" alt="" coords="139,5,225,29"/>
<area shape="rect" id="node2" href="clickmodels.inference.DbnModel-class.html" title="DbnModel" alt="" coords="34,59,115,83"/>
<area shape="rect" id="node3" href="clickmodels.inference.DcmModel-class.html" title="DcmModel" alt="" coords="140,59,224,83"/>
<area shape="rect" id="node6" href="clickmodels.inference.UbmModel-class.html" title="UbmModel" alt="" coords="249,59,335,83"/>
<area shape="rect" id="node5" href="clickmodels.inference.SimplifiedDbnModel-class.html" title="SimplifiedDbnModel" alt="" coords="5,112,144,136"/>
<area shape="rect" id="node4" href="clickmodels.inference.EbUbmModel-class.html" title="EbUbmModel" alt="" coords="241,112,343,136"/>
</map>
<img src="class_hierarchy_for_clickmodel.gif" alt='' usemap="#class_hierarchy_for_clickmodel" ismap="ismap" class="graph-without-title" />
</center>
</center>
<hr />
<p>An abstract click model interface.</p>
<!-- ==================== INSTANCE METHODS ==================== -->
<a name="section-InstanceMethods"></a>
<table class="summary" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr bgcolor="#70b0f0" class="table-header">
<td colspan="2" class="table-header">
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr valign="top">
<td align="left"><span class="table-header">Instance Methods</span></td>
<td align="right" valign="top"
><span class="options">[<a href="#section-InstanceMethods"
class="privatelink" onclick="toggle_private();"
>hide private</a>]</span></td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a name="__init__"></a><span class="summary-sig-name">__init__</span>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">ignoreIntents</span>=<span class="summary-sig-default">True</span>,
<span class="summary-sig-arg">ignoreLayout</span>=<span class="summary-sig-default">True</span>,
<span class="summary-sig-arg">config</span>=<span class="summary-sig-default">None</span>)</span></td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.__init__">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a name="train"></a><span class="summary-sig-name">train</span>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">sessions</span>)</span><br />
Trains the model.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.train">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a href="clickmodels.inference.ClickModel-class.html#test" class="summary-sig-name">test</a>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">sessions</span>,
<span class="summary-sig-arg">reportPositionPerplexity</span>=<span class="summary-sig-default">True</span>)</span><br />
          Evaluates the prediction power of the click model for the given
      sessions.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.test">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr class="private">
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a href="clickmodels.inference.ClickModel-class.html#_get_click_probs" class="summary-sig-name" onclick="show_private();">_get_click_probs</a>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">s</span>,
<span class="summary-sig-arg">possible_intents</span>)</span><br />
Returns click probabilities list for a given list of s.clicks.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel._get_click_probs">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a href="clickmodels.inference.ClickModel-class.html#get_loglikelihood" class="summary-sig-name">get_loglikelihood</a>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">sessions</span>)</span><br />
Returns the average log-likelihood of the current model for given
sessions.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.get_loglikelihood">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a href="clickmodels.inference.ClickModel-class.html#get_log_click_probs" class="summary-sig-name">get_log_click_probs</a>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">session</span>)</span><br />
Returns an average log-likelihood for a given session, i.e.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.get_log_click_probs">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a name="get_model_relevances"></a><span class="summary-sig-name">get_model_relevances</span>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">session</span>,
<span class="summary-sig-arg">intent</span>=<span class="summary-sig-default">False</span>)</span><br />
Returns estimated relevance of each document in a given session based
on a trained click model.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.get_model_relevances">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a href="clickmodels.inference.ClickModel-class.html#predict_click_probs" class="summary-sig-name">predict_click_probs</a>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">session</span>,
<span class="summary-sig-arg">intent</span>=<span class="summary-sig-default">False</span>)</span><br />
Predicts click probabilities for a given session.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.predict_click_probs">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a href="clickmodels.inference.ClickModel-class.html#predict_stop_probs" class="summary-sig-name">predict_stop_probs</a>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">session</span>,
<span class="summary-sig-arg">intent</span>=<span class="summary-sig-default">False</span>)</span><br />
Predicts stop probabilities (after click) for each document in a
session.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.predict_stop_probs">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a name="get_abandonment_prob"></a><span class="summary-sig-name">get_abandonment_prob</span>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">rank</span>,
<span class="summary-sig-arg">intent</span>=<span class="summary-sig-default">False</span>,
<span class="summary-sig-arg">layout</span>=<span class="summary-sig-default">None</span>)</span><br />
Predicts probability of stopping without click after examining
document at rank `rank`.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.get_abandonment_prob">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a name="generate_clicks"></a><span class="summary-sig-name">generate_clicks</span>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">session</span>)</span><br />
Generates clicks for a given session, assuming cascade examination
order.</td>
<td align="right" valign="top">
<span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.generate_clicks">source code</a></span>
</td>
</tr>
</table>
</td>
</tr>
</table>
<!-- ==================== METHOD DETAILS ==================== -->
<a name="section-MethodDetails"></a>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr bgcolor="#70b0f0" class="table-header">
<td colspan="2" class="table-header">
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr valign="top">
<td align="left"><span class="table-header">Method Details</span></td>
<td align="right" valign="top"
><span class="options">[<a href="#section-MethodDetails"
class="privatelink" onclick="toggle_private();"
>hide private</a>]</span></td>
</tr>
</table>
</td>
</tr>
</table>
<a name="test"></a>
<div>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr><td>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr valign="top"><td>
<h3 class="epydoc"><span class="sig"><span class="sig-name">test</span>(<span class="sig-arg">self</span>,
<span class="sig-arg">sessions</span>,
<span class="sig-arg">reportPositionPerplexity</span>=<span class="sig-default">True</span>)</span>
</h3>
</td><td align="right" valign="top"
><span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.test">source code</a></span>
</td>
</tr></table>
<p>Evaluates the prediciton power of the click model for a given
sessions. Returns the log-likelihood, perplexity, position perplexity
(perplexity for each rank a.k.a. position in a SERP) and separate
perplexity values for clicks and non-clicks (skips).</p>
<dl class="fields">
</dl>
</td></tr></table>
</div>
<a name="_get_click_probs"></a>
<div class="private">
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr><td>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr valign="top"><td>
<h3 class="epydoc"><span class="sig"><span class="sig-name">_get_click_probs</span>(<span class="sig-arg">self</span>,
<span class="sig-arg">s</span>,
<span class="sig-arg">possible_intents</span>)</span>
</h3>
</td><td align="right" valign="top"
><span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel._get_click_probs">source code</a></span>
</td>
</tr></table>
<p>Returns click probabilities list for a given list of s.clicks. For
each intent $i$ and each rank $k$ we have: click_probs[i][k-1] = P(C_1,
..., C_k | I=i)</p>
<dl class="fields">
</dl>
</td></tr></table>
</div>
<a name="get_loglikelihood"></a>
<div>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr><td>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr valign="top"><td>
<h3 class="epydoc"><span class="sig"><span class="sig-name">get_loglikelihood</span>(<span class="sig-arg">self</span>,
<span class="sig-arg">sessions</span>)</span>
</h3>
</td><td align="right" valign="top"
><span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.get_loglikelihood">source code</a></span>
</td>
</tr></table>
<p>Returns the average log-likelihood of the current model for given
sessions. This is a lightweight version of the self.test() method.</p>
<dl class="fields">
</dl>
</td></tr></table>
</div>
<a name="get_log_click_probs"></a>
<div>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr><td>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr valign="top"><td>
<h3 class="epydoc"><span class="sig"><span class="sig-name">get_log_click_probs</span>(<span class="sig-arg">self</span>,
<span class="sig-arg">session</span>)</span>
</h3>
</td><td align="right" valign="top"
><span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.get_log_click_probs">source code</a></span>
</td>
</tr></table>
<p>Returns an average log-likelihood for a given session, i.e.
log-likelihood of all the click events, divided by the number of
documents in the session.</p>
<dl class="fields">
</dl>
</td></tr></table>
</div>
<a name="predict_click_probs"></a>
<div>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr><td>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr valign="top"><td>
<h3 class="epydoc"><span class="sig"><span class="sig-name">predict_click_probs</span>(<span class="sig-arg">self</span>,
<span class="sig-arg">session</span>,
<span class="sig-arg">intent</span>=<span class="sig-default">False</span>)</span>
</h3>
</td><td align="right" valign="top"
><span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.predict_click_probs">source code</a></span>
</td>
</tr></table>
<p>Predicts click probabilities for a given session. Does not use
session.clicks. This is a vector of P(C_k = 1 | E_k = 1) for different
ranks $k$.</p>
<dl class="fields">
</dl>
</td></tr></table>
</div>
<a name="predict_stop_probs"></a>
<div>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr><td>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr valign="top"><td>
<h3 class="epydoc"><span class="sig"><span class="sig-name">predict_stop_probs</span>(<span class="sig-arg">self</span>,
<span class="sig-arg">session</span>,
<span class="sig-arg">intent</span>=<span class="sig-default">False</span>)</span>
</h3>
</td><td align="right" valign="top"
><span class="codelink"><a href="clickmodels.inference-pysrc.html#ClickModel.predict_stop_probs">source code</a></span>
</td>
</tr></table>
<p>Predicts stop probabilities (after click) for each document in a
session. This is often referred to as satisfaction probability. This is a
vector of P(S_k = 1 | C_k = 1) for different ranks $k$.</p>
<dl class="fields">
</dl>
</td></tr></table>
</div>
<br />
<!-- ==================== NAVIGATION BAR ==================== -->
<table class="navbar" border="0" width="100%" cellpadding="0"
bgcolor="#a0c0ff" cellspacing="0">
<tr valign="middle">
<!-- Home link -->
<th> <a
href="clickmodels-module.html">Home</a> </th>
<!-- Tree link -->
<th> <a
href="module-tree.html">Trees</a> </th>
<!-- Index link -->
<th> <a
href="identifier-index.html">Indices</a> </th>
<!-- Help link -->
<th> <a
href="help.html">Help</a> </th>
<th class="navbar" width="100%"></th>
</tr>
</table>
<table border="0" cellpadding="0" cellspacing="0" width="100%%">
<tr>
<td align="left" class="footer">
Generated by Epydoc 3.0.1 on Sun Jun 8 11:05:26 2014
</td>
<td align="right" class="footer">
<a target="mainFrame" href="http://epydoc.sourceforge.net"
>http://epydoc.sourceforge.net</a>
</td>
</tr>
</table>
<script type="text/javascript">
<!--
// Private objects are initially displayed (because if
// javascript is turned off then we want them to be
// visible); but by default, we want to hide them. So hide
// them unless we have a cookie that says to show them.
checkCookie();
// -->
</script>
</body>
</html>
|
varepsilon/clickmodels
|
doc/html/clickmodels.inference.ClickModel-class.html
|
HTML
|
bsd-3-clause
| 21,724
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE590_Free_Memory_Not_on_Heap__free_int_static_31.c
Label Definition File: CWE590_Free_Memory_Not_on_Heap__free.label.xml
Template File: sources-sink-31.tmpl.c
*/
/*
* @description
* CWE: 590 Free Memory Not on Heap
* BadSource: static Data buffer is declared static on the stack
* GoodSource: Allocate memory on the heap
* Sinks:
* BadSink : Print then free data
* Flow Variant: 31 Data flow using a copy of data within the same function
*
* */
#include "std_testcase.h"
#include <wchar.h>
#ifndef OMITBAD
/* BadSink variant: `data` ends up pointing at a buffer that is NOT heap
 * memory and is later passed to free() -- the CWE-590 defect this generated
 * testcase exists to exhibit. The flaw is intentional; do not "fix" it, as
 * analysis tools are expected to flag exactly this pattern. */
void CWE590_Free_Memory_Not_on_Heap__free_int_static_31_bad()
{
    int * data;
    data = NULL; /* Initialize data */
    {
        /* FLAW: data is allocated on the stack and deallocated in the BadSink */
        /* (note: `dataBuffer` actually has static storage duration, not stack;
         * either way it is non-heap memory, so free()ing it is undefined) */
        static int dataBuffer[100];
        {
            size_t i;
            for (i = 0; i < 100; i++)
            {
                dataBuffer[i] = 5;
            }
        }
        data = dataBuffer;
    }
    {
        /* Flow variant 31: the pointer travels through a copy; the inner
         * `data` deliberately shadows the outer one. */
        int * dataCopy = data;
        int * data = dataCopy;
        printIntLine(data[0]);
        /* POTENTIAL FLAW: Possibly deallocating memory allocated on the stack */
        free(data);
    }
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B() uses the GoodSource with the BadSink */
/* The source allocates on the heap, so the shared sink's free() below is
 * well-defined here; only the bad variant trips CWE-590. */
static void goodG2B()
{
    int * data;
    data = NULL; /* Initialize data */
    {
        /* FIX: data is allocated on the heap and deallocated in the BadSink */
        int * dataBuffer = (int *)malloc(100*sizeof(int));
        if (dataBuffer == NULL)
        {
            printLine("malloc() failed");
            exit(1);
        }
        {
            size_t i;
            for (i = 0; i < 100; i++)
            {
                dataBuffer[i] = 5;
            }
        }
        data = dataBuffer;
    }
    {
        /* Same sink as the bad variant (the template comment below is
         * generic); with heap memory this free() is correct. */
        int * dataCopy = data;
        int * data = dataCopy;
        printIntLine(data[0]);
        /* POTENTIAL FLAW: Possibly deallocating memory allocated on the stack */
        free(data);
    }
}
/* Entry point for the good flow variant(s); Juliet's test driver calls this. */
void CWE590_Free_Memory_Not_on_Heap__free_int_static_31_good()
{
    goodG2B();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
 * its own for testing or for building a binary to use in testing binary
 * analysis tools. It is not used when compiling all the testcases as one
 * application, which is how source code analysis tools are tested.
 */
#ifdef INCLUDEMAIN
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE590_Free_Memory_Not_on_Heap__free_int_static_31_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE590_Free_Memory_Not_on_Heap__free_int_static_31_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
JianpingZeng/xcc
|
xcc/test/juliet/testcases/CWE590_Free_Memory_Not_on_Heap/s04/CWE590_Free_Memory_Not_on_Heap__free_int_static_31.c
|
C
|
bsd-3-clause
| 3,007
|
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: gives CMSPlugin.position a default
    # of 0 and marks it non-editable in the admin. The column type
    # (PositiveSmallIntegerField) is unchanged, so this is metadata-only
    # for most backends.

    dependencies = [
        ('cms', '0014_auto_20160404_1908'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cmsplugin',
            name='position',
            field=models.PositiveSmallIntegerField(default=0, verbose_name='position', editable=False),
        ),
    ]
|
rsalmaso/django-cms
|
cms/migrations/0015_auto_20160421_0000.py
|
Python
|
bsd-3-clause
| 391
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" >
<html>
<head>
<title>myGalaxyLogon</title>
<!------------------------------------------------------------------------------------------------------>
<!-- ( THIS PAGE CREATED BY RM. IT IS THE NEW LOGON PAGE FOR EIS TO. ) -->
<!-- ( 2006.10.30 hvp - Fixed links ) -->
<!------------------------------------------------------------------------------------------------------>
<meta content="Microsoft Visual Studio .NET 7.1" name="GENERATOR" />
<meta content="C#" name="CODE_LANGUAGE" />
<meta content="JavaScript" name="vs_defaultClientScript" />
<meta content="http://schemas.microsoft.com/intellisense/ie5" name="vs_targetSchema" />
<link id="Link1" href="css/toNonIE.css" rel="stylesheet" type="text/css"></link>
<!--<link id="Link4" href="css/GalaxyStyles.css" rel="stylesheet" type="text/css"></link>-->
<link id="Link5" href="css/colorbox.css" rel="stylesheet" type="text/css"></link>
<link rel="stylesheet" href="https://ajax.googleapis.com/ajax/libs/jqueryui/1.7.2/themes/redmond/jquery-ui.css" type="text/css" />
<script type="text/javascript" src="scripts/dateFunction.js"></script>
<link href="css/Security.css" type="text/css" rel="stylesheet" />
<style type="text/css">
.font10_tts { FONT-SIZE: 20px; COLOR: red; FONT-FAMILY: Tahoma,Verdana }
.font8_tts { FONT-SIZE: 8pt; COLOR: red; FONT-FAMILY: Tahoma,Verdana }
.ui-widget-header
{
font-size: 11px;
}
.ui-dialog{ position: absolute; overflow:hidden }
.center {
position: absolute;
left: 50%;
top: 50%;
transform: translate(-50%, -50%); /* Yep! */
width: 48%;
height: 59%;
}
.font11text_ul
{
color: #000000;
font-family: Tahoma,Verdana;
font-size: 11px;
padding-left: 5px;
padding-right: 5px;
text-align: left;
text-decoration: underline;
font-style: italic;
cursor: pointer;
}
.font11text_ul_header
{
color: #000000;
font-family: Tahoma,Verdana;
font-size: 12px;
padding-left: 5px;
padding-right: 5px;
text-align: left;
font-weight: bold;
}
.font11text_ul_content
{
color: #000000;
font-family: Tahoma,Verdana;
font-size: 11px;
/*padding-left: 5px;
padding-right: 5px;
*/
text-align: left;
}
.loading-indicator {
margin-top: 20px;margin-bottom: 30px;-moz-border-radius: 12px;-webkit-border-radius: 12px;border-radius: 12px;-moz-box-shadow: 4px 4px 14px #000;-webkit-box-shadow: 4px 4px 14px #000;box-shadow: 4px 4px 14px #000;-moz-transform:rotate(0deg);-webkit-transform:rotate(0deg);-o-transform:rotate(0deg);-ms-transform:rotate(0deg);
display: inline-block;
padding: 12px;
background: white;
-opacity: 0.5;
color: #0b4981;
font-weight: bold;
z-index: 9999;
border: 1px solid #0b4981;
-moz-border-radius: 10px;
-webkit-border-radius: 10px;
-moz-box-shadow: 0 0 5px grey;
-webkit-box-shadow: 0px 0px 5px grey;
-text-shadow: 1px 1px 1px white;
}
.noSelect { user-select: none; -o-user-select: none; -moz-user-select: none; -khtml-user-select: none; -webkit-user-select: none; }
.mgHeaderSession { FONT-WEIGHT: normal; FONT-SIZE: 11px; COLOR: #0066cc; FONT-FAMILY: tahoma }
.mydialogClass
{
margin-top: 20px;margin-bottom: 30px;-moz-border-radius: 12px;-webkit-border-radius: 12px;border-radius: 12px;-moz-box-shadow: 4px 4px 14px #000;-webkit-box-shadow: 4px 4px 14px #000;box-shadow: 4px 4px 14px #000;-moz-transform:rotate(0deg);-webkit-transform:rotate(0deg);-o-transform:rotate(0deg);-ms-transform:rotate(0deg);
behavior: url(ie-css3.htc);
-ms-filter: "progid:DXImageTransform.Microsoft.Shadow(Strength=1, Direction=135, Color='#000000')";
filter: progid:DXImageTransform.Microsoft.Shadow(Strength=1, Direction=135, Color='#000000');
}
.ui-dialog-titlebar {
-moz-border-radius: 6px;-webkit-border-radius: 6px;border-radius: 6px;-moz-transform:rotate(0deg);-webkit-transform:rotate(0deg);-o-transform:rotate(0deg);-ms-transform:rotate(0deg);
}
.ui-widget-header
{
font-size: 11px;
}
.font11text_ul
{
color: #000000;
font-family: Tahoma,Verdana;
font-size: 11px;
padding-left: 5px;
padding-right: 5px;
text-align: left;
text-decoration: underline;
font-style: italic;
cursor: pointer;
}
.font11text_ul_header
{
color: #000000;
font-family: Tahoma,Verdana;
font-size: 12px;
padding-left: 5px;
padding-right: 5px;
text-align: left;
font-weight: bold;
}
.font11text_ul_content
{
color: #000000;
font-family: Tahoma,Verdana;
font-size: 11px;
padding-left: 5px;
padding-right: 5px;
text-align: left;
}
</style>
<script src="scripts/jquery-1.7.2.js" type="text/javascript"></script>
<script src="scripts/jquery-ui.min.1.8.js" type="text/javascript"></script>
<script src="scripts/jquery.colorbox.js" type="text/javascript"></script>
<script src="scripts/jQueryModal.js" type="text/javascript"></script>
<script type="text/javascript" src="scripts/jqPrint-0.3.js"></script>
<script language="javascript" type="text/javascript" src="library/aspnet/Scripts/General.js"></script>
<script language="javascript" type="text/javascript">
var bInRemainAttempt = false;
var $popDialog = null;
function openReportAnIssue() {
    // Opens the "Report an Issue" page in a modeless IE dialog. All
    // user/context parameters are blank on the logon screen; src=M marks
    // the request as coming from the main/logon page.
    var queryParts = [
        'DBConnect=' + "",
        'fy=',
        'dst=' + "",
        'school=' + "",
        'userId=' + "",
        'gs_to_id=0',
        'src=M'
    ];
    var issueUrl = 'library/aspnet/Galaxy_Issue_Reporting.aspx?' + queryParts.join('&');
    // IE dialog feature string (semicolon-separated key/value pairs).
    var dialogFeatures = [
        " dialogWidth : 600px ",
        " dialogHeight : 550px ",
        " status = no ",
        " status : off ",
        " statusbar : no ",
        " status : 0 ",
        " center : yes ",
        " edge : raised",
        " help : off ",
        " resizable : no ",
        " scroll : off "
    ].join(";");
    window.showModelessDialog(issueUrl, window, dialogFeatures);
    return;
}
function ValidateLogon()
{
    // Client-side pre-submit validation of the logon form. Returns true
    // to allow the submit, false to block it (after alerting the user).
    // Three checks, in order:
    //   1. credentials present for the chosen logon mode (DOE vs Galaxy),
    //   2. optional Location field is exactly 6 characters when supplied,
    //   3. Galaxy password must not be the user id reversed.
    //Based upon user's logon choice, do validation
    //debugger;
    //if (!bInRemainAttempt) {
    // Hide the logon panel while validation/authentication runs.
    document.getElementById("divLogon").style.display = "none";
    //} else {
    //    bInRemainAttempt = false;
    //}
    if ( document.getElementById("radioDOE").checked )
    {
        // DOE mode: both DOE fields are mandatory.
        if ( trim(document.getElementById("txtDOEUserID").value).length == 0 ||
             trim(document.getElementById("txtDOEPwd").value).length == 0 )
        {
            alert("Please provide DOE User ID and password");
            return false;
        }
    }
    else if ( document.getElementById("radioGalaxy").checked )
    {
        // Galaxy mode: at least one credential pair must be supplied, and
        // any supplied user id must come with its password.
        if ( trim(document.getElementById("txtGalaxyUserID").value).length == 0 &&
             trim(document.getElementById("txtGalaxyPwd").value).length == 0 &&
             trim(document.getElementById("txtDOEUserID").value).length == 0 &&
             trim(document.getElementById("txtDOEPwd").value).length == 0 )
        {
            alert("Please provide at least one of Galaxy or DOE credentials");
            return false;
        }
        else if ( trim(document.getElementById("txtGalaxyUserID").value).length != 0 &&
                  trim(document.getElementById("txtGalaxyPwd").value).length == 0 )
        {
            alert("Please enter Galaxy password");
            return false;
        }
        else if ( trim(document.getElementById("txtDOEUserID").value).length != 0 &&
                  trim(document.getElementById("txtDOEPwd").value).length == 0 )
        {
            alert("Please enter DOE password");
            return false;
        }
    }
    // Location is optional, but when present it must be a 6-character code.
    if ( trim(document.getElementById("txtLocation").value).length !=0 &&
         trim(document.getElementById("txtLocation").value).length != 6 )
    {
        alert("Location Must have 6 Characters")
        return false;
    }
    // Security rule: reject a Galaxy password that is just the user id
    // reversed (case-insensitive); openReverseLogonMessage() shows the
    // explanation dialog.
    if (trim(document.getElementById("txtGalaxyUserID").value) != "" && trim(document.getElementById("txtGalaxyPwd").value) != "" &&
        (
            trim(document.getElementById("txtGalaxyUserID").value).toUpperCase() ==
            trim(document.getElementById("txtGalaxyPwd").value).toUpperCase().split('').reverse().join('')
        )
       )
    {
        //alert("Password cannot be the reverse of user id, Please use DOE User ID and password to login.");
        /*
        document.getElementById("txtGalaxyUserID").value = "";
        document.getElementById("txtGalaxyPwd").value = "";
        document.getElementById("txtDOEUserID").focus();
        */
        openReverseLogonMessage();
        //if (!bInRemainAttempt) {
        //
        //}
        return false;
    }
    return true;
}
function CheckForEnterKey()
{
    // Keyboard handler (IE global `event` object): on Enter, stop the
    // event from bubbling and run the full logon validation; any other
    // key is ignored.
    if (event.keyCode != 13)
    {
        return false;
    }
    event.cancelBubble = true;
    return ValidateLogon();
}
function openSettingsPopup()
{
    // Shows the logon-settings dialog, seeded with the currently selected
    // application and region; its result is handled by
    // processAppReturnedValue when the dialog closes.
    var currentApp = document.getElementById("slcApp").value;
    var currentRegion = document.getElementById("slcRegion").value;
    var settingsUrl = "/library/aspnet/galaxysecurity/LogonSettings.aspx?Application=" + currentApp + "&Region=" + currentRegion;
    showModalIpadDialog({
        url: settingsUrl,
        height: 250,
        width: 360,
        resizable: false,
        scrolling: 'yes',
        title: 'Applications',
        position: 'center',
        dialogArguments: null,
        onClose: processAppReturnedValue
    });
}
function processAppReturnedValue(retObj) {
    // Applies the application/region chosen in the settings dialog:
    // stores the codes in the hidden selects, rewrites the environment
    // caption, and reloads the status graphs if the region changed.
    // Any error (e.g. jQuery missing) is deliberately swallowed.
    try {
        // IE7+ (and non-IE) deliver the result via this.returnValue;
        // older IE passes it as the callback argument.
        var result;
        if (!$.browser.msie || ($.browser.msie && $.browser.version >= 7)) {
            result = this.returnValue;
        } else {
            result = retObj;
        }
        if (typeof (result) == "undefined") {
            return false;
        }
        var appField = document.getElementById("slcApp");
        var regionField = document.getElementById("slcRegion");
        var envLabel = document.getElementById("lblEnvironment");
        appField.value = result.application;
        // Fixed captions for the active application codes; any other code
        // falls back to the description supplied by the dialog.
        var appCaptions = {
            "G": "my Galaxy",
            "C": "Central Offices",
            "J": "Reorganization Plan",
            "B": "JT Bulk PUSH",
            "E": "Reassignment Tickler Administration",
            "F": "OSI Case Search"
        };
        if (appCaptions.hasOwnProperty(appField.value)) {
            envLabel.innerText = appCaptions[appField.value];
        } else {
            envLabel.innerText = result.applicationDesc;
        }
        // A region change means the status images point at a different
        // database, so refresh the graphs.
        if (regionField.value != result.region) {
            regionField.value = result.region;
            LoadGraphs();
        } else {
            regionField.value = result.region;
        }
        // Append the human-readable region suffix to the caption; codes
        // outside this table get no suffix.
        var regionSuffixes = {
            "P": " (Production Region)",
            "T": " (Training Region)",
            "D": " (Development Region)",
            "Q": " (QA Region)",
            "B": " (Budgeting Region)",
            "F": " (Field Test Region)",
            "A": " (Accrual Region)",
            "G": " (Field Test2 Region)",
            "C": " (Prod Test Region)"
        };
        if (regionSuffixes.hasOwnProperty(regionField.value)) {
            envLabel.innerText = envLabel.innerText + regionSuffixes[regionField.value];
        }
    } catch (e) { }
}
// Top-level Flash-detection hook. `flashDetected` and `flashAnchor` are
// expected to be defined elsewhere on the page (presumably by a Flash
// detection script -- TODO confirm). If Flash is absent we surface an
// installation prompt in the anchor, otherwise clear it; any error
// (e.g. the globals not existing) is deliberately swallowed.
try
{
    //alert(flashDetected);
    if(flashDetected == "N")
    {
        flashAnchor.innerText = " New features of myGalaxy require installation of Macromedia Flash player.  Please click here to complete installation. ";
    }
    else
    {
        flashAnchor.innerText = "";
    }
}
catch(e)
{
}
function installFlash()
{
    // Navigates the current window to Adobe's Flash Player download page.
    var downloadUrl = "https://www.macromedia.com/shockwave/download/download.cgi?P1_Prod_Version=ShockwaveFlash";
    window.location.href = downloadUrl;
}
function isIE() {
    // Returns the IE/Edge major version number, or false for any other
    // browser. Detection mirrors the classic userAgent tokens:
    //   "MSIE <ver>"            -> IE 10 and older
    //   "Trident/... rv:<ver>"  -> IE 11
    //   "Edge/<ver>"            -> legacy Edge (IE 12+)
    var ua = window.navigator.userAgent;
    // Parses the integer version that follows `marker` in the UA string.
    var versionAfter = function (marker) {
        var at = ua.indexOf(marker);
        return parseInt(ua.substring(at + marker.length, ua.indexOf('.', at)), 10);
    };
    if (ua.indexOf('MSIE ') > 0) {
        return versionAfter('MSIE ');
    }
    if (ua.indexOf('Trident/') > 0) {
        return versionAfter('rv:');
    }
    if (ua.indexOf('Edge/') > 0) {
        return versionAfter('Edge/');
    }
    return false;
}
function GetRemainingAttempts()
{
    // Runs the full client-side logon flow: browser check, form
    // validation, a synchronous AJAX credential check, and the
    // "associate your Outlook ID" reminder. Returns true to allow the
    // form submit, false to block it.
    var isChrome = /Chrome/.test(navigator.userAgent) && /Google Inc/.test(navigator.vendor);
    if (!isIE()) {
        alert("Galaxy is currently supported in Internet Explorer. Please close your browser and reload with Internet Explorer.");
        return false;
    }
    var isValid = false;
    //bInRemainAttempt = true;
    isValid = ValidateLogon();
    //bInRemainAttempt = false;
    if ( isValid == false )
        return false;
    var warning;
    var returnValue = "";
    // Synchronous server call. The response is array-like in .value;
    // indices used below: [0] authenticated flag, [1] message,
    // [2] remaining attempts, [7] resolved user id, [8] password-expiry
    // warning -- inferred from the usage here, TODO confirm against
    // Aajx_Default.GetUserData.
    returnValue = MyGalaxy.library.aspnet.Ajax.Aajx_Default.GetUserData(document.getElementById("txtGalaxyUserID").value, document.getElementById("txtGalaxyPwd").value, document.getElementById("txtDOEUserID").value, document.getElementById("txtDOEPwd").value, document.getElementById("slcRegion").value, document.getElementById("txtLocation").value);
    //var temp = new Array();
    //temp = returnValue.value.split(",");
    //alert(returnValue.value);
    //alert(returnValue.value.length);
    //return false;
    // A response with <= 2 elements signals an error; element [1] then
    // carries the error text.
    if ( null == returnValue || typeof(returnValue) == undefined || returnValue.value.length <= 2 )
    {
        //It means there is error
        //alert(temp[1]);
        alert(returnValue.value[1]);
        return false;
    }
    // Biswajit
    document.getElementById("txtGalaxyUserID").value = returnValue.value[7]; //User Id
    //End Biswajit
    var authenticated = trim(returnValue.value[0]); //trim(temp[0]);
    var authenticationMessage = returnValue.value[1]; //temp[1]
    var numAllowedAttempts = returnValue.value[2]; //temp[2];
    if ( authenticated.toLowerCase() == "false" || authenticated == false )
    {
        window.alert(authenticationMessage);
        return false;
    }
    // Re-serialize the whole response as a tab-delimited string into the
    // hidden field, for the server to read back on postback.
    var fullString = "";
    var intCOunter = 0;
    for ( intCOunter = 0; intCOunter < returnValue.value.length ; intCOunter++ )
    {
        if ( fullString == "" )
        {
            /*if ( returnValue.value[intCOunter] == "" )
                fullString = "";
            else*/
            fullString = returnValue.value[intCOunter];
        }
        else
        {
            /*if ( returnValue.value[intCOunter] == "" )
                fullString = fullString + String.fromCharCode(9) + "";
            else*/
            fullString = fullString + String.fromCharCode(9) + returnValue.value[intCOunter];
        }
    }
    document.getElementById('hdnUserValues').value = fullString;
    var intNum = 0;
    intNum = numAllowedAttempts;
    //Display the Pwd warning
    //alert(trim(temp[8]));
    //if(trim(temp[8]) != ""){
    if(trim(returnValue.value[8]) != ""){
        //alert(trim(temp[8]));
        alert(trim(returnValue.value[8]));
    }
    //try { intNum =Integer.parseInt(numAllowedAttempts); }
    //catch(NumberFormatException e) { }
    // intNum > 1 means the user still has logins left without an Outlook
    // ID on file: nag them with the remaining count; OK sends them back
    // to the logon page, Cancel proceeds with the submit.
    if ( intNum > 1 )
    {
        if (intNum > 2 )
        {
            var dummy = intNum - 2 + ""
            warning = "You have not yet associated your Outlook ID with your Galaxy user ID. You may login only " + dummy + " more times without doing so. Do you wish to enter your Outlook ID now?";
        }
        else if (intNum == 2 )
        {
            warning = "You have not yet associated your Outlook ID with your Galaxy user ID. From next time, you may not be able to login without doing so. Do you wish to enter your Outlook ID now?";
        }
        warning = warning + "\n\nPlease click 'OK' to go back to the logon screen and enter your Outlook ID.";
        warning = warning + "\nPlease click 'Cancel' to log on without entering your Outlook ID.";
        var answer = false;
        answer = window.confirm(warning);
        if ( answer == true )
        {
            window.location.href="default.aspx";
            return false;
        }
        else
        {
            return true;
        }
    }
    else
    {
        /*commented out till further notice...Feb 01 2007
        var isverified=VerifySSNEntered();
        if (isverified)
            return true;
        else
        {
            return false;
        }*/
        return true;
    }
}
// Verifies that the user has registered the last 4 digits of their SSN.
// Looks the user up by Galaxy ID when present, otherwise by DOE ID, then
// asks the server (AjaxPro) whether the SSN code is already on file. If
// not, a modal dialog prompts for it and the dialog result is returned.
// NOTE(review): when the dialog returns undefined this falls through and
// implicitly returns undefined (falsy) — original behavior preserved.
function VerifySSNEntered()
{
    var flag;
    var uid;
    var dbconnect = document.getElementById("slcRegion").value;
    if (document.getElementById("txtGalaxyUserID").value.length > 0)
    {
        uid = document.getElementById("txtGalaxyUserID").value;
        flag = "G"; // Galaxy user id
    }
    else
    {
        uid = document.getElementById("txtDOEUserID").value;
        flag = "D"; // DOE user id
    }
    var returnValue = MyGalaxy.library.aspnet.Ajax.Aajx_Default.ajax_CheckSSNCode(uid, flag, dbconnect);
    if (returnValue.value == true)
        return true;
    else
    {
        var returnValue = window.showModalDialog("library/aspnet/GalaxySecurity/SSNCodeEntry.aspx?userid=" + uid + "&Flag=" + flag + "&DBConnect=" + dbconnect, null, "dialogHeight: 200px; dialogWidth: 350px;center: Yes; help: No; scroll:No;resizable: No; status: No;");
        // Check if the last 4 digits of the SSN have been entered.
        if (typeof(returnValue) != "undefined")
        {
            if (returnValue.result.value != true)
            {
                var warning = "For security reasons, log on to the system is not permitted without entering the last 4 digits of your SSN";
                // BUG FIX: the original `warning + "\n..."` statement was a
                // no-op expression (not an assignment); `+=` was intended.
                warning += "\nPlease click 'Log On' to retry entering the SSN ";
                warning += "\nError: " + returnValue.result.value;
                window.alert(warning);
            }
            return returnValue;
        }
    }
}
// Points both System Status chart images at their ASPX chart generators,
// passing the currently selected region as the DBConnect parameter.
function LoadGraphs()
{
    var region = document.getElementById('slcRegion').value;
    document.getElementById('imgSystemStatus').src = "NumActiveUsersChart.aspx?DBConnect=" + region;
    document.getElementById('imgBatchStatus').src = "BatchStatusChart.aspx?DBConnect=" + region;
}
// Grows the given iframe so its full document is visible without
// scrolling; a 15px margin avoids clipping the last line.
function resizeIframe (iframeWindowId)
{
    var frame = document.getElementById(iframeWindowId);
    var contentHeight = frame.contentWindow.document.body.scrollHeight;
    frame.style.height = (contentHeight + 15) + "px";
}
// Positions the "gxyHPImageTxt" overlay relative to the Galaxy banner
// image by walking up the offsetParent chain to get the absolute offset.
function positionImageDiv()
{
    var divImgTxt = document.getElementById("gxyHPImageTxt");
    var galImg = document.getElementById("imgGalaxyImage");
    var leftpos = 0;
    var toppos = 0;
    // Accumulate offsets up to (and including) BODY. Declared with `var`
    // (the original leaked `aTag` as a global) and guarded against a null
    // offsetParent to avoid a runtime error on detached elements.
    var aTag = divImgTxt;
    while ((aTag = aTag.offsetParent) != null) {
        leftpos += aTag.offsetLeft;
        toppos += aTag.offsetTop;
        if (aTag.tagName == "BODY") break;
    }
    // BUG FIX: CSS lengths require units — bare numbers assigned to
    // style.left/style.top are ignored in standards mode. Append "px".
    divImgTxt.style.left = (galImg.offsetLeft + leftpos + 270) + "px";
    divImgTxt.style.top = ((galImg.offsetHeight / 2) + 35) + "px";
}
</script>
</head>
<body class="default" id="GalaxyHP" onload="resizeIframe('fAnnouncement');resizeIframe('fSysMessage');resizeIframe('fImageTxt');positionImageDiv();">
<form name="frmLogon" method="post" action="./" id="frmLogon" onkeypress="javascript:CheckForEnterKey();">
<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="/wEPDwUKMTcyNDM0MjkxNg9kFgICBQ9kFgQCGw8PFgIeBFRleHQFHW15IEdhbGF4eSAoUHJvZHVjdGlvbiBSZWdpb24pZGQCHQ8PZBYCHgdvbmNsaWNrBR5yZXR1cm4gR2V0UmVtYWluaW5nQXR0ZW1wdHMoKTtkGAEFHl9fQ29udHJvbHNSZXF1aXJlUG9zdEJhY2tLZXlfXxYBBQtyYWRpb0dhbGF4efAM+BA2BYx0SzOfzSlWgu6QJh8X9Lj2wx47HlHafbst" />
<script type="text/javascript" src="/ajaxpro/prototype.ashx"></script>
<script type="text/javascript" src="/ajaxpro/core.ashx"></script>
<script type="text/javascript" src="/ajaxpro/ms.ashx"></script>
<script type="text/javascript" src="/ajaxpro/converter.ashx"></script>
<script type="text/javascript" src="/ajaxpro/MyGalaxy.library.aspnet.Ajax.Aajx_Default,App_Code.ashx"></script>
<input type="hidden" name="__VIEWSTATEGENERATOR" id="__VIEWSTATEGENERATOR" value="CA0B0334" />
<table id="tblLogon" cellSpacing="0" cellPadding="0" border="0">
<tr>
<td class="titleBorderBottom" colspan=2>
<!--<span class="title1">my</span><span class="title2">Galaxy</span>-->
<span class="title2">Table of Organization </span>
</td>
</tr>
<tr id="tblLogon_FirstRow" width="100%">
<td>
<table id="tblTop" style="POSITION: static" cellSpacing="0" cellPadding="1" border="0">
<tr id="tblTop_FirstRow">
<td valign="top">
<table id="tblImage" cellSpacing="0" cellPadding="0" border="0" style="WIDTH: 530px">
<tr height="3"> <!--blank spacer row-->
<td height="3"></td>
</tr>
<tr valign="top">
<td valign="top">
<img id="imgGalaxyImage" src="images/myGalaxyLogo4_new.jpg" style="WIDTH: 530px" />
</td>
</tr>
<tr id="tblLogon_SecondRow" width="100%">
<td>
<table id='Table2' cellpadding="0" cellspacing="0" border="0" height="100%" width="100%">
<tr id="Tr1" >
<td style="FONT-SIZE: 11px; FONT-FAMILY: tAHOMA" valign="top">
<table cellpadding="0" cellspacing="0" border="0" height="100%" width="100%">
<tr style="height:15px;">
<td class="content1Title" >
DFPM Announcements
</td>
</tr>
<tr>
<td class="contentText">
<iframe id="fAnnouncement" src="library\aspnet\GxyBroadcast\DBOR_Broadcast.aspx?DBConnect=P" frameborder=0 height="100%" width="100%" scrolling="no"></iframe>
</td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
</table>
</td>
<td width="100%" valign="top">
<table id="tblTop2" style="POSITION: static" cellSpacing="0" cellPadding="1" border="0">
<tr >
<td valign="top" height="288" >
<table id="Table1" cellSpacing="0" cellPadding="2" border="0">
<tr height="3"> <!--blank spacer row-->
<td colspan="2" height="3"></td>
</tr>
<tr>
<td class="tableHeaderStyle"> </td>
<td class="tableHeaderStyle" colSpan="2"><span id="lblHeader">Access the Galaxy System ...</span></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td colspan="2">
<table cellspacing="0" cellpadding="1" width="100%" border="0">
<TR>
<td width="28%"><span class="label">Logon Using:</span>
</td>
<td width="39%"><input value="radioGalaxy" name="logonChoice" type="radio" id="radioGalaxy" class="radiobutton" onclick="document.getElementById('txtGalaxyUserID').disabled = false; document.getElementById('txtGalaxyUserID').className = 'textbox'; document.getElementById('txtGalaxyPwd').disabled = false; document.getElementById('txtGalaxyPwd').className = 'textbox';" tabindex="8" checked="checked" />
<span class="label">Galaxy User Id</span>
</td>
<td width="33%"><input value="radioDOE" name="logonChoice" type="radio" id="radioDOE" class="radiobutton" onclick="document.getElementById('txtGalaxyUserID').disabled = true; document.getElementById('txtGalaxyUserID').value = ''; document.getElementById('txtGalaxyUserID').className = 'readOnlyText'; document.getElementById('txtGalaxyPwd').disabled = true; document.getElementById('txtGalaxyPwd').value = ''; document.getElementById('txtGalaxyPwd').className = 'readOnlyText';" tabindex="9" disabled="disabled" />
<span class="label">DOE User Id</span>
</td>
</TR>
</table>
</td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td><span id="lblGalaxyUserID" class="label">Galaxy User ID</span></td>
<td align="center"><input name="txtGalaxyUserID" type="text" maxlength="8" id="txtGalaxyUserID" tabindex="1" class="textbox" /></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td><span id="lblGalaxyPwd" class="label">Galaxy Password</span></td>
<td align="center"><input name="txtGalaxyPwd" type="password" maxlength="8" id="txtGalaxyPwd" tabindex="2" class="textbox" /></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td style="HEIGHT: 27px"><span id="lblDOEUserID" class="label">* DOE User ID</span></td>
<td style="HEIGHT: 27px" align="center"><input name="txtDOEUserID" type="text" maxlength="30" id="txtDOEUserID" tabindex="3" class="textbox" /></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td><span id="lblDOEPassword" class="label">* DOE Password</span></td>
<td align="center"><input name="txtDOEPwd" type="password" maxlength="20" id="txtDOEPwd" tabindex="4" class="textbox" /></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td><span id="lblLocation" class="label">Location (optional)</span></td>
<td align="center"><input name="txtLocation" type="text" maxlength="6" id="txtLocation" tabindex="5" class="textbox" /></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td><span id="lblEnvironment" class="label">my Galaxy (Production Region)</span></td>
<td align="center"><input type="submit" name="btnLogon" value="Logon" onclick="return GetRemainingAttempts();" language="javascript" id="btnLogon" tabindex="6" class="button" /></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td class="logolink" colSpan="2"><a id="hlinkSettings" tabindex="7" href="javascript:openSettingsPopup();">Click Here To Change Settings</a></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td colSpan="2"><span id="lblMessage" class="logonMsg">* Please enter your DOE user ID in order to enable future logons to the Galaxy system</span></td>
</tr>
<tr class="logonRowBG">
<td> </td>
<td colSpan="2">
<label id=compView name=compView class="font8_tts" />
</td>
</tr>
<tr>
<td> </td>
<td colSpan="2">
<iframe id="fSysMessage" src="library\aspnet\GxyBroadcast\DBOR_Broadcast.aspx?DBConnect=P&type=1" frameborder=0 height="100%" width="100%" scrolling="no"></iframe>
</td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
<tr id="tblLogon_SecondRow" width="100%">
<td>
<table id='Table3' cellpadding="0" cellspacing="0" border="0" height="100%">
<tr id="Tr3" >
<td style="FONT-SIZE: 11px; FONT-FAMILY: tAHOMA" valign="top" width="643px">
<table cellpadding="0" cellspacing="0" border="0" height="100%">
<tr>
<td class="content2Title">FAQ</td>
</tr>
<tr>
<td class="contentText">
<u><img src="images\question_image.gif"/> RED Question Mark Indicator on School Line Items</u>
<br/><br/>
A new icon <img src="images\question_image.gif"/> now appears in Galaxy to assist the schools in easily identifying items that:
<ul>
<li> items where required or invalid attributes need to be entered.</li>
</ul>
Users can hover their cursor over this icon to see what action is required for each item with the indicator.<br/>
Click <a href="http://intranet.nycboe.net/NR/rdonlyres/86F86DBC-7E7A-4799-BBE5-F5D6D38E48DC/2989/To_Do_List.pdf">here</a> for samples of the new indicator.
</td>
</tr>
<!--Added per Greta 9/16 -->
<tr><td class="contentText"><br>
<P class=contentText style="MARGIN: 0in 0in 0pt"><B>Staffing F –Status <?xml:namespace prefix = o ns = "urn:schemas-microsoft-com:office:office" /><o:p></o:p></B></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><o:p> </o:p></P>
<P class=contentText style="MARGIN: 0in 0in 0pt">Please click <A href="http://www.nycboe.net/AdminOrg/Finance/budget/dbor/dbor_intranet/Galaxy/Galaxy_FY2012/FY12_F_Status_Staffing.pdf" target=_blank>here</A> for guidance on how to staff F-Status people. The "here" link opens the attached document.<o:p></o:p></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><o:p> </o:p></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><B><o:p> </o:p></B></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><B>Latest Screen Updates and Internet Explorer Settings<o:p></o:p></B></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><o:p> </o:p></P>
<P class=contentText style="MARGIN: 0in 0in 0pt">Please make sure that Internet Explorer refresh settings are set to “Every time I visit the webpage”. Please click <A href="http://www.nycboe.net/AdminOrg/Finance/budget/dbor/dbor_intranet/Galaxy/Galaxy_FY2012/Modifying_IE_Settings.pdf" target=_blank>here</A> for instructions. If it is not set to this option, Internet Explorer will serve a cached version of the page and you will not have the latest updates. In other words, cookies will prevent the latest screen updates which will result in the user having to log in and out of Galaxy to see screen data updated.
<FONT style="BACKGROUND-COLOR: yellow">If you are using Internet Explorer version 8 or 9, you may need to enable the Compatibility View setting for
“nycenet.edu” in order to also view Galaxy TO screen updates. Please click <a href="http:\\mygalaxy.nycenet.edu\FAQ\Instructions for Changing the Internet Explorer Compatibility Settings.pdf" target=_blank>here</a> for instructions.</FONT>
<o:p></o:p></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><o:p> </o:p></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><o:p> </o:p></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><B>Prior Year Corrections vs. Service Corrections<o:p></o:p></B></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><o:p> </o:p></P>
<P class=contentText style="MARGIN: 0in 0in 0pt"><SPAN style="COLOR: black">Please note that prior year corrections through Galaxy are for H-bank and Z-bank staff only. Service corrections can only be done in Galaxy for the current school year for Q-bank staff. Prior year service corrections for Q-bank staff must be done in NYCAPS.<o:p></o:p></SPAN></P>
</td></tr>
</table>
</td>
</tr>
</table>
</td>
<td>
<table id='Table33' cellpadding="0" cellspacing="0" border="0" height="100%">
<tr>
<td>
<table id="tblImages" cellSpacing="0" cellPadding="0">
<tr>
<td class="content3Title" colspan=2>System Status Activity</td>
</tr>
<tr class="contentText">
<td Class="systemStatusTitle"><span id="lblBatchStatus" class="systemStatusTitle"> Batch Status </span></td>
<td Class="systemStatusTitle"><span id="lblSystemStatus" class="systemStatusTitle"> System Volume</span></td>
</tr>
<tr>
<td><IMG id="imgBatchStatus"></td>
<td><IMG id="imgSystemStatus"></td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
</table>
<!-- following is taken "as is" from logon.asp --><A id="flashAnchor" style="BORDER-RIGHT: thin solid; BORDER-TOP: thin solid; FONT-WEIGHT: bold; FONT-SIZE: 14px; BORDER-LEFT: thin solid; CURSOR: hand; COLOR: red; BORDER-BOTTOM: thin solid; FONT-FAMILY: tahoma"
onclick="installFlash()"></A>
<!-- <A STYLE="FONT-WEIGHT: normal; FONT-SIZE: x-small; LINE-HEIGHT: normal; FONT-STYLE: normal; FONT-VARIANT: normal"
HREF="#" onclick="window.showModalDialog('advisory.asp');">Show current
Advisory / News / Alerts. </A> --><input name="slcApp" type="hidden" id="slcApp" value="G" />
<input name="slcRegion" type="hidden" id="slcRegion" value="P" /><input name="flashEnabled" type="hidden" id="flashEnabled" value="N" />
<input name="hdnUserValues" type="hidden" id="hdnUserValues" />
<div id="gxyHPImageTxt" style="position:absolute;top:0;left:-1000px;width:300px;">
<iframe id="fImageTxt" src="library/aspnet/GxyBroadcast/DBOR_Broadcast.aspx?DBConnect=P&type=2" frameborder=0 height="100%" width="100%" scrolling="no" allowTransparency="true"></iframe>
</div>
<div id="divLogon" style="display: none; border: 2px solid #abadb3; width:480px;height:170px;z-index:101; background-color:white">
<table cellpadding="5" cellspacing="0" width="99%">
<tr>
<td>
<div>
<img id="imgHeader" src="images/exclamation.png" alt="Logon flag" />
<label class="font11text_ul_header">
Logon Message</label>
</div>
<hr />
</td>
</tr>
<tr>
<td>
<label class="font11text_ul_content">You are no longer able to sign into Galaxy with your Galaxy ID and Galaxy Password. <b>Please sign-in using your DOE User ID and DOE Password.</b> This is the same ID and password used to access your Outlook e-mail. If you need to reset your DOE Password, please click <a href="https://idm.nycenet.edu/selfservice/Login.do" target="_blank" onclick="ClosePopup();" style="text-decoration:underline;cursor:pointer;color:blue">here</a>, or contact the DOE Help Desk at 718-935-5100. We are sorry for the inconvenience and thank you for your cooperation.</label>
</td>
</tr>
<tr>
<td align="center">
<input id="btnOk" type="button" value="Ok" onclick="ClosePopup();" style="width: 75px" />
</td>
</tr>
</table>
</div>
</form>
<OBJECT id="blank" codeBase="https://fpdownload.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=7,0,0,0"
height="0" width="0" align="middle" classid="clsid:d27cdb6e-ae6d-11cf-96b8-444553540000"
VIEWASTEXT>
<PARAM NAME="_cx" VALUE="26">
<PARAM NAME="_cy" VALUE="26">
<PARAM NAME="FlashVars" VALUE="">
<PARAM NAME="Movie" VALUE="/blank.swf">
<PARAM NAME="Src" VALUE="/blank.swf">
<PARAM NAME="WMode" VALUE="Window">
<PARAM NAME="Play" VALUE="-1">
<PARAM NAME="Loop" VALUE="-1">
<PARAM NAME="Quality" VALUE="High">
<PARAM NAME="SAlign" VALUE="">
<PARAM NAME="Menu" VALUE="-1">
<PARAM NAME="Base" VALUE="">
<PARAM NAME="AllowScriptAccess" VALUE="sameDomain">
<PARAM NAME="Scale" VALUE="ShowAll">
<PARAM NAME="DeviceFont" VALUE="0">
<PARAM NAME="EmbedMovie" VALUE="0">
<PARAM NAME="BGColor" VALUE="FFFFFF">
<PARAM NAME="SWRemote" VALUE="">
<PARAM NAME="MovieData" VALUE="">
<PARAM NAME="SeamlessTabbing" VALUE="1">
<PARAM NAME="Profile" VALUE="0">
<PARAM NAME="ProfileAddress" VALUE="">
<PARAM NAME="ProfilePort" VALUE="0">
<PARAM NAME="AllowNetworking" VALUE="all">
<PARAM NAME="AllowFullScreen" VALUE="false">
<embed src="/blank.swf" style="WIDTH: 0px; HEIGHT: 0px; BACKGROUND-COLOR:
#ffffff" quality="high" name="blank" align="middle" allowScriptAccess="sameDomain" type="application/x-shockwave-flash"
pluginspage="https://www.macromedia.com/go/getflashplayer" />
</OBJECT>
<script language="vbscript">
' Detect whether the Flash ActiveX control is installed (IE only) and
' record the result in the hidden flashEnabled form field, then load the
' System Status chart images. Errors (e.g. ActiveX blocked) are swallowed
' by "on error resume next".
on error resume next
dim useFlash
dim flashDetected
' CreateObject fails when Flash is absent; with error suppression on,
' useFlash is then left Empty (falsy).
useFlash = NOT IsNull(CreateObject("ShockwaveFlash.ShockwaveFlash"))
if useFlash then
frmLogon.flashEnabled.value = "Y"
flashDetected = "Y"
else
flashDetected = "N"
frmLogon.flashEnabled.value = "N"
end if
' LoadGraphs is the javascript function defined earlier in this page.
LoadGraphs
</script>
<script language="javascript" type="text/javascript">
/*
jQuery.fn.center = function ($) {
var w = $(window);
this.css({
'position': 'absolute',
'top': Math.abs(((w.height() - this.outerHeight()) / 2) + w.scrollTop()),
'left': Math.abs(((w.width() - this.outerWidth()) / 2) + w.scrollLeft())
});
return this;
}
*/
// Shows the reverse-logon message popup, centered via the "center" CSS
// class. Stops the triggering event from bubbling so document-level
// handlers don't immediately dismiss the popup.
function openReverseLogonMessage() {
    event.cancelBubble = true; // IE event model; relies on the global `event`
    // Dead code removed: the former eY/eX/top/left locals fed a jQuery
    // absolute-positioning branch that was commented out; the popup is
    // centered with the "center" class instead.
    $("#divLogon").addClass("center");
    $('#divLogon').css("display", "block");
}
// Hides the logon popup, clears the (disabled) Galaxy credential fields,
// and moves focus to the DOE user-id box so the user can sign in.
function ClosePopup() {
    var byId = function (id) { return document.getElementById(id); };
    byId("divLogon").style.display = "none";
    byId("txtGalaxyUserID").value = "";
    byId("txtGalaxyPwd").value = "";
    byId("txtDOEUserID").focus();
}
/*
var version = navigator.appVersion.match(/MSIE ([\d.]+)/)[1];
if (navigator.appName == "Microsoft Internet Explorer") {
if (version > 7) {
//alert("You are attempting to use the myGalaxy web application with Internet Explorer Version 9 and do not have Compatibility Mode turned on. You may experience problems with certain functions within myGalaxy if you do not turn this setting on.");
document.getElementById("compView").innerText = "* You are attempting to use the myGalaxy web application with Internet Explorer version\n\r " + version.toString() + " and do not have Compatibility View turned on. You may experience problems with\n\r certain functions. \n\r \n\r Please check the FAQ section on the homepage for more instructions on how to turn on\n\r Compatibility View. ";
}
}
*/
</script>
</body>
</HTML>
|
todor-dk/HTML-Renderer
|
Source/Testing/HtmlRenderer.ExperimentalApp/Data/Files/nynet/3A7ABD70443A1E1395733EC91E830520.html
|
HTML
|
bsd-3-clause
| 44,432
|
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef URL_URL_UTIL_H_
#define URL_URL_UTIL_H_
#include <string>
#include "base/string16.h"
#include "url/url_canon.h"
#include "url/url_parse.h"
namespace url_util {
// Init ------------------------------------------------------------------------
// Initialization is NOT required, it will be implicitly initialized when first
// used. However, this implicit initialization is NOT threadsafe. If you are
// using this library in a threaded environment and don't have a consistent
// "first call" (an example might be calling "AddStandardScheme" with your
// special application-specific schemes) then you will want to call initialize
// before spawning any threads.
//
// It is OK to call this function more than once, subsequent calls will simply
// "noop", unless Shutdown() was called in the mean time. This will also be a
// "noop" if other calls to the library have forced an initialization
// beforehand.
void Initialize();
// Cleanup is not required, except some strings may leak. For most user
// applications, this is fine. If you're using it in a library that may get
// loaded and unloaded, you'll want to unload to properly clean up your
// library.
void Shutdown();
// Schemes --------------------------------------------------------------------
// Adds an application-defined scheme to the internal list of "standard" URL
// schemes. This function is not threadsafe and can not be called concurrently
// with any other url_util function. It will assert if the list of standard
// schemes has been locked (see LockStandardSchemes).
void AddStandardScheme(const char* new_scheme);
// Sets a flag to prevent future calls to AddStandardScheme from succeeding.
//
// This is designed to help prevent errors for multithreaded applications.
// Normal usage would be to call AddStandardScheme for your custom schemes at
// the beginning of program initialization, and then LockStandardSchemes. This
// prevents future callers from mistakenly calling AddStandardScheme when the
// program is running with multiple threads, where such usage would be
// dangerous.
//
// We could have had AddStandardScheme use a lock instead, but that would add
// some platform-specific dependencies we don't otherwise have now, and is
// overkill considering the normal usage is so simple.
void LockStandardSchemes();
// Locates the scheme in the given string and places it into |found_scheme|,
// which may be NULL to indicate the caller does not care about the range.
//
// Returns whether the given |compare| scheme matches the scheme found in the
// input (if any). The |compare| scheme must be a valid canonical scheme or
// the result of the comparison is undefined.
bool FindAndCompareScheme(const char* str,
int str_len,
const char* compare,
url_parse::Component* found_scheme);
bool FindAndCompareScheme(const char16* str,
int str_len,
const char* compare,
url_parse::Component* found_scheme);
// Convenience overload: locate and compare the scheme of a std::string by
// delegating to the raw pointer/length version.
inline bool FindAndCompareScheme(const std::string& str,
                                 const char* compare,
                                 url_parse::Component* found_scheme) {
  const int len = static_cast<int>(str.size());
  return FindAndCompareScheme(str.data(), len, compare, found_scheme);
}
// Convenience overload: locate and compare the scheme of a string16 by
// delegating to the raw pointer/length version.
inline bool FindAndCompareScheme(const string16& str,
                                 const char* compare,
                                 url_parse::Component* found_scheme) {
  const int len = static_cast<int>(str.size());
  return FindAndCompareScheme(str.data(), len, compare, found_scheme);
}
// Returns true if the given string represents a standard URL. This means
// that the scheme is in the list of known standard schemes.
bool IsStandard(const char* spec,
const url_parse::Component& scheme);
bool IsStandard(const char16* spec,
const url_parse::Component& scheme);
// TODO(brettw) remove this. This is a temporary compatibility hack to avoid
// breaking the WebKit build when this version is synced via Chrome.
inline bool IsStandard(const char* spec, int spec_len,
                       const url_parse::Component& scheme) {
  (void)spec_len;  // Unused; signature kept only for source compatibility.
  return IsStandard(spec, scheme);
}
// URL library wrappers -------------------------------------------------------
// Parses the given spec according to the extracted scheme type. Normal users
// should use the URL object, although this may be useful if performance is
// critical and you don't want to do the heap allocation for the std::string.
//
// As with the url_canon::Canonicalize* functions, the charset converter can
// be NULL to use UTF-8 (it will be faster in this case).
//
// Returns true if a valid URL was produced, false if not. On failure, the
// output and parsed structures will still be filled and will be consistent,
// but they will not represent a loadable URL.
bool Canonicalize(const char* spec,
int spec_len,
url_canon::CharsetConverter* charset_converter,
url_canon::CanonOutput* output,
url_parse::Parsed* output_parsed);
bool Canonicalize(const char16* spec,
int spec_len,
url_canon::CharsetConverter* charset_converter,
url_canon::CanonOutput* output,
url_parse::Parsed* output_parsed);
// Resolves a potentially relative URL relative to the given parsed base URL.
// The base MUST be valid. The resulting canonical URL and parsed information
// will be placed in to the given out variables.
//
// The relative need not be relative. If we discover that it's absolute, this
// will produce a canonical version of that URL. See Canonicalize() for more
// about the charset_converter.
//
// Returns true if the output is valid, false if the input could not produce
// a valid URL.
bool ResolveRelative(const char* base_spec,
int base_spec_len,
const url_parse::Parsed& base_parsed,
const char* relative,
int relative_length,
url_canon::CharsetConverter* charset_converter,
url_canon::CanonOutput* output,
url_parse::Parsed* output_parsed);
bool ResolveRelative(const char* base_spec,
int base_spec_len,
const url_parse::Parsed& base_parsed,
const char16* relative,
int relative_length,
url_canon::CharsetConverter* charset_converter,
url_canon::CanonOutput* output,
url_parse::Parsed* output_parsed);
// Replaces components in the given VALID input url. The new canonical URL info
// is written to output and out_parsed.
//
// Returns true if the resulting URL is valid.
bool ReplaceComponents(
const char* spec,
int spec_len,
const url_parse::Parsed& parsed,
const url_canon::Replacements<char>& replacements,
url_canon::CharsetConverter* charset_converter,
url_canon::CanonOutput* output,
url_parse::Parsed* out_parsed);
bool ReplaceComponents(
const char* spec,
int spec_len,
const url_parse::Parsed& parsed,
const url_canon::Replacements<char16>& replacements,
url_canon::CharsetConverter* charset_converter,
url_canon::CanonOutput* output,
url_parse::Parsed* out_parsed);
// String helper functions ----------------------------------------------------
// Compare the lower-case form of the given string against the given ASCII
// string. This is useful for doing checking if an input string matches some
// token, and it is optimized to avoid intermediate string copies.
//
// The versions of this function that don't take a b_end assume that the b
// string is NULL terminated.
bool LowerCaseEqualsASCII(const char* a_begin,
const char* a_end,
const char* b);
bool LowerCaseEqualsASCII(const char* a_begin,
const char* a_end,
const char* b_begin,
const char* b_end);
bool LowerCaseEqualsASCII(const char16* a_begin,
const char16* a_end,
const char* b);
// Unescapes the given string using URL escaping rules.
void DecodeURLEscapeSequences(const char* input, int length,
url_canon::CanonOutputW* output);
// Escapes the given string as defined by the JS method encodeURIComponent. See
// https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/encodeURIComponent
void EncodeURIComponent(const char* input, int length,
url_canon::CanonOutput* output);
} // namespace url_util
#endif // URL_URL_UTIL_H_
|
loopCM/chromium
|
url/url_util.h
|
C
|
bsd-3-clause
| 9,020
|
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
#include "tools/fiddle/examples.h"
REG_FIDDLE(picture_shader, 256, 256, false, 5) {
static void draw_centered(
const char* s, const SkFont& font, SkColor color, SkPoint xy, SkCanvas* c) {
sk_sp<SkTextBlob> b = SkTextBlob::MakeFromString(s, font);
xy -= SkPoint{b->bounds().centerX(), b->bounds().centerY()};
SkPaint p;
p.setColor(color);
c->drawTextBlob(b.get(), xy.x(), xy.y(), p);
}
// Converts polar coordinates (radius r, angle d in degrees) to an SkPoint.
SkPoint from_polar_deg(float r, float d) {
    const float radians = d * 0.017453292519943295;  // degrees -> radians (pi/180)
    return SkPoint{r * cosf(radians), r * sinf(radians)};
}
// Draws a 512x512 color wheel: a white disc, a thick sweep-gradient ring
// (red->yellow->green->cyan->blue->magenta->red), a black "K" at the
// center, and the six channel letters placed around the ring, each drawn
// in its own color. Canvas state is restored on exit.
void draw_wheel(SkCanvas* c) {
    const SkScalar scale = 512;
    SkAutoCanvasRestore autoCanvasRestore(c, true);
    // Move the origin to the center of the 512x512 wheel.
    c->translate(0.5f * scale, 0.5f * scale);
    SkPaint p;
    p.setAntiAlias(true);
    p.setColor(SK_ColorWHITE);
    c->drawCircle(0.0f, 0.0f, scale * 0.475f, p);
    const SkColor sweep_colors[] = {SK_ColorRED, SK_ColorYELLOW, SK_ColorGREEN, SK_ColorCYAN,
                                    SK_ColorBLUE, SK_ColorMAGENTA, SK_ColorRED};
    SkMatrix rot;
    // Rotate the gradient so the sweep starts at 90 degrees.
    rot.setRotate(90.0f);
    p.setShader(SkGradientShader::MakeSweep(0, 0, sweep_colors, nullptr,
                                            SK_ARRAY_COUNT(sweep_colors), 0, &rot));
    // Re-use the same paint in stroke mode to draw the gradient ring on
    // top of the white disc.
    p.setStrokeWidth(0.05f * scale);
    p.setStyle(SkPaint::kStroke_Style);
    c->drawCircle(0.0f, 0.0f, 0.475f * scale, p);
    SkFont f(nullptr, 0.28125f * scale);
    draw_centered("K", f, SK_ColorBLACK, {0.0f, 0.0f}, c);
    // Channel letters sit at radius 0.3*scale; angles are in degrees.
    draw_centered("R", f, SK_ColorRED, from_polar_deg(0.3f * scale, 90), c);
    draw_centered("G", f, SK_ColorGREEN, from_polar_deg(0.3f * scale, 210), c);
    draw_centered("B", f, SK_ColorBLUE, from_polar_deg(0.3f * scale, 330), c);
    draw_centered("C", f, SK_ColorCYAN, from_polar_deg(0.3f * scale, 270), c);
    draw_centered("M", f, SK_ColorMAGENTA, from_polar_deg(0.3f * scale, 30), c);
    draw_centered("Y", f, SK_ColorYELLOW, from_polar_deg(0.3f * scale, 150), c);
}
// Records the color wheel into an SkPicture and tiles it across the
// canvas as a repeating, scaled-down (0.25x), 30-degree-rotated shader.
void draw(SkCanvas* canvas) {
    canvas->clear(SK_ColorWHITE);
    SkMatrix localMatrix;
    localMatrix.setScale(0.25f, 0.25f);
    localMatrix.preRotate(30.0f);
    SkPictureRecorder recorder;
    draw_wheel(recorder.beginRecording(512, 512));
    sk_sp<SkPicture> picture = recorder.finishRecordingAsPicture();
    SkPaint paint;
    paint.setShader(picture->makeShader(
            SkTileMode::kRepeat, SkTileMode::kRepeat, SkFilterMode::kNearest,
            &localMatrix, nullptr));
    canvas->drawPaint(paint);
}
} // END FIDDLE
|
google/skia
|
docs/examples/picture_shader.cpp
|
C++
|
bsd-3-clause
| 2,493
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def generate_initial_block_types(apps, schema_editor):
    """Seed the DashBlockType table with the 'U-Reporters' block type.

    The seeded row needs an owner, so we look for a ``root`` account,
    then a ``root2`` account, and finally create ``root2`` if neither
    exists yet.
    """
    User = apps.get_model("auth", "User")
    root = (User.objects.filter(username="root").first()
            or User.objects.filter(username="root2").first()
            or User.objects.create(username="root2"))

    DashBlockType = apps.get_model("dashblocks", "DashBlockType")
    DashBlockType.objects.get_or_create(
        name="U-Reporters",
        slug="ureporters",
        description="U-Reporters Page",
        has_title=True,
        has_image=True,
        has_rich_text=False,
        has_summary=False,
        has_link=False,
        has_gallery=False,
        has_color=False,
        has_video=False,
        has_tags=False,
        created_by=root,
        modified_by=root,
    )
class Migration(migrations.Migration):
    # Runs after the dashblocks schema tweak so the DashBlockType table
    # (with its audit columns) already exists when the seed row is added.
    dependencies = [
        ('dashblocks', '0002_auto_20140802_2112'),
    ]
    operations = [
        # Data migration: create the initial "U-Reporters" block type.
        migrations.RunPython(generate_initial_block_types),
    ]
|
peterayeni/dash
|
dash/dashblocks/migrations/0003_auto_20140804_0236.py
|
Python
|
bsd-3-clause
| 1,506
|
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include "./vp9_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
// Scratch prediction buffer used while evaluating candidate modes:
// pixel data plus its row stride, and a flag marking whether the buffer
// is currently checked out.
typedef struct {
  uint8_t *data;
  int stride;
  int in_use;
} PRED_BUFFER;
// Real-time variant of the MV-reference scan: searches spatial neighbor
// blocks for candidate motion vectors that point at |ref_frame|, fills
// |mv_ref_list| (MAX_MV_REF_CANDIDATES entries, zeroed first), records
// the mode context used for entropy coding, and clamps the resulting
// vectors to the usable frame area. Returns |const_motion|.
// NOTE(review): unlike the RD path, no temporal (previous-frame)
// candidates are considered here — spatial neighbors only.
static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      const TileInfo *const tile,
                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                      int_mv *mv_ref_list,
                      int mi_row, int mi_col) {
  const int *ref_sign_bias = cm->ref_frame_sign_bias;
  int i, refmv_count = 0;
  // Neighbor offsets to probe, keyed by this block's size.
  const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
  int different_ref_found = 0;
  int context_counter = 0;
  int const_motion = 0;
  // Blank the reference vector list
  vpx_memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
  // The nearest 2 blocks are treated differently
  // if the size < 8x8 we get the mv from the bmi substructure,
  // and we also need to keep a mode count.
  for (i = 0; i < 2; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
                                                   xd->mi_stride].src_mi;
      const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
      // Keep counts for entropy encoding.
      context_counter += mode_2_counter[candidate->mode];
      different_ref_found = 1;
      // ADD_MV_REF_LIST jumps to Done once the list is full.
      if (candidate->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, -1),
                        refmv_count, mv_ref_list, Done);
    }
  }
  // NOTE(review): set unconditionally after scanning the nearest two
  // neighbors; confirm this simplification of the const-motion check is
  // intentional.
  const_motion = 1;
  // Check the rest of the neighbors in much the same way
  // as before except we don't need to keep track of sub blocks or
  // mode counts.
  for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *
                                                    xd->mi_stride].src_mi->mbmi;
      different_ref_found = 1;
      if (candidate->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(candidate->mv[0], refmv_count, mv_ref_list, Done);
    }
  }
  // Since we couldn't find 2 mvs from the same reference frame
  // go back through the neighbors and find motion vectors from
  // different reference frames.
  if (different_ref_found && !refmv_count) {
    for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
      const POSITION *mv_ref = &mv_ref_search[i];
      if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
        const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row
                                              * xd->mi_stride].src_mi->mbmi;
        // If the candidate is INTRA we don't want to consider its mv.
        IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
                                 refmv_count, mv_ref_list, Done);
      }
    }
  }
 Done:
  mi->mbmi.mode_context[ref_frame] = counter_to_context[context_counter];
  // Clamp vectors
  for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
    clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
  return const_motion;
}
// Full-pixel motion search followed (conditionally) by sub-pixel refinement.
// On return, |tmp_mv| holds the found motion vector and |*rate_mv| its bit
// cost. Returns non-zero iff the rate cost of the new MV alone does not
// already exceed |best_rd_sofar| — i.e. whether the sub-pel step was run and
// the NEWMV candidate is worth evaluating further.
// Side effects: temporarily swaps the reference planes for a scaled
// reference (restored before returning) and clamps/restores the MV search
// range limits on |x|.
static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int_mv *tmp_mv, int *rate_mv,
                                  int64_t best_rd_sofar) {
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
  const int step_param = cpi->sf.mv.fullpel_search_step_param;
  const int sadpb = x->sadperbit16;
  MV mvp_full;
  const int ref = mbmi->ref_frame[0];
  const MV ref_mv = mbmi->ref_mvs[ref][0].as_mv;
  int dis;
  int rate_mode;
  // Save the MV search range; vp9_set_mv_search_range() below narrows it.
  const int tmp_col_min = x->mv_col_min;
  const int tmp_col_max = x->mv_col_max;
  const int tmp_row_min = x->mv_row_min;
  const int tmp_row_max = x->mv_row_max;
  int rv = 0;
  int cost_list[5];
  const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi,
                                                                        ref);
  if (scaled_ref_frame) {
    int i;
    // Swap out the reference frame for a version that's been scaled to
    // match the resolution of the current frame, allowing the existing
    // motion search code to be used without additional modifications.
    for (i = 0; i < MAX_MB_PLANE; i++)
      backup_yv12[i] = xd->plane[i].pre[0];
    vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
  }
  vp9_set_mv_search_range(x, &ref_mv);
  assert(x->mv_best_ref_index[ref] <= 2);
  if (x->mv_best_ref_index[ref] < 2)
    mvp_full = mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
  else
    mvp_full = x->pred_mv[ref];
  // Convert from 1/8-pel to full-pel units for the full-pixel search.
  mvp_full.col >>= 3;
  mvp_full.row >>= 3;
  vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
                        cond_cost_list(cpi, cost_list),
                        &ref_mv, &tmp_mv->as_mv, INT_MAX, 0);
  // Restore the original search range.
  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;
  // calculate the bit cost on motion vector (back in 1/8-pel units)
  mvp_full.row = tmp_mv->as_mv.row * 8;
  mvp_full.col = tmp_mv->as_mv.col * 8;
  *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv,
                             x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
  rate_mode = cpi->inter_mode_cost[mbmi->mode_context[ref]]
                                  [INTER_OFFSET(NEWMV)];
  // Early out: if the MV + mode rate alone already exceeds the best RD so
  // far, skip the expensive sub-pixel refinement.
  rv = !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) >
         best_rd_sofar);
  if (rv) {
    cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
                                 cpi->common.allow_high_precision_mv,
                                 x->errorperbit,
                                 &cpi->fn_ptr[bsize],
                                 cpi->sf.mv.subpel_force_stop,
                                 cpi->sf.mv.subpel_iters_per_step,
                                 cond_cost_list(cpi, cost_list),
                                 x->nmvjointcost, x->mvcost,
                                 &dis, &x->pred_sse[ref], NULL, 0, 0);
  }
  if (scaled_ref_frame) {
    int i;
    // Restore the prediction planes we overwrote above.
    for (i = 0; i < MAX_MB_PLANE; i++)
      xd->plane[i].pre[0] = backup_yv12[i];
  }
  return rv;
}
// Models rate and distortion for the luma plane of the current block from
// its prediction variance/SSE (no actual transform/quantization is run).
// Outputs: |*out_rate_sum|, |*out_dist_sum|; |*var_y|/|*sse_y| are set to
// the measured variance/SSE so callers can reuse them for skip checks.
// Also chooses mbmi->tx_size and sets x->skip_txfm[0] (0 = encode,
// 1 = skip all, 2 = skip AC only) based on per-tx-block thresholds.
static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
                              MACROBLOCK *x, MACROBLOCKD *xd,
                              int *out_rate_sum, int64_t *out_dist_sum,
                              unsigned int *var_y, unsigned int *sse_y) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int64_t dc_thr = p->quant_thred[0] >> 6;
  const int64_t ac_thr = p->quant_thred[1] >> 6;
  const uint32_t dc_quant = pd->dequant[0];
  const uint32_t ac_quant = pd->dequant[1];
  unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
                                           pd->dst.buf, pd->dst.stride, &sse);
  int skip_dc = 0;
  *var_y = var;
  *sse_y = sse;
  // Pick a transform size: large sse relative to variance suggests a mostly
  // DC residual, where a bigger transform is preferable.
  if (cpi->common.tx_mode == TX_MODE_SELECT) {
    if (sse > (var << 2))
      xd->mi[0].src_mi->mbmi.tx_size =
          MIN(max_txsize_lookup[bsize],
              tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    else
      xd->mi[0].src_mi->mbmi.tx_size = TX_8X8;
    if (cpi->sf.partition_search_type == VAR_BASED_PARTITION) {
      if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
          cyclic_refresh_segment_id_boosted(xd->mi[0].src_mi->mbmi.segment_id))
        xd->mi[0].src_mi->mbmi.tx_size = TX_8X8;
      else if (xd->mi[0].src_mi->mbmi.tx_size > TX_16X16)
        xd->mi[0].src_mi->mbmi.tx_size = TX_16X16;
    }
  } else {
    xd->mi[0].src_mi->mbmi.tx_size =
        MIN(max_txsize_lookup[bsize],
            tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  }
  // Evaluate if the partition block is a skippable block in Y plane.
  {
    const BLOCK_SIZE unit_size =
        txsize_to_bsize[xd->mi[0].src_mi->mbmi.tx_size];
    const unsigned int num_blk_log2 =
        (b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) +
        (b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]);
    // Per-transform-block averages of SSE and variance.
    const unsigned int sse_tx = sse >> num_blk_log2;
    const unsigned int var_tx = var >> num_blk_log2;
    x->skip_txfm[0] = 0;
    // Check if all ac coefficients can be quantized to zero.
    if (var_tx < ac_thr || var == 0) {
      x->skip_txfm[0] = 2;
      // Check if dc coefficient can be quantized to zero.
      if (sse_tx - var_tx < dc_thr || sse == var)
        x->skip_txfm[0] = 1;
    } else {
      if (sse_tx - var_tx < dc_thr || sse == var)
        skip_dc = 1;
    }
  }
  // Fully skippable: rate is 0, distortion is the full SSE (x16 to move
  // from spatial to transform-domain scaling).
  if (x->skip_txfm[0] == 1) {
    *out_rate_sum = 0;
    *out_dist_sum = sse << 4;
    return;
  }
  // Model the DC contribution (sse - var approximates DC energy).
  if (!skip_dc) {
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                   dc_quant >> (xd->bd - 5), &rate, &dist);
    } else {
      vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                   dc_quant >> 3, &rate, &dist);
    }
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }
  if (!skip_dc) {
    *out_rate_sum = rate >> 1;
    *out_dist_sum = dist << 3;
  } else {
    *out_rate_sum = 0;
    *out_dist_sum = (sse - var) << 4;
  }
  // Model the AC contribution from the variance.
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                                 ac_quant >> (xd->bd - 5), &rate, &dist);
  } else {
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                                 ac_quant >> 3, &rate, &dist);
  }
#else
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                               ac_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  *out_rate_sum += rate;
  *out_dist_sum += dist << 4;
}
// Models rate and distortion for the chroma (U and V) planes, mirroring
// model_rd_for_sb_y(). Planes whose color_sensitivity flag is unset are
// skipped. |*out_rate_sum|/|*out_dist_sum| are reset to 0 here, but
// |*var_y|/|*sse_y| are ACCUMULATED into — callers are expected to have
// initialized them (see vp9_pick_inter_mode).
static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE bsize,
                               MACROBLOCK *x, MACROBLOCKD *xd,
                               int *out_rate_sum, int64_t *out_dist_sum,
                               unsigned int *var_y, unsigned int *sse_y) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  int i;
  *out_rate_sum = 0;
  *out_dist_sum = 0;
  // Planes 1 and 2 are U and V.
  for (i = 1; i <= 2; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];
    const uint32_t dc_quant = pd->dequant[0];
    const uint32_t ac_quant = pd->dequant[1];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
    unsigned int var;
    if (!x->color_sensitivity[i - 1])
      continue;
    var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
                             pd->dst.buf, pd->dst.stride, &sse);
    *var_y += var;
    *sse_y += sse;
    // DC contribution (sse - var approximates DC energy).
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                   dc_quant >> (xd->bd - 5), &rate, &dist);
    } else {
      vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                   dc_quant >> 3, &rate, &dist);
    }
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    *out_rate_sum += rate >> 1;
    *out_dist_sum += dist << 3;
    // AC contribution from the variance.
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
                                   ac_quant >> (xd->bd - 5), &rate, &dist);
    } else {
      vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
                                   ac_quant >> 3, &rate, &dist);
    }
#else
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
                                 ac_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    *out_rate_sum += rate;
    *out_dist_sum += dist << 4;
  }
}
// Checks out the first free entry of the |len|-element pool |p|, marking it
// in use. Returns its index, or -1 when every entry is already taken.
static int get_pred_buffer(PRED_BUFFER *p, int len) {
  int idx = 0;
  while (idx < len) {
    if (p[idx].in_use == 0) {
      p[idx].in_use = 1;
      return idx;
    }
    ++idx;
  }
  return -1;
}
// Returns a buffer previously obtained via get_pred_buffer() to the pool.
// A NULL argument is a no-op.
static void free_pred_buffer(PRED_BUFFER *p) {
  if (p == NULL)
    return;
  p->in_use = 0;
}
// Tests whether the block's prediction is good enough that the residual can
// be skipped entirely ("encode breakout"). If the Y, U and V variance/SSE
// all fall under quantizer-derived thresholds, sets x->skip = 1 and rewrites
// |*rate|/|*dist| to the skip-mode cost model. Otherwise leaves outputs
// untouched.
static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x,
                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
                                 MV_REFERENCE_FRAME ref_frame,
                                 PREDICTION_MODE this_mode,
                                 unsigned int var_y, unsigned int sse_y,
                                 struct buf_2d yv12_mb[][MAX_MB_PLANE],
                                 int *rate, int64_t *dist) {
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
  unsigned int var = var_y, sse = sse_y;
  // Skipping threshold for ac.
  unsigned int thresh_ac;
  // Skipping threshold for dc.
  unsigned int thresh_dc;
  if (x->encode_breakout > 0) {
    // Set a maximum for threshold to avoid big PSNR loss in low bit rate
    // case. Use extreme low threshold for static frames to limit
    // skipping.
    const unsigned int max_thresh = 36000;
    // The encode_breakout input
    const unsigned int min_thresh =
        MIN(((unsigned int)x->encode_breakout << 4), max_thresh);
#if CONFIG_VP9_HIGHBITDEPTH
    const int shift = (xd->bd << 1) - 16;
#endif
    // Calculate threshold according to dequant value.
    thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) >> 3;
#if CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
      thresh_ac = ROUND_POWER_OF_TWO(thresh_ac, shift);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);
    // Adjust ac threshold according to partition size.
    thresh_ac >>=
        8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
    thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
#if CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
      thresh_dc = ROUND_POWER_OF_TWO(thresh_dc, shift);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  } else {
    // Breakout disabled: zero thresholds mean the test below can only pass
    // for a perfect (var == 0, sse == var) prediction.
    thresh_ac = 0;
    thresh_dc = 0;
  }
  // Y skipping condition checking for ac and dc.
  if (var <= thresh_ac && (sse - var) <= thresh_dc) {
    unsigned int sse_u, sse_v;
    unsigned int var_u, var_v;
    // Skip UV prediction unless breakout is zero (lossless) to save
    // computation with low impact on the result
    if (x->encode_breakout == 0) {
      xd->plane[1].pre[0] = yv12_mb[ref_frame][1];
      xd->plane[2].pre[0] = yv12_mb[ref_frame][2];
      vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
    }
    var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf,
                                    x->plane[1].src.stride,
                                    xd->plane[1].dst.buf,
                                    xd->plane[1].dst.stride, &sse_u);
    // U skipping condition checking
    if (((var_u << 2) <= thresh_ac) && (sse_u - var_u <= thresh_dc)) {
      var_v = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf,
                                      x->plane[2].src.stride,
                                      xd->plane[2].dst.buf,
                                      xd->plane[2].dst.stride, &sse_v);
      // V skipping condition checking
      if (((var_v << 2) <= thresh_ac) && (sse_v - var_v <= thresh_dc)) {
        x->skip = 1;
        // The cost of skip bit needs to be added.
        *rate = cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
                                    [INTER_OFFSET(this_mode)];
        // More on this part of rate
        // rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
        // Scaling factor for SSE from spatial domain to frequency
        // domain is 16. Adjust distortion accordingly.
        // TODO(yunqingwang): In this function, only y-plane dist is
        // calculated.
        *dist = (sse << 4);  // + ((sse_u + sse_v) << 4);
        // *disable_skip = 1;
      }
    }
  }
}
// Callback context for estimate_block_intra(): carries the mode under test
// and accumulates modeled rate/distortion across the transform blocks.
struct estimate_block_intra_args {
  VP9_COMP *cpi;
  MACROBLOCK *x;
  PREDICTION_MODE mode;  // intra prediction mode being evaluated
  int rate;              // accumulated modeled rate
  int64_t dist;          // accumulated modeled distortion
};
// Per-transform-block callback (for vp9_foreach_transformed_block_in_plane):
// builds the intra prediction for one tx block of the Y plane and adds its
// modeled rate/distortion to |args|. Temporarily offsets the src/dst buffer
// pointers to the tx block and restores them before returning.
static void estimate_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                                 TX_SIZE tx_size, void *arg) {
  struct estimate_block_intra_args* const args = arg;
  VP9_COMP *const cpi = args->cpi;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
  uint8_t *const src_buf_base = p->src.buf;
  uint8_t *const dst_buf_base = pd->dst.buf;
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  int i, j;
  int rate;
  int64_t dist;
  unsigned int var_y, sse_y;
  // (i, j) is the tx block's position in 4x4 units within the plane block.
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  assert(plane == 0);
  (void) plane;
  p->src.buf = &src_buf_base[4 * (j * src_stride + i)];
  pd->dst.buf = &dst_buf_base[4 * (j * dst_stride + i)];
  // Use source buffer as an approximation for the fully reconstructed buffer.
  vp9_predict_intra_block(xd, block >> (2 * tx_size),
                          b_width_log2_lookup[plane_bsize],
                          tx_size, args->mode,
                          x->skip_encode ? p->src.buf : pd->dst.buf,
                          x->skip_encode ? src_stride : dst_stride,
                          pd->dst.buf, dst_stride,
                          i, j, 0);
  // This procedure assumes zero offset from p->src.buf and pd->dst.buf.
  model_rd_for_sb_y(cpi, bsize_tx, x, xd, &rate, &dist, &var_y, &sse_y);
  // Restore the un-offset buffer pointers.
  p->src.buf = src_buf_base;
  pd->dst.buf = dst_buf_base;
  args->rate += rate;
  args->dist += dist;
}
// Maps [reference frame][mode slot] to the RD-threshold mode index.
// Row 0 = INTRA (DC/H/V/TM), row 1 = LAST, row 2 = GOLDEN
// (NEARESTMV/NEARMV/ZEROMV/NEWMV via INTER_OFFSET()).
static const THR_MODES mode_idx[MAX_REF_FRAMES - 1][4] = {
  {THR_DC, THR_H_PRED, THR_V_PRED, THR_TM},
  {THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV},
  {THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG},
};
// Intra modes considered by the non-RD intra search, in evaluation order.
static const PREDICTION_MODE intra_mode_list[] = {
  DC_PRED, V_PRED, H_PRED, TM_PRED
};
// Non-RD intra mode decision for a block: evaluates a subset of intra modes
// using the modeled rate/distortion from estimate_block_intra(), writes the
// winner into mbmi->mode and returns its RD cost in |*rd_cost|.
// NOTE(review): the loop stops at H_PRED, so TM_PRED is not tested here —
// the in-code comment suggests this limit is intentional/tunable.
void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
                         BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  RD_COST this_rdc, best_rdc;
  PREDICTION_MODE this_mode;
  struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 };
  const TX_SIZE intra_tx_size =
      MIN(max_txsize_lookup[bsize],
          tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  MODE_INFO *const mic = xd->mi[0].src_mi;
  int *bmode_costs;
  // Above/left neighbor modes provide the entropy context for mode cost.
  const MODE_INFO *above_mi = xd->mi[-xd->mi_stride].src_mi;
  const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
  const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
  const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
  bmode_costs = cpi->y_mode_costs[A][L];
  (void) ctx;
  vp9_rd_cost_reset(&best_rdc);
  vp9_rd_cost_reset(&this_rdc);
  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->mv[0].as_int = INVALID_MV;
  mbmi->uv_mode = DC_PRED;
  vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
  // Change the limit of this loop to add other intra prediction
  // mode tests.
  for (this_mode = DC_PRED; this_mode <= H_PRED; ++this_mode) {
    args.mode = this_mode;
    args.rate = 0;
    args.dist = 0;
    mbmi->tx_size = intra_tx_size;
    // Accumulate modeled rate/dist over all tx blocks of the Y plane.
    vp9_foreach_transformed_block_in_plane(xd, bsize, 0,
                                           estimate_block_intra, &args);
    this_rdc.rate = args.rate;
    this_rdc.dist = args.dist;
    this_rdc.rate += bmode_costs[this_mode];
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                             this_rdc.rate, this_rdc.dist);
    if (this_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = this_rdc;
      mbmi->mode = this_mode;
    }
  }
  *rd_cost = best_rdc;
}
// Fixed per-reference-frame rate costs (indexed INTRA/LAST/GOLDEN/ALTREF)
// added to each candidate's rate in the non-RD mode decision.
static const int ref_frame_cost[MAX_REF_FRAMES] = {
    1235, 229, 530, 615,
};
// A (reference frame, prediction mode) pair — one inter candidate.
typedef struct {
  MV_REFERENCE_FRAME ref_frame;
  PREDICTION_MODE pred_mode;
} REF_MODE;
// Number of inter candidates evaluated by vp9_pick_inter_mode().
#define RT_INTER_MODES 8
// Candidate evaluation order: cheap LAST-frame modes first, NEWMV and
// GOLDEN-frame modes later so early exits skip the expensive searches.
static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
    {LAST_FRAME, ZEROMV},
    {LAST_FRAME, NEARESTMV},
    {GOLDEN_FRAME, ZEROMV},
    {LAST_FRAME, NEARMV},
    {LAST_FRAME, NEWMV},
    {GOLDEN_FRAME, NEARESTMV},
    {GOLDEN_FRAME, NEARMV},
    {GOLDEN_FRAME, NEWMV}
};
// TODO(jingning) placeholder for inter-frame non-RD mode decision.
// this needs various further optimizations. to be continued..
//
// Non-RD inter mode decision for one block. Iterates over ref_mode_set[]
// candidates, models rate/distortion without a full transform search,
// optionally searches the interpolation filter, tests encode breakout, and
// finally runs a limited intra search if the best inter cost is poor.
// The winning mode/MV/filter/tx_size are written into the block's
// MB_MODE_INFO and the best cost is returned through |rd_cost|.
void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
                         TileDataEnc *tile_data,
                         int mi_row, int mi_col, RD_COST *rd_cost,
                         BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  struct macroblockd_plane *const pd = &xd->plane[0];
  PREDICTION_MODE best_mode = ZEROMV;
  MV_REFERENCE_FRAME ref_frame, best_ref_frame = LAST_FRAME;
  MV_REFERENCE_FRAME usable_ref_frame;
  TX_SIZE best_tx_size = TX_SIZES;
  INTERP_FILTER best_pred_filter = EIGHTTAP;
  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  RD_COST this_rdc, best_rdc;
  uint8_t skip_txfm = 0, best_mode_skip_txfm = 0;
  // var_y and sse_y are saved to be used in skipping checking
  unsigned int var_y = UINT_MAX;
  unsigned int sse_y = UINT_MAX;
  // Reduce the intra cost penalty for small blocks (<=16x16).
  const int reduction_fac =
      (cpi->sf.partition_search_type == VAR_BASED_PARTITION &&
       bsize <= BLOCK_16X16) ? 2 : 0;
  const int intra_cost_penalty = vp9_get_intra_cost_penalty(
      cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth) >> reduction_fac;
  const int64_t inter_mode_thresh = RDCOST(x->rdmult, x->rddiv,
                                           intra_cost_penalty, 0);
  const int *const rd_threshes = cpi->rd.threshes[mbmi->segment_id][bsize];
  const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
  INTERP_FILTER filter_ref;
  const int bsl = mi_width_log2_lookup[bsize];
  // Chessboard pattern: alternate filter search on/off per block position.
  const int pred_filter_search = cm->interp_filter == SWITCHABLE ?
      (((mi_row + mi_col) >> bsl) +
       get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
  int const_motion[MAX_REF_FRAMES] = { 0 };
  const int bh = num_4x4_blocks_high_lookup[bsize] << 2;
  const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
  // For speed 6, the result of interp filter is reused later in actual encoding
  // process.
  // tmp[3] points to dst buffer, and the other 3 point to allocated buffers.
  PRED_BUFFER tmp[4];
  DECLARE_ALIGNED_ARRAY(16, uint8_t, pred_buf, 3 * 64 * 64);
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED_ARRAY(16, uint16_t, pred_buf_16, 3 * 64 * 64);
#endif
  struct buf_2d orig_dst = pd->dst;
  PRED_BUFFER *best_pred = NULL;
  PRED_BUFFER *this_mode_pred = NULL;
  const int pixels_in_block = bh * bw;
  int reuse_inter_pred = cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready;
  int ref_frame_skip_mask = 0;
  int idx;
  int best_pred_sad = INT_MAX;
  // Set up the scratch prediction buffer pool when predictions are reused.
  if (reuse_inter_pred) {
    int i;
    for (i = 0; i < 3; i++) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth)
        tmp[i].data = CONVERT_TO_BYTEPTR(&pred_buf_16[pixels_in_block * i]);
      else
        tmp[i].data = &pred_buf[pixels_in_block * i];
#else
      tmp[i].data = &pred_buf[pixels_in_block * i];
#endif  // CONFIG_VP9_HIGHBITDEPTH
      tmp[i].stride = bw;
      tmp[i].in_use = 0;
    }
    tmp[3].data = pd->dst.buf;
    tmp[3].stride = pd->dst.stride;
    tmp[3].in_use = 0;
  }
  x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  x->skip = 0;
  // Inherit the interpolation filter from a neighbor where available.
  if (xd->up_available)
    filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
  else if (xd->left_available)
    filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;
  else
    filter_ref = cm->interp_filter;
  // initialize mode decisions
  vp9_rd_cost_reset(&best_rdc);
  vp9_rd_cost_reset(rd_cost);
  mbmi->sb_type = bsize;
  mbmi->ref_frame[0] = NONE;
  mbmi->ref_frame[1] = NONE;
  mbmi->tx_size = MIN(max_txsize_lookup[bsize],
                      tx_mode_to_biggest_tx_size[cm->tx_mode]);
#if CONFIG_VP9_TEMPORAL_DENOISING
  vp9_denoiser_reset_frame_stats(ctx);
#endif
  // Right after a golden refresh, GOLDEN duplicates LAST — don't use it.
  if (cpi->rc.frames_since_golden == 0) {
    cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);
    usable_ref_frame = LAST_FRAME;
  } else {
    usable_ref_frame = GOLDEN_FRAME;
  }
  // Phase 1: gather MV references and prediction buffers per ref frame.
  for (ref_frame = LAST_FRAME; ref_frame <= usable_ref_frame; ++ref_frame) {
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
    x->pred_mv_sad[ref_frame] = INT_MAX;
    frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
    frame_mv[ZEROMV][ref_frame].as_int = 0;
    if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
      int_mv *const candidates = mbmi->ref_mvs[ref_frame];
      const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
      vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
                           sf, sf);
      if (cm->use_prev_frame_mvs)
        vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
                         candidates, mi_row, mi_col, NULL, NULL);
      else
        // Cheaper RT variant; also flags "constant motion" neighborhoods.
        const_motion[ref_frame] = mv_refs_rt(cm, xd, tile_info,
                                             xd->mi[0].src_mi,
                                             ref_frame, candidates,
                                             mi_row, mi_col);
      vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                            &frame_mv[NEARESTMV][ref_frame],
                            &frame_mv[NEARMV][ref_frame]);
      if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8)
        vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
                    ref_frame, bsize);
    } else {
      ref_frame_skip_mask |= (1 << ref_frame);
    }
  }
  // Phase 2: evaluate each (ref frame, mode) candidate.
  for (idx = 0; idx < RT_INTER_MODES; ++idx) {
    int rate_mv = 0;
    int mode_rd_thresh;
    int mode_index;
    int i;
    PREDICTION_MODE this_mode = ref_mode_set[idx].pred_mode;
    if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode)))
      continue;
    ref_frame = ref_mode_set[idx].ref_frame;
    if (!(cpi->ref_frame_flags & flag_list[ref_frame]))
      continue;
    if (const_motion[ref_frame] && this_mode == NEARMV)
      continue;
    // Skip a ref frame whose predicted SAD is much worse than the other's.
    i = (ref_frame == LAST_FRAME) ? GOLDEN_FRAME : LAST_FRAME;
    if (cpi->ref_frame_flags & flag_list[i])
      if (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[i] << 1))
        ref_frame_skip_mask |= (1 << ref_frame);
    if (ref_frame_skip_mask & (1 << ref_frame))
      continue;
    // Select prediction reference frames.
    for (i = 0; i < MAX_MB_PLANE; i++)
      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
    mbmi->ref_frame[0] = ref_frame;
    set_ref_ptrs(cm, xd, ref_frame, NONE);
    // Adaptive RD threshold pruning: doubled when the current best skips.
    mode_index = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
    mode_rd_thresh = best_mode_skip_txfm ?
            rd_threshes[mode_index] << 1 : rd_threshes[mode_index];
    if (rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
                            rd_thresh_freq_fact[mode_index]))
      continue;
    if (this_mode == NEWMV) {
      if (ref_frame > LAST_FRAME) {
        // GOLDEN NEWMV: use a cheap integer projection search with several
        // early exits instead of the full combined search.
        int tmp_sad;
        int dis, cost_list[5];
        if (bsize < BLOCK_16X16)
          continue;
        tmp_sad = vp9_int_pro_motion_estimation(cpi, x, bsize);
        if (tmp_sad > x->pred_mv_sad[LAST_FRAME])
          continue;
        if (tmp_sad + (num_pels_log2_lookup[bsize] << 4) > best_pred_sad)
          continue;
        frame_mv[NEWMV][ref_frame].as_int = mbmi->mv[0].as_int;
        rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv,
                                  &mbmi->ref_mvs[ref_frame][0].as_mv,
                                  x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
        frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
        frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;
        cpi->find_fractional_mv_step(x, &frame_mv[NEWMV][ref_frame].as_mv,
                                     &mbmi->ref_mvs[ref_frame][0].as_mv,
                                     cpi->common.allow_high_precision_mv,
                                     x->errorperbit,
                                     &cpi->fn_ptr[bsize],
                                     cpi->sf.mv.subpel_force_stop,
                                     cpi->sf.mv.subpel_iters_per_step,
                                     cond_cost_list(cpi, cost_list),
                                     x->nmvjointcost, x->mvcost, &dis,
                                     &x->pred_sse[ref_frame], NULL, 0, 0);
      } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                         &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost)) {
        continue;
      }
    }
    // Record the SAD of the LAST NEWMV result to gate the GOLDEN search.
    if (this_mode == NEWMV && ref_frame == LAST_FRAME &&
        frame_mv[NEWMV][LAST_FRAME].as_int != INVALID_MV) {
      const int pre_stride = xd->plane[0].pre[0].stride;
      const uint8_t * const pre_buf = xd->plane[0].pre[0].buf +
          (frame_mv[NEWMV][LAST_FRAME].as_mv.row >> 3) * pre_stride +
          (frame_mv[NEWMV][LAST_FRAME].as_mv.col >> 3);
      best_pred_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
                                   x->plane[0].src.stride,
                                   pre_buf, pre_stride);
      x->pred_mv_sad[LAST_FRAME] = best_pred_sad;
    }
    // Don't re-evaluate an MV identical to NEARESTMV's.
    if (this_mode != NEARESTMV &&
        frame_mv[this_mode][ref_frame].as_int ==
            frame_mv[NEARESTMV][ref_frame].as_int)
      continue;
    mbmi->mode = this_mode;
    mbmi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;
    // Search for the best prediction filter type, when the resulting
    // motion vector is at sub-pixel accuracy level for luma component, i.e.,
    // the last three bits are all zeros.
    if (reuse_inter_pred) {
      if (!this_mode_pred) {
        this_mode_pred = &tmp[3];
      } else {
        this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
        pd->dst.buf = this_mode_pred->data;
        pd->dst.stride = bw;
      }
    }
    if ((this_mode == NEWMV || filter_ref == SWITCHABLE) && pred_filter_search
        && (ref_frame == LAST_FRAME)
        && (((mbmi->mv[0].as_mv.row | mbmi->mv[0].as_mv.col) & 0x07) != 0)) {
      // Try each interpolation filter and keep the cheapest.
      int pf_rate[3];
      int64_t pf_dist[3];
      unsigned int pf_var[3];
      unsigned int pf_sse[3];
      TX_SIZE pf_tx_size[3];
      int64_t best_cost = INT64_MAX;
      INTERP_FILTER best_filter = SWITCHABLE, filter;
      PRED_BUFFER *current_pred = this_mode_pred;
      for (filter = EIGHTTAP; filter <= EIGHTTAP_SHARP; ++filter) {
        int64_t cost;
        mbmi->interp_filter = filter;
        vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
        model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[filter], &pf_dist[filter],
                          &pf_var[filter], &pf_sse[filter]);
        pf_rate[filter] += vp9_get_switchable_rate(cpi, xd);
        cost = RDCOST(x->rdmult, x->rddiv, pf_rate[filter], pf_dist[filter]);
        pf_tx_size[filter] = mbmi->tx_size;
        if (cost < best_cost) {
          best_filter = filter;
          best_cost = cost;
          skip_txfm = x->skip_txfm[0];
          if (reuse_inter_pred) {
            // Keep this filter's prediction; build the next one in a
            // fresh scratch buffer.
            if (this_mode_pred != current_pred) {
              free_pred_buffer(this_mode_pred);
              this_mode_pred = current_pred;
            }
            if (filter < EIGHTTAP_SHARP) {
              current_pred = &tmp[get_pred_buffer(tmp, 3)];
              pd->dst.buf = current_pred->data;
              pd->dst.stride = bw;
            }
          }
        }
      }
      if (reuse_inter_pred && this_mode_pred != current_pred)
        free_pred_buffer(current_pred);
      // Restore the best filter's stats (prediction is already in
      // this_mode_pred when reuse_inter_pred is set).
      mbmi->interp_filter = best_filter;
      mbmi->tx_size = pf_tx_size[best_filter];
      this_rdc.rate = pf_rate[best_filter];
      this_rdc.dist = pf_dist[best_filter];
      var_y = pf_var[best_filter];
      sse_y = pf_sse[best_filter];
      x->skip_txfm[0] = skip_txfm;
    } else {
      mbmi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP : filter_ref;
      vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
      model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                        &var_y, &sse_y);
      this_rdc.rate +=
          cm->interp_filter == SWITCHABLE ?
              vp9_get_switchable_rate(cpi, xd) : 0;
    }
    // chroma component rate-distortion cost modeling
    if (x->color_sensitivity[0] || x->color_sensitivity[1]) {
      int uv_rate = 0;
      int64_t uv_dist = 0;
      if (x->color_sensitivity[0])
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 1);
      if (x->color_sensitivity[1])
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 2);
      model_rd_for_sb_uv(cpi, bsize, x, xd, &uv_rate, &uv_dist, &var_y, &sse_y);
      this_rdc.rate += uv_rate;
      this_rdc.dist += uv_dist;
    }
    // Total rate = modeled residual + MV + mode + ref-frame signaling.
    this_rdc.rate += rate_mv;
    this_rdc.rate +=
        cpi->inter_mode_cost[mbmi->mode_context[ref_frame]][INTER_OFFSET(
            this_mode)];
    this_rdc.rate += ref_frame_cost[ref_frame];
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
    // Skipping checking: test to see if this block can be reconstructed by
    // prediction only.
    if (cpi->allow_encode_breakout) {
      encode_breakout_test(cpi, x, bsize, mi_row, mi_col, ref_frame, this_mode,
                           var_y, sse_y, yv12_mb, &this_rdc.rate,
                           &this_rdc.dist);
      if (x->skip) {
        // encode_breakout_test rewrote the rate; MV cost must be re-added.
        this_rdc.rate += rate_mv;
        this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate,
                                 this_rdc.dist);
      }
    }
#if CONFIG_VP9_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity > 0)
      vp9_denoiser_update_frame_stats(mbmi, sse_y, this_mode, ctx);
#else
    (void)ctx;
#endif
    if (this_rdc.rdcost < best_rdc.rdcost || x->skip) {
      best_rdc = this_rdc;
      best_mode = this_mode;
      best_pred_filter = mbmi->interp_filter;
      best_tx_size = mbmi->tx_size;
      best_ref_frame = ref_frame;
      best_mode_skip_txfm = x->skip_txfm[0];
      if (reuse_inter_pred) {
        free_pred_buffer(best_pred);
        best_pred = this_mode_pred;
      }
    } else {
      if (reuse_inter_pred)
        free_pred_buffer(this_mode_pred);
    }
    if (x->skip)
      break;
  }
  // Commit the best inter candidate to the block's mode info.
  mbmi->mode          = best_mode;
  mbmi->interp_filter = best_pred_filter;
  mbmi->tx_size       = best_tx_size;
  mbmi->ref_frame[0]  = best_ref_frame;
  mbmi->mv[0].as_int  = frame_mv[best_mode][best_ref_frame].as_int;
  xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
  x->skip_txfm[0] = best_mode_skip_txfm;
  // Perform intra prediction search, if the best SAD is above a certain
  // threshold.
  if (best_rdc.rdcost == INT64_MAX ||
      (!x->skip && best_rdc.rdcost > inter_mode_thresh &&
       bsize <= cpi->sf.max_intra_bsize)) {
    struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 };
    const TX_SIZE intra_tx_size =
        MIN(max_txsize_lookup[bsize],
            tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    int i;
    TX_SIZE best_intra_tx_size = TX_SIZES;
    // If the best prediction currently lives in the real dst buffer, copy
    // it to a scratch buffer before intra prediction overwrites dst.
    if (reuse_inter_pred && best_pred != NULL) {
      if (best_pred->data == orig_dst.buf) {
        this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
#if CONFIG_VP9_HIGHBITDEPTH
        if (cm->use_highbitdepth)
          vp9_highbd_convolve_copy(best_pred->data, best_pred->stride,
                                   this_mode_pred->data, this_mode_pred->stride,
                                   NULL, 0, NULL, 0, bw, bh, xd->bd);
        else
          vp9_convolve_copy(best_pred->data, best_pred->stride,
                            this_mode_pred->data, this_mode_pred->stride,
                            NULL, 0, NULL, 0, bw, bh);
#else
        vp9_convolve_copy(best_pred->data, best_pred->stride,
                          this_mode_pred->data, this_mode_pred->stride,
                          NULL, 0, NULL, 0, bw, bh);
#endif  // CONFIG_VP9_HIGHBITDEPTH
        best_pred = this_mode_pred;
      }
    }
    pd->dst = orig_dst;
    for (i = 0; i < 4; ++i) {
      const PREDICTION_MODE this_mode = intra_mode_list[i];
      if (!((1 << this_mode) & cpi->sf.intra_y_mode_mask[intra_tx_size]))
        continue;
      args.mode = this_mode;
      args.rate = 0;
      args.dist = 0;
      mbmi->tx_size = intra_tx_size;
      vp9_foreach_transformed_block_in_plane(xd, bsize, 0,
                                             estimate_block_intra, &args);
      this_rdc.rate = args.rate;
      this_rdc.dist = args.dist;
      this_rdc.rate += cpi->mbmode_cost[this_mode];
      this_rdc.rate += ref_frame_cost[INTRA_FRAME];
      this_rdc.rate += intra_cost_penalty;
      this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                               this_rdc.rate, this_rdc.dist);
      if (this_rdc.rdcost < best_rdc.rdcost) {
        best_rdc = this_rdc;
        mbmi->mode = this_mode;
        best_intra_tx_size = mbmi->tx_size;
        mbmi->ref_frame[0] = INTRA_FRAME;
        mbmi->uv_mode = this_mode;
        mbmi->mv[0].as_int = INVALID_MV;
      }
    }
    // Reset mb_mode_info to the best inter mode.
    if (mbmi->ref_frame[0] != INTRA_FRAME) {
      x->skip_txfm[0] = best_mode_skip_txfm;
      mbmi->tx_size = best_tx_size;
    } else {
      mbmi->tx_size = best_intra_tx_size;
    }
  }
  pd->dst = orig_dst;
  // If inter won and its prediction sits in a scratch buffer, copy it into
  // the real destination so later encoding stages can reuse it.
  if (reuse_inter_pred && best_pred != NULL) {
    if (best_pred->data != orig_dst.buf && is_inter_mode(mbmi->mode)) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth)
        vp9_highbd_convolve_copy(best_pred->data, best_pred->stride,
                                 pd->dst.buf, pd->dst.stride, NULL, 0,
                                 NULL, 0, bw, bh, xd->bd);
      else
        vp9_convolve_copy(best_pred->data, best_pred->stride,
                          pd->dst.buf, pd->dst.stride, NULL, 0,
                          NULL, 0, bw, bh);
#else
      vp9_convolve_copy(best_pred->data, best_pred->stride,
                        pd->dst.buf, pd->dst.stride, NULL, 0,
                        NULL, 0, bw, bh);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    }
  }
  // Adaptive RD threshold update: reward the winning mode, tax the rest.
  if (cpi->sf.adaptive_rd_thresh) {
    THR_MODES best_mode_idx = is_inter_block(mbmi) ?
        mode_idx[best_ref_frame][INTER_OFFSET(mbmi->mode)] :
        mode_idx[INTRA_FRAME][mbmi->mode];
    PREDICTION_MODE this_mode;
    for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
      if (best_ref_frame != ref_frame) continue;
      for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
        THR_MODES thr_mode_idx = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
        int *freq_fact = &tile_data->thresh_freq_fact[bsize][thr_mode_idx];
        if (thr_mode_idx == best_mode_idx)
          *freq_fact -= (*freq_fact >> 4);
        else
          *freq_fact = MIN(*freq_fact + RD_THRESH_INC,
                           cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
      }
    }
  }
  *rd_cost = best_rdc;
}
// Chooses inter prediction modes for a sub-8x8 block in the real-time
// encoding path. The block is split into 4x4 units (stepping by the 4x4
// width/height of bsize, i.e. covering 4x4, 4x8 or 8x4 partitions); for each
// allowed single reference frame every sub-block is assigned the best of
// {NEARESTMV, NEARMV, ZEROMV, NEWMV} by model-based RD cost, and the
// reference frame with the lowest accumulated cost wins. Results are written
// into xd->mi[0].bmi[], mbmi, ctx, and *rd_cost.
void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
                                TileDataEnc *tile_data,
                                int mi_row, int mi_col, RD_COST *rd_cost,
                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  SPEED_FEATURES *const sf = &cpi->sf;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  const struct segmentation *const seg = &cm->seg;
  MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
  MV_REFERENCE_FRAME best_ref_frame = NONE;
  unsigned char segment_id = mbmi->segment_id;
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  int64_t best_rd = INT64_MAX;
  // Best per-sub-block mode/MV decision, kept separately per reference frame.
  b_mode_info bsi[MAX_REF_FRAMES][4];
  int ref_frame_skip_mask = 0;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;

  x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  ctx->pred_pixel_ready = 0;

  // Gather MV reference candidates for each available reference frame and
  // record unavailable frames in ref_frame_skip_mask.
  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
    int_mv dummy_mv[2];
    x->pred_mv_sad[ref_frame] = INT_MAX;

    if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
      int_mv *const candidates = mbmi->ref_mvs[ref_frame];
      // NOTE: this 'sf' (scale factors) intentionally shadows the outer
      // 'sf' (speed features) for the duration of this scope.
      const struct scale_factors *const sf =
          &cm->frame_refs[ref_frame - 1].sf;
      vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
                           sf, sf);
      vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
                       candidates, mi_row, mi_col, NULL, NULL);

      vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                            &dummy_mv[0], &dummy_mv[1]);
    } else {
      ref_frame_skip_mask |= (1 << ref_frame);
    }
  }

  // Initialize the block-level mode info shared by all candidates.
  mbmi->sb_type = bsize;
  mbmi->tx_size = TX_4X4;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
                                                        : cm->interp_filter;

  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    int64_t this_rd = 0;
    int plane;

    if (ref_frame_skip_mask & (1 << ref_frame))
      continue;

    // TODO(jingning, agrange): Scaling reference frame not supported for
    // sub8x8 blocks. Is this supported now?
    if (ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;

    // If the segment reference frame feature is enabled....
    // then do nothing if the current ref frame is not allowed..
    if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
      continue;

    mbmi->ref_frame[0] = ref_frame;
    x->skip = 0;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    // Select prediction reference frames.
    for (plane = 0; plane < MAX_MB_PLANE; plane++)
      xd->plane[plane].pre[0] = yv12_mb[ref_frame][plane];

    for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
        int_mv b_mv[MB_MODE_COUNT];
        int64_t b_best_rd = INT64_MAX;
        const int i = idy * 2 + idx;  // raster index of the 4x4 sub-block
        PREDICTION_MODE this_mode;
        RD_COST this_rdc;
        unsigned int var_y, sse_y;

        struct macroblock_plane *p = &x->plane[0];
        struct macroblockd_plane *pd = &xd->plane[0];
        // Save buffer state so it can be restored after the mode search.
        const struct buf_2d orig_src = p->src;
        const struct buf_2d orig_dst = pd->dst;
        struct buf_2d orig_pre[2];
        vpx_memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));

        // set buffer pointers for sub8x8 motion search.
        p->src.buf =
            &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
        pd->dst.buf =
            &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i,
                                                 pd->dst.stride)];
        pd->pre[0].buf =
            &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8,
                                                    i, pd->pre[0].stride)];

        b_mv[ZEROMV].as_int = 0;
        b_mv[NEWMV].as_int = INVALID_MV;
        vp9_append_sub8x8_mvs_for_idx(cm, xd, tile_info, i, 0, mi_row, mi_col,
                                      &b_mv[NEARESTMV],
                                      &b_mv[NEARMV]);

        for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
          int b_rate = 0;
          xd->mi[0].bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;

          if (this_mode == NEWMV) {
            const int step_param = cpi->sf.mv.fullpel_search_step_param;
            MV mvp_full;
            MV tmp_mv;
            int cost_list[5];
            const int tmp_col_min = x->mv_col_min;
            const int tmp_col_max = x->mv_col_max;
            const int tmp_row_min = x->mv_row_min;
            const int tmp_row_max = x->mv_row_max;
            int dummy_dist;

            // Seed the full-pel search from NEARESTMV for the first
            // sub-block, otherwise from sub-block 0's result.
            if (i == 0) {
              mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
              mvp_full.col = b_mv[NEARESTMV].as_mv.col >> 3;
            } else {
              mvp_full.row = xd->mi[0].bmi[0].as_mv[0].as_mv.row >> 3;
              mvp_full.col = xd->mi[0].bmi[0].as_mv[0].as_mv.col >> 3;
            }

            // NOTE(review): the search range is derived from ref_mvs[0]
            // while the rate computation below uses
            // ref_mvs[ref_frame][0] -- confirm this index is intended.
            vp9_set_mv_search_range(x, &mbmi->ref_mvs[0]->as_mv);
            vp9_full_pixel_search(
                cpi, x, bsize, &mvp_full, step_param, x->sadperbit4,
                cond_cost_list(cpi, cost_list),
                &mbmi->ref_mvs[ref_frame][0].as_mv, &tmp_mv,
                INT_MAX, 0);

            // Restore the MV window clamped by vp9_set_mv_search_range().
            x->mv_col_min = tmp_col_min;
            x->mv_col_max = tmp_col_max;
            x->mv_row_min = tmp_row_min;
            x->mv_row_max = tmp_row_max;

            // calculate the bit cost on motion vector
            mvp_full.row = tmp_mv.row * 8;
            mvp_full.col = tmp_mv.col * 8;

            b_rate += vp9_mv_bit_cost(&mvp_full,
                                      &mbmi->ref_mvs[ref_frame][0].as_mv,
                                      x->nmvjointcost, x->mvcost,
                                      MV_COST_WEIGHT);

            b_rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
                                          [INTER_OFFSET(NEWMV)];
            // Early exit: the mode cost alone already exceeds the best RD.
            if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd)
              continue;

            cpi->find_fractional_mv_step(x, &tmp_mv,
                                         &mbmi->ref_mvs[ref_frame][0].as_mv,
                                         cpi->common.allow_high_precision_mv,
                                         x->errorperbit,
                                         &cpi->fn_ptr[bsize],
                                         cpi->sf.mv.subpel_force_stop,
                                         cpi->sf.mv.subpel_iters_per_step,
                                         cond_cost_list(cpi, cost_list),
                                         x->nmvjointcost, x->mvcost,
                                         &dummy_dist,
                                         &x->pred_sse[ref_frame], NULL, 0, 0);

            xd->mi[0].bmi[i].as_mv[0].as_mv = tmp_mv;
          } else {
            b_rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
                                          [INTER_OFFSET(this_mode)];
          }

#if CONFIG_VP9_HIGHBITDEPTH
          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
            vp9_highbd_build_inter_predictor(
                pd->pre[0].buf, pd->pre[0].stride,
                pd->dst.buf, pd->dst.stride,
                &xd->mi[0].bmi[i].as_mv[0].as_mv,
                &xd->block_refs[0]->sf,
                4 * num_4x4_blocks_wide,
                4 * num_4x4_blocks_high, 0,
                vp9_get_interp_kernel(mbmi->interp_filter),
                MV_PRECISION_Q3,
                mi_col * MI_SIZE + 4 * (i & 0x01),
                mi_row * MI_SIZE + 4 * (i >> 1), xd->bd);
          } else {
#endif
            vp9_build_inter_predictor(
                pd->pre[0].buf, pd->pre[0].stride,
                pd->dst.buf, pd->dst.stride,
                &xd->mi[0].bmi[i].as_mv[0].as_mv,
                &xd->block_refs[0]->sf,
                4 * num_4x4_blocks_wide,
                4 * num_4x4_blocks_high, 0,
                vp9_get_interp_kernel(mbmi->interp_filter),
                MV_PRECISION_Q3,
                mi_col * MI_SIZE + 4 * (i & 0x01),
                mi_row * MI_SIZE + 4 * (i >> 1));

#if CONFIG_VP9_HIGHBITDEPTH
          }
#endif

          // Model-based rate/distortion estimate for the luma plane.
          model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                            &var_y, &sse_y);

          this_rdc.rate += b_rate;
          this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                                   this_rdc.rate, this_rdc.dist);
          if (this_rdc.rdcost < b_best_rd) {
            b_best_rd = this_rdc.rdcost;
            bsi[ref_frame][i].as_mode = this_mode;
            bsi[ref_frame][i].as_mv[0].as_mv =
                xd->mi[0].bmi[i].as_mv[0].as_mv;
          }
        }  // mode search

        // restore source and prediction buffer pointers.
        p->src = orig_src;
        pd->pre[0] = orig_pre[0];
        pd->dst = orig_dst;

        this_rd += b_best_rd;
        // Replicate the decision into the 4x4 neighbors covered by this
        // sub-block when bsize is 4x8 or 8x4.
        xd->mi[0].bmi[i] = bsi[ref_frame][i];
        if (num_4x4_blocks_wide > 1)
          xd->mi[0].bmi[i + 1] = xd->mi[0].bmi[i];
        if (num_4x4_blocks_high > 1)
          xd->mi[0].bmi[i + 2] = xd->mi[0].bmi[i];
      }
    }  // loop through sub8x8 blocks

    if (this_rd < best_rd) {
      best_rd = this_rd;
      best_ref_frame = ref_frame;
    }
  }  // reference frames

  mbmi->tx_size = TX_4X4;
  mbmi->ref_frame[0] = best_ref_frame;
  // Copy the winning reference frame's sub-block decisions into place.
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      const int block = idy * 2 + idx;
      xd->mi[0].bmi[block] = bsi[best_ref_frame][block];
      if (num_4x4_blocks_wide > 1)
        xd->mi[0].bmi[block + 1] = bsi[best_ref_frame][block];
      if (num_4x4_blocks_high > 1)
        xd->mi[0].bmi[block + 2] = bsi[best_ref_frame][block];
    }
  }
  // The block-level mode is taken from the last (bottom-right) sub-block.
  mbmi->mode = xd->mi[0].bmi[3].as_mode;
  ctx->mic = *(xd->mi[0].src_mi);
  ctx->skip_txfm[0] = 0;
  ctx->skip = 0;
  // Dummy assignment for speed -5. No effect in speed -6.
  rd_cost->rdcost = best_rd;
}
|
Maria1099/webm.libvpx
|
vp9/encoder/vp9_pickmode.c
|
C
|
bsd-3-clause
| 51,902
|
/*
Copyright (c) 2009-2015, Jack Poulson
All rights reserved.
This file is part of Elemental and is under the BSD 2-Clause License,
which can be found in the LICENSE file in the root directory, or at
http://opensource.org/licenses/BSD-2-Clause
*/
#include "El.hpp"
namespace El {
// Returns the inertia (the eigenvalue sign counts) of the matrix A by
// forming a pivoted LDL^H factorization in-place and inspecting the
// resulting quasi-diagonal factor. A is overwritten by the factorization.
template<typename F>
InertiaType Inertia
( UpperOrLower uplo, Matrix<F>& A, const LDLPivotCtrl<Base<F>>& ctrl )
{
    DEBUG_ONLY(CSE cse("Inertia"))
    if( uplo == UPPER )
        LogicError("This option not yet supported");

    // Pivoted LDL with the subdiagonal of D stored separately.
    Matrix<F> dSub;
    Matrix<Int> p;
    LDL( A, dSub, p, true, ctrl );

    // Sylvester's law of inertia: read the counts off of the D factor.
    auto d = GetRealPartOfDiagonal(A);
    return ldl::Inertia( d, dSub );
}
// Distributed-memory variant: the input is proxied into [MC,MR] form,
// factored in-place via pivoted LDL^H, and the inertia is extracted from
// the quasi-diagonal factor. APre is overwritten.
template<typename F>
InertiaType Inertia
( UpperOrLower uplo, AbstractDistMatrix<F>& APre,
  const LDLPivotCtrl<Base<F>>& ctrl )
{
    DEBUG_ONLY(CSE cse("Inertia"))
    if( uplo == UPPER )
        LogicError("This option not yet supported");

    // Force a [MC,MR] distribution (writes back on destruction).
    auto APtr = ReadProxy<F,MC,MR>( &APre );
    auto& A = *APtr;

    // Pivoted LDL with the subdiagonal of D stored separately.
    DistMatrix<F,MD,STAR> dSub( A.Grid() );
    DistMatrix<Int,VC,STAR> p( A.Grid() );
    LDL( A, dSub, p, true, ctrl );

    // Sylvester's law of inertia: read the counts off of the D factor.
    auto d = GetRealPartOfDiagonal(A);
    return ldl::Inertia( d, dSub );
}
#define PROTO(F) \
template InertiaType Inertia \
( UpperOrLower uplo, Matrix<F>& A, const LDLPivotCtrl<Base<F>>& ctrl ); \
template InertiaType Inertia \
( UpperOrLower uplo, AbstractDistMatrix<F>& A, \
const LDLPivotCtrl<Base<F>>& ctrl );
#define EL_NO_INT_PROTO
#include "El/macros/Instantiate.h"
} // namespace El
|
justusc/Elemental
|
src/lapack_like/props/Inertia.cpp
|
C++
|
bsd-3-clause
| 1,490
|
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_LITHIUM_IA32_H_
#define V8_IA32_LITHIUM_IA32_H_
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
#include "utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
V(ArrayLiteral) \
V(BitI) \
V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
V(CallFunction) \
V(CallGlobal) \
V(CallKeyed) \
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMap) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CmpIDAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(ElementsKind) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedFastDoubleElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MathPowHalf) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
V(ObjectLiteralFast) \
V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const { return LInstruction::k##type; } \
virtual void CompileToNative(LCodeGen* generator); \
virtual const char* Mnemonic() const { return mnemonic; } \
static L##type* cast(LInstruction* instr) { \
ASSERT(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
}
#define DECLARE_HYDROGEN_ACCESSOR(type) \
H##type* hydrogen() const { \
return H##type::cast(hydrogen_value()); \
}
// Abstract base class of all Lithium (low-level IR) instructions. Stores the
// lazy-deopt environment, pointer map, originating hydrogen value, and the
// call / save-doubles flags consulted by the register allocator. Concrete
// subclasses are enumerated by LITHIUM_CONCRETE_INSTRUCTION_LIST.
class LInstruction: public ZoneObject {
 public:
  LInstruction()
      : environment_(NULL),
        hydrogen_value_(NULL),
        is_call_(false),
        is_save_doubles_(false) { }

  virtual ~LInstruction() { }

  // Code generation and printing; provided by DECLARE_CONCRETE_INSTRUCTION
  // in the concrete subclasses.
  virtual void CompileToNative(LCodeGen* generator) = 0;
  virtual const char* Mnemonic() const = 0;
  virtual void PrintTo(StringStream* stream);
  virtual void PrintDataTo(StringStream* stream);
  virtual void PrintOutputOperandTo(StringStream* stream);

  enum Opcode {
    // Declare a unique enum value for each instruction.
#define DECLARE_OPCODE(type) k##type,
    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
    kNumberOfInstructions
#undef DECLARE_OPCODE
  };

  virtual Opcode opcode() const = 0;

  // Declare non-virtual type testers for all leaf IR classes.
#define DECLARE_PREDICATE(type) \
  bool Is##type() const { return opcode() == k##type; }
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE

  // Declare virtual predicates for instructions that don't have
  // an opcode.
  virtual bool IsGap() const { return false; }

  virtual bool IsControl() const { return false; }

  void set_environment(LEnvironment* env) { environment_ = env; }
  LEnvironment* environment() const { return environment_; }
  bool HasEnvironment() const { return environment_ != NULL; }

  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
  LPointerMap* pointer_map() const { return pointer_map_.get(); }
  bool HasPointerMap() const { return pointer_map_.is_set(); }

  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
  HValue* hydrogen_value() const { return hydrogen_value_; }

  // The deoptimization environment is a set-once pointer.
  void set_deoptimization_environment(LEnvironment* env) {
    deoptimization_environment_.set(env);
  }
  LEnvironment* deoptimization_environment() const {
    return deoptimization_environment_.get();
  }
  bool HasDeoptimizationEnvironment() const {
    return deoptimization_environment_.is_set();
  }

  void MarkAsCall() { is_call_ = true; }
  void MarkAsSaveDoubles() { is_save_doubles_ = true; }

  // Interface to the register allocator and iterators.
  bool IsMarkedAsCall() const { return is_call_; }
  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }

  // Operand access; implemented by LTemplateInstruction<R, I, T>.
  virtual bool HasResult() const = 0;
  virtual LOperand* result() = 0;

  virtual int InputCount() = 0;
  virtual LOperand* InputAt(int i) = 0;
  virtual int TempCount() = 0;
  virtual LOperand* TempAt(int i) = 0;

  LOperand* FirstInput() { return InputAt(0); }
  LOperand* Output() { return HasResult() ? result() : NULL; }

#ifdef DEBUG
  void VerifyCall();
#endif

 private:
  LEnvironment* environment_;
  SetOncePointer<LPointerMap> pointer_map_;
  HValue* hydrogen_value_;
  SetOncePointer<LEnvironment> deoptimization_environment_;
  bool is_call_;
  bool is_save_doubles_;
};
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
//
// Implements the operand-count/accessor interface of LInstruction with
// fixed-size embedded operand arrays.
template<int R, int I, int T>
class LTemplateInstruction: public LInstruction {
 public:
  // Allow 0 or 1 output operands.
  STATIC_ASSERT(R == 0 || R == 1);

  virtual bool HasResult() const { return R != 0; }
  void set_result(LOperand* operand) { results_[0] = operand; }
  LOperand* result() { return results_[0]; }

  int InputCount() { return I; }
  LOperand* InputAt(int i) { return inputs_[i]; }

  int TempCount() { return T; }
  LOperand* TempAt(int i) { return temps_[i]; }

 protected:
  EmbeddedContainer<LOperand*, R> results_;
  EmbeddedContainer<LOperand*, I> inputs_;
  EmbeddedContainer<LOperand*, T> temps_;
};
// A gap between instructions, holding up to four parallel moves (one per
// InnerPosition) inserted by the register allocator.
class LGap: public LTemplateInstruction<0, 0, 0> {
 public:
  explicit LGap(HBasicBlock* block) : block_(block) {
    parallel_moves_[BEFORE] = NULL;
    parallel_moves_[START] = NULL;
    parallel_moves_[END] = NULL;
    parallel_moves_[AFTER] = NULL;
  }

  // Can't use the DECLARE-macro here because of sub-classes.
  virtual bool IsGap() const { return true; }
  virtual void PrintDataTo(StringStream* stream);
  static LGap* cast(LInstruction* instr) {
    ASSERT(instr->IsGap());
    return reinterpret_cast<LGap*>(instr);
  }

  // A gap is redundant if all of its parallel moves are.
  bool IsRedundant() const;

  HBasicBlock* block() const { return block_; }

  enum InnerPosition {
    BEFORE,
    START,
    END,
    AFTER,
    FIRST_INNER_POSITION = BEFORE,
    LAST_INNER_POSITION = AFTER
  };

  // Lazily allocates the parallel move for the given position.
  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
    return parallel_moves_[pos];
  }

  // May return NULL if no move was recorded at this position.
  LParallelMove* GetParallelMove(InnerPosition pos) {
    return parallel_moves_[pos];
  }

 private:
  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
  HBasicBlock* block_;
};
// Concrete gap instruction ("gap"); the LGap subclass that carries an opcode.
class LInstructionGap: public LGap {
 public:
  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }

  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
// Control instruction "goto": unconditional jump to the block with the
// stored id. No results, inputs, or temps.
class LGoto: public LTemplateInstruction<0, 0, 0> {
 public:
  explicit LGoto(int block_id) : block_id_(block_id) { }

  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
  virtual void PrintDataTo(StringStream* stream);
  virtual bool IsControl() const { return true; }

  int block_id() const { return block_id_; }

 private:
  int block_id_;
};
// Instruction "lazy-bailout": 0 results, 0 inputs, 0 temps.
class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
};
// Instruction "deoptimize": 0 results, 0 inputs, 0 temps.
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
// Gap subclass "label": marks the start of a basic block and owns the
// assembler label jumped to by branches. May be replaced by another label
// when blocks are merged.
class LLabel: public LGap {
 public:
  explicit LLabel(HBasicBlock* block)
      : LGap(block), replacement_(NULL) { }

  DECLARE_CONCRETE_INSTRUCTION(Label, "label")

  virtual void PrintDataTo(StringStream* stream);

  int block_id() const { return block()->block_id(); }
  bool is_loop_header() const { return block()->IsLoopHeader(); }
  Label* label() { return &label_; }
  LLabel* replacement() const { return replacement_; }
  void set_replacement(LLabel* label) { replacement_ = label; }
  bool HasReplacement() const { return replacement_ != NULL; }

 private:
  Label label_;
  LLabel* replacement_;
};
// Instruction "parameter": 1 result, 0 inputs, 0 temps.
class LParameter: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
// Instruction "call-stub": 1 result; single input is the context.
class LCallStub: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LCallStub(LOperand* context) {
    inputs_[0] = context;
  }

  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
  DECLARE_HYDROGEN_ACCESSOR(CallStub)

  LOperand* context() { return inputs_[0]; }

  TranscendentalCache::Type transcendental_type() {
    return hydrogen()->transcendental_type();
  }
};
// Instruction "unknown-osr-value": 1 result, 0 inputs, 0 temps.
class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
// Base for branching instructions (no result). Exposes the successor block
// ids of the underlying HControlInstruction; by convention successor 0 is
// the true target and successor 1 the false target.
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
 public:
  virtual bool IsControl() const { return true; }

  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }

 private:
  HControlInstruction* hydrogen() {
    return HControlInstruction::cast(this->hydrogen_value());
  }
};
// Instruction "apply-arguments": 1 result, 4 inputs
// (function, receiver, length, elements), 1 temp.
class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
 public:
  LApplyArguments(LOperand* function,
                  LOperand* receiver,
                  LOperand* length,
                  LOperand* elements,
                  LOperand* temp) {
    inputs_[0] = function;
    inputs_[1] = receiver;
    inputs_[2] = length;
    inputs_[3] = elements;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")

  LOperand* function() { return inputs_[0]; }
  LOperand* receiver() { return inputs_[1]; }
  LOperand* length() { return inputs_[2]; }
  LOperand* elements() { return inputs_[3]; }
  // Previously the temp was stored but not exposed; added for consistency
  // with the other temp-carrying instructions in this file.
  LOperand* temp() { return temps_[0]; }
};
// Instruction "access-arguments-at": 1 result, 3 inputs
// (arguments, length, index), 0 temps.
class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
 public:
  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
    inputs_[0] = arguments;
    inputs_[1] = length;
    inputs_[2] = index;
  }

  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")

  LOperand* arguments() { return inputs_[0]; }
  LOperand* length() { return inputs_[1]; }
  LOperand* index() { return inputs_[2]; }

  virtual void PrintDataTo(StringStream* stream);
};
// Instruction "arguments-length": 1 result; single input is elements.
class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LArgumentsLength(LOperand* elements) {
    inputs_[0] = elements;
  }

  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
// Instruction "arguments-elements": 1 result, 0 inputs, 0 temps.
class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
 public:
  LArgumentsElements() { }

  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
};
// Instruction "mod-i": 1 result, inputs (left, right), 1 temp.
class LModI: public LTemplateInstruction<1, 2, 1> {
 public:
  LModI(LOperand* left, LOperand* right, LOperand* temp) {
    inputs_[0] = left;
    inputs_[1] = right;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
  DECLARE_HYDROGEN_ACCESSOR(Mod)
};
// Instruction "div-i": 1 result, inputs (left, right), 1 temp.
class LDivI: public LTemplateInstruction<1, 2, 1> {
 public:
  LDivI(LOperand* left, LOperand* right, LOperand* temp) {
    inputs_[0] = left;
    inputs_[1] = right;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
  DECLARE_HYDROGEN_ACCESSOR(Div)
};
// Instruction "mul-i": 1 result, inputs (left, right), 1 temp.
class LMulI: public LTemplateInstruction<1, 2, 1> {
 public:
  LMulI(LOperand* left, LOperand* right, LOperand* temp) {
    inputs_[0] = left;
    inputs_[1] = right;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
  DECLARE_HYDROGEN_ACCESSOR(Mul)
};
// Control instruction "cmp-id-and-branch": compares (left, right) using the
// hydrogen token; is_double() reports whether the comparison is on doubles.
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
 public:
  LCmpIDAndBranch(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }

  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)

  Token::Value op() const { return hydrogen()->token(); }
  bool is_double() const {
    return hydrogen()->GetInputRepresentation().IsDouble();
  }

  virtual void PrintDataTo(StringStream* stream);
};
// Instruction "unary-math-operation": 1 result, 2 inputs, 0 temps.
// NOTE: unusually, the value lives at inputs_[0] and the context at
// inputs_[1] -- the constructor parameter order is (context, value).
class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
 public:
  LUnaryMathOperation(LOperand* context, LOperand* value) {
    inputs_[1] = context;
    inputs_[0] = value;
  }

  LOperand* context() { return inputs_[1]; }
  LOperand* value() { return inputs_[0]; }

  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)

  virtual void PrintDataTo(StringStream* stream);
  BuiltinFunctionId op() const { return hydrogen()->op(); }
};
// Instruction "math-pow-half": 1 result, 2 inputs, 1 temp.
// Same input layout as LUnaryMathOperation: value at inputs_[0],
// context at inputs_[1].
class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
 public:
  LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
    inputs_[1] = context;
    inputs_[0] = value;
    temps_[0] = temp;
  }

  LOperand* context() { return inputs_[1]; }
  LOperand* value() { return inputs_[0]; }
  LOperand* temp() { return temps_[0]; }

  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")

  virtual void PrintDataTo(StringStream* stream);
};
// Control instruction "cmp-object-eq-and-branch": inputs (left, right).
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
 public:
  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }

  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
                               "cmp-object-eq-and-branch")
};
// Control instruction "cmp-constant-eq-and-branch": single input (left).
class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
 public:
  explicit LCmpConstantEqAndBranch(LOperand* left) {
    inputs_[0] = left;
  }

  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
                               "cmp-constant-eq-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
// Control instruction "is-nil-and-branch": 1 input (value), 1 temp.
// kind()/nil() expose the comparison strictness and null/undefined choice.
class LIsNilAndBranch: public LControlInstruction<1, 1> {
 public:
  LIsNilAndBranch(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)

  EqualityKind kind() const { return hydrogen()->kind(); }
  NilValue nil() const { return hydrogen()->nil(); }

  virtual void PrintDataTo(StringStream* stream);
};
// Control instruction "is-object-and-branch": 1 input (value), 1 temp.
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
 public:
  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")

  virtual void PrintDataTo(StringStream* stream);
};
// Control instruction "is-string-and-branch": 1 input (value), 1 temp.
class LIsStringAndBranch: public LControlInstruction<1, 1> {
 public:
  LIsStringAndBranch(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")

  virtual void PrintDataTo(StringStream* stream);
};
// Control instruction "is-smi-and-branch": 1 input (value), no temps.
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
 public:
  explicit LIsSmiAndBranch(LOperand* value) {
    inputs_[0] = value;
  }

  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)

  virtual void PrintDataTo(StringStream* stream);
};
// Control instruction "is-undetectable-and-branch": 1 input (value), 1 temp.
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
 public:
  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
                               "is-undetectable-and-branch")

  virtual void PrintDataTo(StringStream* stream);
};
// Control instruction "string-compare-and-branch": inputs
// (context, left, right); op() is the comparison token.
class LStringCompareAndBranch: public LControlInstruction<3, 0> {
 public:
  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
    inputs_[0] = context;
    inputs_[1] = left;
    inputs_[2] = right;
  }

  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
                               "string-compare-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)

  virtual void PrintDataTo(StringStream* stream);

  Token::Value op() const { return hydrogen()->token(); }
};
// Control instruction "has-instance-type-and-branch": 1 input (value),
// 1 temp.
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
 public:
  LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
                               "has-instance-type-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)

  virtual void PrintDataTo(StringStream* stream);
};
// Instruction "get-cached-array-index": 1 result, 1 input (value).
class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LGetCachedArrayIndex(LOperand* value) {
    inputs_[0] = value;
  }

  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
// Control instruction "has-cached-array-index-and-branch": 1 input (value).
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
 public:
  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
    inputs_[0] = value;
  }

  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
                               "has-cached-array-index-and-branch")

  virtual void PrintDataTo(StringStream* stream);
};
// Control instruction "is-construct-call-and-branch": no inputs, 1 temp.
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
 public:
  explicit LIsConstructCallAndBranch(LOperand* temp) {
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
                               "is-construct-call-and-branch")
};
// Control instruction "class-of-test-and-branch": 1 input (value), 2 temps.
class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
 public:
  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
    inputs_[0] = value;
    temps_[0] = temp;
    temps_[1] = temp2;
  }

  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
                               "class-of-test-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)

  virtual void PrintDataTo(StringStream* stream);
};
// Instruction "cmp-t": generic (tagged) comparison. 1 result, inputs
// (context, left, right); op() is the comparison token.
class LCmpT: public LTemplateInstruction<1, 3, 0> {
 public:
  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
    inputs_[0] = context;
    inputs_[1] = left;
    inputs_[2] = right;
  }

  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)

  Token::Value op() const { return hydrogen()->token(); }
};
// Instruction "instance-of": 1 result, inputs (context, left, right).
class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
 public:
  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
    inputs_[0] = context;
    inputs_[1] = left;
    inputs_[2] = right;
  }

  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")

  LOperand* context() { return inputs_[0]; }
};
// Instruction "instance-of-known-global": 1 result, inputs
// (context, value), 1 temp; function() is the known global function.
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
 public:
  LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
    inputs_[0] = context;
    inputs_[1] = value;
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
                               "instance-of-known-global")
  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)

  Handle<JSFunction> function() const { return hydrogen()->function(); }
};
// Instruction "bounds-check": no result, inputs (index, length).
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
 public:
  LBoundsCheck(LOperand* index, LOperand* length) {
    inputs_[0] = index;
    inputs_[1] = length;
  }

  LOperand* index() { return inputs_[0]; }
  LOperand* length() { return inputs_[1]; }

  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
};
// Instruction "bit-i": bitwise op on inputs (left, right); op() comes from
// the hydrogen Bitwise node.
class LBitI: public LTemplateInstruction<1, 2, 0> {
 public:
  LBitI(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }

  Token::Value op() const { return hydrogen()->op(); }

  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
// Instruction "shift-i": shift of (left, right). The token and whether the
// operation may deoptimize are stored directly rather than read back from
// the hydrogen node.
class LShiftI: public LTemplateInstruction<1, 2, 0> {
 public:
  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
      : op_(op), can_deopt_(can_deopt) {
    inputs_[0] = left;
    inputs_[1] = right;
  }

  Token::Value op() const { return op_; }

  bool can_deopt() const { return can_deopt_; }

  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")

 private:
  Token::Value op_;
  bool can_deopt_;
};
// Instruction "sub-i": 1 result, inputs (left, right), 0 temps.
class LSubI: public LTemplateInstruction<1, 2, 0> {
 public:
  LSubI(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }

  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
  DECLARE_HYDROGEN_ACCESSOR(Sub)
};
// Instruction "constant-i": materializes an int32 constant.
class LConstantI: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
  DECLARE_HYDROGEN_ACCESSOR(Constant)

  int32_t value() const { return hydrogen()->Integer32Value(); }
};
// Instruction "constant-d": materializes a double constant; needs one temp.
class LConstantD: public LTemplateInstruction<1, 0, 1> {
 public:
  explicit LConstantD(LOperand* temp) {
    temps_[0] = temp;
  }

  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
  DECLARE_HYDROGEN_ACCESSOR(Constant)

  double value() const { return hydrogen()->DoubleValue(); }
};
// Instruction "constant-t": materializes a tagged (handle) constant.
class LConstantT: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
  DECLARE_HYDROGEN_ACCESSOR(Constant)

  Handle<Object> value() const { return hydrogen()->handle(); }
};
// Conditional branch on a value; takes one input and one temp operand.
class LBranch: public LControlInstruction<1, 1> {
 public:
  // NOTE: `explicit` was dropped — it has no effect on a two-argument
  // constructor and no other multi-argument constructor in this file uses it.
  LBranch(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
  // Operand accessors, for consistency with sibling instructions.
  LOperand* value() { return inputs_[0]; }
  LOperand* temp() { return temps_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
  DECLARE_HYDROGEN_ACCESSOR(Branch)
  virtual void PrintDataTo(StringStream* stream);
};
// Compares the value's map against map() and branches; a control instruction
// despite being declared as an LTemplateInstruction (hence IsControl()).
class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCmpMapAndBranch(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
  virtual bool IsControl() const { return true; }
  Handle<Map> map() const { return hydrogen()->map(); }
  // Successor block ids are taken from the hydrogen CompareMap instruction.
  int true_block_id() const {
    return hydrogen()->FirstSuccessor()->block_id();
  }
  int false_block_id() const {
    return hydrogen()->SecondSuccessor()->block_id();
  }
};
// Reads the length of a JSArray.
class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LJSArrayLength(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
// Reads the length field of a FixedArrayBase.
class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LFixedArrayBaseLength(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
                               "fixed-array-base-length")
  DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
// Extracts the elements-kind of an object.
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LElementsKind(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
  DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
// ValueOf operation; needs one temp register.
class LValueOf: public LTemplateInstruction<1, 1, 1> {
 public:
  LValueOf(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
// Throws the given value; needs the context operand for the runtime call.
class LThrow: public LTemplateInstruction<0, 2, 0> {
 public:
  LThrow(LOperand* context, LOperand* value) {
    inputs_[0] = context;
    inputs_[1] = value;
  }
  LOperand* context() { return inputs_[0]; }
  LOperand* value() { return inputs_[1]; }
  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
// Bitwise NOT of an integer value.
class LBitNotI: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LBitNotI(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
// Integer addition: left + right.
class LAddI: public LTemplateInstruction<1, 2, 0> {
 public:
  LAddI(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }
  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
  DECLARE_HYDROGEN_ACCESSOR(Add)
};
// Exponentiation: left ** right.
class LPower: public LTemplateInstruction<1, 2, 0> {
 public:
  LPower(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }
  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
  DECLARE_HYDROGEN_ACCESSOR(Power)
};
// Generic double arithmetic; the token decides the operation. Note this class
// overrides opcode()/CompileToNative()/Mnemonic() directly instead of using
// DECLARE_CONCRETE_INSTRUCTION, since one opcode covers several tokens.
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
 public:
  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
      : op_(op) {
    inputs_[0] = left;
    inputs_[1] = right;
  }
  Token::Value op() const { return op_; }
  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
  virtual void CompileToNative(LCodeGen* generator);
  virtual const char* Mnemonic() const;
 private:
  Token::Value op_;
};
// Generic tagged arithmetic (goes through a stub); also carries the context.
class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
 public:
  LArithmeticT(Token::Value op,
               LOperand* context,
               LOperand* left,
               LOperand* right)
      : op_(op) {
    inputs_[0] = context;
    inputs_[1] = left;
    inputs_[2] = right;
  }
  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
  virtual void CompileToNative(LCodeGen* generator);
  virtual const char* Mnemonic() const;
  Token::Value op() const { return op_; }
  LOperand* context() { return inputs_[0]; }
  LOperand* left() { return inputs_[1]; }
  LOperand* right() { return inputs_[2]; }
 private:
  Token::Value op_;
};
// Returns the given value from the current function.
class LReturn: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LReturn(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
// Monomorphic in-object/backing-store named field load.
class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LLoadNamedField(LOperand* object) {
    inputs_[0] = object;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
  LOperand* object() { return inputs_[0]; }
};
// Polymorphic named field load (multiple receiver maps possible).
class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
 public:
  LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
    inputs_[0] = context;
    inputs_[1] = object;
  }
  // BUG FIX: the macro previously named this instruction LoadNamedField,
  // which made its opcode/classification collide with the monomorphic
  // LLoadNamedField above. It must carry its own instruction name.
  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
                               "load-named-field-polymorphic")
  DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
  LOperand* context() { return inputs_[0]; }
  LOperand* object() { return inputs_[1]; }
};
// Generic (IC-based) named property load.
class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
 public:
  LLoadNamedGeneric(LOperand* context, LOperand* object) {
    inputs_[0] = context;
    inputs_[1] = object;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
  LOperand* context() { return inputs_[0]; }
  LOperand* object() { return inputs_[1]; }
  Handle<Object> name() const { return hydrogen()->name(); }
};
// Loads a function's prototype; needs a temp register.
class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
 public:
  LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
    inputs_[0] = function;
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
  LOperand* function() { return inputs_[0]; }
};
// Loads an object's elements backing store.
class LLoadElements: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LLoadElements(LOperand* object) {
    inputs_[0] = object;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
// Loads the raw data pointer of an external (typed) array.
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LLoadExternalArrayPointer(LOperand* object) {
    inputs_[0] = object;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
                               "load-external-array-pointer")
};
// Keyed load from a fast-elements backing store.
class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 public:
  LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
    inputs_[0] = elements;
    inputs_[1] = key;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
  LOperand* elements() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
};
// Keyed load from a fast double-elements backing store.
class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
 public:
  LLoadKeyedFastDoubleElement(LOperand* elements,
                              LOperand* key) {
    inputs_[0] = elements;
    inputs_[1] = key;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
                               "load-keyed-fast-double-element")
  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
  LOperand* elements() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
};
// Keyed load from an external/specialized array; elements_kind() selects the
// element representation.
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
 public:
  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
                                    LOperand* key) {
    inputs_[0] = external_pointer;
    inputs_[1] = key;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
                               "load-keyed-specialized-array-element")
  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
  LOperand* external_pointer() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
  ElementsKind elements_kind() const {
    return hydrogen()->elements_kind();
  }
};
// Generic (IC-based) keyed property load.
class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
 public:
  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
    inputs_[0] = context;
    inputs_[1] = obj;
    inputs_[2] = key;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
  LOperand* context() { return inputs_[0]; }
  LOperand* object() { return inputs_[1]; }
  LOperand* key() { return inputs_[2]; }
};
// Loads a global variable directly from its cell (no operands needed).
class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
// Generic (IC-based) global load; for_typeof() distinguishes typeof loads.
class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
 public:
  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
    inputs_[0] = context;
    inputs_[1] = global_object;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
  LOperand* context() { return inputs_[0]; }
  LOperand* global_object() { return inputs_[1]; }
  Handle<Object> name() const { return hydrogen()->name(); }
  bool for_typeof() const { return hydrogen()->for_typeof(); }
};
// Stores a value directly into a global cell.
class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LStoreGlobalCell(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
  LOperand* value() { return inputs_[0]; }
};
// Generic (IC-based) global store; carries the strict-mode flag.
// NOTE(review): accessors here use InputAt(i) rather than inputs_[i] as in
// sibling classes — presumably equivalent; confirm against LInstruction.
class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
 public:
  LStoreGlobalGeneric(LOperand* context,
                      LOperand* global_object,
                      LOperand* value) {
    inputs_[0] = context;
    inputs_[1] = global_object;
    inputs_[2] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
  LOperand* context() { return InputAt(0); }
  LOperand* global_object() { return InputAt(1); }
  Handle<Object> name() const { return hydrogen()->name(); }
  LOperand* value() { return InputAt(2); }
  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
// Loads a slot from a context; the slot index comes from hydrogen.
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LLoadContextSlot(LOperand* context) {
    inputs_[0] = context;
  }
  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
  LOperand* context() { return InputAt(0); }
  int slot_index() { return hydrogen()->slot_index(); }
  virtual void PrintDataTo(StringStream* stream);
};
// Stores a value into a context slot; the temp is used by codegen.
class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
 public:
  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
    inputs_[0] = context;
    inputs_[1] = value;
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
  LOperand* context() { return InputAt(0); }
  LOperand* value() { return InputAt(1); }
  int slot_index() { return hydrogen()->slot_index(); }
  virtual void PrintDataTo(StringStream* stream);
};
// Pushes a call argument onto the stack.
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LPushArgument(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
// Materializes the current function object.
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
// Materializes the current context.
class LContext: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
};
// Loads the enclosing (outer) context of the given context.
class LOuterContext: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LOuterContext(LOperand* context) {
    inputs_[0] = context;
  }
  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
  LOperand* context() { return InputAt(0); }
};
// Loads the global object from the given context.
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LGlobalObject(LOperand* context) {
    inputs_[0] = context;
  }
  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
  LOperand* context() { return InputAt(0); }
};
// Loads the global receiver from the given global object.
class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LGlobalReceiver(LOperand* global_object) {
    inputs_[0] = global_object;
  }
  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
  LOperand* global() { return InputAt(0); }
};
// Direct call to a known constant function. arity() excludes the receiver
// (hence argument_count() - 1, as in all call instructions below).
class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
  virtual void PrintDataTo(StringStream* stream);
  Handle<JSFunction> function() { return hydrogen()->function(); }
  int arity() const { return hydrogen()->argument_count() - 1; }
};
// Invokes a function value (argument adaptation handled by codegen).
class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
 public:
  LInvokeFunction(LOperand* context, LOperand* function) {
    inputs_[0] = context;
    inputs_[1] = function;
  }
  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
  LOperand* context() { return inputs_[0]; }
  LOperand* function() { return inputs_[1]; }
  virtual void PrintDataTo(StringStream* stream);
  int arity() const { return hydrogen()->argument_count() - 1; }
};
// Call through a keyed property lookup (obj[key](...)).
class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
 public:
  LCallKeyed(LOperand* context, LOperand* key) {
    inputs_[0] = context;
    inputs_[1] = key;
  }
  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
  LOperand* context() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
  virtual void PrintDataTo(StringStream* stream);
  int arity() const { return hydrogen()->argument_count() - 1; }
};
// Call through a named property lookup (obj.name(...)).
class LCallNamed: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LCallNamed(LOperand* context) {
    inputs_[0] = context;
  }
  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
  virtual void PrintDataTo(StringStream* stream);
  LOperand* context() { return inputs_[0]; }
  Handle<String> name() const { return hydrogen()->name(); }
  int arity() const { return hydrogen()->argument_count() - 1; }
};
// Calls a function value. arity() excludes the receiver.
class LCallFunction: public LTemplateInstruction<1, 2, 0> {
 public:
  // NOTE: `explicit` was dropped — it has no effect on a two-argument
  // constructor and no other multi-argument constructor in this file uses it.
  LCallFunction(LOperand* context, LOperand* function) {
    inputs_[0] = context;
    inputs_[1] = function;
  }
  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
  LOperand* context() { return inputs_[0]; }
  LOperand* function() { return inputs_[1]; }
  int arity() const { return hydrogen()->argument_count() - 1; }
};
// Call of a global variable's value by name.
class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LCallGlobal(LOperand* context) {
    inputs_[0] = context;
  }
  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
  virtual void PrintDataTo(StringStream* stream);
  LOperand* context() { return inputs_[0]; }
  // Formatting fix: was "{return ...", inconsistent with the rest of the file.
  Handle<String> name() const { return hydrogen()->name(); }
  int arity() const { return hydrogen()->argument_count() - 1; }
};
// Direct call to a known global function (target resolved at compile time).
class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
  virtual void PrintDataTo(StringStream* stream);
  Handle<JSFunction> target() const { return hydrogen()->target(); }
  int arity() const { return hydrogen()->argument_count() - 1; }
};
// Constructor call: new constructor(...).
class LCallNew: public LTemplateInstruction<1, 2, 0> {
 public:
  LCallNew(LOperand* context, LOperand* constructor) {
    inputs_[0] = context;
    inputs_[1] = constructor;
  }
  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
  DECLARE_HYDROGEN_ACCESSOR(CallNew)
  virtual void PrintDataTo(StringStream* stream);
  LOperand* context() { return inputs_[0]; }
  LOperand* constructor() { return inputs_[1]; }
  int arity() const { return hydrogen()->argument_count() - 1; }
};
// Call into the runtime system. Note: arity() here is argument_count()
// unchanged — runtime calls have no implicit receiver to subtract.
class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LCallRuntime(LOperand* context) {
    inputs_[0] = context;
  }
  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
  LOperand* context() { return inputs_[0]; }
  const Runtime::Function* function() const { return hydrogen()->function(); }
  int arity() const { return hydrogen()->argument_count(); }
};
// Converts an int32 value to a double.
class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LInteger32ToDouble(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
// Boxes an int32 as a tagged number (smi or heap number).
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LNumberTagI(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
// Boxes a double as a heap number; needs a temp register.
class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
 public:
  LNumberTagD(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
};
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
 public:
  LDoubleToI(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
  // Whether lossy truncation to int32 is permitted (per hydrogen analysis).
  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Truncating conversion from a tagged value to an int32.
class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
 public:
  LTaggedToI(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Tags an int32 as a smi.
class LSmiTag: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LSmiTag(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
// Unboxes a tagged number into an unboxed double.
class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LNumberUntagD(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
  // Fix: removed the stray trailing semicolon after the macro — every other
  // use of DECLARE_HYDROGEN_ACCESSOR in this file omits it, and an extra
  // class-scope semicolon is ill-formed under strict pre-C++11 compilers.
  DECLARE_HYDROGEN_ACCESSOR(Change)
};
// Untags a smi to an int32; needs_check records whether a smi check must be
// emitted first (when the input is not statically known to be a smi).
class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
 public:
  LSmiUntag(LOperand* value, bool needs_check)
      : needs_check_(needs_check) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
  bool needs_check() const { return needs_check_; }
 private:
  bool needs_check_;
};
// Monomorphic named field store; transition() is the new map to install when
// the store performs a map transition (details come from hydrogen).
class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
 public:
  LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) {
    inputs_[0] = obj;
    inputs_[1] = val;
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
  virtual void PrintDataTo(StringStream* stream);
  LOperand* object() { return inputs_[0]; }
  LOperand* value() { return inputs_[1]; }
  Handle<Object> name() const { return hydrogen()->name(); }
  bool is_in_object() { return hydrogen()->is_in_object(); }
  int offset() { return hydrogen()->offset(); }
  Handle<Map> transition() const { return hydrogen()->transition(); }
};
// Generic (IC-based) named property store; carries the strict-mode flag.
class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
 public:
  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
    inputs_[0] = context;
    inputs_[1] = object;
    inputs_[2] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
  virtual void PrintDataTo(StringStream* stream);
  LOperand* context() { return inputs_[0]; }
  LOperand* object() { return inputs_[1]; }
  LOperand* value() { return inputs_[2]; }
  Handle<Object> name() const { return hydrogen()->name(); }
  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
// Keyed store into a fast-elements backing store.
class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
 public:
  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
    inputs_[0] = obj;
    inputs_[1] = key;
    inputs_[2] = val;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
                               "store-keyed-fast-element")
  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
  virtual void PrintDataTo(StringStream* stream);
  LOperand* object() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
  LOperand* value() { return inputs_[2]; }
};
// Keyed store into a fast double-elements backing store.
class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
 public:
  LStoreKeyedFastDoubleElement(LOperand* elements,
                               LOperand* key,
                               LOperand* val) {
    inputs_[0] = elements;
    inputs_[1] = key;
    inputs_[2] = val;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
                               "store-keyed-fast-double-element")
  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
  virtual void PrintDataTo(StringStream* stream);
  LOperand* elements() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
  LOperand* value() { return inputs_[2]; }
};
// Keyed store into an external/specialized array.
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
 public:
  LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
                                     LOperand* key,
                                     LOperand* val) {
    inputs_[0] = external_pointer;
    inputs_[1] = key;
    inputs_[2] = val;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
                               "store-keyed-specialized-array-element")
  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
  LOperand* external_pointer() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
  LOperand* value() { return inputs_[2]; }
  ElementsKind elements_kind() const {
    return hydrogen()->elements_kind();
  }
};
// Generic (IC-based) keyed property store.
class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
 public:
  LStoreKeyedGeneric(LOperand* context,
                     LOperand* object,
                     LOperand* key,
                     LOperand* value) {
    inputs_[0] = context;
    inputs_[1] = object;
    inputs_[2] = key;
    inputs_[3] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
  virtual void PrintDataTo(StringStream* stream)
  LOperand* context() { return inputs_[0]; }
  LOperand* object() { return inputs_[1]; }
  LOperand* key() { return inputs_[2]; }
  LOperand* value() { return inputs_[3]; }
  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
// Transitions an object's elements kind (original_map -> transitioned_map);
// needs two temp registers.
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
 public:
  LTransitionElementsKind(LOperand* object,
                          LOperand* new_map_temp,
                          LOperand* temp_reg) {
    inputs_[0] = object;
    temps_[0] = new_map_temp;
    temps_[1] = temp_reg;
  }
  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
                               "transition-elements-kind")
  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
  virtual void PrintDataTo(StringStream* stream);
  LOperand* object() { return inputs_[0]; }
  LOperand* new_map_reg() { return temps_[0]; }
  LOperand* temp_reg() { return temps_[1]; }
  Handle<Map> original_map() { return hydrogen()->original_map(); }
  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
// String concatenation: left + right.
class LStringAdd: public LTemplateInstruction<1, 3, 0> {
 public:
  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
    inputs_[0] = context;
    inputs_[1] = left;
    inputs_[2] = right;
  }
  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
  LOperand* context() { return inputs_[0]; }
  LOperand* left() { return inputs_[1]; }
  LOperand* right() { return inputs_[2]; }
};
// String.charCodeAt: reads the character code at the given index.
class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
 public:
  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
    inputs_[0] = context;
    inputs_[1] = string;
    inputs_[2] = index;
  }
  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
  LOperand* context() { return inputs_[0]; }
  LOperand* string() { return inputs_[1]; }
  LOperand* index() { return inputs_[2]; }
};
// String.fromCharCode: builds a one-character string from a char code.
class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
 public:
  LStringCharFromCode(LOperand* context, LOperand* char_code) {
    inputs_[0] = context;
    inputs_[1] = char_code;
  }
  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
  LOperand* context() { return inputs_[0]; }
  LOperand* char_code() { return inputs_[1]; }
};
// Reads a string's length.
class LStringLength: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LStringLength(LOperand* string) {
    inputs_[0] = string;
  }
  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
  DECLARE_HYDROGEN_ACCESSOR(StringLength)
  LOperand* string() { return inputs_[0]; }
};
// Deoptimization check: value must be a specific function.
class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCheckFunction(LOperand* value) {
    inputs_[0] = value;
  }
  LOperand* value() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
// Deoptimization check on an object's instance type; needs a temp register.
class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
 public:
  LCheckInstanceType(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
// Deoptimization check: value must have a specific map.
class LCheckMap: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCheckMap(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
};
// Deoptimization check over the maps of a prototype chain, from prototype()
// up to holder(); operates via a temp register, no value input.
class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
 public:
  explicit LCheckPrototypeMaps(LOperand* temp) {
    temps_[0] = temp;
  }
  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
  Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
  Handle<JSObject> holder() const { return hydrogen()->holder(); }
};
// Deoptimization check: value must be a smi.
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCheckSmi(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
// Clamps a double to the uint8 range [0, 255].
class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LClampDToUint8(LOperand* value) {
    inputs_[0] = value;
  }
  LOperand* unclamped() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
// Clamps an int32 to the uint8 range [0, 255].
class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LClampIToUint8(LOperand* value) {
    inputs_[0] = value;
  }
  LOperand* unclamped() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
};
// Clamps a tagged value to the uint8 range; needs a temp register.
class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
 public:
  LClampTToUint8(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
  LOperand* unclamped() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
// Deoptimization check: value must NOT be a smi.
class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LCheckNonSmi(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
// Materializes an array literal.
class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LArrayLiteral(LOperand* context) {
    inputs_[0] = context;
  }
  LOperand* context() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
// Materializes an object literal via the fast (stub) path.
class LObjectLiteralFast: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LObjectLiteralFast(LOperand* context) {
    inputs_[0] = context;
  }
  LOperand* context() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
};
// Materializes an object literal via the generic (runtime) path.
class LObjectLiteralGeneric: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LObjectLiteralGeneric(LOperand* context) {
    inputs_[0] = context;
  }
  LOperand* context() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
};
// Materializes a regexp literal.
class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LRegExpLiteral(LOperand* context) {
    inputs_[0] = context;
  }
  LOperand* context() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
// Materializes a function literal (closure) from its SharedFunctionInfo.
class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LFunctionLiteral(LOperand* context) {
    inputs_[0] = context;
  }
  LOperand* context() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
};
// Converts an object to fast-properties mode.
class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LToFastProperties(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
// typeof operator.
class LTypeof: public LTemplateInstruction<1, 2, 0> {
 public:
  LTypeof(LOperand* context, LOperand* value) {
    inputs_[0] = context;
    inputs_[1] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
// Branches on whether typeof(value) equals the given type literal.
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
 public:
  explicit LTypeofIsAndBranch(LOperand* value) {
    inputs_[0] = value;
  }
  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
  Handle<String> type_literal() { return hydrogen()->type_literal(); }
  virtual void PrintDataTo(StringStream* stream);
};
// delete obj[key].
class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
 public:
  LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
    inputs_[0] = context;
    inputs_[1] = obj;
    inputs_[2] = key;
  }
  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
  LOperand* context() { return inputs_[0]; }
  LOperand* object() { return inputs_[1]; }
  LOperand* key() { return inputs_[2]; }
};
// On-stack-replacement entry point. Records, per allocatable register, the
// spill slot operand that must be restored when entering optimized code via
// OSR (NULL when a register has no assigned spill slot).
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
 public:
  LOsrEntry();
  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
  LOperand** SpilledRegisterArray() { return register_spills_; }
  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
  void MarkSpilledDoubleRegister(int allocation_index,
                                 LOperand* spill_operand);
 private:
  // Arrays of spill slot operands for registers with an assigned spill
  // slot, i.e., that must also be restored to the spill slot on OSR entry.
  // NULL if the register has no assigned spill slot. Indexed by allocation
  // index.
  LOperand* register_spills_[Register::kNumAllocatableRegisters];
  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
};
// Interrupt/stack-overflow check; done_label_ marks where execution resumes
// after the (possible) runtime call.
class LStackCheck: public LTemplateInstruction<0, 1, 0> {
 public:
  explicit LStackCheck(LOperand* context) {
    inputs_[0] = context;
  }
  LOperand* context() { return inputs_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
  Label* done_label() { return &done_label_; }
 private:
  Label done_label_;
};
// The `in` operator: key in object.
class LIn: public LTemplateInstruction<1, 3, 0> {
 public:
  LIn(LOperand* context, LOperand* key, LOperand* object) {
    inputs_[0] = context;
    inputs_[1] = key;
    inputs_[2] = object;
  }
  LOperand* context() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
  LOperand* object() { return inputs_[2]; }
  DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
// Forward declaration; the builder is defined below.
class LChunkBuilder;
// Container for the complete Lithium instruction sequence of one compiled
// function: instructions, pointer maps, spill-slot bookkeeping, and the
// closures inlined during graph building.
class LChunk: public ZoneObject {
 public:
  LChunk(CompilationInfo* info, HGraph* graph)
    : spill_slot_count_(0),
      info_(info),
      graph_(graph),
      instructions_(32),
      pointer_maps_(8),
      num_double_slots_(0),
      inlined_closures_(1) { }
  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
  LConstantOperand* DefineConstantOperand(HConstant* constant);
  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
  // Spill-slot allocation; double slots are tracked separately.
  int GetNextSpillIndex(bool is_double);
  LOperand* GetNextSpillSlot(bool is_double);
  int ParameterAt(int index);
  int GetParameterStackSlot(int index) const;
  int spill_slot_count() const { return spill_slot_count_; }
  int num_double_slots() const { return num_double_slots_; }
  CompilationInfo* info() const { return info_; }
  HGraph* graph() const { return graph_; }
  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
  void AddGapMove(int index, LOperand* from, LOperand* to);
  LGap* GetGapAt(int index) const;
  bool IsGapAt(int index) const;
  int NearestGapPos(int index) const;
  void MarkEmptyBlocks();
  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
  // Each basic block's first instruction is its LLabel; look it up by id.
  LLabel* GetLabel(int block_id) const {
    HBasicBlock* block = graph_->blocks()->at(block_id);
    int first_instruction = block->first_instruction_index();
    return LLabel::cast(instructions_[first_instruction]);
  }
  // Follows the replacement chain of merged/eliminated blocks to the block
  // that actually receives control.
  int LookupDestination(int block_id) const {
    LLabel* cur = GetLabel(block_id);
    while (cur->replacement() != NULL) {
      cur = cur->replacement();
    }
    return cur->block_id();
  }
  Label* GetAssemblyLabel(int block_id) const {
    LLabel* label = GetLabel(block_id);
    ASSERT(!label->HasReplacement());
    return label->label();
  }
  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
    return &inlined_closures_;
  }
  void AddInlinedClosure(Handle<JSFunction> closure) {
    inlined_closures_.Add(closure);
  }
 private:
  int spill_slot_count_;
  CompilationInfo* info_;
  HGraph* const graph_;
  ZoneList<LInstruction*> instructions_;
  ZoneList<LPointerMap*> pointer_maps_;
  int num_double_slots_;
  ZoneList<Handle<JSFunction> > inlined_closures_;
};
// Translates one Hydrogen (high-level IR) graph into a Lithium (low-level,
// register-allocatable) instruction sequence for the ia32 backend.  The
// HYDROGEN_CONCRETE_INSTRUCTION_LIST macro expands to one Do<Type> visitor
// per concrete Hydrogen instruction kind.
class LChunkBuilder BASE_EMBEDDED {
 public:
  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
      : chunk_(NULL),
        info_(info),
        graph_(graph),
        isolate_(graph->isolate()),
        status_(UNUSED),
        current_instruction_(NULL),
        current_block_(NULL),
        next_block_(NULL),
        argument_count_(0),
        allocator_(allocator),
        position_(RelocInfo::kNoPosition),
        instruction_pending_deoptimization_environment_(NULL),
        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }

  // Build the sequence for the graph.
  LChunk* Build();

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  // Builder lifecycle; transitions UNUSED -> BUILDING -> DONE, or ABORTED
  // on bailout (see Abort()).
  enum Status {
    UNUSED,
    BUILDING,
    DONE,
    ABORTED
  };

  // Simple accessors for construction-time state.
  LChunk* chunk() const { return chunk_; }
  CompilationInfo* info() const { return info_; }
  HGraph* graph() const { return graph_; }
  Zone* zone() { return isolate_->zone(); }

  // Status predicates.
  bool is_unused() const { return status_ == UNUSED; }
  bool is_building() const { return status_ == BUILDING; }
  bool is_done() const { return status_ == DONE; }
  bool is_aborted() const { return status_ == ABORTED; }

  // Records a bailout reason (printf-style) and marks the builder ABORTED.
  void Abort(const char* format, ...);

  // Methods for getting operands for Use / Define / Temp.
  LRegister* ToOperand(Register reg);
  LUnallocated* ToUnallocated(Register reg);
  LUnallocated* ToUnallocated(XMMRegister reg);

  // Methods for setting up define-use relationships.
  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
                                           XMMRegister fixed_register);

  // A value that is guaranteed to be allocated to a register.
  // Operand created by UseRegister is guaranteed to be live until the end of
  // instruction. This means that register allocator will not reuse it's
  // register for any other operand inside instruction.
  // Operand created by UseRegisterAtStart is guaranteed to be live only at
  // instruction start. Register allocator is free to assign the same register
  // to some other operand used inside instruction (i.e. temporary or
  // output).
  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);

  // An input operand in a register that may be trashed.
  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);

  // An input operand in a register or stack slot.
  MUST_USE_RESULT LOperand* Use(HValue* value);
  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);

  // An input operand in a register, stack slot or a constant operand.
  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);

  // An input operand in a register or a constant operand.
  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);

  // An input operand in register, stack slot or a constant operand.
  // Will not be moved to a register even if one is freely available.
  MUST_USE_RESULT LOperand* UseAny(HValue* value);

  // Temporary operand that must be in a register.
  MUST_USE_RESULT LUnallocated* TempRegister();
  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
  MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);

  // Methods for setting up define-use relationships.
  // Return the same instruction that they are passed.
  template<int I, int T>
      LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
                           LUnallocated* result);
  template<int I, int T>
      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
  template<int I, int T>
      LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
  template<int I, int T>
      LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
                                    int index);
  template<int I, int T>
      LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
  template<int I, int T>
      LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
                                Register reg);
  template<int I, int T>
      LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
                                      XMMRegister reg);

  // Assigns an environment to an instruction.  An instruction which can
  // deoptimize must have an environment.
  LInstruction* AssignEnvironment(LInstruction* instr);
  // Assigns a pointer map to an instruction.  An instruction which can
  // trigger a GC or a lazy deoptimization must have a pointer map.
  LInstruction* AssignPointerMap(LInstruction* instr);

  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };

  // Marks a call for the register allocator.  Assigns a pointer map to
  // support GC and lazy deoptimization.  Assigns an environment to support
  // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
  LInstruction* MarkAsCall(
      LInstruction* instr,
      HInstruction* hinstr,
      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
  LInstruction* MarkAsSaveDoubles(LInstruction* instr);

  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
      LInstruction* instr, int ast_id);
  void ClearInstructionPendingDeoptimizationEnvironment();

  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                  int* argument_index_accumulator);

  // Per-node / per-block translation drivers.
  void VisitInstruction(HInstruction* current);
  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);

  // Shared lowering helpers for shift and arithmetic operations (D =
  // double, T = tagged).
  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
  LInstruction* DoArithmeticD(Token::Value op,
                              HArithmeticBinaryOperation* instr);
  LInstruction* DoArithmeticT(Token::Value op,
                              HArithmeticBinaryOperation* instr);

  LChunk* chunk_;
  CompilationInfo* info_;
  HGraph* const graph_;
  Isolate* isolate_;
  Status status_;
  HInstruction* current_instruction_;
  HBasicBlock* current_block_;
  HBasicBlock* next_block_;
  int argument_count_;
  LAllocator* allocator_;
  int position_;
  LInstruction* instruction_pending_deoptimization_environment_;
  int pending_deoptimization_ast_id_;

  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal
#endif // V8_IA32_LITHIUM_IA32_H_
|
PawelMarc/MQDB
|
v8-read-only/src/ia32/lithium-ia32.h
|
C
|
bsd-3-clause
| 69,072
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE761_Free_Pointer_Not_at_Start_of_Buffer__wchar_t_environment_51b.c
Label Definition File: CWE761_Free_Pointer_Not_at_Start_of_Buffer.label.xml
Template File: source-sinks-51b.tmpl.c
*/
/*
* @description
* CWE: 761 Free Pointer not at Start of Buffer
* BadSource: environment Read input from an environment variable
* Sinks:
* GoodSink: free() memory correctly at the start of the buffer
* BadSink : free() memory not at the start of the buffer
* Flow Variant: 51 Data flow: data passed as an argument from one function to another in different source files
*
* */
#include "std_testcase.h"
#include <wchar.h>
#define ENV_VARIABLE L"ADD"
#ifdef _WIN32
#define GETENV _wgetenv
#else
#define GETENV getenv
#endif
#define SEARCH_CHAR L'S'
#ifndef OMITBAD
/* Bad sink (Juliet CWE-761): scans for SEARCH_CHAR by advancing the 'data'
 * pointer itself, then frees whatever address the pointer ends up at.
 * Unless the match is at index 0 (or the string is empty), free() receives
 * an interior pointer — undefined behavior.  The flaw is intentional and
 * must not be "fixed"; it is what analysis tools are expected to detect. */
void CWE761_Free_Pointer_Not_at_Start_of_Buffer__wchar_t_environment_51b_badSink(wchar_t * data)
{
    /* FLAW: We are incrementing the pointer in the loop - this will cause us to free the
     * memory block not at the start of the buffer */
    for (; *data != L'\0'; data++)
    {
        if (*data == SEARCH_CHAR)
        {
            printLine("We have a match!");
            break;
        }
    }
    free(data);  /* 'data' may no longer point to the start of the allocation */
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* Good sink: scan with a separate index so the 'data' pointer itself is
 * never modified, leaving it pointing at the start of the allocation for
 * the final free(). */
void CWE761_Free_Pointer_Not_at_Start_of_Buffer__wchar_t_environment_51b_goodB2GSink(wchar_t * data)
{
    {
        /* FIX: traverse with an index variable; 'data' stays at the buffer start */
        size_t pos = 0;
        size_t length = wcslen(data);
        while (pos < length)
        {
            if (data[pos] == SEARCH_CHAR)
            {
                printLine("We have a match!");
                break;
            }
            pos++;
        }
        free(data);
    }
}
#endif /* OMITGOOD */
|
JianpingZeng/xcc
|
xcc/test/juliet/testcases/CWE761_Free_Pointer_Not_at_Start_of_Buffer/CWE761_Free_Pointer_Not_at_Start_of_Buffer__wchar_t_environment_51b.c
|
C
|
bsd-3-clause
| 1,821
|
{% extends "base.html" %}
{% block title %}
Registration | {{ block.super }}
{% endblock %}
{% block header %}
<h1>Sign up for a Your World of Text account</h1>
{% endblock %}
{% block content %}
<form action="{% url registration_register %}" method="POST">{% csrf_token %}
<table>
<tr>
<td align="right" valign="top">Username:</td>
<td>
{{ form.username }} <br/>
{% for error in form.username.errors %}
<span style="color:red">{{ error }}</span>
{% endfor %}
</td>
</tr>
<tr>
<td align="right" valign="top">Email:</td>
<td>
{{ form.email }} <br/>
{% for error in form.email.errors %}
<span style="color:red">{{ error }}</span>
{% endfor %}
</td>
</tr>
<tr>
<td align="right" valign="top">Password:</td>
<td>
{{ form.password1 }} <br/>
{% for error in form.password1.errors %}
<span style="color:red">{{ error }}</span>
{% endfor %}
</td>
</tr>
<tr>
<td align="right" valign="top">Password (again):</td>
<td>
{{ form.password2 }} <br/>
{% for error in form.password2.errors %}
<span style="color:red">{{ error }}</span>
{% endfor %}
</td>
</tr>
<tr>
<td> </td>
<td><input type="submit" value="Register" /></td>
</tr>
</table>
</form>
{% endblock %}
|
zischwartz/yourworldoftext
|
templates/registration/registration_form.html
|
HTML
|
bsd-3-clause
| 1,341
|
import numpy as np
import tensorflow as tf
import dists
from misc import *
|
davmre/bayesflow
|
elbow/util/__init__.py
|
Python
|
bsd-3-clause
| 77
|
// Karma configuration
// Generated on Tue Jan 14 2014 14:55:10 GMT+0100 (CET)
// Karma test-runner configuration: mocha + requirejs in PhantomJS, a single
// CI-friendly run producing junit and cobertura coverage reports.
module.exports = function(config) {
  config.set({
    // base path, that will be used to resolve files and exclude
    basePath: '../',
    // frameworks to use
    frameworks: ['mocha', 'requirejs'],
    // list of files / patterns to load in the browser
    files: [
      'lib/jquery/jquery.js',
      'test/lib/expect.js',
      'test/lib/sinon-1.7.3.js',
      // Sources and tests are served but not auto-included; requirejs
      // loads them on demand via test/test-main.js below.
      { pattern: 'src/*.js', included: false, served: true },
      // Tests
      { pattern: 'test/*_test.js', included: false, served: true },
      // Test config
      'test/test-main.js'
    ],
    // list of files to exclude
    exclude: [
      'src/_start.js',
      'src/_end.js'
    ],
    // Instrument every source file for the coverage reporter.
    preprocessors: {
      'src/*.js': ['coverage']
    },
    coverageReporter: {
      type: 'cobertura',
      dir: 'coverage/'
    },
    junitReporter: {
      suite: 'GoodData-js Unit',
      outputFile: 'test/test-results.xml'
    },
    // test results reporter to use
    // possible values: 'dots', 'progress', 'junit', 'growl', 'coverage'
    reporters: ['progress', 'junit', 'coverage'],
    // web server port
    port: 9876,
    // enable / disable colors in the output (reporters and logs)
    colors: true,
    // level of logging
    // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
    logLevel: config.LOG_INFO,
    // enable / disable watching file and executing tests whenever any file changes
    autoWatch: false,
    // Start these browsers, currently available:
    // - Chrome
    // - ChromeCanary
    // - Firefox
    // - Opera (has to be installed with `npm install karma-opera-launcher`)
    // - Safari (only Mac; has to be installed with `npm install karma-safari-launcher`)
    // - PhantomJS
    // - IE (only Windows; has to be installed with `npm install karma-ie-launcher`)
    browsers: ['PhantomJS'],
    // If browser does not capture in given timeout [ms], kill it
    captureTimeout: 60000,
    // Continuous Integration mode
    // if true, it capture browsers, run tests and exit
    singleRun: true
  });
};
|
voy/gooddata-js
|
tools/karma.conf.js
|
JavaScript
|
bsd-3-clause
| 2,194
|
/* $NetBSD: mapleio.h,v 1.1 2001/05/26 19:04:39 marcus Exp $ */
/*-
* Copyright (c) 2001 Marcus Comstedt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Marcus Comstedt.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DREAMCAST_DEV_MAPLE_MAPLEIO_H_
#define _DREAMCAST_DEV_MAPLE_MAPLEIO_H_
/*
* Maple (maple, mmc) exported interfaces
* Ioctls are all in group 'M'. Ioctl number space is partitioned like:
* 0-31 generic ioctls (MAPLEIO)
*/
#include <sys/types.h>
#include <sys/ioccom.h>
#include <dreamcast/dev/maple/maple.h>
/*
* Generic ioctls (0 - 31)
*/
/* get devinfo */
#define MAPLEIO_GDEVINFO _IOR('M', 0, struct maple_devinfo)
#endif /* _DREAMCAST_DEV_MAPLE_MAPLEIO_H_ */
|
MarginC/kame
|
netbsd/sys/arch/dreamcast/dev/maple/mapleio.h
|
C
|
bsd-3-clause
| 2,327
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE476_NULL_Pointer_Dereference__binary_if_05.c
Label Definition File: CWE476_NULL_Pointer_Dereference.pointflaw.label.xml
Template File: point-flaw-05.tmpl.c
*/
/*
* @description
* CWE: 476 NULL Pointer Dereference
* Sinks: binary_if
* GoodSink: Do not check for NULL after the pointer has been dereferenced
* BadSink : Check for NULL after a pointer has already been dereferenced
* Flow Variant: 05 Control flow: if(staticTrue) and if(staticFalse)
*
* */
#include "std_testcase.h"
/* The two variables below are not defined as "const", but are never
assigned any other value, so a tool should be able to identify that
reads of these will always return their initialized values. */
static int staticTrue = 1; /* true */
static int staticFalse = 0; /* false */
#ifndef OMITBAD
/* Bad variant (Juliet CWE-476): the single '&' below is a bitwise AND, so
 * both operands are evaluated and the NULL pointer is dereferenced even
 * though the left-hand NULL check is false.  The flaw is intentional and
 * must not be "fixed". */
void CWE476_NULL_Pointer_Dereference__binary_if_05_bad()
{
    if(staticTrue)  /* always taken: staticTrue is initialized to 1 and never reassigned */
    {
        {
            twoIntsStruct *twoIntsStructPointer = NULL;
            /* FLAW: Using a single & in the if statement will cause both sides of the expression to be evaluated
             * thus causing a NPD */
            if ((twoIntsStructPointer != NULL) & (twoIntsStructPointer->intOne == 5))
            {
                printLine("intOne == 5");
            }
        }
    }
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* good1() uses if(staticFalse) instead of if(staticTrue) */
/* good1() uses if(staticFalse) instead of if(staticTrue), so the fixed
 * (short-circuiting &&) logic in the else-branch is what actually runs. */
static void good1()
{
    if(staticFalse)
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        printLine("Benign, fixed string");
    }
    else
    {
        {
            twoIntsStruct *twoIntsStructPointer = NULL;
            /* FIX: Use && in the if statement so that if the left side of the expression fails then
             * the right side will not be evaluated */
            if ((twoIntsStructPointer != NULL) && (twoIntsStructPointer->intOne == 5))
            {
                printLine("intOne == 5");
            }
        }
    }
}
/* good2() reverses the bodies in the if statement */
/* good2() keeps the always-taken if(staticTrue) shape of bad() but places
 * the corrected logic inside it. */
static void good2()
{
    if(staticTrue)
    {
        {
            twoIntsStruct *structPtr = NULL;
            /* FIX: the short-circuiting && operator skips the dereference on
             * the right whenever the NULL check on the left is false */
            if ((structPtr != NULL) && (structPtr->intOne == 5))
            {
                printLine("intOne == 5");
            }
        }
    }
}
/* Runs both corrected variants in sequence. */
void CWE476_NULL_Pointer_Dereference__binary_if_05_good()
{
    good1();
    good2();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
its own for testing or for building a binary to use in testing binary
analysis tools. It is not used when compiling all the testcases as one
application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
/* Standalone driver: runs the good and/or bad variants depending on the
 * OMITGOOD / OMITBAD build flags. */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE476_NULL_Pointer_Dereference__binary_if_05_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE476_NULL_Pointer_Dereference__binary_if_05_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
JianpingZeng/xcc
|
xcc/test/juliet/testcases/CWE476_NULL_Pointer_Dereference/CWE476_NULL_Pointer_Dereference__binary_if_05.c
|
C
|
bsd-3-clause
| 3,497
|
import ipcalc
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete
class BlockIP(models.Model):
    """A single IP address or CIDR mask that should be denied access."""

    # e.g. "192.168.0.1" or "192.168.0.0/24"; 18 chars fits address + mask.
    network = models.CharField(_('IP address or mask'), max_length=18)
    reason_for_block = models.TextField(blank=True, null=True, help_text=_("Optional reason for block"))

    def __unicode__(self):
        # Python 2-style display representation (admin list, shell).
        return 'BlockIP: %s' % self.network

    def get_network(self):
        # Parsed network object, usable for containment checks (addr in net).
        return ipcalc.Network(self.network)

    class Meta:
        verbose_name = _('IPs & masks to ban')
        verbose_name_plural = _('IPs & masks to ban')
def _clear_cache(sender, instance, **kwargs):
    # Refresh the cached block list whenever a BlockIP row changes, so
    # readers of 'blockip:list' see current data.
    # NOTE(review): this stores a lazy QuerySet; it is evaluated when the
    # cache backend pickles it — confirm that is the intended snapshot point.
    cache.set('blockip:list', BlockIP.objects.all())

# Keep the cache in sync on every create/update/delete.
post_save.connect(_clear_cache, sender=BlockIP)
post_delete.connect(_clear_cache, sender=BlockIP)
|
zhendilin/django-block-ip
|
block_ip/models.py
|
Python
|
bsd-3-clause
| 866
|
<?php
/**
* Message translations.
*
* This file is automatically generated by 'yiic message' command.
* It contains the localizable messages extracted from source code.
* You may modify this file by translating the extracted messages.
*
* Each array element represents the translation (value) of a message (key).
* If the value is empty, the message is considered as not translated.
* Messages that no longer need translation will have their translations
* enclosed between a pair of '@@' marks.
*
* Message string can be used with plural forms format. Check i18n section
* of the guide for details.
*
* NOTE, this file must be saved in UTF-8 encoding.
*
* @version $Id: $
*/
return array (
'*неизвестно*' => '',
'*нет*' => '',
'Email' => '',
'Email "{email}" не найден или пользователь заблокирован !' => '',
'Email или пароль введены неверно!' => '',
'Email от имени которого отправлять сообщение' => '',
'Email подтвержден' => '',
'Email уже занят' => '',
'Id' => '',
'Ip активации' => '',
'Ip регистрации' => '',
'htmlOptions must be array or not specified!' => '',
'id' => '',
'yupe team' => '',
'Аватар' => '',
'Автоматически генерировать уникальный ник и не требовать его указания' => '',
'Автоматическое восстановление пароля' => '',
'Администратор' => '',
'Аккаунт активирован!' => '',
'Активен' => '',
'Активирован e-mail с activate_key = {activate_key}, id = {id}!' => '',
'Активирован аккаунт с activate_key = {activate_key}!' => '',
'Вам необходимо продтвердить новый e-mail, проверьте почту!' => '',
'Ваш профиль успешно изменен!' => '',
'Восстановление пароля!' => '',
'Восстановления паролей' => '',
'Вы успешно авторизовались!' => '',
'Вы успешно активировали аккаунт! Теперь Вы можете войти!' => '',
'Вы успешно подтвердили новый e-mail!' => '',
'Граватар' => '',
'Да' => '',
'Данные обновлены!' => '',
'Данный email уже используется другим пользователем' => '',
'Данный ник уже используется другим пользователем' => '',
'Дата активации' => '',
'Дата изменения' => '',
'Дата регистрации' => '',
'Дата рождения' => '',
'Дата создания' => '',
'День рождения' => '',
'Добавление пользователя' => '',
'Доступ' => '',
'Заблокирован' => '',
'Заявка на автоматическое восстановление пароля.' => '',
'Заявка на восстановление пароля.' => '',
'Идентификатор' => '',
'Изменен профиль учетной запись #{id}-{nick_name}!' => '',
'Имя' => '',
'Имя пользователя' => '',
'Каталог для загрузки аватарок' => '',
'Код' => '',
'Код активации' => '',
'Код восстановления пароля {code} не найден!' => '',
'Код восстановления пароля не найден! Попробуйте еще раз!' => '',
'Код проверки' => '',
'Код проверки не корректен.' => '',
'Корень сервера' => '',
'Максимальная длина капчи' => '',
'Максимальный размер аватарки' => '',
'Минимальная длина капчи' => '',
'Минимальная длина пароля' => '',
'Модуль для управления пользователями, регистрацией и авторизацией' => '',
'На указанный email отправлено письмо с инструкцией по восстановлению пароля!' => '',
'Не активирован' => '',
'Неверный формат поля "{attribute}" допустимы только буквы и цифры, от 2 до 20 символов' => '',
'Нет' => '',
'Ник' => '',
'Ник уже занят' => '',
'Новый пароль' => '',
'Новый пароль еще раз' => '',
'Новый пароль отправлен Вам на email!' => '',
'Новый пользователь добавлен!' => '',
'О себе' => '',
'Отчество' => '',
'Ошибка авторизации! email => {email}, Password => {password}!' => '',
'Ошибка активации! Возможно данный e-mail уже проверен или указан неверный ключ активации! Попробуйте другой e-mail.' => '',
'Ошибка активации! Возможно данный аккаунт уже активирован! Попробуете зарегистрироваться вновь?' => '',
'Ошибка при автоматической смене пароля {error}!' => '',
'Ошибка при смене пароля!' => '',
'Ошибка при создании учетной записи без активации!' => '',
'Ошибка при создании учетной записи!' => '',
'Ошибка при сохранении профиля! #{id}' => '',
'Ощибка при создании заявки на автоматическое восстановление пароля' => '',
'Пароли не совпадают!' => '',
'Пароли не совпадают.' => '',
'Пароль' => '',
'Пароль изменен!' => '',
'Пароль успешно изменен!' => '',
'Подтверждать аккаунт по Email' => '',
'Подтверждение нового e-mail адреса на сайте {site} !' => '',
'Подтверждение пароля' => '',
'Пожалуйста, завершите регистрацию, имя пользователя "{nick_name}" к сожалению, уже занято...' => '',
'Показывать капчу при регистрации' => '',
'Пол' => '',
'Пользователи' => '',
'Пользователь' => '',
'Пользователь {email} авторизовался!' => '',
'Пользователь {user} вышел!' => '',
'Пользователь не найден!' => '',
'Порядок следования в меню' => '',
'Последний визит' => '',
'Почтовое событие при автоматическом восстановлении пароля' => '',
'Почтовое событие при восстановлении пароля' => '',
'Почтовое событие при регистрации нового пользователя без активации' => '',
'Почтовое событие при регистрации нового пользователя с активацией' => '',
'Почтовое событие при успешной активации пользователя' => '',
'Почтовое событие при успешном восстановлении пароля' => '',
'При авторизации произошла ошибка!' => '',
'При авторизации через {servive} произошла ошибка!' => '',
'При активации аккаунта c activate_key => {activate_key} произошла ошибка!' => '',
'При активации аккаунта произошла ошибка! Попробуйте позже!' => '',
'При восстановлении пароля произошла ошибка!' => '',
'При восстановлении пароля произошла ошибка! Повторите попытку позже!' => '',
'При подтверждении e-mail c activate_key => {activate_key} произошла ошибка {error}!' => '',
'При подтверждении e-mail произошла ошибка! Попробуйте позже!' => '',
'При создании учетной записи произошла ошибка {error}!' => '',
'При создании учетной записи произошла ошибка!' => '',
'Пустой аватар' => '',
'Расположение' => '',
'Регистрация на сайте {site} !' => '',
'Сайт/блог' => '',
'Создана учетная запись {nick_name} без активации!' => '',
'Создана учетная запись {nick_name}!' => '',
'Соль' => '',
'Список восстановлений' => '',
'Список пользователей' => '',
'Статус' => '',
'Страница для заблокированных Email' => '',
'Страница для заблокированных IP' => '',
'Страница неудачной активации аккаунта' => '',
'Страница после авторизации' => '',
'Страница после авторизации админстратора' => '',
'Страница после активации аккаунта' => '',
'Страница после выхода с сайта' => '',
'Страница после успешной регистрации' => '',
'Тип' => '',
'Управление восстановлением паролей' => '',
'Управление пользователями' => '',
'Успешная смена пароля для пользоателя {user}!' => '',
'Успешное восстановление пароля!' => '',
'Учетная запись создана! Пожалуйста, авторизуйтесь!' => '',
'Учетная запись создана! Проверьте Вашу почту!' => '',
'Учетная запись создана, но не удалось авторизоваться!' => '',
'Фамилия' => '',
'женский' => '',
'мужской' => '',
'не указан' => '',
'статус не определен' => '',
);
|
1datr/yupe
|
protected/messages/en/user.php
|
PHP
|
bsd-3-clause
| 10,591
|
{-# LANGUAGE CPP #-}
module Main where
import Prelude hiding ( catch )
import Test.HUnit
import System.Exit
import System.Process ( system )
import System.IO ( stderr )
import qualified DependencyTest
import qualified MigrationsTest
import qualified FilesystemSerializeTest
import qualified FilesystemParseTest
import qualified FilesystemTest
import qualified CycleDetectionTest
import qualified StoreTest
import Control.Monad ( forM )
import Control.Exception ( finally, catch, SomeException )
import Database.HDBC ( IConnection(disconnect) )
#ifndef WithoutBackendDependencies
import qualified BackendTest
import Database.HDBC.Sqlite3 ( connectSqlite3 )
import qualified Database.HDBC.PostgreSQL as PostgreSQL
-- | Build the backend (database) test groups.  Opens an in-memory Sqlite
-- connection and a scratch PostgreSQL database; each connection is
-- disconnected (and the PG database dropped) when its tests finish, via
-- 'finally'.
doBackendTests :: IO [Test]
doBackendTests = do
  sqliteConn <- connectSqlite3 ":memory:"
  pgConn <- setupPostgresDb
  let backends = [ ("Sqlite", (BackendTest.tests sqliteConn) `finally`
                              (disconnect sqliteConn))
                 , ("PostgreSQL", (BackendTest.tests pgConn) `finally`
                                  (disconnect pgConn >> teardownPostgresDb))
                 ]
  backendTests <- forM backends $ \(name, testAct) -> do
      return $ (name ++ " backend tests") ~: test testAct
  -- BUG FIX: the do-block previously ended on the bind above, which is
  -- invalid (a do-block must end in an expression) and discarded the
  -- constructed tests; return them explicitly.
  return backendTests
-- | Create a fresh scratch PostgreSQL database (dropping any leftover one
-- first) and return a connection to it.  Requires 'createdb' on the PATH.
setupPostgresDb :: IO PostgreSQL.Connection
setupPostgresDb = do
  -- a previous run may have left the database behind; ignore drop failures
  teardownPostgresDb `catch` ignoreException
  -- create database
  status <- system $ "createdb " ++ tempPgDatabase
  case status of
    ExitSuccess -> return ()
    ExitFailure _ -> error $ "Failed to create PostgreSQL database " ++ (show tempPgDatabase)
  -- return test db connection
  PostgreSQL.connectPostgreSQL $ "dbname=" ++ tempPgDatabase
-- | Drop the scratch PostgreSQL database.  Requires 'dropdb' on the PATH.
teardownPostgresDb :: IO ()
teardownPostgresDb = do
  -- drop the test database (stderr silenced: it may not exist yet)
  status <- system $ "dropdb " ++ tempPgDatabase ++ " 2>/dev/null"
  case status of
    ExitSuccess -> return ()
    ExitFailure _ -> error $ "Failed to drop PostgreSQL database " ++ (show tempPgDatabase)
#else
doBackendTests :: IO [Test]
doBackendTests = return []
#endif
-- | Assemble every test group: backend tests (when enabled), the IO-based
-- filesystem tests, and the pure test lists from the remaining modules.
loadTests :: IO [Test]
loadTests = do
  backendTests <- doBackendTests
  ioTests <- sequence [ do fspTests <- FilesystemParseTest.tests
                           return $ "Filesystem Parsing" ~: test fspTests
                      , do fsTests <- FilesystemTest.tests
                           return $ "Filesystem general" ~: test fsTests
                      ]
  return $ concat [ backendTests
                  , ioTests
                  , DependencyTest.tests
                  , FilesystemSerializeTest.tests
                  , MigrationsTest.tests
                  , CycleDetectionTest.tests
                  , StoreTest.tests
                  ]
-- | Name of the throwaway database used by the PostgreSQL backend tests.
tempPgDatabase :: String
tempPgDatabase = "dbmigrations_test"
-- | Swallow any exception (used when dropping a database that may not exist).
ignoreException :: SomeException -> IO ()
ignoreException _ = return ()
-- | Run all tests, reporting progress to stderr; exit nonzero if any test
-- errored or failed.
main :: IO ()
main = do
  tests <- loadTests
  (testResults, _) <- runTestText (putTextToHandle stderr False) $ test tests
  if errors testResults + failures testResults > 0
    then exitFailure
    else exitSuccess
|
nick0x01/dbmigrations
|
test/TestDriver.hs
|
Haskell
|
bsd-3-clause
| 3,047
|
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Float64Index, Index, Int64Index, NaT, Timedelta, TimedeltaIndex,
timedelta_range)
import pandas.util.testing as tm
class TestTimedeltaIndex(object):
    """Casting (astype) behavior of TimedeltaIndex across target dtypes."""

    def test_astype_object(self):
        # astype(object) yields an object Index of Timedelta scalars,
        # preserving the index name.
        idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
        expected_list = [Timedelta('1 days'), Timedelta('2 days'),
                         Timedelta('3 days'), Timedelta('4 days')]
        result = idx.astype(object)
        expected = Index(expected_list, dtype=object, name='idx')
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list

    def test_astype_object_with_nat(self):
        # NaT entries survive the cast to object unchanged.
        idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), NaT,
                              timedelta(days=4)], name='idx')
        expected_list = [Timedelta('1 days'), Timedelta('2 days'), NaT,
                         Timedelta('4 days')]
        result = idx.astype(object)
        expected = Index(expected_list, dtype=object, name='idx')
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list

    def test_astype(self):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])

        result = idx.astype(object)
        expected = Index([Timedelta('1 days 03:46:40')] + [NaT] * 3,
                         dtype=object)
        tm.assert_index_equal(result, expected)

        # int cast maps NaT to the iNaT sentinel (int64 min).
        result = idx.astype(int)
        expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
                              dtype=np.int64)
        tm.assert_index_equal(result, expected)

        result = idx.astype(str)
        expected = Index(str(x) for x in idx)
        tm.assert_index_equal(result, expected)

        rng = timedelta_range('1 days', periods=10)
        result = rng.astype('i8')
        tm.assert_index_equal(result, Index(rng.asi8))
        tm.assert_numpy_array_equal(rng.asi8, result.values)

    def test_astype_uint(self):
        # Values are the nanosecond counts reinterpreted as unsigned ints.
        arr = timedelta_range('1H', periods=2)
        expected = pd.UInt64Index(
            np.array([3600000000000, 90000000000000], dtype="uint64")
        )
        tm.assert_index_equal(arr.astype("uint64"), expected)
        tm.assert_index_equal(arr.astype("uint32"), expected)

    def test_astype_timedelta64(self):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])

        # bare 'timedelta64' (no unit) falls back to a float index with NaNs
        result = idx.astype('timedelta64')
        expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
        tm.assert_index_equal(result, expected)

        # same-dtype cast copies by default ...
        result = idx.astype('timedelta64[ns]')
        tm.assert_index_equal(result, idx)
        assert result is not idx

        # ... and returns the same object with copy=False.
        result = idx.astype('timedelta64[ns]', copy=False)
        tm.assert_index_equal(result, idx)
        assert result is idx

    @pytest.mark.parametrize('dtype', [
        float, 'datetime64', 'datetime64[ns]'])
    def test_astype_raises(self, dtype):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
        msg = 'Cannot cast TimedeltaArray to dtype'
        with pytest.raises(TypeError, match=msg):
            idx.astype(dtype)

    def test_astype_category(self):
        obj = pd.timedelta_range("1H", periods=2, freq='H')
        result = obj.astype('category')
        expected = pd.CategoricalIndex([pd.Timedelta('1H'),
                                        pd.Timedelta('2H')])
        tm.assert_index_equal(result, expected)

        # same cast on the underlying array yields the Categorical values
        result = obj._data.astype('category')
        expected = expected.values
        tm.assert_categorical_equal(result, expected)

    def test_astype_array_fallback(self):
        obj = pd.timedelta_range("1H", periods=2)
        result = obj.astype(bool)
        expected = pd.Index(np.array([True, True]))
        tm.assert_index_equal(result, expected)

        result = obj._data.astype(bool)
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
|
GuessWhoSamFoo/pandas
|
pandas/tests/indexes/timedeltas/test_astype.py
|
Python
|
bsd-3-clause
| 4,066
|
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ppapi/cpp/url_loader.h"
#include "ppapi/c/ppb_url_loader.h"
#include "ppapi/c/pp_errors.h"
#include "ppapi/cpp/completion_callback.h"
#include "ppapi/cpp/file_ref.h"
#include "ppapi/cpp/instance.h"
#include "ppapi/cpp/module.h"
#include "ppapi/cpp/module_impl.h"
#include "ppapi/cpp/url_request_info.h"
#include "ppapi/cpp/url_response_info.h"
namespace pp {
namespace {
template <> const char* interface_name<PPB_URLLoader>() {
return PPB_URLLOADER_INTERFACE;
}
} // namespace
// Wraps an already-created PP_Resource handle in a URLLoader object.
URLLoader::URLLoader(PP_Resource resource) : Resource(resource) {
}
// TODO(brettw) remove this when NaCl is updated.
// Deprecated const-reference overload: creates a new browser-side URLLoader
// for 'instance', or leaves this object without a resource if the
// PPB_URLLoader interface is unavailable.
URLLoader::URLLoader(const Instance& instance) {
  if (!has_interface<PPB_URLLoader>())
    return;
  PassRefFromConstructor(get_interface<PPB_URLLoader>()->Create(
      instance.pp_instance()));
}
// Creates a new browser-side URLLoader for 'instance'; leaves this object
// without a resource if the PPB_URLLoader interface is unavailable.
URLLoader::URLLoader(Instance* instance) {
  if (!has_interface<PPB_URLLoader>())
    return;
  PassRefFromConstructor(get_interface<PPB_URLLoader>()->Create(
      instance->pp_instance()));
}
// Copy constructor; delegates to the Resource base class.
URLLoader::URLLoader(const URLLoader& other) : Resource(other) {
}
// Begins loading the given request.  When the PPB_URLLoader interface is
// missing, PP_ERROR_NOINTERFACE is reported through the callback instead.
int32_t URLLoader::Open(const URLRequestInfo& request_info,
                        const CompletionCallback& cc) {
  if (!has_interface<PPB_URLLoader>())
    return cc.MayForce(PP_ERROR_NOINTERFACE);
  return get_interface<PPB_URLLoader>()->Open(pp_resource(),
                                              request_info.pp_resource(),
                                              cc.pp_completion_callback());
}
// Follows a pending redirect; completion (or PP_ERROR_NOINTERFACE when the
// interface is missing) is reported through 'cc'.
int32_t URLLoader::FollowRedirect(const CompletionCallback& cc) {
  if (!has_interface<PPB_URLLoader>())
    return cc.MayForce(PP_ERROR_NOINTERFACE);
  return get_interface<PPB_URLLoader>()->FollowRedirect(
      pp_resource(), cc.pp_completion_callback());
}
// Queries upload progress. Returns false (leaving the out-parameters
// untouched) when the browser interface is unavailable.
bool URLLoader::GetUploadProgress(int64_t* bytes_sent,
                                  int64_t* total_bytes_to_be_sent) const {
  if (!has_interface<PPB_URLLoader>())
    return false;
  PP_Bool progress = get_interface<PPB_URLLoader>()->GetUploadProgress(
      pp_resource(), bytes_sent, total_bytes_to_be_sent);
  return PP_ToBool(progress);
}
// Queries download progress. Returns false (leaving the out-parameters
// untouched) when the browser interface is unavailable.
bool URLLoader::GetDownloadProgress(
    int64_t* bytes_received,
    int64_t* total_bytes_to_be_received) const {
  if (!has_interface<PPB_URLLoader>())
    return false;
  PP_Bool progress = get_interface<PPB_URLLoader>()->GetDownloadProgress(
      pp_resource(), bytes_received, total_bytes_to_be_received);
  return PP_ToBool(progress);
}
// Returns the response info for this loader, or a null URLResponseInfo when
// the browser interface is unavailable.
URLResponseInfo URLLoader::GetResponseInfo() const {
  if (!has_interface<PPB_URLLoader>())
    return URLResponseInfo();
  PP_Resource response =
      get_interface<PPB_URLLoader>()->GetResponseInfo(pp_resource());
  return URLResponseInfo(URLResponseInfo::PassRef(), response);
}
// Reads up to |bytes_to_read| bytes of the response body into |buffer|,
// completing the callback with PP_ERROR_NOINTERFACE when the browser
// interface is missing.
int32_t URLLoader::ReadResponseBody(void* buffer,
                                    int32_t bytes_to_read,
                                    const CompletionCallback& cc) {
  if (!has_interface<PPB_URLLoader>())
    return cc.MayForce(PP_ERROR_NOINTERFACE);
  const PPB_URLLoader* loader = get_interface<PPB_URLLoader>();
  return loader->ReadResponseBody(pp_resource(), buffer, bytes_to_read,
                                  cc.pp_completion_callback());
}
// Forwards to PPB_URLLoader::FinishStreamingToFile, reporting
// PP_ERROR_NOINTERFACE through the callback when the browser interface is
// missing.
int32_t URLLoader::FinishStreamingToFile(const CompletionCallback& cc) {
  if (!has_interface<PPB_URLLoader>())
    return cc.MayForce(PP_ERROR_NOINTERFACE);
  const PPB_URLLoader* loader = get_interface<PPB_URLLoader>();
  return loader->FinishStreamingToFile(pp_resource(),
                                       cc.pp_completion_callback());
}
void URLLoader::Close() {
if (!has_interface<PPB_URLLoader>())
return;
get_interface<PPB_URLLoader>()->Close(pp_resource());
}
} // namespace pp
|
aYukiSekiguchi/ACCESS-Chromium
|
ppapi/cpp/url_loader.cc
|
C++
|
bsd-3-clause
| 3,686
|
-- Pass out-of-range timestampltz/datetimeltz literals to the DATE() function.
-- Each SELECT below uses an invalid literal (bad month/day/hour/minute/second
-- field, or a value outside the type's supported range) and is expected to
-- fail with an out-of-range argument error rather than return a date.

-- 2. [error] out-of-range argument: timestampltz type
select date(timestampltz'23:00:00 13/01');
select date(timestampltz'04:14:07 1/19/9999');
select date(timestampltz'23:59:59:999 12/31/10000');
select date(timestampltz'2/31/2022 10:20:30:400');
select date(timestampltz'0-12-12 23:59:59:999');
select date(timestampltz'12/31/9999 23:59:59:999');

-- 3. [error] out-of-range argument: datetimeltz type
select date(datetimeltz'2010-10 10:10:100:00 am');
select date(datetimeltz'24:59:59:999 12/31/9999');
select date(datetimeltz'23:60:59:999 12/31/9999');
select date(datetimeltz'23:59:60:999 12/31/9999');
select date(datetimeltz'23:59:59:999 12/31/10000');
select date(datetimeltz'20:33:61:111 1990-10-19');
select date(datetimeltz'2/31/2022 10:20:30:400');
select date(datetimeltz'12/31/9999 23:59:59:999');
select date(datetimeltz'0-12-12 23:59:59:999');
|
CUBRID/cubrid-testcases
|
sql/_27_banana_qa/issue_5765_timezone_support/_00_dev_cases/_02_operator/_36_date_with_local_tz/cases/date_007.sql
|
SQL
|
bsd-3-clause
| 951
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.5.0_22) on Wed Mar 02 16:46:11 EST 2011 -->
<TITLE>
Uses of Class gov.nih.nci.cabig.caaers.rules.deploy.AttributionBusinessRulesTest
</TITLE>
<LINK REL="stylesheet" TYPE="text/css" HREF="../../../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
parent.document.title="Uses of Class gov.nih.nci.cabig.caaers.rules.deploy.AttributionBusinessRulesTest";
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../gov/nih/nci/cabig/caaers/rules/deploy/AttributionBusinessRulesTest.html" title="class in gov.nih.nci.cabig.caaers.rules.deploy"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../../../index.html?gov/nih/nci/cabig/caaers/rules/deploy/class-use/AttributionBusinessRulesTest.html" target="_top"><B>FRAMES</B></A>
<A HREF="AttributionBusinessRulesTest.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>gov.nih.nci.cabig.caaers.rules.deploy.AttributionBusinessRulesTest</B></H2>
</CENTER>
No usage of gov.nih.nci.cabig.caaers.rules.deploy.AttributionBusinessRulesTest
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../gov/nih/nci/cabig/caaers/rules/deploy/AttributionBusinessRulesTest.html" title="class in gov.nih.nci.cabig.caaers.rules.deploy"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../../../index.html?gov/nih/nci/cabig/caaers/rules/deploy/class-use/AttributionBusinessRulesTest.html" target="_top"><B>FRAMES</B></A>
<A HREF="AttributionBusinessRulesTest.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
|
CBIIT/caaers
|
caAERS/software/docs/gov/nih/nci/cabig/caaers/rules/deploy/class-use/AttributionBusinessRulesTest.html
|
HTML
|
bsd-3-clause
| 6,383
|
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("QCV")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("Christoph Heindl")]
[assembly: AssemblyProduct("QCV")]
[assembly: AssemblyCopyright("Copyright © Christoph Heindl 2010")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]

// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]

// The following GUID is for the ID of the typelib if this project is exposed to COM.
[assembly: Guid("a632e3c4-e319-42a2-8804-0147b0dfa5b2")]

// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
// NOTE: keep AssemblyVersion and AssemblyFileVersion in sync when releasing.
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
|
cheind/qcv
|
QCV/Properties/AssemblyInfo.cs
|
C#
|
bsd-3-clause
| 1,450
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE506_Embedded_Malicious_Code__w32_aes_encrypted_payload_08.c
Label Definition File: CWE506_Embedded_Malicious_Code__w32.label.xml
Template File: point-flaw-08.tmpl.c
*/
/*
* @description
* CWE: 506 Embedded Malicious Code
* Sinks: aes_encrypted_payload
* GoodSink: Use a plaintext payload in a system call
* BadSink : Use an AES encrypted payload in a system call
* Flow Variant: 08 Control flow: if(staticReturnsTrue()) and if(staticReturnsFalse())
*
* */
#include "std_testcase.h"
#include <windows.h>
#include <wincrypt.h>
#pragma comment(lib, "advapi32")
#define HASH_INPUT "ABCDEFG123456" /* INCIDENTAL: Hardcoded crypto */
/* The two functions below always return the same value, so a static
   analysis tool should be able to identify that calls to them will always
   return a fixed value, making one branch of each if() that tests them
   dead code. */
static int staticReturnsTrue()
{
    return 1;
}

static int staticReturnsFalse()
{
    return 0;
}
#ifndef OMITBAD

/* Bad variant: decrypts an AES-encrypted copy of the command "calc.exe" at
 * runtime and passes it to system(). The encryption exists only to hide the
 * embedded command from static inspection (CWE-506: Embedded Malicious
 * Code); it is the intentional flaw of this testcase and must be preserved. */
void CWE506_Embedded_Malicious_Code__w32_aes_encrypted_payload_08_bad()
{
    if(staticReturnsTrue())
    {
        {
            /* FLAW: encrypted "calc.exe" */
            BYTE payload[20] = {0xfb, 0x50, 0xe5, 0x8d, 0xc5, 0x4b, 0xdd, 0xe0, 0x26, 0x2b, 0x98, 0x49, 0x73, 0xfb, 0x4c, 0xf6};
            /* 16: the array's trailing bytes are zero-initialized and none of
             * the 16 ciphertext bytes is zero, so strlen() stops there. */
            DWORD payloadLen = strlen((char *)payload);
            HCRYPTPROV hCryptProv = 0;
            HCRYPTHASH hHash = 0;
            HCRYPTKEY hKey = 0;
            char hashData[100] = HASH_INPUT;
            /* do/while(0) lets any CryptoAPI failure break out to the shared
             * handle-cleanup code below. */
            do
            {
                /* Acquire a context for the AES-capable provider */
                if(!CryptAcquireContext(&hCryptProv, NULL, MS_ENH_RSA_AES_PROV, PROV_RSA_AES, 0))
                {
                    break;
                }
                /* Create hash handle */
                if(!CryptCreateHash(hCryptProv, CALG_SHA_256, 0, 0, &hHash))
                {
                    break;
                }
                /* Hash the (hardcoded) input string */
                if(!CryptHashData(hHash, (BYTE*)hashData, strlen(hashData), 0))
                {
                    break;
                }
                /* Derive an AES key from the hash */
                if(!CryptDeriveKey(hCryptProv, CALG_AES_256, hHash, 0, &hKey))
                {
                    break;
                }
                /* Decrypt the payload in place; Final=1, and payloadLen is
                 * updated to the plaintext length. */
                if(!CryptDecrypt(hKey, 0, 1, 0, (BYTE *)payload, &payloadLen))
                {
                    break;
                }
                /* null terminate the recovered plaintext command */
                payload[payloadLen] = '\0';
                if(system((char*)payload) <= 0)
                {
                    printLine("command execution failed!");
                    exit(1);
                }
            }
            while (0);
            /* Release CryptoAPI handles in reverse order of acquisition */
            if (hKey)
            {
                CryptDestroyKey(hKey);
            }
            if (hHash)
            {
                CryptDestroyHash(hHash);
            }
            if (hCryptProv)
            {
                CryptReleaseContext(hCryptProv, 0);
            }
        }
    }
}

#endif /* OMITBAD */
#ifndef OMITGOOD

/* good1() uses if(staticReturnsFalse()) instead of if(staticReturnsTrue()),
 * so only the else-branch (the plaintext command) can execute. */
static void good1()
{
    if(staticReturnsFalse())
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        printLine("Benign, fixed string");
    }
    else
    {
        {
            /* FIX: plaintext command */
            char * payload = "calc.exe";
            if(system(payload) <= 0)
            {
                printLine("command execution failed!");
                exit(1);
            }
        }
    }
}

/* good2() reverses the bodies in the if statement */
static void good2()
{
    if(staticReturnsTrue())
    {
        {
            /* FIX: plaintext command */
            char * payload = "calc.exe";
            if(system(payload) <= 0)
            {
                printLine("command execution failed!");
                exit(1);
            }
        }
    }
}

/* Entry point for the good variants; runs both. */
void CWE506_Embedded_Malicious_Code__w32_aes_encrypted_payload_08_good()
{
    good1();
    good2();
}

#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
   its own for testing or for building a binary to use in testing binary
   analysis tools. It is not used when compiling all the testcases as one
   application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN

int main(int argc, char * argv[])
{
    /* seed randomness (not used in this file; kept for template consistency) */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE506_Embedded_Malicious_Code__w32_aes_encrypted_payload_08_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE506_Embedded_Malicious_Code__w32_aes_encrypted_payload_08_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}

#endif
|
JianpingZeng/xcc
|
xcc/test/juliet/testcases/CWE506_Embedded_Malicious_Code/CWE506_Embedded_Malicious_Code__w32_aes_encrypted_payload_08.c
|
C
|
bsd-3-clause
| 5,089
|
% \section{Evaluation: Recasting Type Errors as Runtime Errors}
\section{Evaluation}
\label{sec:evaluation}
We have implemented a prototype of our search procedure and trace
visualization for a purely functional subset of \ocaml\ --- with
polymorphic types and records, but no modules, objects, or polymorphic
variants --- in a tool called \nanomaly.
\footnote{\url{https://github.com/ucsd-progsys/nanomaly}}
%
We treat explicit type signatures, \eg @(x : int)@, as
primitive operations that narrow the type of the wrapped value.
%
In our implementation we instantiated \gensym\ with a simple random
generation of values, which we will show suffices for the majority of
type errors.
\paragraph{Evaluation Goals}
%
There are four questions we seek to answer with our evaluation:
%
\begin{enumerate}
\item \emphbf{Witness Coverage} (\S~\ref{sec:eval:witness-coverage},~\ref{sec:how-safe})
How many ill-typed programs \emph{admit} witnesses?
\item \emphbf{Witness Complexity} (\S~\ref{sec:trace-complexity})
How \emph{complex} are the traces produced by the witnesses?
\item \emphbf{Witness Utility} (\S~\ref{sec:advantage-traces},~\ref{sec:user-study})
How \emph{helpful} %(qualitatively and quantitatively)
are the witnesses in debugging type errors?
\item \emphbf{Witness-based Blame} (\S~\ref{sec:locating})
Can witnesses be used to \emph{locate} the source
of an error?
\end{enumerate}
In the sequel we present our experimental methodology (\S~\ref{sec:methodology})
and then answer the above questions.
%
However, for the impatient reader, we first summarize our main results:
\paragraph{1. Most Type Errors Admit Witnesses}
Our prime result is that the vast majority of static type errors, around
85\%, do in fact admit a dynamic witness.
%
Further, \toolname efficiently synthesizes witnesses with its randomized search;
it can synthesize a witness for over 75\% of programs in under one second, \ie
fast enough for interactive use. %to be integrated into the edit-compile-debug cycle.
%
\paragraph{2. Jump-Compressed Traces Are Small}
We find that our jump-compression heuristic effectively abstracts the
pedestrian details of computation, compressing the median trace with
14--15 single-step reductions to only 4 jumps.
%
Over 80\% of programs have a jump-compressed trace with at most 10
jumps, providing a bird's-eye view from which we can launch a more
in-depth investigation.
\paragraph{3. Witnesses Help Novices}
A witness should also help programmers \emph{understand} and
\emph{fix} type errors.
%
We use a set of ill-typed student programs to show that \toolname's
witnesses effectively demonstrate the runtime error that the type
system prevented.
%
Furthermore, we find, in a study of undergraduate students, that
\toolname's witnesses lead to more accurate diagnoses and fixes of type
errors than \ocaml's type error messages.
\paragraph{4. Witnesses Assign Blame}
Finally, we present a simple heuristic that allows us to use witnesses
to \emph{automatically} assign blame for type errors.
%
We treat the values inside the stuck term as \emph{sources} of typing
constraints and the stuck term itself as a \emph{sink}, producing
a slice of the program that likely contains the error.
%
Using this heuristic, \toolname's witnesses are competitive with the
state-of-the-art localization tools \mycroft~\cite{Loncaric2016-uk}
and \sherrloc~\cite{Zhang2014-lv}.
\subsection{Methodology}
\label{sec:methodology}
We answer the first two questions on two sets of ill-typed programs,
\ie\ programs that were rejected by the \ocaml\ compiler because of a
type error.
%
The first dataset comes from the Spring 2014 undergraduate Programming
Languages (CSE 130) course at UC San Diego.
%
We recorded each interaction with the \ocaml\ top-level system over the
course of the first three assignments (IRB
% \# hidden for blind review),
\#140608),
from which we extracted \ucsdsize\ distinct, ill-typed \ocaml\ programs
from a cohort of 46 students.
%
The second dataset --- widely used in the literature --- comes from a
graduate-level course at the University of Washington~\cite{Lerner2006-pj},
from which we extracted 284 ill-typed programs.
%
Both datasets contain relatively small programs, the largest being 348
SLoC; however, they demonstrate a variety of functional programming
idioms including (tail) recursive functions, higher-order functions,
and polymorphic and algebraic data types. % and expression evaluators.
We answer the third question in two steps.
%
First, we present a qualitative evaluation of \toolname's traces on a
selection of programs drawn from the UCSD dataset.
%
Second, we present a quantitative user study of students in the
University of Virginia's Spring 2016 undergraduate Programming Languages
(CS 4501) course.
%
As part of an exam, we presented the students with ill-typed \ocaml\
programs and asked them to
%
(1) \emph{explain} the type error, and
%
(2) \emph{fix} the type error (IRB \#2014009900).
%
For each problem the students were given the ill-typed program and
either \ocaml's error message or \toolname's jump-compressed trace.
We answer the last question on a subset of the \ucsdbench dataset.
%
% For each ill-typed program in a student's interaction trace, we identify
% the student's \emph{fix} by searching for the first type-correct program
% that follows it in the trace.
For each ill-typed program compiled by a student, we identify the student's
\emph{fix} by searching for the first type-correct program that the student
subsequently compiled.
%
We then use an expression-level \emph{diff}~\cite{Lempsink2009-xf} to
determine which sub-expressions changed between the ill-typed program
and the student's fix, and treat those expressions as the source of the
type error.
\subsection{Witness Coverage}
\label{sec:eval:witness-coverage}
%
We ran our search algorithm on each program for 1,000 iterations, with
the entry point set to the function that \ocaml\ had identified as
containing a type error.
%
Due to the possibility of non-termination we set a timeout of one minute
total per program.
%
% Due to the possibility of non-termination we set a limit on the number
% of reductions to perform, increasing in 500-step increments from 500
% steps to 3,000 steps total.
%
We also added a na{\"\i}ve check for infinite recursion; at each recursive
function call we check whether the new arguments are identical to the
current arguments.
%
If so, the function cannot possibly terminate and we report an error.
%
While not a \emph{type error}, infinite recursion is still a clear bug
in the program, and thus valuable feedback for the user.
\begin{figure}[t]
% \centerline{
% \begin{minipage}{1.2\textwidth}
\centering
\includegraphics[width=0.7\linewidth]{coverage.png}
% \end{minipage}
% \begin{minipage}{\linewidth}
% \includegraphics[width=0.6\linewidth]{distrib.png}
% \end{minipage}
% }
% \vspace{3ex}
\caption{Results of our coverage testing. Our random search successfully
finds witnesses for 76--83\% of the programs in under one second,
improving to 84--85\% in under 10 seconds. }
\label{fig:results-witness}
\end{figure}
\begin{figure}[t]
\includegraphics[width=0.7\linewidth]{distrib.png}
\caption{Distribution of test outcomes. In both datasets we detect
actual type errors at least 77\% of the time, unbound variables or
constructors 4\% of the time, and diverging loops 2--3\% of the
time. For the remaining 15--16\% of the programs we are unable to
provide any useful feedback. }
\label{fig:results-distrib}
\end{figure}
\paragraph{Results}
\label{sec:results-witness}
The results of our experiments are summarized in
Figures~\ref{fig:results-witness}~and~\ref{fig:results-distrib}.
%
In both datasets our tool was able to find a witness for over 75\% of the
programs in under one second, \ie\ fast enough to be integrated as a
compile-time check. If we extend our tolerance to a 10 second timeout,
we reach 84\% coverage, and if we allow a 60 second search,
we hit a maximum of 84--85\% coverage.
%
Interestingly, while the vast majority of witnesses corresponded to a
type-error, as expected, 4\% triggered an unbound variable error (even
though \ocaml\ reported a type error) and 3\% triggered an infinite
recursion error.
%
For the remaining 15--16\% of programs we were unable to provide any useful
feedback as they either completed 1,000 tests successfully, or timed out
after one minute.
%
% XX programs were deemed safe and XX timed out even at 3,000 steps, \ie
% we could not provide any useful feedback for XX\% of the total programs.
%
While a more advanced search procedure, \eg\ dynamic-symbolic execution,
could likely uncover more errors, our experiments suggest that
type errors are coarse enough (or that novice programs are \emph{simple}
enough) that these techniques are not necessary.
\input{safe}
\subsection{Witness Complexity}
\label{sec:trace-complexity}
For each of the ill-typed programs for which we could
find a witness, we measure the complexity of the generated
trace using two metrics.
% \paragraph{Metrics} Thus, our two metrics are:
% size of the full trace,
% \ie the number of small-step reductions, and the size of the jump-compressed
% version of the trace.
%
\begin{enumerate}
\item \emphbf{Single-step:} The size of the trace after expanding
all of the single-step edges from the witness to the stuck term, and
% This can be thought of as a worst-case
% complexity, \ie ``How big is the fully-expanded trace?''
\item \emphbf{Jump-compressed:} The size of the jump-compressed trace.
\end{enumerate}
% \item \ES{others?}
%
\begin{figure}[t]
% \centerline{
% \begin{minipage}{1.2\textwidth}
\includegraphics[width=0.7\linewidth]{trace_size_step.png}
\includegraphics[width=0.7\linewidth]{trace_size_jump.png}
% \end{minipage}
% }
% \vspace{3ex}
\caption{Complexity of the generated traces. Over 80\% of the combined traces
have a jump complexity of at most 10, with an average complexity of 7
and a median of 5.}
\label{fig:results-complexity}
\end{figure}
%
\paragraph{Results}
\label{sec:results-complexity}
The results of the experiment are summarized in
Figure~\ref{fig:results-complexity}.
%
The average number of single-step reductions per trace is 17 for the
\ucsdbench\ dataset (42 for the \uwbench\ dataset) with a maximum of
2,745 (\resp 982) and a median of 15 (\resp 15).
%
The average number of jumps per trace is 7 (\resp 9) with a
maximum of 353 (\resp 221) and a median of 4 (\resp 4).
%
In both datasets about 60\% of traces have at most 5 jumps, and 80\% or more
have at most 10 jumps.
\subsection{Qualitative Evaluation of Witness Utility}\label{sec:advantage-traces}
Next, we present a \emph{qualitative} evaluation that compares
the explanations provided by \toolname's dynamic witnesses with
the static reports produced by the \ocaml\ compiler and \sherrloc,
a state-of-the-art fault localization approach~\cite{Zhang2014-lv}.
%
In particular, we illustrate, using a series of examples drawn
from student programs in the \ucsdbench\ dataset, how \toolname's
jump-compressed traces can get to the heart of the error. Our approach
%
highlights the conflicting values that cause the program to get
stuck, rather than blaming a single one,
%
shows the steps necessary to reach the stuck state, and
%
does not assume that a function is correct just because it type-checks.
%
For each example we will present:
(1)~the code;
(2)~the error message returned \ocaml;
(3)~the error locations returned by \hlOcaml{\ocaml} and \hlSherrloc{\sherrloc};
and (4)~\toolname's jump-compressed trace.
% \begin{figure*}[ht]
% \centering
% \begin{minipage}{0.49\linewidth}
% \centering
% \begin{figure*}[htp]
% \centering
% \includegraphics[height=125px]{sqsum.png}
% \includegraphics[height=125px]{sumlist.png}
% \includegraphics[height=150px]{digitsOfInt.png}
% \includegraphics[height=125px]{wwhile.png}
% \caption{(Left to right) Jump-compressed traces showing how
% \texttt{sqsum}, \texttt{sumList}, \texttt{digitsOfInt}, and
% \texttt{wwhile} go wrong in \S~\ref{sec:advantage-traces}.}
% \label{fig:traces}
% \end{figure*}
\paragraph{Example: Recursion with Bad Operator}
The recursive function @sqsum@ should square each
element of the input list and then compute the sum
of the result.
%
\begin{ecode}
let rec sqsum xs = match xs with
| [] -> 0
| h::t -> (*@\hlOcaml{\hlSherrloc{sqsum t}}@*) @ (h * h)
\end{ecode}
%
Unfortunately the student has used the list-append
operator |@| instead of \texttt{+}. % to compute the sum.
%
Both \ocaml\ and \sherrloc\ blame the \emph{wrong location},
the recursive call @sqsum t@, with the message
%
\begin{verbatim}
This expression has type
int
but an expression was expected of type
'a list
\end{verbatim}
%
\toolname\ produces a trace showing how the evaluation of
@sqsum [1]@ gets stuck.
%
\begin{center}
\includegraphics[height=125px]{sqsum.png}
\end{center}
%
The trace highlights the entire stuck term
(not just the recursive call), emphasizing
the \emph{conflict} between @int@ and @list@
rather than assuming one or the other is correct.
\paragraph{Example: Recursion with Bad Base Case}
%
The function @sumList@ should add up
the elements of its input list.
%
\begin{ecode}
let rec sumList xs = match xs with
| [] -> (*@\hlSherrloc{[]}@*)
| y::ys -> y + (*@\hlOcaml{sumList ys}@*)
\end{ecode}
%
Unfortunately, in the base case, it returns @[]@
instead of @0@.
%
\sherrloc\ blames the base case, and \ocaml\
assumes the base case is correct and blames
the \emph{recursive call} on line 3:
%
\begin{verbatim}
This expression has type
'a list
but an expression was expected of type
int
\end{verbatim}
%
Both of the above are parts of the full story, which
is summarized by \toolname's trace showing
how @sumList [1; 2]@ gets stuck at @2 + []@.
%
\begin{center}
\includegraphics[height=125px]{sumlist.png}
\end{center}
%
The trace clarifies (via the third step)
that the @[]@ results from the recursive call
\hbox{@sumList []@,} and shows how it is incompatible with
the subsequent \texttt{+} operation.
%% ES: append is actually a bit problematic as we don't find the nice
%% append [1] [2] witness. instead we could find something like
%% append [_] [], but it's not as clear IMO
% Our next example is the @append@ function, which should concatenate the
% two input lists.
% %
% \begin{ecode}
% let append xs ys = match xs with
% | [] -> ys
% | h::t -> h :: __t__ :: ys
% \end{ecode}
% %
% The student has forgotten to make a recursive call to @append@, and
% instead tries to cons the tail @t@ directly onto the second list @ys@.
% Consing @h@ back onto the result causes \ocaml to attempt to construct
% the infinite type @'a = 'a list@, triggering an \emph{occurs-check}
% error.
% %
% \begin{verbatim}
% Error: This expression has type
% 'a list
% but an expression was expected of type
% 'a
% The type variable 'a occurs inside 'a list
% \end{verbatim}
% %
% %
% \begin{center}
% \includegraphics[height=75px]{append.png}
% \end{center}
%\pagebreak
\paragraph{Example: Bad Helper Function that Type-Checks}
%
The function @digitsOfInt@ should return a list of
the digits of the input integer.
%
\begin{ecode}
let append x xs =
match xs with
| [] -> [x]
| _ -> x :: xs
let rec digitsOfInt n =
if n <= 0 then
[]
else
append ((*@\hlSherrloc{digitsOfInt (n / 10)}@*)) [(*@\hlOcaml{n mod 10}@*)]
\end{ecode}
%
%\pagebreak
Unfortunately, the student's @append@ function \emph{conses} an element
onto a list instead of appending two lists.
%
Though incorrect, @append@ still type-checks and thus \ocaml and
\sherrloc blame the \emph{use-site} on line 10.
%
\begin{verbatim}
This expression has type
int
but an expression was expected of type
'a list
\end{verbatim}
%
In contrast, \toolname makes no assumptions about @append@,
yielding a trace that illustrates the error on line 4, by
highlighting the conflict in consing a list onto a list of integers.
%
\begin{center}
\includegraphics[height=160px]{digitsOfInt.png}
\end{center}
%
\paragraph{Example: Higher-Order Functions}
%
The higher-order function @wwhile@ is supposed
to emulate a traditional while-loop. It takes
a function @f@ and repeatedly calls @f@ on the
first element of its output pair, starting with
the initial @b@, till the second element is @false@.
%
\newpage
\begin{ecode}
let rec wwhile (f,b) =
match f with
| (z, false) -> z
| (z, true) -> wwhile (f, z)
let f x =
let xx = x * x in
(xx, (xx < 100))
let _ = wwhile ((*@\hlOcaml{\hlSherrloc{f}}@*), 2)
\end{ecode}
%
The student has forgotten to \emph{apply} @f@ at all on line 2,
and just matches it directly against a pair.
This faulty @wwhile@ definition nevertheless typechecks,
and is assumed to be correct by both \ocaml\ and \sherrloc\
which blame the use-site on line 10.
%
\begin{verbatim}
This expression has type
int -> int * bool
but an expression was expected of type
'a * bool
\end{verbatim}
%
\toolname\ synthesizes a trace that draws the eye to the
true error: the @match@ expression on line 2, and highlights
the conflict in matching a function against a pair pattern.
%
\begin{center}
\includegraphics[height=135px]{wwhile.png}
\end{center}
%
By highlighting conflicting values, \ie\ the source and sink
of the problem, and not making assumptions about function correctness, \toolname\
focuses the user's attention on the piece of code that is
actually relevant to the error.
\subsection{Quantitative Evaluation of Witness Utility}
\label{sec:user-study}
% Finally, to test the explanatory power of our jump-compressed traces, we
% ran a user study (IRB \#2014009900) at the University of Virginia (UVA).
% %
% We included four problems in an exam in the Spring session of UVA's
% undergraduate Programming Languages course (CS 4501).
% %
% We presented the 60 students in the course with ill-typed \ocaml\
% programs and asked them to
% %
% (1) \emph{explain} the type error, and
% %
% (2) \emph{fix} the type error.
% %
% For each problem the student was given the ill-typed program and
% either \ocaml's error message or \toolname's jump-compressed trace.
%
We assigned four problems to the 60 students in the course: the
@sumList@, \hbox{@digitsOfInt@,} and @wwhile@ programs from
\S~\ref{sec:advantage-traces}, as well as the following @append@ program
%
\begin{ecode}
let append x l =
match x with
| [] -> l
| h::t -> h :: t :: l
\end{ecode}
%
which triggers an occurs-check error on line 4.
%
For each problem the students were given the ill-typed program and
either \ocaml's error or \toolname's jump-compressed trace;
the full user study is available in Appendix~\ref{sec:user-study-exams}.
%
Due to the nature of an in-class exam, not every student answered every
question; we received between 13 and 28 (out of a possible 30) responses
for each problem-tool pair.
We then instructed four annotators (one of whom is an author, the other
three are teaching assistants at UCSD) to classify the answers as
correct or incorrect.
%
We performed an inter-rater reliability (IRR) analysis to determine the
degree to which the annotators consistently graded the exams.
\footnote{Measuring IRR is an established practice to account for
potential bias among raters. The students were asked to explain the
errors in English; judging whether they truly understood the errors
involved a surprising amount of subjectivity, and is thus subject to
rater bias.}
%
As we had more than two annotators assigning nominal (``correct'' or
``incorrect'') ratings we used Fleiss' kappa~\cite{Fleiss1971-du} to
measure IRR.\@
%
Fleiss' kappa is measured on a scale from $1$, indicating total
agreement, to $-1$, indicating total disagreement, with $0$ indicating
random agreement.
Finally, we used a one-sided Mann-Whitney $U$ test~\cite{Mann1947-fd} to
determine the significance of our results.
%
The null hypothesis was that the responses from students given
\toolname's witnesses were drawn from the same distribution as those
given \ocaml's errors, \ie \toolname had no effect.
%
Since we used a one-sided test, the alternative to the null hypothesis
is that \toolname had a \emph{positive} effect on the responses.
%
We reject the null hypothesis in favor of the alternative if the test
produces a significance level $p < 0.05$, a standard threshold for
determining statistical significance.
\begin{figure}[t]
% \centerline{
% \begin{minipage}{1.2\textwidth}
\includegraphics[width=0.7\linewidth]{user-study-reason.png}
\includegraphics[width=0.7\linewidth]{user-study-fix.png}
% \end{minipage}
% }
% \vspace{3ex}
\caption{A classification of students' explanations and fixes for type
errors, given either \ocaml's error message or \toolname's
jump-compressed trace. The students given \toolname's jump-compressed
trace consistently scored better ($\ge 10\%$) than those given
\ocaml's type error. We report the result of a one-sided Mann-Whitney
$U$ test for statistical significance in parentheses.}
\label{fig:results-user-study}
\end{figure}
\paragraph{Results}
%
The measured kappa values were $\kappa = 0.72$ for the explanations and
$\kappa = 0.83$ for the fixes; while there is no formal notion for what
constitutes strong agreement~\cite{Krippendorff2012-wd}, kappa values
above $0.60$ are often called ``substantial''
agreement~\cite{Landis1977-ey}.
%
Figure~\ref{fig:results-user-study} summarizes a single annotator's
results, which show that students given \toolname's jump-compressed
trace were consistently more likely to correctly explain
and fix the type error than those given \ocaml's error message.
%
Across each problem the \toolname responses were marked correct
$10-30\%$ more often than the \ocaml responses, which suggests that
the students who had access to \toolname's traces had a better
understanding of the type errors;
%
however, only the @append@ tests were statistically significant at
$p < 0.05$.
%
\paragraph{Threats to Validity}
Measuring understanding is a difficult task; the following summarize
the threats to the validity of our results.
\subparagraph{Construct.}
%
We used the correctness of the student's explanation of, and fix for,
the type error as a proxy for her understanding, but it is possible that
other metrics would produce different results. One such metric that is
also surely relevant is time-to-completion, \ie a good error report
should \emph{quickly} guide the student to a fix. Unfortunately, the
in-class exam setting of our study did not admit the collection of
timing data.
Furthermore, one might object to our selection of \ocaml as
the baseline comparison rather than \sherrloc or \mycroft, which also
claim to produce more accurate error reports. This is indeed a
limitation of our study, but we note that \sherrloc blames the same
expression as \ocaml in both @wwhile@ and @append@, and in @digitsOfInt@
both blame the wrong function.
Finally, one might point out that our study investigates the use of
\toolname as a \emph{debugging} aid rather than as a \emph{teaching}
aid. That is, it may be that \toolname helps students solve their
immediate problem, but does not help them build a lasting understanding
of the type system. We have not attempted a longitudinal study of the
long-term impact of using \toolname, but we agree that it would be an
interesting future direction.
\subparagraph{Internal.}
%
We assigned students randomly to two groups. The first was given
\ocaml's errors for @append@ and \hbox{@digitsOfInt@,} and \toolname's trace
for @sumList@ and \hbox{@wwhile@;} the second was given the opposite
assignment of errors and traces. This assignment ensured that: (1) each
student was given \ocaml and \toolname problems; and (2) each student
was given an ``easy'' and ``hard'' problem for both \ocaml and
\toolname. Students without sufficient knowledge of \ocaml could affect
the results, as could the time-constrained nature of an exam. For these
reasons we excluded any answers left blank from our analysis.
\subparagraph{External.}
%
Our experiment used students in the process of learning \ocaml,
and thus may not generalize to all developers. The four
programs were chosen manually, via a random selection and
filtering of the programs in the \ucsdbench dataset. In some cases we made
minor simplifying edits (\eg alpha-renaming, dead-code removal) to the
programs to make them more understandable in the short timeframe of an
exam; however, we never altered the resulting type-error. A different
selection of programs may lead to different results.
\subparagraph{Conclusion.}
%
We collected exams from 60 students, though due to the nature of the
study not every student completed every problem.
%
The number of complete submissions ranges from 13 (for the \toolname
version of @wwhile@) to 28 (for the \ocaml version of @sumList@), out of
a maximum of 30 per program-tool pair.
%
Our results are statistically significant in only 2 out of 8 tests; however,
collecting more responses per test pair was not possible as it
would require having students answer the same problem twice (once with
\ocaml and once with \toolname).
\input{locating}
\subsection{Discussion}
\label{sec:discussion}
To summarize, our experiments demonstrate that \nanomaly finds witnesses
to type errors:
%
(1) with high coverage in a timespan amenable to compile-time analysis;
%
(2) with traces that have a low median complexity of 5 jumps;
%
(3) that are more helpful to novice programmers than traditional type
error messages; and
%
(4) that can be used to automatically locate the source of a type error.
There are, of course, drawbacks to our approach.
% %
% Five that stand out are:
% %
% (1) coverage limits due to random generation;
% %
% (2) dealing with explosions in the size of generated traces;
% %
% %(3) the inability to handle certain instances of infinite types; and
% (3) our use of a non-parametric function type;
% %
% (4) handling ad-hoc polymorphism; and
% %
% (5) traversal bias.
%
In the sequel we discuss a selection of drawbacks, and how we might
address them in future work.
\paragraph{Random Generation}
Random test generation has difficulty generating highly constrained
values, \eg\ red-black trees or a pair of equal integers. If the type
error is hidden behind a complex branch condition \nanomaly\ may not be
able to trigger it. Exhaustive testing and dynamic-symbolic execution
can address this short-coming by performing an exhaustive search for
inputs (\emph{resp}.\ paths through the program). Our approach does not
depend on random generation specifically; we could easily replace it with
dynamic-symbolic execution by extending the evaluation relation to
maintain a path condition and replacing the \gensym function with a call
to the constraint solver.
%
As our experiments show, however, novice programs do not appear to
require more advanced search techniques, likely because they tend to be
simple.
% \paragraph{Infinite Types}
% Our implementation does check for infinite types inside \forcesym, but
% there are some degenerate cases where it is unable to detect
% them. Consider the following buggy @replicate@.
% %
% \begin{code}
% let rec replicate n x =
% if n <= 0 then
% []
% else
% replicate (n-1) [x]
% \end{code}
% %
% This code produces a nested list (with @n@ levels of nesting) containing
% a single copy of @x@, instead of a list with @n@ copies of @x@. \ocaml\
% detects a cyclic \hbox{@'a = 'a list@} constraint in the recursive call
% and throws a type error, whereas \nanomaly\ happily recurses @n@ times to
% produces the nested list. Strictly speaking, this function itself cannot
% ``go wrong'', the program would not get stuck until a \emph{client}
% attempted to use the result expecting a flat list. But this is not very
% satisfying as @replicate@ is clearly to blame. Furthermore, in our
% experience, infinite-type errors are often difficult to %some of the more difficult ones to
% debug (and to explain to novices), so better support for this scenario
% would be useful.
\paragraph{Trace Explosion}
Though the average complexity of our generated traces is low in terms of
jumps, there are some extreme outliers.
%
We cannot reasonably expect a novice user to explore a trace containing
50+ terms and draw a conclusion about which pieces contributed to the
bug in their program.
%
Enhancing our visualization to slice out program paths relevant to
specific values~\cite{Perera2012-dy}, would likely help alleviate this
issue, allowing users to highlight a confusing value and ask: ``Where
did this come from?''
\paragraph{Non-Parametric Function Type}
As we discussed in \S~\ref{sec:how-safe} some ill-typed programs
lack a witness in our semantics due to our use of a non-parametric
type $\tfun$ for functions.
%
These programs cannot ``go wrong'', strictly speaking, but would be very
difficult to \emph{use} in practice.
%
We also note that many of these programs induce cyclic typing constraints,
causing infinite-type errors, which in our experience can be particularly
difficult to debug (and to explain to novices).
%
Better support for these programs would be welcome.
%
For example, we might track how the types of inputs change between
recursive calls.
%
If we cannot find a traditional witness, we could then produce a trace
expanded to show these particular steps.
\paragraph{Ad-Hoc Polymorphism}
% Our approach can only support ad-hoc polymorphism (\eg\ type-classes in
% \haskell\ or polymorphic comparison functions in \ocaml) in limited cases
% where we have enough typing information at the call-site to resolve the
% overloading. For example, consider the @n <= 0@ test in our @fac@ example.
% @<=@ is polymorphic in \ocaml, but in this case we can make progress because
% the literal @0@ is not. If we parameterized @fac@ by a lower bound, \eg
% %
% \begin{code}
% let rec fac n m =
% if n <= m then
% 1
% else
% n * fac (n - 1) m
% \end{code}
% %
% and called @fac@ with two holes, we would get stuck at the @n <= m@
% test; not because of a type error, but because all we know about
% @n@ and @m@ at that point is that they must have the same (unknown)
% type.
Also discussed in \S~\ref{sec:how-safe}, our approach can only support
ad-hoc polymorphism (\eg\ type-classes in \haskell\ or polymorphic
comparison functions in \ocaml) in limited cases where we have enough
typing information at the call-site to resolve the overloading. This
issue is uncommon in \ocaml\ (we detected it in around 5\% of our
benchmarks), but it would surely be exacerbated by a language like
\haskell, which overloads not only functions but also numeric literals,
as well as strings and lists if one enables the respective language
extensions.
%
We suspect that either dynamic-symbolic execution or speculative
instantiation of holes would allow us to handle ad-hoc polymorphism, but
defer a proper treatment to future work.
\paragraph{Traversal Bias}
A common problem with typecheckers is that the order in which the
typechecker traverses the abstract syntax tree \emph{biases} it in favor
of blaming expressions that are seen later~\cite{McAdam1998-ub}. This
usually takes the form of a left-to-right bias with respect to the
source code (terms that appear later \emph{textually} are more likely to
be blamed), but in our case the bias is with respect to the execution
trace.
Incorporating our notion of type sources from \S~\ref{sec:locating} into
the visualization, \eg by including those reductions in the initial
visualization, may help alleviate our bias in a similar manner to
McAdam's proposal. Hage and Heeren~\shortcite{Hage2009-em} offer another
solution that allows the compiler author to selectively control the
bias, and thus produce better errors, by prioritizing typing
constraints. Unfortunately, due to \toolname's dynamic nature, providing
this sort of control would likely require selectively changing the order
of evaluation; while sound for the pure subset of \ocaml that we address,
this could nonetheless confuse newcomers to the language even more.
% Several solutions to the bias problem have been proposed.
% %
% Hage and Heeren~\shortcite{Hage2009-em} describe a system that allows
% the compiler author to selectively control the bias and thus produce
% better errors, while McAdam~\shortcite{McAdam1998-ub},
\paragraph{Side Effects}
While our implementation does not currently support side effects, we
suspect it would not be difficult to add support.
%
The search procedure is easily extended to support mutation by
maintaining a store in the evaluation relation, the main complexity
would be in extending the trace visualization to demonstrate mutation.
%
Incorporating mutation directly into our reduction-based visualization
would be difficult, as mutation would have non-local effects on the
expressions and may be difficult for students to follow.
%
Instead, we could follow the example of
%
\textsc{Python Tutor}~\cite{GuoSIGCSE2013}
%
and provide a separate visualization of the mutable store, with references
visualized as constant pointers to changing objects.
% \begin{itemize}
% \item benchmarks: our data + seminal data
% \item both cases: \textbf{random} search sufficient to trigger runtime crash in 80\% of programs
% \item how many of the ``safe'' programs are actually safe??
% \end{itemize}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "main"
%%% End:
%!TEX root = main.tex
|
ucsd-progsys/nanomaly
|
paper/jfp-submission/evaluation.tex
|
TeX
|
bsd-3-clause
| 33,563
|
package org.hisp.dhis.common;
/*
* Copyright (c) 2004-2018, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* Enum representing web API versions. The API version is exposed through
* the API URL at <code>/api/{version}/{resource}</code>, where <code>{version}</code>
* is a numeric value and must match a value of this enum. If omitted, the
* <code>DEFAULT</code> value will be used. The API resources can also be mapped
* to all versions using the <code>ALL</code> value.
* <p>
* TODO The <code>DEFAULT</code> version must be updated for each release.
*
* @author Morten Olav Hansen <mortenoh@gmail.com>
*/
public enum DhisApiVersion
{
    ALL( -1, true ),
    V26( 26 ),
    V27( 27 ),
    V28( 28 ),
    V29( 29 ),
    V30( 30 ),
    V31( 31 ),
    DEFAULT( V31.getVersion() );

    /** The numeric API version, or -1 for the catch-all ALL value. */
    final int version;

    /** Whether this value is excluded from version matching (true only for ALL). */
    final boolean ignore;

    /**
     * Cached copy of values(). {@link Enum#values()} clones the backing array
     * on every call, so reuse a single copy for lookups in
     * {@link #getVersion(int)}.
     */
    private static final DhisApiVersion[] VALUES = values();

    DhisApiVersion( int version )
    {
        this.version = version;
        this.ignore = false;
    }

    DhisApiVersion( int version, boolean ignore )
    {
        this.version = version;
        this.ignore = ignore;
    }

    /**
     * Returns the numeric API version.
     */
    public int getVersion()
    {
        return version;
    }

    /**
     * Returns the version as a string suitable for use in an API URL; the
     * DEFAULT version maps to the empty string (no version segment).
     */
    public String getVersionString()
    {
        return this == DEFAULT ? "" : String.valueOf( version );
    }

    /**
     * Indicates whether this value is excluded from version matching.
     */
    public boolean isIgnore()
    {
        return ignore;
    }

    /**
     * Indicates whether this version is equal to the given
     * version.
     *
     * @param apiVersion the API version.
     */
    public boolean eq( DhisApiVersion apiVersion )
    {
        return version == apiVersion.getVersion();
    }

    /**
     * Indicates whether this version is less than the given
     * version.
     *
     * @param apiVersion the API version.
     */
    public boolean lt( DhisApiVersion apiVersion )
    {
        return version < apiVersion.getVersion();
    }

    /**
     * Indicates whether this version is less than or equal to
     * the given version.
     *
     * @param apiVersion the API version.
     */
    public boolean le( DhisApiVersion apiVersion )
    {
        return version <= apiVersion.getVersion();
    }

    /**
     * Indicates whether this version is greater than the given
     * version.
     *
     * @param apiVersion the API version.
     */
    public boolean gt( DhisApiVersion apiVersion )
    {
        return version > apiVersion.getVersion();
    }

    /**
     * Indicates whether this version is greater than or equal to
     * the given version.
     *
     * @param apiVersion the API version.
     */
    public boolean ge( DhisApiVersion apiVersion )
    {
        return version >= apiVersion.getVersion();
    }

    /**
     * Returns the enum value whose numeric version matches the given number,
     * or {@code DEFAULT} if no value matches.
     *
     * @param version the numeric API version.
     */
    public static DhisApiVersion getVersion( int version )
    {
        for ( DhisApiVersion v : VALUES )
        {
            if ( version == v.getVersion() )
            {
                return v;
            }
        }

        return DEFAULT;
    }
}
|
msf-oca-his/dhis-core
|
dhis-2/dhis-api/src/main/java/org/hisp/dhis/common/DhisApiVersion.java
|
Java
|
bsd-3-clause
| 4,463
|
/**
* Copyright (c) 2015-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @flow
* @format
*/
'use strict';
const AsyncTaskGroup = require('../lib/AsyncTaskGroup');
const MapWithDefaults = require('../lib/MapWithDefaults');
const debug = require('debug')('RNP:DependencyGraph');
const util = require('util');
const path = require('path');
const realPath = require('path');
const invariant = require('fbjs/lib/invariant');
const isAbsolutePath = require('absolute-path');
import type {HasteFS} from '../types';
import type DependencyGraphHelpers from './DependencyGraphHelpers';
import type ResolutionResponse from './ResolutionResponse';
import type {
Options as TransformWorkerOptions,
} from '../../JSTransformer/worker/worker';
import type {ReadResult, CachedReadResult} from '../Module';
type DirExistsFn = (filePath: string) => boolean;
/**
* `jest-haste-map`'s interface for ModuleMap.
*/
export type ModuleMap = {
getModule(
name: string,
platform: ?string,
supportsNativePlatform: boolean,
): ?string,
getPackage(
name: string,
platform: ?string,
supportsNativePlatform: boolean,
): ?string,
};
export type Packageish = {
isHaste(): boolean,
getName(): Promise<string>,
path: string,
redirectRequire(toModuleName: string): string | false,
getMain(): string,
+root: string,
};
export type Moduleish = {
+path: string,
isHaste(): boolean,
getName(): Promise<string>,
getPackage(): ?Packageish,
hash(): string,
readCached(transformOptions: TransformWorkerOptions): CachedReadResult,
readFresh(transformOptions: TransformWorkerOptions): Promise<ReadResult>,
};
export type ModuleishCache<TModule, TPackage> = {
getPackage(
name: string,
platform?: string,
supportsNativePlatform?: boolean,
): TPackage,
getModule(path: string): TModule,
getAssetModule(path: string): TModule,
};
type Options<TModule, TPackage> = {|
+dirExists: DirExistsFn,
+entryPath: string,
+extraNodeModules: ?Object,
+hasteFS: HasteFS,
+helpers: DependencyGraphHelpers,
+moduleCache: ModuleishCache<TModule, TPackage>,
+moduleMap: ModuleMap,
+platform: ?string,
+preferNativePlatform: boolean,
+resolveAsset: (dirPath: string, assetName: string) => $ReadOnlyArray<string>,
+sourceExts: Array<string>,
|};
/**
* It may not be a great pattern to leverage exception just for "trying" things
* out, notably for performance. We should consider replacing these functions
 * to be nullable-returning, or to give better structure to the algorithm.
*/
function tryResolveSync<T>(action: () => T, secondaryAction: () => T): T {
try {
return action();
} catch (error) {
if (error.type !== 'UnableToResolveError') {
throw error;
}
return secondaryAction();
}
}
class ResolutionRequest<TModule: Moduleish, TPackage: Packageish> {
_doesFileExist = filePath => this._options.hasteFS.exists(filePath);
_immediateResolutionCache: {[key: string]: TModule};
_options: Options<TModule, TPackage>;
static EMPTY_MODULE: string = require.resolve('./assets/empty-module.js');
  /**
   * Stores the resolution options and starts with an empty per-request
   * resolution cache (via `_resetResolutionCache`, defined elsewhere in
   * this file).
   */
  constructor(options: Options<TModule, TPackage>) {
    this._options = options;
    this._resetResolutionCache();
  }
_tryResolve<T>(
action: () => Promise<T>,
secondaryAction: () => ?Promise<T>,
): Promise<T> {
return action().catch(error => {
if (error.type !== 'UnableToResolveError') {
throw error;
}
return secondaryAction();
});
}
  /**
   * Resolves `toModuleName`, as required from `fromModule`, to a concrete
   * module, memoizing the result per (fromModule.path, toModuleName) pair.
   *
   * Haste-map resolution is only attempted for names that are neither
   * relative nor absolute and that are required from outside `node_modules`;
   * everything else goes straight to Node-style resolution.
   */
  resolveDependency(fromModule: TModule, toModuleName: string): TModule {
    // Fast path: reuse a previously computed resolution for this pair.
    const resHash = resolutionHash(fromModule.path, toModuleName);
    const immediateResolution = this._immediateResolutionCache[resHash];
    if (immediateResolution) {
      return immediateResolution;
    }
    const cacheResult = result => {
      this._immediateResolutionCache[resHash] = result;
      return result;
    };
    if (
      !this._options.helpers.isNodeModulesDir(fromModule.path) &&
      !(isRelativeImport(toModuleName) || isAbsolutePath(toModuleName))
    ) {
      // Try the Haste map first; fall back to Node resolution when the
      // lookup throws UnableToResolveError (see tryResolveSync).
      const result = tryResolveSync(
        () => this._resolveHasteDependency(fromModule, toModuleName),
        () => this._resolveNodeDependency(fromModule, toModuleName),
      );
      return cacheResult(result);
    }
    return cacheResult(this._resolveNodeDependency(fromModule, toModuleName));
  }
resolveModuleDependencies(
module: TModule,
dependencyNames: $ReadOnlyArray<string>,
): [$ReadOnlyArray<string>, $ReadOnlyArray<TModule>] {
const dependencies = dependencyNames.map(name =>
this.resolveDependency(module, name),
);
return [dependencyNames, dependencies];
}
  /**
   * Resolves the transitive dependencies of the configured entry path and
   * pushes them onto `response`, deduplicated, in traversal order.
   *
   * When `recursive` is set, the graph is first walked synchronously
   * (`_preprocessPotentialDependencies`) to warm caches and obtain an early
   * estimate of the total module count for progress reporting.
   *
   * Returns a promise that settles once every reachable module has been
   * pushed onto `response`.
   */
  getOrderedDependencies<T>({
    response,
    transformOptions,
    onProgress,
    recursive = true,
  }: {
    response: ResolutionResponse<TModule, T>,
    transformOptions: TransformWorkerOptions,
    onProgress?: ?(finishedModules: number, totalModules: number) => mixed,
    recursive: boolean,
  }) {
    const entry = this._options.moduleCache.getModule(this._options.entryPath);
    response.pushDependency(entry);
    // Progress accounting: modules discovered vs. crawled so far; the
    // preprocessing pass may raise the estimated total ahead of the crawl.
    let totalModules = 1;
    let finishedModules = 0;
    let preprocessedModuleCount = 1;
    if (recursive) {
      this._preprocessPotentialDependencies(transformOptions, entry, count => {
        // Only report monotonically increasing counts (+1 for the entry).
        if (count + 1 <= preprocessedModuleCount) {
          return;
        }
        preprocessedModuleCount = count + 1;
        if (onProgress != null) {
          onProgress(finishedModules, preprocessedModuleCount);
        }
      });
    }
    // Resolves a module's dependency names, preferring the cached read and
    // falling back to a fresh (async) read.
    const resolveDependencies = (module: TModule) =>
      Promise.resolve().then(() => {
        const cached = module.readCached(transformOptions);
        if (cached.result != null) {
          return this.resolveModuleDependencies(
            module,
            cached.result.dependencies,
          );
        }
        return module
          .readFresh(transformOptions)
          .then(({dependencies}) =>
            this.resolveModuleDependencies(module, dependencies),
          );
      });
    // Memoizes the in-flight/finished crawl of each module, so every module
    // is collected at most once.
    const collectedDependencies: MapWithDefaults<
      TModule,
      Promise<Array<TModule>>,
    > = new MapWithDefaults(module => collect(module));
    const crawlDependencies = (mod, [depNames, dependencies]) => {
      // Drop unresolvable dependencies with a warning instead of failing.
      const filteredPairs = [];
      dependencies.forEach((modDep, i) => {
        const name = depNames[i];
        if (modDep == null) {
          debug(
            'WARNING: Cannot find required module `%s` from module `%s`',
            name,
            mod.path,
          );
          return false;
        }
        return filteredPairs.push([name, modDep]);
      });
      response.setResolvedDependencyPairs(mod, filteredPairs);
      const dependencyModules = filteredPairs.map(([, m]) => m);
      const newDependencies = dependencyModules.filter(
        m => !collectedDependencies.has(m),
      );
      if (onProgress) {
        finishedModules += 1;
        totalModules += newDependencies.length;
        onProgress(
          finishedModules,
          Math.max(totalModules, preprocessedModuleCount),
        );
      }
      if (recursive) {
        // doesn't block the return of this function invocation, but defers
        // the resolution of collectionsInProgress.done.then(...)
        dependencyModules.forEach(dependency =>
          collectedDependencies.get(dependency),
        );
      }
      return dependencyModules;
    };
    // Tracks outstanding crawls; `done` resolves when no crawl is running.
    const collectionsInProgress = new AsyncTaskGroup();
    function collect(module) {
      collectionsInProgress.start(module);
      const result = resolveDependencies(module).then(deps =>
        crawlDependencies(module, deps),
      );
      const end = () => collectionsInProgress.end(module);
      result.then(end, end);
      return result;
    }
    return Promise.all([
      // kicks off recursive dependency discovery, but doesn't block until it's
      // done
      collectedDependencies.get(entry),
      // resolves when there are no more modules resolving dependencies
      collectionsInProgress.done,
    ])
      .then(([rootDependencies]) => {
        // resolveKeyWithPromise (defined elsewhere in this file) presumably
        // pairs each map key with its settled promise value — verify there.
        return Promise.all(
          Array.from(collectedDependencies, resolveKeyWithPromise),
        ).then(moduleToDependenciesPairs => [
          rootDependencies,
          new MapWithDefaults(() => [], moduleToDependenciesPairs),
        ]);
      })
      .then(([rootDependencies, moduleDependencies]) => {
        // serialize dependencies, and make sure that every single one is only
        // included once
        const seen = new Set([entry]);
        function traverse(dependencies) {
          dependencies.forEach(dependency => {
            if (seen.has(dependency)) {
              return;
            }
            seen.add(dependency);
            response.pushDependency(dependency);
            traverse(moduleDependencies.get(dependency));
          });
        }
        traverse(rootDependencies);
      });
  }
  /**
   * This synchronously looks at all the specified modules and recursively kicks
   * off global cache fetching or transforming (via `readFresh`). This is a hack
   * that works around the current structure; we could do better. First
   * off, the algorithm that resolves dependencies recursively should be
   * synchronous itself until it cannot progress anymore (and needs to call
   * `readFresh`), so that this algorithm would be integrated into it.
   */
  _preprocessPotentialDependencies(
    transformOptions: TransformWorkerOptions,
    module: TModule,
    onProgress: (moduleCount: number) => mixed,
  ): void {
    // Iterative worklist traversal; visitedModulePaths guarantees each
    // module is preprocessed at most once.
    const visitedModulePaths = new Set();
    const pendingBatches = [
      this.preprocessModule(transformOptions, module, visitedModulePaths),
    ];
    onProgress(visitedModulePaths.size);
    while (pendingBatches.length > 0) {
      const dependencyModules = pendingBatches.pop();
      while (dependencyModules.length > 0) {
        const dependencyModule = dependencyModules.pop();
        const deps = this.preprocessModule(
          transformOptions,
          dependencyModule,
          visitedModulePaths,
        );
        pendingBatches.push(deps);
        // Report after every module so progress updates stay fine-grained.
        onProgress(visitedModulePaths.size);
      }
    }
  }
preprocessModule(
transformOptions: TransformWorkerOptions,
module: TModule,
visitedModulePaths: Set<string>,
): Array<TModule> {
const cached = module.readCached(transformOptions);
if (cached.result == null) {
module.readFresh(transformOptions).catch(error => {
/* ignore errors, they'll be handled later if the dependency is actually
* not obsolete, and required from somewhere */
});
}
const dependencies = cached.result != null
? cached.result.dependencies
: cached.outdatedDependencies;
return this.tryResolveModuleDependencies(
module,
dependencies,
visitedModulePaths,
);
}
tryResolveModuleDependencies(
module: TModule,
dependencyNames: $ReadOnlyArray<string>,
visitedModulePaths: Set<string>,
): Array<TModule> {
const result = [];
for (let i = 0; i < dependencyNames.length; ++i) {
try {
const depModule = this.resolveDependency(module, dependencyNames[i]);
if (!visitedModulePaths.has(depModule.path)) {
visitedModulePaths.add(depModule.path);
result.push(depModule);
}
} catch (error) {
if (!(error instanceof UnableToResolveError)) {
throw error;
}
}
}
return result;
}
  /**
   * Resolves `toModuleName` through the Haste module map: first as an exact
   * Haste module name, then as a path inside a Haste package (walking the
   * name's parent directories until an owning package is found).
   *
   * Throws `UnableToResolveError` if the name matches neither a Haste module
   * nor a path within a Haste package.
   */
  _resolveHasteDependency(fromModule: TModule, toModuleName: string): TModule {
    toModuleName = normalizePath(toModuleName);
    // Apply the requiring package's redirectRequire rules, if any.
    const pck = fromModule.getPackage();
    let realModuleName;
    if (pck) {
      /* $FlowFixMe: redirectRequire can actually return `false` for
         exclusions*/
      realModuleName = (pck.redirectRequire(toModuleName): string);
    } else {
      realModuleName = toModuleName;
    }
    // Exact Haste module name match.
    const modulePath = this._options.moduleMap.getModule(
      realModuleName,
      this._options.platform,
      /* supportsNativePlatform */ true,
    );
    if (modulePath != null) {
      const module = this._options.moduleCache.getModule(modulePath);
      /* temporary until we strengthen the typing */
      invariant(module.type === 'Module', 'expected Module type');
      return module;
    }
    // Otherwise treat the name as a path inside a Haste package: strip path
    // segments from the right until a known package name is found.
    let packageName = realModuleName;
    let packagePath;
    while (packageName && packageName !== '.') {
      packagePath = this._options.moduleMap.getPackage(
        packageName,
        this._options.platform,
        /* supportsNativePlatform */ true,
      );
      if (packagePath != null) {
        break;
      }
      packageName = path.dirname(packageName);
    }
    if (packagePath != null) {
      const package_ = this._options.moduleCache.getPackage(packagePath);
      /* temporary until we strengthen the typing */
      invariant(package_.type === 'Package', 'expected Package type');
      // Re-root the remainder of the module name under the package root,
      // then try it as a file and then as a directory.
      const potentialModulePath = path.join(
        package_.root,
        path.relative(packageName, realModuleName),
      );
      return tryResolveSync(
        () => this._loadAsFile(potentialModulePath, fromModule, toModuleName),
        () => this._loadAsDir(potentialModulePath, fromModule, toModuleName),
      );
    }
    throw new UnableToResolveError(
      fromModule,
      toModuleName,
      'Unable to resolve dependency',
    );
  }
_redirectRequire(fromModule: TModule, modulePath: string): string | false {
const pck = fromModule.getPackage();
if (pck) {
return pck.redirectRequire(modulePath);
}
return modulePath;
}
  /**
   * Resolves a relative or absolute require: computes the candidate path
   * relative to the requiring module (or normalizes a Windows-style absolute
   * path), applies package `redirectRequire` rules, then tries the candidate
   * first as a file and then as a directory.
   */
  _resolveFileOrDir(fromModule: TModule, toModuleName: string): TModule {
    const potentialModulePath = isAbsolutePath(toModuleName)
      ? resolveWindowsPath(toModuleName)
      : path.join(path.dirname(fromModule.path), toModuleName);
    const realModuleName = this._redirectRequire(
      fromModule,
      potentialModulePath,
    );
    // `false` means the package explicitly excludes this module; substitute
    // the canonical empty module in its place.
    if (realModuleName === false) {
      return this._loadAsFile(
        ResolutionRequest.EMPTY_MODULE,
        fromModule,
        toModuleName,
      );
    }
    return tryResolveSync(
      () => this._loadAsFile(realModuleName, fromModule, toModuleName),
      () => this._loadAsDir(realModuleName, fromModule, toModuleName),
    );
  }
/**
 * Resolves a dependency Node.js-style: relative/absolute specifiers
 * resolve directly; bare package names are searched in every enclosing
 * `node_modules` directory (closest first) plus any configured
 * `extraNodeModules` remappings. Throws UnableToResolveError with an
 * actionable message when nothing matches.
 */
_resolveNodeDependency(fromModule: TModule, toModuleName: string): TModule {
  if (isRelativeImport(toModuleName) || isAbsolutePath(toModuleName)) {
    return this._resolveFileOrDir(fromModule, toModuleName);
  }
  const realModuleName = this._redirectRequire(fromModule, toModuleName);
  // exclude: `false` marks a module explicitly stubbed out by its package.
  if (realModuleName === false) {
    return this._loadAsFile(
      ResolutionRequest.EMPTY_MODULE,
      fromModule,
      toModuleName,
    );
  }
  // A redirection may turn a bare name into a path; anchor it inside the
  // requiring module's own package directory.
  if (isRelativeImport(realModuleName) || isAbsolutePath(realModuleName)) {
    // derive absolute path /.../node_modules/fromModuleDir/realModuleName
    // 13 === ('node_modules' + path.sep).length (12 + 1-char separator)
    const fromModuleParentIdx =
      fromModule.path.lastIndexOf('node_modules' + path.sep) + 13;
    const fromModuleDir = fromModule.path.slice(
      0,
      fromModule.path.indexOf(path.sep, fromModuleParentIdx),
    );
    const absPath = path.join(fromModuleDir, realModuleName);
    return this._resolveFileOrDir(fromModule, absPath);
  }
  // Walk up from the requiring module's directory collecting candidate
  // `node_modules/<name>` paths, closest directory first.
  const searchQueue = [];
  for (
    let currDir = path.dirname(fromModule.path);
    currDir !== '.' && currDir !== realPath.parse(fromModule.path).root;
    currDir = path.dirname(currDir)
  ) {
    const searchPath = path.join(currDir, 'node_modules');
    searchQueue.push(path.join(searchPath, realModuleName));
  }
  // `extraNodeModules` lets the host remap a bare package name to an
  // arbitrary location; those candidates are tried after the regular ones.
  const extraSearchQueue = [];
  if (this._options.extraNodeModules) {
    const {extraNodeModules} = this._options;
    const bits = toModuleName.split(path.sep);
    const packageName = bits[0];
    if (extraNodeModules[packageName]) {
      bits[0] = extraNodeModules[packageName];
      extraSearchQueue.push(path.join.apply(path, bits));
    }
  }
  const fullSearchQueue = searchQueue.concat(extraSearchQueue);
  for (let i = 0; i < fullSearchQueue.length; ++i) {
    const resolvedModule = this._tryResolveNodeDep(
      fullSearchQueue[i],
      fromModule,
      toModuleName,
    );
    if (resolvedModule != null) {
      return resolvedModule;
    }
  }
  // Nothing matched: report only the search directories that actually
  // exist, to keep the error message focused.
  const displaySearchQueue = searchQueue
    .filter(dirPath => this._options.dirExists(dirPath))
    .concat(extraSearchQueue);
  const hint = displaySearchQueue.length ? ' or in these directories:' : '';
  throw new UnableToResolveError(
    fromModule,
    toModuleName,
    `Module does not exist in the module map${hint}\n` +
      displaySearchQueue
        .map(searchPath => ` ${path.dirname(searchPath)}\n`)
        .join(', ') +
      '\n' +
      `This might be related to https://github.com/facebook/react-native/issues/4968\n` +
      `To resolve try the following:\n` +
      ` 1. Clear watchman watches: \`watchman watch-del-all\`.\n` +
      ` 2. Delete the \`node_modules\` folder: \`rm -rf node_modules && npm install\`.\n` +
      ' 3. Reset packager cache: `rm -fr $TMPDIR/react-*` or `npm start -- --reset-cache`.',
  );
}
/**
* This is written as a separate function because "try..catch" blocks cause
* the entire surrounding function to be deoptimized.
*/
_tryResolveNodeDep(
searchPath: string,
fromModule: TModule,
toModuleName: string,
): ?TModule {
try {
return tryResolveSync(
() => this._loadAsFile(searchPath, fromModule, toModuleName),
() => this._loadAsDir(searchPath, fromModule, toModuleName),
);
} catch (error) {
if (error.type !== 'UnableToResolveError') {
throw error;
}
return null;
}
}
/**
 * Resolves `basepath` to a concrete file module, expanding platform- and
 * extension-specific variants (see _tryToResolveAllFileNames). Asset
 * files are handled separately. Throws UnableToResolveError listing every
 * candidate file name that was probed.
 */
_loadAsFile(
  basepath: string,
  fromModule: TModule,
  toModule: string,
): TModule {
  if (this._options.helpers.isAssetFile(basepath)) {
    return this._loadAsAssetFile(basepath, fromModule, toModule);
  }
  const dirPath = path.dirname(basepath);
  const doesFileExist = this._doesFileExist;
  // The resolver records every file name it probes so a failure can
  // report exactly what was tried.
  const resolver = new FileNameResolver({doesFileExist, dirPath});
  const fileNamePrefix = path.basename(basepath);
  const fileName = this._tryToResolveAllFileNames(resolver, fileNamePrefix);
  if (fileName != null) {
    return this._options.moduleCache.getModule(path.join(dirPath, fileName));
  }
  throw new UnableToResolveError(
    fromModule,
    toModule,
    `Could not resolve the base path \`${basepath}' into a module. The ` +
      `folder \`${dirPath}' was searched for one of these files: ` +
      resolver
        .getTentativeFileNames()
        .map(filePath => `\`${filePath}'`)
        .join(', ') +
      '.',
  );
}
/**
 * Resolves an asset path to an asset module. When several variant files
 * match, the lexicographically lowest name is chosen (getArrayLowestItem)
 * so the pick is deterministic. Throws when the directory has no
 * matching asset.
 */
_loadAsAssetFile(
  potentialModulePath: string,
  fromModule: TModule,
  toModule: string,
): TModule {
  const dirPath = path.dirname(potentialModulePath);
  const baseName = path.basename(potentialModulePath);
  const assetNames = this._options.resolveAsset(dirPath, baseName);
  const assetName = getArrayLowestItem(assetNames);
  if (assetName != null) {
    const assetPath = path.join(dirPath, assetName);
    return this._options.moduleCache.getAssetModule(assetPath);
  }
  throw new UnableToResolveError(
    fromModule,
    toModule,
    `Directory \`${dirPath}' doesn't contain asset \`${baseName}'`,
  );
}
/**
* A particular 'base path' can resolve to a number of possibilities depending
* on the context. For example `foo/bar` could resolve to `foo/bar.ios.js`, or
* to `foo/bar.js`. If can also resolve to the bare path `foo/bar` itself, as
* supported by Node.js resolution. On the other hand it doesn't support
* `foo/bar.ios`, for historical reasons.
*/
_tryToResolveAllFileNames(
resolver: FileNameResolver,
fileNamePrefix: string,
): ?string {
if (resolver.tryToResolveFileName(fileNamePrefix)) {
return fileNamePrefix;
}
const {sourceExts} = this._options;
for (let i = 0; i < sourceExts.length; i++) {
const fileName = this._tryToResolveFileNamesForExt(
fileNamePrefix,
resolver,
sourceExts[i],
);
if (fileName != null) {
return fileName;
}
}
return null;
}
/**
* For a particular extension, ex. `js`, we want to try a few possibilities,
* such as `foo.ios.js`, `foo.native.js`, and of course `foo.js`.
*/
_tryToResolveFileNamesForExt(
fileNamePrefix: string,
resolver: FileNameResolver,
ext: string,
): ?string {
const {platform, preferNativePlatform} = this._options;
if (platform != null) {
const fileName = `${fileNamePrefix}.${platform}.${ext}`;
if (resolver.tryToResolveFileName(fileName)) {
return fileName;
}
}
if (preferNativePlatform) {
const fileName = `${fileNamePrefix}.native.${ext}`;
if (resolver.tryToResolveFileName(fileName)) {
return fileName;
}
}
const fileName = `${fileNamePrefix}.${ext}`;
return resolver.tryToResolveFileName(fileName) ? fileName : null;
}
/**
 * Resolves a directory to a module: when the directory contains a
 * package.json, follow its `main` entry (itself resolved as
 * file-then-dir); otherwise fall back to the directory's `index` file.
 */
_loadAsDir(
  potentialDirPath: string,
  fromModule: TModule,
  toModule: string,
): TModule {
  const packageJsonPath = path.join(potentialDirPath, 'package.json');
  if (this._options.hasteFS.exists(packageJsonPath)) {
    const main = this._options.moduleCache
      .getPackage(packageJsonPath)
      .getMain();
    return tryResolveSync(
      () => this._loadAsFile(main, fromModule, toModule),
      () => this._loadAsDir(main, fromModule, toModule),
    );
  }
  return this._loadAsFile(
    path.join(potentialDirPath, 'index'),
    fromModule,
    toModule,
  );
}
/**
 * Drops all memoized resolutions; the prototype-less object avoids
 * accidental key collisions with Object.prototype properties.
 */
_resetResolutionCache() {
  this._immediateResolutionCache = Object.create(null);
}
}
function resolutionHash(modulePath, depName) {
return `${path.resolve(modulePath)}:${depName}`;
}
// Options consumed by FileNameResolver: the directory being probed and a
// predicate used to test whether a candidate file exists.
type FileNameResolverOptions = {|
  +dirPath: string,
  +doesFileExist: (filePath: string) => boolean,
|};
/**
* When resolving a single module we want to keep track of the list of paths
* we tried to find.
*/
class FileNameResolver {
_options: FileNameResolverOptions;
_tentativeFileNames: Array<string>;
constructor(options: FileNameResolverOptions) {
this._options = options;
this._tentativeFileNames = [];
}
getTentativeFileNames(): $ReadOnlyArray<string> {
return this._tentativeFileNames;
}
tryToResolveFileName(fileName: string): boolean {
this._tentativeFileNames.push(fileName);
const filePath = path.join(this._options.dirPath, fileName);
return this._options.doesFileExist(filePath);
}
}
/**
 * Error thrown when a dependency cannot be resolved. Carries the
 * requiring module's path (`from`), the requested name (`to`), and a
 * `type` tag that catch sites check instead of using `instanceof`
 * (see _tryResolveNodeDep).
 */
class UnableToResolveError extends Error {
  type: string;
  from: string;
  to: string;

  constructor(fromModule, toModule, message) {
    super();
    this.from = fromModule.path;
    this.to = toModule;
    this.message = util.format(
      'Unable to resolve module `%s` from `%s`: %s',
      toModule,
      fromModule.path,
      message,
    );
    // Tag by name so the error survives identity checks across module
    // instances.
    this.type = this.name = 'UnableToResolveError';
  }
}
function normalizePath(modulePath) {
if (path.sep === '/') {
modulePath = path.normalize(modulePath);
} else if (path.posix) {
modulePath = path.posix.normalize(modulePath);
}
return modulePath.replace(/\/$/, '');
}
// HasteFS stores paths with backslashes on Windows, this ensures the path is in
// the proper format. Will also add drive letter if not present so `/root` will
// resolve to `C:\root`. Noop on other platforms.
function resolveWindowsPath(modulePath) {
if (path.sep !== '\\') {
return modulePath;
}
return path.resolve(modulePath);
}
// Resolves the promise half of a [key, promise] pair into [key, value].
function resolveKeyWithPromise(pair) {
  const [key, promise] = pair;
  return promise.then(value => [key, value]);
}
// True for specifiers that start with `./` or `../` (or are exactly
// `.` / `..`); false for bare names and dot-prefixed file names.
function isRelativeImport(filePath) {
  return /^\.\.?(?:\/|$)/.test(filePath);
}
function getArrayLowestItem(a: $ReadOnlyArray<string>): string | void {
if (a.length === 0) {
return undefined;
}
let lowest = a[0];
for (let i = 1; i < a.length; ++i) {
if (a[i] < lowest) {
lowest = a[i];
}
}
return lowest;
}
module.exports = ResolutionRequest;
|
clozr/react-native
|
packager/src/node-haste/DependencyGraph/ResolutionRequest.js
|
JavaScript
|
bsd-3-clause
| 24,449
|
'''
Created on Oct 21, 2011
@author: bolme
'''
import time
from collections import defaultdict
import cProfile
import traceback
import shelve
class EmptyData(object):
    """Sentinel marking a required task-argument slot that has not been filled."""

    def __str__(self):
        return "<MissingData>"
class DefaultData(object):
    """Wraps a task argument's default value; renders a one-line, truncated preview."""

    def __init__(self, default):
        self.default = default

    def __str__(self):
        # Collapse all whitespace to single spaces so the preview is one line,
        # then truncate long values to keep diagnostics readable.
        preview = " ".join(str(self.default).split())
        if len(preview) > 40:
            preview = preview[:37] + "..."
        return "<DefaultData:%s>" % (preview,)
EMPTY_DATA = EmptyData()
#############################################################################
# Video tasks are opperations to be run on a frame.
#############################################################################
class VideoTask(object):
    '''
    Interface and support functions for a video processing task.

    A subclass typically overrides the constructor, which acts as a task
    factory: it creates the task and declares (via ``args``) which data
    items, identified by (data_type, frame_id), must be collected before
    the task can execute.
    '''
    # TODO: optional args should also be added which are included if available but will not delay execution if they are not available.
    def __init__(self, frame_id, args=[]):
        '''
        @param frame_id: the frame_id associated with this task.
        @param args: specification of the data required to execute the task.
            Each entry is (data_type, frame_id) for a required argument, or
            (data_type, frame_id, default) for one with a fallback default.
        '''
        # NOTE(review): mutable default for ``args`` -- appears safe because
        # the list is never mutated here, but confirm before relying on it.
        self.frame_id = frame_id
        self.args = args
        self.task_id = None
        self.label = self.__class__.__name__
        # Subclasses may pre-set subgraph/color for dataflow-graph rendering.
        if not hasattr(self, 'subgraph'):
            self.subgraph = None
        if not hasattr(self, 'color'):
            self.color = None
        # Maps (data_type, frame_id) -> EMPTY_DATA | DefaultData | collected data.
        self._arg_map = {}
        self._added_args = 0  # keep track of how many arguments have been found.
        self._default_args = 0  # keep track of how many arguments are currently default.
        for i in range(len(args)):
            each = args[i]
            dtype = each[0]
            fid = each[1]
            key = (dtype, fid)
            #if self._arg_map.has_key(key):
            #    continue
            if len(each) == 2:
                self._arg_map[key] = EMPTY_DATA
            elif len(each) == 3:
                self._arg_map[key] = DefaultData(each[2])
                self._default_args += 1
            else:
                raise ValueError("Argument should have 2 or 3 values: %s"%each)
        self.collected_args = [False for each in self.args]
        self.processed_args = [each for each in self.args]
        self.distributable = False
        self.is_ready = False

    def addData(self, data_item):
        '''
        Check whether the data item is needed by this task; if so, keep a
        reference to its payload.

        @param data_item: a _VideoDataItem candidate.
        @return: True if the item filled one of this task's argument slots.
        '''
        # Compute the key
        key = (data_item.getType(), data_item.getFrameId())
        # Check if this task needs the data
        if self._arg_map.has_key(key):
            curr_val = self._arg_map[key]
            # If no default, save the data and update counts
            if curr_val == EMPTY_DATA:
                self._arg_map[key] = data_item.getData()
                self._added_args += 1
                return True
            # If there is a default, replace it and update counts
            elif isinstance(curr_val, DefaultData):
                self._arg_map[key] = data_item.getData()
                self._added_args += 1
                self._default_args -= 1
                assert self._default_args >= 0  # Only fails on a counting error.
                return True
        return False

    def ready(self):
        '''
        Returns True if this task is ready to run (every argument collected).
        '''
        return self._added_args == len(self._arg_map)

    def couldRun(self):
        '''
        Returns True if this task could run now, with defaults standing in
        for any arguments that have not arrived.
        '''
        return self._added_args + self._default_args == len(self._arg_map)

    def run(self):
        '''
        Assemble collected (or default) arguments in declaration order and
        invoke execute().
        '''
        args = []
        for i in range(len(self.args)):
            each = self.args[i]
            key = (each[0], each[1])
            if isinstance(self._arg_map[key], DefaultData):
                args.append(self._arg_map[key].default)
            else:
                args.append(self._arg_map[key])
        return self.execute(*args)

    def getFrameId(self):
        '''
        @return: the frame_id associated with this task.
        '''
        return self.frame_id

    def required(self):
        '''
        @return: the list of required data specifications.
        '''
        return self.args

    def execute(self, *args, **kwargs):
        '''
        This is an abstract method that needs to be implemented in subclasses.
        One argument is supplied for each item in the required arguments. This
        method should return a list of new data items. If no data is
        generated by this method an empty list should be returned.
        '''
        raise NotImplementedError("Abstract Method")

    def printInfo(self):
        '''Print a human-readable summary of this task and its argument slots.'''
        print "VideoTask {%s:%d}"%(self.__class__.__name__,self.getFrameId())
        for key in self._arg_map.keys():
            dtype, frame_id = key
            if self._arg_map[key] is EMPTY_DATA or isinstance(self._arg_map[key], DefaultData):
                print " Argument <%s,%d> -> %s"%(dtype,frame_id,str(self._arg_map[key]))
            else:
                # Flatten collected data to one line and truncate for display.
                tmp = str(self._arg_map[key])
                tmp = " ".join(tmp.split())
                if len(tmp) > 40:
                    tmp = tmp[:37] + "..."
                print " Argument <%s,%d> -> %s"%(dtype,frame_id,tmp)
class _VideoDataItem(object):
'''
This class keeps track of data items and when they are used.
'''
def __init__(self,data_tuple):
self._data_type = data_tuple[0]
self._frame_id = data_tuple[1]
self._data = data_tuple[2]
self._touched = 0
def getType(self):
''' Get the item type. '''
return self._data_type
def getFrameId(self):
''' Get the frame id. '''
return self._frame_id
def getData(self):
''' Get the actual data. '''
return self._data
def getKey(self):
''' Get the key. '''
return (self._data_type,self._frame_id)
def touch(self):
''' Count the number of times this data was touched. '''
self._touched += 1
def getTouched(self):
''' Return the number of times the data was touched. '''
return self._touched
def __repr__(self):
return "_VideoDataItem((%s,%s,%s)"%(self._data_type,self._frame_id,self._data)
def vtmProcessor(task_queue, results_queue, options):
    '''
    Worker loop for distributed task execution (Python 2 only: uses the
    legacy ``except Exception, error`` syntax).

    Each task_queue item should have three items (task_id, frame_id, task).
    The command "quit" is used to stop the process.

    Puts (task_id, frame_id, result) on results_queue. If the task raises,
    the result is replaced by the exception and a stack trace is printed.

    NOTE(review): the documented "quit" command is not handled in this
    loop -- a bare "quit" item would fail the 3-way unpack and be reported
    through the generic exception path; confirm intended behavior.
    '''
    while True:
        item = task_queue.get()
        try:
            task_id, frame_id, task = item
            result = task.run()
            results_queue.put((task_id, frame_id, result))
        except Exception, error:
            traceback.print_exc()
            results_queue.put((task_id, frame_id, error))
#############################################################################
# This class manages the workflow for video items.
#############################################################################
# TODO: Should we keep this name?
class VideoTaskManager(object):
    '''
    The framework provided by this class allows complex video processing
    systems to be constructed from simple tasks. Video processing loops
    are often complicated because data needs to persist across many frames
    and many tasks need to complete to solve a video analysis problem.
    This class lets many small, simple tasks be managed in a way that can
    produce a complex and powerful system.

    Tasks request only the data they need, which keeps each task as simple
    as possible, reduces coupling between tasks, and eliminates complex
    video processing loops. The manager handles data buffering and ensures
    each task receives its required data.

    Each task is a listener for data objects; when every data object a
    task requires is available, the task's execute method is called with
    those items as arguments.

    New frames are added with addFrame. Each added frame becomes a data
    item with the frame_id, data type "FRAME", and a pv.Image payload.
    Tasks can register for that frame data or for any data produced by
    other tasks, and are executed when the data becomes available.
    '''

    def __init__(self, debug_level=0, buffer_size=10, show=False):
        '''
        Create a task manager.

        @param debug_level: 0=quiet, 1=errors, 2=warnings, 3=info, 4=verbose
        @type debug_level: int
        @param buffer_size: the size of the frame and data buffer.
        @type buffer_size: int
        @param show: if True, display each frame once it is complete.
        @type show: bool
        '''
        self.debug_level = debug_level
        # Initialize data.
        self.frame_id = 0          # id assigned to the next incoming frame
        self.task_list = []        # pending tasks waiting for data
        self.task_factories = []   # (factory, args, kwargs, profile, task_id)
        self.buffer_size = buffer_size
        self.frame_list = []       # frames buffered until their tasks finish
        self.show = show
        # Initialize information for flow analysis.
        self.flow = defaultdict(set)
        self.task_set = set()
        self.data_set = set((('FRAME',None),('LAST_FRAME',None),))
        self.task_data = defaultdict(dict)
        self.task_id = 0
        self.lastFrameCreated = 0
        # Optional shelve-backed recording/playback of data items.
        self.recording_shelf = None
        self.playback_shelf = None
        self.recording_filter = None
        self.task_filter = None
        self.playback_filter = None
        if self.debug_level >= 3:
            print "TaskManager[INFO]: Initialized"

    def addTaskFactory(self, task_factory, *args, **kwargs):
        '''
        Adds a task factory function to the video task manager. The factory
        is called once for every frame processed, with that frame's
        frame_id as its first argument, and should return a VideoTask
        instance that will process the frame. Three implementations work:

        - A VideoTask class object whose constructor takes a frame_id.
        - A function taking a frame id that creates and returns a task.
        - Any other object implementing __call__ that returns a task.

        Any additional positional or keyword arguments passed here are
        forwarded to the factory after the frame_id argument.

        @param task_factory: a function or callable object that returns a task.
        @type task_factory: callable
        @param profile: Keyword argument. If true, profile data will be
            generated for each call to this task.
        @type profile: True | False
        '''
        self.task_id += 1
        # 'profile' is consumed here; it is not forwarded to the factory.
        profile = False
        if kwargs.has_key('profile'):
            profile = kwargs['profile']
            del kwargs['profile']
        self.task_factories.append((task_factory,args,kwargs,profile,self.task_id))

    def addFrame(self, frame, ilog=None):
        '''
        Adds a new frame to the task manager and then starts processing.

        @param frame: the next frame of video.
        @type frame: pv.Image
        @param ilog: optional image log forwarded to showFrames.
        '''
        # Add the frame to the data manager
        start = time.time()
        frame_data = _VideoDataItem(("FRAME",self.frame_id,frame))
        self._createTasksForFrame(self.frame_id)
        self.addDataItem(frame_data)
        # LAST_FRAME is False here; flush() later publishes True for the
        # final frame.
        last_data = _VideoDataItem(("LAST_FRAME",self.frame_id-1,False))
        self.addDataItem(last_data)
        self.frame_list.append(frame_data)
        # Playback the recording
        if self.playback_shelf != None and self.playback_shelf.has_key(str(self.frame_id)):
            data_items = self.playback_shelf[str(self.frame_id)]
            for each in data_items:
                if self.playback_filter==None or each.getType() in self.playback_filter:
                    self.addDataItem(each)
                    self.data_set.add((each.getKey()[0],None))
                    self.flow[('Playback',each.getType())].add(0)
        # Run any tasks that can be completed with the current data.
        self._runTasks()
        if self.recording_shelf != None:
            self.recording_shelf.sync()
        # Delete old data
        #self._cleanUp()
        stop = time.time()
        # Set up for the next frame and display the results.
        self.frame_id += 1
        self.showFrames(ilog=ilog)
        if self.debug_level >= 3:
            print "TaskManager[INFO]: Frame Processing Time=%0.3fms"%(1000*(stop-start),)

    def addData(self, data_list):
        '''
        Add additional data for the current frame.

        @param data_list: a list of (label, data) tuples.
        '''
        for each in data_list:
            data = _VideoDataItem((each[0],self.frame_id,each[1]))
            self.addDataItem(data)
            self.flow[('Data Input',data.getType())].add(0)
            self.data_set.add((data.getKey()[0],None))

    def addDataItem(self, data_item):
        '''
        Process a new data item: optionally record it, then offer it to
        every pending task.
        '''
        if self.recording_shelf != None:
            frame_id = str(self.frame_id)
            if not self.recording_shelf.has_key(frame_id):
                self.recording_shelf[frame_id] = []
            if self.recording_filter == None or data_item.getType() in self.recording_filter:
                self.recording_shelf[frame_id].append(data_item)
        for task in self.task_list:
            was_added = task.addData(data_item)
            if was_added:
                # Compute the dataflow
                self.flow[(data_item.getKey()[0],task.task_id)].add(data_item.getKey()[1]-task.getFrameId())

    def _createTasksForFrame(self, frame_id):
        '''
        Calls the task factories to create tasks, up to buffer_size frames
        ahead of the given frame.
        '''
        while self.lastFrameCreated < frame_id + self.buffer_size:
            start = time.time()
            count = 0
            for factory,args,kwargs,profile,task_id in self.task_factories:
                task = factory(self.lastFrameCreated,*args,**kwargs)
                task.task_id=task_id
                self.task_data[task.task_id]['class_name'] = task.__class__.__name__
                task.profile=profile
                count += 1
                # task_filter limits which task classes are actually queued.
                if self.task_filter == None or task.__class__.__name__ in self.task_filter:
                    self.task_list += [task]
            stop = time.time() - start
            if self.debug_level >= 3:
                print "TaskManager[INFO]: Created %d new tasks for frame %s. Total Tasks=%d. Time=%0.2fms"%(count,self.lastFrameCreated,len(self.task_list),stop*1000)
            self.lastFrameCreated += 1

    def _runTasks(self, flush=False):
        '''
        Repeatedly evaluate pending tasks until a full pass completes or
        deletes no task.
        '''
        if self.debug_level >= 3: print "TaskManager[INFO]: Running Tasks..."
        while True:
            start_count = len(self.task_list)
            remaining_tasks = []
            for task in self.task_list:
                if self._evaluateTask(task,flush=flush):
                    remaining_tasks.append(task)
            self.task_list = remaining_tasks
            # A pass that changed nothing means no further progress is possible.
            if start_count == len(self.task_list):
                break

    def flush(self):
        '''
        Run all tasks that can be run and then finish up. The LAST_FRAME data
        item will be set to True for the last frame inserted.
        '''
        last_data = _VideoDataItem(("LAST_FRAME",self.frame_id-1,True))
        self.addDataItem(last_data)
        self._runTasks(flush=True)

    def _evaluateTask(self, task, flush=False):
        '''
        Attempts to run a task. Intended to be used within a filter
        operation over task_list.

        @return: False if the task should be deleted and True otherwise.
        '''
        self.task_set.add(task.task_id)
        should_run = False
        if task.ready():
            should_run = True
        elif (flush or self.frame_id - task.getFrameId() > self.buffer_size) and task.couldRun():
            # Past the buffer (or flushing): fall back to default arguments.
            should_run = True
        elif (flush or self.frame_id - task.getFrameId() > self.buffer_size) and not task.couldRun():
            if self.debug_level >= 2:
                print "TaskManager[WARNING]: Task %s for frame %d was not executed."%(task,task.getFrameId())
                task.printInfo()
            # If the task is beyond the buffer, then delete it.
            return False
        # If the task is not ready then skip it for now.
        if not should_run:
            return True
        # Run the task.
        start = time.time()
        # Start the profiler
        if task.profile:
            prof = cProfile.Profile()
            prof.enable()
        # RUN THE TASK
        result = task.run()
        # Stop the profiler and show that information.
        if task.profile:
            prof.disable()
            print
            print "Profiled task:",task.__class__.__name__
            prof.print_stats('time')
            print
        # Check that the task did return a list.
        try:
            len(result)
        except:
            raise Exception("Task did not return a valid list of data.\n Task: %s\n Data:%s"%(task,result))
        # Record the dataflow information.
        for each in result:
            self.flow[(task.task_id,each[0])].add(0)
            self.data_set.add((each[0],task.subgraph))
        # Compute the dataflow
        for i in range(len(task.collected_args)):
            if task.collected_args[i]:
                each = task.processed_args[i]
                self.flow[(each.getKey()[0],task.task_id)].add(each.getKey()[1]-task.getFrameId())
                self.data_set.add((each.getKey()[0],task.subgraph))
        # Add the data to the cache.
        for data_item in result:
            if len(data_item) != 3:
                raise Exception("Task returned a data item that does not have 3 elements.\n Task: %s\n Data: %s"%(task,data_item))
            data_item = _VideoDataItem(data_item)
            self.addDataItem(data_item)
        stop = time.time() - start
        if self.debug_level >= 3:
            print "TaskManager[INFO]: Evaluate task %s for frame %d. Time=%0.2fms"%(task,task.getFrameId(),stop*1000)
        # Compute task statistics
        if not self.task_data[task.task_id].has_key('time_sum'):
            self.task_data[task.task_id]['time_sum'] = 0.0
            self.task_data[task.task_id]['call_count'] = 0
        self.task_data[task.task_id]['time_sum'] += stop
        self.task_data[task.task_id]['call_count'] += 1
        self.task_data[task.task_id]['color'] = task.color
        self.task_data[task.task_id]['subgraph'] = task.subgraph
        # Return False so that the task is deleted.
        return False

    def _remainingTasksForFrame(self, frame_id):
        '''
        @return: the number of tasks that still need to run for this frame.
        '''
        count = 0
        for task in self.task_list:
            if task.getFrameId() == frame_id:
                count += 1
        return count

    # TODO: I don't really like how show frames works. I would like display of frames to be optional or maybe handled outside of this class. How should this work.
    def showFrames(self, ilog=None):
        '''
        Show (and release) any buffered frames that have no remaining tasks.
        '''
        while len(self.frame_list) > 0:
            frame_data = self.frame_list[0]
            frame_id = frame_data.getFrameId()
            frame = frame_data.getData()
            task_count = self._remainingTasksForFrame(frame_id)
            # If the frame is complete then show it.
            if task_count == 0:
                if self.show:
                    frame.show(delay=1)
                if ilog != None:
                    ilog(frame,ext='jpg')
                del self.frame_list[0]
            else:
                # Frames are released in order; stop at the first incomplete one.
                break

    def recordingFile(self, filename):
        '''
        Set up an output file for recording data items. Mutually exclusive
        with playback.
        '''
        assert self.playback_shelf == None
        self.recording_shelf = shelve.open(filename, flag='n', protocol=2, writeback=True)

    def playbackFile(self, filename, cache=False):
        '''
        Set up an input file for playback. Mutually exclusive with recording.

        NOTE(review): the ``cache`` parameter is currently unused -- confirm
        whether caching was intended here.
        '''
        assert self.recording_shelf == None
        self.playback_shelf = shelve.open(filename, flag='r', protocol=2, writeback=False)

    def recordingFilter(self, data_types):
        '''
        Only record data_types in the list.
        '''
        self.recording_filter = set(data_types)

    def taskFilter(self, task_types):
        '''
        Only generate tasks in the list.
        '''
        self.task_filter = set(task_types)

    def playbackFilter(self, data_types):
        '''
        Only play back data_types in the list.
        '''
        self.playback_filter = set(data_types)

    def asGraph(self, as_image=False):
        '''
        Use runtime analysis to create a pydot dataflow graph for this VTM.

        @param as_image: if True return a pv.Image rendering of the graph,
            otherwise return the pydot graph object.
        '''
        import pydot
        import pyvision as pv
        import PIL.Image
        from cStringIO import StringIO

        def formatNum(n):
            '''
            This formats frame offsets correctly: -1,0,+1
            '''
            if n == 0:
                return '0'
            else:
                return "%+d"%n

        def record_strings(my_list):
            # NOTE(review): appears unused; the adjacent literals '{' '}'
            # concatenate to '{}'.
            return '{''}'

        # Create the graph.
        graph = pydot.Dot(graph_type='digraph',nodesep=.3,ranksep=.5)
        graph.add_node(pydot.Node("Data Input",shape='invhouse',style='filled',fillcolor='#ffCC99'))
        graph.add_node(pydot.Node("Video Input",shape='invhouse',style='filled',fillcolor='#ffCC99'))
        graph.add_edge(pydot.Edge("Video Input","FRAME"))
        graph.add_edge(pydot.Edge("Video Input","LAST_FRAME"))
        if self.playback_shelf != None:
            graph.add_node(pydot.Node("Playback",shape='invhouse',style='filled',fillcolor='#ffCC99'))
        subgraphs = {None:graph}
        # Add task nodes
        for each in self.task_set:
            if self.task_data[each].has_key('call_count'):
                class_name = self.task_data[each]['class_name']
                call_count = self.task_data[each]['call_count']
                mean_time = self.task_data[each]['time_sum']/call_count
                node_label = "{" + " | ".join([class_name,
                    "Time=%0.2fms"%(mean_time*1000.0,),
                    "Calls=%d"%(call_count,),
                    ]) + "}"
                color = '#99CC99'
                print each, self.task_data[each]
                if self.task_data[each]['color'] is not None:
                    color = self.task_data[each]['color']
                subgraph = self.task_data[each]['subgraph']
                subgraph_name = subgraph
                if subgraph_name != None:
                    subgraph_name = "_".join(subgraph.split())
                # NOTE(review): membership is checked on `subgraph` but
                # entries are stored under `subgraph_name`; when the two
                # differ a cluster could be re-created -- confirm.
                if not subgraphs.has_key(subgraph):
                    print "adding subgraph",subgraph
                    subgraphs[subgraph_name] = pydot.Cluster(subgraph_name,label=subgraph,shape='box',style='filled',fillcolor='#DDDDDD',nodesep=1.0)
                    subgraphs[None].add_subgraph(subgraphs[subgraph_name])
                print "adding node",each,subgraph
                subgraphs[subgraph_name].add_node(pydot.Node(each,label=node_label,shape='record',style='filled',fillcolor=color))
            else:
                # The task node was never executed
                call_count = 0
                mean_time = -1
                class_name = self.task_data[each]['class_name']
                node_label = "{" + " | ".join([class_name,
                    "Time=%0.2fms"%(mean_time*1000.0,),
                    "Calls=%d"%(call_count,),
                    ]) + "}"
                graph.add_node(pydot.Node(each,label=node_label,shape='record',style='filled',fillcolor='#CC3333'))
        # Add Data Nodes
        for each,subgraph in self.data_set:
            subgraph_name = subgraph
            if subgraph_name != None:
                subgraph_name = "_".join(subgraph.split())
            subgraphs[subgraph_name].add_node(pydot.Node(each,shape='box',style='rounded, filled',fillcolor='#9999ff'))
        # Add edges.
        for each,offsets in self.flow.iteritems():
            offsets = list(offsets)
            if len(offsets) == 1 and list(offsets)[0] == 0:
                graph.add_edge(pydot.Edge(each[0],each[1]))
            else:
                offsets = formatOffsets(offsets)
                graph.add_edge(pydot.Edge(each[0],each[1],label=offsets,label_scheme=2,labeldistance=2,labelfloat=False))
        # Create a pv.Image containing the graph.
        if as_image:
            data = graph.create_png()
            f = StringIO(data)
            im = pv.Image(PIL.Image.open(f))
            return im
        return graph
def formatGroup(group):
    """Render a run of offsets as '+a,+b,...'; runs longer than three are abbreviated to 'first...last'."""
    try:
        abbreviate = len(group) > 3
    except:
        # Unsized input: fall through to plain formatting below.
        abbreviate = False
    if abbreviate:
        return formatGroup(group[:1]) + "..." + formatGroup(group[-1:])
    return ",".join("%+d" % each for each in group)
def groupOffsets(offsets):
    """Sort offsets in place and partition them into runs of consecutive values."""
    offsets.sort()
    groups = [[]]
    for value in offsets:
        current = groups[-1]
        # Start a new run unless this value extends the current one by +1
        # (an empty current run accepts any value).
        if current and value != current[-1] + 1:
            groups.append([value])
        else:
            current.append(value)
    return groups
def formatOffsets(offsets):
    """Return the grouped offsets as a parenthesized, comma-separated string."""
    parts = [formatGroup(g) for g in groupOffsets(offsets)]
    return "(" + ",".join(parts) + ")"
if __name__ == '__main__':
    # Ad-hoc smoke test for the offset grouping/formatting helpers
    # (Python 2 print statements).
    offsets = [-3,-2,-1,0,1,3,4,5,6,7,8,10,15,20,21,22,23,-21,-22,56,57]
    offsets.sort()
    print offsets
    groups = groupOffsets(offsets)
    print groups
    print ",".join([formatGroup(each) for each in groups])
|
mikeseven/pyvision
|
src/pyvision/beta/vtm.py
|
Python
|
bsd-3-clause
| 27,866
|
#if !defined(__CINT__) || defined(__MAKECINT__)
#include "TString.h"
#include "TObjArray.h"
#include "AliLog.h"
#include "AliAnalysisManager.h"
#include "AliAnalysisDataContainer.h"
#endif
AliAnalysisTaskCMWESE* AddTaskCMWESE(
int debug=0, // debug level controls amount of output statements
double Harmonic=2,
TString trigger="kINT7",
float chi2lo=0.1,
float dcacutz=3.2, // dcaz cut for all tracks
float dcacutxy=2.4, // dcaxy cut for all tracks
float ptmin=0.2, // minimum pt for Q-vector components
float ptmax=2.0, // maximum pt for Q-vector components
int cbinlo=0, // lower centrality bin for histogram array
int cbinhg=8, // higher centrality bin for histogram array
double etaGap=0.3,
bool v0calibOn=true,
bool doNUE=true,
bool doNUA=true,
float centcut=7.5, // centrality restriction for V0M and TRK
TString period="LHC15o",
TString uniqueID=""
)
{
// Creates a pid task and adds it to the analysis manager
// Get the pointer to the existing analysis manager via the static
//access method
//=========================================================================
AliAnalysisManager *mgr = AliAnalysisManager::GetAnalysisManager();
if (!mgr) {
Error("AddTaskCMWESE.C", "No analysis manager to connect to.");
return NULL;
}
// Check the analysis type using the event handlers connected to the
// analysis manager The availability of MC handler can also be
// checked here.
// =========================================================================
if (!mgr->GetInputEventHandler()) {
Error("AddTaskCMWESE.C", "This task requires an input event handler");
return NULL;
}
TString type = mgr->GetInputEventHandler()->GetDataType(); // can be "ESD" or "AOD"
// --- instantiate analysis task
AliAnalysisTaskCMWESE *task = new AliAnalysisTaskCMWESE("TaskCMWESE", period, doNUE, doNUA, v0calibOn);
task->SetDebug(debug);
task-> SetHarmonic(Harmonic);
task->SetTrigger(trigger);
task->SetChi2Low(chi2lo);
task->SetDCAcutZ(dcacutz);
task->SetDCAcutXY(dcacutxy);
task->SetPtMin(ptmin);
task->SetPtMax(ptmax);
task->SetCentBinLow(cbinlo);
task->SetCentBinHigh(cbinhg);
task->SetEtaGap(etaGap);
task->SetV0CalibOn(v0calibOn);
task->SetNUEOn(doNUE);
task->SetNUAOn(doNUA);
task->SetCentCut(centcut);
// task->SelectCollisionCandidates(AliVEvent::kINT7);
mgr->AddTask(task);
// Create ONLY the output containers for the data produced by the
// task. Get and connect other common input/output containers via
// the manager as below
//======================================================================
AliAnalysisDataContainer* cinput = mgr->GetCommonInputContainer();
const char* outputFileName = mgr->GetCommonFileName();
AliAnalysisDataContainer* coutput = mgr->CreateContainer(Form("output_%s", uniqueID.Data()), TList::Class(),
AliAnalysisManager::kOutputContainer,
Form("%s:%s", outputFileName, uniqueID.Data()));
mgr->ConnectInput (task, 0, cinput);
mgr->ConnectOutput(task, 1, coutput);
Int_t inSlotCounter=1;
TGrid::Connect("alien://");
TObjArray *AllContainers = mgr->GetContainers();
if(task->GetNUEOn() || doNUE) {
if (period.EqualTo("LHC10h") || period.EqualTo("LHC11h")) {
TFile *inNUE;
if(!AllContainers->FindObject("NUE")) {
inNUE = TFile::Open("alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/Run1NUE.root");
AliAnalysisDataContainer *cin_NUE = mgr->CreateContainer(Form("NUE"), TList::Class(), AliAnalysisManager::kInputContainer);
TList* wNUE_list = NULL;
wNUE_list = dynamic_cast<TList*>(inNUE->Get("listNUE"));
if (!wNUE_list) printf("Read TList wrong!\n");
cin_NUE->SetData(wNUE_list);
mgr->ConnectInput(task,inSlotCounter,cin_NUE);
inSlotCounter++;
}
else {
mgr->ConnectInput(task,inSlotCounter,(AliAnalysisDataContainer*)AllContainers->FindObject("NUE"));
inSlotCounter++;
printf("NUE already loaded\n");
}
}
else if (period.EqualTo("LHC15o")) {
TFile *inNUE;
if(!AllContainers->FindObject("NUE")) {
inNUE = TFile::Open("alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/efficiencyBothpol.root");
// Ref NUE data from alien:///alice/cern.ch/user/p/prottay/nuarootfiles_p5_one_two_two_FB768_15op2_withpileup/efficiencyBothpol.root
AliAnalysisDataContainer *cin_NUE = mgr->CreateContainer(Form("NUE"), TList::Class(), AliAnalysisManager::kInputContainer);
TList* wNUE_list = NULL;
wNUE_list = dynamic_cast<TList*>(inNUE->Get("fMcEffiHij"));
if (!wNUE_list) printf("Read TList wrong!\n");
cin_NUE->SetData(wNUE_list);
mgr->ConnectInput(task,inSlotCounter,cin_NUE);
inSlotCounter++;
}
else {
mgr->ConnectInput(task,inSlotCounter,(AliAnalysisDataContainer*)AllContainers->FindObject("NUE"));
inSlotCounter++;
printf("NUE already loaded\n");
}
}
}
TString filenameNUA = "";
if (uniqueID.EqualTo("10h_Default") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB1.root";
if (uniqueID.EqualTo("10h_ChiHg3") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB1ChiHg3.root";
if (uniqueID.EqualTo("10h_Nhits60") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB1Nhits60.root";
if (uniqueID.EqualTo("10h_Nhits80") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB1Nhits80.root";
if (uniqueID.EqualTo("10h_FB768") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB768.root";
if (uniqueID.EqualTo("10h_FB272") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB272.root";
if (uniqueID.EqualTo("10h_FB768_ChiHg2") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB768_ChiHg2.root";
if (uniqueID.EqualTo("10h_FB768_Nhits80") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB768_Nhits80.root";
if (uniqueID.EqualTo("10h_FB768_Nhits60") && period.EqualTo("LHC10h")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hNUAFB768_Nhits60.root";
if (uniqueID.EqualTo("15o_Default") && period.EqualTo("LHC15o")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/wgtPion_NUAFB768DeftwPUcut_LHC15op2_24Aug2021.root";
if (uniqueID.EqualTo("15o_ChiHg3") && period.EqualTo("LHC15o")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/LHC15o_pass2_NUA_ChiHg3.root";
if (uniqueID.EqualTo("15o_Nhits60") && period.EqualTo("LHC15o")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/LHC15o_pass2_NUA_Nhits60.root";
if (uniqueID.EqualTo("15o_Nhits80") && period.EqualTo("LHC15o")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/LHC15o_pass2_NUA_Nhits80.root";
if (uniqueID.EqualTo("15o_FB96") && period.EqualTo("LHC15o")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/LHC15o_pass2_NUA_FB96.root";
if (uniqueID.EqualTo("15o_NUA_fromEmil") && period.EqualTo("LHC15o")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/LHC15o_pass2_NUA_wSyst.root";
if (uniqueID.EqualTo("15o_ChiHg2") && period.EqualTo("LHC15o")) filenameNUA = "alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/LHC15o_pass2_NUA_ChiHg2.root";
if(task->GetNUAOn() ||doNUA) {
if (period.EqualTo("LHC10h") ) { // NUA for 10h is too large to read, we separate them into 3 TList*s.
TFile *inNUA;
if(!AllContainers->FindObject("NUA")) {
inNUA = TFile::Open(filenameNUA);
if (!inNUA) return task;
AliAnalysisDataContainer *cin_NUA = mgr->CreateContainer(Form("NUA"), TList::Class(), AliAnalysisManager::kInputContainer);
TList* wNUA_list = NULL;
wNUA_list = dynamic_cast<TList*>(inNUA->Get("10hListNUA"));
cin_NUA->SetData(wNUA_list);
mgr->ConnectInput(task,inSlotCounter,cin_NUA);
inSlotCounter++;
} else {
mgr->ConnectInput(task,inSlotCounter,(AliAnalysisDataContainer*)AllContainers->FindObject("NUA"));
inSlotCounter++;
printf("NUA already loaded\n");
}
}
else if (period.EqualTo("LHC15o")) {
TFile *inNUA;
if(!AllContainers->FindObject("NUA")) {
inNUA = TFile::Open(filenameNUA);
if (!inNUA) return task;
AliAnalysisDataContainer *cin_NUA = mgr->CreateContainer(Form("NUA"), TList::Class(), AliAnalysisManager::kInputContainer);
TList* wNUA_list = NULL;
wNUA_list = dynamic_cast<TList*>(inNUA->Get("15oListNUA"));
cin_NUA->SetData(wNUA_list);
mgr->ConnectInput(task,inSlotCounter,cin_NUA);
inSlotCounter++;
} else {
mgr->ConnectInput(task,inSlotCounter,(AliAnalysisDataContainer*)AllContainers->FindObject("NUA"));
inSlotCounter++;
printf("NUA already loaded\n");
}
}
}
if(task->GetV0CalibOn() || v0calibOn){
if (period.EqualTo("LHC10h") ) {
// GainEQ & Recenter
TFile *v0calib;
if(!AllContainers->FindObject("V0Calib")) {
AliAnalysisDataContainer *cin_V0Calib = mgr->CreateContainer(Form("V0Calib"), TList::Class(), AliAnalysisManager::kInputContainer);
v0calib = TFile::Open("alien:///alice/cern.ch/user/w/wenya/refData/reflhc10h/10hQnCalib.root");
TList* qncalib_list = NULL;
qncalib_list = dynamic_cast<TList*>(v0calib->Get("10hlistqncalib"));
cin_V0Calib->SetData(qncalib_list);
mgr->ConnectInput(task,inSlotCounter,cin_V0Calib);
inSlotCounter++;
}else {
mgr->ConnectInput(task,inSlotCounter,(AliAnalysisDataContainer*)AllContainers->FindObject("V0Calib"));
inSlotCounter++;
printf("V0Calib already loaded\n");
}
}
else if (period.EqualTo("LHC15o")){
TFile *qnSp;
if(!AllContainers->FindObject("qnSp")) {
AliAnalysisDataContainer *cin_qnPercSp = mgr->CreateContainer(Form("qnSp"), TList::Class(), AliAnalysisManager::kInputContainer);
qnSp = TFile::Open("alien:///alice/cern.ch/user/w/wenya/refData/reflhc15o/calibSpq2V0C15oP2.root");
// Ref V0 qn percentail data copied from alien:////alice/cern.ch/user/a/adobrin/cmeESE15oP2/calibSpq2V0C15oP2.root
TList* spperc_list = NULL;
spperc_list = dynamic_cast<TList*>(qnSp->Get("15olistspPerc"));
cin_qnPercSp->SetData(spperc_list);
mgr->ConnectInput(task,inSlotCounter,cin_qnPercSp);
inSlotCounter++;
}else {
mgr->ConnectInput(task,inSlotCounter,(AliAnalysisDataContainer*)AllContainers->FindObject("qnSp"));
inSlotCounter++;
printf("qnSp already loaded\n");
}
}
}
// Return task pointer at the end
return task;
}
|
nschmidtALICE/AliPhysics
|
PWGCF/FLOW/macros/AddTaskCMWESE.C
|
C++
|
bsd-3-clause
| 11,648
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_TEST_RUNNER_MOCK_WEBRTC_PEER_CONNECTION_HANDLER_H_
#define COMPONENTS_TEST_RUNNER_MOCK_WEBRTC_PEER_CONNECTION_HANDLER_H_
#include <map>
#include "base/basictypes.h"
#include "components/test_runner/web_task.h"
#include "third_party/WebKit/public/platform/WebRTCPeerConnectionHandler.h"
#include "third_party/WebKit/public/platform/WebRTCSessionDescription.h"
#include "third_party/WebKit/public/platform/WebRTCSessionDescriptionRequest.h"
#include "third_party/WebKit/public/platform/WebRTCStatsRequest.h"
namespace blink {
class WebRTCPeerConnectionHandlerClient;
};
namespace test_runner {
class TestInterfaces;
// Test double for blink::WebRTCPeerConnectionHandler used by the test
// runner.  Judging from the members below it tracks session descriptions
// and local/remote streams in-process (see the .cc for actual behavior).
class MockWebRTCPeerConnectionHandler
    : public blink::WebRTCPeerConnectionHandler {
 public:
  MockWebRTCPeerConnectionHandler(
      blink::WebRTCPeerConnectionHandlerClient* client,
      TestInterfaces* interfaces);
  ~MockWebRTCPeerConnectionHandler() override;

  // WebRTCPeerConnectionHandler related methods.
  bool initialize(const blink::WebRTCConfiguration& configuration,
                  const blink::WebMediaConstraints& constraints) override;
  void createOffer(const blink::WebRTCSessionDescriptionRequest& request,
                   const blink::WebMediaConstraints& constraints) override;
  void createOffer(const blink::WebRTCSessionDescriptionRequest& request,
                   const blink::WebRTCOfferOptions& options) override;
  void createAnswer(const blink::WebRTCSessionDescriptionRequest& request,
                    const blink::WebMediaConstraints& constraints) override;
  void setLocalDescription(
      const blink::WebRTCVoidRequest& request,
      const blink::WebRTCSessionDescription& local_description) override;
  void setRemoteDescription(
      const blink::WebRTCVoidRequest& request,
      const blink::WebRTCSessionDescription& remote_description) override;
  blink::WebRTCSessionDescription localDescription() override;
  blink::WebRTCSessionDescription remoteDescription() override;
  bool updateICE(const blink::WebRTCConfiguration& configuration,
                 const blink::WebMediaConstraints& constraints) override;
  bool addICECandidate(const blink::WebRTCICECandidate& ice_candidate) override;
  bool addICECandidate(const blink::WebRTCVoidRequest& request,
                       const blink::WebRTCICECandidate& ice_candidate) override;
  bool addStream(const blink::WebMediaStream& stream,
                 const blink::WebMediaConstraints& constraints) override;
  void removeStream(const blink::WebMediaStream& stream) override;
  void getStats(const blink::WebRTCStatsRequest& request) override;
  blink::WebRTCDataChannelHandler* createDataChannel(
      const blink::WebString& label,
      const blink::WebRTCDataChannelInit& init) override;
  blink::WebRTCDTMFSenderHandler* createDTMFSender(
      const blink::WebMediaStreamTrack& track) override;
  void stop() override;

  // WebTask related methods.
  WebTaskList* mutable_task_list() { return &task_list_; }

 private:
  MockWebRTCPeerConnectionHandler();

  // UpdateRemoteStreams uses the collection of |local_streams_| to create
  // remote MediaStreams with the same number of tracks and notifies |client_|
  // about added and removed streams. It's triggered when setRemoteDescription
  // is called.
  void UpdateRemoteStreams();

  blink::WebRTCPeerConnectionHandlerClient* client_;  // not owned
  bool stopped_;
  WebTaskList task_list_;
  blink::WebRTCSessionDescription local_description_;
  blink::WebRTCSessionDescription remote_description_;
  int stream_count_;
  TestInterfaces* interfaces_;  // not owned
  // Streams keyed by id; maintained by addStream/removeStream and
  // UpdateRemoteStreams (see the .cc).
  typedef std::map<std::string, blink::WebMediaStream> StreamMap;
  StreamMap local_streams_;
  StreamMap remote_streams_;
  DISALLOW_COPY_AND_ASSIGN(MockWebRTCPeerConnectionHandler);
};
} // namespace test_runner
#endif // COMPONENTS_TEST_RUNNER_MOCK_WEBRTC_PEER_CONNECTION_HANDLER_H_
|
Workday/OpenFrame
|
components/test_runner/mock_webrtc_peer_connection_handler.h
|
C
|
bsd-3-clause
| 4,019
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef DEVICE_SERIAL_SERIAL_SERVICE_IMPL_H_
#define DEVICE_SERIAL_SERIAL_SERVICE_IMPL_H_
#include <memory>
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "device/serial/data_stream.mojom.h"
#include "device/serial/serial.mojom.h"
#include "device/serial/serial_connection_factory.h"
#include "device/serial/serial_device_enumerator.h"
#include "mojo/public/cpp/bindings/strong_binding.h"
namespace device {
// Mojo implementation of serial::SerialService.  Holds a
// SerialDeviceEnumerator (optionally injected, e.g. for tests) for
// answering GetDevices() and a SerialConnectionFactory for Connect().
class SerialServiceImpl : public serial::SerialService {
 public:
  // Constructs without an explicit enumerator; GetDeviceEnumerator()
  // supplies one (presumably created on demand -- confirm in the .cc).
  SerialServiceImpl(scoped_refptr<SerialConnectionFactory> connection_factory,
                    mojo::InterfaceRequest<serial::SerialService> request);
  // Constructs with an injected device enumerator.
  SerialServiceImpl(scoped_refptr<SerialConnectionFactory> connection_factory,
                    std::unique_ptr<SerialDeviceEnumerator> device_enumerator,
                    mojo::InterfaceRequest<serial::SerialService> request);
  ~SerialServiceImpl() override;

  // Static helpers that create a service bound to |request|; see the .cc
  // for how the task runners are wired into the connection factory.
  static void Create(scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
                     scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
                     mojo::InterfaceRequest<serial::SerialService> request);
  static void CreateOnMessageLoop(
      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
      scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
      scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
      mojo::InterfaceRequest<serial::SerialService> request);

  // SerialService overrides.
  void GetDevices(const GetDevicesCallback& callback) override;
  void Connect(
      const mojo::String& path,
      serial::ConnectionOptionsPtr options,
      mojo::InterfaceRequest<serial::Connection> connection_request,
      mojo::InterfaceRequest<serial::DataSink> sink,
      mojo::InterfaceRequest<serial::DataSource> source,
      mojo::InterfacePtr<serial::DataSourceClient> source_client) override;

 private:
  // Accessor for |device_enumerator_| (NOTE(review): likely lazy-creates a
  // platform default when none was injected -- confirm in the .cc).
  SerialDeviceEnumerator* GetDeviceEnumerator();
  // Presumably validates that |path| names a known device before
  // connecting -- confirm in the .cc.
  bool IsValidPath(const mojo::String& path);

  std::unique_ptr<SerialDeviceEnumerator> device_enumerator_;
  scoped_refptr<SerialConnectionFactory> connection_factory_;
  mojo::StrongBinding<serial::SerialService> binding_;

  DISALLOW_COPY_AND_ASSIGN(SerialServiceImpl);
};
} // namespace device
#endif // DEVICE_SERIAL_SERIAL_SERVICE_IMPL_H_
|
danakj/chromium
|
device/serial/serial_service_impl.h
|
C
|
bsd-3-clause
| 2,535
|
<?php
/* Yii view: "update" page for a Login record.  Expects $model (the Login
 * being edited) and $this (the controller) in scope. */

// Breadcrumb trail: Logins index -> this record's view -> Update.
$this->breadcrumbs=array(
    'Logins'=>array('index'),
    $model->id=>array('view','id'=>$model->id),
    'Update',
);
// _menu.php fills $menu with the shared Login action items, which are then
// nested under a single top-level "Login" menu entry.
$menu=array();
require(dirname(__FILE__).DIRECTORY_SEPARATOR.'_menu.php');
$this->menu=array(
    array('label'=>'Login','url'=>array('index'),'icon'=>'fa fa-list-alt', 'items' => $menu)
);
?>
<?php
// Page body wrapped in a TbBox titled with the record id; the box header
// renders the controller menu as a button group.
$box = $this->beginWidget(
    'bootstrap.widgets.TbBox',
    array(
        'title' => 'Update Logins #'.$model->id,
        'headerIcon' => 'icon- fa fa-pencil',
        'headerButtons' => array(
            array(
                'class' => 'bootstrap.widgets.TbButtonGroup',
                'type' => 'success',
                // '', 'primary', 'info', 'success', 'warning', 'danger' or 'inverse'
                'buttons' => $this->menu
            ),
        )
    )
);?>
<?php
// Flash-message area: one TbAlert configuration per alert type.
$this->widget('bootstrap.widgets.TbAlert', array(
    'block'=>false, // display a larger alert block?
    'fade'=>true, // use transitions?
    'closeText'=>'&times;', // close link text - if set to false, no close link is displayed
    'alerts'=>array( // configurations per alert type
        'success'=>array('block'=>true, 'fade'=>true, 'closeText'=>'&times;'), //success, info, warning, error or danger
        'info'=>array('block'=>true, 'fade'=>true, 'closeText'=>'&times;'), //success, info, warning, error or danger
        'warning'=>array('block'=>true, 'fade'=>true, 'closeText'=>'&times;'), //success, info, warning, error or danger
        'error'=>array('block'=>true, 'fade'=>true, 'closeText'=>'&times;'), //success, info, warning, error or danger
        'danger'=>array('block'=>true, 'fade'=>true, 'closeText'=>'&times;'), //success, info, warning, error or danger
    ),
));
?><?php echo $this->renderPartial('_form',array('model'=>$model)); ?>
<?php $this->endWidget(); ?>
|
mdcconcepts/opinion_desk_CAP
|
protected/views/login/update.php
|
PHP
|
bsd-3-clause
| 1,825
|
package org.discotools.gwt.leaflet.client.controls.scale;
import org.discotools.gwt.leaflet.client.controls.ControlImpl;
import org.discotools.gwt.leaflet.client.jsobject.JSObject;
/**
 * JSNI bridge for Leaflet's scale control.
 * <p>
 * Extends {@link ControlImpl}; the only entry point is the native
 * {@link #create(JSObject)} factory.
 *
 * @author Lionel Leiva-Marcon
 */
public class ScaleImpl extends ControlImpl {
    /**
     * Creates the underlying Leaflet scale control by calling
     * {@code L.control.scale(options)} on the host page's window.
     *
     * @param options Leaflet scale-control options object
     * @return the wrapped native control as a {@link JSObject}
     */
    public static native JSObject create(JSObject options)/*-{
        return new $wnd.L.control.scale(options);
    }-*/;
}
|
haleystorm/gwt-leaflet
|
src/gwtl-core/src/main/java/org/discotools/gwt/leaflet/client/controls/scale/ScaleImpl.java
|
Java
|
bsd-3-clause
| 428
|
"""
An implementation of OGC WFS 2.0.0 over the top of Django. This module requires that OGR be installed and that you use
either the PostGIS or Spatialite backends to GeoDjango for the layers you are retrieving. The module provides a
generic view, :py:class:WFS that provides standard WFS requests and responses and :py:class:WFST that provides WFS +
Transactions.
This is an initial cut at WFS compatibility. It is not perfect by any means, but it is a decent start. To use WFS with
your application, you will either need to use a GeoDjango model or derive from :py:class:WFSAdapter and
wrap a model class with it. Most URL configs will look like this::
    url(r'^wfs', WFS.as_view(model=myapp.models.MyGeoModel))
Models' Meta class can be modified to include attributes that can be picked up by the view as descriptive parameters
that will make it into the response of a GetCapabilities request.
The following features remain unimplemented:
* Transactions
* Creation and removal of stored queries
* Resolution
* The standard XML filter language (instead I intend to support OGR SQL and the Django filter language)
"""
from collections import namedtuple
from uuid import uuid4
from django.http import HttpResponse
from django.contrib.gis.db.models.query import GeoQuerySet
from django.contrib.gis.db.models import GeometryField
from django import forms as f
import json
from django.shortcuts import render_to_response
from ga_ows.views import common
from ga_ows.utils import MultipleValueField, BBoxField, CaseInsensitiveDict
from lxml import etree
from ga_ows.views.common import RequestForm, CommonParameters, GetCapabilitiesMixin
from osgeo import ogr
from django.conf import settings
from tempfile import gettempdir
from django.db import connections
import re
from lxml import etree
import os
#: Requests' Common Parameters
#: ===========================
class InputParameters(RequestForm):
    """Common "input" KVP parameters shared by WFS requests.

    ``from_request`` normalizes the raw KVP keys into this form's field
    names and supplies the spec defaults.
    """
    srs_name = f.CharField()
    input_format = f.CharField() # default should be "application/gml+xml; version=3.2"
    srs_format = f.CharField(required=False)
    @classmethod
    def from_request(cls, request):
        # Default SRS is WGS84 lat/long.
        request['srs_name'] = request.get('srsname', 'EPSG:4326')
        request['input_format'] = request.get('inputformat', "application/gml+xml; version=3.2")
        # NOTE(review): 'srs_format' is declared above but never populated
        # here -- confirm whether it should be read from the request too.
class PresentationParameters(RequestForm):
    """WFS "standard presentation" KVP parameters: paging (count /
    startindex / maxfeatures) and output format for GetFeature results.
    """
    count = f.IntegerField()
    start_index = f.IntegerField()
    max_features = f.IntegerField()
    output_format = f.CharField()
    @classmethod
    def from_request(cls, request):
        # NOTE(review): count and maxfeatures default to 1, which looks
        # unusually small for a default page size -- confirm intent.
        request['count'] = int(request.get('count', '1'))
        request['start_index'] = int(request.get('startindex','1'))
        request['max_features'] = int(request.get('maxfeatures', '1'))
        request['output_format'] = request.get('outputformat',"application/gml+xml; version=3.2")
class AdHocQueryParameters(RequestForm):
    """KVP parameters for WFS "ad hoc" queries (typenames, filter, bbox,
    sorting) -- as opposed to stored queries.
    """
    type_names = MultipleValueField()
    aliases = MultipleValueField(required=False)
    filter = f.CharField(required=False)
    filter_language = f.CharField(required=False)
    resource_id = f.CharField(required=False)
    bbox = BBoxField()
    sort_by = f.CharField(required=False)
    @classmethod
    def from_request(cls, request):
        request['type_names'] = request.getlist('typenames')
        request['aliases'] = request.getlist('aliases')
        request['filter'] = request.get('filter')
        request['filter_language'] = request.get('filterlanguage')
        # NOTE(review): every other KVP key here drops the underscore
        # ('filterlanguage', 'sortby'), but this one keeps it -- confirm the
        # wire name expected for RESOURCEID.
        request['resource_id'] = request.get('resource_id')
        request['bbox'] = request.get('bbox')
        request['sort_by'] = request.get('sortby')
class StoredQueryParameters(RequestForm):
    """KVP parameter selecting a stored query by identifier
    (STOREDQUERY_ID)."""
    stored_query_id = f.CharField(required=False)
    @classmethod
    def from_request(cls, request):
        request['stored_query_id'] = request.get('storedquery_id')
class GetFeatureByIdParameters(RequestForm):
    """Parameter for the GetFeatureById stored query: a feature id of the
    form ``{typename}.{pk}`` (see GeoDjangoWFSAdapter.SQ_GetFeatureById,
    which splits on the dot)."""
    feature_id = f.CharField()
    @classmethod
    def from_request(cls, request):
        request['feature_id'] = request.get('id')
class ResolveParameters(RequestForm):
    """KVP parameters for WFS resource resolution.  (Resolution itself is
    listed as unimplemented in the module docstring.)"""
    resolve = f.CharField(required=False)
    resolve_depth = f.IntegerField()
    resolve_timeout = f.FloatField()
    @classmethod
    def from_request(cls, request):
        request['resolve'] = request.get('resolve')
        request['resolve_depth'] = int(request.get('resolve_depth','0'))
        request['resolve_timeout'] = float(request.get('resolve_timeout', '0'))
#: Exceptions
#: ==========
# One common.OWSException subclass per WFS failure condition; each class's
# docstring describes the condition it represents.
class CannotLockAllFeatures(common.OWSException):
    """A locking request with a lockAction of ALL failed to lock all the requested features."""
class DuplicateStoredQueryIdValue(common.OWSException):
    """The identifier specified for a stored query expression is a duplicate."""
class DuplicateStoredQueryParameterName(common.OWSException):
    """This specified name for a stored query parameter is already being used within the same stored query definition."""
class FeaturesNotLocked(common.OWSException):
    """For servers that do not support automatic data locking (see 15.2.3.1), this exception indicates that a transaction operation is modifying features that have not previously been locked using a LockFeature (see Clause 12) or GetFeatureWithLock (see Clause 13) operation."""
class InvalidLockId(common.OWSException):
    """The value of the lockId parameter on a Transaction operation is invalid because it was not generated by the server."""
class InvalidValue(common.OWSException):
    """A Transaction (see Clause 15) has attempted to insert or change the value of a data component in a way that violates the schema of the feature."""
class LockHasExpired(common.OWSException):
    """The specified lock identifier on a Transaction or LockFeature operation has expired and is no longer valid."""
class OperationParsingFailed(common.OWSException):
    """The request is badly formed and failed to be parsed by the server."""
class OperationProcessingFailed(common.OWSException):
    """An error was encountered while processing the operation."""
class ResponseCacheExpired(common.OWSException):
    """The response cache used to support paging has expired and the results are no longer available."""
class OperationNotSupported(common.OWSException):
    """The operation is not yet implemented"""
########################################################################################################################
# Adapter class
########################################################################################################################
#: Class for describing features. A named tuple containing:
#: * ns : str - namespace URI under which this feature type's schema lives
#: * ns_name : str - the short (prefix) name for that namespace
#: * name : str - the feature type name. this is what goes in the featureTypes parameter on a GetFeature request.
#: * title : str - the human readable name for this feature type
#: * abstract : str - a short description of this feature type, if necessary
#: * keywords : list(str) - keywords associated with this feature_type
#: * srs : str - the spatial reference system that is default for this feature type
#: * bbox : (minx, miny, maxx, maxy) - the bounding box for this feature type. must be present and filled in WGS84
#: * schema : str - location of the schema document for this feature type
#:
FeatureDescription = namedtuple('FeatureDescription', ('ns', 'ns_name', 'name','title','abstract','keywords','srs','bbox', 'schema'))
#: A description of a stored-query parameter. A named tuple containing:
#: * type : str - the parameter type
#: * name : str - the parameter name (computer-readable)
#: * title : str - the parameter name (human-readable)
#: * abstract : str - a short description of the parameter
#: * query_expression : :py:class:StoredQueryExpression
#:
StoredQueryParameter = namedtuple("StoredQueryParameter", ('type','name', 'title','abstract', 'query_expression'))
#: A description of how a stored query parameter should be filled in. A named tuple containing:
#: * text : str - template text for a query
#: * language : str - the language the query is expressed in.
#: * private : boolean - whether or not the query is private
#: * return_feature_types : the comma-separated computer-readable names of the feature types that are returned
StoredQueryExpression = namedtuple("StoredQueryExpression", ('text', 'language', 'private', 'return_feature_types'))
#: A description of a stored query. A named tuple containing:
#: * name : str - the computer-readable name of the stored query
#: * feature_types : str - the comma-separated computer-readable names of the feature types that are returned
#: * title : str - the human-readable name of the stored query
#: * parameters : list(StoredQueryParameter) - the query's declared parameters
StoredQueryDescription = namedtuple("StoredQueryDescription", ('name', 'feature_types', 'title', 'parameters'))
class WFSAdapter(object):
    """
    This adapter should be defined by any class that needs to expose WFS services on its interface. The adapter will
    be called with an object as its working object and will encapsulate all the functionality needed to expose that
    object via WFS using the ga_ows.WFSView class.

    Stored queries are discovered by naming convention: any method named
    ``SQ_{QueryName}`` (on this class or any base class) is reported by
    :py:meth:`list_stored_queries`.
    """
    def get_feature_descriptions(self, request, *types):
        """Return an iterable of FeatureDescription tuples for ``types``.

        Must be overridden; the base implementation reports the operation
        as unsupported.
        """
        raise OperationNotSupported.at('GetFeatureDescription', 'Implementor should return list of FeatureDescriptions')

    def list_stored_queries(self, request):
        """Return a dict mapping stored-query names to (placeholder) descriptions.

        Subclasses of this class may implement extra stored queries by creating methods
        matching the pattern::

            def SQ_{QueryName}(self, request, parms):
                pass

        where request and parms are the Django HTTPRequest object and parms are
        GetFeature parameters.
        """
        # Walk the full MRO so stored queries inherited from base classes are
        # included.  (Fix: the original built the same mapping via reduce(),
        # a Python-2-only builtin, plus an O(n^2) list concatenation.)
        queries = {}
        for klass in self.__class__.mro():
            for attr in klass.__dict__:
                if attr.startswith("SQ_"):
                    queries[attr[3:]] = []  # strip the "SQ_" prefix
        return queries

    def get_features(self, request, parms):
        """Return the features matching a GetFeature request.

        Must be overridden; the base implementation reports the operation
        as unsupported.
        """
        raise OperationNotSupported.at('GetFeature', "Implementor is given a GetFeatures.Parameters object and should return an OGR dataset or a GeoDjango QuerySet")

    def supports_feature_versioning(self):
        # Feature versioning is not implemented by any adapter yet.
        return False
class GeoDjangoWFSAdapter(WFSAdapter):
    """WFSAdapter that exposes a collection of GeoDjango models over WFS.

    Feature type names take the form ``{app_label}:{object_name}``.
    """
    def __init__(self, models):
        """:param models: iterable of GeoDjango model classes to expose."""
        self.models = {}      # feature-type name -> model class
        self.srids = {}       # feature-type name -> SRID of its geometry field
        # NOTE this assumes that there will be only one geometry field per model. This is of course not necessarily the case, but it works 95% of the time.
        self.geometries = {}  # feature-type name -> GeometryField instance
        for model in models:
            name = model._meta.app_label + ":" + model._meta.object_name
            self.models[name] = model
            for field in model._meta.fields:
                if isinstance(field, GeometryField):
                    self.geometries[name] = field
                    self.srids[name] = field.srid

    def list_stored_queries(self, request):
        """Advertise every SQ_* method, each offering the full list of feature types."""
        sq = super(GeoDjangoWFSAdapter, self).list_stored_queries(request)
        fts = list(self.models.keys())
        for k in sq.keys():
            sq[k] = StoredQueryDescription(name=k, feature_types=fts, title=k, parameters=[])
        return sq

    def get_feature_descriptions(self, request, *types):
        """Yield a FeatureDescription for every registered model.

        NOTE(review): the ``types`` argument is currently ignored -- all
        models are described regardless of what was requested.
        """
        namespace = request.build_absolute_uri().split('?')[0] + "/schema" # todo: include https://bitbucket.org/eegg/django-model-schemas/wiki/Home
        for model in self.models.values():
            # extent() fails on an empty table, so fall back to a degenerate bbox.
            if model.objects.count() > 0:
                extent = model.objects.extent()
            else:
                extent = (0,0,0,0)
            yield FeatureDescription(
                ns=namespace,
                ns_name=model._meta.app_label,
                name=model._meta.object_name,
                abstract=model.__doc__,
                title=model._meta.verbose_name,
                keywords=[],
                srs=self.srids[model._meta.app_label + ":" + model._meta.object_name],
                bbox=extent,
                schema=namespace
            )

    def get_features(self, request, parms):
        """Dispatch GetFeature either to a stored query (SQ_* method) or to AdHocQuery."""
        if parms.cleaned_data['stored_query_id']:
            squid = "SQ_" + parms.cleaned_data['stored_query_id']
            try:
                # NOTE(review): an AttributeError raised *inside* the stored
                # query is indistinguishable from a missing query here.
                return self.__getattribute__(squid)(request, parms)
            except AttributeError:
                raise OperationNotSupported.at('GetFeatures', 'stored_query_id={squid}'.format(squid=squid))
        else:
            #try:
            return self.AdHocQuery(request, parms)
            #except KeyError as k:
            #    raise OperationProcessingFailed.at("GetFeatures", str(k))
            #except ValueError as v:
            #    raise OperationParsingFailed.at("GetFeatures", "filter language not supported or invalid JSON")

    def AdHocQuery(self, request, parms):
        """Evaluate an ad-hoc (non-stored) query and return a QuerySet.

        Applies, in order: bbox restriction, JSON attribute filter,
        ordering, paging, and an optional SRID reprojection.
        """
        type_names = parms.cleaned_data['type_names'] # only support one type-name at a time (model) for now
        #aliases = parms.cleaned_data['aliases'] # ignored for now
        flt = parms.cleaned_data['filter'] # filter should be in JSON
        flt_lang = parms.cleaned_data['filter_language'] # only support JSON now
        #res_id = parms.cleaned_data['resource_id'] # ignored
        bbox = parms.cleaned_data['bbox']
        sort_by = parms.cleaned_data['sort_by']
        count = parms.cleaned_data['count']
        if not count:
            count = parms.cleaned_data['max_features']
        start_index = parms.cleaned_data['start_index']
        srs_name = parms.cleaned_data['srs_name'] # assume bbox is in this
        srs_format = parms.cleaned_data['srs_format'] # this can be proj, None (srid), srid, or wkt.
        model = self.models[type_names[0]] # support only the first type-name for now.
        geometry_field = self.geometries[type_names[0]]
        query_set = model.objects.all()
        if bbox:
            mnx, mny, mxx, mxy = bbox
            # BUG FIX: QuerySet.filter() returns a *new* queryset; the result
            # was previously discarded, so the bbox never restricted anything.
            query_set = query_set.filter(**{ geometry_field.name + "__bboverlaps" :
                "POLYGON(({mnx} {mny}, {mxx} {mny}, {mxx} {mxy}, {mnx} {mxy}, {mnx} {mny}))".format(
                    mnx=mnx,
                    mny=mny,
                    mxx=mxx,
                    mxy=mxy)
            })
        if flt:
            flt = json.loads(flt)
            query_set = query_set.filter(**flt)
        if sort_by and ',' in sort_by:
            sort_by = sort_by.split(',')
            query_set = query_set.order_by(*sort_by)
        elif sort_by:
            query_set = query_set.order_by(sort_by)
        if start_index and count:
            query_set = query_set[start_index:start_index+count]
        elif start_index:
            query_set = query_set[start_index:]
        elif count:
            query_set = query_set[:count]
        if srs_name:
            # NOTE(review): srs_name is a string and geometry_field.srid an
            # int, so this comparison is always True -- confirm intent.
            if (not srs_format or srs_format == 'srid') and srs_name != geometry_field.srid:
                if srs_name.lower().startswith('epsg:'):
                    srs_name = srs_name[5:]
                # BUG FIX: GeoQuerySet.transform() also returns a clone;
                # assign it so the reprojection actually takes effect.
                query_set = query_set.transform(int(srs_name))
            # TODO support proj and WKT formats by manually transforming geometries.
            # First create a list() from the queryset, then create SpatialReference objects for
            # the source and dest. Then import them from their corresponding SRS definitions
            # then loop over the list and transform each model instance's geometry record
        return query_set

    def SQ_GetFeatureById(self, request, parms):
        """Stored query: fetch a single feature from an id of the form "typename.pk"."""
        my_parms = GetFeatureByIdParameters.create(request.REQUEST)
        typename, pk = my_parms.cleaned_data['feature_id'].split('.')
        return self.models[typename].objects.filter(pk=int(pk))
# WFS itself. All the individual classes are defined as mixins for the sake of modularity and ease of debugging.
class WFSBase(object):
    """The base class for WFS mixins. Makes sure that all mixins assume an adapter"""
    # WFSAdapter instance every mixin operates through; expected to be
    # assigned by the concrete view class (not visible in this chunk).
    adapter = None
class DescribeFeatureTypeMixin(WFSBase):
    """
    Defines the DescribeFeatureType operation found in section 9 of the WFS standard
    """
    class Parameters(
        CommonParameters
    ):
        # Feature types to describe; empty means "describe everything".
        type_names = MultipleValueField()
        output_format = f.CharField()
        @classmethod
        def from_request(cls, request):
            # Accept both 'typename' and 'typenames' spellings.
            request['type_names'] = request.getlist('typename') + request.getlist('typenames')
            request['output_format'] = request.get('outputformat', "application/gml+xml; version=3.2")

    def _parse_xml_DescribeFeatureType(self, request):
        """See section 9.4.2 of the OGC spec. Note that the spec is unclear how to encode the typeNames parameter. Its
        example says one thing and the standard says another, so I've done both here.

        Returns a Parameters form whose 'type_names' is either 'all' (describe
        every feature type) or a list of (namespace, name) pairs.
        """
        def add_ns(it, ns):
            # Split "prefix:name" into (namespace, name) using the ns lookup
            # table; unprefixed names get the empty namespace.
            x = it.split(':')
            if len(x) > 1:
                return ns[x[0]], x[1]
            else:
                # BUG FIX: previously returned the whole split list ``x``
                # instead of the bare name string.
                return '', x[0]
        root = etree.fromstring(request)
        xmlns = root.get('xmlns')
        output_format = root.get('outputFormat', 'application/gml+xml; version=3.2')
        if xmlns is not None:
            xmlns = "{" + xmlns + "}"
        else:
            xmlns = ""
        # Build the namespace lookup from the root element's attributes.
        # NOTE(review): this maps attribute value -> trimmed attribute name,
        # and is indexed by prefix below -- verify against real requests.
        namespaces = {}
        for name, value in root.attrib.items():
            if name.startswith(xmlns):
                namespaces[value] = name[len(xmlns):]
        # typeNames may appear as a comma-separated attribute...
        type_names = root.get('typeNames')
        if type_names is not None:
            type_names = [add_ns(n, namespaces) for n in type_names.split(',')]
        else:
            type_names = []
        # ...and/or as child <TypeName> elements.
        for elt in root:
            if elt.tag.endswith("TypeName"):
                namespace, name = elt.text.split(":")
                namespace = namespaces[namespace]
                type_names.append((namespace, name))
        if not len(type_names):
            type_names = 'all'
        return DescribeFeatureTypeMixin.Parameters.create(CaseInsensitiveDict({"typenames" : type_names, "outputformat" : output_format}))

    def _response_xml_DescribeFeatureType(self, response):
        # Render the schema description through the XML template.
        return render_to_response("ga_ows/WFS_DescribeFeature.template.xml", { "feature_types" : list(response) })

    def _response_json_DescribeFeatureType(self, response, callback=None):
        # Serialize the feature descriptions as JSON; if ``callback`` is
        # given, wrap the payload for JSONP.
        rsp = []
        for feature_type in response:
            rsp.append({
                "schema" : feature_type.schema,
                "name" : feature_type.name,
                "abstract" : feature_type.abstract,
                "title" : feature_type.title,
                "ns_name" : feature_type.ns_name
            })
        if callback is not None:
            return HttpResponse(callback + "(" + json.dumps(rsp) + ")", mimetype='text/javascript')
        else:
            return HttpResponse(json.dumps(rsp), mimetype='application/json')

    def DescribeFeatureType(self, request, kwargs):
        """See section 9 of the OGC WFS standards document."""
        if 'xml' in kwargs:
            parms = self._parse_xml_DescribeFeatureType(kwargs['xml'])
        else:
            parms = DescribeFeatureTypeMixin.Parameters.create(kwargs)
        response = self.adapter.get_feature_descriptions(request, *parms.cleaned_data['type_names'])
        if parms.cleaned_data['output_format'].endswith('json'):
            # Either 'callback' or 'jsonp' selects JSONP output.
            if 'callback' in kwargs:
                return self._response_json_DescribeFeatureType(response, callback=kwargs['callback'])
            elif 'jsonp' in kwargs:
                return self._response_json_DescribeFeatureType(response, callback=kwargs['jsonp'])
            else:
                return self._response_json_DescribeFeatureType(response)
        else:
            return self._response_xml_DescribeFeatureType(response)
class GetFeatureMixin(WFSBase):
    """
    Defines the GetFeature operation in section 11 of the WFS standard.
    """
    class Parameters(
        CommonParameters,
        InputParameters,
        PresentationParameters,
        AdHocQueryParameters,
        StoredQueryParameters
    ):
        # GetFeature accepts the union of all the shared parameter groups; no
        # operation-specific fields are needed.
        pass

    def _parse_xml_GetFeature(self, request):
        """Parse an XML-encoded (POST body) GetFeature request.

        Not implemented: always raises OperationNotSupported.
        """
        raise OperationNotSupported.at("GetFeature", "XML encoded POST for WFS.GetFeature needs implemented")
        #TODO implement this method.

    def GetFeature(self, request, kwargs):
        """Handle the GetFeature operation: ask the adapter for features,
        funnel them through an OGR layer, and serialize in the requested
        output format via an OGR driver."""
        # Map OGR driver names to response MIME types; anything not listed
        # falls back to text/plain below.
        mimetypes = {
            'GeoJSON' : 'application/json'
        }
        if 'xml' in kwargs:
            parms = self._parse_xml_GetFeature(kwargs['xml'])
        else:
            parms = GetFeatureMixin.Parameters.create(kwargs)

        # must be an OGR dataset or a QuerySet containing one layer
        response = self.adapter.get_features(request, parms)

        if isinstance(response, GeoQuerySet):
            layer = None
            db_params = settings.DATABASES[response.db]
            if db_params['ENGINE'].endswith('postgis'):
                # Then we take the raw SQL from the QuerySet and pass it through OGR instead. This causes the SQL to be
                # executed twice, but it's also the most expedient way to create our output. This could be done better,
                # but it gets it out the door for now.

                # Create the query from the QuerySet
                # adapt() prevents SQL injection attacks
                from psycopg2.extensions import adapt
                query, parameters = response.query.get_compiler(response.db).as_sql()
                parameters = tuple([adapt(p) for p in parameters])
                query = query % parameters

                # Connect to PostGIS with OGR, building the connection string
                # from Django's database settings (optional parts included
                # only when configured).
                drv = ogr.GetDriverByName("PostgreSQL")
                connection_string = "PG:dbname='{db}'".format(db=db_params['NAME'])
                if 'HOST' in db_params and db_params['HOST']:
                    connection_string += " host='{host}'".format(host=db_params['HOST'])
                if 'PORT' in db_params and db_params['PORT']:
                    connection_string += " port='{port}'".format(port=db_params['PORT'])
                if 'USER' in db_params and db_params['USER']:
                    connection_string += " user='{user}'".format(user=db_params['USER'])
                if 'PASSWORD' in db_params and db_params['PASSWORD']:
                    connection_string += " password='{password}'".format(password=db_params['PASSWORD'])
                conn = drv.Open(connection_string)

                # Put the QuerySet into a layer the hard way.
                layer = conn.ExecuteSQL(query)
            elif db_params['ENGINE'].endswith('spatialite'):
                # This works the same way as the if-statement above.
                # todo replace this with the sqlite version of the same thing for preventing SQL injection attacks
                from psycopg2.extensions import adapt
                query, parameters = response.query.get_compiler(response.db).as_sql()
                parameters = tuple([adapt(p) for p in parameters])
                query = query % parameters

                drv = ogr.GetDriverByName("Spatialite")
                conn = drv.Open(db_params['NAME'])
                layer = conn.ExecuteSQL(query)
        else:
            # The adapter returned an OGR dataset directly; use its one layer.
            layer = response.GetLayerByIndex(0)

        # All OGR drivers capable of creating a data source, keyed by name.
        drivers = dict([(ogr.GetDriver(drv).GetName(), ogr.GetDriver(drv)) for drv in range(ogr.GetDriverCount()) if ogr.GetDriver(drv).TestCapability(ogr.ODrCCreateDataSource)])

        # NOTE(review): .decode('ascii') implies output_format is a byte
        # string here (Python 2 form data) — confirm under the deployed
        # Django version.
        output_format = parms.cleaned_data['output_format'].decode('ascii')

        if 'gml' in output_format or 'xml' in output_format:
            # GML/XML: serialize through the GML driver via a temp file,
            # read it back, and clean up.
            tmpname = "{tmpdir}{sep}{uuid}.{output_format}".format(tmpdir=gettempdir(), uuid=uuid4(), output_format='gml', sep=os.path.sep)
            drv = ogr.GetDriverByName("GML")
            ds = drv.CreateDataSource(tmpname)
            l2 = ds.CopyLayer(layer, 'WFS_result')
            l2.SyncToDisk()
            del ds
            responsef = open(tmpname)
            rdata = responsef.read()
            responsef.close()
            os.unlink(tmpname)
            return HttpResponse(rdata, mimetype=output_format)
        elif output_format in drivers:
            # Any other OGR driver named directly by the client.
            tmpname = "{tmpdir}{sep}{uuid}.{output_format}".format(tmpdir=gettempdir(), uuid=uuid4(), output_format=output_format, sep=os.path.sep)
            drv = drivers[output_format]
            ds = drv.CreateDataSource(tmpname)
            l2 = ds.CopyLayer(layer, 'WFS_result')
            l2.SyncToDisk()
            del ds
            responsef = open(tmpname)
            rdata = responsef.read()
            responsef.close()
            os.unlink(tmpname)
            return HttpResponse(rdata, mimetype=mimetypes.get(output_format,'text/plain'))
        else:
            # Unknown format: report an OGC processing failure listing what
            # is supported.
            raise OperationProcessingFailed.at('GetFeature', 'outputFormat {of} not supported ({formats})'.format(of=output_format, formats=drivers.keys()))
class ListStoredQueriesMixin(WFSBase):
    """
    Defines the ListStoredQueries operation in section 14.3 of the standard
    """
    def ListStoredQueries(self, request, kwargs):
        """Build and return the XML ListStoredQueriesResponse document
        describing every stored query the adapter knows about."""
        stored_queries = self.adapter.list_stored_queries(request)
        root = etree.Element("ListStoredQueriesResponse")
        for title, description in stored_queries.items():
            query_elt = etree.SubElement(root, "StoredQuery")
            etree.SubElement(query_elt, "Title").text = title
            # One ReturnFeatureType element per feature type the query yields.
            for return_type in description.feature_types:
                etree.SubElement(query_elt, 'ReturnFeatureType').text = return_type
        return HttpResponse(etree.tostring(root, pretty_print=True), mimetype='text/xml')
class DescribeStoredQueriesMixin(WFSBase):
    """Defines the DescribeStoredQueries operation (section 14.4 of the WFS
    2.0 standard): returns full descriptions of the requested stored
    queries."""
    class Parameters(CommonParameters):
        # IDs of the stored queries the client wants described.
        stored_query_id = MultipleValueField()

        @classmethod
        def from_request(cls, request):
            request['stored_query_id'] = request.getlist('storedqueryid')

    def DescribeStoredQueries(self, request, kwargs):
        """Build the DescribeStoredQueriesResponse XML document for the
        stored queries named in the request."""
        parms = DescribeStoredQueriesMixin.Parameters.create(kwargs)
        inspected_queries = parms.cleaned_data['stored_query_id']
        response = etree.Element('DescribeStoredQueriesResponse')
        # FIX: replaced the Python-2-only tuple-unpacking lambda
        # (``lambda (x,y): ...``) with a portable generator expression.
        described = ((query, description)
                     for query, description in self.adapter.list_stored_queries(request).items()
                     if query in inspected_queries)
        for query, description in described:
            desc = etree.SubElement(response, "StoredQueryDescription")
            etree.SubElement(desc, 'Title').text = query
            for parameter in description.parameters:
                p = etree.SubElement(desc, "Parameter", attrib={"name" : parameter.name, "type" : parameter.type})
                etree.SubElement(p, 'Title').text = parameter.title
                # FIX: was ``parameter.abstractS`` — an AttributeError typo.
                etree.SubElement(p, 'Abstract').text = parameter.abstract
                if parameter.query_expression:
                    etree.SubElement(p, "QueryExpressionText", attrib={
                        # FIX: lxml attribute values must be strings; the
                        # original passed a bool (TypeError at runtime).
                        # Serialized as xsd:boolean 'true'/'false'.
                        "isPrivate" : str(parameter.query_expression.private == True).lower(),
                        "language" : parameter.query_expression.language,
                        "returnFeatureTypes" : ' '.join(parameter.query_expression.return_feature_types)
                    }).text = parameter.query_expression.text
        return HttpResponse(etree.tostring(response, pretty_print=True), mimetype='text/xml')
# TODO implement stored queries
class CreateStoredQuery(WFSBase):
    """Placeholder for the CreateStoredQuery operation (WFS 2.0 section 14.5).

    Stored-query creation is not implemented; the handler always raises an
    OGC OperationNotSupported exception.
    """
    def CreateStoredQuery(self, request, kwargs):
        raise OperationNotSupported.at("CreateStoredQuery")
class DropStoredQuery(WFSBase):
    """Placeholder for the DropStoredQuery operation (WFS 2.0 section 14.6).

    Stored-query deletion is not implemented; the handler always raises.
    """
    def DropStoredQuery(self, request, kwargs):
        raise OperationNotSupported.at("DropStoredQuery")
# TODO implement transactions
class TransactionMixin(WFSBase):
    """Placeholder for the Transaction operation (WFS 2.0 section 15).

    Feature insert/update/delete transactions are not implemented; the
    handler always raises an OGC OperationNotSupported exception.
    """
    def Transaction(self, request, kwargs):
        """Always raises OperationNotSupported — see class docstring."""
        raise OperationNotSupported.at('Transaction')
class GetFeatureWithLockMixin(WFSBase):
    """Placeholder for GetFeatureWithLock (WFS 2.0 section 13); not
    implemented — always raises OperationNotSupported."""
    def GetFeatureWithLock(self, request, kwargs):
        raise OperationNotSupported.at("GetFeatureWithLock")
class LockFeatureMixin(WFSBase):
    """Placeholder for LockFeature (WFS 2.0 section 12); not implemented —
    always raises OperationNotSupported."""
    def LockFeature(self, request, kwargs):
        raise OperationNotSupported.at('LockFeature')
class GetPropertyValueMixin(WFSBase):
    """Placeholder for the GetPropertyValue operation (WFS 2.0 section 10).

    The parameter parsing is defined, but the operation itself is not
    implemented and always raises OperationNotSupported.
    """
    class Parameters(StoredQueryParameters, AdHocQueryParameters):
        # XPath-like reference to the property being requested.
        value_reference = f.CharField()
        # Optional path used to resolve nested/remote references.
        resolve_path = f.CharField(required=False)

        # FIX: every sibling Parameters class declares from_request as a
        # @classmethod; without the decorator this hook would be bound as an
        # instance method and break the shared create() machinery.
        @classmethod
        def from_request(cls, request):
            request['value_reference'] = request['valuereference']
            # FIX: resolve_path is optional (required=False) — use .get so a
            # missing key no longer raises KeyError.
            request['resolve_path'] = request.get('resolvepath')

    def GetPropertyValue(self, request, kwargs):
        raise OperationNotSupported.at('GetPropertyValue')
class WFS(
    common.OWSView,
    GetCapabilitiesMixin,
    DescribeFeatureTypeMixin,
    DescribeStoredQueriesMixin,
    GetFeatureMixin,
    ListStoredQueriesMixin,
    GetPropertyValueMixin
):
    """ A generic view supporting the WFS 2.0.0 standard from the OGC"""
    # Adapter answering WFS operations; built from ``models`` in __init__
    # when not supplied directly.
    adapter = None
    # Optional mapping used to construct a GeoDjangoWFSAdapter.
    models = None
    # Service metadata reported in the GetCapabilities document.
    title = None
    # NOTE(review): mutable class-level default — shared by all instances and
    # subclasses that do not override it.
    keywords = []
    fees = None
    access_constraints = None
    # Service-provider contact details for the capabilities document.
    provider_name = None
    addr_street = None
    addr_city = None
    addr_admin_area = None
    addr_postcode = None
    addr_country = None
    # NOTE(review): declared but not passed to the capabilities template below.
    addr_email = None

    def __init__(self, **kwargs):
        """Forward configuration kwargs to OWSView; derive the adapter from
        ``models`` if one was configured."""
        common.OWSView.__init__(self, **kwargs)
        if self.models:
            self.adapter = GeoDjangoWFSAdapter(self.models)

    def get_capabilities_response(self, request, params):
        """Render the GetCapabilities XML document.  This read-only view
        advertises no transaction support and no feature versioning."""
        return render_to_response('ga_ows/WFS_GetCapabilities.template.xml', {
            "title" : self.title,
            "keywords" : self.keywords,
            "fees" : self.fees,
            "access_constraints" : self.access_constraints,
            # Endpoint is the request URL minus any query string.
            "endpoint" : request.build_absolute_uri().split('?')[0],
            # Advertise every OGR driver capable of creating a data source.
            "output_formats" : [ogr.GetDriver(drv).GetName() for drv in range(ogr.GetDriverCount()) if ogr.GetDriver(drv).TestCapability(ogr.ODrCCreateDataSource)],
            "addr_street" : self.addr_street,
            "addr_city" : self.addr_city,
            "addr_admin_area" : self.addr_admin_area,
            "addr_postcode" : self.addr_postcode,
            "addr_country" : self.addr_country,
            "feature_versioning" : False,
            "transactional" : False,
            'feature_types' : self.adapter.get_feature_descriptions(request)
        })
class WFST(WFS,TransactionMixin,GetFeatureWithLockMixin, LockFeatureMixin):
    """ A generic view supporting the WFS 2.0.0 standard from the OGC including transactions"""
    def get_capabilities_response(self, request, params):
        """Render GetCapabilities like WFS, but advertise transaction support
        and let the adapter report whether feature versioning is available.
        (The transaction/lock operations themselves currently raise
        OperationNotSupported — see the mixins.)"""
        return render_to_response('ga_ows/WFS_GetCapabilities.template.xml', {
            "title" : self.title,
            "keywords" : self.keywords,
            "fees" : self.fees,
            "access_constraints" : self.access_constraints,
            "endpoint" : request.build_absolute_uri().split('?')[0],
            "output_formats" : [ogr.GetDriver(drv).GetName() for drv in range(ogr.GetDriverCount()) if ogr.GetDriver(drv).TestCapability(ogr.ODrCCreateDataSource)],
            "addr_street" : self.addr_street,
            "addr_city" : self.addr_city,
            "addr_admin_area" : self.addr_admin_area,
            "addr_postcode" : self.addr_postcode,
            "addr_country" : self.addr_country,
            "feature_versioning" : self.adapter.supports_feature_versioning(),
            "transactional" : True,
            'feature_types' : self.adapter.get_feature_descriptions(request)
        })
|
hydroshare/hydroshare_temp
|
ga_ows/views/wfs.py
|
Python
|
bsd-3-clause
| 31,334
|
<?php
// Console-application configuration for the Yii2 "basic" project template.
// Returned array is consumed by yii\console\Application.

// Make the @tests alias available to console commands.
Yii::setAlias('@tests', dirname(__DIR__) . '/tests');

// Shared application parameters and database settings.
$params = require(__DIR__ . '/params.php');
$db = require(__DIR__ . '/db.php');

return [
    'id' => 'basic-console',
    'basePath' => dirname(__DIR__),
    // Initialize the log component eagerly so early errors are captured.
    'preload' => ['log'],
    // Console commands live under app\commands.
    'controllerNamespace' => 'app\commands',
    'extensions' => require(__DIR__ . '/../vendor/yiisoft/extensions.php'),
    'components' => [
        'cache' => [
            'class' => 'yii\caching\FileCache',
        ],
        'log' => [
            'targets' => [
                [
                    // File logging for errors and warnings only.
                    'class' => 'yii\log\FileTarget',
                    'levels' => ['error', 'warning'],
                ],
            ],
        ],
        'db' => $db,
    ],
    'params' => $params,
];
|
veontomo/teresa
|
config/console.php
|
PHP
|
bsd-3-clause
| 738
|
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_PUBLIC_COMMON_USER_AGENT_USER_AGENT_MOJOM_TRAITS_H_
#define THIRD_PARTY_BLINK_PUBLIC_COMMON_USER_AGENT_USER_AGENT_MOJOM_TRAITS_H_
#include "third_party/blink/public/common/user_agent/user_agent_metadata.h"
#include <string>
#include "mojo/public/cpp/bindings/struct_traits.h"
#include "third_party/blink/public/common/common_export.h"
#include "third_party/blink/public/mojom/user_agent/user_agent_metadata.mojom-shared.h"
namespace mojo {
// Maps blink::UserAgentBrandVersion onto its Mojo wire representation.
template <>
struct BLINK_COMMON_EXPORT
    StructTraits<blink::mojom::UserAgentBrandVersionDataView,
                 ::blink::UserAgentBrandVersion> {
  // Field getters invoked by the generated Mojo serialization code.
  static const std::string& brand(const ::blink::UserAgentBrandVersion& data) {
    return data.brand;
  }
  static const std::string& version(
      const ::blink::UserAgentBrandVersion& data) {
    return data.version;
  }
  // Deserializes a wire-format view into |out|; defined in the .cc file.
  static bool Read(blink::mojom::UserAgentBrandVersionDataView data,
                   ::blink::UserAgentBrandVersion* out);
};
// Maps blink::UserAgentMetadata (the UA Client Hints payload) onto its Mojo
// wire representation.  All getters return references into |data|.
template <>
struct BLINK_COMMON_EXPORT StructTraits<blink::mojom::UserAgentMetadataDataView,
                                        ::blink::UserAgentMetadata> {
  static const blink::UserAgentBrandList& brand_version_list(
      const ::blink::UserAgentMetadata& data) {
    return data.brand_version_list;
  }
  static const blink::UserAgentBrandList& brand_full_version_list(
      const ::blink::UserAgentMetadata& data) {
    return data.brand_full_version_list;
  }
  static const std::string& full_version(
      const ::blink::UserAgentMetadata& data) {
    return data.full_version;
  }
  static const std::string& platform(const ::blink::UserAgentMetadata& data) {
    return data.platform;
  }
  static const std::string& platform_version(
      const ::blink::UserAgentMetadata& data) {
    return data.platform_version;
  }
  static const std::string& architecture(
      const ::blink::UserAgentMetadata& data) {
    return data.architecture;
  }
  static const std::string& model(const ::blink::UserAgentMetadata& data) {
    return data.model;
  }
  static const bool& mobile(const ::blink::UserAgentMetadata& data) {
    return data.mobile;
  }
  static const std::string& bitness(const ::blink::UserAgentMetadata& data) {
    return data.bitness;
  }
  // Deserializes a wire-format view into |out|; defined in the .cc file.
  static bool Read(blink::mojom::UserAgentMetadataDataView data,
                   ::blink::UserAgentMetadata* out);
};
// Maps blink::UserAgentOverride onto its Mojo wire representation.
template <>
struct BLINK_COMMON_EXPORT StructTraits<blink::mojom::UserAgentOverrideDataView,
                                        ::blink::UserAgentOverride> {
  static const std::string& ua_string_override(
      const ::blink::UserAgentOverride& data) {
    return data.ua_string_override;
  }
  // FIX: return by const reference.  The previous signature returned
  // `const absl::optional<...>` *by value*, copying the optional (and the
  // entire UserAgentMetadata inside it) on every serialization — and was
  // inconsistent with every other getter in this header.
  static const absl::optional<::blink::UserAgentMetadata>& ua_metadata_override(
      const ::blink::UserAgentOverride& data) {
    return data.ua_metadata_override;
  }
  // Deserializes a wire-format view into |out|; defined in the .cc file.
  static bool Read(blink::mojom::UserAgentOverrideDataView,
                   ::blink::UserAgentOverride* out);
};
} // namespace mojo
#endif // THIRD_PARTY_BLINK_PUBLIC_COMMON_USER_AGENT_USER_AGENT_MOJOM_TRAITS_H_
|
scheib/chromium
|
third_party/blink/public/common/user_agent/user_agent_mojom_traits.h
|
C
|
bsd-3-clause
| 3,234
|
/*
* TI Voxel Lib component.
*
* Copyright (c) 2014 Texas Instruments Inc.
*/
#include "Convolve2D.h"
#include "Logger.h"
namespace Voxel
{
// Constructs a 2D convolution kernel of size rows x columns.
//
// Validates that the coefficient count matches rows*columns and that both
// dimensions are odd (so the kernel has a well-defined center).  On failure,
// logs an error and leaves the object in an inert state (_rows == _columns
// == 0), which convolve() later rejects.  On success, stores the
// coefficients in reversed order so convolve() can apply true convolution
// (kernel flipped) with a straight multiply-accumulate.
Convolve2D::Convolve2D(const Vector<float> &coefficients, SizeType rows, SizeType columns):_rows(rows), _columns(columns)
{
  if(coefficients.size() != _rows*_columns)
  {
    logger(LOG_ERROR) << "Convolve2D: Invalid number of coefficients given. Expecting "
    << _rows << "x" << _columns << " number of coefficients." << std::endl;
    _rows = _columns = 0;
  }
  else if(_rows % 2 == 0 || _columns % 2 == 0)
  {
    logger(LOG_ERROR) << "Convolve2D: Rows and columns are expected to be odd numbers. Got "
    << _rows << "x" << _columns << "." << std::endl;
    _rows = _columns = 0;
  }
  else
  {
    SizeType s = coefficients.size();
    _coefficients.resize(s);
    // FIX: use SizeType for the index — `auto i = 0` deduced `int`, causing
    // a signed/unsigned comparison against `s` (and potential truncation for
    // very large kernels).
    for(SizeType i = 0; i < s; i++)
      _coefficients[s - i - 1] = coefficients[i]; // Invert and keep ready for convolution
  }
}
// Indexes the (reversed) coefficient array with kernel-relative offsets
// (r, c) in [-rc, rc] x [-cc, cc]; rc/cc are the kernel half-sizes captured
// from the enclosing scope.
#define COEFF(r, c) _coefficients[(rc + r)*_columns + (cc + c)]

// Convolves the rows x columns image `in` with the stored kernel, writing
// the result to `out` (resized to rows*columns).  Pixels whose kernel
// footprint falls outside the image are computed with the out-of-range taps
// skipped (i.e. zero-padding semantics).  Returns false if the kernel was
// never validly constructed or the input size does not match.
bool Convolve2D::convolve(const Vector<float> &in, SizeType rows, SizeType columns, Vector<float> &out)
{
  if(_coefficients.size() == 0 || in.size() != rows*columns)
  {
    logger(LOG_ERROR) << "Convolve2D: Invalid data size or missing coefficients." << std::endl;
    return false;
  }

  out.resize(rows*columns);

  // Kernel half-sizes (center offsets); also read by the COEFF macro above.
  int rc = _rows/2, cc = _columns/2;

  // NOTE(review): `int r < rows` mixes signed int with (presumably unsigned)
  // SizeType — fine for realistic image sizes but worth confirming.
  for(int r = 0; r < rows; r++)
    for(int c = 0; c < columns; c++)
    {
      auto i = r*columns + c;   // linear index of the output pixel

      float v = 0;

      // Accumulate over the kernel window, skipping taps that fall
      // outside the image bounds.
      for(int r1 = -rc; r1 <= rc; r1++)
        for(int c1 = -cc; c1 <= cc; c1++)
        {
          if(r + r1 < 0 || r + r1 >= rows || c + c1 < 0 || c + c1 >= columns)
            continue;

          auto j = r1*columns + c1; // linear offset of the neighboring pixel
          v += in[i + j]*COEFF(r1, c1);
        }
      out[i] = v;
    }
  return true;
}
}
|
Metrilus/voxelsdk
|
Voxel/Convolve2D.cpp
|
C++
|
bsd-3-clause
| 1,888
|
using Microsoft.AspNetCore.Html;
using Microsoft.AspNetCore.Mvc.Localization;
using Microsoft.AspNetCore.Mvc.Rendering;
namespace OrchardCore.DisplayManagement.Notify
{
    /// <summary>
    /// Severity/category of a user-facing notification.
    /// </summary>
    public enum NotifyType
    {
        Success,
        Information,
        Warning,
        Error
    }

    /// <summary>
    /// A single notification to be rendered to the user.
    /// </summary>
    public class NotifyEntry
    {
        /// <summary>The severity of the notification.</summary>
        public NotifyType Type { get; set; }

        /// <summary>The pre-rendered HTML body of the notification.</summary>
        public IHtmlContent Message { get; set; }
    }
}
|
lukaskabrt/Orchard2
|
src/OrchardCore/OrchardCore.DisplayManagement/Notify/NotifyEntry.cs
|
C#
|
bsd-3-clause
| 416
|
/*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of The University of Texas nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
// Blocked variant 1 of TRMM with a left-side, upper-stored triangular A:
// C := beta * C + alpha * triu(A) * B.  B is packed once; A and C are
// partitioned along m, packed per-block, and handed to bli_trmm_int for the
// subproblem.  The control tree `cntl` supplies blocksizes and packing
// parameters.
void bli_trmm_lu_blk_var1( obj_t* alpha,
                           obj_t* a,
                           obj_t* b,
                           obj_t* beta,
                           obj_t* c,
                           trmm_t* cntl )
{
	obj_t a1, a1_pack;
	obj_t b_pack;
	obj_t c1, c1_pack;

	dim_t i;
	dim_t b_alg;
	dim_t mT_trans;

	// Initialize all pack objects that are passed into packm_init().
	bli_obj_init_pack( &a1_pack );
	bli_obj_init_pack( &b_pack );
	bli_obj_init_pack( &c1_pack );

	// If A is [upper] triangular, use the diagonal offset of A to determine
	// the length of the non-zero region.
	if ( bli_obj_is_triangular( *a ) )
		mT_trans = bli_abs( bli_obj_diag_offset_after_trans( *a ) ) +
		           bli_obj_width_after_trans( *a );
	else // if ( bli_obj_is_general( *a )
		mT_trans = bli_obj_length_after_trans( *a );

	// Scale C by beta (if instructed).
	bli_scalm_int( beta,
	               c,
	               cntl_sub_scalm( cntl ) );

	// Initialize object for packing B.
	bli_packm_init( b, &b_pack,
	                cntl_sub_packm_b( cntl ) );

	// Pack B and scale by alpha (if instructed).
	bli_packm_int( alpha,
	               b, &b_pack,
	               cntl_sub_packm_b( cntl ) );

	// Partition along the m dimension.
	for ( i = 0; i < mT_trans; i += b_alg )
	{
		// Determine the current algorithmic blocksize.
		b_alg = bli_determine_blocksize_f( i, mT_trans, a,
		                                   cntl_blocksize( cntl ) );

		// Acquire partitions for A1 and C1.
		bli_acquire_mpart_t2b( BLIS_SUBPART1,
		                       i, b_alg, a, &a1 );
		bli_acquire_mpart_t2b( BLIS_SUBPART1,
		                       i, b_alg, c, &c1 );

		// Initialize objects for packing A1 and C1.
		bli_packm_init( &a1, &a1_pack,
		                cntl_sub_packm_a( cntl ) );
		bli_packm_init( &c1, &c1_pack,
		                cntl_sub_packm_c( cntl ) );

		// Pack A1 and scale by alpha (if instructed).
		bli_packm_int( alpha,
		               &a1, &a1_pack,
		               cntl_sub_packm_a( cntl ) );

		// Pack C1 and scale by beta (if instructed).
		bli_packm_int( beta,
		               &c1, &c1_pack,
		               cntl_sub_packm_c( cntl ) );

		// Perform trmm subproblem.
		bli_trmm_int( BLIS_LEFT,
		              alpha,
		              &a1_pack,
		              &b_pack,
		              beta,
		              &c1_pack,
		              cntl_sub_trmm( cntl ) );

		// Unpack C1 (if C1 was packed).
		bli_unpackm_int( &c1_pack, &c1,
		                 cntl_sub_unpackm_c( cntl ) );
	}

	// If any packing buffers were acquired within packm, release them back
	// to the memory manager.
	bli_obj_release_pack( &a1_pack );
	bli_obj_release_pack( &b_pack );
	bli_obj_release_pack( &c1_pack );
}
|
tkelman/blis
|
frame/3/trmm/other/bli_trmm_lu_blk_var1.c
|
C
|
bsd-3-clause
| 4,421
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/extensions/installed_loader.h"
#include "base/file_path.h"
#include "base/metrics/histogram.h"
#include "base/stringprintf.h"
#include "base/threading/thread_restrictions.h"
#include "base/utf_string_conversions.h"
#include "base/values.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/extensions/api/runtime/runtime_api.h"
#include "chrome/browser/extensions/extension_action_manager.h"
#include "chrome/browser/extensions/extension_prefs.h"
#include "chrome/browser/extensions/extension_service.h"
#include "chrome/browser/extensions/extension_system.h"
#include "chrome/browser/extensions/management_policy.h"
#include "chrome/browser/profiles/profile_manager.h"
#include "chrome/common/chrome_switches.h"
#include "chrome/common/extensions/extension.h"
#include "chrome/common/extensions/extension_file_util.h"
#include "chrome/common/extensions/extension_l10n_util.h"
#include "chrome/common/extensions/extension_manifest_constants.h"
#include "chrome/common/extensions/manifest.h"
#include "chrome/common/pref_names.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/user_metrics.h"
using content::BrowserThread;
using content::UserMetricsAction;
using extensions::Extension;
using extensions::ExtensionInfo;
namespace errors = extension_manifest_errors;
namespace {
// The following enumeration is used in histograms matching
// Extensions.ManifestReload* . Values may be added, as long as existing
// values are not changed.
enum ManifestReloadReason {
  NOT_NEEDED = 0,        // Reload not needed.
  UNPACKED_DIR,          // Unpacked directory.
  NEEDS_RELOCALIZATION,  // The locale has changed since we read this extension.
  NUM_MANIFEST_RELOAD_REASONS
  // Values feed UMA histograms: append new entries only, never renumber.
};

// Decides whether |info|'s manifest must be re-read from disk rather than
// trusted from prefs, and why.
ManifestReloadReason ShouldReloadExtensionManifest(const ExtensionInfo& info) {
  // Always reload manifests of unpacked extensions, because they can change
  // on disk independent of the manifest in our prefs.
  if (info.extension_location == Extension::LOAD)
    return UNPACKED_DIR;

  // Reload the manifest if it needs to be relocalized.
  if (extension_l10n_util::ShouldRelocalizeManifest(info))
    return NEEDS_RELOCALIZATION;

  return NOT_NEEDED;
}
// Forwards an onInstalled notification to the runtime event router, bailing
// out when no valid profile is available.
void DispatchOnInstalledEvent(
    Profile* profile,
    const std::string& extension_id,
    const Version& old_version,
    bool chrome_updated) {
  // The profile manager can be NULL in unit tests, and the profile may no
  // longer be valid by the time this posted task runs.
  ProfileManager* profile_manager = g_browser_process->profile_manager();
  if (!profile_manager || !profile_manager->IsValidProfile(profile))
    return;

  extensions::RuntimeEventRouter::DispatchOnInstalledEvent(
      profile, extension_id, old_version, chrome_updated);
}
} // namespace
namespace extensions {
// Caches the owning service and its ExtensionPrefs; |extension_service| must
// outlive this loader.
InstalledLoader::InstalledLoader(ExtensionService* extension_service)
    : extension_service_(extension_service),
      extension_prefs_(extension_service->extension_prefs()) {
}

InstalledLoader::~InstalledLoader() {
}
// Instantiates one installed extension from its cached manifest/prefs info,
// enforces ID-stability and management policy, and registers it with the
// service (or reports a load error).  When |write_to_prefs| is true the
// (possibly re-read) manifest is written back to prefs.
void InstalledLoader::Load(const ExtensionInfo& info, bool write_to_prefs) {
  std::string error;
  scoped_refptr<const Extension> extension(NULL);
  if (info.extension_manifest.get()) {
    extension = Extension::Create(
        info.extension_path,
        info.extension_location,
        *info.extension_manifest,
        GetCreationFlags(&info),
        &error);
  } else {
    error = errors::kManifestUnreadable;
  }

  // Once installed, non-unpacked extensions cannot change their IDs (e.g., by
  // updating the 'key' field in their manifest).
  // TODO(jstritar): migrate preferences when unpacked extensions change IDs.
  if (extension &&
      extension->location() != Extension::LOAD &&
      info.extension_id != extension->id()) {
    error = errors::kCannotChangeExtensionID;
    extension = NULL;
    content::RecordAction(UserMetricsAction("Extensions.IDChangedError"));
  }

  // Check policy on every load in case an extension was blacklisted while
  // Chrome was not running.
  const ManagementPolicy* policy = extensions::ExtensionSystem::Get(
      extension_service_->profile())->management_policy();
  if (extension &&
      !policy->UserMayLoad(extension, NULL)) {
    // The error message from UserMayInstall() often contains the extension ID
    // and is therefore not well suited to this UI.
    error = errors::kDisabledByPolicy;
    extension = NULL;
  }

  if (!extension) {
    extension_service_->
        ReportExtensionLoadError(info.extension_path, error, false);
    return;
  }

  if (write_to_prefs)
    extension_prefs_->UpdateManifest(extension);

  extension_service_->AddExtension(extension);
}
// Loads every installed extension recorded in prefs at startup:
// 1) finalizes any delayed installs (dispatching onInstalled),
// 2) re-reads manifests from disk where required,
// 3) registers each extension via Load(), and
// 4) records a large set of UMA metrics about the installed population.
// Must run on the UI thread.
void InstalledLoader::LoadAllExtensions() {
  CHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));

  base::TimeTicks start_time = base::TimeTicks::Now();

  scoped_ptr<ExtensionPrefs::ExtensionsInfo> extensions_info(
      extension_prefs_->GetInstalledExtensionsInfo());

  std::vector<int> reload_reason_counts(NUM_MANIFEST_RELOAD_REASONS, 0);
  bool should_write_prefs = false;
  int update_count = 0;

  for (size_t i = 0; i < extensions_info->size(); ++i) {
    ExtensionInfo* info = extensions_info->at(i).get();

    // Finish any install that was delayed (e.g. an update staged while the
    // extension was in use), then refresh |info| from prefs.
    scoped_ptr<ExtensionInfo> pending_update(
        extension_prefs_->GetDelayedInstallInfo(info->extension_id));
    if (pending_update) {
      if (!extension_prefs_->FinishDelayedInstallInfo(info->extension_id))
        NOTREACHED();
      // Capture the pre-update version so onInstalled can report it.
      Version old_version;
      if (info->extension_manifest) {
        std::string version_str;
        if (info->extension_manifest->GetString(
                extension_manifest_keys::kVersion, &version_str)) {
          old_version = Version(version_str);
        }
      }
      MessageLoop::current()->PostTask(FROM_HERE,
          base::Bind(&DispatchOnInstalledEvent, extension_service_->profile(),
                     info->extension_id, old_version, false));
      info = extension_prefs_->GetInstalledExtensionInfo(
          info->extension_id).release();
      extensions_info->at(i).reset(info);
      update_count++;
    }

    ManifestReloadReason reload_reason = ShouldReloadExtensionManifest(*info);
    ++reload_reason_counts[reload_reason];
    UMA_HISTOGRAM_ENUMERATION("Extensions.ManifestReloadEnumValue",
                              reload_reason, 100);

    if (reload_reason != NOT_NEEDED) {
      // Reloading an extension reads files from disk. We do this on the
      // UI thread because reloads should be very rare, and the complexity
      // added by delaying the time when the extensions service knows about
      // all extensions is significant. See crbug.com/37548 for details.
      // |allow_io| disables tests that file operations run on the file
      // thread.
      base::ThreadRestrictions::ScopedAllowIO allow_io;

      std::string error;
      scoped_refptr<const Extension> extension(
          extension_file_util::LoadExtension(
              info->extension_path,
              info->extension_location,
              GetCreationFlags(info),
              &error));

      if (!extension.get()) {
        extension_service_->
            ReportExtensionLoadError(info->extension_path, error, false);
        continue;
      }

      // Replace the cached manifest with the freshly read one and remember
      // to write it back to prefs in the Load() pass below.
      extensions_info->at(i)->extension_manifest.reset(
          static_cast<DictionaryValue*>(
              extension->manifest()->value()->DeepCopy()));
      should_write_prefs = true;
    }
  }

  for (size_t i = 0; i < extensions_info->size(); ++i) {
    Load(*extensions_info->at(i), should_write_prefs);
  }

  extension_service_->OnLoadedInstalledExtensions();

  // The histograms Extensions.ManifestReload* allow us to validate
  // the assumption that reloading manifest is a rare event.
  UMA_HISTOGRAM_COUNTS_100("Extensions.ManifestReloadNotNeeded",
                           reload_reason_counts[NOT_NEEDED]);
  UMA_HISTOGRAM_COUNTS_100("Extensions.ManifestReloadUnpackedDir",
                           reload_reason_counts[UNPACKED_DIR]);
  UMA_HISTOGRAM_COUNTS_100("Extensions.ManifestReloadNeedsRelocalization",
                           reload_reason_counts[NEEDS_RELOCALIZATION]);

  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadAll",
                           extension_service_->extensions()->size());
  UMA_HISTOGRAM_COUNTS_100("Extensions.Disabled",
                           extension_service_->disabled_extensions()->size());
  UMA_HISTOGRAM_COUNTS_100("Extensions.UpdateOnLoad",
                           update_count);

  UMA_HISTOGRAM_TIMES("Extensions.LoadAllTime",
                      base::TimeTicks::Now() - start_time);

  // Per-category counters for the metrics recorded below.
  int app_user_count = 0;
  int app_external_count = 0;
  int hosted_app_count = 0;
  int legacy_packaged_app_count = 0;
  int platform_app_count = 0;
  int user_script_count = 0;
  int extension_user_count = 0;
  int extension_external_count = 0;
  int theme_count = 0;
  int page_action_count = 0;
  int browser_action_count = 0;
  int disabled_for_permissions_count = 0;
  int item_user_count = 0;
  const ExtensionSet* extensions = extension_service_->extensions();
  ExtensionSet::const_iterator ex;
  for (ex = extensions->begin(); ex != extensions->end(); ++ex) {
    Extension::Location location = (*ex)->location();
    Extension::Type type = (*ex)->GetType();
    if ((*ex)->is_app()) {
      UMA_HISTOGRAM_ENUMERATION("Extensions.AppLocation",
                                location, 100);
    } else if (type == Extension::TYPE_EXTENSION) {
      UMA_HISTOGRAM_ENUMERATION("Extensions.ExtensionLocation",
                                location, 100);
    }

    // Don't count component extensions, since they are only extensions as an
    // implementation detail.
    if (location == Extension::COMPONENT)
      continue;

    // Don't count unpacked extensions, since they're a developer-specific
    // feature.
    if (location == Extension::LOAD)
      continue;

    // Using an enumeration shows us the total installed ratio across all users.
    // Using the totals per user at each startup tells us the distribution of
    // usage for each user (e.g. 40% of users have at least one app installed).
    UMA_HISTOGRAM_ENUMERATION("Extensions.LoadType", type, 100);
    switch (type) {
      case Extension::TYPE_THEME:
        ++theme_count;
        break;
      case Extension::TYPE_USER_SCRIPT:
        ++user_script_count;
        break;
      case Extension::TYPE_HOSTED_APP:
        ++hosted_app_count;
        if (Extension::IsExternalLocation(location)) {
          ++app_external_count;
        } else {
          ++app_user_count;
        }
        break;
      case Extension::TYPE_LEGACY_PACKAGED_APP:
        ++legacy_packaged_app_count;
        if (Extension::IsExternalLocation(location)) {
          ++app_external_count;
        } else {
          ++app_user_count;
        }
        break;
      case Extension::TYPE_PLATFORM_APP:
        ++platform_app_count;
        if (Extension::IsExternalLocation(location)) {
          ++app_external_count;
        } else {
          ++app_user_count;
        }
        break;
      case Extension::TYPE_EXTENSION:
      default:
        if (Extension::IsExternalLocation(location)) {
          ++extension_external_count;
        } else {
          ++extension_user_count;
        }
        break;
    }
    if (!Extension::IsExternalLocation((*ex)->location()))
      ++item_user_count;
    ExtensionActionManager* extension_action_manager =
        ExtensionActionManager::Get(extension_service_->profile());
    if (extension_action_manager->GetPageAction(**ex))
      ++page_action_count;
    if (extension_action_manager->GetBrowserAction(**ex))
      ++browser_action_count;
    extension_service_->RecordPermissionMessagesHistogram(
        *ex, "Extensions.Permissions_Load");
  }
  const ExtensionSet* disabled_extensions =
      extension_service_->disabled_extensions();
  for (ex = disabled_extensions->begin();
       ex != disabled_extensions->end(); ++ex) {
    if (extension_service_->extension_prefs()->
        DidExtensionEscalatePermissions((*ex)->id())) {
      ++disabled_for_permissions_count;
    }
  }
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadAllUser", item_user_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadApp",
                           app_user_count + app_external_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadAppUser", app_user_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadAppExternal", app_external_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadHostedApp", hosted_app_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadPackagedApp",
                           legacy_packaged_app_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadPlatformApp", platform_app_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadExtension",
                           extension_user_count + extension_external_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadExtensionUser",
                           extension_user_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadExtensionExternal",
                           extension_external_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadUserScript", user_script_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadTheme", theme_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadPageAction", page_action_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.LoadBrowserAction",
                           browser_action_count);
  UMA_HISTOGRAM_COUNTS_100("Extensions.DisabledForPermissions",
                           disabled_for_permissions_count);
}
// Computes the creation-flag bitmask for |info|: starts from the flags
// persisted in the extension prefs and augments them based on the install
// location and the user's file-access preference.
int InstalledLoader::GetCreationFlags(const ExtensionInfo* info) {
  int creation_flags = extension_prefs_->GetCreationFlags(info->extension_id);
  // Only unpacked (LOAD) extensions may omit a key.
  const bool is_unpacked = info->extension_location == Extension::LOAD;
  if (!is_unpacked)
    creation_flags |= Extension::REQUIRE_KEY;
  if (extension_prefs_->AllowFileAccess(info->extension_id))
    creation_flags |= Extension::ALLOW_FILE_ACCESS;
  return creation_flags;
}
} // namespace extensions
|
leiferikb/bitpop-private
|
chrome/browser/extensions/installed_loader.cc
|
C++
|
bsd-3-clause
| 13,951
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE23_Relative_Path_Traversal__wchar_t_environment_ofstream_16.cpp
Label Definition File: CWE23_Relative_Path_Traversal.label.xml
Template File: sources-sink-16.tmpl.cpp
*/
/*
* @description
* CWE: 23 Relative Path Traversal
* BadSource: environment Read input from an environment variable
* GoodSource: Use a fixed file name
* Sink: ofstream
* BadSink : Open the file named in data using ofstream::open()
* Flow Variant: 16 Control flow: while(1)
*
* */
#include "std_testcase.h"
#ifdef _WIN32
#define BASEPATH L"c:\\temp\\"
#else
#include <wchar.h>
#define BASEPATH L"/tmp/"
#endif
#define ENV_VARIABLE L"ADD"
#ifdef _WIN32
#define GETENV _wgetenv
#else
#define GETENV getenv
#endif
#include <fstream>
using namespace std;
namespace CWE23_Relative_Path_Traversal__wchar_t_environment_ofstream_16
{
#ifndef OMITBAD
/* Deliberately flawed variant for CWE-23 (relative path traversal): appends
 * attacker-controlled environment input to a fixed base path and then opens
 * the resulting file name without any validation. The flaw pattern is the
 * point of this generated testcase; do not "fix" it. */
void bad()
{
    wchar_t * data;
    wchar_t dataBuffer[FILENAME_MAX] = BASEPATH;
    data = dataBuffer;
    while(1)
    {
        {
            /* Append input from an environment variable to data */
            size_t dataLen = wcslen(data);
            wchar_t * environment = GETENV(ENV_VARIABLE);
            /* If there is data in the environment variable */
            if (environment != NULL)
            {
                /* POTENTIAL FLAW: Read data from an environment variable */
                /* data+dataLen points at the terminating null, so the
                 * concatenation starts exactly at the end of BASEPATH. */
                wcsncat(data+dataLen, environment, FILENAME_MAX-dataLen-1);
            }
        }
        break;
    }
    {
        ofstream outputFile;
        /* POTENTIAL FLAW: Possibly opening a file without validating the file name or path */
        /* NOTE(review): the wchar_t* -> char* cast reinterprets the wide
         * buffer's bytes as a narrow string; this is part of the generated
         * template and intentionally left as-is. */
        outputFile.open((char *)data);
        outputFile.close();
    }
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B() - use goodsource and badsink by changing the conditions on the
 * while statements. The "good" source appends a fixed file name instead of
 * attacker-controlled input, so the (unvalidated) sink below is benign in
 * this variant. */
static void goodG2B()
{
    wchar_t * data;
    wchar_t dataBuffer[FILENAME_MAX] = BASEPATH;
    data = dataBuffer;
    while(1)
    {
        /* FIX: Use a fixed file name */
        wcscat(data, L"file.txt");
        break;
    }
    {
        ofstream outputFile;
        /* POTENTIAL FLAW: Possibly opening a file without validating the file name or path */
        outputFile.open((char *)data);
        outputFile.close();
    }
}
/* Entry point for the good variant(s); kept as a thin dispatcher so the
 * test harness can invoke a single function per testcase. */
void good()
{
    goodG2B();
}
#endif /* OMITGOOD */
} /* close namespace */
/* Below is the main(). It is only used when building this testcase on
its own for testing or for building a binary to use in testing binary
analysis tools. It is not used when compiling all the testcases as one
application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
using namespace CWE23_Relative_Path_Traversal__wchar_t_environment_ofstream_16; /* so that we can use good and bad easily */
/* Standalone driver (only compiled under INCLUDEMAIN): runs the good and
 * bad variants, each suppressible via OMITGOOD/OMITBAD. argc/argv are
 * unused. */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
JianpingZeng/xcc
|
xcc/test/juliet/testcases/CWE23_Relative_Path_Traversal/s04/CWE23_Relative_Path_Traversal__wchar_t_environment_ofstream_16.cpp
|
C++
|
bsd-3-clause
| 3,280
|
/*
* Copyright (C) 2011 Google Inc. All Rights Reserved.
* Copyright (C) 2012 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_DOM_TREE_SCOPE_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_DOM_TREE_SCOPE_H_
#include "third_party/blink/renderer/core/core_export.h"
#include "third_party/blink/renderer/core/dom/tree_ordered_map.h"
#include "third_party/blink/renderer/core/html/forms/radio_button_group_scope.h"
#include "third_party/blink/renderer/core/layout/hit_test_request.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/heap/handle.h"
#include "third_party/blink/renderer/platform/wtf/text/atomic_string.h"
namespace blink {
class ContainerNode;
class CSSStyleSheet;
class DOMSelection;
class Document;
class Element;
class HTMLMapElement;
class HitTestResult;
class IdTargetObserverRegistry;
class Node;
class SVGTreeScopeResources;
class ScopedStyleResolver;
// The root node of a document tree (in which case this is a Document) or of a
// shadow tree (in which case this is a ShadowRoot). Various things, like
// element IDs, are scoped to the TreeScope in which they are rooted, if any.
//
// A class which inherits both Node and TreeScope must call clearRareData() in
// its destructor so that the Node destructor no longer does problematic
// NodeList cache manipulation in the destructor.
class CORE_EXPORT TreeScope : public GarbageCollectedMixin {
 public:
  // Distinguishes hit tests issued internally from those triggered by
  // web-exposed APIs (e.g. elementFromPoint).
  enum HitTestPointType {
    kInternal = 1 << 1,
    kWebExposed = 1 << 2,
  };

  TreeScope* ParentTreeScope() const { return parent_tree_scope_; }

  bool IsInclusiveAncestorTreeScopeOf(const TreeScope&) const;

  Element* AdjustedFocusedElement() const;
  // Finds a retargeted element to the given argument, when the retargeted
  // element is in this TreeScope. Returns null otherwise.
  // TODO(kochi): once this algorithm is named in the spec, rename the method
  // name.
  Element* AdjustedElement(const Element&) const;

  // ID lookup scoped to this tree (element IDs need not be unique; the
  // "multiple" variants account for that).
  Element* getElementById(const AtomicString&) const;
  const HeapVector<Member<Element>>& GetAllElementsById(
      const AtomicString&) const;
  bool HasElementWithId(const AtomicString& id) const;
  bool ContainsMultipleElementsWithId(const AtomicString& id) const;
  void AddElementById(const AtomicString& element_id, Element&);
  void RemoveElementById(const AtomicString& element_id, Element&);

  Document& GetDocument() const {
    DCHECK(document_);
    return *document_;
  }

  Node* AncestorInThisScope(Node*) const;

  // Registry of <map> elements for image-map lookup by URL.
  void AddImageMap(HTMLMapElement&);
  void RemoveImageMap(HTMLMapElement&);
  HTMLMapElement* GetImageMap(const String& url) const;

  // Hit-testing entry points scoped to this tree.
  Element* ElementFromPoint(double x, double y) const;
  Element* HitTestPoint(double x, double y, const HitTestRequest&) const;
  HeapVector<Member<Element>> ElementsFromPoint(double x, double y) const;
  HeapVector<Member<Element>> ElementsFromHitTestResult(HitTestResult&) const;

  DOMSelection* GetSelection() const;

  Element& Retarget(const Element& target) const;

  Element* AdjustedFocusedElementInternal(const Element& target) const;

  // Find first anchor which matches the given URL fragment.
  // First searches for an element with the given ID, but if that fails, then
  // looks for an anchor with the given name. ID matching is always case
  // sensitive, but Anchor name matching is case sensitive in strict mode and
  // not case sensitive in quirks mode for historical compatibility reasons.
  // First searches for the raw fragment if not an SVG document, then searches
  // with the URL decoded fragment.
  Node* FindAnchor(const String& fragment);

  // Used by the basic DOM mutation methods (e.g., appendChild()).
  void AdoptIfNeeded(Node&);

  ContainerNode& RootNode() const { return *root_node_; }

  IdTargetObserverRegistry& GetIdTargetObserverRegistry() const {
    return *id_target_observer_registry_.Get();
  }

  RadioButtonGroupScope& GetRadioButtonGroupScope() {
    return radio_button_group_scope_;
  }

  bool IsInclusiveAncestorOf(const TreeScope&) const;
  uint16_t ComparePosition(const TreeScope&) const;

  const TreeScope* CommonAncestorTreeScope(const TreeScope& other) const;
  TreeScope* CommonAncestorTreeScope(TreeScope& other);

  Element* GetElementByAccessKey(const String& key) const;

  void Trace(Visitor*) const override;

  // Scoped style resolution for this tree.
  ScopedStyleResolver* GetScopedStyleResolver() const {
    return scoped_style_resolver_.Get();
  }
  ScopedStyleResolver& EnsureScopedStyleResolver();
  void ClearScopedStyleResolver();

  SVGTreeScopeResources& EnsureSVGTreeScopedResources();

  // Constructable stylesheets adopted by this scope.
  bool HasAdoptedStyleSheets() const;
  const HeapVector<Member<CSSStyleSheet>>& AdoptedStyleSheets();
  void SetAdoptedStyleSheets(HeapVector<Member<CSSStyleSheet>>&);
  void SetAdoptedStyleSheets(HeapVector<Member<CSSStyleSheet>>&,
                             ExceptionState&);

 protected:
  TreeScope(ContainerNode&, Document&);
  explicit TreeScope(Document&);
  virtual ~TreeScope();

  void ResetTreeScope();
  void SetDocument(Document& document) { document_ = &document; }
  void SetParentTreeScope(TreeScope&);
  void SetNeedsStyleRecalcForViewportUnits();

 private:
  Element* HitTestPointInternal(Node*, HitTestPointType) const;
  Element* FindAnchorWithName(const String& name);

  Member<ContainerNode> root_node_;
  Member<Document> document_;
  Member<TreeScope> parent_tree_scope_;

  Member<TreeOrderedMap> elements_by_id_;
  Member<TreeOrderedMap> image_maps_by_name_;

  Member<IdTargetObserverRegistry> id_target_observer_registry_;
  Member<ScopedStyleResolver> scoped_style_resolver_;
  mutable Member<DOMSelection> selection_;
  RadioButtonGroupScope radio_button_group_scope_;
  Member<SVGTreeScopeResources> svg_tree_scoped_resources_;
  HeapVector<Member<CSSStyleSheet>> adopted_style_sheets_;
};
// True if at least one element in this scope has registered |id|.
inline bool TreeScope::HasElementWithId(const AtomicString& id) const {
  DCHECK(!id.IsNull());
  if (!elements_by_id_)
    return false;
  return elements_by_id_->Contains(id);
}
// True when more than one element in this scope has registered |id|.
inline bool TreeScope::ContainsMultipleElementsWithId(
    const AtomicString& id) const {
  if (!elements_by_id_)
    return false;
  return elements_by_id_->ContainsMultiple(id);
}
DEFINE_COMPARISON_OPERATORS_WITH_REFERENCES(TreeScope)
HitTestResult HitTestInDocument(
Document*,
double x,
double y,
const HitTestRequest& = HitTestRequest::kReadOnly |
HitTestRequest::kActive);
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_DOM_TREE_SCOPE_H_
|
scheib/chromium
|
third_party/blink/renderer/core/dom/tree_scope.h
|
C
|
bsd-3-clause
| 7,752
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/common/variations/experiment_labels.h"
#include <set>
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "base/metrics/field_trial.h"
#include "base/strings/string_split.h"
#include "base/strings/utf_string_conversions.h"
#include "components/variations/variations_associated_data.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace chrome_variations {
// Verifies that BuildGoogleUpdateExperimentLabel() emits exactly the
// GOOGLE_UPDATE_SERVICE variation IDs registered for the active field trial
// groups, silently dropping groups with no associated ID.
TEST(ExperimentLabelsTest, BuildGoogleUpdateExperimentLabel) {
  const variations::VariationID TEST_VALUE_A = 3300200;
  const variations::VariationID TEST_VALUE_B = 3300201;
  const variations::VariationID TEST_VALUE_C = 3300202;
  const variations::VariationID TEST_VALUE_D = 3300203;

  // Each case lists '#'-separated trial/group name pairs and the
  // '#'-separated set of IDs expected in the resulting label.
  struct {
    const char* active_group_pairs;
    const char* expected_ids;
  } test_cases[] = {
    // Empty group.
    {"", ""},
    // Group of 1.
    {"FieldTrialA#Default", "3300200"},
    // Group of 1, doesn't have an associated ID.
    {"FieldTrialA#DoesNotExist", ""},
    // Group of 3.
    {"FieldTrialA#Default#FieldTrialB#Group1#FieldTrialC#Default",
     "3300200#3300201#3300202"},
    // Group of 3, one doesn't have an associated ID.
    {"FieldTrialA#Default#FieldTrialB#DoesNotExist#FieldTrialC#Default",
     "3300200#3300202"},
    // Group of 3, two don't have an associated ID (only FieldTrialC#Default
    // maps to a registered ID).
    {"FieldTrialX#Default#FieldTrialB#DoesNotExist#FieldTrialC#Default",
     "3300202"},
  };

  // Register a few VariationIDs.
  AssociateGoogleVariationID(variations::GOOGLE_UPDATE_SERVICE, "FieldTrialA",
                             "Default", TEST_VALUE_A);
  AssociateGoogleVariationID(variations::GOOGLE_UPDATE_SERVICE, "FieldTrialB",
                             "Group1", TEST_VALUE_B);
  AssociateGoogleVariationID(variations::GOOGLE_UPDATE_SERVICE, "FieldTrialC",
                             "Default", TEST_VALUE_C);
  AssociateGoogleVariationID(variations::GOOGLE_UPDATE_SERVICE, "FieldTrialD",
                             "Default", TEST_VALUE_D);  // Not actually used.

  for (size_t i = 0; i < arraysize(test_cases); ++i) {
    // Parse the input groups.
    base::FieldTrial::ActiveGroups groups;
    std::vector<std::string> group_data = base::SplitString(
        test_cases[i].active_group_pairs, "#",
        base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
    ASSERT_EQ(0U, group_data.size() % 2);
    for (size_t j = 0; j < group_data.size(); j += 2) {
      base::FieldTrial::ActiveGroup group;
      group.trial_name = group_data[j];
      group.group_name = group_data[j + 1];
      groups.push_back(group);
    }
    // Parse the expected output.
    std::vector<std::string> expected_ids_list = base::SplitString(
        test_cases[i].expected_ids, "#",
        base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
    std::string experiment_labels_string = base::UTF16ToUTF8(
        BuildGoogleUpdateExperimentLabel(groups));
    // Split the VariationIDs from the labels for verification below.
    std::set<std::string> parsed_ids;
    for (const std::string& label : base::SplitString(
             experiment_labels_string, ";",
             base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL)) {
      // The ID is precisely between the '=' and '|' characters in each label.
      size_t index_of_equals = label.find('=');
      size_t index_of_pipe = label.find('|');
      ASSERT_NE(std::string::npos, index_of_equals);
      ASSERT_NE(std::string::npos, index_of_pipe);
      ASSERT_GT(index_of_pipe, index_of_equals);
      parsed_ids.insert(label.substr(index_of_equals + 1,
                                     index_of_pipe - index_of_equals - 1));
    }
    // Verify that the resulting string contains each of the expected labels,
    // and nothing more. Note that the date is stripped out and ignored.
    for (std::vector<std::string>::const_iterator it =
             expected_ids_list.begin(); it != expected_ids_list.end(); ++it) {
      std::set<std::string>::iterator it2 = parsed_ids.find(*it);
      EXPECT_TRUE(parsed_ids.end() != it2);
      parsed_ids.erase(it2);
    }
    EXPECT_TRUE(parsed_ids.empty());
  }  // for
}
// Verifies that CombineExperimentLabels() joins the two label strings with
// ';', placing the "other" labels before the variations labels, and that
// empty inputs are handled without stray separators.
TEST(ExperimentLabelsTest, CombineExperimentLabels) {
  struct {
    const char* variations_labels;
    const char* other_labels;
    const char* expected_label;
  } test_cases[] = {
    {"A=B|Tue, 21 Jan 2014 15:30:21 GMT",
     "C=D|Tue, 21 Jan 2014 15:30:21 GMT",
     "C=D|Tue, 21 Jan 2014 15:30:21 GMT;A=B|Tue, 21 Jan 2014 15:30:21 GMT"},
    {"A=B|Tue, 21 Jan 2014 15:30:21 GMT",
     "",
     "A=B|Tue, 21 Jan 2014 15:30:21 GMT"},
    {"",
     "A=B|Tue, 21 Jan 2014 15:30:21 GMT",
     "A=B|Tue, 21 Jan 2014 15:30:21 GMT"},
    {"A=B|Tue, 21 Jan 2014 15:30:21 GMT;C=D|Tue, 21 Jan 2014 15:30:21 GMT",
     "P=Q|Tue, 21 Jan 2014 15:30:21 GMT;X=Y|Tue, 21 Jan 2014 15:30:21 GMT",
     "P=Q|Tue, 21 Jan 2014 15:30:21 GMT;X=Y|Tue, 21 Jan 2014 15:30:21 GMT;"
     "A=B|Tue, 21 Jan 2014 15:30:21 GMT;C=D|Tue, 21 Jan 2014 15:30:21 GMT"},
    {"",
     "",
     ""},
  };
  for (size_t i = 0; i < arraysize(test_cases); ++i) {
    std::string result = base::UTF16ToUTF8(CombineExperimentLabels(
        base::ASCIIToUTF16(test_cases[i].variations_labels),
        base::ASCIIToUTF16(test_cases[i].other_labels)));
    EXPECT_EQ(test_cases[i].expected_label, result);
  }
}
// Verifies that ExtractNonVariationLabels() drops the "CrVar"-named labels
// and preserves all other labels (in order), per the cases below.
TEST(ExperimentLabelsTest, ExtractNonVariationLabels) {
  struct {
    const char* input_label;
    const char* expected_output;
  } test_cases[] = {
    // Empty
    {"", ""},
    // One
    {"gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT",
     "gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT"},
    // Three
    {"CrVar1=123|Tue, 21 Jan 2014 15:30:21 GMT;"
     "experiment1=456|Tue, 21 Jan 2014 15:30:21 GMT;"
     "experiment2=789|Tue, 21 Jan 2014 15:30:21 GMT;"
     "CrVar1=123|Tue, 21 Jan 2014 15:30:21 GMT",
     "experiment1=456|Tue, 21 Jan 2014 15:30:21 GMT;"
     "experiment2=789|Tue, 21 Jan 2014 15:30:21 GMT"},
    // One and one Variation
    {"gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT;"
     "CrVar1=3310002|Tue, 21 Jan 2014 15:30:21 GMT",
     "gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT"},
    // One and one Variation, flipped
    {"CrVar1=3310002|Tue, 21 Jan 2014 15:30:21 GMT;"
     "gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT",
     "gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT"},
    // Sandwiched
    {"CrVar1=3310002|Tue, 21 Jan 2014 15:30:21 GMT;"
     "gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT;"
     "CrVar2=3310003|Tue, 21 Jan 2014 15:30:21 GMT;"
     "CrVar3=3310004|Tue, 21 Jan 2014 15:30:21 GMT",
     "gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT"},
    // Only Variations
    {"CrVar1=3310002|Tue, 21 Jan 2014 15:30:21 GMT;"
     "CrVar2=3310003|Tue, 21 Jan 2014 15:30:21 GMT;"
     "CrVar3=3310004|Tue, 21 Jan 2014 15:30:21 GMT",
     ""},
    // Empty values
    {"gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT;"
     "CrVar1=3310002|Tue, 21 Jan 2014 15:30:21 GMT",
     "gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT"},
    // Trailing semicolon
    {"gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT;"
     "CrVar1=3310002|Tue, 21 Jan 2014 15:30:21 GMT;",  // Note the semi here.
     "gcapi_brand=123|Tue, 21 Jan 2014 15:30:21 GMT"},
    // Semis
    {";;;;", ""},
  };
  for (size_t i = 0; i < arraysize(test_cases); ++i) {
    std::string non_variation_labels = base::UTF16ToUTF8(
        ExtractNonVariationLabels(
            base::ASCIIToUTF16(test_cases[i].input_label)));
    EXPECT_EQ(test_cases[i].expected_output, non_variation_labels);
  }
}
} // namespace chrome_variations
|
Just-D/chromium-1
|
chrome/common/variations/experiment_labels_unittest.cc
|
C++
|
bsd-3-clause
| 7,656
|
# -*- coding: utf-8 -*-
import morepath
from morepath.request import Response
from morepath.authentication import Identity, NO_IDENTITY
from .fixtures import identity_policy
import base64
import json
from webtest import TestApp as Client
try:
from cookielib import CookieJar
except ImportError:
from http.cookiejar import CookieJar
def test_no_permission():
    """A view guarded by a permission is forbidden when no rule grants it."""
    class app(morepath.App):
        pass

    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.path(model=Model, path='{id}',
              variables=lambda model: {'id': model.id})
    def get_model(id):
        return Model(id)

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    c = Client(app())
    # No permission_rule was registered, so access is always denied.
    c.get('/foo', status=403)
def test_permission_directive_identity():
    """A permission_rule decides per-model access for an identified user."""
    class app(morepath.App):
        pass

    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.verify_identity()
    def verify_identity(identity):
        return True

    @app.path(model=Model, path='{id}',
              variables=lambda model: {'id': model.id})
    def get_model(id):
        return Model(id)

    @app.permission_rule(model=Model, permission=Permission)
    def get_permission(identity, model, permission):
        # Only the model with id 'foo' is accessible.
        if model.id == 'foo':
            return True
        else:
            return False

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @app.identity_policy()
    class IdentityPolicy(object):
        def identify(self, request):
            return Identity('testidentity')

        def remember(self, response, request, identity):
            pass

        def forget(self, response, request):
            pass

    c = Client(app())

    response = c.get('/foo')
    assert response.body == b'Model: foo'
    response = c.get('/bar', status=403)
def test_permission_directive_with_app_arg():
    """A permission_rule may take the app instance as its first argument."""
    class App(morepath.App):
        pass

    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @App.verify_identity()
    def verify_identity(identity):
        return True

    @App.path(model=Model, path='{id}',
              variables=lambda model: {'id': model.id})
    def get_model(id):
        return Model(id)

    @App.permission_rule(model=Model, permission=Permission)
    def get_permission(app, identity, model, permission):
        # The framework passes the App instance as the first argument.
        assert isinstance(app, App)
        if model.id == 'foo':
            return True
        else:
            return False

    @App.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @App.identity_policy()
    class IdentityPolicy(object):
        def identify(self, request):
            return Identity('testidentity')

        def remember(self, response, request, identity):
            pass

        def forget(self, response, request):
            pass

    c = Client(App())

    response = c.get('/foo')
    assert response.body == b'Model: foo'
    response = c.get('/bar', status=403)
def test_permission_directive_no_identity():
    """A permission_rule registered with identity=None applies to anonymous
    (unauthenticated) requests."""
    class app(morepath.App):
        pass

    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.path(model=Model, path='{id}',
              variables=lambda model: {'id': model.id})
    def get_model(id):
        return Model(id)

    @app.permission_rule(model=Model, permission=Permission, identity=None)
    def get_permission(identity, model, permission):
        # Only the model with id 'foo' is accessible.
        if model.id == 'foo':
            return True
        else:
            return False

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    c = Client(app())

    response = c.get('/foo')
    assert response.body == b'Model: foo'
    response = c.get('/bar', status=403)
def test_policy_action():
    """The identity policy configured in the fixture app grants access to
    '/foo' but responds 403 for '/bar'."""
    client = Client(identity_policy.app())
    allowed = client.get('/foo')
    assert allowed.body == b'Model: foo'
    client.get('/bar', status=403)
def test_no_identity_policy():
    """Logging in has no effect when no identity policy is installed."""
    class App(morepath.App):
        pass

    @App.path(path='{id}')
    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @App.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @App.view(model=Model, name='log_in')
    def log_in(self, request):
        response = Response()
        request.app.remember_identity(
            response, request, Identity(userid='user', payload='Amazing'))
        return response

    @App.view(model=Model, name='log_out')
    def log_out(self, request):
        response = Response()
        request.app.forget_identity(response, request)
        return response

    @App.verify_identity()
    def verify_identity(identity):
        return True

    c = Client(App())

    # if you protect things with permissions and you
    # install no identity policy, doing a log in has
    # no effect
    c.get('/foo', status=403)
    c.get('/foo/log_in')
    c.get('/foo', status=403)
    c.get('/foo/log_out')
    c.get('/foo', status=403)
class DumbCookieIdentityPolicy(object):
    """A very insecure cookie-based policy.

    Stores the identity as unsigned base64-encoded JSON in a 'dumb_id'
    cookie. Only for testing. Don't use in practice!
    """
    def identify(self, request):
        # Decode the identity from the cookie, if one was sent.
        data = request.cookies.get('dumb_id', None)
        if data is None:
            return NO_IDENTITY
        data = json.loads(base64.b64decode(data).decode())
        return Identity(**data)

    def remember(self, response, request, identity):
        # Serialize the identity's attribute dict into the cookie.
        data = base64.b64encode(str.encode(json.dumps(identity.as_dict())))
        response.set_cookie('dumb_id', data)

    def forget(self, response, request):
        response.delete_cookie('dumb_id')
def test_cookie_identity_policy():
    """A cookie-backed identity survives across requests and can be
    forgotten again via log_out."""
    class app(morepath.App):
        pass

    @app.path(path='{id}')
    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.permission_rule(model=Model, permission=Permission)
    def get_permission(identity, model, permission):
        return identity.userid == 'user'

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @app.view(model=Model, name='log_in')
    def log_in(self, request):
        response = Response()
        request.app.remember_identity(
            response, request, Identity(userid='user', payload='Amazing'))
        return response

    @app.view(model=Model, name='log_out')
    def log_out(self, request):
        response = Response()
        request.app.forget_identity(response, request)
        return response

    @app.identity_policy()
    def policy():
        return DumbCookieIdentityPolicy()

    @app.verify_identity()
    def verify_identity(identity):
        return True

    # The cookie jar lets the client retain the identity cookie between
    # requests.
    c = Client(app(), cookiejar=CookieJar())

    response = c.get('/foo', status=403)
    response = c.get('/foo/log_in')
    response = c.get('/foo', status=200)
    assert response.body == b'Model: foo'
    response = c.get('/foo/log_out')
    response = c.get('/foo', status=403)
def test_default_verify_identity():
    """Without a verify_identity directive, no identity is accepted."""
    class app(morepath.App):
        pass

    unverified = morepath.Identity('foo')
    assert not app()._verify_identity(unverified)
def test_verify_identity_directive():
    """A verify_identity directive decides acceptance from identity data."""
    class app(morepath.App):
        pass

    @app.verify_identity()
    def verify_identity(identity):
        return identity.password == 'right'

    rejected = morepath.Identity('foo', password='wrong')
    accepted = morepath.Identity('foo', password='right')
    assert not app()._verify_identity(rejected)
    assert app()._verify_identity(accepted)
def test_verify_identity_directive_app_arg():
    """verify_identity may take the app instance as its first argument."""
    class App(morepath.App):
        pass

    @App.verify_identity()
    def verify_identity(app, identity):
        # The framework passes the App instance in as the first argument.
        assert isinstance(app, App)
        return identity.password == 'right'

    rejected = morepath.Identity('foo', password='wrong')
    accepted = morepath.Identity('foo', password='right')
    assert not App()._verify_identity(rejected)
    assert App()._verify_identity(accepted)
def test_verify_identity_directive_identity_argument():
    """verify_identity dispatches on the identity's class; the most specific
    registered class wins."""
    class app(morepath.App):
        pass

    class PlainIdentity(morepath.Identity):
        pass

    @app.verify_identity(identity=object)
    def verify_identity(identity):
        # Fallback for any identity class: reject.
        return False

    @app.verify_identity(identity=PlainIdentity)
    def verify_plain_identity(identity):
        return identity.password == 'right'

    identity = PlainIdentity('foo', password='wrong')
    assert not app()._verify_identity(identity)
    # morepath.Identity falls through to the object-registered rejection.
    identity = morepath.Identity('foo', password='right')
    assert not app()._verify_identity(identity)
    identity = PlainIdentity('foo', password='right')
    assert app()._verify_identity(identity)
def test_false_verify_identity():
    """An identity rejected by verify_identity grants no access even after a
    log in that remembered it in a cookie."""
    class app(morepath.App):
        pass

    @app.path(path='{id}')
    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @app.view(model=Model, name='log_in')
    def log_in(self, request):
        response = Response()
        request.app.remember_identity(
            response, request,
            Identity(userid='user', payload='Amazing'))
        return response

    @app.identity_policy()
    def policy():
        return DumbCookieIdentityPolicy()

    @app.verify_identity()
    def verify_identity(identity):
        # Reject every identity.
        return False

    c = Client(app(), cookiejar=CookieJar())

    c.get('/foo', status=403)
    c.get('/foo/log_in')
    c.get('/foo', status=403)
def test_dispatch_verify_identity():
    # This app uses two Identity classes, morepath.Identity and
    # Anonymous, which are verified in two different ways (see the two
    # functions a little further down that are decorated by
    # @App.verify_identity).
    class App(morepath.App):
        pass

    @App.path(path='{id}')
    class Model(object):
        def __init__(self, id):
            self.id = id

    class Read(object):
        """Read Permission"""

    class Anonymous(Identity):
        # An identity with no userid, representing an unauthenticated user.
        def __init__(self, **kw):
            super(Anonymous, self).__init__(userid=None, **kw)

    @App.permission_rule(model=Model, permission=Read)
    def get_permission(identity, model, permission):
        return True

    @App.view(model=Model, permission=Read)
    def default(self, request):
        if request.identity.userid == self.id:
            return "Read restricted: %s" % self.id
        return "Read shared: %s" % self.id

    @App.identity_policy()
    class HeaderIdentityPolicy(object):
        def identify(self, request):
            # An empty 'user' header means anonymous; a non-empty one is
            # paired with a 'password' header.
            user = request.headers.get('user', None)
            if user is not None:
                if user == '':
                    return Anonymous()
                return Identity(
                    userid=user,
                    password=request.headers['password'])

        def remember(self, response, request, identity):
            pass

        def forget(self, response, request):
            pass

    @App.verify_identity(identity=Identity)
    def verify_identity(identity):
        return identity.password == 'secret'

    @App.verify_identity(identity=Anonymous)
    def verify_anonymous(identity):
        return True

    c = Client(App())

    r = c.get('/foo', status=403)
    r = c.get('/foo', status=403, headers=dict(user='foo', password='wrong'))
    r = c.get('/foo', status=403, headers=dict(user='bar', password='wrong'))
    r = c.get('/foo', status=200, headers={'user': ''})
    assert r.text == 'Read shared: foo'
    r = c.get('/foo', status=200, headers=dict(user='foo', password='secret'))
    assert r.text == 'Read restricted: foo'
    r = c.get('/foo', status=200, headers=dict(user='bar', password='secret'))
    assert r.text == 'Read shared: foo'
def test_settings():
    """An identity policy factory can receive app settings as an argument."""
    class App(morepath.App):
        pass

    class Model(object):
        pass

    @App.verify_identity()
    def verify_identity(identity):
        return True

    @App.path(model=Model, path='test')
    def get_model():
        return Model()

    @App.view(model=Model)
    def default(self, request):
        return "%s, your token is valid." % request.identity.userid

    @App.setting_section(section="test")
    def get_test_settings():
        return {'encryption_key': 'secret'}

    @App.identity_policy()
    def get_identity_policy(settings):
        # The 'test' settings section becomes keyword arguments for the
        # policy constructor.
        test_settings = settings.test.__dict__.copy()
        return IdentityPolicy(**test_settings)

    class IdentityPolicy(object):
        def __init__(self, encryption_key):
            self.encryption_key = encryption_key

        def identify(self, request):
            token = self.get_token(request)
            if token is None or not self.token_is_valid(
                token, self.encryption_key
            ):
                return NO_IDENTITY
            return Identity('Testuser')

        def remember(self, response, request, identity):
            pass

        def forget(self, response, request):
            pass

        def get_token(self, request):
            # Expects an 'Authorization: Bearer <token>' header.
            try:
                authtype, token = request.authorization
            except ValueError:
                return None
            if authtype.lower() != 'bearer':
                return None
            return token

        def token_is_valid(self, token, encryption_key):
            return token == encryption_key  # fake validation

    c = Client(App())

    headers = {'Authorization': 'Bearer secret'}
    response = c.get('/test', headers=headers)
    assert response.body == b'Testuser, your token is valid.'
def test_prevent_poisoned_host_headers():
    """Requests carrying malformed/poisoned Host headers get a 400 response,
    while well-formed hosts are served normally."""
    class App(morepath.App):
        pass

    @App.path(path='')
    class Model(object):
        pass

    @App.view(model=Model)
    def view_model(self, request):
        return 'ok'

    poisoned_hosts = (
        'example.com@evil.tld',
        'example.com:dr.frankenstein@evil.tld',
        'example.com:dr.frankenstein@evil.tld:80',
        'example.com:80/badpath',
        'example.com: recovermypassword.com',
    )
    legit_hosts = (
        'example.com',
        'example.com:80',
        '12.34.56.78',
        '12.34.56.78:443',
        '[2001:19f0:feee::dead:beef:cafe]',
        '[2001:19f0:feee::dead:beef:cafe]:8080',
        'xn--4ca9at.com',  # Punnycode for öäü.com
    )

    c = Client(App())

    for host in legit_hosts:
        response = c.get('/', headers={'Host': host})
        assert response.status_code == 200

    for host in poisoned_hosts:
        response = c.get('/', headers={'Host': host}, expect_errors=True)
        assert response.status_code == 400
def test_settings_in_permission_rule():
    """A permission_rule taking an app argument can consult app settings."""
    class App(morepath.App):
        pass

    @App.path(path='{id}')
    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @App.verify_identity()
    def verify_identity(identity):
        return True

    @App.setting_section(section="permissions")
    def get_roles_setting():
        return {
            'read': {'foo'},
        }

    @App.permission_rule(model=Model, permission=Permission)
    def get_permission(app, identity, model, permission):
        # Access is granted only to ids listed in the 'read' setting.
        return model.id in app.settings.permissions.read

    @App.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @App.identity_policy()
    class IdentityPolicy(object):
        def identify(self, request):
            return Identity('testidentity')

        def remember(self, response, request, identity):
            pass

        def forget(self, response, request):
            pass

    c = Client(App())

    response = c.get('/foo')
    assert response.body == b'Model: foo'
    response = c.get('/bar', status=403)
|
taschini/morepath
|
morepath/tests/test_security.py
|
Python
|
bsd-3-clause
| 16,352
|
<P>hello</P>
|
gigamonkey/monkeylib-html
|
html-testfiles/test000.html
|
HTML
|
bsd-3-clause
| 13
|
// Copyright 2014 PDFium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Original code copyright 2014 Foxit Software Inc. http://www.foxitsoftware.com
#include "xfa/fxfa/parser/xfa_document_datamerger_imp.h"
#include "xfa/fxfa/parser/cxfa_document.h"
#include "xfa/fxfa/parser/cxfa_node.h"
// Decides whether the data merger must generate a form node for
// |pTemplateChild|: Variables nodes always do; container nodes, Proto
// nodes, and (when instance managers are in use) Occur nodes never do.
bool XFA_DataMerge_NeedGenerateForm(CXFA_Node* pTemplateChild,
                                    bool bUseInstanceManager) {
  const XFA_Element eType = pTemplateChild->GetElementType();
  if (eType == XFA_Element::Variables)
    return true;
  if (pTemplateChild->IsContainerNode())
    return false;
  if (eType == XFA_Element::Proto)
    return false;
  if (bUseInstanceManager && eType == XFA_Element::Occur)
    return false;
  return true;
}
// Scans the direct children of |pFormParent| for an unused node that matches
// the requested element type and name hash; returns it, or nullptr if none.
// |pDocument| is unused here but kept for signature parity with callers.
CXFA_Node* XFA_DataMerge_FindFormDOMInstance(CXFA_Document* pDocument,
                                             XFA_Element eType,
                                             uint32_t dwNameHash,
                                             CXFA_Node* pFormParent) {
  CXFA_Node* pChild = pFormParent->GetFirstChild();
  while (pChild) {
    const bool bMatches = pChild->GetElementType() == eType &&
                          pChild->GetNameHash() == dwNameHash &&
                          pChild->IsUnusedNode();
    if (bMatches)
      return pChild;
    pChild = pChild->GetNextSibling();
  }
  return nullptr;
}
// Merges |pTemplateNode| into the form DOM under |pFormParent|: reuses an
// existing (unused) form node when one matches, otherwise clones the template
// into a new form node. When |pSubformArray| is non-null, candidates are
// consumed from the front of that array instead of being searched for.
// Returns the merged (existing or freshly cloned) form node.
CXFA_Node* XFA_NodeMerge_CloneOrMergeContainer(
    CXFA_Document* pDocument,
    CXFA_Node* pFormParent,
    CXFA_Node* pTemplateNode,
    bool bRecursive,
    std::vector<CXFA_Node*>* pSubformArray) {
  CXFA_Node* pExistingNode = nullptr;
  if (!pSubformArray) {
    // No explicit candidate list: look for a matching unused form node.
    pExistingNode = XFA_DataMerge_FindFormDOMInstance(
        pDocument, pTemplateNode->GetElementType(),
        pTemplateNode->GetNameHash(), pFormParent);
  } else if (!pSubformArray->empty()) {
    // Pop the next pre-selected candidate off the front of the array.
    pExistingNode = pSubformArray->front();
    pSubformArray->erase(pSubformArray->begin());
  }
  if (pExistingNode) {
    if (pSubformArray) {
      pFormParent->InsertChildAndNotify(pExistingNode, nullptr);
    } else if (pExistingNode->IsContainerNode()) {
      // Re-append to move the node to the end of the parent's child list.
      pFormParent->RemoveChildAndNotify(pExistingNode, true);
      pFormParent->InsertChildAndNotify(pExistingNode, nullptr);
    }
    // The node is now bound to this template and no longer "unused".
    pExistingNode->ClearFlag(XFA_NodeFlag_UnusedNode);
    pExistingNode->SetTemplateNode(pTemplateNode);
    // Items children are leaf data and are not merged recursively.
    if (bRecursive && pExistingNode->GetElementType() != XFA_Element::Items) {
      for (CXFA_Node* pTemplateChild = pTemplateNode->GetFirstChild();
           pTemplateChild; pTemplateChild = pTemplateChild->GetNextSibling()) {
        if (XFA_DataMerge_NeedGenerateForm(pTemplateChild, true)) {
          XFA_NodeMerge_CloneOrMergeContainer(
              pDocument, pExistingNode, pTemplateChild, bRecursive, nullptr);
        }
      }
    }
    pExistingNode->SetFlagAndNotify(XFA_NodeFlag_Initialized);
    return pExistingNode;
  }
  // No reusable node: clone the template (shallow) and attach it.
  CXFA_Node* pNewNode = pTemplateNode->CloneTemplateToForm(false);
  pFormParent->InsertChildAndNotify(pNewNode, nullptr);
  if (bRecursive) {
    for (CXFA_Node* pTemplateChild = pTemplateNode->GetFirstChild();
         pTemplateChild; pTemplateChild = pTemplateChild->GetNextSibling()) {
      if (XFA_DataMerge_NeedGenerateForm(pTemplateChild, true)) {
        // Children are cloned deeply (true), unlike the shallow parent clone.
        CXFA_Node* pNewChild = pTemplateChild->CloneTemplateToForm(true);
        pNewNode->InsertChildAndNotify(pNewChild, nullptr);
      }
    }
  }
  return pNewNode;
}
// Determines the data scope for |pParentFormNode|: the bind data of its
// nearest container ancestor (including itself), falling back to the
// document-wide data root when no ancestor carries bind data.
CXFA_Node* XFA_DataMerge_FindDataScope(CXFA_Node* pParentFormNode) {
  if (!pParentFormNode)
    return nullptr;

  CXFA_Node* pNode = pParentFormNode;
  while (pNode && pNode->IsContainerNode()) {
    if (CXFA_Node* pDataScope = pNode->GetBindData())
      return pDataScope;
    pNode = pNode->GetParent();
  }

  return ToNode(
      pParentFormNode->GetDocument()->GetXFAObject(XFA_HASHCODE_Data));
}
|
endlessm/chromium-browser
|
third_party/pdfium/xfa/fxfa/parser/xfa_document_datamerger_imp.cpp
|
C++
|
bsd-3-clause
| 3,918
|
#!/usr/bin/env python3
# flake8: noqa

import io
import sys

# The four UCD input files must be supplied in this exact order.
if len(sys.argv) != 5:
    print("""usage: ./gen-use-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt UnicodeData.txt Blocks.txt
Input file, as of Unicode 12:
* https://unicode.org/Public/UCD/latest/ucd/IndicSyllabicCategory.txt
* https://unicode.org/Public/UCD/latest/ucd/IndicPositionalCategory.txt
* https://unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
* https://unicode.org/Public/UCD/latest/ucd/Blocks.txt""", file=sys.stderr)
    sys.exit(1)

# Blocks excluded from the generated table entirely.
BLACKLISTED_BLOCKS = ["Thai", "Lao"]

files = [io.open(path, encoding='utf-8') for path in sys.argv[1:]]

# Every input except UnicodeData.txt (index 2) begins with a two-line header.
headers = [[f.readline() for _ in range(2)] for j, f in enumerate(files) if j != 2]
headers.append(["UnicodeData.txt does not have a header."])

# data[i]:   code point -> property value for input file i.
# values[i]: property value -> number of code points carrying it.
data = [{} for _ in files]
values = [{} for _ in files]
for idx, f in enumerate(files):
    for line in f:
        # Drop trailing comments before splitting into fields.
        line = line.partition('#')[0]

        fields = [x.strip() for x in line.split(';')]
        if len(fields) == 1:
            continue

        # First field is a single code point or an inclusive "start..end" range.
        bounds = fields[0].split('..')
        start = int(bounds[0], 16)
        end = start if len(bounds) == 1 else int(bounds[1], 16)

        # UnicodeData.txt stores the General_Category in field 2, not field 1.
        prop = fields[1 if idx != 2 else 2]

        for u in range(start, end + 1):
            data[idx][u] = prop
        values[idx][prop] = values[idx].get(prop, 0) + end - start + 1
# Per-file default property values (ISC, IPC, GC, Block).
defaults = ('Other', 'Not_Applicable', 'Cn', 'No_Block')

# TODO Characters that are not in Unicode Indic files, but used in USE
for u in (0x034F, 0x1B61, 0x1B63, 0x1B64, 0x1B65,
          0x1B66, 0x1B67, 0x1B69, 0x1B6A, 0x2060):
    data[0][u] = defaults[0]
# TODO https://github.com/harfbuzz/harfbuzz/pull/1685
# TODO https://github.com/harfbuzz/harfbuzz/issues/1035
# TODO https://github.com/harfbuzz/harfbuzz/pull/1399
for u in (0x1B5B, 0x1B5C, 0x1B5F, 0x1B62, 0x1B68,
          0x11C44, 0x11C45, 0x111C8):
    data[0][u] = 'Consonant_Placeholder'
# Variation selectors get the default syllabic category.
for u in range(0xFE00, 0xFE0F + 1):
    data[0][u] = defaults[0]

# Merge data into one dict, keyed by code point, valued by
# [ISC, IPC, GC, Block] lists:
for i, v in enumerate(defaults):
    values[i][v] = values[i].get(v, 0) + 1
combined = {}
for i, d in enumerate(data):
    for u, v in d.items():
        # GC and Block (i >= 2) only annotate already-collected code points.
        if i >= 2 and u not in combined:
            continue
        if u not in combined:
            combined[u] = list(defaults)
        combined[u][i] = v
# Drop blocks handled by other shapers.
combined = {k: v for k, v in combined.items() if v[3] not in BLACKLISTED_BLOCKS}

data = combined
del combined

num = len(data)  # kept for parity with the original script
# Every property value that may appear in the merged data, in three groups.
property_names = [
    # General_Category
    'Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc',
    'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po',
    'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs',
    # Indic_Syllabic_Category
    'Other',
    'Bindu',
    'Visarga',
    'Avagraha',
    'Nukta',
    'Virama',
    'Pure_Killer',
    'Invisible_Stacker',
    'Vowel_Independent',
    'Vowel_Dependent',
    'Vowel',
    'Consonant_Placeholder',
    'Consonant',
    'Consonant_Dead',
    'Consonant_With_Stacker',
    'Consonant_Prefixed',
    'Consonant_Preceding_Repha',
    'Consonant_Succeeding_Repha',
    'Consonant_Subjoined',
    'Consonant_Medial',
    'Consonant_Final',
    'Consonant_Head_Letter',
    'Consonant_Initial_Postfixed',
    'Modifying_Letter',
    'Tone_Letter',
    'Tone_Mark',
    'Gemination_Mark',
    'Cantillation_Mark',
    'Register_Shifter',
    'Syllable_Modifier',
    'Consonant_Killer',
    'Non_Joiner',
    'Joiner',
    'Number_Joiner',
    'Number',
    'Brahmi_Joining_Number',
    # Indic_Positional_Category
    'Not_Applicable',
    'Right',
    'Left',
    'Visual_Order_Left',
    'Left_And_Right',
    'Top',
    'Bottom',
    'Top_And_Bottom',
    'Top_And_Right',
    'Top_And_Left',
    'Top_And_Left_And_Right',
    'Bottom_And_Left',
    'Bottom_And_Right',
    'Top_And_Bottom_And_Right',
    'Overstruck',
]
class PropertyValue(object):
    """An interned symbolic property value.

    Instances compare equal both to other PropertyValue objects with the
    same name and to the bare name string, and hash like that string, so
    they can be used interchangeably with strings as dict/set keys.
    """

    def __init__(self, name_):
        self.name = name_

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        other_name = other if isinstance(other, str) else other.name
        return self.name == other_name

    def __ne__(self, other):
        return not (self == other)
# Intern every property name as a PropertyValue and export each one as a
# module-level global so the predicates below can refer to them by name.
property_values = {}
for prop_name in property_names:
    prop = PropertyValue(prop_name)
    assert prop not in property_values
    # PropertyValue.__eq__ compares against strings, so this also guards
    # against clobbering an existing global of the same name.
    assert prop not in globals()
    property_values[prop_name] = prop
globals().update(property_values)
# One predicate per USE category. Each takes the code point U, its
# Indic_Syllabic_Category UISC and General_Category UGC, and returns
# whether the code point belongs to that category.

def is_BASE(U, UISC, UGC):
    independent = UISC in [Number, Consonant, Consonant_Head_Letter,
                           #SPEC-DRAFT Consonant_Placeholder,
                           Tone_Letter,
                           Vowel_Independent]  #SPEC-DRAFT
    letter_like = UGC == Lo and UISC in [Avagraha, Bindu, Consonant_Final,
                                         Consonant_Medial, Consonant_Subjoined,
                                         Vowel, Vowel_Dependent]
    return independent or letter_like

def is_BASE_IND(U, UISC, UGC):
    #SPEC-DRAFT return (UISC in [Consonant_Dead, Modifying_Letter] or UGC == Po)
    # SPEC-DRAFT-OUTDATED! U == 0x002D
    return (UISC in [Consonant_Dead, Modifying_Letter] or
            (UGC == Po and U not in [0x104B, 0x104E, 0x1B5B, 0x1B5C, 0x1B5F,
                                     0x2022, 0x111C8, 0x11A3F, 0x11A45,
                                     0x11C44, 0x11C45]))

def is_BASE_NUM(U, UISC, UGC):
    return UISC == Brahmi_Joining_Number

def is_BASE_OTHER(U, UISC, UGC):
    if UISC == Consonant_Placeholder:
        return True  #SPEC-DRAFT
    #SPEC-DRAFT return U in [0x00A0, 0x00D7, 0x2015, 0x2022, 0x25CC, 0x25FB, 0x25FC, 0x25FD, 0x25FE]
    return U in [0x2015, 0x2022, 0x25FB, 0x25FC, 0x25FD, 0x25FE]

def is_CGJ(U, UISC, UGC):
    # U+034F COMBINING GRAPHEME JOINER.
    return U == 0x034F

def is_CONS_FINAL(U, UISC, UGC):
    return ((UISC == Consonant_Final and UGC != Lo) or
            UISC == Consonant_Succeeding_Repha)

def is_CONS_FINAL_MOD(U, UISC, UGC):
    #SPEC-DRAFT return UISC in [Consonant_Final_Modifier, Syllable_Modifier]
    return UISC == Syllable_Modifier

def is_CONS_MED(U, UISC, UGC):
    # Consonant_Initial_Postfixed is new in Unicode 11; not in the spec.
    return (UISC == Consonant_Medial and UGC != Lo or
            UISC == Consonant_Initial_Postfixed)

def is_CONS_MOD(U, UISC, UGC):
    return UISC in [Nukta, Gemination_Mark, Consonant_Killer]

def is_CONS_SUB(U, UISC, UGC):
    #SPEC-DRAFT return UISC == Consonant_Subjoined
    return UISC == Consonant_Subjoined and UGC != Lo

def is_CONS_WITH_STACKER(U, UISC, UGC):
    return UISC == Consonant_With_Stacker

def is_HALANT(U, UISC, UGC):
    # Viramas/stackers that are neither HVM nor Sakot exceptions.
    return (UISC in [Virama, Invisible_Stacker]
            and not is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC)
            and not is_SAKOT(U, UISC, UGC))

def is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC):
    # https://github.com/harfbuzz/harfbuzz/issues/1102
    # https://github.com/harfbuzz/harfbuzz/issues/1379
    return U in [0x11046, 0x1134D]

def is_HALANT_NUM(U, UISC, UGC):
    return UISC == Number_Joiner

def is_ZWNJ(U, UISC, UGC):
    return UISC == Non_Joiner

def is_ZWJ(U, UISC, UGC):
    return UISC == Joiner

def is_Word_Joiner(U, UISC, UGC):
    return U == 0x2060

def is_OTHER(U, UISC, UGC):
    #SPEC-OUTDATED return UGC == Zs # or any other SCRIPT_COMMON characters
    # "Other" in ISC terms, minus everything claimed by a more specific
    # predicate above/below.
    return (UISC == Other
            and not is_SYM(U, UISC, UGC)
            and not is_SYM_MOD(U, UISC, UGC)
            and not is_CGJ(U, UISC, UGC)
            and not is_Word_Joiner(U, UISC, UGC)
            and not is_VARIATION_SELECTOR(U, UISC, UGC))

def is_Reserved(U, UISC, UGC):
    return UGC == 'Cn'

def is_REPHA(U, UISC, UGC):
    return UISC in [Consonant_Preceding_Repha, Consonant_Prefixed]

def is_SAKOT(U, UISC, UGC):
    return U == 0x1A60

def is_SYM(U, UISC, UGC):
    if U == 0x25CC:
        return False  #SPEC-DRAFT
    #SPEC-DRAFT return UGC in [So, Sc] or UISC == Symbol_Letter
    return UGC in [So, Sc] and U not in [0x1B62, 0x1B68]

def is_SYM_MOD(U, UISC, UGC):
    return U in [0x1B6B, 0x1B6C, 0x1B6D, 0x1B6E, 0x1B6F,
                 0x1B70, 0x1B71, 0x1B72, 0x1B73]

def is_VARIATION_SELECTOR(U, UISC, UGC):
    return 0xFE00 <= U <= 0xFE0F

def is_VOWEL(U, UISC, UGC):
    # https://github.com/harfbuzz/harfbuzz/issues/376
    return (UISC == Pure_Killer or
            (UGC != Lo and UISC in [Vowel, Vowel_Dependent] and U not in [0xAA29]))

def is_VOWEL_MOD(U, UISC, UGC):
    # https://github.com/harfbuzz/harfbuzz/issues/376
    return (UISC in [Tone_Mark, Cantillation_Mark, Register_Shifter, Visarga] or
            (UGC != Lo and (UISC == Bindu or U in [0xAA29])))
# USE category code -> recognizer predicate.
use_mapping = {
    'B':    is_BASE,
    'IND':  is_BASE_IND,
    'N':    is_BASE_NUM,
    'GB':   is_BASE_OTHER,
    'CGJ':  is_CGJ,
    'F':    is_CONS_FINAL,
    'FM':   is_CONS_FINAL_MOD,
    'M':    is_CONS_MED,
    'CM':   is_CONS_MOD,
    'SUB':  is_CONS_SUB,
    'CS':   is_CONS_WITH_STACKER,
    'H':    is_HALANT,
    'HVM':  is_HALANT_OR_VOWEL_MODIFIER,
    'HN':   is_HALANT_NUM,
    'ZWNJ': is_ZWNJ,
    'ZWJ':  is_ZWJ,
    'WJ':   is_Word_Joiner,
    'O':    is_OTHER,
    'Rsv':  is_Reserved,
    'R':    is_REPHA,
    'S':    is_SYM,
    'Sk':   is_SAKOT,
    'SM':   is_SYM_MOD,
    'VS':   is_VARIATION_SELECTOR,
    'V':    is_VOWEL,
    'VM':   is_VOWEL_MOD,
}

# For positional categories: USE category -> {suffix -> accepted UIPC values}.
# A value of None means the category takes no positional suffix.
use_positions = {
    'F': {
        'Abv': [Top],
        'Blw': [Bottom],
        'Pst': [Right],
    },
    'M': {
        'Abv': [Top],
        'Blw': [Bottom, Bottom_And_Left],
        'Pst': [Right],
        'Pre': [Left],
    },
    'CM': {
        'Abv': [Top],
        'Blw': [Bottom],
    },
    'V': {
        'Abv': [Top, Top_And_Bottom, Top_And_Bottom_And_Right, Top_And_Right],
        'Blw': [Bottom, Overstruck, Bottom_And_Right],
        'Pst': [Right, Top_And_Left, Top_And_Left_And_Right, Left_And_Right],
        'Pre': [Left],
    },
    'VM': {
        'Abv': [Top],
        'Blw': [Bottom, Overstruck],
        'Pst': [Right],
        'Pre': [Left],
    },
    'SM': {
        'Abv': [Top],
        'Blw': [Bottom],
    },
    'H': None,
    'HVM': None,
    'B': None,
    'FM': {
        'Abv': [Top],
        'Blw': [Bottom],
        'Pst': [Not_Applicable],
    },
    'SUB': None,
}
def map_to_use(data):
    """Resolve each code point to (USE category [+ position suffix], block)."""
    out = {}
    items = use_mapping.items()
    for U, (UISC, UIPC, UGC, UBlock) in data.items():

        # Resolve Indic_Syllabic_Category

        # TODO: These don't have UISC assigned in Unicode 12.0, but have UIPC
        if 0x1CE2 <= U <= 0x1CE8:
            UISC = Cantillation_Mark

        # Tibetan:
        # TODO: These don't have UISC assigned in Unicode 12.0, but have UIPC
        if 0x0F18 <= U <= 0x0F19 or 0x0F3E <= U <= 0x0F3F:
            UISC = Vowel_Dependent
        if 0x0F86 <= U <= 0x0F87:
            UISC = Tone_Mark

        # Overrides to allow NFC order matching syllable
        # https://github.com/harfbuzz/harfbuzz/issues/1012
        if UBlock == 'Tibetan' and is_VOWEL(U, UISC, UGC):
            if UIPC == Top:
                UIPC = Bottom

        # TODO: https://github.com/harfbuzz/harfbuzz/pull/982
        # also https://github.com/harfbuzz/harfbuzz/issues/1012
        if UBlock == 'Chakma' and is_VOWEL(U, UISC, UGC):
            if UIPC == Top:
                UIPC = Bottom
            elif UIPC == Bottom:
                UIPC = Top

        # TODO: https://github.com/harfbuzz/harfbuzz/pull/627
        if 0x1BF2 <= U <= 0x1BF3:
            UISC = Nukta
            UIPC = Bottom
        # TODO: U+1CED should only be allowed after some of
        # the nasalization marks, maybe only for U+1CE9..U+1CF1.
        if U == 0x1CED:
            UISC = Tone_Mark
        # TODO: https://github.com/harfbuzz/harfbuzz/issues/1105
        if U == 0x11134:
            UISC = Gemination_Mark

        # Exactly one category predicate must accept the code point.
        matches = [k for k, v in items if v(U, UISC, UGC)]
        assert len(matches) == 1, "%s %s %s %s" % (hex(U), UISC, UGC, matches)
        USE = matches[0]

        # Resolve Indic_Positional_Category

        # TODO: These should die, but have UIPC in Unicode 12.0
        if U in [0x953, 0x954]:
            UIPC = Not_Applicable
        # TODO: In USE's override list but not in Unicode 12.0
        if U == 0x103C:
            UIPC = Left
        # TODO: https://github.com/harfbuzz/harfbuzz/pull/2012
        if U == 0x1C29:
            UIPC = Left
        # TODO: These are not in USE's override list that we have, nor are they in Unicode 12.0
        if 0xA926 <= U <= 0xA92A:
            UIPC = Top
        # TODO: https://github.com/harfbuzz/harfbuzz/pull/1037
        # and https://github.com/harfbuzz/harfbuzz/issues/1631
        if U in [0x11302, 0x11303, 0x114C1]:
            UIPC = Top
        if U == 0x1171E:
            UIPC = Left
        if 0x1CF8 <= U <= 0x1CF9:
            UIPC = Top

        assert (UIPC in [Not_Applicable, Visual_Order_Left] or
                USE in use_positions), "%s %s %s %s %s" % (hex(U), UIPC, USE, UISC, UGC)

        # Append the positional suffix (Abv/Blw/Pst/Pre) where applicable.
        pos_mapping = use_positions.get(USE, None)
        if pos_mapping:
            suffixes = [k for k, v in pos_mapping.items() if v and UIPC in v]
            assert len(suffixes) == 1, "%s %s %s %s %s %s" % (hex(U), UIPC, USE, UISC, UGC, suffixes)
            USE = USE + suffixes[0]

        out[U] = (USE, UBlock)
    return out
# From here on, defaults are in output (USE, Block) terms.
defaults = ('O', 'No_Block')
data = map_to_use(data)

# Emit the generated-file preamble, echoing the input file headers.
print ("/* == Start of generated table == */")
print ("/*")
print (" * The following table is generated by running:")
print (" *")
print (" * ./gen-use-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt UnicodeData.txt Blocks.txt")
print (" *")
print (" * on files with these headers:")
print (" *")
for header in headers:
    for header_line in header:
        print (" * %s" % (header_line.strip()))
print (" */")
print ()
print ('#include "hb.hh"')
print ()
print ('#ifndef HB_NO_OT_SHAPE')
print ()
print ('#include "hb-ot-shape-complex-use.hh"')
print ()

# Running totals used to compute table occupancy at the end.
total = 0
used = 0
last_block = None

def print_block(block, start, end, data):
    """Print table entries for [start, end], 16 per row, tracking occupancy."""
    global total, used, last_block
    if block and block != last_block:
        # New Unicode block: print its banner and re-align mid-row starts.
        print ()
        print ()
        print (" /* %s */" % block)
        if start % 16:
            print (' ' * (20 + (start % 16 * 6)), end='')
    num = 0
    assert start % 8 == 0
    assert (end + 1) % 8 == 0
    for u in range(start, end + 1):
        if u % 16 == 0:
            print ()
            print (" /* %04X */" % u, end='')
        if u in data:
            num += 1
        d = data.get(u, defaults)
        print ("%6s," % d[0], end='')
    total += end - start + 1
    used += num
    if block:
        last_block = block
uu = sorted(data.keys())

last = -100000
num = 0  # kept for parity with the original script
offset = 0
starts = []
ends = []

# Short aliases for the category macros keep the table rows narrow.
print ('#pragma GCC diagnostic push')
print ('#pragma GCC diagnostic ignored "-Wunused-macros"')
for k, v in sorted(use_mapping.items()):
    if k in use_positions and use_positions[k]:
        continue
    print ("#define %s USE_%s /* %s */" % (k, k, v.__name__[3:]))
for k, v in sorted(use_positions.items()):
    if not v:
        continue
    for suf in v.keys():
        tag = k + suf
        print ("#define %s USE_%s" % (tag, tag))
print ('#pragma GCC diagnostic pop')
print ("")
print ("static const USE_TABLE_ELEMENT_TYPE use_table[] = {")

# Emit runs of populated 8-entry-aligned segments; small gaps are padded
# with defaults, large gaps start a new segment with its own offset macro.
for u in uu:
    if u <= last:
        continue
    block = data[u][1]

    start = u // 8 * 8
    end = start + 1
    while end in uu and block == data[end][1]:
        end += 1
    end = (end - 1) // 8 * 8 + 7

    if start != last + 1:
        if start - last <= 1 + 16 * 3:
            # Gap is small: pad it with default entries.
            print_block(None, last + 1, start - 1, data)
            last = start - 1
        else:
            # Gap is large: close the current segment, open a new one.
            if last >= 0:
                ends.append(last + 1)
                offset += ends[-1] - starts[-1]
            print ()
            print ()
            print ("#define use_offset_0x%04xu %d" % (start, offset))
            starts.append(start)

    print_block(block, start, end, data)
    last = end
ends.append(last + 1)
offset += ends[-1] - starts[-1]
print ()
print ()

occupancy = used * 100. / total
page_bits = 12
print ("}; /* Table items: %d; occupancy: %d%% */" % (offset, occupancy))
print ()

# Emit the lookup function: switch on the 4K page, then range-check
# against each emitted segment.
print ("USE_TABLE_ELEMENT_TYPE")
print ("hb_use_get_category (hb_codepoint_t u)")
print ("{")
print (" switch (u >> %d)" % page_bits)
print (" {")
pages = set([u >> page_bits for u in starts + ends])
for p in sorted(pages):
    print (" case 0x%0Xu:" % p)
    for (start, end) in zip(starts, ends):
        if p not in [start >> page_bits, end >> page_bits]:
            continue
        offset = "use_offset_0x%04xu" % start
        print (" if (hb_in_range<hb_codepoint_t> (u, 0x%04Xu, 0x%04Xu)) return use_table[u - 0x%04Xu + %s];" % (start, end - 1, start, offset))
    print (" break;")
    print ("")
print (" default:")
print (" break;")
print (" }")
print (" return USE_O;")
print ("}")
print ()

# Undo the short aliases emitted above.
for k in sorted(use_mapping.keys()):
    if k in use_positions and use_positions[k]:
        continue
    print ("#undef %s" % k)
for k, v in sorted(use_positions.items()):
    if not v:
        continue
    for suf in v.keys():
        tag = k + suf
        print ("#undef %s" % tag)
print ()
print ()
print ('#endif')
print ("/* == End of generated table == */")

# Maintain at least 50% occupancy in the table */
if occupancy < 50:
    raise Exception("Table too sparse, please investigate: ", occupancy)
|
endlessm/chromium-browser
|
third_party/harfbuzz-ng/src/src/gen-use-table.py
|
Python
|
bsd-3-clause
| 15,523
|
package org.buildmlearn.toolkit.templates;
import android.app.Activity;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.support.v7.app.AlertDialog;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.BaseAdapter;
import android.widget.EditText;
import org.buildmlearn.toolkit.R;
import org.buildmlearn.toolkit.learnspelling.fragment.SplashFragment;
import org.buildmlearn.toolkit.model.Template;
import org.buildmlearn.toolkit.model.TemplateInterface;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import java.util.ArrayList;
/**
 * @brief Learn Spelling template code implementing methods of TemplateInterface
 * <p/>
 * Created by abhishek on 16/06/15 at 9:59 PM.
 */
public class LearnSpellingTemplate implements TemplateInterface {

    // Adapter backing the template editor list; recreated on (re)load.
    transient private LearnSpellingAdapter adapter;
    // The word/meaning pairs currently being edited.
    private ArrayList<LearnSpellingModel> mLearnSpellingData;
    // Index into Template.values() identifying this template's assets.
    private int templateId;

    public LearnSpellingTemplate() {
        mLearnSpellingData = new ArrayList<>();
    }

    /**
     * Validates the word and meaning fields, attaching an inline error to the
     * first blank field found.
     *
     * @return true only when both fields are non-null and non-blank.
     */
    private static boolean validated(Context context, EditText word, EditText meaning) {
        if (word == null || meaning == null) {
            return false;
        }
        String wordText = word.getText().toString().trim();
        String meaningText = meaning.getText().toString().trim();
        if ("".equals(wordText)) {
            word.setError(context.getString(R.string.enter_word));
            return false;
        } else if ("".equals(meaningText)) {
            meaning.setError(context.getString(R.string.enter_meaning));
            return false;
        }
        return true;
    }

    /**
     * Builds and shows the shared add/edit dialog used by both
     * {@link #addItem(Activity)} and {@link #editItem(Activity, int)}.
     * The positive button is created with a null listener so the caller can
     * install a handler that keeps the dialog open on invalid input.
     */
    private AlertDialog showWordMeaningDialog(Activity activity, View dialogView,
                                              int titleResId, int positiveResId) {
        AlertDialog dialog = new AlertDialog.Builder(activity)
                .setTitle(titleResId)
                .setView(dialogView,
                        activity.getResources().getDimensionPixelSize(R.dimen.spacing_left),
                        activity.getResources().getDimensionPixelSize(R.dimen.spacing_top),
                        activity.getResources().getDimensionPixelSize(R.dimen.spacing_right),
                        activity.getResources().getDimensionPixelSize(R.dimen.spacing_bottom))
                .setPositiveButton(positiveResId, null)
                .setNegativeButton(R.string.info_template_cancel, null)
                .create();
        dialog.show();
        return dialog;
    }

    @Override
    public BaseAdapter newTemplateEditorAdapter(Context context) {
        adapter = new LearnSpellingAdapter(context, mLearnSpellingData);
        setEmptyView((Activity) context);
        return adapter;
    }

    @Override
    public BaseAdapter newMetaEditorAdapter(Context context) {
        // This template has no meta editor.
        return null;
    }

    @Override
    public BaseAdapter currentTemplateEditorAdapter() {
        return adapter;
    }

    @Override
    public BaseAdapter currentMetaEditorAdapter() {
        return null;
    }

    @Override
    public BaseAdapter loadProjectMetaEditor(Context context, Document doc) {
        return null;
    }

    /**
     * Rebuilds the word list from the saved project XML; each item element is
     * expected to carry one "word" and one "meaning" child.
     */
    @Override
    public BaseAdapter loadProjectTemplateEditor(Context context, ArrayList<Element> data) {
        mLearnSpellingData = new ArrayList<>();
        for (Element item : data) {
            String infoObject = item.getElementsByTagName("word").item(0).getTextContent();
            String infoDescription = item.getElementsByTagName("meaning").item(0).getTextContent();
            mLearnSpellingData.add(new LearnSpellingModel(infoObject, infoDescription));
        }
        adapter = new LearnSpellingAdapter(context, mLearnSpellingData);
        setEmptyView((Activity) context);
        return adapter;
    }

    @Override
    public void setTemplateId(int templateId) {
        this.templateId = templateId;
    }

    @Override
    public String getTitle() {
        return "Learn Spelling Template";
    }

    /**
     * Shows the add dialog; on valid input appends a new entry, refreshes the
     * list, and dismisses the dialog. Invalid input keeps the dialog open.
     */
    @Override
    public void addItem(final Activity activity) {
        LayoutInflater inflater = activity.getLayoutInflater();
        View dialogView = inflater.inflate(R.layout.info_dialog_add_edit_data, null);
        final AlertDialog dialog = showWordMeaningDialog(activity, dialogView,
                R.string.info_add_new_title, R.string.info_template_add);

        final EditText word = (EditText) dialogView.findViewById(R.id.info_word);
        final EditText meaning = (EditText) dialogView.findViewById(R.id.info_meaning);

        dialog.getButton(DialogInterface.BUTTON_POSITIVE).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                if (validated(activity, word, meaning)) {
                    String wordText = word.getText().toString().trim();
                    String meaningText = meaning.getText().toString().trim();
                    mLearnSpellingData.add(new LearnSpellingModel(wordText, meaningText));
                    adapter.notifyDataSetChanged();
                    setEmptyView(activity);
                    dialog.dismiss();
                }
            }
        });
    }

    @Override
    public void addMetaData(Activity activity) {
        // This is intentionally empty
    }

    /**
     * Shows the edit dialog pre-filled with the entry at {@code position};
     * on valid input updates it in place and dismisses the dialog.
     */
    @Override
    public void editItem(final Activity activity, int position) {
        LayoutInflater inflater = activity.getLayoutInflater();
        View dialogView = inflater.inflate(R.layout.info_dialog_add_edit_data, null);
        final AlertDialog dialog = showWordMeaningDialog(activity, dialogView,
                R.string.info_edit_title, R.string.info_template_ok);

        final LearnSpellingModel data = mLearnSpellingData.get(position);
        final EditText word = (EditText) dialogView.findViewById(R.id.info_word);
        final EditText meaning = (EditText) dialogView.findViewById(R.id.info_meaning);
        word.setText(data.getWord().trim());
        meaning.setText(data.getMeaning().trim());

        dialog.getButton(DialogInterface.BUTTON_POSITIVE).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                if (validated(activity, word, meaning)) {
                    data.setWord(word.getText().toString().trim());
                    data.setMeaning(meaning.getText().toString().trim());
                    adapter.notifyDataSetChanged();
                    dialog.dismiss();
                }
            }
        });
    }

    /**
     * Removes and returns the entry at {@code position} (returned so it can be
     * handed back to {@link #restoreItem(Activity, int, Object)} for undo).
     */
    @Override
    public Object deleteItem(Activity activity, int position) {
        LearnSpellingModel learnSpellingModel = mLearnSpellingData.get(position);
        mLearnSpellingData.remove(position);
        setEmptyView(activity);
        adapter.notifyDataSetChanged();
        return learnSpellingModel;
    }

    /**
     * Undoes a delete by re-inserting the entry at its original position.
     */
    @Override
    public void restoreItem(Activity activity, int position, Object object) {
        // instanceof is false for null, so no separate null check is needed.
        if (object instanceof LearnSpellingModel) {
            mLearnSpellingData.add(position, (LearnSpellingModel) object);
            adapter.notifyDataSetChanged();
            // Hide the empty placeholder again, mirroring addItem().
            setEmptyView(activity);
        }
    }

    @Override
    public ArrayList<Element> getItems(Document doc) {
        ArrayList<Element> itemElements = new ArrayList<>();
        for (LearnSpellingModel data : mLearnSpellingData) {
            itemElements.add(data.getXml(doc));
        }
        return itemElements;
    }

    @Override
    public android.support.v4.app.Fragment getSimulatorFragment(String filePathWithName) {
        return SplashFragment.newInstance(filePathWithName);
    }

    @Override
    public String getAssetsFileName(Context context) {
        Template[] templates = Template.values();
        return context.getString(templates[templateId].getAssetsName());
    }

    @Override
    public String getAssetsFilePath() {
        return "assets/";
    }

    @Override
    public String getApkFilePath() {
        return "LearnSpellingsApp.apk";
    }

    @Override
    public void onActivityResult(Context context, int requestCode, int resultCode, Intent intent) {
        // This is intentionally empty
    }

    /**
     * @brief Toggles the visibility of empty text if Array has zero elements
     */
    private void setEmptyView(Activity activity) {
        if (mLearnSpellingData.size() < 1) {
            activity.findViewById(R.id.empty).setVisibility(View.VISIBLE);
        } else {
            activity.findViewById(R.id.empty).setVisibility(View.GONE);
        }
    }
}
|
BuildmLearn/BuildmLearn-Toolkit-Android
|
source-code/app/src/main/java/org/buildmlearn/toolkit/templates/LearnSpellingTemplate.java
|
Java
|
bsd-3-clause
| 9,152
|
import re
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
#### Classes used in constructing PostGIS spatial SQL ####
class PostGISOperator(SpatialOperation):
    "For PostGIS operators (e.g. `&&`, `~`)."

    def __init__(self, operator):
        # Only the SQL operator symbol is needed; templates come from the base.
        super(PostGISOperator, self).__init__(operator=operator)


class PostGISFunction(SpatialFunction):
    "For PostGIS function calls (e.g., `ST_Contains(table, geom)`)."

    def __init__(self, prefix, function, **kwargs):
        # `prefix` is '' or 'ST_' depending on the PostGIS version in use.
        super(PostGISFunction, self).__init__(prefix + function, **kwargs)


class PostGISFunctionParam(PostGISFunction):
    "For PostGIS functions that take another parameter (e.g. DWithin, Relate)."

    # Third placeholder carries the extra function parameter.
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class PostGISDistance(PostGISFunction):
    "For PostGIS distance operations."

    dist_func = 'Distance'
    # Compares the computed distance against a bound parameter.
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'

    def __init__(self, prefix, operator):
        super(PostGISDistance, self).__init__(prefix, self.dist_func,
                                              operator=operator)


class PostGISSpheroidDistance(PostGISFunction):
    "For PostGIS spherical distance operations (using the spheroid)."

    dist_func = 'distance_spheroid'
    # An extra parameter in `end_subst` is needed for the spheroid string.
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s) %(operator)s %%s'

    def __init__(self, prefix, operator):
        super(PostGISSpheroidDistance, self).__init__(prefix, self.dist_func,
                                                      operator=operator)


class PostGISSphereDistance(PostGISDistance):
    "For PostGIS spherical distance operations."

    # Same SQL shape as PostGISDistance, just a different function name.
    dist_func = 'distance_sphere'
class PostGISRelate(PostGISFunctionParam):
    "For PostGIS Relate(<geom>, <pattern>) calls."

    # DE-9IM intersection matrix: exactly nine of 0/1/2/T/F/*.
    pattern_regex = re.compile(r'^[012TF\*]{9}$')

    def __init__(self, prefix, pattern):
        # Reject malformed patterns up front; the pattern itself is bound
        # later through the extra-parameter placeholder of the base class.
        if not self.pattern_regex.match(pattern):
            raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        super(PostGISRelate, self).__init__(prefix, 'Relate')
class PostGISOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'postgis'
postgis = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = dict([(k, None) for k in
('Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union')])
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
try:
if hasattr(settings, 'POSTGIS_VERSION'):
vtup = settings.POSTGIS_VERSION
if len(vtup) == 3:
# The user-supplied PostGIS version.
version = vtup
else:
# This was the old documented way, but it's stupid to
# include the string.
version = vtup[1:4]
else:
vtup = self.postgis_version_tuple()
version = vtup[1:]
# Getting the prefix -- even though we don't officially support
# PostGIS 1.2 anymore, keeping it anyway in case a prefix change
# for something else is necessary.
if version >= (1, 2, 2):
prefix = 'ST_'
else:
prefix = ''
self.geom_func_prefix = prefix
self.spatial_version = version
except DatabaseError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s". '
'GeoDjango requires at least PostGIS version 1.3. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
# TODO: Raise helpful exceptions as they become known.
# PostGIS-specific operators. The commented descriptions of these
# operators come from Section 7.6 of the PostGIS 1.4 documentation.
self.geometry_operators = {
# The "&<" operator returns true if A's bounding box overlaps or
# is to the left of B's bounding box.
'overlaps_left' : PostGISOperator('&<'),
# The "&>" operator returns true if A's bounding box overlaps or
# is to the right of B's bounding box.
'overlaps_right' : PostGISOperator('&>'),
# The "<<" operator returns true if A's bounding box is strictly
# to the left of B's bounding box.
'left' : PostGISOperator('<<'),
# The ">>" operator returns true if A's bounding box is strictly
# to the right of B's bounding box.
'right' : PostGISOperator('>>'),
# The "&<|" operator returns true if A's bounding box overlaps or
# is below B's bounding box.
'overlaps_below' : PostGISOperator('&<|'),
# The "|&>" operator returns true if A's bounding box overlaps or
# is above B's bounding box.
'overlaps_above' : PostGISOperator('|&>'),
# The "<<|" operator returns true if A's bounding box is strictly
# below B's bounding box.
'strictly_below' : PostGISOperator('<<|'),
# The "|>>" operator returns true if A's bounding box is strictly
# above B's bounding box.
'strictly_above' : PostGISOperator('|>>'),
# The "~=" operator is the "same as" operator. It tests actual
# geometric equality of two features. So if A and B are the same feature,
# vertex-by-vertex, the operator returns true.
'same_as' : PostGISOperator('~='),
'exact' : PostGISOperator('~='),
# The "@" operator returns true if A's bounding box is completely contained
# by B's bounding box.
'contained' : PostGISOperator('@'),
# The "~" operator returns true if A's bounding box completely contains
# by B's bounding box.
'bbcontains' : PostGISOperator('~'),
# The "&&" operator returns true if A's bounding box overlaps
# B's bounding box.
'bboverlaps' : PostGISOperator('&&'),
}
self.geometry_functions = {
'equals' : PostGISFunction(prefix, 'Equals'),
'disjoint' : PostGISFunction(prefix, 'Disjoint'),
'touches' : PostGISFunction(prefix, 'Touches'),
'crosses' : PostGISFunction(prefix, 'Crosses'),
'within' : PostGISFunction(prefix, 'Within'),
'overlaps' : PostGISFunction(prefix, 'Overlaps'),
'contains' : PostGISFunction(prefix, 'Contains'),
'intersects' : PostGISFunction(prefix, 'Intersects'),
'relate' : (PostGISRelate, six.string_types),
'coveredby' : PostGISFunction(prefix, 'CoveredBy'),
'covers' : PostGISFunction(prefix, 'Covers'),
}
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float) + six.integer_types
def get_dist_ops(operator):
"Returns operations for both regular and spherical distances."
return {'cartesian' : PostGISDistance(prefix, operator),
'sphere' : PostGISSphereDistance(prefix, operator),
'spheroid' : PostGISSpheroidDistance(prefix, operator),
}
self.distance_functions = {
'distance_gt' : (get_dist_ops('>'), dtypes),
'distance_gte' : (get_dist_ops('>='), dtypes),
'distance_lt' : (get_dist_ops('<'), dtypes),
'distance_lte' : (get_dist_ops('<='), dtypes),
'dwithin' : (PostGISFunctionParam(prefix, 'DWithin'), dtypes)
}
# Adding the distance functions to the geometries lookup.
self.geometry_functions.update(self.distance_functions)
# Only PostGIS versions 1.3.4+ have GeoJSON serialization support.
if version < (1, 3, 4):
GEOJSON = False
else:
GEOJSON = prefix + 'AsGeoJson'
# ST_ContainsProperly ST_MakeLine, and ST_GeoHash added in 1.4.
if version >= (1, 4, 0):
GEOHASH = 'ST_GeoHash'
BOUNDINGCIRCLE = 'ST_MinimumBoundingCircle'
self.geometry_functions['contains_properly'] = PostGISFunction(prefix, 'ContainsProperly')
else:
GEOHASH, BOUNDINGCIRCLE = False, False
# Geography type support added in 1.5.
if version >= (1, 5, 0):
self.geography = True
# Only a subset of the operators and functions are available
# for the geography type.
self.geography_functions = self.distance_functions.copy()
self.geography_functions.update({
'coveredby': self.geometry_functions['coveredby'],
'covers': self.geometry_functions['covers'],
'intersects': self.geometry_functions['intersects'],
})
self.geography_operators = {
'bboverlaps': PostGISOperator('&&'),
}
# Native geometry type support added in PostGIS 2.0.
if version >= (2, 0, 0):
self.geometry = True
# Creating a dictionary lookup of all GIS terms for PostGIS.
self.gis_terms = set(['isnull'])
self.gis_terms.update(self.geometry_operators)
self.gis_terms.update(self.geometry_functions)
self.area = prefix + 'Area'
self.bounding_circle = BOUNDINGCIRCLE
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = GEOHASH
self.geojson = GEOJSON
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = prefix + 'AsKML'
self.length = prefix + 'Length'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = prefix + 'MakeLine'
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = prefix + 'Union'
self.unionagg = prefix + 'Union'
if version >= (2, 0, 0):
self.extent3d = prefix + '3DExtent'
self.length3d = prefix + '3DLength'
self.perimeter3d = prefix + '3DPerimeter'
else:
self.extent3d = prefix + 'Extent3D'
self.length3d = prefix + 'Length3D'
self.perimeter3d = prefix + 'Perimeter3D'
def check_aggregate_support(self, aggregate):
    """
    Indicates whether the given aggregate instance is supported by this
    backend, i.e. whether its class name appears in
    `self.valid_aggregates`.
    """
    return aggregate.__class__.__name__ in self.valid_aggregates
def convert_extent(self, box):
    """
    Converts the bounding-box text returned by PostGIS for the `Extent`
    aggregate, e.g. "BOX(-90.0 30.0, -85.0 40.0)", into a 4-tuple of
    floats (xmin, ymin, xmax, ymax).
    """
    lower_left, upper_right = box[4:-1].split(',')
    xmin, ymin = (float(coord) for coord in lower_left.split())
    xmax, ymax = (float(coord) for coord in upper_right.split())
    return (xmin, ymin, xmax, ymax)
def convert_extent3d(self, box3d):
    """
    Converts the 3D bounding-box text returned by PostGIS for the
    `Extent3D` aggregate, e.g. "BOX3D(-90.0 30.0 1, -85.0 40.0 2)",
    into a 6-tuple of floats (xmin, ymin, zmin, xmax, ymax, zmax).
    """
    lower, upper = box3d[6:-1].split(',')
    xmin, ymin, zmin = (float(coord) for coord in lower.split())
    xmax, ymax, zmax = (float(coord) for coord in upper.split())
    return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
    """
    Converts the hex geometry value returned by PostGIS aggregates into
    a Geometry object; empty/null values yield None.
    """
    return Geometry(hex) if hex else None
def geo_db_type(self, f):
    """
    Return the database field type for the given geometry field.

    Typically this is None, because geometry columns are added via the
    `AddGeometryColumn` stored procedure -- unless the field is declared
    as a geography column (PostGIS 1.5+) or native geometry types are
    available (PostGIS 2.0+).
    """
    if f.geography:
        # Geography columns require PostGIS 1.5+ and WGS84.
        if not self.geography:
            raise NotImplementedError('PostGIS 1.5 required for geography column support.')
        if f.srid != 4326:
            raise NotImplementedError('PostGIS 1.5 supports geography columns '
                                      'only with an SRID of 4326.')
        return 'geography(%s,%d)' % (f.geom_type, f.srid)
    if not self.geometry:
        return None
    # Postgis 2.0 supports type-based geometries.
    # TODO: Support 'M' extension.
    geom_type = f.geom_type + 'Z' if f.dim == 3 else f.geom_type
    return 'geometry(%s,%d)' % (geom_type, f.srid)
def get_distance(self, f, dist_val, lookup_type):
    """
    Retrieve the distance parameters for the given geometry field,
    distance lookup value, and the distance lookup type.

    `f` is the geometry field, `dist_val` is a 1- or 2-tuple of
    (value[, option]), and `lookup_type` is the lookup name, e.g.
    'distance_lte' or 'dwithin'.  Returns the list of SQL parameters.

    This is the most complex implementation of the spatial backends due to
    what is supported on geodetic geometry columns vs. what's available on
    projected geometry columns.  In addition, it has to take into account
    the geography column type introduced in PostGIS 1.5.
    """
    # Getting the distance parameter and any options.
    if len(dist_val) == 1:
        value, option = dist_val[0], None
    else:
        value, option = dist_val
    # Shorthand boolean flags.
    geodetic = f.geodetic(self.connection)
    geography = f.geography and self.geography
    if isinstance(value, Distance):
        if geography:
            # Geography distances are always expressed in meters.
            dist_param = value.m
        elif geodetic:
            # dwithin on geodetic geometry columns takes degrees, so a
            # Distance object (absolute units) cannot be converted.
            if lookup_type == 'dwithin':
                raise ValueError('Only numeric values of degree units are '
                                 'allowed on geographic DWithin queries.')
            dist_param = value.m
        else:
            # Projected columns: convert into the field's native units.
            dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
    else:
        # Assuming the distance is in the units of the field.
        dist_param = value
    if (not geography and geodetic and lookup_type != 'dwithin'
        and option == 'spheroid'):
        # using distance_spheroid requires the spheroid of the field as
        # a parameter.
        return [f._spheroid, dist_param]
    else:
        return [dist_param]
def get_geom_placeholder(self, f, value):
    """
    Provides a proper substitution value for Geometries that are not in
    the SRID of the field: such values are wrapped in an ST_Transform()
    call so they are converted to the field's SRID.
    """
    needs_transform = value is not None and value.srid != f.srid
    if needs_transform:
        # Adding Transform() to the SQL placeholder.
        placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
    else:
        placeholder = '%s'
    if hasattr(value, 'expression'):
        # F expressions substitute the expression's column instead of a
        # query parameter placeholder.
        placeholder = placeholder % self.get_expression_column(value)
    return placeholder
def _get_postgis_func(self, func):
    """
    Helper routine for calling PostGIS functions and returning their result.

    `func` is the name of a zero-argument SQL function, e.g.
    'postgis_lib_version'; the single scalar it returns is passed back.
    """
    # Close out the connection. See #9437.
    with self.connection.temporary_connection() as cursor:
        cursor.execute('SELECT %s()' % func)
        return cursor.fetchone()[0]
# Thin wrappers around the corresponding zero-argument PostGIS SQL
# version functions, all routed through _get_postgis_func().
def postgis_geos_version(self):
    "Returns the version of the GEOS library used with PostGIS."
    return self._get_postgis_func('postgis_geos_version')

def postgis_lib_version(self):
    "Returns the version number of the PostGIS library used with PostgreSQL."
    return self._get_postgis_func('postgis_lib_version')

def postgis_proj_version(self):
    "Returns the version of the PROJ.4 library used with PostGIS."
    return self._get_postgis_func('postgis_proj_version')

def postgis_version(self):
    "Returns PostGIS version number and compile-time options."
    return self._get_postgis_func('postgis_version')

def postgis_full_version(self):
    "Returns the full PostGIS version and build configuration information."
    return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
    """
    Returns the PostGIS version as a tuple (version string, major,
    minor, subminor).
    """
    # Getting the PostGIS version
    version = self.postgis_lib_version()
    match = self.version_regex.match(version)
    if not match:
        raise Exception('Could not parse PostGIS version string: %s' % version)
    major, minor1, minor2 = (int(match.group(part))
                             for part in ('major', 'minor1', 'minor2'))
    return (version, major, minor1, minor2)
def proj_version_tuple(self):
    """
    Return the version of PROJ.4 used by PostGIS as a tuple of the
    major, minor, and subminor release numbers.
    """
    version_string = self.postgis_proj_version()
    match = re.search(r'(\d+)\.(\d+)\.(\d+)', version_string)
    if not match:
        raise Exception('Could not determine PROJ.4 version from PostGIS.')
    return tuple(int(group) for group in match.groups())
def num_params(self, lookup_type, num_param):
    """
    Returns whether `num_param` is a valid parameter count for the
    given lookup type.  Distance lookups (except `dwithin`) may carry
    an optional third parameter; every other lookup takes exactly two.
    """
    if lookup_type in self.distance_functions and lookup_type != 'dwithin':
        return 2 <= num_param <= 3
    return num_param == 2
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
    """
    Constructs spatial SQL from the given lookup value tuple
    (alias, col, db_type), the lookup type string, the lookup value, and
    the geometry field.  `qn` is the quoting function for identifiers.
    """
    alias, col, db_type = lvalue
    # Getting the quoted geometry column.
    geo_col = '%s.%s' % (qn(alias), qn(col))
    if lookup_type in self.geometry_operators:
        # Geography columns only support the subset in geography_operators.
        if field.geography and not lookup_type in self.geography_operators:
            raise ValueError('PostGIS geography does not support the '
                             '"%s" lookup.' % lookup_type)
        # Handling a PostGIS operator.
        op = self.geometry_operators[lookup_type]
        return op.as_sql(geo_col, self.get_geom_placeholder(field, value))
    elif lookup_type in self.geometry_functions:
        if field.geography and not lookup_type in self.geography_functions:
            raise ValueError('PostGIS geography type does not support the '
                             '"%s" lookup.' % lookup_type)
        # See if a PostGIS geometry function matches the lookup type.
        tmp = self.geometry_functions[lookup_type]
        # Lookup types that are tuples take tuple arguments, e.g., 'relate' and
        # distance lookups.
        if isinstance(tmp, tuple):
            # First element of tuple is the PostGISOperation instance, and the
            # second element is either the type or a tuple of acceptable types
            # that may passed in as further parameters for the lookup type.
            op, arg_type = tmp
            # Ensuring that a tuple _value_ was passed in from the user
            if not isinstance(value, (tuple, list)):
                raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
            # Geometry is first element of lookup tuple.
            geom = value[0]
            # Number of valid tuple parameters depends on the lookup type.
            nparams = len(value)
            if not self.num_params(lookup_type, nparams):
                raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
            # Ensuring the argument type matches what we expect.
            if not isinstance(value[1], arg_type):
                raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
            # For lookup type `relate`, the op instance is not yet created (has
            # to be instantiated here to check the pattern parameter).
            if lookup_type == 'relate':
                op = op(self.geom_func_prefix, value[1])
            elif lookup_type in self.distance_functions and lookup_type != 'dwithin':
                if not field.geography and field.geodetic(self.connection):
                    # Geodetic distances are only available from Points to
                    # PointFields on PostGIS 1.4 and below.
                    if not self.connection.ops.geography:
                        if field.geom_type != 'POINT':
                            raise ValueError('PostGIS spherical operations are only valid on PointFields.')
                    if str(geom.geom_type) != 'Point':
                        raise ValueError('PostGIS geometry distance parameter is required to be of type Point.')
                    # Setting up the geodetic operation appropriately.
                    if nparams == 3 and value[2] == 'spheroid':
                        op = op['spheroid']
                    else:
                        op = op['sphere']
                else:
                    op = op['cartesian']
        else:
            # Simple function lookup: the value itself is the geometry.
            op = tmp
            geom = value
        # Calling the `as_sql` function on the operation instance.
        return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
    elif lookup_type == 'isnull':
        # Handling 'isnull' lookup type
        # NOTE(review): this branch returns a (sql, params) tuple, while the
        # branches above return op.as_sql(...) -- confirm callers accept both.
        return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []
    raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
    """
    Returns the spatial aggregate SQL template and function for the
    given Aggregate instance; raises NotImplementedError when the
    aggregate is not supported by this backend.
    """
    agg_name = agg.__class__.__name__
    if not self.check_aggregate_support(agg):
        raise NotImplementedError('%s spatial aggregate is not implmented for this backend.' % agg_name)
    # The SQL function name is stored as an attribute on the backend;
    # 'Union' maps to the `unionagg` attribute.
    func_attr = agg_name.lower()
    if func_attr == 'union':
        func_attr += 'agg'
    return '%(function)s(%(field)s)', getattr(self, func_attr)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
    """Returns the model class mapping the OGC `geometry_columns` table."""
    from django.contrib.gis.db.backends.postgis.models import GeometryColumns
    return GeometryColumns

def spatial_ref_sys(self):
    """Returns the model class mapping the OGC `spatial_ref_sys` table."""
    from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
    return SpatialRefSys
|
postrational/django
|
django/contrib/gis/db/backends/postgis/operations.py
|
Python
|
bsd-3-clause
| 25,538
|
/**
* Copyright (c) 2012, Regents of the University of Colorado
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* For a complete copy of the license please see the file LICENSE distributed
* with the cleartk-syntax-berkeley project or visit
* http://www.gnu.org/licenses/old-licenses/gpl-2.0.html.
*/
package org.cleartk.ml.weka;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.NotImplementedException;
import org.cleartk.ml.CleartkProcessingException;
import org.cleartk.ml.Feature;
import org.cleartk.ml.encoder.features.FeaturesEncoder;
import org.cleartk.ml.encoder.outcome.OutcomeEncoder;
import org.cleartk.ml.jar.Classifier_ImplBase;
import weka.core.Instance;
import com.google.common.annotations.Beta;
/**
 * Copyright (c) 2012, Regents of the University of Colorado <br>
 * All rights reserved.
 * <p>
 * Skeleton ClearTK classifier intended to be backed by a Weka model.
 * Both {@link #classify} and {@link #score} are placeholders and
 * currently throw {@link NotImplementedException}.
 *
 * @author Philip Ogren
 */
@Beta
public abstract class WekaStringOutcomeClassifier extends Classifier_ImplBase<Instance, String, String> {

  //TODO need to add the Weka model as a parameter
  public WekaStringOutcomeClassifier( FeaturesEncoder<Instance> featuresEncoder,
      OutcomeEncoder<String, String> outcomeEncoder) throws Exception {
    super(featuresEncoder, outcomeEncoder);
  }

  //TODO no implementation of classify method
  /**
   * Not implemented yet; always throws {@link NotImplementedException}.
   */
  public String classify(List<Feature> features) throws UnsupportedOperationException {
    throw new NotImplementedException();
  }

  //TODO no implementation of the score method
  /**
   * Not implemented yet; always throws {@link NotImplementedException}.
   */
  @Override
  public Map<String, Double> score(List<Feature> features) throws CleartkProcessingException {
    throw new NotImplementedException();
  }
}
|
ClearTK/cleartk
|
cleartk-ml-weka/src/main/java/org/cleartk/ml/weka/WekaStringOutcomeClassifier.java
|
Java
|
bsd-3-clause
| 2,118
|
package db
import (
"hg/messages"
"fmt"
"gopkg.in/mgo.v2"
)
const (
mongoURL = "127.0.0.1:27017"
)
// persist opens a new MongoDB session against mongoURL and applies the
// given operation to the "history" collection of the "historys" database.
// Only "Insert" is implemented; "Update", "Delete" and "Put" merely print
// that they are unsupported.  Panics if the connection or the insert fails.
//
// NOTE(review): dialing a fresh session on every call is expensive --
// consider sharing/copying a session; confirm call frequency first.
func persist(operation string,historyEntry messages.HistoryMessage) {
	fmt.Println("Do db operation")
	session, err := mgo.Dial(mongoURL)
	if err != nil {
		panic(err)
	}
	defer session.Close()

	historyCollection := session.DB("historys").C("history")

	switch operation {
	case "Insert":
		err = historyCollection.Insert(historyEntry)
		if err != nil {
			fmt.Printf("Can't insert document: %v\n", err)
			panic(err)
		}
	case "Update":
		fmt.Println("Update method is not supported yet!")
	case "Delete":
		fmt.Println("Delete method is not supported yet!")
	case "Put":
		fmt.Println("Put method is not supported yet!")
	}
}
// RecordHistory stores the given history entry in MongoDB.  It is the
// exported entry point and simply wraps persist with the "Insert" operation.
func RecordHistory(historyEntry messages.HistoryMessage){
	fmt.Println("Start to record history object")
	persist("Insert",historyEntry)
}
|
MikeroSkywalker/auditgo
|
db/db.go
|
GO
|
bsd-3-clause
| 875
|
#!/bin/bash
# Install the package into the conda build prefix.
# Quote the expansions so paths containing spaces do not word-split.
"$PYTHON" setup.py install --prefix="$PREFIX"

# Add more build steps here, if they are necessary.

# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build process.
|
hajs/pylodger
|
recipes/misc/python-inotify/build.sh
|
Shell
|
bsd-3-clause
| 236
|
# -*- coding: utf-8 -*-
import os
import unittest
from manolo_scraper.spiders.mincu import MincuSpider
from utils import fake_response_from_file
class TestMincuSpider(unittest.TestCase):
    """Tests for MincuSpider's parsing of saved visitor-log HTML pages."""

    def setUp(self):
        # A fresh spider per test keeps the test cases independent.
        self.spider = MincuSpider()

    def test_parse_item(self):
        # Parse a recorded page fixture and check every field of the first
        # scraped item, plus the total number of items on the page.
        filename = os.path.join('data/mincu', '18-08-2015.html')
        items = self.spider.parse(fake_response_from_file(filename, meta={'date': u'18/08/2015'}))
        item = next(items)
        self.assertEqual(item.get('full_name'), u'INGRID BARRIONUEVO ECHEGARAY')
        self.assertEqual(item.get('time_start'), u'16:40')
        self.assertEqual(item.get('institution'), u'mincu')
        self.assertEqual(item.get('id_document'), u'DNI')
        self.assertEqual(item.get('id_number'), u'10085172')
        self.assertEqual(item.get('entity'), u'PARTICULAR')
        self.assertEqual(item.get('reason'), u'REUNIÓN DE TRABAJO')
        self.assertEqual(item.get('host_name'), u'JOIZ ELIZABETH DOBLADILLO ORTIZ')
        self.assertEqual(item.get('title'), u'[SERVICIOS DE UN ASISTENTE EN COMUNICACIONES]')
        self.assertEqual(item.get('office'), u'QHAPAQ ÑAN')
        self.assertEqual(item.get('time_end'), u'16:53')
        self.assertEqual(item.get('date'), u'2015-08-18')

        # One item was already consumed by next(); add it back to the count.
        number_of_items = 1 + sum(1 for x in items)
        self.assertEqual(number_of_items, 15)
|
aniversarioperu/django-manolo
|
scrapers/tests/test_mincu_spider.py
|
Python
|
bsd-3-clause
| 1,379
|
from flask import Flask
from flask.ext.restful import reqparse, abort, Api, Resource
app = Flask(__name__)
api = Api(app)
# In-memory todo store keyed by todo id; serves as the fake database for
# this example application.
TODOS = {
    'todo1': {'task': 'build an API'},
    'todo2': {'task': '?????'},
    'todo3': {'task': 'profit!'},
}
def abort_if_todo_doesnt_exist(todo_id):
    """Abort the current request with a 404 when `todo_id` is unknown."""
    if todo_id in TODOS:
        return
    abort(404, message="Todo {} doesn't exist".format(todo_id))
parser = reqparse.RequestParser()
parser.add_argument('task', type=str)
# Todo
# show a single todo item and lets you delete them
class Todo(Resource):
    """Resource for a single todo item: fetch, delete, or replace it."""

    def get(self, todo_id):
        """Return the todo with the given id; 404 if unknown."""
        abort_if_todo_doesnt_exist(todo_id)
        return TODOS[todo_id]

    def delete(self, todo_id):
        """Delete the todo with the given id; 404 if unknown, 204 on success."""
        abort_if_todo_doesnt_exist(todo_id)
        del TODOS[todo_id]
        return '', 204

    def put(self, todo_id):
        """Create or replace the todo with the given id (always returns 201)."""
        args = parser.parse_args()
        task = {'task': args['task']}
        TODOS[todo_id] = task
        return task, 201
# TodoList
# shows a list of all todos, and lets you POST to add new tasks
class TodoList(Resource):
    """Resource for the todo collection: list all items or add a new one."""

    def get(self):
        """Return the full mapping of todo ids to tasks."""
        return TODOS

    def post(self):
        """Add a new todo with a sequential id derived from the store size.

        NOTE(review): ids come from len(TODOS) + 1, so deleting an item
        can make this reuse an existing id -- confirm that is intended.
        """
        args = parser.parse_args()
        todo_id = 'todo%d' % (len(TODOS) + 1)
        TODOS[todo_id] = {'task': args['task']}
        return TODOS[todo_id], 201
##
## Actually setup the Api resource routing here
##
api.add_resource(TodoList, '/todos')
api.add_resource(Todo, '/todos/<string:todo_id>')


if __name__ == '__main__':
    # Debug mode enables the reloader and interactive debugger; not for
    # production use.
    app.run(debug=True)
|
CanalTP/flask-restful
|
examples/todo.py
|
Python
|
bsd-3-clause
| 1,446
|
A simple service that can be used for testing per-user services.
|
endlessm/chromium-browser
|
services/test/user_id/README.md
|
Markdown
|
bsd-3-clause
| 65
|
from media_tree.contrib.cms_plugins.media_tree_image.models import MediaTreeImage
from media_tree.contrib.cms_plugins.helpers import PluginLink
from media_tree.models import FileNode
from media_tree.contrib.views.detail.image import ImageNodeDetailView
from django.utils.translation import ugettext_lazy as _
from cms.utils.page_resolver import get_page_from_path
from django.http import Http404
class ImagePluginDetailView(ImageNodeDetailView):
    """Detail view for images published through the MediaTreeImage CMS plugin.

    Only serves images that are actually published on a publicly
    accessible CMS page, and optionally renders a "back" link taken from
    the `return_url` GET parameter.
    """

    # Path to link back to; populated from the `return_url` GET parameter.
    return_url = None

    def get_object(self, *args, **kwargs):
        """Return the node, raising Http404 unless it is published via a
        MediaTreeImage plugin on a publicly accessible page."""
        obj = super(ImagePluginDetailView, self).get_object(*args, **kwargs)
        if obj:
            allowed = False
            # validate that the object is actually published using the plugin...
            for plugin in MediaTreeImage.objects.filter(node=obj):
                # ...and on a publicly accessible page.
                # TODO: Iterating all plugins and getting each page
                # is a bit inefficient.
                page = get_page_from_path(plugin.page.get_path())
                if page:
                    allowed = True
                    break
            if not allowed:
                raise Http404
        return obj

    def get_context_data(self, *args, **kwargs):
        """Add a `link` context entry pointing back to `return_url` when
        that path resolves to a CMS page."""
        context_data = super(ImagePluginDetailView, self).get_context_data(
            *args, **kwargs)
        if self.return_url:
            page = get_page_from_path(self.return_url.strip('/'))
            if page:
                context_data.update({
                    'link': PluginLink(url=page.get_absolute_url(),
                        text=_('Back to %s') % page.get_title())
                })
        return context_data

    def get(self, request, *args, **kwargs):
        """Capture the optional `return_url` query parameter before
        delegating to the parent view."""
        self.return_url = request.GET.get('return_url', None)
        return super(ImagePluginDetailView, self).get(request, *args, **kwargs)
|
bittner/django-media-tree
|
media_tree/contrib/cms_plugins/media_tree_image/views.py
|
Python
|
bsd-3-clause
| 1,854
|
/*
* ====================================================================
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package local.org.apache.http.impl.auth;
import java.util.Locale;
import org.apache.http.annotation.NotThreadSafe;
import org.apache.http.FormattedHeader;
import org.apache.http.Header;
import org.apache.http.HttpRequest;
import org.apache.http.auth.AUTH;
import org.apache.http.auth.AuthenticationException;
import org.apache.http.auth.ChallengeState;
import org.apache.http.auth.ContextAwareAuthScheme;
import org.apache.http.auth.Credentials;
import org.apache.http.auth.MalformedChallengeException;
import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.CharArrayBuffer;
/**
* Abstract authentication scheme class that serves as a basis
* for all authentication schemes supported by HttpClient. This class
* defines the generic way of parsing an authentication challenge. It
* does not make any assumptions regarding the format of the challenge
* nor does it impose any specific way of responding to that challenge.
*
*
* @since 4.0
*/
@NotThreadSafe
public abstract class AuthSchemeBase implements ContextAwareAuthScheme {

    // Whether the challenge came from the target server or a proxy; null
    // until a challenge has been processed (or when constructed unchallenged).
    private ChallengeState challengeState;

    /**
     * Creates an instance of <tt>AuthSchemeBase</tt> with the given challenge
     * state.
     *
     * @since 4.2
     */
    public AuthSchemeBase(final ChallengeState challengeState) {
        super();
        this.challengeState = challengeState;
    }

    /** Creates an unchallenged instance (challenge state is null). */
    public AuthSchemeBase() {
        this(null);
    }

    /**
     * Processes the given challenge token. Some authentication schemes
     * may involve multiple challenge-response exchanges. Such schemes must be able
     * to maintain the state information when dealing with sequential challenges
     *
     * @param header the challenge header
     *
     * @throws MalformedChallengeException is thrown if the authentication challenge
     * is malformed
     */
    public void processChallenge(final Header header) throws MalformedChallengeException {
        if (header == null) {
            throw new IllegalArgumentException("Header may not be null");
        }
        // Derive the challenge state (target vs. proxy) from the header name.
        String authheader = header.getName();
        if (authheader.equalsIgnoreCase(AUTH.WWW_AUTH)) {
            this.challengeState = ChallengeState.TARGET;
        } else if (authheader.equalsIgnoreCase(AUTH.PROXY_AUTH)) {
            this.challengeState = ChallengeState.PROXY;
        } else {
            throw new MalformedChallengeException("Unexpected header name: " + authheader);
        }

        // Obtain the header value buffer, avoiding a copy when the header is
        // already backed by a CharArrayBuffer.
        CharArrayBuffer buffer;
        int pos;
        if (header instanceof FormattedHeader) {
            buffer = ((FormattedHeader) header).getBuffer();
            pos = ((FormattedHeader) header).getValuePos();
        } else {
            String s = header.getValue();
            if (s == null) {
                throw new MalformedChallengeException("Header value is null");
            }
            buffer = new CharArrayBuffer(s.length());
            buffer.append(s);
            pos = 0;
        }
        // Skip any leading whitespace.
        while (pos < buffer.length() && HTTP.isWhitespace(buffer.charAt(pos))) {
            pos++;
        }
        int beginIndex = pos;
        // Extract the scheme identifier token (up to the next whitespace).
        while (pos < buffer.length() && !HTTP.isWhitespace(buffer.charAt(pos))) {
            pos++;
        }
        int endIndex = pos;
        String s = buffer.substring(beginIndex, endIndex);
        if (!s.equalsIgnoreCase(getSchemeName())) {
            throw new MalformedChallengeException("Invalid scheme identifier: " + s);
        }
        // Hand the remainder of the challenge to the scheme-specific parser.
        parseChallenge(buffer, pos, buffer.length());
    }


    @SuppressWarnings("deprecation")
    public Header authenticate(
            final Credentials credentials,
            final HttpRequest request,
            final HttpContext context) throws AuthenticationException {
        // Default implementation ignores the context and delegates to the
        // deprecated two-argument variant for backward compatibility.
        return authenticate(credentials, request);
    }

    // Scheme-specific parsing of the challenge text after the scheme token.
    protected abstract void parseChallenge(
            CharArrayBuffer buffer, int beginIndex, int endIndex) throws MalformedChallengeException;

    /**
     * Returns <code>true</code> if authenticating against a proxy, <code>false</code>
     * otherwise.
     */
    public boolean isProxy() {
        return this.challengeState != null && this.challengeState == ChallengeState.PROXY;
    }

    /**
     * Returns {@link ChallengeState} value or <code>null</code> if unchallenged.
     *
     * @since 4.2
     */
    public ChallengeState getChallengeState() {
        return this.challengeState;
    }

    @Override
    public String toString() {
        String name = getSchemeName();
        if (name != null) {
            return name.toUpperCase(Locale.US);
        } else {
            return super.toString();
        }
    }

}
|
Phoenix1708/t2-server-jar-android-0.1
|
t2-server-jar-android-0.1-hyde/src/main/java/local/org/apache/http/impl/auth/AuthSchemeBase.java
|
Java
|
bsd-3-clause
| 5,874
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<!--Converted with LaTeX2HTML 2002-1 (1.68)
original version by: Nikos Drakos, CBLU, University of Leeds
* revised and updated by: Marcus Hennecke, Ross Moore, Herb Swan
* with significant contributions from:
Jens Lippmann, Marek Rouchal, Martin Wilck and others -->
<HTML>
<HEAD>
<TITLE>G3 Circular/Helical Interpolation (Counterclockwise)</TITLE>
<META NAME="description" CONTENT="G3 Circular/Helical Interpolation (Counterclockwise)">
<META NAME="keywords" CONTENT="Handbook">
<META NAME="resource-type" CONTENT="document">
<META NAME="distribution" CONTENT="global">
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
<META NAME="Generator" CONTENT="LaTeX2HTML v2002-1">
<META HTTP-EQUIV="Content-Style-Type" CONTENT="text/css">
<LINK REL="STYLESHEET" HREF="Handbook.css">
<LINK REL="next" HREF="node57.html">
<LINK REL="previous" HREF="node55.html">
<LINK REL="up" HREF="node52.html">
<LINK REL="next" HREF="node57.html">
</HEAD>
<BODY >
<!--Navigation Panel-->
<A NAME="tex2html1177"
HREF="node57.html">
<IMG WIDTH="37" HEIGHT="24" ALIGN="BOTTOM" BORDER="0" ALT="next" SRC="next.gif"></A>
<A NAME="tex2html1171"
HREF="node52.html">
<IMG WIDTH="26" HEIGHT="24" ALIGN="BOTTOM" BORDER="0" ALT="up" SRC="up.gif"></A>
<A NAME="tex2html1165"
HREF="node55.html">
<IMG WIDTH="63" HEIGHT="24" ALIGN="BOTTOM" BORDER="0" ALT="previous" SRC="prev.gif"></A>
<A NAME="tex2html1173"
HREF="node1.html">
<IMG WIDTH="65" HEIGHT="24" ALIGN="BOTTOM" BORDER="0" ALT="contents" SRC="contents.gif"></A>
<A NAME="tex2html1175"
HREF="node140.html">
<IMG WIDTH="43" HEIGHT="24" ALIGN="BOTTOM" BORDER="0" ALT="index" SRC="index.gif"></A>
<BR>
<B> Next:</B> <A NAME="tex2html1178"
HREF="node57.html">G4 Dwell</A>
<B> Up:</B> <A NAME="tex2html1172"
HREF="node52.html">10.7 Basic Motion and</A>
<B> Previous:</B> <A NAME="tex2html1166"
HREF="node55.html">G2 Circular/Helical Interpolation (Clockwise)</A>
<B> <A NAME="tex2html1174"
HREF="node1.html">Contents</A></B>
<B> <A NAME="tex2html1176"
HREF="node140.html">Index</A></B>
<BR>
<BR>
<!--End of Navigation Panel-->
<H2><A NAME="SECTION04174000000000000000">
G3 Circular/Helical Interpolation (Counterclockwise)</A>
</H2>
<P>
<A NAME="2188"></A>G3 is the counterclockwise sibling to G2.
<P>
<BR><HR>
<ADDRESS>
root
2003-05-26
</ADDRESS>
</BODY>
</HTML>
|
parhansson/KMotionX
|
KMotion/Help/GCodeScreen/EMC_Handbook/node56.html
|
HTML
|
bsd-3-clause
| 2,491
|
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "printing/print_settings.h"
#include "base/atomic_sequence_num.h"
#include "base/logging.h"
#include "printing/units.h"
namespace printing {
// Global SequenceNumber used for generating unique cookie values.
static base::AtomicSequenceNumber cookie_seq(base::LINKER_INITIALIZED);
// Default-constructs settings with the historical defaults: shrink
// factors of 1.25-2.0, 72 dpi desired resolution, portrait orientation,
// and no device dpi known yet.
PrintSettings::PrintSettings()
    : min_shrink(1.25),
      max_shrink(2.0),
      desired_dpi(72),
      selection_only(false),
      dpi_(0),
      landscape_(false) {
}
// Resets every field to the same defaults the constructor establishes
// and empties the page ranges, names, and page setup.
void PrintSettings::Clear() {
  min_shrink = 1.25;
  max_shrink = 2.0;
  desired_dpi = 72;
  selection_only = false;
  dpi_ = 0;
  landscape_ = false;
  ranges.clear();
  printer_name_.clear();
  device_name_.clear();
  page_setup_pixels_.Clear();
}
#ifdef WIN32
// Initializes the settings from a Windows printer device context and its
// DEVMODE structure, then derives the printable area from GetDeviceCaps.
void PrintSettings::Init(HDC hdc,
                         const DEVMODE& dev_mode,
                         const PageRanges& new_ranges,
                         const std::wstring& new_device_name,
                         bool print_selection_only) {
  DCHECK(hdc);
  printer_name_ = dev_mode.dmDeviceName;
  device_name_ = new_device_name;
  ranges = new_ranges;
  landscape_ = dev_mode.dmOrientation == DMORIENT_LANDSCAPE;
  selection_only = print_selection_only;

  dpi_ = GetDeviceCaps(hdc, LOGPIXELSX);
  // No printer device is known to advertise different dpi in X and Y axis; even
  // the fax device using the 200x100 dpi setting. It's ought to break so many
  // applications that it's not even needed to care about. WebKit doesn't
  // support different dpi settings in X and Y axis.
  DCHECK_EQ(dpi_, GetDeviceCaps(hdc, LOGPIXELSY));

  DCHECK_EQ(GetDeviceCaps(hdc, SCALINGFACTORX), 0);
  DCHECK_EQ(GetDeviceCaps(hdc, SCALINGFACTORY), 0);

  // Initialize page_setup_pixels_.
  gfx::Size physical_size_pixels(GetDeviceCaps(hdc, PHYSICALWIDTH),
                                 GetDeviceCaps(hdc, PHYSICALHEIGHT));
  gfx::Rect printable_area_pixels(GetDeviceCaps(hdc, PHYSICALOFFSETX),
                                  GetDeviceCaps(hdc, PHYSICALOFFSETY),
                                  GetDeviceCaps(hdc, HORZRES),
                                  GetDeviceCaps(hdc, VERTRES));

  SetPrinterPrintableArea(physical_size_pixels, printable_area_pixels);
}
#endif
// Computes the page setup from the physical paper size and the printable
// area reported by the device, applying uniform 0.5 cm margins.
void PrintSettings::SetPrinterPrintableArea(
    gfx::Size const& physical_size_pixels,
    gfx::Rect const& printable_area_pixels) {
  // 500 hundredths of a millimeter (0.5 cm) converted to device units.
  int margin_printer_units = ConvertUnit(500, kHundrethsMMPerInch, dpi_);
  // Start by setting the user configuration
  // Hard-code text_height = 0.5cm = ~1/5 of inch
  page_setup_pixels_.Init(physical_size_pixels,
                          printable_area_pixels,
                          margin_printer_units);

  // Now apply user configured settings.
  PageMargins margins;
  margins.header = margin_printer_units;
  margins.footer = margin_printer_units;
  margins.left = margin_printer_units;
  margins.top = margin_printer_units;
  margins.right = margin_printer_units;
  margins.bottom = margin_printer_units;
  page_setup_pixels_.SetRequestedMargins(margins);
}
// Field-by-field equality check, deliberately excluding printer_name_.
bool PrintSettings::Equals(const PrintSettings& rhs) const {
  // Do not test the display device name (printer_name_) for equality since it
  // may sometimes be chopped off at 30 chars. As long as device_name is the
  // same, that's fine.
  return ranges == rhs.ranges &&
      min_shrink == rhs.min_shrink &&
      max_shrink == rhs.max_shrink &&
      desired_dpi == rhs.desired_dpi &&
      overlays.Equals(rhs.overlays) &&
      device_name_ == rhs.device_name_ &&
      page_setup_pixels_.Equals(rhs.page_setup_pixels_) &&
      dpi_ == rhs.dpi_ &&
      landscape_ == rhs.landscape_;
}
int PrintSettings::NewCookie() {
  // 0 is reserved to mark a document as unassigned, so the first cookie
  // handed out is 1.
  const int next = cookie_seq.GetNext();
  return next + 1;
}
} // namespace printing
|
kuiche/chromium
|
printing/print_settings.cc
|
C++
|
bsd-3-clause
| 3,963
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run all python tests in this directory."""

import sys
import unittest

MODULES = [
    'directory_storage_test',
    'gsd_storage_test',
    'hashing_tools_test',
    'local_storage_cache_test',
]

# We use absolute imports for Py3 compatibility.
# This means for imports to resolve when testing we need to add the pynacl
# directory to the module search path.
sys.path.insert(0, './')

loader = unittest.TestLoader()
runner = unittest.TextTestRunner(verbosity=2)
outcome = runner.run(loader.loadTestsFromNames(MODULES))
# Exit 0 only when every test passed, so CI can key off the status code.
sys.exit(0 if outcome.wasSuccessful() else 1)
|
endlessm/chromium-browser
|
native_client/pynacl/run_pynacl_tests.py
|
Python
|
bsd-3-clause
| 757
|
// Copyright (c) MOSA Project. Licensed under the New BSD License.

using Mosa.DeviceSystem;

namespace Mosa.DeviceDriver.ISA
{
	/// <summary>
	/// VGA Text Device Driver: drives the legacy VGA adapter in text mode
	/// through its ISA I/O ports (relative to base 0x3B0) and the
	/// memory-mapped text buffer at 0xB0000.
	/// </summary>
	[ISADeviceDriver(AutoLoad = true, BasePort = 0x03B0, PortRange = 0x1F, BaseAddress = 0xB0000, AddressRange = 0x10000, Platforms = PlatformArchitecture.X86AndX64)]
	public class VGAText : HardwareDevice, IDevice, ITextDevice
	{
		#region Definitions

		/// <summary>
		/// CRT controller register indices used by this driver.
		/// </summary>
		internal struct CRTCommands
		{
			internal const byte HorizontalTotal = 0x00;
			internal const byte HorizontalDisplayEnableEnd = 0x01;
			internal const byte CursorStart = 0x0A;
			internal const byte CursorEnd = 0x0B;
			internal const byte CursorLocationHigh = 0x0E;
			internal const byte CursorLocationLow = 0x0F;
			internal const byte VerticalDisplayEnableEnd = 0x12;
		}

		#endregion Definitions

		/// <summary>Miscellaneous Output register port (read side).</summary>
		protected IReadWriteIOPort miscellaneousOutput;

		/// <summary>CRT controller index port (monochrome).</summary>
		protected IReadWriteIOPort crtControllerIndex;

		/// <summary>CRT controller data port (monochrome).</summary>
		protected IReadWriteIOPort crtControllerData;

		/// <summary>CRT controller index port (color).</summary>
		protected IReadWriteIOPort crtControllerIndexColor;

		/// <summary>CRT controller data port (color).</summary>
		protected IReadWriteIOPort crtControllerDataColor;

		/// <summary>Miscellaneous Output register port (write side).</summary>
		protected IWriteOnlyIOPort miscellaneousOutputWrite;

		/// <summary>Sequencer address (index) port.</summary>
		protected IReadWriteIOPort sequencerAddress;

		/// <summary>Sequencer data port.</summary>
		protected IReadWriteIOPort sequencerData;

		/// <summary>Graphics controller address (index) port.</summary>
		protected IReadWriteIOPort graphicsControllerAddress;

		/// <summary>Graphics controller data port.</summary>
		protected IReadWriteIOPort graphicsControllerData;

		/// <summary>Input Status #1 port; read before attribute controller writes (see WriteSettings).</summary>
		protected IReadWriteIOPort inputStatus1ReadB;

		/// <summary>Attribute controller address port (also receives the data writes in WriteSettings).</summary>
		protected IReadWriteIOPort attributeAddress;

		/// <summary>Attribute controller data port.</summary>
		protected IReadWriteIOPort attributeData;

		/// <summary>Memory-mapped VGA text buffer region.</summary>
		protected IMemory memory;

		/// <summary>Index port of the currently active (color or monochrome) CRT controller.</summary>
		protected IReadWriteIOPort activeControllerIndex;

		/// <summary>Data port of the currently active (color or monochrome) CRT controller.</summary>
		protected IReadWriteIOPort activeControllerData;

		/// <summary>True when the adapter operates in color mode (bit 0 of Miscellaneous Output set).</summary>
		protected bool colorMode = false;

		/// <summary>Byte offset of the text buffer within the mapped memory region.</summary>
		protected uint offset = 0x8000;

		/// <summary>Screen width in character columns.</summary>
		protected byte width = 80;

		/// <summary>Screen height in character rows.</summary>
		protected byte height = 25;

		/// <summary>Bytes per character cell: 2 in color mode (char + attribute), 1 in monochrome mode.</summary>
		protected byte bytePerChar = 2;

		/// <summary>Background color applied when clearing the screen.</summary>
		protected TextColor defaultBackground = TextColor.White;

		/// <summary>
		/// Initializes a new instance of the <see cref="VGAText"/> class.
		/// All hardware state is established later, in Setup/Start.
		/// </summary>
		public VGAText()
		{
		}
		/// <summary>
		/// Setups this hardware device driver: resolves the I/O ports and the
		/// memory region assigned by the ISA bus. Port offsets are relative to
		/// BasePort 0x3B0 declared in the ISADeviceDriver attribute.
		/// </summary>
		/// <param name="hardwareResources">The hardware resources assigned to this device.</param>
		/// <returns>True when setup succeeded.</returns>
		public override bool Setup(IHardwareResources hardwareResources)
		{
			this.hardwareResources = hardwareResources;
			base.name = "VGAText";

			miscellaneousOutput = base.hardwareResources.GetIOPort(0, 0x1C);
			crtControllerIndex = base.hardwareResources.GetIOPort(0, 0x04);
			crtControllerData = base.hardwareResources.GetIOPort(0, 0x05);
			crtControllerIndexColor = base.hardwareResources.GetIOPort(0, 0x24);
			crtControllerDataColor = base.hardwareResources.GetIOPort(0, 0x25);
			miscellaneousOutputWrite = base.hardwareResources.GetIOPort(0, 0x12);
			sequencerAddress = base.hardwareResources.GetIOPort(0, 0x14);
			sequencerData = base.hardwareResources.GetIOPort(0, 0x15);
			graphicsControllerAddress = base.hardwareResources.GetIOPort(0, 0x1E);
			graphicsControllerData = base.hardwareResources.GetIOPort(0, 0x1F);
			inputStatus1ReadB = base.hardwareResources.GetIOPort(0, 0x2A);
			attributeAddress = base.hardwareResources.GetIOPort(0, 0x10);
			attributeData = base.hardwareResources.GetIOPort(0, 0x11);
			memory = base.hardwareResources.GetMemory(0);

			return true;
		}
		/// <summary>
		/// Starts this hardware device: programs the 80x25 text mode and
		/// selects color or monochrome operation based on the hardware.
		/// </summary>
		/// <returns>Started on success.</returns>
		public override DeviceDriverStartStatus Start()
		{
			WriteSettings(VGAText80x25);

			// Bit 0 of the Miscellaneous Output register distinguishes the
			// color and monochrome port/buffer layouts.
			colorMode = ((miscellaneousOutput.Read8() & 1) == 1);
			if (colorMode)
			{
				offset = 0x8000;	// color text buffer offset within the mapped region
				bytePerChar = 2;	// character byte + attribute byte per cell
				activeControllerIndex = crtControllerIndexColor;
				activeControllerData = crtControllerDataColor;
			}
			else
			{
				offset = 0x0;
				bytePerChar = 1;	// character byte only
				activeControllerIndex = crtControllerIndex;
				activeControllerData = crtControllerData;
			}

			width = GetValue(CRTCommands.HorizontalDisplayEnableEnd);
			height = GetValue(CRTCommands.VerticalDisplayEnableEnd);
			width++;	// the CRT register reports one less than the column count
			// NOTE(review): the VerticalDisplayEnableEnd value read above is
			// discarded and height is hard-coded to 25 — presumably because
			// that register counts scan lines rather than text rows; confirm
			// before supporting other text modes.
			height = 25;

			base.deviceStatus = DeviceStatus.Online;
			return DeviceDriverStartStatus.Started;
		}

		/// <summary>
		/// Called when an interrupt is received. This device needs no
		/// interrupt handling, so the interrupt is simply acknowledged.
		/// </summary>
		/// <returns>Always true.</returns>
		public override bool OnInterrupt()
		{
			return true;
		}
		/// <summary>
		/// Writes a value into a CRT controller register through the active
		/// (color or monochrome) index/data port pair.
		/// </summary>
		/// <param name="command">The register index (see <see cref="CRTCommands"/>).</param>
		/// <param name="value">The value to write.</param>
		protected void SendCommand(byte command, byte value)
		{
			activeControllerIndex.Write8(command);
			activeControllerData.Write8(value);
		}

		/// <summary>
		/// Reads the current value of a CRT controller register through the
		/// active index/data port pair.
		/// </summary>
		/// <param name="command">The register index (see <see cref="CRTCommands"/>).</param>
		/// <returns>The register value.</returns>
		protected byte GetValue(byte command)
		{
			activeControllerIndex.Write8(command);
			return activeControllerData.Read8();
		}

		/// <summary>
		/// Sets the size of the text cursor by programming its start and end
		/// registers.
		/// </summary>
		/// <param name="start">The cursor start value.</param>
		/// <param name="end">The cursor end value.</param>
		protected void SetCursorSize(byte start, byte end)
		{
			SendCommand(CRTCommands.CursorStart, start);
			SendCommand(CRTCommands.CursorEnd, end);
		}

		/// <summary>
		/// Gets the screen width in character columns.
		/// </summary>
		public byte Width { get { return width; } }

		/// <summary>
		/// Gets the screen height in character rows.
		/// </summary>
		public byte Height { get { return height; } }
/// <summary>
/// Writes the char at the position indicated.
/// </summary>
/// <param name="x">The x position.</param>
/// <param name="y">The y position.</param>
/// <param name="c">The character.</param>
/// <param name="foreground">The foreground color.</param>
/// <param name="background">The background color.</param>
public void WriteChar(ushort x, ushort y, char c, TextColor foreground, TextColor background)
{
if (colorMode)
{
uint index = (ushort)(offset + (((y * width) + x) * 2));
memory[index] = (byte)c;
memory[index + 1] = (byte)((byte)foreground | ((byte)background << 4));
}
else
{
uint index = (ushort)(offset + (y * width) + x);
index = index + x;
memory[index] = (byte)c;
}
}
		/// <summary>
		/// Sets the hardware cursor position.
		/// </summary>
		/// <param name="x">The x (column) position.</param>
		/// <param name="y">The y (row) position.</param>
		public void SetCursor(ushort x, ushort y)
		{
			// The cursor location is a linear cell index split across two
			// 8-bit CRT controller registers (high byte, then low byte).
			uint position = (uint)(x + (y * width));
			SendCommand(CRTCommands.CursorLocationHigh, (byte)((position >> 8) & 0xFF));
			SendCommand(CRTCommands.CursorLocationLow, (byte)(position & 0xFF));
		}

		/// <summary>
		/// Clears the screen. In color mode every cell's character byte is
		/// zeroed and its attribute byte is set to the default background;
		/// in monochrome mode only the character byte is zeroed.
		/// </summary>
		public void ClearScreen()
		{
			uint index = offset;
			uint size = (uint)(height * width);	// total character cells
			if (bytePerChar == 2)
				for (int i = 0; i < size; i++)
				{
					memory[(uint)(index + (i * 2))] = 0;
					memory[(uint)(index + (i * 2) + 1)] = (byte)((byte)defaultBackground << 4);
				}
			else
				for (int i = 0; i < size; i = i + bytePerChar)
				{
					memory[(uint)(index + i)] = 0;
				}
		}
/// <summary>
/// Scrolls up.
/// </summary>
public void ScrollUp()
{
uint index = offset;
uint size = (uint)(((height * width) - width) * bytePerChar);
for (uint i = index; i < (index + size); i++)
memory[i] = memory[(uint)(i + (width * bytePerChar))];
index = (uint)(index + ((height - 1) * width * bytePerChar));
for (int i = 0; i < width * 2; i++)
memory[(uint)(index + i)] = 0;
}
/// <summary>
/// Writes the settings.
/// </summary>
/// <param name="settings">The settings.</param>
protected void WriteSettings(byte[] settings)
{
// Write MISCELLANEOUS reg
miscellaneousOutputWrite.Write8(settings[0]);
// Write SEQUENCER regs
for (byte i = 0; i < 5; i++)
{
sequencerAddress.Write8(i);
sequencerData.Write8(settings[1 + i]);
}
// Unlock CRTC registers
crtControllerIndexColor.Write8(0x03);
crtControllerDataColor.Write8((byte)(crtControllerData.Read8() | 0x80));
crtControllerIndexColor.Write8(0x11);
crtControllerDataColor.Write8((byte)(crtControllerData.Read8() & 0x7F));
// Make sure they remain unlocked
settings[0x03] = (byte)(settings[0x03] | 0x80);
settings[0x11] = (byte)(settings[0x11] & 0x7F);
// Write CRTC regs
for (byte i = 0; i < 25; i++)
{
crtControllerIndexColor.Write8(i);
crtControllerDataColor.Write8(settings[6 + i]);
}
// Write GRAPHICS CONTROLLER regs
for (byte i = 0; i < 9; i++)
{
graphicsControllerAddress.Write8(i);
graphicsControllerData.Write8(settings[31 + i]);
}
// Write ATTRIBUTE CONTROLLER regs
for (byte i = 0; i < 21; i++)
{
inputStatus1ReadB.Read8();
attributeAddress.Write8(i);
attributeAddress.Write8(settings[40 + i]); // TODO: Double check
}
// Lock 16-color palette and unblank display */
inputStatus1ReadB.Read8();
attributeAddress.Write8(0x20);
}
		#region Modes

		/// <summary>
		/// Register values for the standard VGA 80x25 text mode, packed in the
		/// order consumed by <see cref="WriteSettings"/>: [0] Miscellaneous
		/// Output, [1..5] Sequencer, [6..30] CRTC, [31..39] Graphics
		/// Controller, [40..60] Attribute Controller.
		/// </summary>
		private static byte[] VGAText80x25 = new byte[] {
			/* MISC */
			0x67,
			/* SEQ */
			0x03, 0x00, 0x03, 0x00, 0x02,
			/* CRTC */
			0x5F, 0x4F, 0x50, 0x82, 0x55, 0x81, 0xBF, 0x1F,
			0x00, 0x4F, 0x0D, 0x0E, 0x00, 0x00, 0x00, 0x50,
			0x9C, 0x0E, 0x8F, 0x28, 0x1F, 0x96, 0xB9, 0xA3,
			0xFF,
			/* GC */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0E, 0x00,
			0xFF,
			/* AC */
			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x14, 0x07,
			0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
			0x0C, 0x00, 0x0F, 0x08, 0x00
		};

		#endregion Modes
	}
}
|
Kintaro/MOSA-Project
|
Source/Mosa.DeviceDriver/ISA/VGAText.cs
|
C#
|
bsd-3-clause
| 10,270
|
#pragma once
//=====================================================================//
/*! @file
    @brief  Simple dialog display. @n
            - A minimal, self-contained dialog class. @n
            - It can only display a text string and close on a detected touch.
    @author Kunihito Hiramatsu (hira@rvf-rc45.net)
    @copyright Copyright (C) 2019, 2020 Kunihito Hiramatsu @n
        Released under the MIT license @n
        https://github.com/hirakuni45/RX/blob/master/LICENSE
*/
//=====================================================================//
#include "graphics/graphics.hpp"
#include "graphics/color.hpp"

namespace gui {

    //+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
    /*!
        @brief  Simple dialog display class
        @param[in] RDR    Render class
        @param[in] TOUCH  Touch class
    */
    //+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
    template <class RDR, class TOUCH>
    class simple_dialog {
    public:
        typedef graphics::def_color DEF_COLOR;

        static const int16_t modal_radius = 10;   // modal round radius
        static const int16_t button_radius = 6;   // button round radius

    private:
        using GLC = typename RDR::glc_type;

        RDR&    rdr_;
        TOUCH&  touch_;

    public:
        //-----------------------------------------------------------------//
        /*!
            @brief  Constructor
            @param[in] rdr    render class
            @param[in] touch  touch class
        */
        //-----------------------------------------------------------------//
        simple_dialog(RDR& rdr, TOUCH& touch) noexcept : rdr_(rdr), touch_(touch)
        { }

        //-----------------------------------------------------------------//
        /*!
            @brief  Draw a modal window (centered on the screen)
            @param[in] size  size
            @param[in] text  text
        */
        //-----------------------------------------------------------------//
        void modal(const vtx::spos& size, const char* text) noexcept
        {
            // Center the dialog on the physical display.
            vtx::spos pos((GLC::width - size.x) / 2, (GLC::height - size.y) / 2);
            rdr_.set_fore_color(DEF_COLOR::White);
            rdr_.set_back_color(DEF_COLOR::Darkgray);
            vtx::srect r(pos, size);
            rdr_.round_box(r, modal_radius);
            // Draw a second rounded box inset by 2 pixels in the swapped
            // color to form a 2-pixel frame.
            r.org += 2;
            r.size -= 2 * 2;
            rdr_.swap_color();
            rdr_.round_box(r, modal_radius - 2);
            auto sz = rdr_.at_font().get_text_size(text);
            rdr_.swap_color();
            rdr_.draw_text(pos + (size - sz) / 2, text);
        }

        //-----------------------------------------------------------------//
        /*!
            @brief  Draw a square button. @n
                    - The background uses "back_color". @n
                    - The text is drawn with "fore_color".
            @param[in] rect  placement
            @param[in] text  text
        */
        //-----------------------------------------------------------------//
        void square_button(const vtx::srect& rect, const char* text) noexcept
        {
            auto sz = rdr_.at_font().get_text_size(text);
            rdr_.swap_color();
            rdr_.fill_box(rect);
            rdr_.swap_color();
            rdr_.draw_text(rect.org + (rect.size - sz) / 2, text);
        }

        //-----------------------------------------------------------------//
        /*!
            @brief  Draw a round (rounded-corner) button. @n
                    - The background uses "back_color". @n
                    - The text is drawn with "fore_color".
            @param[in] rect  placement
            @param[in] text  text
        */
        //-----------------------------------------------------------------//
        void round_button(const vtx::srect& rect, const char* text) noexcept
        {
            rdr_.swap_color();
            rdr_.round_box(rect, button_radius);
            rdr_.swap_color();
            auto sz = rdr_.at_font().get_text_size(text);
            rdr_.draw_text(rect.org + (rect.size - sz) / 2, text);
        }

        //-----------------------------------------------------------------//
        /*!
            @brief  Update (currently a no-op placeholder)
        */
        //-----------------------------------------------------------------//
        void update() noexcept
        {
        }

        //-----------------------------------------------------------------//
        /*!
            @brief  Wait for the touch panel to become stable: shows a
                    prompt, then returns once 60 consecutive frames report
                    no touch.
        */
        //-----------------------------------------------------------------//
        void ready_to_touch()
        {
            rdr_.sync_frame();
            modal(vtx::spos(400, 60),
                "Touch panel device wait...\nPlease touch it with some screen.");
            uint8_t nnn = 0;
            while(1) {
                rdr_.sync_frame();
                touch_.update();
                auto num = touch_.get_touch_num();
                if(num == 0) {
                    // Count touch-free frames; one full second (60 frames)
                    // of silence means the panel has settled.
                    ++nnn;
                    if(nnn >= 60) break;
                } else {
                    nnn = 0;
                }
            }
        }
    };
}
|
hirakuni45/RX
|
graphics/simple_dialog.hpp
|
C++
|
bsd-3-clause
| 4,490
|
#include <ompl/geometric/PathGeometric.h>
#include "aikido/planner/ompl/BackwardCompatibility.hpp"
#include "aikido/planner/ompl/GeometricStateSpace.hpp"
#include "aikido/planner/ompl/GoalRegion.hpp"
#include "aikido/planner/ompl/StateValidityChecker.hpp"
#include "aikido/trajectory/Interpolated.hpp"
namespace aikido {
namespace planner {
namespace ompl {
//==============================================================================
/// Plans with the given OMPL planner type from a single start state to a
/// single goal state.
///
/// Builds a SpaceInformation from the supplied state space, interpolator,
/// metric, sampler and constraints, registers the start/goal pair on a
/// ProblemDefinition, then delegates to the planOMPL overload that runs the
/// planner.
template <class PlannerType>
trajectory::InterpolatedPtr planOMPL(
    const statespace::StateSpace::State* _start,
    const statespace::StateSpace::State* _goal,
    statespace::ConstStateSpacePtr _stateSpace,
    statespace::InterpolatorPtr _interpolator,
    distance::DistanceMetricPtr _dmetric,
    constraint::SampleablePtr _sampler,
    constraint::TestablePtr _validityConstraint,
    constraint::TestablePtr _boundsConstraint,
    constraint::ProjectablePtr _boundsProjector,
    double _maxPlanTime,
    double _maxDistanceBtwValidityChecks)
{
  // Create a SpaceInformation. This function will ensure state space matching.
  // _stateSpace and _interpolator are needed again below, so they are copied
  // here and only std::move'd at the final call.
  auto si = getSpaceInformation(
      _stateSpace,
      _interpolator,
      std::move(_dmetric),
      std::move(_sampler),
      std::move(_validityConstraint),
      std::move(_boundsConstraint),
      std::move(_boundsProjector),
      _maxDistanceBtwValidityChecks);

  // Start and goal states.
  auto pdef = ompl_make_shared<::ompl::base::ProblemDefinition>(si);
  auto sspace
      = ompl_static_pointer_cast<GeometricStateSpace>(si->getStateSpace());
  auto start = sspace->allocState(_start);
  auto goal = sspace->allocState(_goal);

  // ProblemDefinition clones states and keeps them internally, so the
  // temporaries can be freed immediately after registration.
  pdef->setStartAndGoalStates(start, goal);
  sspace->freeState(start);
  sspace->freeState(goal);

  auto planner = ompl_make_shared<PlannerType>(si);
  return planOMPL(
      planner,
      pdef,
      std::move(_stateSpace),
      std::move(_interpolator),
      _maxPlanTime);
}
//==============================================================================
/// Plans with the given OMPL planner type from a single start state into a
/// goal region described by a Testable constraint plus a Sampleable that
/// generates states satisfying it.
///
/// Validates that both goal objects are non-null and belong to the same
/// state space, builds a SpaceInformation, registers the start state and a
/// GoalRegion on a ProblemDefinition, then delegates to the planOMPL
/// overload that runs the planner.
///
/// \throws std::invalid_argument if either goal object is null or does not
///         match \c _stateSpace.
template <class PlannerType>
trajectory::InterpolatedPtr planOMPL(
    const statespace::StateSpace::State* _start,
    constraint::TestablePtr _goalTestable,
    constraint::SampleablePtr _goalSampler,
    statespace::ConstStateSpacePtr _stateSpace,
    statespace::InterpolatorPtr _interpolator,
    distance::DistanceMetricPtr _dmetric,
    constraint::SampleablePtr _sampler,
    constraint::TestablePtr _validityConstraint,
    constraint::TestablePtr _boundsConstraint,
    constraint::ProjectablePtr _boundsProjector,
    double _maxPlanTime,
    double _maxDistanceBtwValidityChecks)
{
  // Fail fast on invalid goal specifications before any setup work.
  if (_goalTestable == nullptr)
  {
    throw std::invalid_argument("Testable goal is nullptr.");
  }

  if (_goalSampler == nullptr)
  {
    throw std::invalid_argument("Sampleable goal is nullptr.");
  }

  if (_goalTestable->getStateSpace() != _stateSpace)
  {
    throw std::invalid_argument("Testable goal does not match StateSpace");
  }

  if (_goalSampler->getStateSpace() != _stateSpace)
  {
    throw std::invalid_argument("Sampleable goal does not match StateSpace");
  }

  auto si = getSpaceInformation(
      _stateSpace,
      _interpolator,
      std::move(_dmetric),
      std::move(_sampler),
      std::move(_validityConstraint),
      std::move(_boundsConstraint),
      std::move(_boundsProjector),
      _maxDistanceBtwValidityChecks);

  // Set the start and goal. ProblemDefinition copies the start state, so
  // the temporary can be freed right away.
  auto pdef = ompl_make_shared<::ompl::base::ProblemDefinition>(si);
  auto sspace
      = ompl_static_pointer_cast<GeometricStateSpace>(si->getStateSpace());
  auto start = sspace->allocState(_start);
  pdef->addStartState(start); // copies
  sspace->freeState(start);

  auto goalRegion = ompl_make_shared<GoalRegion>(
      si, std::move(_goalTestable), _goalSampler->createSampleGenerator());
  pdef->setGoal(goalRegion);

  auto planner = ompl_make_shared<PlannerType>(si);
  return planOMPL(
      planner,
      pdef,
      std::move(_stateSpace),
      std::move(_interpolator),
      _maxPlanTime);
}
} // namespace ompl
} // namespace planner
} // namespace aikido
|
personalrobotics/aikido
|
include/aikido/planner/ompl/detail/Planner-impl.hpp
|
C++
|
bsd-3-clause
| 4,104
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /><meta name="generator" content="Docutils 0.17.1: http://docutils.sourceforge.net/" />
<meta name="viewport" content="width=device-width,initial-scale=1">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<meta name="lang:clipboard.copy" content="Copy to clipboard">
<meta name="lang:clipboard.copied" content="Copied to clipboard">
<meta name="lang:search.language" content="en">
<meta name="lang:search.pipeline.stopwords" content="True">
<meta name="lang:search.pipeline.trimmer" content="True">
<meta name="lang:search.result.none" content="No matching documents">
<meta name="lang:search.result.one" content="1 matching document">
<meta name="lang:search.result.other" content="# matching documents">
<meta name="lang:search.tokenizer" content="[\s\-]+">
<link href="https://fonts.gstatic.com/" rel="preconnect" crossorigin>
<link href="https://fonts.googleapis.com/css?family=Roboto+Mono:400,500,700|Roboto:300,400,400i,700&display=fallback" rel="stylesheet">
<style>
body,
input {
font-family: "Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif
}
code,
kbd,
pre {
font-family: "Roboto Mono", "Courier New", Courier, monospace
}
</style>
<link rel="stylesheet" href="../../_static/stylesheets/application.css"/>
<link rel="stylesheet" href="../../_static/stylesheets/application-palette.css"/>
<link rel="stylesheet" href="../../_static/stylesheets/application-fixes.css"/>
<link rel="stylesheet" href="../../_static/fonts/material-icons.css"/>
<meta name="theme-color" content="#3f51b5">
<script src="../../_static/javascripts/modernizr.js"></script>
<title>statsmodels.tools.sm_exceptions.OutputWarning — statsmodels</title>
<link rel="icon" type="image/png" sizes="32x32" href="../../_static/icons/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="../../_static/icons/favicon-16x16.png">
<link rel="manifest" href="../../_static/icons/site.webmanifest">
<link rel="mask-icon" href="../../_static/icons/safari-pinned-tab.svg" color="#919191">
<meta name="msapplication-TileColor" content="#2b5797">
<meta name="msapplication-config" content="../../_static/icons/browserconfig.xml">
<link rel="stylesheet" href="../../_static/stylesheets/examples.css">
<link rel="stylesheet" href="../../_static/stylesheets/deprecation.css">
<link rel="stylesheet" type="text/css" href="../../_static/pygments.css" />
<link rel="stylesheet" type="text/css" href="../../_static/material.css" />
<link rel="stylesheet" type="text/css" href="../../_static/graphviz.css" />
<link rel="stylesheet" type="text/css" href="../../_static/plot_directive.css" />
<script data-url_root="../../" id="documentation_options" src="../../_static/documentation_options.js"></script>
<script src="../../_static/jquery.js"></script>
<script src="../../_static/underscore.js"></script>
<script src="../../_static/doctools.js"></script>
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
<link rel="shortcut icon" href="../../_static/favicon.ico"/>
<link rel="author" title="About these documents" href="../../about.html" />
<link rel="index" title="Index" href="../../genindex.html" />
<link rel="search" title="Search" href="../../search.html" />
<link rel="next" title="statsmodels.tools.sm_exceptions.DomainWarning" href="statsmodels.tools.sm_exceptions.DomainWarning.html" />
<link rel="prev" title="statsmodels.tools.sm_exceptions.NotImplementedWarning" href="statsmodels.tools.sm_exceptions.NotImplementedWarning.html" />
</head>
<body dir=ltr
data-md-color-primary=indigo data-md-color-accent=blue>
<svg class="md-svg">
<defs data-children-count="0">
<svg xmlns="http://www.w3.org/2000/svg" width="416" height="448" viewBox="0 0 416 448" id="__github"><path fill="currentColor" d="M160 304q0 10-3.125 20.5t-10.75 19T128 352t-18.125-8.5-10.75-19T96 304t3.125-20.5 10.75-19T128 256t18.125 8.5 10.75 19T160 304zm160 0q0 10-3.125 20.5t-10.75 19T288 352t-18.125-8.5-10.75-19T256 304t3.125-20.5 10.75-19T288 256t18.125 8.5 10.75 19T320 304zm40 0q0-30-17.25-51T296 232q-10.25 0-48.75 5.25Q229.5 240 208 240t-39.25-2.75Q130.75 232 120 232q-29.5 0-46.75 21T56 304q0 22 8 38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0 37.25-1.75t35-7.375 30.5-15 20.25-25.75T360 304zm56-44q0 51.75-15.25 82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5T212 416q-19.5 0-35.5-.75t-36.875-3.125-38.125-7.5-34.25-12.875T37 371.5t-21.5-28.75Q0 312 0 260q0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25 30.875Q171.5 96 212 96q37 0 70 8 26.25-20.5 46.75-30.25T376 64q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34 99.5z"/></svg>
</defs>
</svg>
<input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer">
<input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search">
<label class="md-overlay" data-md-component="overlay" for="__drawer"></label>
<a href="#dev/generated/statsmodels.tools.sm_exceptions.OutputWarning" tabindex="1" class="md-skip"> Skip to content </a>
<header class="md-header" data-md-component="header">
<nav class="md-header-nav md-grid">
<div class="md-flex navheader">
<div class="md-flex__cell md-flex__cell--shrink">
<a href="../../index.html" title="statsmodels"
class="md-header-nav__button md-logo">
<img src="../../_static/statsmodels-logo-v2-bw.svg" height="26"
alt="statsmodels logo">
</a>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<label class="md-icon md-icon--menu md-header-nav__button" for="__drawer"></label>
</div>
<div class="md-flex__cell md-flex__cell--stretch">
<div class="md-flex__ellipsis md-header-nav__title" data-md-component="title">
<span class="md-header-nav__topic">statsmodels v0.13.2</span>
<span class="md-header-nav__topic"> statsmodels.tools.sm_exceptions.OutputWarning </span>
</div>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<label class="md-icon md-icon--search md-header-nav__button" for="__search"></label>
<div class="md-search" data-md-component="search" role="dialog">
<label class="md-search__overlay" for="__search"></label>
<div class="md-search__inner" role="search">
<form class="md-search__form" action="../../search.html" method="get" name="search">
<input type="text" class="md-search__input" name="q" placeholder="Search"
autocapitalize="off" autocomplete="off" spellcheck="false"
data-md-component="query" data-md-state="active">
<label class="md-icon md-search__icon" for="__search"></label>
<button type="reset" class="md-icon md-search__icon" data-md-component="reset" tabindex="-1">

</button>
</form>
<div class="md-search__output">
<div class="md-search__scrollwrap" data-md-scrollfix>
<div class="md-search-result" data-md-component="result">
<div class="md-search-result__meta">
Type to start searching
</div>
<ol class="md-search-result__list"></ol>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<div class="md-header-nav__source">
<a href="https://github.com/statsmodels/statsmodels" title="Go to repository" class="md-source" data-md-source="github">
<div class="md-source__icon">
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 24 24" width="28" height="28">
<use xlink:href="#__github" width="24" height="24"></use>
</svg>
</div>
<div class="md-source__repository">
statsmodels
</div>
</a>
</div>
</div>
<script src="../../_static/javascripts/version_dropdown.js"></script>
<script>
var json_loc = "../../../versions-v2.json",
target_loc = "../../../",
text = "Versions";
$( document ).ready( add_version_dropdown(json_loc, target_loc, text));
</script>
</div>
</nav>
</header>
<div class="md-container">
<nav class="md-tabs" data-md-component="tabs">
<div class="md-tabs__inner md-grid">
<ul class="md-tabs__list">
<li class="md-tabs__item"><a href="../index.html" class="md-tabs__link">Developer Page</a></li>
<li class="md-tabs__item"><a href="../warnings-and-exceptions.html" class="md-tabs__link">Exceptions and Warnings</a></li>
</ul>
</div>
</nav>
<main class="md-main">
<div class="md-main__inner md-grid" data-md-component="container">
<div class="md-sidebar md-sidebar--primary" data-md-component="navigation">
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--primary" data-md-level="0">
<label class="md-nav__title md-nav__title--site" for="__drawer">
<a href="../../index.html" title="statsmodels" class="md-nav__button md-logo">
<img src="../../_static/statsmodels-logo-v2-bw.svg" alt=" logo" width="48" height="48">
</a>
<a href="../../index.html"
title="statsmodels">statsmodels v0.13.2</a>
</label>
<div class="md-nav__source">
<a href="https://github.com/statsmodels/statsmodels" title="Go to repository" class="md-source" data-md-source="github">
<div class="md-source__icon">
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 24 24" width="28" height="28">
<use xlink:href="#__github" width="24" height="24"></use>
</svg>
</div>
<div class="md-source__repository">
statsmodels
</div>
</a>
</div>
<ul class="md-nav__list">
<li class="md-nav__item">
<a href="../../install.html" class="md-nav__link">Installing statsmodels</a>
</li>
<li class="md-nav__item">
<a href="../../gettingstarted.html" class="md-nav__link">Getting started</a>
</li>
<li class="md-nav__item">
<a href="../../user-guide.html" class="md-nav__link">User Guide</a>
</li>
<li class="md-nav__item">
<a href="../../examples/index.html" class="md-nav__link">Examples</a>
</li>
<li class="md-nav__item">
<a href="../../api.html" class="md-nav__link">API Reference</a>
</li>
<li class="md-nav__item">
<a href="../../about.html" class="md-nav__link">About statsmodels</a>
</li>
<li class="md-nav__item">
<a href="../index.html" class="md-nav__link">Developer Page</a>
<ul class="md-nav__list">
<li class="md-nav__item">
<a href="../index.html#submitting-a-bug-report" class="md-nav__link">Submitting a Bug Report</a>
</li>
<li class="md-nav__item">
<a href="../index.html#making-changes-to-the-code" class="md-nav__link">Making Changes to the Code</a>
</li>
<li class="md-nav__item">
<a href="../index.html#how-to-submit-a-pull-request" class="md-nav__link">How to Submit a Pull Request</a>
</li>
<li class="md-nav__item">
<a href="../index.html#mailing-list" class="md-nav__link">Mailing List</a>
</li>
<li class="md-nav__item">
<a href="../index.html#license" class="md-nav__link">License</a>
</li>
<li class="md-nav__item">
<a href="../index.html#contents" class="md-nav__link">Contents</a>
<ul class="md-nav__list">
<li class="md-nav__item">
<a href="../git_notes.html" class="md-nav__link">Working with the statsmodels Code</a>
</li>
<li class="md-nav__item">
<a href="../maintainer_notes.html" class="md-nav__link">Maintainer Notes</a>
</li>
<li class="md-nav__item">
<a href="../test_notes.html" class="md-nav__link">Testing</a>
</li>
<li class="md-nav__item">
<a href="../naming_conventions.html" class="md-nav__link">Naming Conventions</a>
</li>
<li class="md-nav__item">
<a href="../warnings-and-exceptions.html" class="md-nav__link">Exceptions and Warnings</a>
</li>
<li class="md-nav__item">
<a href="../dataset_notes.html" class="md-nav__link">Datasets</a>
</li>
<li class="md-nav__item">
<a href="../examples.html" class="md-nav__link">Examples</a>
</li>
<li class="md-nav__item">
<a href="../get_involved.html" class="md-nav__link">Get Involved</a>
</li>
<li class="md-nav__item">
<a href="../internal.html" class="md-nav__link">Internal Classes</a>
</li>
<li class="md-nav__item">
<a href="../testing.html" class="md-nav__link">Testing on Build Machines</a>
</li></ul>
</li></ul>
</li>
<li class="md-nav__item">
<a href="../../release/index.html" class="md-nav__link">Release Notes</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-sidebar md-sidebar--secondary" data-md-component="toc">
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--secondary">
<ul class="md-nav__list" data-md-scrollfix="">
<li class="md-nav__item"><a class="md-nav__extra_link" href="../../_sources/dev/generated/statsmodels.tools.sm_exceptions.OutputWarning.rst.txt">Show Source</a> </li>
<li id="searchbox" class="md-nav__item"></li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-content">
<article class="md-content__inner md-typeset" role="main">
<section id="statsmodels-tools-sm-exceptions-outputwarning">
<h1 id="dev-generated-statsmodels-tools-sm-exceptions-outputwarning--page-root">statsmodels.tools.sm_exceptions.OutputWarning<a class="headerlink" href="#dev-generated-statsmodels-tools-sm-exceptions-outputwarning--page-root" title="Permalink to this headline">¶</a></h1>
<dl class="py exception">
<dt class="sig sig-object py" id="statsmodels.tools.sm_exceptions.OutputWarning">
<em class="property"><span class="pre">exception</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">statsmodels.tools.sm_exceptions.</span></span><span class="sig-name descname"><span class="pre">OutputWarning</span></span><a class="reference internal" href="../../_modules/statsmodels/tools/sm_exceptions.html#OutputWarning"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#statsmodels.tools.sm_exceptions.OutputWarning" title="Permalink to this definition">¶</a></dt>
<dd><p>Function output contains atypical values</p>
</dd></dl>
</section>
</article>
</div>
</div>
</main>
</div>
<footer class="md-footer">
<div class="md-footer-nav">
<nav class="md-footer-nav__inner md-grid">
<a href="statsmodels.tools.sm_exceptions.NotImplementedWarning.html" title="statsmodels.tools.sm_exceptions.NotImplementedWarning"
class="md-flex md-footer-nav__link md-footer-nav__link--prev"
rel="prev">
<div class="md-flex__cell md-flex__cell--shrink">
<i class="md-icon md-icon--arrow-back md-footer-nav__button"></i>
</div>
<div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title">
<span class="md-flex__ellipsis">
<span
class="md-footer-nav__direction"> Previous </span> statsmodels.tools.sm_exceptions.NotImplementedWarning </span>
</div>
</a>
<a href="statsmodels.tools.sm_exceptions.DomainWarning.html" title="statsmodels.tools.sm_exceptions.DomainWarning"
class="md-flex md-footer-nav__link md-footer-nav__link--next"
rel="next">
<div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title"><span
class="md-flex__ellipsis"> <span
class="md-footer-nav__direction"> Next </span> statsmodels.tools.sm_exceptions.DomainWarning </span>
</div>
<div class="md-flex__cell md-flex__cell--shrink"><i
class="md-icon md-icon--arrow-forward md-footer-nav__button"></i>
</div>
</a>
</nav>
</div>
<div class="md-footer-meta md-typeset">
<div class="md-footer-meta__inner md-grid">
<div class="md-footer-copyright">
<div class="md-footer-copyright__highlight">
© Copyright 2009-2019, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers.
</div>
Last updated on
Feb 08, 2022.
<br/>
Created using
<a href="http://www.sphinx-doc.org/">Sphinx</a> 4.4.0.
and
<a href="https://github.com/bashtage/sphinx-material/">Material for
Sphinx</a>
</div>
</div>
</div>
</footer>
<script src="../../_static/javascripts/application.js"></script>
<script>app.initialize({version: "1.0.4", url: {base: ".."}})</script>
</body>
</html>
|
statsmodels/statsmodels.github.io
|
v0.13.2/dev/generated/statsmodels.tools.sm_exceptions.OutputWarning.html
|
HTML
|
bsd-3-clause
| 18,226
|
<?php return [
    'DomainRedirector' => [
        // NOTE(review): per the toDomain => fromUrl comment below, keys
        // appear to be destination domains and values source URLs —
        // confirm against the DomainRedirector module implementation.
        'domainRedirects' => [
            // toDomain => fromUrl
            // 'hi.domain.com' => 'https://s3-us-west-2.amazonaws.com/hi',
        ]
    ],
];
|
innaDa/RcmPlugins
|
DomainRedirector/config/module.config.php
|
PHP
|
bsd-3-clause
| 202
|
/* ///////////////////////////////////////////////////////////////////////////
// Name: src/gtk/assertdlg_gtk.cpp
// Purpose: GtkAssertDialog
// Author: Francesco Montorsi
// Copyright: (c) 2006 Francesco Montorsi
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////// */
#include "wx/wxprec.h"
#if wxDEBUG_LEVEL
#include "wx/gtk/private.h"
#include "wx/gtk/assertdlg_gtk.h"
#include "wx/gtk/private/mnemonics.h"
#include "wx/translation.h"
#include "wx/stockitem.h"
#include <stdio.h>
/* ----------------------------------------------------------------------------
Constants
---------------------------------------------------------------------------- */
/*
NB: when changing order of the columns also update the gtk_list_store_new() call
in gtk_assert_dialog_create_backtrace_list_model() function
*/
#define STACKFRAME_LEVEL_COLIDX 0
#define FUNCTION_PROTOTYPE_COLIDX 1
#define SOURCE_FILE_COLIDX 2
#define LINE_NUMBER_COLIDX 3
/* ----------------------------------------------------------------------------
GtkAssertDialog helpers
---------------------------------------------------------------------------- */
// This function is called only for GTK+ < 3.10
//
// Creates a push button with the given mnemonic label, decorated with the
// given stock icon (via the icon name on wxGTK4, via a stock image widget on
// older GTK+). If "box" is non-NULL the button is packed at its end;
// otherwise the caller is responsible for placing the returned button.
static
GtkWidget *gtk_assert_dialog_add_button_to (GtkBox *box, const gchar *label,
const gchar *stock)
{
/* create the button */
GtkWidget *button = gtk_button_new_with_mnemonic (label);
gtk_widget_set_can_default(button, true);
/* add a stock icon inside it */
#ifdef __WXGTK4__
gtk_button_set_icon_name (GTK_BUTTON (button), stock);
#else
wxGCC_WARNING_SUPPRESS(deprecated-declarations)
GtkWidget *image = gtk_image_new_from_stock (stock, GTK_ICON_SIZE_BUTTON);
wxGCC_WARNING_RESTORE()
gtk_button_set_image (GTK_BUTTON (button), image);
#endif
/* add to the given (container) widget */
if (box)
#ifdef __WXGTK4__
gtk_box_pack_end (box, button);
#else
gtk_box_pack_end (box, button, FALSE, TRUE, 8);
#endif
return button;
}
// This function is called only for GTK+ < 3.10
//
// Builds a stock-decorated button (not packed into any box) and registers it
// in the dialog's action area so that clicking it emits "response" with the
// given response code. Returns the newly created button.
static
GtkWidget *gtk_assert_dialog_add_button (GtkAssertDialog *dlg, const gchar *label,
                                         const gchar *stock, gint response_id)
{
    GtkWidget* btn = gtk_assert_dialog_add_button_to(NULL, label, stock);

    /* let the dialog own the button and wire it to the response code */
    gtk_dialog_add_action_widget (GTK_DIALOG (dlg), btn, response_id);
    return btn;
}
#if wxUSE_STACKWALKER
// This function is called only for GTK+ < 3.10
//
// Inserts into "treeview", at position "index", a resizable and reorderable
// text column titled "name" whose cells render model column "index".
static
void gtk_assert_dialog_append_text_column (GtkWidget *treeview, const gchar *name, int index)
{
    GtkCellRenderer* textRenderer = gtk_cell_renderer_text_new ();
    GtkTreeViewColumn* col =
        gtk_tree_view_column_new_with_attributes (name, textRenderer,
                                                  "text", index, NULL);

    gtk_tree_view_column_set_resizable (col, TRUE);
    gtk_tree_view_column_set_reorderable (col, TRUE);
    gtk_tree_view_insert_column (GTK_TREE_VIEW (treeview), col, index);
}
// This function is called only for GTK+ < 3.10
//
// Builds the four-column tree view used to display the backtrace. The
// backing GtkListStore's column order matches the *_COLIDX constants defined
// at the top of this file. Returns the new tree view widget.
static
GtkWidget *gtk_assert_dialog_create_backtrace_list_model ()
{
GtkListStore *store;
GtkWidget *treeview;
/* create list store */
store = gtk_list_store_new (4,
G_TYPE_UINT, /* stack frame number */
G_TYPE_STRING, /* function prototype */
G_TYPE_STRING, /* source file name */
G_TYPE_STRING); /* line number */
/* create the tree view */
treeview = gtk_tree_view_new_with_model (GTK_TREE_MODEL(store));
/* the view now holds its own reference to the store; release ours */
g_object_unref (store);
#ifndef __WXGTK4__
wxGCC_WARNING_SUPPRESS(deprecated-declarations)
gtk_tree_view_set_rules_hint (GTK_TREE_VIEW (treeview), TRUE);
wxGCC_WARNING_RESTORE()
#endif
/* append columns */
gtk_assert_dialog_append_text_column(treeview, "#", STACKFRAME_LEVEL_COLIDX);
gtk_assert_dialog_append_text_column(treeview, "Function Prototype", FUNCTION_PROTOTYPE_COLIDX);
gtk_assert_dialog_append_text_column(treeview, "Source file", SOURCE_FILE_COLIDX);
gtk_assert_dialog_append_text_column(treeview, "Line #", LINE_NUMBER_COLIDX);
return treeview;
}
static
void gtk_assert_dialog_process_backtrace (GtkAssertDialog *dlg)
{
/* Runs the (potentially slow) user-supplied stack-walk callback while
   showing a busy (watch) cursor over the dialog's window, then restores
   the default cursor. The callback is expected to fill the backtrace
   list via gtk_assert_dialog_append_stack_frame(). */
/* set busy cursor */
GdkWindow *parent = gtk_widget_get_window(GTK_WIDGET(dlg));
GdkDisplay* display = gdk_window_get_display(parent);
GdkCursor* cur = gdk_cursor_new_for_display(display, GDK_WATCH);
gdk_window_set_cursor (parent, cur);
/* make sure the cursor change is visible before the long-running callback */
gdk_flush ();
(*dlg->callback)(dlg->userdata);
/* restore the default cursor (NULL means "inherit from parent") */
gdk_window_set_cursor (parent, NULL);
#ifdef __WXGTK3__
g_object_unref(cur);
#else
gdk_cursor_unref (cur);
#endif
}
extern "C" {
/* ----------------------------------------------------------------------------
   GtkAssertDialog signal handlers
   ---------------------------------------------------------------------------- */

/* Called when the "Backtrace" expander is toggled.
   The dialog is resizable only while the backtrace is shown; the first time
   the expander is activated the (possibly expensive) stack-walk callback is
   run to fill the backtrace list, then cleared so it runs only once. */
static void gtk_assert_dialog_expander_callback(GtkWidget*, GtkAssertDialog* dlg)
{
    /* status is not yet updated so we need to invert it to get the new one */
    gboolean expanded = !gtk_expander_get_expanded (GTK_EXPANDER(dlg->expander));
    gtk_window_set_resizable (GTK_WINDOW (dlg), expanded);

    if (dlg->callback == NULL)      /* was the backtrace already processed? */
        return;

    gtk_assert_dialog_process_backtrace (dlg);

    /* mark the work as done (so that next activate we won't call the callback again) */
    dlg->callback = NULL;
}

/* Called by the "Save to file" button: asks the user for a file name and
   writes the assert message plus the backtrace into it. */
static void gtk_assert_dialog_save_backtrace_callback(GtkWidget*, GtkAssertDialog* dlg)
{
    GtkWidget *dialog;

    dialog = gtk_file_chooser_dialog_new ("Save assert info to file", GTK_WINDOW(dlg),
                                          GTK_FILE_CHOOSER_ACTION_SAVE,
                                          static_cast<const char*>(wxConvertMnemonicsToGTK(wxGetStockLabel(wxID_CANCEL)).utf8_str()), GTK_RESPONSE_CANCEL,
                                          static_cast<const char*>(wxConvertMnemonicsToGTK(wxGetStockLabel(wxID_SAVE)).utf8_str()), GTK_RESPONSE_ACCEPT,
                                          NULL);

    if (gtk_dialog_run (GTK_DIALOG (dialog)) == GTK_RESPONSE_ACCEPT)
    {
        char *filename, *msg, *backtrace;
        FILE *fp;

        filename = gtk_file_chooser_get_filename (GTK_FILE_CHOOSER (dialog));
        if ( filename )
        {
            msg = gtk_assert_dialog_get_message (dlg);
            backtrace = gtk_assert_dialog_get_backtrace (dlg);

            /* open the file and write all info inside it.
               NB: gtk_assert_dialog_get_backtrace() returns NULL when the
               backtrace list is empty and passing NULL to "%s" is undefined
               behaviour, so substitute an empty string in that case. */
            fp = fopen (filename, "w");
            if (fp)
            {
                fprintf (fp, "ASSERT INFO:\n%s\n\nBACKTRACE:\n%s",
                         msg ? msg : "", backtrace ? backtrace : "");
                fclose (fp);
            }

            g_free (filename);
            g_free (msg);           /* g_free(NULL) is a documented no-op */
            g_free (backtrace);
        }
    }

    gtk_widget_destroy (dialog);
}

/* Called by the "Copy to clipboard" button: puts the assert message and the
   backtrace into both the CLIPBOARD and the PRIMARY selections. */
static void gtk_assert_dialog_copy_callback(GtkWidget*, GtkAssertDialog* dlg)
{
    char *msg, *backtrace;
    GtkClipboard *clipboard;
    GString *str;

    msg = gtk_assert_dialog_get_message (dlg);
    backtrace = gtk_assert_dialog_get_backtrace (dlg);

    /* combine both in a single string; guard against a NULL backtrace
       (returned by gtk_assert_dialog_get_backtrace() when the list is empty) */
    str = g_string_new("");
    g_string_printf (str, "ASSERT INFO:\n%s\n\nBACKTRACE:\n%s\n\n",
                     msg ? msg : "", backtrace ? backtrace : "");

    /* copy everything in default clipboard */
    clipboard = gtk_clipboard_get (GDK_SELECTION_CLIPBOARD);
    gtk_clipboard_set_text (clipboard, str->str, str->len);

    /* copy everything in primary clipboard too */
    clipboard = gtk_clipboard_get (GDK_SELECTION_PRIMARY);
    gtk_clipboard_set_text (clipboard, str->str, str->len);

    g_free (msg);
    g_free (backtrace);
    g_string_free (str, TRUE);
}
} // extern "C"
#endif // wxUSE_STACKWALKER
extern "C" {
/* "Continue" button handler: ends the dialog run with either plain CONTINUE
   or CONTINUE_SUPPRESSING, depending on whether the "show this dialog the
   next time" checkbox is still ticked. */
static void gtk_assert_dialog_continue_callback(GtkWidget*, GtkAssertDialog* dlg)
{
    gint response;
    if (gtk_toggle_button_get_active (GTK_TOGGLE_BUTTON(dlg->shownexttime)))
        response = GTK_ASSERT_DIALOG_CONTINUE;
    else
        response = GTK_ASSERT_DIALOG_CONTINUE_SUPPRESSING;

    gtk_dialog_response (GTK_DIALOG(dlg), response);
}
} // extern "C"
/* ----------------------------------------------------------------------------
GtkAssertDialogClass implementation
---------------------------------------------------------------------------- */
extern "C" {
#if GTK_CHECK_VERSION(3,10,0)
static void gtk_assert_dialog_class_init(gpointer g_class, void*);
#endif // GTK+ >= 3.10
static void gtk_assert_dialog_init(GTypeInstance* instance, void*);
}
GType gtk_assert_dialog_get_type()
{
/* Lazily registers GtkAssertDialog with the GObject type system (as a
   subclass of GtkDialog) and returns its GType. Thereafter the cached
   value is returned; registration happens at most once per process. */
static GType assert_dialog_type;
if (!assert_dialog_type)
{
const GTypeInfo assert_dialog_info =
{
sizeof (GtkAssertDialogClass),
NULL, /* base_init */
NULL, /* base_finalize */
#if GTK_CHECK_VERSION(3,10,0)
gtk_assert_dialog_class_init, /* class init */
#else
NULL,
#endif // GTK+ >= 3.10 / < 3.10
NULL, /* class_finalize */
NULL, /* class_data */
sizeof (GtkAssertDialog),
16, /* n_preallocs */
gtk_assert_dialog_init,
NULL
};
assert_dialog_type = g_type_register_static (GTK_TYPE_DIALOG, "GtkAssertDialog", &assert_dialog_info, (GTypeFlags)0);
}
return assert_dialog_type;
}
extern "C" {
// For GTK+ >= 3.10, Composite Widget Templates are used to define composite widgets.
#if GTK_CHECK_VERSION(3,10,0)
static void gtk_assert_dialog_class_init(gpointer g_class, void*)
{
// Runtime check in addition to the compile-time one: the template API is
// used only when the GTK+ we are actually running against is >= 3.10.
if (gtk_check_version(3,10,0) == NULL)
{
// GtkBuilder XML to be bound to the dialog class data
// (Composite Widget Template). Widget ids here must match the members
// bound with gtk_widget_class_bind_template_child() below, and signal
// handler names must match the callbacks bound further down.
static const char dlgTempl[] =
"<interface>"
// Backing model for the backtrace tree view; the column order mirrors
// the *_COLIDX constants at the top of this file.
"<object class='GtkListStore' id='backtrace_list_store'>"
"<columns>"
"<!-- column-name column_index -->"
"<column type='gint'/>"
"<!-- column-name column_func_prototype -->"
"<column type='gchararray'/>"
"<!-- column-name column_src_file -->"
"<column type='gchararray'/>"
"<!-- column-name column_line_no -->"
"<column type='gchararray'/>"
"</columns>"
"</object>"
// Icons shown inside the action buttons.
"<object class='GtkImage' id='imageBtnContinue'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='icon_name'>go-next</property>"
"</object>"
"<object class='GtkImage' id='imageBtnCopy'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='icon_name'>edit-copy</property>"
"</object>"
"<object class='GtkImage' id='imageBtnSave'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='icon_name'>document-save</property>"
"</object>"
"<object class='GtkImage' id='imageBtnStop'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='icon_name'>application-exit</property>"
"</object>"
"<template class='GtkAssertDialog' parent='GtkDialog'>"
"<property name='can_focus'>False</property>"
"<property name='resizable'>False</property>"
"<property name='type_hint'>dialog</property>"
"<child internal-child='vbox'>"
"<object class='GtkBox' id='dialog_vbox'>"
"<property name='can_focus'>False</property>"
"<property name='orientation'>vertical</property>"
"<property name='spacing'>2</property>"
// Action area: "show next time" checkbox plus Stop/Continue buttons.
"<child internal-child='action_area'>"
"<object class='GtkButtonBox' id='dialog_buttons'>"
"<property name='can_focus'>False</property>"
"<property name='layout_style'>end</property>"
"<child>"
"<object class='GtkCheckButton' id='shownexttime'>"
"<property name='label' translatable='yes'>Show this _dialog the next time</property>"
"<property name='visible'>True</property>"
"<property name='can_focus'>True</property>"
"<property name='receives_default'>False</property>"
"<property name='use_underline'>True</property>"
"<property name='xalign'>0.5</property>"
"<property name='active'>True</property>"
"<property name='draw_indicator'>True</property>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>False</property>"
"<property name='padding'>8</property>"
"<property name='pack_type'>end</property>"
"<property name='position'>0</property>"
"</packing>"
"</child>"
"<child>"
"<object class='GtkButton' id='button_stop'>"
"<property name='label' translatable='yes'>_Stop</property>"
"<property name='visible'>True</property>"
"<property name='can_focus'>True</property>"
"<property name='receives_default'>True</property>"
"<property name='image'>imageBtnStop</property>"
"<property name='use_underline'>True</property>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='position'>1</property>"
"</packing>"
"</child>"
"<child>"
"<object class='GtkButton' id='button_continue'>"
"<property name='label' translatable='yes'>_Continue</property>"
"<property name='visible'>True</property>"
"<property name='can_focus'>True</property>"
"<property name='can_default'>True</property>"
"<property name='has_default'>True</property>"
"<property name='receives_default'>True</property>"
"<property name='image'>imageBtnContinue</property>"
"<property name='use_underline'>True</property>"
"<signal name='clicked' handler='gtk_assert_dialog_continue_callback' swapped='no'/>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='position'>2</property>"
"</packing>"
"</child>"
"</object>"
"<packing>"
"<property name='expand'>False</property>"
"<property name='fill'>False</property>"
"<property name='position'>0</property>"
"</packing>"
"</child>"
// Main content: error icon beside the "assertion failed" info and the
// selectable assert message label.
"<child>"
"<object class='GtkBox' id='vbox'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='orientation'>vertical</property>"
"<child>"
"<object class='GtkBox' id='hbox'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='border_width'>8</property>"
"<child>"
"<object class='GtkImage' id='image'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='icon_name'>dialog-error</property>"
"<property name='icon_size'>6</property>"
"</object>"
"<packing>"
"<property name='expand'>False</property>"
"<property name='fill'>False</property>"
"<property name='padding'>12</property>"
"<property name='position'>0</property>"
"</packing>"
"</child>"
"<child>"
"<object class='GtkBox' id='vbox2'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='orientation'>vertical</property>"
"<child>"
"<object class='GtkLabel' id='info'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='label' translatable='yes'>An assertion failed!</property>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='padding'>8</property>"
"<property name='position'>0</property>"
"</packing>"
"</child>"
"<child>"
"<object class='GtkLabel' id='message'>"
"<property name='width_request'>450</property>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='wrap'>True</property>"
"<property name='selectable'>True</property>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='padding'>8</property>"
"<property name='pack_type'>end</property>"
"<property name='position'>1</property>"
"</packing>"
"</child>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='position'>1</property>"
"</packing>"
"</child>"
"</object>"
"<packing>"
"<property name='expand'>False</property>"
"<property name='fill'>False</property>"
"<property name='position'>0</property>"
"</packing>"
"</child>"
#if wxUSE_STACKWALKER // expander is needed only if backtrace is enabled
"<child>"
"<object class='GtkExpander' id='expander'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>True</property>"
"<signal name='activate' handler='gtk_assert_dialog_expander_callback' swapped='no'/>"
"<child>"
"<object class='GtkBox' id='vbox_exp'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='orientation'>vertical</property>"
"<child>"
"<object class='GtkScrolledWindow' id='sw'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>True</property>"
"<property name='shadow_type'>etched-in</property>"
"<property name='min-content-height'>180</property>"
"<child>"
"<object class='GtkTreeView' id='treeview'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>True</property>"
"<property name='model'>backtrace_list_store</property>"
"<child internal-child='selection'>"
"<object class='GtkTreeSelection' id='treeview-selection'/>"
"</child>"
"<child>"
"<object class='GtkTreeViewColumn' id='column_index'>"
"<property name='resizable'>True</property>"
"<property name='spacing'>4</property>"
"<property name='title' translatable='yes'>#</property>"
"<property name='reorderable'>True</property>"
"<child>"
"<object class='GtkCellRendererText' id='index_renderer'/>"
"<attributes>"
"<attribute name='text'>0</attribute>"
"</attributes>"
"</child>"
"</object>"
"</child>"
"<child>"
"<object class='GtkTreeViewColumn' id='column_func_prototype'>"
"<property name='resizable'>True</property>"
"<property name='spacing'>4</property>"
"<property name='title' translatable='yes'>Function Prototype</property>"
"<property name='reorderable'>True</property>"
"<child>"
"<object class='GtkCellRendererText' id='function_renderer'/>"
"<attributes>"
"<attribute name='text'>1</attribute>"
"</attributes>"
"</child>"
"</object>"
"</child>"
"<child>"
"<object class='GtkTreeViewColumn' id='column_src_file'>"
"<property name='resizable'>True</property>"
"<property name='spacing'>4</property>"
"<property name='title' translatable='yes'>Source file</property>"
"<property name='reorderable'>True</property>"
"<child>"
"<object class='GtkCellRendererText' id='src_file_renderer'/>"
"<attributes>"
"<attribute name='text'>2</attribute>"
"</attributes>"
"</child>"
"</object>"
"</child>"
"<child>"
"<object class='GtkTreeViewColumn' id='column_line_no'>"
"<property name='resizable'>True</property>"
"<property name='spacing'>4</property>"
"<property name='title' translatable='yes'>Line #</property>"
"<property name='reorderable'>True</property>"
"<child>"
"<object class='GtkCellRendererText' id='line_no_renderer'/>"
"<attributes>"
"<attribute name='text'>3</attribute>"
"</attributes>"
"</child>"
"</object>"
"</child>"
"</object>"
"</child>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='padding'>8</property>"
"<property name='position'>0</property>"
"</packing>"
"</child>"
"<child>"
"<object class='GtkButtonBox' id='buttonbox_exp'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='layout_style'>end</property>"
"<child>"
"<object class='GtkButton' id='button_save'>"
"<property name='label' translatable='yes'>Save to _file</property>"
"<property name='visible'>True</property>"
"<property name='can_focus'>True</property>"
"<property name='receives_default'>True</property>"
"<property name='image'>imageBtnSave</property>"
"<property name='use_underline'>True</property>"
"<signal name='clicked' handler='gtk_assert_dialog_save_backtrace_callback' swapped='no'/>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='position'>0</property>"
"</packing>"
"</child>"
"<child>"
"<object class='GtkButton' id='button_copy'>"
"<property name='label' translatable='yes'>Copy to clip_board</property>"
"<property name='visible'>True</property>"
"<property name='can_focus'>True</property>"
"<property name='receives_default'>True</property>"
"<property name='image'>imageBtnCopy</property>"
"<property name='use_underline'>True</property>"
"<signal name='clicked' handler='gtk_assert_dialog_copy_callback' swapped='no'/>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='position'>1</property>"
"</packing>"
"</child>"
"</object>"
"<packing>"
"<property name='expand'>False</property>"
"<property name='fill'>True</property>"
"<property name='pack_type'>end</property>"
"<property name='position'>1</property>"
"</packing>"
"</child>"
"</object>"
"</child>"
"<child type='label'>"
"<object class='GtkLabel' id='label_exp'>"
"<property name='visible'>True</property>"
"<property name='can_focus'>False</property>"
"<property name='label' translatable='yes'>Back_trace:</property>"
"<property name='use_underline'>True</property>"
"</object>"
"</child>"
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='position'>1</property>"
"</packing>"
"</child>"
#endif // wxUSE_STACKWALKER
"</object>"
"<packing>"
"<property name='expand'>True</property>"
"<property name='fill'>True</property>"
"<property name='padding'>5</property>"
"<property name='position'>1</property>"
"</packing>"
"</child>"
"</object>"
"</child>"
// Map the action-area buttons to the numeric response codes asserted below.
"<action-widgets>"
"<action-widget response='0'>button_stop</action-widget>"
"<action-widget response='1'>button_continue</action-widget>"
"</action-widgets>"
"</template>"
"</interface>";
// Verify numeric values of response codes hard-coded in the XML
wxASSERT(GTK_ASSERT_DIALOG_STOP == 0);
wxASSERT(GTK_ASSERT_DIALOG_CONTINUE == 1);
GtkWidgetClass* widgetClass = GTK_WIDGET_CLASS(g_class);
// g_bytes_new_static: the template is a string literal, so no copy is needed.
GBytes* templBytes = g_bytes_new_static(dlgTempl, sizeof(dlgTempl)-1);
gtk_widget_class_set_template(widgetClass, templBytes);
// Define the relationship of the entries in GtkAssertDialog and entries defined in the XML
gtk_widget_class_bind_template_child(widgetClass, GtkAssertDialog, message);
#if wxUSE_STACKWALKER
gtk_widget_class_bind_template_child(widgetClass, GtkAssertDialog, expander);
gtk_widget_class_bind_template_child(widgetClass, GtkAssertDialog, treeview);
#endif // wxUSE_STACKWALKER
gtk_widget_class_bind_template_child(widgetClass, GtkAssertDialog, shownexttime);
// Bind <signal> connections defined in the GtkBuilder XML
// with callbacks exposed by GtkAssertDialog.
#if wxUSE_STACKWALKER
gtk_widget_class_bind_template_callback(widgetClass, gtk_assert_dialog_expander_callback);
gtk_widget_class_bind_template_callback(widgetClass, gtk_assert_dialog_save_backtrace_callback);
gtk_widget_class_bind_template_callback(widgetClass, gtk_assert_dialog_copy_callback);
#endif // wxUSE_STACKWALKER
gtk_widget_class_bind_template_callback(widgetClass, gtk_assert_dialog_continue_callback);
}
}
#endif // GTK+ >= 3.10
static void gtk_assert_dialog_init(GTypeInstance* instance, void*)
{
// For GTK+ >= 3.10 create and initialize the dialog from the already assigned template
// or create the dialog "manually" otherwise.
#if GTK_CHECK_VERSION(3,10,0)
if (gtk_check_version(3,10,0) == NULL)
{
GtkAssertDialog* dlg = GTK_ASSERT_DIALOG(instance);
// instantiate all children declared in the class template and bind them
// to the members registered in gtk_assert_dialog_class_init()
gtk_widget_init_template(GTK_WIDGET(dlg));
/* complete creation */
dlg->callback = NULL;
dlg->userdata = NULL;
}
else
#endif // GTK+ >= 3.10
{
GtkAssertDialog* dlg = GTK_ASSERT_DIALOG(instance);
GtkWidget *continuebtn;
// This code is called only for GTK+ < 3.10
{
GtkWidget *vbox, *hbox, *image;
/* start the main vbox */
wxGCC_WARNING_SUPPRESS(deprecated-declarations)
gtk_widget_push_composite_child ();
wxGCC_WARNING_RESTORE()
vbox = gtk_box_new(GTK_ORIENTATION_VERTICAL, 8);
gtk_container_set_border_width (GTK_CONTAINER(vbox), 8);
gtk_box_pack_start(GTK_BOX(gtk_dialog_get_content_area(GTK_DIALOG(dlg))), vbox, true, true, 5);
/* add the icon+message hbox */
hbox = gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 0);
gtk_box_pack_start (GTK_BOX(vbox), hbox, FALSE, FALSE, 0);
/* icon */
wxGCC_WARNING_SUPPRESS(deprecated-declarations)
image = gtk_image_new_from_stock("gtk-dialog-error", GTK_ICON_SIZE_DIALOG);
wxGCC_WARNING_RESTORE()
gtk_box_pack_start (GTK_BOX(hbox), image, FALSE, FALSE, 12);
{
GtkWidget *vbox2, *info;
/* message */
vbox2 = gtk_box_new(GTK_ORIENTATION_VERTICAL, 0);
gtk_box_pack_start (GTK_BOX (hbox), vbox2, TRUE, TRUE, 0);
info = gtk_label_new ("An assertion failed!");
gtk_box_pack_start (GTK_BOX(vbox2), info, TRUE, TRUE, 8);
/* assert message: filled in later by gtk_assert_dialog_set_message() */
dlg->message = gtk_label_new (NULL);
gtk_label_set_selectable (GTK_LABEL (dlg->message), TRUE);
gtk_label_set_line_wrap (GTK_LABEL (dlg->message), TRUE);
gtk_label_set_justify (GTK_LABEL (dlg->message), GTK_JUSTIFY_LEFT);
gtk_widget_set_size_request (GTK_WIDGET(dlg->message), 450, -1);
gtk_box_pack_end (GTK_BOX(vbox2), GTK_WIDGET(dlg->message), TRUE, TRUE, 8);
}
#if wxUSE_STACKWALKER
/* add the expander */
dlg->expander = gtk_expander_new_with_mnemonic ("Back_trace:");
gtk_box_pack_start (GTK_BOX(vbox), dlg->expander, TRUE, TRUE, 0);
g_signal_connect (dlg->expander, "activate",
G_CALLBACK(gtk_assert_dialog_expander_callback), dlg);
#endif // wxUSE_STACKWALKER
}
#if wxUSE_STACKWALKER
{
GtkWidget *hbox, *vbox, *button, *sw;
/* create expander's vbox */
vbox = gtk_box_new(GTK_ORIENTATION_VERTICAL, 0);
gtk_container_add (GTK_CONTAINER (dlg->expander), vbox);
/* add a scrollable window under the expander */
sw = gtk_scrolled_window_new (NULL, NULL);
gtk_scrolled_window_set_shadow_type (GTK_SCROLLED_WINDOW (sw), GTK_SHADOW_ETCHED_IN);
gtk_scrolled_window_set_policy (GTK_SCROLLED_WINDOW (sw), GTK_POLICY_AUTOMATIC,
GTK_POLICY_AUTOMATIC);
gtk_widget_set_size_request(GTK_WIDGET(sw), -1, 180);
gtk_box_pack_start (GTK_BOX(vbox), sw, TRUE, TRUE, 8);
/* add the treeview to the scrollable window */
dlg->treeview = gtk_assert_dialog_create_backtrace_list_model ();
gtk_container_add (GTK_CONTAINER (sw), dlg->treeview);
/* create button's hbox */
hbox = gtk_button_box_new(GTK_ORIENTATION_HORIZONTAL);
gtk_box_pack_end (GTK_BOX(vbox), hbox, FALSE, FALSE, 0);
gtk_button_box_set_layout (GTK_BUTTON_BOX(hbox), GTK_BUTTONBOX_END);
/* add the buttons */
button = gtk_assert_dialog_add_button_to(GTK_BOX(hbox), "Save to _file", "gtk-save");
g_signal_connect (button, "clicked",
G_CALLBACK(gtk_assert_dialog_save_backtrace_callback), dlg);
button = gtk_assert_dialog_add_button_to(GTK_BOX(hbox), "Copy to clip_board", "gtk-copy");
g_signal_connect (button, "clicked", G_CALLBACK(gtk_assert_dialog_copy_callback), dlg);
}
#endif // wxUSE_STACKWALKER
/* add the checkbutton */
dlg->shownexttime = gtk_check_button_new_with_mnemonic("Show this _dialog the next time");
gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON(dlg->shownexttime), TRUE);
wxGCC_WARNING_SUPPRESS(deprecated-declarations)
gtk_box_pack_end(GTK_BOX(gtk_dialog_get_action_area(GTK_DIALOG(dlg))), dlg->shownexttime, false, true, 8);
wxGCC_WARNING_RESTORE()
/* add the stop button */
gtk_assert_dialog_add_button(dlg, "_Stop", "gtk-quit", GTK_ASSERT_DIALOG_STOP);
/* add the continue button (the default response of the dialog) */
continuebtn = gtk_assert_dialog_add_button(dlg, "_Continue", "gtk-yes", GTK_ASSERT_DIALOG_CONTINUE);
gtk_dialog_set_default_response (GTK_DIALOG (dlg), GTK_ASSERT_DIALOG_CONTINUE);
g_signal_connect (continuebtn, "clicked", G_CALLBACK(gtk_assert_dialog_continue_callback), dlg);
/* complete creation */
dlg->callback = NULL;
dlg->userdata = NULL;
/* the resizable property of this window is modified by the expander:
when it's collapsed, the window must be non-resizable! */
gtk_window_set_resizable (GTK_WINDOW (dlg), FALSE);
wxGCC_WARNING_SUPPRESS(deprecated-declarations)
gtk_widget_pop_composite_child ();
wxGCC_WARNING_RESTORE()
gtk_widget_show_all (GTK_WIDGET(dlg));
}
}
}
/* ----------------------------------------------------------------------------
GtkAssertDialog public API
---------------------------------------------------------------------------- */
gchar *gtk_assert_dialog_get_message (GtkAssertDialog *dlg)
{
    /* Return a newly allocated copy of the assert message currently shown in
       the dialog; the caller must release it with g_free().  Any Pango markup
       set on the label is already stripped, since gtk_label_get_text()
       returns the plain text. */
    const gchar* plainText = gtk_label_get_text (GTK_LABEL(dlg->message));
    return g_strdup (plainText);
}
#if wxUSE_STACKWALKER
gchar *gtk_assert_dialog_get_backtrace (GtkAssertDialog *dlg)
{
/* Builds a plain-text dump of the backtrace list, one stack frame per line
   in the form "[level] prototype file:line" (file and line are omitted when
   empty).  Returns a newly allocated string that the caller must release
   with g_free(), or NULL when the backtrace list is empty. */
gchar *function, *sourcefile, *linenum;
guint count;
GtkTreeModel *model;
GtkTreeIter iter;
GString *string;
g_return_val_if_fail (GTK_IS_ASSERT_DIALOG (dlg), NULL);
model = gtk_tree_view_get_model (GTK_TREE_VIEW(dlg->treeview));
/* iterate over the list; bail out with NULL if it is empty */
if (!gtk_tree_model_get_iter_first (model, &iter))
return NULL;
string = g_string_new("");
do
{
/* append this stack frame's info to the string
   (gtk_tree_model_get() returns newly allocated copies of the strings) */
gtk_tree_model_get(model, &iter,
STACKFRAME_LEVEL_COLIDX, &count,
FUNCTION_PROTOTYPE_COLIDX, &function,
SOURCE_FILE_COLIDX, &sourcefile,
LINE_NUMBER_COLIDX, &linenum,
-1);
g_string_append_printf(string, "[%u] %s",
count, function);
if (sourcefile[0] != '\0')
g_string_append_printf (string, " %s", sourcefile);
if (linenum[0] != '\0')
g_string_append_printf (string, ":%s", linenum);
g_string_append (string, "\n");
g_free (function);
g_free (sourcefile);
g_free (linenum);
} while (gtk_tree_model_iter_next (model, &iter));
/* the returned string must be g_free()'d by the caller;
   FALSE keeps the character data alive while freeing the GString wrapper */
return g_string_free (string, FALSE);
}
#endif // wxUSE_STACKWALKER
void gtk_assert_dialog_set_message(GtkAssertDialog *dlg, const gchar *msg)
{
    g_return_if_fail (GTK_IS_ASSERT_DIALOG (dlg));

    /* Show "msg" in bold.  The text is escaped first so that characters
       special to Pango markup (e.g. '<' or '&') survive intact; the <b>
       wrapper is then added by hand because g_markup_printf_escaped() is
       only available for glib >= 2.4. */
    gchar *escaped = g_markup_escape_text (msg, -1);
    gchar *bold = g_strdup_printf ("<b>%s</b>", escaped);

    gtk_label_set_markup (GTK_LABEL(dlg->message), bold);

    g_free (bold);
    g_free (escaped);
}
#if wxUSE_STACKWALKER
void gtk_assert_dialog_set_backtrace_callback(GtkAssertDialog *assertdlg,
                                              GtkAssertDialogStackFrameCallback callback,
                                              void *userdata)
{
    /* Remember the stack-walking function (and its opaque argument) so it can
       be invoked lazily, the first time the "Backtrace" expander is opened. */
    assertdlg->userdata = userdata;
    assertdlg->callback = callback;
}
void gtk_assert_dialog_append_stack_frame(GtkAssertDialog *dlg,
                                          const gchar *function,
                                          const gchar *sourcefile,
                                          guint line_number)
{
    /* Appends one stack frame (prototype, source file, line number) as a new
       row of the backtrace list; frames are numbered starting from 1. */
    g_return_if_fail (GTK_IS_ASSERT_DIALOG (dlg));

    GtkTreeModel* model = gtk_tree_view_get_model (GTK_TREE_VIEW(dlg->treeview));

    /* number this frame after the ones already in the list */
    gint numRows = gtk_tree_model_iter_n_children (model, NULL);

    /* an unknown line number (0) is shown as an empty cell */
    GString* lineText = g_string_new("");
    if ( line_number != 0 )
        g_string_printf (lineText, "%u", line_number);

    /* append a new row and fill in its columns */
    GtkTreeIter iter;
    gtk_list_store_append (GTK_LIST_STORE(model), &iter);
    gtk_list_store_set (GTK_LIST_STORE(model), &iter,
                        STACKFRAME_LEVEL_COLIDX, numRows+1,  /* 1-based level */
                        FUNCTION_PROTOTYPE_COLIDX, function,
                        SOURCE_FILE_COLIDX, sourcefile,
                        LINE_NUMBER_COLIDX, lineText->str,
                        -1);

    g_string_free (lineText, TRUE);
}
#endif // wxUSE_STACKWALKER
GtkWidget *gtk_assert_dialog_new(void)
{
    /* Instantiate the dialog through the GObject type system so that
       gtk_assert_dialog_init() (and, on GTK+ >= 3.10, the composite template
       machinery) performs all the setup. */
    return GTK_WIDGET (g_object_new (GTK_TYPE_ASSERT_DIALOG, NULL));
}
#endif // wxDEBUG_LEVEL
|
ric2b/Vivaldi-browser
|
update_notifier/thirdparty/wxWidgets/src/gtk/assertdlg_gtk.cpp
|
C++
|
bsd-3-clause
| 44,230
|
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:i18n="http://genshi.edgewall.org/i18n"
i18n:domain="acct_mgr">
<xi:include href="layout.html" />
<?python
if _dgettext is not None:
dgettext = _dgettext ?>
<head>
<title>Login</title>
<script type="text/javascript">
jQuery(document).ready(function($) {
$("body").addClass("login");
$('#user')[0].focus();
});
</script>
</head>
<body>
<div id="content" class="login">
<h1>Login</h1>
<form method="post" id="acctmgr_loginform" action="">
<div>
<input type="hidden" name="referer" value="${referer}" />
</div>
<div class="textbox">
<label for="user">Username:</label><br />
<input type="text" id="user" name="user" class="textwidget"
size="20" />
</div>
<div class="textbox">
<label for="password">Password:</label><br />
<input type="password" id="password" name="password"
class="textwidget" size="20" />
</div>
<div id="login_options" class="central"
py:if="reset_password_enabled and login_opt_list != True">
<p class="hint">
<a href="${href.reset_password()}">Forgot your password?</a>
</p>
</div>
<div class="textbox" py:if="persistent_sessions">
<input type="checkbox" id="rememberme" name="rememberme"
value="1" />
<label for="rememberme">Remember me</label>
</div>
<div id="login_options" class="buttons central nav">
<input type="submit" value="${dgettext('acct_mgr', 'Login')}" />
<ul py:if="login_opt_list == True">
<py:choose py:if="registration_enabled">
<li class="first" py:when="reset_password_enabled">
<a href="${href.register()}">Register</a>
</li>
<li class="first last" py:otherwise="">
<a href="${href.register()}">Register</a>
</li>
</py:choose>
<py:choose py:if="reset_password_enabled">
<li class="last" py:when="registration_enabled">
<a href="${href.reset_password()}">Forgot your password?</a>
</li>
<li class="first last" py:otherwise="">
<a href="${href.reset_password()}">Forgot your password?</a>
</li>
</py:choose>
</ul>
</div>
</form>
</div>
<div class="central system-message" py:if="login_error">
<h2>Error</h2>
<p>${login_error}</p>
</div>
<div class="spacer" py:if="not login_error"></div>
</body>
</html>
|
marina-lab/docker-trac
|
plugins/accountmanagerplugin/build/lib/acct_mgr/templates/login.html
|
HTML
|
bsd-3-clause
| 2,949
|
#!/usr/bin/env python
import logging
# CUSTOM LOG LEVELS
LOG_LEVEL_TOOL = 25

# ANSI terminal color escape sequences used by ConsoleFormatter below.
TERMINAL_COLOR_BLUE = '\033[94m'
TERMINAL_COLOR_GREEN = '\033[92m'
TERMINAL_COLOR_YELLOW = '\033[93m'
TERMINAL_COLOR_RED = '\033[91m'
TERMINAL_COLOR_END = '\033[0m'


class ConsoleFormatter(logging.Formatter):
    """
    Custom formatter to show logging messages differently on Console.

    Each standard level gets its own colored prefix:
    ERROR -> red "[!]", WARN -> yellow "[*]", DEBUG -> green "[+]",
    INFO -> blue "[-]".  Records with any other level (e.g. LOG_LEVEL_TOOL)
    fall back to the format string the formatter was constructed with.
    """
    error_fmt = TERMINAL_COLOR_RED + "[!] %(message)s" + TERMINAL_COLOR_END
    warn_fmt = TERMINAL_COLOR_YELLOW + "[*] %(message)s" + TERMINAL_COLOR_END
    debug_fmt = TERMINAL_COLOR_GREEN + "[+] %(message)s" + TERMINAL_COLOR_END
    info_fmt = TERMINAL_COLOR_BLUE + "[-] %(message)s" + TERMINAL_COLOR_END

    def format(self, record):
        """Format ``record``, temporarily swapping in the level-specific format.

        :param record: the ``logging.LogRecord`` to render.
        :return: the formatted, color-prefixed message string.
        """
        # Save the original format configured by the user
        # when the logger formatter was instantiated.
        format_orig = self._fmt
        # Replace the original format with one customized by logging level.
        if record.levelno == logging.DEBUG:
            self._fmt = self.debug_fmt
        elif record.levelno == logging.INFO:
            self._fmt = self.info_fmt
        elif record.levelno == logging.ERROR:
            self._fmt = self.error_fmt
        elif record.levelno == logging.WARN:
            self._fmt = self.warn_fmt
        # Bug fix: on Python 3, logging.Formatter delegates formatting to an
        # internal style object, so mutating self._fmt alone has no effect.
        # Keep the style in sync when it exists; Python 2 has no _style
        # attribute and uses self._fmt directly.
        if hasattr(self, '_style'):
            self._style._fmt = self._fmt
        # Call the original formatter class to do the grunt work.
        result = super(ConsoleFormatter, self).format(record)
        # Restore the original format configured by the user.
        self._fmt = format_orig
        if hasattr(self, '_style'):
            self._style._fmt = format_orig
        return result
class FileFormatter(logging.Formatter):
    """
    Custom formatter for log files.

    Renders every record as
    ``[LEVEL] [timestamp] [File 'name.py', line N, in func] - message``.
    """

    def __init__(self, *args, **kwargs):
        """Initialize with the fixed file-log format.

        ``*args``/``**kwargs`` are accepted for signature compatibility but
        ignored — the original also discarded them by calling the base
        constructor with no arguments.
        """
        # Bug fix: the format string must be passed to the base constructor.
        # The previous code assigned self._fmt after the fact, which is
        # ignored on Python 3 where logging.Formatter formats through an
        # internal style object built in __init__.
        super(FileFormatter, self).__init__(
            "[%(levelname)s] [%(asctime)s] "
            "[File '%(filename)s', line %(lineno)s, in %(funcName)s] -"
            " %(message)s")
|
DarKnight24/owtf
|
framework/lib/formatters.py
|
Python
|
bsd-3-clause
| 1,857
|
# -*- coding: utf-8 -*-
"""
website.api
~~~~~~~~~~~
website api blueprint.
"""
|
alibaba/FlexGW
|
website/api/__init__.py
|
Python
|
bsd-3-clause
| 92
|
<html devsite="">
<head>
<title>gcloud version</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta name="project_path" value="/sdk/_project.yaml">
<meta name="book_path" value="/sdk/_book.yaml">
<!--
THIS DOC IS GENERATED. DO NOT EDIT.
-->
<style>
dd { margin-bottom: 1ex; }
.hangingindent { padding-left: 1.5em; text-indent: -1.5em; }
</style>
</head>
<body><dl>
<section>
<dt>NAME</dt>
<dd>gcloud version - print version information for Cloud SDK components</dd>
</section><section>
<dt>SYNOPSIS</dt>
<dd><dl>
<dt class="hangingindent">
<code>gcloud version</code>  <nobr>[<code>--format</code>  <code><var>FORMAT</var></code>]</nobr>  <nobr>[<code>--help</code>]</nobr>  <nobr>[<code>--log-http</code>]</nobr>  <nobr>[<code>--project</code>  <code><var>PROJECT_ID</var></code>]</nobr>  <nobr>[<code>--quiet</code>,  <code>-q</code>]</nobr>  <nobr>[<code>--trace-token</code>  <code><var>TRACE_TOKEN</var></code>]</nobr>  <nobr>[<code>-h</code>]</nobr>
</dt>
</dl></dd>
</section><section>
<dt>DESCRIPTION</dt>
<dd>This command prints version information for each installed Cloud SDK
component and prints a message if updates are available.</dd>
</section><section>
<dt>FLAGS</dt>
<dd><dl>
<dt>
<code>--format</code> <code><var>FORMAT</var></code>
</dt>
<dd>
Specify a format for printed output. By default, a command-specific
human-friendly output format is used. Setting this flag to one of
the available options will serialize the result of the command in
the chosen format and print it to stdout. Supported formats are:
<code>json</code>, <code>text</code>, <code>yaml</code>.
</dd>
<dt>
<code>--help</code>
</dt>
<dd>
Display detailed help.
</dd>
<dt>
<code>--log-http</code>
</dt>
<dd>
Logs all HTTP server requests and responses to stderr.
</dd>
<dt>
<code>--project</code> <code><var>PROJECT_ID</var></code>
</dt>
<dd>
The Google Cloud Platform project name to use for this invocation. If
omitted then the current project is assumed.
</dd>
<dt>
<code>--quiet</code>, <code>-q</code>
</dt>
<dd>
Disable all interactive prompts when running gcloud commands. If input
is required, defaults will be used, or an error will be raised.
</dd>
<dt>
<code>--trace-token</code> <code><var>TRACE_TOKEN</var></code>
</dt>
<dd>
Token used to route traces of service requests for investigation of issues.
</dd>
<dt>
<code>-h</code>
</dt>
<dd>
Print a summary help and exit.
</dd>
</dl></dd>
</section><section>
<dt>NOTES</dt>
<dd>This command is in the Google Cloud SDK <code>core</code> component. See
<a href="/sdk/gcloud/#gcloud.components">installing components</a>
if it is not installed.</dd>
</section>
</dl></body>
</html>
|
ychen820/microblog
|
y/google-cloud-sdk/lib/googlecloudsdk/gcloud/reference/gcloud_version.html
|
HTML
|
bsd-3-clause
| 2,865
|
from cmsplugin_cascade.segmentation.mixins import EmulateUserModelMixin, EmulateUserAdminMixin
from shop.admin.customer import CustomerProxy
class EmulateCustomerModelMixin(EmulateUserModelMixin):
    # Reuse django-cascade's user-emulation segmentation, resolving against
    # the shop's Customer proxy model instead of the plain user model.
    UserModel = CustomerProxy
class EmulateCustomerAdminMixin(EmulateUserAdminMixin):
    # Admin-side counterpart of EmulateCustomerModelMixin: makes the cascade
    # admin emulate customers via the Customer proxy model.
    UserModel = CustomerProxy
|
divio/django-shop
|
shop/cascade/segmentation.py
|
Python
|
bsd-3-clause
| 317
|
<?php if (!defined('IDIR')) { die; }
/*======================================================================*\
|| ####################################################################
|| # vBulletin Impex
|| # ----------------------------------------------------------------
|| # All PHP code in this file is Copyright 2000-2014 vBulletin Solutions Inc.
|| # This code is made available under the Modified BSD License -- see license.txt
|| # http://www.vbulletin.com
|| ####################################################################
\*======================================================================*/
/**
* openBB_001 Check system module
*
* @package ImpEx.openBB
*
*/
/**
 * openBB import module 001: check the source database connection and prepare
 * the target vBulletin database for the import.
 *
 * init() renders the confirmation form; resume() alters the target tables to
 * add import-id columns and then verifies the source database, marking the
 * module FINISHED or FAILED in the session.
 */
class openBB_001 extends openBB_000
{
	/** @var string Module version. */
	var $_version = "0.0.1";

	/** @var string Human-readable module title shown in the ImpEx UI. */
	var $_modulestring = 'Check and update database';

	/**
	 * PHP4-style constructor; intentionally empty.
	 */
	function openBB_001()
	{
	}

	/**
	 * Render the "check database" confirmation form and reset this module's
	 * progress counters in the session.
	 *
	 * @param object $sessionobject ImpEx session state (passed by reference)
	 * @param object $displayobject HTML output helper (passed by reference)
	 * @param object $Db_target    target (vBulletin) database connection
	 * @param object $Db_source    source (openBB) database connection
	 */
	function init(&$sessionobject, &$displayobject, &$Db_target, &$Db_source)
	{
		$displayobject->update_basic('title','Get database information');
		$displayobject->update_html($displayobject->do_form_header('index','001'));
		$displayobject->update_html($displayobject->make_table_header('Get database information'));
		$displayobject->update_html($displayobject->make_hidden_code('database','working'));
		$displayobject->update_html($displayobject->make_description('This module will check the tables in the database as well as the connection.'));
		$displayobject->update_html($displayobject->do_form_footer('Check database',''));
		// Progress counters are keyed by the numeric module suffix ("001").
		$sessionobject->add_session_var(substr(get_class($this) , -3) . '_objects_done', '0');
		$sessionobject->add_session_var(substr(get_class($this) , -3) . '_objects_failed', '0');
	}

	/**
	 * Alter the target tables to add import-id columns, then check the
	 * source database.  On success the module is marked FINISHED; on
	 * failure it is marked FAILED.  Either way control redirects back to
	 * the module dispatcher ('module' => '000').
	 *
	 * @param object $sessionobject ImpEx session state (passed by reference)
	 * @param object $displayobject HTML output helper (passed by reference)
	 * @param object $Db_target    target (vBulletin) database connection
	 * @param object $Db_source    source (openBB) database connection
	 */
	function resume(&$sessionobject, &$displayobject, &$Db_target, &$Db_source)
	{
		// Setup some working variables
		$displayobject->update_basic('displaymodules','FALSE');
		$target_db_type = $sessionobject->get_session_var('targetdatabasetype');
		$target_table_prefix = $sessionobject->get_session_var('targettableprefix');
		$source_db_type = $sessionobject->get_session_var('sourcedatabasetype');
		$source_table_prefix = $sessionobject->get_session_var('sourcetableprefix');
		$class_num = substr(get_class($this) , -3);
		// NOTE(review): $databasedone is assigned but never read below — confirm before removing.
		$databasedone = true;
		// Start the module timer only on the first entry into resume().
		if(!$sessionobject->get_session_var($class_num . '_start'))
		{
			$sessionobject->timing($class_num, 'start' ,$sessionobject->get_session_var('autosubmit'));
		}
		$displayobject->update_basic('title','Modifying database');
		$displayobject->display_now("<h4>Altering tables</h4>");
		$displayobject->display_now("<p>ImpEx will now Alter the tables in the vB database to include <i>import id numbers</i>.</p>");
		$displayobject->display_now("This is needed during the import process for maintaining refrences between the tables during an import.");
		$displayobject->display_now("If you have large tables (i.e. lots of posts) this can take some time.</p>");
		$displayobject->display_now("<p> They will also be left after the import if you need to link back to the origional vB userid.</p>");
		// Add an import-id column to every table listed in $this->_import_ids
		// (defined in a parent class); a failed ALTER is recorded as a fatal error.
		foreach ($this->_import_ids as $id => $table_array)
		{ // Add an importids now
			foreach ($table_array as $tablename => $column)
			{
				if ($this->add_import_id($Db_target, $target_db_type, $target_table_prefix, $tablename, $column))
				{
					$displayobject->display_now("\n<br /><b>$tablename</b> - $column <i>OK</i>");
				}
				else
				{
					$sessionobject->add_error('fatal',
						$this->_modulestring,
						get_class($this) . "::resume failed trying to modify table $tablename to add $column",
						'Check database permissions');
				}
			}
		}
		// Check the database connection
		$result = $this->check_database($Db_source, $source_db_type, $source_table_prefix, $sessionobject->get_session_var('sourceexists'));
		$displayobject->display_now($result['text']);
		if ($result['code'])
		{
			// Success: stop timing, report stats, mark the module FINISHED and
			// hand control back to the dispatcher module ('000').
			$sessionobject->timing($class_num,'stop', $sessionobject->get_session_var('autosubmit'));
			$sessionobject->remove_session_var($class_num . '_start');
			$displayobject->update_html($displayobject->module_finished($this->_modulestring,
				$sessionobject->return_stats($class_num,'_time_taken'),
				$sessionobject->return_stats($class_num,'_objects_done'),
				$sessionobject->return_stats($class_num,'_objects_failed')
			));
			$sessionobject->add_session_var($class_num . '_objects_done',intval($sessionobject->get_session_var($class_num . '_objects_done')) + 1 );
			$sessionobject->set_session_var(substr(get_class($this), -3), 'FINISHED');
			$sessionobject->set_session_var('module','000');
			$displayobject->update_basic('displaymodules','FALSE');
			$displayobject->update_html($displayobject->print_redirect_001('index.php',$sessionobject->get_session_var('pagespeed')));
		}
		else
		{
			// Failure: show the localized failure message, mark the module
			// FAILED and redirect back to the dispatcher.
			$displayobject->update_html($displayobject->make_description("{$displayobject->phrases['failed']} {$displayobject->phrases['check_db_permissions']}"));
			$displayobject->update_html($displayobject->make_hidden_code('pathdata','done'));
			$sessionobject->set_session_var('001','FAILED');
			$sessionobject->set_session_var('module','000');
			$displayobject->update_html($displayobject->print_redirect_001('index.php',$sessionobject->get_session_var('pagespeed')));
		}
	}
}// End class
# Autogenerated on : August 25, 2004, 3:26 pm
# By ImpEx-generator 1.0.
/*======================================================================*/
?>
|
vBZachery/vbimpex
|
systems/openBB/001.php
|
PHP
|
bsd-3-clause
| 5,420
|
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magentocommerce.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magentocommerce.com for more information.
*
* @category Mage
* @package Mage_ImportExport
* @copyright Copyright (c) 2011 Magento Inc. (http://www.magentocommerce.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
 * Import frame result block.
 *
 * Accumulates JavaScript DOM actions and validation messages produced while
 * validating/running an import, and serializes them as a JSON response for
 * the hidden import frame.
 *
 * @category    Mage
 * @package     Mage_ImportExport
 * @author      Magento Core Team <core@magentocommerce.com>
 */
class Mage_ImportExport_Block_Adminhtml_Import_Frame_Result extends Mage_Adminhtml_Block_Template
{
    /**
     * JavaScript actions for response.
     *
     * Top-level keys are action names.  Depending on the action, the nested
     * array is either a plain list of element IDs or an elementID => value map.
     *
     * @var array
     */
    protected $_actions = array(
        'clear'           => array(), // remove element from DOM
        'innerHTML'       => array(), // set innerHTML property (use: elementID => new content)
        'value'           => array(), // set value for form element (use: elementID => new value)
        'show'            => array(), // show specified element
        'hide'            => array(), // hide specified element
        'removeClassName' => array(), // remove specified class name from element
        'addClassName'    => array()  // add specified class name to element
    );

    /**
     * Validation messages, grouped by severity.
     *
     * @var array
     */
    protected $_messages = array(
        'error'   => array(),
        'success' => array(),
        'notice'  => array()
    );

    /**
     * Add action for response.
     *
     * Unknown action names are silently ignored.  When $value is null,
     * $elementId (scalar or array of IDs) is appended to the action's list;
     * otherwise the pair is stored as elementID => value.
     *
     * @param string $actionName
     * @param string|array $elementId
     * @param mixed $value OPTIONAL
     * @return Mage_ImportExport_Block_Adminhtml_Import_Frame_Result
     */
    public function addAction($actionName, $elementId, $value = null)
    {
        if (isset($this->_actions[$actionName])) {
            if (null === $value) {
                if (is_array($elementId)) {
                    foreach ($elementId as $oneId) {
                        $this->_actions[$actionName][] = $oneId;
                    }
                } else {
                    $this->_actions[$actionName][] = $elementId;
                }
            } else {
                $this->_actions[$actionName][$elementId] = $value;
            }
        }
        return $this;
    }

    /**
     * Add error message (or recursively, an array of messages).
     *
     * @param string|array $message Error message
     * @return Mage_ImportExport_Block_Adminhtml_Import_Frame_Result
     */
    public function addError($message)
    {
        if (is_array($message)) {
            foreach ($message as $row) {
                $this->addError($row);
            }
        } else {
            $this->_messages['error'][] = $message;
        }
        return $this;
    }

    /**
     * Add notice message (or recursively, an array of messages).
     *
     * @param mixed $message Message text
     * @param boolean $appendImportButton OPTIONAL Append import button to message?
     * @return Mage_ImportExport_Block_Adminhtml_Import_Frame_Result
     */
    public function addNotice($message, $appendImportButton = false)
    {
        if (is_array($message)) {
            foreach ($message as $row) {
                $this->addNotice($row);
            }
        } else {
            $this->_messages['notice'][] = $message . ($appendImportButton ? $this->getImportButtonHtml() : '');
        }
        return $this;
    }

    /**
     * Add success message (or recursively, an array of messages).
     *
     * @param mixed $message Message text
     * @param boolean $appendImportButton OPTIONAL Append import button to message?
     * @return Mage_ImportExport_Block_Adminhtml_Import_Frame_Result
     */
    public function addSuccess($message, $appendImportButton = false)
    {
        if (is_array($message)) {
            foreach ($message as $row) {
                $this->addSuccess($row);
            }
        } else {
            $this->_messages['success'][] = $message . ($appendImportButton ? $this->getImportButtonHtml() : '');
        }
        return $this;
    }

    /**
     * Import button HTML for append to message.
     *
     * @return string
     */
    public function getImportButtonHtml()
    {
        return ' <button onclick="editForm.startImport(\'' . $this->getImportStartUrl()
            . '\', \'' . Mage_ImportExport_Model_Import::FIELD_NAME_SOURCE_FILE . '\');" class="scalable save"'
            . ' type="button"><span>' . $this->__('Import') . '</span></button>';
    }

    /**
     * Import start action URL.
     *
     * @return string
     */
    public function getImportStartUrl()
    {
        return $this->getUrl('*/*/start');
    }

    /**
     * Messages getter.
     *
     * @return array
     */
    public function getMessages()
    {
        return $this->_messages;
    }

    /**
     * Messages rendered HTML getter.
     *
     * @return string
     */
    public function getMessagesHtml()
    {
        /** @var $messagesBlock Mage_Core_Block_Messages */
        $messagesBlock = $this->_layout->createBlock('core/messages');
        foreach ($this->_messages as $priority => $messages) {
            // PHP method names are case-insensitive, so "adderror" etc.
            // dispatch to addError/addSuccess/addNotice on the messages block.
            $method = "add{$priority}";
            foreach ($messages as $message) {
                $messagesBlock->$method($message);
            }
        }
        return $messagesBlock->toHtml();
    }

    /**
     * Return response as JSON.
     *
     * @return string
     */
    public function getResponseJson()
    {
        // Add messages HTML if it is not already specified.
        // Bug fix: the element ID lives under the 'innerHTML' action map;
        // the former check isset($this->_actions['import_validation_messages'])
        // could never be true (top-level keys are action names), so any
        // previously set messages content was unconditionally overwritten.
        if (!isset($this->_actions['innerHTML']['import_validation_messages'])) {
            $this->addAction('innerHTML', 'import_validation_messages', $this->getMessagesHtml());
        }
        return Mage::helper('core')->jsonEncode($this->_actions);
    }
}
|
5452/durex
|
includes/src/Mage_ImportExport_Block_Adminhtml_Import_Frame_Result.php
|
PHP
|
bsd-3-clause
| 6,325
|
package org.jbehave.core.embedder;
import static java.util.Arrays.asList;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.OutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.jbehave.core.Embeddable;
import org.jbehave.core.InjectableEmbedder;
import org.jbehave.core.annotations.Configure;
import org.jbehave.core.annotations.Given;
import org.jbehave.core.annotations.Then;
import org.jbehave.core.annotations.UsingEmbedder;
import org.jbehave.core.annotations.When;
import org.jbehave.core.configuration.Configuration;
import org.jbehave.core.configuration.MostUsefulConfiguration;
import org.jbehave.core.embedder.Embedder.EmbedderFailureStrategy;
import org.jbehave.core.embedder.Embedder.RunningEmbeddablesFailed;
import org.jbehave.core.embedder.Embedder.RunningStoriesFailed;
import org.jbehave.core.embedder.Embedder.ViewGenerationFailed;
import org.jbehave.core.embedder.StoryRunner.State;
import org.jbehave.core.failures.BatchFailures;
import org.jbehave.core.failures.FailingUponPendingStep;
import org.jbehave.core.io.StoryPathResolver;
import org.jbehave.core.io.UnderscoredCamelCaseResolver;
import org.jbehave.core.junit.AnnotatedEmbedderRunner;
import org.jbehave.core.junit.AnnotatedEmbedderUtils.ClassLoadingFailed;
import org.jbehave.core.junit.JUnitStory;
import org.jbehave.core.junit.JUnitStoryMaps;
import org.jbehave.core.model.Meta;
import org.jbehave.core.model.Story;
import org.jbehave.core.model.StoryMap;
import org.jbehave.core.model.StoryMaps;
import org.jbehave.core.reporters.CrossReference;
import org.jbehave.core.reporters.PrintStreamStepdocReporter;
import org.jbehave.core.reporters.ReportsCount;
import org.jbehave.core.reporters.StoryReporter;
import org.jbehave.core.reporters.StoryReporterBuilder;
import org.jbehave.core.reporters.ViewGenerator;
import org.jbehave.core.steps.CandidateSteps;
import org.jbehave.core.steps.InjectableStepsFactory;
import org.jbehave.core.steps.StepCollector.Stage;
import org.jbehave.core.steps.StepFinder;
import org.jbehave.core.steps.Steps;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Matchers;
import org.mockito.Mockito;
public class EmbedderBehaviour {
    /** Verifies that runAsEmbeddables() instantiates and runs a story-maps embeddable, reporting it to the monitor. */
    @Test
    public void shouldMapStoriesAsEmbeddables() throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myEmbeddableName = MyStoryMaps.class.getName();
        List<String> classNames = asList(myEmbeddableName);
        Embeddable myEmbeddable = new MyStoryMaps();
        List<Embeddable> embeddables = asList(myEmbeddable);
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        when(classLoader.newInstance(Embeddable.class, myEmbeddableName)).thenReturn(myEmbeddable);
        // When
        Configuration configuration = new MostUsefulConfiguration();
        Embedder embedder = embedderWith(runner, new EmbedderControls(), monitor);
        embedder.useClassLoader(classLoader);
        embedder.useConfiguration(configuration);
        embedder.runAsEmbeddables(classNames);
        // Then
        for (Embeddable embeddable : embeddables) {
            assertThat(out.toString(), containsString("Running embeddable " + embeddable.getClass().getName()));
        }
        // MyStoryMaps.run is a static flag flipped by the embeddable when executed.
        assertThat(MyStoryMaps.run, is(true));
    }
    /** Verifies that mapStoriesAsPaths() maps each resolved story against the meta filter and generates the maps view. */
    @SuppressWarnings("unchecked")
    @Test
    public void shouldMapStoriesAsPaths() throws Throwable {
        // Given
        StoryMapper mapper = mock(StoryMapper.class);
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls();
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        Embedder embedder = embedderWith(mapper, runner, embedderControls, monitor);
        Configuration configuration = embedder.configuration();
        List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
        StoryPathResolver resolver = configuration.storyPathResolver();
        List<String> storyPaths = new ArrayList<String>();
        Map<String, Story> stories = new HashMap<String, Story>();
        // Resolve each embeddable class to a story path and stub the runner
        // to hand back a Story for that path.
        for (Class<? extends Embeddable> embeddable : embeddables) {
            String storyPath = resolver.resolve(embeddable);
            storyPaths.add(storyPath);
            Story story = new Story(storyPath);
            stories.put(storyPath, story);
            when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
        }
        // When
        List<StoryMap> maps = asList(new StoryMap("filter", new HashSet<Story>(stories.values())));
        StoryMaps storyMaps = new StoryMaps(maps);
        when(mapper.getStoryMaps()).thenReturn(storyMaps);
        embedder.mapStoriesAsPaths(storyPaths);
        // Then
        for (String storyPath : storyPaths) {
            verify(mapper).map(Matchers.eq(stories.get(storyPath)), Matchers.any(MetaFilter.class));
            assertThat(out.toString(), containsString("Mapping story " + storyPath));
        }
        assertThatMapsViewGenerated(out);
    }
private void assertThatMapsViewGenerated(OutputStream out) {
assertThat(out.toString(), containsString("Generating maps view"));
}
    /** Verifies that runAsEmbeddables() runs every embeddable loaded from the class names. */
    @Test
    public void shouldRunStoriesAsEmbeddables() throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls();
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myEmbeddableName = MyEmbeddable.class.getName();
        String myOtherEmbeddableName = MyOtherEmbeddable.class.getName();
        List<String> classNames = asList(myEmbeddableName, myOtherEmbeddableName);
        Embeddable myEmbeddable = new MyEmbeddable();
        Embeddable myOtherEmbeddable = new MyOtherEmbeddable();
        List<Embeddable> embeddables = asList(myEmbeddable, myOtherEmbeddable);
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        when(classLoader.newInstance(Embeddable.class, myEmbeddableName)).thenReturn(myEmbeddable);
        when(classLoader.newInstance(Embeddable.class, myOtherEmbeddableName)).thenReturn(myOtherEmbeddable);
        // When
        Configuration configuration = new MostUsefulConfiguration();
        CandidateSteps steps = mock(CandidateSteps.class);
        Embedder embedder = embedderWith(runner, embedderControls, monitor);
        embedder.useClassLoader(classLoader);
        embedder.useConfiguration(configuration);
        embedder.useCandidateSteps(asList(steps));
        embedder.runAsEmbeddables(classNames);
        // Then
        for (Embeddable embeddable : embeddables) {
            assertThat(out.toString(), containsString("Running embeddable " + embeddable.getClass().getName()));
        }
    }
private void assertThatReportsViewGenerated(OutputStream out) {
assertThat(out.toString(), containsString("Generating reports view"));
assertThat(out.toString(), containsString("Reports view generated"));
}
    /** Verifies that abstract embeddable classes are skipped while concrete ones still run. */
    @Test
    public void shouldNotRunStoriesAsEmbeddablesIfAbstract() throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls();
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myEmbeddableName = MyAbstractEmbeddable.class.getName();
        String myOtherEmbeddableName = MyOtherEmbeddable.class.getName();
        List<String> classNames = asList(myEmbeddableName, myOtherEmbeddableName);
        Embeddable myEmbeddable = new MyEmbeddable();
        Embeddable myOtherEmbeddable = new MyOtherEmbeddable();
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        // The first class is reported abstract, so the embedder must not instantiate it.
        when(classLoader.isAbstract(myEmbeddableName)).thenReturn(true);
        when(classLoader.newInstance(Embeddable.class, myEmbeddableName)).thenReturn(myEmbeddable);
        when(classLoader.isAbstract(myOtherEmbeddableName)).thenReturn(false);
        when(classLoader.newInstance(Embeddable.class, myOtherEmbeddableName)).thenReturn(myOtherEmbeddable);
        // When
        Embedder embedder = embedderWith(runner, embedderControls, monitor);
        embedder.useClassLoader(classLoader);
        embedder.configuration().useStoryPathResolver(new UnderscoredCamelCaseResolver());
        embedder.runAsEmbeddables(classNames);
        // Then
        assertThat(out.toString(), not(containsString("Running embeddable " + myEmbeddableName)));
        assertThat(out.toString(), containsString("Running embeddable " + myOtherEmbeddableName));
    }
    /** Verifies that no embeddable runs when the EmbedderControls skip flag is set. */
    @Test
    public void shouldNotRunStoriesAsEmbeddablesIfSkipFlagIsSet() throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls().doSkip(true);
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myEmbeddableName = MyEmbeddable.class.getName();
        String myOtherEmbeddableName = MyOtherEmbeddable.class.getName();
        List<String> classNames = asList(myEmbeddableName, myOtherEmbeddableName);
        Embeddable myEmbeddable = new MyEmbeddable();
        Embeddable myOtherEmbeddable = new MyOtherEmbeddable();
        List<Embeddable> embeddables = asList(myEmbeddable, myOtherEmbeddable);
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        when(classLoader.newInstance(Embeddable.class, myEmbeddableName)).thenReturn(myEmbeddable);
        when(classLoader.newInstance(Embeddable.class, myOtherEmbeddableName)).thenReturn(myOtherEmbeddable);
        // When
        Embedder embedder = embedderWith(runner, embedderControls, monitor);
        embedder.useClassLoader(classLoader);
        embedder.configuration().useStoryPathResolver(new UnderscoredCamelCaseResolver());
        embedder.runAsEmbeddables(classNames);
        // Then
        for (Embeddable embeddable : embeddables) {
            assertThat(out.toString(), not(containsString("Running embeddable " + embeddable.getClass().getName())));
        }
    }
    /** Verifies that a failing embeddable propagates RunningEmbeddablesFailed when failures are not ignored. */
    @Test(expected = RunningEmbeddablesFailed.class)
    public void shouldThrowExceptionUponFailingStoriesAsEmbeddablesIfIgnoreFailureInStoriesFlagIsNotSet()
            throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls();
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myEmbeddableName = MyEmbeddable.class.getName();
        String myOtherEmbeddableName = MyOtherEmbeddable.class.getName();
        List<String> classNames = asList(myEmbeddableName, myOtherEmbeddableName);
        // The class name deliberately maps to a failing instance via the mocked class loader.
        Embeddable myEmbeddable = new MyFailingEmbeddable();
        Embeddable myOtherEmbeddable = new MyOtherEmbeddable();
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        when(classLoader.newInstance(Embeddable.class, myEmbeddableName)).thenReturn(myEmbeddable);
        when(classLoader.newInstance(Embeddable.class, myOtherEmbeddableName)).thenReturn(myOtherEmbeddable);
        // When
        Embedder embedder = embedderWith(runner, embedderControls, monitor);
        embedder.useClassLoader(classLoader);
        embedder.runAsEmbeddables(classNames);
        // Then fail as expected
    }
    /** Verifies that failures are reported but not thrown when both ignore-failure flags are set. */
    @Test
    public void shouldNotThrowExceptionUponFailingStoriesAsEmbeddablesIfIgnoreFailureFlagsAreSet() throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls().doIgnoreFailureInStories(true)
                .doIgnoreFailureInView(true);
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myEmbeddableName = MyFailingEmbeddable.class.getName();
        String myOtherEmbeddableName = MyOtherEmbeddable.class.getName();
        List<String> classNames = asList(myEmbeddableName, myOtherEmbeddableName);
        Embeddable myEmbeddable = new MyFailingEmbeddable();
        Embeddable myOtherEmbeddable = new MyOtherEmbeddable();
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        when(classLoader.newInstance(Embeddable.class, myEmbeddableName)).thenReturn(myEmbeddable);
        when(classLoader.newInstance(Embeddable.class, myOtherEmbeddableName)).thenReturn(myOtherEmbeddable);
        // When
        Embedder embedder = embedderWith(runner, embedderControls, monitor);
        embedder.useClassLoader(classLoader);
        embedder.runAsEmbeddables(classNames);
        // Then: the failing embeddable is reported as failed, the other runs cleanly.
        assertThat(out.toString(), containsString("Running embeddable " + myEmbeddableName));
        assertThat(out.toString(), containsString("Failed to run embeddable " + myEmbeddableName));
        assertThat(out.toString(), containsString("Running embeddable " + myOtherEmbeddableName));
        assertThat(out.toString(), not(containsString("Failed to run embeddable " + myOtherEmbeddableName)));
    }
    /** Verifies that embeddables still run (and are reported) when batch mode is enabled. */
    @Test
    public void shouldRunStoriesAsEmbeddablesInBatchIfBatchFlagIsSet() throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls().doBatch(true);
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myEmbeddableName = MyEmbeddable.class.getName();
        String myOtherEmbeddableName = MyOtherEmbeddable.class.getName();
        List<String> classNames = asList(myEmbeddableName, myOtherEmbeddableName);
        Embeddable myEmbeddable = new MyEmbeddable();
        Embeddable myOtherEmbeddable = new MyOtherEmbeddable();
        List<Embeddable> embeddables = asList(myEmbeddable, myOtherEmbeddable);
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        when(classLoader.newInstance(Embeddable.class, myEmbeddableName)).thenReturn(myEmbeddable);
        when(classLoader.newInstance(Embeddable.class, myOtherEmbeddableName)).thenReturn(myOtherEmbeddable);
        // When
        Embedder embedder = embedderWith(runner, embedderControls, monitor);
        embedder.useClassLoader(classLoader);
        embedder.runAsEmbeddables(classNames);
        // Then
        for (Embeddable story : embeddables) {
            String name = story.getClass().getName();
            assertThat(out.toString(), containsString("Running embeddable " + name));
        }
    }
    /** Verifies that a failing embeddable in batch mode propagates RunningEmbeddablesFailed when failures are not ignored. */
    @Test(expected = RunningEmbeddablesFailed.class)
    public void shouldThrowExceptionUponFailingStoriesAsEmbeddablesInBatchIfIgnoreFailureInStoriesFlagIsNotSet()
            throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls().doBatch(true);
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myStoryName = MyStory.class.getName();
        String myOtherStoryName = MyOtherEmbeddable.class.getName();
        List<String> classNames = asList(myStoryName, myOtherStoryName);
        // The story name deliberately maps to a failing instance via the mocked class loader.
        Embeddable myStory = new MyFailingEmbeddable();
        Embeddable myOtherStory = new MyOtherEmbeddable();
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        when(classLoader.newInstance(Embeddable.class, myStoryName)).thenReturn(myStory);
        when(classLoader.newInstance(Embeddable.class, myOtherStoryName)).thenReturn(myOtherStory);
        // When
        Embedder embedder = embedderWith(runner, embedderControls, monitor);
        embedder.useClassLoader(classLoader);
        embedder.runAsEmbeddables(classNames);
        // Then fail as expected
    }
    /** Verifies that in batch mode with ignore-failure flags set, all embeddables run and the batch failure is reported. */
    @Test
    public void shouldRunFailingStoriesAsEmbeddablesInBatchIfBatchFlagIsSet() throws Throwable {
        // Given
        StoryRunner runner = mock(StoryRunner.class);
        EmbedderControls embedderControls = new EmbedderControls().doBatch(true).doIgnoreFailureInStories(true)
                .doIgnoreFailureInView(true);
        OutputStream out = new ByteArrayOutputStream();
        EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
        String myStoryName = MyFailingEmbeddable.class.getName();
        String myOtherStoryName = MyOtherEmbeddable.class.getName();
        List<String> classNames = asList(myStoryName, myOtherStoryName);
        Embeddable myStory = new MyFailingEmbeddable();
        Embeddable myOtherStory = new MyOtherEmbeddable();
        List<Embeddable> embeddables = asList(myStory, myOtherStory);
        EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
        when(classLoader.newInstance(Embeddable.class, myStoryName)).thenReturn(myStory);
        when(classLoader.newInstance(Embeddable.class, myOtherStoryName)).thenReturn(myOtherStory);
        // When
        Embedder embedder = embedderWith(runner, embedderControls, monitor);
        embedder.useClassLoader(classLoader);
        embedder.runAsEmbeddables(classNames);
        // Then
        for (Embeddable embeddable : embeddables) {
            String storyName = embeddable.getClass().getName();
            assertThat(out.toString(), containsString("Running embeddable " + storyName));
        }
        assertThat(out.toString(), containsString("Failed to run batch"));
    }
// With doGenerateViewAfterStories(false), embeddables run normally but the
// stories view generation step must not be reported.
@Test
public void shouldNotGenerateViewWhenRunningStoriesAsEmbeddablesIfGenerateViewAfterStoriesFlagIsNotSet()
throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doGenerateViewAfterStories(false);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
String myEmbeddableName = MyEmbeddable.class.getName();
String myOtherEmbeddableName = MyOtherEmbeddable.class.getName();
List<String> classNames = asList(myEmbeddableName, myOtherEmbeddableName);
Embeddable myEmbeddable = new MyEmbeddable();
Embeddable myOtherEmbeddable = new MyOtherEmbeddable();
List<Embeddable> embeddables = asList(myEmbeddable, myOtherEmbeddable);
EmbedderClassLoader classLoader = mock(EmbedderClassLoader.class);
when(classLoader.newInstance(Embeddable.class, myEmbeddableName)).thenReturn(myEmbeddable);
when(classLoader.newInstance(Embeddable.class, myOtherEmbeddableName)).thenReturn(myOtherEmbeddable);
// When
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.useClassLoader(classLoader);
embedder.runAsEmbeddables(classNames);
// Then: each embeddable ran, but no view generation was reported.
for (Embeddable embeddable : embeddables) {
assertThat(out.toString(), containsString("Running embeddable " + embeddable.getClass().getName()));
}
assertThat(out.toString(), not(containsString("Generating stories view")));
}
// Happy path for runStoriesAsPaths: each resolved story path is run through the
// StoryRunner with the configured steps factory, and the reports view is generated.
@SuppressWarnings("unchecked")
@Test
public void shouldRunStoriesAsPaths() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls();
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
final StoryReporter storyReporter = mock(StoryReporter.class);
// Configuration overridden so every story path maps to the same mock reporter.
MostUsefulConfiguration configuration = new MostUsefulConfiguration() {
@Override
public StoryReporter storyReporter(String storyPath) {
return storyReporter;
}
};
embedder.useConfiguration(configuration);
List<CandidateSteps> candidateSteps = embedder.candidateSteps();
InjectableStepsFactory stepsFactory = embedder.stepsFactory();
StoryPathResolver resolver = configuration.storyPathResolver();
State beforeStories = mock(State.class);
when(runner.runBeforeOrAfterStories(configuration, candidateSteps, Stage.BEFORE)).thenReturn(beforeStories);
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
assertThat(configuration.storyReporter(storyPath), sameInstance(storyReporter));
}
// When
embedder.runStoriesAsPaths(storyPaths);
// Then: every story was run via the runner, reported in the output, and the view generated.
for (String storyPath : storyPaths) {
verify(runner).run(Matchers.eq(configuration), Matchers.eq(stepsFactory),
Matchers.eq(stories.get(storyPath)), Matchers.isA(MetaFilter.class), Matchers.any(State.class));
assertThat(out.toString(), containsString("Running story " + storyPath));
}
assertThatReportsViewGenerated(out);
}
// Verifies that stories run with a MetaFilter in place and that no executor
// service is created for this run.
@SuppressWarnings("unchecked")
@Test
public void shouldRunStoriesApplyingFilter() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls();
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
final StoryReporter storyReporter = mock(StoryReporter.class);
Configuration configuration = new MostUsefulConfiguration() {
@Override
public StoryReporter storyReporter(String storyPath) {
return storyReporter;
}
};
embedder.useConfiguration(configuration);
List<CandidateSteps> candidateSteps = embedder.candidateSteps();
InjectableStepsFactory stepsFactory = embedder.stepsFactory();
StoryPathResolver resolver = configuration.storyPathResolver();
State beforeStories = mock(State.class);
when(runner.runBeforeOrAfterStories(configuration, candidateSteps, Stage.BEFORE)).thenReturn(beforeStories);
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
Meta meta = mock(Meta.class);
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
when(story.getMeta()).thenReturn(meta);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
assertThat(configuration.storyReporter(storyPath), sameInstance(storyReporter));
}
// When
// NOTE(review): this filter mock is never passed to the embedder, so the stubbing
// below has no effect on the run — the "applying filter" aspect named by the test
// is not actually exercised. Confirm whether wiring it in (e.g. via the embedder's
// meta-filter configuration) was intended.
MetaFilter filter = mock(MetaFilter.class);
when(filter.allow(meta)).thenReturn(false);
embedder.runStoriesAsPaths(storyPaths);
// Then
for (String storyPath : storyPaths) {
verify(runner).run(Matchers.eq(configuration), Matchers.eq(stepsFactory),
Matchers.eq(stories.get(storyPath)), Matchers.isA(MetaFilter.class), Matchers.any(State.class));
assertThat(out.toString(), containsString("Running story " + storyPath));
}
assertThatReportsViewGenerated(out);
assertThat(embedder.hasExecutorService(), is(false));
}
// With no properties configured, processing reports nothing; once properties are
// supplied, each key/value pair (including empty values) is reported as set.
@Test
public void shouldProcessSystemProperties() throws Throwable {
// Given an embedder whose monitor prints to a capturable stream
StoryRunner storyRunner = mock(StoryRunner.class);
EmbedderControls controls = new EmbedderControls();
OutputStream output = new ByteArrayOutputStream();
PrintStreamEmbedderMonitor printingMonitor = new PrintStreamEmbedderMonitor(new PrintStream(output));
Embedder embedder = embedderWith(storyRunner, controls, printingMonitor);
// When processing with the default (empty) properties
embedder.processSystemProperties();
// Then only the processing header is reported, no individual property
assertThat(output.toString(), containsString("Processing system properties " + embedder.systemProperties()));
assertThat(output.toString(), not(containsString("System property")));
// When processing again after supplying two properties (one with an empty value)
Properties supplied = new Properties();
supplied.setProperty("first", "one");
supplied.setProperty("second", "");
embedder.useSystemProperties(supplied);
embedder.processSystemProperties();
// Then each property is reported as set, empty value included
assertThat(output.toString(), containsString("Processing system properties " + supplied));
assertThat(output.toString(), containsString("System property 'first' set to 'one'"));
assertThat(output.toString(), containsString("System property 'second' set to ''"));
}
// With doSkip(true), no story is run and the skip is reported with the full path list.
@SuppressWarnings("unchecked")
@Test
public void shouldNotRunStoriesIfSkipFlagIsSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doSkip(true);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
Configuration configuration = embedder.configuration();
List<CandidateSteps> candidateSteps = embedder.candidateSteps();
StoryPathResolver resolver = configuration.storyPathResolver();
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
}
// When
embedder.runStoriesAsPaths(storyPaths);
// Then
for (String storyPath : storyPaths) {
// NOTE(review): this verifies a different run(...) overload than the one used in the
// non-skip tests; with never() that is harmless, but confirm it matches the production call.
verify(runner, never()).run(configuration, candidateSteps, stories.get(storyPath), MetaFilter.EMPTY);
assertThat(out.toString(), not(containsString("Running story " + storyPath)));
}
assertThat(out.toString(), containsString("Skipped stories " + storyPaths));
}
// Without doIgnoreFailureInStories, a story failure must surface as RunningStoriesFailed.
@SuppressWarnings("unchecked")
@Test(expected = RunningStoriesFailed.class)
public void shouldThrowExceptionUponFailingStoriesAsPathsIfIgnoreFailureInStoriesFlagIsNotSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls();
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
Configuration configuration = embedder.configuration();
List<CandidateSteps> candidateSteps = embedder.candidateSteps();
InjectableStepsFactory stepsFactory = embedder.stepsFactory();
StoryPathResolver resolver = configuration.storyPathResolver();
State beforeStories = mock(State.class);
when(runner.runBeforeOrAfterStories(configuration, candidateSteps, Stage.BEFORE)).thenReturn(beforeStories);
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
}
// Make every story run throw, so the embedder must propagate the failure.
for (String storyPath : storyPaths) {
doThrow(new RuntimeException(storyPath + " failed")).when(runner).run(Matchers.eq(configuration),
Matchers.eq(stepsFactory), Matchers.eq(stories.get(storyPath)), Matchers.any(MetaFilter.class),
Matchers.any(State.class));
}
// When
embedder.runStoriesAsPaths(storyPaths);
// Then fail as expected
}
// With doIgnoreFailureInStories(true), story failures are reported but not rethrown.
@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowExceptionUponFailingStoriesAsPathsIfIgnoreFailureInStoriesFlagIsSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doIgnoreFailureInStories(true);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
Configuration configuration = embedder.configuration();
List<CandidateSteps> candidateSteps = embedder.candidateSteps();
InjectableStepsFactory stepsFactory = embedder.stepsFactory();
StoryPathResolver resolver = configuration.storyPathResolver();
State beforeStories = mock(State.class);
when(runner.runBeforeOrAfterStories(configuration, candidateSteps, Stage.BEFORE)).thenReturn(beforeStories);
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
}
// Make every story run throw; the embedder should swallow and report instead.
for (String storyPath : storyPaths) {
doThrow(new RuntimeException(storyPath + " failed")).when(runner).run(Matchers.eq(configuration),
Matchers.eq(stepsFactory), Matchers.eq(stories.get(storyPath)), Matchers.any(MetaFilter.class),
Matchers.any(State.class));
}
// When
embedder.runStoriesAsPaths(storyPaths);
// Then: both the run and the failure are reported per story, with no exception thrown.
for (String storyPath : storyPaths) {
assertThat(out.toString(), containsString("Running story " + storyPath));
assertThat(out.toString(), containsString("Failed to run story " + storyPath));
}
}
// In batch mode, each story path is still reported as running.
@SuppressWarnings("unchecked")
@Test
public void shouldRunStoriesAsPathsInBatchIfBatchFlagIsSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doBatch(true);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
Configuration configuration = embedder.configuration();
List<CandidateSteps> candidateSteps = embedder.candidateSteps();
StoryPathResolver resolver = configuration.storyPathResolver();
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
}
// NOTE(review): doNothing() is already the default for a void method on a mock,
// so this loop is effectively a no-op (and stubs a different run(...) overload).
for (String storyPath : storyPaths) {
doNothing().when(runner).run(configuration, candidateSteps, stories.get(storyPath), MetaFilter.EMPTY);
}
// When
embedder.runStoriesAsPaths(storyPaths);
// Then
for (String storyPath : storyPaths) {
assertThat(out.toString(), containsString("Running story " + storyPath));
}
}
// In batch mode without failure-ignoring, story failures must still surface
// as RunningStoriesFailed.
@SuppressWarnings("unchecked")
@Test(expected = RunningStoriesFailed.class)
public void shouldThrowExceptionUponFailingStoriesAsPathsInBatchIfIgnoreFailureInStoriesFlagIsNotSet()
throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doBatch(true);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
Configuration configuration = embedder.configuration();
List<CandidateSteps> candidateSteps = embedder.candidateSteps();
InjectableStepsFactory stepsFactory = embedder.stepsFactory();
StoryPathResolver resolver = configuration.storyPathResolver();
State beforeStories = mock(State.class);
when(runner.runBeforeOrAfterStories(configuration, candidateSteps, Stage.BEFORE)).thenReturn(beforeStories);
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
}
// Make every story run throw, so the batch must end in failure.
for (String storyPath : storyPaths) {
doThrow(new RuntimeException(storyPath + " failed")).when(runner).run(Matchers.eq(configuration),
Matchers.eq(stepsFactory), Matchers.eq(stories.get(storyPath)), Matchers.any(MetaFilter.class),
Matchers.any(State.class));
}
// When
embedder.runStoriesAsPaths(storyPaths);
// Then fail as expected
}
// In batch mode with failures ignored, failing stories are run and their failures
// reported without aborting the batch.
@SuppressWarnings("unchecked")
@Test
public void shouldRunFailingStoriesAsPathsInBatchIfBatchFlagIsSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doBatch(true).doIgnoreFailureInStories(true);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
Configuration configuration = embedder.configuration();
InjectableStepsFactory stepsFactory = embedder.stepsFactory();
StoryPathResolver resolver = configuration.storyPathResolver();
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
}
// NOTE(review): 'failures' is populated but never asserted against — either assert
// on it or drop it; as written it only documents which exceptions were stubbed.
BatchFailures failures = new BatchFailures();
for (String storyPath : storyPaths) {
RuntimeException thrown = new RuntimeException(storyPath + " failed");
failures.put(storyPath, thrown);
doThrow(thrown).when(runner).run(Matchers.eq(configuration), Matchers.eq(stepsFactory),
Matchers.eq(stories.get(storyPath)), Matchers.any(MetaFilter.class), Matchers.any(State.class));
}
// When
embedder.runStoriesAsPaths(storyPaths);
// Then
String output = out.toString();
for (String storyPath : storyPaths) {
assertThat(output, containsString("Running story " + storyPath));
assertThat(output, containsString("Failed to run story " + storyPath));
}
}
// With doGenerateViewAfterStories(false), stories run normally but neither view
// generation message must appear in the output.
@SuppressWarnings("unchecked")
@Test
public void shouldNotGenerateViewWhenRunningStoriesAsPathsIfGenerateViewAfterStoriesFlagIsNotSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doGenerateViewAfterStories(false);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
List<? extends Class<? extends Embeddable>> embeddables = asList(MyStory.class, MyOtherEmbeddable.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
Configuration configuration = embedder.configuration();
InjectableStepsFactory stepsFactory = embedder.stepsFactory();
StoryPathResolver resolver = configuration.storyPathResolver();
List<String> storyPaths = new ArrayList<String>();
Map<String, Story> stories = new HashMap<String, Story>();
for (Class<? extends Embeddable> embeddable : embeddables) {
String storyPath = resolver.resolve(embeddable);
storyPaths.add(storyPath);
Story story = mockStory(Meta.EMPTY);
stories.put(storyPath, story);
when(runner.storyOfPath(configuration, storyPath)).thenReturn(story);
}
// When
embedder.runStoriesAsPaths(storyPaths);
// Then
for (String storyPath : storyPaths) {
verify(runner).run(Matchers.eq(configuration), Matchers.eq(stepsFactory),
Matchers.eq(stories.get(storyPath)), Matchers.any(MetaFilter.class), Matchers.any(State.class));
assertThat(out.toString(), containsString("Running story " + storyPath));
}
assertThat(out.toString(), not(containsString("Generating stories view")));
assertThat(out.toString(), not(containsString("Stories view generated")));
}
// An Embeddable class run via the annotated embedder runner must actually execute.
@Test
public void shouldRunStoriesWithAnnotatedEmbedderRunnerIfEmbeddable() throws Throwable {
// Given an embedder whose class loader delegates to this test's class loader
Embedder embedder = new Embedder();
ClassLoader parentLoader = this.getClass().getClassLoader();
embedder.useClassLoader(new EmbedderClassLoader(parentLoader));
// When running a class that is embeddable
String embeddableRunnerClass = RunningWithAnnotatedEmbedderRunner.class.getName();
embedder.runStoriesWithAnnotatedEmbedderRunner(asList(embeddableRunnerClass));
// Then the runner records that it has run
assertThat(RunningWithAnnotatedEmbedderRunner.hasRun, is(true));
}
// A class that is not Embeddable must be skipped by the annotated embedder runner.
@Test
public void shouldNotRunStoriesWithAnnotatedEmbedderRunnerIfNotEmbeddable() throws Throwable {
// Given an embedder whose class loader delegates to this test's class loader
Embedder embedder = new Embedder();
ClassLoader parentLoader = this.getClass().getClassLoader();
embedder.useClassLoader(new EmbedderClassLoader(parentLoader));
// When running a class that is NOT embeddable
String nonEmbeddableRunnerClass = NotEmbeddableWithAnnotatedEmbedderRunner.class.getName();
embedder.runStoriesWithAnnotatedEmbedderRunner(asList(nonEmbeddableRunnerClass));
// Then the runner must not have executed
assertThat(NotEmbeddableWithAnnotatedEmbedderRunner.hasRun, is(false));
}
// Failures raised inside an annotated embedder runner must propagate to the caller.
// Renamed from shouldRethowFailures... to fix the "Rethow" typo; JUnit discovers
// tests via the @Test annotation, so the rename is safe for callers.
@Test(expected = RuntimeException.class)
public void shouldRethrowFailuresWhenRunningWithAnnotatedEmbedderRunner() throws Throwable {
// Given
Embedder embedder = new Embedder();
embedder.useClassLoader(new EmbedderClassLoader(this.getClass().getClassLoader()));
String runWithEmbedderRunner = FailingWithAnnotatedEmbedderRunner.class.getName();
// When
embedder.runStoriesWithAnnotatedEmbedderRunner(asList(runWithEmbedderRunner));
// Then fail as expected
}
// A class name that cannot be loaded must fail fast with ClassLoadingFailed.
@Test(expected = ClassLoadingFailed.class)
public void shouldFailWhenRunningInexistingStoriesWithAnnotatedEmbedderRunner() throws Throwable {
// Given an embedder whose class loader delegates to this test's class loader
Embedder embedder = new Embedder();
ClassLoader parentLoader = this.getClass().getClassLoader();
embedder.useClassLoader(new EmbedderClassLoader(parentLoader));
// When running a runner class name that does not exist
String missingRunnerClass = "InexistingRunner";
embedder.runStoriesWithAnnotatedEmbedderRunner(asList(missingRunnerClass));
// Then fail as expected
}
// generateReportsView delegates to the configured ViewGenerator and reports the
// generated view (counts here include no failures, so no exception is expected).
@Test
public void shouldGenerateReportsViewFromExistingReports() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doGenerateViewAfterStories(false);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.configuration().useViewGenerator(viewGenerator);
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
when(viewGenerator.getReportsCount()).thenReturn(new ReportsCount(2, 0, 1, 2, 0, 0, 1, 0));
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then
verify(viewGenerator).generateReportsView(outputDirectory, formats, viewResources);
assertThatReportsViewGenerated(out);
}
// A reports count containing failed scenarios must make generateReportsView throw.
@Test(expected = RunningStoriesFailed.class)
public void shouldFailWhenGeneratingReportsViewWithFailedSteps() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doGenerateViewAfterStories(false);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.configuration().useViewGenerator(viewGenerator);
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
when(viewGenerator.getReportsCount()).thenReturn(new ReportsCount(2, 0, 0, 2, 1, 0, 0, 1));
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then
// NOTE(review): the statements below are unreachable — the expected exception is
// thrown by generateReportsView above, so these verifications never execute.
verify(viewGenerator).generateReportsView(outputDirectory, formats, viewResources);
assertThatReportsViewGenerated(out);
}
// With a FailingUponPendingStep strategy, pending steps in the reports count must
// make generateReportsView throw.
@Test(expected = RunningStoriesFailed.class)
public void shouldFailWhenGeneratingReportsViewWithPendingSteps() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doGenerateViewAfterStories(false);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.configuration().useViewGenerator(viewGenerator).usePendingStepStrategy(new FailingUponPendingStep());
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
when(viewGenerator.getReportsCount()).thenReturn(new ReportsCount(2, 0, 1, 2, 0, 0, 1, 0));
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then
// NOTE(review): unreachable — the expected exception is thrown above.
verify(viewGenerator).generateReportsView(outputDirectory, formats, viewResources);
assertThatReportsViewGenerated(out);
}
// With doSkip(true), no view is generated and neither view message is reported.
@Test
public void shouldNotGenerateViewIfSkipFlagIsSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doSkip(true);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.configuration().useStoryReporterBuilder(new StoryReporterBuilder().withDefaultFormats());
embedder.configuration().useViewGenerator(viewGenerator);
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then
verify(viewGenerator, never()).generateReportsView(outputDirectory, formats, viewResources);
assertThat(out.toString(), not(containsString("Generating stories view")));
assertThat(out.toString(), not(containsString("Stories view generated")));
}
// A failure inside the ViewGenerator is wrapped and rethrown as ViewGenerationFailed.
@Test(expected = ViewGenerationFailed.class)
public void shouldThrowExceptionIfViewGenerationFails() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls();
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.configuration().useViewGenerator(viewGenerator);
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
// Make the generator itself throw, so the embedder must wrap the failure.
doThrow(new RuntimeException()).when(viewGenerator)
.generateReportsView(outputDirectory, formats, viewResources);
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then fail as expected
}
// Failed scenarios in the reports count must surface as RunningStoriesFailed
// when doIgnoreFailureInView is not set.
@Test(expected = RunningStoriesFailed.class)
public void shouldThrowExceptionIfScenariosFailedAndIgnoreFlagIsNotSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls();
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.configuration().useViewGenerator(viewGenerator);
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
when(viewGenerator.getReportsCount()).thenReturn(new ReportsCount(1, 0, 1, 2, 1, 1, 1, 1));
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then fail as expected
}
// A reports count with stories but zero scenarios run must also be treated as a failure.
@Test(expected = RunningStoriesFailed.class)
public void shouldThrowExceptionIfNoScenariosRunForStoriesAndIgnoreFlagIsNotSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls();
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.configuration().useViewGenerator(viewGenerator);
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
when(viewGenerator.getReportsCount()).thenReturn(new ReportsCount(1, 0, 0, 0, 0, 0, 0, 1));
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then fail as expected
}
// With doIgnoreFailureInView(true), failed scenarios in the count do not prevent
// the view from being generated and reported.
@Test
public void shouldNotThrowExceptionIfScenariosFailedAndIgnoreFlagIsSet() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls().doIgnoreFailureInView(true);
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
embedder.configuration().useViewGenerator(viewGenerator);
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
when(viewGenerator.getReportsCount()).thenReturn(new ReportsCount(1, 0, 1, 2, 1, 0, 1, 1));
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then
verify(viewGenerator).generateReportsView(outputDirectory, formats, viewResources);
assertThatReportsViewGenerated(out);
}
// A custom EmbedderFailureStrategy must be handed the reports count after view generation.
@Test
public void shouldHandleFailuresAccordingToStrategy() throws Throwable {
// Given
StoryRunner runner = mock(StoryRunner.class);
EmbedderControls embedderControls = new EmbedderControls();
OutputStream out = new ByteArrayOutputStream();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor(new PrintStream(out));
ViewGenerator viewGenerator = mock(ViewGenerator.class);
Embedder embedder = embedderWith(runner, embedderControls, monitor);
EmbedderFailureStrategy failureStategy = mock(EmbedderFailureStrategy.class);
embedder.useEmbedderFailureStrategy(failureStategy);
embedder.configuration().useViewGenerator(viewGenerator);
File outputDirectory = new File("target/output");
List<String> formats = asList("html");
Properties viewResources = new Properties();
// When
ReportsCount count = new ReportsCount(1, 0, 1, 2, 1, 1, 1, 1);
when(viewGenerator.getReportsCount()).thenReturn(count);
embedder.generateReportsView(outputDirectory, formats, viewResources);
// Then: the strategy (not an exception) handles the failing count.
verify(failureStategy).handleFailures(count);
}
// Constructor-provided dependencies are retained until explicitly overridden via use*().
@Test
public void shouldAllowOverrideOfDefaultDependencies() throws Throwable {
// Given
StoryRunner runner = new StoryRunner();
EmbedderControls embedderControls = new EmbedderControls();
EmbedderMonitor monitor = new PrintStreamEmbedderMonitor();
// When
Embedder embedder = embedderWith(runner, embedderControls, monitor);
assertThat(embedder.embedderControls(), is(sameInstance(embedderControls)))
assertThat(embedder.storyRunner(), is(sameInstance(runner)));
assertThat(embedder.embedderMonitor(), is(sameInstance(monitor)));
embedder.useStoryRunner(new StoryRunner());
embedder.useEmbedderMonitor(new PrintStreamEmbedderMonitor());
// Then: the overridden dependencies replace the originals.
assertThat(embedder.storyRunner(), is(not(sameInstance(runner))));
assertThat(embedder.embedderMonitor(), is(not(sameInstance(monitor))));
}
/**
 * Creates an Embedder wired with the given runner and monitor (and a fresh
 * StoryMapper), then applies the given controls.
 */
private Embedder embedderWith(StoryRunner runner, EmbedderControls embedderControls, EmbedderMonitor monitor) {
Embedder embedder = new Embedder(new StoryMapper(), runner, monitor);
embedder.useEmbedderControls(embedderControls);
return embedder;
}
/**
 * Creates an Embedder wired with the given mapper, runner and monitor,
 * then applies the given controls.
 */
private Embedder embedderWith(StoryMapper mapper, StoryRunner runner, EmbedderControls embedderControls,
EmbedderMonitor monitor) {
Embedder embedder = new Embedder(mapper, runner, monitor);
embedder.useEmbedderControls(embedderControls);
return embedder;
}
// reportMatchingStepdocs for a matching step prints the pattern, the annotated
// method, and the steps instance it came from (exact output pinned below).
@Test
public void shouldFindAndReportMatchingSteps() {
// Given
Embedder embedder = new Embedder();
embedder.useCandidateSteps(asList((CandidateSteps) new MySteps()));
embedder.configuration().useStepFinder(new StepFinder());
OutputStream out = new ByteArrayOutputStream();
embedder.configuration().useStepdocReporter(new PrintStreamStepdocReporter(new PrintStream(out)));
// When
embedder.reportMatchingStepdocs("Given a given");
// Then
// dos2unix normalizes line endings so the exact-match works on Windows too.
String expected = "Step 'Given a given' is matched by annotated patterns:\n" + "'Given a given'\n"
+ "org.jbehave.core.embedder.EmbedderBehaviour$MySteps.given()\n" + "from steps instances:\n"
+ "org.jbehave.core.embedder.EmbedderBehaviour$MySteps\n";
assertThat(dos2unix(out.toString()), equalTo(expected));
}
// A step with no matching pattern still reports the available steps instances.
@Test
public void shouldReportNoMatchingStepdocsFoundWithStepProvided() {
// Given
Embedder embedder = new Embedder();
embedder.useCandidateSteps(asList((CandidateSteps) new MySteps()))
embedder.configuration().useStepFinder(new StepFinder());
OutputStream out = new ByteArrayOutputStream();
embedder.configuration().useStepdocReporter(new PrintStreamStepdocReporter(new PrintStream(out)));
// When
embedder.reportMatchingStepdocs("Given a non-defined step");
// Then
String expected = "Step 'Given a non-defined step' is not matched by any pattern\n" + "from steps instances:\n"
+ "org.jbehave.core.embedder.EmbedderBehaviour$MySteps\n";
assertThat(dos2unix(out.toString()), equalTo(expected));
}
// With no steps instances at all, the report states that none are provided.
@Test
public void shouldReportNoMatchingStepdocsFoundWhenNoStepsProvided() {
// Given
Embedder embedder = new Embedder();
embedder.useCandidateSteps(asList(new CandidateSteps[] {}));
embedder.configuration().useStepFinder(new StepFinder());
OutputStream out = new ByteArrayOutputStream();
embedder.configuration().useStepdocReporter(new PrintStreamStepdocReporter(new PrintStream(out)));
// When
embedder.reportMatchingStepdocs("Given a non-defined step");
// Then
String expected = "Step 'Given a non-defined step' is not matched by any pattern\n"
+ "as no steps instances are provided\n";
assertThat(dos2unix(out.toString()), equalTo(expected));
}
@Test
public void shouldReportAllStepdocs() {
    // Given an embedder with MySteps as candidate steps and a stepdoc
    // reporter writing to an in-memory stream
    Embedder embedder = new Embedder();
    embedder.useCandidateSteps(asList((CandidateSteps) new MySteps()));
    embedder.configuration().useStepFinder(new StepFinder());
    OutputStream stream = new ByteArrayOutputStream();
    embedder.configuration().useStepdocReporter(new PrintStreamStepdocReporter(new PrintStream(stream)));
    // When all stepdocs are reported
    embedder.reportStepdocs();
    // Then the report contains each step pattern with its method, and the steps instance
    String report = dos2unix(stream.toString());
    assertThat(report, containsString("'Given a given'\n"
            + "org.jbehave.core.embedder.EmbedderBehaviour$MySteps.given()\n"));
    assertThat(report, containsString("'When a when'\n"
            + "org.jbehave.core.embedder.EmbedderBehaviour$MySteps.when()\n"));
    assertThat(report, containsString("'Then a then'\n"
            + "org.jbehave.core.embedder.EmbedderBehaviour$MySteps.then()\n"));
    assertThat(report,
            containsString("from steps instances:\norg.jbehave.core.embedder.EmbedderBehaviour$MySteps\n"));
}
@Test
public void shouldAllowStringRepresentationOfEmbedder() throws Throwable {
    // Given a default embedder, whose configuration is the most useful one
    Embedder embedder = new Embedder();
    assertThat(embedder.configuration(), instanceOf(MostUsefulConfiguration.class));
    // When its string representation is taken
    String asString = embedder.toString();
    // Then it mentions the configuration, runner and monitor types
    assertThat(asString, containsString(MostUsefulConfiguration.class.getSimpleName()));
    assertThat(asString, containsString(StoryRunner.class.getSimpleName()));
    assertThat(asString, containsString(PrintStreamEmbedderMonitor.class.getSimpleName()));
}
@Test
public void shouldGenerateCrossReferenceWhenAvailable() {
    // Given an embedder whose story reporter builder has a cross-reference
    Embedder embedder = new Embedder();
    StoryReporterBuilder builder = new StoryReporterBuilder().withCrossReference(new CrossReference());
    embedder.useConfiguration(new MostUsefulConfiguration().useStoryReporterBuilder(builder));
    // When the cross-reference is generated
    embedder.generateCrossReference();
    // Then both the json and xml xref outputs exist
    assertXrefExists(embedder.configuration().storyReporterBuilder(), "json");
    assertXrefExists(embedder.configuration().storyReporterBuilder(), "xml");
}
// Asserts that the cross-reference file with the given extension exists
// under the builder's output directory.
private void assertXrefExists(StoryReporterBuilder storyReporterBuilder, String ext) {
    File xref = new File(storyReporterBuilder.outputDirectory(), "view/xref." + ext);
    assertThat(xref.exists(), is(true));
}
// Normalizes Windows line endings to Unix ones so output assertions are
// platform-independent.
private String dos2unix(String string) {
    String normalized = string.replace("\r\n", "\n");
    return normalized;
}
// Minimal JUnitStoryMaps used by the tests above: run() only flips the
// static flag so callers can assert that the embedder invoked it.
private static class MyStoryMaps extends JUnitStoryMaps {
    // set to true once run() has been invoked
    static boolean run = false;
    @Override
    public void run() throws Throwable {
        run = true;
    }
    @Override
    protected List<String> metaFilters() {
        // meta filter applied when mapping stories
        return asList("+some property");
    }
    @Override
    protected List<String> storyPaths() {
        // match every story file
        return asList("**/*.story");
    }
}
// Creates a Mockito stub of a Story whose path is fixed and whose meta
// (both getMeta() and asMeta(...)) is the given one.
private Story mockStory(Meta meta) {
    Story stubbed = mock(Story.class);
    when(stubbed.getPath()).thenReturn("/a/path");
    when(stubbed.getMeta()).thenReturn(meta);
    when(stubbed.asMeta(Mockito.anyString())).thenReturn(meta);
    return stubbed;
}
// No-op Embeddable used as a fixture when testing which embeddables run.
private class MyEmbeddable implements Embeddable {
    public void useEmbedder(Embedder embedder) {
        // no-op: the injected embedder is ignored
    }
    public void run() throws Throwable {
        // no-op: nothing to execute
    }
}
// A second no-op Embeddable fixture, distinct from MyEmbeddable so tests
// can run more than one embeddable type.
private class MyOtherEmbeddable implements Embeddable {
    public void useEmbedder(Embedder embedder) {
        // no-op: the injected embedder is ignored
    }
    public void run() throws Throwable {
        // no-op: nothing to execute
    }
}
// Abstract Embeddable fixture — presumably used to verify that abstract
// classes are not instantiated/run by the embedder; confirm against callers.
private abstract class MyAbstractEmbeddable implements Embeddable {
}
// Bare JUnitStory fixture with default behaviour.
private class MyStory extends JUnitStory {
}
// JUnitStory fixture whose run() always fails, used to exercise the
// embedder's failure-handling paths.
private class MyFailingEmbeddable extends JUnitStory {
    @Override
    public void run() throws Throwable {
        // deliberately fail every run
        throw new RuntimeException("Failed");
    }
}
// InjectableEmbedder run via AnnotatedEmbedderRunner; records in a static
// flag that its run() was executed.
@RunWith(AnnotatedEmbedderRunner.class)
@Configure()
@UsingEmbedder()
public static class RunningWithAnnotatedEmbedderRunner extends InjectableEmbedder {
    // set to true once run() has been invoked
    static boolean hasRun;
    @Test
    public void run() {
        hasRun = true;
    }
}
// InjectableEmbedder whose run() always throws, used to exercise failure
// handling of AnnotatedEmbedderRunner.
@RunWith(AnnotatedEmbedderRunner.class)
@Configure()
@UsingEmbedder()
public static class FailingWithAnnotatedEmbedderRunner extends InjectableEmbedder {
    @Test
    public void run() {
        // deliberately fail every run
        throw new RuntimeException();
    }
}
// Class annotated for AnnotatedEmbedderRunner but NOT extending
// InjectableEmbedder — presumably used to verify the runner rejects
// non-embeddable classes (confirm against the test that references it).
@RunWith(AnnotatedEmbedderRunner.class)
@Configure()
@UsingEmbedder()
public static class NotEmbeddableWithAnnotatedEmbedderRunner {
    // would be set to true by run(); stays false when the runner rejects the class
    static boolean hasRun;
    @Test
    public void run() {
        hasRun = true;
    }
}
// Minimal Steps fixture exposing one Given/When/Then step each; the
// stepdoc reporting tests above match against these patterns and methods.
public static class MySteps extends Steps {
    @Given("a given")
    public void given() {
        // no-op: only the pattern/method mapping matters
    }
    @When("a when")
    public void when() {
        // no-op: only the pattern/method mapping matters
    }
    @Then("a then")
    public void then() {
        // no-op: only the pattern/method mapping matters
    }
}
}
|
codehaus/jbehave-core
|
jbehave-core/src/test/java/org/jbehave/core/embedder/EmbedderBehaviour.java
|
Java
|
bsd-3-clause
| 62,023
|
/*
SDL_mixer: An audio mixer library based on the SDL library
Copyright (C) 1997-2012 Sam Lantinga <slouken@libsdl.org>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/* $Id$ */
#ifndef _SDL_MIXER_H
#define _SDL_MIXER_H
#include "SDL/SDL_types.h"
#include "SDL/SDL_rwops.h"
#include "SDL/SDL_audio.h"
#include "SDL/SDL_endian.h"
#include "SDL/SDL_version.h"
#include "SDL/begin_code.h"
/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
extern "C" {
#endif
/* Printable format: "%d.%d.%d", MAJOR, MINOR, PATCHLEVEL
*/
#define SDL_MIXER_MAJOR_VERSION 1
#define SDL_MIXER_MINOR_VERSION 2
#define SDL_MIXER_PATCHLEVEL 12
/* This macro can be used to fill a version structure with the compile-time
* version of the SDL_mixer library.
*/
#define SDL_MIXER_VERSION(X) \
{ \
(X)->major = SDL_MIXER_MAJOR_VERSION; \
(X)->minor = SDL_MIXER_MINOR_VERSION; \
(X)->patch = SDL_MIXER_PATCHLEVEL; \
}
/* Backwards compatibility */
#define MIX_MAJOR_VERSION SDL_MIXER_MAJOR_VERSION
#define MIX_MINOR_VERSION SDL_MIXER_MINOR_VERSION
#define MIX_PATCHLEVEL SDL_MIXER_PATCHLEVEL
#define MIX_VERSION(X) SDL_MIXER_VERSION(X)
/* This function gets the version of the dynamically linked SDL_mixer library.
it should NOT be used to fill a version structure, instead you should
use the SDL_MIXER_VERSION() macro.
*/
extern DECLSPEC const SDL_version * SDLCALL Mix_Linked_Version(void);
/* Flags for Mix_Init(): one bit per optional, dynamically loaded decoder
   library (see Mix_Init()/Mix_Quit() below). */
typedef enum
{
    MIX_INIT_FLAC = 0x00000001,
    MIX_INIT_MOD = 0x00000002,
    MIX_INIT_MP3 = 0x00000004,
    MIX_INIT_OGG = 0x00000008,
    MIX_INIT_FLUIDSYNTH = 0x00000010
} MIX_InitFlags;
/* Loads dynamic libraries and prepares them for use. Flags should be
one or more flags from MIX_InitFlags OR'd together.
It returns the flags successfully initialized, or 0 on failure.
*/
extern DECLSPEC int SDLCALL Mix_Init(int flags);
/* Unloads libraries loaded with Mix_Init */
extern DECLSPEC void SDLCALL Mix_Quit(void);
/* The default mixer has 8 simultaneous mixing channels */
#ifndef MIX_CHANNELS
#define MIX_CHANNELS 8
#endif
/* Good default values for a PC soundcard */
#define MIX_DEFAULT_FREQUENCY 22050
#if SDL_BYTEORDER == SDL_LIL_ENDIAN
#define MIX_DEFAULT_FORMAT AUDIO_S16LSB
#else
#define MIX_DEFAULT_FORMAT AUDIO_S16MSB
#endif
#define MIX_DEFAULT_CHANNELS 2
#define MIX_MAX_VOLUME 128 /* Volume of a chunk */
/* The internal format for an audio chunk */
typedef struct Mix_Chunk {
    int allocated;  /* presumably non-zero when abuf is owned (and freed) by SDL_mixer -- confirm in SDL_mixer sources */
    Uint8 *abuf;    /* audio data buffer */
    Uint32 alen;    /* length of abuf -- NOTE(review): presumably in bytes; confirm */
    Uint8 volume;   /* Per-sample volume, 0-128 */
} Mix_Chunk;
/* The different fading types supported */
/* Fading status, as returned by Mix_FadingMusic()/Mix_FadingChannel(). */
typedef enum {
    MIX_NO_FADING,   /* not fading */
    MIX_FADING_OUT,  /* currently fading out */
    MIX_FADING_IN    /* currently fading in */
} Mix_Fading;
/* Music format of a Mix_Music, as returned by Mix_GetMusicType(). */
typedef enum {
    MUS_NONE,
    MUS_CMD,
    MUS_WAV,
    MUS_MOD,
    MUS_MID,
    MUS_OGG,
    MUS_MP3,
    MUS_MP3_MAD,
    MUS_FLAC,
    MUS_MODPLUG
} Mix_MusicType;
/* The internal format for a music chunk interpreted via mikmod */
typedef struct _Mix_Music Mix_Music;
/* Open the mixer with a certain audio format */
extern DECLSPEC int SDLCALL Mix_OpenAudio(int frequency, Uint16 format, int channels,
int chunksize);
/* Dynamically change the number of channels managed by the mixer.
If decreasing the number of channels, the upper channels are
stopped.
This function returns the new number of allocated channels.
*/
extern DECLSPEC int SDLCALL Mix_AllocateChannels(int numchans);
/* Find out what the actual audio device parameters are.
This function returns 1 if the audio has been opened, 0 otherwise.
*/
extern DECLSPEC int SDLCALL Mix_QuerySpec(int *frequency,Uint16 *format,int *channels);
/* Load a wave file or a music (.mod .s3m .it .xm) file */
extern DECLSPEC Mix_Chunk * SDLCALL Mix_LoadWAV_RW(SDL_RWops *src, int freesrc);
#define Mix_LoadWAV(file) Mix_LoadWAV_RW(SDL_RWFromFile(file, "rb"), 1)
extern DECLSPEC Mix_Music * SDLCALL Mix_LoadMUS(const char *file);
/* Load a music file from an SDL_RWop object (Ogg and MikMod specific currently)
Matt Campbell (matt@campbellhome.dhs.org) April 2000 */
extern DECLSPEC Mix_Music * SDLCALL Mix_LoadMUS_RW(SDL_RWops *rw);
/* Load a music file from an SDL_RWop object assuming a specific format */
extern DECLSPEC Mix_Music * SDLCALL Mix_LoadMUSType_RW(SDL_RWops *rw, Mix_MusicType type, int freesrc);
/* Load a wave file of the mixer format from a memory buffer */
extern DECLSPEC Mix_Chunk * SDLCALL Mix_QuickLoad_WAV(Uint8 *mem);
/* Load raw audio data of the mixer format from a memory buffer */
extern DECLSPEC Mix_Chunk * SDLCALL Mix_QuickLoad_RAW(Uint8 *mem, Uint32 len);
/* Free an audio chunk previously loaded */
extern DECLSPEC void SDLCALL Mix_FreeChunk(Mix_Chunk *chunk);
extern DECLSPEC void SDLCALL Mix_FreeMusic(Mix_Music *music);
/* Get a list of chunk/music decoders that this build of SDL_mixer provides.
This list can change between builds AND runs of the program, if external
libraries that add functionality become available.
You must successfully call Mix_OpenAudio() before calling these functions.
This API is only available in SDL_mixer 1.2.9 and later.
// usage...
int i;
const int total = Mix_GetNumChunkDecoders();
for (i = 0; i < total; i++)
printf("Supported chunk decoder: [%s]\n", Mix_GetChunkDecoder(i));
Appearing in this list doesn't promise your specific audio file will
decode...but it's handy to know if you have, say, a functioning Timidity
install.
These return values are static, read-only data; do not modify or free it.
The pointers remain valid until you call Mix_CloseAudio().
*/
extern DECLSPEC int SDLCALL Mix_GetNumChunkDecoders(void);
extern DECLSPEC const char * SDLCALL Mix_GetChunkDecoder(int index);
extern DECLSPEC int SDLCALL Mix_GetNumMusicDecoders(void);
extern DECLSPEC const char * SDLCALL Mix_GetMusicDecoder(int index);
/* Find out the music format of a mixer music, or the currently playing
music, if 'music' is NULL.
*/
extern DECLSPEC Mix_MusicType SDLCALL Mix_GetMusicType(const Mix_Music *music);
/* Set a function that is called after all mixing is performed.
This can be used to provide real-time visual display of the audio stream
or add a custom mixer filter for the stream data.
*/
extern DECLSPEC void SDLCALL Mix_SetPostMix(void (*mix_func)
(void *udata, Uint8 *stream, int len), void *arg);
/* Add your own music player or additional mixer function.
If 'mix_func' is NULL, the default music player is re-enabled.
*/
extern DECLSPEC void SDLCALL Mix_HookMusic(void (*mix_func)
(void *udata, Uint8 *stream, int len), void *arg);
/* Add your own callback when the music has finished playing.
This callback is only called if the music finishes naturally.
*/
extern DECLSPEC void SDLCALL Mix_HookMusicFinished(void (*music_finished)(void));
/* Get a pointer to the user data for the current music hook */
extern DECLSPEC void * SDLCALL Mix_GetMusicHookData(void);
/*
* Add your own callback when a channel has finished playing. NULL
* to disable callback. The callback may be called from the mixer's audio
* callback or it could be called as a result of Mix_HaltChannel(), etc.
* do not call SDL_LockAudio() from this callback; you will either be
* inside the audio callback, or SDL_mixer will explicitly lock the audio
* before calling your callback.
*/
extern DECLSPEC void SDLCALL Mix_ChannelFinished(void (*channel_finished)(int channel));
/* Special Effects API by ryan c. gordon. (icculus@icculus.org) */
#define MIX_CHANNEL_POST -2
/* This is the format of a special effect callback:
*
* myeffect(int chan, void *stream, int len, void *udata);
*
* (chan) is the channel number that your effect is affecting. (stream) is
* the buffer of data to work upon. (len) is the size of (stream), and
* (udata) is a user-defined bit of data, which you pass as the last arg of
* Mix_RegisterEffect(), and is passed back unmolested to your callback.
* Your effect changes the contents of (stream) based on whatever parameters
* are significant, or just leaves it be, if you prefer. You can do whatever
* you like to the buffer, though, and it will continue in its changed state
* down the mixing pipeline, through any other effect functions, then finally
* to be mixed with the rest of the channels and music for the final output
* stream.
*
* DO NOT EVER call SDL_LockAudio() from your callback function!
*/
typedef void (*Mix_EffectFunc_t)(int chan, void *stream, int len, void *udata);
/*
* This is a callback that signifies that a channel has finished all its
* loops and has completed playback. This gets called if the buffer
* plays out normally, or if you call Mix_HaltChannel(), implicitly stop
* a channel via Mix_AllocateChannels(), or unregister a callback while
* it's still playing.
*
* DO NOT EVER call SDL_LockAudio() from your callback function!
*/
typedef void (*Mix_EffectDone_t)(int chan, void *udata);
/* Register a special effect function. At mixing time, the channel data is
* copied into a buffer and passed through each registered effect function.
* After it passes through all the functions, it is mixed into the final
* output stream. The copy to buffer is performed once, then each effect
* function performs on the output of the previous effect. Understand that
* this extra copy to a buffer is not performed if there are no effects
* registered for a given chunk, which saves CPU cycles, and any given
* effect will be extra cycles, too, so it is crucial that your code run
* fast. Also note that the data that your function is given is in the
* format of the sound device, and not the format you gave to Mix_OpenAudio(),
* although they may in reality be the same. This is an unfortunate but
* necessary speed concern. Use Mix_QuerySpec() to determine if you can
* handle the data before you register your effect, and take appropriate
* actions.
* You may also specify a callback (Mix_EffectDone_t) that is called when
* the channel finishes playing. This gives you a more fine-grained control
* than Mix_ChannelFinished(), in case you need to free effect-specific
* resources, etc. If you don't need this, you can specify NULL.
* You may set the callbacks before or after calling Mix_PlayChannel().
* Things like Mix_SetPanning() are just internal special effect functions,
* so if you are using that, you've already incurred the overhead of a copy
* to a separate buffer, and that these effects will be in the queue with
* any functions you've registered. The list of registered effects for a
* channel is reset when a chunk finishes playing, so you need to explicitly
* set them with each call to Mix_PlayChannel*().
* You may also register a special effect function that is to be run after
* final mixing occurs. The rules for these callbacks are identical to those
* in Mix_RegisterEffect, but they are run after all the channels and the
* music have been mixed into a single stream, whereas channel-specific
* effects run on a given channel before any other mixing occurs. These
* global effect callbacks are call "posteffects". Posteffects only have
* their Mix_EffectDone_t function called when they are unregistered (since
* the main output stream is never "done" in the same sense as a channel).
* You must unregister them manually when you've had enough. Your callback
* will be told that the channel being mixed is (MIX_CHANNEL_POST) if the
* processing is considered a posteffect.
*
* After all these effects have finished processing, the callback registered
* through Mix_SetPostMix() runs, and then the stream goes to the audio
* device.
*
* DO NOT EVER call SDL_LockAudio() from your callback function!
*
* returns zero if error (no such channel), nonzero if added.
* Error messages can be retrieved from Mix_GetError().
*/
extern DECLSPEC int SDLCALL Mix_RegisterEffect(int chan, Mix_EffectFunc_t f,
Mix_EffectDone_t d, void *arg);
/* You may not need to call this explicitly, unless you need to stop an
* effect from processing in the middle of a chunk's playback.
* Posteffects are never implicitly unregistered as they are for channels,
* but they may be explicitly unregistered through this function by
* specifying MIX_CHANNEL_POST for a channel.
* returns zero if error (no such channel or effect), nonzero if removed.
* Error messages can be retrieved from Mix_GetError().
*/
extern DECLSPEC int SDLCALL Mix_UnregisterEffect(int channel, Mix_EffectFunc_t f);
/* You may not need to call this explicitly, unless you need to stop all
* effects from processing in the middle of a chunk's playback. Note that
* this will also shut off some internal effect processing, since
* Mix_SetPanning() and others may use this API under the hood. This is
* called internally when a channel completes playback.
* Posteffects are never implicitly unregistered as they are for channels,
* but they may be explicitly unregistered through this function by
* specifying MIX_CHANNEL_POST for a channel.
* returns zero if error (no such channel), nonzero if all effects removed.
* Error messages can be retrieved from Mix_GetError().
*/
extern DECLSPEC int SDLCALL Mix_UnregisterAllEffects(int channel);
#define MIX_EFFECTSMAXSPEED "MIX_EFFECTSMAXSPEED"
/*
* These are the internally-defined mixing effects. They use the same API that
* effects defined in the application use, but are provided here as a
* convenience. Some effects can reduce their quality or use more memory in
* the name of speed; to enable this, make sure the environment variable
* MIX_EFFECTSMAXSPEED (see above) is defined before you call
* Mix_OpenAudio().
*/
/* Set the panning of a channel. The left and right channels are specified
* as integers between 0 and 255, quietest to loudest, respectively.
*
* Technically, this is just individual volume control for a sample with
* two (stereo) channels, so it can be used for more than just panning.
* If you want real panning, call it like this:
*
* Mix_SetPanning(channel, left, 255 - left);
*
* ...which isn't so hard.
*
* Setting (channel) to MIX_CHANNEL_POST registers this as a posteffect, and
* the panning will be done to the final mixed stream before passing it on
* to the audio device.
*
* This uses the Mix_RegisterEffect() API internally, and returns without
* registering the effect function if the audio device is not configured
* for stereo output. Setting both (left) and (right) to 255 causes this
* effect to be unregistered, since that is the data's normal state.
*
* returns zero if error (no such channel or Mix_RegisterEffect() fails),
* nonzero if panning effect enabled. Note that an audio device in mono
* mode is a no-op, but this call will return successful in that case.
* Error messages can be retrieved from Mix_GetError().
*/
extern DECLSPEC int SDLCALL Mix_SetPanning(int channel, Uint8 left, Uint8 right);
/* Set the position of a channel. (angle) is an integer from 0 to 360, that
* specifies the location of the sound in relation to the listener. (angle)
 * will be reduced as necessary (540 becomes 180 degrees, -100 becomes 260).
* Angle 0 is due north, and rotates clockwise as the value increases.
* For efficiency, the precision of this effect may be limited (angles 1
* through 7 might all produce the same effect, 8 through 15 are equal, etc).
* (distance) is an integer between 0 and 255 that specifies the space
* between the sound and the listener. The larger the number, the further
* away the sound is. Using 255 does not guarantee that the channel will be
* culled from the mixing process or be completely silent. For efficiency,
* the precision of this effect may be limited (distance 0 through 5 might
* all produce the same effect, 6 through 10 are equal, etc). Setting (angle)
* and (distance) to 0 unregisters this effect, since the data would be
* unchanged.
*
* If you need more precise positional audio, consider using OpenAL for
* spatialized effects instead of SDL_mixer. This is only meant to be a
* basic effect for simple "3D" games.
*
* If the audio device is configured for mono output, then you won't get
* any effectiveness from the angle; however, distance attenuation on the
* channel will still occur. While this effect will function with stereo
* voices, it makes more sense to use voices with only one channel of sound,
* so when they are mixed through this effect, the positioning will sound
* correct. You can convert them to mono through SDL before giving them to
* the mixer in the first place if you like.
*
* Setting (channel) to MIX_CHANNEL_POST registers this as a posteffect, and
* the positioning will be done to the final mixed stream before passing it
* on to the audio device.
*
* This is a convenience wrapper over Mix_SetDistance() and Mix_SetPanning().
*
* returns zero if error (no such channel or Mix_RegisterEffect() fails),
* nonzero if position effect is enabled.
* Error messages can be retrieved from Mix_GetError().
*/
extern DECLSPEC int SDLCALL Mix_SetPosition(int channel, Sint16 angle, Uint8 distance);
/* Set the "distance" of a channel. (distance) is an integer from 0 to 255
* that specifies the location of the sound in relation to the listener.
* Distance 0 is overlapping the listener, and 255 is as far away as possible
* A distance of 255 does not guarantee silence; in such a case, you might
* want to try changing the chunk's volume, or just cull the sample from the
* mixing process with Mix_HaltChannel().
* For efficiency, the precision of this effect may be limited (distances 1
* through 7 might all produce the same effect, 8 through 15 are equal, etc).
* (distance) is an integer between 0 and 255 that specifies the space
* between the sound and the listener. The larger the number, the further
* away the sound is.
* Setting (distance) to 0 unregisters this effect, since the data would be
* unchanged.
* If you need more precise positional audio, consider using OpenAL for
* spatialized effects instead of SDL_mixer. This is only meant to be a
* basic effect for simple "3D" games.
*
* Setting (channel) to MIX_CHANNEL_POST registers this as a posteffect, and
* the distance attenuation will be done to the final mixed stream before
* passing it on to the audio device.
*
* This uses the Mix_RegisterEffect() API internally.
*
* returns zero if error (no such channel or Mix_RegisterEffect() fails),
 * nonzero if distance effect is enabled.
* Error messages can be retrieved from Mix_GetError().
*/
extern DECLSPEC int SDLCALL Mix_SetDistance(int channel, Uint8 distance);
/*
* !!! FIXME : Haven't implemented, since the effect goes past the
* end of the sound buffer. Will have to think about this.
* --ryan.
*/
#if 0
/* Causes an echo effect to be mixed into a sound. (echo) is the amount
* of echo to mix. 0 is no echo, 255 is infinite (and probably not
* what you want).
*
* Setting (channel) to MIX_CHANNEL_POST registers this as a posteffect, and
* the reverbing will be done to the final mixed stream before passing it on
* to the audio device.
*
* This uses the Mix_RegisterEffect() API internally. If you specify an echo
* of zero, the effect is unregistered, as the data is already in that state.
*
* returns zero if error (no such channel or Mix_RegisterEffect() fails),
 * nonzero if echo effect is enabled.
* Error messages can be retrieved from Mix_GetError().
*/
extern no_parse_DECLSPEC int SDLCALL Mix_SetReverb(int channel, Uint8 echo);
#endif
/* Causes a channel to reverse its stereo. This is handy if the user has his
* speakers hooked up backwards, or you would like to have a minor bit of
* psychedelia in your sound code. :) Calling this function with (flip)
 * set to non-zero reverses the chunk's usual channels. If (flip) is zero,
* the effect is unregistered.
*
* This uses the Mix_RegisterEffect() API internally, and thus is probably
* more CPU intensive than having the user just plug in his speakers
* correctly. Mix_SetReverseStereo() returns without registering the effect
* function if the audio device is not configured for stereo output.
*
 * If you specify MIX_CHANNEL_POST for (channel), then the effect is used
* on the final mixed stream before sending it on to the audio device (a
* posteffect).
*
* returns zero if error (no such channel or Mix_RegisterEffect() fails),
* nonzero if reversing effect is enabled. Note that an audio device in mono
* mode is a no-op, but this call will return successful in that case.
* Error messages can be retrieved from Mix_GetError().
*/
extern DECLSPEC int SDLCALL Mix_SetReverseStereo(int channel, int flip);
/* end of effects API. --ryan. */
/* Reserve the first channels (0 -> n-1) for the application, i.e. don't allocate
them dynamically to the next sample if requested with a -1 value below.
Returns the number of reserved channels.
*/
extern DECLSPEC int SDLCALL Mix_ReserveChannels(int num);
/* Channel grouping functions */
/* Attach a tag to a channel. A tag can be assigned to several mixer
channels, to form groups of channels.
If 'tag' is -1, the tag is removed (actually -1 is the tag used to
represent the group of all the channels).
Returns true if everything was OK.
*/
extern DECLSPEC int SDLCALL Mix_GroupChannel(int which, int tag);
/* Assign several consecutive channels to a group */
extern DECLSPEC int SDLCALL Mix_GroupChannels(int from, int to, int tag);
/* Finds the first available channel in a group of channels,
returning -1 if none are available.
*/
extern DECLSPEC int SDLCALL Mix_GroupAvailable(int tag);
/* Returns the number of channels in a group. This is also a subtle
way to get the total number of channels when 'tag' is -1
*/
extern DECLSPEC int SDLCALL Mix_GroupCount(int tag);
/* Finds the "oldest" sample playing in a group of channels */
extern DECLSPEC int SDLCALL Mix_GroupOldest(int tag);
/* Finds the "most recent" (i.e. last) sample playing in a group of channels */
extern DECLSPEC int SDLCALL Mix_GroupNewer(int tag);
/* Play an audio chunk on a specific channel.
If the specified channel is -1, play on the first free channel.
If 'loops' is greater than zero, loop the sound that many times.
   If 'loops' is -1, loop infinitely (~65000 times).
Returns which channel was used to play the sound.
*/
#define Mix_PlayChannel(channel,chunk,loops) Mix_PlayChannelTimed(channel,chunk,loops,-1)
/* The same as above, but the sound is played at most 'ticks' milliseconds */
extern DECLSPEC int SDLCALL Mix_PlayChannelTimed(int channel, Mix_Chunk *chunk, int loops, int ticks);
extern DECLSPEC int SDLCALL Mix_PlayMusic(Mix_Music *music, int loops);
/* Fade in music or a channel over "ms" milliseconds, same semantics as the "Play" functions */
extern DECLSPEC int SDLCALL Mix_FadeInMusic(Mix_Music *music, int loops, int ms);
extern DECLSPEC int SDLCALL Mix_FadeInMusicPos(Mix_Music *music, int loops, int ms, double position);
#define Mix_FadeInChannel(channel,chunk,loops,ms) Mix_FadeInChannelTimed(channel,chunk,loops,ms,-1)
extern DECLSPEC int SDLCALL Mix_FadeInChannelTimed(int channel, Mix_Chunk *chunk, int loops, int ms, int ticks);
/* Set the volume in the range of 0-128 of a specific channel or chunk.
If the specified channel is -1, set volume for all channels.
Returns the original volume.
If the specified volume is -1, just return the current volume.
*/
extern DECLSPEC int SDLCALL Mix_Volume(int channel, int volume);
extern DECLSPEC int SDLCALL Mix_VolumeChunk(Mix_Chunk *chunk, int volume);
extern DECLSPEC int SDLCALL Mix_VolumeMusic(int volume);
/* Halt playing of a particular channel */
extern DECLSPEC int SDLCALL Mix_HaltChannel(int channel);
extern DECLSPEC int SDLCALL Mix_HaltGroup(int tag);
extern DECLSPEC int SDLCALL Mix_HaltMusic(void);
/* Change the expiration delay for a particular channel.
The sample will stop playing after the 'ticks' milliseconds have elapsed,
or remove the expiration if 'ticks' is -1
*/
extern DECLSPEC int SDLCALL Mix_ExpireChannel(int channel, int ticks);
/* Halt a channel, fading it out progressively till it's silent
The ms parameter indicates the number of milliseconds the fading
will take.
*/
extern DECLSPEC int SDLCALL Mix_FadeOutChannel(int which, int ms);
extern DECLSPEC int SDLCALL Mix_FadeOutGroup(int tag, int ms);
extern DECLSPEC int SDLCALL Mix_FadeOutMusic(int ms);
/* Query the fading status of a channel */
extern DECLSPEC Mix_Fading SDLCALL Mix_FadingMusic(void);
extern DECLSPEC Mix_Fading SDLCALL Mix_FadingChannel(int which);
/* Pause/Resume a particular channel */
extern DECLSPEC void SDLCALL Mix_Pause(int channel);
extern DECLSPEC void SDLCALL Mix_Resume(int channel);
extern DECLSPEC int SDLCALL Mix_Paused(int channel);
/* Pause/Resume the music stream */
extern DECLSPEC void SDLCALL Mix_PauseMusic(void);
extern DECLSPEC void SDLCALL Mix_ResumeMusic(void);
extern DECLSPEC void SDLCALL Mix_RewindMusic(void);
extern DECLSPEC int SDLCALL Mix_PausedMusic(void);
/* Set the current position in the music stream.
This returns 0 if successful, or -1 if it failed or isn't implemented.
This function is only implemented for MOD music formats (set pattern
order number) and for OGG, FLAC, MP3_MAD, and MODPLUG music (set
position in seconds), at the moment.
*/
extern DECLSPEC int SDLCALL Mix_SetMusicPosition(double position);
/* Check the status of a specific channel.
If the specified channel is -1, check all channels.
*/
extern DECLSPEC int SDLCALL Mix_Playing(int channel);
extern DECLSPEC int SDLCALL Mix_PlayingMusic(void);
/* Stop music and set external music playback command */
extern DECLSPEC int SDLCALL Mix_SetMusicCMD(const char *command);
/* Synchro value is set by MikMod from modules while playing */
extern DECLSPEC int SDLCALL Mix_SetSynchroValue(int value);
extern DECLSPEC int SDLCALL Mix_GetSynchroValue(void);
/* Set/Get/Iterate SoundFonts paths to use by supported MIDI backends */
extern DECLSPEC int SDLCALL Mix_SetSoundFonts(const char *paths);
extern DECLSPEC const char* SDLCALL Mix_GetSoundFonts(void);
extern DECLSPEC int SDLCALL Mix_EachSoundFont(int (*function)(const char*, void*), void *data);
/* Get the Mix_Chunk currently associated with a mixer channel
Returns NULL if it's an invalid channel, or there's no chunk associated.
*/
extern DECLSPEC Mix_Chunk * SDLCALL Mix_GetChunk(int channel);
/* Close the mixer, halting all playing audio */
extern DECLSPEC void SDLCALL Mix_CloseAudio(void);
/* We'll use SDL for reporting errors */
#define Mix_SetError SDL_SetError
#define Mix_GetError SDL_GetError
/* Ends C function definitions when using C++ */
#ifdef __cplusplus
}
#endif
#include "SDL/close_code.h"
#endif /* _SDL_MIXER_H */
|
vrum/ASCIIWar
|
dep/SDL_mixer-1.2.12/SDL_mixer.h
|
C
|
bsd-3-clause
| 27,767
|
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magentocommerce.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magentocommerce.com for more information.
*
* @category Mage
* @package Mage_Rss
* @copyright Copyright (c) 2011 Magento Inc. (http://www.magentocommerce.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
 * Catalog category RSS feed block
 *
 * @category   Mage
 * @package    Mage_Rss
 * @author     Magento Core Team <core@magentocommerce.com>
 */
class Mage_Rss_Block_Catalog_Category extends Mage_Rss_Block_Catalog_Abstract
{
    /**
     * Set up block-level caching for the generated feed.
     *
     * The cache key is built from the requested category id ('cid'), the
     * requested store id and the current customer session id, so every
     * (category, store, customer) combination is cached independently.
     */
    protected function _construct()
    {
        /*
        * setting cache to save the rss for 10 minutes
        */
        $this->setCacheKey('rss_catalog_category_'
            . $this->getRequest()->getParam('cid') . '_'
            . $this->getRequest()->getParam('store_id') . '_'
            . Mage::getModel('customer/session')->getId()
        );
        // 600 seconds = 10 minutes
        $this->setCacheLifetime(600);
    }

    /**
     * Render the RSS XML for the requested category.
     *
     * Loads the category given by the 'cid' request parameter, prepares its
     * product collection through the catalog layer, and emits the 50 most
     * recently updated catalog-visible products as feed entries.
     * When no valid category id is supplied (or the load fails) the feed is
     * still rendered, just without header or entries.
     *
     * @return string generated RSS XML
     */
    protected function _toHtml()
    {
        $categoryId = $this->getRequest()->getParam('cid');
        $storeId = $this->_getStoreId();
        $rssObj = Mage::getModel('rss/rss');
        if ($categoryId) {
            $category = Mage::getModel('catalog/category')->load($categoryId);
            if ($category && $category->getId()) {
                $layer = Mage::getSingleton('catalog/layer')->setStore($storeId);
                //want to load all products no matter anchor or not
                $category->setIsAnchor(true);
                $newurl = $category->getUrl();
                $title = $category->getName();
                $data = array('title' => $title,
                    'description' => $title,
                    'link'        => $newurl,
                    'charset'     => 'UTF-8',
                );
                $rssObj->_addHeader($data);

                // Child categories: only active ones, restricted to direct
                // children; product counts are attached below via
                // addCountToCategories().
                $_collection = $category->getCollection();
                $_collection->addAttributeToSelect('url_key')
                    ->addAttributeToSelect('name')
                    ->addAttributeToSelect('is_anchor')
                    ->addAttributeToFilter('is_active',1)
                    ->addIdFilter($category->getChildren())
                    ->load()
                ;
                $productCollection = Mage::getModel('catalog/product')->getCollection();
                $currentCategory = $layer->setCurrentCategory($category);
                $layer->prepareProductCollection($productCollection);
                $productCollection->addCountToCategories($_collection);
                $category->getProductCollection()->setStoreId($storeId);
                /*
                only load latest 50 products
                */
                $_productCollection = $currentCategory
                    ->getProductCollection()
                    ->addAttributeToSort('updated_at','desc')
                    ->setVisibility(Mage::getSingleton('catalog/product_visibility')->getVisibleInCatalogIds())
                    ->setCurPage(1)
                    ->setPageSize(50)
                ;
                if ($_productCollection->getSize()>0) {
                    $args = array('rssObj' => $rssObj);
                    foreach ($_productCollection as $_product) {
                        $args['product'] = $_product;
                        $this->addNewItemXmlCallback($args);
                    }
                }
            }
        }
        return $rssObj->createRssXml();
    }

    /**
     * Preparing data and adding to rss object
     *
     * Fires the 'rss_catalog_category_xml_callback' event so observers can
     * veto the product entirely (setAllowedInRss(false)) or hide its price
     * (setAllowedPriceInRss(false)) before the entry is added.
     *
     * @param array $args array with 'product' and 'rssObj' keys
     */
    public function addNewItemXmlCallback($args)
    {
        $product = $args['product'];
        // Defaults: include the product and its price unless an observer of
        // the event below disables either flag.
        $product->setAllowedInRss(true);
        $product->setAllowedPriceInRss(true);
        Mage::dispatchEvent('rss_catalog_category_xml_callback', $args);
        if (!$product->getAllowedInRss()) {
            return;
        }
        // Entry body: 75x75 thumbnail linked to the product page, plus the
        // product description (and optionally the price block), laid out
        // with a simple two-cell table.
        $description = '<table><tr>'
            . '<td><a href="'.$product->getProductUrl().'"><img src="'
            . $this->helper('catalog/image')->init($product, 'thumbnail')->resize(75, 75)
            . '" border="0" align="left" height="75" width="75"></a></td>'
            . '<td style="text-decoration:none;">' . $product->getDescription();
        if ($product->getAllowedPriceInRss()) {
            $description.= $this->getPriceHtml($product,true);
        }
        $description .= '</td></tr></table>';
        $rssObj = $args['rssObj'];
        $data = array(
            'title'       => $product->getName(),
            'link'        => $product->getProductUrl(),
            'description' => $description,
        );
        $rssObj->_addEntry($data);
    }
}
|
5452/durex
|
includes/src/Mage_Rss_Block_Catalog_Category.php
|
PHP
|
bsd-3-clause
| 5,378
|
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>istreambuf::headers</title>
<link rel="stylesheet" href="../../../../boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.73.2">
<link rel="start" href="../../../../index.html" title="Urdl">
<link rel="up" href="../istreambuf.html" title="istreambuf">
<link rel="prev" href="get_options.html" title="istreambuf::get_options">
<link rel="next" href="is_open.html" title="istreambuf::is_open">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr><td valign="top"><img alt="Urdl C++ Library" width="160" height="60" src="../../../../urdl.png"></td></tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="get_options.html"><img src="../../../../prev.png" alt="Prev"></a><a accesskey="u" href="../istreambuf.html"><img src="../../../../up.png" alt="Up"></a><a accesskey="h" href="../../../../index.html"><img src="../../../../home.png" alt="Home"></a><a accesskey="n" href="is_open.html"><img src="../../../../next.png" alt="Next"></a>
</div>
<div class="section" lang="en">
<div class="titlepage"><div><div><h5 class="title">
<a name="urdl.reference.core.istreambuf.headers"></a><a class="link" href="headers.html" title="istreambuf::headers"> istreambuf::headers</a>
</h5></div></div></div>
<p>
<a class="indexterm" name="id609514"></a>
Gets the protocol-specific headers obtained from the
URL.
</p>
<pre class="programlisting"><span class="identifier">std</span><span class="special">::</span><span class="identifier">string</span> <span class="identifier">headers</span><span class="special">()</span> <span class="keyword">const</span><span class="special">;</span>
</pre>
<p>
<span class="emphasis"><em><span class="bold"><strong>Return Value</strong></span></em></span>
</p>
<p>
A string containing the headers returned with the content from the URL.
The format and interpretation of these headers is specific to the protocol
associated with the URL.
</p>
</div>
<table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr>
<td align="left"></td>
<td align="right"><div class="copyright-footer">Copyright © 2009 Christopher M. Kohlhoff<p>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
</p>
</div></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="get_options.html"><img src="../../../../prev.png" alt="Prev"></a><a accesskey="u" href="../istreambuf.html"><img src="../../../../up.png" alt="Up"></a><a accesskey="h" href="../../../../index.html"><img src="../../../../home.png" alt="Home"></a><a accesskey="n" href="is_open.html"><img src="../../../../next.png" alt="Next"></a>
</div>
</body>
</html>
|
mapbox/jit_datasource
|
urdl/doc/html/urdl/reference/core/istreambuf/headers.html
|
HTML
|
isc
| 3,068
|
/* Styles for the redeem-code page (container: .wrapper #redeemcode). */

/* Panel holding the code-generation controls. */
.wrapper #redeemcode .generate-code-panel {
    background: #f4f4f4;
    padding: 14px;
}
/* Each control group sits inline with spacing between groups. */
.wrapper #redeemcode .generate-code-panel > div {
    display: inline-block;
    font-size: 14px;
    margin-right: 30px;
}
.wrapper #redeemcode .generate-code-panel > div select {
    width: 50px;
    background: #fff;
}
/* Inline hint/validation text next to a control. */
.wrapper #redeemcode .generate-code-panel > div span {
    color: #f36144;
    font-size: 12px;
}
/* The generate button is pushed to the panel's right edge. */
.wrapper #redeemcode .generate-code-panel button {
    float: right;
}
/* Toolbar row between the generation panel and the results. */
.wrapper #redeemcode .tool-panel {
    margin: 15px 0;
}
.wrapper #redeemcode .tool-panel a {
    display: inline-block;
    text-align: center;
}
.wrapper #redeemcode .tool-panel button {
    height: 32px;
    line-height: 32px;
    width: 90px;
}
/* Pagination controls, right-aligned below the list. */
.wrapper #redeemcode .page-panel {
    text-align: right;
    margin-top: 16px;
}
.wrapper #redeemcode .page-panel button,
.wrapper #redeemcode .page-panel span,
.wrapper #redeemcode .page-panel input {
    display: inline-block;
    vertical-align: middle;
}
/* Hidden off-screen element, presumably used for copy-to-clipboard
   (verify against the page's JS). */
.wrapper #copyBar {
    position: absolute;
    top: 0;
    left: 0;
    z-index: -1;
}
|
JavieChan/nanshaCity
|
yejin/static/css/redeemcode.css
|
CSS
|
isc
| 1,030
|
/***************************************************************************
* Copyright (C) 2006 by Dominik Seichter *
* domseichter@web.de *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Library General Public License as *
* published by the Free Software Foundation; either version 2 of the *
* License, or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU Library General Public *
* License along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* In addition, as a special exception, the copyright holders give *
* permission to link the code of portions of this program with the *
* OpenSSL library under certain conditions as described in each *
* individual source file, and distribute linked combinations *
* including the two. *
* You must obey the GNU General Public License in all respects *
* for all of the code used other than OpenSSL. If you modify *
* file(s) with this exception, you may extend this exception to your *
* version of the file(s), but you are not obligated to do so. If you *
* do not wish to do so, delete this exception statement from your *
* version. If you delete this exception statement from all source *
* files in the program, then also delete it here. *
***************************************************************************/
#ifndef _PDF_PAINTER_MM_H_
#define _PDF_PAINTER_MM_H_
#include "podofo/base/PdfDefines.h"
#include "PdfPainter.h"
namespace PoDoFo {
class PdfCanvas;
class PdfFont;
class PdfImage;
class PdfName;
class PdfObject;
class PdfReference;
class PdfStream;
class PdfString;
class PdfXObject;
#ifndef CONVERSION_CONSTANT
/** \def CONVERSION_CONSTANT
 * Conversion constant to convert 1/1000th mm to 1/72 inch
* Internal use only.
*/
#define CONVERSION_CONSTANT 0.002834645669291339
#endif // CONVERSION_CONSTANT
/**
* This class provides an easy to use painter object which allows you to draw on a PDF page
* object.
*
* During all drawing operations, you are still able to access the stream of the object you are
* drawing on directly.
*
* This painter takes all coordinates in 1/1000th mm instead of PDF units.
*
 * Developer note: we use ownership rather than inheritance here, so as to use the same
 * method names as PdfPainter AND avoid compiler confusion on picking the right one.
*
* \see PdfPainter
*/
class PODOFO_DOC_API PdfPainterMM : public PdfPainter {
 public:
    /** Create a new PdfPainterMM object.
     */
    PdfPainterMM() {}

    virtual ~PdfPainterMM();

    /** Set the line width for all stroking operations.
     *  \param lWidth in 1/1000th mm
     */
    inline void SetStrokeWidthMM( long lWidth );

    /** Draw a line with the current color and line settings.
     *  \param lStartX x coordinate of the starting point (1/1000th mm)
     *  \param lStartY y coordinate of the starting point (1/1000th mm)
     *  \param lEndX x coordinate of the ending point (1/1000th mm)
     *  \param lEndY y coordinate of the ending point (1/1000th mm)
     */
    inline void DrawLineMM( long lStartX, long lStartY, long lEndX, long lEndY );

    /** Add a rectangle into the current path
     *  \param lX x coordinate of the rectangle (1/1000th mm)
     *  \param lY y coordinate of the rectangle (1/1000th mm)
     *  \param lWidth width of the rectangle (1/1000th mm)
     *  \param lHeight absolute height of the rectangle (1/1000th mm)
     */
    inline void RectangleMM( long lX, long lY, long lWidth, long lHeight );

    /** Add an ellipse into the current path
     *  \param lX x coordinate of the ellipse (left coordinate, 1/1000th mm)
     *  \param lY y coordinate of the ellipse (top coordinate, 1/1000th mm)
     *  \param lWidth width of the ellipse (1/1000th mm)
     *  \param lHeight absolute height of the ellipse (1/1000th mm)
     */
    inline void EllipseMM( long lX, long lY, long lWidth, long lHeight );

    /** Draw a text string on a page using a given font object.
     *  You have to call SetFont before calling this function.
     *  \param lX the x coordinate (1/1000th mm)
     *  \param lY the y coordinate (1/1000th mm)
     *  \param sText the text string which should be printed
     *
     *  \see PdfPainter::SetFont()
     */
    inline void DrawTextMM( long lX, long lY, const PdfString & sText);

    /** Draw a text string on a page using a given font object.
     *  You have to call SetFont before calling this function.
     *  \param lX the x coordinate (1/1000th mm)
     *  \param lY the y coordinate (1/1000th mm)
     *  \param sText the text string which should be printed (is not allowed to be NULL!)
     *  \param lLen draw only lLen characters of pszText
     *
     *  \see PdfPainter::SetFont()
     */
    inline void DrawTextMM( long lX, long lY, const PdfString & sText, long lLen );

    /** Draw an image on the current page.
     *  \param lX the x coordinate (bottom left position of the image, 1/1000th mm)
     *  \param lY the y coordinate (bottom position of the image, 1/1000th mm)
     *  \param pObject a PdfImage
     *  \param dScaleX optional scaling factor in x direction
     *  \param dScaleY optional scaling factor in y direction
     */
    inline void DrawImageMM( long lX, long lY, PdfImage* pObject, double dScaleX = 1.0, double dScaleY = 1.0);

    /** Draw an XObject on the current page.
     *  \param lX the x coordinate (bottom left position of the XObject, 1/1000th mm)
     *  \param lY the y coordinate (bottom position of the XObject, 1/1000th mm)
     *  \param pObject a PdfXObject
     *  \param dScaleX optional scaling factor in x direction
     *  \param dScaleY optional scaling factor in y direction
     */
    inline void DrawXObjectMM( long lX, long lY, PdfXObject* pObject, double dScaleX = 1.0, double dScaleY = 1.0);

    /** Append a line segment to the current path. Matches the PDF 'l' operator.
     *  This function is useful to construct an own path
     *  for drawing or clipping.
     *  \param lX x position (1/1000th mm)
     *  \param lY y position (1/1000th mm)
     */
    inline void LineToMM( long lX, long lY );

    /** Begin a new path. Matches the PDF 'm' operator.
     *  This function is useful to construct an own path
     *  for drawing or clipping.
     *  \param lX x position (1/1000th mm)
     *  \param lY y position (1/1000th mm)
     */
    inline void MoveToMM( long lX, long lY );
};
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Sets the stroke width given in 1/1000th mm by converting it to PDF
/// units (1/72 inch) and delegating to PdfPainter::SetStrokeWidth.
inline void PdfPainterMM::SetStrokeWidthMM( long lWidth )
{
    const double dWidth = static_cast<double>(lWidth) * CONVERSION_CONSTANT;
    this->SetStrokeWidth( dWidth );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Draws a line whose endpoints are given in 1/1000th mm: each coordinate
/// is converted to PDF units before forwarding to PdfPainter::DrawLine.
inline void PdfPainterMM::DrawLineMM( long lStartX, long lStartY, long lEndX, long lEndY )
{
    const double dX1 = static_cast<double>(lStartX) * CONVERSION_CONSTANT;
    const double dY1 = static_cast<double>(lStartY) * CONVERSION_CONSTANT;
    const double dX2 = static_cast<double>(lEndX)   * CONVERSION_CONSTANT;
    const double dY2 = static_cast<double>(lEndY)   * CONVERSION_CONSTANT;
    this->DrawLine( dX1, dY1, dX2, dY2 );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Adds a rectangle (position and size in 1/1000th mm) to the current
/// path after converting all four values to PDF units.
inline void PdfPainterMM::RectangleMM( long lX, long lY, long lWidth, long lHeight )
{
    const double dX      = static_cast<double>(lX)      * CONVERSION_CONSTANT;
    const double dY      = static_cast<double>(lY)      * CONVERSION_CONSTANT;
    const double dWidth  = static_cast<double>(lWidth)  * CONVERSION_CONSTANT;
    const double dHeight = static_cast<double>(lHeight) * CONVERSION_CONSTANT;
    this->Rectangle( dX, dY, dWidth, dHeight );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Adds an ellipse (position and size in 1/1000th mm) to the current
/// path after converting all four values to PDF units.
inline void PdfPainterMM::EllipseMM( long lX, long lY, long lWidth, long lHeight )
{
    const double dX      = static_cast<double>(lX)      * CONVERSION_CONSTANT;
    const double dY      = static_cast<double>(lY)      * CONVERSION_CONSTANT;
    const double dWidth  = static_cast<double>(lWidth)  * CONVERSION_CONSTANT;
    const double dHeight = static_cast<double>(lHeight) * CONVERSION_CONSTANT;
    this->Ellipse( dX, dY, dWidth, dHeight );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Draws sText at a position given in 1/1000th mm; the coordinates are
/// converted to PDF units before forwarding to PdfPainter::DrawText.
inline void PdfPainterMM::DrawTextMM( long lX, long lY, const PdfString & sText)
{
    const double dX = static_cast<double>(lX) * CONVERSION_CONSTANT;
    const double dY = static_cast<double>(lY) * CONVERSION_CONSTANT;
    this->DrawText( dX, dY, sText );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Length-limited overload: draws the first lLen characters of sText at a
/// position given in 1/1000th mm, converted to PDF units.
inline void PdfPainterMM::DrawTextMM( long lX, long lY, const PdfString & sText, long lLen )
{
    const double dX = static_cast<double>(lX) * CONVERSION_CONSTANT;
    const double dY = static_cast<double>(lY) * CONVERSION_CONSTANT;
    this->DrawText( dX, dY, sText, lLen );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Draws an image anchored at (lX, lY) given in 1/1000th mm; the position
/// is converted to PDF units, the scale factors are passed through as-is.
inline void PdfPainterMM::DrawImageMM( long lX, long lY, PdfImage* pObject, double dScaleX, double dScaleY )
{
    const double dX = static_cast<double>(lX) * CONVERSION_CONSTANT;
    const double dY = static_cast<double>(lY) * CONVERSION_CONSTANT;
    this->DrawImage( dX, dY, pObject, dScaleX, dScaleY );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Draws an XObject anchored at (lX, lY) given in 1/1000th mm; the position
/// is converted to PDF units, the scale factors are passed through as-is.
inline void PdfPainterMM::DrawXObjectMM( long lX, long lY, PdfXObject* pObject, double dScaleX, double dScaleY )
{
    const double dX = static_cast<double>(lX) * CONVERSION_CONSTANT;
    const double dY = static_cast<double>(lY) * CONVERSION_CONSTANT;
    this->DrawXObject( dX, dY, pObject, dScaleX, dScaleY );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Appends a line segment to the current path; the target point is given
/// in 1/1000th mm and converted to PDF units.
inline void PdfPainterMM::LineToMM( long lX, long lY )
{
    const double dX = static_cast<double>(lX) * CONVERSION_CONSTANT;
    const double dY = static_cast<double>(lY) * CONVERSION_CONSTANT;
    this->LineTo( dX, dY );
}
// -----------------------------------------------------
//
// -----------------------------------------------------
/// Begins a new path at a point given in 1/1000th mm, converted to PDF
/// units before forwarding to PdfPainter::MoveTo.
inline void PdfPainterMM::MoveToMM( long lX, long lY )
{
    const double dX = static_cast<double>(lX) * CONVERSION_CONSTANT;
    const double dY = static_cast<double>(lY) * CONVERSION_CONSTANT;
    this->MoveTo( dX, dY );
}
};
#endif // _PDF_PAINTER_MM_H_
|
satya-das/cppparser
|
test/e2e/test_input/podofo/doc/PdfPainterMM.h
|
C
|
mit
| 11,227
|
declare module "svgpath" {
  /**
   * Chainable wrapper around an SVG path string.
   *
   * The module export is both callable and constructible: `svgpath(d)`
   * and `new svgpath(d)` each produce an SvgPath. Every transform method
   * returns an SvgPath so calls can be chained, e.g.
   * `svgpath(d).abs().scale(2).round(2).toString()`.
   */
  interface SvgPath {
    (path: string): SvgPath;
    new (path: string): SvgPath;
    /** Converts all path commands to absolute coordinates. */
    abs(): SvgPath;
    /** Scales by sx horizontally and sy vertically (sy optional). */
    scale(sx: number, sy?: number): SvgPath;
    /** Translates by x horizontally and y vertically (y optional). */
    translate(x: number, y?: number): SvgPath;
    /** Rotates by `angle` degrees; rx/ry optionally give the rotation center. */
    rotate(angle: number, rx?: number, ry?: number): SvgPath;
    /** Skews along the x axis by the given number of degrees. */
    skewX(degrees: number): SvgPath;
    /** Skews along the y axis by the given number of degrees. */
    skewY(degrees: number): SvgPath;
    /** Applies an affine transform given by the six matrix components. */
    matrix(m1: number, m2: number, m3: number, m4: number, m5: number, m6: number): SvgPath;
    /** Applies an SVG transform string, e.g. "rotate(45) scale(2)". */
    transform(str: string): SvgPath;
    /** Expands shorthand curve commands into their full forms. */
    unshort(): SvgPath;
    /** Replaces arc commands with curve approximations. */
    unarc(): SvgPath;
    /**
     * Serializes the path back to an SVG path string.
     * Declared as the primitive `string` (the `String` wrapper-object
     * type is non-idiomatic in declaration files).
     */
    toString(): string;
    /** Rounds all coordinates to the given number of decimal digits. */
    round(precision: number): SvgPath;
    /**
     * Visits every path segment via `iterator`; `x`/`y` are the
     * coordinates passed for the segment (presumably the segment's
     * absolute start point — verify against the library docs).
     */
    iterate(iterator: (segment: any[], index: number, x: number, y: number) => void, keepLazyStack?: boolean): SvgPath;
  }

  const svgPath: SvgPath;
  export = svgPath;
}
|
delicatesther/wereldkaartjes
|
node_modules/svgpath/index.d.ts
|
TypeScript
|
mit
| 771
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.