code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
#include "editor/osm_auth.hpp"

#include "platform/http_client.hpp"

#include "coding/url.hpp"

#include "base/assert.hpp"
#include "base/logging.hpp"
#include "base/string_utils.hpp"

#include <iostream>
#include <map>

// Provides OSM_CONSUMER_KEY / OSM_CONSUMER_SECRET used by ProductionServerAuth()
// — presumably generated at build time; TODO confirm.
#include "private.h"

#include "3party/liboauthcpp/include/liboauthcpp/liboauthcpp.h"

using namespace std;
using platform::HttpClient;

namespace osm
{
constexpr char const * kApiVersion = "/api/0.6";
// Server-side callback endpoints that exchange a social-network access token
// for an authenticated OSM web session.
constexpr char const * kFacebookCallbackPart = "/auth/facebook_access_token/callback?access_token=";
constexpr char const * kGoogleCallbackPart = "/auth/google_oauth2_access_token/callback?access_token=";
// URL-encoded "referer=/oauth/authorize?oauth_token=" — after the social login
// the user lands straight on the OAuth authorization page.
constexpr char const * kFacebookOAuthPart = "/auth/facebook?referer=%2Foauth%2Fauthorize%3Foauth_token%3D";
constexpr char const * kGoogleOAuthPart = "/auth/google?referer=%2Foauth%2Fauthorize%3Foauth_token%3D";

namespace
{
// Extracts the value of the hidden <input name="authenticity_token"> field
// (Rails CSRF token) from an HTML page. Returns an empty string when the
// field or its value attribute cannot be found.
// NOTE: assumes 'value="' appears after the name attribute on the same input
// tag — plain substring scanning, not HTML parsing.
string FindAuthenticityToken(string const & body)
{
  auto pos = body.find("name=\"authenticity_token\"");
  if (pos == string::npos)
    return string();
  string const kValue = "value=\"";
  auto start = body.find(kValue, pos);
  if (start == string::npos)
    return string();
  start += kValue.length();
  auto const end = body.find("\"", start);
  return end == string::npos ? string() : body.substr(start, end - start);
}

// Serializes params into an application/x-www-form-urlencoded body
// ("k1=v1&k2=v2"). Only values are URL-encoded; keys are used verbatim.
string BuildPostRequest(map<string, string> const & params)
{
  string result;
  for (auto it = params.begin(); it != params.end(); ++it)
  {
    if (it != params.begin())
      result += "&";
    result += it->first + "=" + url::UrlEncode(it->second);
  }
  return result;
}
}  // namespace

// static
// A key/secret pair is usable only when both parts are non-empty.
bool OsmOAuth::IsValid(KeySecret const & ks) noexcept
{
  return !(ks.first.empty() || ks.second.empty());
}

// static
// A URL + request-token triple is usable only when all three parts are non-empty.
bool OsmOAuth::IsValid(UrlRequestToken const & urt) noexcept
{
  return !(urt.first.empty() || urt.second.first.empty() || urt.second.second.empty());
}

// baseUrl is the web site (login pages, OAuth endpoints); apiUrl serves the
// editing API (see Request()). They may differ in production.
OsmOAuth::OsmOAuth(string const & consumerKey, string const & consumerSecret, string const & baseUrl,
                   string const & apiUrl) noexcept
  : m_consumerKeySecret(consumerKey, consumerSecret), m_baseUrl(baseUrl), m_apiUrl(apiUrl)
{
}

// static
// Debug builds talk to the IZ test server, release builds to production.
OsmOAuth OsmOAuth::ServerAuth() noexcept
{
#ifdef DEBUG
  return IZServerAuth();
#else
  return ProductionServerAuth();
#endif
}

// static
// Same as ServerAuth() but pre-loaded with an already-obtained user token.
OsmOAuth OsmOAuth::ServerAuth(KeySecret const & userKeySecret) noexcept
{
  OsmOAuth auth = ServerAuth();
  auth.SetKeySecret(userKeySecret);
  return auth;
}

// static
// Third-party OSM test instance; credentials are intentionally public test keys.
OsmOAuth OsmOAuth::IZServerAuth() noexcept
{
  constexpr char const * kIZTestServer = "http://test.osmz.ru";
  constexpr char const * kIZConsumerKey = "F0rURWssXDYxtm61279rHdyu3iSLYSP3LdF6DL3Y";
  constexpr char const * kIZConsumerSecret = "IoR5TAedXxcybtd5tIBZqAK07rDRAuFMsQ4nhAP6";
  return OsmOAuth(kIZConsumerKey, kIZConsumerSecret, kIZTestServer, kIZTestServer);
}

// static
// Official OSM development sandbox; credentials are public dev-server keys.
OsmOAuth OsmOAuth::DevServerAuth() noexcept
{
  constexpr char const * kOsmDevServer = "https://master.apis.dev.openstreetmap.org";
  constexpr char const * kOsmDevConsumerKey = "eRtN6yKZZf34oVyBnyaVbsWtHIIeptLArQKdTwN3";
  constexpr char const * kOsmDevConsumerSecret = "lC124mtm2VqvKJjSh35qBpKfrkeIjpKuGe38Hd1H";
  return OsmOAuth(kOsmDevConsumerKey, kOsmDevConsumerSecret, kOsmDevServer, kOsmDevServer);
}

// static
// Production: web site and API live on different hosts; consumer credentials
// come from private.h.
OsmOAuth OsmOAuth::ProductionServerAuth() noexcept
{
  constexpr char const * kOsmMainSiteURL = "https://www.openstreetmap.org";
  constexpr char const * kOsmApiURL = "https://api.openstreetmap.org";
  return OsmOAuth(OSM_CONSUMER_KEY, OSM_CONSUMER_SECRET, kOsmMainSiteURL, kOsmApiURL);
}

void OsmOAuth::SetKeySecret(KeySecret const & keySecret) noexcept { m_tokenKeySecret = keySecret; }

KeySecret const & OsmOAuth::GetKeySecret() const noexcept { return m_tokenKeySecret; }

// Authorized == a complete (key, secret) user token is stored.
bool OsmOAuth::IsAuthorized() const noexcept{ return IsValid(m_tokenKeySecret); }

// Opens a login page and extract a cookie and a secret token.
// Throws NetworkError / UnexpectedRedirect / FetchSessionIdError on failure;
// on success both the combined cookies and the CSRF token are non-empty.
OsmOAuth::SessionID OsmOAuth::FetchSessionId(string const & subUrl, string const & cookies) const
{
  // "?cookie_test=true" is appended only on the very first (cookie-less) hit —
  // presumably to make the server issue a session cookie; TODO confirm.
  string const url = m_baseUrl + subUrl + (cookies.empty() ? "?cookie_test=true" : "");
  HttpClient request(url);
  request.SetCookies(cookies);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("FetchSessionId Network error while connecting to", url));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", url));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(FetchSessionIdError, (DebugPrint(request)));
  SessionID const sid = { request.CombinedCookies(), FindAuthenticityToken(request.ServerResponse()) };
  if (sid.m_cookies.empty() || sid.m_token.empty())
    MYTHROW(FetchSessionIdError, ("Cookies and/or token are empty for request", DebugPrint(request)));
  return sid;
}

// Ends the web session on the server. Throws NetworkError / LogoutUserError.
void OsmOAuth::LogoutUser(SessionID const & sid) const
{
  HttpClient request(m_baseUrl + "/logout");
  request.SetCookies(sid.m_cookies);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("LogoutUser Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(LogoutUserError, (DebugPrint(request)));
}

// Submits the HTML login form with the user's credentials.
// Returns false when login/password are rejected; throws on network/server errors.
bool OsmOAuth::LoginUserPassword(string const & login, string const & password, SessionID const & sid) const
{
  map<string, string> const params =
  {
    {"username", login},
    {"password", password},
    {"referer", "/"},
    {"commit", "Login"},
    {"authenticity_token", sid.m_token}
  };
  HttpClient request(m_baseUrl + "/login");
  request.SetBodyData(BuildPostRequest(params), "application/x-www-form-urlencoded")
      .SetCookies(sid.m_cookies)
      .SetHandleRedirects(false);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("LoginUserPassword Network error while connecting to", request.UrlRequested()));
  // At the moment, automatic redirects handling is buggy on Androids < 4.4.
  // set_handle_redirects(false) works only for Android code, iOS one (and curl)
  // still automatically follow all redirects.
  if (request.ErrorCode() != HTTP::OK && request.ErrorCode() != HTTP::Found)
    MYTHROW(LoginUserPasswordServerError, (DebugPrint(request)));
  // Not redirected page is a 100% signal that login and/or password are invalid.
  if (!request.WasRedirected())
    return false;
  // Check if we were redirected to some 3rd party site.
  if (request.UrlReceived().find(m_baseUrl) != 0)
    MYTHROW(UnexpectedRedirect, (DebugPrint(request)));
  // m_baseUrl + "/login" means login and/or password are invalid.
  return request.ServerResponse().find("/login") == string::npos;
}

// Logs in via a social-network access token (see kFacebookCallbackPart /
// kGoogleCallbackPart). Returns false when the token is rejected; throws on
// network/server errors. Mirrors the redirect logic of LoginUserPassword.
bool OsmOAuth::LoginSocial(string const & callbackPart, string const & socialToken, SessionID const & sid) const
{
  string const url = m_baseUrl + callbackPart + socialToken;
  HttpClient request(url);
  request.SetCookies(sid.m_cookies)
      .SetHandleRedirects(false);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("LoginSocial Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK && request.ErrorCode() != HTTP::Found)
    MYTHROW(LoginSocialServerError, (DebugPrint(request)));
  // Not redirected page is a 100% signal that social login has failed.
  if (!request.WasRedirected())
    return false;
  // Check if we were redirected to some 3rd party site.
  if (request.UrlReceived().find(m_baseUrl) != 0)
    MYTHROW(UnexpectedRedirect, (DebugPrint(request)));
  // m_baseUrl + "/login" means login and/or password are invalid.
  return request.ServerResponse().find("/login") == string::npos;
}

// Fakes a buttons press to automatically accept requested permissions.
// Returns the oauth_verifier (the "PIN") parsed from the callback URL.
// NOTE(review): unlike the other flows, the HTTP status code of the POST is
// never checked here — only the verifier's presence in the redirect URL.
string OsmOAuth::SendAuthRequest(string const & requestTokenKey, SessionID const & lastSid) const
{
  // We have to get a new CSRF token, using existing cookies to open the correct page.
  // NOTE: const-ref bound to a temporary — lifetime is extended to the end of
  // this function, so this is safe.
  SessionID const & sid = FetchSessionId("/oauth/authorize?oauth_token=" + requestTokenKey, lastSid.m_cookies);
  map<string, string> const params =
  {
    {"oauth_token", requestTokenKey},
    {"oauth_callback", ""},
    {"authenticity_token", sid.m_token},
    {"allow_read_prefs", "yes"},
    {"allow_write_api", "yes"},
    {"allow_write_gpx", "yes"},
    {"allow_write_notes", "yes"},
    {"commit", "Save changes"}
  };
  HttpClient request(m_baseUrl + "/oauth/authorize");
  request.SetBodyData(BuildPostRequest(params), "application/x-www-form-urlencoded")
      .SetCookies(sid.m_cookies)
      .SetHandleRedirects(false);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("SendAuthRequest Network error while connecting to", request.UrlRequested()));
  string const callbackURL = request.UrlReceived();
  string const vKey = "oauth_verifier=";
  auto const pos = callbackURL.find(vKey);
  if (pos == string::npos)
    MYTHROW(SendAuthRequestError, ("oauth_verifier is not found", DebugPrint(request)));
  auto const end = callbackURL.find("&", pos);
  // When there is no trailing '&', 'end' is npos, which substr() treats as
  // "until the end of the string" — intentional.
  return callbackURL.substr(pos + vKey.length(), end == string::npos ? end : end - pos - vKey.length());
}

// Step 1 of OAuth 1.0a: obtains an unauthorized request token ("oob" = no
// callback URL, out-of-band flow). Throws on network/server errors.
RequestToken OsmOAuth::FetchRequestToken() const
{
  OAuth::Consumer const consumer(m_consumerKeySecret.first, m_consumerKeySecret.second);
  OAuth::Client oauth(&consumer);
  string const requestTokenUrl = m_baseUrl + "/oauth/request_token";
  string const requestTokenQuery = oauth.getURLQueryString(OAuth::Http::Get, requestTokenUrl + "?oauth_callback=oob");
  HttpClient request(requestTokenUrl + "?" + requestTokenQuery);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("FetchRequestToken Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(FetchRequestTokenServerError, (DebugPrint(request)));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", request.UrlRequested()));
  // Throws std::runtime_error.
  OAuth::Token const reqToken = OAuth::Token::extract(request.ServerResponse());
  return { reqToken.key(), reqToken.secret() };
}

// Step 3 of OAuth 1.0a: exchanges the authorized request token + verifier for
// a long-lived access token. Throws on network/server errors.
KeySecret OsmOAuth::FinishAuthorization(RequestToken const & requestToken, string const & verifier) const
{
  OAuth::Consumer const consumer(m_consumerKeySecret.first, m_consumerKeySecret.second);
  OAuth::Token const reqToken(requestToken.first, requestToken.second, verifier);
  OAuth::Client oauth(&consumer, &reqToken);
  string const accessTokenUrl = m_baseUrl + "/oauth/access_token";
  string const queryString = oauth.getURLQueryString(OAuth::Http::Get, accessTokenUrl, "", true);
  HttpClient request(accessTokenUrl + "?" + queryString);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("FinishAuthorization Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(FinishAuthorizationServerError, (DebugPrint(request)));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", request.UrlRequested()));
  OAuth::KeyValuePairs const responseData = OAuth::ParseKeyValuePairs(request.ServerResponse());
  // Throws std::runtime_error.
  OAuth::Token const accessToken = OAuth::Token::extract(responseData);
  return { accessToken.key(), accessToken.secret() };
}

// Given a web session id, fetches an OAuth access token.
// Runs the full three-step flow and logs the web session out afterwards.
KeySecret OsmOAuth::FetchAccessToken(SessionID const & sid) const
{
  // Aquire a request token.
  RequestToken const requestToken = FetchRequestToken();
  // Faking a button press for access rights.
  string const pin = SendAuthRequest(requestToken.first, sid);
  LogoutUser(sid);
  // Got pin, exchange it for the access token.
  return FinishAuthorization(requestToken, pin);
}

// Full login + token acquisition with username/password. On success stores the
// access token in m_tokenKeySecret and returns true; returns false on bad
// credentials; throws on network/server errors.
// NOTE: FetchSessionId() is called without arguments here — default arguments
// are presumably declared in the header; TODO confirm.
bool OsmOAuth::AuthorizePassword(string const & login, string const & password)
{
  SessionID const sid = FetchSessionId();
  if (!LoginUserPassword(login, password, sid))
    return false;
  m_tokenKeySecret = FetchAccessToken(sid);
  return true;
}

// Same as AuthorizePassword but with a Facebook access token.
bool OsmOAuth::AuthorizeFacebook(string const & facebookToken)
{
  SessionID const sid = FetchSessionId();
  if (!LoginSocial(kFacebookCallbackPart, facebookToken, sid))
    return false;
  m_tokenKeySecret = FetchAccessToken(sid);
  return true;
}

// Same as AuthorizePassword but with a Google access token.
bool OsmOAuth::AuthorizeGoogle(string const & googleToken)
{
  SessionID const sid = FetchSessionId();
  if (!LoginSocial(kGoogleCallbackPart, googleToken, sid))
    return false;
  m_tokenKeySecret = FetchAccessToken(sid);
  return true;
}

// Builds a browser URL that sends the user through Facebook login straight to
// the OAuth authorization page; the paired request token is needed later in
// FinishAuthorization.
OsmOAuth::UrlRequestToken OsmOAuth::GetFacebookOAuthURL() const
{
  RequestToken const requestToken = FetchRequestToken();
  string const url = m_baseUrl + kFacebookOAuthPart + requestToken.first;
  return UrlRequestToken(url, requestToken);
}

// Google counterpart of GetFacebookOAuthURL.
OsmOAuth::UrlRequestToken OsmOAuth::GetGoogleOAuthURL() const
{
  RequestToken const requestToken = FetchRequestToken();
  string const url = m_baseUrl + kGoogleOAuthPart + requestToken.first;
  return UrlRequestToken(url, requestToken);
}

// Submits the "forgot password" form for the given e-mail.
// Returns true when the server redirected back to our site (treated as success).
// NOTE(review): this uses find(m_baseUrl) != npos (substring anywhere) while
// every other redirect check in this file uses != 0 (prefix) — verify whether
// the looser check is intentional.
bool OsmOAuth::ResetPassword(string const & email) const
{
  string const kForgotPasswordUrlPart = "/user/forgot-password";
  SessionID const sid = FetchSessionId(kForgotPasswordUrlPart);
  map<string, string> const params =
  {
    {"user[email]", email},
    {"authenticity_token", sid.m_token},
    {"commit", "Reset password"}
  };
  HttpClient request(m_baseUrl + kForgotPasswordUrlPart);
  request.SetBodyData(BuildPostRequest(params), "application/x-www-form-urlencoded");
  request.SetCookies(sid.m_cookies);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("ResetPassword Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(ResetPasswordServerError, (DebugPrint(request)));
  if (request.WasRedirected() && request.UrlReceived().find(m_baseUrl) != string::npos)
    return true;
  return false;
}

// Signed OSM API call. 'method' is the API path (e.g. "/changeset/create"),
// httpMethod is one of GET/POST/PUT/DELETE. Requires a stored user token.
// Returns (HTTP status, response body); throws on network errors, redirects,
// missing token or an unsupported method.
OsmOAuth::Response OsmOAuth::Request(string const & method, string const & httpMethod, string const & body) const
{
  if (!IsValid(m_tokenKeySecret))
    MYTHROW(InvalidKeySecret, ("User token (key and secret) are empty."));
  OAuth::Consumer const consumer(m_consumerKeySecret.first, m_consumerKeySecret.second);
  OAuth::Token const oatoken(m_tokenKeySecret.first, m_tokenKeySecret.second);
  OAuth::Client oauth(&consumer, &oatoken);
  OAuth::Http::RequestType reqType;
  if (httpMethod == "GET")
    reqType = OAuth::Http::Get;
  else if (httpMethod == "POST")
    reqType = OAuth::Http::Post;
  else if (httpMethod == "PUT")
    reqType = OAuth::Http::Put;
  else if (httpMethod == "DELETE")
    reqType = OAuth::Http::Delete;
  else
    MYTHROW(UnsupportedApiRequestMethod, ("Unsupported OSM API request method", httpMethod));
  string url = m_apiUrl + kApiVersion + method;
  // The signed query string returned by liboauthcpp already contains the
  // original query parameters, so any '?...' part is stripped from the URL
  // before re-appending the signed query.
  string const query = oauth.getURLQueryString(reqType, url);
  auto const qPos = url.find('?');
  if (qPos != string::npos)
    url = url.substr(0, qPos);
  HttpClient request(url + "?" + query);
  if (httpMethod != "GET")
    request.SetBodyData(body, "application/xml", httpMethod);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("Request Network error while connecting to", url));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", url));
  return Response(request.ErrorCode(), request.ServerResponse());
}

// Unsigned (anonymous) GET: against the API host when api == true, otherwise
// against the web site. Throws on network errors and redirects.
OsmOAuth::Response OsmOAuth::DirectRequest(string const & method, bool api) const
{
  string const url = api ? m_apiUrl + kApiVersion + method : m_baseUrl + method;
  HttpClient request(url);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("DirectRequest Network error while connecting to", url));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", url));
  return Response(request.ErrorCode(), request.ServerResponse());
}

// Human-readable rendering of a Response: symbolic name for known HTTP codes,
// "NetworkError <n>" for negative codes, "HTTP <n>" otherwise, followed by the
// response body.
string DebugPrint(OsmOAuth::Response const & code)
{
  string r;
  switch (code.first)
  {
  case OsmOAuth::HTTP::OK: r = "OK"; break;
  case OsmOAuth::HTTP::BadXML: r = "BadXML"; break;
  case OsmOAuth::HTTP::BadAuth: r = "BadAuth"; break;
  case OsmOAuth::HTTP::Redacted: r = "Redacted"; break;
  case OsmOAuth::HTTP::NotFound: r = "NotFound"; break;
  case OsmOAuth::HTTP::WrongMethod: r = "WrongMethod"; break;
  case OsmOAuth::HTTP::Conflict: r = "Conflict"; break;
  case OsmOAuth::HTTP::Gone: r = "Gone"; break;
  case OsmOAuth::HTTP::PreconditionFailed: r = "PreconditionFailed"; break;
  case OsmOAuth::HTTP::URITooLong: r = "URITooLong"; break;
  case OsmOAuth::HTTP::TooMuchData: r = "TooMuchData"; break;
  default:
    // No data from server in case of NetworkError.
    if (code.first < 0)
      return "NetworkError " + strings::to_string(code.first);
    r = "HTTP " + strings::to_string(code.first);
  }
  return r + ": " + code.second;
}
}  // namespace osm
darina/omim
editor/osm_auth.cpp
C++
apache-2.0
16,629
/* * Copyright 2017 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jbpm.test.persistence.scripts.quartzmockentities; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.Id; import javax.persistence.IdClass; @Entity(name = "QRTZ_PAUSED_TRIGGER_GRPS") @IdClass(QrtzPausedTriggersId.class) public class QrtzPausedTriggerGrps { @Id @Column(name = "SCHED_NAME") private String schedulerName; @Id @Column(name = "TRIGGER_GROUP") private String triggerGroup; public QrtzPausedTriggerGrps schedulerName(final String schedulerName) { this.schedulerName = schedulerName; return this; } public QrtzPausedTriggerGrps triggerGroup(final String triggerGroup) { this.triggerGroup = triggerGroup; return this; } }
mbiarnes/jbpm
jbpm-test-util/src/main/java/org/jbpm/test/persistence/scripts/quartzmockentities/QrtzPausedTriggerGrps.java
Java
apache-2.0
1,381
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package predicates

import (
	"errors"
	"fmt"
	"os"
	"regexp"
	"strconv"

	"k8s.io/klog"

	"k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	corelisters "k8s.io/client-go/listers/core/v1"
	storagelisters "k8s.io/client-go/listers/storage/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
	"k8s.io/kubernetes/pkg/features"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const (
	// MatchInterPodAffinityPred defines the name of predicate MatchInterPodAffinity.
	MatchInterPodAffinityPred = "MatchInterPodAffinity"
	// CheckVolumeBindingPred defines the name of predicate CheckVolumeBinding.
	CheckVolumeBindingPred = "CheckVolumeBinding"
	// CheckNodeConditionPred defines the name of predicate CheckNodeCondition.
	CheckNodeConditionPred = "CheckNodeCondition"
	// GeneralPred defines the name of predicate GeneralPredicates.
	GeneralPred = "GeneralPredicates"
	// HostNamePred defines the name of predicate HostName.
	HostNamePred = "HostName"
	// PodFitsHostPortsPred defines the name of predicate PodFitsHostPorts.
	PodFitsHostPortsPred = "PodFitsHostPorts"
	// MatchNodeSelectorPred defines the name of predicate MatchNodeSelector.
	MatchNodeSelectorPred = "MatchNodeSelector"
	// PodFitsResourcesPred defines the name of predicate PodFitsResources.
	PodFitsResourcesPred = "PodFitsResources"
	// NoDiskConflictPred defines the name of predicate NoDiskConflict.
	NoDiskConflictPred = "NoDiskConflict"
	// PodToleratesNodeTaintsPred defines the name of predicate PodToleratesNodeTaints.
	PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
	// CheckNodeUnschedulablePred defines the name of predicate CheckNodeUnschedulablePredicate.
	CheckNodeUnschedulablePred = "CheckNodeUnschedulable"
	// PodToleratesNodeNoExecuteTaintsPred defines the name of predicate PodToleratesNodeNoExecuteTaints.
	PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
	// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
	CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
	// CheckServiceAffinityPred defines the name of predicate checkServiceAffinity.
	CheckServiceAffinityPred = "CheckServiceAffinity"
	// MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
	MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
	// MaxGCEPDVolumeCountPred defines the name of predicate MaxGCEPDVolumeCount.
	MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
	// MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
	MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
	// MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached
	MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
	// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
	NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
	// CheckNodeMemoryPressurePred defines the name of predicate CheckNodeMemoryPressure.
	CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure"
	// CheckNodeDiskPressurePred defines the name of predicate CheckNodeDiskPressure.
	CheckNodeDiskPressurePred = "CheckNodeDiskPressure"
	// CheckNodePIDPressurePred defines the name of predicate CheckNodePIDPressure.
	CheckNodePIDPressurePred = "CheckNodePIDPressure"

	// DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE
	// GCE instances can have up to 16 PD volumes attached.
	DefaultMaxGCEPDVolumes = 16
	// DefaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure
	// Larger Azure VMs can actually have much more disks attached.
	// TODO We should determine the max based on VM size
	DefaultMaxAzureDiskVolumes = 16

	// KubeMaxPDVols defines the maximum number of PD Volumes per kubelet
	KubeMaxPDVols = "KUBE_MAX_PD_VOLS"

	// EBSVolumeFilterType defines the filter name for EBSVolumeFilter.
	EBSVolumeFilterType = "EBS"
	// GCEPDVolumeFilterType defines the filter name for GCEPDVolumeFilter.
	GCEPDVolumeFilterType = "GCE"
	// AzureDiskVolumeFilterType defines the filter name for AzureDiskVolumeFilter.
	AzureDiskVolumeFilterType = "AzureDisk"
)

// IMPORTANT NOTE for predicate developers:
// We are using cached predicate result for pods belonging to the same equivalence class.
// So when updating an existing predicate, you should consider whether your change will introduce new
// dependency to attributes of any API object like Pod, Node, Service etc.
// If yes, you are expected to invalidate the cached predicate result for related API object change.
// For example:
// https://github.com/kubernetes/kubernetes/blob/36a218e/plugin/pkg/scheduler/factory/factory.go#L422

// IMPORTANT NOTE: this list contains the ordering of the predicates, if you develop a new predicate
// it is mandatory to add its name to this list.
// Otherwise it won't be processed, see generic_scheduler#podFitsOnNode().
// The order is based on the restrictiveness & complexity of predicates.
// Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md
var (
	predicatesOrdering = []string{CheckNodeConditionPred, CheckNodeUnschedulablePred,
		GeneralPred, HostNamePred, PodFitsHostPortsPred,
		MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
		PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
		CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
		MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
		CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
)

// NodeInfo interface represents anything that can get node object from node ID.
type NodeInfo interface {
	GetNodeInfo(nodeID string) (*v1.Node, error)
}

// PersistentVolumeInfo interface represents anything that can get persistent volume object by PV ID.
type PersistentVolumeInfo interface {
	GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
}

// CachedPersistentVolumeInfo implements PersistentVolumeInfo
type CachedPersistentVolumeInfo struct {
	corelisters.PersistentVolumeLister
}

// Ordering returns the ordering of predicates.
func Ordering() []string {
	return predicatesOrdering
}

// SetPredicatesOrdering sets the ordering of predicates.
// NOTE: no synchronization is visible here — presumably only called during
// scheduler setup before concurrent reads begin; verify with callers.
func SetPredicatesOrdering(names []string) {
	predicatesOrdering = names
}

// GetPersistentVolumeInfo returns a persistent volume object by PV ID.
func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
	return c.Get(pvID)
}

// PersistentVolumeClaimInfo interface represents anything that can get a PVC object in
// specified namespace with specified name.
type PersistentVolumeClaimInfo interface {
	GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
}

// CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo
type CachedPersistentVolumeClaimInfo struct {
	corelisters.PersistentVolumeClaimLister
}

// GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name
func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) {
	return c.PersistentVolumeClaims(namespace).Get(name)
}

// CachedNodeInfo implements NodeInfo
type CachedNodeInfo struct {
	corelisters.NodeLister
}

// GetNodeInfo returns cached data for the node 'id'.
func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
	node, err := c.Get(id)

	// A not-found error is returned as-is so callers can distinguish it;
	// any other lookup failure is wrapped with context.
	if apierrors.IsNotFound(err) {
		return nil, err
	}

	if err != nil {
		return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err)
	}

	return node, nil
}

// StorageClassInfo interface represents anything that can get a storage class object by class name.
type StorageClassInfo interface {
	GetStorageClassInfo(className string) (*storagev1.StorageClass, error)
}

// CachedStorageClassInfo implements StorageClassInfo
type CachedStorageClassInfo struct {
	storagelisters.StorageClassLister
}

// GetStorageClassInfo get StorageClass by class name.
func (c *CachedStorageClassInfo) GetStorageClassInfo(className string) (*storagev1.StorageClass, error) {
	return c.Get(className)
}

// isVolumeConflict reports whether 'volume' conflicts with any volume already
// mounted by 'pod'. Only GCE PD, AWS EBS, ISCSI and RBD volumes are checked.
func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
	// fast path if there is no conflict checking targets.
	if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil && volume.ISCSI == nil {
		return false
	}

	for _, existingVolume := range pod.Spec.Volumes {
		// Same GCE disk mounted by multiple pods conflicts unless all pods mount it read-only.
		if volume.GCEPersistentDisk != nil && existingVolume.GCEPersistentDisk != nil {
			disk, existingDisk := volume.GCEPersistentDisk, existingVolume.GCEPersistentDisk
			if disk.PDName == existingDisk.PDName && !(disk.ReadOnly && existingDisk.ReadOnly) {
				return true
			}
		}

		if volume.AWSElasticBlockStore != nil && existingVolume.AWSElasticBlockStore != nil {
			if volume.AWSElasticBlockStore.VolumeID == existingVolume.AWSElasticBlockStore.VolumeID {
				return true
			}
		}

		if volume.ISCSI != nil && existingVolume.ISCSI != nil {
			iqn := volume.ISCSI.IQN
			eiqn := existingVolume.ISCSI.IQN
			// two ISCSI volumes are same, if they share the same iqn. As iscsi volumes are of type
			// RWO or ROX, we could permit only one RW mount. Same iscsi volume mounted by multiple Pods
			// conflict unless all other pods mount as read only.
			if iqn == eiqn && !(volume.ISCSI.ReadOnly && existingVolume.ISCSI.ReadOnly) {
				return true
			}
		}

		if volume.RBD != nil && existingVolume.RBD != nil {
			mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage
			emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage
			// two RBDs images are the same if they share the same Ceph monitor, are in the same RADOS Pool, and have the same image name
			// only one read-write mount is permitted for the same RBD image.
			// same RBD image mounted by multiple Pods conflicts unless all Pods mount the image read-only
			// NOTE: haveOverlap is defined elsewhere in this package.
			if haveOverlap(mon, emon) && pool == epool && image == eimage &&
				!(volume.RBD.ReadOnly && existingVolume.RBD.ReadOnly) {
				return true
			}
		}
	}

	return false
}

// NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that
// are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume
// can't be scheduled there.
// This is GCE, Amazon EBS, and Ceph RBD specific for now:
// - GCE PD allows multiple mounts as long as they're all read-only
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
// - ISCSI forbids if any two pods share at least same IQN, LUN and Target
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	for _, v := range pod.Spec.Volumes {
		for _, ev := range nodeInfo.Pods() {
			if isVolumeConflict(v, ev) {
				return false, []algorithm.PredicateFailureReason{ErrDiskConflict}, nil
			}
		}
	}
	return true, nil, nil
}

// MaxPDVolumeCountChecker contains information to check the max number of volumes for a predicate.
type MaxPDVolumeCountChecker struct {
	filter         VolumeFilter
	volumeLimitKey v1.ResourceName
	maxVolumeFunc  func(node *v1.Node) int
	pvInfo         PersistentVolumeInfo
	pvcInfo        PersistentVolumeClaimInfo

	// The string below is generated randomly during the struct's initialization.
	// It is used to prefix volumeID generated inside the predicate() method to
	// avoid conflicts with any real volume.
	randomVolumeIDPrefix string
}

// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps
type VolumeFilter struct {
	// Filter normal volumes
	FilterVolume           func(vol *v1.Volume) (id string, relevant bool)
	FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
}

// NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the
// number of volumes which match a filter that it requests, and those that are already present.
//
// The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume
// types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
// the maximum.
func NewMaxPDVolumeCountPredicate(
	filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
	var filter VolumeFilter
	var volumeLimitKey v1.ResourceName

	switch filterName {

	case EBSVolumeFilterType:
		filter = EBSVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey)
	case GCEPDVolumeFilterType:
		filter = GCEPDVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey)
	case AzureDiskVolumeFilterType:
		filter = AzureDiskVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
	default:
		// Fatalf terminates the process; the return below is unreachable but
		// keeps the compiler satisfied.
		klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
			GCEPDVolumeFilterType, AzureDiskVolumeFilterType)
		return nil

	}
	c := &MaxPDVolumeCountChecker{
		filter:               filter,
		volumeLimitKey:       volumeLimitKey,
		maxVolumeFunc:        getMaxVolumeFunc(filterName),
		pvInfo:               pvInfo,
		pvcInfo:              pvcInfo,
		randomVolumeIDPrefix: rand.String(32),
	}

	return c.predicate
}

// getMaxVolumeFunc returns a per-node volume-limit function for the given
// filter type. The KUBE_MAX_PD_VOLS environment variable, when set to a
// positive value, overrides all per-cloud defaults.
func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
	return func(node *v1.Node) int {
		maxVolumesFromEnv := getMaxVolLimitFromEnv()
		if maxVolumesFromEnv > 0 {
			return maxVolumesFromEnv
		}

		var nodeInstanceType string
		for k, v := range node.ObjectMeta.Labels {
			if k == kubeletapis.LabelInstanceType {
				nodeInstanceType = v
			}
		}
		switch filterName {
		case EBSVolumeFilterType:
			return getMaxEBSVolume(nodeInstanceType)
		case GCEPDVolumeFilterType:
			return DefaultMaxGCEPDVolumes
		case AzureDiskVolumeFilterType:
			return DefaultMaxAzureDiskVolumes
		default:
			return -1
		}
	}
}

// getMaxEBSVolume returns the EBS attach limit for the node's instance type:
// Nitro-based instances get the higher Nitro limit, everything else the default.
func getMaxEBSVolume(nodeInstanceType string) int {
	if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok {
		return volumeutil.DefaultMaxEBSNitroVolumeLimit
	}
	return volumeutil.DefaultMaxEBSVolumes
}
// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value
func getMaxVolLimitFromEnv() int {
	if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" {
		if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil {
			klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
		} else if parsedMaxVols <= 0 {
			klog.Errorf("Maximum PD volumes must be a positive value, using default ")
		} else {
			return parsedMaxVols
		}
	}

	// -1 signals "no override"; callers fall back to their own defaults.
	return -1
}

// filterVolumes records, into filteredVolumes, a unique ID for every volume in
// `volumes` that is relevant to this checker's filter. PVC-backed volumes are
// resolved through the bound PV; unresolvable PVCs/PVs are conservatively counted
// against the limit under a synthetic ID.
func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
	for i := range volumes {
		vol := &volumes[i]
		if id, ok := c.filter.FilterVolume(vol); ok {
			filteredVolumes[id] = true
		} else if vol.PersistentVolumeClaim != nil {
			pvcName := vol.PersistentVolumeClaim.ClaimName
			if pvcName == "" {
				return fmt.Errorf("PersistentVolumeClaim had no name")
			}

			// Until we know real ID of the volume use namespace/pvcName as substitute
			// with a random prefix (calculated and stored inside 'c' during initialization)
			// to avoid conflicts with existing volume IDs.
			pvID := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)

			pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
			if err != nil || pvc == nil {
				// if the PVC is not found, log the error and count the PV towards the PV limit
				klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
				filteredVolumes[pvID] = true
				continue
			}

			pvName := pvc.Spec.VolumeName
			if pvName == "" {
				// PVC is not bound. It was either deleted and created again or
				// it was forcefully unbound by admin. The pod can still use the
				// original PV where it was bound to -> log the error and count
				// the PV towards the PV limit
				klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
				filteredVolumes[pvID] = true
				continue
			}

			pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
			if err != nil || pv == nil {
				// if the PV is not found, log the error
				// and count the PV towards the PV limit
				klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
				filteredVolumes[pvID] = true
				continue
			}

			if id, ok := c.filter.FilterPersistentVolume(pv); ok {
				filteredVolumes[id] = true
			}
		}
	}

	return nil
}

// predicate checks whether scheduling the pod would push the number of attached
// volumes of this checker's type past the node's attach limit.
func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// If a pod doesn't have any volume attached to it, the predicate will always be true.
	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
	if len(pod.Spec.Volumes) == 0 {
		return true, nil, nil
	}

	newVolumes := make(map[string]bool)
	if err := c.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
		return false, nil, err
	}

	// quick return
	if len(newVolumes) == 0 {
		return true, nil, nil
	}

	// count unique volumes
	existingVolumes := make(map[string]bool)
	for _, existingPod := range nodeInfo.Pods() {
		if err := c.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil {
			return false, nil, err
		}
	}
	numExistingVolumes := len(existingVolumes)

	// filter out already-mounted volumes
	for k := range existingVolumes {
		if _, ok := newVolumes[k]; ok {
			delete(newVolumes, k)
		}
	}

	numNewVolumes := len(newVolumes)
	maxAttachLimit := c.maxVolumeFunc(nodeInfo.Node())

	if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
		// Prefer the limit the node itself reports in its allocatable resources,
		// when present, over the computed default.
		volumeLimits := nodeInfo.VolumeLimits()
		if maxAttachLimitFromAllocatable, ok := volumeLimits[c.volumeLimitKey]; ok {
			maxAttachLimit = int(maxAttachLimitFromAllocatable)
		}
	}

	if numExistingVolumes+numNewVolumes > maxAttachLimit {
		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
		return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
	}
	if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
		// Publish remaining capacity for the volume-balancing priority function.
		nodeInfo.TransientInfo.TransientLock.Lock()
		defer nodeInfo.TransientInfo.TransientLock.Unlock()
		nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount = maxAttachLimit - numExistingVolumes
		nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes = numNewVolumes
	}
	return true, nil, nil
}

// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes
var EBSVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AWSElasticBlockStore != nil {
			return vol.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if
pv.Spec.AWSElasticBlockStore != nil {
			return pv.Spec.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},
}

// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes
var GCEPDVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.GCEPersistentDisk != nil {
			return vol.GCEPersistentDisk.PDName, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.GCEPersistentDisk != nil {
			return pv.Spec.GCEPersistentDisk.PDName, true
		}
		return "", false
	},
}

// AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes
var AzureDiskVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AzureDisk != nil {
			return vol.AzureDisk.DiskName, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.AzureDisk != nil {
			return pv.Spec.AzureDisk.DiskName, true
		}
		return "", false
	},
}

// VolumeZoneChecker contains information to check the volume zone for a predicate.
type VolumeZoneChecker struct {
	pvInfo    PersistentVolumeInfo
	pvcInfo   PersistentVolumeClaimInfo
	classInfo StorageClassInfo
}

// NewVolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given
// that some volumes may have zone scheduling constraints. The requirement is that any
// volume zone-labels must match the equivalent zone-labels on the node. It is OK for
// the node to have more zone-label constraints (for example, a hypothetical replicated
// volume might allow region-wide access)
//
// Currently this is only supported with PersistentVolumeClaims, and looks to the labels
// only on the bound PersistentVolume.
//
// Working with volumes declared inline in the pod specification (i.e. not
// using a PersistentVolume) is likely to be harder, as it would require
// determining the zone of a volume during scheduling, and that is likely to
// require calling out to the cloud provider. It seems that we are moving away
// from inline volume declarations anyway.
func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) algorithm.FitPredicate {
	c := &VolumeZoneChecker{
		pvInfo:    pvInfo,
		pvcInfo:   pvcInfo,
		classInfo: classInfo,
	}
	return c.predicate
}

// predicate rejects nodes whose zone/region labels conflict with the zone labels
// of any PersistentVolume bound to one of the pod's claims.
func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// If a pod doesn't have any volume attached to it, the predicate will always be true.
	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
	if len(pod.Spec.Volumes) == 0 {
		return true, nil, nil
	}

	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}

	// Collect only the node's zone/region labels; all other labels are irrelevant here.
	nodeConstraints := make(map[string]string)
	for k, v := range node.ObjectMeta.Labels {
		if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion {
			continue
		}
		nodeConstraints[k] = v
	}

	if len(nodeConstraints) == 0 {
		// The node has no zone constraints, so we're OK to schedule.
		// In practice, when using zones, all nodes must be labeled with zone labels.
		// We want to fast-path this case though.
return true, nil, nil } namespace := pod.Namespace manifest := &(pod.Spec) for i := range manifest.Volumes { volume := &manifest.Volumes[i] if volume.PersistentVolumeClaim != nil { pvcName := volume.PersistentVolumeClaim.ClaimName if pvcName == "" { return false, nil, fmt.Errorf("PersistentVolumeClaim had no name") } pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) if err != nil { return false, nil, err } if pvc == nil { return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName) } pvName := pvc.Spec.VolumeName if pvName == "" { if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { scName := v1helper.GetPersistentVolumeClaimClass(pvc) if len(scName) > 0 { class, _ := c.classInfo.GetStorageClassInfo(scName) if class != nil { if class.VolumeBindingMode == nil { return false, nil, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", scName) } if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { // Skip unbound volumes continue } } } } return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName) } pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) if err != nil { return false, nil, err } if pv == nil { return false, nil, fmt.Errorf("PersistentVolume not found: %q", pvName) } for k, v := range pv.ObjectMeta.Labels { if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion { continue } nodeV, _ := nodeConstraints[k] volumeVSet, err := volumeutil.LabelZonesToSet(v) if err != nil { klog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. 
", k, v, err) continue } if !volumeVSet.Has(nodeV) { klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k) return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil } } } } return true, nil, nil } // GetResourceRequest returns a *schedulernodeinfo.Resource that covers the largest // width in each resource dimension. Because init-containers run sequentially, we collect // the max in each dimension iteratively. In contrast, we sum the resource vectors for // regular containers since they run simultaneously. // // Example: // // Pod: // InitContainers // IC1: // CPU: 2 // Memory: 1G // IC2: // CPU: 2 // Memory: 3G // Containers // C1: // CPU: 2 // Memory: 1G // C2: // CPU: 1 // Memory: 1G // // Result: CPU: 3, Memory: 3G func GetResourceRequest(pod *v1.Pod) *schedulernodeinfo.Resource { result := &schedulernodeinfo.Resource{} for _, container := range pod.Spec.Containers { result.Add(container.Resources.Requests) } // take max_resource(sum_pod, any_init_container) for _, container := range pod.Spec.InitContainers { result.SetMaxResource(container.Resources.Requests) } return result } func podName(pod *v1.Pod) string { return pod.Namespace + "/" + pod.Name } // PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod. // First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the // predicate failure reasons if the node has insufficient resources to run the pod. 
func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}

	var predicateFails []algorithm.PredicateFailureReason
	// Pod-count check: scheduling this pod adds one to the node's pod count.
	allowedPodNumber := nodeInfo.AllowedPodNumber()
	if len(nodeInfo.Pods())+1 > allowedPodNumber {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
	}

	// No extended resources should be ignored by default.
	ignoredExtendedResources := sets.NewString()

	var podRequest *schedulernodeinfo.Resource
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		podRequest = predicateMeta.podRequest
		if predicateMeta.ignoredExtendedResources != nil {
			ignoredExtendedResources = predicateMeta.ignoredExtendedResources
		}
	} else {
		// We couldn't parse metadata - fallback to computing it.
		podRequest = GetResourceRequest(pod)
	}
	// A pod requesting nothing can only fail the pod-count check above.
	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.EphemeralStorage == 0 && len(podRequest.ScalarResources) == 0 {
		return len(predicateFails) == 0, predicateFails, nil
	}

	// All failure reasons are accumulated (not short-circuited) so callers can
	// report every insufficient resource at once.
	allocatable := nodeInfo.AllocatableResource()
	if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))
	}
	if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))
	}
	if allocatable.EphemeralStorage < podRequest.EphemeralStorage+nodeInfo.RequestedResource().EphemeralStorage {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceEphemeralStorage, podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage))
	}

	for rName, rQuant := range podRequest.ScalarResources {
		if v1helper.IsExtendedResourceName(rName) {
			// If this resource is one of the extended resources that should be
			// ignored, we will skip checking it.
			if ignoredExtendedResources.Has(string(rName)) {
				continue
			}
		}
		if allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] {
			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ScalarResources[rName], nodeInfo.RequestedResource().ScalarResources[rName], allocatable.ScalarResources[rName]))
		}
	}

	if klog.V(10) {
		if len(predicateFails) == 0 {
			// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
			// not logged. There is visible performance gain from it.
			klog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
				podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
		}
	}
	return len(predicateFails) == 0, predicateFails, nil
}

// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
	// Field selectors (e.g. metadata.name) are resolved up front so terms can
	// match on node fields as well as labels.
	nodeFields := map[string]string{}
	for k, f := range algorithm.NodeFieldSelectorKeys {
		nodeFields[k] = f(node)
	}
	return v1helper.MatchNodeSelectorTerms(nodeSelectorTerms, labels.Set(node.Labels), fields.Set(nodeFields))
}

// podMatchesNodeSelectorAndAffinityTerms checks whether the pod is schedulable onto nodes according to
// the requirements in both NodeAffinity and nodeSelector.
func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
	// Check if node.Labels match pod.Spec.NodeSelector.
// NOTE(review): fragment — resumes inside podMatchesNodeSelectorAndAffinityTerms,
// whose signature is in the previous chunk.
	if len(pod.Spec.NodeSelector) > 0 {
		selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(node.Labels)) {
			return false
		}
	}

	// 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
	// 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
	// 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
	// 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
	// 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
	// 6. non-nil empty NodeSelectorRequirement is not allowed
	nodeAffinityMatches := true
	affinity := pod.Spec.Affinity
	if affinity != nil && affinity.NodeAffinity != nil {
		nodeAffinity := affinity.NodeAffinity
		// if no required NodeAffinity requirements, will do no-op, means select all nodes.
		// TODO: Replace next line with subsequent commented-out line when implement RequiredDuringSchedulingRequiredDuringExecution.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			// if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution == nil && nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			return true
		}

		// Match node selector for requiredDuringSchedulingRequiredDuringExecution.
		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
		// if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
		//	nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms
		//	klog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
		//	nodeAffinityMatches = nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
		// }

		// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
			nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
			klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
			nodeAffinityMatches = nodeAffinityMatches && nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
		}
	}
	return nodeAffinityMatches
}

// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	if podMatchesNodeSelectorAndAffinityTerms(pod, node) {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
}

// PodFitsHost checks if a pod spec node name matches the current node.
func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// Pods without an explicit NodeName fit any host.
	if len(pod.Spec.NodeName) == 0 {
		return true, nil, nil
	}
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	if pod.Spec.NodeName == node.Name {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil
}

// NodeLabelChecker contains information to check node labels for a predicate.
type NodeLabelChecker struct {
	// labels are the label keys to check for presence/absence on the node.
	labels []string
	// presence selects whether the labels are required (true) or forbidden (false).
	presence bool
}

// NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the
// node labels which match a filter that it requests.
func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate {
	labelChecker := &NodeLabelChecker{
		labels:   labels,
		presence: presence,
	}
	return labelChecker.CheckNodeLabelPresence
}

// CheckNodeLabelPresence checks whether all of the specified labels exists on a node or not, regardless of their value
// If "presence" is false, then returns false if any of the requested labels matches any of the node's labels,
// otherwise returns true.
// If "presence" is true, then returns false if any of the requested labels does not match any of the node's labels,
// otherwise returns true.
//
// Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels
// In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected
//
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
// A node may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this node
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}

	var exists bool
	nodeLabels := labels.Set(node.Labels)
	for _, label := range n.labels {
		exists = nodeLabels.Has(label)
		// Fail if a forbidden label is present, or a required label is absent.
		if (exists && !n.presence) || (!exists && n.presence) {
			return false, []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
		}
	}
	return true, nil, nil
}

// ServiceAffinity defines a struct used for create service affinity predicates.
type ServiceAffinity struct {
	podLister     algorithm.PodLister
	serviceLister algorithm.ServiceLister
	nodeInfo      NodeInfo
	// labels are the node-label keys that must be homogeneous across a service.
	labels []string
}

// serviceAffinityMetadataProducer should be run once by the scheduler before looping through the Predicate.
// It is a helper function that
// only should be referenced by NewServiceAffinityPredicate.
func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) {
	if pm.pod == nil {
		klog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
		return
	}

	pm.serviceAffinityInUse = true
	var errSvc, errList error
	// Store services which match the pod.
	pm.serviceAffinityMatchingPodServices, errSvc = s.serviceLister.GetPodServices(pm.pod)
	selector := CreateSelectorFromLabels(pm.pod.Labels)
	allMatches, errList := s.podLister.List(selector)

	// In the future maybe we will return them as part of the function.
	if errSvc != nil || errList != nil {
		klog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
	}
	// consider only the pods that belong to the same namespace
	pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)
}

// NewServiceAffinityPredicate creates a ServiceAffinity.
func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataProducer) {
	affinity := &ServiceAffinity{
		podLister:     podLister,
		serviceLister: serviceLister,
		nodeInfo:      nodeInfo,
		labels:        labels,
	}
	return affinity.checkServiceAffinity, affinity.serviceAffinityMetadataProducer
}

// checkServiceAffinity is a predicate which matches nodes in such a way to force that
// ServiceAffinity.labels are homogenous for pods that are scheduled to a node.
// (i.e. it returns true IFF this pod can be added to this node such that all other pods in
// the same service are running on nodes with the exact same ServiceAffinity.label values).
//
// For example:
// If the first pod of a service was scheduled to a node with label "region=foo",
// all the other subsequent pods belong to the same service will be schedule on
// nodes with the same "region=foo" label.
//
// Details:
//
// If (the svc affinity labels are not a subset of pod's label selectors )
// The pod has all information necessary to check affinity, the pod's label selector is sufficient to calculate
// the match.
// Otherwise:
// Create an "implicit selector" which guarantees pods will land on nodes with similar values
// for the affinity labels.
//
// To do this, we "reverse engineer" a selector by introspecting existing pods running under the same service+namespace.
// These backfilled labels in the selector "L" are defined like so:
// - L is a label that the ServiceAffinity object needs as a matching constraints.
// - L is not defined in the pod itself already.
// - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value.
//
// WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
// For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var services []*v1.Service
	var pods []*v1.Pod
	if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {
		services = pm.serviceAffinityMatchingPodServices
		pods = pm.serviceAffinityMatchingPodList
	} else {
		// Make the predicate resilient in case metadata is missing.
		// (pm from the if-statement initializer is intentionally reused here.)
		pm = &predicateMetadata{pod: pod}
		s.serviceAffinityMetadataProducer(pm)
		pods, services = pm.serviceAffinityMatchingPodList, pm.serviceAffinityMatchingPodServices
	}
	filteredPods := nodeInfo.FilterOutPods(pods)
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	// check if the pod being scheduled has the affinity labels specified in its NodeSelector
	affinityLabels := FindLabelsInSet(s.labels, labels.Set(pod.Spec.NodeSelector))
	// Step 1: If we don't have all constraints, introspect nodes to find the missing constraints.
	if len(s.labels) > len(affinityLabels) {
		if len(services) > 0 {
			if len(filteredPods) > 0 {
				// Backfill the missing labels from the node of an already-scheduled
				// peer pod in the same service.
				nodeWithAffinityLabels, err := s.nodeInfo.GetNodeInfo(filteredPods[0].Spec.NodeName)
				if err != nil {
					return false, nil, err
				}
				AddUnsetLabelsToMap(affinityLabels, s.labels, labels.Set(nodeWithAffinityLabels.Labels))
			}
		}
	}
	// Step 2: Finally complete the affinity predicate based on whatever set of predicates we were able to find.
	if CreateSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil
}

// PodFitsHostPorts checks if a node has free ports for the requested pod ports.
func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var wantPorts []*v1.ContainerPort
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		wantPorts = predicateMeta.podPorts
	} else {
		// We couldn't parse metadata - fallback to computing it.
// NOTE(review): fragment — resumes inside PodFitsHostPorts' metadata-fallback branch
// from the previous chunk.
		wantPorts = schedutil.GetContainerPorts(pod)
	}
	if len(wantPorts) == 0 {
		return true, nil, nil
	}

	existingPorts := nodeInfo.UsedPorts()

	// try to see whether existingPorts and wantPorts will conflict or not
	if portsConflict(existingPorts, wantPorts) {
		return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
	}

	return true, nil, nil
}

// search two arrays and return true if they have at least one common element; return false otherwise
func haveOverlap(a1, a2 []string) bool {
	// Build a set from a1 so the intersection test is O(len(a1)+len(a2)).
	m := map[string]bool{}

	for _, val := range a1 {
		m[val] = true
	}
	for _, val := range a2 {
		if _, ok := m[val]; ok {
			return true
		}
	}

	return false
}

// GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates
// that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need
func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// Failure reasons from both groups are accumulated; only a hard error aborts.
	var predicateFails []algorithm.PredicateFailureReason
	fit, reasons, err := noncriticalPredicates(pod, meta, nodeInfo)
	if err != nil {
		return false, predicateFails, err
	}
	if !fit {
		predicateFails = append(predicateFails, reasons...)
	}

	fit, reasons, err = EssentialPredicates(pod, meta, nodeInfo)
	if err != nil {
		return false, predicateFails, err
	}
	if !fit {
		predicateFails = append(predicateFails, reasons...)
	}

	return len(predicateFails) == 0, predicateFails, nil
}

// noncriticalPredicates are the predicates that only non-critical pods need
func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var predicateFails []algorithm.PredicateFailureReason
	fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
	if err != nil {
		return false, predicateFails, err
	}
	if !fit {
		predicateFails = append(predicateFails, reasons...)
	}

	return len(predicateFails) == 0, predicateFails, nil
}

// EssentialPredicates are the predicates that all pods, including critical pods, need
func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var predicateFails []algorithm.PredicateFailureReason
	fit, reasons, err := PodFitsHost(pod, meta, nodeInfo)
	if err != nil {
		return false, predicateFails, err
	}
	if !fit {
		predicateFails = append(predicateFails, reasons...)
	}

	// TODO: PodFitsHostPorts is essential for now, but kubelet should ideally
	// preempt pods to free up host ports too
	fit, reasons, err = PodFitsHostPorts(pod, meta, nodeInfo)
	if err != nil {
		return false, predicateFails, err
	}
	if !fit {
		predicateFails = append(predicateFails, reasons...)
	}

	fit, reasons, err = PodMatchNodeSelector(pod, meta, nodeInfo)
	if err != nil {
		return false, predicateFails, err
	}
	if !fit {
		predicateFails = append(predicateFails, reasons...)
	}
	return len(predicateFails) == 0, predicateFails, nil
}

// PodAffinityChecker contains information to check pod affinity.
type PodAffinityChecker struct {
	info      NodeInfo
	podLister algorithm.PodLister
}

// NewPodAffinityPredicate creates a PodAffinityChecker.
func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate {
	checker := &PodAffinityChecker{
		info:      info,
		podLister: podLister,
	}
	return checker.InterPodAffinityMatches
}

// InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
// First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the
// predicate failure reasons if the pod cannot be scheduled on the specified node.
func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } if failedPredicates, error := c.satisfiesExistingPodsAntiAffinity(pod, meta, nodeInfo); failedPredicates != nil { failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates) return false, failedPredicates, error } // Now check if <pod> requirements will be satisfied on this node. affinity := pod.Spec.Affinity if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { return true, nil, nil } if failedPredicates, error := c.satisfiesPodsAffinityAntiAffinity(pod, meta, nodeInfo, affinity); failedPredicates != nil { failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates) return false, failedPredicates, error } if klog.V(10) { // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied", podName(pod), node.Name) } return true, nil, nil } // podMatchesPodAffinityTerms checks if the "targetPod" matches the given "terms" // of the "pod" on the given "nodeInfo".Node(). It returns three values: 1) whether // targetPod matches all the terms and their topologies, 2) whether targetPod // matches all the terms label selector and namespaces (AKA term properties), // 3) any error. 
func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) {
	if len(terms) == 0 {
		return false, false, fmt.Errorf("terms array is empty")
	}
	props, err := getAffinityTermProperties(pod, terms)
	if err != nil {
		return false, false, err
	}
	// Cheap check first: namespaces + label selectors of every term.
	if !podMatchesAllAffinityTermProperties(targetPod, props) {
		return false, false, nil
	}
	// Namespace and selector of the terms have matched. Now we check topology of the terms.
	targetPodNode, err := c.info.GetNodeInfo(targetPod.Spec.NodeName)
	if err != nil {
		return false, false, err
	}
	for _, term := range terms {
		if len(term.TopologyKey) == 0 {
			return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
		}
		// Candidate node and the target pod's node must share the same value
		// for the term's topology key.
		if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNode, term.TopologyKey) {
			// Properties matched (second result) but topology did not (first).
			return false, true, nil
		}
	}
	return true, true, nil
}

// GetPodAffinityTerms gets pod affinity terms by a pod affinity object.
func GetPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) {
	if podAffinity != nil {
		if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
			terms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
		}
		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
		//if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
		//	terms = append(terms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
		//}
	}
	return terms
}

// GetPodAntiAffinityTerms gets pod affinity terms by a pod anti-affinity.
func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
	if podAntiAffinity != nil {
		if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
			terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
		}
		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
		//if len(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
		//	terms = append(terms, podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
		//}
	}
	return terms
}

// getMatchingAntiAffinityTopologyPairs calculates the following for "existingPod" on given node:
// (1) Whether it has PodAntiAffinity
// (2) Whether ANY AffinityTerm matches the incoming pod
func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) (*topologyPairsMaps, error) {
	affinity := existingPod.Spec.Affinity
	if affinity == nil || affinity.PodAntiAffinity == nil {
		// No anti-affinity on the existing pod: nothing can conflict.
		return nil, nil
	}

	topologyMaps := newTopologyPairsMaps()
	for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
		namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
		selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
		if err != nil {
			return nil, err
		}
		if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
			// Record the (topologyKey, value) pair only when the node actually
			// carries the term's topology label.
			if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
				pair := topologyPair{key: term.TopologyKey, value: topologyValue}
				topologyMaps.addTopologyPair(pair, existingPod)
			}
		}
	}
	return topologyMaps, nil
}

// getMatchingAntiAffinityTopologyPairsOfPods aggregates the anti-affinity
// topology pairs of all "existingPods" that match the incoming "pod".
// Pods whose node no longer exists are skipped (logged) rather than failing
// the whole computation.
func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.Pod, existingPods []*v1.Pod) (*topologyPairsMaps, error) {
	topologyMaps := newTopologyPairsMaps()

	for _, existingPod := range existingPods {
		existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
		if err != nil {
			if apierrors.IsNotFound(err) {
				klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
				continue
			}
			return nil, err
		}
		existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNode)
		if err != nil {
			return nil, err
		}
		topologyMaps.appendMaps(existingPodTopologyMaps)
	}
	return topologyMaps, nil
}

// Checks if scheduling the pod onto this node would break any anti-affinity
// terms indicated by the existing pods.
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil")
	}
	var topologyMaps *topologyPairsMaps
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		// Fast path: reuse the precomputed anti-affinity map from metadata.
		topologyMaps = predicateMeta.topologyPairsAntiAffinityPodsMap
	} else {
		// Filter out pods whose nodeName is equal to nodeInfo.node.Name, but are not
		// present in nodeInfo. Pods on other nodes pass the filter.
		filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
		if err != nil {
			errMessage := fmt.Sprintf("Failed to get all pods, %+v", err)
			klog.Error(errMessage)
			return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
		}
		if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil {
			errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err)
			klog.Error(errMessage)
			return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
		}
	}

	// Iterate over topology pairs to get any of the pods being affected by
	// the scheduled pod anti-affinity terms
	for topologyKey, topologyValue := range node.Labels {
		if topologyMaps.topologyPairToPods[topologyPair{key: topologyKey, value: topologyValue}] != nil {
			klog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name)
			return ErrExistingPodsAntiAffinityRulesNotMatch, nil
		}
	}
	if klog.V(10) {
		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		klog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.",
			podName(pod), node.Name)
	}
	return nil, nil
}

// nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches
// topology of all the "terms" for the given "pod".
func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool {
	node := nodeInfo.Node()
	for _, term := range terms {
		if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
			pair := topologyPair{key: term.TopologyKey, value: topologyValue}
			if _, ok := topologyPairs.topologyPairToPods[pair]; !ok {
				return false
			}
		} else {
			// Node lacks the topology label entirely: the term cannot match.
			return false
		}
	}
	return true
}

// nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches
// topology of any "term" for the given "pod".
func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool {
	node := nodeInfo.Node()
	for _, term := range terms {
		if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
			pair := topologyPair{key: term.TopologyKey, value: topologyValue}
			if _, ok := topologyPairs.topologyPairToPods[pair]; ok {
				return true
			}
		}
	}
	return false
}

// Checks if scheduling the pod onto this node would break any term of this pod.
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
	meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
	affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return ErrPodAffinityRulesNotMatch, fmt.Errorf("Node is nil")
	}
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		// Fast path using precomputed metadata.
		// Check all affinity terms.
		topologyPairsPotentialAffinityPods := predicateMeta.topologyPairsPotentialAffinityPods
		if affinityTerms := GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 {
			matchExists := c.nodeMatchesAllTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms)
			if !matchExists {
				// This pod may be the first pod in a series that have affinity to themselves. In order
				// to not leave such pods in pending state forever, we check that if no other pod
				// in the cluster matches the namespace and selector of this pod and the pod matches
				// its own terms, then we allow the pod to pass the affinity check.
				if !(len(topologyPairsPotentialAffinityPods.topologyPairToPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) {
					klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
						podName(pod), node.Name)
					return ErrPodAffinityRulesNotMatch, nil
				}
			}
		}

		// Check all anti-affinity terms.
		topologyPairsPotentialAntiAffinityPods := predicateMeta.topologyPairsPotentialAntiAffinityPods
		if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 {
			matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms)
			if matchExists {
				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity",
					podName(pod), node.Name)
				return ErrPodAntiAffinityRulesNotMatch, nil
			}
		}
	} else {
		// We don't have precomputed metadata. We have to follow a slow path to check affinity terms.
		filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
		if err != nil {
			return ErrPodAffinityRulesNotMatch, err
		}

		affinityTerms := GetPodAffinityTerms(affinity.PodAffinity)
		antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)
		matchFound, termsSelectorMatchFound := false, false
		for _, targetPod := range filteredPods {
			// Check all affinity terms.
			if !matchFound && len(affinityTerms) > 0 {
				affTermsMatch, termsSelectorMatch, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, affinityTerms)
				if err != nil {
					errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err)
					klog.Error(errMessage)
					return ErrPodAffinityRulesNotMatch, errors.New(errMessage)
				}
				if termsSelectorMatch {
					termsSelectorMatchFound = true
				}
				if affTermsMatch {
					matchFound = true
				}
			}

			// Check all anti-affinity terms.
			if len(antiAffinityTerms) > 0 {
				antiAffTermsMatch, _, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, antiAffinityTerms)
				if err != nil || antiAffTermsMatch {
					klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v",
						podName(pod), node.Name, err)
					return ErrPodAntiAffinityRulesNotMatch, nil
				}
			}
		}

		if !matchFound && len(affinityTerms) > 0 {
			// We have not been able to find any matches for the pod's affinity terms.
			// This pod may be the first pod in a series that have affinity to themselves. In order
			// to not leave such pods in pending state forever, we check that if no other pod
			// in the cluster matches the namespace and selector of this pod and the pod matches
			// its own terms, then we allow the pod to pass the affinity check.
			if termsSelectorMatchFound {
				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
					podName(pod), node.Name)
				return ErrPodAffinityRulesNotMatch, nil
			}
			// Check if pod matches its own affinity properties (namespace and label selector).
			if !targetPodMatchesAffinityOfPod(pod, pod) {
				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
					podName(pod), node.Name)
				return ErrPodAffinityRulesNotMatch, nil
			}
		}
	}

	if klog.V(10) {
		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.",
			podName(pod), node.Name)
	}
	return nil, nil
}

// CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
	}

	// If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`.
	podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{
		Key:    schedulerapi.TaintNodeUnschedulable,
		Effect: v1.TaintEffectNoSchedule,
	})

	// TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13.
	if nodeInfo.Node().Spec.Unschedulable && !podToleratesUnschedulable {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
	}

	return true, nil, nil
}

// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints
func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
	}

	return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
		// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
	})
}

// PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints
func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
		return t.Effect == v1.TaintEffectNoExecute
	})
}

// podToleratesNodeTaints is the shared implementation: it fits only when all
// node taints selected by "filter" are tolerated by the pod's tolerations.
func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) {
	taints, err := nodeInfo.Taints()
	if err != nil {
		return false, nil, err
	}

	if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, filter) {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
}

// isPodBestEffort checks if pod is scheduled with best-effort QoS
func isPodBestEffort(pod *v1.Pod) bool {
	return v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort
}

// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
// reporting memory pressure condition.
func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var podBestEffort bool
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		podBestEffort = predicateMeta.podBestEffort
	} else {
		// We couldn't parse metadata - fallback to computing it.
		podBestEffort = isPodBestEffort(pod)
	}
	// pod is not BestEffort pod
	if !podBestEffort {
		return true, nil, nil
	}

	// check if node is under memory pressure
	if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
	}
	return true, nil, nil
}

// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
// reporting disk pressure condition.
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// check if node is under disk pressure
	if nodeInfo.DiskPressureCondition() == v1.ConditionTrue {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
	}
	return true, nil, nil
}

// CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node
// reporting pid pressure condition.
func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// check if node is under pid pressure
	if nodeInfo.PIDPressureCondition() == v1.ConditionTrue {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
	}
	return true, nil, nil
}

// CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting out of disk,
// network unavailable and not ready condition. Only node conditions are accounted in this predicate.
func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	reasons := []algorithm.PredicateFailureReason{}
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
	}

	node := nodeInfo.Node()
	for _, cond := range node.Status.Conditions {
		// We consider the node for scheduling only when its:
		// - NodeReady condition status is ConditionTrue,
		// - NodeOutOfDisk condition status is ConditionFalse,
		// - NodeNetworkUnavailable condition status is ConditionFalse.
		if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
			reasons = append(reasons, ErrNodeNotReady)
		} else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
			reasons = append(reasons, ErrNodeOutOfDisk)
		} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
			reasons = append(reasons, ErrNodeNetworkUnavailable)
		}
	}

	// An explicitly cordoned node is also reported as a failure reason here.
	if node.Spec.Unschedulable {
		reasons = append(reasons, ErrNodeUnschedulable)
	}

	return len(reasons) == 0, reasons, nil
}

// VolumeBindingChecker contains information to check a volume binding.
type VolumeBindingChecker struct {
	binder *volumebinder.VolumeBinder
}

// NewVolumeBindingPredicate evaluates if a pod can fit due to the volumes it requests,
// for both bound and unbound PVCs.
//
// For PVCs that are bound, then it checks that the corresponding PV's node affinity is
// satisfied by the given node.
//
// For PVCs that are unbound, it tries to find available PVs that can satisfy the PVC requirements
// and that the PV node affinity is satisfied by the given node.
//
// The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
// PVCs can be matched with an available and node-compatible PV.
func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) algorithm.FitPredicate {
	c := &VolumeBindingChecker{
		binder: binder,
	}
	return c.predicate
}

// predicate implements the volume-binding FitPredicate described on
// NewVolumeBindingPredicate. It is a no-op when the VolumeScheduling feature
// gate is disabled.
func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
		return true, nil, nil
	}

	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}

	unboundSatisfied, boundSatisfied, err := c.binder.Binder.FindPodVolumes(pod, node)
	if err != nil {
		return false, nil, err
	}

	failReasons := []algorithm.PredicateFailureReason{}
	if !boundSatisfied {
		klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
		failReasons = append(failReasons, ErrVolumeNodeConflict)
	}

	if !unboundSatisfied {
		klog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
		failReasons = append(failReasons, ErrVolumeBindConflict)
	}

	if len(failReasons) > 0 {
		return false, failReasons, nil
	}

	// All volumes bound or matching PVs found for all unbound PVCs
	klog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
	return true, nil, nil
}
juanvallejo/kubernetes
pkg/scheduler/algorithm/predicates/predicates.go
GO
apache-2.0
68,284
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"strings"
	"time"
)

// dockerHubTagMeta is the subset of the Docker Hub v2 tag API response that
// this tool consumes.
type dockerHubTagMeta struct {
	// we don't care what's in these -- we just need to be able to count them
	Images      []interface{} `json:"images"`
	LastUpdated string        `json:"last_updated"`
}

// lastUpdatedTime parses the tag's "last_updated" timestamp; on any parse
// failure it returns the zero time.Time rather than an error.
func (meta dockerHubTagMeta) lastUpdatedTime() time.Time {
	t, err := time.Parse(time.RFC3339Nano, meta.LastUpdated)
	if err != nil {
		return time.Time{}
	}
	return t
}

// fetchDockerHubTagMeta fetches tag metadata from the Docker Hub v2 API.
// It is deliberately best-effort: on any HTTP or decode error it returns the
// zero-valued meta instead of an error.
// NOTE(review): the HTTP status code is not checked, so a non-200 body is
// still fed to the decoder — presumably acceptable for best-effort use;
// confirm with callers.
func fetchDockerHubTagMeta(repoTag string) dockerHubTagMeta {
	// latestizeRepoTag (defined elsewhere in this package) appears to ensure
	// the tag portion is present, so the SplitN below yields two parts.
	repoTag = latestizeRepoTag(repoTag)
	parts := strings.SplitN(repoTag, ":", 2)
	repo, tag := parts[0], parts[1]
	var meta dockerHubTagMeta
	resp, err := http.Get(fmt.Sprintf("https://hub.docker.com/v2/repositories/%s/tags/%s/", repo, tag))
	if err != nil {
		return meta
	}
	defer resp.Body.Close()
	err = json.NewDecoder(resp.Body).Decode(&meta)
	if err != nil {
		return meta
	}
	return meta
}

// dockerCreated returns the creation time of a local Docker image via
// `docker inspect` (dockerInspect is defined elsewhere in this package).
// On any failure it warns on stderr and falls back to time.Now().
func dockerCreated(image string) time.Time {
	created, err := dockerInspect("{{.Created}}", image)
	if err != nil {
		fmt.Fprintf(os.Stderr, "warning: error while fetching creation time of %q: %v\n", image, err)
		return time.Now()
	}
	created = strings.TrimSpace(created)
	t, err := time.Parse(time.RFC3339Nano, created)
	if err != nil {
		fmt.Fprintf(os.Stderr, "warning: error while parsing creation time of %q (%q): %v\n", image, created, err)
		return time.Now()
	}
	return t
}
chorrell/official-images
bashbrew/go/src/bashbrew/hub.go
GO
apache-2.0
1,396
using OfficeDevPnP.PowerShell.Commands.Base.PipeBinds;
using Microsoft.SharePoint.Client;
using System.Management.Automation;
using System;
using OfficeDevPnP.Core.Entities;
using Microsoft.SharePoint.Client.Taxonomy;
using System.Collections.Generic;
using OfficeDevPnP.PowerShell.CmdletHelpAttributes;
using System.Linq;

namespace OfficeDevPnP.PowerShell.Commands
{
    // Set-SPODefaultColumnValues: sets per-folder default column values on a
    // document library, for both plain text fields and taxonomy fields.
    [Cmdlet(VerbsCommon.Set, "SPODefaultColumnValues")]
    [CmdletHelp("Sets default column values for a document library", DetailedDescription="Sets default column values for a document library, per folder, or for the root folder if the folder parameter has not been specified. Supports both text and taxonomy fields.")]
    [CmdletExample(Code = "PS:> Set-SPODefaultColumnValues -List Documents -Field TaxKeyword -Value \"Company|Locations|Stockholm\"", SortOrder = 1, Remarks = "Sets a default value for the enterprise keywords field on a library to a term called \"Stockholm\", located in the \"Locations\" term set, which is part of the \"Company\" term group")]
    [CmdletExample(Code = "PS:> Set-SPODefaultColumnValues -List Documents -Field MyTextField -Value \"DefaultValue\"", SortOrder = 2, Remarks = "Sets a default value for the MyTextField text field on a library to a value of \"DefaultValue\"")]
    public class SetDefaultColumnValues : SPOWebCmdlet
    {
        // Target library; resolved via SelectedWeb.GetList below.
        [Parameter(Mandatory = false, ValueFromPipeline = true, Position = 0, HelpMessage = "The ID, Name or Url of the list.")]
        public ListPipeBind List;

        // Field reference: an actual Field object, a Guid, or an internal name.
        [Parameter(Mandatory = true, HelpMessage="The internal name, id or a reference to a field")]
        public FieldPipeBind Field;

        [Parameter(Mandatory = true, HelpMessage="A list of values. In case of a text field the values will be concatenated, separated by a semi-column. In case of a taxonomy field multiple values will added")]
        public string[] Value;

        // Library-relative folder; defaults to the root folder of the library.
        [Parameter(Mandatory = false, HelpMessage="A library relative folder path, if not specified it will set the default column values on the root folder of the library ('/')")]
        public string Folder = "/";

        protected override void ExecuteCmdlet()
        {
            List list = null;
            if (List != null)
            {
                list = SelectedWeb.GetList(List);
            }
            if (list != null)
            {
                // Default column values are only meaningful on document libraries.
                if (list.BaseTemplate == (int)ListTemplateType.DocumentLibrary)
                {
                    Field field = null;
                    // Get the field — resolve from whichever form the pipe bind carried.
                    if (Field.Field != null)
                    {
                        field = Field.Field;

                        // Ensure the properties we read later are loaded.
                        if (!field.IsPropertyAvailable("TypeAsString"))
                        {
                            ClientContext.Load(field, f => f.TypeAsString);
                        }
                        if (!field.IsPropertyAvailable("InternalName"))
                        {
                            ClientContext.Load(field, f => f.InternalName);
                        }
                        ClientContext.Load(field);
                        ClientContext.ExecuteQueryRetry();
                    }
                    else if (Field.Id != Guid.Empty)
                    {
                        field = list.Fields.GetById(Field.Id);
                        ClientContext.Load(field, f => f.InternalName, f => f.TypeAsString);
                        ClientContext.ExecuteQueryRetry();
                    }
                    else if (!string.IsNullOrEmpty(Field.Name))
                    {
                        field = list.Fields.GetByInternalNameOrTitle(Field.Name);
                        ClientContext.Load(field, f => f.InternalName, f => f.TypeAsString);
                        ClientContext.ExecuteQueryRetry();
                    }
                    if (field != null)
                    {
                        IDefaultColumnValue defaultColumnValue = null;
                        if (field.TypeAsString == "Text")
                        {
                            // Text fields: all values are joined into one string.
                            var values = string.Join(";", Value);
                            defaultColumnValue = new DefaultColumnTextValue()
                            {
                                FieldInternalName = field.InternalName,
                                FolderRelativePath = Folder,
                                Text = values
                            };
                        }
                        else
                        {
                            // Non-text fields are treated as taxonomy: each value is a
                            // term path like "Group|TermSet|Term".
                            List<Term> terms = new List<Term>();
                            foreach (var termString in Value)
                            {
                                var term = ClientContext.Site.GetTaxonomyItemByPath(termString);
                                if (term != null)
                                {
                                    terms.Add(term as Term);
                                }
                            }
                            if (terms.Any())
                            {
                                defaultColumnValue = new DefaultColumnTermValue()
                                {
                                    FieldInternalName = field.InternalName,
                                    FolderRelativePath = Folder,
                                };
                                terms.ForEach(t => ((DefaultColumnTermValue)defaultColumnValue).Terms.Add(t));
                            }
                            // NOTE(review): if no term paths resolve, defaultColumnValue
                            // stays null and a null entry is passed to
                            // SetDefaultColumnValues below — confirm the extension method
                            // tolerates this.
                        }
                        list.SetDefaultColumnValues(new List<IDefaultColumnValue>() { defaultColumnValue });
                    }
                }
                else
                {
                    WriteWarning("List is not a document library");
                }
            }
        }
    }
}
JonathanHuss/PnP
Solutions/PowerShell.Commands/Commands/Lists/SetDefaultColumnValues.cs
C#
apache-2.0
5,810
<?php

/**
 * This file is part of the Nette Framework (https://nette.org)
 * Copyright (c) 2004 David Grudl (https://davidgrudl.com)
 */

namespace Nette\Application;

use Nette;


/**
 * Default presenter loader. Maps presenter names such as "Front:Homepage"
 * to class names according to the configured module masks, and instantiates
 * them through an optional factory callable.
 */
class PresenterFactory implements IPresenterFactory
{
	use Nette\SmartObject;

	/** @var array[] of module => splited mask */
	private $mapping = [
		'*' => ['', '*Module\\', '*Presenter'],
		'Nette' => ['NetteModule\\', '*\\', '*Presenter'],
	];

	/** @var array  cache of presenter name => resolved class name */
	private $cache = [];

	/** @var callable  instantiates a presenter from its class name */
	private $factory;


	/**
	 * @param callable function (string $class): IPresenter
	 */
	public function __construct(callable $factory = NULL)
	{
		// Default factory: plain "new $class" with no dependency injection.
		$this->factory = $factory ?: function ($class) { return new $class; };
	}


	/**
	 * Creates new presenter instance.
	 * @param  string presenter name
	 * @return IPresenter
	 */
	public function createPresenter($name)
	{
		return call_user_func($this->factory, $this->getPresenterClass($name));
	}


	/**
	 * Generates and checks presenter class name.
	 * @param  string presenter name (passed by reference: corrected to the canonical casing)
	 * @return string class name
	 * @throws InvalidPresenterException
	 */
	public function getPresenterClass(&$name)
	{
		if (isset($this->cache[$name])) {
			return $this->cache[$name];
		}

		// Name must be alphanumeric (incl. high-bit chars) with ':' module separators.
		if (!is_string($name) || !Nette\Utils\Strings::match($name, '#^[a-zA-Z\x7f-\xff][a-zA-Z0-9\x7f-\xff:]*\z#')) {
			throw new InvalidPresenterException("Presenter name must be alphanumeric string, '$name' is invalid.");
		}

		$class = $this->formatPresenterClass($name);
		if (!class_exists($class)) {
			throw new InvalidPresenterException("Cannot load presenter '$name', class '$class' was not found.");
		}

		// Reflection gives the class name in its declared (canonical) casing.
		$reflection = new \ReflectionClass($class);
		$class = $reflection->getName();

		if (!$reflection->implementsInterface(IPresenter::class)) {
			throw new InvalidPresenterException("Cannot load presenter '$name', class '$class' is not Nette\\Application\\IPresenter implementor.");
		} elseif ($reflection->isAbstract()) {
			throw new InvalidPresenterException("Cannot load presenter '$name', class '$class' is abstract.");
		}

		$this->cache[$name] = $class;

		// Warn (and fix $name in place) when the requested casing differs from the real one.
		if ($name !== ($realName = $this->unformatPresenterClass($class))) {
			trigger_error("Case mismatch on presenter name '$name', correct name is '$realName'.", E_USER_WARNING);
			$name = $realName;
		}

		return $class;
	}


	/**
	 * Sets mapping as pairs [module => mask]
	 * @return static
	 */
	public function setMapping(array $mapping)
	{
		foreach ($mapping as $module => $mask) {
			if (is_string($mask)) {
				// String mask is split into [namespace prefix, module part, presenter part].
				if (!preg_match('#^\\\\?([\w\\\\]*\\\\)?(\w*\*\w*?\\\\)?([\w\\\\]*\*\w*)\z#', $mask, $m)) {
					throw new Nette\InvalidStateException("Invalid mapping mask '$mask'.");
				}
				$this->mapping[$module] = [$m[1], $m[2] ?: '*Module\\', $m[3]];
			} elseif (is_array($mask) && count($mask) === 3) {
				$this->mapping[$module] = [$mask[0] ? $mask[0] . '\\' : '', $mask[1] . '\\', $mask[2]];
			} else {
				throw new Nette\InvalidStateException("Invalid mapping mask for module $module.");
			}
		}
		return $this;
	}


	/**
	 * Formats presenter class name from its name.
	 * @param  string
	 * @return string
	 * @internal
	 */
	public function formatPresenterClass($presenter)
	{
		$parts = explode(':', $presenter);
		// Pick the module-specific mapping when one exists for the first part,
		// otherwise fall back to the '*' mapping.
		$mapping = isset($parts[1], $this->mapping[$parts[0]])
			? $this->mapping[array_shift($parts)]
			: $this->mapping['*'];
		// Intermediate parts use the module pattern; the last part uses the presenter pattern.
		while ($part = array_shift($parts)) {
			$mapping[0] .= str_replace('*', $part, $mapping[$parts ? 1 : 2]);
		}
		return $mapping[0];
	}


	/**
	 * Formats presenter name from class name.
	 * @param  string
	 * @return string|NULL  NULL when the class matches no configured mapping
	 * @internal
	 */
	public function unformatPresenterClass($class)
	{
		foreach ($this->mapping as $module => $mapping) {
			// Turn the mask into a regex: '*' becomes a capturing word group.
			$mapping = str_replace(['\\', '*'], ['\\\\', '(\w+)'], $mapping);
			if (preg_match("#^\\\\?$mapping[0]((?:$mapping[1])*)$mapping[2]\\z#i", $class, $matches)) {
				return ($module === '*' ? '' : $module . ':')
					. preg_replace("#$mapping[1]#iA", '$1:', $matches[1]) . $matches[3];
			}
		}
		return NULL;
	}
}
MasaharuKomuro/kitakupics
vendor/nette/application/src/Application/PresenterFactory.php
PHP
apache-2.0
4,074
/**
 * Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.amazonaws.cognito.sync.devauth.client;

/**
 * Immutable holder for the result of the GetToken call of the sample Cognito
 * developer authentication: the Cognito identity id, the identity pool id and
 * the OpenID token. On a failure response all three are null and only the
 * HTTP code/message carried by {@link Response} are populated.
 */
public class GetTokenResponse extends Response {

    private final String identityId;
    private final String identityPoolId;
    private final String token;

    /**
     * Builds a failure response carrying only the HTTP code and message;
     * no identity data is available.
     */
    public GetTokenResponse(final int responseCode, final String responseMessage) {
        super(responseCode, responseMessage);
        identityId = null;
        identityPoolId = null;
        token = null;
    }

    /**
     * Builds a successful (HTTP 200) response with the returned identity data.
     */
    public GetTokenResponse(final String identityId, final String identityPoolId,
            final String token) {
        super(200, null);
        this.identityId = identityId;
        this.identityPoolId = identityPoolId;
        this.token = token;
    }

    /** @return the OpenID token, or null for a failure response */
    public String getToken() {
        return token;
    }

    /** @return the Cognito identity pool id, or null for a failure response */
    public String getIdentityPoolId() {
        return identityPoolId;
    }

    /** @return the Cognito identity id, or null for a failure response */
    public String getIdentityId() {
        return identityId;
    }
}
lyzxsc/aws-sdk-android-samples
CognitoSyncDemo/src/com/amazonaws/cognito/sync/devauth/client/GetTokenResponse.java
Java
apache-2.0
1,640
/*
 * © Copyright IBM Corp. 2012
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
/*
 * Author: Maire Kehoe (mkehoe@ie.ibm.com)
 * Date: 20 Mar 2012
 * SkipFileContent.java
 */
package com.ibm.xsp.test.framework.setup;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import com.ibm.commons.util.StringUtil;
import com.ibm.xsp.test.framework.AbstractXspTest;
import com.ibm.xsp.test.framework.XspTestUtil;

/**
 * Registry of per-test "skips" (expected failure strings), keyed by
 * "testClassName methodName". A process-wide static instance is installed once
 * via {@link #setStaticSkips(SkipFileContent)} and consulted by tests through
 * {@link #concatSkips(String[], String, String)}; entries that were never
 * looked up can be reported via {@link #getUncheckedSkips()}.
 *
 * @author Maire Kehoe (mkehoe@ie.ibm.com)
 */
public class SkipFileContent {
    /** Empty result for {@link #getUncheckedSkips()} when nothing is unchecked. */
    private static final Object[][] EMPTY_UNCHECKED_ARR = new Object[0][];
    // Sentinel meaning "no skips installed yet"; also guards double-initialization.
    private static SkipFileContent EMPTY_SKIP_FILE = new SkipFileContent();
    private static SkipFileContent staticSkips = EMPTY_SKIP_FILE;

    public static SkipFileContent getStaticSkips() {
        return staticSkips;
    }

    /**
     * Installs the process-wide skips. May only be called while the sentinel
     * is still in place — a second call throws.
     */
    public static void setStaticSkips(SkipFileContent staticSkips) {
        if( SkipFileContent.staticSkips != EMPTY_SKIP_FILE ){
            // in setup, but setup has already occurred.
            throw new IllegalArgumentException("The staticSkips have already been initialized.");
        }
        SkipFileContent.staticSkips = staticSkips;
    }
//    public static void clearStaticSkips(){
//        SkipFileContent.staticSkips = EMPTY_SKIP_FILE;
//    }

    /**
     * Returns {@code skips} plus any statically-registered skips for the given
     * test class/method; never null.
     */
    public static String[] concatSkips(String[] skips, String testClassName, String testMethodName){
        if( null == skips ){
            skips = StringUtil.EMPTY_STRING_ARRAY;
        }
        SkipFileContent content = getStaticSkips();
        if( EMPTY_SKIP_FILE == content ){
            return skips;
        }
        String[] staticSkips = content.getSkips(testClassName, testMethodName);
        if( null == staticSkips ){
            return skips;
        }
        return XspTestUtil.concat(skips, staticSkips);
    }
    /** Convenience overload taking the test instance instead of its class name. */
    public static String[] concatSkips(String[] skips, AbstractXspTest testClassName, String testMethodName){
        return concatSkips(skips, testClassName.getClass().getName(), testMethodName);
    }

    // key "testClassName methodName" => registered skip strings
    private Map<String, String[]> skips;
    // keys that have actually been looked up (for unchecked-skip reporting)
    private List<String> checked;

    public SkipFileContent() {
        super();
    }

    /**
     * Looks up the skips registered for the given test class/method, marking
     * the entry as checked. Returns null when nothing is registered.
     */
    public String[] getSkips(String testClassName, String methodName){
        if( null == skips ){
            return null;
        }
        String key = toKey(testClassName, methodName);
        String[] foundSkips = skips.get(key);
        if( null != foundSkips ){
            // add to checked list
            if( null == checked ){
                checked = new ArrayList<String>(skips.size());
            }
            checked.add(key);
        }
        return foundSkips;
    }

    /**
     * Registers the expected-failure strings for a test class/method;
     * rejects duplicate registration for the same key.
     * @param testClassName fully qualified test class name
     * @param methodName test method name
     * @param fails expected failure strings
     */
    public void addSkips(String testClassName, String methodName, String[] fails) {
        if( null == skips ){
            skips = new HashMap<String, String[]>();
        }
        String key = toKey(testClassName, methodName);
        if( skips.containsKey(key) ){
            throw new IllegalArgumentException("Skips already registered "
                    +"with testClassName=" + testClassName + ", methodName=" + methodName);
        }
        skips.put(key, fails);
    }

    /**
     * Builds the map key; the single space separator is relied on when the
     * key is split apart again in getAllUncheckedSkips().
     */
    private String toKey(String testClassName, String methodName) {
        return testClassName+" "+methodName;
    }

    /** Static accessor for the unchecked skips of the installed instance. */
    public static Object[][] getUncheckedSkips(){
        SkipFileContent content = getStaticSkips();
        if( EMPTY_SKIP_FILE == content ){
            return EMPTY_UNCHECKED_ARR;
        }
        return content.getAllUncheckedSkips();
    }

    /**
     * @return rows of {testClassName, methodName, skips} for every registered
     *         entry that was never looked up via getSkips.
     */
    private Object[][] getAllUncheckedSkips() {
        List<Object[]> unchecked = null;
        if( null != skips ){
            for (Entry<String,String[]> entry : skips.entrySet()) {
                if( null == checked || ! checked.contains(entry.getKey()) ){
                    if( null == unchecked ){
                        unchecked = new ArrayList<Object[]>();
                    }
                    String key = entry.getKey();
                    int separatorIndex = key.indexOf(' ');
                    String testClassName = key.substring(0, separatorIndex);
                    String methodName = key.substring(separatorIndex+1);
                    String[] skips = entry.getValue();
                    unchecked.add(new Object[]{testClassName, methodName, skips});
                }
            }
        }
        if( null == unchecked){
            return EMPTY_UNCHECKED_ARR;
        }
        return unchecked.toArray(new Object[unchecked.size()][]);
    }
}
iharkhukhrakou/XPagesExtensionLibrary
extlib/lwp/openntf/test/eclipse/plugins/com.ibm.xsp.test.framework/src/com/ibm/xsp/test/framework/setup/SkipFileContent.java
Java
apache-2.0
5,361
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.debugger.settings; import com.intellij.debugger.DebuggerBundle; import com.intellij.debugger.ui.JavaDebuggerSupport; import com.intellij.openapi.options.Configurable; import com.intellij.openapi.options.SearchableConfigurable; import com.intellij.openapi.project.Project; import com.intellij.ui.classFilter.ClassFilterEditor; import org.jetbrains.annotations.NotNull; import javax.swing.*; import java.awt.*; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; public class DebuggerSteppingConfigurable implements SearchableConfigurable, Configurable.NoScroll { private JCheckBox myCbStepInfoFiltersEnabled; private JCheckBox myCbSkipSyntheticMethods; private JCheckBox myCbSkipConstructors; private JCheckBox myCbSkipClassLoaders; private ClassFilterEditor mySteppingFilterEditor; private JCheckBox myCbSkipSimpleGetters; private Project myProject; public void reset() { final DebuggerSettings settings = DebuggerSettings.getInstance(); myCbSkipSimpleGetters.setSelected(settings.SKIP_GETTERS); myCbSkipSyntheticMethods.setSelected(settings.SKIP_SYNTHETIC_METHODS); myCbSkipConstructors.setSelected(settings.SKIP_CONSTRUCTORS); myCbSkipClassLoaders.setSelected(settings.SKIP_CLASSLOADERS); myCbStepInfoFiltersEnabled.setSelected(settings.TRACING_FILTERS_ENABLED); mySteppingFilterEditor.setFilters(settings.getSteppingFilters()); 
mySteppingFilterEditor.setEnabled(settings.TRACING_FILTERS_ENABLED); } public void apply() { getSettingsTo(DebuggerSettings.getInstance()); } private void getSettingsTo(DebuggerSettings settings) { settings.SKIP_GETTERS = myCbSkipSimpleGetters.isSelected(); settings.SKIP_SYNTHETIC_METHODS = myCbSkipSyntheticMethods.isSelected(); settings.SKIP_CONSTRUCTORS = myCbSkipConstructors.isSelected(); settings.SKIP_CLASSLOADERS = myCbSkipClassLoaders.isSelected(); settings.TRACING_FILTERS_ENABLED = myCbStepInfoFiltersEnabled.isSelected(); mySteppingFilterEditor.stopEditing(); settings.setSteppingFilters(mySteppingFilterEditor.getFilters()); } public boolean isModified() { final DebuggerSettings currentSettings = DebuggerSettings.getInstance(); final DebuggerSettings debuggerSettings = currentSettings.clone(); getSettingsTo(debuggerSettings); return !debuggerSettings.equals(currentSettings); } public String getDisplayName() { return DebuggerBundle.message("debugger.stepping.configurable.display.name"); } @NotNull public String getHelpTopic() { return "reference.idesettings.debugger.stepping"; } @NotNull public String getId() { return getHelpTopic(); } public Runnable enableSearch(String option) { return null; } public JComponent createComponent() { final JPanel panel = new JPanel(new GridBagLayout()); myProject = JavaDebuggerSupport.getCurrentProject(); myCbSkipSyntheticMethods = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.skip.synthetic.methods")); myCbSkipConstructors = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.skip.constructors")); myCbSkipClassLoaders = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.skip.classloaders")); myCbSkipSimpleGetters = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.skip.simple.getters")); myCbStepInfoFiltersEnabled = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.step.filters.list.header")); 
panel.add(myCbSkipSyntheticMethods, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(0, 0, 0, 0),0, 0)); panel.add(myCbSkipConstructors, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(0, 0, 0, 0),0, 0)); panel.add(myCbSkipClassLoaders, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(0, 0, 0, 0),0, 0)); panel.add(myCbSkipSimpleGetters, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(0, 0, 0, 0),0, 0)); panel.add(myCbStepInfoFiltersEnabled, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(8, 0, 0, 0),0, 0)); mySteppingFilterEditor = new ClassFilterEditor(myProject, null, "reference.viewBreakpoints.classFilters.newPattern"); panel.add(mySteppingFilterEditor, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 1.0, GridBagConstraints.CENTER, GridBagConstraints.BOTH, new Insets(0, 5, 0, 0),0, 0)); myCbStepInfoFiltersEnabled.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { mySteppingFilterEditor.setEnabled(myCbStepInfoFiltersEnabled.isSelected()); } }); return panel; } public void disposeUIResources() { mySteppingFilterEditor = null; myProject = null; } }
romankagan/DDBWorkbench
java/debugger/impl/src/com/intellij/debugger/settings/DebuggerSteppingConfigurable.java
Java
apache-2.0
5,766
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.ui.components; import com.intellij.ide.ui.AntialiasingType; import com.intellij.openapi.util.NlsContexts; import com.intellij.openapi.util.text.StringUtil; import com.intellij.ui.*; import com.intellij.util.ui.GraphicsUtil; import com.intellij.util.ui.HTMLEditorKitBuilder; import com.intellij.util.ui.JBFont; import com.intellij.util.ui.UIUtil; import com.intellij.util.ui.components.JBComponent; import org.intellij.lang.annotations.JdkConstants; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import javax.swing.border.Border; import javax.swing.border.EmptyBorder; import javax.swing.event.HyperlinkListener; import javax.swing.text.BadLocationException; import javax.swing.text.DefaultCaret; import javax.swing.text.EditorKit; import javax.swing.text.html.HTMLEditorKit; import javax.swing.text.html.StyleSheet; import java.awt.*; import java.awt.event.FocusAdapter; import java.awt.event.FocusEvent; import java.util.Collections; public class JBLabel extends JLabel implements AnchorableComponent, JBComponent<JBLabel> { private UIUtil.ComponentStyle myComponentStyle = UIUtil.ComponentStyle.REGULAR; private UIUtil.FontColor myFontColor = UIUtil.FontColor.NORMAL; private JComponent myAnchor; private JEditorPane myEditorPane; private JLabel myIconLabel; private boolean myMultiline; private boolean myAllowAutoWrapping = false; public JBLabel() { } public JBLabel(@NotNull UIUtil.ComponentStyle componentStyle) { setComponentStyle(componentStyle); } public JBLabel(@Nullable Icon image) { super(image); } public JBLabel(@NotNull @NlsContexts.Label String text) { super(text); } public JBLabel(@NotNull @NlsContexts.Label String text, @NotNull UIUtil.ComponentStyle componentStyle) { super(text); setComponentStyle(componentStyle); } public JBLabel(@NotNull 
@NlsContexts.Label String text, @NotNull UIUtil.ComponentStyle componentStyle, @NotNull UIUtil.FontColor fontColor) { super(text); setComponentStyle(componentStyle); setFontColor(fontColor); } public JBLabel(@NotNull @NlsContexts.Label String text, @JdkConstants.HorizontalAlignment int horizontalAlignment) { super(text, horizontalAlignment); } public JBLabel(@Nullable Icon image, @JdkConstants.HorizontalAlignment int horizontalAlignment) { super(image, horizontalAlignment); } public JBLabel(@NotNull @NlsContexts.Label String text, @Nullable Icon icon, @JdkConstants.HorizontalAlignment int horizontalAlignment) { super(text, icon, horizontalAlignment); } public void setComponentStyle(@NotNull UIUtil.ComponentStyle componentStyle) { myComponentStyle = componentStyle; UIUtil.applyStyle(componentStyle, this); } public UIUtil.ComponentStyle getComponentStyle() { return myComponentStyle; } public UIUtil.FontColor getFontColor() { return myFontColor; } public void setFontColor(@NotNull UIUtil.FontColor fontColor) { myFontColor = fontColor; } @Override public Color getForeground() { if (!isEnabled()) { return UIUtil.getLabelDisabledForeground(); } if (myFontColor != null) { return UIUtil.getLabelFontColor(myFontColor); } return super.getForeground(); } @Override public void setForeground(Color fg) { myFontColor = null; super.setForeground(fg); if (myEditorPane != null) { updateEditorPaneStyle(); } } @Override public void setEnabled(boolean enabled) { super.setEnabled(enabled); if (myEditorPane != null) { myEditorPane.setEnabled(enabled); } } @Override public void setAnchor(@Nullable JComponent anchor) { myAnchor = anchor; } @Override public JComponent getAnchor() { return myAnchor; } @Override public Dimension getPreferredSize() { if (myAnchor != null && myAnchor != this) return myAnchor.getPreferredSize(); if (myEditorPane != null) return getLayout().preferredLayoutSize(this); return super.getPreferredSize(); } @Override public Dimension getMinimumSize() { if (myAnchor != 
null && myAnchor != this) return myAnchor.getMinimumSize(); if (myEditorPane != null) return getLayout().minimumLayoutSize(this); return super.getMinimumSize(); } @Override public Dimension getMaximumSize() { if (myAnchor != null && myAnchor != this) return myAnchor.getMaximumSize(); if (myEditorPane != null) { return getLayout().maximumLayoutSize(this); } return super.getMaximumSize(); } @Override public BorderLayout getLayout() { return (BorderLayout)super.getLayout(); } @Override protected void paintComponent(Graphics g) { if (myEditorPane == null) { super.paintComponent(g); } } @Override public void setText(@NlsContexts.Label String text) { super.setText(text); if (myEditorPane != null) { myEditorPane.setText(getText()); updateEditorPaneStyle(); checkMultiline(); updateTextAlignment(); } } @Override public void setIcon(Icon icon) { super.setIcon(icon); if (myIconLabel != null) { myIconLabel.setIcon(icon); updateLayout(); updateTextAlignment(); } } public void setIconWithAlignment(Icon icon, int horizontalAlignment, int verticalAlignment) { super.setIcon(icon); if (myIconLabel != null) { myIconLabel.setIcon(icon); myIconLabel.setHorizontalAlignment(horizontalAlignment); myIconLabel.setVerticalAlignment(verticalAlignment); updateLayout(); updateTextAlignment(); } } @Override public void setFocusable(boolean focusable) { super.setFocusable(focusable); if (myEditorPane != null) { myEditorPane.setFocusable(focusable); } } private void checkMultiline() { String text = getText(); myMultiline = text != null && StringUtil.removeHtmlTags(text).contains(System.lineSeparator()); } @Override public void setFont(Font font) { super.setFont(font); if (myEditorPane != null) { updateEditorPaneStyle(); updateTextAlignment(); } } @Override public void setIconTextGap(int iconTextGap) { super.setIconTextGap(iconTextGap); if (myEditorPane != null) { updateLayout(); } } @Override public void setBounds(int x, int y, int width, int height) { super.setBounds(x, y, width, height); if 
(myEditorPane != null) { updateTextAlignment(); } } @Override public void setVerticalTextPosition(int textPosition) { super.setVerticalTextPosition(textPosition); if (myEditorPane != null) { updateTextAlignment(); } } @Override public void setHorizontalTextPosition(int textPosition) { super.setHorizontalTextPosition(textPosition); if (myEditorPane != null) { updateLayout(); } } private void updateLayout() { setLayout(new BorderLayout(getIcon() == null ? 0 : getIconTextGap(), 0)); int position = getHorizontalTextPosition(); String iconConstraint = getComponentOrientation().isLeftToRight() ? BorderLayout.WEST : BorderLayout.EAST; if (getComponentOrientation().isLeftToRight() && position == SwingConstants.LEADING) iconConstraint = BorderLayout.EAST; if (!getComponentOrientation().isLeftToRight() && position == SwingConstants.TRAILING) iconConstraint = BorderLayout.EAST; if (position == SwingConstants.LEFT) iconConstraint = BorderLayout.EAST; add(myIconLabel, iconConstraint); add(myEditorPane, BorderLayout.CENTER); } @Override public void updateUI() { super.updateUI(); if (myEditorPane != null) { //init inner components again (if any) to provide proper colors when LAF is being changed setCopyable(false); setCopyable(true); } GraphicsUtil.setAntialiasingType(this, AntialiasingType.getAAHintForSwingComponent()); } /** * This listener will be used in 'copyable' mode when a link is updated (clicked, entered, etc.). */ @NotNull protected HyperlinkListener createHyperlinkListener() { return BrowserHyperlinkListener.INSTANCE; } /** * In 'copyable' mode JBLabel has the same appearance but user can select text with mouse and copy it to clipboard with standard shortcut. * By default JBLabel is NOT copyable * Also 'copyable' label supports web hyperlinks (e.g. 
opens browser on click) * * @return 'this' (the same instance) */ public JBLabel setCopyable(boolean copyable) { if (copyable ^ myEditorPane != null) { if (myEditorPane == null) { final JLabel ellipsisLabel = new JBLabel("..."); myIconLabel = new JLabel(getIcon()); myEditorPane = new JEditorPane() { @Override public void paint(Graphics g) { Dimension size = getSize(); boolean paintEllipsis = getPreferredSize().width > size.width && !myMultiline && !myAllowAutoWrapping; if (!paintEllipsis) { super.paint(g); } else { Dimension ellipsisSize = ellipsisLabel.getPreferredSize(); int endOffset = size.width - ellipsisSize.width; try { // do not paint half of the letter endOffset = modelToView(viewToModel(new Point(endOffset, getHeight() / 2)) - 1).x; } catch (BadLocationException ignore) { } Shape oldClip = g.getClip(); g.clipRect(0, 0, endOffset, size.height); super.paint(g); g.setClip(oldClip); g.translate(endOffset, 0); ellipsisLabel.setSize(ellipsisSize); ellipsisLabel.paint(g); g.translate(-endOffset, 0); } } }; myEditorPane.addFocusListener(new FocusAdapter() { @Override public void focusLost(FocusEvent e) { if (myEditorPane == null) return; int caretPosition = myEditorPane.getCaretPosition(); myEditorPane.setSelectionStart(caretPosition); myEditorPane.setSelectionEnd(caretPosition); } }); myEditorPane.setContentType("text/html"); myEditorPane.setEditable(false); myEditorPane.setBackground(UIUtil.TRANSPARENT_COLOR); myEditorPane.setOpaque(false); myEditorPane.addHyperlinkListener(createHyperlinkListener()); ComponentUtil.putClientProperty(myEditorPane, UIUtil.NOT_IN_HIERARCHY_COMPONENTS, Collections.singleton(ellipsisLabel)); myEditorPane.setEditorKit(HTMLEditorKitBuilder.simple()); updateEditorPaneStyle(); if (myEditorPane.getCaret() instanceof DefaultCaret) { ((DefaultCaret)myEditorPane.getCaret()).setUpdatePolicy(DefaultCaret.NEVER_UPDATE); } myEditorPane.setText(getText()); checkMultiline(); myEditorPane.setCaretPosition(0); updateLayout(); updateTextAlignment(); 
// Remove label from tab order because selectable labels doesn't have visible selection state setFocusTraversalPolicyProvider(true); setFocusTraversalPolicy(new DisabledTraversalPolicy()); } else { removeAll(); myEditorPane = null; myIconLabel = null; } } return this; } private void updateEditorPaneStyle() { myEditorPane.setFont(getFont()); myEditorPane.setForeground(getForeground()); EditorKit kit = myEditorPane.getEditorKit(); if (kit instanceof HTMLEditorKit) { StyleSheet css = ((HTMLEditorKit)kit).getStyleSheet(); css.addRule("body, p {" + "color:#" + ColorUtil.toHex(getForeground()) + ";" + "font-family:" + getFont().getFamily() + ";" + "font-size:" + getFont().getSize() + "pt;" + "white-space:" + (myAllowAutoWrapping ? "normal" : "nowrap") + ";}"); } } /** * In 'copyable' mode auto-wrapping is disabled by default. * (In this case you have to markup your HTML with P or BR tags explicitly) */ public JBLabel setAllowAutoWrapping(boolean allowAutoWrapping) { myAllowAutoWrapping = allowAutoWrapping; return this; } public boolean isAllowAutoWrapping() { return myAllowAutoWrapping; } private void updateTextAlignment() { if (myEditorPane == null) return; myEditorPane.setBorder(null); // clear border int position = getVerticalTextPosition(); if (position == TOP) { return; } int preferredHeight = myEditorPane.getPreferredSize().height; int availableHeight = getHeight(); if (availableHeight <= preferredHeight) { return; } // since the 'top' value is in real already-scaled pixels, should use swing's EmptyBorder //noinspection UseDPIAwareBorders myEditorPane.setBorder(new EmptyBorder(position == CENTER ? 
(availableHeight - preferredHeight + 1) / 2 : availableHeight - preferredHeight, 0, 0, 0)); } @Override public JBLabel withBorder(Border border) { setBorder(border); return this; } @Override public JBLabel withFont(JBFont font) { setFont(font); return this; } @Override public JBLabel andTransparent() { setOpaque(false); return this; } @Override public JBLabel andOpaque() { setOpaque(true); return this; } }
smmribeiro/intellij-community
platform/platform-api/src/com/intellij/ui/components/JBLabel.java
Java
apache-2.0
13,575
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.db.lifecycle; import java.io.File; import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicReference; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Function; import com.google.common.base.Predicate; import com.google.common.base.Predicates; import com.google.common.collect.*; import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.db.Directories; import org.apache.cassandra.db.Memtable; import org.apache.cassandra.db.commitlog.ReplayPosition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.db.compaction.OperationType; import org.apache.cassandra.io.sstable.format.SSTableReader; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.metrics.StorageMetrics; import org.apache.cassandra.notifications.*; import org.apache.cassandra.utils.Pair; import org.apache.cassandra.utils.Throwables; import org.apache.cassandra.utils.concurrent.OpOrder; import static com.google.common.base.Predicates.and; import static 
com.google.common.collect.ImmutableSet.copyOf; import static com.google.common.collect.Iterables.filter; import static java.util.Collections.singleton; import static org.apache.cassandra.db.lifecycle.Helpers.*; import static org.apache.cassandra.db.lifecycle.View.permitCompacting; import static org.apache.cassandra.db.lifecycle.View.updateCompacting; import static org.apache.cassandra.db.lifecycle.View.updateLiveSet; import static org.apache.cassandra.utils.Throwables.maybeFail; import static org.apache.cassandra.utils.Throwables.merge; import static org.apache.cassandra.utils.concurrent.Refs.release; import static org.apache.cassandra.utils.concurrent.Refs.selfRefs; public class Tracker { private static final Logger logger = LoggerFactory.getLogger(Tracker.class); public final Collection<INotificationConsumer> subscribers = new CopyOnWriteArrayList<>(); public final ColumnFamilyStore cfstore; final AtomicReference<View> view; public final boolean loadsstables; public Tracker(ColumnFamilyStore cfstore, boolean loadsstables) { this.cfstore = cfstore; this.view = new AtomicReference<>(); this.loadsstables = loadsstables; this.reset(); } public LifecycleTransaction tryModify(SSTableReader sstable, OperationType operationType) { return tryModify(singleton(sstable), operationType); } /** * @return a Transaction over the provided sstables if we are able to mark the given @param sstables as compacted, before anyone else */ public LifecycleTransaction tryModify(Iterable<SSTableReader> sstables, OperationType operationType) { if (Iterables.isEmpty(sstables)) return new LifecycleTransaction(this, operationType, sstables); if (null == apply(permitCompacting(sstables), updateCompacting(emptySet(), sstables))) return null; return new LifecycleTransaction(this, operationType, sstables); } // METHODS FOR ATOMICALLY MODIFYING THE VIEW Pair<View, View> apply(Function<View, View> function) { return apply(Predicates.<View>alwaysTrue(), function); } Throwable apply(Function<View, 
View> function, Throwable accumulate) { try { apply(function); } catch (Throwable t) { accumulate = merge(accumulate, t); } return accumulate; } /** * atomically tests permit against the view and applies function to it, if permit yields true, returning the original; * otherwise the method aborts, returning null */ Pair<View, View> apply(Predicate<View> permit, Function<View, View> function) { while (true) { View cur = view.get(); if (!permit.apply(cur)) return null; View updated = function.apply(cur); if (view.compareAndSet(cur, updated)) return Pair.create(cur, updated); } } Throwable updateSizeTracking(Iterable<SSTableReader> oldSSTables, Iterable<SSTableReader> newSSTables, Throwable accumulate) { if (isDummy()) return accumulate; long add = 0; for (SSTableReader sstable : newSSTables) { if (logger.isTraceEnabled()) logger.trace("adding {} to list of files tracked for {}.{}", sstable.descriptor, cfstore.keyspace.getName(), cfstore.name); try { add += sstable.bytesOnDisk(); } catch (Throwable t) { accumulate = merge(accumulate, t); } } long subtract = 0; for (SSTableReader sstable : oldSSTables) { if (logger.isTraceEnabled()) logger.trace("removing {} from list of files tracked for {}.{}", sstable.descriptor, cfstore.keyspace.getName(), cfstore.name); try { subtract += sstable.bytesOnDisk(); } catch (Throwable t) { accumulate = merge(accumulate, t); } } StorageMetrics.load.inc(add - subtract); cfstore.metric.liveDiskSpaceUsed.inc(add - subtract); // we don't subtract from total until the sstable is deleted, see TransactionLogs.SSTableTidier cfstore.metric.totalDiskSpaceUsed.inc(add); return accumulate; } // SETUP / CLEANUP public void addInitialSSTables(Iterable<SSTableReader> sstables) { if (!isDummy()) setupOnline(sstables); apply(updateLiveSet(emptySet(), sstables)); maybeFail(updateSizeTracking(emptySet(), sstables, null)); // no notifications or backup necessary } public void addSSTables(Iterable<SSTableReader> sstables) { addInitialSSTables(sstables); 
maybeIncrementallyBackup(sstables); notifyAdded(sstables); } /** (Re)initializes the tracker, purging all references. */ @VisibleForTesting public void reset() { view.set(new View( !isDummy() ? ImmutableList.of(new Memtable(cfstore)) : Collections.<Memtable>emptyList(), ImmutableList.<Memtable>of(), Collections.<SSTableReader, SSTableReader>emptyMap(), Collections.<SSTableReader, SSTableReader>emptyMap(), SSTableIntervalTree.empty())); } public Throwable dropSSTablesIfInvalid(Throwable accumulate) { if (!isDummy() && !cfstore.isValid()) accumulate = dropSSTables(accumulate); return accumulate; } public void dropSSTables() { maybeFail(dropSSTables(null)); } public Throwable dropSSTables(Throwable accumulate) { return dropSSTables(Predicates.<SSTableReader>alwaysTrue(), OperationType.UNKNOWN, accumulate); } /** * removes all sstables that are not busy compacting. */ public Throwable dropSSTables(final Predicate<SSTableReader> remove, OperationType operationType, Throwable accumulate) { try (LogTransaction txnLogs = new LogTransaction(operationType, this)) { Pair<View, View> result = apply(view -> { Set<SSTableReader> toremove = copyOf(filter(view.sstables, and(remove, notIn(view.compacting)))); return updateLiveSet(toremove, emptySet()).apply(view); }); Set<SSTableReader> removed = Sets.difference(result.left.sstables, result.right.sstables); assert Iterables.all(removed, remove); // It is important that any method accepting/returning a Throwable never throws an exception, and does its best // to complete the instructions given to it List<LogTransaction.Obsoletion> obsoletions = new ArrayList<>(); accumulate = prepareForObsoletion(removed, txnLogs, obsoletions, accumulate); try { txnLogs.finish(); if (!removed.isEmpty()) { accumulate = markObsolete(obsoletions, accumulate); accumulate = updateSizeTracking(removed, emptySet(), accumulate); accumulate = release(selfRefs(removed), accumulate); // notifySSTablesChanged -> LeveledManifest.promote doesn't like a no-op 
"promotion" accumulate = notifySSTablesChanged(removed, Collections.<SSTableReader>emptySet(), txnLogs.type(), accumulate); } } catch (Throwable t) { accumulate = abortObsoletion(obsoletions, accumulate); accumulate = Throwables.merge(accumulate, t); } } catch (Throwable t) { accumulate = Throwables.merge(accumulate, t); } return accumulate; } /** * Removes every SSTable in the directory from the Tracker's view. * @param directory the unreadable directory, possibly with SSTables in it, but not necessarily. */ public void removeUnreadableSSTables(final File directory) { maybeFail(dropSSTables(new Predicate<SSTableReader>() { public boolean apply(SSTableReader reader) { return reader.descriptor.directory.equals(directory); } }, OperationType.UNKNOWN, null)); } // FLUSHING /** * get the Memtable that the ordered writeOp should be directed to */ public Memtable getMemtableFor(OpOrder.Group opGroup, ReplayPosition replayPosition) { // since any new memtables appended to the list after we fetch it will be for operations started // after us, we can safely assume that we will always find the memtable that 'accepts' us; // if the barrier for any memtable is set whilst we are reading the list, it must accept us. // there may be multiple memtables in the list that would 'accept' us, however we only ever choose // the oldest such memtable, as accepts() only prevents us falling behind (i.e. ensures we don't // assign operations to a memtable that was retired/queued before we started) for (Memtable memtable : view.get().liveMemtables) { if (memtable.accepts(opGroup, replayPosition)) return memtable; } throw new AssertionError(view.get().liveMemtables.toString()); } /** * Switch the current memtable. This atomically appends a new memtable to the end of the list of active memtables, * returning the previously last memtable. It leaves the previous Memtable in the list of live memtables until * discarding(memtable) is called. These two methods must be synchronized/paired, i.e. 
m = switchMemtable * must be followed by discarding(m), they cannot be interleaved. * * @return the previously active memtable */
public Memtable switchMemtable(boolean truncating)
{
    Memtable newMemtable = new Memtable(cfstore);
    Pair<View, View> result = apply(View.switchMemtable(newMemtable));
    // A truncation "renews" the memtable in place, while a regular switch retires
    // the previous one, so subscribers receive different notifications per case.
    if (truncating)
        notifyRenewed(newMemtable);
    else
        notifySwitched(result.left.getCurrentMemtable());

    // result.left is the View from *before* the switch, so this is the previous memtable.
    return result.left.getCurrentMemtable();
}

// Marks the given memtable as flushing in the current View.
public void markFlushing(Memtable memtable)
{
    apply(View.markFlushing(memtable));
}

// Replaces a flushed memtable in the View with the sstables its flush produced
// (possibly none), updates size tracking, and fires add/discard notifications.
public void replaceFlushed(Memtable memtable, Iterable<SSTableReader> sstables)
{
    assert !isDummy();
    if (sstables == null || Iterables.isEmpty(sstables))
    {
        // sstable may be null if we flushed batchlog and nothing needed to be retained
        // if it's null, we don't care what state the cfstore is in, we just replace it and continue
        apply(View.replaceFlushed(memtable, null));
        return;
    }

    sstables.forEach(SSTableReader::setupOnline);

    // back up before creating a new Snapshot (which makes the new one eligible for compaction)
    maybeIncrementallyBackup(sstables);

    apply(View.replaceFlushed(memtable, sstables));

    // Accumulate failures so every step runs before we rethrow via maybeFail below.
    Throwable fail;
    fail = updateSizeTracking(emptySet(), sstables, null);
    // TODO: if we're invalidated, should we notifyadded AND removed, or just skip both?
    fail = notifyAdded(sstables, fail);

    notifyDiscarded(memtable);

    // If the CFS became invalid while we were flushing, drop the sstables again.
    if (!isDummy() && !cfstore.isValid())
        dropSSTables();

    maybeFail(fail);
}

// MISCELLANEOUS public utility calls

// Sstables currently marked as compacting in the live View.
public Set<SSTableReader> getCompacting()
{
    return view.get().compacting;
}

public Iterable<SSTableReader> getUncompacting()
{
    return view.get().sstables(SSTableSet.NONCOMPACTING);
}

// Filters the supplied candidates down to those not currently compacting.
public Iterable<SSTableReader> getUncompacting(Iterable<SSTableReader> candidates)
{
    return view.get().getUncompacting(candidates);
}

// Hard-links the given sstables into their backups directory when incremental
// backups are enabled; a no-op otherwise.
public void maybeIncrementallyBackup(final Iterable<SSTableReader> sstables)
{
    if (!DatabaseDescriptor.isIncrementalBackupsEnabled())
        return;

    for (SSTableReader sstable : sstables)
    {
        File backupsDir = Directories.getBackupsDirectory(sstable.descriptor);
        sstable.createLinks(FileUtils.getCanonicalPath(backupsDir));
    }
}

// NOTIFICATION

// Notifies subscribers of an sstable list change; subscriber exceptions are
// merged into 'accumulate' rather than aborting the remaining notifications.
Throwable notifySSTablesChanged(Collection<SSTableReader> removed, Collection<SSTableReader> added, OperationType compactionType, Throwable accumulate)
{
    INotification notification = new SSTableListChangedNotification(added, removed, compactionType);
    for (INotificationConsumer subscriber : subscribers)
    {
        try
        {
            subscriber.handleNotification(notification, this);
        }
        catch (Throwable t)
        {
            accumulate = merge(accumulate, t);
        }
    }
    return accumulate;
}

// Same accumulate-don't-abort pattern as notifySSTablesChanged, for additions.
Throwable notifyAdded(Iterable<SSTableReader> added, Throwable accumulate)
{
    INotification notification = new SSTableAddedNotification(added);
    for (INotificationConsumer subscriber : subscribers)
    {
        try
        {
            subscriber.handleNotification(notification, this);
        }
        catch (Throwable t)
        {
            accumulate = merge(accumulate, t);
        }
    }
    return accumulate;
}

// Convenience overload that rethrows any subscriber failure immediately.
public void notifyAdded(Iterable<SSTableReader> added)
{
    maybeFail(notifyAdded(added, null));
}

public void notifySSTableRepairedStatusChanged(Collection<SSTableReader> repairStatusesChanged)
{
    INotification notification = new SSTableRepairStatusChanged(repairStatusesChanged);
    for (INotificationConsumer subscriber : subscribers)
        subscriber.handleNotification(notification, this);
}

public void notifyDeleting(SSTableReader deleting)
{
    INotification notification = new SSTableDeletingNotification(deleting);
    for (INotificationConsumer subscriber : subscribers)
        subscriber.handleNotification(notification, this);
}

public void notifyTruncated(long truncatedAt)
{
    INotification notification = new TruncationNotification(truncatedAt);
    for (INotificationConsumer subscriber : subscribers)
        subscriber.handleNotification(notification, this);
}

public void notifyRenewed(Memtable renewed)
{
    notify(new MemtableRenewedNotification(renewed));
}

public void notifySwitched(Memtable previous)
{
    notify(new MemtableSwitchedNotification(previous));
}

public void notifyDiscarded(Memtable discarded)
{
    notify(new MemtableDiscardedNotification(discarded));
}

private void notify(INotification notification)
{
    for (INotificationConsumer subscriber : subscribers)
        subscriber.handleNotification(notification, this);
}

// A dummy tracker has no backing ColumnFamilyStore.
public boolean isDummy()
{
    return cfstore == null;
}

public void subscribe(INotificationConsumer consumer)
{
    subscribers.add(consumer);
}

public void unsubscribe(INotificationConsumer consumer)
{
    subscribers.remove(consumer);
}

private static Set<SSTableReader> emptySet()
{
    return Collections.emptySet();
}

public View getView()
{
    return view.get();
}
}
RyanMagnusson/cassandra
src/java/org/apache/cassandra/db/lifecycle/Tracker.java
Java
apache-2.0
17,572
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.wicket.cdi.testapp; import javax.enterprise.context.Conversation; import javax.inject.Inject; import org.apache.wicket.markup.html.WebPage; import org.apache.wicket.markup.html.basic.Label; import org.apache.wicket.markup.html.link.Link; import org.apache.wicket.model.PropertyModel; import org.apache.wicket.request.mapper.parameter.PageParameters; /** * @author jsarman */ public class TestConversationPage extends WebPage { private static final long serialVersionUID = 1L; @Inject Conversation conversation; @Inject TestConversationBean counter; public TestConversationPage() { this(new PageParameters()); } public TestConversationPage(final PageParameters parameters) { super(parameters); conversation.begin(); System.out.println("Opened Conversion with id = " + conversation.getId()); add(new Label("count", new PropertyModel<String>(this, "counter.countStr"))); add(new Link<Void>("increment") { private static final long serialVersionUID = 1L; @Override public void onClick() { counter.increment(); } }); add(new Link<Void>("next") { private static final long serialVersionUID = 1L; @Override public void onClick() { String pageType = parameters.get("pageType").toString("nonbookmarkable"); 
if ("bookmarkable".equals(pageType.toLowerCase())) setResponsePage(TestNonConversationalPage.class); else if ("hybrid".equals(pageType.toLowerCase())) setResponsePage(TestConversationPage.this); else setResponsePage(new TestNonConversationalPage()); } }); } }
dashorst/wicket
wicket-cdi-1.1/src/test/java/org/apache/wicket/cdi/testapp/TestConversationPage.java
Java
apache-2.0
2,394
/* * Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.transaction.impl.xa; import com.hazelcast.transaction.TransactionContext; import com.hazelcast.transaction.impl.Transaction; public final class TransactionAccessor { private TransactionAccessor() { } public static Transaction getTransaction(TransactionContext ctx) { if (ctx instanceof XATransactionContextImpl) { XATransactionContextImpl ctxImp = (XATransactionContextImpl) ctx; return ctxImp.getTransaction(); } throw new IllegalArgumentException(); } }
emre-aydin/hazelcast
hazelcast/src/main/java/com/hazelcast/transaction/impl/xa/TransactionAccessor.java
Java
apache-2.0
1,173
package action

import (
	"errors"

	boshas "github.com/cloudfoundry/bosh-agent/agent/applier/applyspec"
	boshscript "github.com/cloudfoundry/bosh-agent/agent/script"
	boshdrain "github.com/cloudfoundry/bosh-agent/agent/script/drain"
	boshjobsuper "github.com/cloudfoundry/bosh-agent/jobsupervisor"
	boshnotif "github.com/cloudfoundry/bosh-agent/notification"
	bosherr "github.com/cloudfoundry/bosh-utils/errors"
	boshlog "github.com/cloudfoundry/bosh-utils/logger"
)

// DrainAction runs the drain scripts of every job in the current apply spec
// (in parallel) and supports cancellation via Cancel.
type DrainAction struct {
	jobScriptProvider boshscript.JobScriptProvider
	notifier          boshnotif.Notifier
	specService       boshas.V1Service
	jobSupervisor     boshjobsuper.JobSupervisor

	logTag string
	logger boshlog.Logger

	// cancelCh is buffered with capacity 1 so Cancel can signal without
	// blocking even when no Run is currently waiting on it.
	cancelCh chan struct{}
}

// DrainType selects which drain parameters are passed to the drain scripts.
type DrainType string

const (
	DrainTypeUpdate   DrainType = "update"
	DrainTypeStatus   DrainType = "status"
	DrainTypeShutdown DrainType = "shutdown"
)

// NewDrain constructs a DrainAction wired to the given collaborators.
func NewDrain(
	notifier boshnotif.Notifier,
	specService boshas.V1Service,
	jobScriptProvider boshscript.JobScriptProvider,
	jobSupervisor boshjobsuper.JobSupervisor,
	logger boshlog.Logger,
) DrainAction {
	return DrainAction{
		notifier:          notifier,
		specService:       specService,
		jobScriptProvider: jobScriptProvider,
		jobSupervisor:     jobSupervisor,

		logTag: "Drain Action",
		logger: logger,

		cancelCh: make(chan struct{}, 1),
	}
}

// IsAsynchronous reports that this action runs asynchronously.
func (a DrainAction) IsAsynchronous() bool { return true }

// IsPersistent reports that this action is not persisted across restarts.
func (a DrainAction) IsPersistent() bool { return false }

// Run unmonitors all services, then executes each job's drain script in
// parallel, returning when the scripts finish or when the action is cancelled.
// The int result is always 0 here; any failure is reported via the error.
func (a DrainAction) Run(drainType DrainType, newSpecs ...boshas.V1ApplySpec) (int, error) {
	currentSpec, err := a.specService.Get()
	if err != nil {
		return 0, bosherr.WrapError(err, "Getting current spec")
	}

	params, err := a.determineParams(drainType, currentSpec, newSpecs)
	if err != nil {
		return 0, err
	}

	a.logger.Debug(a.logTag, "Unmonitoring")

	err = a.jobSupervisor.Unmonitor()
	if err != nil {
		return 0, bosherr.WrapError(err, "Unmonitoring services")
	}

	var scripts []boshscript.Script

	for _, job := range currentSpec.Jobs() {
		script := a.jobScriptProvider.NewDrainScript(job.BundleName(), params)
		scripts = append(scripts, script)
	}

	script := a.jobScriptProvider.NewParallelScript("drain", scripts)

	// Run the parallel script in a goroutine so we can race completion
	// against a cancel request.
	resultsCh := make(chan error, 1)

	go func() { resultsCh <- script.Run() }()

	select {
	case result := <-resultsCh:
		a.logger.Debug(a.logTag, "Got a result")
		return 0, result
	case <-a.cancelCh:
		a.logger.Debug(a.logTag, "Got a cancel request")
		return 0, script.Cancel()
	}
}

// determineParams maps the drain type (and optional new apply spec) to the
// ScriptParams handed to each drain script.
func (a DrainAction) determineParams(drainType DrainType, currentSpec boshas.V1ApplySpec, newSpecs []boshas.V1ApplySpec) (boshdrain.ScriptParams, error) {
	var newSpec *boshas.V1ApplySpec
	var params boshdrain.ScriptParams

	if len(newSpecs) > 0 {
		newSpec = &newSpecs[0]
	}

	switch drainType {
	case DrainTypeStatus:
		// Status was used in the past when dynamic drain was implemented in the Director.
		// Now that we implement it in the agent, we should never get a call for this type.
		return params, bosherr.Error("Unexpected call with drain type 'status'")

	case DrainTypeUpdate:
		if newSpec == nil {
			return params, bosherr.Error("Drain update requires new spec")
		}

		params = boshdrain.NewUpdateParams(currentSpec, *newSpec)

	case DrainTypeShutdown:
		err := a.notifier.NotifyShutdown()
		if err != nil {
			return params, bosherr.WrapError(err, "Notifying shutdown")
		}

		params = boshdrain.NewShutdownParams(currentSpec, newSpec)
	}

	return params, nil
}

// Resume is not supported for drain.
func (a DrainAction) Resume() (interface{}, error) {
	return nil, errors.New("not supported")
}

// Cancel signals a running drain (if any) to stop. The non-blocking send
// with a default case makes Cancel safe to call when nothing is draining
// or when a cancel is already pending.
func (a DrainAction) Cancel() error {
	a.logger.Debug(a.logTag, "Cancelling drain action")
	select {
	case a.cancelCh <- struct{}{}:
	default:
	}
	return nil
}
cloudfoundry/bosh-init
vendor/github.com/cloudfoundry/bosh-agent/agent/action/drain.go
GO
apache-2.0
3,726
from direct.directnotify import DirectNotifyGlobal from direct.distributed.DistributedObjectAI import DistributedObjectAI class DistCogdoCraneMoneyBagAI(DistributedObjectAI): notify = DirectNotifyGlobal.directNotify.newCategory("DistCogdoCraneMoneyBagAI") def setIndex(self, todo0): pass def requestInitial(self): pass
silly-wacky-3-town-toon/SOURCE-COD
toontown/cogdominium/DistCogdoCraneMoneyBagAI.py
Python
apache-2.0
351
#
# Lexical analyzer for JSON
# Copyright (C) 2003,2005 Rafael R. Sevilla <dido@imperium.ph>
# This file is part of JSON for Ruby
#
# JSON for Ruby is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# JSON for Ruby is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with JSON for Ruby; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA.
#
# Author:: Rafael R. Sevilla (mailto:dido@imperium.ph)
# Some bugs fixed by Adam Kramer (mailto:adam@the-kramers.net)
# Copyright:: Copyright (c) 2003,2005 Rafael R. Sevilla
# License:: GNU Lesser General Public License
#
require 'json/objects'
require 'cgi'

module JSON
  VERSION ||= '1.1.2'

  class Lexer
    # This method will initialize the lexer to contain a string.
    # =====Parameters
    # +s+:: the string to initialize the lexer object with
    def initialize(s)
      @index = 0
      @source = s
    end

    # Backs up the lexer status one character.
    def back
      @index -= 1 if @index > 0
    end

    # True while there are unconsumed characters left in the source.
    def more?
      return(@index < @source.length)
    end

    # Consumes the next character. Returns NUL ("\0") once the source
    # is exhausted.
    def nextchar
      c = self.more?() ? @source[@index,1] : "\0"
      @index += 1
      return(c)
    end

    # Consumes the next character and check that it matches a specified
    # character.
    def nextmatch(char)
      n = self.nextchar
      raise "Expected '#{char}' and instead saw '#{n}'." if (n != char)
      return(n)
    end

    # Read the next n characters from the string in the lexer.
    # =====Parameters
    # +n+:: the number of characters to read from the lexer
    def nextchars(n)
      raise "substring bounds error" if (@index + n > @source.length)
      i = @index
      @index += n
      return(@source[i,n])
    end

    # Returns the next non-whitespace character, skipping over // and
    # /* */ style comments.
    def nextclean
      while true
        c = self.nextchar()
        if (c == '/')
          case self.nextchar()
          when '/'
            # line comment: consume to end of line (or end of input)
            c = self.nextchar()
            while c != "\n" && c != "\r" && c != "\0"
              c = self.nextchar()
            end
          when '*'
            # block comment: consume up to the closing */
            while true
              c = self.nextchar()
              raise "unclosed comment" if (c == "\0")
              if (c == '*')
                break if (self.nextchar() == '/')
                self.back()
              end
            end
          else
            self.back()
            return '/';
          end
        elsif c == "\0" || c[0] > " "[0]
          return(c)
        end
      end
    end

    # Given a Unicode code point, return a string giving its UTF-8
    # representation based on RFC 2279.
    #
    # BUGFIX: the trailing continuation-byte masks below used to be
    # 0b0011111 (5 bits); per RFC 2279 a continuation byte carries six
    # payload bits (10xxxxxx), i.e. mask 0b00111111.
    def utf8str(code)
      if (code & ~(0x7f)) == 0
        # UCS-4 range 0x00000000 - 0x0000007F
        return(code.chr)
      end
      buf = ""
      if (code & ~(0x7ff)) == 0
        # UCS-4 range 0x00000080 - 0x000007FF
        buf << (0b11000000 | (code >> 6)).chr
        buf << (0b10000000 | (code & 0b00111111)).chr
        return(buf)
      end
      if (code & ~(0x000ffff)) == 0
        # UCS-4 range 0x00000800 - 0x0000FFFF
        buf << (0b11100000 | (code >> 12)).chr
        buf << (0b10000000 | ((code >> 6) & 0b00111111)).chr
        buf << (0b10000000 | (code & 0b00111111)).chr
        return(buf)
      end
      # Not used -- JSON only has UCS-2, but for the sake
      # of completeness
      if (code & ~(0x1FFFFF)) == 0
        # UCS-4 range 0x00010000 - 0x001FFFFF
        buf << (0b11110000 | (code >> 18)).chr
        buf << (0b10000000 | ((code >> 12) & 0b00111111)).chr
        buf << (0b10000000 | ((code >> 6) & 0b00111111)).chr
        buf << (0b10000000 | (code & 0b00111111)).chr
        return(buf)
      end
      if (code & ~(0x03FFFFFF)) == 0
        # UCS-4 range 0x00200000 - 0x03FFFFFF
        buf << (0b11110000 | (code >> 24)).chr
        buf << (0b10000000 | ((code >> 18) & 0b00111111)).chr
        buf << (0b10000000 | ((code >> 12) & 0b00111111)).chr
        buf << (0b10000000 | ((code >> 6) & 0b00111111)).chr
        buf << (0b10000000 | (code & 0b00111111)).chr
        return(buf)
      end
      # UCS-4 range 0x04000000 - 0x7FFFFFFF
      buf << (0b11111000 | (code >> 30)).chr
      buf << (0b10000000 | ((code >> 24) & 0b00111111)).chr
      buf << (0b10000000 | ((code >> 18) & 0b00111111)).chr
      buf << (0b10000000 | ((code >> 12) & 0b00111111)).chr
      buf << (0b10000000 | ((code >> 6) & 0b00111111)).chr
      buf << (0b10000000 | (code & 0b00111111)).chr
      return(buf)
    end

    # Reads the next string, given a quote character (usually ' or ")
    # =====Parameters
    # +quot+: the next matching quote character to use
    def nextstring(quot)
      c = buf = ""
      while true
        c = self.nextchar()
        case c
        # BUGFIX: was /\0|\n\r/, which only matched NUL or the exact
        # two-character sequence "\n\r" -- a lone newline or carriage
        # return never triggered "Unterminated string".
        when /[\0\n\r]/
          raise "Unterminated string"
        when "\\"
          chr = self.nextchar()
          case chr
          when 'b'
            buf << "\b"
          when 't'
            buf << "\t"
          when 'n'
            buf << "\n"
          when 'f'
            buf << "\f"
          when 'r'
            buf << "\r"
          when 'u'
            buf << utf8str(Integer("0x" + self.nextchars(4)))
          else
            buf << chr
          end
        else
          return(buf) if (c == quot)
          buf << c
        end
      end
    end

    # Reads the next group of characters that match a regular
    # expresion.
    #
    # BUGFIX: the sentinel comparisons used single-quoted '\0'/'\n'/'\r',
    # which in Ruby are two-character strings (backslash + letter) and can
    # never equal the single characters nextchar returns; they are now
    # double-quoted escapes.
    def nextto(regex)
      buf = ""
      while (true)
        c = self.nextchar()
        if !(regex =~ c).nil? || c == "\0" || c == "\n" || c == "\r"
          self.back() if (c != "\0")
          return(buf.chomp())
        end
        buf += c
      end
    end

    # Reads the next value from the string. This can return either a
    # string, a FixNum, a floating point value, a JSON array, or a
    # JSON object.
    def nextvalue
      c = self.nextclean
      s = ""
      case c
      when /\"|\'/
        return(self.nextstring(c))
      when '{'
        self.back()
        return(Hash.new.from_json(self))
      when '['
        self.back()
        return(Array.new.from_json(self))
      else
        # bare token: accumulate until a delimiter, then classify it
        buf = ""
        while ((c =~ /"| |:|,|\]|\}|\/|\0/).nil?)
          buf += c
          c = self.nextchar()
        end
        self.back()
        s = buf.chomp
        case s
        when "true"
          return(true)
        when "false"
          return(false)
        when "null"
          return(nil)
        when /^[0-9]|\.|-|\+/
          if s =~ /[.]/ then
            return Float(s)
          else
            return Integer(s)
          end
        end
        if (s == "")
          s = nil
        end
        return(s)
      end
    end

    # Skip to the next instance of the character specified
    # =====Parameters
    # +to+:: Character to skip to
    def skipto(to)
      index = @index
      loop {
        c = self.nextchar()
        # BUGFIX: was single-quoted '\0' (two characters), which nextchar
        # never returns, so exhaustion was never detected here.
        if (c == "\0")
          @index = index
          return(c)
        end
        if (c == to)
          self.back
          return(c)
        end
      }
    end

    # URL-unescapes the entire source string in place.
    def unescape
      @source = CGI::unescape(@source)
    end

    # Skip past the next instance of the character specified
    # =====Parameters
    # +to+:: the character to skip past
    def skippast(to)
      @index = @source.index(to, @index)
      @index = (@index.nil?) ? @source.length : @index + to.length
    end

    # Yields each successive value parsed from the source until nextvalue
    # returns a falsy value.
    def each
      while (n = nextvalue)
        yield(n)
      end
    end
  end
end
bizo/aws-tools
emr/elastic-mapreduce-ruby-20131216/json/lexer.rb
Ruby
apache-2.0
7,352
/*- * * Copyright 2017 Skymind, Inc. * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. */ package org.deeplearning4j.util; import java.util.Random; /** * Created by Alex on 24/01/2017. */ public class RandomUtils { /** * Randomly shuffle the specified integer array using a Fisher-Yates shuffle algorithm * @param toShuffle Array to shuffle * @param random RNG to use for shuffling */ public static void shuffleInPlace(int[] toShuffle, Random random) { //Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle for (int i = 0; i < toShuffle.length - 1; i++) { int j = i + random.nextInt(toShuffle.length - i); int temp = toShuffle[i]; toShuffle[i] = toShuffle[j]; toShuffle[j] = temp; } } }
shuodata/deeplearning4j
deeplearning4j-core/src/test/java/org/deeplearning4j/util/RandomUtils.java
Java
apache-2.0
1,403
package com.vaadin.tests.components.customcomponent; import com.vaadin.tests.components.TestBase; import com.vaadin.ui.Button.ClickEvent; import com.vaadin.ui.Button.ClickListener; import com.vaadin.ui.CustomComponent; import com.vaadin.ui.NativeButton; public class CustomComponentSizeUpdate extends TestBase { @Override protected void setup() { NativeButton nb = new NativeButton( "100%x100% button. Click to reduce CustomComponent size"); nb.setSizeFull(); final CustomComponent cc = new CustomComponent(nb); cc.setWidth("500px"); cc.setHeight("500px"); nb.addClickListener(new ClickListener() { @Override public void buttonClick(ClickEvent event) { cc.setWidth((cc.getWidth() - 20) + "px"); cc.setHeight((cc.getHeight() - 20) + "px"); } }); addComponent(cc); } @Override protected String getDescription() { return "Click the button to reduce the size of the parent. The button should be resized to fit the parent."; } @Override protected Integer getTicketNumber() { return 3705; } }
jdahlstrom/vaadin.react
uitest/src/main/java/com/vaadin/tests/components/customcomponent/CustomComponentSizeUpdate.java
Java
apache-2.0
1,199
public interface I { void foo(); }
smmribeiro/intellij-community
plugins/kotlin/idea/tests/testData/findUsages/kotlin/findFunctionUsages/highlightingOfSuperUsages.1.java
Java
apache-2.0
38
/*
-----------------------------------------------------------------------------
This source file is part of OGRE
    (Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org/

Copyright (c) 2000-2013 Torus Knot Software Ltd

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
/*
-----------------------------------------------------------------------------
Filename:    WindowEmbedding.cpp
Description: Stuff your windows full of OGRE
-----------------------------------------------------------------------------
*/

#include "Ogre.h"

using namespace Ogre;

void setupResources(void); // Just a prototype

// Loads resource locations listed in resources.cfg and registers each one
// with the ResourceGroupManager (section name = resource group).
void setupResources(void)
{
    // Load resource paths from config file
    ConfigFile cf;
    cf.load("resources.cfg");

    // Go through all sections & settings in the file
    ConfigFile::SectionIterator seci = cf.getSectionIterator();

    String secName, typeName, archName;
    while (seci.hasMoreElements())
    {
        secName = seci.peekNextKey();
        ConfigFile::SettingsMultiMap *settings = seci.getNext();
        ConfigFile::SettingsMultiMap::iterator i;
        for (i = settings->begin(); i != settings->end(); ++i)
        {
            typeName = i->first;
            archName = i->second;
            ResourceGroupManager::getSingleton().addResourceLocation(
                archName, typeName, secName);
        }
    }
}

//---------------------------------------------------------------------
// Windows Test
//---------------------------------------------------------------------
#if OGRE_PLATFORM == OGRE_PLATFORM_WIN32
#include "windows.h"

RenderWindow* renderWindow = 0;
bool winActive = false;
bool winSizing = false;

// Window procedure for the host window: forwards move/resize/paint events
// to the embedded Ogre RenderWindow.
LRESULT CALLBACK TestWndProc( HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam )
{
    if (uMsg == WM_CREATE)
    {
        return 0;
    }

    if (!renderWindow)
        return DefWindowProc(hWnd, uMsg, wParam, lParam);

    switch( uMsg )
    {
    case WM_ACTIVATE:
        winActive = (LOWORD(wParam) != WA_INACTIVE);
        break;

    case WM_ENTERSIZEMOVE:
        winSizing = true;
        break;

    case WM_EXITSIZEMOVE:
        // Apply the final size once the interactive resize/move ends.
        renderWindow->windowMovedOrResized();
        renderWindow->update();
        winSizing = false;
        break;

    case WM_MOVE:
    case WM_SIZE:
        if (!winSizing)
            renderWindow->windowMovedOrResized();
        break;

    case WM_GETMINMAXINFO:
        // Prevent the window from going smaller than some min size
        ((MINMAXINFO*)lParam)->ptMinTrackSize.x = 100;
        ((MINMAXINFO*)lParam)->ptMinTrackSize.y = 100;
        break;

    case WM_CLOSE:
        renderWindow->destroy(); // cleanup and call DestroyWindow
        PostQuitMessage(0);
        return 0;

    case WM_PAINT:
        if (!winSizing)
        {
            renderWindow->update();
            return 0;
        }
        break;
    }

    return DefWindowProc( hWnd, uMsg, wParam, lParam );
}

// Creates a native Win32 window, embeds an Ogre RenderWindow in it via
// externalWindowHandle, builds a minimal scene, and pumps messages until quit.
INT WINAPI EmbeddedMain( HINSTANCE hInst, HINSTANCE, LPSTR strCmdLine, INT )
{
    try
    {
        // Create a new window
        // Style & size
        DWORD dwStyle = WS_VISIBLE | WS_CLIPCHILDREN | WS_CLIPSIBLINGS | WS_OVERLAPPEDWINDOW;

        // Register the window class
        WNDCLASS wc = { 0, TestWndProc, 0, 0, hInst,
            LoadIcon(0, IDI_APPLICATION), LoadCursor(NULL, IDC_ARROW),
            (HBRUSH)GetStockObject(BLACK_BRUSH), 0, "TestWnd" };
        RegisterClass(&wc);

        HWND hwnd = CreateWindow("TestWnd", "Test embedding", dwStyle,
            0, 0, 800, 600, 0, 0, hInst, 0);

        Root root("", "");
        root.loadPlugin("RenderSystem_GL");
        //root.loadPlugin("RenderSystem_Direct3D9");
        root.loadPlugin("Plugin_ParticleFX");
        root.loadPlugin("Plugin_CgProgramManager");

        // select first renderer & init with no window
        root.setRenderSystem(*(root.getAvailableRenderers().begin()));
        root.initialise(false);

        // create first window manually
        NameValuePairList options;
        options["externalWindowHandle"] = StringConverter::toString((size_t)hwnd);

        renderWindow = root.createRenderWindow("embedded", 800, 600, false, &options);

        setupResources();
        ResourceGroupManager::getSingleton().initialiseAllResourceGroups();

        SceneManager *scene = root.createSceneManager(Ogre::ST_GENERIC, "default");

        Camera *cam = scene->createCamera("cam");

        Viewport* vp = renderWindow->addViewport(cam);
        vp->setBackgroundColour(Ogre::ColourValue(0.5, 0.5, 0.7));
        cam->setAutoAspectRatio(true);
        cam->setPosition(0,0,300);
        cam->setDirection(0,0,-1);

        Entity* e = scene->createEntity("1", "ogrehead.mesh");
        scene->getRootSceneNode()->createChildSceneNode()->attachObject(e);

        Light* l = scene->createLight("l");
        l->setPosition(300, 100, -100);

        // message loop
        MSG msg;
        // BUGFIX: GetMessage returns -1 on error (not just 0/non-zero), so
        // testing "!= 0" loops forever on failure. Per the Win32 docs the
        // correct idiom is "> 0": exits on WM_QUIT (0) and on error (-1).
        while (GetMessage(&msg, NULL, 0, 0) > 0)
        {
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }
    }
    catch (const Exception& e) // catch by const reference, not by non-const ref
    {
        MessageBox( NULL, e.getFullDescription().c_str(),
            "An exception has occurred!", MB_OK | MB_ICONERROR | MB_TASKMODAL);
    }

    return 0;
}
#endif
cesarpazguzman/The-Eternal-Sorrow
dependencies/Ogre/Tests/PlayPen/src/WindowEmbedding.cpp
C++
apache-2.0
5,719
//===-- main.c --------------------------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include <stdio.h> #include <stdint.h> // This simple program is to test the lldb Python API related to process. char my_char = 'u'; char my_cstring[] = "lldb.SBProcess.ReadCStringFromMemory() works!"; char *my_char_ptr = (char *)"Does it work?"; uint32_t my_uint32 = 12345; int my_int = 0; int main (int argc, char const *argv[]) { for (int i = 0; i < 3; ++i) { printf("my_char='%c'\n", my_char); ++my_char; } printf("after the loop: my_char='%c'\n", my_char); // 'my_char' should print out as 'x'. return 0; // Set break point at this line and check variable 'my_char'. // Use lldb Python API to set memory content for my_int and check the result. }
apple/swift-lldb
packages/Python/lldbsuite/test/python_api/process/main.cpp
C++
apache-2.0
1,072
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.

using Microsoft.CodeAnalysis.CSharp.Syntax;
using Microsoft.CodeAnalysis.CSharp.Test.Utilities;
using Microsoft.CodeAnalysis.Test.Utilities;
using Xunit;

namespace Microsoft.CodeAnalysis.CSharp.UnitTests
{
    // Tests for the IOperation tree and control-flow graph produced for
    // conditional-access ("?." / "?[]") expressions. Each test binds the
    // /*<bind>*/.../*</bind>*/ span of `source` and compares the printed
    // operation tree or CFG against the baseline string verbatim, so the
    // baseline string literals must not be edited casually.
    public partial class IOperationTests : SemanticModelTestBase
    {
        // Conditional-access invocation: the tree is an IConditionalAccessOperation
        // whose WhenNotNull side invokes ToString on the implicit
        // conditional-access receiver.
        [CompilerTrait(CompilerFeature.IOperation)]
        [Fact]
        public void IConditionalAccessExpression_SimpleMethodAccess()
        {
            string source = @" using System; public class C1 { public void M() { var o = new object(); /*<bind>*/o?.ToString()/*</bind>*/; } } ";
            string expectedOperationTree = @" IConditionalAccessOperation (OperationKind.ConditionalAccess, Type: System.String) (Syntax: 'o?.ToString()') Operation: ILocalReferenceOperation: o (OperationKind.LocalReference, Type: System.Object) (Syntax: 'o') WhenNotNull: IInvocationOperation (virtual System.String System.Object.ToString()) (OperationKind.Invocation, Type: System.String) (Syntax: '.ToString()') Instance Receiver: IConditionalAccessInstanceOperation (OperationKind.ConditionalAccessInstance, Type: System.Object, IsImplicit) (Syntax: 'o') Arguments(0) ";
            var expectedDiagnostics = DiagnosticDescription.None;

            VerifyOperationTreeAndDiagnosticsForTest<ConditionalAccessExpressionSyntax>(source, expectedOperationTree, expectedDiagnostics);
        }

        // Conditional-access property read: the overall type is lifted to
        // System.Int32? because the int property is reached through "?.".
        [CompilerTrait(CompilerFeature.IOperation)]
        [Fact]
        public void IConditionalAccessExpression_SimplePropertyAccess()
        {
            string source = @" using System; public class C1 { int Prop1 { get; } public void M() { C1 c1 = null; var prop = /*<bind>*/c1?.Prop1/*</bind>*/; } } ";
            string expectedOperationTree = @" IConditionalAccessOperation (OperationKind.ConditionalAccess, Type: System.Int32?) 
(Syntax: 'c1?.Prop1') Operation: ILocalReferenceOperation: c1 (OperationKind.LocalReference, Type: C1) (Syntax: 'c1') WhenNotNull: IPropertyReferenceOperation: System.Int32 C1.Prop1 { get; } (OperationKind.PropertyReference, Type: System.Int32) (Syntax: '.Prop1') Instance Receiver: IConditionalAccessInstanceOperation (OperationKind.ConditionalAccessInstance, Type: C1, IsImplicit) (Syntax: 'c1') ";
            var expectedDiagnostics = DiagnosticDescription.None;

            VerifyOperationTreeAndDiagnosticsForTest<ConditionalAccessExpressionSyntax>(source, expectedOperationTree, expectedDiagnostics);
        }

        // CFG test: lowering of "result = input?.Length" for a reference-type
        // receiver. The graph captures the receiver, branches on an
        // IIsNullOperation, produces either the member read (wrapped in an
        // ImplicitNullable conversion) or a default value, and merges the two
        // captures before the assignment.
        [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
        [Fact]
        public void ConditionalAccessFlow_01()
        {
            string source = @" class P { void M1(System.Array input, int? result) /*<bind>*/{ result = input?.Length; }/*</bind>*/ } ";
            string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} .locals {R1} { CaptureIds: [0] [2] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result') Value: IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: System.Int32?) 
(Syntax: 'result') Next (Regular) Block[B2] Entering: {R2} .locals {R2} { CaptureIds: [1] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: System.Array) (Syntax: 'input') Jump if True (Regular) to Block[B4] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input') Operand: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Array, IsImplicit) (Syntax: 'input') Leaving: {R2} Next (Regular) Block[B3] Block[B3] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Length') Value: IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.Int32?, IsImplicit) (Syntax: '.Length') Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) (ImplicitNullable) Operand: IPropertyReferenceOperation: System.Int32 System.Array.Length { get; } (OperationKind.PropertyReference, Type: System.Int32) (Syntax: '.Length') Instance Receiver: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Array, IsImplicit) (Syntax: 'input') Next (Regular) Block[B5] Leaving: {R2} } Block[B4] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsImplicit) (Syntax: 'input') Next (Regular) Block[B5] Block[B5] - Block Predecessors: [B3] [B4] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = input?.Length;') Expression: ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?) 
(Syntax: 'result = input?.Length') Left: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'result') Right: IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input?.Length') Next (Regular) Block[B6] Leaving: {R1} } Block[B6] - Exit Predecessors: [B5] Statements (0) ";
            var expectedDiagnostics = DiagnosticDescription.None;

            VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
        }

        // CFG test: "result = input?.ToString()" with a nullable *value*-type
        // receiver. Unlike Flow_01, the non-null branch reads the receiver
        // through an implicit Nullable<T>.GetValueOrDefault() call instead of
        // a reference dereference.
        [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
        [Fact]
        public void ConditionalAccessFlow_02()
        {
            string source = @" class P { void M1(int? input, string result) /*<bind>*/{ result = input?.ToString(); }/*</bind>*/ } ";
            string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} .locals {R1} { CaptureIds: [0] [2] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result') Value: IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: System.String) (Syntax: 'result') Next (Regular) Block[B2] Entering: {R2} .locals {R2} { CaptureIds: [1] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: System.Int32?) 
(Syntax: 'input') Jump if True (Regular) to Block[B4] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input') Operand: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input') Leaving: {R2} Next (Regular) Block[B3] Block[B3] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.ToString()') Value: IInvocationOperation (virtual System.String System.Int32.ToString()) (OperationKind.Invocation, Type: System.String) (Syntax: '.ToString()') Instance Receiver: IInvocationOperation ( System.Int32 System.Int32?.GetValueOrDefault()) (OperationKind.Invocation, Type: System.Int32, IsImplicit) (Syntax: 'input') Instance Receiver: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input') Arguments(0) Arguments(0) Next (Regular) Block[B5] Leaving: {R2} } Block[B4] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: System.String, Constant: null, IsImplicit) (Syntax: 'input') Next (Regular) Block[B5] Block[B5] - Block Predecessors: [B3] [B4] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = in ... ToString();') Expression: ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.String) (Syntax: 'result = in ... 
.ToString()') Left: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: System.String, IsImplicit) (Syntax: 'result') Right: IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.String, IsImplicit) (Syntax: 'input?.ToString()') Next (Regular) Block[B6] Leaving: {R1} } Block[B6] - Exit Predecessors: [B5] Statements (0) ";
            var expectedDiagnostics = DiagnosticDescription.None;

            VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
        }

        // CFG test: conditional access whose WhenNotNull part calls a
        // user-defined method that already returns int?, so the non-null
        // branch captures the invocation directly with no lifting conversion.
        [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
        [Fact]
        public void ConditionalAccessFlow_03()
        {
            string source = @" class P { void M1(P input, int? result) /*<bind>*/{ result = input?.Access(); }/*</bind>*/ int? Access() => null; } ";
            string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} .locals {R1} { CaptureIds: [0] [2] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result') Value: IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: System.Int32?) (Syntax: 'result') Next (Regular) Block[B2] Entering: {R2} .locals {R2} { CaptureIds: [1] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: P) (Syntax: 'input') Jump if True (Regular) to Block[B4] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input') Operand: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'input') Leaving: {R2} Next (Regular) Block[B3] Block[B3] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access()') Value: IInvocationOperation ( System.Int32? 
P.Access()) (OperationKind.Invocation, Type: System.Int32?) (Syntax: '.Access()') Instance Receiver: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'input') Arguments(0) Next (Regular) Block[B5] Leaving: {R2} } Block[B4] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsImplicit) (Syntax: 'input') Next (Regular) Block[B5] Block[B5] - Block Predecessors: [B3] [B4] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = in ... ?.Access();') Expression: ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?) (Syntax: 'result = input?.Access()') Left: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'result') Right: IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input?.Access()') Next (Regular) Block[B6] Leaving: {R1} } Block[B6] - Exit Predecessors: [B5] Statements (0) ";
            var expectedDiagnostics = DiagnosticDescription.None;

            VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
        }

        // CFG test: nested/chained conditional accesses
        // "(input?[11]?.Access1())?[22]?.Access2()". Each "?." / "?[]" link
        // introduces its own capture region and null-test branch, so the
        // expected graph nests regions {R2}..{R5} and merges through the
        // default-value blocks B6/B11.
        [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
        [Fact]
        public void ConditionalAccessFlow_04()
        {
            string source = @" class P { void M1(P input, P result) /*<bind>*/{ result = (input?[11]?.Access1())?[22]?.Access2(); }/*</bind>*/ P this[int x] => null; P[] Access1() => null; P Access2() => null; } ";
            string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} .locals {R1} { CaptureIds: [0] [5] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result') Value: 
IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: P) (Syntax: 'result') Next (Regular) Block[B2] Entering: {R2} {R3} {R4} {R5} .locals {R2} { CaptureIds: [4] .locals {R3} { CaptureIds: [3] .locals {R4} { CaptureIds: [2] .locals {R5} { CaptureIds: [1] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: P) (Syntax: 'input') Jump if True (Regular) to Block[B6] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input') Operand: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'input') Leaving: {R5} {R4} Next (Regular) Block[B3] Block[B3] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '[11]') Value: IPropertyReferenceOperation: P P.this[System.Int32 x] { get; } (OperationKind.PropertyReference, Type: P) (Syntax: '[11]') Instance Receiver: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'input') Arguments(1): IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: '11') ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 11) (Syntax: '11') InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) Next (Regular) Block[B4] Leaving: {R5} } Block[B4] - Block Predecessors: [B3] Statements (0) Jump if True (Regular) to Block[B6] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: '[11]') Operand: 
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '[11]') Leaving: {R4} Next (Regular) Block[B5] Block[B5] - Block Predecessors: [B4] Statements (1) IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access1()') Value: IInvocationOperation ( P[] P.Access1()) (OperationKind.Invocation, Type: P[]) (Syntax: '.Access1()') Instance Receiver: IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '[11]') Arguments(0) Next (Regular) Block[B7] Leaving: {R4} } Block[B6] - Block Predecessors: [B2] [B4] Statements (1) IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input?[11]?.Access1()') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: P[], Constant: null, IsImplicit) (Syntax: 'input?[11]?.Access1()') Next (Regular) Block[B7] Block[B7] - Block Predecessors: [B5] [B6] Statements (0) Jump if True (Regular) to Block[B11] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input?[11]?.Access1()') Operand: IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: P[], IsImplicit) (Syntax: 'input?[11]?.Access1()') Leaving: {R3} {R2} Next (Regular) Block[B8] Block[B8] - Block Predecessors: [B7] Statements (1) IFlowCaptureOperation: 4 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '[22]') Value: IArrayElementReferenceOperation (OperationKind.ArrayElementReference, Type: P) (Syntax: '[22]') Array reference: IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: P[], IsImplicit) (Syntax: 'input?[11]?.Access1()') Indices(1): ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 22) (Syntax: '22') Next (Regular) Block[B9] Leaving: {R3} } Block[B9] - Block Predecessors: [B8] Statements (0) Jump if True (Regular) to Block[B11] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) 
(Syntax: '[22]') Operand: IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '[22]') Leaving: {R2} Next (Regular) Block[B10] Block[B10] - Block Predecessors: [B9] Statements (1) IFlowCaptureOperation: 5 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access2()') Value: IInvocationOperation ( P P.Access2()) (OperationKind.Invocation, Type: P) (Syntax: '.Access2()') Instance Receiver: IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '[22]') Arguments(0) Next (Regular) Block[B12] Leaving: {R2} } Block[B11] - Block Predecessors: [B7] [B9] Statements (1) IFlowCaptureOperation: 5 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '(input?[11] ... ?.Access2()') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: P, Constant: null, IsImplicit) (Syntax: '(input?[11] ... ?.Access2()') Next (Regular) Block[B12] Block[B12] - Block Predecessors: [B10] [B11] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = (i ... .Access2();') Expression: ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: P) (Syntax: 'result = (i ... ?.Access2()') Left: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'result') Right: IFlowCaptureReferenceOperation: 5 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '(input?[11] ... ?.Access2()') Next (Regular) Block[B13] Leaving: {R1} } Block[B13] - Exit Predecessors: [B12] Statements (0) ";
            var expectedDiagnostics = DiagnosticDescription.None;

            VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
        }

        // CFG test: the same chained shape as Flow_04 but with nullable value
        // types (receiver P?, indexer returning P?), so the non-null branches
        // go through GetValueOrDefault() calls and ImplicitNullable
        // conversions rather than plain reference member accesses.
        [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
        [Fact]
        public void ConditionalAccessFlow_05()
        {
            string source = @" struct P { void M1(P? 
result) /*<bind>*/{ result = (input?.Access1()?[11])?[22]?.Access2(); }/*</bind>*/ P? this[int x] => default; P[] Access1() => default; P Access2() => default; } "; string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} .locals {R1} { CaptureIds: [0] [5] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result') Value: IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: P?) (Syntax: 'result') Next (Regular) Block[B2] Entering: {R2} {R3} {R4} {R5} .locals {R2} { CaptureIds: [4] .locals {R3} { CaptureIds: [3] .locals {R4} { CaptureIds: [2] .locals {R5} { CaptureIds: [1] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: P?) (Syntax: 'input') Jump if True (Regular) to Block[B6] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input') Operand: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input') Leaving: {R5} {R4} Next (Regular) Block[B3] Block[B3] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access1()') Value: IInvocationOperation ( P[] P.Access1()) (OperationKind.Invocation, Type: P[]) (Syntax: '.Access1()') Instance Receiver: IInvocationOperation ( P P?.GetValueOrDefault()) (OperationKind.Invocation, Type: P, IsImplicit) (Syntax: 'input') Instance Receiver: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input') Arguments(0) Arguments(0) Next (Regular) Block[B4] Leaving: {R5} } Block[B4] - Block Predecessors: [B3] Statements (0) Jump if True (Regular) to Block[B6] IIsNullOperation (OperationKind.IsNull, 
Type: System.Boolean, IsImplicit) (Syntax: '.Access1()') Operand: IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: P[], IsImplicit) (Syntax: '.Access1()') Leaving: {R4} Next (Regular) Block[B5] Block[B5] - Block Predecessors: [B4] Statements (1) IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '[11]') Value: IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: P?, IsImplicit) (Syntax: '[11]') Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) (ImplicitNullable) Operand: IArrayElementReferenceOperation (OperationKind.ArrayElementReference, Type: P) (Syntax: '[11]') Array reference: IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: P[], IsImplicit) (Syntax: '.Access1()') Indices(1): ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 11) (Syntax: '11') Next (Regular) Block[B7] Leaving: {R4} } Block[B6] - Block Predecessors: [B2] [B4] Statements (1) IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input?.Access1()?[11]') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: P?, IsImplicit) (Syntax: 'input?.Access1()?[11]') Next (Regular) Block[B7] Block[B7] - Block Predecessors: [B5] [B6] Statements (0) Jump if True (Regular) to Block[B11] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input?.Access1()?[11]') Operand: IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input?.Access1()?[11]') Leaving: {R3} {R2} Next (Regular) Block[B8] Block[B8] - Block Predecessors: [B7] Statements (1) IFlowCaptureOperation: 4 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '[22]') Value: IPropertyReferenceOperation: P? P.this[System.Int32 x] { get; } (OperationKind.PropertyReference, Type: P?) 
(Syntax: '[22]') Instance Receiver: IInvocationOperation ( P P?.GetValueOrDefault()) (OperationKind.Invocation, Type: P, IsImplicit) (Syntax: 'input?.Access1()?[11]') Instance Receiver: IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input?.Access1()?[11]') Arguments(0) Arguments(1): IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: '22') ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 22) (Syntax: '22') InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) Next (Regular) Block[B9] Leaving: {R3} } Block[B9] - Block Predecessors: [B8] Statements (0) Jump if True (Regular) to Block[B11] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: '[22]') Operand: IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: '[22]') Leaving: {R2} Next (Regular) Block[B10] Block[B10] - Block Predecessors: [B9] Statements (1) IFlowCaptureOperation: 5 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access2()') Value: IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: P?, IsImplicit) (Syntax: '.Access2()') Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) (ImplicitNullable) Operand: IInvocationOperation ( P P.Access2()) (OperationKind.Invocation, Type: P) (Syntax: '.Access2()') Instance Receiver: IInvocationOperation ( P P?.GetValueOrDefault()) (OperationKind.Invocation, Type: P, IsImplicit) (Syntax: '[22]') Instance Receiver: IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: 
P?, IsImplicit) (Syntax: '[22]') Arguments(0) Arguments(0) Next (Regular) Block[B12] Leaving: {R2} } Block[B11] - Block Predecessors: [B7] [B9] Statements (1) IFlowCaptureOperation: 5 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '(input?.Acc ... ?.Access2()') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: P?, IsImplicit) (Syntax: '(input?.Acc ... ?.Access2()') Next (Regular) Block[B12] Block[B12] - Block Predecessors: [B10] [B11] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = (i ... .Access2();') Expression: ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: P?) (Syntax: 'result = (i ... ?.Access2()') Left: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'result') Right: IFlowCaptureReferenceOperation: 5 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: '(input?.Acc ... ?.Access2()') Next (Regular) Block[B13] Leaving: {R1} } Block[B13] - Exit Predecessors: [B12] Statements (0) ";
            var expectedDiagnostics = DiagnosticDescription.None;

            VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
        }

        // Error-recovery CFG test: "x?.P1 = 0" (assigning through a
        // conditional access, here to an auto-property) is illegal; the graph
        // is still produced but with IsInvalid operations, the left-hand side
        // wrapped in an IInvalidOperation, and CS0131 as the expected
        // diagnostic.
        [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
        [Fact]
        public void ConditionalAccessFlow_06()
        {
            string source = @" struct P { void M1(S1? 
x) /*<bind>*/{ x?.P1 = 0; }/*</bind>*/ } public struct S1 { public int P1 { get; set; } }"; string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} {R2} .locals {R1} { CaptureIds: [1] .locals {R2} { CaptureIds: [0] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: 'x') Value: IParameterReferenceOperation: x (OperationKind.ParameterReference, Type: S1?, IsInvalid) (Syntax: 'x') Jump if True (Regular) to Block[B3] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsInvalid, IsImplicit) (Syntax: 'x') Operand: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: S1?, IsInvalid, IsImplicit) (Syntax: 'x') Leaving: {R2} Next (Regular) Block[B2] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: '.P1') Value: IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: '.P1') Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) (ImplicitNullable) Operand: IPropertyReferenceOperation: System.Int32 S1.P1 { get; set; } (OperationKind.PropertyReference, Type: System.Int32, IsInvalid) (Syntax: '.P1') Instance Receiver: IInvocationOperation ( S1 S1?.GetValueOrDefault()) (OperationKind.Invocation, Type: S1, IsInvalid, IsImplicit) (Syntax: 'x') Instance Receiver: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: S1?, IsInvalid, IsImplicit) (Syntax: 'x') Arguments(0) Next (Regular) Block[B4] Leaving: {R2} } Block[B3] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: 'x') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: 
System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x') Next (Regular) Block[B4] Block[B4] - Block Predecessors: [B2] [B3] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null, IsInvalid) (Syntax: 'x?.P1 = 0;') Expression: ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?, IsInvalid) (Syntax: 'x?.P1 = 0') Left: IInvalidOperation (OperationKind.Invalid, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x?.P1') Children(1): IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x?.P1') Right: ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 0) (Syntax: '0') Next (Regular) Block[B5] Leaving: {R1} } Block[B5] - Exit Predecessors: [B4] Statements (0) ";
            var expectedDiagnostics = new[] {
                // file.cs(6,9): error CS0131: The left-hand side of an assignment must be a variable, property or indexer
                // x?.P1 = 0;
                Diagnostic(ErrorCode.ERR_AssgLvalueExpected, "x?.P1").WithLocation(6, 9)
            };

            VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
        }

        // Same invalid assignment as Flow_06, but P1 is a *field* of the
        // struct, so the WhenNotNull branch contains an
        // IFieldReferenceOperation instead of a property read; CS0131 is
        // still expected.
        [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
        [Fact]
        public void ConditionalAccessFlow_07()
        {
            string source = @" struct P { void M1(S1? 
x) /*<bind>*/{ x?.P1 = 0; }/*</bind>*/ } public struct S1 { public int P1; }"; string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} {R2} .locals {R1} { CaptureIds: [1] .locals {R2} { CaptureIds: [0] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: 'x') Value: IParameterReferenceOperation: x (OperationKind.ParameterReference, Type: S1?, IsInvalid) (Syntax: 'x') Jump if True (Regular) to Block[B3] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsInvalid, IsImplicit) (Syntax: 'x') Operand: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: S1?, IsInvalid, IsImplicit) (Syntax: 'x') Leaving: {R2} Next (Regular) Block[B2] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: '.P1') Value: IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: '.P1') Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) (ImplicitNullable) Operand: IFieldReferenceOperation: System.Int32 S1.P1 (OperationKind.FieldReference, Type: System.Int32, IsInvalid) (Syntax: '.P1') Instance Receiver: IInvocationOperation ( S1 S1?.GetValueOrDefault()) (OperationKind.Invocation, Type: S1, IsInvalid, IsImplicit) (Syntax: 'x') Instance Receiver: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: S1?, IsInvalid, IsImplicit) (Syntax: 'x') Arguments(0) Next (Regular) Block[B4] Leaving: {R2} } Block[B3] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: 'x') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsInvalid, IsImplicit) 
(Syntax: 'x') Next (Regular) Block[B4] Block[B4] - Block Predecessors: [B2] [B3] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null, IsInvalid) (Syntax: 'x?.P1 = 0;') Expression: ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?, IsInvalid) (Syntax: 'x?.P1 = 0') Left: IInvalidOperation (OperationKind.Invalid, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x?.P1') Children(1): IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x?.P1') Right: ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 0) (Syntax: '0') Next (Regular) Block[B5] Leaving: {R1} } Block[B5] - Exit Predecessors: [B4] Statements (0) ";
            var expectedDiagnostics = new[] {
                // file.cs(6,9): error CS0131: The left-hand side of an assignment must be a variable, property or indexer
                // x?.P1 = 0;
                Diagnostic(ErrorCode.ERR_AssgLvalueExpected, "x?.P1").WithLocation(6, 9)
            };

            VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
        }

        // Degenerate-framework CFG test: Nullable<T>.GetValueOrDefault is
        // deliberately removed from the compilation (MakeMemberMissing), so
        // the nullable receiver on the non-null branch is unwrapped through an
        // IInvalidOperation instead of a GetValueOrDefault() call.
        [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
        [Fact]
        public void ConditionalAccessFlow_08()
        {
            string source = @" struct P { void M1(P? input, int? result) /*<bind>*/{ result = input?.Length; }/*</bind>*/ public int Length { get; } } ";
            var compilation = CreateCompilationWithMscorlib45(source, parseOptions: TestOptions.RegularWithFlowAnalysisFeature);
            compilation.MakeMemberMissing(SpecialMember.System_Nullable_T_GetValueOrDefault);
            string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} .locals {R1} { CaptureIds: [0] [2] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result') Value: IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: System.Int32?) 
(Syntax: 'result') Next (Regular) Block[B2] Entering: {R2} .locals {R2} { CaptureIds: [1] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: P?) (Syntax: 'input') Jump if True (Regular) to Block[B4] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input') Operand: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input') Leaving: {R2} Next (Regular) Block[B3] Block[B3] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Length') Value: IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.Int32?, IsImplicit) (Syntax: '.Length') Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) (ImplicitNullable) Operand: IPropertyReferenceOperation: System.Int32 P.Length { get; } (OperationKind.PropertyReference, Type: System.Int32) (Syntax: '.Length') Instance Receiver: IInvalidOperation (OperationKind.Invalid, Type: P, IsImplicit) (Syntax: 'input') Children(1): IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input') Next (Regular) Block[B5] Leaving: {R2} } Block[B4] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsImplicit) (Syntax: 'input') Next (Regular) Block[B5] Block[B5] - Block Predecessors: [B3] [B4] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = input?.Length;') Expression: ISimpleAssignmentOperation 
(OperationKind.SimpleAssignment, Type: System.Int32?) (Syntax: 'result = input?.Length') Left: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'result') Right: IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input?.Length') Next (Regular) Block[B6] Leaving: {R1} } Block[B6] - Exit Predecessors: [B5] Statements (0) "; var expectedDiagnostics = DiagnosticDescription.None; VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(compilation, expectedGraph, expectedDiagnostics); } [CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)] [Fact] public void ConditionalAccessFlow_09() { string source = @" class C { void M1(C input1, C input2, C input3) /*<bind>*/{ input1?.M(input2?.M(input3?.M(null))); }/*</bind>*/ public string M(string x) => x; } "; string expectedGraph = @" Block[B0] - Entry Statements (0) Next (Regular) Block[B1] Entering: {R1} .locals {R1} { CaptureIds: [0] [2] Block[B1] - Block Predecessors: [B0] Statements (1) IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input1') Value: IParameterReferenceOperation: input1 (OperationKind.ParameterReference, Type: C) (Syntax: 'input1') Jump if True (Regular) to Block[B9] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input1') Operand: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input1') Leaving: {R1} Next (Regular) Block[B2] Entering: {R2} .locals {R2} { CaptureIds: [1] [4] Block[B2] - Block Predecessors: [B1] Statements (1) IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input2') Value: IParameterReferenceOperation: input2 (OperationKind.ParameterReference, Type: C) (Syntax: 'input2') Jump if True (Regular) to Block[B7] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 
'input2') Operand: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input2') Leaving: {R2} Next (Regular) Block[B3] Entering: {R3} .locals {R3} { CaptureIds: [3] Block[B3] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input3') Value: IParameterReferenceOperation: input3 (OperationKind.ParameterReference, Type: C) (Syntax: 'input3') Jump if True (Regular) to Block[B5] IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input3') Operand: IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input3') Leaving: {R3} Next (Regular) Block[B4] Block[B4] - Block Predecessors: [B3] Statements (1) IFlowCaptureOperation: 4 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.M(null)') Value: IInvocationOperation ( System.String C.M(System.String x)) (OperationKind.Invocation, Type: System.String) (Syntax: '.M(null)') Instance Receiver: IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input3') Arguments(1): IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: 'null') IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.String, Constant: null, IsImplicit) (Syntax: 'null') Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: True, IsUserDefined: False) (MethodSymbol: null) (ImplicitReference) Operand: ILiteralOperation (OperationKind.Literal, Type: null, Constant: null) (Syntax: 'null') InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) 
Next (Regular) Block[B6] Leaving: {R3} } Block[B5] - Block Predecessors: [B3] Statements (1) IFlowCaptureOperation: 4 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input3') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: System.String, Constant: null, IsImplicit) (Syntax: 'input3') Next (Regular) Block[B6] Block[B6] - Block Predecessors: [B4] [B5] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.M(input3?.M(null))') Value: IInvocationOperation ( System.String C.M(System.String x)) (OperationKind.Invocation, Type: System.String) (Syntax: '.M(input3?.M(null))') Instance Receiver: IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input2') Arguments(1): IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: 'input3?.M(null)') IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: System.String, IsImplicit) (Syntax: 'input3?.M(null)') InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) Next (Regular) Block[B8] Leaving: {R2} } Block[B7] - Block Predecessors: [B2] Statements (1) IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input2') Value: IDefaultValueOperation (OperationKind.DefaultValue, Type: System.String, Constant: null, IsImplicit) (Syntax: 'input2') Next (Regular) Block[B8] Block[B8] - Block Predecessors: [B6] [B7] Statements (1) IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'input1?.M(i ... 
.M(null)));') Expression: IInvocationOperation ( System.String C.M(System.String x)) (OperationKind.Invocation, Type: System.String) (Syntax: '.M(input2?. ... ?.M(null)))') Instance Receiver: IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input1') Arguments(1): IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: 'input2?.M(i ... 3?.M(null))') IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.String, IsImplicit) (Syntax: 'input2?.M(i ... 3?.M(null))') InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null) Next (Regular) Block[B9] Leaving: {R1} } Block[B9] - Exit Predecessors: [B1] [B8] Statements (0) "; var expectedDiagnostics = DiagnosticDescription.None; VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics); } } }
OmarTawfik/roslyn
src/Compilers/CSharp/Test/Semantic/IOperation/IOperationTests_IConditionalAccessExpression.cs
C#
apache-2.0
55,769
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package go_kafka_client import ( "fmt" "github.com/Shopify/sarama" "math/rand" "sync" "testing" "time" ) var numMessages = 1000 var consumeTimeout = 1 * time.Minute var localZk = "localhost:2181" var localBroker = "localhost:9092" func TestConsumerWithInconsistentProducing(t *testing.T) { consumeStatus := make(chan int) produceMessages := 1 consumeMessages := 2 sleepTime := 10 * time.Second timeout := 30 * time.Second topic := fmt.Sprintf("inconsistent-producing-%d", time.Now().Unix()) //create topic CreateMultiplePartitionsTopic(localZk, topic, 1) EnsureHasLeader(localZk, topic) Infof("test", "Produce %d message", produceMessages) go produceN(t, produceMessages, topic, localBroker) config := testConsumerConfig() config.Strategy = newCountingStrategy(t, consumeMessages, timeout, consumeStatus) consumer := NewConsumer(config) Info("test", "Starting consumer") go consumer.StartStatic(map[string]int{topic: 1}) //produce one more message after 10 seconds Infof("test", "Waiting for %s before producing another message", sleepTime) time.Sleep(sleepTime) Infof("test", "Produce %d message", produceMessages) go produceN(t, produceMessages, topic, localBroker) //make sure we get 2 messages if actual := <-consumeStatus; actual != 
consumeMessages { t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, timeout, actual) } closeWithin(t, 10*time.Second, consumer) } func TestStaticConsumingSinglePartition(t *testing.T) { consumeStatus := make(chan int) topic := fmt.Sprintf("test-static-%d", time.Now().Unix()) CreateMultiplePartitionsTopic(localZk, topic, 1) EnsureHasLeader(localZk, topic) go produceN(t, numMessages, topic, localBroker) config := testConsumerConfig() config.Strategy = newCountingStrategy(t, numMessages, consumeTimeout, consumeStatus) consumer := NewConsumer(config) go consumer.StartStatic(map[string]int{topic: 1}) if actual := <-consumeStatus; actual != numMessages { t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", numMessages, consumeTimeout, actual) } closeWithin(t, 10*time.Second, consumer) } func TestStaticConsumingMultiplePartitions(t *testing.T) { consumeStatus := make(chan int) topic := fmt.Sprintf("test-static-%d", time.Now().Unix()) CreateMultiplePartitionsTopic(localZk, topic, 5) EnsureHasLeader(localZk, topic) go produceN(t, numMessages, topic, localBroker) config := testConsumerConfig() config.Strategy = newCountingStrategy(t, numMessages, consumeTimeout, consumeStatus) consumer := NewConsumer(config) go consumer.StartStatic(map[string]int{topic: 3}) if actual := <-consumeStatus; actual != numMessages { t.Errorf("Failed to consume %d messages within %s. 
Actual messages = %d", numMessages, consumeTimeout, actual) } closeWithin(t, 10*time.Second, consumer) } func TestWhitelistConsumingSinglePartition(t *testing.T) { consumeStatus := make(chan int) timestamp := time.Now().Unix() topic1 := fmt.Sprintf("test-whitelist-%d-1", timestamp) topic2 := fmt.Sprintf("test-whitelist-%d-2", timestamp) CreateMultiplePartitionsTopic(localZk, topic1, 1) EnsureHasLeader(localZk, topic1) CreateMultiplePartitionsTopic(localZk, topic2, 1) EnsureHasLeader(localZk, topic2) go produceN(t, numMessages, topic1, localBroker) go produceN(t, numMessages, topic2, localBroker) expectedMessages := numMessages * 2 config := testConsumerConfig() config.Strategy = newCountingStrategy(t, expectedMessages, consumeTimeout, consumeStatus) consumer := NewConsumer(config) go consumer.StartWildcard(NewWhiteList(fmt.Sprintf("test-whitelist-%d-.+", timestamp)), 1) if actual := <-consumeStatus; actual != expectedMessages { t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", expectedMessages, consumeTimeout, actual) } closeWithin(t, 10*time.Second, consumer) } func TestStaticPartitionConsuming(t *testing.T) { consumeStatus := make(chan int) timestamp := time.Now().Unix() topic := fmt.Sprintf("test-static-partitions-%d", timestamp) CreateMultiplePartitionsTopic(localZk, topic, 2) EnsureHasLeader(localZk, topic) go produceN(t, numMessages, topic, localBroker) checkPartition := int32(0) // expectedMessages := numMessages * 2 config := testConsumerConfig() config.Strategy = newPartitionTrackingStrategy(t, numMessages, consumeTimeout, consumeStatus, checkPartition) consumer := NewConsumer(config) go consumer.StartStatic(map[string]int{topic: 2}) actual := <-consumeStatus expectedForPartition := <-consumeStatus if actual != numMessages { t.Errorf("Failed to consume %d messages within %s. 
Actual messages = %d", numMessages, consumeTimeout, actual) } closeWithin(t, 10*time.Second, consumer) staticConfig := testConsumerConfig() staticConfig.Groupid = "static-test-group" staticConfig.Strategy = newCountingStrategy(t, expectedForPartition, consumeTimeout, consumeStatus) staticConsumer := NewConsumer(staticConfig) go staticConsumer.StartStaticPartitions(map[string][]int32{topic: []int32{checkPartition}}) if actualForPartition := <-consumeStatus; actualForPartition != expectedForPartition { t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", numMessages, consumeTimeout, actualForPartition) } closeWithin(t, 10*time.Second, staticConsumer) } func TestMessagesProcessedOnce(t *testing.T) { closeTimeout := 15 * time.Second consumeFinished := make(chan bool) messages := 100 topic := fmt.Sprintf("test-processing-%d", time.Now().Unix()) CreateMultiplePartitionsTopic(localZk, topic, 1) EnsureHasLeader(localZk, topic) go produceN(t, messages, topic, localBroker) config := testConsumerConfig() messagesMap := make(map[string]bool) var messagesMapLock sync.Mutex config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult { value := string(msg.Value) inLock(&messagesMapLock, func() { if _, exists := messagesMap[value]; exists { t.Errorf("Duplicate message: %s", value) } messagesMap[value] = true if len(messagesMap) == messages { consumeFinished <- true } }) return NewSuccessfulResult(id) } consumer := NewConsumer(config) go consumer.StartStatic(map[string]int{topic: 1}) select { case <-consumeFinished: case <-time.After(consumeTimeout): t.Errorf("Failed to consume %d messages within %s. 
Actual messages = %d", messages, consumeTimeout, len(messagesMap)) } closeWithin(t, closeTimeout, consumer) //restart consumer zkConfig := NewZookeeperConfig() zkConfig.ZookeeperConnect = []string{localZk} config.Coordinator = NewZookeeperCoordinator(zkConfig) consumer = NewConsumer(config) go consumer.StartStatic(map[string]int{topic: 1}) select { //this happens if we get a duplicate case <-consumeFinished: //and this happens normally case <-time.After(closeTimeout): } closeWithin(t, closeTimeout, consumer) } func TestSequentialConsuming(t *testing.T) { topic := fmt.Sprintf("test-sequential-%d", time.Now().Unix()) messages := make([]string, 0) for i := 0; i < numMessages; i++ { messages = append(messages, fmt.Sprintf("test-message-%d", i)) } CreateMultiplePartitionsTopic(localZk, topic, 1) EnsureHasLeader(localZk, topic) produce(t, messages, topic, localBroker, sarama.CompressionNone) config := testConsumerConfig() config.NumWorkers = 1 successChan := make(chan bool) config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult { value := string(msg.Value) Debug("test", value) message := messages[0] assert(t, value, message) messages = messages[1:] if len(messages) == 0 { successChan <- true } return NewSuccessfulResult(id) } consumer := NewConsumer(config) go consumer.StartStatic(map[string]int{topic: 1}) select { case <-successChan: case <-time.After(consumeTimeout): t.Errorf("Failed to consume %d messages within %s", numMessages, consumeTimeout) } closeWithin(t, 10*time.Second, consumer) } func TestGzipCompression(t *testing.T) { testCompression(t, sarama.CompressionGZIP) } func TestSnappyCompression(t *testing.T) { testCompression(t, sarama.CompressionSnappy) } func testCompression(t *testing.T, codec sarama.CompressionCodec) { topic := fmt.Sprintf("test-compression-%d", time.Now().Unix()) messages := make([]string, 0) for i := 0; i < numMessages; i++ { messages = append(messages, fmt.Sprintf("test-message-%d", i)) } 
CreateMultiplePartitionsTopic(localZk, topic, 1) EnsureHasLeader(localZk, topic) produce(t, messages, topic, localBroker, codec) config := testConsumerConfig() config.NumWorkers = 1 successChan := make(chan bool) config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult { value := string(msg.Value) Warn("test", value) message := messages[0] assert(t, value, message) messages = messages[1:] if len(messages) == 0 { successChan <- true } return NewSuccessfulResult(id) } consumer := NewConsumer(config) go consumer.StartStatic(map[string]int{topic: 1}) select { case <-successChan: case <-time.After(consumeTimeout): t.Errorf("Failed to consume %d messages within %s", numMessages, consumeTimeout) } closeWithin(t, 10*time.Second, consumer) } func TestBlueGreenDeployment(t *testing.T) { partitions := 2 activeTopic := fmt.Sprintf("active-%d", time.Now().Unix()) inactiveTopic := fmt.Sprintf("inactive-%d", time.Now().Unix()) zkConfig := NewZookeeperConfig() zkConfig.ZookeeperConnect = []string{localZk} coordinator := NewZookeeperCoordinator(zkConfig) coordinator.Connect() CreateMultiplePartitionsTopic(localZk, activeTopic, partitions) EnsureHasLeader(localZk, activeTopic) CreateMultiplePartitionsTopic(localZk, inactiveTopic, partitions) EnsureHasLeader(localZk, inactiveTopic) blueGroup := fmt.Sprintf("blue-%d", time.Now().Unix()) greenGroup := fmt.Sprintf("green-%d", time.Now().Unix()) processedInactiveMessages := 0 var inactiveCounterLock sync.Mutex processedActiveMessages := 0 var activeCounterLock sync.Mutex inactiveStrategy := func(worker *Worker, msg *Message, taskId TaskId) WorkerResult { atomicIncrement(&processedInactiveMessages, &inactiveCounterLock) return NewSuccessfulResult(taskId) } activeStrategy := func(worker *Worker, msg *Message, taskId TaskId) WorkerResult { atomicIncrement(&processedActiveMessages, &activeCounterLock) return NewSuccessfulResult(taskId) } blueGroupConsumers := []*Consumer{createConsumerForGroup(blueGroup, inactiveStrategy), 
createConsumerForGroup(blueGroup, inactiveStrategy)} greenGroupConsumers := []*Consumer{createConsumerForGroup(greenGroup, activeStrategy), createConsumerForGroup(greenGroup, activeStrategy)} for _, consumer := range blueGroupConsumers { consumer.config.BarrierTimeout = 10 * time.Second go consumer.StartStatic(map[string]int{ activeTopic: 1, }) } for _, consumer := range greenGroupConsumers { consumer.config.BarrierTimeout = 10 * time.Second go consumer.StartStatic(map[string]int{ inactiveTopic: 1, }) } blue := BlueGreenDeployment{activeTopic, "static", blueGroup} green := BlueGreenDeployment{inactiveTopic, "static", greenGroup} time.Sleep(30 * time.Second) coordinator.RequestBlueGreenDeployment(blue, green) time.Sleep(30 * time.Second) //All Blue consumers should switch to Green group and change topic to inactive greenConsumerIds, _ := coordinator.GetConsumersInGroup(greenGroup) for _, consumer := range blueGroupConsumers { found := false for _, consumerId := range greenConsumerIds { if consumerId == consumer.config.Consumerid { found = true } } assert(t, found, true) } //All Green consumers should switch to Blue group and change topic to active blueConsumerIds, _ := coordinator.GetConsumersInGroup(blueGroup) for _, consumer := range greenGroupConsumers { found := false for _, consumerId := range blueConsumerIds { if consumerId == consumer.config.Consumerid { found = true } } assert(t, found, true) } //At this stage Blue group became Green group //and Green group became Blue group //Producing messages to both topics produceMessages := 10 Infof(activeTopic, "Produce %d message", produceMessages) go produceN(t, produceMessages, activeTopic, localBroker) Infof(inactiveTopic, "Produce %d message", produceMessages) go produceN(t, produceMessages, inactiveTopic, localBroker) time.Sleep(10 * time.Second) //Green group consumes from inactive topic assert(t, processedInactiveMessages, produceMessages) //Blue group consumes from active topic assert(t, 
processedActiveMessages, produceMessages)
	for _, consumer := range blueGroupConsumers {
		closeWithin(t, 60*time.Second, consumer)
	}
	for _, consumer := range greenGroupConsumers {
		closeWithin(t, 60*time.Second, consumer)
	}
}

// TestConsumeAfterRebalance starts two consumers in the same group, stops the
// second one (forcing a rebalance back to the first) and verifies the first
// consumer still receives every produced message.
func TestConsumeAfterRebalance(t *testing.T) {
	partitions := 10
	topic := fmt.Sprintf("testConsumeAfterRebalance-%d", time.Now().Unix())
	group := fmt.Sprintf("consumeAfterRebalanceGroup-%d", time.Now().Unix())
	CreateMultiplePartitionsTopic(localZk, topic, partitions)
	EnsureHasLeader(localZk, topic)
	consumeMessages := 10
	delayTimeout := 10 * time.Second
	consumeTimeout := 60 * time.Second
	consumeStatus1 := make(chan int)
	consumeStatus2 := make(chan int)
	consumer1 := createConsumerForGroup(group, newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus1))
	consumer2 := createConsumerForGroup(group, newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus2))
	go consumer1.StartStatic(map[string]int{topic: 1})
	time.Sleep(delayTimeout)
	go consumer2.StartStatic(map[string]int{topic: 1})
	time.Sleep(delayTimeout)
	closeWithin(t, delayTimeout, consumer2)
	Infof(topic, "Produce %d message", consumeMessages)
	produceN(t, consumeMessages, topic, localBroker)
	if actual := <-consumeStatus1; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}
	closeWithin(t, delayTimeout, consumer1)
}

// Test that the first offset for a consumer group is correctly
// saved even after receiving just one message.
func TestConsumeFirstOffset(t *testing.T) {
	topic := fmt.Sprintf("test-consume-first-offset-%d", time.Now().Unix())
	group := fmt.Sprintf("test-group-%d", time.Now().Unix())
	// Fix: the timeout is declared locally like in every sibling test instead
	// of relying on an identifier not defined in this function.
	consumeTimeout := 60 * time.Second
	CreateMultiplePartitionsTopic(localZk, topic, 1)
	EnsureHasLeader(localZk, topic)
	produce(t, []string{"m1"}, topic, localBroker, sarama.CompressionNone)
	config := testConsumerConfig()
	config.NumWorkers = 1
	config.Groupid = group
	successChan := make(chan bool)
	config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		value := string(msg.Value)
		assert(t, value, "m1")
		successChan <- true
		return NewSuccessfulResult(id)
	}
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	select {
	case <-successChan:
	case <-time.After(consumeTimeout):
		// Fix: exactly one message is produced in this phase; the previous
		// message reported an unrelated count.
		t.Errorf("Failed to consume %d messages within %s", 1, consumeTimeout)
	}
	closeWithin(t, 10*time.Second, consumer)
	// Second phase: a fresh consumer in the same group must resume from the
	// committed offset and see only the newly produced message.
	produce(t, []string{"m2"}, topic, localBroker, sarama.CompressionNone)
	config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		value := string(msg.Value)
		assert(t, value, "m2")
		successChan <- true
		return NewSuccessfulResult(id)
	}
	consumer = NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	select {
	case <-successChan:
	case <-time.After(consumeTimeout):
		t.Errorf("Failed to consume %d messages within %s", 1, consumeTimeout)
	}
	closeWithin(t, 10*time.Second, consumer)
}

// Test consumer will properly start consuming a topic when it is created after starting the consumer but before it fails to fetch topic info
func TestCreateTopicAfterStartConsuming(t *testing.T) {
	partitions := 2
	// Fix: topic name matched this test's name instead of a copy-pasted
	// "testConsumeAfterRebalance" prefix, avoiding collisions with that test.
	topic := fmt.Sprintf("testCreateTopicAfterStartConsuming-%d", time.Now().Unix())
	consumeMessages := 10
	delayTimeout := 10 * time.Second
	consumeTimeout := 60 * time.Second
	consumeStatus := make(chan int)
	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 2})
	time.Sleep(10 * time.Second)
	CreateMultiplePartitionsTopic(localZk, topic, partitions)
	EnsureHasLeader(localZk, topic)
	Infof(topic, "Produce %d message", consumeMessages)
	produceN(t, consumeMessages, topic, localBroker)
	if actual := <-consumeStatus; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}
	closeWithin(t, delayTimeout, consumer)
}

// TestConsumeDistinctTopicsWithDistinctPartitions checks that every partition
// of two topics with different partition counts gets fully consumed.
func TestConsumeDistinctTopicsWithDistinctPartitions(t *testing.T) {
	topic1 := fmt.Sprintf("testConsumeDistinctTopics-%d", time.Now().UnixNano())
	topic1Partitions := 16
	topic2 := fmt.Sprintf("testConsumeDistinctTopics-%d", time.Now().UnixNano())
	topic2Partitions := 4
	CreateMultiplePartitionsTopic(localZk, topic1, topic1Partitions)
	EnsureHasLeader(localZk, topic1)
	Infof("distinct-topics-test", "Topic %s is created and has a leader elected", topic1)
	CreateMultiplePartitionsTopic(localZk, topic2, topic2Partitions)
	EnsureHasLeader(localZk, topic2)
	Infof("distinct-topics-test", "Topic %s is created and has a leader elected", topic2)
	consumeMessages := 100
	delayTimeout := 10 * time.Second
	consumeTimeout := 60 * time.Second
	consumeStatus := make(chan map[string]map[int]int)
	for partition := 0; partition < topic1Partitions; partition++ {
		produceNToTopicPartition(t, consumeMessages, topic1, partition, localBroker)
	}
	Infof("distinct-topics-test", "Produced %d messages to each partition of topic %s", consumeMessages, topic1)
	for partition := 0; partition < topic2Partitions; partition++ {
		produceNToTopicPartition(t, consumeMessages, topic2, partition, localBroker)
	}
	Infof("distinct-topics-test", "Produced %d messages to each partition of topic %s", consumeMessages, topic2)
	config := testConsumerConfig()
	config.Strategy = newAllPartitionsTrackingStrategy(t, consumeMessages*(topic1Partitions+topic2Partitions), consumeTimeout, consumeStatus)
	config.KeyDecoder = &Int32Decoder{}
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic1: topic1Partitions, topic2: topic2Partitions})
	consumed := <-consumeStatus
	for _, partitionInfo := range consumed {
		for _, numMessages := range partitionInfo {
			if numMessages != consumeMessages {
				t.Errorf("Failed to consume %d messages within %s. Actual messages = %v", consumeMessages, consumeTimeout, consumed)
			}
		}
	}
	closeWithin(t, delayTimeout, consumer)
}

// TestConsumeMultipleTopics verifies one consumer drains two topics at once.
func TestConsumeMultipleTopics(t *testing.T) {
	partitions1 := 16
	partitions2 := 4
	topic1 := fmt.Sprintf("testConsumeMultipleTopics-1-%d", time.Now().Unix())
	topic2 := fmt.Sprintf("testConsumeMultipleTopics-2-%d", time.Now().Unix())
	CreateMultiplePartitionsTopic(localZk, topic1, partitions1)
	EnsureHasLeader(localZk, topic1)
	CreateMultiplePartitionsTopic(localZk, topic2, partitions2)
	EnsureHasLeader(localZk, topic2)
	consumeMessages := 5000
	produceMessages1 := 4000
	produceMessages2 := 1000
	delayTimeout := 10 * time.Second
	consumeTimeout := 60 * time.Second
	consumeStatus := make(chan int)
	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic1: 2, topic2: 2})
	Infof(topic1, "Produce %d message", produceMessages1)
	produceN(t, produceMessages1, topic1, localBroker)
	Infof(topic2, "Produce %d message", produceMessages2)
	produceN(t, produceMessages2, topic2, localBroker)
	if actual := <-consumeStatus; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}
	closeWithin(t, delayTimeout, consumer)
}

// TestConsumeOnePartitionWithData produces to a single random partition of a
// 50-partition topic and verifies all messages are consumed.
func TestConsumeOnePartitionWithData(t *testing.T) {
	partitions := 50
	topic := fmt.Sprintf("testConsumeOnePartitionWithData-%d", time.Now().Unix())
	CreateMultiplePartitionsTopic(localZk, topic, partitions)
	EnsureHasLeader(localZk, topic)
	consumeMessages := 1000
	delayTimeout := 20 * time.Second
	consumeTimeout := 60 * time.Second
	consumeStatus := make(chan int)
	Infof(topic, "Produce %d messages", consumeMessages)
	produceNToTopicPartition(t, consumeMessages, topic, rand.Int()%partitions, localBroker)
	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	if actual := <-consumeStatus; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}
	closeWithin(t, delayTimeout, consumer)
}

// testConsumerConfig builds the baseline consumer configuration shared by all
// tests: earliest-offset reset, commit-and-continue failure policy and a
// ZooKeeper coordinator pointed at the local test ensemble.
func testConsumerConfig() *ConsumerConfig {
	config := DefaultConsumerConfig()
	config.AutoOffsetReset = SmallestOffset
	config.WorkerFailureCallback = func(_ *WorkerManager) FailedDecision {
		return CommitOffsetAndContinue
	}
	config.WorkerFailedAttemptCallback = func(_ *Task, _ WorkerResult) FailedDecision {
		return CommitOffsetAndContinue
	}
	config.Strategy = goodStrategy
	zkConfig := NewZookeeperConfig()
	zkConfig.ZookeeperConnect = []string{localZk}
	zkConfig.MaxRequestRetries = 10
	zkConfig.ZookeeperTimeout = 30 * time.Second
	zkConfig.RequestBackoff = 3 * time.Second
	config.Coordinator = NewZookeeperCoordinator(zkConfig)
	return config
}

// createConsumerForGroup returns a single-fetcher/single-worker consumer bound
// to the given group with the given processing strategy.
func createConsumerForGroup(group string, strategy WorkerStrategy) *Consumer {
	config := testConsumerConfig()
	config.Groupid = group
	config.NumConsumerFetchers = 1
	config.NumWorkers = 1
	config.FetchBatchTimeout = 1 * time.Second
	config.FetchBatchSize = 1
	config.Strategy = strategy
	return NewConsumer(config)
}

// newCountingStrategy counts every consumed message regardless of partition.
func newCountingStrategy(t *testing.T, expectedMessages int, timeout time.Duration, notify chan int) WorkerStrategy {
	return newPartitionTrackingStrategy(t, expectedMessages, timeout, notify, -1)
}

// newPartitionTrackingStrategy counts all consumed messages and, when
// trackPartition != -1, also counts messages from that partition. The totals
// are sent on notify either when expectedMessages is reached or on timeout.
func newPartitionTrackingStrategy(t *testing.T, expectedMessages int, timeout time.Duration, notify chan int, trackPartition int32) WorkerStrategy {
	allConsumedMessages := 0
	partitionConsumedMessages := 0
	var consumedMessagesLock sync.Mutex
	consumeFinished := make(chan bool)
	go func() {
		select {
		case <-consumeFinished:
		case <-time.After(timeout):
		}
		inLock(&consumedMessagesLock, func() {
			notify <- allConsumedMessages
			if trackPartition != -1 {
				notify <- partitionConsumedMessages
			}
		})
	}()
	return func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		inLock(&consumedMessagesLock, func() {
			if msg.Partition == trackPartition || trackPartition == -1 {
				partitionConsumedMessages++
			}
			allConsumedMessages++
			if allConsumedMessages == expectedMessages {
				consumeFinished <- true
			}
		})
		return NewSuccessfulResult(id)
	}
}

// newAllPartitionsTrackingStrategy tracks per-topic, per-key counts (keys are
// decoded uint32 partition ids) and publishes the map on notify when the
// expected grand total is reached or on timeout.
func newAllPartitionsTrackingStrategy(t *testing.T, expectedMessages int, timeout time.Duration, notify chan map[string]map[int]int) WorkerStrategy {
	allConsumedMessages := make(map[string]map[int]int)
	var consumedMessagesLock sync.Mutex
	consumeFinished := make(chan bool)
	go func() {
		select {
		case <-consumeFinished:
		case <-time.After(timeout):
		}
		inLock(&consumedMessagesLock, func() {
			notify <- allConsumedMessages
		})
	}()
	return func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		inLock(&consumedMessagesLock, func() {
			if _, exists := allConsumedMessages[msg.Topic]; !exists {
				allConsumedMessages[msg.Topic] = make(map[int]int)
			}
			allConsumedMessages[msg.Topic][int(msg.DecodedKey.(uint32))]++
			total := 0
			for _, partitionInfo := range allConsumedMessages {
				for _, numMessages := range partitionInfo {
					total += numMessages
				}
			}
			if total == expectedMessages {
				consumeFinished <- true
			}
		})
		return NewSuccessfulResult(id)
	}
}

// atomicIncrement bumps *counter under the given lock.
func atomicIncrement(counter *int, lock *sync.Mutex) {
	inLock(lock, func() {
		*counter++
	})
}
lazyval/go_kafka_mirror
consumer_test.go
GO
apache-2.0
24,744
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#nullable enable

using System.Collections.Immutable;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis.CodeActions;
using Microsoft.CodeAnalysis.Shared.Extensions;
using Microsoft.CodeAnalysis.Text;

namespace Microsoft.CodeAnalysis.CodeRefactorings
{
    internal static class CodeRefactoringContextExtensions
    {
        /// <summary>
        /// Use this helper to register multiple refactorings (<paramref name="actions"/>).
        /// </summary>
        internal static void RegisterRefactorings<TCodeAction>(
            this CodeRefactoringContext context, ImmutableArray<TCodeAction> actions)
            where TCodeAction : CodeAction
        {
            // A default (uninitialized) array carries no actions to register.
            if (actions.IsDefault)
            {
                return;
            }

            foreach (var action in actions)
            {
                context.RegisterRefactoring(action);
            }
        }

        /// <summary>
        /// Returns the most relevant <typeparamref name="TSyntaxNode"/> for the context's
        /// selection, or null when there is none.
        /// </summary>
        internal static Task<TSyntaxNode?> TryGetRelevantNodeAsync<TSyntaxNode>(this CodeRefactoringContext context)
            where TSyntaxNode : SyntaxNode
        {
            return TryGetRelevantNodeAsync<TSyntaxNode>(context.Document, context.Span, context.CancellationToken);
        }

        /// <summary>
        /// Returns all nodes of type <typeparamref name="TSyntaxNode"/> relevant to the
        /// context's selection, most relevant first.
        /// </summary>
        internal static Task<ImmutableArray<TSyntaxNode>> GetRelevantNodesAsync<TSyntaxNode>(this CodeRefactoringContext context)
            where TSyntaxNode : SyntaxNode
        {
            return GetRelevantNodesAsync<TSyntaxNode>(context.Document, context.Span, context.CancellationToken);
        }

        /// <summary>
        /// Returns the most relevant <typeparamref name="TSyntaxNode"/> for
        /// <paramref name="span"/> in <paramref name="document"/>, or null when none applies.
        /// </summary>
        internal static async Task<TSyntaxNode?> TryGetRelevantNodeAsync<TSyntaxNode>(
            this Document document,
            TextSpan span,
            CancellationToken cancellationToken)
            where TSyntaxNode : SyntaxNode
        {
            var candidates = await GetRelevantNodesAsync<TSyntaxNode>(document, span, cancellationToken).ConfigureAwait(false);
            return candidates.FirstOrDefault();
        }

        /// <summary>
        /// Delegates to the language-specific <see cref="IRefactoringHelpersService"/> to
        /// compute the nodes relevant to <paramref name="span"/>.
        /// </summary>
        internal static Task<ImmutableArray<TSyntaxNode>> GetRelevantNodesAsync<TSyntaxNode>(
            this Document document,
            TextSpan span,
            CancellationToken cancellationToken)
            where TSyntaxNode : SyntaxNode
        {
            var helperService = document.GetRequiredLanguageService<IRefactoringHelpersService>();
            return helperService.GetRelevantNodesAsync<TSyntaxNode>(document, span, cancellationToken);
        }
    }
}
reaction1989/roslyn
src/Features/Core/Portable/CodeRefactorings/CodeRefactoringContextExtensions.cs
C#
apache-2.0
2,593
// Client-side logic for the i18n dashboard translation console: decorates the
// InputEx form with per-item state classes, an Accept button, a Validate
// button, and server-side validation feedback.

// Verb codes describing a source string's state relative to its translation
// (values mirror what the server sets on each item).
var VERB_NEW = 1;
var VERB_CHANGED = 2;
var VERB_CURRENT = 3;

// CSS classes corresponding to the verb codes above.
var VERB_NEW_CLASS = "verb-new";
var VERB_CHANGED_CLASS = "verb-changed";
var VERB_CURRENT_CLASS = "verb-current";
var EDITED_CLASS = "edited";

// Validation status codes returned by the server for each section.
var NOT_STARTED_TRANSLATION = 0;
var VALID_TRANSLATION = 1;
var INVALID_TRANSLATION = 2;

// Maps a verb code to its CSS class; returns "" for unknown verbs.
function getVerbClassName(verb) {
  switch (verb) {
    case VERB_NEW:
      // new source value added, no mapping to target exists
      return VERB_NEW_CLASS;
    case VERB_CHANGED:
      // source value changed, mapping to target likely invalid
      return VERB_CHANGED_CLASS;
    case VERB_CURRENT:
      // source value is mapped to valid target value
      return VERB_CURRENT_CLASS;
    default:
      return "";
  }
}

/**
 * Iterate over the items of the InputEx form.
 *
 * @param env The cb_global object
 * @param action A function which is passed the sections and items of the form
 */
function iterateFormItems(env, action) {
  $.each(env.form.inputsNames.sections.subFields, function(i, section) {
    $.each(section.inputsNames.data.subFields, function(j, item) {
      action(section, item);
    })
  });
}

// Returns the form section whose "name" field equals the given name, or null.
function getSectionByName(env, name){
  var section = null;
  $.each(env.form.inputsNames.sections.subFields, function(i, s){
    if (s.inputsNames.name.getValue() == name) {
      section = s;
      // Returning false stops $.each early once a match is found.
      return false;
    }
  });
  return section;
}

// Flags an item as edited: sets its hidden "changed" field and restyles its
// enclosing fieldset.
function markAsEdited(item) {
  item.changed.setValue(true);
  $(item.changed.el).closest("fieldset")
      .removeClass().addClass(EDITED_CLASS);
}

// Adds a "Validate" button next to the form's Save controls.
function insertValidateButton() {
  var button = new Y.inputEx.widget.Button({
    type: "submit-link",
    value: "Validate",
    className: "inputEx-Button inputEx-Button-Submit-Link gcb-pull-left",
    onClick: onClickValidate
  });
  button.render($("div.inputEx-Form-buttonBar")[0]);
  // Button rendering will append the button at the end of the div, so we
  // move it to the second position after it's been created.
  $("div.inputEx-Form-buttonBar > a:first-child").after(button.el);
  cb_global.form.buttons.splice(1, 0, button);
}

// Submits the current form value to the save endpoint with validate=true.
// Returns false to suppress the default link action.
function onClickValidate() {
  disableAllControlButtons(cb_global.form);
  var request = {
    key: cb_global.save_args.key,
    xsrf_token: cb_global.xsrf_token,
    payload: JSON.stringify(cb_global.form.getValue()),
    validate: true
  }
  Y.io(cb_global.save_url, {
    method: "PUT",
    data: {"request": JSON.stringify(request)},
    on: {
      complete: onValidateComplete
    }
  });
  return false;
}

// Handles the validation response: re-enables the buttons and renders
// per-section feedback from the response payload.
function onValidateComplete(transactionId, response, args) {
  enableAllControlButtons(cb_global.form);
  if (response.status != 200) {
    cbShowMsg("Server error, please try again.");
    return;
  }
  response = parseJson(response.responseText);
  if (response.status != 200) {
    cbShowMsg(response.message);
  }
  var payload = JSON.parse(response.payload || "{}");
  for (var name in payload) {
    if (payload.hasOwnProperty(name)) {
      var section = getSectionByName(cb_global, name);
      addValidationFeedbackTo(section.divEl.firstChild, payload[name]);
    }
  }
}

// Replaces any existing feedback div on the fieldset with one reflecting the
// given validation result (status code plus error message).
function addValidationFeedbackTo(fieldsetEl, feedback) {
  $("div.validation-feedback", fieldsetEl).remove();
  var feedbackDiv = $("<div/>").addClass("validation-feedback");
  if (feedback.status == VALID_TRANSLATION) {
    feedbackDiv.addClass("valid");
  } else {
    feedbackDiv.addClass("invalid");
  }
  feedbackDiv.append($("<div/>").addClass("icon"));
  feedbackDiv.append($("<div/>").addClass("errm").text(feedback.errm));
  $(fieldsetEl).append(feedbackDiv);
}

// Greys out a section's feedback once its translation is edited again.
function markValidationFeedbackStale(sectionField) {
  $("div.validation-feedback", sectionField.divEl)
      .removeClass()
      .addClass("validation-feedback stale");
}

// Page initialization: wire up classes, buttons, headers and change handlers.
$(function() {
  // Tag each item with the CSS class for its current verb state.
  iterateFormItems(cb_global, function(sectionField, itemField) {
    var verb = itemField.inputsNames.verb.getValue();
    $(itemField.divEl.firstChild).addClass(getVerbClassName(verb));
  });
  $(".disabled textarea").prop("disabled", true);
  // Insert the status indicators into the DOM
  $(".translation-item fieldset fieldset")
      .append($("<div class=\"status\"></div>"));
  // Set up the accept buttons to appear when there is changed content
  iterateFormItems(cb_global, function(sectionField, itemField) {
    var button = $("<button class=\"accept inputEx-Button\">Accept</button>");
    button.click(function() {
      markAsEdited(itemField.inputsNames);
      return false;
    });
    $(itemField.divEl.firstChild).append(button);
  });
  // Insert the two-column Source/Translation header above the items.
  $(".translation-console > fieldset > div:last-child").before($(
      "<div class=\"translation-header\">" +
      " <div>Source (<span class=\"source-locale\"></span>)</div>" +
      " <div>Translation (<span class=\"target-locale\"></span>)</div>" +
      "</div>"));
  var formValue = cb_global.form.getValue();
  $(".translation-header .source-locale").text(formValue['source_locale']);
  $(".translation-header .target-locale").text(formValue['target_locale']);
  // Any edit to a target value marks the item edited and its feedback stale.
  iterateFormItems(cb_global, function(sectionField, itemField) {
    $(itemField.inputsNames.target_value.el).on("input change", function() {
      // Listen on "change" for older browser support
      markAsEdited(itemField.inputsNames);
      markValidationFeedbackStale(sectionField);
    });
  });
  // After a successful save, promote edited items to "current" and reset the
  // changed flags.
  cb_global.onSaveComplete = function() {
    iterateFormItems(cb_global, function(sectionField, itemField) {
      var item = itemField.inputsNames;
      if (item.changed.getValue()) {
        item.verb.setValue(VERB_CURRENT);
        $(item.changed.el).closest('fieldset')
            .removeClass().addClass(VERB_CURRENT_CLASS);
      }
      item.changed.setValue(false);
    });
    cb_global.lastSavedFormValue = cb_global.form.getValue();
  };
  insertValidateButton();
});
UniMOOC/AAClassroom
modules/i18n_dashboard/templates/translation_console.js
JavaScript
apache-2.0
5,738
/*
 * Copyright (c) 2016, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.wso2.developerstudio.esb.form.editors.article.rcp;

import java.util.MissingResourceException;
import java.util.ResourceBundle;

/**
 * Accessor for the editor's localized message strings, backed by the
 * {@code messages} resource bundle in this package.
 */
public class Messages {
	private static final String BUNDLE_NAME = "org.wso2.developerstudio.esb.form.editors.article.rcp.messages"; //$NON-NLS-1$

	// Loaded once at class initialization for the default locale.
	private static final ResourceBundle RESOURCE_BUNDLE = ResourceBundle
			.getBundle(BUNDLE_NAME);

	// Static-only utility class; not meant to be instantiated.
	private Messages() {
	}

	/**
	 * Returns the localized string for the given key, or the key wrapped in
	 * exclamation marks (e.g. {@code !missing.key!}) when no entry exists.
	 *
	 * @param key resource bundle key
	 * @return the localized message, or {@code '!' + key + '!'} if absent
	 */
	public static String getString(String key) {
		try {
			return RESOURCE_BUNDLE.getString(key);
		} catch (MissingResourceException e) {
			return '!' + key + '!';
		}
	}
}
prabushi/devstudio-tooling-esb
plugins/org.wso2.developerstudio.esb.form.editors/src/org/wso2/developerstudio/esb/form/editors/article/rcp/Messages.java
Java
apache-2.0
1,304
// @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ int main(int argc, char *argv[]) { return 0; }
apache/incubator-trafodion
core/sqf/src/win/t.cpp
C++
apache-2.0
910
/* * Copyright (c) 2017, Adam <Adam@sigterm.info> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */
package net.runelite.http.api.account;

import com.google.gson.JsonParseException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.UUID;
import net.runelite.http.api.RuneLiteAPI;
import okhttp3.HttpUrl;
import okhttp3.Request;
import okhttp3.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * HTTP client for the RuneLite account endpoints (login, logout and
 * session-check), built on the shared {@link RuneLiteAPI} OkHttp client.
 */
public class AccountClient
{
	private static final Logger logger = LoggerFactory.getLogger(AccountClient.class);

	// Session id sent in the RUNELITE_AUTH header. May be null when this
	// client is only used for login().
	private UUID uuid;

	public AccountClient()
	{
	}

	public AccountClient(UUID uuid)
	{
		this.uuid = uuid;
	}

	/**
	 * Requests a new OAuth login from the account service.
	 *
	 * @return the parsed {@code OAuthResponse}
	 * @throws IOException on network failure or malformed JSON in the response
	 */
	public OAuthResponse login() throws IOException
	{
		HttpUrl url = RuneLiteAPI.getApiBase().newBuilder()
			.addPathSegment("account")
			.addPathSegment("login")
			.build();

		logger.debug("Built URI: {}", url);

		Request request = new Request.Builder()
			.url(url)
			.build();

		try (Response response = RuneLiteAPI.CLIENT.newCall(request).execute())
		{
			InputStream in = response.body().byteStream();
			// Wrap JSON parse failures as IOException so callers handle one type.
			return RuneLiteAPI.GSON.fromJson(new InputStreamReader(in), OAuthResponse.class);
		}
		catch (JsonParseException ex)
		{
			throw new IOException(ex);
		}
	}

	/**
	 * Invalidates the current session on the server. The response body is
	 * ignored; network errors propagate as IOException.
	 */
	public void logout() throws IOException
	{
		HttpUrl url = RuneLiteAPI.getApiBase().newBuilder()
			.addPathSegment("account")
			.addPathSegment("logout")
			.build();

		logger.debug("Built URI: {}", url);

		Request request = new Request.Builder()
			.header(RuneLiteAPI.RUNELITE_AUTH, uuid.toString())
			.url(url)
			.build();

		try (Response response = RuneLiteAPI.CLIENT.newCall(request).execute())
		{
			logger.debug("Sent logout request");
		}
	}

	/**
	 * Checks whether the stored session is still valid.
	 *
	 * NOTE(review): method name has a typo ("sesssion") — it is public API,
	 * so renaming it would break callers; fix in a coordinated change.
	 *
	 * @return true if the session is valid, or if the server is unreachable
	 *         (the session is assumed to still be valid in that case)
	 */
	public boolean sesssionCheck()
	{
		HttpUrl url = RuneLiteAPI.getApiBase().newBuilder()
			.addPathSegment("account")
			.addPathSegment("session-check")
			.build();

		logger.debug("Built URI: {}", url);

		Request request = new Request.Builder()
			.header(RuneLiteAPI.RUNELITE_AUTH, uuid.toString())
			.url(url)
			.build();

		try (Response response = RuneLiteAPI.CLIENT.newCall(request).execute())
		{
			return response.isSuccessful();
		}
		catch (IOException ex)
		{
			logger.debug("Unable to verify session", ex);
			return true; // assume it is still valid if the server is unreachable
		}
	}
}
UniquePassive/runelite
http-api/src/main/java/net/runelite/http/api/account/AccountClient.java
Java
bsd-2-clause
3,621
# Homebrew-cask descriptor for MoneyWell, a personal finance application.
cask :v1 => 'moneywell' do
  version '2.3.4'
  sha256 'f4b900576657c669a40481d7c2ad1ad6d48a4468d16963d4e9c8ddeca9c1548a'

  # The vendor's download file embeds only the major and minor digits
  # (e.g. version 2.3.4 downloads as "MoneyWell_23.zip"), so the regex sub
  # rewrites "major.minor[.patch...]" into "<major><minor>".
  url "http://downloads.nothirst.com/MoneyWell_#{version.sub(%r{^(\d+)\.(\d+).*},'\1\2')}.zip"
  appcast 'http://nothirst.com/feeds/MoneyWell2Appcast.xml',
          :sha256 => '8de9519f9ff874d9baf67feefbe3f258ca89e6c07fbdf35fef6f1a6c55af9ea2'
  homepage 'http://nothirst.com/moneywell/'
  license :unknown

  app 'MoneyWell.app'
end
L2G/homebrew-cask
Casks/moneywell.rb
Ruby
bsd-2-clause
456
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/installer/setup/install.h"

#include <windows.h>
#include <shlobj.h>
#include <time.h>

#include <string>

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/shortcut.h"
#include "base/win/windows_version.h"
#include "chrome/common/chrome_constants.h"
#include "chrome/common/chrome_switches.h"
#include "chrome/installer/setup/install_worker.h"
#include "chrome/installer/setup/setup_constants.h"
#include "chrome/installer/setup/setup_util.h"
#include "chrome/installer/setup/update_active_setup_version_work_item.h"
#include "chrome/installer/util/auto_launch_util.h"
#include "chrome/installer/util/beacons.h"
#include "chrome/installer/util/browser_distribution.h"
#include "chrome/installer/util/create_reg_key_work_item.h"
#include "chrome/installer/util/delete_after_reboot_helper.h"
#include "chrome/installer/util/google_update_constants.h"
#include "chrome/installer/util/helper.h"
#include "chrome/installer/util/install_util.h"
#include "chrome/installer/util/master_preferences.h"
#include "chrome/installer/util/master_preferences_constants.h"
#include "chrome/installer/util/set_reg_value_work_item.h"
#include "chrome/installer/util/util_constants.h"
#include "chrome/installer/util/work_item.h"
#include "chrome/installer/util/work_item_list.h"

namespace {

// Builds and emits a human-readable log line describing a shortcut operation
// (about to run when |failed| is false, or its failure when true).
void LogShortcutOperation(ShellUtil::ShortcutLocation location,
                          BrowserDistribution* dist,
                          const ShellUtil::ShortcutProperties& properties,
                          ShellUtil::ShortcutOperation operation,
                          bool failed) {
  // ShellUtil::SHELL_SHORTCUT_UPDATE_EXISTING should not be used at install and
  // thus this method does not handle logging a message for it.
  DCHECK(operation != ShellUtil::SHELL_SHORTCUT_UPDATE_EXISTING);
  std::string message;
  if (failed)
    message.append("Failed: ");
  message.append(
      (operation == ShellUtil::SHELL_SHORTCUT_CREATE_ALWAYS ||
       operation == ShellUtil::SHELL_SHORTCUT_CREATE_IF_NO_SYSTEM_LEVEL) ?
      "Creating " : "Overwriting ");
  if (failed && operation == ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING)
    message.append("(maybe the shortcut doesn't exist?) ");
  message.append((properties.level == ShellUtil::CURRENT_USER) ?
                     "per-user " : "all-users ");
  switch (location) {
    case ShellUtil::SHORTCUT_LOCATION_DESKTOP:
      message.append("Desktop ");
      break;
    case ShellUtil::SHORTCUT_LOCATION_QUICK_LAUNCH:
      message.append("Quick Launch ");
      break;
    case ShellUtil::SHORTCUT_LOCATION_START_MENU_CHROME_DIR:
      message.append("Start menu/" +
                     base::UTF16ToUTF8(dist->GetStartMenuShortcutSubfolder(
                         BrowserDistribution::SUBFOLDER_CHROME)) + " ");
      break;
    case ShellUtil::SHORTCUT_LOCATION_START_MENU_CHROME_APPS_DIR:
      message.append("Start menu/" +
                     base::UTF16ToUTF8(dist->GetStartMenuShortcutSubfolder(
                         BrowserDistribution::SUBFOLDER_APPS)) + " ");
      break;
    default:
      NOTREACHED();
  }

  message.push_back('"');
  if (properties.has_shortcut_name())
    message.append(base::UTF16ToUTF8(properties.shortcut_name));
  else
    message.append(base::UTF16ToUTF8(dist->GetDisplayName()));
  message.push_back('"');

  message.append(" shortcut to ");
  message.append(base::UTF16ToUTF8(properties.target.value()));
  if (properties.has_arguments())
    message.append(base::UTF16ToUTF8(properties.arguments));

  // Pinning is only mentioned on OS versions that support it.
  if (properties.pin_to_taskbar &&
      base::win::GetVersion() >= base::win::VERSION_WIN7) {
    message.append(" and pinning to the taskbar");
  }
  if (properties.pin_to_start &&
      base::win::GetVersion() >= base::win::VERSION_WIN10) {
    message.append(" and pinning to Start");
  }

  message.push_back('.');

  if (failed)
    LOG(WARNING) << message;
  else
    VLOG(1) << message;
}

// Performs the shortcut operation via ShellUtil, logging it before the attempt
// and again (as a warning) if it fails.
void ExecuteAndLogShortcutOperation(
    ShellUtil::ShortcutLocation location,
    BrowserDistribution* dist,
    const ShellUtil::ShortcutProperties& properties,
    ShellUtil::ShortcutOperation operation) {
  LogShortcutOperation(location, dist, properties, operation, false);
  if (!ShellUtil::CreateOrUpdateShortcut(location, dist, properties,
                                         operation)) {
    LogShortcutOperation(location, dist, properties, operation, true);
  }
}

// Registers chrome.exe under the Windows Media Player inclusion-list key in
// HKLM; failure is logged but not fatal to the install.
void AddChromeToMediaPlayerList() {
  base::string16 reg_path(installer::kMediaPlayerRegPath);
  // registry paths can also be appended like file system path
  reg_path.push_back(base::FilePath::kSeparators[0]);
  reg_path.append(installer::kChromeExe);
  VLOG(1) << "Adding Chrome to Media player list at " << reg_path;
  scoped_ptr<WorkItem> work_item(WorkItem::CreateCreateRegKeyWorkItem(
      HKEY_LOCAL_MACHINE, reg_path, WorkItem::kWow64Default));

  // if the operation fails we log the error but still continue
  if (!work_item.get()->Do())
    LOG(ERROR) << "Could not add Chrome to media player inclusion list.";
}

// Copy master_preferences file provided to installer, in the same folder
// as chrome.exe so Chrome first run can find it. This function will be called
// only on the first install of Chrome.
void CopyPreferenceFileForFirstRun(
    const installer::InstallerState& installer_state,
    const base::FilePath& prefs_source_path) {
  base::FilePath prefs_dest_path(installer_state.target_path().AppendASCII(
      installer::kDefaultMasterPrefs));
  if (!base::CopyFile(prefs_source_path, prefs_dest_path)) {
    VLOG(1) << "Failed to copy master preferences from:"
            << prefs_source_path.value() << " gle: " << ::GetLastError();
  }
}

// This function installs a new version of Chrome to the specified location.
//
// setup_path: Path to the executable (setup.exe) as it will be copied
//             to Chrome install folder after install is complete
// archive_path: Path to the archive (chrome.7z) as it will be copied
//               to Chrome install folder after install is complete
// src_path: the path that contains a complete and unpacked Chrome package
//           to be installed.
// temp_path: the path of working directory used during installation. This path
//            does not need to exist.
// new_version: new Chrome version that needs to be installed
// current_version: returns the current active version (if any)
//
// This function makes best effort to do installation in a transactional
// manner. If failed it tries to rollback all changes on the file system
// and registry. For example, if package exists before calling the
// function, it rolls back all new file and directory changes under
// package. If package does not exist before calling the function
// (typical new install), the function creates package during install
// and removes the whole directory during rollback.
installer::InstallStatus InstallNewVersion(
    const installer::InstallationState& original_state,
    const installer::InstallerState& installer_state,
    const base::FilePath& setup_path,
    const base::FilePath& archive_path,
    const base::FilePath& src_path,
    const base::FilePath& temp_path,
    const Version& new_version,
    scoped_ptr<Version>* current_version) {
  DCHECK(current_version);

  installer_state.UpdateStage(installer::BUILDING);

  current_version->reset(installer_state.GetCurrentVersion(original_state));
  scoped_ptr<WorkItemList> install_list(WorkItem::CreateWorkItemList());

  AddInstallWorkItems(original_state,
                      installer_state,
                      setup_path,
                      archive_path,
                      src_path,
                      temp_path,
                      current_version->get(),
                      new_version,
                      install_list.get());

  base::FilePath new_chrome_exe(
      installer_state.target_path().Append(installer::kChromeNewExe));

  installer_state.UpdateStage(installer::EXECUTING);

  if (!install_list->Do()) {
    installer_state.UpdateStage(installer::ROLLINGBACK);
    // A failed same-version install that left new_chrome.exe behind is
    // reported distinctly from a plain install failure.
    installer::InstallStatus result =
        base::PathExists(new_chrome_exe) && current_version->get() &&
        new_version.Equals(*current_version->get()) ?
        installer::SAME_VERSION_REPAIR_FAILED :
        installer::INSTALL_FAILED;
    LOG(ERROR) << "Install failed, rolling back... result: " << result;
    install_list->Rollback();
    LOG(ERROR) << "Rollback complete. ";
    return result;
  }

  installer_state.UpdateStage(installer::REFRESHING_POLICY);

  installer::RefreshElevationPolicy();

  // Map the old/new version relationship onto the returned status code.
  if (!current_version->get()) {
    VLOG(1) << "First install of version " << new_version.GetString();
    return installer::FIRST_INSTALL_SUCCESS;
  }

  if (new_version.Equals(**current_version)) {
    VLOG(1) << "Install repaired of version " << new_version.GetString();
    return installer::INSTALL_REPAIRED;
  }

  if (new_version.CompareTo(**current_version) > 0) {
    // new_chrome.exe still present means the running browser is holding the
    // old version in use; the swap happens on next rename/restart.
    if (base::PathExists(new_chrome_exe)) {
      VLOG(1) << "Version updated to " << new_version.GetString()
              << " while running " << (*current_version)->GetString();
      return installer::IN_USE_UPDATED;
    }
    VLOG(1) << "Version updated to " << new_version.GetString();
    return installer::NEW_VERSION_UPDATED;
  }

  LOG(ERROR) << "Not sure how we got here while updating"
             << ", new version: " << new_version.GetString()
             << ", old version: " << (*current_version)->GetString();
  return installer::INSTALL_FAILED;
}

}  // end namespace

namespace installer {

// Escapes &, ' and < so |att_value| is safe inside a single-quoted XML
// attribute (as used in the VisualElements manifest below).
void EscapeXmlAttributeValueInSingleQuotes(base::string16* att_value) {
  base::ReplaceChars(*att_value, base::ASCIIToUTF16("&"),
                     base::ASCIIToUTF16("&amp;"), att_value);
  base::ReplaceChars(*att_value, base::ASCIIToUTF16("'"),
                     base::ASCIIToUTF16("&apos;"), att_value);
  base::ReplaceChars(*att_value, base::ASCIIToUTF16("<"),
                     base::ASCIIToUTF16("&lt;"), att_value);
}

bool CreateVisualElementsManifest(const base::FilePath& src_path,
                                  const Version& version) {
  // Construct the relative path to the versioned VisualElements directory.
  base::string16 elements_dir(base::ASCIIToUTF16(version.GetString()));
  elements_dir.push_back(base::FilePath::kSeparators[0]);
  elements_dir.append(installer::kVisualElements);

  // Some distributions of Chromium may not include visual elements. Only
  // proceed if this distribution does.
  if (!base::PathExists(src_path.Append(elements_dir))) {
    VLOG(1) << "No visual elements found, not writing "
            << installer::kVisualElementsManifest << " to " << src_path.value();
    return true;
  } else {
    // A printf-style format string for generating the visual elements
    // manifest. Required arguments, in order, are:
    //   - Localized display name for the product.
    //   - Relative path to the VisualElements directory, three times.
    static const char kManifestTemplate[] =
        "<Application>\r\n"
        " <VisualElements\r\n"
        " DisplayName='%ls'\r\n"
        " Logo='%ls\\Logo.png'\r\n"
        " SmallLogo='%ls\\SmallLogo.png'\r\n"
        " ForegroundText='light'\r\n"
        " BackgroundColor='#323232'>\r\n"
        " <DefaultTile ShowName='allLogos'/>\r\n"
        " <SplashScreen Image='%ls\\splash-620x300.png'/>\r\n"
        " </VisualElements>\r\n"
        "</Application>";

    const base::string16 manifest_template(
        base::ASCIIToUTF16(kManifestTemplate));

    BrowserDistribution* dist = BrowserDistribution::GetSpecificDistribution(
        BrowserDistribution::CHROME_BROWSER);
    // TODO(grt): http://crbug.com/75152 Write a reference to a localized
    // resource for |display_name|.
    base::string16 display_name(dist->GetDisplayName());
    EscapeXmlAttributeValueInSingleQuotes(&display_name);

    // Fill the manifest with the desired values.
    base::string16 manifest16(base::StringPrintf(
        manifest_template.c_str(), display_name.c_str(), elements_dir.c_str(),
        elements_dir.c_str(), elements_dir.c_str()));

    // Write the manifest to |src_path|.
    const std::string manifest(base::UTF16ToUTF8(manifest16));
    int size = base::checked_cast<int>(manifest.size());
    if (base::WriteFile(
            src_path.Append(installer::kVisualElementsManifest),
            manifest.c_str(), size) == size) {
      VLOG(1) << "Successfully wrote " << installer::kVisualElementsManifest
              << " to " << src_path.value();
      return true;
    } else {
      PLOG(ERROR) << "Error writing " << installer::kVisualElementsManifest
                  << " to " << src_path.value();
      return false;
    }
  }
}

void CreateOrUpdateShortcuts(
    const base::FilePath& target,
    const installer::Product& product,
    const MasterPreferences& prefs,
    InstallShortcutLevel install_level,
    InstallShortcutOperation install_operation) {
  // Master preferences can opt out of shortcut creation entirely.
  bool do_not_create_any_shortcuts = false;
  prefs.GetBool(master_preferences::kDoNotCreateAnyShortcuts,
                &do_not_create_any_shortcuts);
  if (do_not_create_any_shortcuts)
    return;

  // Extract shortcut preferences from |prefs|.
  bool do_not_create_desktop_shortcut = false;
  bool do_not_create_quick_launch_shortcut = false;
  bool do_not_create_taskbar_shortcut = false;
  bool do_not_create_start_pin = false;
  bool alternate_desktop_shortcut = false;
  prefs.GetBool(master_preferences::kDoNotCreateDesktopShortcut,
                &do_not_create_desktop_shortcut);
  prefs.GetBool(master_preferences::kDoNotCreateQuickLaunchShortcut,
                &do_not_create_quick_launch_shortcut);
  prefs.GetBool(master_preferences::kDoNotCreateTaskbarShortcut,
                &do_not_create_taskbar_shortcut);
  prefs.GetBool(master_preferences::kDoNotCreateStartPin,
                &do_not_create_start_pin);
  prefs.GetBool(master_preferences::kAltShortcutText,
                &alternate_desktop_shortcut);

  BrowserDistribution* dist = product.distribution();

  // The default operation on update is to overwrite shortcuts with the
  // currently desired properties, but do so only for shortcuts that still
  // exist.
  ShellUtil::ShortcutOperation shortcut_operation;
  switch (install_operation) {
    case INSTALL_SHORTCUT_CREATE_ALL:
      shortcut_operation = ShellUtil::SHELL_SHORTCUT_CREATE_ALWAYS;
      break;
    case INSTALL_SHORTCUT_CREATE_EACH_IF_NO_SYSTEM_LEVEL:
      shortcut_operation = ShellUtil::SHELL_SHORTCUT_CREATE_IF_NO_SYSTEM_LEVEL;
      break;
    default:
      DCHECK(install_operation == INSTALL_SHORTCUT_REPLACE_EXISTING);
      shortcut_operation = ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING;
      break;
  }

  // Shortcuts are always installed per-user unless specified.
  ShellUtil::ShellChange shortcut_level = (install_level == ALL_USERS ?
      ShellUtil::SYSTEM_LEVEL : ShellUtil::CURRENT_USER);

  // |base_properties|: The basic properties to set on every shortcut installed
  // (to be refined on a per-shortcut basis).
  ShellUtil::ShortcutProperties base_properties(shortcut_level);
  product.AddDefaultShortcutProperties(target, &base_properties);

  if (!do_not_create_desktop_shortcut ||
      shortcut_operation == ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING) {
    ShellUtil::ShortcutProperties desktop_properties(base_properties);
    if (alternate_desktop_shortcut) {
      desktop_properties.set_shortcut_name(
          dist->GetShortcutName(
              BrowserDistribution::SHORTCUT_CHROME_ALTERNATE));
    }
    ExecuteAndLogShortcutOperation(
        ShellUtil::SHORTCUT_LOCATION_DESKTOP, dist, desktop_properties,
        shortcut_operation);

    // On update there is no harm in always trying to update the alternate
    // Desktop shortcut.
    if (!alternate_desktop_shortcut &&
        shortcut_operation == ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING) {
      desktop_properties.set_shortcut_name(
          dist->GetShortcutName(
              BrowserDistribution::SHORTCUT_CHROME_ALTERNATE));
      ExecuteAndLogShortcutOperation(
          ShellUtil::SHORTCUT_LOCATION_DESKTOP, dist, desktop_properties,
          shortcut_operation);
    }
  }

  if (!do_not_create_quick_launch_shortcut ||
      shortcut_operation == ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING) {
    // There is no such thing as an all-users Quick Launch shortcut, always
    // install the per-user shortcut.
ShellUtil::ShortcutProperties quick_launch_properties(base_properties); quick_launch_properties.level = ShellUtil::CURRENT_USER; ExecuteAndLogShortcutOperation( ShellUtil::SHORTCUT_LOCATION_QUICK_LAUNCH, dist, quick_launch_properties, shortcut_operation); } ShellUtil::ShortcutProperties start_menu_properties(base_properties); // IMPORTANT: Only the default (no arguments and default browserappid) browser // shortcut in the Start menu (Start screen on Win8+) should be made dual // mode and that prior to Windows 10 only. if (InstallUtil::ShouldInstallMetroProperties()) start_menu_properties.set_dual_mode(true); if (shortcut_operation == ShellUtil::SHELL_SHORTCUT_CREATE_ALWAYS || shortcut_operation == ShellUtil::SHELL_SHORTCUT_CREATE_IF_NO_SYSTEM_LEVEL) { start_menu_properties.set_pin_to_taskbar(!do_not_create_taskbar_shortcut); // Disabled for now. TODO(gab): Remove this and the associated code if it // remains disabled long term. start_menu_properties.set_pin_to_start(false); } ExecuteAndLogShortcutOperation( ShellUtil::SHORTCUT_LOCATION_START_MENU_CHROME_DIR, dist, start_menu_properties, shortcut_operation); } void RegisterChromeOnMachine(const installer::InstallerState& installer_state, const installer::Product& product, bool make_chrome_default) { DCHECK(product.is_chrome()); // Try to add Chrome to Media Player shim inclusion list. We don't do any // error checking here because this operation will fail if user doesn't // have admin rights and we want to ignore the error. AddChromeToMediaPlayerList(); // Make Chrome the default browser if desired when possible. Otherwise, only // register it with Windows. 
BrowserDistribution* dist = product.distribution(); const base::FilePath chrome_exe( installer_state.target_path().Append(installer::kChromeExe)); VLOG(1) << "Registering Chrome as browser: " << chrome_exe.value(); if (make_chrome_default && ShellUtil::CanMakeChromeDefaultUnattended()) { int level = ShellUtil::CURRENT_USER; if (installer_state.system_install()) level = level | ShellUtil::SYSTEM_LEVEL; ShellUtil::MakeChromeDefault(dist, level, chrome_exe, true); } else { ShellUtil::RegisterChromeBrowser(dist, chrome_exe, base::string16(), false); } } InstallStatus InstallOrUpdateProduct( const installer::InstallationState& original_state, const installer::InstallerState& installer_state, const base::FilePath& setup_path, const base::FilePath& archive_path, const base::FilePath& install_temp_path, const base::FilePath& src_path, const base::FilePath& prefs_path, const MasterPreferences& prefs, const Version& new_version) { DCHECK(!installer_state.products().empty()); // TODO(robertshield): Removing the pending on-reboot moves should be done // elsewhere. // Remove any scheduled MOVEFILE_DELAY_UNTIL_REBOOT entries in the target of // this installation. These may have been added during a previous uninstall of // the same version. LOG_IF(ERROR, !RemoveFromMovesPendingReboot(installer_state.target_path())) << "Error accessing pending moves value."; // Create VisualElementManifest.xml in |src_path| (if required) so that it // looks as if it had been extracted from the archive when calling // InstallNewVersion() below. installer_state.UpdateStage(installer::CREATING_VISUAL_MANIFEST); CreateVisualElementsManifest(src_path, new_version); scoped_ptr<Version> existing_version; InstallStatus result = InstallNewVersion(original_state, installer_state, setup_path, archive_path, src_path, install_temp_path, new_version, &existing_version); // TODO(robertshield): Everything below this line should instead be captured // by WorkItems. 
if (!InstallUtil::GetInstallReturnCode(result)) { installer_state.UpdateStage(installer::UPDATING_CHANNELS); // Update the modifiers on the channel values for the product(s) being // installed and for the binaries in case of multi-install. installer_state.UpdateChannels(); installer_state.UpdateStage(installer::COPYING_PREFERENCES_FILE); if (result == FIRST_INSTALL_SUCCESS && !prefs_path.empty()) CopyPreferenceFileForFirstRun(installer_state, prefs_path); installer_state.UpdateStage(installer::CREATING_SHORTCUTS); const installer::Product* chrome_product = installer_state.FindProduct(BrowserDistribution::CHROME_BROWSER); // Creates shortcuts for Chrome. if (chrome_product) { BrowserDistribution* chrome_dist = chrome_product->distribution(); const base::FilePath chrome_exe( installer_state.target_path().Append(kChromeExe)); // Install per-user shortcuts on user-level installs and all-users // shortcuts on system-level installs. Note that Active Setup will take // care of installing missing per-user shortcuts on system-level install // (i.e., quick launch, taskbar pin, and possibly deleted all-users // shortcuts). InstallShortcutLevel install_level = installer_state.system_install() ? ALL_USERS : CURRENT_USER; InstallShortcutOperation install_operation = INSTALL_SHORTCUT_REPLACE_EXISTING; if (result == installer::FIRST_INSTALL_SUCCESS || result == installer::INSTALL_REPAIRED || !original_state.GetProductState(installer_state.system_install(), chrome_dist->GetType())) { // Always create the shortcuts on a new install, a repair install, and // when the Chrome product is being added to the current install. install_operation = INSTALL_SHORTCUT_CREATE_ALL; } CreateOrUpdateShortcuts(chrome_exe, *chrome_product, prefs, install_level, install_operation); } if (chrome_product) { // Register Chrome and, if requested, make Chrome the default browser. 
installer_state.UpdateStage(installer::REGISTERING_CHROME); bool make_chrome_default = false; prefs.GetBool(master_preferences::kMakeChromeDefault, &make_chrome_default); // If this is not the user's first Chrome install, but they have chosen // Chrome to become their default browser on the download page, we must // force it here because the master_preferences file will not get copied // into the build. bool force_chrome_default_for_user = false; if (result == NEW_VERSION_UPDATED || result == INSTALL_REPAIRED) { prefs.GetBool(master_preferences::kMakeChromeDefaultForUser, &force_chrome_default_for_user); } RegisterChromeOnMachine(installer_state, *chrome_product, make_chrome_default || force_chrome_default_for_user); // Configure auto-launch. if (result == FIRST_INSTALL_SUCCESS) { installer_state.UpdateStage(installer::CONFIGURE_AUTO_LAUNCH); // Add auto-launch key if specified in master_preferences. bool auto_launch_chrome = false; prefs.GetBool( installer::master_preferences::kAutoLaunchChrome, &auto_launch_chrome); if (auto_launch_chrome) { auto_launch_util::EnableForegroundStartAtLogin( base::ASCIIToUTF16(chrome::kInitialProfile), installer_state.target_path()); } } if (!installer_state.system_install()) { DCHECK_EQ(chrome_product->distribution(), BrowserDistribution::GetDistribution()); UpdateDefaultBrowserBeaconForPath( installer_state.target_path().Append(installer::kChromeExe)); } } installer_state.UpdateStage(installer::REMOVING_OLD_VERSIONS); installer_state.RemoveOldVersionDirectories( new_version, existing_version.get(), install_temp_path); } return result; } void HandleOsUpgradeForBrowser(const installer::InstallerState& installer_state, const installer::Product& chrome, const base::Version& installed_version) { DCHECK(chrome.is_chrome()); VLOG(1) << "Updating and registering shortcuts for --on-os-upgrade."; // Read master_preferences copied beside chrome.exe at install. 
const MasterPreferences prefs( installer_state.target_path().AppendASCII(kDefaultMasterPrefs)); // Update shortcuts at this install level (per-user shortcuts on system-level // installs will be updated through Active Setup). const InstallShortcutLevel level = installer_state.system_install() ? ALL_USERS : CURRENT_USER; const base::FilePath chrome_exe( installer_state.target_path().Append(kChromeExe)); CreateOrUpdateShortcuts(chrome_exe, chrome, prefs, level, INSTALL_SHORTCUT_REPLACE_EXISTING); // Adapt Chrome registrations to this new OS. RegisterChromeOnMachine(installer_state, chrome, false); // Active Setup registrations are sometimes lost across OS update, make sure // they're back in place. Note: when Active Setup registrations in HKLM are // lost, the per-user values of performed Active Setups in HKCU are also lost, // so it is fine to restart the dynamic components of the Active Setup version // (ref. UpdateActiveSetupVersionWorkItem) from scratch. // TODO(gab): This should really perform all registry only update steps (i.e., // something between InstallOrUpdateProduct and AddActiveSetupWorkItems, but // this takes care of what is most required for now). scoped_ptr<WorkItemList> work_item_list(WorkItem::CreateWorkItemList()); AddActiveSetupWorkItems(installer_state, installed_version, chrome, work_item_list.get()); if (!work_item_list->Do()) { LOG(WARNING) << "Failed to reinstall Active Setup keys."; work_item_list->Rollback(); } UpdateOsUpgradeBeacon(installer_state.system_install(), BrowserDistribution::GetDistribution()); // Update the per-user default browser beacon. For user-level installs this // can be done directly; whereas it requires triggering Active Setup for each // user's subsequent login on system-level installs. 
if (!installer_state.system_install()) { UpdateDefaultBrowserBeaconForPath(chrome_exe); } else { UpdateActiveSetupVersionWorkItem active_setup_work_item( InstallUtil::GetActiveSetupPath(chrome.distribution()), UpdateActiveSetupVersionWorkItem:: UPDATE_AND_BUMP_OS_UPGRADES_COMPONENT); if (active_setup_work_item.Do()) VLOG(1) << "Bumped Active Setup Version on-os-upgrade."; else LOG(ERROR) << "Failed to bump Active Setup Version on-os-upgrade."; } } // NOTE: Should the work done here, on Active Setup, change: kActiveSetupVersion // in update_active_setup_version_work_item.cc needs to be increased for Active // Setup to invoke this again for all users of this install. It may also be // invoked again when a system-level chrome install goes through an OS upgrade. void HandleActiveSetupForBrowser(const base::FilePath& installation_root, const installer::Product& chrome, bool force) { DCHECK(chrome.is_chrome()); // Only create shortcuts on Active Setup if the first run sentinel is not // present for this user (as some shortcuts used to be installed on first // run and this could otherwise re-install shortcuts for users that have // already deleted them in the past). // Decide whether to create the shortcuts or simply replace existing // shortcuts; if the decision is to create them, only shortcuts whose matching // all-users shortcut isn't present on the system will be created. InstallShortcutOperation install_operation = (!force && InstallUtil::IsFirstRunSentinelPresent()) ? INSTALL_SHORTCUT_REPLACE_EXISTING : INSTALL_SHORTCUT_CREATE_EACH_IF_NO_SYSTEM_LEVEL; // Read master_preferences copied beside chrome.exe at install. MasterPreferences prefs(installation_root.AppendASCII(kDefaultMasterPrefs)); base::FilePath chrome_exe(installation_root.Append(kChromeExe)); CreateOrUpdateShortcuts( chrome_exe, chrome, prefs, CURRENT_USER, install_operation); UpdateDefaultBrowserBeaconForPath(chrome_exe); } } // namespace installer
CapOM/ChromiumGStreamerBackend
chrome/installer/setup/install.cc
C++
bsd-3-clause
29,451
/* NOTE(review): vendored third-party library — YUI 3.6.0 (build 5521) exec-command module, debug build. Do not hand-edit; apply fixes upstream and re-vendor. Kept byte-identical below; the module carries its own full JSDoc. */
/* YUI 3.6.0 (build 5521) Copyright 2012 Yahoo! Inc. All rights reserved. Licensed under the BSD License. http://yuilibrary.com/license/ */ YUI.add('exec-command', function(Y) { /** * Plugin for the frame module to handle execCommands for Editor * @class Plugin.ExecCommand * @extends Base * @constructor * @module editor * @submodule exec-command */ var ExecCommand = function() { ExecCommand.superclass.constructor.apply(this, arguments); }; Y.extend(ExecCommand, Y.Base, { /** * An internal reference to the keyCode of the last key that was pressed. * @private * @property _lastKey */ _lastKey: null, /** * An internal reference to the instance of the frame plugged into. * @private * @property _inst */ _inst: null, /** * Execute a command on the frame's document. * @method command * @param {String} action The action to perform (bold, italic, fontname) * @param {String} value The optional value (helvetica) * @return {Node/NodeList} Should return the Node/Nodelist affected */ command: function(action, value) { var fn = ExecCommand.COMMANDS[action]; Y.log('execCommand(' + action + '): "' + value + '"', 'info', 'exec-command'); if (fn) { Y.log('OVERIDE execCommand(' + action + '): "' + value + '"', 'info', 'exec-command'); return fn.call(this, action, value); } else { return this._command(action, value); } }, /** * The private version of execCommand that doesn't filter for overrides.
* @private * @method _command * @param {String} action The action to perform (bold, italic, fontname) * @param {String} value The optional value (helvetica) */ _command: function(action, value) { var inst = this.getInstance(); try { try { inst.config.doc.execCommand('styleWithCSS', null, 1); } catch (e1) { try { inst.config.doc.execCommand('useCSS', null, 0); } catch (e2) { } } Y.log('Using default browser execCommand(' + action + '): "' + value + '"', 'info', 'exec-command'); inst.config.doc.execCommand(action, null, value); } catch (e) { Y.log(e.message, 'warn', 'exec-command'); } }, /** * Get's the instance of YUI bound to the parent frame * @method getInstance * @return {YUI} The YUI instance bound to the parent frame */ getInstance: function() { if (!this._inst) { this._inst = this.get('host').getInstance(); } return this._inst; }, initializer: function() { Y.mix(this.get('host'), { execCommand: function(action, value) { return this.exec.command(action, value); }, _execCommand: function(action, value) { return this.exec._command(action, value); } }); this.get('host').on('dom:keypress', Y.bind(function(e) { this._lastKey = e.keyCode; }, this)); }, _wrapContent: function(str, override) { var useP = (this.getInstance().host.editorPara && !override ? true : false); if (useP) { str = '<p>' + str + '</p>'; } else { str = str + '<br>'; } return str; } }, { /** * execCommand * @property NAME * @static */ NAME: 'execCommand', /** * exec * @property NS * @static */ NS: 'exec', ATTRS: { host: { value: false } }, /** * Static object literal of execCommand overrides * @property COMMANDS * @static */ COMMANDS: { /** * Wraps the content with a new element of type (tag) * @method COMMANDS.wrap * @static * @param {String} cmd The command executed: wrap * @param {String} tag The tag to wrap the selection with * @return {NodeList} NodeList of the items touched by this command.
*/ wrap: function(cmd, tag) { var inst = this.getInstance(); return (new inst.EditorSelection()).wrapContent(tag); }, /** * Inserts the provided HTML at the cursor, should be a single element. * @method COMMANDS.inserthtml * @static * @param {String} cmd The command executed: inserthtml * @param {String} html The html to insert * @return {Node} Node instance of the item touched by this command. */ inserthtml: function(cmd, html) { var inst = this.getInstance(); if (inst.EditorSelection.hasCursor() || Y.UA.ie) { return (new inst.EditorSelection()).insertContent(html); } else { this._command('inserthtml', html); } }, /** * Inserts the provided HTML at the cursor, and focuses the cursor afterwards. * @method COMMANDS.insertandfocus * @static * @param {String} cmd The command executed: insertandfocus * @param {String} html The html to insert * @return {Node} Node instance of the item touched by this command. */ insertandfocus: function(cmd, html) { var inst = this.getInstance(), out, sel; if (inst.EditorSelection.hasCursor()) { html += inst.EditorSelection.CURSOR; out = this.command('inserthtml', html); sel = new inst.EditorSelection(); sel.focusCursor(true, true); } else { this.command('inserthtml', html); } return out; }, /** * Inserts a BR at the current cursor position * @method COMMANDS.insertbr * @static * @param {String} cmd The command executed: insertbr */ insertbr: function(cmd) { var inst = this.getInstance(), sel = new inst.EditorSelection(), html = '<var>|</var>', last = null, q = (Y.UA.webkit) ?
'span.Apple-style-span,var' : 'var'; if (sel._selection.pasteHTML) { sel._selection.pasteHTML(html); } else { this._command('inserthtml', html); } var insert = function(n) { var c = inst.Node.create('<br>'); n.insert(c, 'before'); return c; }; inst.all(q).each(function(n) { var g = true; if (Y.UA.webkit) { g = false; if (n.get('innerHTML') === '|') { g = true; } } if (g) { last = insert(n); if ((!last.previous() || !last.previous().test('br')) && Y.UA.gecko) { var s = last.cloneNode(); last.insert(s, 'after'); last = s; } n.remove(); } }); if (Y.UA.webkit && last) { insert(last); sel.selectNode(last); } }, /** * Inserts an image at the cursor position * @method COMMANDS.insertimage * @static * @param {String} cmd The command executed: insertimage * @param {String} img The url of the image to be inserted * @return {Node} Node instance of the item touched by this command. */ insertimage: function(cmd, img) { return this.command('inserthtml', '<img src="' + img + '">'); }, /** * Add a class to all of the elements in the selection * @method COMMANDS.addclass * @static * @param {String} cmd The command executed: addclass * @param {String} cls The className to add * @return {NodeList} NodeList of the items touched by this command. */ addclass: function(cmd, cls) { var inst = this.getInstance(); return (new inst.EditorSelection()).getSelected().addClass(cls); }, /** * Remove a class from all of the elements in the selection * @method COMMANDS.removeclass * @static * @param {String} cmd The command executed: removeclass * @param {String} cls The className to remove * @return {NodeList} NodeList of the items touched by this command.
*/ removeclass: function(cmd, cls) { var inst = this.getInstance(); return (new inst.EditorSelection()).getSelected().removeClass(cls); }, /** * Adds a forecolor to the current selection, or creates a new element and applies it * @method COMMANDS.forecolor * @static * @param {String} cmd The command executed: forecolor * @param {String} val The color value to apply * @return {NodeList} NodeList of the items touched by this command. */ forecolor: function(cmd, val) { var inst = this.getInstance(), sel = new inst.EditorSelection(), n; if (!Y.UA.ie) { this._command('useCSS', false); } if (inst.EditorSelection.hasCursor()) { if (sel.isCollapsed) { if (sel.anchorNode && (sel.anchorNode.get('innerHTML') === '&nbsp;')) { sel.anchorNode.setStyle('color', val); n = sel.anchorNode; } else { n = this.command('inserthtml', '<span style="color: ' + val + '">' + inst.EditorSelection.CURSOR + '</span>'); sel.focusCursor(true, true); } return n; } else { return this._command(cmd, val); } } else { this._command(cmd, val); } }, /** * Adds a background color to the current selection, or creates a new element and applies it * @method COMMANDS.backcolor * @static * @param {String} cmd The command executed: backcolor * @param {String} val The color value to apply * @return {NodeList} NodeList of the items touched by this command.
*/ backcolor: function(cmd, val) { var inst = this.getInstance(), sel = new inst.EditorSelection(), n; if (Y.UA.gecko || Y.UA.opera) { cmd = 'hilitecolor'; } if (!Y.UA.ie) { this._command('useCSS', false); } if (inst.EditorSelection.hasCursor()) { if (sel.isCollapsed) { if (sel.anchorNode && (sel.anchorNode.get('innerHTML') === '&nbsp;')) { sel.anchorNode.setStyle('backgroundColor', val); n = sel.anchorNode; } else { n = this.command('inserthtml', '<span style="background-color: ' + val + '">' + inst.EditorSelection.CURSOR + '</span>'); sel.focusCursor(true, true); } return n; } else { return this._command(cmd, val); } } else { this._command(cmd, val); } }, /** * Sugar method, calles backcolor * @method COMMANDS.hilitecolor * @static * @param {String} cmd The command executed: backcolor * @param {String} val The color value to apply * @return {NodeList} NodeList of the items touched by this command. */ hilitecolor: function() { return ExecCommand.COMMANDS.backcolor.apply(this, arguments); }, /** * Adds a font name to the current selection, or creates a new element and applies it * @method COMMANDS.fontname2 * @deprecated * @static * @param {String} cmd The command executed: fontname * @param {String} val The font name to apply * @return {NodeList} NodeList of the items touched by this command. */ fontname2: function(cmd, val) { this._command('fontname', val); var inst = this.getInstance(), sel = new inst.EditorSelection(); if (sel.isCollapsed && (this._lastKey != 32)) { if (sel.anchorNode.test('font')) { sel.anchorNode.set('face', val); } } }, /** * Adds a fontsize to the current selection, or creates a new element and applies it * @method COMMANDS.fontsize2 * @deprecated * @static * @param {String} cmd The command executed: fontsize * @param {String} val The font size to apply * @return {NodeList} NodeList of the items touched by this command.
*/ fontsize2: function(cmd, val) { this._command('fontsize', val); var inst = this.getInstance(), sel = new inst.EditorSelection(); if (sel.isCollapsed && sel.anchorNode && (this._lastKey != 32)) { if (Y.UA.webkit) { if (sel.anchorNode.getStyle('lineHeight')) { sel.anchorNode.setStyle('lineHeight', ''); } } if (sel.anchorNode.test('font')) { sel.anchorNode.set('size', val); } else if (Y.UA.gecko) { var p = sel.anchorNode.ancestor(inst.EditorSelection.DEFAULT_BLOCK_TAG); if (p) { p.setStyle('fontSize', ''); } } } }, /** * Overload for COMMANDS.list * @method COMMANDS.insertorderedlist * @static * @param {String} cmd The command executed: list, ul */ insertunorderedlist: function(cmd) { this.command('list', 'ul'); }, /** * Overload for COMMANDS.list * @method COMMANDS.insertunorderedlist * @static * @param {String} cmd The command executed: list, ol */ insertorderedlist: function(cmd) { this.command('list', 'ol'); }, /** * Noramlizes lists creation/destruction for IE. All others pass through to native calls * @method COMMANDS.list * @static * @param {String} cmd The command executed: list (not used) * @param {String} tag The tag to deal with */ list: function(cmd, tag) { var inst = this.getInstance(), html, self = this, /* The yui3- class name below is not a skinnable class, it's a utility class used internally by editor and stripped when completed, calling getClassName on this is a waste of resources. */ DIR = 'dir', cls = 'yui3-touched', dir, range, div, elm, n, str, s, par, list, lis, useP = (inst.host.editorPara ? true : false), sel = new inst.EditorSelection(); cmd = 'insert' + ((tag === 'ul') ? 'un' : '') + 'orderedlist'; if (Y.UA.ie && !sel.isCollapsed) { range = sel._selection; html = range.htmlText; div = inst.Node.create(html) || inst.one('body'); if (div.test('li') || div.one('li')) { this._command(cmd, null); return; } if (div.test(tag)) { elm = range.item ?
range.item(0) : range.parentElement(); n = inst.one(elm); lis = n.all('li'); str = '<div>'; lis.each(function(l) { str = self._wrapContent(l.get('innerHTML')); }); str += '</div>'; s = inst.Node.create(str); if (n.get('parentNode').test('div')) { n = n.get('parentNode'); } if (n && n.hasAttribute(DIR)) { if (useP) { s.all('p').setAttribute(DIR, n.getAttribute(DIR)); } else { s.setAttribute(DIR, n.getAttribute(DIR)); } } if (useP) { n.replace(s.get('innerHTML')); } else { n.replace(s); } if (range.moveToElementText) { range.moveToElementText(s._node); } range.select(); } else { par = Y.one(range.parentElement()); if (!par.test(inst.EditorSelection.BLOCKS)) { par = par.ancestor(inst.EditorSelection.BLOCKS); } if (par) { if (par.hasAttribute(DIR)) { dir = par.getAttribute(DIR); } } if (html.indexOf('<br>') > -1) { html = html.split(/<br>/i); } else { var tmp = inst.Node.create(html), ps = tmp ? tmp.all('p') : null; if (ps && ps.size()) { html = []; ps.each(function(n) { html.push(n.get('innerHTML')); }); } else { html = [html]; } } list = '<' + tag + ' id="ie-list">'; Y.each(html, function(v) { var a = inst.Node.create(v); if (a && a.test('p')) { if (a.hasAttribute(DIR)) { dir = a.getAttribute(DIR); } v = a.get('innerHTML'); } list += '<li>' + v + '</li>'; }); list += '</' + tag + '>'; range.pasteHTML(list); elm = inst.config.doc.getElementById('ie-list'); elm.id = ''; if (dir) { elm.setAttribute(DIR, dir); } if (range.moveToElementText) { range.moveToElementText(elm); } range.select(); } } else if (Y.UA.ie) { par = inst.one(sel._selection.parentElement()); if (par.test('p')) { if (par && par.hasAttribute(DIR)) { dir = par.getAttribute(DIR); } html = Y.EditorSelection.getText(par); if (html === '') { var sdir = ''; if (dir) { sdir = ' dir="' + dir + '"'; } list = inst.Node.create(Y.Lang.sub('<{tag}{dir}><li></li></{tag}>', { tag: tag, dir: sdir })); par.replace(list); sel.selectNode(list.one('li')); } else { this._command(cmd, null); } } else { this._command(cmd,
null); } } else { inst.all(tag).addClass(cls); if (sel.anchorNode.test(inst.EditorSelection.BLOCKS)) { par = sel.anchorNode; } else { par = sel.anchorNode.ancestor(inst.EditorSelection.BLOCKS); } if (!par) { //No parent, find the first block under the anchorNode par = sel.anchorNode.one(inst.EditorSelection.BLOCKS); } if (par && par.hasAttribute(DIR)) { dir = par.getAttribute(DIR); } if (par && par.test(tag)) { var hasPParent = par.ancestor('p'); html = inst.Node.create('<div/>'); elm = par.all('li'); elm.each(function(h) { html.append(self._wrapContent(h.get('innerHTML'), hasPParent)); }); if (dir) { if (useP) { html.all('p').setAttribute(DIR, dir); } else { html.setAttribute(DIR, dir); } } if (useP) { html = inst.Node.create(html.get('innerHTML')); } var fc = html.get('firstChild'); par.replace(html); sel.selectNode(fc); } else { this._command(cmd, null); } list = inst.all(tag); if (dir) { if (list.size()) { //Changed to a List list.each(function(n) { if (!n.hasClass(cls)) { n.setAttribute(DIR, dir); } }); } } list.removeClass(cls); } }, /** * Noramlizes alignment for Webkit Browsers * @method COMMANDS.justify * @static * @param {String} cmd The command executed: justify (not used) * @param {String} val The actual command from the justify{center,all,left,right} stubs */ justify: function(cmd, val) { if (Y.UA.webkit) { var inst = this.getInstance(), sel = new inst.EditorSelection(), aNode = sel.anchorNode; var bgColor = aNode.getStyle('backgroundColor'); this._command(val); sel = new inst.EditorSelection(); if (sel.anchorNode.test('div')) { var html = '<span>' + sel.anchorNode.get('innerHTML') + '</span>'; sel.anchorNode.set('innerHTML', html); sel.anchorNode.one('span').setStyle('backgroundColor', bgColor); sel.selectNode(sel.anchorNode.one('span')); } } else { this._command(val); } }, /** * Override method for COMMANDS.justify * @method COMMANDS.justifycenter * @static */ justifycenter: function(cmd) { this.command('justify', 'justifycenter'); }, /** * Override
method for COMMANDS.justify * @method COMMANDS.justifyleft * @static */ justifyleft: function(cmd) { this.command('justify', 'justifyleft'); }, /** * Override method for COMMANDS.justify * @method COMMANDS.justifyright * @static */ justifyright: function(cmd) { this.command('justify', 'justifyright'); }, /** * Override method for COMMANDS.justify * @method COMMANDS.justifyfull * @static */ justifyfull: function(cmd) { this.command('justify', 'justifyfull'); } } }); /** * This method is meant to normalize IE's in ability to exec the proper command on elements with CSS styling. * @method fixIETags * @protected * @param {String} cmd The command to execute * @param {String} tag The tag to create * @param {String} rule The rule that we are looking for. */ var fixIETags = function(cmd, tag, rule) { var inst = this.getInstance(), doc = inst.config.doc, sel = doc.selection.createRange(), o = doc.queryCommandValue(cmd), html, reg, m, p, d, s, c; if (o) { html = sel.htmlText; reg = new RegExp(rule, 'g'); m = html.match(reg); if (m) { html = html.replace(rule + ';', '').replace(rule, ''); sel.pasteHTML('<var id="yui-ie-bs">'); p = doc.getElementById('yui-ie-bs'); d = doc.createElement('div'); s = doc.createElement(tag); d.innerHTML = html; if (p.parentNode !== inst.config.doc.body) { p = p.parentNode; } c = d.childNodes; p.parentNode.replaceChild(s, p); Y.each(c, function(f) { s.appendChild(f); }); sel.collapse(); if (sel.moveToElementText) { sel.moveToElementText(s); } sel.select(); } } this._command(cmd); }; if (Y.UA.ie) { ExecCommand.COMMANDS.bold = function() { fixIETags.call(this, 'bold', 'b', 'FONT-WEIGHT: bold'); }; ExecCommand.COMMANDS.italic = function() { fixIETags.call(this, 'italic', 'i', 'FONT-STYLE: italic'); }; ExecCommand.COMMANDS.underline = function() { fixIETags.call(this, 'underline', 'u', 'TEXT-DECORATION: underline'); }; } Y.namespace('Plugin'); Y.Plugin.ExecCommand = ExecCommand; }, '3.6.0' ,{skinnable:false, requires:['frame']});
bretkikehara/wattdepot-visualization
src/main/webapp/yui/3.6.0/build/exec-command/exec-command-debug.js
JavaScript
bsd-3-clause
31,671
# Authentication / authorisation view decorators used throughout Oscar.
from functools import wraps

from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render
from django.utils.six.moves.urllib import parse
from django.utils.translation import ugettext_lazy as _

from oscar.core.compat import user_is_authenticated


def staff_member_required(view_func, login_url=None):
    """
    Ensure that the user is a logged-in staff member.

    * If not authenticated, redirect to a specified login URL.
    * If not staff, show a 403 page

    This decorator is based on the decorator with the same name from
    django.contrib.admin.views.decorators.  This one is superior as it
    allows a redirect URL to be specified.
    """
    if login_url is None:
        # reverse_lazy is required: URLconfs may not be loaded yet at the
        # time this decorator is applied (module import time).
        login_url = reverse_lazy('customer:login')

    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        # Active staff users pass straight through to the wrapped view.
        if request.user.is_active and request.user.is_staff:
            return view_func(request, *args, **kwargs)
        # If user is not logged in, redirect to login page
        if not user_is_authenticated(request.user):
            # If the login url is the same scheme and net location then just
            # use the path as the "next" url.
            path = request.build_absolute_uri()
            login_scheme, login_netloc = parse.urlparse(login_url)[:2]
            current_scheme, current_netloc = parse.urlparse(path)[:2]
            if ((not login_scheme or login_scheme == current_scheme) and
                    (not login_netloc or login_netloc == current_netloc)):
                path = request.get_full_path()

            messages.warning(request, _("You must log in to access this page"))
            return redirect_to_login(path, login_url, REDIRECT_FIELD_NAME)
        else:
            # User does not have permission to view this page
            # (authenticated, but not staff) -> 403.
            raise PermissionDenied
    return _checklogin


def check_permissions(user, permissions):
    """
    Permissions can be a list or a tuple of lists. If it is a tuple,
    every permission list will be evaluated and the outcome will be checked
    for truthiness.
    Each item of the list(s) must be either a valid Django permission name
    (model.codename) or a property or method on the User model
    (e.g. 'is_active', 'is_superuser').

    Example usage:
    - permissions_required(['is_staff', ])
      would replace staff_member_required
    - permissions_required(['is_anonymous', ])
      would replace login_forbidden
    - permissions_required((['is_staff',], ['partner.dashboard_access']))
      allows both staff users and users with the above permission
    """
    def _check_one_permission_list(perms):
        # Django permission strings contain a dot ("app_label.codename");
        # anything without a dot is treated as a User attribute or method.
        regular_permissions = [perm for perm in perms if '.' in perm]
        conditions = [perm for perm in perms if '.' not in perm]
        # always check for is_active if not checking for is_anonymous
        if (conditions and
                'is_anonymous' not in conditions and
                'is_active' not in conditions):
            conditions.append('is_active')
        attributes = [getattr(user, perm) for perm in conditions]
        # evaluates methods, explicitly casts properties to booleans
        passes_conditions = all([
            attr() if callable(attr) else bool(attr) for attr in attributes])
        return passes_conditions and user.has_perms(regular_permissions)

    if not permissions:
        # No permissions configured means unrestricted access.
        return True
    elif isinstance(permissions, list):
        return _check_one_permission_list(permissions)
    else:
        # Tuple of lists: passing ANY one of the lists grants access.
        return any(_check_one_permission_list(perm) for perm in permissions)


def permissions_required(permissions, login_url=None):
    """
    Decorator that checks if a user has the given permissions.
    Accepts a list or tuple of lists of permissions (see check_permissions
    documentation).

    If the user is not logged in and the test fails, she is redirected to a
    login page. If the user is logged in, she gets a HTTP 403 Permission
    Denied message, analogous to Django's permission_required decorator.
    """
    if login_url is None:
        login_url = reverse_lazy('customer:login')

    def _check_permissions(user):
        outcome = check_permissions(user, permissions)
        if not outcome and user_is_authenticated(user):
            # Authenticated but unauthorised: raise 403 rather than letting
            # user_passes_test redirect to the login page (pointless for a
            # user who is already logged in).
            raise PermissionDenied
        else:
            return outcome

    return user_passes_test(_check_permissions, login_url=login_url)


def login_forbidden(view_func, template_name='login_forbidden.html',
                    status=403):
    """
    Only allow anonymous users to access this view.
    """
    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        if not user_is_authenticated(request.user):
            return view_func(request, *args, **kwargs)

        # Authenticated users are shown the "forbidden" template with a 403.
        return render(request, template_name, status=status)
    return _checklogin
sonofatailor/django-oscar
src/oscar/views/decorators.py
Python
bsd-3-clause
5,064
/* * Copyright 2016 The Chromium Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ /* global PaymentRequest:false */ /** * Launches the PaymentRequest UI that offers free shipping in California and * $5.00 shipping in US. Does not allow shipping outside of US. * * Legacy entry-point until basic-card is disabled */ function buy() { // eslint-disable-line no-unused-vars buyWithMethods( [{supportedMethods: 'basic-card', data: {supportedNetworks: ['visa']}}]); } /** * Launches the PaymentRequest UI that offers free shipping in California and * $5.00 shipping in US. Does not allow shipping outside of US. * * @param {String} methodData - An array of payment method objects. */ function buyWithMethods(methodData) { // eslint-disable-line no-unused-vars try { var details = { total: {label: 'Total', amount: {currency: 'USD', value: '5.00'}}, displayItems: [ { label: 'Pending shipping price', amount: {currency: 'USD', value: '0.00'}, pending: true, }, {label: 'Subtotal', amount: {currency: 'USD', value: '5.00'}}, ], }; var request = new PaymentRequest( methodData, details, {requestShipping: true}); request.addEventListener('shippingaddresschange', function(evt) { evt.updateWith(new Promise(function(resolve) { resolve(updateDetails(details, request.shippingAddress)); })); }); request.show() .then(function(resp) { resp.complete('success') .then(function() { print(JSON.stringify(resp, undefined, 2)); }) .catch(function(error) { print(error); }); }) .catch(function(error) { print(error); }); } catch (error) { print(error.message); } } /** * Updates the shopping cart with the appropriate shipping prices according to * the shipping address. * @param {object} details - The shopping cart. * @param {ShippingAddress} addr - The shipping address. * @return {object} The updated shopping cart. 
*/ function updateDetails(details, addr) { if (addr.country === 'US') { var shippingOption = { id: '', label: '', amount: {currency: 'USD', value: '0.00'}, selected: true, }; if (addr.region === 'CA') { shippingOption.id = 'californiaShippingOption'; shippingOption.label = 'Free shipping in California'; details.total.amount.value = '5.00'; } else { shippingOption.id = 'usShippingOption'; shippingOption.label = 'Standard shipping in US'; shippingOption.amount.value = '5.00'; details.total.amount.value = '10.00'; } details.displayItems.splice(0, 1, shippingOption); details.shippingOptions = [shippingOption]; } else { details.shippingOptions = []; details.error = 'We do not ship to this address'; details.shippingAddressErrors = { addressLine: 'ADDRESS LINE ERROR', city: 'CITY ERROR', }; } return details; }
chromium/chromium
components/test/data/payments/dynamic_shipping.js
JavaScript
bsd-3-clause
3,140
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/socket/transport_client_socket_pool_test_util.h"

#include <stdint.h>

#include <string>
#include <utility>

#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "net/base/ip_address.h"
#include "net/base/ip_endpoint.h"
#include "net/base/load_timing_info.h"
#include "net/base/load_timing_info_test_util.h"
#include "net/socket/client_socket_handle.h"
#include "net/socket/ssl_client_socket.h"
#include "net/udp/datagram_client_socket.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace net {

namespace {

// Parses |ip| (an IP literal such as "1.1.1.1" or "1:abcd::3:4:ff") into an
// IPAddress.  CHECKs on malformed input, so only pass known-good literals.
IPAddress ParseIP(const std::string& ip) {
  IPAddress address;
  CHECK(address.AssignFromIPLiteral(ip));
  return address;
}

// A StreamSocket which connects synchronously and successfully.
class MockConnectClientSocket : public StreamSocket {
 public:
  MockConnectClientSocket(const AddressList& addrlist, net::NetLog* net_log)
      : connected_(false),
        addrlist_(addrlist),
        net_log_(BoundNetLog::Make(net_log, NetLog::SOURCE_SOCKET)) {}

  // StreamSocket implementation.
  int Connect(const CompletionCallback& callback) override {
    // Completes synchronously, so |callback| is intentionally never run.
    connected_ = true;
    return OK;
  }
  void Disconnect() override { connected_ = false; }
  bool IsConnected() const override { return connected_; }
  bool IsConnectedAndIdle() const override { return connected_; }

  int GetPeerAddress(IPEndPoint* address) const override {
    *address = addrlist_.front();
    return OK;
  }
  int GetLocalAddress(IPEndPoint* address) const override {
    if (!connected_)
      return ERR_SOCKET_NOT_CONNECTED;
    // Report a canned local address of the same family as the peer.
    if (addrlist_.front().GetFamily() == ADDRESS_FAMILY_IPV4)
      SetIPv4Address(address);
    else
      SetIPv6Address(address);
    return OK;
  }
  const BoundNetLog& NetLog() const override { return net_log_; }

  void SetSubresourceSpeculation() override {}
  void SetOmniboxSpeculation() override {}
  bool WasEverUsed() const override { return false; }
  void EnableTCPFastOpenIfSupported() override {}
  bool WasNpnNegotiated() const override { return false; }
  NextProto GetNegotiatedProtocol() const override { return kProtoUnknown; }
  bool GetSSLInfo(SSLInfo* ssl_info) override { return false; }
  void GetConnectionAttempts(ConnectionAttempts* out) const override {
    out->clear();
  }
  void ClearConnectionAttempts() override {}
  void AddConnectionAttempts(const ConnectionAttempts& attempts) override {}
  int64_t GetTotalReceivedBytes() const override {
    NOTIMPLEMENTED();
    return 0;
  }

  // Socket implementation.  Data transfer is not supported by this mock.
  int Read(IOBuffer* buf,
           int buf_len,
           const CompletionCallback& callback) override {
    return ERR_FAILED;
  }
  int Write(IOBuffer* buf,
            int buf_len,
            const CompletionCallback& callback) override {
    return ERR_FAILED;
  }
  int SetReceiveBufferSize(int32_t size) override { return OK; }
  int SetSendBufferSize(int32_t size) override { return OK; }

 private:
  bool connected_;
  const AddressList addrlist_;
  BoundNetLog net_log_;

  DISALLOW_COPY_AND_ASSIGN(MockConnectClientSocket);
};

// A StreamSocket which fails to connect synchronously.
class MockFailingClientSocket : public StreamSocket {
 public:
  MockFailingClientSocket(const AddressList& addrlist, net::NetLog* net_log)
      : addrlist_(addrlist),
        net_log_(BoundNetLog::Make(net_log, NetLog::SOURCE_SOCKET)) {}

  // StreamSocket implementation.
  int Connect(const CompletionCallback& callback) override {
    return ERR_CONNECTION_FAILED;
  }

  void Disconnect() override {}

  bool IsConnected() const override { return false; }
  bool IsConnectedAndIdle() const override { return false; }
  int GetPeerAddress(IPEndPoint* address) const override {
    return ERR_UNEXPECTED;
  }
  int GetLocalAddress(IPEndPoint* address) const override {
    return ERR_UNEXPECTED;
  }
  const BoundNetLog& NetLog() const override { return net_log_; }

  void SetSubresourceSpeculation() override {}
  void SetOmniboxSpeculation() override {}
  bool WasEverUsed() const override { return false; }
  void EnableTCPFastOpenIfSupported() override {}
  bool WasNpnNegotiated() const override { return false; }
  NextProto GetNegotiatedProtocol() const override { return kProtoUnknown; }
  bool GetSSLInfo(SSLInfo* ssl_info) override { return false; }
  void GetConnectionAttempts(ConnectionAttempts* out) const override {
    // Every address in the list is reported as a failed attempt.
    out->clear();
    for (const auto& addr : addrlist_)
      out->push_back(ConnectionAttempt(addr, ERR_CONNECTION_FAILED));
  }
  void ClearConnectionAttempts() override {}
  void AddConnectionAttempts(const ConnectionAttempts& attempts) override {}
  int64_t GetTotalReceivedBytes() const override {
    NOTIMPLEMENTED();
    return 0;
  }

  // Socket implementation.  Data transfer is not supported by this mock.
  int Read(IOBuffer* buf,
           int buf_len,
           const CompletionCallback& callback) override {
    return ERR_FAILED;
  }

  int Write(IOBuffer* buf,
            int buf_len,
            const CompletionCallback& callback) override {
    return ERR_FAILED;
  }
  int SetReceiveBufferSize(int32_t size) override { return OK; }
  int SetSendBufferSize(int32_t size) override { return OK; }

 private:
  const AddressList addrlist_;
  BoundNetLog net_log_;

  DISALLOW_COPY_AND_ASSIGN(MockFailingClientSocket);
};

// A StreamSocket whose pending Connect() completes only when an external
// trigger closure (obtained via GetConnectCallback()) is run.
class MockTriggerableClientSocket : public StreamSocket {
 public:
  // |should_connect| indicates whether the socket should successfully complete
  // or fail.
  MockTriggerableClientSocket(const AddressList& addrlist,
                              bool should_connect,
                              net::NetLog* net_log)
      : should_connect_(should_connect),
        is_connected_(false),
        addrlist_(addrlist),
        net_log_(BoundNetLog::Make(net_log, NetLog::SOURCE_SOCKET)),
        weak_factory_(this) {}

  // Call this method to get a closure which will trigger the connect callback
  // when called. The closure can be called even after the socket is deleted; it
  // will safely do nothing.
  base::Closure GetConnectCallback() {
    return base::Bind(&MockTriggerableClientSocket::DoCallback,
                      weak_factory_.GetWeakPtr());
  }

  // Returns a socket whose connect completes asynchronously on the next
  // message-loop turn.
  static std::unique_ptr<StreamSocket> MakeMockPendingClientSocket(
      const AddressList& addrlist,
      bool should_connect,
      net::NetLog* net_log) {
    std::unique_ptr<MockTriggerableClientSocket> socket(
        new MockTriggerableClientSocket(addrlist, should_connect, net_log));
    base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                  socket->GetConnectCallback());
    return std::move(socket);
  }

  // Returns a socket whose connect completes after |delay|.
  static std::unique_ptr<StreamSocket> MakeMockDelayedClientSocket(
      const AddressList& addrlist,
      bool should_connect,
      const base::TimeDelta& delay,
      net::NetLog* net_log) {
    std::unique_ptr<MockTriggerableClientSocket> socket(
        new MockTriggerableClientSocket(addrlist, should_connect, net_log));
    base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
        FROM_HERE, socket->GetConnectCallback(), delay);
    return std::move(socket);
  }

  // Returns a socket whose connect never completes (nobody runs the trigger).
  // If |failing|, the first address is pre-recorded as a failed attempt.
  static std::unique_ptr<StreamSocket> MakeMockStalledClientSocket(
      const AddressList& addrlist,
      net::NetLog* net_log,
      bool failing) {
    std::unique_ptr<MockTriggerableClientSocket> socket(
        new MockTriggerableClientSocket(addrlist, true, net_log));
    if (failing) {
      DCHECK_LE(1u, addrlist.size());
      ConnectionAttempts attempts;
      attempts.push_back(ConnectionAttempt(addrlist[0], ERR_CONNECTION_FAILED));
      socket->AddConnectionAttempts(attempts);
    }
    return std::move(socket);
  }

  // StreamSocket implementation.
  int Connect(const CompletionCallback& callback) override {
    // Stays pending until DoCallback() is run via the trigger closure.
    DCHECK(callback_.is_null());
    callback_ = callback;
    return ERR_IO_PENDING;
  }

  void Disconnect() override {}

  bool IsConnected() const override { return is_connected_; }
  bool IsConnectedAndIdle() const override { return is_connected_; }
  int GetPeerAddress(IPEndPoint* address) const override {
    *address = addrlist_.front();
    return OK;
  }
  int GetLocalAddress(IPEndPoint* address) const override {
    if (!is_connected_)
      return ERR_SOCKET_NOT_CONNECTED;
    if (addrlist_.front().GetFamily() == ADDRESS_FAMILY_IPV4)
      SetIPv4Address(address);
    else
      SetIPv6Address(address);
    return OK;
  }
  const BoundNetLog& NetLog() const override { return net_log_; }

  void SetSubresourceSpeculation() override {}
  void SetOmniboxSpeculation() override {}
  bool WasEverUsed() const override { return false; }
  void EnableTCPFastOpenIfSupported() override {}
  bool WasNpnNegotiated() const override { return false; }
  NextProto GetNegotiatedProtocol() const override { return kProtoUnknown; }
  bool GetSSLInfo(SSLInfo* ssl_info) override { return false; }
  void GetConnectionAttempts(ConnectionAttempts* out) const override {
    *out = connection_attempts_;
  }
  void ClearConnectionAttempts() override { connection_attempts_.clear(); }
  void AddConnectionAttempts(const ConnectionAttempts& attempts) override {
    // New attempts are prepended, matching the order tests expect.
    connection_attempts_.insert(connection_attempts_.begin(), attempts.begin(),
                                attempts.end());
  }
  int64_t GetTotalReceivedBytes() const override {
    NOTIMPLEMENTED();
    return 0;
  }

  // Socket implementation.  Data transfer is not supported by this mock.
  int Read(IOBuffer* buf,
           int buf_len,
           const CompletionCallback& callback) override {
    return ERR_FAILED;
  }

  int Write(IOBuffer* buf,
            int buf_len,
            const CompletionCallback& callback) override {
    return ERR_FAILED;
  }
  int SetReceiveBufferSize(int32_t size) override { return OK; }
  int SetSendBufferSize(int32_t size) override { return OK; }

 private:
  // Completes the pending Connect() with OK or ERR_CONNECTION_FAILED,
  // depending on |should_connect_|.
  void DoCallback() {
    is_connected_ = should_connect_;
    callback_.Run(is_connected_ ? OK : ERR_CONNECTION_FAILED);
  }

  bool should_connect_;
  bool is_connected_;
  const AddressList addrlist_;
  BoundNetLog net_log_;
  CompletionCallback callback_;
  ConnectionAttempts connection_attempts_;
  base::WeakPtrFactory<MockTriggerableClientSocket> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(MockTriggerableClientSocket);
};

}  // namespace

void TestLoadTimingInfoConnectedReused(const ClientSocketHandle& handle) {
  LoadTimingInfo load_timing_info;
  // Only pass true in as |is_reused|, as in general, HttpStream types should
  // have stricter concepts of reuse than socket pools.
  EXPECT_TRUE(handle.GetLoadTimingInfo(true, &load_timing_info));

  EXPECT_TRUE(load_timing_info.socket_reused);
  EXPECT_NE(NetLog::Source::kInvalidId, load_timing_info.socket_log_id);

  ExpectConnectTimingHasNoTimes(load_timing_info.connect_timing);
  ExpectLoadTimingHasOnlyConnectionTimes(load_timing_info);
}

void TestLoadTimingInfoConnectedNotReused(const ClientSocketHandle& handle) {
  EXPECT_FALSE(handle.is_reused());

  LoadTimingInfo load_timing_info;
  EXPECT_TRUE(handle.GetLoadTimingInfo(false, &load_timing_info));

  EXPECT_FALSE(load_timing_info.socket_reused);
  EXPECT_NE(NetLog::Source::kInvalidId, load_timing_info.socket_log_id);

  ExpectConnectTimingHasTimes(load_timing_info.connect_timing,
                              CONNECT_TIMING_HAS_DNS_TIMES);
  ExpectLoadTimingHasOnlyConnectionTimes(load_timing_info);

  // The same handle must also report sane timing when queried as reused.
  TestLoadTimingInfoConnectedReused(handle);
}

// Fills |address| with the canned IPv4 endpoint 1.1.1.1:80.
void SetIPv4Address(IPEndPoint* address) {
  *address = IPEndPoint(ParseIP("1.1.1.1"), 80);
}

// Fills |address| with the canned IPv6 endpoint [1:abcd::3:4:ff]:80.
void SetIPv6Address(IPEndPoint* address) {
  *address = IPEndPoint(ParseIP("1:abcd::3:4:ff"), 80);
}

MockTransportClientSocketFactory::MockTransportClientSocketFactory(
    NetLog* net_log)
    : net_log_(net_log),
      allocation_count_(0),
      client_socket_type_(MOCK_CLIENT_SOCKET),
      client_socket_types_(NULL),
      client_socket_index_(0),
      client_socket_index_max_(0),
      delay_(base::TimeDelta::FromMilliseconds(
          ClientSocketPool::kMaxConnectRetryIntervalMs)) {}

MockTransportClientSocketFactory::~MockTransportClientSocketFactory() {}

std::unique_ptr<DatagramClientSocket>
MockTransportClientSocketFactory::CreateDatagramClientSocket(
    DatagramSocket::BindType bind_type,
    const RandIntCallback& rand_int_cb,
    NetLog* net_log,
    const NetLog::Source& source) {
  // Datagram sockets are outside the scope of this transport-socket mock.
  NOTREACHED();
  return std::unique_ptr<DatagramClientSocket>();
}

std::unique_ptr<StreamSocket>
MockTransportClientSocketFactory::CreateTransportClientSocket(
    const AddressList& addresses,
    std::unique_ptr<SocketPerformanceWatcher> /* socket_performance_watcher */,
    NetLog* /* net_log */,
    const NetLog::Source& /* source */) {
  allocation_count_++;

  // Either use the single configured type, or consume the next entry from the
  // per-call type list set via set_client_socket_types().
  ClientSocketType type = client_socket_type_;
  if (client_socket_types_ && client_socket_index_ < client_socket_index_max_) {
    type = client_socket_types_[client_socket_index_++];
  }

  switch (type) {
    case MOCK_CLIENT_SOCKET:
      return std::unique_ptr<StreamSocket>(
          new MockConnectClientSocket(addresses, net_log_));
    case MOCK_FAILING_CLIENT_SOCKET:
      return std::unique_ptr<StreamSocket>(
          new MockFailingClientSocket(addresses, net_log_));
    case MOCK_PENDING_CLIENT_SOCKET:
      return MockTriggerableClientSocket::MakeMockPendingClientSocket(
          addresses, true, net_log_);
    case MOCK_PENDING_FAILING_CLIENT_SOCKET:
      return MockTriggerableClientSocket::MakeMockPendingClientSocket(
          addresses, false, net_log_);
    case MOCK_DELAYED_CLIENT_SOCKET:
      return MockTriggerableClientSocket::MakeMockDelayedClientSocket(
          addresses, true, delay_, net_log_);
    case MOCK_DELAYED_FAILING_CLIENT_SOCKET:
      return MockTriggerableClientSocket::MakeMockDelayedClientSocket(
          addresses, false, delay_, net_log_);
    case MOCK_STALLED_CLIENT_SOCKET:
      return MockTriggerableClientSocket::MakeMockStalledClientSocket(
          addresses, net_log_, false);
    case MOCK_STALLED_FAILING_CLIENT_SOCKET:
      return MockTriggerableClientSocket::MakeMockStalledClientSocket(
          addresses, net_log_, true);
    case MOCK_TRIGGERABLE_CLIENT_SOCKET: {
      std::unique_ptr<MockTriggerableClientSocket> rv(
          new MockTriggerableClientSocket(addresses, true, net_log_));
      triggerable_sockets_.push(rv->GetConnectCallback());
      // run_loop_quit_closure_ behaves like a condition variable. It will
      // wake up WaitForTriggerableSocketCreation() if it is sleeping. We
      // don't need to worry about atomicity because this code is
      // single-threaded.
      if (!run_loop_quit_closure_.is_null())
        run_loop_quit_closure_.Run();
      return std::move(rv);
    }
    default:
      NOTREACHED();
      return std::unique_ptr<StreamSocket>(
          new MockConnectClientSocket(addresses, net_log_));
  }
}

std::unique_ptr<SSLClientSocket>
MockTransportClientSocketFactory::CreateSSLClientSocket(
    std::unique_ptr<ClientSocketHandle> transport_socket,
    const HostPortPair& host_and_port,
    const SSLConfig& ssl_config,
    const SSLClientSocketContext& context) {
  NOTIMPLEMENTED();
  return std::unique_ptr<SSLClientSocket>();
}

void MockTransportClientSocketFactory::ClearSSLSessionCache() {
  NOTIMPLEMENTED();
}

void MockTransportClientSocketFactory::set_client_socket_types(
    ClientSocketType* type_list,
    int num_types) {
  // |type_list| must outlive this factory; ownership is NOT taken.
  DCHECK_GT(num_types, 0);
  client_socket_types_ = type_list;
  client_socket_index_ = 0;
  client_socket_index_max_ = num_types;
}

base::Closure
MockTransportClientSocketFactory::WaitForTriggerableSocketCreation() {
  // Spin a nested RunLoop until CreateTransportClientSocket() pushes a
  // trigger closure; then hand the oldest trigger to the caller.
  while (triggerable_sockets_.empty()) {
    base::RunLoop run_loop;
    run_loop_quit_closure_ = run_loop.QuitClosure();
    run_loop.Run();
    run_loop_quit_closure_.Reset();
  }
  base::Closure trigger = triggerable_sockets_.front();
  triggerable_sockets_.pop();
  return trigger;
}

}  // namespace net
axinging/chromium-crosswalk
net/socket/transport_client_socket_pool_test_util.cc
C++
bsd-3-clause
16,065
//
// Lux require.js configuration helper plus the twitter-stream example app.
(function (root) {
    "use strict";

    if (!root.lux)
        root.lux = {};

    // If a file assign http as protocol (https does not work with PhantomJS)
    var protocol = root.location ? (root.location.protocol === 'file:' ? 'http:' : '') : '',
        end = '.js',
        ostring = Object.prototype.toString,
        lux = root.lux;

    // Type check helper (currently unused in this file).
    function isArray(it) {
        return ostring.call(it) === '[object Array]';
    }

    // True when the page context asks for minified media bundles.
    function minify () {
        if (root.lux.context)
            return lux.context.MINIFIED_MEDIA;
    }

    // Base URL for media assets, taken from the page context when available.
    function baseUrl () {
        if (root.lux.context)
            return lux.context.MEDIA_URL;
    }

    // Shallow-copy own properties of o2 onto o1 and return o1.
    function extend (o1, o2) {
        if (o2) {
            for (var key in o2) {
                if (o2.hasOwnProperty(key))
                    o1[key] = o2[key];
            }
        }
        return o1;
    }

    // CDN locations of all third-party modules known to lux.
    function defaultPaths () {
        return {
            "angular": "//ajax.googleapis.com/ajax/libs/angularjs/1.3.15/angular",
            "angular-animate": "//ajax.googleapis.com/ajax/libs/angularjs/1.3.15/angular-animate",
            "angular-mocks": "//ajax.googleapis.com/ajax/libs/angularjs/1.3.15/angular-mocks.js",
            "angular-sanitize": "//ajax.googleapis.com/ajax/libs/angularjs/1.3.15/angular-sanitize",
            "angular-touch": "//cdnjs.cloudflare.com/ajax/libs/angular.js/1.3.15/angular-touch",
            "angular-strap": "//cdnjs.cloudflare.com/ajax/libs/angular-strap/2.2.1/angular-strap",
            "angular-strap-tpl": "//cdnjs.cloudflare.com/ajax/libs/angular-strap/2.2.4/angular-strap.tpl",
            "angular-ui-router": "//cdnjs.cloudflare.com/ajax/libs/angular-ui-router/0.2.14/angular-ui-router",
            "angular-pusher": "//cdn.jsdelivr.net/angular.pusher/latest/pusher-angular.min.js",
            "async": "//cdnjs.cloudflare.com/ajax/libs/requirejs-async/0.1.1/async.js",
            "pusher": "//js.pusher.com/2.2/pusher",
            "codemirror": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/codemirror",
            "codemirror-markdown": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/markdown/markdown",
            "codemirror-javascript": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/javascript/javascript",
            "codemirror-xml": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/xml/xml",
            "codemirror-css": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/css/css",
            "codemirror-htmlmixed": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/htmlmixed/htmlmixed",
            "crossfilter": "//cdnjs.cloudflare.com/ajax/libs/crossfilter/1.3.11/crossfilter",
            "d3": "//cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3",
            "google-analytics": "//www.google-analytics.com/analytics.js",
            "gridster": "//cdnjs.cloudflare.com/ajax/libs/jquery.gridster/0.5.6/jquery.gridster",
            "holder": "//cdnjs.cloudflare.com/ajax/libs/holder/2.3.1/holder",
            "highlight": "//cdnjs.cloudflare.com/ajax/libs/highlight.js/8.3/highlight.min.js",
            "katex": "//cdnjs.cloudflare.com/ajax/libs/KaTeX/0.3.0/katex.min.js",
            "leaflet": "//cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.js",
            "lodash": "//cdnjs.cloudflare.com/ajax/libs/lodash.js/2.4.1/lodash",
            "marked": "//cdnjs.cloudflare.com/ajax/libs/marked/0.3.2/marked",
            "mathjax": "//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML",
            "moment": "//cdnjs.cloudflare.com/ajax/libs/moment.js/2.10.3/moment",
            "restangular": "//cdnjs.cloudflare.com/ajax/libs/restangular/1.4.0/restangular",
            "sockjs": "//cdnjs.cloudflare.com/ajax/libs/sockjs-client/0.3.4/sockjs.min.js",
            "stats": "//cdnjs.cloudflare.com/ajax/libs/stats.js/r11/Stats",
            "topojson": "//cdnjs.cloudflare.com/ajax/libs/topojson/1.6.19/topojson"
        };
    }

    // Default shims
    function defaultShim () {
        return {
            angular: {
                exports: "angular"
            },
            "angular-strap-tpl": {
                deps: ["angular", "angular-strap"]
            },
            "google-analytics": {
                exports: root.GoogleAnalyticsObject || "ga"
            },
            highlight: {
                exports: "hljs"
            },
            lux: {
                deps: ["angular"]
            },
            "ui-bootstrap": {
                deps: ["angular"]
            },
            "codemirror": {
                exports: "CodeMirror"
            },
            "codemirror-markdown": {
                deps: ["codemirror"]
            },
            "codemirror-xml": {
                deps: ["codemirror"]
            },
            "codemirror-javascript": {
                deps: ["codemirror"]
            },
            "codemirror-css": {
                deps: ["codemirror"]
            },
            "codemirror-htmlmixed": {
                deps: ["codemirror", "codemirror-xml", "codemirror-javascript", "codemirror-css"],
            },
            restangular: {
                deps: ["angular"]
            },
            crossfilter: {
                exports: "crossfilter"
            },
            trianglify: {
                deps: ["d3"],
                exports: "Trianglify"
            },
            mathjax: {
                exports: "MathJax"
            }
        };
    }

    // Merge default and user paths, normalising minification suffix,
    // query strings, protocol and the trailing ".js" extension.
    function newPaths (cfg) {
        var all = {},
            min = minify() ? '.min' : '',
            prefix = root.local_require_prefix,
            paths = extend(defaultPaths(), cfg.paths);

        for(var name in paths) {
            if(paths.hasOwnProperty(name)) {
                var path = paths[name];

                // Strip the local development prefix when configured.
                if (prefix && path.substring(0, prefix.length) === prefix)
                    path = path.substring(prefix.length);

                if (!cfg.shim[name]) {
                    // Add angular dependency
                    if (name.substring(0, 8) === "angular-")
                        cfg.shim[name] = {
                            deps: ["angular"]
                        };
                    else if (name.substring(0, 3) === "d3-")
                        cfg.shim[name] = {
                            deps: ["d3"]
                        };
                }

                if (typeof(path) !== 'string') {
                    // Don't manipulate it, leave it as it is
                    path = path.url;
                } else {
                    // Separate any query string before editing the path.
                    var params = path.split('?');
                    if (params.length === 2) {
                        path = params[0];
                        params = params[1];
                    } else
                        params = '';
                    if (path.substring(path.length-3) !== end)
                        path += min;
                    if (params) {
                        if (path.substring(path.length-3) !== end)
                            path += end;
                        path += '?' + params;
                    }
                    // Add protocol
                    if (path.substring(0, 2) === '//' && protocol)
                        path = protocol + path;
                    // require.js appends ".js" itself, so drop it here.
                    if (path.substring(path.length-3) === end)
                        path = path.substring(0, path.length-3);
                }
                all[name] = path;
            }
        }
        return all;
    }

    // require.config override
    lux.config = function (cfg) {
        if(!cfg.baseUrl) {
            var url = baseUrl();
            if (url !== undefined) cfg.baseUrl = url;
        }
        cfg.shim = extend(defaultShim(), cfg.shim);
        cfg.paths = newPaths(cfg);
        require.config(cfg);
    };

}(this));

lux.config({});

require(['angular'], function (angular) {

    angular.module('twitter-example', ['templates-tweets'])

        .directive('twitter', ['$rootScope', '$log', function (root, log) {

            // Reuse one WebSocket handle per url, cached on the root scope.
            function connectSock(scope, url) {
                if (!root.websockets)
                    root.websockets = {};
                var hnd = root.websockets[url];
                if (!hnd)
                    root.websockets[url] = hnd = createSocket(url);
                return hnd;
            }

            // Open a WebSocket and fan incoming tweets out to listeners.
            function createSocket (url) {
                var sock = new WebSocket(url),
                    listeners = [];

                sock.onopen = function() {
                    log.info('New connection with ' + url);
                };

                sock.onmessage = function (e) {
                    var msg = angular.fromJson(e.data);
                    // Numeric timestamp is required for the orderBy filter.
                    msg.timestamp = +msg.timestamp;
                    msg.url = 'https://twitter.com/' + msg.user.screen_name + '/status/' + msg.id_str;
                    angular.forEach(listeners, function (listener) {
                        listener(sock, msg);
                    });
                };

                return {
                    sock: sock,
                    listeners: listeners
                };
            }

            // Closure which handles incoming messages from the server
            function tweetArrived (scope) {
                return function (sock, msg) {
                    scope.messages.push(msg);
                    // $apply: the push happens outside Angular's digest.
                    scope.$apply();
                };
            }

            return {
                restrict: 'AE',
                templateUrl: 'tweets/templates/tweets.tpl.html',
                link: function (scope, element, attrs) {
                    // Options are passed as JSON in the "twitter" attribute.
                    var options = attrs.twitter;
                    if (options)
                        options = angular.fromJson(options);
                    scope.messages = [];
                    if (options && options.url) {
                        var hnd = connectSock(scope, options.url);
                        hnd.listeners.push(tweetArrived(scope));
                    } else
                        log.error('Twitter directive improperly configured, no url found');
                }
            };
        }]);

    angular.module('templates-tweets', ['tweets/templates/tweets.tpl.html']);

    // Pre-compiled template, registered directly in the $templateCache.
    angular.module("tweets/templates/tweets.tpl.html", []).run(["$templateCache", function($templateCache) {
        $templateCache.put("tweets/templates/tweets.tpl.html",
            "<div class=\"media\" ng-repeat=\"msg in messages | orderBy: ['-timestamp']\">\n" +
            " <div class=\"media-left\">\n" +
            " <a ng-href=\"{{ msg.url }}\">\n" +
            " <img class=\"media-object\" ng-src=\"{{msg.user.profile_image_url_https}}\"\n" +
            " alt=\"{{msg.user.name}}\" class=\"img-thumbnail\">\n" +
            " </a>\n" +
            " </div>\n" +
            " <div class=\"media-body\">\n" +
            " <p class='list-group-item-text message'>{{msg.text}}</p>\n" +
            " </div>\n" +
            "</div>\n" +
            "");
    }]);

    //
    // Angular bootstrap
    angular.bootstrap(document, ['twitter-example']);

});
dejlek/pulsar
examples/tweets/assets/tweets.js
JavaScript
bsd-3-clause
10,267
//===- X86AvoidStoreForwardingBlockis.cpp - Avoid HW Store Forward Block --===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // If a load follows a store and reloads data that the store has written to // memory, Intel microarchitectures can in many cases forward the data directly // from the store to the load, This "store forwarding" saves cycles by enabling // the load to directly obtain the data instead of accessing the data from // cache or memory. // A "store forward block" occurs in cases that a store cannot be forwarded to // the load. The most typical case of store forward block on Intel Core // microarchitecture that a small store cannot be forwarded to a large load. // The estimated penalty for a store forward block is ~13 cycles. // // This pass tries to recognize and handle cases where "store forward block" // is created by the compiler when lowering memcpy calls to a sequence // of a load and a store. // // The pass currently only handles cases where memcpy is lowered to // XMM/YMM registers, it tries to break the memcpy into smaller copies. // breaking the memcpy should be possible since there is no atomicity // guarantee for loads and stores to XMM/YMM. // // It could be better for performance to solve the problem by loading // to XMM/YMM then inserting the partial store before storing back from XMM/YMM // to memory, but this will result in a more conservative optimization since it // requires we prove that all memory accesses between the blocking store and the // load must alias/don't alias before we can move the store, whereas the // transformation done here is correct regardless to other memory accesses. 
//===----------------------------------------------------------------------===// #include "X86InstrInfo.h" #include "X86Subtarget.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/Function.h" #include "llvm/InitializePasses.h" #include "llvm/MC/MCInstrDesc.h" using namespace llvm; #define DEBUG_TYPE "x86-avoid-SFB" static cl::opt<bool> DisableX86AvoidStoreForwardBlocks( "x86-disable-avoid-SFB", cl::Hidden, cl::desc("X86: Disable Store Forwarding Blocks fixup."), cl::init(false)); static cl::opt<unsigned> X86AvoidSFBInspectionLimit( "x86-sfb-inspection-limit", cl::desc("X86: Number of instructions backward to " "inspect for store forwarding blocks."), cl::init(20), cl::Hidden); namespace { using DisplacementSizeMap = std::map<int64_t, unsigned>; class X86AvoidSFBPass : public MachineFunctionPass { public: static char ID; X86AvoidSFBPass() : MachineFunctionPass(ID) { } StringRef getPassName() const override { return "X86 Avoid Store Forwarding Blocks"; } bool runOnMachineFunction(MachineFunction &MF) override; void getAnalysisUsage(AnalysisUsage &AU) const override { MachineFunctionPass::getAnalysisUsage(AU); AU.addRequired<AAResultsWrapperPass>(); } private: MachineRegisterInfo *MRI = nullptr; const X86InstrInfo *TII = nullptr; const X86RegisterInfo *TRI = nullptr; SmallVector<std::pair<MachineInstr *, MachineInstr *>, 2> BlockedLoadsStoresPairs; SmallVector<MachineInstr *, 2> ForRemoval; AliasAnalysis *AA = nullptr; /// Returns couples of Load then Store to memory which look /// like a memcpy. 
void findPotentiallylBlockedCopies(MachineFunction &MF); /// Break the memcpy's load and store into smaller copies /// such that each memory load that was blocked by a smaller store /// would now be copied separately. void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst, const DisplacementSizeMap &BlockingStoresDispSizeMap); /// Break a copy of size Size to smaller copies. void buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm, MachineInstr *StoreInst, int64_t StDispImm, int64_t LMMOffset, int64_t SMMOffset); void buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode, int64_t LoadDisp, MachineInstr *StoreInst, unsigned NStoreOpcode, int64_t StoreDisp, unsigned Size, int64_t LMMOffset, int64_t SMMOffset); bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2) const; unsigned getRegSizeInBytes(MachineInstr *Inst); }; } // end anonymous namespace char X86AvoidSFBPass::ID = 0; INITIALIZE_PASS_BEGIN(X86AvoidSFBPass, DEBUG_TYPE, "Machine code sinking", false, false) INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) INITIALIZE_PASS_END(X86AvoidSFBPass, DEBUG_TYPE, "Machine code sinking", false, false) FunctionPass *llvm::createX86AvoidStoreForwardingBlocks() { return new X86AvoidSFBPass(); } static bool isXMMLoadOpcode(unsigned Opcode) { return Opcode == X86::MOVUPSrm || Opcode == X86::MOVAPSrm || Opcode == X86::VMOVUPSrm || Opcode == X86::VMOVAPSrm || Opcode == X86::VMOVUPDrm || Opcode == X86::VMOVAPDrm || Opcode == X86::VMOVDQUrm || Opcode == X86::VMOVDQArm || Opcode == X86::VMOVUPSZ128rm || Opcode == X86::VMOVAPSZ128rm || Opcode == X86::VMOVUPDZ128rm || Opcode == X86::VMOVAPDZ128rm || Opcode == X86::VMOVDQU64Z128rm || Opcode == X86::VMOVDQA64Z128rm || Opcode == X86::VMOVDQU32Z128rm || Opcode == X86::VMOVDQA32Z128rm; } static bool isYMMLoadOpcode(unsigned Opcode) { return Opcode == X86::VMOVUPSYrm || Opcode == X86::VMOVAPSYrm || Opcode == X86::VMOVUPDYrm || Opcode == X86::VMOVAPDYrm || Opcode == X86::VMOVDQUYrm || Opcode 
== X86::VMOVDQAYrm || Opcode == X86::VMOVUPSZ256rm || Opcode == X86::VMOVAPSZ256rm || Opcode == X86::VMOVUPDZ256rm || Opcode == X86::VMOVAPDZ256rm || Opcode == X86::VMOVDQU64Z256rm || Opcode == X86::VMOVDQA64Z256rm || Opcode == X86::VMOVDQU32Z256rm || Opcode == X86::VMOVDQA32Z256rm; } static bool isPotentialBlockedMemCpyLd(unsigned Opcode) { return isXMMLoadOpcode(Opcode) || isYMMLoadOpcode(Opcode); } static bool isPotentialBlockedMemCpyPair(int LdOpcode, int StOpcode) { switch (LdOpcode) { case X86::MOVUPSrm: case X86::MOVAPSrm: return StOpcode == X86::MOVUPSmr || StOpcode == X86::MOVAPSmr; case X86::VMOVUPSrm: case X86::VMOVAPSrm: return StOpcode == X86::VMOVUPSmr || StOpcode == X86::VMOVAPSmr; case X86::VMOVUPDrm: case X86::VMOVAPDrm: return StOpcode == X86::VMOVUPDmr || StOpcode == X86::VMOVAPDmr; case X86::VMOVDQUrm: case X86::VMOVDQArm: return StOpcode == X86::VMOVDQUmr || StOpcode == X86::VMOVDQAmr; case X86::VMOVUPSZ128rm: case X86::VMOVAPSZ128rm: return StOpcode == X86::VMOVUPSZ128mr || StOpcode == X86::VMOVAPSZ128mr; case X86::VMOVUPDZ128rm: case X86::VMOVAPDZ128rm: return StOpcode == X86::VMOVUPDZ128mr || StOpcode == X86::VMOVAPDZ128mr; case X86::VMOVUPSYrm: case X86::VMOVAPSYrm: return StOpcode == X86::VMOVUPSYmr || StOpcode == X86::VMOVAPSYmr; case X86::VMOVUPDYrm: case X86::VMOVAPDYrm: return StOpcode == X86::VMOVUPDYmr || StOpcode == X86::VMOVAPDYmr; case X86::VMOVDQUYrm: case X86::VMOVDQAYrm: return StOpcode == X86::VMOVDQUYmr || StOpcode == X86::VMOVDQAYmr; case X86::VMOVUPSZ256rm: case X86::VMOVAPSZ256rm: return StOpcode == X86::VMOVUPSZ256mr || StOpcode == X86::VMOVAPSZ256mr; case X86::VMOVUPDZ256rm: case X86::VMOVAPDZ256rm: return StOpcode == X86::VMOVUPDZ256mr || StOpcode == X86::VMOVAPDZ256mr; case X86::VMOVDQU64Z128rm: case X86::VMOVDQA64Z128rm: return StOpcode == X86::VMOVDQU64Z128mr || StOpcode == X86::VMOVDQA64Z128mr; case X86::VMOVDQU32Z128rm: case X86::VMOVDQA32Z128rm: return StOpcode == X86::VMOVDQU32Z128mr || StOpcode == 
X86::VMOVDQA32Z128mr; case X86::VMOVDQU64Z256rm: case X86::VMOVDQA64Z256rm: return StOpcode == X86::VMOVDQU64Z256mr || StOpcode == X86::VMOVDQA64Z256mr; case X86::VMOVDQU32Z256rm: case X86::VMOVDQA32Z256rm: return StOpcode == X86::VMOVDQU32Z256mr || StOpcode == X86::VMOVDQA32Z256mr; default: return false; } } static bool isPotentialBlockingStoreInst(int Opcode, int LoadOpcode) { bool PBlock = false; PBlock |= Opcode == X86::MOV64mr || Opcode == X86::MOV64mi32 || Opcode == X86::MOV32mr || Opcode == X86::MOV32mi || Opcode == X86::MOV16mr || Opcode == X86::MOV16mi || Opcode == X86::MOV8mr || Opcode == X86::MOV8mi; if (isYMMLoadOpcode(LoadOpcode)) PBlock |= Opcode == X86::VMOVUPSmr || Opcode == X86::VMOVAPSmr || Opcode == X86::VMOVUPDmr || Opcode == X86::VMOVAPDmr || Opcode == X86::VMOVDQUmr || Opcode == X86::VMOVDQAmr || Opcode == X86::VMOVUPSZ128mr || Opcode == X86::VMOVAPSZ128mr || Opcode == X86::VMOVUPDZ128mr || Opcode == X86::VMOVAPDZ128mr || Opcode == X86::VMOVDQU64Z128mr || Opcode == X86::VMOVDQA64Z128mr || Opcode == X86::VMOVDQU32Z128mr || Opcode == X86::VMOVDQA32Z128mr; return PBlock; } static const int MOV128SZ = 16; static const int MOV64SZ = 8; static const int MOV32SZ = 4; static const int MOV16SZ = 2; static const int MOV8SZ = 1; static unsigned getYMMtoXMMLoadOpcode(unsigned LoadOpcode) { switch (LoadOpcode) { case X86::VMOVUPSYrm: case X86::VMOVAPSYrm: return X86::VMOVUPSrm; case X86::VMOVUPDYrm: case X86::VMOVAPDYrm: return X86::VMOVUPDrm; case X86::VMOVDQUYrm: case X86::VMOVDQAYrm: return X86::VMOVDQUrm; case X86::VMOVUPSZ256rm: case X86::VMOVAPSZ256rm: return X86::VMOVUPSZ128rm; case X86::VMOVUPDZ256rm: case X86::VMOVAPDZ256rm: return X86::VMOVUPDZ128rm; case X86::VMOVDQU64Z256rm: case X86::VMOVDQA64Z256rm: return X86::VMOVDQU64Z128rm; case X86::VMOVDQU32Z256rm: case X86::VMOVDQA32Z256rm: return X86::VMOVDQU32Z128rm; default: llvm_unreachable("Unexpected Load Instruction Opcode"); } return 0; } static unsigned getYMMtoXMMStoreOpcode(unsigned 
StoreOpcode) { switch (StoreOpcode) { case X86::VMOVUPSYmr: case X86::VMOVAPSYmr: return X86::VMOVUPSmr; case X86::VMOVUPDYmr: case X86::VMOVAPDYmr: return X86::VMOVUPDmr; case X86::VMOVDQUYmr: case X86::VMOVDQAYmr: return X86::VMOVDQUmr; case X86::VMOVUPSZ256mr: case X86::VMOVAPSZ256mr: return X86::VMOVUPSZ128mr; case X86::VMOVUPDZ256mr: case X86::VMOVAPDZ256mr: return X86::VMOVUPDZ128mr; case X86::VMOVDQU64Z256mr: case X86::VMOVDQA64Z256mr: return X86::VMOVDQU64Z128mr; case X86::VMOVDQU32Z256mr: case X86::VMOVDQA32Z256mr: return X86::VMOVDQU32Z128mr; default: llvm_unreachable("Unexpected Load Instruction Opcode"); } return 0; } static int getAddrOffset(MachineInstr *MI) { const MCInstrDesc &Descl = MI->getDesc(); int AddrOffset = X86II::getMemoryOperandNo(Descl.TSFlags); assert(AddrOffset != -1 && "Expected Memory Operand"); AddrOffset += X86II::getOperandBias(Descl); return AddrOffset; } static MachineOperand &getBaseOperand(MachineInstr *MI) { int AddrOffset = getAddrOffset(MI); return MI->getOperand(AddrOffset + X86::AddrBaseReg); } static MachineOperand &getDispOperand(MachineInstr *MI) { int AddrOffset = getAddrOffset(MI); return MI->getOperand(AddrOffset + X86::AddrDisp); } // Relevant addressing modes contain only base register and immediate // displacement or frameindex and immediate displacement. 
// TODO: Consider expanding to other addressing modes in the future static bool isRelevantAddressingMode(MachineInstr *MI) { int AddrOffset = getAddrOffset(MI); MachineOperand &Base = getBaseOperand(MI); MachineOperand &Disp = getDispOperand(MI); MachineOperand &Scale = MI->getOperand(AddrOffset + X86::AddrScaleAmt); MachineOperand &Index = MI->getOperand(AddrOffset + X86::AddrIndexReg); MachineOperand &Segment = MI->getOperand(AddrOffset + X86::AddrSegmentReg); if (!((Base.isReg() && Base.getReg() != X86::NoRegister) || Base.isFI())) return false; if (!Disp.isImm()) return false; if (Scale.getImm() != 1) return false; if (!(Index.isReg() && Index.getReg() == X86::NoRegister)) return false; if (!(Segment.isReg() && Segment.getReg() == X86::NoRegister)) return false; return true; } // Collect potentially blocking stores. // Limit the number of instructions backwards we want to inspect // since the effect of store block won't be visible if the store // and load instructions have enough instructions in between to // keep the core busy. static SmallVector<MachineInstr *, 2> findPotentialBlockers(MachineInstr *LoadInst) { SmallVector<MachineInstr *, 2> PotentialBlockers; unsigned BlockCount = 0; const unsigned InspectionLimit = X86AvoidSFBInspectionLimit; for (auto PBInst = std::next(MachineBasicBlock::reverse_iterator(LoadInst)), E = LoadInst->getParent()->rend(); PBInst != E; ++PBInst) { if (PBInst->isMetaInstruction()) continue; BlockCount++; if (BlockCount >= InspectionLimit) break; MachineInstr &MI = *PBInst; if (MI.getDesc().isCall()) return PotentialBlockers; PotentialBlockers.push_back(&MI); } // If we didn't get to the instructions limit try predecessing blocks. // Ideally we should traverse the predecessor blocks in depth with some // coloring algorithm, but for now let's just look at the first order // predecessors. 
if (BlockCount < InspectionLimit) { MachineBasicBlock *MBB = LoadInst->getParent(); int LimitLeft = InspectionLimit - BlockCount; for (MachineBasicBlock::pred_iterator PB = MBB->pred_begin(), PE = MBB->pred_end(); PB != PE; ++PB) { MachineBasicBlock *PMBB = *PB; int PredCount = 0; for (MachineBasicBlock::reverse_iterator PBInst = PMBB->rbegin(), PME = PMBB->rend(); PBInst != PME; ++PBInst) { if (PBInst->isMetaInstruction()) continue; PredCount++; if (PredCount >= LimitLeft) break; if (PBInst->getDesc().isCall()) break; PotentialBlockers.push_back(&*PBInst); } } } return PotentialBlockers; } void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode, int64_t LoadDisp, MachineInstr *StoreInst, unsigned NStoreOpcode, int64_t StoreDisp, unsigned Size, int64_t LMMOffset, int64_t SMMOffset) { MachineOperand &LoadBase = getBaseOperand(LoadInst); MachineOperand &StoreBase = getBaseOperand(StoreInst); MachineBasicBlock *MBB = LoadInst->getParent(); MachineMemOperand *LMMO = *LoadInst->memoperands_begin(); MachineMemOperand *SMMO = *StoreInst->memoperands_begin(); Register Reg1 = MRI->createVirtualRegister( TII->getRegClass(TII->get(NLoadOpcode), 0, TRI, *(MBB->getParent()))); MachineInstr *NewLoad = BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode), Reg1) .add(LoadBase) .addImm(1) .addReg(X86::NoRegister) .addImm(LoadDisp) .addReg(X86::NoRegister) .addMemOperand( MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size)); if (LoadBase.isReg()) getBaseOperand(NewLoad).setIsKill(false); LLVM_DEBUG(NewLoad->dump()); // If the load and store are consecutive, use the loadInst location to // reduce register pressure. 
MachineInstr *StInst = StoreInst; auto PrevInstrIt = skipDebugInstructionsBackward( std::prev(MachineBasicBlock::instr_iterator(StoreInst)), MBB->instr_begin()); if (PrevInstrIt.getNodePtr() == LoadInst) StInst = LoadInst; MachineInstr *NewStore = BuildMI(*MBB, StInst, StInst->getDebugLoc(), TII->get(NStoreOpcode)) .add(StoreBase) .addImm(1) .addReg(X86::NoRegister) .addImm(StoreDisp) .addReg(X86::NoRegister) .addReg(Reg1) .addMemOperand( MBB->getParent()->getMachineMemOperand(SMMO, SMMOffset, Size)); if (StoreBase.isReg()) getBaseOperand(NewStore).setIsKill(false); MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands); assert(StoreSrcVReg.isReg() && "Expected virtual register"); NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill()); LLVM_DEBUG(NewStore->dump()); } void X86AvoidSFBPass::buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm, MachineInstr *StoreInst, int64_t StDispImm, int64_t LMMOffset, int64_t SMMOffset) { int LdDisp = LdDispImm; int StDisp = StDispImm; while (Size > 0) { if ((Size - MOV128SZ >= 0) && isYMMLoadOpcode(LoadInst->getOpcode())) { Size = Size - MOV128SZ; buildCopy(LoadInst, getYMMtoXMMLoadOpcode(LoadInst->getOpcode()), LdDisp, StoreInst, getYMMtoXMMStoreOpcode(StoreInst->getOpcode()), StDisp, MOV128SZ, LMMOffset, SMMOffset); LdDisp += MOV128SZ; StDisp += MOV128SZ; LMMOffset += MOV128SZ; SMMOffset += MOV128SZ; continue; } if (Size - MOV64SZ >= 0) { Size = Size - MOV64SZ; buildCopy(LoadInst, X86::MOV64rm, LdDisp, StoreInst, X86::MOV64mr, StDisp, MOV64SZ, LMMOffset, SMMOffset); LdDisp += MOV64SZ; StDisp += MOV64SZ; LMMOffset += MOV64SZ; SMMOffset += MOV64SZ; continue; } if (Size - MOV32SZ >= 0) { Size = Size - MOV32SZ; buildCopy(LoadInst, X86::MOV32rm, LdDisp, StoreInst, X86::MOV32mr, StDisp, MOV32SZ, LMMOffset, SMMOffset); LdDisp += MOV32SZ; StDisp += MOV32SZ; LMMOffset += MOV32SZ; SMMOffset += MOV32SZ; continue; } if (Size - MOV16SZ >= 0) { Size = Size - MOV16SZ; buildCopy(LoadInst, 
X86::MOV16rm, LdDisp, StoreInst, X86::MOV16mr, StDisp, MOV16SZ, LMMOffset, SMMOffset); LdDisp += MOV16SZ; StDisp += MOV16SZ; LMMOffset += MOV16SZ; SMMOffset += MOV16SZ; continue; } if (Size - MOV8SZ >= 0) { Size = Size - MOV8SZ; buildCopy(LoadInst, X86::MOV8rm, LdDisp, StoreInst, X86::MOV8mr, StDisp, MOV8SZ, LMMOffset, SMMOffset); LdDisp += MOV8SZ; StDisp += MOV8SZ; LMMOffset += MOV8SZ; SMMOffset += MOV8SZ; continue; } } assert(Size == 0 && "Wrong size division"); } static void updateKillStatus(MachineInstr *LoadInst, MachineInstr *StoreInst) { MachineOperand &LoadBase = getBaseOperand(LoadInst); MachineOperand &StoreBase = getBaseOperand(StoreInst); auto StorePrevNonDbgInstr = skipDebugInstructionsBackward( std::prev(MachineBasicBlock::instr_iterator(StoreInst)), LoadInst->getParent()->instr_begin()).getNodePtr(); if (LoadBase.isReg()) { MachineInstr *LastLoad = LoadInst->getPrevNode(); // If the original load and store to xmm/ymm were consecutive // then the partial copies were also created in // a consecutive order to reduce register pressure, // and the location of the last load is before the last store. 
if (StorePrevNonDbgInstr == LoadInst) LastLoad = LoadInst->getPrevNode()->getPrevNode(); getBaseOperand(LastLoad).setIsKill(LoadBase.isKill()); } if (StoreBase.isReg()) { MachineInstr *StInst = StoreInst; if (StorePrevNonDbgInstr == LoadInst) StInst = LoadInst; getBaseOperand(StInst->getPrevNode()).setIsKill(StoreBase.isKill()); } } bool X86AvoidSFBPass::alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2) const { if (!Op1.getValue() || !Op2.getValue()) return true; int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset()); int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset; int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset; AliasResult AAResult = AA->alias(MemoryLocation(Op1.getValue(), Overlapa, Op1.getAAInfo()), MemoryLocation(Op2.getValue(), Overlapb, Op2.getAAInfo())); return AAResult != NoAlias; } void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) { for (auto &MBB : MF) for (auto &MI : MBB) { if (!isPotentialBlockedMemCpyLd(MI.getOpcode())) continue; int DefVR = MI.getOperand(0).getReg(); if (!MRI->hasOneNonDBGUse(DefVR)) continue; for (auto UI = MRI->use_nodbg_begin(DefVR), UE = MRI->use_nodbg_end(); UI != UE;) { MachineOperand &StoreMO = *UI++; MachineInstr &StoreMI = *StoreMO.getParent(); // Skip cases where the memcpy may overlap. 
if (StoreMI.getParent() == MI.getParent() && isPotentialBlockedMemCpyPair(MI.getOpcode(), StoreMI.getOpcode()) && isRelevantAddressingMode(&MI) && isRelevantAddressingMode(&StoreMI)) { assert(MI.hasOneMemOperand() && "Expected one memory operand for load instruction"); assert(StoreMI.hasOneMemOperand() && "Expected one memory operand for store instruction"); if (!alias(**MI.memoperands_begin(), **StoreMI.memoperands_begin())) BlockedLoadsStoresPairs.push_back(std::make_pair(&MI, &StoreMI)); } } } } unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) { auto TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0, TRI, *LoadInst->getParent()->getParent()); return TRI->getRegSizeInBits(*TRC) / 8; } void X86AvoidSFBPass::breakBlockedCopies( MachineInstr *LoadInst, MachineInstr *StoreInst, const DisplacementSizeMap &BlockingStoresDispSizeMap) { int64_t LdDispImm = getDispOperand(LoadInst).getImm(); int64_t StDispImm = getDispOperand(StoreInst).getImm(); int64_t LMMOffset = 0; int64_t SMMOffset = 0; int64_t LdDisp1 = LdDispImm; int64_t LdDisp2 = 0; int64_t StDisp1 = StDispImm; int64_t StDisp2 = 0; unsigned Size1 = 0; unsigned Size2 = 0; int64_t LdStDelta = StDispImm - LdDispImm; for (auto DispSizePair : BlockingStoresDispSizeMap) { LdDisp2 = DispSizePair.first; StDisp2 = DispSizePair.first + LdStDelta; Size2 = DispSizePair.second; // Avoid copying overlapping areas. if (LdDisp2 < LdDisp1) { int OverlapDelta = LdDisp1 - LdDisp2; LdDisp2 += OverlapDelta; StDisp2 += OverlapDelta; Size2 -= OverlapDelta; } Size1 = LdDisp2 - LdDisp1; // Build a copy for the point until the current blocking store's // displacement. buildCopies(Size1, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset, SMMOffset); // Build a copy for the current blocking store. 
buildCopies(Size2, LoadInst, LdDisp2, StoreInst, StDisp2, LMMOffset + Size1, SMMOffset + Size1); LdDisp1 = LdDisp2 + Size2; StDisp1 = StDisp2 + Size2; LMMOffset += Size1 + Size2; SMMOffset += Size1 + Size2; } unsigned Size3 = (LdDispImm + getRegSizeInBytes(LoadInst)) - LdDisp1; buildCopies(Size3, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset, LMMOffset); } static bool hasSameBaseOpValue(MachineInstr *LoadInst, MachineInstr *StoreInst) { MachineOperand &LoadBase = getBaseOperand(LoadInst); MachineOperand &StoreBase = getBaseOperand(StoreInst); if (LoadBase.isReg() != StoreBase.isReg()) return false; if (LoadBase.isReg()) return LoadBase.getReg() == StoreBase.getReg(); return LoadBase.getIndex() == StoreBase.getIndex(); } static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize, int64_t StoreDispImm, unsigned StoreSize) { return ((StoreDispImm >= LoadDispImm) && (StoreDispImm <= LoadDispImm + (LoadSize - StoreSize))); } // Keep track of all stores blocking a load static void updateBlockingStoresDispSizeMap(DisplacementSizeMap &BlockingStoresDispSizeMap, int64_t DispImm, unsigned Size) { if (BlockingStoresDispSizeMap.count(DispImm)) { // Choose the smallest blocking store starting at this displacement. if (BlockingStoresDispSizeMap[DispImm] > Size) BlockingStoresDispSizeMap[DispImm] = Size; } else BlockingStoresDispSizeMap[DispImm] = Size; } // Remove blocking stores contained in each other. 
static void removeRedundantBlockingStores(DisplacementSizeMap &BlockingStoresDispSizeMap) { if (BlockingStoresDispSizeMap.size() <= 1) return; SmallVector<std::pair<int64_t, unsigned>, 0> DispSizeStack; for (auto DispSizePair : BlockingStoresDispSizeMap) { int64_t CurrDisp = DispSizePair.first; unsigned CurrSize = DispSizePair.second; while (DispSizeStack.size()) { int64_t PrevDisp = DispSizeStack.back().first; unsigned PrevSize = DispSizeStack.back().second; if (CurrDisp + CurrSize > PrevDisp + PrevSize) break; DispSizeStack.pop_back(); } DispSizeStack.push_back(DispSizePair); } BlockingStoresDispSizeMap.clear(); for (auto Disp : DispSizeStack) BlockingStoresDispSizeMap.insert(Disp); } bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) { bool Changed = false; if (DisableX86AvoidStoreForwardBlocks || skipFunction(MF.getFunction()) || !MF.getSubtarget<X86Subtarget>().is64Bit()) return false; MRI = &MF.getRegInfo(); assert(MRI->isSSA() && "Expected MIR to be in SSA form"); TII = MF.getSubtarget<X86Subtarget>().getInstrInfo(); TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo(); AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";); // Look for a load then a store to XMM/YMM which look like a memcpy findPotentiallylBlockedCopies(MF); for (auto LoadStoreInstPair : BlockedLoadsStoresPairs) { MachineInstr *LoadInst = LoadStoreInstPair.first; int64_t LdDispImm = getDispOperand(LoadInst).getImm(); DisplacementSizeMap BlockingStoresDispSizeMap; SmallVector<MachineInstr *, 2> PotentialBlockers = findPotentialBlockers(LoadInst); for (auto PBInst : PotentialBlockers) { if (!isPotentialBlockingStoreInst(PBInst->getOpcode(), LoadInst->getOpcode()) || !isRelevantAddressingMode(PBInst)) continue; int64_t PBstDispImm = getDispOperand(PBInst).getImm(); assert(PBInst->hasOneMemOperand() && "Expected One Memory Operand"); unsigned PBstSize = (*PBInst->memoperands_begin())->getSize(); // This check doesn't 
cover all cases, but it will suffice for now. // TODO: take branch probability into consideration, if the blocking // store is in an unreached block, breaking the memcopy could lose // performance. if (hasSameBaseOpValue(LoadInst, PBInst) && isBlockingStore(LdDispImm, getRegSizeInBytes(LoadInst), PBstDispImm, PBstSize)) updateBlockingStoresDispSizeMap(BlockingStoresDispSizeMap, PBstDispImm, PBstSize); } if (BlockingStoresDispSizeMap.empty()) continue; // We found a store forward block, break the memcpy's load and store // into smaller copies such that each smaller store that was causing // a store block would now be copied separately. MachineInstr *StoreInst = LoadStoreInstPair.second; LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n"); LLVM_DEBUG(LoadInst->dump()); LLVM_DEBUG(StoreInst->dump()); LLVM_DEBUG(dbgs() << "Replaced with:\n"); removeRedundantBlockingStores(BlockingStoresDispSizeMap); breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap); updateKillStatus(LoadInst, StoreInst); ForRemoval.push_back(LoadInst); ForRemoval.push_back(StoreInst); } for (auto RemovedInst : ForRemoval) { RemovedInst->eraseFromParent(); } ForRemoval.clear(); BlockedLoadsStoresPairs.clear(); LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";); return Changed; }
endlessm/chromium-browser
third_party/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
C++
bsd-3-clause
28,773
/** * Copyright (C) 2016 Turi * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ /* * Copyright (c) 2009 Carnegie Mellon University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language * governing permissions and limitations under the License. * * For more about this software visit: * * http://www.graphlab.ml.cmu.edu * */ #ifndef GRAPHLAB_RPC_CIRCULAR_IOVEC_BUFFER_HPP #define GRAPHLAB_RPC_CIRCULAR_IOVEC_BUFFER_HPP #include <vector> #include <sys/socket.h> namespace graphlab{ namespace dc_impl { /** * \ingroup rpc * \internal * A circular buffer which maintains a parallel sequence of iovecs. * One sequence is basic iovecs * The other sequence is used for storing the original unomidifed pointers * This is minimally checked. 
length must be a power of 2 */ struct circular_iovec_buffer { inline circular_iovec_buffer(size_t len = 4096) { v.resize(4096); parallel_v.resize(4096); head = 0; tail = 0; numel = 0; } inline bool empty() const { return numel == 0; } size_t size() const { return numel; } void reserve(size_t _n) { if (_n <= v.size()) return; size_t originalsize = v.size(); size_t n = v.size(); // must be a power of 2 while (n <= _n) n *= 2; v.resize(n); parallel_v.resize(n); if (head >= tail && numel > 0) { // there is a loop around // we need to fix the shift size_t newtail = originalsize; for (size_t i = 0;i < tail; ++i) { v[newtail] = v[i]; parallel_v[newtail] = parallel_v[i]; ++newtail; } tail = newtail; } } inline void write(const std::vector<iovec>& other, size_t nwrite) { reserve(numel + nwrite); for (size_t i = 0;i < nwrite; ++i) { v[tail] = other[i]; parallel_v[tail] = other[i]; tail = (tail + 1) & (v.size() - 1); } numel += nwrite; } /** * Writes an entry into the buffer, resizing the buffer if necessary. * This buffer will take over all iovec pointers and free them when done */ inline void write(const iovec &entry) { if (numel == v.size()) { reserve(2 * numel); } v[tail] = entry; parallel_v[tail] = entry; tail = (tail + 1) & (v.size() - 1); ++numel; } /** * Writes an entry into the buffer, resizing the buffer if necessary. * This buffer will take over all iovec pointers and free them when done. * This version of write allows the iovec that is sent to be different from the * iovec that is freed. (for instance, what is sent could be subarray of * what is to be freed. 
*/ inline void write(const iovec &entry, const iovec& actual_ptr_entry) { if (numel == v.size()) { reserve(2 * numel); } v[tail] = actual_ptr_entry; parallel_v[tail] = entry; tail = (tail + 1) & (v.size() - 1); ++numel; } /** * Erases a single iovec from the head and free the pointer */ inline void erase_from_head_and_free() { free(v[head].iov_base); head = (head + 1) & (v.size() - 1); --numel; } /** * Fills a msghdr for unsent data. */ void fill_msghdr(struct msghdr& data) { data.msg_iov = &(parallel_v[head]); if (head < tail) { data.msg_iovlen = tail - head; } else { data.msg_iovlen = v.size() - head; } data.msg_iovlen = std::min<size_t>(IOV_MAX, data.msg_iovlen); } /** * Advances the head as if some amount of data was sent. */ void sent(size_t len) { while(len > 0) { size_t curv_sent_len = std::min(len, parallel_v[head].iov_len); parallel_v[head].iov_len -= curv_sent_len; parallel_v[head].iov_base = (char*)(parallel_v[head].iov_base) + curv_sent_len; len -= curv_sent_len; if (parallel_v[head].iov_len == 0) { erase_from_head_and_free(); } } } std::vector<struct iovec> v; std::vector<struct iovec> parallel_v; size_t head; size_t tail; size_t numel; }; } } #endif
TobyRoseman/SFrame
oss_src/rpc/circular_iovec_buffer.hpp
C++
bsd-3-clause
4,545
<?php // Name: /alert/confirm/index.php // Author: Richard Allan richard@sheffieldhallam.org.uk // Version: 0.5 beta // Date: 6th Jan 2005 // Description: This file contains ALERT class. // This the page users come to when they click the link in their // confirmation email after joining the site. // What happens? They will come here with t=23-adsf7897fd78d9sfsd200501021500 // where the value of 't' is a form of their registration token. // This token is a salted version of their email address concatenated // with the time the alert was created. // We check this exists in the database and if so we run the confirm // function of class ALERT to set the field confirmed in the table // alerts to true. // We then print a nice welcome message. // This depends on there being page definitions in metadata.php // FUNCTIONS // confirm_success() Displays a page with a success confirmation message // confirm_error() Displays a page with an error message // INITIALISATION include_once "../../../includes/easyparliament/init.php"; include_once "../../../includes/easyparliament/member.php"; include_once INCLUDESPATH . 
'../../../phplib/crosssell.php'; // Instantiate an instance of ALERT $ALERT = new ALERT; $success = $ALERT->confirm( get_http_var('t') ); if ($success) { confirm_success($ALERT); } else { confirm_error(); } // FUNCTION: confirm_success function confirm_success ($ALERT) { global $PAGE, $this_page, $THEUSER; $this_page = 'alertconfirmsucceeded'; $criteria = $ALERT->criteria_pretty(true); $email = $ALERT->email(); $extra = null; $PAGE->page_start(); $PAGE->stripe_start(); ?> <p>Your alert has been confirmed.</p> <p>You will now receive email alerts for the following criteria:</p> <ul><?=$criteria?></ul> <p>This is normally the day after, but could conceivably be later due to issues at our or aph.gov.au's end.</p> <?php $extra = alert_confirmation_advert(array('email'=>$email, 'pid'=>strstr($ALERT->criteria(),'speaker:'))); if ($extra) $extra = "advert=$extra"; $PAGE->stripe_end(); $PAGE->page_end($extra); } // FUNCTION: confirm_error function confirm_error() { // Friendly error, not a normal one! global $PAGE, $this_page; $this_page = 'alertconfirmfailed'; $PAGE->page_start(); $PAGE->stripe_start(); ?> <p>The link you followed to reach this page appears to be incomplete.</p> <p>If you clicked a link in your confirmation email you may need to manually copy and paste the entire link to the 'Location' bar of the web browser and try again.</p> <p>If you still get this message, please do <a href="mailto:<?php echo CONTACTEMAIL; ?>">email us</a> and let us know, and we'll help out!</p> <?php $PAGE->stripe_end(); $PAGE->page_end(); } ?>
NathanaelB/twfy
www/docs/alert/confirm/index.php
PHP
bsd-3-clause
2,728
//===-- AMDGPUMachineFunctionInfo.cpp ---------------------------------------=// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "AMDGPUMachineFunction.h" #include "AMDGPUSubtarget.h" #include "AMDGPUPerfHintAnalysis.h" #include "llvm/CodeGen/MachineModuleInfo.h" using namespace llvm; AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) : MachineFunctionInfo(), LocalMemoryObjects(), ExplicitKernArgSize(0), LDSSize(0), Mode(MF.getFunction(), MF.getSubtarget<GCNSubtarget>()), IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())), NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath), MemoryBound(false), WaveLimiter(false) { const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF); // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset, // except reserved size is not correctly aligned. 
const Function &F = MF.getFunction(); Attribute MemBoundAttr = F.getFnAttribute("amdgpu-memory-bound"); MemoryBound = MemBoundAttr.isStringAttribute() && MemBoundAttr.getValueAsString() == "true"; Attribute WaveLimitAttr = F.getFnAttribute("amdgpu-wave-limiter"); WaveLimiter = WaveLimitAttr.isStringAttribute() && WaveLimitAttr.getValueAsString() == "true"; CallingConv::ID CC = F.getCallingConv(); if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign); } unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL, const GlobalValue &GV) { auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0)); if (!Entry.second) return Entry.first->second; unsigned Align = GV.getAlignment(); if (Align == 0) Align = DL.getABITypeAlignment(GV.getValueType()); /// TODO: We should sort these to minimize wasted space due to alignment /// padding. Currently the padding is decided by the first encountered use /// during lowering. unsigned Offset = LDSSize = alignTo(LDSSize, Align); Entry.first->second = Offset; LDSSize += DL.getTypeAllocSize(GV.getValueType()); return Offset; }
endlessm/chromium-browser
third_party/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
C++
bsd-3-clause
2,450
// Copyright 2014 Google Inc. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file or at // https://developers.google.com/open-source/licenses/bsd package com.google.u2f.key.messages; import java.util.Arrays; public class AuthenticateResponse extends U2FResponse { private final byte userPresence; private final int counter; private final byte[] signature; public AuthenticateResponse(byte userPresence, int counter, byte[] signature) { super(); this.userPresence = userPresence; this.counter = counter; this.signature = signature; } /** * Bit 0 is set to 1, which means that user presence was verified. (This * version of the protocol doesn't specify a way to request authentication * responses without requiring user presence.) A different value of Bit 0, as * well as Bits 1 through 7, are reserved for future use. The values of Bit 1 * through 7 SHOULD be 0 */ public byte getUserPresence() { return userPresence; } /** * This is the big-endian representation of a counter value that the U2F token * increments every time it performs an authentication operation. */ public int getCounter() { return counter; } /** This is a ECDSA signature (on P-256) */ public byte[] getSignature() { return signature; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + counter; result = prime * result + Arrays.hashCode(signature); result = prime * result + userPresence; return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; AuthenticateResponse other = (AuthenticateResponse) obj; if (counter != other.counter) return false; if (!Arrays.equals(signature, other.signature)) return false; if (userPresence != other.userPresence) return false; return true; } }
jshufelt/u2f-ref-code
u2f-ref-code/java/src/com/google/u2f/key/messages/AuthenticateResponse.java
Java
bsd-3-clause
2,086
#!/usr/bin/python #---------------------------------------------------------------------- # This module is designed to live inside the "lldb" python package # in the "lldb.macosx" package. To use this in the embedded python # interpreter using "lldb" just import it: # # (lldb) script import lldb.macosx.heap #---------------------------------------------------------------------- from __future__ import print_function import lldb import optparse import os import os.path import re import shlex import string import sys import tempfile import lldb.utils.symbolication g_libheap_dylib_dir = None g_libheap_dylib_dict = dict() def get_iterate_memory_expr( options, process, user_init_code, user_return_code): expr = ''' typedef unsigned natural_t; typedef uintptr_t vm_size_t; typedef uintptr_t vm_address_t; typedef natural_t task_t; typedef int kern_return_t; #define KERN_SUCCESS 0 typedef void (*range_callback_t)(task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size); ''' if options.search_vm_regions: expr += ''' typedef int vm_prot_t; typedef unsigned int vm_inherit_t; typedef unsigned long long memory_object_offset_t; typedef unsigned int boolean_t; typedef int vm_behavior_t; typedef uint32_t vm32_object_id_t; typedef natural_t mach_msg_type_number_t; typedef uint64_t mach_vm_address_t; typedef uint64_t mach_vm_offset_t; typedef uint64_t mach_vm_size_t; typedef uint64_t vm_map_offset_t; typedef uint64_t vm_map_address_t; typedef uint64_t vm_map_size_t; #define VM_PROT_NONE ((vm_prot_t) 0x00) #define VM_PROT_READ ((vm_prot_t) 0x01) #define VM_PROT_WRITE ((vm_prot_t) 0x02) #define VM_PROT_EXECUTE ((vm_prot_t) 0x04) typedef struct vm_region_submap_short_info_data_64_t { vm_prot_t protection; vm_prot_t max_protection; vm_inherit_t inheritance; memory_object_offset_t offset; // offset into object/map unsigned int user_tag; // user tag on map entry unsigned int ref_count; // obj/map mappers, etc unsigned short shadow_depth; // only for obj unsigned char 
external_pager; // only for obj unsigned char share_mode; // see enumeration boolean_t is_submap; // submap vs obj vm_behavior_t behavior; // access behavior hint vm32_object_id_t object_id; // obj/map name, not a handle unsigned short user_wired_count; } vm_region_submap_short_info_data_64_t; #define VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 ((mach_msg_type_number_t)(sizeof(vm_region_submap_short_info_data_64_t)/sizeof(int)))''' if user_init_code: expr += user_init_code expr += ''' task_t task = (task_t)mach_task_self(); mach_vm_address_t vm_region_base_addr; mach_vm_size_t vm_region_size; natural_t vm_region_depth; vm_region_submap_short_info_data_64_t vm_region_info; kern_return_t err; for (vm_region_base_addr = 0, vm_region_size = 1; vm_region_size != 0; vm_region_base_addr += vm_region_size) { mach_msg_type_number_t vm_region_info_size = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; err = (kern_return_t)mach_vm_region_recurse (task, &vm_region_base_addr, &vm_region_size, &vm_region_depth, &vm_region_info, &vm_region_info_size); if (err) break; // Check all read + write regions. 
This will cover the thread stacks // and any regions of memory like __DATA segments, that might contain // data we are looking for if (vm_region_info.protection & VM_PROT_WRITE && vm_region_info.protection & VM_PROT_READ) { baton.callback (task, &baton, 64, vm_region_base_addr, vm_region_size); } }''' else: if options.search_stack: expr += get_thread_stack_ranges_struct(process) if options.search_segments: expr += get_sections_ranges_struct(process) if user_init_code: expr += user_init_code if options.search_heap: expr += ''' #define MALLOC_PTR_IN_USE_RANGE_TYPE 1 typedef struct vm_range_t { vm_address_t address; vm_size_t size; } vm_range_t; typedef kern_return_t (*memory_reader_t)(task_t task, vm_address_t remote_address, vm_size_t size, void **local_memory); typedef void (*vm_range_recorder_t)(task_t task, void *baton, unsigned type, vm_range_t *range, unsigned size); typedef struct malloc_introspection_t { kern_return_t (*enumerator)(task_t task, void *, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); /* enumerates all the malloc pointers in use */ } malloc_introspection_t; typedef struct malloc_zone_t { void *reserved1[12]; struct malloc_introspection_t *introspect; } malloc_zone_t; memory_reader_t task_peek = [](task_t task, vm_address_t remote_address, vm_size_t size, void **local_memory) -> kern_return_t { *local_memory = (void*) remote_address; return KERN_SUCCESS; }; vm_address_t *zones = 0; unsigned int num_zones = 0;task_t task = 0; kern_return_t err = (kern_return_t)malloc_get_all_zones (task, task_peek, &zones, &num_zones); if (KERN_SUCCESS == err) { for (unsigned int i=0; i<num_zones; ++i) { const malloc_zone_t *zone = (const malloc_zone_t *)zones[i]; if (zone && zone->introspect) zone->introspect->enumerator (task, &baton, MALLOC_PTR_IN_USE_RANGE_TYPE, (vm_address_t)zone, task_peek, [] (task_t task, void *baton, unsigned type, vm_range_t *ranges, unsigned size) -> void { range_callback_t callback 
= ((callback_baton_t *)baton)->callback; for (unsigned i=0; i<size; ++i) { callback (task, baton, type, ranges[i].address, ranges[i].size); } }); } }''' if options.search_stack: expr += ''' #ifdef NUM_STACKS // Call the callback for the thread stack ranges for (uint32_t i=0; i<NUM_STACKS; ++i) { range_callback(task, &baton, 8, stacks[i].base, stacks[i].size); if (STACK_RED_ZONE_SIZE > 0) { range_callback(task, &baton, 16, stacks[i].base - STACK_RED_ZONE_SIZE, STACK_RED_ZONE_SIZE); } } #endif''' if options.search_segments: expr += ''' #ifdef NUM_SEGMENTS // Call the callback for all segments for (uint32_t i=0; i<NUM_SEGMENTS; ++i) range_callback(task, &baton, 32, segments[i].base, segments[i].size); #endif''' if user_return_code: expr += "\n%s" % (user_return_code,) return expr def get_member_types_for_offset(value_type, offset, member_list): member = value_type.GetFieldAtIndex(0) search_bases = False if member: if member.GetOffsetInBytes() <= offset: for field_idx in range(value_type.GetNumberOfFields()): member = value_type.GetFieldAtIndex(field_idx) member_byte_offset = member.GetOffsetInBytes() member_end_byte_offset = member_byte_offset + member.type.size if member_byte_offset <= offset and offset < member_end_byte_offset: member_list.append(member) get_member_types_for_offset( member.type, offset - member_byte_offset, member_list) return else: search_bases = True else: search_bases = True if search_bases: for field_idx in range(value_type.GetNumberOfDirectBaseClasses()): member = value_type.GetDirectBaseClassAtIndex(field_idx) member_byte_offset = member.GetOffsetInBytes() member_end_byte_offset = member_byte_offset + member.type.size if member_byte_offset <= offset and offset < member_end_byte_offset: member_list.append(member) get_member_types_for_offset( member.type, offset - member_byte_offset, member_list) return for field_idx in range(value_type.GetNumberOfVirtualBaseClasses()): member = value_type.GetVirtualBaseClassAtIndex(field_idx) member_byte_offset 
= member.GetOffsetInBytes() member_end_byte_offset = member_byte_offset + member.type.size if member_byte_offset <= offset and offset < member_end_byte_offset: member_list.append(member) get_member_types_for_offset( member.type, offset - member_byte_offset, member_list) return def append_regex_callback(option, opt, value, parser): try: ivar_regex = re.compile(value) parser.values.ivar_regex_blacklist.append(ivar_regex) except: print('error: an exception was thrown when compiling the ivar regular expression for "%s"' % value) def add_common_options(parser): parser.add_option( '-v', '--verbose', action='store_true', dest='verbose', help='display verbose debug info', default=False) parser.add_option( '-t', '--type', action='store_true', dest='print_type', help='print the full value of the type for each matching malloc block', default=False) parser.add_option( '-o', '--po', action='store_true', dest='print_object_description', help='print the object descriptions for any matches', default=False) parser.add_option( '-z', '--size', action='store_true', dest='show_size', help='print the allocation size in bytes', default=False) parser.add_option( '-r', '--range', action='store_true', dest='show_range', help='print the allocation address range instead of just the allocation base address', default=False) parser.add_option( '-m', '--memory', action='store_true', dest='memory', help='dump the memory for each matching block', default=False) parser.add_option( '-f', '--format', type='string', dest='format', help='the format to use when dumping memory if --memory is specified', default=None) parser.add_option( '-I', '--omit-ivar-regex', type='string', action='callback', callback=append_regex_callback, dest='ivar_regex_blacklist', default=[], help='specify one or more regular expressions used to backlist any matches that are in ivars') parser.add_option( '-s', '--stack', action='store_true', dest='stack', help='gets the stack that allocated each malloc block if MallocStackLogging 
is enabled', default=False) parser.add_option( '-S', '--stack-history', action='store_true', dest='stack_history', help='gets the stack history for all allocations whose start address matches each malloc block if MallocStackLogging is enabled', default=False) parser.add_option( '-F', '--max-frames', type='int', dest='max_frames', help='the maximum number of stack frames to print when using the --stack or --stack-history options (default=128)', default=128) parser.add_option( '-H', '--max-history', type='int', dest='max_history', help='the maximum number of stack history backtraces to print for each allocation when using the --stack-history option (default=16)', default=16) parser.add_option( '-M', '--max-matches', type='int', dest='max_matches', help='the maximum number of matches to print', default=32) parser.add_option( '-O', '--offset', type='int', dest='offset', help='the matching data must be at this offset', default=-1) parser.add_option( '--ignore-stack', action='store_false', dest='search_stack', help="Don't search the stack when enumerating memory", default=True) parser.add_option( '--ignore-heap', action='store_false', dest='search_heap', help="Don't search the heap allocations when enumerating memory", default=True) parser.add_option( '--ignore-segments', action='store_false', dest='search_segments', help="Don't search readable executable segments enumerating memory", default=True) parser.add_option( '-V', '--vm-regions', action='store_true', dest='search_vm_regions', help='Check all VM regions instead of searching the heap, stack and segments', default=False) def type_flags_to_string(type_flags): if type_flags == 0: type_str = 'free' elif type_flags & 2: type_str = 'malloc' elif type_flags & 4: type_str = 'free' elif type_flags & 1: type_str = 'generic' elif type_flags & 8: type_str = 'stack' elif type_flags & 16: type_str = 'stack (red zone)' elif type_flags & 32: type_str = 'segment' elif type_flags & 64: type_str = 'vm_region' else: type_str = 
hex(type_flags) return type_str def find_variable_containing_address(verbose, frame, match_addr): variables = frame.GetVariables(True, True, True, True) matching_var = None for var in variables: var_addr = var.GetLoadAddress() if var_addr != lldb.LLDB_INVALID_ADDRESS: byte_size = var.GetType().GetByteSize() if verbose: print('frame #%u: [%#x - %#x) %s' % (frame.GetFrameID(), var.load_addr, var.load_addr + byte_size, var.name)) if var_addr == match_addr: if verbose: print('match') return var else: if byte_size > 0 and var_addr <= match_addr and match_addr < ( var_addr + byte_size): if verbose: print('match') return var return None def find_frame_for_stack_address(process, addr): closest_delta = sys.maxsize closest_frame = None # print 'find_frame_for_stack_address(%#x)' % (addr) for thread in process: prev_sp = lldb.LLDB_INVALID_ADDRESS for frame in thread: cfa = frame.GetCFA() # print 'frame #%u: cfa = %#x' % (frame.GetFrameID(), cfa) if addr < cfa: delta = cfa - addr # print '%#x < %#x, delta = %i' % (addr, cfa, delta) if delta < closest_delta: # print 'closest' closest_delta = delta closest_frame = frame # else: # print 'delta >= closest_delta' return closest_frame def type_flags_to_description( process, type_flags, ptr_addr, ptr_size, offset, match_addr): show_offset = False if type_flags == 0 or type_flags & 4: type_str = 'free(%#x)' % (ptr_addr,) elif type_flags & 2 or type_flags & 1: type_str = 'malloc(%6u) -> %#x' % (ptr_size, ptr_addr) show_offset = True elif type_flags & 8: type_str = 'stack' frame = find_frame_for_stack_address(process, match_addr) if frame: type_str += ' in frame #%u of thread #%u: tid %#x' % (frame.GetFrameID( ), frame.GetThread().GetIndexID(), frame.GetThread().GetThreadID()) variables = frame.GetVariables(True, True, True, True) matching_var = None for var in variables: var_addr = var.GetLoadAddress() if var_addr != lldb.LLDB_INVALID_ADDRESS: # print 'variable "%s" @ %#x (%#x)' % (var.name, var.load_addr, # match_addr) if var_addr == 
match_addr: matching_var = var break else: byte_size = var.GetType().GetByteSize() if byte_size > 0 and var_addr <= match_addr and match_addr < ( var_addr + byte_size): matching_var = var break if matching_var: type_str += ' in variable at %#x:\n %s' % ( matching_var.GetLoadAddress(), matching_var) elif type_flags & 16: type_str = 'stack (red zone)' elif type_flags & 32: sb_addr = process.GetTarget().ResolveLoadAddress(ptr_addr + offset) type_str = 'segment [%#x - %#x), %s + %u, %s' % ( ptr_addr, ptr_addr + ptr_size, sb_addr.section.name, sb_addr.offset, sb_addr) elif type_flags & 64: sb_addr = process.GetTarget().ResolveLoadAddress(ptr_addr + offset) type_str = 'vm_region [%#x - %#x), %s + %u, %s' % ( ptr_addr, ptr_addr + ptr_size, sb_addr.section.name, sb_addr.offset, sb_addr) else: type_str = '%#x' % (ptr_addr,) show_offset = True if show_offset and offset != 0: type_str += ' + %-6u' % (offset,) return type_str def dump_stack_history_entry(options, result, stack_history_entry, idx): address = int(stack_history_entry.address) if address: type_flags = int(stack_history_entry.type_flags) symbolicator = lldb.utils.symbolication.Symbolicator() symbolicator.target = lldb.debugger.GetSelectedTarget() type_str = type_flags_to_string(type_flags) result.AppendMessage( 'stack[%u]: addr = 0x%x, type=%s, frames:' % (idx, address, type_str)) frame_idx = 0 idx = 0 pc = int(stack_history_entry.frames[idx]) while pc != 0: if pc >= 0x1000: frames = symbolicator.symbolicate(pc) if frames: for frame in frames: result.AppendMessage( ' [%u] %s' % (frame_idx, frame)) frame_idx += 1 else: result.AppendMessage(' [%u] 0x%x' % (frame_idx, pc)) frame_idx += 1 idx = idx + 1 pc = int(stack_history_entry.frames[idx]) else: pc = 0 if idx >= options.max_frames: result.AppendMessage( 'warning: the max number of stack frames (%u) was reached, use the "--max-frames=<COUNT>" option to see more frames' % (options.max_frames)) result.AppendMessage('') def dump_stack_history_entries(options, result, 
addr, history): # malloc_stack_entry *get_stack_history_for_address (const void * addr) expr_prefix = ''' typedef int kern_return_t; typedef struct $malloc_stack_entry { uint64_t address; uint64_t argument; uint32_t type_flags; uint32_t num_frames; uint64_t frames[512]; kern_return_t err; } $malloc_stack_entry; ''' single_expr = ''' #define MAX_FRAMES %u typedef unsigned task_t; $malloc_stack_entry stack; stack.address = 0x%x; stack.type_flags = 2; stack.num_frames = 0; stack.frames[0] = 0; uint32_t max_stack_frames = MAX_FRAMES; stack.err = (kern_return_t)__mach_stack_logging_get_frames ( (task_t)mach_task_self(), stack.address, &stack.frames[0], max_stack_frames, &stack.num_frames); if (stack.num_frames < MAX_FRAMES) stack.frames[stack.num_frames] = 0; else stack.frames[MAX_FRAMES-1] = 0; stack''' % (options.max_frames, addr) history_expr = ''' typedef int kern_return_t; typedef unsigned task_t; #define MAX_FRAMES %u #define MAX_HISTORY %u typedef struct mach_stack_logging_record_t { uint32_t type_flags; uint64_t stack_identifier; uint64_t argument; uint64_t address; } mach_stack_logging_record_t; typedef void (*enumerate_callback_t)(mach_stack_logging_record_t, void *); typedef struct malloc_stack_entry { uint64_t address; uint64_t argument; uint32_t type_flags; uint32_t num_frames; uint64_t frames[MAX_FRAMES]; kern_return_t frames_err; } malloc_stack_entry; typedef struct $malloc_stack_history { task_t task; unsigned idx; malloc_stack_entry entries[MAX_HISTORY]; } $malloc_stack_history; $malloc_stack_history lldb_info = { (task_t)mach_task_self(), 0 }; uint32_t max_stack_frames = MAX_FRAMES; enumerate_callback_t callback = [] (mach_stack_logging_record_t stack_record, void *baton) -> void { $malloc_stack_history *lldb_info = ($malloc_stack_history *)baton; if (lldb_info->idx < MAX_HISTORY) { malloc_stack_entry *stack_entry = &(lldb_info->entries[lldb_info->idx]); stack_entry->address = stack_record.address; stack_entry->type_flags = stack_record.type_flags; 
stack_entry->argument = stack_record.argument; stack_entry->num_frames = 0; stack_entry->frames[0] = 0; stack_entry->frames_err = (kern_return_t)__mach_stack_logging_frames_for_uniqued_stack ( lldb_info->task, stack_record.stack_identifier, stack_entry->frames, (uint32_t)MAX_FRAMES, &stack_entry->num_frames); // Terminate the frames with zero if there is room if (stack_entry->num_frames < MAX_FRAMES) stack_entry->frames[stack_entry->num_frames] = 0; } ++lldb_info->idx; }; (kern_return_t)__mach_stack_logging_enumerate_records (lldb_info.task, (uint64_t)0x%x, callback, &lldb_info); lldb_info''' % (options.max_frames, options.max_history, addr) frame = lldb.debugger.GetSelectedTarget().GetProcess( ).GetSelectedThread().GetSelectedFrame() if history: expr = history_expr else: expr = single_expr expr_options = lldb.SBExpressionOptions() expr_options.SetIgnoreBreakpoints(True) expr_options.SetTimeoutInMicroSeconds(5 * 1000 * 1000) # 5 second timeout expr_options.SetTryAllThreads(True) expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus) expr_options.SetPrefix(expr_prefix) expr_sbvalue = frame.EvaluateExpression(expr, expr_options) if options.verbose: print("expression:") print(expr) print("expression result:") print(expr_sbvalue) if expr_sbvalue.error.Success(): if history: malloc_stack_history = lldb.value(expr_sbvalue) num_stacks = int(malloc_stack_history.idx) if num_stacks <= options.max_history: i_max = num_stacks else: i_max = options.max_history for i in range(i_max): stack_history_entry = malloc_stack_history.entries[i] dump_stack_history_entry( options, result, stack_history_entry, i) if num_stacks > options.max_history: result.AppendMessage( 'warning: the max number of stacks (%u) was reached, use the "--max-history=%u" option to see all of the stacks' % (options.max_history, num_stacks)) else: stack_history_entry = lldb.value(expr_sbvalue) dump_stack_history_entry(options, result, stack_history_entry, 0) else: result.AppendMessage( 'error: expression 
failed "%s" => %s' % (expr, expr_sbvalue.error)) def display_match_results( process, result, options, arg_str_description, expr, print_no_matches, expr_prefix=None): frame = lldb.debugger.GetSelectedTarget().GetProcess( ).GetSelectedThread().GetSelectedFrame() if not frame: result.AppendMessage('error: invalid frame') return 0 expr_options = lldb.SBExpressionOptions() expr_options.SetIgnoreBreakpoints(True) expr_options.SetFetchDynamicValue(lldb.eNoDynamicValues) expr_options.SetTimeoutInMicroSeconds( 30 * 1000 * 1000) # 30 second timeout expr_options.SetTryAllThreads(False) expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus) if expr_prefix: expr_options.SetPrefix(expr_prefix) expr_sbvalue = frame.EvaluateExpression(expr, expr_options) if options.verbose: print("expression:") print(expr) print("expression result:") print(expr_sbvalue) if expr_sbvalue.error.Success(): match_value = lldb.value(expr_sbvalue) i = 0 match_idx = 0 while True: print_entry = True match_entry = match_value[i] i += 1 if i > options.max_matches: result.AppendMessage( 'warning: the max number of matches (%u) was reached, use the --max-matches option to get more results' % (options.max_matches)) break malloc_addr = match_entry.addr.sbvalue.unsigned if malloc_addr == 0: break malloc_size = int(match_entry.size) offset = int(match_entry.offset) if options.offset >= 0 and options.offset != offset: print_entry = False else: match_addr = malloc_addr + offset type_flags = int(match_entry.type) #result.AppendMessage (hex(malloc_addr + offset)) if type_flags == 64: search_stack_old = options.search_stack search_segments_old = options.search_segments search_heap_old = options.search_heap search_vm_regions = options.search_vm_regions options.search_stack = True options.search_segments = True options.search_heap = True options.search_vm_regions = False if malloc_info_impl(lldb.debugger, result, options, [ hex(malloc_addr + offset)]): print_entry = False options.search_stack = search_stack_old 
options.search_segments = search_segments_old options.search_heap = search_heap_old options.search_vm_regions = search_vm_regions if print_entry: description = '%#16.16x: %s' % (match_addr, type_flags_to_description( process, type_flags, malloc_addr, malloc_size, offset, match_addr)) if options.show_size: description += ' <%5u>' % (malloc_size) if options.show_range: description += ' [%#x - %#x)' % ( malloc_addr, malloc_addr + malloc_size) derefed_dynamic_value = None dynamic_value = match_entry.addr.sbvalue.GetDynamicValue( lldb.eDynamicCanRunTarget) if dynamic_value.type.name == 'void *': if options.type == 'pointer' and malloc_size == 4096: error = lldb.SBError() process = expr_sbvalue.GetProcess() target = expr_sbvalue.GetTarget() data = bytearray( process.ReadMemory( malloc_addr, 16, error)) if data == '\xa1\xa1\xa1\xa1AUTORELEASE!': ptr_size = target.addr_size thread = process.ReadUnsignedFromMemory( malloc_addr + 16 + ptr_size, ptr_size, error) # 4 bytes 0xa1a1a1a1 # 12 bytes 'AUTORELEASE!' # ptr bytes autorelease insertion point # ptr bytes pthread_t # ptr bytes next colder page # ptr bytes next hotter page # 4 bytes this page's depth in the list # 4 bytes high-water mark description += ' AUTORELEASE! for pthread_t %#x' % ( thread) # else: # description += 'malloc(%u)' % (malloc_size) # else: # description += 'malloc(%u)' % (malloc_size) else: derefed_dynamic_value = dynamic_value.deref if derefed_dynamic_value: derefed_dynamic_type = derefed_dynamic_value.type derefed_dynamic_type_size = derefed_dynamic_type.size derefed_dynamic_type_name = derefed_dynamic_type.name description += ' ' description += derefed_dynamic_type_name if offset < derefed_dynamic_type_size: member_list = list() get_member_types_for_offset( derefed_dynamic_type, offset, member_list) if member_list: member_path = '' for member in member_list: member_name = member.name if member_name: if member_path: member_path += '.' 
member_path += member_name if member_path: if options.ivar_regex_blacklist: for ivar_regex in options.ivar_regex_blacklist: if ivar_regex.match( member_path): print_entry = False description += '.%s' % (member_path) else: description += '%u bytes after %s' % ( offset - derefed_dynamic_type_size, derefed_dynamic_type_name) else: # strip the "*" from the end of the name since we # were unable to dereference this description += dynamic_value.type.name[0:-1] if print_entry: match_idx += 1 result_output = '' if description: result_output += description if options.print_type and derefed_dynamic_value: result_output += ' %s' % (derefed_dynamic_value) if options.print_object_description and dynamic_value: desc = dynamic_value.GetObjectDescription() if desc: result_output += '\n%s' % (desc) if result_output: result.AppendMessage(result_output) if options.memory: cmd_result = lldb.SBCommandReturnObject() if options.format is None: memory_command = "memory read --force 0x%x 0x%x" % ( malloc_addr, malloc_addr + malloc_size) else: memory_command = "memory read --force -f %s 0x%x 0x%x" % ( options.format, malloc_addr, malloc_addr + malloc_size) if options.verbose: result.AppendMessage(memory_command) lldb.debugger.GetCommandInterpreter().HandleCommand(memory_command, cmd_result) result.AppendMessage(cmd_result.GetOutput()) if options.stack_history: dump_stack_history_entries(options, result, malloc_addr, 1) elif options.stack: dump_stack_history_entries(options, result, malloc_addr, 0) return i else: result.AppendMessage(str(expr_sbvalue.error)) return 0 def get_ptr_refs_options(): usage = "usage: %prog [options] <EXPR> [EXPR ...]" description = '''Searches all allocations on the heap for pointer values on darwin user space programs. 
Any matches that were found will dump the malloc blocks that contain the pointers and might be able to print what kind of objects the pointers are contained in using dynamic type information in the program.''' parser = optparse.OptionParser( description=description, prog='ptr_refs', usage=usage) add_common_options(parser) return parser def find_variable(debugger, command, result, dict): usage = "usage: %prog [options] <ADDR> [ADDR ...]" description = '''Searches for a local variable in all frames that contains a hex ADDR.''' command_args = shlex.split(command) parser = optparse.OptionParser( description=description, prog='find_variable', usage=usage) parser.add_option( '-v', '--verbose', action='store_true', dest='verbose', help='display verbose debug info', default=False) try: (options, args) = parser.parse_args(command_args) except: return process = debugger.GetSelectedTarget().GetProcess() if not process: result.AppendMessage('error: invalid process') return for arg in args: var_addr = int(arg, 16) print("Finding a variable with address %#x..." % (var_addr), file=result) done = False for thread in process: for frame in thread: var = find_variable_containing_address( options.verbose, frame, var_addr) if var: print(var) done = True break if done: break def ptr_refs(debugger, command, result, dict): command_args = shlex.split(command) parser = get_ptr_refs_options() try: (options, args) = parser.parse_args(command_args) except: return process = debugger.GetSelectedTarget().GetProcess() if not process: result.AppendMessage('error: invalid process') return frame = process.GetSelectedThread().GetSelectedFrame() if not frame: result.AppendMessage('error: invalid frame') return options.type = 'pointer' if options.format is None: options.format = "A" # 'A' is "address" format if args: # When we initialize the expression, we must define any types that # we will need when looking at every allocation. 
We must also define # a type named callback_baton_t and make an instance named "baton" # and initialize it how ever we want to. The address of "baton" will # be passed into our range callback. callback_baton_t must contain # a member named "callback" whose type is "range_callback_t". This # will be used by our zone callbacks to call the range callback for # each malloc range. expr_prefix = ''' struct $malloc_match { void *addr; uintptr_t size; uintptr_t offset; uintptr_t type; }; ''' user_init_code_format = ''' #define MAX_MATCHES %u typedef struct callback_baton_t { range_callback_t callback; unsigned num_matches; $malloc_match matches[MAX_MATCHES]; void *ptr; } callback_baton_t; range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void { callback_baton_t *lldb_info = (callback_baton_t *)baton; typedef void* T; const unsigned size = sizeof(T); T *array = (T*)ptr_addr; for (unsigned idx = 0; ((idx + 1) * sizeof(T)) <= ptr_size; ++idx) { if (array[idx] == lldb_info->ptr) { if (lldb_info->num_matches < MAX_MATCHES) { lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr; lldb_info->matches[lldb_info->num_matches].size = ptr_size; lldb_info->matches[lldb_info->num_matches].offset = idx*sizeof(T); lldb_info->matches[lldb_info->num_matches].type = type; ++lldb_info->num_matches; } } } }; callback_baton_t baton = { range_callback, 0, {0}, (void *)%s }; ''' # We must also define a snippet of code to be run that returns # the result of the expression we run. 
# Here we return NULL if our pointer was not found in any malloc blocks, # and we return the address of the matches array so we can then access # the matching results user_return_code = '''if (baton.num_matches < MAX_MATCHES) baton.matches[baton.num_matches].addr = 0; // Terminate the matches array baton.matches''' # Iterate through all of our pointer expressions and display the # results for ptr_expr in args: user_init_code = user_init_code_format % ( options.max_matches, ptr_expr) expr = get_iterate_memory_expr( options, process, user_init_code, user_return_code) arg_str_description = 'malloc block containing pointer %s' % ptr_expr display_match_results( process, result, options, arg_str_description, expr, True, expr_prefix) else: result.AppendMessage('error: no pointer arguments were given') def get_cstr_refs_options(): usage = "usage: %prog [options] <CSTR> [CSTR ...]" description = '''Searches all allocations on the heap for C string values on darwin user space programs. Any matches that were found will dump the malloc blocks that contain the C strings and might be able to print what kind of objects the pointers are contained in using dynamic type information in the program.''' parser = optparse.OptionParser( description=description, prog='cstr_refs', usage=usage) add_common_options(parser) return parser def cstr_refs(debugger, command, result, dict): command_args = shlex.split(command) parser = get_cstr_refs_options() try: (options, args) = parser.parse_args(command_args) except: return process = debugger.GetSelectedTarget().GetProcess() if not process: result.AppendMessage('error: invalid process') return frame = process.GetSelectedThread().GetSelectedFrame() if not frame: result.AppendMessage('error: invalid frame') return options.type = 'cstr' if options.format is None: options.format = "Y" # 'Y' is "bytes with ASCII" format if args: # When we initialize the expression, we must define any types that # we will need when looking at every allocation. 
We must also define # a type named callback_baton_t and make an instance named "baton" # and initialize it how ever we want to. The address of "baton" will # be passed into our range callback. callback_baton_t must contain # a member named "callback" whose type is "range_callback_t". This # will be used by our zone callbacks to call the range callback for # each malloc range. expr_prefix = ''' struct $malloc_match { void *addr; uintptr_t size; uintptr_t offset; uintptr_t type; }; ''' user_init_code_format = ''' #define MAX_MATCHES %u typedef struct callback_baton_t { range_callback_t callback; unsigned num_matches; $malloc_match matches[MAX_MATCHES]; const char *cstr; unsigned cstr_len; } callback_baton_t; range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void { callback_baton_t *lldb_info = (callback_baton_t *)baton; if (lldb_info->cstr_len < ptr_size) { const char *begin = (const char *)ptr_addr; const char *end = begin + ptr_size - lldb_info->cstr_len; for (const char *s = begin; s < end; ++s) { if ((int)memcmp(s, lldb_info->cstr, lldb_info->cstr_len) == 0) { if (lldb_info->num_matches < MAX_MATCHES) { lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr; lldb_info->matches[lldb_info->num_matches].size = ptr_size; lldb_info->matches[lldb_info->num_matches].offset = s - begin; lldb_info->matches[lldb_info->num_matches].type = type; ++lldb_info->num_matches; } } } } }; const char *cstr = "%s"; callback_baton_t baton = { range_callback, 0, {0}, cstr, (unsigned)strlen(cstr) };''' # We must also define a snippet of code to be run that returns # the result of the expression we run. 
# Here we return NULL if our pointer was not found in any malloc blocks, # and we return the address of the matches array so we can then access # the matching results user_return_code = '''if (baton.num_matches < MAX_MATCHES) baton.matches[baton.num_matches].addr = 0; // Terminate the matches array baton.matches''' # Iterate through all of our pointer expressions and display the # results for cstr in args: user_init_code = user_init_code_format % ( options.max_matches, cstr) expr = get_iterate_memory_expr( options, process, user_init_code, user_return_code) arg_str_description = 'malloc block containing "%s"' % cstr display_match_results( process, result, options, arg_str_description, expr, True, expr_prefix) else: result.AppendMessage( 'error: command takes one or more C string arguments') def get_malloc_info_options(): usage = "usage: %prog [options] <EXPR> [EXPR ...]" description = '''Searches the heap a malloc block that contains the addresses specified as one or more address expressions. Any matches that were found will dump the malloc blocks that match or contain the specified address. 
The matching blocks might be able to show what kind of objects they are using dynamic type information in the program.''' parser = optparse.OptionParser( description=description, prog='malloc_info', usage=usage) add_common_options(parser) return parser def malloc_info(debugger, command, result, dict): command_args = shlex.split(command) parser = get_malloc_info_options() try: (options, args) = parser.parse_args(command_args) except: return malloc_info_impl(debugger, result, options, args) def malloc_info_impl(debugger, result, options, args): # We are specifically looking for something on the heap only options.type = 'malloc_info' process = debugger.GetSelectedTarget().GetProcess() if not process: result.AppendMessage('error: invalid process') return frame = process.GetSelectedThread().GetSelectedFrame() if not frame: result.AppendMessage('error: invalid frame') return expr_prefix = ''' struct $malloc_match { void *addr; uintptr_t size; uintptr_t offset; uintptr_t type; }; ''' user_init_code_format = ''' typedef struct callback_baton_t { range_callback_t callback; unsigned num_matches; $malloc_match matches[2]; // Two items so they can be NULL terminated void *ptr; } callback_baton_t; range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void { callback_baton_t *lldb_info = (callback_baton_t *)baton; if (lldb_info->num_matches == 0) { uint8_t *p = (uint8_t *)lldb_info->ptr; uint8_t *lo = (uint8_t *)ptr_addr; uint8_t *hi = lo + ptr_size; if (lo <= p && p < hi) { lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr; lldb_info->matches[lldb_info->num_matches].size = ptr_size; lldb_info->matches[lldb_info->num_matches].offset = p - lo; lldb_info->matches[lldb_info->num_matches].type = type; lldb_info->num_matches = 1; } } }; callback_baton_t baton = { range_callback, 0, {0}, (void *)%s }; baton.matches[0].addr = 0; baton.matches[1].addr = 0;''' if args: total_matches = 0 for ptr_expr in 
args: user_init_code = user_init_code_format % (ptr_expr) expr = get_iterate_memory_expr( options, process, user_init_code, 'baton.matches') arg_str_description = 'malloc block that contains %s' % ptr_expr total_matches += display_match_results( process, result, options, arg_str_description, expr, True, expr_prefix) return total_matches else: result.AppendMessage( 'error: command takes one or more pointer expressions') return 0 def get_thread_stack_ranges_struct(process): '''Create code that defines a structure that represents threads stack bounds for all threads. It returns a static sized array initialized with all of the tid, base, size structs for all the threads.''' stack_dicts = list() if process: i = 0 for thread in process: min_sp = thread.frame[0].sp max_sp = min_sp for frame in thread.frames: sp = frame.sp if sp < min_sp: min_sp = sp if sp > max_sp: max_sp = sp if min_sp < max_sp: stack_dicts.append({'tid': thread.GetThreadID( ), 'base': min_sp, 'size': max_sp - min_sp, 'index': i}) i += 1 stack_dicts_len = len(stack_dicts) if stack_dicts_len > 0: result = ''' #define NUM_STACKS %u #define STACK_RED_ZONE_SIZE %u typedef struct thread_stack_t { uint64_t tid, base, size; } thread_stack_t; thread_stack_t stacks[NUM_STACKS];''' % (stack_dicts_len, process.target.GetStackRedZoneSize()) for stack_dict in stack_dicts: result += ''' stacks[%(index)u].tid = 0x%(tid)x; stacks[%(index)u].base = 0x%(base)x; stacks[%(index)u].size = 0x%(size)x;''' % stack_dict return result else: return '' def get_sections_ranges_struct(process): '''Create code that defines a structure that represents all segments that can contain data for all images in "target". 
It returns a static sized array initialized with all of base, size structs for all the threads.''' target = process.target segment_dicts = list() for (module_idx, module) in enumerate(target.modules): for sect_idx in range(module.GetNumSections()): section = module.GetSectionAtIndex(sect_idx) if not section: break name = section.name if name != '__TEXT' and name != '__LINKEDIT' and name != '__PAGEZERO': base = section.GetLoadAddress(target) size = section.GetByteSize() if base != lldb.LLDB_INVALID_ADDRESS and size > 0: segment_dicts.append({'base': base, 'size': size}) segment_dicts_len = len(segment_dicts) if segment_dicts_len > 0: result = ''' #define NUM_SEGMENTS %u typedef struct segment_range_t { uint64_t base; uint32_t size; } segment_range_t; segment_range_t segments[NUM_SEGMENTS];''' % (segment_dicts_len,) for (idx, segment_dict) in enumerate(segment_dicts): segment_dict['index'] = idx result += ''' segments[%(index)u].base = 0x%(base)x; segments[%(index)u].size = 0x%(size)x;''' % segment_dict return result else: return '' def section_ptr_refs(debugger, command, result, dict): command_args = shlex.split(command) usage = "usage: %prog [options] <EXPR> [EXPR ...]" description = '''Searches section contents for pointer values in darwin user space programs.''' parser = optparse.OptionParser( description=description, prog='section_ptr_refs', usage=usage) add_common_options(parser) parser.add_option( '--section', action='append', type='string', dest='section_names', help='section name to search', default=list()) try: (options, args) = parser.parse_args(command_args) except: return options.type = 'pointer' sections = list() section_modules = list() if not options.section_names: result.AppendMessage( 'error: at least one section must be specified with the --section option') return target = debugger.GetSelectedTarget() for module in target.modules: for section_name in options.section_names: section = module.section[section_name] if section: sections.append(section) 
section_modules.append(module) if sections: dylid_load_err = load_dylib() if dylid_load_err: result.AppendMessage(dylid_load_err) return frame = target.GetProcess().GetSelectedThread().GetSelectedFrame() for expr_str in args: for (idx, section) in enumerate(sections): expr = 'find_pointer_in_memory(0x%xllu, %ullu, (void *)%s)' % ( section.addr.load_addr, section.size, expr_str) arg_str_description = 'section %s.%s containing "%s"' % ( section_modules[idx].file.fullpath, section.name, expr_str) num_matches = display_match_results( target.GetProcess(), result, options, arg_str_description, expr, False) if num_matches: if num_matches < options.max_matches: options.max_matches = options.max_matches - num_matches else: options.max_matches = 0 if options.max_matches == 0: return else: result.AppendMessage( 'error: no sections were found that match any of %s' % (', '.join( options.section_names))) def get_objc_refs_options(): usage = "usage: %prog [options] <CLASS> [CLASS ...]" description = '''Searches all allocations on the heap for instances of objective C classes, or any classes that inherit from the specified classes in darwin user space programs. 
Any matches that were found will dump the malloc blocks that contain the C strings and might be able to print what kind of objects the pointers are contained in using dynamic type information in the program.''' parser = optparse.OptionParser( description=description, prog='objc_refs', usage=usage) add_common_options(parser) return parser def objc_refs(debugger, command, result, dict): command_args = shlex.split(command) parser = get_objc_refs_options() try: (options, args) = parser.parse_args(command_args) except: return process = debugger.GetSelectedTarget().GetProcess() if not process: result.AppendMessage('error: invalid process') return frame = process.GetSelectedThread().GetSelectedFrame() if not frame: result.AppendMessage('error: invalid frame') return options.type = 'isa' if options.format is None: options.format = "A" # 'A' is "address" format expr_options = lldb.SBExpressionOptions() expr_options.SetIgnoreBreakpoints(True) expr_options.SetTimeoutInMicroSeconds( 3 * 1000 * 1000) # 3 second infinite timeout expr_options.SetTryAllThreads(True) expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus) num_objc_classes_value = frame.EvaluateExpression( "(int)objc_getClassList((void *)0, (int)0)", expr_options) if not num_objc_classes_value.error.Success(): result.AppendMessage('error: %s' % num_objc_classes_value.error.GetCString()) return num_objc_classes = num_objc_classes_value.GetValueAsUnsigned() if num_objc_classes == 0: result.AppendMessage('error: no objective C classes in program') return if args: # When we initialize the expression, we must define any types that # we will need when looking at every allocation. We must also define # a type named callback_baton_t and make an instance named "baton" # and initialize it how ever we want to. The address of "baton" will # be passed into our range callback. callback_baton_t must contain # a member named "callback" whose type is "range_callback_t". 
This # will be used by our zone callbacks to call the range callback for # each malloc range. expr_prefix = ''' struct $malloc_match { void *addr; uintptr_t size; uintptr_t offset; uintptr_t type; }; ''' user_init_code_format = ''' #define MAX_MATCHES %u typedef int (*compare_callback_t)(const void *a, const void *b); typedef struct callback_baton_t { range_callback_t callback; compare_callback_t compare_callback; unsigned num_matches; $malloc_match matches[MAX_MATCHES]; void *isa; Class classes[%u]; } callback_baton_t; compare_callback_t compare_callback = [](const void *a, const void *b) -> int { Class a_ptr = *(Class *)a; Class b_ptr = *(Class *)b; if (a_ptr < b_ptr) return -1; if (a_ptr > b_ptr) return +1; return 0; }; typedef Class (*class_getSuperclass_type)(void *isa); range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void { class_getSuperclass_type class_getSuperclass_impl = (class_getSuperclass_type)class_getSuperclass; callback_baton_t *lldb_info = (callback_baton_t *)baton; if (sizeof(Class) <= ptr_size) { Class *curr_class_ptr = (Class *)ptr_addr; Class *matching_class_ptr = (Class *)bsearch (curr_class_ptr, (const void *)lldb_info->classes, sizeof(lldb_info->classes)/sizeof(Class), sizeof(Class), lldb_info->compare_callback); if (matching_class_ptr) { bool match = false; if (lldb_info->isa) { Class isa = *curr_class_ptr; if (lldb_info->isa == isa) match = true; else { // if (lldb_info->objc.match_superclasses) { Class super = class_getSuperclass_impl(isa); while (super) { if (super == lldb_info->isa) { match = true; break; } super = class_getSuperclass_impl(super); } } } else match = true; if (match) { if (lldb_info->num_matches < MAX_MATCHES) { lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr; lldb_info->matches[lldb_info->num_matches].size = ptr_size; lldb_info->matches[lldb_info->num_matches].offset = 0; lldb_info->matches[lldb_info->num_matches].type = type; 
++lldb_info->num_matches; } } } } }; callback_baton_t baton = { range_callback, compare_callback, 0, {0}, (void *)0x%x, {0} }; int nc = (int)objc_getClassList(baton.classes, sizeof(baton.classes)/sizeof(Class)); (void)qsort (baton.classes, sizeof(baton.classes)/sizeof(Class), sizeof(Class), compare_callback);''' # We must also define a snippet of code to be run that returns # the result of the expression we run. # Here we return NULL if our pointer was not found in any malloc blocks, # and we return the address of the matches array so we can then access # the matching results user_return_code = '''if (baton.num_matches < MAX_MATCHES) baton.matches[baton.num_matches].addr = 0; // Terminate the matches array baton.matches''' # Iterate through all of our ObjC class name arguments for class_name in args: addr_expr_str = "(void *)[%s class]" % class_name expr_options = lldb.SBExpressionOptions() expr_options.SetIgnoreBreakpoints(True) expr_options.SetTimeoutInMicroSeconds( 1 * 1000 * 1000) # 1 second timeout expr_options.SetTryAllThreads(True) expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus) expr_sbvalue = frame.EvaluateExpression( addr_expr_str, expr_options) if expr_sbvalue.error.Success(): isa = expr_sbvalue.unsigned if isa: options.type = 'isa' result.AppendMessage( 'Searching for all instances of classes or subclasses of "%s" (isa=0x%x)' % (class_name, isa)) user_init_code = user_init_code_format % ( options.max_matches, num_objc_classes, isa) expr = get_iterate_memory_expr( options, process, user_init_code, user_return_code) arg_str_description = 'objective C classes with isa 0x%x' % isa display_match_results( process, result, options, arg_str_description, expr, True, expr_prefix) else: result.AppendMessage( 'error: Can\'t find isa for an ObjC class named "%s"' % (class_name)) else: result.AppendMessage( 'error: expression error for "%s": %s' % (addr_expr_str, expr_sbvalue.error)) else: result.AppendMessage( 'error: command takes one or more C string 
arguments') if __name__ == '__main__': lldb.debugger = lldb.SBDebugger.Create() # Make the options so we can generate the help text for the new LLDB # command line command prior to registering it with LLDB below. This way # if clients in LLDB type "help malloc_info", they will see the exact same # output as typing "malloc_info --help". ptr_refs.__doc__ = get_ptr_refs_options().format_help() cstr_refs.__doc__ = get_cstr_refs_options().format_help() malloc_info.__doc__ = get_malloc_info_options().format_help() objc_refs.__doc__ = get_objc_refs_options().format_help() lldb.debugger.HandleCommand( 'command script add -f %s.ptr_refs ptr_refs' % __name__) lldb.debugger.HandleCommand( 'command script add -f %s.cstr_refs cstr_refs' % __name__) lldb.debugger.HandleCommand( 'command script add -f %s.malloc_info malloc_info' % __name__) lldb.debugger.HandleCommand( 'command script add -f %s.find_variable find_variable' % __name__) # lldb.debugger.HandleCommand('command script add -f %s.heap heap' % package_name) # lldb.debugger.HandleCommand('command script add -f %s.section_ptr_refs section_ptr_refs' % package_name) # lldb.debugger.HandleCommand('command script add -f %s.stack_ptr_refs stack_ptr_refs' % package_name) lldb.debugger.HandleCommand( 'command script add -f %s.objc_refs objc_refs' % __name__) print('"malloc_info", "ptr_refs", "cstr_refs", "find_variable", and "objc_refs" commands have been installed, use the "--help" options on these commands for detailed help.')
endlessm/chromium-browser
third_party/llvm/lldb/examples/darwin/heap_find/heap.py
Python
bsd-3-clause
61,273
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. import 'chrome://resources/cr_elements/md_select_css.m.js'; import './print_preview_shared_css.js'; import './settings_section.js'; import {PolymerElement} from 'chrome://resources/polymer/v3_0/polymer/polymer_bundled.min.js'; import {getTemplate} from './color_settings.html.js'; import {SelectMixin} from './select_mixin.js'; import {SettingsMixin} from './settings_mixin.js'; const PrintPreviewColorSettingsElementBase = SettingsMixin(SelectMixin(PolymerElement)); export class PrintPreviewColorSettingsElement extends PrintPreviewColorSettingsElementBase { static get is() { return 'print-preview-color-settings'; } static get template() { return getTemplate(); } static get properties() { return { disabled: Boolean, disabled_: { type: Boolean, computed: 'computeDisabled_(disabled, settings.color.setByPolicy)', }, }; } static get observers() { return ['onColorSettingChange_(settings.color.value)']; } disabled: boolean; private disabled_: boolean; private onColorSettingChange_(newValue: boolean) { this.selectedValue = newValue ? 'color' : 'bw'; } /** * @param disabled Whether color selection is disabled. * @param managed Whether color selection is managed. * @return Whether drop-down should be disabled. */ private computeDisabled_(disabled: boolean, managed: boolean): boolean { return disabled || managed; } /** @param value The new select value. */ onProcessSelectChange(value: string) { this.setSetting('color', value === 'color'); } } declare global { interface HTMLElementTagNameMap { 'print-preview-color-settings': PrintPreviewColorSettingsElement; } } customElements.define( PrintPreviewColorSettingsElement.is, PrintPreviewColorSettingsElement);
chromium/chromium
chrome/browser/resources/print_preview/ui/color_settings.ts
TypeScript
bsd-3-clause
1,983
<?php /** * ezcCacheStorageFileEvalArrayTest * * @package Cache * @subpackage Tests * @version 1.5 * @copyright Copyright (C) 2005-2009 eZ Systems AS. All rights reserved. * @license http://ez.no/licenses/new_bsd New BSD License */ /** * Require parent test class. */ require_once 'storage_test.php'; /** * Test suite for ezcStorageFileEvalArray class. * * @package Cache * @subpackage Tests */ class ezcCacheStorageFileEvalArrayTest extends ezcCacheStorageTest { public static function suite() { return new PHPUnit_Framework_TestSuite( "ezcCacheStorageFileEvalArrayTest" ); } } ?>
faclib/ezcomponents
Cache/tests/storage_file_evalarray_test.php
PHP
bsd-3-clause
609
package org.apollo.game.message.impl; import org.apollo.net.message.Message; /** * A {@link Message} sent to the client to open up the enter amount interface. * * @author Graham */ public final class EnterAmountMessage extends Message { }
garyttierney/apollo
game/src/main/org/apollo/game/message/impl/EnterAmountMessage.java
Java
isc
245
require 'spec_helper' describe Gitlab::GithubImport::LabelFormatter, lib: true do let(:project) { create(:project) } let(:raw) { double(name: 'improvements', color: 'e6e6e6') } subject { described_class.new(project, raw) } describe '#attributes' do it 'returns formatted attributes' do expect(subject.attributes).to eq({ project: project, title: 'improvements', color: '#e6e6e6' }) end end describe '#create!' do context 'when label does not exist' do it 'creates a new label' do expect { subject.create! }.to change(Label, :count).by(1) end end context 'when label exists' do it 'does not create a new label' do project.labels.create(name: raw.name) expect { subject.create! }.not_to change(Label, :count) end end end end
shinexiao/gitlabhq
spec/lib/gitlab/github_import/label_formatter_spec.rb
Ruby
mit
851
// Copyright 2009 the Sputnik authors. All rights reserved. // This code is governed by the BSD license found in the LICENSE file. /** * @name: S15.9.5.11_A2_T1; * @section: 15.9.5.11; * @assertion: The "length" property of the "getUTCFullYear" is 0; * @description: The "length" property of the "getUTCFullYear" is 0; */ if(Date.prototype.getUTCFullYear.hasOwnProperty("length") !== true){ $ERROR('#1: The getUTCFullYear has a "length" property'); } if(Date.prototype.getUTCFullYear.length !== 0){ $ERROR('#2: The "length" property of the getUTCFullYear is 0'); }
seraum/nectarjs
tests/ES3/Conformance/15_Native_ECMA_Script_Objects/15.9_Date_Objects/15.9.5_Properties_of_the_Date_Prototype_Object/15.9.5.11_Date.prototype.getUTCFullYear/S15.9.5.11_A2_T1.js
JavaScript
mit
579
/* Copyright (c) 2015, Marc Clifton All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of MyXaml nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Text; using System.Threading.Tasks; namespace Clifton.WebServer { /// <summary> /// A route entry, consisting of optional session, authorization, and routing providers. 
/// </summary> public class RouteEntry { public Func<WorkflowContinuation<ContextWrapper>, ContextWrapper, Session, PathParams, WorkflowState> SessionExpirationHandler; public Func<WorkflowContinuation<ContextWrapper>, ContextWrapper, Session, PathParams, WorkflowState> AuthorizationHandler; public Func<WorkflowContinuation<ContextWrapper>, ContextWrapper, Session, PathParams, WorkflowState> RouteHandler; } }
cliftonm/WebServersSuccinctly
Examples/Chapter 9/Clifton.WebServer/RouteEntry.cs
C#
mit
2,183
class Admin::PublicationsController < Admin::EditionsController before_filter :pre_fill_edition_from_statistics_announcement, only: :new, if: :statistics_announcement private def edition_class Publication end def permitted_edition_attributes super << :statistics_announcement_id end def pre_fill_edition_from_statistics_announcement @edition.statistics_announcement_id = statistics_announcement.id @edition.title = statistics_announcement.title @edition.summary = statistics_announcement.summary @edition.publication_type = statistics_announcement.publication_type @edition.topics = statistics_announcement.topics @edition.scheduled_publication = statistics_announcement.release_date @edition.previously_published = "false" end def statistics_announcement if params[:statistics_announcement_id] @statistics_announcement ||= StatisticsAnnouncement.friendly.find(params[:statistics_announcement_id]) end end end
YOTOV-LIMITED/whitehall
app/controllers/admin/publications_controller.rb
Ruby
mit
985
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@magento.com so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magento.com for more information. * * @category Tests * @package Tests_Functional * @copyright Copyright (c) 2006-2016 X.commerce, Inc. and affiliates (http://www.magento.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ namespace Mage\Directory\Test\Block\Currency; use Mage\CurrencySymbol\Test\Fixture\CurrencySymbolEntity; use Magento\Mtf\Block\Block; use Magento\Mtf\Client\Locator; /** * Switcher Currency Symbol. */ class Switcher extends Block { /** * Currency switch locator. * * @var string */ protected $currencySwitch = '#select-currency'; /** * Selected Currency switch locator. * * @var string */ protected $currencySwitchSelected = '#select-currency [selected="selected"]'; /** * Switch currency to specified one. * * @param CurrencySymbolEntity $currencySymbol * @return void */ public function switchCurrency(CurrencySymbolEntity $currencySymbol) { $this->waitForElementVisible($this->currencySwitch); $customCurrencySwitch = explode(" - ", $this->_rootElement->find($this->currencySwitchSelected)->getText()); $currencyCode = $currencySymbol->getCode(); if ($customCurrencySwitch[1] !== $currencyCode) { $this->_rootElement->find($this->currencySwitch, Locator::SELECTOR_CSS, 'select') ->setValue($currencyCode); } } }
hansbonini/cloud9-magento
www/dev/tests/functional/tests/app/Mage/Directory/Test/Block/Currency/Switcher.php
PHP
mit
2,153
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@magento.com so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magento.com for more information. * * @category Mage * @package Mage_Dataflow * @copyright Copyright (c) 2006-2016 X.commerce, Inc. and affiliates (http://www.magento.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ /** * Import collection * * @category Mage * @package Mage_Dataflow * @author Magento Core Team <core@magentocommerce.com> */ class Mage_Dataflow_Model_Resource_Import_Collection extends Mage_Core_Model_Resource_Db_Collection_Abstract { /** * Define resource model and model * */ protected function _construct() { $this->_init('dataflow/import'); } }
hansbonini/cloud9-magento
www/app/code/core/Mage/Dataflow/Model/Resource/Import/Collection.php
PHP
mit
1,372
<?php CM_Db_Db::exec('ALTER TABLE `cm_actionLimit` CHANGE `actionType` `actionType` INT UNSIGNED DEFAULT NULL');
zazabe/cm
resources/db/update/35.php
PHP
mit
114
// Spart License (zlib/png) // // // Copyright (c) 2003 Jonathan de Halleux // // This software is provided 'as-is', without any express or implied warranty. // In no event will the authors be held liable for any damages arising from // the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it // freely, subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; you must not // claim that you wrote the original software. If you use this software in a // product, an acknowledgment in the product documentation would be // appreciated but is not required. // // 2. Altered source versions must be plainly marked as such, and must not be // misrepresented as being the original software. // // 3. This notice may not be removed or altered from any source distribution. // // Author: Jonathan de Halleuxusing System; using System; using System.Collections; namespace Spart.Actions { /// <summary> /// Static helper class that creates actors /// </summary> public class ActionHandlers { /// <summary> /// Create an actor that append the parse result to a <see cref="IList"/>. /// </summary> /// <param name="list"></param> /// <returns></returns> public static ActionHandler Append(IList list) { return delegate(Object sender, ActionEventArgs args) { list.Add(args.Value); }; } /// <summary> /// Creates an actor that throws an exception /// </summary> /// <param name="ex"></param> /// <returns></returns> public static ActionHandler Throw(Exception ex) { return delegate { throw ex; }; } } }
darcywong00/libpalaso
Palaso/Spart/Actions/Actions.cs
C#
mit
1,724
define('lodash/object/get', ['exports', 'lodash/internal/baseGet', 'lodash/internal/toPath'], function (exports, _lodashInternalBaseGet, _lodashInternalToPath) { 'use strict'; /** * Gets the property value at `path` of `object`. If the resolved value is * `undefined` the `defaultValue` is used in its place. * * @static * @memberOf _ * @category Object * @param {Object} object The object to query. * @param {Array|string} path The path of the property to get. * @param {*} [defaultValue] The value returned if the resolved value is `undefined`. * @returns {*} Returns the resolved value. * @example * * var object = { 'a': [{ 'b': { 'c': 3 } }] }; * * _.get(object, 'a[0].b.c'); * // => 3 * * _.get(object, ['a', '0', 'b', 'c']); * // => 3 * * _.get(object, 'a.b.c', 'default'); * // => 'default' */ function get(object, path, defaultValue) { var result = object == null ? undefined : (0, _lodashInternalBaseGet['default'])(object, (0, _lodashInternalToPath['default'])(path), path + ''); return result === undefined ? defaultValue : result; } exports['default'] = get; });
hoka-plus/p-01-web
tmp/babel-output_path-hOv4KMmE.tmp/lodash/object/get.js
JavaScript
mit
1,164
<?php /** * This file is part of CodeIgniter 4 framework. * * (c) CodeIgniter Foundation <admin@codeigniter.com> * * For the full copyright and license information, please view * the LICENSE file that was distributed with this source code. */ namespace CodeIgniter\Test\Interfaces; use Faker\Generator; use ReflectionException; /** * FabricatorModel * * An interface defining the required methods and properties * needed for a model to qualify for use with the Fabricator class. * While interfaces cannot enforce properties, the following * are required for use with Fabricator: * * @property string $returnType * @property string $primaryKey * @property string $dateFormat */ interface FabricatorModel { /** * Fetches the row of database from $this->table with a primary key * matching $id. * * @param array|mixed|null $id One primary key or an array of primary keys * * @return array|object|null The resulting row of data, or null. */ public function find($id = null); /** * Inserts data into the current table. If an object is provided, * it will attempt to convert it to an array. * * @param array|object $data * @param bool $returnID Whether insert ID should be returned or not. * * @throws ReflectionException * * @return bool|int|string */ public function insert($data = null, bool $returnID = true); /** * The following properties and methods are optional, but if present should * adhere to their definitions. * * @property array $allowedFields * @property string $useSoftDeletes * @property string $useTimestamps * @property string $createdField * @property string $updatedField * @property string $deletedField */ /* * Sets $useSoftDeletes value so that we can temporarily override * the softdeletes settings. Can be used for all find* methods. * * @param boolean $val * * @return Model */ // public function withDeleted($val = true); /** * Faked data for Fabricator. * * @param Generator $faker * * @return array|object */ // public function fake(Generator &$faker); }
bcit-ci/CodeIgniter4
system/Test/Interfaces/FabricatorModel.php
PHP
mit
2,251
/* ==================================================================== Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================================================== */ package org.apache.poi.poifs.storage; import java.io.IOException; /** * An interface for blocks managed by a list that works with a * BlockAllocationTable to keep block sequences straight * * @author Marc Johnson (mjohnson at apache dot org */ public interface ListManagedBlock { /** * Get the data from the block * * @return the block's data as a byte array * * @exception IOException if there is no data */ public byte [] getData() throws IOException; } // end public interface ListManagedBlock
tobyclemson/msci-project
vendor/poi-3.6/src/java/org/apache/poi/poifs/storage/ListManagedBlock.java
Java
mit
1,502
CfContainersBroker::Application.configure do # Settings specified here will take precedence over those in config/application.rb. # The test environment is used exclusively to run your application's # test suite. You never need to work with it otherwise. Remember that # your test database is "scratch space" for the test suite and is wiped # and recreated between test runs. Don't rely on the data there! config.cache_classes = true # Do not eager load code on boot. This avoids loading your whole application # just for the purpose of running a single test. If you are using a tool that # preloads Rails for running tests, you may have to set it to true. config.eager_load = false # Configure static asset server for tests with Cache-Control for performance. config.serve_static_files = true config.static_cache_control = 'public, max-age=3600' # Show full error reports and disable caching. config.consider_all_requests_local = true config.action_controller.perform_caching = false # Raise exceptions instead of rendering exception templates. config.action_dispatch.show_exceptions = false # Disable request forgery protection in test environment. config.action_controller.allow_forgery_protection = false # Tell Action Mailer not to deliver emails to the real world. # The :test delivery method accumulates sent emails in the # ActionMailer::Base.deliveries array. # config.action_mailer.delivery_method = :test # Print deprecation notices to the stderr. config.active_support.deprecation = :stderr end
dynatrace-innovationlab/easyTravel-Cloud-Foundry
deploy/config/cf-containers-broker/environments/test.rb
Ruby
mit
1,573
#!/usr/bin/env python # *-* coding: UTF-8 *-* """Împăratul a primit serie de mesaje importante pe care este important să le descifreze cât mai repede. Din păcate mesagerul nu a apucat să îi spună împăratul care au fost cheile alese pentru fiecare mesaj și tu ai fost ales să descifrezi misterul. Informații: În criptografie, cifrul lui Caesar este o metodă simplă de a cripta un mesaj prin înlocuirea fiecărei litere cu litera de pe poziția aflată la un n pași de ea în alfabet (unde este n este un număr întreg cunoscut """ # existau 2 variante de a rezolva problema cu parantezele la print # am preferat sa o folosesc pe asta pentru a evita si eventualele probleme # cu care ziceai tu ca o sa ne stresezi ;) from __future__ import print_function LETTERS = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" # tot timpul se va gasi litera in string-ul "LETTERS" # deci circularitatea e suficient # reprezentata prin a-z de doua ori def shift_letter(let, number): """Shifts a letter by number places in LETTERS""" if let.isalpha(): # procesam doar literele return LETTERS[ord(let) - 97 + number] # returnam litera de peste n locuri in LETTERS else: return let # daca nu e litera, returnam caracterul original def decripteaza(mesaj, number): """Decrypts every line in <mesaj>""" new_msg = "" for char in mesaj: new_msg += shift_letter(char, number) if "ave" in new_msg: print(new_msg) def main(): """Have a main docstring, pylint""" try: fisier = open("mesaje.secret", "r") mesaje = fisier.read() fisier.close() except IOError: print("Nu am putut obține mesajele.") return for mesaj in mesaje.splitlines(): for i in range(26): decripteaza(mesaj, i) if __name__ == "__main__": main()
iulianbute/labs
python/solutii/alex_mitan/caesar.py
Python
mit
1,892
import React, { cloneElement } from 'react'; import classNames from 'classnames'; import ValidComponentChildren from './utils/ValidComponentChildren'; class ListGroup extends React.Component { render() { let items = ValidComponentChildren.map( this.props.children, (item, index) => cloneElement(item, { key: item.key ? item.key : index }) ); let shouldRenderDiv = false; if (!this.props.children) { shouldRenderDiv = true; } else { React.Children.forEach(this.props.children, (child) => { if (this.isAnchorOrButton(child.props)) { shouldRenderDiv = true; } }); } if (shouldRenderDiv) { return this.renderDiv(items); } else { return this.renderUL(items); } } isAnchorOrButton(props) { return (props.href || props.onClick); } renderUL(items) { let listItems = ValidComponentChildren.map(items, (item) => cloneElement(item, { listItem: true }) ); return ( <ul {...this.props} className={classNames(this.props.className, 'list-group')}> {listItems} </ul> ); } renderDiv(items) { return ( <div {...this.props} className={classNames(this.props.className, 'list-group')}> {items} </div> ); } } ListGroup.propTypes = { className: React.PropTypes.string, id: React.PropTypes.oneOfType([ React.PropTypes.string, React.PropTypes.number ]) }; export default ListGroup;
jontewks/react-bootstrap
src/ListGroup.js
JavaScript
mit
1,507
/* * Copyright (c) 2009 WiQuery team * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package org.odlabs.wiquery.ui.progressbar; import org.apache.wicket.ajax.AjaxRequestTarget; import org.apache.wicket.markup.html.IHeaderResponse; import org.apache.wicket.markup.html.WebMarkupContainer; import org.odlabs.wiquery.core.IWiQueryPlugin; import org.odlabs.wiquery.core.javascript.JsQuery; import org.odlabs.wiquery.core.javascript.JsStatement; import org.odlabs.wiquery.core.options.Options; import org.odlabs.wiquery.ui.commons.WiQueryUIPlugin; import org.odlabs.wiquery.ui.core.JsScopeUiEvent; import org.odlabs.wiquery.ui.options.UiOptionsRenderer; import org.odlabs.wiquery.ui.widget.WidgetJavaScriptResourceReference; /** * $Id$ * <p> * Creates a progressBar UI component from this {@link WebMarkupContainer}'s HTML markup. 
* </p> * * @author Lionel Armanet * @since 1.0 */ @WiQueryUIPlugin public class ProgressBar extends WebMarkupContainer implements IWiQueryPlugin { // Constants /** Constant of serialization */ private static final long serialVersionUID = 8268721447610956664L; // Properties private Options options; /** * Builds a new progress bar. */ public ProgressBar(String id) { super(id); this.options = new Options(this); this.options.setRenderer(new UiOptionsRenderer("progressbar", this)); } @Override protected void detachModel() { super.detachModel(); options.detach(); } @Override public void renderHead(IHeaderResponse response) { response.renderJavaScriptReference(WidgetJavaScriptResourceReference.get()); response.renderJavaScriptReference(ProgressBarJavaScriptResourceReference.get()); } public JsStatement statement() { JsStatement componentStatement = new JsQuery(this).$().chain("progressbar"); JsStatement wholeStatement = new JsStatement(); wholeStatement.append(componentStatement.render()); wholeStatement.append(options.getJavaScriptOptions()); return wholeStatement; } /** * Method retrieving the options of the component * * @return the options */ protected Options getOptions() { return options; } public JsStatement update() { JsStatement wholeStatement = new JsStatement(); wholeStatement.append(options.getJavaScriptOptions()); return wholeStatement; } /*---- Options section ---*/ /** * Disables (true) or enables (false) the progressBar. Can be set when initialising * (first creating) the progressBar. 
* * @param disabled * @return instance of the current behavior */ public ProgressBar setDisabled(boolean disabled) { this.options.put("disabled", disabled); return this; } /** * @return the disabled option */ public boolean isDisabled() { if (this.options.containsKey("disabled")) { return this.options.getBoolean("disabled"); } return false; } /** * Sets the current value of the progressBar * * @param value * @return instance of the current component */ public ProgressBar setValue(int value) { this.options.put("value", value); return this; } /** * @return the current value of the progressBar */ public int getValue() { if (this.options.containsKey("value")) { return options.getInt("value"); } return 0; } /*---- Events section ---*/ /** * Set's the callback when the value of the progressBar changes. * * @param change * @return instance of the current component */ public ProgressBar setChangeEvent(JsScopeUiEvent change) { this.options.put("change", change); return this; } /*---- Methods section ---*/ /** * Method to destroy the progressBar This will return the element back to its pre-init * state. 
* * @return the associated JsStatement */ public JsStatement destroy() { return new JsQuery(this).$().chain("progressbar", "'destroy'"); } /** * Method to destroy the progressBar within the ajax request * * @param ajaxRequestTarget */ public void destroy(AjaxRequestTarget ajaxRequestTarget) { ajaxRequestTarget.appendJavaScript(this.destroy().render().toString()); } /** * Method to disable the progressBar * * @return the associated JsStatement */ public JsStatement disable() { return new JsQuery(this).$().chain("progressbar", "'disable'"); } /** * Method to disable the progressBar within the ajax request * * @param ajaxRequestTarget */ public void disable(AjaxRequestTarget ajaxRequestTarget) { ajaxRequestTarget.appendJavaScript(this.disable().render().toString()); } /** * Method to enable the progressBar * * @return the associated JsStatement */ public JsStatement enable() { return new JsQuery(this).$().chain("progressbar", "'enable'"); } /** * Method to enable the progressBar within the ajax request * * @param ajaxRequestTarget */ public void enable(AjaxRequestTarget ajaxRequestTarget) { ajaxRequestTarget.appendJavaScript(this.enable().render().toString()); } /** * Method to get the current value of the progressBar * * @return the associated JsStatement */ public JsStatement value() { return new JsQuery(this).$().chain("progressbar", "'value'"); } /** * Method to set the current value of the progressBar * * @param value * @return the associated JsStatement */ public JsStatement value(int value) { return new JsQuery(this).$().chain("progressbar", "'value'", Integer.toString(value)); } /** * Method to set the current value of the progressBar within the ajax request * * @param ajaxRequestTarget * @param value */ public void value(AjaxRequestTarget ajaxRequestTarget, int value) { ajaxRequestTarget.appendJavaScript(this.value(value).render().toString()); } /*---- wiQuery Methods section ---*/ /** * Method to increment the value of the progressBar * * @return the 
associated JsStatement */ public JsStatement increment() { return increment(1); } /** * Method to increment the value of the progressBar * * @param increment * The increment to add to the current value * @return the associated JsStatement */ public JsStatement increment(int increment) { JsStatement statement = new JsStatement(); statement.append(new JsQuery(this) .$() .chain( "progressbar", "'value'", new JsQuery(this).$().chain("progressbar", "'value'").render(false) + " + " + increment).render()); return statement; } /** * Method to increment the value of the progressBar within the ajax request * * @param ajaxRequestTarget */ public void increment(AjaxRequestTarget ajaxRequestTarget) { ajaxRequestTarget.appendJavaScript(this.increment().render().toString()); } /** * Method to increment the value of the progressBar within the ajax request * * @param ajaxRequestTarget * @param increment * The increment to add to the current value */ public void increment(AjaxRequestTarget ajaxRequestTarget, int increment) { ajaxRequestTarget.appendJavaScript(this.increment(increment).render().toString()); } /** * Method to decrement the value of the progressBar * * @return the associated JsStatement */ public JsStatement decrement() { return decrement(1); } /** * Method to decrement the value of the progressBar * * @param decrement * The decrement to add to the current value * @return the associated JsStatement */ public JsStatement decrement(int decrement) { JsStatement statement = new JsStatement(); statement.append(new JsQuery(this) .$() .chain( "progressbar", "'value'", new JsQuery(this).$().chain("progressbar", "'value'").render(false) + " - " + decrement).render()); return statement; } /** * Method to decrement the value of the progressBar within the ajax request * * @param ajaxRequestTarget */ public void decrement(AjaxRequestTarget ajaxRequestTarget) { ajaxRequestTarget.appendJavaScript(this.decrement().render().toString()); } /** * Method to decrement the value of the 
progressBar within the ajax request * * @param ajaxRequestTarget * @param decrement * The decrement to add to the current value */ public void decrement(AjaxRequestTarget ajaxRequestTarget, int decrement) { ajaxRequestTarget.appendJavaScript(this.decrement(decrement).render().toString()); } /** * Method to returns the .ui-progressbar element * * @return the associated JsStatement */ public JsStatement widget() { return new JsQuery(this).$().chain("progressbar", "'widget'"); } /** * Method to returns the .ui-progressbar element within the ajax request * * @param ajaxRequestTarget */ public void widget(AjaxRequestTarget ajaxRequestTarget) { ajaxRequestTarget.appendJavaScript(this.widget().render().toString()); } }
google-code-export/wiquery
wiquery-jquery-ui/src/main/java/org/odlabs/wiquery/ui/progressbar/ProgressBar.java
Java
mit
9,828
// Copyright 2016 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package models import ( "fmt" "strings" "testing" . "github.com/smartystreets/goconvey/convey" "github.com/gogits/gogs/pkg/setting" ) func init() { setting.NewContext() } func Test_SSHParsePublicKey(t *testing.T) { testKeys := map[string]struct { typeName string length int content string }{ "dsa-1024": {"dsa", 1024, "ssh-dss AAAAB3NzaC1kc3MAAACBAOChCC7lf6Uo9n7BmZ6M8St19PZf4Tn59NriyboW2x/DZuYAz3ibZ2OkQ3S0SqDIa0HXSEJ1zaExQdmbO+Ux/wsytWZmCczWOVsaszBZSl90q8UnWlSH6P+/YA+RWJm5SFtuV9PtGIhyZgoNuz5kBQ7K139wuQsecdKktISwTakzAAAAFQCzKsO2JhNKlL+wwwLGOcLffoAmkwAAAIBpK7/3xvduajLBD/9vASqBQIHrgK2J+wiQnIb/Wzy0UsVmvfn8A+udRbBo+csM8xrSnlnlJnjkJS3qiM5g+eTwsLIV1IdKPEwmwB+VcP53Cw6lSyWyJcvhFb0N6s08NZysLzvj0N+ZC/FnhKTLzIyMtkHf/IrPCwlM+pV/M/96YgAAAIEAqQcGn9CKgzgPaguIZooTAOQdvBLMI5y0bQjOW6734XOpqQGf/Kra90wpoasLKZjSYKNPjE+FRUOrStLrxcNs4BeVKhy2PYTRnybfYVk1/dmKgH6P1YSRONsGKvTsH6c5IyCRG0ncCgYeF8tXppyd642982daopE7zQ/NPAnJfag= nocomment"}, "rsa-1024": {"rsa", 1024, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDAu7tvIvX6ZHrRXuZNfkR3XLHSsuCK9Zn3X58lxBcQzuo5xZgB6vRwwm/QtJuF+zZPtY5hsQILBLmF+BZ5WpKZp1jBeSjH2G7lxet9kbcH+kIVj0tPFEoyKI9wvWqIwC4prx/WVk2wLTJjzBAhyNxfEq7C9CeiX9pQEbEqJfkKCQ== nocomment\n"}, "rsa-2048": {"rsa", 2048, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMZXh+1OBUwSH9D45wTaxErQIN9IoC9xl7MKJkqvTvv6O5RR9YW/IK9FbfjXgXsppYGhsCZo1hFOOsXHMnfOORqu/xMDx4yPuyvKpw4LePEcg4TDipaDFuxbWOqc/BUZRZcXu41QAWfDLrInwsltWZHSeG7hjhpacl4FrVv9V1pS6Oc5Q1NxxEzTzuNLS/8diZrTm/YAQQ/+B+mzWI3zEtF4miZjjAljWd1LTBPvU23d29DcBmmFahcZ441XZsTeAwGxG/Q6j8NgNXj9WxMeWwxXV2jeAX/EBSpZrCVlCQ1yJswT6xCp8TuBnTiGWYMBNTbOZvPC4e0WI2/yZW/s5F nocomment"}, "ecdsa-256": {"ecdsa", 256, "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFQacN3PrOll7PXmN5B/ZNVahiUIqI05nbBlZk1KXsO3d06ktAWqbNflv2vEmA38bTFTfJ2sbn2B5ksT52cDDbA= nocomment"}, "ecdsa-384": {"ecdsa", 384, 
"ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBINmioV+XRX1Fm9Qk2ehHXJ2tfVxW30ypUWZw670Zyq5GQfBAH6xjygRsJ5wWsHXBsGYgFUXIHvMKVAG1tpw7s6ax9oA+dJOJ7tj+vhn8joFqT+sg3LYHgZkHrfqryRasQ== nocomment"}, // "ecdsa-521": {"ecdsa", 521, "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACGt3UG3EzRwNOI17QR84l6PgiAcvCE7v6aXPj/SC6UWKg4EL8vW9ZBcdYL9wzs4FZXh4MOV8jAzu3KRWNTwb4k2wFNUpGOt7l28MztFFEtH5BDDrtAJSPENPy8pvPLMfnPg5NhvWycqIBzNcHipem5wSJFN5PdpNOC2xMrPWKNqj+ZjQ== nocomment"}, } Convey("Parse public keys in both native and ssh-keygen", t, func() { for name, key := range testKeys { fmt.Println("\nTesting key:", name) keyTypeN, lengthN, errN := SSHNativeParsePublicKey(key.content) So(errN, ShouldBeNil) So(keyTypeN, ShouldEqual, key.typeName) So(lengthN, ShouldEqual, key.length) keyTypeK, lengthK, errK := SSHKeyGenParsePublicKey(key.content) if errK != nil { // Some server just does not support ecdsa format. if strings.Contains(errK.Error(), "line 1 too long:") { continue } So(errK, ShouldBeNil) } So(keyTypeK, ShouldEqual, key.typeName) So(lengthK, ShouldEqual, key.length) } }) }
xaionaro/gogs
models/ssh_key_test.go
GO
mit
3,217
#include <vector> #include <stdio.h> #include <cstring> #include <glm.hpp> #include "objloader.hpp" #pragma warning(disable:4996) bool loadOBJ( const char * path, std::vector<glm::vec3> & out_vertices, std::vector<glm::vec3> & out_normals, std::vector<glm::vec2> & out_uvs) { std::vector<unsigned int> vertexIndices, uvIndices, normalIndices; std::vector<glm::vec3> temp_vertices; std::vector<glm::vec2> temp_uvs; std::vector<glm::vec3> temp_normals; FILE * file = fopen(path, "r"); if (file == NULL) { printf("Impossible to open the file ! Are you in the right path ?\n"); getchar(); return false; } while (1) { char lineHeader[128]; // read the first word of the line int res = fscanf(file, "%s", lineHeader); if (res == EOF) break; // EOF = End Of File. Quit the loop. // else : parse lineHeader if (strcmp(lineHeader, "v") == 0) { glm::vec3 vertex; fscanf(file, "%f %f %f\n", &vertex.x, &vertex.y, &vertex.z); temp_vertices.push_back(vertex); } else if (strcmp(lineHeader, "vt") == 0) { glm::vec2 uv; fscanf(file, "%f %f\n", &uv.x, &uv.y); uv.y = -uv.y; // Invert V coordinate since we will only use DDS texture, which are inverted. Remove if you want to use TGA or BMP loaders. 
temp_uvs.push_back(uv); } else if (strcmp(lineHeader, "vn") == 0) { glm::vec3 normal; fscanf(file, "%f %f %f\n", &normal.x, &normal.y, &normal.z); temp_normals.push_back(normal); } else if (strcmp(lineHeader, "f") == 0) { std::string vertex1, vertex2, vertex3; unsigned int vertexIndex[3], uvIndex[3], normalIndex[3]; int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex[1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2]); if (matches != 9) { printf("File can't be read by our simple parser :-( Try exporting with other options\n"); return false; } vertexIndices.push_back(vertexIndex[0]); vertexIndices.push_back(vertexIndex[1]); vertexIndices.push_back(vertexIndex[2]); uvIndices.push_back(uvIndex[0]); uvIndices.push_back(uvIndex[1]); uvIndices.push_back(uvIndex[2]); normalIndices.push_back(normalIndex[0]); normalIndices.push_back(normalIndex[1]); normalIndices.push_back(normalIndex[2]); } else { // Probably a comment, eat up the rest of the line char stupidBuffer[1000]; fgets(stupidBuffer, 1000, file); } } // For each vertex of each triangle for (unsigned int i = 0; i<vertexIndices.size(); i++) { // Get the indices of its attributes unsigned int vertexIndex = vertexIndices[i]; unsigned int uvIndex = uvIndices[i]; unsigned int normalIndex = normalIndices[i]; // Get the attributes thanks to the index glm::vec3 vertex = temp_vertices[vertexIndex - 1]; glm::vec2 uv = temp_uvs[uvIndex - 1]; glm::vec3 normal = temp_normals[normalIndex - 1]; // Put the attributes in buffers out_vertices.push_back(vertex); out_uvs.push_back(uv); out_normals.push_back(normal); } return true; }
DanielPri/COMP371
Skeleton/COMP371_Skeleton_code-master/Skeleton/VS_Solution/COMP371/COMP371/objloader.cpp
C++
mit
3,059
// Copyright (c) 2001-2010 Hartmut Kaiser // Copyright (c) 2001-2010 Joel de Guzman // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #if !defined(BOOST_SPIRIT_PARSE_SEXPR_IMPL) #define BOOST_SPIRIT_PARSE_SEXPR_IMPL #include <iostream> #include <string> #include <boost/spirit/include/support_istream_iterator.hpp> #include <boost/spirit/include/support_line_pos_iterator.hpp> #include <boost/spirit/include/qi_parse.hpp> #include <input/sexpr.hpp> #include <input/parse_sexpr.hpp> namespace scheme { namespace input { /////////////////////////////////////////////////////////////////////////// template <typename Char> bool parse_sexpr( std::basic_istream<Char>& is, utree& result, std::string const& source_file) { // no white space skipping in the stream! is.unsetf(std::ios::skipws); typedef boost::spirit::basic_istream_iterator<Char> stream_iterator_type; stream_iterator_type sfirst(is); stream_iterator_type slast; typedef boost::spirit::line_pos_iterator<stream_iterator_type> iterator_type; iterator_type first(sfirst); iterator_type last(slast); scheme::input::sexpr<iterator_type> p(source_file); scheme::input::sexpr_white_space<iterator_type> ws; using boost::spirit::qi::phrase_parse; return phrase_parse(first, last, p, ws, result); } /////////////////////////////////////////////////////////////////////////// template <typename Char> bool parse_sexpr_list( std::basic_istream<Char>& is, utree& result, std::string const& source_file) { // no white space skipping in the stream! 
is.unsetf(std::ios::skipws); typedef boost::spirit::basic_istream_iterator<Char> stream_iterator_type; stream_iterator_type sfirst(is); stream_iterator_type slast; typedef boost::spirit::line_pos_iterator<stream_iterator_type> iterator_type; iterator_type first(sfirst); iterator_type last(slast); scheme::input::sexpr<iterator_type> p(source_file); scheme::input::sexpr_white_space<iterator_type> ws; using boost::spirit::qi::phrase_parse; bool ok = phrase_parse(first, last, +p, ws, result); result.tag(1); // line return ok; } /////////////////////////////////////////////////////////////////////////// template <typename Range> typename boost::disable_if<boost::is_base_of<std::ios_base, Range>, bool>::type parse_sexpr( Range const& rng, utree& result, std::string const& source_file) { typedef boost::spirit::line_pos_iterator<typename Range::const_iterator> iterator_type; scheme::input::sexpr<iterator_type> p(source_file); scheme::input::sexpr_white_space<iterator_type> ws; iterator_type first(rng.begin()); iterator_type last(rng.end()); using boost::spirit::qi::phrase_parse; return phrase_parse(first, last, p, ws, result); } template <typename Range> typename boost::disable_if<boost::is_base_of<std::ios_base, Range>, bool>::type parse_sexpr_list( Range const& rng, utree& result, std::string const& source_file) { typedef boost::spirit::line_pos_iterator<typename Range::const_iterator> iterator_type; scheme::input::sexpr<iterator_type> p(source_file); scheme::input::sexpr_white_space<iterator_type> ws; iterator_type first(rng.begin()); iterator_type last(rng.end()); using boost::spirit::qi::phrase_parse; bool ok = phrase_parse(first, last, +p, ws, result); result.tag(1); // line return ok; } /////////////////////////////////////////////////////////////////////////// bool parse_sexpr( utree const& in, utree& result, std::string const& source_file) { return parse_sexpr(in.get<utf8_string_range_type>(), result, source_file); } bool parse_sexpr_list( utree const& in, utree& 
result, std::string const& source_file) { return parse_sexpr_list(in.get<utf8_string_range_type>(), result, source_file); } }} #endif
djsedulous/namecoind
libs/boost_1_50_0/libs/spirit/example/scheme/input/parse_sexpr_impl.hpp
C++
mit
4,452
/** * Module dependencies. */ var express = require('express'); /** * Initialize middleware. */ module.exports = function() { this.use(express.urlencoded()); this.use(express.json()); this.use(this.router); this.use(express.errorHandler()); }
bosgood/electrolyte-examples
express/etc/init/02_middleware.js
JavaScript
mit
262
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2013, OpenCV Foundation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the OpenCV Foundation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #ifndef __OPENCV_OPTIM_HPP__ #define __OPENCV_OPTIM_HPP__ #include "opencv2/core.hpp" namespace cv { /** @addtogroup core_optim The algorithms in this section minimize or maximize function value within specified constraints or without any constraints. @{ */ /** @brief Basic interface for all solvers */ class CV_EXPORTS MinProblemSolver : public Algorithm { public: /** @brief Represents function being optimized */ class CV_EXPORTS Function { public: virtual ~Function() {} virtual double calc(const double* x) const = 0; virtual void getGradient(const double* /*x*/,double* /*grad*/) {} }; /** @brief Getter for the optimized function. The optimized function is represented by Function interface, which requires derivatives to implement the sole method calc(double*) to evaluate the function. @return Smart-pointer to an object that implements Function interface - it represents the function that is being optimized. It can be empty, if no function was given so far. */ virtual Ptr<Function> getFunction() const = 0; /** @brief Setter for the optimized function. *It should be called at least once before the call to* minimize(), as default value is not usable. @param f The new function to optimize. */ virtual void setFunction(const Ptr<Function>& f) = 0; /** @brief Getter for the previously set terminal criteria for this algorithm. @return Deep copy of the terminal criteria used at the moment. 
*/ virtual TermCriteria getTermCriteria() const = 0; /** @brief Set terminal criteria for solver. This method *is not necessary* to be called before the first call to minimize(), as the default value is sensible. Algorithm stops when the number of function evaluations done exceeds termcrit.maxCount, when the function values at the vertices of simplex are within termcrit.epsilon range or simplex becomes so small that it can enclosed in a box with termcrit.epsilon sides, whatever comes first. @param termcrit Terminal criteria to be used, represented as cv::TermCriteria structure. */ virtual void setTermCriteria(const TermCriteria& termcrit) = 0; /** @brief actually runs the algorithm and performs the minimization. The sole input parameter determines the centroid of the starting simplex (roughly, it tells where to start), all the others (terminal criteria, initial step, function to be minimized) are supposed to be set via the setters before the call to this method or the default values (not always sensible) will be used. @param x The initial point, that will become a centroid of an initial simplex. After the algorithm will terminate, it will be setted to the point where the algorithm stops, the point of possible minimum. @return The value of a function at the point found. */ virtual double minimize(InputOutputArray x) = 0; }; /** @brief This class is used to perform the non-linear non-constrained minimization of a function, defined on an `n`-dimensional Euclidean space, using the **Nelder-Mead method**, also known as **downhill simplex method**. The basic idea about the method can be obtained from <http://en.wikipedia.org/wiki/Nelder-Mead_method>. It should be noted, that this method, although deterministic, is rather a heuristic and therefore may converge to a local minima, not necessary a global one. 
It is an iterative optimization technique, which at each step uses information about the values of a
function evaluated only at `n+1` points, arranged as a *simplex* in `n`-dimensional space (hence the
second name of the method). At each step a new point is chosen to evaluate the function at; the
obtained value is compared with the previous ones and, based on this information, the simplex changes
its shape, slowly moving towards the local minimum. Thus this method uses *only* function values to
make decisions, in contrast to, say, the Nonlinear Conjugate Gradient method (which is also
implemented in optim).

The algorithm stops when the number of function evaluations done exceeds termcrit.maxCount, when the
function values at the vertices of the simplex are within the termcrit.epsilon range, or when the
simplex becomes so small that it can be enclosed in a box with termcrit.epsilon sides, whichever
comes first, for some user-defined positive integer termcrit.maxCount and positive non-integer
termcrit.epsilon.

@note DownhillSolver is a derivative of the abstract interface cv::MinProblemSolver, which in turn is
derived from the Algorithm interface and is used to encapsulate the functionality, common to all
non-linear optimization algorithms in the optim module.

@note term criteria should meet the following condition:
@code
    termcrit.type == (TermCriteria::MAX_ITER + TermCriteria::EPS) && termcrit.epsilon > 0 && termcrit.maxCount > 0
@endcode
 */
class CV_EXPORTS DownhillSolver : public MinProblemSolver
{
public:
    /** @brief Returns the initial step that will be used in the downhill simplex algorithm.

    @param step Initial step that will be used in the algorithm. Note that although the corresponding
    setter accepts column-vectors as well as row-vectors, this method will return a row-vector.
    @see DownhillSolver::setInitStep
     */
    virtual void getInitStep(OutputArray step) const=0;

    /** @brief Sets the initial step that will be used in the downhill simplex algorithm.

    Step, together with the initial point (given in DownhillSolver::minimize), are two `n`-dimensional
    vectors that are used to determine the shape of the initial simplex. Roughly said, the initial
    point determines the position of the simplex (it will become the simplex's centroid), while step
    determines the spread (size in each dimension) of the simplex. To be more precise, if
    \f$s,x_0\in\mathbb{R}^n\f$ are the initial step and initial point respectively, the vertices of
    the simplex will be: \f$v_0:=x_0-\frac{1}{2} s\f$ and \f$v_i:=x_0+s_i\f$ for \f$i=1,2,\dots,n\f$,
    where \f$s_i\f$ denotes the projection of the initial step onto the *i*-th coordinate (the result
    of the projection is treated to be the vector given by \f$s_i:=e_i\cdot\left<e_i\cdot s\right>\f$,
    where \f$e_i\f$ form the canonical basis).

    @param step Initial step that will be used in the algorithm. Roughly said, it determines the
    spread (size in each dimension) of the initial simplex.
     */
    virtual void setInitStep(InputArray step)=0;

    /** @brief This function returns the reference to the ready-to-use DownhillSolver object.

    All the parameters are optional, so this procedure can be called even without parameters at all.
    In this case, the default values will be used. As the default values for the terminal criteria
    are the only sensible ones, MinProblemSolver::setFunction() and DownhillSolver::setInitStep()
    should be called upon the obtained object, if the respective parameters were not given to
    create(). Otherwise, the two ways (give parameters to createDownhillSolver() or miss them out and
    call MinProblemSolver::setFunction() and DownhillSolver::setInitStep()) are absolutely equivalent
    (and will drop the same errors in the same way, should invalid input be detected).

    @param f Pointer to the function that will be minimized, similarly to the one you submit via
    MinProblemSolver::setFunction.
    @param initStep Initial step, that will be used to construct the initial simplex, similarly to
    the one you submit via MinProblemSolver::setInitStep.
    @param termcrit Terminal criteria to the algorithm, similarly to the one you submit via
    MinProblemSolver::setTermCriteria.
     */
    static Ptr<DownhillSolver> create(const Ptr<MinProblemSolver::Function>& f=Ptr<MinProblemSolver::Function>(),
                                      InputArray initStep=Mat_<double>(1,1,0.0),
                                      TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));
};

/** @brief This class is used to perform the non-linear non-constrained minimization of a function
with known gradient, defined on an *n*-dimensional Euclidean space, using the **Nonlinear Conjugate
Gradient method**.

The implementation was done based on the beautifully clear explanatory article [An Introduction to
the Conjugate Gradient Method Without the Agonizing
Pain](http://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf) by Jonathan Richard
Shewchuk. The method can be seen as an adaptation of a standard Conjugate Gradient method (see, for
example <http://en.wikipedia.org/wiki/Conjugate_gradient_method>) for numerically solving the
systems of linear equations.

It should be noted that this method, although deterministic, is rather a heuristic method and
therefore may converge to a local minimum, not necessarily the global one. What is even more
disastrous, most of its behaviour is ruled by the gradient, therefore it essentially cannot
distinguish between local minima and maxima. Therefore, if it starts sufficiently near to the local
maximum, it may converge to it.

Another obvious restriction is that it should be possible to compute the gradient of a function at
any point, thus it is preferable to have an analytic expression for the gradient and the
computational burden should be borne by the user.

The latter responsibility is accomplished via the getGradient method of a MinProblemSolver::Function
interface (which represents the function being optimized). This method takes a point in
*n*-dimensional space (the first argument represents the array of coordinates of that point) and
computes its gradient (it should be stored in the second argument as an array).

@note class ConjGradSolver thus does not add any new methods to the basic MinProblemSolver interface.

@note term criteria should meet the following condition:
@code
    termcrit.type == (TermCriteria::MAX_ITER + TermCriteria::EPS) && termcrit.epsilon > 0 && termcrit.maxCount > 0
    // or
    termcrit.type == TermCriteria::MAX_ITER && termcrit.maxCount > 0
@endcode
 */
class CV_EXPORTS ConjGradSolver : public MinProblemSolver
{
public:
    /** @brief This function returns the reference to the ready-to-use ConjGradSolver object.

    All the parameters are optional, so this procedure can be called even without parameters at all.
    In this case, the default values will be used. As the default values for the terminal criteria
    are the only sensible ones, MinProblemSolver::setFunction() should be called upon the obtained
    object, if the function was not given to create(). Otherwise, the two ways (submit it to create()
    or miss it out and call MinProblemSolver::setFunction()) are absolutely equivalent (and will drop
    the same errors in the same way, should invalid input be detected).

    @param f Pointer to the function that will be minimized, similarly to the one you submit via
    MinProblemSolver::setFunction.
    @param termcrit Terminal criteria to the algorithm, similarly to the one you submit via
    MinProblemSolver::setTermCriteria.
     */
    static Ptr<ConjGradSolver> create(const Ptr<MinProblemSolver::Function>& f=Ptr<ConjGradSolver::Function>(),
                                      TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));
};

//! return codes for cv::solveLP() function
enum SolveLPResult
{
    SOLVELP_UNBOUNDED = -2, //!< problem is unbounded (target function can achieve arbitrary high values)
    SOLVELP_UNFEASIBLE = -1, //!< problem is unfeasible (there are no points that satisfy all the constraints imposed)
    SOLVELP_SINGLE = 0, //!< there is only one maximum for target function
    SOLVELP_MULTI = 1 //!< there are multiple maxima for target function - the arbitrary one is returned
};

/** @brief Solve given (non-integer) linear programming problem using the Simplex Algorithm (Simplex Method).

What we mean here by "linear programming problem" (or LP problem, for short) can be formulated as:

\f[\mbox{Maximize } c\cdot x\\
 \mbox{Subject to:}\\
 Ax\leq b\\
 x\geq 0\f]

Where \f$c\f$ is fixed `1`-by-`n` row-vector, \f$A\f$ is fixed `m`-by-`n` matrix, \f$b\f$ is fixed
`m`-by-`1` column vector and \f$x\f$ is an arbitrary `n`-by-`1` column vector, which satisfies the
constraints.

Simplex algorithm is one of many algorithms that are designed to handle this sort of problems
efficiently. Although it is not optimal in a theoretical sense (there exist algorithms that can solve
any problem written as above in polynomial time, while simplex method degenerates to exponential
time for some special cases), it is well-studied, easy to implement and is shown to work well for
real-life purposes.

The particular implementation is taken almost verbatim from **Introduction to Algorithms, third
edition** by T. H. Cormen, C. E. Leiserson, R. L. Rivest and Clifford Stein. In particular, the
Bland's rule <http://en.wikipedia.org/wiki/Bland%27s_rule> is used to prevent cycling.

@param Func This row-vector corresponds to \f$c\f$ in the LP problem formulation (see above). It
should contain 32- or 64-bit floating point numbers. As a convenience, column-vector may be also
submitted, in the latter case it is understood to correspond to \f$c^T\f$.
@param Constr `m`-by-`n+1` matrix, whose rightmost column corresponds to \f$b\f$ in formulation above
and the remaining to \f$A\f$. It should contain 32- or 64-bit floating point numbers.
@param z The solution will be returned here as a column-vector - it corresponds to \f$x\f$ in the
formulation above. It will contain 64-bit floating point numbers.
@return One of cv::SolveLPResult
 */
CV_EXPORTS_W int solveLP(const Mat& Func, const Mat& Constr, Mat& z);

//! @}

}// cv

#endif
chliam/OpenCV_IOS_3.0RC1
opencv2.framework/Versions/A/Headers/core/optim.hpp
C++
mit
15,775
// Generated by CoffeeScript 1.3.3
// NOTE(review): machine-generated output — prefer editing the CoffeeScript source
// (if it still exists in the repo) rather than this file.
(function() {
  // __indexOf / __hasProp are the CoffeeScript runtime shims for
  // Array#indexOf and Object#hasOwnProperty.
  var Path, createApiTree, extensions, fs,
    __indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; },
    __hasProp = {}.hasOwnProperty;

  fs = require('fs');

  Path = require('path');

  // Returns the list of file extensions require() can currently load,
  // excluding '.json'; falls back to ['.coffee'] when require.extensions
  // is unavailable.
  extensions = function() {
    var k, v, _ref, _results;
    if (typeof require !== "undefined" && require !== null ? require.extensions : void 0) {
      _ref = require.extensions;
      _results = [];
      for (k in _ref) {
        v = _ref[k];
        if (k !== '.json') {
          _results.push(k);
        }
      }
      return _results;
    } else {
      return ['.coffee'];
    }
  };

  // Recursively builds a nested object ("API tree") from a directory:
  // subdirectories recurse, accepted files are loaded via options.loadItem
  // (default: require) and their exported keys are merged into the tree.
  // All filesystem hooks (readdirSync, isDirectory, filter, nameToKey,
  // loadItem) are overridable through `options`.
  exports.createApiTree = createApiTree = function(directory, options) {
    var child, item, k, key, name, names, node, tree, v, _i, _len;
    if (options == null) {
      options = {};
    }
    options.loadItem || (options.loadItem = require);
    // Default key: basename up to the first '.', non-word runs collapsed to '_'.
    options.nameToKey || (options.nameToKey = function(name) {
      return name.split('.')[0].replace(/_*\W+_*/g, '_');
    });
    options.readdirSync || (options.readdirSync = function(path) {
      return fs.readdirSync(path);
    });
    options.isDirectory || (options.isDirectory = function(path) {
      return fs.lstatSync(path).isDirectory();
    });
    // Default filter: accept '.js', or any other loadable extension unless a
    // sibling '.js' file with the same basename exists (the compiled JS wins).
    options.filter || (options.filter = function(name, names) {
      var ext, _ref;
      ext = Path.extname(name);
      return ext === '.js' || (__indexOf.call(extensions(), ext) >= 0 && !(_ref = Path.basename(name, ext).concat('.js'), __indexOf.call(names, _ref) >= 0));
    });
    tree = {};
    names = options.readdirSync(directory);
    for (_i = 0, _len = names.length; _i < _len; _i++) {
      name = names[_i];
      // Skip hidden/editor-backup entries: leading '.', '_' or '#', or
      // trailing '#'/'~'.
      if (name.match(/^[._#]|[#~]$/)) {
        continue;
      }
      child = Path.join(directory, name);
      key = options.nameToKey(name);
      item = options.isDirectory(child) ? createApiTree(child, options) : options.filter(name, names) ? options.loadItem(child) : void 0;
      if (item && Object.keys(item).length) {
        node = (tree[key] || (tree[key] = {}));
        for (k in item) {
          if (!__hasProp.call(item, k)) continue;
          v = item[k];
          // Two files may map to the same key; duplicate exported names are an error.
          if (node[k] != null) {
            throw new Error("API tree name conflict for '" + k + "' in " + child);
          }
          node[k] = v;
        }
      }
    }
    return tree;
  };

}).call(this);
rybon/Remocial
node_modules/ss-angular/node_modules/apitree/lib/apitree.js
JavaScript
mit
2,451
class GetterWithDollar1 { @lombok.Getter int $i; GetterWithDollar1() { super(); } public @java.lang.SuppressWarnings("all") int get$i() { return this.$i; } } class GetterWithDollar2 { @lombok.Getter int $i; @lombok.Getter int i; GetterWithDollar2() { super(); } public @java.lang.SuppressWarnings("all") int get$i() { return this.$i; } public @java.lang.SuppressWarnings("all") int getI() { return this.i; } }
domix/lombok
test/transform/resource/after-ecj/GetterWithDollar.java
Java
mit
456
YUI.add('gallery-plugin-node-io', function(Y) {

/**
 * Node IO provides a simple interface to load text into a node
 *
 * @class NodeIo
 * @extends Base
 * @version 1.1.0
 */
var YL = Y.Lang;

Y.Plugin.NodeIo = Y.Base.create('node-io', Y.Base, [], {

    /////// P U B L I C //////

    /**
     * Set up ioHandler and bind events
     * @since 1.1.0
     * @method initializer
     */
    initializer : function(){
        this.publish('success', {defaultFn: this._defSuccessFn });
        this.after('uriChange', this._afterUriChange);
        // All four io callbacks funnel through _handleResponse, which re-fires
        // them as events of the same name on this plugin.
        this._ioHandlers = {
            complete: Y.bind(this._handleResponse, this, 'complete'),
            success: Y.bind(this._handleResponse, this, 'success'),
            failure: Y.bind(this._handleResponse, this, 'failure'),
            end: Y.bind(this._handleResponse, this, 'end')
        };
    },

    /**
     * Set uri and start io
     * @since 1.0.0
     * @method load
     * @chainable
     * @return {NodeIo} A reference to this object
     */
    load : function(uri) {
        var config = this.get('ioConfig');
        // No explicit uri: fall back to the `uri` attribute; otherwise remember
        // the given one (note: setting `uri` aborts any in-flight io).
        if(!uri) {
            uri = this.get('uri');
        }else{
            this.set('uri', uri);
        }
        config.on = this._ioHandlers;
        this._io = Y.io(uri, config);
        return this;
    },

    /**
     * Sugar method to refresh the content
     * Not recommended if placement is not `replace`
     * @since 1.0.0
     * @method refresh
     * @chainable
     * @return {NodeIo} A reference to this object
     */
    refresh : function(){
        return this.load();
    },

    /**
     * Stops any current io
     * @since 1.0.0
     * @method abort
     * @chainable
     * @return {NodeIo} A reference to this object
     */
    abort : function() {
        this._stopIO();
        return this;
    },

    ////// P R O T E C T E D //////

    /**
     * Local storage of the internal Y.io
     * @since 1.0.0
     * @protected
     */
    _io: null,

    /**
     * Object used to set the on of the _io
     * @since 1.1.0
     * @protected
     */
    _ioHandlers: null,

    /**
     * Aborts any current io
     * @since 1.0.0
     * @method _stopIO
     * @protected
     */
    _stopIO : function() {
        if(this._io) {
            this._io.abort();
            this._io = null;
        }
    },

    /**
     * Single interface for io responses; re-fires the io callback as a
     * plugin-level event carrying the transaction id and response.
     * @since 1.1.0
     * @method _handleResponse
     * @protected
     */
    _handleResponse : function (type, id, o) {
        this.fire(type, {id: id, response: o});
        this._io = null;
    },

    /**
     * Default onSuccess method for io
     * Inserts response text into the host by placement
     * @since 1.1.0
     * @method _defSuccessFn
     * @protected
     */
    _defSuccessFn : function(e) {
        this.get('host').insert(e.response.responseText, this.get('placement'));
    },

    /**
     * Aborts any io when the uri is changed
     * @since 1.1.0
     * @method _afterUriChange
     * @protected
     */
    _afterUriChange : function() {
        this._stopIO();
    }
}, {
    NS : 'io',
    ATTRS : {
        /**
         * Stores host node
         * @since 1.0.0
         * @attribute host
         * @type Y.Plugin.Host
         */
        host : {
            writeOnce : true
        },

        /**
         * Allows for advanced io configuration
         * @since 1.0.0
         * @attribute ioConfig
         * @type object
         * @default {}
         */
        ioConfig : {
            value : {},
            validator : YL.isObject
        },

        /**
         * Placement of responseText; one of `replace`, `append` or `prepend`.
         * @since 1.0.0
         * @attribute placement
         * @type string
         * @default replace
         */
        placement : {
            value : 'replace',
            validator : function(val) {
                return (/replace|(?:ap|pre)pend/).test(val);
            }
        },

        /**
         * Specifies the URI for the io
         * @since 1.0.0
         * @attribute uri
         * @type string
         */
        uri : {
            validator : YL.isString
        }
    }
});

}, 'gallery-2010.09.08-19-45', {requires:['plugin','node-base','node-pluginhost','io-base','base-build']});
inikoo/fact
libs/yui/yui3-gallery/build/gallery-plugin-node-io/gallery-plugin-node-io.js
JavaScript
mit
4,086
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ /// <reference types="node" /> import * as p from 'path'; import {AbsoluteFsPath, PathSegment, PathString} from '../../src/types'; import {MockFileSystem} from './mock_file_system'; export class MockFileSystemPosix extends MockFileSystem { resolve(...paths: string[]): AbsoluteFsPath { const resolved = p.posix.resolve(this.pwd(), ...paths); return this.normalize(resolved) as AbsoluteFsPath; } dirname<T extends string>(file: T): T { return this.normalize(p.posix.dirname(file)) as T; } join<T extends string>(basePath: T, ...paths: string[]): T { return this.normalize(p.posix.join(basePath, ...paths)) as T; } relative<T extends PathString>(from: T, to: T): PathSegment|AbsoluteFsPath { return this.normalize(p.posix.relative(from, to)) as PathSegment | AbsoluteFsPath; } basename(filePath: string, extension?: string): PathSegment { return p.posix.basename(filePath, extension) as PathSegment; } isRooted(path: string): boolean { return path.startsWith('/'); } protected splitPath<T extends PathString>(path: T): string[] { return path.split('/'); } normalize<T extends PathString>(path: T): T { return path.replace(/^[a-z]:\//i, '/').replace(/\\/g, '/') as T; } }
mgechev/angular
packages/compiler-cli/src/ngtsc/file_system/testing/src/mock_file_system_posix.ts
TypeScript
mit
1,448
// Copyright 2012-2019 Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.

package elastic

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/olivere/elastic/v7/uritemplates"
)

// XPackSecurityGetUserService retrieves a user by its name.
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-get-user.html.
type XPackSecurityGetUserService struct {
	client *Client

	pretty     *bool       // pretty format the returned JSON response
	human      *bool       // return human readable values for statistics
	errorTrace *bool       // include the stack trace of returned errors
	filterPath []string    // list of filters used to reduce the response
	headers    http.Header // custom request-level HTTP headers

	usernames []string // users to fetch; empty means "all users"
}

// NewXPackSecurityGetUserService creates a new XPackSecurityGetUserService.
func NewXPackSecurityGetUserService(client *Client) *XPackSecurityGetUserService {
	return &XPackSecurityGetUserService{
		client: client,
	}
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *XPackSecurityGetUserService) Pretty(pretty bool) *XPackSecurityGetUserService {
	s.pretty = &pretty
	return s
}

// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *XPackSecurityGetUserService) Human(human bool) *XPackSecurityGetUserService {
	s.human = &human
	return s
}

// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *XPackSecurityGetUserService) ErrorTrace(errorTrace bool) *XPackSecurityGetUserService {
	s.errorTrace = &errorTrace
	return s
}

// FilterPath specifies a list of filters used to reduce the response.
func (s *XPackSecurityGetUserService) FilterPath(filterPath ...string) *XPackSecurityGetUserService {
	s.filterPath = filterPath
	return s
}

// Header adds a header to the request.
func (s *XPackSecurityGetUserService) Header(name string, value string) *XPackSecurityGetUserService {
	if s.headers == nil {
		s.headers = http.Header{}
	}
	s.headers.Add(name, value)
	return s
}

// Headers specifies the headers of the request.
func (s *XPackSecurityGetUserService) Headers(headers http.Header) *XPackSecurityGetUserService {
	s.headers = headers
	return s
}

// Usernames are the names of one or more users to retrieve.
// Blank or whitespace-only names are silently dropped.
func (s *XPackSecurityGetUserService) Usernames(usernames ...string) *XPackSecurityGetUserService {
	for _, username := range usernames {
		if v := strings.TrimSpace(username); v != "" {
			s.usernames = append(s.usernames, v)
		}
	}
	return s
}

// buildURL builds the URL for the operation.
func (s *XPackSecurityGetUserService) buildURL() (string, url.Values, error) {
	// Build URL: with usernames we expand the template; without, the
	// endpoint returns all users.
	var (
		path string
		err  error
	)
	if len(s.usernames) > 0 {
		path, err = uritemplates.Expand("/_security/user/{username}", map[string]string{
			"username": strings.Join(s.usernames, ","),
		})
	} else {
		path = "/_security/user"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if v := s.pretty; v != nil {
		params.Set("pretty", fmt.Sprint(*v))
	}
	if v := s.human; v != nil {
		params.Set("human", fmt.Sprint(*v))
	}
	if v := s.errorTrace; v != nil {
		params.Set("error_trace", fmt.Sprint(*v))
	}
	if len(s.filterPath) > 0 {
		params.Set("filter_path", strings.Join(s.filterPath, ","))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
// This endpoint has no required parameters, so it always succeeds.
func (s *XPackSecurityGetUserService) Validate() error {
	return nil
}

// Do executes the operation.
func (s *XPackSecurityGetUserService) Do(ctx context.Context) (*XPackSecurityGetUserResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
		Method:  "GET",
		Path:    path,
		Params:  params,
		Headers: s.headers,
	})
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := XPackSecurityGetUserResponse{}
	if err := json.Unmarshal(res.Body, &ret); err != nil {
		return nil, err
	}
	return &ret, nil
}

// XPackSecurityGetUserResponse is the response of XPackSecurityGetUserService.Do,
// keyed by username.
type XPackSecurityGetUserResponse map[string]XPackSecurityUser

// XPackSecurityUser is the user object.
//
// The Java source for this struct is defined here:
// https://github.com/elastic/elasticsearch/blob/7.3/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java
type XPackSecurityUser struct {
	Username string                 `json:"username"`
	Roles    []string               `json:"roles"`
	Fullname string                 `json:"full_name"`
	Email    string                 `json:"email"`
	Metadata map[string]interface{} `json:"metadata"`
	Enabled  bool                   `json:"enabled"`
}
olivere/elastic
xpack_security_get_user.go
GO
mit
5,061
// * @param aaa bbb ccc ddd eee fff ggg //
general-language-syntax/GLS
test/integration/CommentDocTag/long parameter.ts
TypeScript
mit
46
package de.fhpotsdam.unfolding.marker;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import de.fhpotsdam.unfolding.UnfoldingMap;
import de.fhpotsdam.unfolding.geo.Location;
import de.fhpotsdam.unfolding.utils.GeoUtils;

/**
 * A MultiMarker enables handling of multiple, logically grouped markers. Properties and display states are the same for
 * all its markers.
 *
 * A MultiMarker can consist of various sub-markers, even of different types. For instance, a MultiMarker could have
 * three polygon marker and one point marker.
 */
public class MultiMarker implements Marker {

	protected List<Marker> markers = new ArrayList<Marker>();

	// Eagerly initialized so the property accessors below work on a fresh
	// MultiMarker; previously this stayed null until setProperties() was
	// called, and setProperty()/getProperty() threw a NullPointerException.
	public HashMap<String, Object> properties = new HashMap<String, Object>();

	protected boolean selected;
	protected boolean hidden;
	protected String id;

	public String getId() {
		return id;
	}

	public void setId(String id) {
		this.id = id;
	}

	public void setMarkers(List<Marker> markers) {
		this.markers = markers;
	}

	public List<Marker> getMarkers() {
		return markers;
	}

	/** Appends the given markers to this group. */
	public void addMarkers(Marker... markers) {
		for (Marker marker : markers) {
			this.markers.add(marker);
		}
	}

	/**
	 * Return center of all markers.
	 *
	 * Returns (0, 0) for an empty MultiMarker (previously the division by zero produced NaN coordinates).
	 *
	 * This uses marker.getLocation() which either returns single location, or centroid location (of shape marker), and
	 * then combines it. TODO Check whether to use {@link GeoUtils#getCentroid(List)} instead.
	 */
	@Override
	public Location getLocation() {
		Location center = new Location(0, 0);
		if (markers.isEmpty()) {
			return center;
		}
		for (Marker marker : markers) {
			center.add(marker.getLocation());
		}
		center.div((float) markers.size());
		return center;
	}

	@Override
	public void setLocation(float lat, float lng) {
		// TODO Auto-generated method stub
	}

	@Override
	public void setLocation(Location location) {
		// TODO Auto-generated method stub
	}

	/**
	 * return distance between location and the (to the location) closest marker
	 */
	// REVISIT alternatively method could return distance to the of all markers
	// implement both in different methods? examples needed!
	@Override
	public double getDistanceTo(Location location) {
		double minDistance = Double.MAX_VALUE;
		for (Marker marker : markers) {
			minDistance = Math.min(minDistance, marker.getDistanceTo(location));
		}
		return minDistance;
	}

	@Override
	public void setProperties(HashMap<String, Object> properties) {
		this.properties = properties;
	}

	@Override
	public Object setProperty(String key, Object value) {
		return properties.put(key, value);
	}

	@Override
	public HashMap<String, Object> getProperties() {
		return properties;
	}

	@Override
	public Object getProperty(String key) {
		return properties.get(key);
	}

	/** Returns the property as String, or null when absent or of another type. */
	@Override
	public String getStringProperty(String key) {
		Object value = properties.get(key);
		// instanceof is false for null, so no extra null check is needed.
		if (value instanceof String) {
			return (String) value;
		}
		return null;
	}

	/** Returns the property as Integer, or null when absent or of another type. */
	@Override
	public Integer getIntegerProperty(String key) {
		Object value = properties.get(key);
		if (value instanceof Integer) {
			return (Integer) value;
		}
		return null;
	}

	/**
	 * Returns true if at least one marker is hit.
	 */
	@Override
	public boolean isInside(UnfoldingMap map, float checkX, float checkY) {
		boolean inside = false;
		for (Marker marker : markers) {
			inside |= marker.isInside(map, checkX, checkY);
		}
		return inside;
	}

	@Override
	public void draw(UnfoldingMap map) {
		for (Marker marker : markers) {
			marker.draw(map);
		}
	}

	/**
	 * Sets the selected status of all its markers.
	 */
	@Override
	public void setSelected(boolean selected) {
		this.selected = selected;
		for (Marker marker : markers) {
			marker.setSelected(selected);
		}
	}

	/**
	 * Indicates whether this multi marker is selected. This does not necessarily reflect the selected states of all its
	 * markers (i.e. a marker of a MultiMarker can have a different selection status):
	 */
	@Override
	public boolean isSelected() {
		return selected;
	}

	@Override
	public void setHidden(boolean hidden) {
		this.hidden = hidden;
		for (Marker marker : markers) {
			marker.setHidden(hidden);
		}
	}

	@Override
	public boolean isHidden() {
		return hidden;
	}

	public void setColor(int color) {
		for (Marker marker : markers) {
			marker.setColor(color);
		}
	}

	@Override
	public void setStrokeColor(int color) {
		for (Marker marker : markers) {
			marker.setStrokeColor(color);
		}
	}

	@Override
	public void setStrokeWeight(int weight) {
		for (Marker marker : markers) {
			marker.setStrokeWeight(weight);
		}
	}
}
ashr81/unfolding
src/de/fhpotsdam/unfolding/marker/MultiMarker.java
Java
mit
4,593
var validate, $i; var {{ schema | capitalize }}Validator = function(di) { $i = di; validate = $i.validate; return {}; }; module.exports = exports = {{ schema | capitalize }}Validator;
marcelomf/graojs
skeletons/bundle/Validator.js
JavaScript
mit
190
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace Symfony\Component\Console\Command;

use Symfony\Component\Console\Helper\DescriptorHelper;
use Symfony\Component\Console\Input\InputArgument;
use Symfony\Component\Console\Input\InputOption;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Output\OutputInterface;
use Symfony\Component\Console\Input\InputDefinition;

/**
 * ListCommand displays the list of all available commands for the application.
 *
 * @author Fabien Potencier <fabien@symfony.com>
 */
class ListCommand extends Command
{
    /**
     * {@inheritdoc}
     */
    protected function configure()
    {
        $this
            ->setName('list')
            ->setDefinition($this->createDefinition())
            ->setDescription('Lists commands')
            ->setHelp(<<<EOF
The <info>%command.name%</info> command lists all commands:

  <info>php %command.full_name%</info>

You can also display the commands for a specific namespace:

  <info>php %command.full_name% test</info>

You can also output the information in other formats by using the <comment>--format</comment> option:

  <info>php %command.full_name% --format=xml</info>

It's also possible to get raw list of commands (useful for embedding command runner):

  <info>php %command.full_name% --raw</info>
EOF
            )
        ;
    }

    /**
     * {@inheritdoc}
     */
    public function getNativeDefinition()
    {
        return $this->createDefinition();
    }

    /**
     * {@inheritdoc}
     */
    protected function execute(InputInterface $input, OutputInterface $output)
    {
        // The legacy --xml flag is an alias for --format=xml.
        if ($input->getOption('xml')) {
            $input->setOption('format', 'xml');
        }

        $helper = new DescriptorHelper();
        $helper->describe($output, $this->getApplication(), $input->getOption('format'), $input->getOption('raw'));
    }

    /**
     * Creates the input definition shared by configure() and getNativeDefinition().
     *
     * @return InputDefinition
     */
    private function createDefinition()
    {
        return new InputDefinition(array(
            new InputArgument('namespace', InputArgument::OPTIONAL, 'The namespace name'),
            new InputOption('xml', null, InputOption::VALUE_NONE, 'To output list as XML'),
            new InputOption('raw', null, InputOption::VALUE_NONE, 'To output raw command list'),
            new InputOption('format', null, InputOption::VALUE_REQUIRED, 'To output list in other formats'),
        ));
    }
}
tjoskar/odot
vendor/symfony/console/Symfony/Component/Console/Command/ListCommand.php
PHP
mit
2,612
Tinytest.add('options cacheLimit - exceed', function(test) { var sm = new SubsManager({cacheLimit: 2}); sm._addSub(['posts']); sm._addSub(['comments']); sm._addSub(['singlePoint', 'one']); sm._applyCacheLimit(); test.equal(sm._cacheList.length, 2); var subsIds = sm._cacheList.map(function(sub) { return sub.args[0]; }); test.equal(subsIds, ['comments', 'singlePoint']); sm.clear(); }); Tinytest.add('options cacheLimit - not-exceed', function(test) { var sm = new SubsManager({cacheLimit: 10}); sm._addSub(['posts']); sm._addSub(['comments']); sm._addSub(['singlePoint', 'one']); sm._applyCacheLimit(); test.equal(sm._cacheList.length, 3); var subsIds = sm._cacheList.map(function(sub) { return sub.args[0]; }); test.equal(subsIds, ['posts', 'comments', 'singlePoint']); sm.clear(); }); Tinytest.addAsync('options expireIn - expired', function(test, done) { // expireIn 100 millis var sm = new SubsManager({cacheLimit: 20, expireIn: 1/60/10}); sm._addSub(['posts']); sm._addSub(['comments']); test.equal(sm._cacheList.length, 2); Meteor.call('wait', 200, function() { sm._applyExpirations(); test.equal(sm._cacheList.length, 0); sm.clear(); done(); }); }); Tinytest.addAsync('options expireIn - not expired', function(test, done) { // expireIn 2 minutes var sm = new SubsManager({cacheLimit: 20, expireIn: 2}); sm._addSub(['posts']); sm._addSub(['comments']); test.equal(sm._cacheList.length, 2); Meteor.call('wait', 200, function() { sm._applyExpirations(); test.equal(sm._cacheList.length, 2); sm.clear(); done(); }); });
parkerkimbell/subs-manager
tests/options.js
JavaScript
mit
1,642
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace AppBundle\DataFixtures\ORM;

use AppBundle\Entity\Comment;
use AppBundle\Entity\Post;
use AppBundle\Entity\User;
use Doctrine\Common\DataFixtures\AbstractFixture;
use Doctrine\Common\Persistence\ObjectManager;
use Symfony\Component\DependencyInjection\ContainerAwareInterface;
use Symfony\Component\DependencyInjection\ContainerAwareTrait;

/**
 * Defines the sample data to load in the database when running the unit and
 * functional tests.
 *
 * Execute this command to load the data:
 *
 *   $ php bin/console doctrine:fixtures:load
 *
 * See http://symfony.com/doc/current/bundles/DoctrineFixturesBundle/index.html
 *
 * @author Ryan Weaver <weaverryan@gmail.com>
 * @author Javier Eguiluz <javier.eguiluz@gmail.com>
 */
class LoadFixtures extends AbstractFixture implements ContainerAwareInterface
{
    use ContainerAwareTrait;

    /**
     * {@inheritdoc}
     */
    public function load(ObjectManager $manager)
    {
        // Users first: posts/comments reference them via fixture references.
        $this->loadUsers($manager);
        $this->loadPosts($manager);
    }

    // Creates one regular user and one admin, both with password 'kitten',
    // and registers them as fixture references for loadPosts().
    private function loadUsers(ObjectManager $manager)
    {
        $passwordEncoder = $this->container->get('security.password_encoder');

        $johnUser = new User();
        $johnUser->setUsername('john_user');
        $johnUser->setEmail('john_user@symfony.com');
        $encodedPassword = $passwordEncoder->encodePassword($johnUser, 'kitten');
        $johnUser->setPassword($encodedPassword);
        $manager->persist($johnUser);
        $this->addReference('john-user', $johnUser);

        $annaAdmin = new User();
        $annaAdmin->setUsername('anna_admin');
        $annaAdmin->setEmail('anna_admin@symfony.com');
        $annaAdmin->setRoles(['ROLE_ADMIN']);
        $encodedPassword = $passwordEncoder->encodePassword($annaAdmin, 'kitten');
        $annaAdmin->setPassword($encodedPassword);
        $manager->persist($annaAdmin);
        $this->addReference('anna-admin', $annaAdmin);

        $manager->flush();
    }

    // Creates 30 posts (published on consecutive past days), each with 5
    // comments authored by the regular user.
    private function loadPosts(ObjectManager $manager)
    {
        foreach (range(1, 30) as $i) {
            $post = new Post();

            $post->setTitle($this->getRandomPostTitle());
            $post->setSummary($this->getRandomPostSummary());
            $post->setSlug($this->container->get('slugger')->slugify($post->getTitle()));
            $post->setContent($this->getPostContent());
            $post->setAuthor($this->getReference('anna-admin'));
            $post->setPublishedAt(new \DateTime('now - '.$i.'days'));

            foreach (range(1, 5) as $j) {
                $comment = new Comment();

                $comment->setAuthor($this->getReference('john-user'));
                $comment->setPublishedAt(new \DateTime('now + '.($i + $j).'seconds'));
                $comment->setContent($this->getRandomCommentContent());
                $comment->setPost($post);

                $manager->persist($comment);
                $post->addComment($comment);
            }

            $manager->persist($post);
        }

        $manager->flush();
    }

    private function getPostContent()
    {
        return <<<'MARKDOWN'
Lorem ipsum dolor sit amet consectetur adipisicing elit, sed do eiusmod tempor
incididunt ut labore et **dolore magna aliqua**: Duis aute irure dolor in
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia
deserunt mollit anim id est laborum.

  * Ut enim ad minim veniam
  * Quis nostrud exercitation *ullamco laboris*
  * Nisi ut aliquip ex ea commodo consequat

Praesent id fermentum lorem. Ut est lorem, fringilla at accumsan nec, euismod at
nunc. Aenean mattis sollicitudin mattis. Nullam pulvinar vestibulum bibendum.
Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos
himenaeos. Fusce nulla purus, gravida ac interdum ut, blandit eget ex. Duis a
luctus dolor. Integer auctor massa maximus nulla scelerisque accumsan. *Aliquam
ac malesuada* ex. Pellentesque tortor magna, vulputate eu vulputate ut, venenatis
ac lectus. Praesent ut lacinia sem. Mauris a lectus eget felis mollis feugiat.
Quisque efficitur, mi ut semper pulvinar, urna urna blandit massa, eget tincidunt
augue nulla vitae est.

Ut posuere aliquet tincidunt. Aliquam erat volutpat. **Class aptent taciti**
sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Morbi
arcu orci, gravida eget aliquam eu, suscipit et ante. Morbi vulputate metus vel
ipsum finibus, ut dapibus massa feugiat. Vestibulum vel lobortis libero. Sed
tincidunt tellus et viverra scelerisque. Pellentesque tincidunt cursus felis.
Sed in egestas erat.

Aliquam pulvinar interdum massa, vel ullamcorper ante consectetur eu. Vestibulum
lacinia ac enim vel placerat. Integer pulvinar magna nec dui malesuada, nec
congue nisl dictum. Donec mollis nisl tortor, at congue erat consequat a. Nam
tempus elit porta, blandit elit vel, viverra lorem. Sed sit amet tellus
tincidunt, faucibus nisl in, aliquet libero.
MARKDOWN;
    }

    // Shared pool of sentences used for random titles, summaries and comments.
    private function getPhrases()
    {
        return [
            'Lorem ipsum dolor sit amet consectetur adipiscing elit',
            'Pellentesque vitae velit ex',
            'Mauris dapibus risus quis suscipit vulputate',
            'Eros diam egestas libero eu vulputate risus',
            'In hac habitasse platea dictumst',
            'Morbi tempus commodo mattis',
            'Ut suscipit posuere justo at vulputate',
            'Ut eleifend mauris et risus ultrices egestas',
            'Aliquam sodales odio id eleifend tristique',
            'Urna nisl sollicitudin id varius orci quam id turpis',
            'Nulla porta lobortis ligula vel egestas',
            'Curabitur aliquam euismod dolor non ornare',
            'Sed varius a risus eget aliquam',
            'Nunc viverra elit ac laoreet suscipit',
            'Pellentesque et sapien pulvinar consectetur',
        ];
    }

    private function getRandomPostTitle()
    {
        $titles = $this->getPhrases();

        return $titles[array_rand($titles)];
    }

    // Joins 5-11 shuffled phrases and truncates to $maxLength characters.
    private function getRandomPostSummary($maxLength = 255)
    {
        $phrases = $this->getPhrases();

        $numPhrases = mt_rand(6, 12);
        shuffle($phrases);

        return substr(implode(' ', array_slice($phrases, 0, $numPhrases - 1)), 0, $maxLength);
    }

    private function getRandomCommentContent()
    {
        $phrases = $this->getPhrases();

        $numPhrases = mt_rand(2, 15);
        shuffle($phrases);

        return implode(' ', array_slice($phrases, 0, $numPhrases - 1));
    }
}
hkbrain/test
src/AppBundle/DataFixtures/ORM/LoadFixtures.php
PHP
mit
6,826
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@magento.com so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magento.com for more information. * * @category Mage * @package Mage_Downloadable * @copyright Copyright (c) 2006-2016 X.commerce, Inc. and affiliates (http://www.magento.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ /* @var $installer Mage_Catalog_Model_Resource_Eav_Mysql4_Setup */ $installer = $this; $installer->startSetup(); $installer->run(" DROP TABLE IF EXISTS `{$installer->getTable('catalog/product_index_price')}_downloadable_idx`; CREATE TABLE `{$installer->getTable('downloadable/product_price_indexer_idx')}` ( `entity_id` int(10) unsigned NOT NULL, `customer_group_id` smallint(5) unsigned NOT NULL, `website_id` smallint(5) unsigned NOT NULL, `min_price` decimal(12,4) default NULL, `max_price` decimal(12,4) default NULL, PRIMARY KEY (`entity_id`,`customer_group_id`,`website_id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE TABLE `{$installer->getTable('downloadable/product_price_indexer_tmp')}` ( `entity_id` int(10) unsigned NOT NULL, `customer_group_id` smallint(5) unsigned NOT NULL, `website_id` smallint(5) unsigned NOT NULL, `min_price` decimal(12,4) default NULL, `max_price` decimal(12,4) default NULL, PRIMARY KEY (`entity_id`,`customer_group_id`,`website_id`) ) ENGINE=MEMORY DEFAULT CHARSET=utf8; "); $installer->endSetup();
hansbonini/cloud9-magento
www/app/code/core/Mage/Downloadable/sql/downloadable_setup/mysql4-upgrade-1.4.0.0-1.4.0.1.php
PHP
mit
2,062
/******************************************************************************* * Copyright (c) 2015 * * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*******************************************************************************/ package jsettlers.mapcreator.tools.shapes; import jsettlers.common.movable.EDirection; import jsettlers.common.position.ShortPoint2D; public class LineShape extends ShapeType { /** * Constructor */ public LineShape() { super("line"); } @Override public void setAffectedStatus(byte[][] fields, ShortPoint2D start, ShortPoint2D end) { ShortPoint2D current = start; if (shouldDrawAt(current)) { setFieldToMax(fields, current); } while (!current.equals(end)) { EDirection d = EDirection.getApproxDirection(current, end); current = d.getNextHexPoint(current); if (shouldDrawAt(current)) { setFieldToMax(fields, current); } } } protected boolean shouldDrawAt(ShortPoint2D current) { return true; } private static void setFieldToMax(byte[][] fields, ShortPoint2D current) { short x = current.x; short y = current.y; if (x < fields.length && x >= 0 && y >= 0 && y < fields[x].length) { fields[x][y] = Byte.MAX_VALUE; } } @Override public int getSize() { return 1; } }
Peter-Maximilian/settlers-remake
jsettlers.mapcreator/src/main/java/jsettlers/mapcreator/tools/shapes/LineShape.java
Java
mit
2,269
# -- # Copyright 2007 Nominet UK # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either tmexpress or implied. # See the License for the specific language governing permissions and # limitations under the License. # ++ require_relative 'spec_helper' require 'socket' # @TODO@ We also need a test server so we can control behaviour of server to test # different aspects of retry strategy. # Of course, with Ruby's limit of 256 open sockets per process, we'd need to run # the server in a different Ruby process. class TestResolver < Minitest::Test include Dnsruby Thread::abort_on_exception = true GOOD_DOMAIN_NAME = 'example.com' BAD_DOMAIN_NAME = 'dnsruby-test-of-bad-domain-name.blah' PORT = 42138 @@port = PORT def setup Dnsruby::Config.reset end def assert_valid_response(response) assert(response.kind_of?(Message), "Expected response to be a message but was a #{response.class}") end def assert_nil_response(response) assert(response.nil?, "Expected no response but got a #{response.class}:\n#{response}") end def assert_error_is_exception(error, error_class = Exception) assert(error.is_a?(error_class), "Expected error to be an #{error_class}, but was a #{error.class}:\n#{error}") end def assert_nil_error(error) assert(error.nil?, "Expected no error but got a #{error.class}:\n#{error}") end def test_send_message response = Resolver.new.send_message(Message.new("example.com", Types.A)) assert_valid_response(response) end def test_send_message_bang_noerror response, error = Resolver.new.send_message!(Message.new(GOOD_DOMAIN_NAME, Types.A)) assert_nil_error(error) assert_valid_response(response) end def 
test_send_message_bang_error message = Message.new(BAD_DOMAIN_NAME, Types.A) response, error = Resolver.new.send_message!(message) assert_nil_response(response) assert_error_is_exception(error) end def test_send_plain_message resolver = Resolver.new response, error = resolver.send_plain_message(Message.new("cnn.com")) assert_nil_error(error) assert_valid_response(response) m = Message.new(BAD_DOMAIN_NAME) m.header.rd = true response, error = resolver.send_plain_message(m) assert_valid_response(response) assert_error_is_exception(error, NXDomain) end def test_query response = Resolver.new.query("example.com") assert_valid_response(response) end def test_query_bang_noerror response, error = Resolver.new.query!(GOOD_DOMAIN_NAME) assert_nil_error(error) assert_valid_response(response) end def test_query_bang_error response, error = Resolver.new.query!(BAD_DOMAIN_NAME) assert_nil_response(response) assert_error_is_exception(error) end def test_query_async q = Queue.new Resolver.new.send_async(Message.new("example.com", Types.A),q,q) id, response, error = q.pop assert_equal(id, q, "Id wrong!") assert_valid_response(response) assert_nil_error(error) end def test_query_one_duff_server_one_good res = Resolver.new({:nameserver => ["8.8.8.8", "8.8.8.7"]}) res.retry_delay=1 q = Queue.new res.send_async(Message.new("example.com", Types.A),q,q) id, response, error = q.pop assert_equal(id, q, "Id wrong!") assert_valid_response(response) assert_nil_error(error) end # @TODO@ Implement!! But then, why would anyone want to do this? 
# def test_many_threaded_clients # assert(false, "IMPLEMENT!") # end def test_reverse_lookup m = Message.new("8.8.8.8", Types.PTR) r = Resolver.new q=Queue.new r.send_async(m,q,q) id,ret, error=q.pop assert(ret.kind_of?(Message)) no_pointer=true ret.each_answer do |answer| if (answer.type==Types.PTR) no_pointer=false assert(answer.domainname.to_s=~/google/) end end assert(!no_pointer) end # def test_bad_host # res = Resolver.new({:nameserver => "localhost"}) # res.retry_times=1 # res.retry_delay=0 # res.query_timeout = 1 # q = Queue.new # res.send_async(Message.new("example.com", Types.A), q, q) # id, m, err = q.pop # assert(id==q) # assert(m == nil) # assert(err.kind_of?(OtherResolvError) || err.kind_of?(IOError), "OtherResolvError or IOError expected : got #{err.class}") # end # def test_nxdomain resolver = Resolver.new q = Queue.new resolver .send_async(Message.new(BAD_DOMAIN_NAME, Types.A), q, 1) id, m, error = q.pop assert(id==1, "Id should have been 1 but was #{id}") assert(m.rcode == RCode.NXDOMAIN, "Expected NXDOMAIN but got #{m.rcode} instead.") assert_error_is_exception(error, NXDomain) end def test_timeouts # test timeout behaviour for different retry, retrans, total timeout etc. # Problem here is that many sockets will be created for queries which time out. 
# Run a query which will not respond, and check that the timeout works if (!RUBY_PLATFORM=~/darwin/) start=stop=0 retry_times = 3 retry_delay=1 packet_timeout=2 # Work out what time should be, then time it to check expected = ((2**(retry_times-1))*retry_delay) + packet_timeout begin res = Dnsruby::Resolver.new({:nameserver => "10.0.1.128"}) # res = Resolver.new({:nameserver => "213.248.199.17"}) res.packet_timeout=packet_timeout res.retry_times=retry_times res.retry_delay=retry_delay start=Time.now m = res.send_message(Message.new("a.t.dnsruby.validation-test-servers.nominet.org.uk", Types.A)) fail rescue ResolvTimeout stop=Time.now time = stop-start assert(time <= expected * 1.3 && time >= expected * 0.9, "Wrong time take, expected #{expected}, took #{time}") end end end def test_packet_timeout res = Dnsruby::Resolver.new({:nameserver => []}) # res = Resolver.new({:nameserver => "10.0.1.128"}) start=stop=0 retry_times = retry_delay = packet_timeout= 10 query_timeout=2 begin res.packet_timeout=packet_timeout res.retry_times=retry_times res.retry_delay=retry_delay res.query_timeout=query_timeout # Work out what time should be, then time it to check expected = query_timeout start=Time.now m = res.send_message(Message.new("a.t.dnsruby.validation-test-servers.nominet.org.uk", Types.A)) fail rescue Dnsruby::ResolvTimeout stop=Time.now time = stop-start assert(time <= expected * 1.3 && time >= expected * 0.9, "Wrong time take, expected #{expected}, took #{time}") end # end def test_queue_packet_timeout # if (!RUBY_PLATFORM=~/darwin/) res = Dnsruby::Resolver.new({:nameserver => "10.0.1.128"}) # bad = SingleResolver.new("localhost") res.add_server("localhost") expected = 2 res.query_timeout=expected q = Queue.new start = Time.now m = res.send_async(Message.new("a.t.dnsruby.validation-test-servers.nominet.org.uk", Types.A), q, q) id,ret,err = q.pop stop = Time.now assert(id=q) assert(ret==nil) assert(err.class == ResolvTimeout, "#{err.class}, #{err}") time = stop-start 
assert(time <= expected * 1.3 && time >= expected * 0.9, "Wrong time take, expected #{expected}, took #{time}") # end end def test_illegal_src_port # Also test all singleresolver ports ok # Try to set src_port to an illegal value - make sure error raised, and port OK res = Dnsruby::Resolver.new res.port = 56789 tests = [53, 387, 1265, 3210, 48619] tests.each do |bad_port| begin res.src_port = bad_port fail("bad port #{bad_port}") rescue end end assert(res.single_resolvers[0].src_port = 56789) end def test_add_src_port # Try setting and adding port ranges, and invalid ports, and 0. # Also test all singleresolver ports ok res = Resolver.new res.src_port = [56789,56790, 56793] assert(res.src_port == [56789,56790, 56793]) res.src_port = 56889..56891 assert(res.src_port == [56889,56890,56891]) res.add_src_port(60000..60002) assert(res.src_port == [56889,56890,56891,60000,60001,60002]) res.add_src_port([60004,60005]) assert(res.src_port == [56889,56890,56891,60000,60001,60002,60004,60005]) res.add_src_port(60006) assert(res.src_port == [56889,56890,56891,60000,60001,60002,60004,60005,60006]) # Now test invalid src_ports tests = [0, 53, [60007, 53], [60008, 0], 55..100] tests.each do |x| begin res.add_src_port(x) fail() rescue end end assert(res.src_port == [56889,56890,56891,60000,60001,60002,60004,60005,60006]) assert(res.single_resolvers[0].src_port == [56889,56890,56891,60000,60001,60002,60004,60005,60006]) end def test_eventtype_api # @TODO@ TEST THE Resolver::EventType interface! end end # Tests to see that query_raw handles send_plain_message's return values correctly. class TestRawQuery < Minitest::Test KEY_NAME = 'key-name' KEY = '0123456789' ALGO = 'hmac-md5' class CustomError < RuntimeError; end # Returns a new resolver whose send_plain_message method always returns # nil for the response, and a RuntimeError for the error. 
def resolver_returning_error resolver = Dnsruby::Resolver.new def resolver.send_plain_message(_message) [nil, CustomError.new] end resolver end # Returns a new resolver whose send_plain_message is overridden to return # :response_from_send_plain_message instead of a real Dnsruby::Message, # for easy comparison in the tests. def resolver_returning_response resolver = Dnsruby::Resolver.new def resolver.send_plain_message(_message) [:response_from_send_plain_message, nil] end resolver end # Test that when a strategy other than :raise or :return is passed, # an ArgumentError is raised. def test_bad_strategy assert_raises(ArgumentError) do resolver_returning_error.query_raw(Dnsruby::Message.new, :invalid_strategy) end end # Test that when send_plain_message returns an error, # and the error strategy is :raise, query_raw raises an error. def test_raise_error assert_raises(CustomError) do resolver_returning_error.query_raw(Dnsruby::Message.new, :raise) end end # Tests that if you don't specify an error strategy, an error will be # returned rather than raised (i.e. strategy defaults to :return). def test_return_error_is_default _response, error = resolver_returning_error.query_raw(Dnsruby::Message.new) assert error.is_a?(CustomError) end # Tests that when no error is returned, no error is raised. def test_raise_no_error response, _error = resolver_returning_response.query_raw(Dnsruby::Message.new, :raise) assert_equal :response_from_send_plain_message, response end # Test that when send_plain_message returns an error, and the error strategy # is set to :return, then an error is returned. def test_return_error _response, error = resolver_returning_error.query_raw(Dnsruby::Message.new, :return) assert error.is_a?(CustomError) end # Test that when send_plain_message returns a valid and response # and nil error, the same are returned by query_raw. 
def test_return_no_error response, error = resolver_returning_response.query_raw(Dnsruby::Message.new, :return) assert_nil error assert_equal :response_from_send_plain_message, response end def test_2_args_init options = Dnsruby::Resolver.create_tsig_options(KEY_NAME, KEY) assert_equal KEY_NAME, options[:name] assert_equal KEY, options[:key] assert_nil options[:algorithm] end def test_3_args_init options = Dnsruby::Resolver.create_tsig_options(KEY_NAME,KEY,ALGO) assert_equal KEY_NAME, options[:name] assert_equal KEY, options[:key] assert_equal ALGO, options[:algorithm] end def test_threads resolver = Dnsruby::Resolver.new(nameserver: ["8.8.8.8", "8.8.4.4"]) resolver.query("google.com", "MX") resolver.query("google.com", "MX") resolver.query("google.com", "MX") begin resolver.query("googlöe.com", "MX") rescue Dnsruby::ResolvError => e # fine end resolver.query("google.com", "MX") resolver.query("google.com", "MX") begin resolver.query("googlöe.com", "MX") rescue Dnsruby::ResolvError => e # fine end begin resolver.query("googlöe.com", "MX") rescue Dnsruby::ResolvError => e # fine end # Dnsruby::Cache.delete("googlöe.com", "MX") end end
NerdSec/nerdsec.github.io
vendor/bundle/ruby/2.7.0/gems/dnsruby-1.61.3/test/tc_resolver.rb
Ruby
cc0-1.0
13,226
/* * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html */ package org.opendaylight.controller.northbound.commons.utils; import java.util.HashMap; import java.util.Map; import javax.ws.rs.core.Response; import org.opendaylight.controller.containermanager.IContainerAuthorization; import org.opendaylight.controller.sal.authorization.Privilege; import org.opendaylight.controller.sal.authorization.UserLevel; import org.opendaylight.controller.sal.core.Description; import org.opendaylight.controller.sal.core.Name; import org.opendaylight.controller.sal.core.Node; import org.opendaylight.controller.sal.core.NodeConnector; import org.opendaylight.controller.sal.utils.GlobalConstants; import org.opendaylight.controller.sal.utils.ServiceHelper; import org.opendaylight.controller.sal.utils.Status; import org.opendaylight.controller.sal.utils.StatusCode; import org.opendaylight.controller.switchmanager.ISwitchManager; import org.opendaylight.controller.usermanager.IUserManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class NorthboundUtils { private static final Map<StatusCode, Response.Status> ResponseStatusMapping = new HashMap<StatusCode, Response.Status>() { private static final long serialVersionUID = 1L; { put(StatusCode.SUCCESS, Response.Status.OK); put(StatusCode.BADREQUEST, Response.Status.BAD_REQUEST); put(StatusCode.UNAUTHORIZED, Response.Status.UNAUTHORIZED); put(StatusCode.FORBIDDEN, Response.Status.FORBIDDEN); put(StatusCode.NOTFOUND, Response.Status.NOT_FOUND); put(StatusCode.NOTALLOWED, Response.Status.FORBIDDEN); put(StatusCode.NOTACCEPTABLE, Response.Status.NOT_ACCEPTABLE); put(StatusCode.TIMEOUT, Response.Status.GONE); put(StatusCode.CONFLICT, Response.Status.CONFLICT); put(StatusCode.GONE, 
Response.Status.GONE); put(StatusCode.UNSUPPORTED, Response.Status.BAD_REQUEST); put(StatusCode.INTERNALERROR, Response.Status.INTERNAL_SERVER_ERROR); put(StatusCode.NOTIMPLEMENTED, Response.Status.NOT_ACCEPTABLE); put(StatusCode.NOSERVICE, Response.Status.SERVICE_UNAVAILABLE); put(StatusCode.UNDEFINED, Response.Status.BAD_REQUEST); } }; private static final String AUDIT = "audit"; private static final Logger logger = LoggerFactory.getLogger(AUDIT); // Suppress default constructor for noninstantiability private NorthboundUtils() { } /** * Returns Response.Status for a given status. If the status is null or if * the corresponding StatusCode is not present in the ResponseStatusMapping, * it returns null. * * @param status * The Status * @return The Response.Status for a given status */ public static Response.Status getResponseStatus(Status status) { return ResponseStatusMapping.get(status.getCode()); } /** * Returns Response for a given status. If the status provided is null or if * the corresponding StatusCode is not present in the ResponseStatusMapping, * it returns Response with StatusType as INTERNAL_SERVER_ERROR. * * @param status * The Status * @return The Response for a given status. 
*/ public static Response getResponse(Status status) { if ((status == null) || (!ResponseStatusMapping.containsKey(status.getCode()))) { return Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity("Action Result Unknown").build(); } return Response.status(ResponseStatusMapping.get(status.getCode())).entity(status.getDescription()).build(); } /** * Returns whether the current user has the required privilege on the * specified container * * @param userName * The user name * @param containerName * The container name * @param required * Operation to be performed - READ/WRITE * @param bundle * Class from where the function is invoked * @return The Status of the request, either Success or Unauthorized */ public static boolean isAuthorized(String userName, String containerName, Privilege required, Object bundle) { if (containerName.equals(GlobalConstants.DEFAULT.toString())) { IUserManager auth = (IUserManager) ServiceHelper.getGlobalInstance(IUserManager.class, bundle); switch (required) { case WRITE: return (auth.getUserLevel(userName).ordinal() <= UserLevel.NETWORKADMIN.ordinal()); case READ: return (auth.getUserLevel(userName).ordinal() <= UserLevel.NETWORKOPERATOR.ordinal()); default: return false; } } else { IContainerAuthorization auth = (IContainerAuthorization) ServiceHelper.getGlobalInstance( IContainerAuthorization.class, bundle); if (auth == null) { return false; } Privilege current = auth.getResourcePrivilege(userName, containerName); if (required.ordinal() > current.ordinal()) { return false; } } return true; } public static void auditlog(String moduleName, String user, String action, String resource, String containerName) { String auditMsg = ""; String mode = "REST"; if (containerName != null) { auditMsg = "Mode: " + mode + " User " + user + " " + action + " " + moduleName + " " + resource + " in container " + containerName; } else { auditMsg = "Mode: " + mode + " User " + user + " " + action + " " + moduleName + " " + resource; } 
logger.trace(auditMsg); } public static void auditlog(String moduleName, String user, String action, String resource) { auditlog(moduleName, user, action, resource, null); } public static String getNodeDesc(Node node, ISwitchManager switchManager) { Description desc = (Description) switchManager.getNodeProp(node, Description.propertyName); String description = (desc == null) ? "" : desc.getValue(); return (description.isEmpty() || description.equalsIgnoreCase("none")) ? node .toString() : description; } public static String getNodeDesc(Node node, String containerName, Object bundle) { ISwitchManager switchManager = (ISwitchManager) ServiceHelper .getInstance(ISwitchManager.class, containerName, bundle); if (switchManager == null) { return null; } return getNodeDesc(node, switchManager); } public static String getNodeDesc(Node node, Object bundle) { ISwitchManager switchManager = (ISwitchManager) ServiceHelper .getInstance(ISwitchManager.class, GlobalConstants.DEFAULT.toString(), bundle); if (switchManager == null) { return null; } return getNodeDesc(node, switchManager); } public static String getPortName(NodeConnector nodeConnector, String container, Object bundle) { ISwitchManager switchManager = (ISwitchManager) ServiceHelper .getInstance(ISwitchManager.class, container, bundle); return getPortName(nodeConnector, switchManager); } public static String getPortName(NodeConnector nodeConnector, Object bundle) { return getPortName(nodeConnector, GlobalConstants.DEFAULT.toString(), bundle); } public static String getPortName(NodeConnector nodeConnector, ISwitchManager switchManager) { Name ncName = ((Name) switchManager.getNodeConnectorProp(nodeConnector, Name.NamePropName)); String nodeConnectorName = (ncName != null) ? ncName.getValue() : nodeConnector.getNodeConnectorIdAsString(); nodeConnectorName = nodeConnectorName + "@" + getNodeDesc(nodeConnector.getNode(), switchManager); return nodeConnectorName.substring(0, nodeConnectorName.length()); } }
yuyf10/opendaylight-controller
opendaylight/northbound/commons/src/main/java/org/opendaylight/controller/northbound/commons/utils/NorthboundUtils.java
Java
epl-1.0
8,516
/******************************************************************************* * Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved. * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0 * which accompanies this distribution. * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html * and the Eclipse Distribution License is available at * http://www.eclipse.org/org/documents/edl-v10.php. * * Contributors: * Oracle - initial API and implementation from Oracle TopLink ******************************************************************************/ package org.eclipse.persistence.tools.workbench.framework.help; import javax.swing.JOptionPane; import org.eclipse.persistence.tools.workbench.framework.action.AbstractFrameworkAction; import org.eclipse.persistence.tools.workbench.framework.context.WorkbenchContext; /** * There should be one HelpTopicIDWindowAction per WorkbenchWindow. * This command should ONLY be used in "development" mode. */ final class HelpTopicIDWindowAction extends AbstractFrameworkAction { /** * Construct an action that will open the Help Topic ID window. * There is only one window per application. */ HelpTopicIDWindowAction(WorkbenchContext context) { super(context); } /** * initialize stuff */ protected void initialize() { super.initialize(); this.initializeTextAndMnemonic("HELP_TOPIC_ID_WINDOW"); this.initializeToolTipText("HELP_TOPIC_ID_WINDOW.TOOL_TIP"); } /** * ignore the selected nodes */ protected void execute() { // no need for localization - this should only occur in development JOptionPane.showMessageDialog(this.currentWindow(), "Invalid Help Manager: "); } }
RallySoftware/eclipselink.runtime
utils/eclipselink.utils.workbench/framework/source/org/eclipse/persistence/tools/workbench/framework/help/HelpTopicIDWindowAction.java
Java
epl-1.0
1,916
/******************************************************************************* * Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved. * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0 * which accompanies this distribution. * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html * and the Eclipse Distribution License is available at * http://www.eclipse.org/org/documents/edl-v10.php. * * Contributors: * Oracle - initial API and implementation from Oracle TopLink ******************************************************************************/ package org.eclipse.persistence.tools.workbench.test.mappingsmodel.query; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.eclipse.persistence.tools.workbench.test.models.projects.EmployeeProject; import org.eclipse.persistence.tools.workbench.test.utility.TestTools; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; import org.eclipse.persistence.tools.workbench.mappingsmodel.descriptor.MWDescriptor; import org.eclipse.persistence.tools.workbench.mappingsmodel.descriptor.relational.MWTableDescriptor; import org.eclipse.persistence.tools.workbench.mappingsmodel.descriptor.relational.MWUserDefinedQueryKey; import org.eclipse.persistence.tools.workbench.mappingsmodel.mapping.relational.MWDirectToFieldMapping; import org.eclipse.persistence.tools.workbench.mappingsmodel.mapping.relational.MWOneToOneMapping; import org.eclipse.persistence.tools.workbench.mappingsmodel.project.MWProject; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.MWQueryManager; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.MWQueryParameter; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWBasicExpression; import 
org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWCompoundExpression; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWExpression; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWExpressionQueryFormat; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWLiteralArgument; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWNullArgument; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWQueryParameterArgument; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWQueryableArgument; import org.eclipse.persistence.tools.workbench.mappingsmodel.query.relational.MWRelationalReadQuery; /** * */ public class MWExpressionTests extends TestCase { private MWProject employeeProject; public static Test suite() { return new TestSuite(MWExpressionTests.class); } public MWExpressionTests(String name) { super(name); } private MWOneToOneMapping getAddressMapping() { return (MWOneToOneMapping)((MWTableDescriptor) getDescriptorWithShortName("Employee")).mappingNamed("address"); } private MWDirectToFieldMapping getCityMapping() { return (MWDirectToFieldMapping) ((MWTableDescriptor) getDescriptorWithShortName("Address")).mappingNamed("city"); } private MWDirectToFieldMapping getLastNameMapping() { return (MWDirectToFieldMapping) ((MWTableDescriptor) getDescriptorWithShortName("Employee")).mappingNamed("lastName"); } protected void setUp() throws Exception { super.setUp(); this.employeeProject = new EmployeeProject().getProject(); } protected void tearDown() throws Exception { TestTools.clear(this); super.tearDown(); } public void testAddingExpressions() { MWRelationalReadQuery query = buildTestQuery(); MWCompoundExpression mainExpression = ((MWExpressionQueryFormat)query.getQueryFormat()).getExpression(); assertTrue("An expression was not created by default for the query", mainExpression != 
null); MWBasicExpression basicExpression = mainExpression.addBasicExpression(); assertTrue("An expression was not added",mainExpression.expressionsSize() == 1); assertTrue("The first argument is not a queryable argument", MWQueryableArgument.class.isAssignableFrom(basicExpression.getFirstArgument().getClass())); assertTrue("The expression added is not a MWBasicExpression", MWBasicExpression.class.isAssignableFrom(basicExpression.getClass())); assertTrue("The second argument is not a literal argument", MWLiteralArgument.class.isAssignableFrom(basicExpression.getSecondArgument().getClass())); MWTableDescriptor employeeDescriptor = (MWTableDescriptor) query.getProject().descriptorForTypeNamed("org.eclipse.persistence.tools.workbench.test.models.employee.Employee"); MWDirectToFieldMapping lastNameMapping = (MWDirectToFieldMapping) employeeDescriptor.mappingNamed("lastName"); basicExpression.getFirstArgument().setQueryableArgument(lastNameMapping); assertTrue("The queryable argument was not set", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == lastNameMapping); employeeDescriptor.removeMapping(lastNameMapping); assertTrue("The queryable argument was not deleted as a result of the mapping being deleted", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == null); assertTrue("The queryable argument was not deleted as a result of the mapping being deleted", basicExpression.getFirstArgument().getQueryableArgumentElement().getJoinedQueryableElement() == null); } public void testChangingBasicExpressionOperatorType() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().getExpression(0); basicExpression.setOperatorType(MWBasicExpression.IS_NULL); basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().getExpression(0); assertTrue("The 
operator type was not set to IS_NULL", basicExpression.getOperatorType() == MWBasicExpression.IS_NULL); assertTrue("The second argument is not an instanceof MWNullArgument for the unary expression", basicExpression.getSecondArgument() instanceof MWNullArgument); assertTrue("Changing the operator type did change the type of the expression", MWBasicExpression.class.isAssignableFrom(basicExpression.getClass())); assertTrue("The parent of the queryableArgument was not set correctlyafter morphing to a UnaryExpression", basicExpression.getFirstArgument().getParent() == basicExpression); basicExpression.setOperatorType(MWBasicExpression.LIKE_IGNORE_CASE); basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().getExpression(0); assertTrue("The operator type was not set to LIKE_IGNORE_CASE", basicExpression.getOperatorType() == MWBasicExpression.LIKE_IGNORE_CASE); assertTrue("The second argument is null for the binary expression", basicExpression.getSecondArgument() != null); assertTrue("Changing the operator type did change the type of the expression", MWBasicExpression.class.isAssignableFrom(basicExpression.getClass())); assertTrue("The parent of the queryableArgument was not set correctly after morphing to a BinaryExpression", basicExpression.getFirstArgument().getParent() == basicExpression); } public void testChangingCompoundExpressionOperatorType() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWCompoundExpression expression = query.getQueryFormat().getExpression(); assertTrue("The operator was not set to AND by default", expression.getOperatorType().equals(MWCompoundExpression.AND)); expression.setOperatorType(MWCompoundExpression.NOR); assertTrue("The operator was not set to NOR", expression.getOperatorType().equals(MWCompoundExpression.NOR)); } public void testMorphingDescriptor() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = 
(MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); assertTrue("The queryable argument was not set", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == getCityMapping()); assertTrue("The joined queryable argument was not set", basicExpression.getFirstArgument().getQueryableArgumentElement().getJoinedQueryableElement().getQueryable() == getAddressMapping()); //test morphing the address descriptor ((MWTableDescriptor) getDescriptorWithShortName("Address")).asMWAggregateDescriptor(); assertTrue("The first argument was set to null when the reference descriptor was morphed", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() != null); } public void testRemovingDescriptor() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); //test removing the address descriptor MWTableDescriptor descriptor = ((MWTableDescriptor) getDescriptorWithShortName("Address")); descriptor.getProject().removeDescriptor(descriptor); assertTrue("The first argument was not set to null when the reference descriptor was removed, thus set to null", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == null); } public void testRenamingDescriptor() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); MWDirectToFieldMapping cityMapping = getCityMapping(); //test renaming the address descriptor MWTableDescriptor descriptor = ((MWTableDescriptor) getDescriptorWithShortName("Address")); descriptor.getMWClass().setName("MyAddress"); descriptor.setName("MyAddress"); assertTrue("The first argument is no longer the same", 
basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == cityMapping); assertTrue("The descriptor was renamed but the queryable is still holding on to an old copy", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable().getParentDescriptor().getName().equals("MyAddress")); } public void testRemovingMapping() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); //test removing the city mapping MWTableDescriptor descriptor = ((MWTableDescriptor) getDescriptorWithShortName("Address")); descriptor.removeMapping(getCityMapping()); assertTrue("The first argument queryable element was not removed when the mapping was removed", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == null); } public void testUnmappingJoinedQueryable() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); //test unmapping the address mapping MWOneToOneMapping addressMapping = getAddressMapping(); addressMapping.getParentDescriptor().removeMapping(addressMapping); assertTrue("The first argument queryable element was not removed when the joined mapping was unmapped", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == null); } public void testMorhpingJoinedQueryable() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); //test unmapping the address mapping MWOneToOneMapping addressMapping = getAddressMapping(); addressMapping.asMWOneToManyMapping(); assertTrue("The first argument queryable element was removed when the joined mapping 
was morhphed into a 1-many", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == getCityMapping()); } public void testRemovingJoinedQueryable() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); //test removing the address mapping MWOneToOneMapping addressMapping = getAddressMapping(); addressMapping.getParentDescriptor().removeMapping(addressMapping); assertTrue("The first argument queryable element was not removed when the joined mapping was removed", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == null); } public void testRenamingMapping() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); //test renaming the city mapping MWDirectToFieldMapping cityMapping = getCityMapping(); cityMapping.getInstanceVariable().setName("myCity"); cityMapping.setName("myCity"); assertTrue("The first argument queryable element was not renamed", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable().getName().equals("myCity")); } public void testMappingReferenceDescriptorSetToNull() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression = (MWBasicExpression) ((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); MWOneToOneMapping addressMapping = getAddressMapping(); addressMapping.setReferenceDescriptor(null); assertTrue("The first argument queryable element was not removed", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == null); } public void testMappingReferenceDescriptorChanged() { MWRelationalReadQuery query = buildTestQueryWithExpression(); MWBasicExpression basicExpression 
= (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().getExpression(0); MWOneToOneMapping addressMapping = getAddressMapping(); addressMapping.setReferenceDescriptor(getDescriptorWithShortName("Project")); assertTrue("The first argument queryable element was not removed", basicExpression.getFirstArgument().getQueryableArgumentElement().getQueryable() == null); } public void testQueryParameterDeleted() { MWRelationalReadQuery query = buildTestQueryWithExpressionWithParameterArgument(); query.removeParameter(query.getParameter(0)); MWBasicExpression basicExpression = (MWBasicExpression)((MWExpressionQueryFormat)query.getQueryFormat()).getExpression().expressions().next(); assertTrue("The parameter arugment was not deleted when the parameter was deleted", ((MWQueryParameterArgument)basicExpression.getSecondArgument()).getQueryParameter() == null); } public void testRemovingExpression() { MWRelationalReadQuery query = buildTestQueryWithCompoundExpression(); MWCompoundExpression subCompoundExpression = (MWCompoundExpression) query.getQueryFormat().getExpression().expressions().next(); MWExpression expressionToRemove = (MWExpression) subCompoundExpression.expressions().next(); subCompoundExpression.removeExpression(expressionToRemove); assertTrue("", subCompoundExpression.expressionsSize() == 1); } public void testRemovingQueryKey() { MWRelationalReadQuery query = buildTestQueryWithCompoundExpression(); MWCompoundExpression subCompoundExpression = (MWCompoundExpression) query.getQueryFormat().getExpression().expressions().next(); MWBasicExpression basicExpression = (MWBasicExpression) subCompoundExpression.getExpression(1); MWTableDescriptor desc = (MWTableDescriptor) getDescriptorWithShortName("Employee"); desc.removeQueryKey((MWUserDefinedQueryKey) desc.queryKeyNamed("foo")); assertTrue("The queryable was not set to null when the query key was removed", ((MWQueryableArgument) 
basicExpression.getSecondArgument()).getQueryableArgumentElement().getQueryable() == null); } //test removing expressions //test adding sub compound expressions //test removing expressions and make sure sub expressions are removed //test MWCompoundExpression.removeAllSubExpressions private MWRelationalReadQuery buildTestQueryWithExpression() { MWRelationalReadQuery query = buildTestQuery(); MWCompoundExpression mainExpression = ((MWExpressionQueryFormat)query.getQueryFormat()).getExpression(); MWBasicExpression basicExpression = mainExpression.addBasicExpression(); //set up the basic expression address.city equals "" MWOneToOneMapping addressMapping = getAddressMapping(); MWDirectToFieldMapping cityMapping = getCityMapping(); List joinedQueryables = new ArrayList(); joinedQueryables.add(cityMapping); joinedQueryables.add(addressMapping); basicExpression.getFirstArgument().setQueryableArgument(joinedQueryables.iterator()); return query; } private MWRelationalReadQuery buildTestQueryWithCompoundExpression() { MWRelationalReadQuery query = buildTestQuery(); MWCompoundExpression mainExpression = ((MWExpressionQueryFormat)query.getQueryFormat()).getExpression(); MWCompoundExpression subCompoundExpression = mainExpression.addSubCompoundExpression(); MWBasicExpression basicExpression = subCompoundExpression.addBasicExpression(); //set up the basic expression address.city equals "" MWOneToOneMapping addressMapping = getAddressMapping(); MWDirectToFieldMapping cityMapping = getCityMapping(); List joinedQueryables = new ArrayList(); joinedQueryables.add(cityMapping); joinedQueryables.add(addressMapping); basicExpression.getFirstArgument().setQueryableArgument(joinedQueryables.iterator()); MWTableDescriptor desc = (MWTableDescriptor) getDescriptorWithShortName("Employee"); MWUserDefinedQueryKey queryKey = desc.addQueryKey("foo", null); basicExpression.setSecondArgumentToQueryable(); ((MWQueryableArgument) basicExpression.getSecondArgument()).setQueryableArgument(queryKey); 
return query; } private MWRelationalReadQuery buildTestQueryWithExpressionWithParameterArgument() { MWRelationalReadQuery query = buildTestQuery(); MWQueryParameter parameter = query.addParameter(this.employeeProject.typeNamed("java.lang.String")); parameter.setName("lastName"); MWCompoundExpression mainExpression = ((MWExpressionQueryFormat)query.getQueryFormat()).getExpression(); MWBasicExpression basicExpression = mainExpression.addBasicExpression(); //set up the basic expression lastName equals lastName(parameter) MWDirectToFieldMapping cityMapping = getLastNameMapping(); basicExpression.getFirstArgument().setQueryableArgument(cityMapping); basicExpression.setSecondArgumentToParameter(); ((MWQueryParameterArgument)basicExpression.getSecondArgument()).setQueryParameter(parameter); return query; } private MWRelationalReadQuery buildTestQuery() { MWTableDescriptor desc = (MWTableDescriptor) getDescriptorWithShortName("Employee"); MWQueryManager qm = desc.getQueryManager(); MWRelationalReadQuery query = (MWRelationalReadQuery) qm.addReadObjectQuery("test-query"); return query; } public MWDescriptor getDescriptorWithShortName(String name) { for (Iterator stream = this.employeeProject.descriptors(); stream.hasNext(); ) { MWDescriptor descriptor = (MWDescriptor) stream.next(); if (descriptor.getMWClass().shortName().equals(name)) { return descriptor; } } throw new IllegalArgumentException(name); } }
RallySoftware/eclipselink.runtime
utils/eclipselink.utils.workbench.test/mappingsplugin/source/org/eclipse/persistence/tools/workbench/test/mappingsmodel/query/MWExpressionTests.java
Java
epl-1.0
21,076
/*******************************************************************************
 * Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved.
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0
 * which accompanies this distribution.
 * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
 * and the Eclipse Distribution License is available at
 * http://www.eclipse.org/org/documents/edl-v10.php.
 *
 * Contributors:
 *     Oracle - initial API and implementation from Oracle TopLink
 ******************************************************************************/
package org.eclipse.persistence.testing.models.jpa.datatypes.arraypks;

import java.util.UUID;

import javax.persistence.*;

/**
 * Test entity whose primary key is a primitive byte array ({@code byte[]}),
 * mapped to table CMP3_PBYTEARRAYPK_TYPE. A random 16-byte id can be generated
 * from a {@link UUID} via {@link #createRandomId()}.
 */
@Entity
@Table(name = "CMP3_PBYTEARRAYPK_TYPE")
public class PrimByteArrayPKType implements java.io.Serializable {

    /** Number of bytes in a serialized UUID (16). */
    private static final int UUID_LENGTH = 0x10;
    /** Bits in a {@code long} (64). */
    private static final int BITSPERLONG = 0x40;
    /** Bits in a {@code byte} (8). */
    private static final int BITSPERBYTE = 0x8;

    /** Primary key: raw byte-array id. */
    private byte[] id;

    public PrimByteArrayPKType() {
    }

    /**
     * @param primitiveByteArrayData the byte-array value used as this entity's id
     */
    public PrimByteArrayPKType(byte[] primitiveByteArrayData) {
        this.id = primitiveByteArrayData;
    }

    @Id
    public byte[] getId() {
        return id;
    }

    public void setId(byte[] id) {
        this.id = id;
    }

    /**
     * Replaces this entity's id with the 16 bytes of a freshly generated
     * random {@link UUID}.
     */
    public void createRandomId() {
        UUID uuid = UUID.randomUUID();
        id = getBytes(uuid);
    }

    /**
     * Serializes a UUID into its 16-byte big-endian representation:
     * most-significant long first, most-significant byte of each long first.
     *
     * @param u the UUID to convert
     * @return a new 16-byte array holding the UUID's bits
     */
    public static byte[] getBytes(UUID u) {
        byte[] raw = new byte[UUID_LENGTH];
        long msb = u.getMostSignificantBits();
        long lsb = u.getLeastSignificantBits();
        // Convert the 2 longs to 16 bytes, high byte first.
        int i = 0;
        for (int sh = BITSPERLONG - BITSPERBYTE; sh >= 0; sh -= BITSPERBYTE) {
            raw[i++] = (byte) (msb >> sh);
        }
        for (int sh = BITSPERLONG - BITSPERBYTE; sh >= 0; sh -= BITSPERBYTE) {
            raw[i++] = (byte) (lsb >> sh);
        }
        return raw;
    }
}
RallySoftware/eclipselink.runtime
jpa/eclipselink.jpa.test/src/org/eclipse/persistence/testing/models/jpa/datatypes/arraypks/PrimByteArrayPKType.java
Java
epl-1.0
2,086
/******************************************************************************* * Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved. * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0 * which accompanies this distribution. * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html * and the Eclipse Distribution License is available at * http://www.eclipse.org/org/documents/edl-v10.php. * * Contributors: * Oracle - initial API and implementation from Oracle TopLink ******************************************************************************/ package org.eclipse.persistence.internal.eis.adapters.jms; import javax.resource.cci.*; /** * INTERNAL: * Interaction spec for JMS JCA adapter. * * @author Dave McCann * @since OracleAS TopLink 10<i>g</i> (10.0.3) */ public abstract class CciJMSInteractionSpec implements InteractionSpec { protected String destinationURL;// JNDI name (URL) of the destination (queue/topic) protected String destination;// if no JNDI, the destination name protected String messageSelector;// message selector to link the request and response messages /** * Default constructor. */ public CciJMSInteractionSpec() { destinationURL = ""; destination = ""; messageSelector = ""; } /** * Indicates if a JNDI lookup is to be performed to locate the destination. * * @return true if a destination URL has been specified, false otherwise */ public boolean hasDestinationURL() { return (destinationURL != null) && (destinationURL.length() > 0); } /** * Set the destination URL to be looked up. * * @param url - the destination name as registered in JNDI */ public void setDestinationURL(String url) { destinationURL = url; } /** * Return the URL of the destination. 
* * @return the destination name as registered in JNDI */ public String getDestinationURL() { return destinationURL; } /** * Set the name of the destination to be used. This is required if JNDI is * not being used to lookup the destination. * * @param dest */ public void setDestination(String dest) { destination = dest; } /** * Return the name of the destination - required if JNDI is not being used. * * @return the name of the destination to be used */ public String getDestination() { return destination; } /** * Sets the message selector to be used to link the request and response messages. * If this value is not set, it is assumed that the entity processing the request * will set the JMSCorrelationID of the reponse message using the JMSMessageID of * the request messsage. * * @param selector */ public void setMessageSelector(String selector) { messageSelector = selector; } /** * Returns the message selector to be used to link the request and response messages. * * @return the message selector. */ public String getMessageSelector() { return messageSelector; } /** * Returns the message selector in the appropriate format. The selector is * set in JMS message selector format: "JMSCorrelationID = 'message_selector' * * @return formatted message selector - uses JMSCorrelationID */ public String getFormattedMessageSelector() { return "JMSCorrelationID = '" + messageSelector + "'"; } /** * Indicates if the user has set a message selector. * * @return true if a message selector has been set, false otherwise */ public boolean hasMessageSelector() { return (messageSelector != null) && (messageSelector.length() > 0); } /** * Returns the destination URL or class * * @return destination URL or destination class */ public String toString() { if (hasDestinationURL()) { return getClass().getName() + "(" + getDestinationURL() + ")"; } return getClass().getName() + "(" + getDestination() + ")"; } }
RallySoftware/eclipselink.runtime
foundation/org.eclipse.persistence.nosql/src/org/eclipse/persistence/internal/eis/adapters/jms/CciJMSInteractionSpec.java
Java
epl-1.0
4,313
/*
 * Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "SpellScript.h"
#include "Cell.h"
#include "CellImpl.h"
#include "GridNotifiers.h"
#include "GridNotifiersImpl.h"
#include "ulduar.h"

// Broadcast-text group ids used with Talk() for Freya and her three Elders.
enum FreyaYells
{
    // Freya
    SAY_AGGRO                                    = 0,
    SAY_AGGRO_WITH_ELDER                         = 1,
    SAY_SLAY                                     = 2,
    SAY_DEATH                                    = 3,
    SAY_BERSERK                                  = 4,
    SAY_SUMMON_CONSERVATOR                       = 5,
    SAY_SUMMON_TRIO                              = 6,
    SAY_SUMMON_LASHERS                           = 7,
    EMOTE_LIFEBINDERS_GIFT                       = 8,
    EMOTE_ALLIES_OF_NATURE                       = 9,
    EMOTE_GROUND_TREMOR                          = 10,
    EMOTE_IRON_ROOTS                             = 11,

    // Elder Brightleaf / Elder Ironbranch / Elder Stonebark (shared group ids)
    SAY_ELDER_AGGRO                              = 0,
    SAY_ELDER_SLAY                               = 1,
    SAY_ELDER_DEATH                              = 2
};

// Spell ids for Freya, the three Elders and every add in the encounter.
enum FreyaSpells
{
    // Freya
    SPELL_ATTUNED_TO_NATURE                      = 62519,
    SPELL_TOUCH_OF_EONAR                         = 62528,
    SPELL_SUNBEAM                                = 62623,
    SPELL_ENRAGE                                 = 47008,
    SPELL_FREYA_GROUND_TREMOR                    = 62437,
    SPELL_ROOTS_FREYA                            = 62283,
    SPELL_STONEBARK_ESSENCE                      = 62483,
    SPELL_IRONBRANCH_ESSENCE                     = 62484,
    SPELL_BRIGHTLEAF_ESSENCE                     = 62485,
    SPELL_DRAINED_OF_POWER                       = 62467,
    SPELL_SUMMON_EONAR_GIFT                      = 62572,

    // Stonebark
    SPELL_FISTS_OF_STONE                         = 62344,
    SPELL_GROUND_TREMOR                          = 62325,
    SPELL_PETRIFIED_BARK                         = 62337,
    SPELL_PETRIFIED_BARK_DMG                     = 62379,

    // Ironbranch
    SPELL_IMPALE                                 = 62310,
    SPELL_ROOTS_IRONBRANCH                       = 62438,
    SPELL_THORN_SWARM                            = 62285,

    // Brightleaf
    SPELL_FLUX_AURA                              = 62239,
    SPELL_FLUX                                   = 62262,
    SPELL_FLUX_PLUS                              = 62251,
    SPELL_FLUX_MINUS                             = 62252,
    SPELL_SOLAR_FLARE                            = 62240,
    SPELL_UNSTABLE_SUN_BEAM_SUMMON               = 62207, // Trigger 62221

    // Stack Removing of Attuned to Nature
    SPELL_REMOVE_25STACK                         = 62521,
    SPELL_REMOVE_10STACK                         = 62525,
    SPELL_REMOVE_2STACK                          = 62524,

    // Achievement spells
    SPELL_DEFORESTATION_CREDIT                   = 65015,
    SPELL_KNOCK_ON_WOOD_CREDIT                   = 65074,

    // Wave summoning spells
    SPELL_SUMMON_LASHERS                         = 62687,
    SPELL_SUMMON_TRIO                            = 62686,
    SPELL_SUMMON_ANCIENT_CONSERVATOR             = 62685,

    // Detonating Lasher
    SPELL_DETONATE                               = 62598,
    SPELL_FLAME_LASH                             = 62608,

    // Ancient Water Spirit
    SPELL_TIDAL_WAVE                             = 62653,
    SPELL_TIDAL_WAVE_EFFECT                      = 62654,

    // Storm Lasher
    SPELL_LIGHTNING_LASH                         = 62648,
    SPELL_STORMBOLT                              = 62649,

    // Snaplasher
    SPELL_HARDENED_BARK                          = 62664,
    SPELL_BARK_AURA                              = 62663,

    // Ancient Conservator
    SPELL_CONSERVATOR_GRIP                       = 62532,
    SPELL_NATURE_FURY                            = 62589,
    SPELL_SUMMON_PERIODIC                        = 62566,
    SPELL_SPORE_SUMMON_NW                        = 62582, // Not used, triggered by SPELL_SUMMON_PERIODIC
    SPELL_SPORE_SUMMON_NE                        = 62591,
    SPELL_SPORE_SUMMON_SE                        = 62592,
    SPELL_SPORE_SUMMON_SW                        = 62593,

    // Healthly Spore
    SPELL_HEALTHY_SPORE_VISUAL                   = 62538,
    SPELL_GROW                                   = 62559,
    SPELL_POTENT_PHEROMONES                      = 62541,

    // Eonar's Gift
    SPELL_LIFEBINDERS_GIFT                       = 62584,
    SPELL_PHEROMONES                             = 62619,
    SPELL_EONAR_VISUAL                           = 62579,

    // Nature Bomb
    SPELL_NATURE_BOMB                            = 64587,
    SPELL_OBJECT_BOMB                            = 64600,
    SPELL_SUMMON_NATURE_BOMB                     = 64604,

    // Unstable Sun Beam
    SPELL_UNSTABLE_SUN_BEAM                      = 62211,
    SPELL_UNSTABLE_ENERGY                        = 62217,
    SPELL_PHOTOSYNTHESIS                         = 62209,
    SPELL_UNSTABLE_SUN_BEAM_TRIGGERED            = 62243,
    SPELL_FREYA_UNSTABLE_SUNBEAM                 = 62450, // Or maybe 62866?

    // Sun Beam
    SPELL_FREYA_UNSTABLE_ENERGY                  = 62451,
    SPELL_FREYA_UNSTABLE_ENERGY_VISUAL           = 62216,

    // Attuned To Nature spells
    // NOTE(review): these duplicate SPELL_REMOVE_* ids above (62524/62525/62521) under
    // alternative names; both sets are kept for readability at call sites.
    SPELL_ATTUNED_TO_NATURE_2_DOSE_REDUCTION     = 62524,
    SPELL_ATTUNED_TO_NATURE_10_DOSE_REDUCTION    = 62525,
    SPELL_ATTUNED_TO_NATURE_25_DOSE_REDUCTION    = 62521
};

// Creature and gameobject entries summoned or referenced during the fight.
enum FreyaNpcs
{
    NPC_SUN_BEAM                                 = 33170,
    NPC_DETONATING_LASHER                        = 32918,
    NPC_ANCIENT_CONSERVATOR                      = 33203,
    NPC_ANCIENT_WATER_SPIRIT                     = 33202,
    NPC_STORM_LASHER                             = 32919,
    NPC_SNAPLASHER                               = 32916,
    NPC_NATURE_BOMB                              = 34129,
    NPC_EONARS_GIFT                              = 33228,
    NPC_HEALTHY_SPORE                            = 33215,
    NPC_UNSTABLE_SUN_BEAM                        = 33050,
    NPC_IRON_ROOTS                               = 33088,
    NPC_STRENGTHENED_IRON_ROOTS                  = 33168,

    OBJECT_NATURE_BOMB                           = 194902
};

enum FreyaActions
{
    // Sent by Freya to each surviving Elder when she dies.
    ACTION_ELDER_FREYA_KILLED                    = 1
};

// EventMap ids; ranges are disjoint per script so they never collide.
enum FreyaEvents
{
    // Freya
    EVENT_WAVE                                   = 1,
    EVENT_EONAR_GIFT                             = 2,
    EVENT_NATURE_BOMB                            = 3,
    EVENT_UNSTABLE_ENERGY                        = 4,
    EVENT_STRENGTHENED_IRON_ROOTS                = 5,
    EVENT_GROUND_TREMOR                          = 6,
    EVENT_SUNBEAM                                = 7,
    EVENT_ENRAGE                                 = 8,

    // Elder Stonebark
    EVENT_TREMOR                                 = 9,
    EVENT_BARK                                   = 10,
    EVENT_FISTS                                  = 11,

    // Elder Ironbranch
    EVENT_IMPALE                                 = 12,
    EVENT_IRON_ROOTS                             = 13,
    EVENT_THORN_SWARM                            = 14,

    // Elder Brightleaf
    EVENT_SOLAR_FLARE                            = 15,
    EVENT_UNSTABLE_SUN_BEAM                      = 16,
    EVENT_FLUX                                   = 17
};

enum Misc
{
    WAVE_TIME                                    = 60000, // Normal wave is one minute
    TIME_DIFFERENCE                              = 10000, // If difference between waveTime and WAVE_TIME is bigger then TIME_DIFFERENCE, schedule EVENT_WAVE in 10 seconds
    DATA_GETTING_BACK_TO_NATURE                  = 1,
    DATA_KNOCK_ON_WOOD                           = 2
};

// Iron Roots: stationary snare add spawned on a rooted player. Killing it
// frees the player (removes both root auras) and removes its own corpse.
class npc_iron_roots : public CreatureScript
{
    public:
        npc_iron_roots() : CreatureScript("npc_iron_roots") { }

        struct npc_iron_rootsAI : public ScriptedAI
        {
            npc_iron_rootsAI(Creature* creature) : ScriptedAI(creature)
            {
                SetCombatMovement(false);
                // Immune to Death Grip so the roots cannot be pulled off their player.
                me->ApplySpellImmune(0, IMMUNITY_ID, 49560, true); // Death Grip
                me->setFaction(14);
                me->SetReactState(REACT_PASSIVE);
            }

            void IsSummonedBy(Unit* summoner) override
            {
                if (summoner->GetTypeId() != TYPEID_PLAYER)
                    return;
                // Summoner is a player, who should have root aura on self
                summonerGUID = summoner->GetGUID();
                me->SetFacingToObject(summoner);
                me->SetInCombatWith(summoner);
            }

            void JustDied(Unit* /*killer*/) override
            {
                // Free the rooted player no matter which script applied the root.
                if (Player* target = ObjectAccessor::GetPlayer(*me, summonerGUID))
                {
                    target->RemoveAurasDueToSpell(SPELL_ROOTS_IRONBRANCH);
                    target->RemoveAurasDueToSpell(SPELL_ROOTS_FREYA);
                }
                // Removing the corpse immediately keeps the arena clean.
                me->RemoveCorpse(false);
            }

        private:
            ObjectGuid summonerGUID; // player the roots were summoned on
        };

        CreatureAI* GetAI(Creature* creature) const override
        {
            return new npc_iron_rootsAI(creature);
        }
};
// Freya: main encounter controller. Handles the three optional Elders (hard
// mode), periodic add waves, the Attuned to Nature stack mechanic, and the
// Deforestation / Knock on Wood achievement bookkeeping.
class boss_freya : public CreatureScript
{
    public:
        boss_freya() : CreatureScript("boss_freya") { }

        struct boss_freyaAI : public BossAI
        {
            boss_freyaAI(Creature* creature) : BossAI(creature, BOSS_FREYA)
            {
                Initialize();
                memset(elementalTimer, 0, sizeof(elementalTimer));
                diffTimer = 0;
                attunedToNature = 0;
            }

            // Resets per-attempt state; also called from Reset() on wipe.
            void Initialize()
            {
                trioWaveCount = 0;
                trioWaveController = 0;
                waveCount = 0;
                elderCount = 0;
                for (uint8 i = 0; i < 3; ++i)
                    for (uint8 n = 0; n < 2; ++n)
                        ElementalGUID[i][n].Clear();
                for (uint8 i = 0; i < 6; ++i)
                    for (uint8 n = 0; n < 2; ++n)
                        deforestation[i][n] = 0;
                for (uint8 n = 0; n < 2; ++n)
                {
                    checkElementalAlive[n] = true;
                    trioDefeated[n] = false;
                }
                for (uint8 n = 0; n < 3; ++n)
                    random[n] = false;
            }

            ObjectGuid ElementalGUID[3][2];  // [elemental kind][trio wave index]
            uint32 deforestation[6][2];      // ring buffer: [slot][0]=death time, [slot][1]=type bitmask
            uint32 elementalTimer[2];        // ms since first death in each trio wave
            uint32 diffTimer;                // total elapsed combat time (Deforestation timing)
            uint8 trioWaveCount;             // which trio wave (0/1) is being tracked
            uint8 trioWaveController;        // round-robin slot for storing trio GUIDs
            uint8 waveCount;                 // add waves spawned so far
            uint8 elderCount;                // Elders alive at pull (hard-mode level)
            uint8 attunedToNature;           // cached stack count for DATA_GETTING_BACK_TO_NATURE
            bool checkElementalAlive[2];     // true while no trio member of that wave has died
            bool trioDefeated[2];            // trio wave fully defeated within the window
            bool random[3];                  // wave kinds already used this rotation

            void Reset() override
            {
                _Reset();
                Initialize();
            }

            void KilledUnit(Unit* who) override
            {
                if (who->GetTypeId() == TYPEID_PLAYER)
                    Talk(SAY_SLAY);
            }

            // Freya is never allowed to die to raw damage: the killing blow is
            // intercepted and JustDied() runs the scripted "defeat" sequence instead.
            void DamageTaken(Unit* who, uint32& damage) override
            {
                if (damage >= me->GetHealth())
                {
                    damage = 0;
                    JustDied(who);
                }
            }

            void EnterCombat(Unit* who) override
            {
                _EnterCombat();
                DoZoneInCombat();
                Creature* Elder[3];
                for (uint8 n = 0; n < 3; ++n)
                {
                    Elder[n] = ObjectAccessor::GetCreature(*me, instance->GetGuidData(BOSS_BRIGHTLEAF + n));
                    if (Elder[n] && Elder[n]->IsAlive())
                    {
                        // Elder stops acting on its own while empowering Freya.
                        me->AddAura(SPELL_DRAINED_OF_POWER, Elder[n]);
                        // NOTE(review): every alive Elder casts the *Ironbranch* essence here,
                        // and the per-Elder essences are cast again below — Elders 0/1 end up
                        // applying two essences. Verify against intended hard-mode behavior.
                        Elder[n]->CastSpell(me, SPELL_IRONBRANCH_ESSENCE, true);
                        Elder[n]->RemoveLootMode(LOOT_MODE_DEFAULT); //! Why?
                        Elder[n]->AI()->AttackStart(who);
                        Elder[n]->AddThreat(who, 250.0f);
                        Elder[n]->SetInCombatWith(who);
                        ++elderCount;
                    }
                }

                // Each surviving Elder grants its essence and enables one extra ability.
                if (Elder[0] && Elder[0]->IsAlive())
                {
                    Elder[0]->CastSpell(me, SPELL_BRIGHTLEAF_ESSENCE, true);
                    events.ScheduleEvent(EVENT_UNSTABLE_ENERGY, urand(10000, 20000));
                }

                if (Elder[1] && Elder[1]->IsAlive())
                {
                    Elder[1]->CastSpell(me, SPELL_STONEBARK_ESSENCE, true);
                    events.ScheduleEvent(EVENT_GROUND_TREMOR, urand(10000, 20000));
                }

                if (Elder[2] && Elder[2]->IsAlive())
                {
                    Elder[2]->CastSpell(me, SPELL_IRONBRANCH_ESSENCE, true);
                    events.ScheduleEvent(EVENT_STRENGTHENED_IRON_ROOTS, urand(10000, 20000));
                }

                if (elderCount == 0)
                    Talk(SAY_AGGRO);
                else
                    Talk(SAY_AGGRO_WITH_ELDER);

                // Start at 150 stacks; adds dying removes stacks (2/10/25 per kind).
                me->CastCustomSpell(SPELL_ATTUNED_TO_NATURE, SPELLVALUE_AURA_STACK, 150, me, true);

                events.ScheduleEvent(EVENT_WAVE, 10000);
                events.ScheduleEvent(EVENT_EONAR_GIFT, 25000);
                events.ScheduleEvent(EVENT_ENRAGE, 600000);
                events.ScheduleEvent(EVENT_SUNBEAM, urand(5000, 15000));
            }

            // Achievement queries from the instance/achievement scripts.
            uint32 GetData(uint32 type) const override
            {
                switch (type)
                {
                    case DATA_GETTING_BACK_TO_NATURE:
                        return attunedToNature;
                    case DATA_KNOCK_ON_WOOD:
                        return elderCount;
                }

                return 0;
            }

            void UpdateAI(uint32 diff) override
            {
                if (!UpdateVictim())
                    return;

                events.Update(diff);

                if (me->HasUnitState(UNIT_STATE_CASTING))
                    return;

                while (uint32 eventId = events.ExecuteEvent())
                {
                    switch (eventId)
                    {
                        case EVENT_ENRAGE:
                            Talk(SAY_BERSERK);
                            DoCast(me, SPELL_ENRAGE);
                            break;
                        case EVENT_SUNBEAM:
                            if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true))
                                DoCast(target, SPELL_SUNBEAM);
                            events.ScheduleEvent(EVENT_SUNBEAM, urand(10000, 15000));
                            break;
                        case EVENT_NATURE_BOMB:
                            DoCastAOE(SPELL_SUMMON_NATURE_BOMB, true);
                            events.ScheduleEvent(EVENT_NATURE_BOMB, urand(10000, 12000));
                            break;
                        case EVENT_UNSTABLE_ENERGY:
                            if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true))
                                DoCast(target, SPELL_FREYA_UNSTABLE_SUNBEAM, true);
                            events.ScheduleEvent(EVENT_UNSTABLE_ENERGY, urand(15000, 20000));
                            break;
                        case EVENT_WAVE:
                            SpawnWave();
                            if (waveCount <= 6) // If set to 6 The Bombs appear during the Final Add wave
                                events.ScheduleEvent(EVENT_WAVE, WAVE_TIME);
                            else
                                events.ScheduleEvent(EVENT_NATURE_BOMB, urand(10000, 20000));
                            break;
                        case EVENT_EONAR_GIFT:
                            Talk(EMOTE_LIFEBINDERS_GIFT);
                            DoCast(me, SPELL_SUMMON_EONAR_GIFT);
                            events.ScheduleEvent(EVENT_EONAR_GIFT, urand(40000, 50000));
                            break;
                        case EVENT_STRENGTHENED_IRON_ROOTS:
                            Talk(EMOTE_IRON_ROOTS);
                            if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true, -SPELL_ROOTS_FREYA))
                                target->CastSpell(target, SPELL_ROOTS_FREYA, true); // This must be cast by Target self
                            events.ScheduleEvent(EVENT_STRENGTHENED_IRON_ROOTS, urand(12000, 20000));
                            break;
                        case EVENT_GROUND_TREMOR:
                            Talk(EMOTE_GROUND_TREMOR);
                            DoCastAOE(SPELL_FREYA_GROUND_TREMOR);
                            events.ScheduleEvent(EVENT_GROUND_TREMOR, urand(25000, 28000));
                            break;
                    }

                    if (me->HasUnitState(UNIT_STATE_CASTING))
                        return;
                }

                if (!me->HasAura(SPELL_TOUCH_OF_EONAR))
                    me->CastSpell(me, SPELL_TOUCH_OF_EONAR, true);

                // For achievement check
                if (Aura* aura = me->GetAura(SPELL_ATTUNED_TO_NATURE))
                    attunedToNature = aura->GetStackAmount();
                else
                    attunedToNature = 0;

                diffTimer += diff; // For getting time difference for Deforestation achievement

                // Elementals must be killed within 12 seconds of each other, or they will all revive and heal
                Creature* Elemental[3][2];
                for (uint8 i = 0; i < 2; ++i)
                {
                    if (checkElementalAlive[i])
                        elementalTimer[i] = 0;
                    else
                    {
                        elementalTimer[i] += diff;
                        for (uint8 k = 0; k < 3; ++k)
                            Elemental[k][i] = ObjectAccessor::GetCreature(*me, ElementalGUID[k][i]);
                        if (elementalTimer[i] > 12000)
                        {
                            // Window expired: heal/revive any trio members still around.
                            if (!trioDefeated[i]) // Do *NOT* merge this bool with bool few lines below!
                            {
                                if (Elemental[0][i] && Elemental[1][i] && Elemental[2][i])
                                {
                                    for (uint8 n = 0; n < 3; ++n)
                                    {
                                        if (Elemental[n][i]->IsAlive())
                                            Elemental[n][i]->SetHealth(Elemental[n][i]->GetMaxHealth());
                                        else
                                            Elemental[n][i]->Respawn();
                                    }
                                }
                            }
                            checkElementalAlive[i] = true;
                        }
                        else
                        {
                            // Within the window: if all three are dead the trio counts as defeated
                            // and removes 10 Attuned to Nature stacks (once per member).
                            if (!trioDefeated[i])
                            {
                                if (Elemental[0][i] && Elemental[1][i] && Elemental[2][i])
                                {
                                    if (Elemental[0][i]->isDead() && Elemental[1][i]->isDead() && Elemental[2][i]->isDead())
                                    {
                                        for (uint8 n = 0; n < 3; ++n)
                                        {
                                            summons.Despawn(Elemental[n][i]);
                                            Elemental[n][i]->DespawnOrUnsummon(5000);
                                            trioDefeated[i] = true;
                                            Elemental[n][i]->CastSpell(me, SPELL_REMOVE_10STACK, true);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }

                DoMeleeAttackIfReady();
            }

            // Check if all Trio NPCs are dead - achievement check
            // Records one lasher death (type is a binary mask: 1/2/4 per kind) and
            // grants Deforestation when two full trios (mask sum 14 per trio, 6 deaths)
            // die within 10 seconds.
            void LasherDead(uint32 type) // Type must be in format of a binary mask
            {
                uint8 n = 0;

                // Handling received data
                for (uint8 i = 0; i < 5; ++i) // We have created "instances" for keeping informations about last 6 death lashers - needed because of respawning
                {
                    deforestation[i][0] = deforestation[(i + 1)][0]; // Time
                    deforestation[i][1] = deforestation[(i + 1)][1]; // Type
                }
                deforestation[5][0] = diffTimer;
                deforestation[5][1] = type;

                // Check for achievement completion
                if (deforestation[0][1]) // Check for proper functionality of binary masks (overflow would not be problem)
                {
                    for (uint8 i = 0; i < 6; ++i) // Count binary mask
                    {
                        n += deforestation[i][1];
                    }

                    if ((deforestation[5][0] - deforestation[0][0]) < 10000) // Time check
                    {
                        if (n == 14 && instance) // Binary mask check - verification of lasher types
                        {
                            instance->DoCastSpellOnPlayers(SPELL_DEFORESTATION_CREDIT);
                        }
                    }
                }
            }

            // Random order of spawning waves
            // Picks a wave kind (0 lashers / 1 trio / 2 conservator) without repeating
            // any kind until all three have been used.
            int GetWaveId()
            {
                if (random[0] && random[1] && random[2])
                    for (uint8 n = 0; n < 3; ++n)
                        random[n] = false;

                uint8 randomId = urand(0, 2);

                while (random[randomId])
                    randomId = urand(0, 2);

                random[randomId] = true;
                return randomId;
            }

            void SpawnWave()
            {
                switch (GetWaveId())
                {
                    case 0:
                        Talk(SAY_SUMMON_LASHERS);
                        for (uint8 n = 0; n < 10; ++n)
                            DoCast(SPELL_SUMMON_LASHERS);
                        break;
                    case 1:
                        Talk(SAY_SUMMON_TRIO);
                        DoCast(SPELL_SUMMON_TRIO);
                        trioWaveCount++;
                        break;
                    case 2:
                        Talk(SAY_SUMMON_CONSERVATOR);
                        DoCast(SPELL_SUMMON_ANCIENT_CONSERVATOR);
                        break;
                }

                Talk(EMOTE_ALLIES_OF_NATURE);
                waveCount++;
            }

            void JustDied(Unit* /*killer*/) override
            {
                //! Freya's chest is dynamically spawned on death by different spells.
                const uint32 summonSpell[2][4] =
                {
                              /* 0Elder, 1Elder, 2Elder, 3Elder */
                    /* 10N */ {62950, 62952, 62953, 62954},
                    /* 25N */ {62955, 62956, 62957, 62958}
                };

                me->CastSpell((Unit*)NULL, summonSpell[me->GetMap()->GetDifficultyID() - DIFFICULTY_10_N][elderCount], true);

                Talk(SAY_DEATH);

                // Freya does not actually die (see DamageTaken): she becomes friendly,
                // clears combat and despawns shortly after her defeat dialogue.
                me->SetReactState(REACT_PASSIVE);
                _JustDied();
                me->RemoveAllAuras();
                me->AttackStop();
                me->setFaction(35);
                me->DeleteThreatList();
                me->CombatStop(true);
                me->DespawnOrUnsummon(7500);
                me->CastSpell(me, SPELL_KNOCK_ON_WOOD_CREDIT, true);

                // Release any surviving Elders back to their own scripts.
                for (uint8 n = 0; n < 3; ++n)
                {
                    Creature* Elder = ObjectAccessor::GetCreature(*me, instance->GetGuidData(BOSS_BRIGHTLEAF + n));
                    if (Elder && Elder->IsAlive())
                    {
                        Elder->RemoveAllAuras();
                        Elder->AttackStop();
                        Elder->CombatStop(true);
                        Elder->DeleteThreatList();
                        Elder->AI()->DoAction(ACTION_ELDER_FREYA_KILLED);
                    }
                }
            }

            void JustSummoned(Creature* summoned) override
            {
                switch (summoned->GetEntry())
                {
                    case NPC_SNAPLASHER:
                    case NPC_ANCIENT_WATER_SPIRIT:
                    case NPC_STORM_LASHER:
                        // Store trio GUIDs round-robin so the revive check can find them.
                        ElementalGUID[trioWaveController][trioWaveCount] = summoned->GetGUID();
                        summons.Summon(summoned);
                        ++trioWaveController;
                        if (trioWaveController > 2)
                            trioWaveController = 0;
                        break;
                    case NPC_DETONATING_LASHER:
                    case NPC_ANCIENT_CONSERVATOR:
                    default:
                        summons.Summon(summoned);
                        break;
                }

                // Need to have it there, or summoned units would do nothing untill attacked
                if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 250.0f, true))
                {
                    summoned->AI()->AttackStart(target);
                    summoned->AddThreat(target, 250.0f);
                    DoZoneInCombat(summoned);
                }
            }

            void SummonedCreatureDies(Creature* summoned, Unit* who) override
            {
                switch (summoned->GetEntry())
                {
                    case NPC_DETONATING_LASHER:
                        summoned->CastSpell(me, SPELL_REMOVE_2STACK, true);
                        summoned->CastSpell(who, SPELL_DETONATE, true);
                        summoned->DespawnOrUnsummon(5000);
                        summons.Despawn(summoned);
                        break;
                    case NPC_ANCIENT_CONSERVATOR:
                        summoned->CastSpell(me, SPELL_REMOVE_25STACK, true);
                        summoned->DespawnOrUnsummon(5000);
                        summons.Despawn(summoned);
                        break;
                }
            }
        };

        CreatureAI* GetAI(Creature* creature) const override
        {
            return GetUlduarAI<boss_freyaAI>(creature);
        }
};
// Elder Brightleaf: casts Unstable Sun Beams, Solar Flare (strength scales with
// his own Flux stacks) and cycles the Flux aura. Inert while Drained of Power.
class boss_elder_brightleaf : public CreatureScript
{
    public:
        boss_elder_brightleaf() : CreatureScript("boss_elder_brightleaf") { }

        struct boss_elder_brightleafAI : public BossAI
        {
            boss_elder_brightleafAI(Creature* creature) : BossAI(creature, BOSS_BRIGHTLEAF) { }

            void Reset() override
            {
                _Reset();
                if (me->HasAura(SPELL_DRAINED_OF_POWER))
                    me->RemoveAurasDueToSpell(SPELL_DRAINED_OF_POWER);
                events.ScheduleEvent(EVENT_SOLAR_FLARE, urand(5000, 7000));
                events.ScheduleEvent(EVENT_UNSTABLE_SUN_BEAM, urand(7000, 12000));
                events.ScheduleEvent(EVENT_FLUX, 5000);
            }

            void KilledUnit(Unit* who) override
            {
                if (who->GetTypeId() == TYPEID_PLAYER)
                    Talk(SAY_ELDER_SLAY);
            }

            void JustDied(Unit* /*killer*/) override
            {
                _JustDied();
                Talk(SAY_ELDER_DEATH);
            }

            void EnterCombat(Unit* /*who*/) override
            {
                _EnterCombat();
                // Silent while empowering Freya (hard mode pull).
                if (!me->HasAura(SPELL_DRAINED_OF_POWER))
                    Talk(SAY_ELDER_AGGRO);
            }

            void UpdateAI(uint32 diff) override
            {
                // Drained of Power: fully inert while Freya has his essence.
                if (!UpdateVictim() || me->HasAura(SPELL_DRAINED_OF_POWER))
                    return;

                events.Update(diff);

                if (me->HasUnitState(UNIT_STATE_CASTING))
                    return;

                while (uint32 eventId = events.ExecuteEvent())
                {
                    switch (eventId)
                    {
                        case EVENT_UNSTABLE_SUN_BEAM:
                            me->CastSpell(me, SPELL_UNSTABLE_SUN_BEAM_SUMMON, true);
                            events.ScheduleEvent(EVENT_UNSTABLE_SUN_BEAM, urand(10000, 15000));
                            break;
                        case EVENT_SOLAR_FLARE:
                        {
                            // Flare hits as many targets as current Flux stacks.
                            uint8 stackAmount = 0;
                            if (Aura* aura = me->GetAura(SPELL_FLUX_AURA))
                                stackAmount = aura->GetStackAmount();
                            me->CastCustomSpell(SPELL_SOLAR_FLARE, SPELLVALUE_MAX_TARGETS, stackAmount, me, false);
                            events.ScheduleEvent(EVENT_SOLAR_FLARE, urand(5000, 10000));
                            break;
                        }
                        case EVENT_FLUX:
                            // Re-roll Flux with a random 1-8 stack count.
                            me->RemoveAurasDueToSpell(SPELL_FLUX_AURA);
                            me->AddAura(SPELL_FLUX_AURA, me);
                            if (Aura* Flux = me->GetAura(SPELL_FLUX_AURA))
                                Flux->SetStackAmount(urand(1, 8));
                            events.ScheduleEvent(EVENT_FLUX, 7500);
                            break;
                    }

                    if (me->HasUnitState(UNIT_STATE_CASTING))
                        return;
                }
                DoMeleeAttackIfReady();
            }

            void DoAction(int32 action) override
            {
                switch (action)
                {
                    case ACTION_ELDER_FREYA_KILLED:
                        // Freya defeated: count as done and fade out.
                        me->DespawnOrUnsummon(10000);
                        _JustDied();
                        break;
                }
            }
        };

        CreatureAI* GetAI(Creature* creature) const override
        {
            return GetUlduarAI<boss_elder_brightleafAI>(creature);
        }
};

// Elder Stonebark: Ground Tremor / Fists of Stone / Petrified Bark, which
// reflects melee damage back at the attacker. Inert while Drained of Power.
class boss_elder_stonebark : public CreatureScript
{
    public:
        boss_elder_stonebark() : CreatureScript("boss_elder_stonebark") { }

        struct boss_elder_stonebarkAI : public BossAI
        {
            boss_elder_stonebarkAI(Creature* creature) : BossAI(creature, BOSS_STONEBARK) { }

            void Reset() override
            {
                _Reset();
                if (me->HasAura(SPELL_DRAINED_OF_POWER))
                    me->RemoveAurasDueToSpell(SPELL_DRAINED_OF_POWER);
                events.ScheduleEvent(EVENT_TREMOR, urand(10000, 12000));
                events.ScheduleEvent(EVENT_FISTS, urand(25000, 35000));
                events.ScheduleEvent(EVENT_BARK, urand(37500, 40000));
            }

            void KilledUnit(Unit* who) override
            {
                if (who->GetTypeId() == TYPEID_PLAYER)
                    Talk(SAY_ELDER_SLAY);
            }

            void JustDied(Unit* /*killer*/) override
            {
                _JustDied();
                Talk(SAY_ELDER_DEATH);
            }

            void EnterCombat(Unit* /*who*/) override
            {
                _EnterCombat();
                if (!me->HasAura(SPELL_DRAINED_OF_POWER))
                    Talk(SAY_ELDER_AGGRO);
            }

            // Petrified Bark: absorb the hit and throw the damage back at the attacker.
            void DamageTaken(Unit* who, uint32& damage) override
            {
                if (who == me)
                    return;

                if (me->HasAura(SPELL_PETRIFIED_BARK))
                {
                    int32 reflect = damage;
                    who->CastCustomSpell(who, SPELL_PETRIFIED_BARK_DMG, &reflect, NULL, NULL, true);
                    damage = 0;
                }
            }

            void UpdateAI(uint32 diff) override
            {
                if (!UpdateVictim() || me->HasAura(SPELL_DRAINED_OF_POWER))
                    return;

                events.Update(diff);

                if (me->HasUnitState(UNIT_STATE_CASTING))
                    return;

                while (uint32 eventId = events.ExecuteEvent())
                {
                    switch (eventId)
                    {
                        case EVENT_BARK:
                            DoCast(me, SPELL_PETRIFIED_BARK);
                            events.ScheduleEvent(EVENT_BARK, urand(30000, 50000));
                            break;
                        case EVENT_FISTS:
                            DoCastVictim(SPELL_FISTS_OF_STONE);
                            events.ScheduleEvent(EVENT_FISTS, urand(20000, 30000));
                            break;
                        case EVENT_TREMOR:
                            // Tremor is suppressed while Fists of Stone is active.
                            if (!me->HasAura(SPELL_FISTS_OF_STONE))
                                DoCastVictim(SPELL_GROUND_TREMOR);
                            events.ScheduleEvent(EVENT_TREMOR, urand(10000, 20000));
                            break;
                    }

                    if (me->HasUnitState(UNIT_STATE_CASTING))
                        return;
                }
                DoMeleeAttackIfReady();
            }

            void DoAction(int32 action) override
            {
                switch (action)
                {
                    case ACTION_ELDER_FREYA_KILLED:
                        me->DespawnOrUnsummon(10000);
                        _JustDied();
                        break;
                }
            }
        };

        CreatureAI* GetAI(Creature* creature) const override
        {
            return GetUlduarAI<boss_elder_stonebarkAI>(creature);
        }
};

// Elder Ironbranch: Impale, Iron Roots (roots a random un-rooted player) and
// Thorn Swarm. Inert while Drained of Power.
class boss_elder_ironbranch : public CreatureScript
{
    public:
        boss_elder_ironbranch() : CreatureScript("boss_elder_ironbranch") { }

        struct boss_elder_ironbranchAI : public BossAI
        {
            boss_elder_ironbranchAI(Creature* creature) : BossAI(creature, BOSS_IRONBRANCH) { }

            void Reset() override
            {
                _Reset();
                if (me->HasAura(SPELL_DRAINED_OF_POWER))
                    me->RemoveAurasDueToSpell(SPELL_DRAINED_OF_POWER);
                events.ScheduleEvent(EVENT_IMPALE, urand(18000, 22000));
                events.ScheduleEvent(EVENT_IRON_ROOTS, urand(12000, 17000));
                events.ScheduleEvent(EVENT_THORN_SWARM, urand(7500, 12500));
            }

            void KilledUnit(Unit* who) override
            {
                if (who->GetTypeId() == TYPEID_PLAYER)
                    Talk(SAY_ELDER_SLAY);
            }

            void JustDied(Unit* /*killer*/) override
            {
                _JustDied();
                Talk(SAY_ELDER_DEATH);
            }

            void EnterCombat(Unit* /*who*/) override
            {
                _EnterCombat();
                if (!me->HasAura(SPELL_DRAINED_OF_POWER))
                    Talk(SAY_ELDER_AGGRO);
            }

            void UpdateAI(uint32 diff) override
            {
                if (!UpdateVictim() || me->HasAura(SPELL_DRAINED_OF_POWER))
                    return;

                events.Update(diff);

                if (me->HasUnitState(UNIT_STATE_CASTING))
                    return;

                while (uint32 eventId = events.ExecuteEvent())
                {
                    switch (eventId)
                    {
                        case EVENT_IMPALE:
                            DoCastVictim(SPELL_IMPALE);
                            events.ScheduleEvent(EVENT_IMPALE, urand(15000, 25000));
                            break;
                        case EVENT_IRON_ROOTS:
                            // Negative aura filter: only players not already rooted.
                            if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true, -SPELL_ROOTS_IRONBRANCH))
                                target->CastSpell(target, SPELL_ROOTS_IRONBRANCH, true);
                            events.ScheduleEvent(EVENT_IRON_ROOTS, urand(10000, 20000));
                            break;
                        case EVENT_THORN_SWARM:
                            DoCastVictim(SPELL_THORN_SWARM);
                            events.ScheduleEvent(EVENT_THORN_SWARM, urand(8000, 13000));
                            break;
                    }

                    if (me->HasUnitState(UNIT_STATE_CASTING))
                        return;
                }
                DoMeleeAttackIfReady();
            }

            void DoAction(int32 action) override
            {
                switch (action)
                {
                    case ACTION_ELDER_FREYA_KILLED:
                        me->DespawnOrUnsummon(10000);
                        _JustDied();
                        break;
                }
            }
        };

        CreatureAI* GetAI(Creature* creature) const override
        {
            return GetUlduarAI<boss_elder_ironbranchAI>(creature);
        }
};

// Detonating Lasher: untauntable add that whips its target and periodically
// hops to a new random target with a threat bonus; explodes on death (handled
// in boss_freya::SummonedCreatureDies).
class npc_detonating_lasher : public CreatureScript
{
    public:
        npc_detonating_lasher() : CreatureScript("npc_detonating_lasher") { }

        struct npc_detonating_lasherAI : public ScriptedAI
        {
            npc_detonating_lasherAI(Creature* creature) : ScriptedAI(creature)
            {
                Initialize();
                me->ApplySpellImmune(0, IMMUNITY_STATE, SPELL_AURA_MOD_TAUNT, true);
            }

            void Initialize()
            {
                lashTimer = 5000;
                changeTargetTimer = 7500;
            }

            void Reset() override
            {
                Initialize();
            }

            void UpdateAI(uint32 diff) override
            {
                if (!UpdateVictim())
                    return;

                if (lashTimer <= diff)
                {
                    DoCast(SPELL_FLAME_LASH);
                    lashTimer = urand(5000, 10000);
                }
                else
                    lashTimer -= diff;

                if (changeTargetTimer <= diff)
                {
                    if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true))
                    {
                        // Switching to other target - modify aggro of new target by 20% from current target's aggro
                        me->AddThreat(target, me->getThreatManager().getThreat(me->GetVictim(), false) * 1.2f);
                        AttackStart(target);
                    }
                    changeTargetTimer = urand(5000, 10000);
                }
                else
                    changeTargetTimer -= diff;

                DoMeleeAttackIfReady();
            }

        private:
            uint32 lashTimer;         // ms until next Flame Lash
            uint32 changeTargetTimer; // ms until next target swap
        };

        CreatureAI* GetAI(Creature* creature) const override
        {
            return new npc_detonating_lasherAI(creature);
        }
};
: public ScriptedAI { npc_ancient_water_spiritAI(Creature* creature) : ScriptedAI(creature) { Initialize(); instance = me->GetInstanceScript(); if (Creature* Freya = ObjectAccessor::GetCreature(*me, instance->GetGuidData(BOSS_FREYA))) waveCount = ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->trioWaveCount; else waveCount = 0; } void Initialize() { tidalWaveTimer = 10000; } void Reset() override { Initialize(); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; if (tidalWaveTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true)) { DoCast(target, SPELL_TIDAL_WAVE); DoCast(target, SPELL_TIDAL_WAVE_EFFECT, true); } tidalWaveTimer = urand(12000, 25000); } else tidalWaveTimer -= diff; DoMeleeAttackIfReady(); } void JustDied(Unit* /*killer*/) override { if (Creature* Freya = ObjectAccessor::GetCreature(*me, instance->GetGuidData(BOSS_FREYA))) { ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->checkElementalAlive[waveCount] = false; ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->LasherDead(1); } } private: InstanceScript* instance; uint32 tidalWaveTimer; uint8 waveCount; }; CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<npc_ancient_water_spiritAI>(creature); } }; class npc_storm_lasher : public CreatureScript { public: npc_storm_lasher() : CreatureScript("npc_storm_lasher") { } struct npc_storm_lasherAI : public ScriptedAI { npc_storm_lasherAI(Creature* creature) : ScriptedAI(creature) { Initialize(); instance = me->GetInstanceScript(); if (Creature* Freya = ObjectAccessor::GetCreature(*me, instance->GetGuidData(BOSS_FREYA))) waveCount = ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->trioWaveCount; else waveCount = 0; } void Initialize() { lightningLashTimer = 10000; stormboltTimer = 5000; } void Reset() override { Initialize(); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; if (lightningLashTimer <= diff) { DoCast(SPELL_LIGHTNING_LASH); lightningLashTimer = 
urand(7000, 14000); } else lightningLashTimer -= diff; if (stormboltTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true)) DoCast(target, SPELL_STORMBOLT); stormboltTimer = urand(8000, 12000); } else stormboltTimer -= diff; DoMeleeAttackIfReady(); } void JustDied(Unit* /*killer*/) override { if (Creature* Freya = ObjectAccessor::GetCreature(*me, instance->GetGuidData(BOSS_FREYA))) { ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->checkElementalAlive[waveCount] = false; ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->LasherDead(2); } } private: InstanceScript* instance; uint32 lightningLashTimer; uint32 stormboltTimer; uint8 waveCount; }; CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<npc_storm_lasherAI>(creature); } }; class npc_snaplasher : public CreatureScript { public: npc_snaplasher() : CreatureScript("npc_snaplasher") { } struct npc_snaplasherAI : public ScriptedAI { npc_snaplasherAI(Creature* creature) : ScriptedAI(creature) { instance = me->GetInstanceScript(); if (Creature* Freya = ObjectAccessor::GetCreature(*me, instance->GetGuidData(BOSS_FREYA))) waveCount = ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->trioWaveCount; else waveCount = 0; } void UpdateAI(uint32 /*diff*/) override { if (!UpdateVictim()) return; if (!me->HasAura(SPELL_BARK_AURA)) DoCast(SPELL_HARDENED_BARK); DoMeleeAttackIfReady(); } void JustDied(Unit* /*killer*/) override { if (Creature* Freya = ObjectAccessor::GetCreature(*me, instance->GetGuidData(BOSS_FREYA))) { ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->checkElementalAlive[waveCount] = false; ENSURE_AI(boss_freya::boss_freyaAI, Freya->AI())->LasherDead(4); } } private: InstanceScript* instance; uint8 waveCount; }; CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<npc_snaplasherAI>(creature); } }; class npc_ancient_conservator : public CreatureScript { public: npc_ancient_conservator() : CreatureScript("npc_ancient_conservator") { } 
struct npc_ancient_conservatorAI : public ScriptedAI { npc_ancient_conservatorAI(Creature* creature) : ScriptedAI(creature) { Initialize(); } void Initialize() { natureFuryTimer = 7500; healthySporeTimer = 3500; } void Reset() override { Initialize(); SummonHealthySpores(2); } void SummonHealthySpores(uint8 sporesCount) { for (uint8 n = 0; n < sporesCount; ++n) { DoCast(SPELL_SUMMON_PERIODIC); DoCast(SPELL_SPORE_SUMMON_NE); DoCast(SPELL_SPORE_SUMMON_SE); DoCast(SPELL_SPORE_SUMMON_SW); } } void EnterCombat(Unit* who) override { DoCast(who, SPELL_CONSERVATOR_GRIP, true); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; if (healthySporeTimer <= diff) { SummonHealthySpores(1); healthySporeTimer = urand(15000, 17500); } else healthySporeTimer -= diff; if (natureFuryTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true, -SPELL_NATURE_FURY)) DoCast(target, SPELL_NATURE_FURY); me->AddAura(SPELL_CONSERVATOR_GRIP, me); natureFuryTimer = 5000; } else natureFuryTimer -= diff; DoMeleeAttackIfReady(); } private: uint32 natureFuryTimer; uint32 healthySporeTimer; }; CreatureAI* GetAI(Creature* creature) const override { return new npc_ancient_conservatorAI(creature); } }; class npc_sun_beam : public CreatureScript { public: npc_sun_beam() : CreatureScript("npc_sun_beam") { } struct npc_sun_beamAI : public ScriptedAI { npc_sun_beamAI(Creature* creature) : ScriptedAI(creature) { SetCombatMovement(false); me->SetReactState(REACT_PASSIVE); DoCastAOE(SPELL_FREYA_UNSTABLE_ENERGY_VISUAL, true); DoCast(SPELL_FREYA_UNSTABLE_ENERGY); } }; CreatureAI* GetAI(Creature* creature) const override { return new npc_sun_beamAI(creature); } }; class npc_healthy_spore : public CreatureScript { public: npc_healthy_spore() : CreatureScript("npc_healthy_spore") { } struct npc_healthy_sporeAI : public ScriptedAI { npc_healthy_sporeAI(Creature* creature) : ScriptedAI(creature) { SetCombatMovement(false); me->SetFlag(UNIT_FIELD_FLAGS, 
UNIT_FLAG_NOT_SELECTABLE | UNIT_FLAG_NON_ATTACKABLE | UNIT_FLAG_IMMUNE_TO_PC); me->SetReactState(REACT_PASSIVE); DoCast(me, SPELL_HEALTHY_SPORE_VISUAL); DoCast(me, SPELL_POTENT_PHEROMONES); DoCast(me, SPELL_GROW); lifeTimer = urand(22000, 30000); } void UpdateAI(uint32 diff) override { if (lifeTimer <= diff) { me->RemoveAurasDueToSpell(SPELL_GROW); me->DespawnOrUnsummon(2200); lifeTimer = urand(22000, 30000); } else lifeTimer -= diff; } private: uint32 lifeTimer; }; CreatureAI* GetAI(Creature* creature) const override { return new npc_healthy_sporeAI(creature); } }; class npc_eonars_gift : public CreatureScript { public: npc_eonars_gift() : CreatureScript("npc_eonars_gift") { } struct npc_eonars_giftAI : public ScriptedAI { npc_eonars_giftAI(Creature* creature) : ScriptedAI(creature) { SetCombatMovement(false); lifeBindersGiftTimer = 12000; DoCast(me, SPELL_GROW); DoCast(me, SPELL_PHEROMONES, true); DoCast(me, SPELL_EONAR_VISUAL, true); } void UpdateAI(uint32 diff) override { if (lifeBindersGiftTimer <= diff) { me->RemoveAurasDueToSpell(SPELL_GROW); DoCast(SPELL_LIFEBINDERS_GIFT); me->DespawnOrUnsummon(2500); lifeBindersGiftTimer = 12000; } else lifeBindersGiftTimer -= diff; } private: uint32 lifeBindersGiftTimer; }; CreatureAI* GetAI(Creature* creature) const override { return new npc_eonars_giftAI(creature); } }; class npc_nature_bomb : public CreatureScript { public: npc_nature_bomb() : CreatureScript("npc_nature_bomb") { } struct npc_nature_bombAI : public ScriptedAI { npc_nature_bombAI(Creature* creature) : ScriptedAI(creature) { SetCombatMovement(false); bombTimer = urand(8000, 10000); DoCast(SPELL_OBJECT_BOMB); } void UpdateAI(uint32 diff) override { if (bombTimer <= diff) { if (GameObject* go = me->FindNearestGameObject(OBJECT_NATURE_BOMB, 1.0f)) { DoCast(me, SPELL_NATURE_BOMB); me->RemoveGameObject(go, true); me->RemoveFromWorld(); } bombTimer = 10000; } else bombTimer -= diff; } private: uint32 bombTimer; }; CreatureAI* GetAI(Creature* creature) const 
override { return new npc_nature_bombAI(creature); } }; class npc_unstable_sun_beam : public CreatureScript { public: npc_unstable_sun_beam() : CreatureScript("npc_unstable_sun_beam") { } struct npc_unstable_sun_beamAI : public ScriptedAI { npc_unstable_sun_beamAI(Creature* creature) : ScriptedAI(creature) { SetCombatMovement(false); despawnTimer = urand(7000, 12000); instance = me->GetInstanceScript(); DoCast(me, SPELL_PHOTOSYNTHESIS); DoCast(me, SPELL_UNSTABLE_SUN_BEAM); me->SetReactState(REACT_PASSIVE); } void UpdateAI(uint32 diff) override { if (despawnTimer <= diff) { DoCastAOE(SPELL_UNSTABLE_ENERGY, true); me->DisappearAndDie(); } else despawnTimer -= diff; } void SpellHitTarget(Unit* target, SpellInfo const* spell) override { if (target && spell->Id == SPELL_UNSTABLE_ENERGY) { target->RemoveAurasDueToSpell(SPELL_UNSTABLE_SUN_BEAM); target->RemoveAurasDueToSpell(SPELL_UNSTABLE_SUN_BEAM_TRIGGERED); } } private: InstanceScript* instance; uint32 despawnTimer; }; CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<npc_unstable_sun_beamAI>(creature); } }; class spell_freya_attuned_to_nature_dose_reduction : public SpellScriptLoader { public: spell_freya_attuned_to_nature_dose_reduction() : SpellScriptLoader("spell_freya_attuned_to_nature_dose_reduction") { } class spell_freya_attuned_to_nature_dose_reduction_SpellScript : public SpellScript { PrepareSpellScript(spell_freya_attuned_to_nature_dose_reduction_SpellScript); void HandleScript(SpellEffIndex /*effIndex*/) { Unit* target = GetHitUnit(); switch (GetSpellInfo()->Id) { case SPELL_ATTUNED_TO_NATURE_2_DOSE_REDUCTION: if (target->HasAura(GetEffectValue())) for (uint8 n = 0; n < 2; ++n) target->RemoveAuraFromStack(GetEffectValue()); break; case SPELL_ATTUNED_TO_NATURE_10_DOSE_REDUCTION: if (target->HasAura(GetEffectValue())) for (uint8 n = 0; n < 10; ++n) target->RemoveAuraFromStack(GetEffectValue()); break; case SPELL_ATTUNED_TO_NATURE_25_DOSE_REDUCTION: if 
(target->HasAura(GetEffectValue())) for (uint8 n = 0; n < 25; ++n) target->RemoveAuraFromStack(GetEffectValue()); break; default: break; } } void Register() override { OnEffectHitTarget += SpellEffectFn(spell_freya_attuned_to_nature_dose_reduction_SpellScript::HandleScript, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); } }; SpellScript* GetSpellScript() const override { return new spell_freya_attuned_to_nature_dose_reduction_SpellScript(); } }; class spell_freya_iron_roots : public SpellScriptLoader { public: spell_freya_iron_roots() : SpellScriptLoader("spell_freya_iron_roots") { } class spell_freya_iron_roots_SpellScript : public SpellScript { PrepareSpellScript(spell_freya_iron_roots_SpellScript); void HandleSummon(SpellEffIndex effIndex) { PreventHitDefaultEffect(effIndex); uint32 entry = uint32(GetSpellInfo()->GetEffect(effIndex)->MiscValue); Position pos = GetCaster()->GetPosition(); // Not good at all, but this prevents having roots in a different position then player if (Creature* Roots = GetCaster()->SummonCreature(entry, pos)) GetCaster()->NearTeleportTo(Roots->GetPositionX(), Roots->GetPositionY(), Roots->GetPositionZ(), GetCaster()->GetOrientation()); } void Register() override { OnEffectHit += SpellEffectFn(spell_freya_iron_roots_SpellScript::HandleSummon, EFFECT_0, SPELL_EFFECT_SUMMON); } }; SpellScript* GetSpellScript() const override { return new spell_freya_iron_roots_SpellScript(); } }; class achievement_getting_back_to_nature : public AchievementCriteriaScript { public: achievement_getting_back_to_nature() : AchievementCriteriaScript("achievement_getting_back_to_nature") { } bool OnCheck(Player* /*player*/, Unit* target) override { return target && target->GetAI()->GetData(DATA_GETTING_BACK_TO_NATURE) >= 25; } }; class achievement_knock_on_wood : public AchievementCriteriaScript { public: achievement_knock_on_wood() : AchievementCriteriaScript("achievement_knock_on_wood") { } bool OnCheck(Player* /*player*/, Unit* target) override { return target && 
target->GetAI()->GetData(DATA_KNOCK_ON_WOOD) >= 1; } }; class achievement_knock_knock_on_wood : public AchievementCriteriaScript { public: achievement_knock_knock_on_wood() : AchievementCriteriaScript("achievement_knock_knock_on_wood") { } bool OnCheck(Player* /*player*/, Unit* target) override { return target && target->GetAI()->GetData(DATA_KNOCK_ON_WOOD) >= 2; } }; class achievement_knock_knock_knock_on_wood : public AchievementCriteriaScript { public: achievement_knock_knock_knock_on_wood() : AchievementCriteriaScript("achievement_knock_knock_knock_on_wood") { } bool OnCheck(Player* /*player*/, Unit* target) override { return target && target->GetAI()->GetData(DATA_KNOCK_ON_WOOD) == 3; } }; void AddSC_boss_freya() { new boss_freya(); new boss_elder_brightleaf(); new boss_elder_ironbranch(); new boss_elder_stonebark(); new npc_ancient_conservator(); new npc_snaplasher(); new npc_storm_lasher(); new npc_ancient_water_spirit(); new npc_detonating_lasher(); new npc_sun_beam(); new npc_nature_bomb(); new npc_eonars_gift(); new npc_healthy_spore(); new npc_unstable_sun_beam(); new npc_iron_roots(); new spell_freya_attuned_to_nature_dose_reduction(); new spell_freya_iron_roots(); new achievement_getting_back_to_nature(); new achievement_knock_on_wood(); new achievement_knock_knock_on_wood(); new achievement_knock_knock_knock_on_wood(); }
sidneeginger/TrinityCore
src/server/scripts/Northrend/Ulduar/Ulduar/boss_freya.cpp
C++
gpl-2.0
59,692
//////////////////////////////////////////////////////////////////////////////////////// // // Nestopia - NES/Famicom emulator written in C++ // // Copyright (C) 2003-2007 Martin Freij // // This file is part of Nestopia. // // Nestopia is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // Nestopia is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Nestopia; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA // //////////////////////////////////////////////////////////////////////////////////////// #ifndef NST_MAPPER_243_H #define NST_MAPPER_243_H #ifdef NST_PRAGMA_ONCE #pragma once #endif namespace Nes { namespace Core { class Mapper243 : public Mapper { public: explicit Mapper243(Context& c) : Mapper(c,PROM_MAX_64K|CROM_MAX_128K) {} private: ~Mapper243() {} void SubReset(bool); void SubSave(State::Saver&) const; void SubLoad(State::Loader&); NES_DECL_POKE( 4100 ); NES_DECL_POKE( 4101 ); uint command; }; } } #endif
Joride/nestopia
core/mapper/NstMapper243.hpp
C++
gpl-2.0
1,553
<?php
/**
 * File containing the CreatedRole class
 *
 * @copyright Copyright (C) eZ Systems AS. All rights reserved.
 * @license For full copyright and license information view LICENSE file distributed with this source code.
 * @version 2014.11.1
 */

namespace eZ\Publish\Core\REST\Server\Values;

use eZ\Publish\API\Repository\Values\ValueObject;

/**
 * Struct representing a freshly created role.
 *
 * Plain value object with no behavior of its own: it wraps a Role so the
 * REST server can distinguish a "just created" role from an ordinary one
 * (presumably so the response visitor can emit a 201 Created style answer —
 * verify against the corresponding output visitor).
 */
class CreatedRole extends ValueObject
{
    /**
     * The created role
     *
     * @var \eZ\Publish\API\Repository\Values\User\Role
     */
    public $role;
}
wnsonsa/destin-foot
vendor/ezsystems/ezpublish-kernel/eZ/Publish/Core/REST/Server/Values/CreatedRole.php
PHP
gpl-2.0
569
/*
    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

// Unit test for tbb::parallel_while: multiplies two N x N matrices in
// parallel, one row per work item, and checks the result against a serial
// reference multiplication.

#include "tbb/parallel_while.h"
#include "harness.h"

const int N = 200;

typedef int Element;

//! Representation of an array index with only those signatures required by parallel_while.
// The enum-tagged lifecycle (LIVE -> INITIALIZED -> DEAD) lets the destructor
// and accessors ASSERT that parallel_while never reads an unset value or
// touches a destroyed one.
class MinimalArgumentType {
    void operator=( const MinimalArgumentType& );      // non-assignable, as parallel_while does not require assignment
    long my_value;                                     // the wrapped index; valid only in state INITIALIZED
    enum { DEAD=0xDEAD, LIVE=0x2718, INITIALIZED=0x3141 } my_state;
public:
    ~MinimalArgumentType() {
        // Must not already be destroyed (double-destruction check).
        ASSERT( my_state==LIVE||my_state==INITIALIZED, NULL );
        my_state = DEAD;
    }
    MinimalArgumentType() {
        my_state = LIVE;
    }
    void set_value( long i ) {
        ASSERT( my_state==LIVE||my_state==INITIALIZED, NULL );
        my_value = i;
        my_state = INITIALIZED;
    }
    long get_value() const {
        // Reading before set_value is a bug in the caller.
        ASSERT( my_state==INITIALIZED, NULL );
        return my_value;
    }
};

// Stream of even indices 0, 2, 4, ... below my_limit; the matching odd
// indices are injected later via parallel_while::add (see MatrixMultiplyBody).
class IntegerStream {
    long my_limit;
    long my_index;
public:
    IntegerStream( long n ) : my_limit(n), my_index(0) {}
    // parallel_while stream protocol: fill v and return true, or return
    // false when the stream is exhausted.
    bool pop_if_present( MinimalArgumentType& v ) {
        if( my_index>=my_limit )
            return false;
        v.set_value( my_index );
        my_index+=2;   // step by 2: odd rows are fed through my_while.add()
        return true;
    }
};

// Body for parallel_while: computes row i of c = a * b.
class MatrixMultiplyBody: NoAssign {
    Element (*a)[N];   // input matrix a (non-owning pointer to rows)
    Element (*b)[N];   // input matrix b
    Element (*c)[N];   // output matrix c
    const int n;       // active sub-matrix size (n <= N)
    tbb::parallel_while<MatrixMultiplyBody>& my_while;   // back-reference used to self-feed extra work
public:
    typedef MinimalArgumentType argument_type;
    void operator()( argument_type i_arg ) const {
        long i = i_arg.get_value();
        // Even rows enqueue their odd successor, exercising
        // parallel_while::add (work added while the loop is running).
        if( (i&1)==0 && i+1<N ) {
            MinimalArgumentType value;
            value.set_value(i+1);
            my_while.add( value );
        }
        // Row i of the classic triple loop: c[i] = sum_k a[i][k] * b[k].
        for( int j=0; j<n; ++j )
            c[i][j] = 0;
        for( int k=0; k<n; ++k ) {
            Element aik = a[i][k];
            for( int j=0; j<n; ++j )
                c[i][j] += aik*b[k][j];
        }
    }
    MatrixMultiplyBody( tbb::parallel_while<MatrixMultiplyBody>& w, Element c_[N][N], Element a_[N][N], Element b_[N][N], int n_ ) :
        a(a_), b(b_), c(c_), n(n_), my_while(w)
    {}
};

// Parallel matrix multiply: even rows come from the stream, odd rows are
// added by the body itself.
void WhileMatrixMultiply( Element c[N][N], Element a[N][N], Element b[N][N], int n ) {
    IntegerStream stream( N );
    tbb::parallel_while<MatrixMultiplyBody> w;
    MatrixMultiplyBody body(w,c,a,b,n);
    w.run( stream, body );
}

#include "tbb/tick_count.h"
#include <cstdlib>
#include <cstdio>

using namespace std;

static long Iterations = 5;   // timed repetitions of the parallel multiply

// Serial reference implementation used to validate the parallel result.
static void SerialMatrixMultiply( Element c[N][N], Element a[N][N], Element b[N][N], int n ) {
    for( int i=0; i<n; ++i ) {
        for( int j=0; j<n; ++j )
            c[i][j] = 0;
        for( int k=0; k<n; ++k ) {
            Element aik = a[i][k];
            for( int j=0; j<n; ++j )
                c[i][j] += aik*b[k][j];
        }
    }
}

// Deterministic fill: element (i,j) = (i*n+j) XOR salt, so different salts
// yield different (but reproducible) matrices.
static void InitializeMatrix( Element x[N][N], int n, int salt ) {
    for( int i=0; i<n; ++i )
        for( int j=0; j<n; ++j )
            x[i][j] = (i*n+j)^salt;
}

// Shared test matrices: C = A*B in parallel, D = A*B serially.
static Element A[N][N], B[N][N], C[N][N], D[N][N];

// Runs the timed parallel multiply for an n x n sub-problem and verifies it
// element-by-element against the serial reference.
static void Run( int nthread, int n ) {
    /* Initialize matrices */
    InitializeMatrix(A,n,5);
    InitializeMatrix(B,n,10);
    InitializeMatrix(C,n,0);
    InitializeMatrix(D,n,15);
    tbb::tick_count t0 = tbb::tick_count::now();
    for( long i=0; i<Iterations; ++i ) {
        WhileMatrixMultiply( C, A, B, n );
    }
    tbb::tick_count t1 = tbb::tick_count::now();
    SerialMatrixMultiply( D, A, B, n );
    // Check result
    for( int i=0; i<n; ++i )
        for( int j=0; j<n; ++j )
            ASSERT( C[i][j]==D[i][j], NULL );
    REMARK("time=%g\tnthread=%d\tn=%d\n",(t1-t0).seconds(),nthread,n);
}

#include "tbb/task_scheduler_init.h"
#include "harness_cpu.h"

// Harness entry point: sweeps thread counts [MinThread, MaxThread] and, for
// each, problem sizes N/4, N/2, 3N/4, N.
int TestMain () {
    if( MinThread<1 ) {
        REPORT("number of threads must be positive\n");
        exit(1);
    }
    for( int p=MinThread; p<=MaxThread; ++p ) {
        tbb::task_scheduler_init init( p );
        for( int n=N/4; n<=N; n+=N/4 )
            Run(p,n);
        // Test that all workers sleep when no work
        TestCPUUserTime(p);
    }
    return Harness::Done;
}
dusek/tbb
src/test/test_parallel_while.cpp
C++
gpl-2.0
5,445
<?php
/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to license@zend.com so we can send you a copy immediately.
 *
 * @category   Zend
 * @package    Zend_Service
 * @subpackage DeveloperGarden
 * @copyright  Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 * @version    $Id$
 */

/**
 * @see Zend_Service_DeveloperGarden_Response_ResponseAbstract
 */
// The leading '#' keeps the require available for tooling while relying on
// the autoloader at runtime (standard ZF1 convention).
#require_once 'Zend/Service/DeveloperGarden/Response/ResponseAbstract.php';

/**
 * Typed marker for the BaseUserService getAccountBalance SOAP response.
 *
 * Intentionally empty: all error-code/message handling is inherited from
 * ResponseAbstract; the concrete class exists so the SOAP classmap can bind
 * this operation's response to a distinct PHP type.
 *
 * @category   Zend
 * @package    Zend_Service
 * @subpackage DeveloperGarden
 * @copyright  Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
 * @author     Marco Kaiser
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 */
class Zend_Service_DeveloperGarden_Response_BaseUserService_GetAccountBalanceResponse
    extends Zend_Service_DeveloperGarden_Response_ResponseAbstract
{
}
dvh11er/mage-cheatcode
magento/lib/Zend/Service/DeveloperGarden/Response/BaseUserService/GetAccountBalanceResponse.php
PHP
gpl-2.0
1,317
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /** \file \ingroup u2w */ #include "WorldSocket.h" // must be first to make ACE happy with ACE includes in it #include <zlib.h> #include "Common.h" #include "DatabaseEnv.h" #include "Log.h" #include "Opcodes.h" #include "WorldPacket.h" #include "WorldSession.h" #include "Player.h" #include "Vehicle.h" #include "ObjectMgr.h" #include "GuildMgr.h" #include "Group.h" #include "Guild.h" #include "World.h" #include "ObjectAccessor.h" #include "BattlegroundMgr.h" #include "OutdoorPvPMgr.h" #include "MapManager.h" #include "SocialMgr.h" #include "zlib.h" #include "ScriptMgr.h" #include "Transport.h" #include "WardenWin.h" #include "WardenMac.h" namespace { std::string const DefaultPlayerName = "<none>"; } // namespace bool MapSessionFilter::Process(WorldPacket* packet) { Opcodes opcode = DropHighBytes(packet->GetOpcode()); OpcodeHandler const* opHandle = opcodeTable[opcode]; //let's check if our opcode can be really processed in Map::Update() if (opHandle->ProcessingPlace == PROCESS_INPLACE) return true; //we do not process thread-unsafe packets if (opHandle->ProcessingPlace == PROCESS_THREADUNSAFE) return false; Player* player = m_pSession->GetPlayer(); if (!player) return false; //in Map::Update() we do not process 
packets where player is not in world! return player->IsInWorld(); } //we should process ALL packets when player is not in world/logged in //OR packet handler is not thread-safe! bool WorldSessionFilter::Process(WorldPacket* packet) { Opcodes opcode = DropHighBytes(packet->GetOpcode()); OpcodeHandler const* opHandle = opcodeTable[opcode]; //check if packet handler is supposed to be safe if (opHandle->ProcessingPlace == PROCESS_INPLACE) return true; //thread-unsafe packets should be processed in World::UpdateSessions() if (opHandle->ProcessingPlace == PROCESS_THREADUNSAFE) return true; //no player attached? -> our client! ^^ Player* player = m_pSession->GetPlayer(); if (!player) return true; //lets process all packets for non-in-the-world player return (player->IsInWorld() == false); } /// WorldSession constructor WorldSession::WorldSession(uint32 id, WorldSocket* sock, AccountTypes sec, uint8 expansion, time_t mute_time, LocaleConstant locale, uint32 recruiter, bool isARecruiter): m_muteTime(mute_time), m_timeOutTime(0), _player(NULL), m_Socket(sock), _security(sec), _accountId(id), m_expansion(expansion), _warden(NULL), _logoutTime(0), m_inQueue(false), m_playerLoading(false), m_playerLogout(false), m_playerRecentlyLogout(false), m_playerSave(false), m_sessionDbcLocale(sWorld->GetAvailableDbcLocale(locale)), m_sessionDbLocaleIndex(locale), m_latency(0), m_TutorialsChanged(false), _filterAddonMessages(false), recruiterId(recruiter), isRecruiter(isARecruiter), timeLastWhoCommand(0) { if (sock) { m_Address = sock->GetRemoteAddress(); sock->AddReference(); ResetTimeOutTime(); LoginDatabase.PExecute("UPDATE account SET online = 1 WHERE id = %u;", GetAccountId()); // One-time query } InitializeQueryCallbackParameters(); _compressionStream = new z_stream(); _compressionStream->zalloc = (alloc_func)NULL; _compressionStream->zfree = (free_func)NULL; _compressionStream->opaque = (voidpf)NULL; _compressionStream->avail_in = 0; _compressionStream->next_in = NULL; int32 z_res = 
deflateInit(_compressionStream, sWorld->getIntConfig(CONFIG_COMPRESSION)); if (z_res != Z_OK) { sLog->outError(LOG_FILTER_NETWORKIO, "Can't initialize packet compression (zlib: deflateInit) Error code: %i (%s)", z_res, zError(z_res)); return; } } /// WorldSession destructor WorldSession::~WorldSession() { ///- unload player if not unloaded if (_player) LogoutPlayer (true); /// - If have unclosed socket, close it if (m_Socket) { m_Socket->CloseSocket(); m_Socket->RemoveReference(); m_Socket = NULL; } if (_warden) delete _warden; ///- empty incoming packet queue WorldPacket* packet = NULL; while (_recvQueue.next(packet)) delete packet; LoginDatabase.PExecute("UPDATE account SET online = 0 WHERE id = %u;", GetAccountId()); // One-time query int32 z_res = deflateEnd(_compressionStream); if (z_res != Z_OK && z_res != Z_DATA_ERROR) // Z_DATA_ERROR signals that internal state was BUSY { sLog->outError(LOG_FILTER_NETWORKIO, "Can't close packet compression stream (zlib: deflateEnd) Error code: %i (%s)", z_res, zError(z_res)); return; } delete _compressionStream; } std::string const & WorldSession::GetPlayerName() const { return _player != NULL ? _player->GetName() : DefaultPlayerName; } std::string WorldSession::GetPlayerInfo() const { std::ostringstream ss; ss << "[Player: " << GetPlayerName() << " (Guid: " << (_player != NULL ? _player->GetGUID() : 0) << ", Account: " << GetAccountId() << ")]"; return ss.str(); } /// Get player guid if available. Use for logging purposes only uint32 WorldSession::GetGuidLow() const { return GetPlayer() ? 
GetPlayer()->GetGUIDLow() : 0; } /// Send a packet to the client void WorldSession::SendPacket(WorldPacket const* packet, bool forced /*= false*/) { if (!m_Socket) return; if (packet->GetOpcode() == NULL_OPCODE) { sLog->outError(LOG_FILTER_OPCODES, "Prevented sending of NULL_OPCODE to %s", GetPlayerInfo().c_str()); return; } else if (packet->GetOpcode() == UNKNOWN_OPCODE) { sLog->outError(LOG_FILTER_OPCODES, "Prevented sending of UNKNOWN_OPCODE to %s", GetPlayerInfo().c_str()); return; } if (!forced) { OpcodeHandler const* handler = opcodeTable[packet->GetOpcode()]; if (!handler || handler->Status == STATUS_UNHANDLED) { sLog->outError(LOG_FILTER_OPCODES, "Prevented sending disabled opcode %s to %s", GetOpcodeNameForLogging(packet->GetOpcode()).c_str(), GetPlayerInfo().c_str()); return; } } #ifdef TRINITY_DEBUG // Code for network use statistic static uint64 sendPacketCount = 0; static uint64 sendPacketBytes = 0; static time_t firstTime = time(NULL); static time_t lastTime = firstTime; // next 60 secs start time static uint64 sendLastPacketCount = 0; static uint64 sendLastPacketBytes = 0; time_t cur_time = time(NULL); if ((cur_time - lastTime) < 60) { sendPacketCount+=1; sendPacketBytes+=packet->size(); sendLastPacketCount+=1; sendLastPacketBytes+=packet->size(); } else { uint64 minTime = uint64(cur_time - lastTime); uint64 fullTime = uint64(lastTime - firstTime); sLog->outInfo(LOG_FILTER_GENERAL, "Send all time packets count: " UI64FMTD " bytes: " UI64FMTD " avr.count/sec: %f avr.bytes/sec: %f time: %u", sendPacketCount, sendPacketBytes, float(sendPacketCount)/fullTime, float(sendPacketBytes)/fullTime, uint32(fullTime)); sLog->outInfo(LOG_FILTER_GENERAL, "Send last min packets count: " UI64FMTD " bytes: " UI64FMTD " avr.count/sec: %f avr.bytes/sec: %f", sendLastPacketCount, sendLastPacketBytes, float(sendLastPacketCount)/minTime, float(sendLastPacketBytes)/minTime); lastTime = cur_time; sendLastPacketCount = 1; sendLastPacketBytes = packet->wpos(); // wpos is real 
written size } #endif // !TRINITY_DEBUG if (m_Socket->SendPacket(*packet) == -1) m_Socket->CloseSocket(); } /// Add an incoming packet to the queue void WorldSession::QueuePacket(WorldPacket* new_packet) { _recvQueue.add(new_packet); } /// Logging helper for unexpected opcodes void WorldSession::LogUnexpectedOpcode(WorldPacket* packet, const char* status, const char *reason) { sLog->outError(LOG_FILTER_OPCODES, "Received unexpected opcode %s Status: %s Reason: %s from %s", GetOpcodeNameForLogging(packet->GetOpcode()).c_str(), status, reason, GetPlayerInfo().c_str()); } /// Logging helper for unexpected opcodes void WorldSession::LogUnprocessedTail(WorldPacket* packet) { if (!sLog->ShouldLog(LOG_FILTER_OPCODES, LOG_LEVEL_TRACE) || packet->rpos() >= packet->wpos()) return; sLog->outTrace(LOG_FILTER_OPCODES, "Unprocessed tail data (read stop at %u from %u) Opcode %s from %s", uint32(packet->rpos()), uint32(packet->wpos()), GetOpcodeNameForLogging(packet->GetOpcode()).c_str(), GetPlayerInfo().c_str()); packet->print_storage(); } /// Update the WorldSession (triggered by World update) bool WorldSession::Update(uint32 diff, PacketFilter& updater) { /// Update Timeout timer. UpdateTimeOutTime(diff); ///- Before we process anything: /// If necessary, kick the player from the character select screen if (IsConnectionIdle()) m_Socket->CloseSocket(); ///- Retrieve packets from the receive queue and call the appropriate handlers /// not process packets if socket already closed WorldPacket* packet = NULL; //! Delete packet after processing by default bool deletePacket = true; //! To prevent infinite loop WorldPacket* firstDelayedPacket = NULL; //! If _recvQueue.peek() == firstDelayedPacket it means that in this Update call, we've processed all //! *properly timed* packets, and we're now at the part of the queue where we find //! delayed packets that were re-enqueued due to improper timing. To prevent an infinite //! 
loop caused by re-enqueueing the same packets over and over again, we stop updating this session //! and continue updating others. The re-enqueued packets will be handled in the next Update call for this session. while (m_Socket && !m_Socket->IsClosed() && !_recvQueue.empty() && _recvQueue.peek(true) != firstDelayedPacket && _recvQueue.next(packet, updater)) { OpcodeHandler const* opHandle = opcodeTable[packet->GetOpcode()]; try { switch (opHandle->Status) { case STATUS_LOGGEDIN: if (!_player) { // skip STATUS_LOGGEDIN opcode unexpected errors if player logout sometime ago - this can be network lag delayed packets //! If player didn't log out a while ago, it means packets are being sent while the server does not recognize //! the client to be in world yet. We will re-add the packets to the bottom of the queue and process them later. if (!m_playerRecentlyLogout) { //! Prevent infinite loop if (!firstDelayedPacket) firstDelayedPacket = packet; //! Because checking a bool is faster than reallocating memory deletePacket = false; QueuePacket(packet); //! Log sLog->outDebug(LOG_FILTER_NETWORKIO, "Re-enqueueing packet with opcode %s with with status STATUS_LOGGEDIN. 
" "Player is currently not in world yet.", GetOpcodeNameForLogging(packet->GetOpcode()).c_str()); } } else if (_player->IsInWorld()) { sScriptMgr->OnPacketReceive(m_Socket, WorldPacket(*packet)); (this->*opHandle->Handler)(*packet); LogUnprocessedTail(packet); } // lag can cause STATUS_LOGGEDIN opcodes to arrive after the player started a transfer break; case STATUS_LOGGEDIN_OR_RECENTLY_LOGGOUT: if (!_player && !m_playerRecentlyLogout && !m_playerLogout) // There's a short delay between _player = null and m_playerRecentlyLogout = true during logout LogUnexpectedOpcode(packet, "STATUS_LOGGEDIN_OR_RECENTLY_LOGGOUT", "the player has not logged in yet and not recently logout"); else { // not expected _player or must checked in packet hanlder sScriptMgr->OnPacketReceive(m_Socket, WorldPacket(*packet)); (this->*opHandle->Handler)(*packet); LogUnprocessedTail(packet); } break; case STATUS_TRANSFER: if (!_player) LogUnexpectedOpcode(packet, "STATUS_TRANSFER", "the player has not logged in yet"); else if (_player->IsInWorld()) LogUnexpectedOpcode(packet, "STATUS_TRANSFER", "the player is still in world"); else { sScriptMgr->OnPacketReceive(m_Socket, WorldPacket(*packet)); (this->*opHandle->Handler)(*packet); LogUnprocessedTail(packet); } break; case STATUS_AUTHED: // prevent cheating with skip queue wait if (m_inQueue) { LogUnexpectedOpcode(packet, "STATUS_AUTHED", "the player not pass queue yet"); break; } // some auth opcodes can be recieved before STATUS_LOGGEDIN_OR_RECENTLY_LOGGOUT opcodes // however when we recieve CMSG_CHAR_ENUM we are surely no longer during the logout process. 
if (packet->GetOpcode() == CMSG_CHAR_ENUM) m_playerRecentlyLogout = false; sScriptMgr->OnPacketReceive(m_Socket, WorldPacket(*packet)); (this->*opHandle->Handler)(*packet); LogUnprocessedTail(packet); break; case STATUS_NEVER: sLog->outError(LOG_FILTER_OPCODES, "Received not allowed opcode %s from %s", GetOpcodeNameForLogging(packet->GetOpcode()).c_str() , GetPlayerInfo().c_str()); break; case STATUS_UNHANDLED: sLog->outError(LOG_FILTER_OPCODES, "Received not handled opcode %s from %s", GetOpcodeNameForLogging(packet->GetOpcode()).c_str() , GetPlayerInfo().c_str()); break; } } catch(ByteBufferException &) { sLog->outError(LOG_FILTER_NETWORKIO, "WorldSession::Update ByteBufferException occured while parsing a packet (opcode: %u) from client %s, accountid=%i. Skipped packet.", packet->GetOpcode(), GetRemoteAddress().c_str(), GetAccountId()); packet->hexlike(); } if (deletePacket) delete packet; } if (m_Socket && !m_Socket->IsClosed() && _warden) _warden->Update(); ProcessQueryCallbacks(); //check if we are safe to proceed with logout //logout procedure should happen only in World::UpdateSessions() method!!! 
if (updater.ProcessLogout()) { time_t currTime = time(NULL); ///- If necessary, log the player out if (ShouldLogOut(currTime) && !m_playerLoading) LogoutPlayer(true); if (m_Socket && GetPlayer() && _warden) _warden->Update(); ///- Cleanup socket pointer if need if (m_Socket && m_Socket->IsClosed()) { m_Socket->RemoveReference(); m_Socket = NULL; } if (!m_Socket) return false; //Will remove this session from the world session map } return true; } /// %Log the player out void WorldSession::LogoutPlayer(bool Save) { // finish pending transfers before starting the logout while (_player && _player->IsBeingTeleportedFar()) HandleMoveWorldportAckOpcode(); m_playerLogout = true; m_playerSave = Save; if (_player) { if (uint64 lguid = _player->GetLootGUID()) DoLootRelease(lguid); ///- If the player just died before logging out, make him appear as a ghost //FIXME: logout must be delayed in case lost connection with client in time of combat if (_player->GetDeathTimer()) { _player->getHostileRefManager().deleteReferences(); _player->BuildPlayerRepop(); _player->RepopAtGraveyard(); } else if (!_player->getAttackers().empty()) { // build set of player who attack _player or who have pet attacking of _player std::set<Player*> aset; for (Unit::AttackerSet::const_iterator itr = _player->getAttackers().begin(); itr != _player->getAttackers().end(); ++itr) { Unit* owner = (*itr)->GetOwner(); // including player controlled case if (owner && owner->GetTypeId() == TYPEID_PLAYER) aset.insert(owner->ToPlayer()); else if ((*itr)->GetTypeId() == TYPEID_PLAYER) aset.insert((Player*)(*itr)); } // CombatStop() method is removing all attackers from the AttackerSet // That is why it must be AFTER building current set of attackers _player->CombatStop(); _player->getHostileRefManager().setOnlineOfflineState(false); _player->RemoveAllAurasOnDeath(); _player->SetPvPDeath(!aset.empty()); _player->KillPlayer(); _player->BuildPlayerRepop(); _player->RepopAtGraveyard(); // give honor to all attackers from 
set like group case for (std::set<Player*>::const_iterator itr = aset.begin(); itr != aset.end(); ++itr) (*itr)->RewardHonor(_player, aset.size()); // give bg rewards and update counters like kill by first from attackers // this can't be called for all attackers. if (!aset.empty()) if (Battleground* bg = _player->GetBattleground()) bg->HandleKillPlayer(_player, *aset.begin()); } else if (_player->HasAuraType(SPELL_AURA_SPIRIT_OF_REDEMPTION)) { // this will kill character by SPELL_AURA_SPIRIT_OF_REDEMPTION _player->RemoveAurasByType(SPELL_AURA_MOD_SHAPESHIFT); _player->KillPlayer(); _player->BuildPlayerRepop(); _player->RepopAtGraveyard(); } else if (_player->HasPendingBind()) { _player->RepopAtGraveyard(); _player->SetPendingBind(0, 0); } //drop a flag if player is carrying it if (Battleground* bg = _player->GetBattleground()) bg->EventPlayerLoggedOut(_player); ///- Teleport to home if the player is in an invalid instance if (!_player->m_InstanceValid && !_player->isGameMaster()) _player->TeleportTo(_player->m_homebindMapId, _player->m_homebindX, _player->m_homebindY, _player->m_homebindZ, _player->GetOrientation()); sOutdoorPvPMgr->HandlePlayerLeaveZone(_player, _player->GetZoneId()); for (int i=0; i < PLAYER_MAX_BATTLEGROUND_QUEUES; ++i) { if (BattlegroundQueueTypeId bgQueueTypeId = _player->GetBattlegroundQueueTypeId(i)) { _player->RemoveBattlegroundQueueId(bgQueueTypeId); BattlegroundQueue& queue = sBattlegroundMgr->GetBattlegroundQueue(bgQueueTypeId); queue.RemovePlayer(_player->GetGUID(), true); } } // Repop at GraveYard or other player far teleport will prevent saving player because of not present map // Teleport player immediately for correct player save while (_player->IsBeingTeleportedFar()) HandleMoveWorldportAckOpcode(); ///- If the player is in a guild, update the guild roster and broadcast a logout message to other guild members if (Guild* guild = sGuildMgr->GetGuildById(_player->GetGuildId())) guild->HandleMemberLogout(this); ///- Remove pet 
_player->RemovePet(NULL, PET_SAVE_AS_CURRENT, true); ///- empty buyback items and save the player in the database // some save parts only correctly work in case player present in map/player_lists (pets, etc) if (Save) { uint32 eslot; for (int j = BUYBACK_SLOT_START; j < BUYBACK_SLOT_END; ++j) { eslot = j - BUYBACK_SLOT_START; _player->SetUInt64Value(PLAYER_FIELD_VENDORBUYBACK_SLOT_1 + (eslot * 2), 0); _player->SetUInt32Value(PLAYER_FIELD_BUYBACK_PRICE_1 + eslot, 0); _player->SetUInt32Value(PLAYER_FIELD_BUYBACK_TIMESTAMP_1 + eslot, 0); } _player->SaveToDB(); } ///- Leave all channels before player delete... _player->CleanupChannels(); ///- If the player is in a group (or invited), remove him. If the group if then only 1 person, disband the group. _player->UninviteFromGroup(); // remove player from the group if he is: // a) in group; b) not in raid group; c) logging out normally (not being kicked or disconnected) if (_player->GetGroup() && !_player->GetGroup()->isRaidGroup() && m_Socket) _player->RemoveFromGroup(); //! Send update to group and reset stored max enchanting level if (_player->GetGroup()) { _player->GetGroup()->SendUpdate(); _player->GetGroup()->ResetMaxEnchantingLevel(); } //! Broadcast a logout message to the player's friends sSocialMgr->SendFriendStatus(_player, FRIEND_OFFLINE, _player->GetGUIDLow(), true); sSocialMgr->RemovePlayerSocial(_player->GetGUIDLow()); //! Call script hook before deletion sScriptMgr->OnPlayerLogout(_player); //! 
Remove the player from the world // the player may not be in the world when logging out // e.g if he got disconnected during a transfer to another map // calls to GetMap in this case may cause crashes _player->CleanupsBeforeDelete(); sLog->outInfo(LOG_FILTER_CHARACTER, "Account: %d (IP: %s) Logout Character:[%s] (GUID: %u) Level: %d", GetAccountId(), GetRemoteAddress().c_str(), _player->GetName().c_str(), _player->GetGUIDLow(), _player->getLevel()); if (Map* _map = _player->FindMap()) _map->RemovePlayerFromMap(_player, true); SetPlayer(NULL); //! Pointer already deleted during RemovePlayerFromMap //! Send the 'logout complete' packet to the client //! Client will respond by sending 3x CMSG_CANCEL_TRADE, which we currently dont handle WorldPacket data(SMSG_LOGOUT_COMPLETE, 0); SendPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "SESSION: Sent SMSG_LOGOUT_COMPLETE Message"); //! Since each account can only have one online character at any given time, ensure all characters for active account are marked as offline PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_UPD_ACCOUNT_ONLINE); stmt->setUInt32(0, GetAccountId()); CharacterDatabase.Execute(stmt); } m_playerLogout = false; m_playerSave = false; m_playerRecentlyLogout = true; LogoutRequest(0); } /// Kick a player out of the World void WorldSession::KickPlayer() { if (m_Socket) m_Socket->CloseSocket(); } void WorldSession::SendNotification(const char *format, ...) { if (format) { va_list ap; char szStr[1024]; szStr[0] = '\0'; va_start(ap, format); vsnprintf(szStr, 1024, format, ap); va_end(ap); size_t len = strlen(szStr); WorldPacket data(SMSG_NOTIFICATION, 2 + len); data.WriteBits(len, 13); data.FlushBits(); data.append(szStr, len); SendPacket(&data); } } void WorldSession::SendNotification(uint32 string_id, ...) 
{ char const* format = GetTrinityString(string_id); if (format) { va_list ap; char szStr[1024]; szStr[0] = '\0'; va_start(ap, string_id); vsnprintf(szStr, 1024, format, ap); va_end(ap); size_t len = strlen(szStr); WorldPacket data(SMSG_NOTIFICATION, 2 + len); data.WriteBits(len, 13); data.FlushBits(); data.append(szStr, len); SendPacket(&data); } } const char *WorldSession::GetTrinityString(int32 entry) const { return sObjectMgr->GetTrinityString(entry, GetSessionDbLocaleIndex()); } void WorldSession::Handle_NULL(WorldPacket& recvPacket) { sLog->outError(LOG_FILTER_OPCODES, "Received unhandled opcode %s from %s" , GetOpcodeNameForLogging(recvPacket.GetOpcode()).c_str(), GetPlayerInfo().c_str()); } void WorldSession::Handle_EarlyProccess(WorldPacket& recvPacket) { sLog->outError(LOG_FILTER_OPCODES, "Received opcode %s that must be processed in WorldSocket::OnRead from %s" , GetOpcodeNameForLogging(recvPacket.GetOpcode()).c_str(), GetPlayerInfo().c_str()); } void WorldSession::Handle_ServerSide(WorldPacket& recvPacket) { sLog->outError(LOG_FILTER_OPCODES, "Received server-side opcode %s from %s" , GetOpcodeNameForLogging(recvPacket.GetOpcode()).c_str(), GetPlayerInfo().c_str()); } void WorldSession::Handle_Deprecated(WorldPacket& recvPacket) { sLog->outError(LOG_FILTER_OPCODES, "Received deprecated opcode %s from %s" , GetOpcodeNameForLogging(recvPacket.GetOpcode()).c_str(), GetPlayerInfo().c_str()); } void WorldSession::SendAuthWaitQue(uint32 position) { if (position == 0) { WorldPacket packet(SMSG_AUTH_RESPONSE, 1); packet.WriteBit(0); // has queue info packet.WriteBit(0); // has account info packet.FlushBits(); packet << uint8(AUTH_OK); SendPacket(&packet); } else { WorldPacket packet(SMSG_AUTH_RESPONSE, 6); packet.WriteBit(1); // has queue info packet.WriteBit(0); // unk queue bool packet.WriteBit(0); // has account info packet.FlushBits(); packet << uint8(AUTH_WAIT_QUEUE); packet << uint32(position); SendPacket(&packet); } } void 
WorldSession::LoadGlobalAccountData() { PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SEL_ACCOUNT_DATA); stmt->setUInt32(0, GetAccountId()); LoadAccountData(CharacterDatabase.Query(stmt), GLOBAL_CACHE_MASK); } void WorldSession::LoadAccountData(PreparedQueryResult result, uint32 mask) { for (uint32 i = 0; i < NUM_ACCOUNT_DATA_TYPES; ++i) if (mask & (1 << i)) m_accountData[i] = AccountData(); if (!result) return; do { Field* fields = result->Fetch(); uint32 type = fields[0].GetUInt8(); if (type >= NUM_ACCOUNT_DATA_TYPES) { sLog->outError(LOG_FILTER_GENERAL, "Table `%s` have invalid account data type (%u), ignore.", mask == GLOBAL_CACHE_MASK ? "account_data" : "character_account_data", type); continue; } if ((mask & (1 << type)) == 0) { sLog->outError(LOG_FILTER_GENERAL, "Table `%s` have non appropriate for table account data type (%u), ignore.", mask == GLOBAL_CACHE_MASK ? "account_data" : "character_account_data", type); continue; } m_accountData[type].Time = time_t(fields[1].GetUInt32()); m_accountData[type].Data = fields[2].GetString(); } while (result->NextRow()); } void WorldSession::SetAccountData(AccountDataType type, time_t tm, std::string const& data) { uint32 id = 0; uint32 index = 0; if ((1 << type) & GLOBAL_CACHE_MASK) { id = GetAccountId(); index = CHAR_REP_ACCOUNT_DATA; } else { // _player can be NULL and packet received after logout but m_GUID still store correct guid if (!m_GUIDLow) return; id = m_GUIDLow; index = CHAR_REP_PLAYER_ACCOUNT_DATA; } PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(index); stmt->setUInt32(0, id); stmt->setUInt8 (1, type); stmt->setUInt32(2, uint32(tm)); stmt->setString(3, data); CharacterDatabase.Execute(stmt); m_accountData[type].Time = tm; m_accountData[type].Data = data; } void WorldSession::SendAccountDataTimes(uint32 mask) { WorldPacket data(SMSG_ACCOUNT_DATA_TIMES, 4+1+4+NUM_ACCOUNT_DATA_TYPES*4); data << uint32(time(NULL)); // Server time data << uint8(1); data << 
uint32(mask); // type mask for (uint32 i = 0; i < NUM_ACCOUNT_DATA_TYPES; ++i) if (mask & (1 << i)) data << uint32(GetAccountData(AccountDataType(i))->Time);// also unix time SendPacket(&data); } void WorldSession::LoadTutorialsData() { memset(m_Tutorials, 0, sizeof(uint32) * MAX_ACCOUNT_TUTORIAL_VALUES); PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SEL_TUTORIALS); stmt->setUInt32(0, GetAccountId()); if (PreparedQueryResult result = CharacterDatabase.Query(stmt)) for (uint8 i = 0; i < MAX_ACCOUNT_TUTORIAL_VALUES; ++i) m_Tutorials[i] = (*result)[i].GetUInt32(); m_TutorialsChanged = false; } void WorldSession::SendTutorialsData() { WorldPacket data(SMSG_TUTORIAL_FLAGS, 4 * MAX_ACCOUNT_TUTORIAL_VALUES); for (uint8 i = 0; i < MAX_ACCOUNT_TUTORIAL_VALUES; ++i) data << m_Tutorials[i]; SendPacket(&data); } void WorldSession::SaveTutorialsData(SQLTransaction &trans) { if (!m_TutorialsChanged) return; PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SEL_HAS_TUTORIALS); stmt->setUInt32(0, GetAccountId()); bool hasTutorials = !CharacterDatabase.Query(stmt).null(); // Modify data in DB stmt = CharacterDatabase.GetPreparedStatement(hasTutorials ? 
CHAR_UPD_TUTORIALS : CHAR_INS_TUTORIALS); for (uint8 i = 0; i < MAX_ACCOUNT_TUTORIAL_VALUES; ++i) stmt->setUInt32(i, m_Tutorials[i]); stmt->setUInt32(MAX_ACCOUNT_TUTORIAL_VALUES, GetAccountId()); trans->Append(stmt); m_TutorialsChanged = false; } void WorldSession::ReadAddonsInfo(WorldPacket &data) { if (data.rpos() + 4 > data.size()) return; uint32 size; data >> size; if (!size) return; if (size > 0xFFFFF) { sLog->outError(LOG_FILTER_GENERAL, "WorldSession::ReadAddonsInfo addon info too big, size %u", size); return; } uLongf uSize = size; uint32 pos = data.rpos(); ByteBuffer addonInfo; addonInfo.resize(size); if (uncompress(const_cast<uint8*>(addonInfo.contents()), &uSize, const_cast<uint8*>(data.contents() + pos), data.size() - pos) == Z_OK) { uint32 addonsCount; addonInfo >> addonsCount; // addons count for (uint32 i = 0; i < addonsCount; ++i) { std::string addonName; uint8 enabled; uint32 crc, unk1; // check next addon data format correctness if (addonInfo.rpos() + 1 > addonInfo.size()) return; addonInfo >> addonName; addonInfo >> enabled >> crc >> unk1; sLog->outInfo(LOG_FILTER_GENERAL, "ADDON: Name: %s, Enabled: 0x%x, CRC: 0x%x, Unknown2: 0x%x", addonName.c_str(), enabled, crc, unk1); AddonInfo addon(addonName, enabled, crc, 2, true); SavedAddon const* savedAddon = AddonMgr::GetAddonInfo(addonName); if (savedAddon) { bool match = true; if (addon.CRC != savedAddon->CRC) match = false; if (!match) sLog->outInfo(LOG_FILTER_GENERAL, "ADDON: %s was known, but didn't match known CRC (0x%x)!", addon.Name.c_str(), savedAddon->CRC); else sLog->outInfo(LOG_FILTER_GENERAL, "ADDON: %s was known, CRC is correct (0x%x)", addon.Name.c_str(), savedAddon->CRC); } else { AddonMgr::SaveAddon(addon); sLog->outInfo(LOG_FILTER_GENERAL, "ADDON: %s (0x%x) was not known, saving...", addon.Name.c_str(), addon.CRC); } // TODO: Find out when to not use CRC/pubkey, and other possible states. 
m_addonsList.push_back(addon); } uint32 currentTime; addonInfo >> currentTime; sLog->outDebug(LOG_FILTER_NETWORKIO, "ADDON: CurrentTime: %u", currentTime); if (addonInfo.rpos() != addonInfo.size()) sLog->outDebug(LOG_FILTER_NETWORKIO, "packet under-read!"); } else sLog->outError(LOG_FILTER_GENERAL, "Addon packet uncompress error!"); } void WorldSession::SendAddonsInfo() { uint8 addonPublicKey[256] = { 0xC3, 0x5B, 0x50, 0x84, 0xB9, 0x3E, 0x32, 0x42, 0x8C, 0xD0, 0xC7, 0x48, 0xFA, 0x0E, 0x5D, 0x54, 0x5A, 0xA3, 0x0E, 0x14, 0xBA, 0x9E, 0x0D, 0xB9, 0x5D, 0x8B, 0xEE, 0xB6, 0x84, 0x93, 0x45, 0x75, 0xFF, 0x31, 0xFE, 0x2F, 0x64, 0x3F, 0x3D, 0x6D, 0x07, 0xD9, 0x44, 0x9B, 0x40, 0x85, 0x59, 0x34, 0x4E, 0x10, 0xE1, 0xE7, 0x43, 0x69, 0xEF, 0x7C, 0x16, 0xFC, 0xB4, 0xED, 0x1B, 0x95, 0x28, 0xA8, 0x23, 0x76, 0x51, 0x31, 0x57, 0x30, 0x2B, 0x79, 0x08, 0x50, 0x10, 0x1C, 0x4A, 0x1A, 0x2C, 0xC8, 0x8B, 0x8F, 0x05, 0x2D, 0x22, 0x3D, 0xDB, 0x5A, 0x24, 0x7A, 0x0F, 0x13, 0x50, 0x37, 0x8F, 0x5A, 0xCC, 0x9E, 0x04, 0x44, 0x0E, 0x87, 0x01, 0xD4, 0xA3, 0x15, 0x94, 0x16, 0x34, 0xC6, 0xC2, 0xC3, 0xFB, 0x49, 0xFE, 0xE1, 0xF9, 0xDA, 0x8C, 0x50, 0x3C, 0xBE, 0x2C, 0xBB, 0x57, 0xED, 0x46, 0xB9, 0xAD, 0x8B, 0xC6, 0xDF, 0x0E, 0xD6, 0x0F, 0xBE, 0x80, 0xB3, 0x8B, 0x1E, 0x77, 0xCF, 0xAD, 0x22, 0xCF, 0xB7, 0x4B, 0xCF, 0xFB, 0xF0, 0x6B, 0x11, 0x45, 0x2D, 0x7A, 0x81, 0x18, 0xF2, 0x92, 0x7E, 0x98, 0x56, 0x5D, 0x5E, 0x69, 0x72, 0x0A, 0x0D, 0x03, 0x0A, 0x85, 0xA2, 0x85, 0x9C, 0xCB, 0xFB, 0x56, 0x6E, 0x8F, 0x44, 0xBB, 0x8F, 0x02, 0x22, 0x68, 0x63, 0x97, 0xBC, 0x85, 0xBA, 0xA8, 0xF7, 0xB5, 0x40, 0x68, 0x3C, 0x77, 0x86, 0x6F, 0x4B, 0xD7, 0x88, 0xCA, 0x8A, 0xD7, 0xCE, 0x36, 0xF0, 0x45, 0x6E, 0xD5, 0x64, 0x79, 0x0F, 0x17, 0xFC, 0x64, 0xDD, 0x10, 0x6F, 0xF3, 0xF5, 0xE0, 0xA6, 0xC3, 0xFB, 0x1B, 0x8C, 0x29, 0xEF, 0x8E, 0xE5, 0x34, 0xCB, 0xD1, 0x2A, 0xCE, 0x79, 0xC3, 0x9A, 0x0D, 0x36, 0xEA, 0x01, 0xE0, 0xAA, 0x91, 0x20, 0x54, 0xF0, 0x72, 0xD8, 0x1E, 0xC7, 0x89, 0xD2 }; WorldPacket data(SMSG_ADDON_INFO, 4); for 
(AddonsList::iterator itr = m_addonsList.begin(); itr != m_addonsList.end(); ++itr) { data << uint8(itr->State); uint8 crcpub = itr->UsePublicKeyOrCRC; data << uint8(crcpub); if (crcpub) { uint8 usepk = (itr->CRC != STANDARD_ADDON_CRC); // If addon is Standard addon CRC data << uint8(usepk); if (usepk) // if CRC is wrong, add public key (client need it) { sLog->outInfo(LOG_FILTER_GENERAL, "ADDON: CRC (0x%x) for addon %s is wrong (does not match expected 0x%x), sending pubkey", itr->CRC, itr->Name.c_str(), STANDARD_ADDON_CRC); data.append(addonPublicKey, sizeof(addonPublicKey)); } data << uint32(0); // TODO: Find out the meaning of this. } uint8 unk3 = 0; // 0 is sent here data << uint8(unk3); if (unk3) { // String, length 256 (null terminated) data << uint8(0); } } m_addonsList.clear(); data << uint32(0); // count for an unknown for loop SendPacket(&data); } bool WorldSession::IsAddonRegistered(const std::string& prefix) const { if (!_filterAddonMessages) // if we have hit the softcap (64) nothing should be filtered return true; if (_registeredAddonPrefixes.empty()) return false; std::vector<std::string>::const_iterator itr = std::find(_registeredAddonPrefixes.begin(), _registeredAddonPrefixes.end(), prefix); return itr != _registeredAddonPrefixes.end(); } void WorldSession::HandleUnregisterAddonPrefixesOpcode(WorldPacket& /*recvPacket*/) // empty packet { sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Received CMSG_UNREGISTER_ALL_ADDON_PREFIXES"); _registeredAddonPrefixes.clear(); } void WorldSession::HandleAddonRegisteredPrefixesOpcode(WorldPacket& recvPacket) { sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Received CMSG_ADDON_REGISTERED_PREFIXES"); // This is always sent after CMSG_UNREGISTER_ALL_ADDON_PREFIXES uint32 count = recvPacket.ReadBits(25); if (count > REGISTERED_ADDON_PREFIX_SOFTCAP) { // if we have hit the softcap (64) nothing should be filtered _filterAddonMessages = false; recvPacket.rfinish(); return; } std::vector<uint8> lengths(count); for (uint32 
i = 0; i < count; ++i) lengths[i] = recvPacket.ReadBits(5); for (uint32 i = 0; i < count; ++i) _registeredAddonPrefixes.push_back(recvPacket.ReadString(lengths[i])); if (_registeredAddonPrefixes.size() > REGISTERED_ADDON_PREFIX_SOFTCAP) // shouldn't happen { _filterAddonMessages = false; return; } _filterAddonMessages = true; } void WorldSession::SetPlayer(Player* player) { _player = player; // set m_GUID that can be used while player loggined and later until m_playerRecentlyLogout not reset if (_player) m_GUIDLow = _player->GetGUIDLow(); } void WorldSession::InitializeQueryCallbackParameters() { // Callback parameters that have pointers in them should be properly // initialized to NULL here. _charCreateCallback.SetParam(NULL); } void WorldSession::ProcessQueryCallbacks() { PreparedQueryResult result; //! HandleCharEnumOpcode if (_charEnumCallback.ready()) { _charEnumCallback.get(result); HandleCharEnum(result); _charEnumCallback.cancel(); } if (_charCreateCallback.IsReady()) { _charCreateCallback.GetResult(result); HandleCharCreateCallback(result, _charCreateCallback.GetParam()); // Don't call FreeResult() here, the callback handler will do that depending on the events in the callback chain } //! HandlePlayerLoginOpcode if (_charLoginCallback.ready()) { SQLQueryHolder* param; _charLoginCallback.get(param); HandlePlayerLogin((LoginQueryHolder*)param); _charLoginCallback.cancel(); } //! 
HandleAddFriendOpcode if (_addFriendCallback.IsReady()) { std::string param = _addFriendCallback.GetParam(); _addFriendCallback.GetResult(result); HandleAddFriendOpcodeCallBack(result, param); _addFriendCallback.FreeResult(); } //- HandleCharRenameOpcode if (_charRenameCallback.IsReady()) { std::string param = _charRenameCallback.GetParam(); _charRenameCallback.GetResult(result); HandleChangePlayerNameOpcodeCallBack(result, param); _charRenameCallback.FreeResult(); } //- HandleCharAddIgnoreOpcode if (_addIgnoreCallback.ready()) { _addIgnoreCallback.get(result); HandleAddIgnoreOpcodeCallBack(result); _addIgnoreCallback.cancel(); } //- SendStabledPet if (_sendStabledPetCallback.IsReady()) { uint64 param = _sendStabledPetCallback.GetParam(); _sendStabledPetCallback.GetResult(result); SendStablePetCallback(result, param); _sendStabledPetCallback.FreeResult(); } //- HandleStablePet if (_stablePetCallback.ready()) { _stablePetCallback.get(result); HandleStablePetCallback(result); _stablePetCallback.cancel(); } //- HandleUnstablePet if (_unstablePetCallback.IsReady()) { uint32 param = _unstablePetCallback.GetParam(); _unstablePetCallback.GetResult(result); HandleUnstablePetCallback(result, param); _unstablePetCallback.FreeResult(); } //- HandleStableSwapPet if (_stableSwapCallback.IsReady()) { uint32 param = _stableSwapCallback.GetParam(); _stableSwapCallback.GetResult(result); HandleStableSwapPetCallback(result, param); _stableSwapCallback.FreeResult(); } } void WorldSession::InitWarden(BigNumber* k, std::string const& os) { if (os == "Win") { _warden = new WardenWin(); _warden->Init(this, k); } else if (os == "OSX") { // Disabled as it is causing the client to crash // _warden = new WardenMac(); // _warden->Init(this, k); } }
Ikesters/TrinityCore-5.0.5b
Нова папка/src/server/game/Server/WorldSession.cpp
C++
gpl-2.0
42,375
/* * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* * @test * * @bug 6388456 * @summary Need adjustable TLS max record size for interoperability * with non-compliant * * @run main/othervm -Djsse.enableCBCProtection=false LargePacket * * @author Xuelei Fan */ import javax.net.ssl.*; import java.nio.channels.*; import java.net.*; public class LargePacket extends SSLEngineService { /* * ============================================================= * Set the various variables needed for the tests, then * specify what tests to run on each side. */ /* * Should we run the client or server in a separate thread? * Both sides can throw exceptions, but do you have a preference * as to which side should be the main thread. */ static boolean separateServerThread = true; // Is the server ready to serve? volatile static boolean serverReady = false; /* * Turn on SSL debugging? */ static boolean debug = false; /* * Define the server side of the test. 
* * If the server prematurely exits, serverReady will be set to true * to avoid infinite hangs. */ void doServerSide() throws Exception { // create SSLEngine. SSLEngine ssle = createSSLEngine(false); // Create a server socket channel. InetSocketAddress isa = new InetSocketAddress(InetAddress.getLocalHost(), serverPort); ServerSocketChannel ssc = ServerSocketChannel.open(); ssc.socket().bind(isa); serverPort = ssc.socket().getLocalPort(); // Signal Client, we're ready for his connect. serverReady = true; // Accept a socket channel. SocketChannel sc = ssc.accept(); // Complete connection. while (!sc.finishConnect() ) { // waiting for the connection completed. } // handshaking handshaking(ssle, sc); // receive application data receive(ssle, sc); // send out application data deliver(ssle, sc); // close the socket channel. sc.close(); ssc.close(); } /* * Define the client side of the test. * * If the server prematurely exits, serverReady will be set to true * to avoid infinite hangs. */ void doClientSide() throws Exception { // create SSLEngine. SSLEngine ssle = createSSLEngine(true); /* * Wait for server to get started. */ while (!serverReady) { Thread.sleep(50); } // Create a non-blocking socket channel. SocketChannel sc = SocketChannel.open(); sc.configureBlocking(false); InetSocketAddress isa = new InetSocketAddress(InetAddress.getLocalHost(), serverPort); sc.connect(isa); // Complete connection. while (!sc.finishConnect() ) { // waiting for the connection completed. } // handshaking handshaking(ssle, sc); // send out application data deliver(ssle, sc); // receive application data receive(ssle, sc); // close the socket channel. 
sc.close(); } /* * ============================================================= * The remainder is just support stuff */ volatile Exception serverException = null; volatile Exception clientException = null; // use any free port by default volatile int serverPort = 0; public static void main(String args[]) throws Exception { if (debug) System.setProperty("javax.net.debug", "all"); new LargePacket(); } Thread clientThread = null; Thread serverThread = null; /* * Primary constructor, used to drive remainder of the test. * * Fork off the other side, then do your work. */ LargePacket() throws Exception { if (separateServerThread) { startServer(true); startClient(false); } else { startClient(true); startServer(false); } /* * Wait for other side to close down. */ if (separateServerThread) { serverThread.join(); } else { clientThread.join(); } /* * When we get here, the test is pretty much over. * * If the main thread excepted, that propagates back * immediately. If the other thread threw an exception, we * should report back. */ if (serverException != null) { System.out.print("Server Exception:"); throw serverException; } if (clientException != null) { System.out.print("Client Exception:"); throw clientException; } } void startServer(boolean newThread) throws Exception { if (newThread) { serverThread = new Thread() { public void run() { try { doServerSide(); } catch (Exception e) { /* * Our server thread just died. * * Release the client, if not active already... */ System.err.println("Server died..."); System.err.println(e); serverReady = true; serverException = e; } } }; serverThread.start(); } else { doServerSide(); } } void startClient(boolean newThread) throws Exception { if (newThread) { clientThread = new Thread() { public void run() { try { doClientSide(); } catch (Exception e) { /* * Our client thread just died. */ System.err.println("Client died..."); clientException = e; } } }; clientThread.start(); } else { doClientSide(); } } }
openjdk-mirror/jdk7u-jdk
test/sun/security/ssl/javax/net/ssl/NewAPIs/SSLEngine/LargePacket.java
Java
gpl-2.0
7,353
# -*- coding: utf-8 -*-
"""

    @author: Fabio Erculiani <lxnay@sabayon.org>
    @contact: lxnay@sabayon.org
    @copyright: Fabio Erculiani
    @license: GPL-2

    B{Entropy Infrastructure Toolkit}.

"""
import sys
import argparse
import textwrap as _textwrap

from entropy.output import decolorize


class ColorfulFormatter(argparse.RawTextHelpFormatter):
    """
    This is just a whacky HelpFormatter flavour to add some coloring.

    It lets argparse compute alignment on the *decolorized* (plain) text,
    then re-injects the original, color-escaped strings with the same
    leading/trailing padding via __colors().
    """

    def __colors(self, tup_str, orig_str):
        # Transfer the padding computed for the plain string onto the
        # colorized one: keep tup_str's leading/trailing spaces, but use
        # orig_str's (escape-sequence-laden) content in between.
        pre_spaces = len(tup_str) - len(tup_str.lstrip())
        post_spaces = len(tup_str) - len(tup_str.rstrip())
        return " " * pre_spaces + orig_str.strip() \
            + " " * post_spaces

    def _format_action(self, action):
        # Colorized re-implementation of argparse.HelpFormatter's
        # _format_action(); alignment math mirrors the stdlib version.

        # determine the required width and the entry label
        help_position = min(self._action_max_length + 2,
                            self._max_help_position)
        help_width = self._width - help_position
        action_width = help_position - self._current_indent - 2
        orig_action_header = self._format_action_invocation(action)
        # widths must be computed on text without ANSI escapes
        action_header = decolorize(orig_action_header)

        # no help; start on same line and add a final newline
        if not action.help:
            tup = self._current_indent, '', action_header
            action_header = '%*s%s\n' % tup

        # short action name; start on the same line and pad two spaces
        elif len(action_header) <= action_width:
            tup = self._current_indent, '', action_width, action_header
            # two trailing pad spaces match the "- 2" in action_width above
            tup_str = '%*s%-*s  ' % tup
            action_header = self.__colors(tup_str, orig_action_header)
            indent_first = 0

        # long action name; start on the next line
        else:
            tup = self._current_indent, '', action_header
            # BUGFIX: this branch used '%*s%-*s ' which consumes four
            # arguments while `tup` only has three, so any action header
            # longer than action_width raised TypeError.  Format the header
            # alone (as upstream argparse does) and append the newline after
            # colorizing, because __colors() strips trailing whitespace.
            tup_str = '%*s%s' % tup
            action_header = self.__colors(tup_str, orig_action_header) + '\n'
            indent_first = help_position

        # collect the pieces of the action help
        parts = [action_header]

        # if there was help for the action, add lines of help text
        if action.help:
            orig_help_text = self._expand_help(action)
            help_text = decolorize(orig_help_text)
            # wrap on the plain text, then colorize line by line; assumes
            # decolorization does not change the wrap points
            help_lines = self._split_lines(help_text, help_width)
            orig_help_lines = self._split_lines(orig_help_text, help_width)
            tup_str = '%*s%s' % (indent_first, '', help_lines[0])
            parts.append(self.__colors(tup_str, orig_help_lines[0]) + "\n")
            for idx, line in enumerate(help_lines[1:]):
                tup_str = '%*s%s' % (help_position, '', line)
                parts.append(
                    self.__colors(tup_str, orig_help_lines[idx + 1]) + "\n")

        # or add a newline if the description doesn't end with one
        elif not action_header.endswith('\n'):
            parts.append('\n')

        # if there are any sub-actions, add their help as well
        for subaction in self._iter_indented_subactions(action):
            parts.append(self._format_action(subaction))

        # return a single string
        return self._join_parts(parts)
mudler/entropy
server/eit/colorful.py
Python
gpl-2.0
3,125
/**
 * Default chart theme (singleton).
 *
 * Extends Ext.chart.theme.Base and registers itself under both the
 * 'chart.theme.default' and 'chart.theme.Base' aliases.
 */
Ext.define('Ext.chart.theme.Default', {
    extend: 'Ext.chart.theme.Base',

    singleton: true,

    alias: [
        'chart.theme.default',
        'chart.theme.Base'
    ]
});
ybbkd2/publicweb
web/ext/packages/sencha-charts/src/chart/theme/Default.js
JavaScript
gpl-2.0
177
/*
 * Copyright (C) 2010-2018 Team Kodi
 * This file is part of Kodi - https://kodi.tv
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 * See LICENSES/README.md for more information.
 */

#include "AEStreamInfo.h"
#include "utils/log.h"
#include <algorithm>
#include <string.h>

/* Sync preambles (big-endian byte order as read from the stream) for the
 * container/codec variants this parser recognises. The four DTS core
 * preambles cover 14/16-bit payloads in both endiannesses; the remaining
 * DTS_* values identify the HD substream and its extension assets. */
#define DTS_PREAMBLE_14BE 0x1FFFE800
#define DTS_PREAMBLE_14LE 0xFF1F00E8
#define DTS_PREAMBLE_16BE 0x7FFE8001
#define DTS_PREAMBLE_16LE 0xFE7F0180
#define DTS_PREAMBLE_HD 0x64582025
#define DTS_PREAMBLE_XCH 0x5a5a5a5a
#define DTS_PREAMBLE_XXCH 0x47004a03
#define DTS_PREAMBLE_X96K 0x1d95f262
#define DTS_PREAMBLE_XBR 0x655e315e
#define DTS_PREAMBLE_LBR 0x0a801921
#define DTS_PREAMBLE_XLL 0x41a29547
#define DTS_SFREQ_COUNT 16
#define MAX_EAC3_BLOCKS 6
#define UNKNOWN_DTS_EXTENSION 255

/* Lookup tables indexed directly by the corresponding header bit-fields
 * (frmsizecod>>1, fscod, numblkscod, acmod, amode, sfreq, chanmap bit). */
static const uint16_t AC3Bitrates [] = {32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640};
static const uint16_t AC3FSCod [] = {48000, 44100, 32000, 0};
static const uint8_t AC3BlkCod [] = {1, 2, 3, 6};
static const uint8_t AC3Channels [] = {2, 1, 2, 3, 3, 4, 4, 5};
static const uint8_t DTSChannels [] = {1, 2, 2, 2, 2, 3, 3, 4, 4, 5, 6, 6, 6, 7, 8, 8};
static const uint8_t THDChanMap [] = {2, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1};

static const uint32_t DTSSampleRates[DTS_SFREQ_COUNT] =
{
  0     ,
  8000  ,
  16000 ,
  32000 ,
  64000 ,
  128000,
  11025 ,
  22050 ,
  44100 ,
  88200 ,
  176400,
  12000 ,
  24000 ,
  48000 ,
  96000 ,
  192000
};

/* Start in stream-type detection mode and prepare the CRC table used to
 * validate TrueHD major sync units (generator polynomial 0x2D, 16 bits). */
CAEStreamParser::CAEStreamParser() :
  m_syncFunc (&CAEStreamParser::DetectType)
{
  av_crc_init(m_crcTrueHD, 0, 16, 0x2D, sizeof(m_crcTrueHD));
}

/* Duration of one parsed frame in milliseconds, derived from the stream type
 * and sample rate captured by the sync functions. */
double CAEStreamInfo::GetDuration() const
{
  double duration = 0;
  switch (m_type)
  {
    case STREAM_TYPE_AC3:
      /* AC3 frames always carry 1536 samples at 48kHz => 32ms */
      duration = 0.032;
      break;
    case STREAM_TYPE_EAC3:
      duration = 6144.0 / m_sampleRate / 4;
      break;
    case STREAM_TYPE_TRUEHD:
      /* TrueHD units are timed against the 192k/176.4k rate family */
      int rate;
      if (m_sampleRate == 48000 || m_sampleRate == 96000 || m_sampleRate == 192000)
        rate = 192000;
      else
        rate = 176400;
      duration = 3840.0 / rate;
      break;
    case STREAM_TYPE_DTS_512:
    case STREAM_TYPE_DTSHD_CORE:
    case STREAM_TYPE_DTSHD:
    case STREAM_TYPE_DTSHD_MA:
      duration = 512.0 / m_sampleRate;
      break;
    case STREAM_TYPE_DTS_1024:
      duration = 1024.0 / m_sampleRate;
      break;
    case STREAM_TYPE_DTS_2048:
      duration = 2048.0 / m_sampleRate;
      break;
    default:
      CLog::Log(LOGERROR, "CAEStreamInfo::GetDuration - invalid stream type");
      break;
  }
  /* seconds -> milliseconds */
  return duration * 1000;
}

/* Two stream infos are considered equal when type, endianness and the
 * E-AC3 repeat factor match; sample rate / channels are not compared. */
bool CAEStreamInfo::operator==(const CAEStreamInfo& info) const
{
  if (m_type != info.m_type)
    return false;
  if (m_dataIsLE != info.m_dataIsLE)
    return false;
  if (m_repeat != info.m_repeat)
    return false;
  return true;
}

CAEStreamParser::~CAEStreamParser() = default;

/* Drop any buffered bytes and fall back to stream-type detection. */
void CAEStreamParser::Reset()
{
  m_skipBytes = 0;
  m_bufferSize = 0;
  m_needBytes = 0;
  m_hasSync = false;
}

/*
 * Feed raw bytes into the parser.
 * data/size: input bytes; buffer/bufferSize: optional out-params that receive
 * a complete packet when one becomes available (*bufferSize is set to 0
 * otherwise). Returns the number of input bytes consumed this call; the
 * caller is expected to call again with the remainder.
 */
int CAEStreamParser::AddData(uint8_t *data, unsigned int size, uint8_t **buffer/* = NULL */, unsigned int *bufferSize/* = 0 */)
{
  if (size == 0)
  {
    if (bufferSize)
      *bufferSize = 0;
    return 0;
  }

  /* a previous call established sync and told us how many more bytes the
   * current packet needs: just accumulate them without re-syncing */
  if (m_skipBytes)
  {
    unsigned int canSkip = std::min(size, m_skipBytes);
    unsigned int room = sizeof(m_buffer) - m_bufferSize;
    unsigned int copy = std::min(room, canSkip);

    memcpy(m_buffer + m_bufferSize, data, copy);
    m_bufferSize += copy;
    m_skipBytes -= copy;

    if (m_skipBytes)
    {
      /* packet still incomplete */
      if (bufferSize)
        *bufferSize = 0;
      return copy;
    }

    GetPacket(buffer, bufferSize);
    return copy;
  }
  else
  {
    unsigned int consumed = 0;
    unsigned int offset = 0;
    unsigned int room = sizeof(m_buffer) - m_bufferSize;
    while(1)
    {
      if (!size)
      {
        if (bufferSize)
          *bufferSize = 0;
        return consumed;
      }

      unsigned int copy = std::min(room, size);
      memcpy(m_buffer + m_bufferSize, data, copy);
      m_bufferSize += copy;
      consumed += copy;
      data += copy;
      size -= copy;
      room -= copy;

      /* a sync function asked for more bytes before it can decide */
      if (m_needBytes > m_bufferSize)
        continue;

      m_needBytes = 0;
      offset = (this->*m_syncFunc)(m_buffer, m_bufferSize);

      if (m_hasSync || m_needBytes)
        break;
      else
      {
        /* lost sync: fall back to full type detection */
        m_syncFunc = &CAEStreamParser::DetectType;
        m_info.m_type = CAEStreamInfo::STREAM_TYPE_NULL;
        m_info.m_repeat = 1;

        /* if the buffer is full, or the offset < the buffer size */
        if (m_bufferSize == sizeof(m_buffer) || offset < m_bufferSize)
        {
          m_bufferSize -= offset;
          room += offset;
          memmove(m_buffer, m_buffer + offset, m_bufferSize);
        }
      }
    }

    /* if we got here, we acquired sync on the buffer */

    /* align the buffer so the frame starts at offset 0 */
    if (offset)
    {
      m_bufferSize -= offset;
      memmove(m_buffer, m_buffer + offset, m_bufferSize);
    }

    /* bytes to skip (i.e. still accumulate) until the next packet boundary */
    m_skipBytes = std::max(0, (int)m_fsize - (int)m_bufferSize);
    if (m_skipBytes)
    {
      if (bufferSize)
        *bufferSize = 0;
      return consumed;
    }

    if (!m_needBytes)
      GetPacket(buffer, bufferSize);
    else if (bufferSize)
      *bufferSize = 0;

    return consumed;
  }
}

/*
 * Hand the completed packet at the front of m_buffer to the caller (if a
 * destination was supplied), then compact the buffer. For DTSHD_CORE only
 * the core portion (m_coreSize) is returned, but the full frame (m_fsize)
 * is always removed from the buffer.
 */
void CAEStreamParser::GetPacket(uint8_t **buffer, unsigned int *bufferSize)
{
  /* if the caller wants the packet */
  if (buffer)
  {
    /* if it is dtsHD and we only want the core, just fetch that */
    unsigned int size = m_fsize;
    if (m_info.m_type == CAEStreamInfo::STREAM_TYPE_DTSHD_CORE)
      size = m_coreSize;

    /* make sure the buffer is allocated and big enough */
    if (!*buffer || !bufferSize || *bufferSize < size)
    {
      delete[] *buffer;
      *buffer = new uint8_t[size];
    }

    /* copy the data into the buffer and update the size */
    memcpy(*buffer, m_buffer, size);
    if (bufferSize)
      *bufferSize = size;
  }

  /* remove the parsed data from the buffer */
  m_bufferSize -= m_fsize;
  memmove(m_buffer, m_buffer + m_fsize, m_bufferSize);
  m_fsize = 0;
  m_coreSize = 0;
}

/* SYNC FUNCTIONS */

/*
  This function looks for sync words across the types in parallel, and only
  does an exhaustive test if it finds a syncword. Once sync has been
  established, the relevant sync function sets m_syncFunc to itself. This
  function will only be called again if total sync is lost, which allows us
  to switch stream types on the fly much like a real receiver does.
*/
unsigned int CAEStreamParser::DetectType(uint8_t *data, unsigned int size)
{
  unsigned int skipped = 0;
  unsigned int possible = 0;

  while (size > 8)
  {
    /* if it could be DTS */
    unsigned int header = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3];
    if (header == DTS_PREAMBLE_14LE ||
        header == DTS_PREAMBLE_14BE ||
        header == DTS_PREAMBLE_16LE ||
        header == DTS_PREAMBLE_16BE)
    {
      unsigned int skip = SyncDTS(data, size);
      if (m_hasSync || m_needBytes)
        return skipped + skip;
      else
        possible = skipped;
    }

    /* if it could be AC3 */
    if (data[0] == 0x0b && data[1] == 0x77)
    {
      unsigned int skip = SyncAC3(data, size);
      if (m_hasSync || m_needBytes)
        return skipped + skip;
      else
        possible = skipped;
    }

    /* if it could be TrueHD (major sync pattern at offset 4) */
    if (data[4] == 0xf8 && data[5] == 0x72 && data[6] == 0x6f && data[7] == 0xba)
    {
      unsigned int skip = SyncTrueHD(data, size);
      if (m_hasSync)
        return skipped + skip;
      else
        possible = skipped;
    }

    /* move along one byte */
    --size;
    ++skipped;
    ++data;
  }

  /* no confirmed sync; report the best candidate offset so the caller can
   * keep those bytes around */
  return possible ? possible : skipped;
}

/*
 * Try to validate an (E-)AC3 syncframe starting exactly at data[0].
 * resyncing: true when we are scanning after losing sync (forces the full
 * validation path). wantEAC3dependent: true when probing for an E-AC3
 * dependent substream frame directly after a main frame.
 * On success fills m_fsize / m_info and returns true.
 */
bool CAEStreamParser::TrySyncAC3(uint8_t *data, unsigned int size, bool resyncing, bool wantEAC3dependent)
{
  if (size < 8)
    return false;

  /* look for an ac3 sync word */
  if (data[0] != 0x0b || data[1] != 0x77)
    return false;

  uint8_t bsid = data[5] >> 3;
  uint8_t acmod = data[6] >> 5;
  uint8_t lfeon;

  /* locate the lfeon bit: its position in data[6] shifts down as acmod
   * enables extra 2-bit fields before it */
  int8_t pos = 4;
  if ((acmod & 0x1) && (acmod != 0x1))
    pos -= 2;
  if (acmod & 0x4 )
    pos -= 2;
  if (acmod == 0x2)
    pos -= 2;
  if (pos < 0)
    /* NOTE(review): mask 0x64 is unusual for a single-bit test — looks like
     * it may have been intended as 0x40; confirm against the A/52 spec */
    lfeon = (data[7] & 0x64) ? 1 : 0;
  else
    lfeon = ((data[6] >> pos) & 0x1) ? 1 : 0;

  if (bsid > 0x11 || acmod > 7)
    return false;

  if (bsid <= 10)
  {
    /* Normal AC-3 */
    if (wantEAC3dependent)
      return false;

    uint8_t fscod = data[4] >> 6;
    uint8_t frmsizecod = data[4] & 0x3F;
    if (fscod == 3 || frmsizecod > 37)
      return false;

    /* get the details we need to check crc1 and framesize */
    unsigned int bitRate = AC3Bitrates[frmsizecod >> 1];
    unsigned int framesize = 0; /* in 16-bit words */
    switch (fscod)
    {
      case 0: framesize = bitRate * 2; break;
      case 1: framesize = (320 * bitRate / 147 + (frmsizecod & 1 ? 1 : 0)); break;
      case 2: framesize = bitRate * 4; break;
    }

    m_fsize = framesize << 1; /* bytes */
    m_info.m_sampleRate = AC3FSCod[fscod];

    /* dont do extensive testing if we have not lost sync */
    if (m_info.m_type == CAEStreamInfo::STREAM_TYPE_AC3 && !resyncing)
      return true;

    /* this may be the main stream of EAC3 */
    unsigned int fsizeMain = m_fsize;
    unsigned int reqBytes = fsizeMain + 8;
    if (size < reqBytes)
    {
      /* not enough data to check for E-AC3 dependent frame, request more */
      m_needBytes = reqBytes;
      m_fsize = 0;
      /* no need to resync => return true */
      return true;
    }
    if (TrySyncAC3(data + fsizeMain, size - fsizeMain, resyncing, /*wantEAC3dependent*/ true))
    {
      /* concatenate the main and dependent frames */
      m_fsize += fsizeMain;
      return true;
    }

    unsigned int crc_size;
    /* if we have enough data, validate the entire packet, else try to
     * validate crc2 (5/8 of the packet) */
    /* NOTE(review): framesize/crc_size are in 16-bit words while 'size' is in
     * bytes — these comparisons mix units; verify against upstream intent */
    if (framesize <= size)
      crc_size = framesize - 1;
    else
      crc_size = (framesize >> 1) + (framesize >> 3) - 1;

    if (crc_size <= size)
      if (av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &data[2], crc_size * 2))
        return false;

    /* if we get here, we can sync */
    m_hasSync = true;
    m_info.m_channels = AC3Channels[acmod] + lfeon;
    m_syncFunc = &CAEStreamParser::SyncAC3;
    m_info.m_type = CAEStreamInfo::STREAM_TYPE_AC3;
    m_info.m_ac3FrameSize = m_fsize;
    m_info.m_repeat = 1;

    CLog::Log(LOGINFO, "CAEStreamParser::TrySyncAC3 - AC3 stream detected (%d channels, %dHz)", m_info.m_channels, m_info.m_sampleRate);
    return true;
  }
  else
  {
    // Enhanced AC-3
    uint8_t strmtyp = data[2] >> 6;
    if (strmtyp == 3)
      return false;
    if (strmtyp != 1 && wantEAC3dependent)
    {
      CLog::Log(LOGDEBUG, "CAEStreamParser::TrySyncAC3 - Unexpected stream type: %d (wantEAC3dependent: %d)", strmtyp, wantEAC3dependent);
      return false;
    }

    unsigned int framesize = (((data[2] & 0x7) << 8) | data[3]) + 1; /* words */
    uint8_t fscod = (data[4] >> 6) & 0x3;
    uint8_t cod = (data[4] >> 4) & 0x3;
    uint8_t acmod = (data[4] >> 1) & 0x7;
    uint8_t lfeon = data[4] & 0x1;
    uint8_t blocks;

    if (fscod == 0x3)
    {
      if (cod == 0x3)
        return false;

      blocks = 6;
      /* reduced sample rate variant: half of the table rate */
      m_info.m_sampleRate = AC3FSCod[cod] >> 1;
    }
    else
    {
      blocks = AC3BlkCod[cod];
      m_info.m_sampleRate = AC3FSCod[fscod];
    }

    m_fsize = framesize << 1;
    m_info.m_repeat = MAX_EAC3_BLOCKS / blocks;

    if (m_info.m_type == CAEStreamInfo::STREAM_TYPE_EAC3 && m_hasSync && !resyncing)
      return true;

    // if we get here, we can sync
    m_hasSync = true;
    m_info.m_channels = AC3Channels[acmod] + lfeon;
    m_syncFunc = &CAEStreamParser::SyncAC3;
    m_info.m_type = CAEStreamInfo::STREAM_TYPE_EAC3;
    m_info.m_ac3FrameSize = m_fsize;

    CLog::Log(LOGINFO, "CAEStreamParser::TrySyncAC3 - E-AC3 stream detected (%d channels, %dHz)", m_info.m_channels, m_info.m_sampleRate);
    return true;
  }
}

/*
 * Scan the buffer byte-by-byte for a valid (E-)AC3 frame.
 * Returns the offset of the frame start, or the scanned length with
 * m_hasSync cleared when no frame was found.
 */
unsigned int CAEStreamParser::SyncAC3(uint8_t *data, unsigned int size)
{
  unsigned int skip = 0;

  for (; size - skip > 7; ++skip, ++data)
  {
    bool resyncing = (skip != 0);
    if (TrySyncAC3(data, size - skip, resyncing, /*wantEAC3dependent*/ false))
      return skip;
  }

  // if we get here, the entire packet is invalid and we have lost sync
  CLog::Log(LOGINFO, "CAEStreamParser::SyncAC3 - AC3 sync lost");
  m_hasSync = false;
  return skip;
}

/*
 * Scan for a DTS core frame (any of the four 14/16-bit LE/BE layouts),
 * then probe the bytes immediately after the core for a DTS-HD substream
 * and classify the stream accordingly. Returns the offset of the frame.
 */
unsigned int CAEStreamParser::SyncDTS(uint8_t *data, unsigned int size)
{
  if (size < 13)
  {
    if (m_needBytes < 13)
      m_needBytes = 14;
    return 0;
  }

  unsigned int skip = 0;
  for (; size - skip > 13; ++skip, ++data)
  {
    unsigned int header = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3];
    unsigned int hd_sync = 0;
    unsigned int dtsBlocks;
    unsigned int amode;
    unsigned int sfreq;
    unsigned int target_rate;
    unsigned int extension = 0;
    unsigned int ext_type = UNKNOWN_DTS_EXTENSION;
    unsigned int lfe;
    int bits;

    /* the same header fields live at different bit positions depending on
     * payload width (14/16 bit) and endianness; extract per variant */
    switch (header)
    {
      /* 14bit BE */
      case DTS_PREAMBLE_14BE:
        if (data[4] != 0x07 || (data[5] & 0xf0) != 0xf0)
          continue;
        dtsBlocks = (((data[5] & 0x7) << 4) | ((data[6] & 0x3C) >> 2)) + 1;
        m_fsize = (((((data[6] & 0x3) << 8) | data[7]) << 4) | ((data[8] & 0x3C) >> 2)) + 1;
        amode = ((data[8] & 0x3) << 4) | ((data[9] & 0xF0) >> 4);
        target_rate = ((data[10] & 0x3e) >> 1);
        extension = ((data[11] & 0x1));
        ext_type = ((data[11] & 0xe) >> 1);
        sfreq = data[9] & 0xF;
        lfe = (data[12] & 0x18) >> 3;
        m_info.m_dataIsLE = false;
        bits = 14;
        break;

      /* 14bit LE */
      case DTS_PREAMBLE_14LE:
        if (data[5] != 0x07 || (data[4] & 0xf0) != 0xf0)
          continue;
        dtsBlocks = (((data[4] & 0x7) << 4) | ((data[7] & 0x3C) >> 2)) + 1;
        m_fsize = (((((data[7] & 0x3) << 8) | data[6]) << 4) | ((data[9] & 0x3C) >> 2)) + 1;
        amode = ((data[9] & 0x3) << 4) | ((data[8] & 0xF0) >> 4);
        target_rate = ((data[11] & 0x3e) >> 1);
        extension = ((data[10] & 0x1));
        ext_type = ((data[10] & 0xe) >> 1);
        sfreq = data[8] & 0xF;
        lfe = (data[13] & 0x18) >> 3;
        m_info.m_dataIsLE = true;
        bits = 14;
        break;

      /* 16bit BE */
      case DTS_PREAMBLE_16BE:
        dtsBlocks = (((data[4] & 0x1) << 7) | ((data[5] & 0xFC) >> 2)) + 1;
        m_fsize = (((((data[5] & 0x3) << 8) | data[6]) << 4) | ((data[7] & 0xF0) >> 4)) + 1;
        amode = ((data[7] & 0x0F) << 2) | ((data[8] & 0xC0) >> 6);
        sfreq = (data[8] & 0x3C) >> 2;
        target_rate = ((data[8] & 0x03) << 3) | ((data[9] & 0xe0) >> 5);
        extension = (data[10] & 0x10) >> 4;
        ext_type = (data[10] & 0xe0) >> 5;
        lfe = (data[10] >> 1) & 0x3;
        m_info.m_dataIsLE = false;
        bits = 16;
        break;

      /* 16bit LE */
      case DTS_PREAMBLE_16LE:
        dtsBlocks = (((data[5] & 0x1) << 7) | ((data[4] & 0xFC) >> 2)) + 1;
        m_fsize = (((((data[4] & 0x3) << 8) | data[7]) << 4) | ((data[6] & 0xF0) >> 4)) + 1;
        amode = ((data[6] & 0x0F) << 2) | ((data[9] & 0xC0) >> 6);
        sfreq = (data[9] & 0x3C) >> 2;
        target_rate = ((data[9] & 0x03) << 3) | ((data[8] & 0xe0) >> 5);
        extension = (data[11] & 0x10) >> 4;
        ext_type = (data[11] & 0xe0) >> 5;
        lfe = (data[11] >> 1) & 0x3;
        m_info.m_dataIsLE = true;
        bits = 16;
        break;

      default:
        continue;
    }

    if (sfreq == 0 || sfreq >= DTS_SFREQ_COUNT)
      continue;

    /* make sure the framesize is sane */
    if (m_fsize < 96 || m_fsize > 16384)
      continue;

    /* only 512/1024/2048-sample frames are valid (dtsBlocks * 32 samples) */
    bool invalid = false;
    CAEStreamInfo::DataType dataType;
    switch (dtsBlocks << 5)
    {
      case 512 : dataType = CAEStreamInfo::STREAM_TYPE_DTS_512 ; break;
      case 1024: dataType = CAEStreamInfo::STREAM_TYPE_DTS_1024; break;
      case 2048: dataType = CAEStreamInfo::STREAM_TYPE_DTS_2048; break;
      default:
        invalid = true;
        break;
    }
    if (invalid)
      continue;

    /* adjust the fsize for 14 bit streams */
    if (bits == 14)
      m_fsize = m_fsize / 14 * 16;

    /* we need enough data to check for DTS-HD */
    if (size - skip < m_fsize + 10)
    {
      /* we can assume DTS sync at this point */
      m_syncFunc = &CAEStreamParser::SyncDTS;
      m_needBytes = m_fsize + 10;
      m_fsize = 0;
      return skip;
    }

    /* look for DTS-HD directly after the core frame */
    hd_sync = (data[m_fsize] << 24) | (data[m_fsize + 1] << 16) | (data[m_fsize + 2] << 8) | data[m_fsize + 3];
    if (hd_sync == DTS_PREAMBLE_HD)
    {
      int hd_size;
      bool blownup = (data[m_fsize + 5] & 0x20) != 0;
      if (blownup)
        hd_size = (((data[m_fsize + 6] & 0x01) << 19) | (data[m_fsize + 7] << 11) | (data[m_fsize + 8] << 3) | ((data[m_fsize + 9] & 0xe0) >> 5)) + 1;
      else
        hd_size = (((data[m_fsize + 6] & 0x1f) << 11) | (data[m_fsize + 7] << 3) | ((data[m_fsize + 8] & 0xe0) >> 5)) + 1;

      int header_size;
      if (blownup)
        header_size = (((data[m_fsize + 5] & 0x1f) << 7) | ((data[m_fsize + 6] & 0xfe) >> 1)) + 1;
      else
        header_size = (((data[m_fsize + 5] & 0x1f) << 3) | ((data[m_fsize + 6] & 0xe0) >> 5)) + 1;

      /* the asset sync word just after the substream header tells us which
       * extension is present */
      hd_sync = data[m_fsize + header_size] << 24 | data[m_fsize + header_size + 1] << 16 | data[m_fsize + header_size + 2] << 8 | data[m_fsize + header_size + 3];

      /* set the type according to core or not */
      if (m_coreOnly)
        dataType = CAEStreamInfo::STREAM_TYPE_DTSHD_CORE;
      else if (hd_sync == DTS_PREAMBLE_XLL)
        dataType = CAEStreamInfo::STREAM_TYPE_DTSHD_MA;
      else if (hd_sync == DTS_PREAMBLE_XCH ||
               hd_sync == DTS_PREAMBLE_XXCH ||
               hd_sync == DTS_PREAMBLE_X96K ||
               hd_sync == DTS_PREAMBLE_XBR ||
               hd_sync == DTS_PREAMBLE_LBR)
        dataType = CAEStreamInfo::STREAM_TYPE_DTSHD;
      else
        dataType = m_info.m_type;

      m_coreSize = m_fsize;
      m_fsize += hd_size;
    }

    unsigned int sampleRate = DTSSampleRates[sfreq];
    /* (re)publish stream info only when something actually changed */
    if (!m_hasSync || skip || dataType != m_info.m_type || sampleRate != m_info.m_sampleRate || dtsBlocks != m_dtsBlocks)
    {
      m_hasSync = true;
      m_info.m_type = dataType;
      m_info.m_sampleRate = sampleRate;
      m_dtsBlocks = dtsBlocks;
      m_info.m_channels = DTSChannels[amode] + (lfe ? 1 : 0);
      m_syncFunc = &CAEStreamParser::SyncDTS;
      m_info.m_repeat = 1;

      if (dataType == CAEStreamInfo::STREAM_TYPE_DTSHD_MA)
      {
        m_info.m_channels += 2; /* FIXME: this needs to be read out, not sure how to do that yet */
        m_info.m_dtsPeriod = (192000 * (8 >> 1)) * (m_dtsBlocks << 5) / m_info.m_sampleRate;
      }
      else if (dataType == CAEStreamInfo::STREAM_TYPE_DTSHD)
      {
        m_info.m_dtsPeriod = (192000 * (2 >> 1)) * (m_dtsBlocks << 5) / m_info.m_sampleRate;
      }
      else
      {
        m_info.m_dtsPeriod = (m_info.m_sampleRate * (2 >> 1)) * (m_dtsBlocks << 5) / m_info.m_sampleRate;
      }

      std::string type;
      switch (dataType)
      {
        case CAEStreamInfo::STREAM_TYPE_DTSHD: type = "dtsHD"; break;
        case CAEStreamInfo::STREAM_TYPE_DTSHD_MA: type = "dtsHD MA"; break;
        case CAEStreamInfo::STREAM_TYPE_DTSHD_CORE: type = "dtsHD (core)"; break;
        default: type = "dts"; break;
      }

      if (extension)
      {
        switch (ext_type)
        {
          case 0: type += " XCH"; break;
          case 2: type += " X96"; break;
          case 6: type += " XXCH"; break;
          default: type += " ext unknown"; break;
        }
      }

      CLog::Log(LOGINFO, "CAEStreamParser::SyncDTS - %s stream detected (%d channels, %dHz, %dbit %s, period: %u, syncword: 0x%x, target rate: 0x%x, framesize %u))", type.c_str(), m_info.m_channels, m_info.m_sampleRate, bits, m_info.m_dataIsLE ? "LE" : "BE", m_info.m_dtsPeriod, hd_sync, target_rate, m_fsize);
    }

    return skip;
  }

  /* lost sync */
  CLog::Log(LOGINFO, "CAEStreamParser::SyncDTS - DTS sync lost");
  m_hasSync = false;
  return skip;
}

/* Sum the channel counts contributed by each set bit of the TrueHD
 * channel-assignment bitmap. */
inline unsigned int CAEStreamParser::GetTrueHDChannels(const uint16_t chanmap)
{
  int channels = 0;
  for (int i = 0; i < 13; ++i)
    channels += THDChanMap[i] * ((chanmap >> i) & 1);
  return channels;
}

/*
 * Scan for a TrueHD/MLP access unit. Major sync units (syncword
 * 0xf8726fba at offset 4) are fully validated via CRC and establish sync
 * and stream info; subsequent minor units are validated with the
 * per-substream parity nibble only. Returns the offset of the unit.
 */
unsigned int CAEStreamParser::SyncTrueHD(uint8_t *data, unsigned int size)
{
  unsigned int left = size;
  unsigned int skip = 0;

  /* if MLP */
  for (; left; ++skip, ++data, --left)
  {
    /* if we dont have sync and there is less than 8 bytes, then break out */
    if (!m_hasSync && left < 8)
      return size;

    /* if its a major audio unit */
    uint16_t length = ((data[0] & 0x0F) << 8 | data[1]) << 1;
    uint32_t syncword = ((((data[4] << 8 | data[5]) << 8) | data[6]) << 8) | data[7];
    if (syncword == 0xf8726fba)
    {
      /* we need 32 bytes to sync on a master audio unit */
      if (left < 32)
        return skip;

      /* get the rate and ensure its valid */
      int rate = (data[8] & 0xf0) >> 4;
      if (rate == 0xF)
        continue;

      unsigned int major_sync_size = 28;
      if (data[29] & 1)
      {
        /* extension(s) present, look up count */
        int extension_count = data[30] >> 4;
        major_sync_size += 2 + extension_count * 2;
      }

      if (left < 4 + major_sync_size)
        return skip;

      /* verify the crc of the audio unit */
      uint16_t crc = av_crc(m_crcTrueHD, 0, data + 4, major_sync_size - 4);
      crc ^= (data[4 + major_sync_size - 3] << 8) | data[4 + major_sync_size - 4];
      if (((data[4 + major_sync_size - 1] << 8) | data[4 + major_sync_size - 2]) != crc)
        continue;

      /* get the sample rate and substreams, we have a valid master audio unit */
      m_info.m_sampleRate = (rate & 0x8 ? 44100 : 48000) << (rate & 0x7);
      m_substreams = (data[20] & 0xF0) >> 4;

      /* get the number of encoded channels */
      uint16_t channel_map = ((data[10] & 0x1F) << 8) | data[11];
      if (!channel_map)
        channel_map = (data[9] << 1) | (data[10] >> 7);
      m_info.m_channels = CAEStreamParser::GetTrueHDChannels(channel_map);

      if (!m_hasSync)
        CLog::Log(LOGINFO, "CAEStreamParser::SyncTrueHD - TrueHD stream detected (%d channels, %dHz)", m_info.m_channels, m_info.m_sampleRate);

      m_hasSync = true;
      m_fsize = length;
      m_info.m_type = CAEStreamInfo::STREAM_TYPE_TRUEHD;
      m_syncFunc = &CAEStreamParser::SyncTrueHD;
      m_info.m_repeat = 1;
      return skip;
    }
    else
    {
      /* we cant sync to a subframe until we have the information from a master audio unit */
      if (!m_hasSync)
        continue;

      /* if there is not enough data left to verify the packet, just return the skip amount */
      if (left < (unsigned int)m_substreams * 4)
        return skip;

      /* verify the parity */
      int p = 0;
      uint8_t check = 0;
      for (int i = -1; i < m_substreams; ++i)
      {
        check ^= data[p++];
        check ^= data[p++];
        if (i == -1 || data[p - 2] & 0x80)
        {
          check ^= data[p++];
          check ^= data[p++];
        }
      }

      /* if the parity nibble does not match */
      if ((((check >> 4) ^ check) & 0xF) != 0xF)
      {
        /* lost sync */
        m_hasSync = false;
        CLog::Log(LOGINFO, "CAEStreamParser::SyncTrueHD - Sync Lost");
        continue;
      }
      else
      {
        m_fsize = length;
        return skip;
      }
    }
  }

  /* lost sync */
  m_hasSync = false;
  return skip;
}
PIPplware/xbmc
xbmc/cores/AudioEngine/Utils/AEStreamInfo.cpp
C++
gpl-2.0
23,883
/* $Id$ */

/*
 * This file is part of OpenTTD.
 * OpenTTD is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2.
 * OpenTTD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenTTD. If not, see <http://www.gnu.org/licenses/>.
 */

/** @file newgrf_industries.cpp Handling of NewGRF industries. */

#include "stdafx.h"
#include "debug.h"
#include "industry.h"
#include "newgrf_industries.h"
#include "newgrf_town.h"
#include "newgrf_cargo.h"
#include "window_func.h"
#include "town.h"
#include "company_base.h"
#include "error.h"
#include "strings_func.h"
#include "core/random_func.hpp"

#include "table/strings.h"

#include "safeguards.h"

/* Since the industry IDs defined by the GRF file don't necessarily correlate
 * to those used by the game, the IDs used for overriding old industries must be
 * translated when the industry spec is set. */
IndustryOverrideManager _industry_mngr(NEW_INDUSTRYOFFSET, NUM_INDUSTRYTYPES, INVALID_INDUSTRYTYPE);
IndustryTileOverrideManager _industile_mngr(NEW_INDUSTRYTILEOFFSET, NUM_INDUSTRYTILES, INVALID_INDUSTRYTILE);

/**
 * Map the GRF local type to an industry type.
 * @param grf_type The GRF local type.
 * @param grf_id The GRF of the local type.
 * @return The industry type in the global scope.
 */
IndustryType MapNewGRFIndustryType(IndustryType grf_type, uint32 grf_id)
{
	if (grf_type == IT_INVALID) return IT_INVALID;
	/* bit 7 clear means the type is one of the original (non-GRF) industries */
	if (!HasBit(grf_type, 7)) return GB(grf_type, 0, 7);

	return _industry_mngr.GetID(GB(grf_type, 0, 7), grf_id);
}

/**
 * Make an analysis of a tile and check for its belonging to the same
 * industry, and/or the same grf file
 * @param tile TileIndex of the tile to query
 * @param i Industry to which to compare the tile to
 * @param cur_grfid GRFID of the current callback chain
 * @return value encoded as per NFO specs
 */
uint32 GetIndustryIDAtOffset(TileIndex tile, const Industry *i, uint32 cur_grfid)
{
	if (!i->TileBelongsToIndustry(tile)) {
		/* No industry and/or the tile does not have the same industry as the one we match it with */
		return 0xFFFF;
	}

	IndustryGfx gfx = GetCleanIndustryGfx(tile);
	const IndustryTileSpec *indtsp = GetIndustryTileSpec(gfx);

	if (gfx < NEW_INDUSTRYTILEOFFSET) { // Does it belong to an old type?
		/* It is an old tile.  We have to see if it's been overridden */
		if (indtsp->grf_prop.override == INVALID_INDUSTRYTILE) { // has it been overridden?
			return 0xFF << 8 | gfx; // no. Tag FF + the gfx id of that tile
		}
		/* Overridden */
		const IndustryTileSpec *tile_ovr = GetIndustryTileSpec(indtsp->grf_prop.override);

		if (tile_ovr->grf_prop.grffile->grfid == cur_grfid) {
			return tile_ovr->grf_prop.local_id; // same grf file
		} else {
			return 0xFFFE; // not the same grf file
		}
	}
	/* Not an 'old type' tile */
	if (indtsp->grf_prop.spritegroup[0] != NULL) { // tile has a spritegroup ?
		if (indtsp->grf_prop.grffile->grfid == cur_grfid) { // same industry, same grf ?
			return indtsp->grf_prop.local_id;
		} else {
			return 0xFFFE; // Defined in another grf file
		}
	}
	/* The tile has no spritegroup */
	return 0xFF << 8 | indtsp->grf_prop.subst_id; // so just give him the substitute
}

/**
 * Manhattan distance from @a tile to the closest other industry of @a type.
 * @param tile Tile to measure from.
 * @param type Industry type to look for.
 * @param current Industry to exclude from the search (the caller itself).
 * @return Smallest distance found, or UINT32_MAX when none exists.
 */
static uint32 GetClosestIndustry(TileIndex tile, IndustryType type, const Industry *current)
{
	uint32 best_dist = UINT32_MAX;
	const Industry *i;
	FOR_ALL_INDUSTRIES(i) {
		if (i->type != type || i == current) continue;

		best_dist = min(best_dist, DistanceManhattan(tile, i->location.tile));
	}

	return best_dist;
}

/**
 * Implementation of both var 67 and 68
 * since the mechanism is almost the same, it is easier to regroup them on the same
 * function.
 * @param param_setID parameter given to the callback, which is the set id, or the local id, in our terminology
 * @param layout_filter on what layout do we filter?
 * @param town_filter Do we filter on the same town as the current industry?
 * @param current Industry for which the inquiry is made
 * @return the formatted answer to the callback : rr(reserved) cc(count) dddd(manhattan distance of closest sister)
 */
static uint32 GetCountAndDistanceOfClosestInstance(byte param_setID, byte layout_filter, bool town_filter, const Industry *current)
{
	uint32 GrfID = GetRegister(0x100);  ///< Get the GRFID of the definition to look for in register 100h
	IndustryType ind_index;
	uint32 closest_dist = UINT32_MAX;
	byte count = 0;

	/* Determine what will be the industry type to look for */
	switch (GrfID) {
		case 0:  // this is a default industry type
			ind_index = param_setID;
			break;

		case 0xFFFFFFFF: // current grf
			GrfID = GetIndustrySpec(current->type)->grf_prop.grffile->grfid;
			FALLTHROUGH;

		default: // use the grfid specified in register 100h
			SetBit(param_setID, 7); // bit 7 means it is not an old type
			ind_index = MapNewGRFIndustryType(param_setID, GrfID);
			break;
	}

	/* If the industry type is invalid, there is none and the closest is far away. */
	if (ind_index >= NUM_INDUSTRYTYPES) return 0 | 0xFFFF;

	if (layout_filter == 0 && !town_filter) {
		/* If the filter is 0, it could be because none was specified as well as being really a 0.
		 * In either case, just do the regular var67 */
		closest_dist = GetClosestIndustry(current->location.tile, ind_index, current);
		count = min(Industry::GetIndustryTypeCount(ind_index), UINT8_MAX); // clamp to 8 bit
	} else {
		/* Count only those who match the same industry type and layout filter
		 * Unfortunately, we have to do it manually */
		const Industry *i;
		FOR_ALL_INDUSTRIES(i) {
			if (i->type == ind_index && i != current && (i->selected_layout == layout_filter || layout_filter == 0) && (!town_filter || i->town == current->town)) {
				closest_dist = min(closest_dist, DistanceManhattan(current->location.tile, i->location.tile));
				count++;
			}
		}
	}

	return count << 16 | GB(closest_dist, 0, 16);
}

/**
 * Resolve an industry variable for VarAction2 evaluation.
 * During the construction-check callback (CBID_INDUSTRY_LOCATION) a reduced
 * 0x80..0x8F set is served first; otherwise variables are answered from the
 * resolved industry instance. Sets *available to false for unknown variables.
 */
/* virtual */ uint32 IndustriesScopeResolver::GetVariable(byte variable, uint32 parameter, bool *available) const
{
	if (this->ro.callback == CBID_INDUSTRY_LOCATION) {
		/* Variables available during construction check. */

		switch (variable) {
			case 0x80: return this->tile;
			case 0x81: return GB(this->tile, 8, 8);

			/* Pointer to the town the industry is associated with */
			case 0x82: return this->industry->town->index;

			case 0x83:
			case 0x84:
			case 0x85: DEBUG(grf, 0, "NewGRFs shouldn't be doing pointer magic"); break; // not supported

			/* Number of the layout */
			case 0x86: return this->industry->selected_layout;

			/* Ground type */
			case 0x87: return GetTerrainType(this->tile);

			/* Town zone */
			case 0x88: return GetTownRadiusGroup(this->industry->town, this->tile);

			/* Manhattan distance of the closest town */
			case 0x89: return min(DistanceManhattan(this->industry->town->xy, this->tile), 255);

			/* Lowest height of the tile */
			case 0x8A: return Clamp(GetTileZ(this->tile) * (this->ro.grffile->grf_version >= 8 ? 1 : TILE_HEIGHT), 0, 0xFF);

			/* Distance to the nearest water/land tile */
			case 0x8B: return GetClosestWaterDistance(this->tile, (GetIndustrySpec(this->industry->type)->behaviour & INDUSTRYBEH_BUILT_ONWATER) == 0);

			/* Square of Euclidian distance from town */
			case 0x8D: return min(DistanceSquare(this->industry->town->xy, this->tile), 65535);

			/* 32 random bits */
			case 0x8F: return this->random_bits;
		}
	}

	const IndustrySpec *indspec = GetIndustrySpec(this->type);

	if (this->industry == NULL) {
		DEBUG(grf, 1, "Unhandled variable 0x%X (no available industry) in callback 0x%x", variable, this->ro.callback);

		*available = false;
		return UINT_MAX;
	}

	switch (variable) {
		case 0x40:
		case 0x41:
		case 0x42: { // waiting cargo, but only if those two callback flags are set
			uint16 callback = indspec->callback_mask;
			if (HasBit(callback, CBM_IND_PRODUCTION_CARGO_ARRIVAL) || HasBit(callback, CBM_IND_PRODUCTION_256_TICKS)) {
				if ((indspec->behaviour & INDUSTRYBEH_PROD_MULTI_HNDLING) != 0) {
					if (this->industry->prod_level == 0) return 0;
					return min(this->industry->incoming_cargo_waiting[variable - 0x40] / this->industry->prod_level, (uint16)0xFFFF);
				} else {
					return min(this->industry->incoming_cargo_waiting[variable - 0x40], (uint16)0xFFFF);
				}
			} else {
				return 0;
			}
		}

		/* Manhattan distance of closest dry/water tile */
		case 0x43:
			if (this->tile == INVALID_TILE) break;
			return GetClosestWaterDistance(this->tile, (indspec->behaviour & INDUSTRYBEH_BUILT_ONWATER) == 0);

		/* Layout number */
		case 0x44: return this->industry->selected_layout;

		/* Company info */
		case 0x45: {
			byte colours = 0;
			bool is_ai = false;

			const Company *c = Company::GetIfValid(this->industry->founder);
			if (c != NULL) {
				const Livery *l = &c->livery[LS_DEFAULT];

				is_ai = c->is_ai;
				colours = l->colour1 + l->colour2 * 16;
			}

			return this->industry->founder | (is_ai ? 0x10000 : 0) | (colours << 24);
		}

		case 0x46: return this->industry->construction_date; // Date when built - long format - (in days)

		/* Get industry ID at offset param */
		case 0x60: return GetIndustryIDAtOffset(GetNearbyTile(parameter, this->industry->location.tile, false), this->industry, this->ro.grffile->grfid);

		/* Get random tile bits at offset param */
		case 0x61: {
			if (this->tile == INVALID_TILE) break;
			TileIndex tile = GetNearbyTile(parameter, this->tile, false);
			return this->industry->TileBelongsToIndustry(tile) ? GetIndustryRandomBits(tile) : 0;
		}

		/* Land info of nearby tiles */
		case 0x62:
			if (this->tile == INVALID_TILE) break;
			return GetNearbyIndustryTileInformation(parameter, this->tile, INVALID_INDUSTRY, false, this->ro.grffile->grf_version >= 8);

		/* Animation stage of nearby tiles */
		case 0x63: {
			if (this->tile == INVALID_TILE) break;
			TileIndex tile = GetNearbyTile(parameter, this->tile, false);
			if (this->industry->TileBelongsToIndustry(tile)) {
				return GetAnimationFrame(tile);
			}
			return 0xFFFFFFFF;
		}

		/* Distance of nearest industry of given type */
		case 0x64:
			if (this->tile == INVALID_TILE) break;
			return GetClosestIndustry(this->tile, MapNewGRFIndustryType(parameter, indspec->grf_prop.grffile->grfid), this->industry);
		/* Get town zone and Manhattan distance of closest town */
		case 0x65:
			if (this->tile == INVALID_TILE) break;
			return GetTownRadiusGroup(this->industry->town, this->tile) << 16 | min(DistanceManhattan(this->tile, this->industry->town->xy), 0xFFFF);
		/* Get square of Euclidian distance of closest town */
		case 0x66:
			if (this->tile == INVALID_TILE) break;
			return GetTownRadiusGroup(this->industry->town, this->tile) << 16 | min(DistanceSquare(this->tile, this->industry->town->xy), 0xFFFF);

		/* Count of industry, distance of closest instance
		 * 68 is the same as 67, but with a filtering on selected layout */
		case 0x67:
		case 0x68: {
			byte layout_filter = 0;
			bool town_filter = false;
			if (variable == 0x68) {
				uint32 reg = GetRegister(0x101);
				layout_filter = GB(reg, 0, 8);
				town_filter = HasBit(reg, 8);
			}
			return GetCountAndDistanceOfClosestInstance(parameter, layout_filter, town_filter, this->industry);
		}

		/* Get a variable from the persistent storage */
		case 0x7C: return (this->industry->psa != NULL) ? this->industry->psa->GetValue(parameter) : 0;

		/* Industry structure access*/
		case 0x80: return this->industry->location.tile;
		case 0x81: return GB(this->industry->location.tile, 8, 8);
		/* Pointer to the town the industry is associated with */
		case 0x82: return this->industry->town->index;
		case 0x83:
		case 0x84:
		case 0x85: DEBUG(grf, 0, "NewGRFs shouldn't be doing pointer magic"); break; // not supported
		case 0x86: return this->industry->location.w;
		case 0x87: return this->industry->location.h;// xy dimensions

		case 0x88:
		case 0x89: return this->industry->produced_cargo[variable - 0x88];
		case 0x8A: return this->industry->produced_cargo_waiting[0];
		case 0x8B: return GB(this->industry->produced_cargo_waiting[0], 8, 8);
		case 0x8C: return this->industry->produced_cargo_waiting[1];
		case 0x8D: return GB(this->industry->produced_cargo_waiting[1], 8, 8);
		case 0x8E:
		case 0x8F: return this->industry->production_rate[variable - 0x8E];
		case 0x90:
		case 0x91:
		case 0x92: return this->industry->accepts_cargo[variable - 0x90];
		case 0x93: return this->industry->prod_level;
		/* amount of cargo produced so far THIS month. */
		case 0x94: return this->industry->this_month_production[0];
		case 0x95: return GB(this->industry->this_month_production[0], 8, 8);
		case 0x96: return this->industry->this_month_production[1];
		case 0x97: return GB(this->industry->this_month_production[1], 8, 8);
		/* amount of cargo transported so far THIS month. */
		case 0x98: return this->industry->this_month_transported[0];
		case 0x99: return GB(this->industry->this_month_transported[0], 8, 8);
		case 0x9A: return this->industry->this_month_transported[1];
		case 0x9B: return GB(this->industry->this_month_transported[1], 8, 8);
		/* fraction of cargo transported LAST month. */
		case 0x9C:
		case 0x9D: return this->industry->last_month_pct_transported[variable - 0x9C];
		/* amount of cargo produced LAST month. */
		case 0x9E: return this->industry->last_month_production[0];
		case 0x9F: return GB(this->industry->last_month_production[0], 8, 8);
		case 0xA0: return this->industry->last_month_production[1];
		case 0xA1: return GB(this->industry->last_month_production[1], 8, 8);
		/* amount of cargo transported last month. */
		case 0xA2: return this->industry->last_month_transported[0];
		case 0xA3: return GB(this->industry->last_month_transported[0], 8, 8);
		case 0xA4: return this->industry->last_month_transported[1];
		case 0xA5: return GB(this->industry->last_month_transported[1], 8, 8);

		case 0xA6: return indspec->grf_prop.local_id;
		case 0xA7: return this->industry->founder;
		case 0xA8: return this->industry->random_colour;
		case 0xA9: return Clamp(this->industry->last_prod_year - ORIGINAL_BASE_YEAR, 0, 255);
		case 0xAA: return this->industry->counter;
		case 0xAB: return GB(this->industry->counter, 8, 8);
		case 0xAC: return this->industry->was_cargo_delivered;

		case 0xB0: return Clamp(this->industry->construction_date - DAYS_TILL_ORIGINAL_BASE_YEAR, 0, 65535); // Date when built since 1920 (in days)
		case 0xB3: return this->industry->construction_type; // Construction type
		case 0xB4: return Clamp(this->industry->last_cargo_accepted_at - DAYS_TILL_ORIGINAL_BASE_YEAR, 0, 65535); // Date last cargo accepted since 1920 (in days)
	}

	DEBUG(grf, 1, "Unhandled industry variable 0x%X", variable);

	*available = false;
	return UINT_MAX;
}

/** @return The industry's random bits, or 0 when no industry is resolved. */
/* virtual */ uint32 IndustriesScopeResolver::GetRandomBits() const
{
	return this->industry != NULL ? this->industry->random : 0;
}

/** Industries have no random triggers. */
/* virtual */ uint32 IndustriesScopeResolver::GetTriggers() const
{
	return 0;
}

/**
 * Write @a value to slot @a pos of the industry's persistent storage,
 * lazily allocating the storage on the first non-zero write. Writes on a
 * not-yet-built industry (INVALID_INDUSTRY) are ignored.
 */
/* virtual */ void IndustriesScopeResolver::StorePSA(uint pos, int32 value)
{
	if (this->industry->index == INVALID_INDUSTRY) return;
	if (this->industry->psa == NULL) {
		/* There is no need to create a storage if the value is zero. */
		if (value == 0) return;

		/* Create storage on first modification. */
		const IndustrySpec *indsp = GetIndustrySpec(this->industry->type);
		uint32 grfid = (indsp->grf_prop.grffile != NULL) ? indsp->grf_prop.grffile->grfid : 0;
		assert(PersistentStorage::CanAllocateItem());
		this->industry->psa = new PersistentStorage(grfid, GSF_INDUSTRIES, this->industry->location.tile);
	}

	this->industry->psa->StoreValue(pos, value);
}

/**
 * Get the grf file associated with the given industry type.
 * @param type Industry type to query.
 * @return The associated GRF file, if any.
 */
static const GRFFile *GetGrffile(IndustryType type)
{
	const IndustrySpec *indspec = GetIndustrySpec(type);
	return (indspec != NULL) ? indspec->grf_prop.grffile : NULL;
}

/**
 * Constructor of the industries resolver.
 * @param tile %Tile owned by the industry.
 * @param industry %Industry being resolved.
 * @param type Type of the industry.
 * @param random_bits Random bits of the new industry.
 * @param callback Callback ID.
 * @param callback_param1 First parameter (var 10) of the callback.
 * @param callback_param2 Second parameter (var 18) of the callback.
 */
IndustriesResolverObject::IndustriesResolverObject(TileIndex tile, Industry *indus, IndustryType type, uint32 random_bits,
		CallbackID callback, uint32 callback_param1, uint32 callback_param2)
	: ResolverObject(GetGrffile(type), callback, callback_param1, callback_param2),
	industries_scope(*this, tile, indus, type, random_bits),
	town_scope(NULL)
{
	this->root_spritegroup = GetIndustrySpec(type)->grf_prop.spritegroup[0];
}

IndustriesResolverObject::~IndustriesResolverObject()
{
	delete this->town_scope;
}

/**
 * Get or create the town scope object associated with the industry.
 * @return The associated town scope, if it exists.
 */
TownScopeResolver *IndustriesResolverObject::GetTown()
{
	if (this->town_scope == NULL) {
		Town *t = NULL;
		bool readonly = true;
		if (this->industries_scope.industry != NULL) {
			t = this->industries_scope.industry->town;
			readonly = this->industries_scope.industry->index == INVALID_INDUSTRY;
		} else if (this->industries_scope.tile != INVALID_TILE) {
			t = ClosestTownFromTile(this->industries_scope.tile, UINT_MAX);
		}
		if (t == NULL) return NULL;
		this->town_scope = new TownScopeResolver(*this, t, readonly);
	}
	return this->town_scope;
}

/**
 * Perform an industry callback.
 * @param callback The callback to perform.
 * @param param1 The first parameter.
 * @param param2 The second parameter.
 * @param industry The industry to do the callback for.
 * @param type The type of industry to do the callback for.
 * @param tile The tile associated with the callback.
 * @return The callback result.
 */
uint16 GetIndustryCallback(CallbackID callback, uint32 param1, uint32 param2, Industry *industry, IndustryType type, TileIndex tile)
{
	IndustriesResolverObject object(tile, industry, type, 0, callback, param1, param2);
	return object.ResolveCallback();
}

/**
 * Check that the industry callback allows creation of the industry.
 * @param tile %Tile to build the industry.
 * @param type Type of industry to build.
 * @param layout Layout number.
 * @param seed Seed for the random generator.
 * @param initial_random_bits The random bits the industry is going to have after construction.
 * @param founder Industry founder
 * @param creation_type The circumstances the industry is created under.
 * @return Succeeded or failed command.
*/ CommandCost CheckIfCallBackAllowsCreation(TileIndex tile, IndustryType type, uint layout, uint32 seed, uint16 initial_random_bits, Owner founder, IndustryAvailabilityCallType creation_type) { const IndustrySpec *indspec = GetIndustrySpec(type); Industry ind; ind.index = INVALID_INDUSTRY; ind.location.tile = tile; ind.location.w = 0; // important to mark the industry invalid ind.type = type; ind.selected_layout = layout; ind.town = ClosestTownFromTile(tile, UINT_MAX); ind.random = initial_random_bits; ind.founder = founder; ind.psa = NULL; IndustriesResolverObject object(tile, &ind, type, seed, CBID_INDUSTRY_LOCATION, 0, creation_type); uint16 result = object.ResolveCallback(); /* Unlike the "normal" cases, not having a valid result means we allow * the building of the industry, as that's how it's done in TTDP. */ if (result == CALLBACK_FAILED) return CommandCost(); return GetErrorMessageFromLocationCallbackResult(result, indspec->grf_prop.grffile, STR_ERROR_SITE_UNSUITABLE); } /** * Check with callback #CBID_INDUSTRY_PROBABILITY whether the industry can be built. * @param type Industry type to check. * @param creation_type Reason to construct a new industry. * @return If the industry has no callback or allows building, \c true is returned. Otherwise, \c false is returned. */ uint32 GetIndustryProbabilityCallback(IndustryType type, IndustryAvailabilityCallType creation_type, uint32 default_prob) { const IndustrySpec *indspec = GetIndustrySpec(type); if (HasBit(indspec->callback_mask, CBM_IND_PROBABILITY)) { uint16 res = GetIndustryCallback(CBID_INDUSTRY_PROBABILITY, 0, creation_type, NULL, type, INVALID_TILE); if (res != CALLBACK_FAILED) { if (indspec->grf_prop.grffile->grf_version < 8) { /* Disallow if result != 0 */ if (res != 0) default_prob = 0; } else { /* Use returned probability. 
0x100 to use default */ if (res < 0x100) { default_prob = res; } else if (res > 0x100) { ErrorUnknownCallbackResult(indspec->grf_prop.grffile->grfid, CBID_INDUSTRY_PROBABILITY, res); } } } } return default_prob; } static int32 DerefIndProd(int field, bool use_register) { return use_register ? (int32)GetRegister(field) : field; } /** * Get the industry production callback and apply it to the industry. * @param ind the industry this callback has to be called for * @param reason the reason it is called (0 = incoming cargo, 1 = periodic tick callback) */ void IndustryProductionCallback(Industry *ind, int reason) { const IndustrySpec *spec = GetIndustrySpec(ind->type); IndustriesResolverObject object(ind->location.tile, ind, ind->type); if ((spec->behaviour & INDUSTRYBEH_PRODCALLBACK_RANDOM) != 0) object.callback_param1 = Random(); int multiplier = 1; if ((spec->behaviour & INDUSTRYBEH_PROD_MULTI_HNDLING) != 0) multiplier = ind->prod_level; object.callback_param2 = reason; for (uint loop = 0;; loop++) { /* limit the number of calls to break infinite loops. * 'loop' is provided as 16 bits to the newgrf, so abort when those are exceeded. 
*/ if (loop >= 0x10000) { /* display error message */ SetDParamStr(0, spec->grf_prop.grffile->filename); SetDParam(1, spec->name); ShowErrorMessage(STR_NEWGRF_BUGGY, STR_NEWGRF_BUGGY_ENDLESS_PRODUCTION_CALLBACK, WL_WARNING); /* abort the function early, this error isn't critical and will allow the game to continue to run */ break; } SB(object.callback_param2, 8, 16, loop); const SpriteGroup *tgroup = object.Resolve(); if (tgroup == NULL || tgroup->type != SGT_INDUSTRY_PRODUCTION) break; const IndustryProductionSpriteGroup *group = (const IndustryProductionSpriteGroup *)tgroup; bool deref = (group->version == 1); for (uint i = 0; i < 3; i++) { ind->incoming_cargo_waiting[i] = Clamp(ind->incoming_cargo_waiting[i] - DerefIndProd(group->subtract_input[i], deref) * multiplier, 0, 0xFFFF); } for (uint i = 0; i < 2; i++) { ind->produced_cargo_waiting[i] = Clamp(ind->produced_cargo_waiting[i] + max(DerefIndProd(group->add_output[i], deref), 0) * multiplier, 0, 0xFFFF); } int32 again = DerefIndProd(group->again, deref); if (again == 0) break; SB(object.callback_param2, 24, 8, again); } SetWindowDirty(WC_INDUSTRY_VIEW, ind->index); } /** * Check whether an industry temporarily refuses to accept a certain cargo. * @param ind The industry to query. * @param cargo_type The cargo to get information about. * @pre cargo_type is in ind->accepts_cargo. * @return Whether the given industry refuses to accept this cargo type. 
*/ bool IndustryTemporarilyRefusesCargo(Industry *ind, CargoID cargo_type) { assert(cargo_type == ind->accepts_cargo[0] || cargo_type == ind->accepts_cargo[1] || cargo_type == ind->accepts_cargo[2]); const IndustrySpec *indspec = GetIndustrySpec(ind->type); if (HasBit(indspec->callback_mask, CBM_IND_REFUSE_CARGO)) { uint16 res = GetIndustryCallback(CBID_INDUSTRY_REFUSE_CARGO, 0, indspec->grf_prop.grffile->cargo_map[cargo_type], ind, ind->type, ind->location.tile); if (res != CALLBACK_FAILED) return !ConvertBooleanCallback(indspec->grf_prop.grffile, CBID_INDUSTRY_REFUSE_CARGO, res); } return false; }
andythenorth/NotRoadTypes
src/newgrf_industries.cpp
C++
gpl-2.0
24,123
<?php /* +--------------------------------------------------------------------+ | CiviCRM version 4.2 | +--------------------------------------------------------------------+ | Copyright CiviCRM LLC (c) 2004-2012 | +--------------------------------------------------------------------+ | This file is a part of CiviCRM. | | | | CiviCRM is free software; you can copy, modify, and distribute it | | under the terms of the GNU Affero General Public License | | Version 3, 19 November 2007 and the CiviCRM Licensing Exception. | | | | CiviCRM is distributed in the hope that it will be useful, but | | WITHOUT ANY WARRANTY; without even the implied warranty of | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | | See the GNU Affero General Public License for more details. | | | | You should have received a copy of the GNU Affero General Public | | License and the CiviCRM Licensing Exception along | | with this program; if not, contact CiviCRM LLC | | at info[AT]civicrm[DOT]org. If you have questions about the | | GNU Affero General Public License or the licensing of CiviCRM, | | see the CiviCRM license FAQ at http://civicrm.org/licensing | +--------------------------------------------------------------------+ */ /** * * @package CRM * @copyright CiviCRM LLC (c) 2004-2012 * $Id$ * */ /** * This class is for displaying alphabetical bar * */ class CRM_Utils_PagerAToZ { /** * returns the alphabetic array for sorting by character * * @param array $query The query object * @param string $sortByCharacter The character that we are potentially sorting on * * @return string The html formatted string * @access public * @static */ static function getAToZBar(&$query, $sortByCharacter, $isDAO = FALSE) { $AToZBar = self::createLinks($query, $sortByCharacter, $isDAO); return $AToZBar; } /** * Function to return the all the static characters * * @return array $staticAlphabets is a array of static characters * @access private * @static */ static function getStaticCharacters() { 
$staticAlphabets = array('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'); return $staticAlphabets; } /** * Function to return the all the dynamic characters * * @return array $dynamicAlphabets is a array of dynamic characters * @access private * @static */ static function getDynamicCharacters(&$query, $isDAO) { if ($isDAO) { $result = $query; } else { $result = $query->alphabetQuery(); } if (!$result) { return NULL; } $dynamicAlphabets = array(); while ($result->fetch()) { $dynamicAlphabets[] = $result->sort_name; } return $dynamicAlphabets; } /** * create the links * * @param array $query The form values for search * @param string $sortByCharacter The character that we are potentially sorting on * * @return array with links * @access private * @static */ static function createLinks(&$query, $sortByCharacter, $isDAO) { $AToZBar = self::getStaticCharacters(); $dynamicAlphabets = self::getDynamicCharacters($query, $isDAO); if (!$dynamicAlphabets) { return NULL; } $AToZBar = array_merge($AToZBar, $dynamicAlphabets); sort($AToZBar, SORT_STRING); $AToZBar = array_unique($AToZBar); //get the current path $path = CRM_Utils_System::currentPath(); $qfKey = null; if (isset($query->_formValues)) { $qfKey = CRM_Utils_Array::value('qfKey', $query->_formValues); } if (empty($qfKey)) { $qfKey = CRM_Utils_Request::retrieve('qfKey', 'String', $this, FALSE, NULL, $_REQUEST); } $aToZBar = array(); foreach ($AToZBar as $key => $link) { if ($link === NULL) { continue; } $element = array(); if (in_array($link, $dynamicAlphabets)) { $klass = ''; if ($link == $sortByCharacter) { $element['class'] = "active"; $klass = 'class="active"'; } $url = CRM_Utils_System::url($path, "force=1&qfKey=$qfKey&sortByCharacter="); // we do it this way since we want the url to be encoded but not the link character // since that seems to mess up drupal utf-8 encoding etc $url .= urlencode($link); $element['item'] = sprintf('<a 
href="%s" %s>%s</a>', $url, $klass, $link ); } else { $element['item'] = $link; } $aToZBar[] = $element; } $url = sprintf('<a href="%s">%s</a>', CRM_Utils_System::url($path, "force=1&qfKey=$qfKey&sortByCharacter=all"), 'All' ); $aToZBar[] = array('item' => $url); return $aToZBar; } }
cfusch/drupal-sandbox
sites/all/modules/civicrm/CRM/Utils/PagerAToZ.php
PHP
gpl-2.0
5,279
(function() { var extend = function(child, parent) { for (var key in parent) { if (hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }, hasProp = {}.hasOwnProperty; define(function(require) { var AnimationProperty, StampingEffectProperty; AnimationProperty = require('../base/module'); return StampingEffectProperty = (function(superClass) { extend(StampingEffectProperty, superClass); function StampingEffectProperty() { StampingEffectProperty.__super__.constructor.call(this, { id: 'stamping', name: 'Stamping' }); } return StampingEffectProperty; })(AnimationProperty); }); }).call(this); //# sourceMappingURL=../../../../../maps/modules/datasketch/animation/properties/stamping/module.js.map
CalCoRE/DataSketch
DataSketchServerUpload/DataSketchWeb/DataSketch.Web/obj/Release/Package/PackageTmp/DataSketchApp3/cslib/modules/datasketch/animation/properties/stamping/module.js
JavaScript
gpl-2.0
954
<?php /* ** Zabbix ** Copyright (C) 2000-2012 Zabbix SIA ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. **/ class CJsonImportReader extends CImportReader { /** * convert string with data in JSON format to php array. * * @param string $string * * @return array */ public function read($string) { $json = new CJSON; return $json->decode($string, true); } }
gheja/zabbix-ext
frontends/php/include/classes/import/readers/CJsonImportReader.php
PHP
gpl-2.0
1,052
<?php /** * Akeeba Engine * The modular PHP5 site backup engine * * @copyright Copyright (c)2009-2014 Nicholas K. Dionysopoulos * @license GNU GPL version 3 or, at your option, any later version * @package akeebaengine * */ namespace Akeeba\Engine\Filter; // Protection against direct access defined('AKEEBAENGINE') or die(); use Akeeba\Engine\Factory; use Akeeba\Engine\Platform; /** * Subdirectories exclusion filter. Excludes temporary, cache and backup output * directories' contents from being backed up. */ class Joomlaskipdirs extends Base { public function __construct() { $this->object = 'dir'; $this->subtype = 'children'; $this->method = 'direct'; $this->filter_name = 'Joomlaskipdirs'; // We take advantage of the filter class magic to inject our custom filters $configuration = Factory::getConfiguration(); $jreg = \JFactory::getConfig(); $tmpdir = $jreg->get('tmp_path'); $logsdir = $jreg->get('log_path'); // Get the site's root if ($configuration->get('akeeba.platform.override_root', 0)) { $root = $configuration->get('akeeba.platform.newroot', '[SITEROOT]'); } else { $root = '[SITEROOT]'; } $this->filter_data[$root] = array( // Output & temp directory of the component $this->treatDirectory($configuration->get('akeeba.basic.output_directory')), // Joomla! temporary directory $this->treatDirectory($tmpdir), // Joomla! logs directory $this->treatDirectory($logsdir), // default temp directory 'tmp', // Joomla! front- and back-end cache, as reported by Joomla! $this->treatDirectory(JPATH_CACHE), $this->treatDirectory(JPATH_ADMINISTRATOR . '/cache'), $this->treatDirectory(JPATH_ROOT . '/cache'), // cache directories fallback 'cache', 'administrator/cache', // This is not needed except on sites running SVN or beta releases $this->treatDirectory(JPATH_ROOT . '/installation'), // ...and the fallback 'installation', // Joomla! 
front- and back-end cache, as calculated by us (redundancy, for funky server setups) $this->treatDirectory(Platform::getInstance()->get_site_root() . '/cache'), $this->treatDirectory(Platform::getInstance()->get_site_root() . '/administrator/cache'), // Default backup output (many people change it, forget to remove old backup archives and they end up backing up old backups) 'administrator/components/com_akeeba/backup', // MyBlog's cache $this->treatDirectory(Platform::getInstance()->get_site_root() . '/components/libraries/cmslib/cache'), // ...and fallback 'components/libraries/cmslib/cache', // The logs and log directories, hardcoded 'logs', 'log' ); parent::__construct(); } }
ForAEdesWeb/AEW2
administrator/components/com_akeeba/platform/joomla25/Filter/Joomlaskipdirs.php
PHP
gpl-2.0
2,728
/**************************************************************************** * * * Project64 - A Nintendo 64 emulator. * * http://www.pj64-emu.com/ * * Copyright (C) 2016 Project64. All rights reserved. * * Copyright (C) 2012 Bobby Smiles * * Copyright (C) 2009 Richard Goedeken * * Copyright (C) 2002 Hacktarux * * * * License: * * GNU/GPLv2 http://www.gnu.org/licenses/gpl-2.0.html * * version 2 of the License, or (at your option) any later version. * * * ****************************************************************************/ #include "stdafx.h" #include <stdlib.h> #include "arithmetics.h" #include "mem.h" #define SUBBLOCK_SIZE 64 typedef void(*tile_line_emitter_t)(CHle * hle, const int16_t *y, const int16_t *u, uint32_t address); typedef void(*subblock_transform_t)(int16_t *dst, const int16_t *src); /* standard jpeg ucode decoder */ static void jpeg_decode_std(CHle * hle, const char *const version, const subblock_transform_t transform_luma, const subblock_transform_t transform_chroma, const tile_line_emitter_t emit_line); /* helper functions */ static uint8_t clamp_u8(int16_t x); static int16_t clamp_s12(int16_t x); static uint16_t clamp_RGBA_component(int16_t x); /* pixel conversion & formatting */ static uint32_t GetUYVY(int16_t y1, int16_t y2, int16_t u, int16_t v); static uint16_t GetRGBA(int16_t y, int16_t u, int16_t v); /* tile line emitters */ static void EmitYUVTileLine(CHle * hle, const int16_t *y, const int16_t *u, uint32_t address); static void EmitRGBATileLine(CHle * hle, const int16_t *y, const int16_t *u, uint32_t address); /* macroblocks operations */ static void decode_macroblock_ob(int16_t *macroblock, int32_t *y_dc, int32_t *u_dc, int32_t *v_dc, const int16_t *qtable); static void decode_macroblock_std(const subblock_transform_t transform_luma, const subblock_transform_t transform_chroma, int16_t *macroblock, unsigned int subblock_count, const int16_t qtables[3][SUBBLOCK_SIZE]); static void EmitTilesMode0(CHle * hle, const 
tile_line_emitter_t emit_line, const int16_t *macroblock, uint32_t address); static void EmitTilesMode2(CHle * hle, const tile_line_emitter_t emit_line, const int16_t *macroblock, uint32_t address); /* subblocks operations */ static void TransposeSubBlock(int16_t *dst, const int16_t *src); static void ZigZagSubBlock(int16_t *dst, const int16_t *src); static void ReorderSubBlock(int16_t *dst, const int16_t *src, const unsigned int *table); static void MultSubBlocks(int16_t *dst, const int16_t *src1, const int16_t *src2, unsigned int shift); static void ScaleSubBlock(int16_t *dst, const int16_t *src, int16_t scale); static void RShiftSubBlock(int16_t *dst, const int16_t *src, unsigned int shift); static void InverseDCT1D(const float *const x, float *dst, unsigned int stride); static void InverseDCTSubBlock(int16_t *dst, const int16_t *src); static void RescaleYSubBlock(int16_t *dst, const int16_t *src); static void RescaleUVSubBlock(int16_t *dst, const int16_t *src); /* transposed dequantization table */ static const int16_t DEFAULT_QTABLE[SUBBLOCK_SIZE] = { 16, 12, 14, 14, 18, 24, 49, 72, 11, 12, 13, 17, 22, 35, 64, 92, 10, 14, 16, 22, 37, 55, 78, 95, 16, 19, 24, 29, 56, 64, 87, 98, 24, 26, 40, 51, 68, 81, 103, 112, 40, 58, 57, 87, 109, 104, 121, 100, 51, 60, 69, 80, 103, 113, 120, 103, 61, 55, 56, 62, 77, 92, 101, 99 }; /* zig-zag indices */ static const unsigned int ZIGZAG_TABLE[SUBBLOCK_SIZE] = { 0, 1, 5, 6, 14, 15, 27, 28, 2, 4, 7, 13, 16, 26, 29, 42, 3, 8, 12, 17, 25, 30, 41, 43, 9, 11, 18, 24, 31, 40, 44, 53, 10, 19, 23, 32, 39, 45, 52, 54, 20, 22, 33, 38, 46, 51, 55, 60, 21, 34, 37, 47, 50, 56, 59, 61, 35, 36, 48, 49, 57, 58, 62, 63 }; /* transposition indices */ static const unsigned int TRANSPOSE_TABLE[SUBBLOCK_SIZE] = { 0, 8, 16, 24, 32, 40, 48, 56, 1, 9, 17, 25, 33, 41, 49, 57, 2, 10, 18, 26, 34, 42, 50, 58, 3, 11, 19, 27, 35, 43, 51, 59, 4, 12, 20, 28, 36, 44, 52, 60, 5, 13, 21, 29, 37, 45, 53, 61, 6, 14, 22, 30, 38, 46, 54, 62, 7, 15, 23, 31, 39, 47, 
55, 63 }; /* IDCT related constants * Cn = alpha * cos(n * PI / 16) (alpha is chosen such as C4 = 1) */ static const float IDCT_C3 = 1.175875602f; static const float IDCT_C6 = 0.541196100f; static const float IDCT_K[10] = { 0.765366865f, /* C2-C6 */ -1.847759065f, /* -C2-C6 */ -0.390180644f, /* C5-C3 */ -1.961570561f, /* -C5-C3 */ 1.501321110f, /* C1+C3-C5-C7 */ 2.053119869f, /* C1+C3-C5+C7 */ 3.072711027f, /* C1+C3+C5-C7 */ 0.298631336f, /* -C1+C3+C5-C7 */ -0.899976223f, /* C7-C3 */ -2.562915448f /* -C1-C3 */ }; /* global functions */ /*************************************************************************** * JPEG decoding ucode found in Japanese exclusive version of Pokemon Stadium. **************************************************************************/ void jpeg_decode_PS0(CHle * hle) { jpeg_decode_std(hle, "PS0", RescaleYSubBlock, RescaleUVSubBlock, EmitYUVTileLine); } /*************************************************************************** * JPEG decoding ucode found in Ocarina of Time, Pokemon Stadium 1 and * Pokemon Stadium 2. **************************************************************************/ void jpeg_decode_PS(CHle * hle) { jpeg_decode_std(hle, "PS", NULL, NULL, EmitRGBATileLine); } /*************************************************************************** * JPEG decoding ucode found in Ogre Battle and Bottom of the 9th. 
**************************************************************************/ void jpeg_decode_OB(CHle * hle) { int16_t qtable[SUBBLOCK_SIZE]; unsigned int mb; int32_t y_dc = 0; int32_t u_dc = 0; int32_t v_dc = 0; uint32_t address = *dmem_u32(hle, TASK_DATA_PTR); const unsigned int macroblock_count = *dmem_u32(hle, TASK_DATA_SIZE); const int qscale = *dmem_u32(hle, TASK_YIELD_DATA_SIZE); hle->VerboseMessage("jpeg_decode_OB: *buffer=%x, #MB=%d, qscale=%d", address, macroblock_count, qscale); if (qscale != 0) { if (qscale > 0) { ScaleSubBlock(qtable, DEFAULT_QTABLE, qscale); } else { RShiftSubBlock(qtable, DEFAULT_QTABLE, -qscale); } } for (mb = 0; mb < macroblock_count; ++mb) { int16_t macroblock[6 * SUBBLOCK_SIZE]; dram_load_u16(hle, (uint16_t *)macroblock, address, 6 * SUBBLOCK_SIZE); decode_macroblock_ob(macroblock, &y_dc, &u_dc, &v_dc, (qscale != 0) ? qtable : NULL); EmitTilesMode2(hle, EmitYUVTileLine, macroblock, address); address += (2 * 6 * SUBBLOCK_SIZE); } } /* local functions */ static void jpeg_decode_std(CHle * hle, const char *const version, const subblock_transform_t transform_luma, const subblock_transform_t transform_chroma, const tile_line_emitter_t emit_line) { int16_t qtables[3][SUBBLOCK_SIZE]; unsigned int mb; uint32_t address; uint32_t macroblock_count; uint32_t mode; uint32_t qtableY_ptr; uint32_t qtableU_ptr; uint32_t qtableV_ptr; unsigned int subblock_count; unsigned int macroblock_size; /* macroblock contains at most 6 subblocks */ int16_t macroblock[6 * SUBBLOCK_SIZE]; uint32_t data_ptr; if (*dmem_u32(hle, TASK_FLAGS) & 0x1) { hle->WarnMessage("jpeg_decode_%s: task yielding not implemented", version); return; } data_ptr = *dmem_u32(hle, TASK_DATA_PTR); address = *dram_u32(hle, data_ptr); macroblock_count = *dram_u32(hle, data_ptr + 4); mode = *dram_u32(hle, data_ptr + 8); qtableY_ptr = *dram_u32(hle, data_ptr + 12); qtableU_ptr = *dram_u32(hle, data_ptr + 16); qtableV_ptr = *dram_u32(hle, data_ptr + 20); hle->VerboseMessage("jpeg_decode_%s: 
*buffer=%x, #MB=%d, mode=%d, *Qy=%x, *Qu=%x, *Qv=%x", version, address, macroblock_count, mode, qtableY_ptr, qtableU_ptr, qtableV_ptr); if (mode != 0 && mode != 2) { hle->WarnMessage("jpeg_decode_%s: invalid mode %d", version, mode); return; } subblock_count = mode + 4; macroblock_size = subblock_count * SUBBLOCK_SIZE; dram_load_u16(hle, (uint16_t *)qtables[0], qtableY_ptr, SUBBLOCK_SIZE); dram_load_u16(hle, (uint16_t *)qtables[1], qtableU_ptr, SUBBLOCK_SIZE); dram_load_u16(hle, (uint16_t *)qtables[2], qtableV_ptr, SUBBLOCK_SIZE); for (mb = 0; mb < macroblock_count; ++mb) { dram_load_u16(hle, (uint16_t *)macroblock, address, macroblock_size); decode_macroblock_std(transform_luma, transform_chroma, macroblock, subblock_count, (const int16_t(*)[SUBBLOCK_SIZE])qtables); if (mode == 0) { EmitTilesMode0(hle, emit_line, macroblock, address); } else { EmitTilesMode2(hle, emit_line, macroblock, address); } address += 2 * macroblock_size; } } static uint8_t clamp_u8(int16_t x) { return (x & (0xff00)) ? 
((-x) >> 15) & 0xff : x; } static int16_t clamp_s12(int16_t x) { if (x < -0x800) { x = -0x800; } else if (x > 0x7f0) { x = 0x7f0; } return x; } static uint16_t clamp_RGBA_component(int16_t x) { if (x > 0xff0) { x = 0xff0; } else if (x < 0) { x = 0; } return (x & 0xf80); } static uint32_t GetUYVY(int16_t y1, int16_t y2, int16_t u, int16_t v) { return (uint32_t)clamp_u8(u) << 24 | (uint32_t)clamp_u8(y1) << 16 | (uint32_t)clamp_u8(v) << 8 | (uint32_t)clamp_u8(y2); } static uint16_t GetRGBA(int16_t y, int16_t u, int16_t v) { const float fY = (float)y + 2048.0f; const float fU = (float)u; const float fV = (float)v; const uint16_t r = clamp_RGBA_component((int16_t)(fY + 1.4025 * fV)); const uint16_t g = clamp_RGBA_component((int16_t)(fY - 0.3443 * fU - 0.7144 * fV)); const uint16_t b = clamp_RGBA_component((int16_t)(fY + 1.7729 * fU)); return (r << 4) | (g >> 1) | (b >> 6) | 1; } static void EmitYUVTileLine(CHle * hle, const int16_t *y, const int16_t *u, uint32_t address) { uint32_t uyvy[8]; const int16_t *const v = u + SUBBLOCK_SIZE; const int16_t *const y2 = y + SUBBLOCK_SIZE; uyvy[0] = GetUYVY(y[0], y[1], u[0], v[0]); uyvy[1] = GetUYVY(y[2], y[3], u[1], v[1]); uyvy[2] = GetUYVY(y[4], y[5], u[2], v[2]); uyvy[3] = GetUYVY(y[6], y[7], u[3], v[3]); uyvy[4] = GetUYVY(y2[0], y2[1], u[4], v[4]); uyvy[5] = GetUYVY(y2[2], y2[3], u[5], v[5]); uyvy[6] = GetUYVY(y2[4], y2[5], u[6], v[6]); uyvy[7] = GetUYVY(y2[6], y2[7], u[7], v[7]); dram_store_u32(hle, uyvy, address, 8); } static void EmitRGBATileLine(CHle * hle, const int16_t *y, const int16_t *u, uint32_t address) { uint16_t rgba[16]; const int16_t *const v = u + SUBBLOCK_SIZE; const int16_t *const y2 = y + SUBBLOCK_SIZE; rgba[0] = GetRGBA(y[0], u[0], v[0]); rgba[1] = GetRGBA(y[1], u[0], v[0]); rgba[2] = GetRGBA(y[2], u[1], v[1]); rgba[3] = GetRGBA(y[3], u[1], v[1]); rgba[4] = GetRGBA(y[4], u[2], v[2]); rgba[5] = GetRGBA(y[5], u[2], v[2]); rgba[6] = GetRGBA(y[6], u[3], v[3]); rgba[7] = GetRGBA(y[7], u[3], v[3]); rgba[8] = 
GetRGBA(y2[0], u[4], v[4]); rgba[9] = GetRGBA(y2[1], u[4], v[4]); rgba[10] = GetRGBA(y2[2], u[5], v[5]); rgba[11] = GetRGBA(y2[3], u[5], v[5]); rgba[12] = GetRGBA(y2[4], u[6], v[6]); rgba[13] = GetRGBA(y2[5], u[6], v[6]); rgba[14] = GetRGBA(y2[6], u[7], v[7]); rgba[15] = GetRGBA(y2[7], u[7], v[7]); dram_store_u16(hle, rgba, address, 16); } static void EmitTilesMode0(CHle * hle, const tile_line_emitter_t emit_line, const int16_t *macroblock, uint32_t address) { unsigned int i; unsigned int y_offset = 0; unsigned int u_offset = 2 * SUBBLOCK_SIZE; for (i = 0; i < 8; ++i) { emit_line(hle, &macroblock[y_offset], &macroblock[u_offset], address); y_offset += 8; u_offset += 8; address += 32; } } static void EmitTilesMode2(CHle * hle, const tile_line_emitter_t emit_line, const int16_t *macroblock, uint32_t address) { unsigned int i; unsigned int y_offset = 0; unsigned int u_offset = 4 * SUBBLOCK_SIZE; for (i = 0; i < 8; ++i) { emit_line(hle, &macroblock[y_offset], &macroblock[u_offset], address); emit_line(hle, &macroblock[y_offset + 8], &macroblock[u_offset], address + 32); y_offset += (i == 3) ? 
SUBBLOCK_SIZE + 16 : 16; u_offset += 8; address += 64; } } static void decode_macroblock_ob(int16_t *macroblock, int32_t *y_dc, int32_t *u_dc, int32_t *v_dc, const int16_t *qtable) { int sb; for (sb = 0; sb < 6; ++sb) { int16_t tmp_sb[SUBBLOCK_SIZE]; /* update DC */ int32_t dc = (int32_t)macroblock[0]; switch (sb) { case 0: case 1: case 2: case 3: *y_dc += dc; macroblock[0] = *y_dc & 0xffff; break; case 4: *u_dc += dc; macroblock[0] = *u_dc & 0xffff; break; case 5: *v_dc += dc; macroblock[0] = *v_dc & 0xffff; break; } ZigZagSubBlock(tmp_sb, macroblock); if (qtable != NULL) { MultSubBlocks(tmp_sb, tmp_sb, qtable, 0); } TransposeSubBlock(macroblock, tmp_sb); InverseDCTSubBlock(macroblock, macroblock); macroblock += SUBBLOCK_SIZE; } } static void decode_macroblock_std(const subblock_transform_t transform_luma, const subblock_transform_t transform_chroma, int16_t *macroblock, unsigned int subblock_count, const int16_t qtables[3][SUBBLOCK_SIZE]) { unsigned int sb; unsigned int q = 0; for (sb = 0; sb < subblock_count; ++sb) { int16_t tmp_sb[SUBBLOCK_SIZE]; const int isChromaSubBlock = (subblock_count - sb <= 2); if (isChromaSubBlock) { ++q; } MultSubBlocks(macroblock, macroblock, qtables[q], 4); ZigZagSubBlock(tmp_sb, macroblock); InverseDCTSubBlock(macroblock, tmp_sb); if (isChromaSubBlock) { if (transform_chroma != NULL) { transform_chroma(macroblock, macroblock); } } else { if (transform_luma != NULL) { transform_luma(macroblock, macroblock); } } macroblock += SUBBLOCK_SIZE; } } static void TransposeSubBlock(int16_t *dst, const int16_t *src) { ReorderSubBlock(dst, src, TRANSPOSE_TABLE); } static void ZigZagSubBlock(int16_t *dst, const int16_t *src) { ReorderSubBlock(dst, src, ZIGZAG_TABLE); } static void ReorderSubBlock(int16_t *dst, const int16_t *src, const unsigned int *table) { unsigned int i; /* source and destination sublocks cannot overlap */ assert(abs(dst - src) > SUBBLOCK_SIZE); for (i = 0; i < SUBBLOCK_SIZE; ++i) dst[i] = src[table[i]]; } static void 
MultSubBlocks(int16_t *dst, const int16_t *src1, const int16_t *src2, unsigned int shift) { unsigned int i; for (i = 0; i < SUBBLOCK_SIZE; ++i) { int32_t v = src1[i] * src2[i]; dst[i] = clamp_s16(v) << shift; } } static void ScaleSubBlock(int16_t *dst, const int16_t *src, int16_t scale) { unsigned int i; for (i = 0; i < SUBBLOCK_SIZE; ++i) { int32_t v = src[i] * scale; dst[i] = clamp_s16(v); } } static void RShiftSubBlock(int16_t *dst, const int16_t *src, unsigned int shift) { unsigned int i; for (i = 0; i < SUBBLOCK_SIZE; ++i) dst[i] = src[i] >> shift; } /*************************************************************************** * Fast 2D IDCT using separable formulation and normalization * Computations use single precision floats * Implementation based on Wikipedia : * http://fr.wikipedia.org/wiki/Transform%C3%A9e_en_cosinus_discr%C3%A8te **************************************************************************/ static void InverseDCT1D(const float *const x, float *dst, unsigned int stride) { float e[4]; float f[4]; float x26, x1357, x15, x37, x17, x35; x15 = IDCT_K[2] * (x[1] + x[5]); x37 = IDCT_K[3] * (x[3] + x[7]); x17 = IDCT_K[8] * (x[1] + x[7]); x35 = IDCT_K[9] * (x[3] + x[5]); x1357 = IDCT_C3 * (x[1] + x[3] + x[5] + x[7]); x26 = IDCT_C6 * (x[2] + x[6]); f[0] = x[0] + x[4]; f[1] = x[0] - x[4]; f[2] = x26 + IDCT_K[0] * x[2]; f[3] = x26 + IDCT_K[1] * x[6]; e[0] = x1357 + x15 + IDCT_K[4] * x[1] + x17; e[1] = x1357 + x37 + IDCT_K[6] * x[3] + x35; e[2] = x1357 + x15 + IDCT_K[5] * x[5] + x35; e[3] = x1357 + x37 + IDCT_K[7] * x[7] + x17; *dst = f[0] + f[2] + e[0]; dst += stride; *dst = f[1] + f[3] + e[1]; dst += stride; *dst = f[1] - f[3] + e[2]; dst += stride; *dst = f[0] - f[2] + e[3]; dst += stride; *dst = f[0] - f[2] - e[3]; dst += stride; *dst = f[1] - f[3] - e[2]; dst += stride; *dst = f[1] + f[3] - e[1]; dst += stride; *dst = f[0] + f[2] - e[0]; } static void InverseDCTSubBlock(int16_t *dst, const int16_t *src) { float x[8]; float block[SUBBLOCK_SIZE]; 
unsigned int i, j; /* idct 1d on rows (+transposition) */ for (i = 0; i < 8; ++i) { for (j = 0; j < 8; ++j) { x[j] = (float)src[i * 8 + j]; } InverseDCT1D(x, &block[i], 8); } /* idct 1d on columns (thanks to previous transposition) */ for (i = 0; i < 8; ++i) { InverseDCT1D(&block[i * 8], x, 1); /* C4 = 1 normalization implies a division by 8 */ for (j = 0; j < 8; ++j) { dst[i + j * 8] = (int16_t)x[j] >> 3; } } } static void RescaleYSubBlock(int16_t *dst, const int16_t *src) { unsigned int i; for (i = 0; i < SUBBLOCK_SIZE; ++i) { dst[i] = (((uint32_t)(clamp_s12(src[i]) + 0x800) * 0xdb0) >> 16) + 0x10; } } static void RescaleUVSubBlock(int16_t *dst, const int16_t *src) { unsigned int i; for (i = 0; i < SUBBLOCK_SIZE; ++i) { dst[i] = (((int)clamp_s12(src[i]) * 0xe00) >> 16) + 0x80; } }
Frank-74/project64
Source/Android/PluginRSP/jpeg.cpp
C++
gpl-2.0
18,978
<?php /** * The template for displaying all pages. * * This is the template that displays all pages by default. * Please note that this is the WordPress construct of pages * and that other 'pages' on your WordPress site will use a * different template. * * @package fastr */ get_header(); ?> <div id="primary" class="content-area"> <main id="main" class="site-main" role="main"> <?php while ( have_posts() ) : the_post(); ?> <?php get_template_part( 'content', 'page' ); ?> <?php // If comments are open or we have at least one comment, load up the comment template if ( comments_open() || '0' != get_comments_number() ) : comments_template(); endif; ?> <?php endwhile; // end of the loop. ?> </main><!-- #main --> </div><!-- #primary --> <?php get_sidebar(); ?> <?php get_footer(); ?>
di0fref/wordpress_fahlslstad
wp-content/themes/fastr/page.php
PHP
gpl-2.0
848