answer
stringlengths
15
1.25M
#ifndef IN_OUT_PARAMETER_H #define IN_OUT_PARAMETER_H #include "UnaryOperator.h" #include "ast-decl.h" SWALLOW_NS_BEGIN class SWALLOW_EXPORT InOutParameter : public UnaryOperator { public: InOutParameter(); }; SWALLOW_NS_END #endif//IN_OUT_PARAMETER_H
using NUnit.Framework; using <API key>.Infrastructure.ContainerExtensions; using <API key>.Common; namespace <API key>.UnitTests.<API key>.Infrastructure { [TestFixture(Category=TestCategory.UnitTests)] public class <API key> { [Test, RequiresSTA] public void <API key>() { <API key>.<API key><TraceLogAspect>(1); <API key>.<API key><TraceLogAspect, TraceLogAspect>(); <API key>.<API key><TraceLogAspect>(); } } }
#include "berryCTKPluginUtils.h" #include "<API key>.h" #include "<API key>.h" #include "berryLog.h" #include <ctkServiceTracker.h> namespace berry { const QString CTKPluginUtils::PROP_CONFIG_AREA = "blueberry.configuration.area"; const QString CTKPluginUtils::PROP_INSTANCE_AREA = "blueberry.instance.area"; CTKPluginUtils::CTKPluginUtils() { InitServices(); } void CTKPluginUtils::InitServices() { ctkPluginContext* context = <API key>::getPluginContext(); if (context == nullptr) { //RuntimeLog.log(new Status(IStatus.ERROR, RegistryMessages.OWNER_NAME, 0, RegistryMessages.<API key>, null)); BERRY_ERROR << "The plugin " << IRuntimeConstants::PI_RUNTIME() << " was not activated."; return; } //debugTracker = new ServiceTracker(context, DebugOptions.class.getName(), null); //debugTracker.open(); // locations //const QString FILTER_PREFIX = "(&(objectClass=org.blueberry.service.datalocation.Location)(type="; // Filter filter = null; // try { // filter = context.createFilter(FILTER_PREFIX + PROP_CONFIG_AREA + "))"); //$NON-NLS-1$ // } catch (<API key> e) { // // ignore this. It should never happen as we have tested the above format. 
// <API key> = new ServiceTracker(context, filter, null); // <API key>.open(); } CTKPluginUtils* CTKPluginUtils::GetDefault() { static CTKPluginUtils singleton; return &singleton; } void CTKPluginUtils::CloseServices() { // if (!debugTracker.isNull()) // debugTracker->close(); // debugTracker.reset(); // if (!<API key>.isNull()) // <API key>->close(); // <API key>.reset(); } bool CTKPluginUtils::GetBoolDebugOption(const QString& /*option*/, bool defaultValue) const { // if (debugTracker == null) { // RuntimeLog.log(new Status(IStatus.ERROR, RegistryMessages.OWNER_NAME, 0, RegistryMessages.<API key>, null)); // return defaultValue; // DebugOptions options = (DebugOptions) debugTracker.getService(); // if (options != null) { // String value = options.getOption(option); // if (value != null) // return value.equalsIgnoreCase("true"); //$NON-NLS-1$ return defaultValue; } QSharedPointer<ctkPlugin> CTKPluginUtils::GetPlugin(const QString& pluginName) { QList<QSharedPointer<ctkPlugin> > plugins = <API key>::getPluginContext()->getPlugins(); //Return the first plugin matching the plugin name and which is not installed or uninstalled foreach (QSharedPointer<ctkPlugin> plugin, plugins) { if (plugin->getSymbolicName() == pluginName && (plugin->getState() & (ctkPlugin::INSTALLED | ctkPlugin::UNINSTALLED)) == 0) { return plugin; } } return QSharedPointer<ctkPlugin>(); } //Location CTKPluginUtils::<API key>() const // if (<API key> == null) // return null; // return (Location) <API key>.getService(); }
#include <utility> #include "base/bind.h" #include "base/run_loop.h" #include "base/test/metrics/histogram_tester.h" #include "base/time/time.h" #include "chrome/browser/ash/arc/arc_util.h" #include "chrome/browser/ash/arc/fileapi/<API key>.h" #include "chrome/browser/ash/arc/fileapi/<API key>.h" #include "chrome/browser/ash/arc/fileapi/<API key>.h" #include "chrome/browser/chromeos/fileapi/<API key>.h" #include "chrome/browser/chromeos/fileapi/recent_file.h" #include "chrome/browser/chromeos/fileapi/recent_source.h" #include "chrome/test/base/testing_profile.h" #include "components/arc/mojom/file_system.mojom.h" #include "components/arc/session/arc_bridge_service.h" #include "components/arc/session/arc_service_manager.h" #include "components/arc/test/<API key>.h" #include "components/arc/test/<API key>.h" #include "components/keyed_service/content/<API key>.h" #include "content/public/test/<API key>.h" #include "testing/gtest/include/gtest/gtest.h" namespace chromeos { namespace { const char <API key>[] = "com.android.providers.media.documents"; const char kAudioRootId[] = "audio_root"; const char kImagesRootId[] = "images_root"; const char kVideosRootId[] = "videos_root"; std::unique_ptr<KeyedService> <API key>( content::BrowserContext* context) { return arc::<API key>::CreateForTesting( context, arc::ArcServiceManager::Get()->arc_bridge_service()); } arc::<API key>::Document MakeDocument( const std::string& document_id, const std::string& parent_document_id, const std::string& display_name, const std::string& mime_type, const base::Time& last_modified) { return arc::<API key>::Document( <API key>, // authority document_id, // document_id parent_document_id, // parent_document_id display_name, // display_name mime_type, // mime_type 0, // size last_modified.ToJavaTime()); // last_modified } } // namespace class <API key> : public testing::Test { public: <API key>() = default; void SetUp() override { <API key> = std::make_unique<arc::ArcServiceManager>(); profile_ 
= std::make_unique<TestingProfile>(); <API key>->set_browser_context(profile_.get()); runner_ = static_cast<arc::<API key>*>( arc::<API key>::GetFactory() -><API key>( profile_.get(), base::BindRepeating( &<API key>))); // Mount ARC file systems. arc::<API key>::<API key>(profile_.get()); // Add documents to <API key>. Note that they are not available // until <API key>() is called. <API key>(); source_ = std::make_unique<<API key>>(profile_.get()); } void TearDown() override { <API key>->arc_bridge_service()->file_system()->CloseInstance( &fake_file_system_); } protected: void <API key>() { auto images_root_doc = MakeDocument(kImagesRootId, "", "", arc::<API key>, base::Time::FromJavaTime(1)); auto cat_doc = MakeDocument("cat", kImagesRootId, "cat.png", "image/png", base::Time::FromJavaTime(2)); auto dog_doc = MakeDocument("dog", kImagesRootId, "dog.jpg", "image/jpeg", base::Time::FromJavaTime(3)); auto fox_doc = MakeDocument("fox", kImagesRootId, "fox.gif", "image/gif", base::Time::FromJavaTime(4)); auto elk_doc = MakeDocument("elk", kImagesRootId, "elk.tiff", "image/tiff", base::Time::FromJavaTime(5)); auto audio_root_doc = MakeDocument(kAudioRootId, "", "", arc::<API key>, base::Time::FromJavaTime(1)); auto god_doc = MakeDocument("god", kAudioRootId, "god.mp3", "audio/mp3", base::Time::FromJavaTime(6)); auto videos_root_doc = MakeDocument(kVideosRootId, "", "", arc::<API key>, base::Time::FromJavaTime(1)); auto hot_doc = MakeDocument("hot", kVideosRootId, "hot.mp4", "video/mp4", base::Time::FromJavaTime(7)); auto ink_doc = MakeDocument("ink", kVideosRootId, "ink.webm", "video/webm", base::Time::FromJavaTime(8)); fake_file_system_.AddDocument(images_root_doc); fake_file_system_.AddDocument(cat_doc); fake_file_system_.AddDocument(dog_doc); fake_file_system_.AddDocument(fox_doc); fake_file_system_.AddDocument(audio_root_doc); fake_file_system_.AddDocument(god_doc); fake_file_system_.AddDocument(videos_root_doc); fake_file_system_.AddDocument(hot_doc); 
fake_file_system_.AddDocument(ink_doc); fake_file_system_.AddRecentDocument(kImagesRootId, images_root_doc); fake_file_system_.AddRecentDocument(kImagesRootId, cat_doc); fake_file_system_.AddRecentDocument(kImagesRootId, dog_doc); fake_file_system_.AddRecentDocument(kImagesRootId, elk_doc); fake_file_system_.AddRecentDocument(kAudioRootId, audio_root_doc); fake_file_system_.AddRecentDocument(kAudioRootId, god_doc); fake_file_system_.AddRecentDocument(kVideosRootId, videos_root_doc); fake_file_system_.AddRecentDocument(kVideosRootId, hot_doc); fake_file_system_.AddRecentDocument(kVideosRootId, ink_doc); } void <API key>() { <API key>->arc_bridge_service()->file_system()->SetInstance( &fake_file_system_); arc::<API key>( <API key>->arc_bridge_service()->file_system()); } std::vector<RecentFile> GetRecentFiles( RecentSource::FileType file_type = RecentSource::FileType::kAll) { std::vector<RecentFile> files; base::RunLoop run_loop; source_->GetRecentFiles(RecentSource::Params( nullptr /* file_system_context */, GURL() /* origin */, 1 /* max_files: ignored */, base::Time() /* cutoff_time: ignored */, file_type /* file_type */, base::BindOnce( [](base::RunLoop* run_loop, std::vector<RecentFile>* out_files, std::vector<RecentFile> files) { run_loop->Quit(); *out_files = std::move(files); }, &run_loop, &files))); run_loop.Run(); return files; } void EnableDefer() { runner_->SetShouldDefer(true); } content::<API key> task_environment_; arc::<API key> fake_file_system_; // Use the same initialization/destruction order as // `<API key>`. 
std::unique_ptr<arc::ArcServiceManager> <API key>; std::unique_ptr<TestingProfile> profile_; arc::<API key>* runner_; std::unique_ptr<<API key>> source_; }; TEST_F(<API key>, Normal) { <API key>(); std::vector<RecentFile> files = GetRecentFiles(); ASSERT_EQ(4u, files.size()); EXPECT_EQ(arc::<API key>(<API key>, kImagesRootId) .Append("cat.png"), files[0].url().path()); EXPECT_EQ(base::Time::FromJavaTime(2), files[0].last_modified()); EXPECT_EQ(arc::<API key>(<API key>, kImagesRootId) .Append("dog.jpg"), files[1].url().path()); EXPECT_EQ(base::Time::FromJavaTime(3), files[1].last_modified()); EXPECT_EQ(arc::<API key>(<API key>, kVideosRootId) .Append("hot.mp4"), files[2].url().path()); EXPECT_EQ(base::Time::FromJavaTime(7), files[2].last_modified()); EXPECT_EQ(arc::<API key>(<API key>, kVideosRootId) .Append("ink.webm"), files[3].url().path()); EXPECT_EQ(base::Time::FromJavaTime(8), files[3].last_modified()); } TEST_F(<API key>, ArcNotAvailable) { std::vector<RecentFile> files = GetRecentFiles(); EXPECT_EQ(0u, files.size()); } TEST_F(<API key>, Deferred) { <API key>(); EnableDefer(); std::vector<RecentFile> files = GetRecentFiles(); EXPECT_EQ(0u, files.size()); } TEST_F(<API key>, GetAudioFiles) { <API key>(); std::vector<RecentFile> files = GetRecentFiles(RecentSource::FileType::kAudio); // Query for recently-modified audio files should be ignored, since // <API key> doesn't support <API key> for audio. 
ASSERT_EQ(0u, files.size()); } TEST_F(<API key>, GetImageFiles) { <API key>(); std::vector<RecentFile> files = GetRecentFiles(RecentSource::FileType::kImage); ASSERT_EQ(2u, files.size()); EXPECT_EQ(arc::<API key>(<API key>, kImagesRootId) .Append("cat.png"), files[0].url().path()); EXPECT_EQ(base::Time::FromJavaTime(2), files[0].last_modified()); EXPECT_EQ(arc::<API key>(<API key>, kImagesRootId) .Append("dog.jpg"), files[1].url().path()); EXPECT_EQ(base::Time::FromJavaTime(3), files[1].last_modified()); } TEST_F(<API key>, GetVideoFiles) { <API key>(); std::vector<RecentFile> files = GetRecentFiles(RecentSource::FileType::kVideo); ASSERT_EQ(2u, files.size()); EXPECT_EQ(arc::<API key>(<API key>, kVideosRootId) .Append("hot.mp4"), files[0].url().path()); EXPECT_EQ(base::Time::FromJavaTime(7), files[0].last_modified()); EXPECT_EQ(arc::<API key>(<API key>, kVideosRootId) .Append("ink.webm"), files[1].url().path()); EXPECT_EQ(base::Time::FromJavaTime(8), files[1].last_modified()); } TEST_F(<API key>, UmaStats) { <API key>(); base::HistogramTester histogram_tester; GetRecentFiles(); histogram_tester.ExpectTotalCount(<API key>::kLoadHistogramName, 1); } TEST_F(<API key>, UmaStats_Deferred) { <API key>(); EnableDefer(); base::HistogramTester histogram_tester; GetRecentFiles(); histogram_tester.ExpectTotalCount(<API key>::kLoadHistogramName, 0); } } // namespace chromeos
package org.motechproject.osgi.web; import org.osgi.framework.BundleActivator; import org.osgi.framework.BundleContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Map; public class Activator implements BundleActivator { private static Logger logger = LoggerFactory.getLogger(Activator.class); private HttpServiceTracker tracker; @Override public void start(BundleContext context) { this.tracker = new HttpServiceTracker(context, resourceMappings()); tracker.start(); logger.debug(String.format("Started bundle: [%d] %s", context.getBundle().getBundleId(), context.getBundle().getSymbolicName())); } public void stop(BundleContext context) { this.tracker.close(); tracker.unregister(); } protected Map<String, String> resourceMappings() { return null; } }
/* TEMPLATE GENERATED TESTCASE FILE Filename: <API key>.cpp Label Definition File: <API key>.label.xml Template File: sources-sink-72a.tmpl.cpp */ /* * @description * CWE: 36 Absolute Path Traversal * BadSource: file Read input from a file * GoodSource: Full path and file name * Sinks: ofstream * BadSink : Open the file named in data using ofstream::open() * Flow Variant: 72 Data flow: data passed in a vector from one function to another in different source files * * */ #include "std_testcase.h" #include <vector> #ifndef _WIN32 #include <wchar.h> #endif #ifdef _WIN32 #define FILENAME "C:\\temp\\file.txt" #else #define FILENAME "/tmp/file.txt" #endif using namespace std; namespace <API key> { #ifndef OMITBAD /* bad function declaration */ void badSink(vector<char *> dataVector); void bad() { char * data; vector<char *> dataVector; char dataBuffer[FILENAME_MAX] = ""; data = dataBuffer; { /* Read input from a file */ size_t dataLen = strlen(data); FILE * pFile; /* if there is room in data, attempt to read the input from a file */ if (<API key> > 1) { pFile = fopen(FILENAME, "r"); if (pFile != NULL) { /* POTENTIAL FLAW: Read data from a file */ if (fgets(data+dataLen, (int)(<API key>), pFile) == NULL) { printLine("fgets() failed"); /* Restore NUL terminator if fgets fails */ data[dataLen] = '\0'; } fclose(pFile); } } } /* Put data in a vector */ dataVector.insert(dataVector.end(), 1, data); dataVector.insert(dataVector.end(), 1, data); dataVector.insert(dataVector.end(), 1, data); badSink(dataVector); } #endif /* OMITBAD */ #ifndef OMITGOOD /* good function declarations */ /* goodG2B uses the GoodSource with the BadSink */ void goodG2BSink(vector<char *> dataVector); static void goodG2B() { char * data; vector<char *> dataVector; char dataBuffer[FILENAME_MAX] = ""; data = dataBuffer; #ifdef _WIN32 /* FIX: Use a fixed, full path and file name */ strcat(data, "c:\\temp\\file.txt"); #else /* FIX: Use a fixed, full path and file name */ strcat(data, "/tmp/file.txt"); 
#endif /* Put data in a vector */ dataVector.insert(dataVector.end(), 1, data); dataVector.insert(dataVector.end(), 1, data); dataVector.insert(dataVector.end(), 1, data); goodG2BSink(dataVector); } void good() { goodG2B(); } #endif /* OMITGOOD */ } /* close namespace */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN using namespace <API key>; /* so that we can use good and bad easily */ int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
using System.Collections.Generic; using ServiceStack.Html; using ServiceStack.WebHost.Endpoints.Support.Markdown; using ServiceStack.WebHost.Endpoints; namespace ServiceStack.Markdown { public abstract class MarkdownViewBase<T> : MarkdownViewBase { private HtmlHelper<T> html; public new HtmlHelper<T> Html { get { return html ?? (html = (HtmlHelper<T>)base.Html); } } protected override HtmlHelper GetHtmlHelper() { return base.Html ?? new HtmlHelper<T>(); } public override void Init(IAppHost appHost, MarkdownPage markdownPage, Dictionary<string, object> scopeArgs, object model, bool renderHtml) { this.AppHost = appHost; this.RenderHtml = renderHtml; this.ScopeArgs = scopeArgs; this.MarkdownPage = markdownPage; var typedModel = (T)model; Html.Init(markdownPage, scopeArgs, renderHtml, new ViewDataDictionary<T>(typedModel)); InitHelpers(); } } public abstract class MarkdownViewBase : ITemplatePage { <summary> Reference to MarkdownViewEngine </summary> public IViewEngine ViewEngine { get; set; } <summary> The AppHost so you can access configuration and resolve dependencies, etc. </summary> public IAppHost AppHost { get; set; } <summary> This precompiled Markdown page with Metadata </summary> public MarkdownPage MarkdownPage { get; protected set; } <summary> ASP.NET MVC's HtmlHelper </summary> public HtmlHelper Html { get; protected set; } <summary> All variables passed to and created by your page. The Response DTO is stored and accessible via the 'Model' variable. All variables and outputs created are stored in ScopeArgs which is what's available to your website template. The Generated page is stored in the 'Body' variable. 
</summary> public Dictionary<string, object> ScopeArgs { get; set; } <summary> Whether HTML or Markdown output is requested </summary> public bool RenderHtml { get; protected set; } <summary> The Response DTO </summary> public object Model { get; protected set; } protected MarkdownViewBase() { Html = GetHtmlHelper(); } <summary> Ensure the same instance is used for subclasses </summary> protected virtual HtmlHelper GetHtmlHelper() { return Html ?? new HtmlHelper(); } public virtual void Init(IAppHost appHost, MarkdownPage markdownPage, Dictionary<string, object> scopeArgs, object model, bool renderHtml) { this.AppHost = appHost; this.RenderHtml = renderHtml; this.ScopeArgs = scopeArgs; this.MarkdownPage = markdownPage; Html.Init(markdownPage, scopeArgs, renderHtml, new ViewDataDictionary(model)); InitHelpers(); } <summary> Called before page is executed </summary> public virtual void InitHelpers() { } <summary> Called after page is executed but before it's merged with the website template if any. </summary> public virtual void OnLoad() { } <summary> Don't HTML encode safe output </summary> <param name="content"></param> <returns></returns> public string Raw(string content) { return Html.Raw(content); } <summary> Return the output of a different view with the specified name using the supplied model </summary> <param name="viewName"></param> <param name="model"></param> <returns></returns> public MvcHtmlString Partial(string viewName, object model) { return Html.Partial(viewName, model); } <summary> Resolve registered Assemblies </summary> <returns></returns> public T Get<T>() { return this.AppHost.TryResolve<T>(); } public string Lower(string name) { return name == null ? null : name.ToLower(); } public string Upper(string name) { return name == null ? null : name.ToUpper(); } public string Combine(string separator, params string[] parts) { return string.Join(separator, parts); } } }
#include <time.h> #if defined(WEBRTC_WIN) #define WIN32_LEAN_AND_MEAN #include <windows.h> #include <winsock2.h> #include <ws2tcpip.h> #define SECURITY_WIN32 #include <security.h> #endif #include <algorithm> #include "webrtc/base/arraysize.h" #include "webrtc/base/base64.h" #include "webrtc/base/checks.h" #include "webrtc/base/cryptstring.h" #include "webrtc/base/httpcommon-inl.h" #include "webrtc/base/httpcommon.h" #include "webrtc/base/messagedigest.h" #include "webrtc/base/socketaddress.h" #include "webrtc/base/stringencode.h" #include "webrtc/base/stringutils.h" namespace rtc { #if defined(WEBRTC_WIN) extern const ConstantLabel SECURITY_ERRORS[]; #endif // Enum - TODO: expose globally later? bool find_string(size_t& index, const std::string& needle, const char* const haystack[], size_t max_index) { for (index=0; index<max_index; ++index) { if (_stricmp(needle.c_str(), haystack[index]) == 0) { return true; } } return false; } template<class E> struct Enum { static const char** Names; static size_t Size; static inline const char* Name(E val) { return Names[val]; } static inline bool Parse(E& val, const std::string& name) { size_t index; if (!find_string(index, name, Names, Size)) return false; val = static_cast<E>(index); return true; } E val; inline operator E&() { return val; } inline Enum& operator=(E rhs) { val = rhs; return *this; } inline const char* name() const { return Name(val); } inline bool assign(const std::string& name) { return Parse(val, name); } inline Enum& operator=(const std::string& rhs) { assign(rhs); return *this; } }; #define ENUM(e,n) \ template<> const char** Enum<e>::Names = n; \ template<> size_t Enum<e>::Size = sizeof(n)/sizeof(n[0]) // HttpCommon static const char* kHttpVersions[HVER_LAST+1] = { "1.0", "1.1", "Unknown" }; ENUM(HttpVersion, kHttpVersions); static const char* kHttpVerbs[HV_LAST+1] = { "GET", "POST", "PUT", "DELETE", "CONNECT", "HEAD" }; ENUM(HttpVerb, kHttpVerbs); static const char* kHttpHeaders[HH_LAST+1] = { "Age", 
"Cache-Control", "Connection", "Content-Disposition", "Content-Length", "Content-Range", "Content-Type", "Cookie", "Date", "ETag", "Expires", "Host", "If-Modified-Since", "If-None-Match", "Keep-Alive", "Last-Modified", "Location", "Proxy-Authenticate", "Proxy-Authorization", "Proxy-Connection", "Range", "Set-Cookie", "TE", "Trailers", "Transfer-Encoding", "Upgrade", "User-Agent", "WWW-Authenticate", }; ENUM(HttpHeader, kHttpHeaders); const char* ToString(HttpVersion version) { return Enum<HttpVersion>::Name(version); } bool FromString(HttpVersion& version, const std::string& str) { return Enum<HttpVersion>::Parse(version, str); } const char* ToString(HttpVerb verb) { return Enum<HttpVerb>::Name(verb); } bool FromString(HttpVerb& verb, const std::string& str) { return Enum<HttpVerb>::Parse(verb, str); } const char* ToString(HttpHeader header) { return Enum<HttpHeader>::Name(header); } bool FromString(HttpHeader& header, const std::string& str) { return Enum<HttpHeader>::Parse(header, str); } bool HttpCodeHasBody(uint32_t code) { return !<API key>(code) && (code != HC_NO_CONTENT) && (code != HC_NOT_MODIFIED); } bool HttpCodeIsCacheable(uint32_t code) { switch (code) { case HC_OK: case <API key>: case HC_PARTIAL_CONTENT: case HC_MULTIPLE_CHOICES: case <API key>: case HC_GONE: return true; default: return false; } } bool <API key>(HttpHeader header) { switch (header) { case HH_CONNECTION: case HH_KEEP_ALIVE: case <API key>: case <API key>: case HH_PROXY_CONNECTION: // Note part of RFC... 
this is non-standard header case HH_TE: case HH_TRAILERS: case <API key>: case HH_UPGRADE: return false; default: return true; } } bool <API key>(HttpHeader header) { switch (header) { case HH_SET_COOKIE: case <API key>: case HH_WWW_AUTHENTICATE: return false; default: return true; } } bool HttpShouldKeepAlive(const HttpData& data) { std::string connection; if ((data.hasHeader(HH_PROXY_CONNECTION, &connection) || data.hasHeader(HH_CONNECTION, &connection))) { return (_stricmp(connection.c_str(), "Keep-Alive") == 0); } return (data.version >= HVER_1_1); } namespace { inline bool <API key>(size_t pos, size_t len, const char * data) { if (pos >= len) return true; if (isspace(static_cast<unsigned char>(data[pos]))) return true; // The reason for this complexity is that some attributes may contain trailing // equal signs (like base64 tokens in Negotiate auth headers) if ((pos+1 < len) && (data[pos] == '=') && !isspace(static_cast<unsigned char>(data[pos+1])) && (data[pos+1] != '=')) { return true; } return false; } // TODO: unittest for EscapeAttribute and <API key>. std::string EscapeAttribute(const std::string& attribute) { const size_t kMaxLength = attribute.length() * 2 + 1; char* buffer = STACK_ARRAY(char, kMaxLength); size_t len = escape(buffer, kMaxLength, attribute.data(), attribute.length(), "\"", '\\'); return std::string(buffer, len); } } // anonymous namespace void <API key>(const HttpAttributeList& attributes, char separator, std::string* composed) { std::stringstream ss; for (size_t i=0; i<attributes.size(); ++i) { if (i > 0) { ss << separator << " "; } ss << attributes[i].first; if (!attributes[i].second.empty()) { ss << "=\"" << EscapeAttribute(attributes[i].second) << "\""; } } *composed = ss.str(); } void HttpParseAttributes(const char * data, size_t len, HttpAttributeList& attributes) { size_t pos = 0; while (true) { // Skip leading whitespace while ((pos < len) && isspace(static_cast<unsigned char>(data[pos]))) { ++pos; } // End of attributes? 
if (pos >= len) return; // Find end of attribute name size_t start = pos; while (!<API key>(pos, len, data)) { ++pos; } HttpAttribute attribute; attribute.first.assign(data + start, data + pos); // Attribute has value? if ((pos < len) && (data[pos] == '=')) { ++pos; // Skip '=' // Check if quoted value if ((pos < len) && (data[pos] == '"')) { while (++pos < len) { if (data[pos] == '"') { ++pos; break; } if ((data[pos] == '\\') && (pos + 1 < len)) ++pos; attribute.second.append(1, data[pos]); } } else { while ((pos < len) && !isspace(static_cast<unsigned char>(data[pos])) && (data[pos] != ',')) { attribute.second.append(1, data[pos++]); } } } attributes.push_back(attribute); if ((pos < len) && (data[pos] == ',')) ++pos; // Skip ',' } } bool HttpHasAttribute(const HttpAttributeList& attributes, const std::string& name, std::string* value) { for (HttpAttributeList::const_iterator it = attributes.begin(); it != attributes.end(); ++it) { if (it->first == name) { if (value) { *value = it->second; } return true; } } return false; } bool HttpHasNthAttribute(HttpAttributeList& attributes, size_t index, std::string* name, std::string* value) { if (index >= attributes.size()) return false; if (name) *name = attributes[index].first; if (value) *value = attributes[index].second; return true; } bool HttpDateToSeconds(const std::string& date, time_t* seconds) { const char* const kTimeZones[] = { "UT", "GMT", "EST", "EDT", "CST", "CDT", "MST", "MDT", "PST", "PDT", "A", "B", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y" }; const int kTimeZoneOffsets[] = { 0, 0, -5, -4, -6, -5, -7, -6, -8, -7, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }; RTC_DCHECK(nullptr != seconds); struct tm tval; memset(&tval, 0, sizeof(tval)); char month[4], zone[6]; memset(month, 0, sizeof(month)); memset(zone, 0, sizeof(zone)); if (7 != sscanf(date.c_str(), "%*3s, %d %3s %d %d:%d:%d %5c", 
&tval.tm_mday, month, &tval.tm_year, &tval.tm_hour, &tval.tm_min, &tval.tm_sec, zone)) { return false; } switch (toupper(month[2])) { case 'N': tval.tm_mon = (month[1] == 'A') ? 0 : 5; break; case 'B': tval.tm_mon = 1; break; case 'R': tval.tm_mon = (month[0] == 'M') ? 2 : 3; break; case 'Y': tval.tm_mon = 4; break; case 'L': tval.tm_mon = 6; break; case 'G': tval.tm_mon = 7; break; case 'P': tval.tm_mon = 8; break; case 'T': tval.tm_mon = 9; break; case 'V': tval.tm_mon = 10; break; case 'C': tval.tm_mon = 11; break; } tval.tm_year -= 1900; time_t gmt, non_gmt = mktime(&tval); if ((zone[0] == '+') || (zone[0] == '-')) { if (!isdigit(zone[1]) || !isdigit(zone[2]) || !isdigit(zone[3]) || !isdigit(zone[4])) { return false; } int hours = (zone[1] - '0') * 10 + (zone[2] - '0'); int minutes = (zone[3] - '0') * 10 + (zone[4] - '0'); int offset = (hours * 60 + minutes) * 60; gmt = non_gmt + ((zone[0] == '+') ? offset : -offset); } else { size_t zindex; if (!find_string(zindex, zone, kTimeZones, arraysize(kTimeZones))) { return false; } gmt = non_gmt + kTimeZoneOffsets[zindex] * 60 * 60; } // TODO: Android should support timezone, see b/2441195 #if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID) || defined(BSD) tm *tm_for_timezone = localtime(&gmt); *seconds = gmt + tm_for_timezone->tm_gmtoff; #else #if defined(_MSC_VER) && _MSC_VER >= 1900 long timezone = 0; _get_timezone(&timezone); #endif *seconds = gmt - timezone; #endif return true; } std::string HttpAddress(const SocketAddress& address, bool secure) { return (address.port() == HttpDefaultPort(secure)) ? address.hostname() : address.ToString(); } // HttpData HttpData::HttpData() : version(HVER_1_1) { } HttpData::~HttpData() = default; void HttpData::clear(bool release_document) { // Clear headers first, since releasing a document may have far-reaching // effects. 
headers_.clear(); if (release_document) { document.reset(); } } void HttpData::copy(const HttpData& src) { headers_ = src.headers_; } void HttpData::changeHeader(const std::string& name, const std::string& value, HeaderCombine combine) { if (combine == HC_AUTO) { HttpHeader header; // Unrecognized headers are collapsible combine = !FromString(header, name) || <API key>(header) ? HC_YES : HC_NO; } else if (combine == HC_REPLACE) { headers_.erase(name); combine = HC_NO; } // At this point, combine is one of (YES, NO, NEW) if (combine != HC_NO) { HeaderMap::iterator it = headers_.find(name); if (it != headers_.end()) { if (combine == HC_YES) { it->second.append(","); it->second.append(value); } return; } } headers_.insert(HeaderMap::value_type(name, value)); } size_t HttpData::clearHeader(const std::string& name) { return headers_.erase(name); } HttpData::iterator HttpData::clearHeader(iterator header) { iterator deprecated = header++; headers_.erase(deprecated); return header; } bool HttpData::hasHeader(const std::string& name, std::string* value) const { HeaderMap::const_iterator it = headers_.find(name); if (it == headers_.end()) { return false; } else if (value) { *value = it->second; } return true; } void HttpData::setContent(const std::string& content_type, StreamInterface* document) { setHeader(HH_CONTENT_TYPE, content_type); <API key>(document); } void HttpData::<API key>(StreamInterface* document) { // TODO: Consider calling Rewind() here? 
RTC_DCHECK(!hasHeader(HH_CONTENT_LENGTH, nullptr)); RTC_DCHECK(!hasHeader(<API key>, nullptr)); RTC_DCHECK(document != nullptr); this->document.reset(document); size_t content_length = 0; if (this->document->GetAvailable(&content_length)) { char buffer[32]; sprintfn(buffer, sizeof(buffer), "%d", content_length); setHeader(HH_CONTENT_LENGTH, buffer); } else { setHeader(<API key>, "chunked"); } } // HttpRequestData void HttpRequestData::clear(bool release_document) { verb = HV_GET; path.clear(); HttpData::clear(release_document); } void HttpRequestData::copy(const HttpRequestData& src) { verb = src.verb; path = src.path; HttpData::copy(src); } size_t HttpRequestData::formatLeader(char* buffer, size_t size) const { RTC_DCHECK(path.find(' ') == std::string::npos); return sprintfn(buffer, size, "%s %.*s HTTP/%s", ToString(verb), path.size(), path.data(), ToString(version)); } HttpError HttpRequestData::parseLeader(const char* line, size_t len) { unsigned int vmajor, vminor; int vend, dstart, dend; // sscanf isn't safe with strings that aren't null-terminated, and there is // no guarantee that |line| is. Create a local copy that is null-terminated. std::string line_str(line, len); line = line_str.c_str(); if ((sscanf(line, "%*s%n %n%*s%n HTTP/%u.%u", &vend, &dstart, &dend, &vmajor, &vminor) != 2) || (vmajor != 1)) { return HE_PROTOCOL; } if (vminor == 0) { version = HVER_1_0; } else if (vminor == 1) { version = HVER_1_1; } else { return HE_PROTOCOL; } std::string sverb(line, vend); if (!FromString(verb, sverb.c_str())) { return HE_PROTOCOL; // !?! <API key>? 
// --- Tail of an HttpRequestData::parseLeader overload whose opening lines are
// outside this chunk; code tokens preserved exactly, reformatted only. ---
  }
  path.assign(line + dstart, line + dend);
  return HE_NONE;
}

// Produces an absolute URI for this request. CONNECT requests are rejected
// (they carry an authority, not a URI). If |path| is already a valid absolute
// URL it is used as-is; otherwise it is combined with the Host header.
bool HttpRequestData::getAbsoluteUri(std::string* uri) const {
  if (HV_CONNECT == verb)
    return false;
  Url<char> url(path);
  if (url.valid()) {
    // |path| is already absolute.
    uri->assign(path);
    return true;
  }
  std::string host;
  if (!hasHeader(HH_HOST, &host))
    return false;
  url.set_address(host);
  url.set_full_path(path);
  uri->assign(url.url());
  return url.valid();
}

// Splits this request into host + relative-path form. If |path| is an
// absolute URL the host comes from it; otherwise from the Host header.
bool HttpRequestData::getRelativeUri(std::string* host, std::string* path) const {
  if (HV_CONNECT == verb)
    return false;
  Url<char> url(this->path);
  if (url.valid()) {
    host->assign(url.address());
    path->assign(url.full_path());
    return true;
  }
  if (!hasHeader(HH_HOST, host))
    return false;
  path->assign(this->path);
  return true;
}

// HttpResponseData

// Resets status code and message, then clears shared HttpData state.
void HttpResponseData::clear(bool release_document) {
  scode = <API key>;
  message.clear();
  HttpData::clear(release_document);
}

void HttpResponseData::copy(const HttpResponseData& src) {
  scode = src.scode;
  message = src.message;
  HttpData::copy(src);
}

// Success response with an empty body (Content-Length: 0).
void HttpResponseData::set_success(uint32_t scode) {
  this->scode = scode;
  message.clear();
  setHeader(HH_CONTENT_LENGTH, "0", false);
}

// Success response carrying a document body of the given content type.
void HttpResponseData::set_success(const std::string& content_type,
                                   StreamInterface* document,
                                   uint32_t scode) {
  this->scode = scode;
  message.erase(message.begin(), message.end());
  setContent(content_type, document);
}

// Redirect response: sets Location and an empty body.
void HttpResponseData::set_redirect(const std::string& location, uint32_t scode) {
  this->scode = scode;
  message.clear();
  setHeader(HH_LOCATION, location);
  setHeader(HH_CONTENT_LENGTH, "0", false);
}

// Error response with an empty body.
void HttpResponseData::set_error(uint32_t scode) {
  this->scode = scode;
  message.clear();
  setHeader(HH_CONTENT_LENGTH, "0", false);
}

// Formats the status line ("HTTP/x.y <code> <message>") into |buffer|;
// returns the number of bytes written.
// NOTE(review): "%.*s" expects an int precision but message.size() is a
// size_t — on 64-bit varargs this is technically undefined; consider a
// static_cast<int>() — TODO confirm against upstream.
size_t HttpResponseData::formatLeader(char* buffer, size_t size) const {
  size_t len = sprintfn(buffer, size, "HTTP/%s %lu", ToString(version), scode);
  if (!message.empty()) {
    len += sprintfn(buffer + len, size - len, " %.*s",
                    message.size(), message.data());
  }
  return len;
}

// Parses a response status line, accepting both versioned ("HTTP/1.x code
// message") and version-less ("HTTP code") forms.
HttpError HttpResponseData::parseLeader(const char* line, size_t len) {
  size_t pos = 0;
  unsigned int vmajor, vminor, temp_scode;
  int temp_pos;
  // sscanf isn't safe with strings that aren't null-terminated, and there is
  // no guarantee that |line| is. Create a local copy that is null-terminated.
  std::string line_str(line, len);
  line = line_str.c_str();
  if (sscanf(line, "HTTP %u%n", &temp_scode, &temp_pos) == 1) {
    // This server's response has no version. :( NOTE: This happens for every
    // response to requests made from Chrome plugins, regardless of the
    // server's behaviour.
    LOG(LS_VERBOSE) << "HTTP version missing from response";
    version = HVER_UNKNOWN;
  } else if ((sscanf(line, "HTTP/%u.%u %u%n",
                     &vmajor, &vminor, &temp_scode, &temp_pos) == 3)
             && (vmajor == 1)) {
    // This server's response does have a version.
    if (vminor == 0) {
      version = HVER_1_0;
    } else if (vminor == 1) {
      version = HVER_1_1;
    } else {
      return HE_PROTOCOL;
    }
  } else {
    return HE_PROTOCOL;
  }
  scode = temp_scode;
  pos = static_cast<size_t>(temp_pos);
  // Skip whitespace between the status code and the reason phrase.
  while ((pos < len) && isspace(static_cast<unsigned char>(line[pos])))
    ++pos;
  message.assign(line + pos, len - pos);
  return HE_NONE;
}

// Http Authentication

// RFC 2617 quoted-string: wraps |str| in double quotes, backslash-escaping
// embedded '"' and '\' characters.
std::string quote(const std::string& str) {
  std::string result;
  result.push_back('"');
  for (size_t i = 0; i < str.size(); ++i) {
    if ((str[i] == '"') || (str[i] == '\\'))
      result.push_back('\\');
    result.push_back(str[i]);
  }
  result.push_back('"');
  return result;
}

#if defined(WEBRTC_WIN)
// Carries SSPI credential/context handles across Negotiate/NTLM round trips.
// (Type and helper names are redacted in this copy — presumably an SSPI
// auth context with FreeContextBuffer-style cleanup; verify against upstream.)
struct <API key> : public HttpAuthContext {
  CredHandle cred;
  CtxtHandle ctx;
  size_t steps;    // number of challenge/response round trips so far
  bool <API key>;
  <API key>(const std::string& auth, CredHandle c1, CtxtHandle c2)
      : HttpAuthContext(auth), cred(c1), ctx(c2), steps(0), <API key>(false) {
  }
  virtual ~<API key>() {
    <API key>(&ctx);
    <API key>(&cred);
  }
};
#endif // WEBRTC_WIN

// Builds an Authorization header value for the server's |challenge|.
// Supports Basic and Digest everywhere, plus Negotiate/NTLM via SSPI on
// Windows. |context| persists auth state across calls; |response| receives
// "<scheme> <credentials>". Returns HAR_RESPONSE when a response was
// produced, HAR_CREDENTIALS when (new) credentials are needed, HAR_IGNORE
// for unsupported schemes, HAR_ERROR on failure.
HttpAuthResult HttpAuthenticate(
    const char * challenge, size_t len,
    const SocketAddress& server,
    const std::string& method, const std::string& uri,
    const std::string& username, const CryptString& password,
    HttpAuthContext *& context, std::string& response, std::string& auth_method) {
  HttpAttributeList args;
  HttpParseAttributes(challenge, len, args);
  HttpHasNthAttribute(args, 0, &auth_method, nullptr);
  // A stale context for a different scheme means this challenge isn't ours.
  if (context && (context->auth_method != auth_method))
    return HAR_IGNORE;
  // BASIC
  if (_stricmp(auth_method.c_str(), "basic") == 0) {
    if (context)
      return HAR_CREDENTIALS; // Bad credentials
    if (username.empty())
      return HAR_CREDENTIALS; // Missing credentials
    context = new HttpAuthContext(auth_method);
    // TODO: convert sensitive to a secure buffer that gets securely deleted
    //std::string decoded = username + ":" + password;
    size_t len = username.size() + password.GetLength() + 2;
    char * sensitive = new char[len];
    size_t pos = strcpyn(sensitive, len, username.data(), username.size());
    pos += strcpyn(sensitive + pos, len - pos, ":");
    password.CopyTo(sensitive + pos, true);
    response = auth_method;
    response.append(" ");
    // TODO: create a sensitive-source version of Base64::encode
    response.append(Base64::Encode(sensitive));
    memset(sensitive, 0, len);
    delete [] sensitive;
    return HAR_RESPONSE;
  }
  // DIGEST
  if (_stricmp(auth_method.c_str(), "digest") == 0) {
    if (context)
      return HAR_CREDENTIALS; // Bad credentials
    if (username.empty())
      return HAR_CREDENTIALS; // Missing credentials
    context = new HttpAuthContext(auth_method);
    // Client nonce is derived from the current time; nc is fixed since we
    // only ever answer one challenge per context.
    std::string cnonce, ncount;
    char buffer[256];
    sprintf(buffer, "%d", static_cast<int>(time(0)));
    cnonce = MD5(buffer);
    ncount = "00000001";
    std::string realm, nonce, qop, opaque;
    HttpHasAttribute(args, "realm", &realm);
    HttpHasAttribute(args, "nonce", &nonce);
    bool has_qop = HttpHasAttribute(args, "qop", &qop);
    bool has_opaque = HttpHasAttribute(args, "opaque", &opaque);
    // TODO: convert sensitive to be secure buffer
    //std::string A1 = username + ":" + realm + ":" + password;
    size_t len = username.size() + realm.size() + password.GetLength() + 3;
    char * sensitive = new char[len];
    size_t pos = strcpyn(sensitive, len, username.data(), username.size());
    pos += strcpyn(sensitive + pos, len - pos, ":");
    pos += strcpyn(sensitive + pos, len - pos, realm.c_str());
    pos += strcpyn(sensitive + pos, len - pos, ":");
    password.CopyTo(sensitive + pos, true);
    std::string A2 = method + ":" + uri;
    std::string middle;
    if (has_qop) {
      qop = "auth";
      middle = nonce + ":" + ncount + ":" + cnonce + ":" + qop;
    } else {
      middle = nonce;
    }
    std::string HA1 = MD5(sensitive);
    memset(sensitive, 0, len);
    delete [] sensitive;
    std::string HA2 = MD5(A2);
    std::string dig_response = MD5(HA1 + ":" + middle + ":" + HA2);
    std::stringstream ss;
    ss << auth_method;
    ss << " username=" << quote(username);
    ss << ", realm=" << quote(realm);
    ss << ", nonce=" << quote(nonce);
    ss << ", uri=" << quote(uri);
    if (has_qop) {
      ss << ", qop=" << qop;
      ss << ", nc=" << ncount;
      ss << ", cnonce=" << quote(cnonce);
    }
    ss << ", response=\"" << dig_response << "\"";
    if (has_opaque) {
      ss << ", opaque=" << quote(opaque);
    }
    response = ss.str();
    return HAR_RESPONSE;
  }
#if defined(WEBRTC_WIN)
#if 1
  bool want_negotiate = (_stricmp(auth_method.c_str(), "negotiate") == 0);
  bool want_ntlm = (_stricmp(auth_method.c_str(), "ntlm") == 0);
  // SPNEGO & NTLM
  if (want_negotiate || want_ntlm) {
    const size_t MAX_MESSAGE = 12000, MAX_SPN = 256;
    char out_buf[MAX_MESSAGE], spn[MAX_SPN];
#if 0 // Requires funky windows versions
    DWORD len = MAX_SPN;
    if (DsMakeSpn("HTTP", server.HostAsURIString().c_str(), nullptr,
                  server.port(), 0, &len, spn) != ERROR_SUCCESS) {
      LOG_F(WARNING) << "(Negotiate) - DsMakeSpn failed";
      return HAR_IGNORE;
    }
#else
    sprintfn(spn, MAX_SPN, "HTTP/%s", server.ToString().c_str());
#endif
    // Output token buffer handed to SSPI.
    SecBuffer out_sec;
    out_sec.pvBuffer = out_buf;
    out_sec.cbBuffer = sizeof(out_buf);
    out_sec.BufferType = SECBUFFER_TOKEN;
    SecBufferDesc out_buf_desc;
    out_buf_desc.ulVersion = 0;
    out_buf_desc.cBuffers = 1;
    out_buf_desc.pBuffers = &out_sec;
    const ULONG NEG_FLAGS_DEFAULT =
        //<API key>
        <API key>
        //| <API key>
        //| ISC_REQ_INTEGRITY
        | <API key>
        | <API key>
        //| ISC_REQ_STREAM
        //| <API key>
        ;
    ::TimeStamp lifetime;
    SECURITY_STATUS ret = S_OK;
    ULONG ret_flags = 0, flags = NEG_FLAGS_DEFAULT;
    bool specify_credentials = !username.empty();
    size_t steps = 0;
    // uint32_t now = Time();
    <API key> * neg = static_cast<<API key> *>(context);
    if (neg) {
      // Continue an in-progress multi-step handshake.
      const size_t max_steps = 10;
      if (++neg->steps >= max_steps) {
        LOG(WARNING) << "<API key>::Authenticate(Negotiate) too many retries";
        return HAR_ERROR;
      }
      steps = neg->steps;
      std::string challenge, decoded_challenge;
      if (HttpHasNthAttribute(args, 1, &challenge, nullptr)
          && Base64::Decode(challenge, Base64::DO_STRICT,
                            &decoded_challenge, nullptr)) {
        // Feed the server's token back into the security context.
        SecBuffer in_sec;
        in_sec.pvBuffer = const_cast<char *>(decoded_challenge.data());
        in_sec.cbBuffer = static_cast<unsigned long>(decoded_challenge.size());
        in_sec.BufferType = SECBUFFER_TOKEN;
        SecBufferDesc in_buf_desc;
        in_buf_desc.ulVersion = 0;
        in_buf_desc.cBuffers = 1;
        in_buf_desc.pBuffers = &in_sec;
        ret = <API key>(&neg->cred, &neg->ctx, spn, flags, 0, <API key>,
                        &in_buf_desc, 0, &neg->ctx, &out_buf_desc,
                        &ret_flags, &lifetime);
        //LOG(INFO) << "$$$ <API key> @ " << TimeSince(now);
        if (FAILED(ret)) {
          LOG(LS_ERROR) << "<API key> returned: "
                        << ErrorName(ret, SECURITY_ERRORS);
          return HAR_ERROR;
        }
      } else if (neg-><API key>) {
        // Try again with default credentials
        specify_credentials = false;
        delete context;
        context = neg = 0;
      } else {
        return HAR_CREDENTIALS;
      }
    }
    if (!neg) {
      // First step: acquire credentials and create a new security context.
      unsigned char userbuf[256], passbuf[256], domainbuf[16];
      <API key> auth_id, * pauth_id = 0;
      if (specify_credentials) {
        memset(&auth_id, 0, sizeof(auth_id));
        size_t len = password.GetLength()+1;
        char * sensitive = new char[len];
        password.CopyTo(sensitive, true);
        // Split "DOMAIN\user" if a domain is present.
        std::string::size_type pos = username.find('\\');
        if (pos == std::string::npos) {
          auth_id.UserLength = static_cast<unsigned long>(
              std::min(sizeof(userbuf) - 1, username.size()));
          memcpy(userbuf, username.c_str(), auth_id.UserLength);
          userbuf[auth_id.UserLength] = 0;
          auth_id.DomainLength = 0;
          domainbuf[auth_id.DomainLength] = 0;
          auth_id.PasswordLength = static_cast<unsigned long>(
              std::min(sizeof(passbuf) - 1, password.GetLength()));
          memcpy(passbuf, sensitive, auth_id.PasswordLength);
          passbuf[auth_id.PasswordLength] = 0;
        } else {
          auth_id.UserLength = static_cast<unsigned long>(
              std::min(sizeof(userbuf) - 1, username.size() - pos - 1));
          memcpy(userbuf, username.c_str() + pos + 1, auth_id.UserLength);
          userbuf[auth_id.UserLength] = 0;
          auth_id.DomainLength =
              static_cast<unsigned long>(std::min(sizeof(domainbuf) - 1, pos));
          memcpy(domainbuf, username.c_str(), auth_id.DomainLength);
          domainbuf[auth_id.DomainLength] = 0;
          auth_id.PasswordLength = static_cast<unsigned long>(
              std::min(sizeof(passbuf) - 1, password.GetLength()));
          memcpy(passbuf, sensitive, auth_id.PasswordLength);
          passbuf[auth_id.PasswordLength] = 0;
        }
        memset(sensitive, 0, len);
        delete [] sensitive;
        auth_id.User = userbuf;
        auth_id.Domain = domainbuf;
        auth_id.Password = passbuf;
        auth_id.Flags = <API key>;
        pauth_id = &auth_id;
        LOG(LS_VERBOSE) << "Negotiate protocol: Using specified credentials";
      } else {
        LOG(LS_VERBOSE) << "Negotiate protocol: Using default credentials";
      }
      CredHandle cred;
      ret = <API key>(0,
                      const_cast<char*>(want_negotiate ? NEGOSSP_NAME_A
                                                       : NTLMSP_NAME_A),
                      <API key>, 0, pauth_id, 0, 0, &cred, &lifetime);
      //LOG(INFO) << "$$$ <API key> @ " << TimeSince(now);
      if (ret != SEC_E_OK) {
        LOG(LS_ERROR) << "<API key> error: "
                      << ErrorName(ret, SECURITY_ERRORS);
        return HAR_IGNORE;
      }
      //CSecBufferBundle<5, CSecBufferBase::FreeSSPI> sb_out;
      CtxtHandle ctx;
      ret = <API key>(&cred, 0, spn, flags, 0, <API key>, 0, 0,
                      &ctx, &out_buf_desc, &ret_flags, &lifetime);
      //LOG(INFO) << "$$$ <API key> @ " << TimeSince(now);
      if (FAILED(ret)) {
        LOG(LS_ERROR) << "<API key> returned: "
                      << ErrorName(ret, SECURITY_ERRORS);
        <API key>(&cred);
        return HAR_IGNORE;
      }
      RTC_DCHECK(!context);
      context = neg = new <API key>(auth_method, cred, ctx);
      neg-><API key> = specify_credentials;
      neg->steps = steps;
    }
    if ((ret == <API key>) || (ret == <API key>)) {
      ret = CompleteAuthToken(&neg->ctx, &out_buf_desc);
      //LOG(INFO) << "$$$ CompleteAuthToken @ " << TimeSince(now);
      LOG(LS_VERBOSE) << "CompleteAuthToken returned: "
                      << ErrorName(ret, SECURITY_ERRORS);
      if (FAILED(ret)) {
        return HAR_ERROR;
      }
    }
    //LOG(INFO) << "$$$ NEGOTIATE took " << TimeSince(now) << "ms";
    // Base64-encode the produced token as the header credentials.
    std::string decoded(out_buf, out_buf + out_sec.cbBuffer);
    response = auth_method;
    response.append(" ");
    response.append(Base64::Encode(decoded));
    return HAR_RESPONSE;
  }
#endif
#endif // WEBRTC_WIN
  return HAR_IGNORE;
}

} // namespace rtc
// GPU child thread: the main thread of Chromium's GPU process. Handles
// control IPC from the browser (initialize, collect GPU info, crash/hang
// simulation, watchdog control) and owns the GpuChannelManager.
#include "content/gpu/gpu_child_thread.h"

#include "base/bind.h"
#include "base/lazy_instance.h"
#include "base/threading/worker_pool.h"
#include "build/build_config.h"
#include "content/child/child_process.h"
#include "content/child/thread_safe_sender.h"
#include "content/common/gpu/<API key>.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/gpu/gpu_watchdog_thread.h"
#include "content/public/common/content_client.h"
#include "content/public/common/content_switches.h"
#include "gpu/config/gpu_info_collector.h"
#include "ipc/ipc_channel_handle.h"
#include "ipc/<API key>.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/<API key>.h"

#if defined(USE_OZONE)
#include "ui/ozone/public/<API key>.h"
#include "ui/ozone/public/ozone_platform.h"
#endif

namespace content {
namespace {

// Process-wide sender used by the log-message handler below; it must be
// thread-safe because logging can happen on any thread.
static base::LazyInstance<scoped_refptr<ThreadSafeSender> > <API key> = <API key>;

// Forwards a log line to the browser process over IPC. Returns false so the
// default logging behaviour still runs locally.
bool <API key>(int severity, const char* file, int line,
               size_t message_start, const std::string& str) {
  std::string header = str.substr(0, message_start);
  std::string message = str.substr(message_start);
  <API key>.Get()->Send(new <API key>(severity, header, message));
  return false;
}

// Message filter used to to handle <API key> messages on
// the IO thread. This allows the UI thread in the browser process to remain
// fast at all times.
class <API key> : public IPC::MessageFilter {
 public:
  explicit <API key>(<API key>* <API key>)
      : <API key>(<API key>), sender_(nullptr) {}

  // Overridden from IPC::MessageFilter:
  void OnFilterAdded(IPC::Sender* sender) override {
    DCHECK(!sender_);
    sender_ = sender;
  }
  void OnFilterRemoved() override {
    DCHECK(sender_);
    sender_ = nullptr;
  }
  bool OnMessageReceived(const IPC::Message& message) override {
    DCHECK(sender_);
    bool handled = true;
    <API key>(<API key>, message)
      IPC_MESSAGE_HANDLER(<API key>, <API key>)
      <API key>(handled = false)
    IPC_END_MESSAGE_MAP()
    return handled;
  }

 protected:
  // IPC::MessageFilter is ref-counted; destruction is private-ish via ref.
  ~<API key>() override {}

  // Allocates the requested buffer and replies with its handle.
  void <API key>(const <API key>& params) {
    TRACE_EVENT2("gpu", "<API key>::<API key>",
                 "id", params.id.id, "client_id", params.client_id);
    sender_->Send(new <API key>(
        <API key>-><API key>(params.id, params.size, params.format,
                             params.usage, params.client_id,
                             params.surface_handle)));
  }

  <API key>* const <API key>;
  IPC::Sender* sender_;
};

// Builds thread options with the startup IPC filters installed.
ChildThreadImpl::Options GetOptions(<API key>* <API key>) {
  ChildThreadImpl::Options::Builder builder;
  builder.AddStartupFilter(new <API key>(<API key>));
#if defined(USE_OZONE)
  IPC::MessageFilter* message_filter =
      ui::OzonePlatform::GetInstance()-><API key>()->GetMessageFilter();
  if (message_filter)
    builder.AddStartupFilter(message_filter);
#endif
  return builder.Build();
}

}  // namespace

// Out-of-process constructor.
GpuChildThread::GpuChildThread(GpuWatchdogThread* watchdog_thread,
                               bool dead_on_arrival,
                               const gpu::GPUInfo& gpu_info,
                               const DeferredMessages& deferred_messages,
                               <API key>* <API key>,
                               gpu::SyncPointManager* sync_point_manager)
    : ChildThreadImpl(GetOptions(<API key>)),
      dead_on_arrival_(dead_on_arrival),
      sync_point_manager_(sync_point_manager),
      gpu_info_(gpu_info),
      deferred_messages_(deferred_messages),
      in_browser_process_(false),
      <API key>(<API key>) {
  watchdog_thread_ = watchdog_thread;
#if defined(OS_WIN)
  target_services_ = NULL;
#endif
  <API key>.Get() = thread_safe_sender();
}

// In-browser-process (single-process / in-process-GPU) constructor.
GpuChildThread::GpuChildThread(const <API key>& params,
                               <API key>* <API key>,
                               gpu::SyncPointManager* sync_point_manager)
    : ChildThreadImpl(ChildThreadImpl::Options::Builder()
                          .InBrowserProcess(params)
                          .AddStartupFilter(new <API key>(<API key>))
                          .Build()),
      dead_on_arrival_(false),
      sync_point_manager_(sync_point_manager),
      in_browser_process_(true),
      <API key>(<API key>) {
#if defined(OS_WIN)
  target_services_ = NULL;
#endif
  // This constructor is only legal in single-process / in-process-GPU mode.
  DCHECK(base::CommandLine::ForCurrentProcess()->HasSwitch(
             switches::kSingleProcess) ||
         base::CommandLine::ForCurrentProcess()->HasSwitch(
             switches::kInProcessGPU));
  if (!gfx::GLSurface::InitializeOneOff())
    VLOG(1) << "gfx::GLSurface::InitializeOneOff failed";
  <API key>.Get() = thread_safe_sender();
}

GpuChildThread::~GpuChildThread() {
  // Drop any messages that were queued but never delivered.
  while (!deferred_messages_.empty()) {
    delete deferred_messages_.front();
    deferred_messages_.pop();
  }
}

// static
gfx::GpuMemoryBufferType GpuChildThread::<API key>() {
  std::vector<gfx::GpuMemoryBufferType> supported_types;
  <API key>::GetSupportedTypes(&supported_types);
  DCHECK(!supported_types.empty());
  // Note: We always use the preferred type.
  return supported_types[0];
}

void GpuChildThread::Shutdown() {
  ChildThreadImpl::Shutdown();
  // Stop forwarding log messages to the browser.
  logging::<API key>(NULL);
}

void GpuChildThread::Init(const base::Time& process_start_time) {
  process_start_time_ = process_start_time;
}

bool GpuChildThread::Send(IPC::Message* msg) {
  // The GPU process must never send a synchronous IPC message to the browser
  // process. This could result in deadlock.
  DCHECK(!msg->is_sync());
  return ChildThreadImpl::Send(msg);
}

bool GpuChildThread::<API key>(const IPC::Message& msg) {
  bool handled = true;
  <API key>(GpuChildThread, msg)
    IPC_MESSAGE_HANDLER(GpuMsg_Initialize, OnInitialize)
    IPC_MESSAGE_HANDLER(<API key>, <API key>)
    IPC_MESSAGE_HANDLER(<API key>, <API key>)
    IPC_MESSAGE_HANDLER(GpuMsg_Clean, OnClean)
    IPC_MESSAGE_HANDLER(GpuMsg_Crash, OnCrash)
    IPC_MESSAGE_HANDLER(GpuMsg_Hang, OnHang)
    IPC_MESSAGE_HANDLER(<API key>, OnDisableWatchdog)
    IPC_MESSAGE_HANDLER(GpuMsg_GpuSwitched, OnGpuSwitched)
    <API key>(handled = false)
  IPC_END_MESSAGE_MAP()
  if (handled)
    return true;
#if defined(USE_OZONE)
  if (ui::OzonePlatform::GetInstance()-><API key>()->OnMessageReceived(msg))
    return true;
#endif
  // Finally give the channel manager a chance at the message.
  return <API key>.get() && <API key>->OnMessageReceived(msg);
}

void GpuChildThread::OnInitialize() {
  // Record initialization only after collecting the GPU info because that can
  // take a significant amount of time.
  gpu_info_.initialization_time = base::Time::Now() - process_start_time_;
  Send(new <API key>(!dead_on_arrival_, gpu_info_));
  // Flush messages queued before the channel was ready.
  while (!deferred_messages_.empty()) {
    Send(deferred_messages_.front());
    deferred_messages_.pop();
  }
  if (dead_on_arrival_) {
    LOG(ERROR) << "Exiting GPU process due to errors during initialization";
    base::MessageLoop::current()->Quit();
    return;
  }
#if defined(OS_ANDROID)
  base::PlatformThread::<API key>(base::ThreadPriority::DISPLAY);
#endif
  // We don't need to pipe log messages if we are running the GPU thread in
  // the browser process.
  if (!in_browser_process_)
    logging::<API key>(<API key>);
  // Defer creation of the render thread. This is to prevent it from handling
  // IPC messages before the sandbox has been enabled and all other necessary
  // initialization has succeeded.
  <API key>.reset(new GpuChannelManager(
      GetRouter(), watchdog_thread_.get(),
      ChildProcess::current()->io_task_runner(),
      ChildProcess::current()->GetShutDownEvent(), channel(),
      GetAttachmentBroker(), sync_point_manager_, <API key>));
#if defined(USE_OZONE)
  ui::OzonePlatform::GetInstance()-><API key>()-><API key>(this);
#endif
}

void GpuChildThread::StopWatchdog() {
  if (watchdog_thread_.get()) {
    watchdog_thread_->Stop();
  }
}

// Collects full (possibly slow) GPU information and reports it back.
void GpuChildThread::<API key>() {
#if defined(OS_WIN)
  // GPU full info collection should only happen on un-sandboxed GPU process
  // or single process/in-process gpu mode on Windows.
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  DCHECK(command_line->HasSwitch(switches::kDisableGpuSandbox) ||
         in_browser_process_);
#endif  // OS_WIN
  gpu::CollectInfoResult result = gpu::<API key>(&gpu_info_);
  switch (result) {
    case gpu::<API key>:
      LOG(ERROR) << "gpu::CollectGraphicsInfo failed (fatal).";
      // TODO(piman): can we signal overall failure?
      break;
    case gpu::<API key>:
      DVLOG(1) << "gpu::CollectGraphicsInfo failed (non-fatal).";
      break;
    case gpu::kCollectInfoNone:
      NOTREACHED();
      break;
    case gpu::kCollectInfoSuccess:
      break;
  }
  GetContentClient()->SetGpuInfo(gpu_info_);
#if defined(OS_WIN)
  // This is slow, but it's the only thing the unsandboxed GPU process does,
  // and GpuDataManager prevents us from sending multiple collecting requests,
  // so it's OK to be blocking.
  gpu::GetDxDiagnostics(&gpu_info_.dx_diagnostics);
  gpu_info_.<API key> = gpu::kCollectInfoSuccess;
#endif  // OS_WIN
  Send(new <API key>(gpu_info_));
#if defined(OS_WIN)
  if (!in_browser_process_) {
    // The unsandboxed GPU process fulfilled its duty. Rest in peace.
    base::MessageLoop::current()->Quit();
  }
#endif  // OS_WIN
}

// Gathers video-memory usage stats from the memory manager and replies.
void GpuChildThread::<API key>() {
  <API key> <API key>;
  if (<API key>)
    <API key>->gpu_memory_manager()-><API key>(&<API key>);
  Send(new <API key>(<API key>));
}

void GpuChildThread::OnClean() {
  DVLOG(1) << "GPU: Removing all contexts";
  if (<API key>)
    <API key>->LoseAllContexts();
}

void GpuChildThread::OnCrash() {
  DVLOG(1) << "GPU: Simulating GPU crash";
  // Good bye, cruel world.
  volatile int* <API key> = NULL;
  *<API key> = 0xdead;
}

void GpuChildThread::OnHang() {
  DVLOG(1) << "GPU: Simulating GPU hang";
  for (;;) {
    // Do not sleep here. The GPU watchdog timer tracks the amount of user
    // time this thread is using and it doesn't use much while calling Sleep.
  }
}

void GpuChildThread::OnDisableWatchdog() {
  DVLOG(1) << "GPU: Disabling watchdog thread";
  if (watchdog_thread_.get()) {
    // Disarm the watchdog before shutting down the message loop. This prevents
    // the future posting of tasks to the message loop.
    if (watchdog_thread_->message_loop())
      watchdog_thread_->PostAcknowledge();
    // Prevent rearming.
    watchdog_thread_->Stop();
  }
}

void GpuChildThread::OnGpuSwitched() {
  DVLOG(1) << "GPU: GPU has switched";
  // Notify observers in the GPU process.
  ui::GpuSwitchingManager::GetInstance()->NotifyGpuSwitched();
}

}  // namespace content
/* SHA-1 capability module: self-test against FIPS 180-1 vectors, context
   info queries, and hashing entry points built on a SHA_CTX state. */

#if defined( INC_ALL )
  #include "crypt.h"
  #include "context.h"
  #include "sha.h"
#else
  #include "crypt.h"
  #include "context/context.h"
  #include "crypt/sha.h"
#endif /* Compiler-specific includes */

/* Size of the opaque hash state exposed through getInfo() */
#define HASH_STATE_SIZE sizeof( SHA_CTX )

#ifndef CONFIG_NO_SELFTEST

/* Test the SHA output against the test vectors given in FIPS 180-1. We skip
   the third test since this takes several seconds to execute, which leads to
   an unacceptable delay */

static const struct {
  const char FAR_BSS *data;                 /* Data to hash */
  const int length;                         /* Length of data */
  const BYTE digest[ SHA_DIGEST_LENGTH ];   /* Digest of data */
} FAR_BSS digestValues[] = {
  { "abc", 3,
    { 0xA9, 0x99, 0x3E, 0x36, 0x47, 0x06, 0x81, 0x6A,
      0xBA, 0x3E, 0x25, 0x71, 0x78, 0x50, 0xC2, 0x6C,
      0x9C, 0xD0, 0xD8, 0x9D } },
  { "<API key>", 56,
    { 0x84, 0x98, 0x3E, 0x44, 0x1C, 0x3B, 0xD2, 0x6E,
      0xBA, 0xAE, 0x4A, 0xA1, 0xF9, 0x51, 0x29, 0xE5,
      0xE5, 0x46, 0x70, 0xF1 } },
/*  { "aaaaa...", 1000000L,
    { 0x34, 0xAA, 0x97, 0x3C, 0xD4, 0xC4, 0xDA, 0xA4,
      0xF6, 0x1E, 0xEB, 0x2B, 0xDB, 0xAD, 0x27, 0x31,
      0x65, 0x34, 0x01, 0x6F } }, */
  { NULL, 0, { 0 } }
};

/* Run the capability's hash routine over each vector and compare digests */
static int selfTest( void )
  {
  const CAPABILITY_INFO *capabilityInfo = getSHA1Capability();
  BYTE hashState[ HASH_STATE_SIZE + 8 ];
  int i, status;

  /* Test SHA-1 against values given in FIPS 180-1 */
  for( i = 0; digestValues[ i ].data != NULL; i++ )
    {
    status = testHash( capabilityInfo, hashState, digestValues[ i ].data,
                       digestValues[ i ].length, digestValues[ i ].digest );
    if( cryptStatusError( status ) )
      return( status );
    }

  return( CRYPT_OK );
  }
#else
  #define selfTest NULL
#endif /* !CONFIG_NO_SELFTEST */

/* Return context subtype-specific information */

CHECK_RETVAL STDC_NONNULL_ARG( ( 3 ) ) \
static int getInfo( IN_ENUM( CAPABILITY_INFO ) const <API key> type,
                    INOUT_OPT CONTEXT_INFO *contextInfoPtr,
                    OUT void *data,
                    IN_INT_Z const int length )
  {
  assert( contextInfoPtr == NULL || \
          isWritePtr( contextInfoPtr, sizeof( CONTEXT_INFO ) ) );
  assert( ( length == 0 && isWritePtr( data, sizeof( int ) ) ) || \
          ( length > 0 && isWritePtr( data, length ) ) );

  REQUIRES( type > <API key> && type < <API key> );

  /* The state-size query is answered locally; everything else falls through
     to the generic handler */
  if( type == <API key> )
    {
    int *valuePtr = ( int * ) data;

    *valuePtr = HASH_STATE_SIZE;
    return( CRYPT_OK );
    }

  return( getDefaultInfo( type, contextInfoPtr, data, length ) );
  }

/* Hash data using SHA */

static int hash( CONTEXT_INFO *contextInfoPtr, BYTE *buffer, int noBytes )
  {
  SHA_CTX *shaInfo = ( SHA_CTX * ) contextInfoPtr->ctxHash->hashInfo;

  /* If the hash state was reset to allow another round of hashing,
     reinitialise things */
  if( !( contextInfoPtr->flags & <API key> ) )
    SHA1_Init( shaInfo );

  /* noBytes == 0 is the wrap-up call that finalises the digest */
  if( noBytes > 0 )
    {
    SHA1_Update( shaInfo, buffer, noBytes );
    }
  else
    SHA1_Final( contextInfoPtr->ctxHash->hash, shaInfo );

  return( CRYPT_OK );
  }

/* Internal API: Hash a single block of memory without the overhead of
   creating an encryption context. This always uses SHA1 */

void shaHashBuffer( HASHINFO hashInfo, BYTE *outBuffer,
                    const int outBufMaxLength, const void *inBuffer,
                    const int inLength, const HASH_STATE hashState )
  {
  SHA_CTX *shaInfo = ( SHA_CTX * ) hashInfo;

  assert( isWritePtr( hashInfo, sizeof( HASHINFO ) ) );
  assert( ( hashState != HASH_STATE_END && \
            outBuffer == NULL && outBufMaxLength == 0 ) || \
          ( hashState == HASH_STATE_END && \
            isWritePtr( outBuffer, outBufMaxLength ) && \
            outBufMaxLength >= 20 ) );
  assert( inBuffer == NULL || isReadPtr( inBuffer, inLength ) );

  if( ( hashState == HASH_STATE_END && outBufMaxLength < 20 ) || \
      ( hashState != HASH_STATE_END && inLength <= 0 ) )
    retIntError_Void();

  switch( hashState )
    {
    case HASH_STATE_START:
      SHA1_Init( shaInfo );
      /* Drop through */

    case HASH_STATE_CONTINUE:
      SHA1_Update( shaInfo, ( BYTE * ) inBuffer, inLength );
      break;

    case HASH_STATE_END:
      if( inBuffer != NULL )
        SHA1_Update( shaInfo, ( BYTE * ) inBuffer, inLength );
      SHA1_Final( outBuffer, shaInfo );
      break;

    default:
      retIntError_Void();
    }
  }

/* One-shot hash of a buffer; the local state is zeroised before returning so
   no sensitive residue is left on the stack */
void shaHashBufferAtomic( BYTE *outBuffer, const int outBufMaxLength,
                          const void *inBuffer, const int inLength )
  {
  SHA_CTX shaInfo;

  assert( isWritePtr( outBuffer, outBufMaxLength ) && \
          outBufMaxLength >= 20 );
  assert( isReadPtr( inBuffer, inLength ) );

  if( outBufMaxLength < 20 || inLength <= 0 )
    retIntError_Void();

  SHA1_Init( &shaInfo );
  SHA1_Update( &shaInfo, ( BYTE * ) inBuffer, inLength );
  SHA1_Final( outBuffer, &shaInfo );
  zeroise( &shaInfo, sizeof( SHA_CTX ) );
  }

/* Capability descriptor: SHA-1, 160-bit output, no key material; hash() is
   used for both the "hash" and "hash-with-key-absent" slots */
static const CAPABILITY_INFO FAR_BSS capabilityInfo = {
  CRYPT_ALGO_SHA1, bitsToBytes( 160 ), "SHA-1", 5,
  bitsToBytes( 0 ), bitsToBytes( 0 ), bitsToBytes( 0 ),
  selfTest, getInfo, NULL, NULL, NULL, NULL, hash, hash
  };

const CAPABILITY_INFO *getSHA1Capability( void )
  {
  return( &capabilityInfo );
  }
// Audio output resampler: wraps an output dispatcher so streams opened with
// one set of AudioParameters can play through hardware that only supports
// another, with high-latency and fake-audio fallbacks on open failure.
// (The resampler class name is redacted as <API key> in this copy.)
#include "media/audio/<API key>.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/compiler_specific.h"
#include "base/metrics/histogram.h"
#include "base/<API key>.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/audio/audio_io.h"
#include "media/audio/<API key>.h"
#include "media/audio/audio_output_proxy.h"
#include "media/audio/sample_rates.h"
#include "media/base/audio_converter.h"
#include "media/base/limits.h"

namespace media {

// Adapts a client AudioSourceCallback (in input parameters) to the device's
// output parameters via an AudioConverter.
class OnMoreDataConverter
    : public AudioOutputStream::AudioSourceCallback,
      public AudioConverter::InputCallback {
 public:
  OnMoreDataConverter(const AudioParameters& input_params,
                      const AudioParameters& output_params);
  virtual ~OnMoreDataConverter();

  // AudioSourceCallback interface.
  virtual int OnMoreData(AudioBus* dest,
                         AudioBuffersState buffers_state) OVERRIDE;
  virtual int OnMoreIOData(AudioBus* source,
                           AudioBus* dest,
                           AudioBuffersState buffers_state) OVERRIDE;
  virtual void OnError(AudioOutputStream* stream) OVERRIDE;

  // Sets |source_callback_|. If this is not a new object, then Stop() must be
  // called before Start().
  void Start(AudioOutputStream::AudioSourceCallback* callback);

  // Clears |source_callback_| and flushes the resampler.
  void Stop();

  bool started() { return source_callback_ != NULL; }

 private:
  // AudioConverter::InputCallback implementation.
  virtual double ProvideInput(AudioBus* audio_bus,
                              base::TimeDelta buffer_delay) OVERRIDE;

  // Ratio of input bytes to output bytes used to correct playback delay with
  // regard to buffering and resampling.
  const double io_ratio_;

  // Source callback.
  AudioOutputStream::AudioSourceCallback* source_callback_;

  // Last AudioBuffersState object received via OnMoreData(), used to correct
  // playback delay by ProvideInput() and passed on to |source_callback_|.
  AudioBuffersState <API key>;

  const int <API key>;

  // Handles resampling, buffering, and channel mixing between input and output
  // parameters.
  AudioConverter audio_converter_;

  <API key>(OnMoreDataConverter);
};

// Record UMA statistics for hardware output configuration.
static void RecordStats(const AudioParameters& output_params) {
  // Note the '<API key>'s below, these silence the PRESUBMIT.py
  // check for uma enum max usage, since we're abusing <API key>
  // to report a discrete value.
  <API key>(
      "Media.<API key>",
      output_params.bits_per_sample(),
      limits::kMaxBitsPerSample);  // <API key>
  <API key>(
      "Media.<API key>",
      output_params.channel_layout(),
      CHANNEL_LAYOUT_MAX + 1);
  <API key>(
      "Media.<API key>",
      output_params.channels(),
      limits::kMaxChannels);  // <API key>
  AudioSampleRate asr;
  if (ToAudioSampleRate(output_params.sample_rate(), &asr)) {
    <API key>(
        "Media.<API key>", asr, kAudioSampleRateMax + 1);
  } else {
    <API key>(
        "Media.<API key>", output_params.sample_rate());
  }
}

// Record UMA statistics for hardware output configuration after fallback.
static void RecordFallbackStats(const AudioParameters& output_params) {
  <API key>("Media.<API key>", true);
  // Note the '<API key>'s below, these silence the PRESUBMIT.py
  // check for uma enum max usage, since we're abusing <API key>
  // to report a discrete value.
  <API key>(
      "Media.<API key>",
      output_params.bits_per_sample(),
      limits::kMaxBitsPerSample);  // <API key>
  <API key>(
      "Media.<API key>",
      output_params.channel_layout(),
      CHANNEL_LAYOUT_MAX + 1);
  <API key>(
      "Media.<API key>",
      output_params.channels(),
      limits::kMaxChannels);  // <API key>
  AudioSampleRate asr;
  if (ToAudioSampleRate(output_params.sample_rate(), &asr)) {
    <API key>(
        "Media.<API key>", asr, kAudioSampleRateMax + 1);
  } else {
    <API key>(
        "Media.<API key>", output_params.sample_rate());
  }
}

// Converts low latency based |output_params| into high latency appropriate
// output parameters in error situations.
void <API key>::SetupFallbackParams() {
  // Only Windows has a high latency output driver that is not the same as the low
  // latency path.
#if defined(OS_WIN)
  // Choose AudioParameters appropriate for opening the device in high latency
  // mode. |<API key>| is arbitrarily based on Pepper Flash's
  // MAXIMUM frame size for low latency.
  static const int <API key> = 2048;
  const int frames_per_buffer =
      std::max(params_.frames_per_buffer(), <API key>);
  output_params_ = AudioParameters(
      AudioParameters::AUDIO_PCM_LINEAR, params_.channel_layout(),
      params_.sample_rate(), params_.bits_per_sample(), frames_per_buffer);
  device_id_ = "";
  Initialize();
#endif
}

<API key>::<API key>(AudioManager* audio_manager,
                    const AudioParameters& input_params,
                    const AudioParameters& output_params,
                    const std::string& output_device_id,
                    const base::TimeDelta& close_delay)
    : <API key>(audio_manager, input_params, output_device_id),
      close_delay_(close_delay),
      output_params_(output_params),
      streams_opened_(false) {
  DCHECK(input_params.IsValid());
  DCHECK(output_params.IsValid());
  DCHECK_EQ(output_params_.format(), AudioParameters::<API key>);

  // Record UMA statistics for the hardware configuration.
  RecordStats(output_params);

  Initialize();
}

<API key>::~<API key>() {
  DCHECK(callbacks_.empty());
}

// (Re)creates the underlying dispatcher for the current output parameters.
void <API key>::Initialize() {
  DCHECK(!streams_opened_);
  DCHECK(callbacks_.empty());
  dispatcher_ = new <API key>(
      audio_manager_, output_params_, device_id_, close_delay_);
}

// Opens the physical stream, falling back to high-latency (Windows) and then
// fake audio output if the preferred configuration cannot be opened.
bool <API key>::OpenStream() {
  DCHECK(task_runner_-><API key>());

  if (dispatcher_->OpenStream()) {
    // Only record the UMA statistic if we didn't fallback during construction
    // and only for the first stream we open.
    if (!streams_opened_ &&
        output_params_.format() == AudioParameters::<API key>) {
      <API key>("Media.<API key>", false);
    }
    streams_opened_ = true;
    return true;
  }

  // If we've already tried to open the stream in high latency mode or we've
  // successfully opened a stream previously, there's nothing more to be done.
  if (output_params_.format() != AudioParameters::<API key> ||
      streams_opened_ || !callbacks_.empty()) {
    return false;
  }

  DCHECK_EQ(output_params_.format(), AudioParameters::<API key>);

  // Record UMA statistics about the hardware which triggered the failure so
  // we can debug and triage later.
  RecordFallbackStats(output_params_);

  // Only Windows has a high latency output driver that is not the same as the
  // low latency path.
#if defined(OS_WIN)
  DLOG(ERROR) << "Unable to open audio device in low latency mode. Falling "
              << "back to high latency audio output.";

  SetupFallbackParams();
  if (dispatcher_->OpenStream()) {
    streams_opened_ = true;
    return true;
  }
#endif

  DLOG(ERROR) << "Unable to open audio device in high latency mode. Falling "
              << "back to fake audio output.";

  // Finally fall back to a fake audio output device.
  output_params_.Reset(
      AudioParameters::AUDIO_FAKE, params_.channel_layout(),
      params_.channels(), params_.input_channels(), params_.sample_rate(),
      params_.bits_per_sample(), params_.frames_per_buffer());
  Initialize();
  if (dispatcher_->OpenStream()) {
    streams_opened_ = true;
    return true;
  }

  return false;
}

// Starts playback for |stream_proxy|, creating (or reusing) the per-proxy
// resampling callback that bridges client and device parameters.
bool <API key>::StartStream(
    AudioOutputStream::AudioSourceCallback* callback,
    AudioOutputProxy* stream_proxy) {
  DCHECK(task_runner_-><API key>());

  OnMoreDataConverter* resampler_callback = NULL;
  CallbackMap::iterator it = callbacks_.find(stream_proxy);
  if (it == callbacks_.end()) {
    resampler_callback = new OnMoreDataConverter(params_, output_params_);
    callbacks_[stream_proxy] = resampler_callback;
  } else {
    resampler_callback = it->second;
  }

  resampler_callback->Start(callback);
  bool result = dispatcher_->StartStream(resampler_callback, stream_proxy);
  if (!result)
    resampler_callback->Stop();
  return result;
}

void <API key>::StreamVolumeSet(AudioOutputProxy* stream_proxy,
                                double volume) {
  DCHECK(task_runner_-><API key>());
  dispatcher_->StreamVolumeSet(stream_proxy, volume);
}

// NOTE(review): this definition continues beyond the visible chunk; the
// opening fragment is preserved untouched.
void <API key>::StopStream(AudioOutputProxy* stream_proxy) {
DCHECK(task_runner_-><API key>()); dispatcher_->StopStream(stream_proxy); // Now that StopStream() has completed the underlying physical stream should // be stopped and no longer calling OnMoreData(), making it safe to Stop() the // OnMoreDataConverter. CallbackMap::iterator it = callbacks_.find(stream_proxy); if (it != callbacks_.end()) it->second->Stop(); } void <API key>::CloseStream(AudioOutputProxy* stream_proxy) { DCHECK(task_runner_-><API key>()); dispatcher_->CloseStream(stream_proxy); // We assume that StopStream() is always called prior to CloseStream(), so // that it is safe to delete the OnMoreDataConverter here. CallbackMap::iterator it = callbacks_.find(stream_proxy); if (it != callbacks_.end()) { delete it->second; callbacks_.erase(it); } } void <API key>::Shutdown() { DCHECK(task_runner_-><API key>()); // No AudioOutputProxy objects should hold a reference to us when we get // to this stage. DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference"; dispatcher_->Shutdown(); DCHECK(callbacks_.empty()); } void <API key>::<API key>() { DCHECK(task_runner_-><API key>()); // Stop and close all active streams. Once all streams across all dispatchers // have been closed the AudioManager will call <API key>(). for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end(); ++it) { if (it->second->started()) dispatcher_->StopStream(it->first); dispatcher_->CloseStream(it->first); } // Close all idle streams as well. dispatcher_-><API key>(); } void <API key>::<API key>() { DCHECK(task_runner_-><API key>()); // By opening all streams first and then starting them one by one we ensure // the dispatcher only opens streams for those which will actually be used. 
for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end(); ++it) { dispatcher_->OpenStream(); } for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end(); ++it) { if (it->second->started()) dispatcher_->StartStream(it->second, it->first); } } OnMoreDataConverter::OnMoreDataConverter(const AudioParameters& input_params, const AudioParameters& output_params) : io_ratio_(static_cast<double>(input_params.GetBytesPerSecond()) / output_params.GetBytesPerSecond()), source_callback_(NULL), <API key>(input_params.GetBytesPerSecond()), audio_converter_(input_params, output_params, false) {} OnMoreDataConverter::~OnMoreDataConverter() { // Ensure Stop() has been called so we don't end up with an AudioOutputStream // calling back into OnMoreData() after destruction. CHECK(!source_callback_); } void OnMoreDataConverter::Start( AudioOutputStream::AudioSourceCallback* callback) { CHECK(!source_callback_); source_callback_ = callback; // While AudioConverter can handle multiple inputs, we're using it only with // a single input currently. Eventually this may be the basis for a browser // side mixer. audio_converter_.AddInput(this); } void OnMoreDataConverter::Stop() { CHECK(source_callback_); source_callback_ = NULL; audio_converter_.RemoveInput(this); } int OnMoreDataConverter::OnMoreData(AudioBus* dest, AudioBuffersState buffers_state) { return OnMoreIOData(NULL, dest, buffers_state); } int OnMoreDataConverter::OnMoreIOData(AudioBus* source, AudioBus* dest, AudioBuffersState buffers_state) { // Note: The input portion of OnMoreIOData() is not supported when a converter // has been injected. Downstream clients prefer silence to potentially split // apart input data. <API key> = buffers_state; audio_converter_.Convert(dest); // Always return the full number of frames requested, ProvideInput() // will pad with silence if it wasn't able to acquire enough data. 
return dest->frames(); } double OnMoreDataConverter::ProvideInput(AudioBus* dest, base::TimeDelta buffer_delay) { // Adjust playback delay to include |buffer_delay|. // TODO(dalecurtis): Stop passing bytes around, it doesn't make sense since // AudioBus is just float data. Use TimeDelta instead. AudioBuffersState new_buffers_state; new_buffers_state.pending_bytes = io_ratio_ * (<API key>.total_bytes() + buffer_delay.InSecondsF() * <API key>); // Retrieve data from the original callback. const int frames = source_callback_->OnMoreIOData( NULL, dest, new_buffers_state); // Zero any unfilled frames if anything was filled, otherwise we'll just // return a volume of zero and let AudioConverter drop the output. if (frames > 0 && frames < dest->frames()) dest->ZeroFramesPartial(frames, dest->frames() - frames); return frames > 0 ? 1 : 0; } void OnMoreDataConverter::OnError(AudioOutputStream* stream) { source_callback_->OnError(stream); } } // namespace media
title: Git Pull Vs Git Fetch These two commands are regularly used by git users. Let's see the difference between both commands. For the sake of context, it's worth remembering we're probably working in a clone repo. What's a clone? simply a duplicate of another repository. It is basically getting your own copy of someone else's source code. That said, to keep your clone updated with whatever changes may have been applied to the original, you'll need to bring those to your clone. That's where `fetch` and `pull` come in. `git fetch` is the command that tells your local git to retrieve the latest meta-data info from the original (yet doesn't do any file transfering. It's more like just checking to see if there are any changes available). `git pull` on the other hand does that AND brings (copy) those changes from the remote repository. E.g. git pull origin ankur bugfix The take away is to keep in mind that there generally are at least three copies of a project on your workstation. One copy is your own repository with your own commit history (the already saved one, so to say). The second copy is your working copy where you are editing and building (not committed yet to your repo). The third copy is your local "cached" copy of a remote repository (probably the original from where you cloned yours). You can use `git fetch` to know the changes done in the remote repo/branch since your last pull. This is useful to allow for checking before doing an actual pull, which could change files in your current branch and working copy (and potentially losing your changes, etc). git fetch git diff ...origin
package org.knopflerfish.framework; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import org.osgi.framework.BundleContext; import org.osgi.framework.BundleException; import org.osgi.framework.Constants; import org.osgi.framework.ServiceEvent; import org.osgi.framework.ServiceReference; import org.osgi.framework.hooks.service.EventListenerHook; import org.osgi.framework.hooks.service.FindHook; import org.osgi.framework.hooks.service.ListenerHook; import org.osgi.framework.hooks.service.ListenerHook.ListenerInfo; import org.osgi.util.tracker.ServiceTracker; import org.osgi.util.tracker.<API key>; /** * Handle all framework hooks, mostly dispatched from BundleImpl, Services * and <API key> * */ @SuppressWarnings("deprecation") class ServiceHooks { final private FrameworkContext fwCtx; ServiceTracker<ListenerHook,ListenerHook> listenerHookTracker; boolean bOpen; ServiceHooks(FrameworkContext fwCtx) { this.fwCtx = fwCtx; } synchronized void open() { if(fwCtx.debug.hooks) { fwCtx.debug.println("opening hooks"); } listenerHookTracker = new ServiceTracker<ListenerHook,ListenerHook> (fwCtx.systemBundle.bundleContext, ListenerHook.class, new <API key><ListenerHook,ListenerHook>() { public ListenerHook addingService(ServiceReference<ListenerHook> reference) { final ListenerHook lh = fwCtx.systemBundle.bundleContext.getService(reference); try { Collection<<API key>> c = <API key>(); @SuppressWarnings({ "rawtypes", "unchecked" }) final Collection<ListenerInfo> li = (Collection) c; lh.added(li); } catch (final Exception e) { fwCtx.debug.printStackTrace("Failed to call listener hook reference.getProperty(Constants.SERVICE_ID), e); } return lh; } public void modifiedService(ServiceReference<ListenerHook> reference, ListenerHook service) { // noop } public void 
removedService(ServiceReference<ListenerHook> reference, ListenerHook service) { fwCtx.systemBundle.bundleContext.ungetService(reference); } }); listenerHookTracker.open(); bOpen = true; } synchronized void close() { listenerHookTracker.close(); listenerHookTracker = null; bOpen = false; } synchronized public boolean isOpen() { return bOpen; } void <API key>(BundleContextImpl bc, String service, String filter, boolean allServices, Collection<ServiceReference<?>> refs) { @SuppressWarnings({ "unchecked", "rawtypes" }) final List<<API key><FindHook>> srl = (List) fwCtx.services.get(FindHook.class.getName()); if (srl != null) { final <API key><ServiceReference<?>> filtered = new <API key><ServiceReference<?>>(refs); for (final <API key><FindHook> fhr : srl) { final <API key><FindHook> sr = fhr.reference; final FindHook fh = sr.getService(); if (fh != null) { try { fh.find(bc, service, filter, allServices, filtered); } catch (final Exception e) { fwCtx.frameworkError(bc, new BundleException("Failed to call find hook sr.getProperty(Constants.SERVICE_ID), e)); } } } } } void <API key>(final ServiceEvent evt, final Collection<<API key>> receivers) { @SuppressWarnings({ "unchecked", "rawtypes" }) final List<<API key><org.osgi.framework.hooks.service.EventHook>> eventHooks = (List) fwCtx.services.get(org.osgi.framework.hooks.service.EventHook.class.getName()); if (eventHooks != null) { final HashSet<BundleContext> ctxs = new HashSet<BundleContext>(); for (final <API key> sle : receivers) { ctxs.add(sle.getBundleContext()); } final int start_size = ctxs.size(); final <API key><BundleContext> filtered = new <API key><BundleContext>(ctxs); for (final <API key><org.osgi.framework.hooks.service.EventHook> sregi : eventHooks) { final <API key><org.osgi.framework.hooks.service.EventHook> sr = sregi.reference; final org.osgi.framework.hooks.service.EventHook eh = sr.getService(); if (eh != null) { try { eh.event(evt, filtered); } catch (final Exception e) { 
fwCtx.debug.printStackTrace("Failed to call event hook sr.getProperty(Constants.SERVICE_ID), e); } } } // TODO, refactor this for speed!? if (start_size != ctxs.size()) { ctxs.add(fwCtx.systemBundle.bundleContext); for (final Iterator<<API key>> ir = receivers.iterator(); ir.hasNext(); ) { if (!ctxs.contains(ir.next().getBundleContext())) { ir.remove(); } } } } @SuppressWarnings({ "rawtypes", "unchecked" }) final List<<API key><EventListenerHook>> eventListenerHooks = (List) fwCtx.services.get(EventListenerHook.class.getName()); if (eventListenerHooks != null) { final HashMap<BundleContext, Collection<ListenerInfo>> listeners = new HashMap<BundleContext, Collection<ListenerInfo>>(); for (final <API key> sle : receivers) { if(!listeners.containsKey(sle.getBundleContext())) { listeners.put(sle.getBundleContext(), new ArrayList<ListenerInfo>()); } listeners.get(sle.getBundleContext()).add(sle); } receivers.clear(); Collection<ListenerInfo> sys = listeners.get(fwCtx.systemBundle.bundleContext); if (sys != null) { final Collection<<API key>> sles = (Collection) sys; receivers.addAll(sles); } for(final Entry<BundleContext, Collection<ListenerInfo>> e : listeners.entrySet()) { e.setValue(new <API key><ListenerInfo>(e.getValue())); } final RemoveOnlyMap<BundleContext, Collection<ListenerInfo>> filtered = new RemoveOnlyMap<BundleContext, Collection<ListenerInfo>>(listeners); for(final <API key><EventListenerHook> sri : eventListenerHooks) { final EventListenerHook elh = sri.reference.getService(); if(elh != null) { try { elh.event(evt, filtered); } catch(final Exception e) { fwCtx.debug.printStackTrace("Failed to call event hook sri.reference.getProperty(Constants.SERVICE_ID), e); } } } for(final Collection<ListenerInfo> li : listeners.values()) { @SuppressWarnings({ "rawtypes", "unchecked" }) final Collection<<API key>> sles = (Collection) li; receivers.addAll(sles); } } } Collection<<API key>> <API key>() { // TODO think about threads?! 
return Collections.unmodifiableSet(fwCtx.listeners.serviceListeners.serviceSet); } void <API key>(<API key> sle) { if(!isOpen() || listenerHookTracker.size() == 0) { return; } final ServiceReference<ListenerHook>[] srl = listenerHookTracker.<API key>(); final Set<ListenerInfo> set = toImmutableSet((ListenerInfo) sle); if (srl!=null) { for (final ServiceReference<ListenerHook> sr : srl) { final ListenerHook lh = listenerHookTracker.getService(sr); try { lh.added(set); } catch (final Exception e) { fwCtx.debug.printStackTrace("Failed to call listener hook sr.getProperty(Constants.SERVICE_ID), e); } } } } void <API key>(<API key> sle) { if(isOpen()) { <API key>(toImmutableSet(sle)); } } void <API key>(Collection<<API key>> set) { if(!isOpen() || listenerHookTracker.size() == 0) { return; } final ServiceReference<ListenerHook>[] srl = listenerHookTracker.<API key>(); if (srl != null) { @SuppressWarnings({ "rawtypes", "unchecked" }) final Collection<ListenerInfo> lis = (Collection) set; for (final ServiceReference<ListenerHook> sr : srl) { final ListenerHook lh = listenerHookTracker.getService(sr); try { lh.removed(lis); } catch (final Exception e) { fwCtx.debug .printStackTrace("Failed to call listener hook + sr.getProperty(Constants.SERVICE_ID), e); } } } } static <E> Set<E> toImmutableSet(E obj) { Set<E> set = new HashSet<E>(); set.add(obj); set = Collections.unmodifiableSet(set); return set; } static class RemoveOnlyMap<K,V> implements Map<K,V> { final Map<K,V> original; public RemoveOnlyMap(Map<K,V> original) { this.original = original; } public void clear() { original.clear(); } public boolean containsKey(Object k) { return original.containsKey(k); } public boolean containsValue(Object v) { return original.containsValue(v); } public Set<Entry<K,V>> entrySet() { return original.entrySet(); } public V get(Object k) { return original.get(k); } public boolean isEmpty() { return original.isEmpty(); } public Set<K> keySet() { return original.keySet(); } public V 
put(Object k, Object v) { throw new <API key>("objects can only be removed"); } public void putAll(Map<? extends K,? extends V> m) { throw new <API key>("objects can only be removed"); } public V remove(Object k) { return original.remove(k); } public int size() { return original.size(); } public Collection<V> values() { return original.values(); } } /* void printSLE(String pre, Collection c, String post) { if(pre != null) { System.out.println(pre); System.out.flush(); } for(Object o : c) { <API key> sle = (<API key>)o; System.out.println("SLE: " + sle.listener.getClass().getName() + "@" + sle.getFilter()); System.out.flush(); } if(post != null) { System.out.println(post); System.out.flush(); } } void printSLE(String pre, Map m, String post) { System.out.println(pre); System.out.flush(); for(Object o : m.values()) { Collection c = (Collection)o; printSLE(null, c, null); } System.out.println(post); System.out.flush(); } */ }
#include "stdafx.h"
#include "ODBCInterface.h"

// ODBC wrapper: manages one environment/connection/statement handle set.
//
// FIX(review) summary:
//  - Connect(const CHAR*, ...) no longer crashes when UserName/Password are
//    left at their documented NULL defaults.
//  - Close() now calls SQLDisconnect() BEFORE freeing the connection handle
//    (the old order used a freed handle; CleanEnv() already did it right).
//  - SaveLog() now checks the fopen() result, like SaveErrorLog().

ODBCInterface::ODBCInterface()
{
__ENTER_FUNCTION
    mConnected = FALSE ;
    mAffectCount = -1;
    mResult = SQL_SUCCESS ;
    hEnv = 0 ;
    hStmt = 0 ;
    hDbc = 0 ;
    memset( mErrorMsg, 0, <API key> ) ;
    m_Query.Clear() ;
__LEAVE_FUNCTION
}

ODBCInterface::~ODBCInterface()
{
__ENTER_FUNCTION
    // Free in dependency order: statement, disconnect, connection, environment.
    if ( hStmt )
        SQLFreeHandle( SQL_HANDLE_STMT, hStmt ) ;
    if ( hDbc )
        SQLDisconnect( hDbc ) ;
    if ( hDbc )
        SQLFreeHandle( SQL_HANDLE_DBC,hDbc ) ;
    if ( hEnv )
        SQLFreeHandle( SQL_HANDLE_ENV, hEnv ) ;
__LEAVE_FUNCTION
}

// Connect using an explicit DSN/connect string; credentials may be NULL.
BOOL ODBCInterface::Connect( const CHAR *ConnectStr, const CHAR *UserName/* =NULL */, const CHAR *Password/* =NULL */ )
{
__ENTER_FUNCTION
    Close() ;

    strncpy( mConnectStr, ConnectStr, DATABASE_STR_LEN ) ;
    // FIX: the defaults are NULL and strncpy(NULL) crashes — substitute "".
    // NOTE(review): strncpy does not guarantee NUL termination if the source
    // fills the buffer; confirm the member arrays reserve a terminator byte.
    strncpy( mUserName, UserName ? UserName : "", DB_USE_STR_LEN ) ;
    strncpy( mPassWord, Password ? Password : "", DB_PASSWORD_STR_LEN ) ;

    SQLAllocHandle( SQL_HANDLE_ENV, SQL_NULL_HANDLE, &hEnv ) ;
    SQLSetEnvAttr( hEnv, <API key>, ( SQLPOINTER )SQL_OV_ODBC3, SQL_IS_INTEGER ) ;
    SQLAllocHandle( SQL_HANDLE_DBC, hEnv, &hDbc ) ;

    mResult = SQLConnect( hDbc, ( SQLCHAR * )mConnectStr, SQL_NTS,
                          ( SQLCHAR * )mUserName, SQL_NTS,
                          ( SQLCHAR * )mPassWord, SQL_NTS ) ;
    if ( ( SQL_SUCCESS != mResult ) && ( <API key> != mResult ) )
    {
        CHAR LogBuff[ 512 ] ;
        memset( LogBuff, 0, 512 );
        sprintf( LogBuff, "Connect string: %s", mConnectStr ) ;
        sprintf( LogBuff, "Connect string: %s", mUserName ) ;
        DiagState() ;
        return FALSE ;
    }

    mResult = SQLAllocHandle( SQL_HANDLE_STMT, hDbc, &hStmt ) ;
    if ( ( SQL_SUCCESS != mResult ) && ( <API key> != mResult ) )
    {
        hStmt = 0 ;
        return FALSE ;
    }

    mConnected = TRUE ;
    return TRUE ;
__LEAVE_FUNCTION
    return FALSE;
}

// Reconnect using the credentials remembered by the previous Connect().
BOOL ODBCInterface::Connect()
{
__ENTER_FUNCTION
    Close();

    SQLAllocHandle( SQL_HANDLE_ENV, SQL_NULL_HANDLE, &hEnv ) ;
    SQLSetEnvAttr( hEnv, <API key>, ( SQLPOINTER )SQL_OV_ODBC3, SQL_IS_INTEGER ) ;
    SQLAllocHandle( SQL_HANDLE_DBC, hEnv, &hDbc ) ;

    mResult = SQLConnect( hDbc,( SQLCHAR * )mConnectStr, SQL_NTS,
                          ( SQLCHAR * )mUserName, SQL_NTS,
                          ( SQLCHAR * )mPassWord, SQL_NTS ) ;
    if ( ( SQL_SUCCESS != mResult) && ( <API key> != mResult ) )
    {
        CHAR LogBuff[ 512 ] ;
        memset( LogBuff, 0, 512 ) ;
        sprintf( LogBuff, "Connect string: %s", mConnectStr ) ;
        sprintf( LogBuff, "Connect string: %s", mUserName ) ;
        DiagState() ;
        return FALSE ;
    }

    mResult = SQLAllocHandle( SQL_HANDLE_STMT, hDbc, &hStmt ) ;
    if ( ( SQL_SUCCESS != mResult ) && ( <API key> != mResult ) )
    {
        hStmt = 0 ;
        return FALSE ;
    }

    mConnected = TRUE ;
    return TRUE ;
__LEAVE_FUNCTION
    return FALSE;
}

BOOL ODBCInterface::Close()
{
__ENTER_FUNCTION
    if ( hStmt )
    {
        SQLCloseCursor( hStmt ) ;
        SQLFreeStmt( hStmt, SQL_UNBIND ) ;
        SQLFreeHandle( SQL_HANDLE_STMT, hStmt ) ;
        hStmt = NULL ;
    }
    if ( hDbc )
    {
        // FIX: disconnect before freeing — the old code freed the handle and
        // then passed the freed handle to SQLDisconnect().  ODBC requires
        // SQLDisconnect() first; this also matches CleanEnv().
        SQLDisconnect( hDbc ) ;
        SQLFreeHandle( SQL_HANDLE_DBC, hDbc ) ;
        hDbc = NULL ;
    }
    if ( hEnv )
    {
        SQLFreeHandle( SQL_HANDLE_ENV, hEnv ) ;
        hEnv = NULL ;
    }
    mConnected = FALSE ;
    return TRUE ;
__LEAVE_FUNCTION
    return FALSE ;
}

// Execute m_Query and bind every result column as a character buffer.
BOOL ODBCInterface::Execute()
{
_MY_TRY
    {
        INT ColIndex ;
        mResult = SQLExecDirect( hStmt, ( SQLCHAR * )m_Query.m_SqlStr, SQL_NTS ) ;
        if ( ( SQL_SUCCESS != mResult ) && ( <API key> != mResult ) && ( SQL_NO_DATA != mResult ) )
        {
            DiagState() ;
            return FALSE ;
        }
        // Update, Delete, Insert
        SQLRowCount( hStmt, &mAffectCount ) ;
        SQLNumResultCols( hStmt, &mColCount ) ;
        if ( mColCount > MAXCOL )
        {
            return FALSE ;
        }
        // Statement produced neither a result set nor affected rows.
        if ( 0 >= mColCount && 0 >= mAffectCount )
        {
            Clear() ;
            return TRUE ;
        }
        for ( ColIndex=0; ColIndex < mColCount; ColIndex++ )
        {
            SQLBindCol( hStmt, ColIndex+1, SQL_C_CHAR, Col[ ColIndex ], MAX_COLUMN_BUFFER, &mCollocate[ ColIndex ] ) ;
            SQLDescribeCol( hStmt, ColIndex + 1, ColName[ ColIndex ], MAX_COLUMN_NAME, NULL, NULL, NULL, NULL, NULL ) ;
        }
        return TRUE ;
    }
_MY_CATCH
    {
        SaveLog( ( const CHAR* )m_Query.m_SqlStr ) ; // Sql
    }
    return FALSE ;
}

// Execute m_LongQuery; columns are fetched unbound via SQLGetData in
// LongFetch() instead of being bound here.
BOOL ODBCInterface::LongExecute()
{
_MY_TRY
    {
        INT ColIndex ;
        mResult = SQLExecDirect( hStmt, ( SQLCHAR * )m_LongQuery.m_SqlStr, SQL_NTS ) ;
        if ( ( SQL_SUCCESS != mResult ) && ( <API key> != mResult ) && ( SQL_NO_DATA != mResult ) )
        {
            DiagStateEx() ;
            return FALSE ;
        }
        // Update, Delete, Insert
        SQLRowCount( hStmt, &mAffectCount ) ;
        SQLNumResultCols( hStmt, &mColCount ) ;
        if ( mColCount > MAXCOL )
        {
            return FALSE ;
        }
        if ( 0>= mColCount && 0 >= mAffectCount )
        {
            Clear() ;
            return TRUE ;
        }
        for ( ColIndex = 0; ColIndex < mColCount; ColIndex++ )
        {
            //SQLBindCol(hStmt,ColIndex+1,SQL_C_CHAR,Col[ColIndex],MAX_COLUMN_BUFFER,&mCollocate[ColIndex]);
            //SQLDescribeCol(hStmt,ColIndex+1,ColName[ColIndex],MAX_COLUMN_NAME,NULL,NULL,NULL,NULL,NULL);
        }
        return TRUE ;
    }
_MY_CATCH
    {
        SaveErrorLog( "Huge Error occur:" ) ;
        SaveErrorLog( ( const CHAR* )m_LongQuery.m_SqlStr ) ; // Sql
    }
    return FALSE ;
}

// Append a line to ./Log/dberror.log (best effort, no-op on failure).
VOID ODBCInterface::SaveErrorLog( const CHAR* pLog )
{
__ENTER_FUNCTION
    if(strlen(pLog)==0)
        return;
    FILE* f = fopen( "./Log/dberror.log", "a" ) ;
    if( f )
    {
        fwrite( pLog, 1, strlen( pLog ), f ) ;
        fwrite( LF, 1, 2, f ) ;
        fclose(f) ;
    }
__LEAVE_FUNCTION
}

// Release the current cursor/bindings so the statement can be reused.
VOID ODBCInterface::Clear()
{
__ENTER_FUNCTION
    SQLCloseCursor( hStmt ) ;
    SQLFreeStmt( hStmt, SQL_UNBIND ) ;
__LEAVE_FUNCTION
}

// Advance to the next row of a bound (Execute) result set.
BOOL ODBCInterface::Fetch()
{
__ENTER_FUNCTION
    memset( Col, 0, MAXCOL * MAX_COLUMN_BUFFER ) ;
    mResult = SQLFetch( hStmt ) ;
    if ( ( SQL_SUCCESS != mResult ) && ( <API key> != mResult ) && ( SQL_NO_DATA != mResult ) )
    {
        DiagState() ;
        return FALSE ;
    }
    if( SQL_NO_DATA == mResult )
    {
        return FALSE ;
    }
    return TRUE ;
__LEAVE_FUNCTION
    return FALSE ;
}

// Advance to the next row of an unbound (LongExecute) result set and pull
// each column with SQLGetData.
BOOL ODBCInterface::LongFetch()
{
__ENTER_FUNCTION
    mResult = SQLFetch( hStmt ) ;
    if ( ( SQL_SUCCESS != mResult ) && ( <API key> != mResult ) && ( SQL_NO_DATA != mResult ) )
    {
        DiagState() ;
        return FALSE ;
    }
    if( SQL_NO_DATA == mResult )
    {
        return FALSE ;
    }
    for ( INT ColIndex = 0; ColIndex < mColCount; ColIndex++ )
    {
        INT TotalGet = 0 ;
        SQLLEN LenData = 0 ; // changed from SQLINTEGER by viticm
        INT Ret = 0 ;
        // NOTE(review): the loop body breaks immediately, so only one
        // SQLGetData chunk is retrieved per column — presumably the buffer is
        // large enough for a whole value; confirm before enabling chunked
        // retrieval.  Behavior intentionally left unchanged.
        while( SQL_NO_DATA != ( Ret = SQLGetData( hStmt, ColIndex+1, SQL_C_CHAR, Col[ ColIndex ] + TotalGet, <API key>, &LenData ) ) )
        {
            break ;
        }
        //printf("sql length=%d ret=%d,LenData=%d\r\n",TotalGet,Ret,LenData);
    }
    return TRUE ;
__LEAVE_FUNCTION
    return FALSE ;
}

// Return column ColIndex (1-based) of the current row as an int.
INT ODBCInterface::GetInt( INT ColIndex, INT& ErrorCode )
{
__ENTER_FUNCTION
    if ( ColIndex > mColCount )
    {
        ErrorCode = QUERYNOCOL ;
        Assert( FALSE ) ;
        return QUERYNOCOL ;
    }
    if ( SQL_NULL_DATA == mCollocate[ ColIndex - 1 ] )
    {
        ErrorCode = QUERYNULL ;
        Assert( FALSE ) ;
        return QUERYNULL ;
    }
    else
    {
        ErrorCode = QUERYOK ;
        return atoi( Col[ ColIndex - 1 ] ) ;
    }
__LEAVE_FUNCTION
    return QUERYNULL;
}

// Return column ColIndex (1-based) of the current row as an unsigned int.
UINT ODBCInterface::GetUInt( INT ColIndex, INT& ErrorCode )
{
__ENTER_FUNCTION
    if ( ColIndex > mColCount )
    {
        ErrorCode = QUERYNOCOL ;
        Assert( FALSE ) ;
        return QUERYNOCOL ;
    }
    if ( SQL_NULL_DATA == mCollocate[ ColIndex - 1 ] )
    {
        ErrorCode = QUERYNULL ;
        Assert( FALSE ) ;
        return QUERYNULL ;
    }
    else
    {
        ErrorCode = QUERYOK ;
        return ( UINT )atoi( Col[ ColIndex - 1 ] ) ;
    }
__LEAVE_FUNCTION
    return QUERYNULL ;
}

// Return column ColIndex (1-based) of the current row as a float.
FLOAT ODBCInterface::GetFloat( INT ColIndex, INT& ErrorCode )
{
__ENTER_FUNCTION
    if ( ColIndex > mColCount )
    {
        ErrorCode = QUERYNOCOL ;
        Assert( FALSE ) ;
        return QUERYNOCOL ;
    }
    if ( SQL_NULL_DATA == mCollocate[ ColIndex - 1 ] )
    {
        ErrorCode = QUERYNULL ;
        Assert( FALSE ) ;
        return QUERYNULL ;
    }
    else
    {
        ErrorCode = QUERYOK ;
        return ( FLOAT )atof( Col[ ColIndex - 1 ] ) ;
    }
__LEAVE_FUNCTION
    return QUERYNULL;
}

// Copy column ColIndex (1-based) into buf as a C string (truncated to BufLen).
VOID ODBCInterface::GetString( INT ColIndex, CHAR * buf, INT BufLen, INT& ErrorCode )
{
__ENTER_FUNCTION
    if ( ColIndex > mColCount )
    {
        ErrorCode = QUERYNOCOL ;
        buf[ 0 ] = '\0' ;
        Assert( FALSE ) ;
        return ;
    }
    if ( SQL_NULL_DATA == mCollocate[ ColIndex - 1 ] )
    {
        ErrorCode = QUERYNULL;
        buf[ 0 ] = '\0';
        Assert( FALSE ) ;
    }
    else
    {
        if( MAX_COLUMN_BUFFER > BufLen)
            strncpy( buf, Col[ ColIndex - 1 ], BufLen ) ;
        else
        {
            strncpy( buf, Col[ ColIndex - 1 ], MAX_COLUMN_BUFFER ) ;
        }
        ErrorCode = QUERYOK ;
    }
__LEAVE_FUNCTION
}

// Decode column ColIndex (1-based) from its DB string form into binary data.
VOID ODBCInterface::GetField( INT ColIndex, CHAR * buf, INT BufLen, INT& ErrorCode )
{
__ENTER_FUNCTION
    if ( ColIndex > mColCount )
    {
        ErrorCode = QUERYNOCOL ;
        buf[ 0 ] = '\0' ;
        Assert( FALSE ) ;
        return ;
    }
    if ( SQL_NULL_DATA == mCollocate[ ColIndex - 1 ] )
    {
        ErrorCode = QUERYNULL ;
        buf[ 0 ] = '\0' ;
        Assert( FALSE ) ;
    }
    else
    {
        if( MAX_COLUMN_BUFFER> BufLen )
        {
            UINT OutLength = 0 ;
            DBStr2Binary( Col[ ColIndex - 1 ], MAX_COLUMN_BUFFER, buf, BufLen, OutLength ) ;
            Assert( ( INT )OutLength <= BufLen ) ;
        }
        else
        {
            memcpy( buf, Col[ ColIndex - 1 ], MAX_COLUMN_BUFFER ) ;
            Assert( FALSE ) ;
        }
        ErrorCode = QUERYOK ;
    }
__LEAVE_FUNCTION
}

// Same as GetField() but for the long-column buffer used by LongFetch().
VOID ODBCInterface::GetLongField( INT ColIndex, CHAR * buf, INT BufLen, INT& ErrorCode )
{
__ENTER_FUNCTION
    if ( ColIndex > mColCount )
    {
        ErrorCode = QUERYNOCOL ;
        buf[ 0 ] = '\0' ;
        Assert( FALSE ) ;
        return ;
    }
    if ( SQL_NULL_DATA == mCollocate[ ColIndex - 1 ] )
    {
        ErrorCode = QUERYNULL ;
        buf[ 0 ] = '\0' ;
        Assert( FALSE ) ;
    }
    else
    {
        if( MAX_COLUMN_BUFFER > BufLen )
        {
            UINT OutLength = 0 ;
            DBStr2Binary( Col[ ColIndex - 1 ], <API key>, buf, BufLen, OutLength ) ;
            Assert( ( INT )OutLength <= BufLen ) ;
        }
        else
        {
            memcpy( buf,Col[ ColIndex - 1 ], <API key> ) ;
            Assert( FALSE ) ;
        }
        ErrorCode = QUERYOK ;
    }
__LEAVE_FUNCTION
}

// Collect diagnostics after an Execute()/Fetch() failure; closes the
// connection for unrecognized error codes.
VOID ODBCInterface::DiagState()
{
__ENTER_FUNCTION
    INT ii = 1 ;
    SQLINTEGER NativeError ;
    SQLCHAR SqlState[ 6 ] ;
    SQLSMALLINT MsgLen ;
    memset( mErrorMsg, 0, <API key> ) ;
    while ( SQL_NO_DATA != ( mResult = SQLGetDiagRec( SQL_HANDLE_DBC, hDbc, ii, SqlState, &NativeError, mErrorMsg, sizeof( mErrorMsg ), &MsgLen ) ) )
    {
        ii++ ;
    }
    mErrorMsg[ <API key> - 1 ] = '\0' ;
    if( strlen( ( const CHAR* )mErrorMsg ) == 0 )
    {
        mResult = SQLError( hEnv, hDbc, hStmt, SqlState, &NativeError, mErrorMsg, sizeof( mErrorMsg ), &MsgLen ) ;
    }
    mErrorCode = NativeError ;
    // Duplicate-key style errors are recoverable; anything else tears the
    // connection down.
    switch( mErrorCode )
    {
    case 2601:
        break ;
    case 1062:
        break ;
    default:
        {
            Close() ;
        }
    }
    SaveLog( ( const CHAR* )m_Query.m_SqlStr ) ;// Sql
    CHAR ErrorBuffer[ 512 ] = { 0 } ;
    sprintf( ErrorBuffer,"ErrorCode = %d, ErrorMessage = %s", mErrorCode, mErrorMsg ) ;
    SaveLog( ErrorBuffer ) ;
__LEAVE_FUNCTION
}

// Diagnostics variant for the long-query path; logs to dberror.log.
VOID ODBCInterface::DiagStateEx()
{
__ENTER_FUNCTION
    INT ii = 1 ;
    SQLINTEGER NativeError ;
    SQLCHAR SqlState[ 6 ] ;
    SQLSMALLINT MsgLen ;
    memset( mErrorMsg, 0, <API key> ) ;
    while ( SQL_NO_DATA != ( mResult = SQLGetDiagRec( SQL_HANDLE_DBC, hDbc, ii, SqlState, &NativeError, mErrorMsg, sizeof( mErrorMsg ), &MsgLen ) ) )
    {
        ii++ ;
    }
    mErrorMsg[ <API key> - 1 ] = '\0';
    if( 0 == strlen( (const CHAR* )mErrorMsg) )
    {
        mResult = SQLError( hEnv, hDbc, hStmt, SqlState, &NativeError, mErrorMsg, sizeof( mErrorMsg ), &MsgLen ) ;
    }
    mErrorCode = NativeError ;
    switch( mErrorCode )
    {
    case 2601:
        break ;
    case 3621:
        break ;
    case 1026:
        break ;
    default:
        {
            Close() ;
        }
    }
    CHAR ErrorBuffer[ 512 ] = { 0 } ;
    sprintf( ErrorBuffer, "ErrorCode = %d, ErrorMessage = %s, ErrorSql", mErrorCode, mErrorMsg ) ;
    SaveErrorLog( ErrorBuffer ) ;
    SaveErrorLog( ( const CHAR* )m_LongQuery.m_SqlStr ) ; // Sql
__LEAVE_FUNCTION
}

// Append a line to ./Log/database.log (best effort, no-op on failure).
VOID ODBCInterface::SaveLog( const CHAR* pLog )
{
__ENTER_FUNCTION
    if( 0 == strlen( pLog ) )
        return ;
    FILE* f = fopen( "./Log/database.log", "a" ) ;
    // FIX: fopen() result was used unchecked; a missing ./Log directory
    // crashed here.  Mirror the guard used in SaveErrorLog().
    if( f )
    {
        fwrite( pLog, 1, strlen(pLog), f ) ;
        fwrite( LF, 1, 2, f ) ;
        fclose( f ) ;
    }
__LEAVE_FUNCTION
}

// Release all handles without touching mConnected (used on teardown paths).
VOID ODBCInterface::CleanEnv()
{
    if ( hStmt )
    {
        SQLCloseCursor( hStmt ) ;
        SQLFreeStmt( hStmt, SQL_UNBIND ) ;
        SQLFreeHandle( SQL_HANDLE_STMT, hStmt ) ;
        hStmt = NULL ;
    }
    if ( hDbc )
    {
        SQLDisconnect( hDbc ) ;
        SQLFreeHandle( SQL_HANDLE_DBC,hDbc ) ;
        hDbc = NULL ;
    }
    if ( hEnv )
    {
        SQLFreeHandle( SQL_HANDLE_ENV, hEnv ) ;
        hEnv = NULL ;
    }
}
# -*- coding: utf-8 -*-
"""Python 2/3 compatibility module."""
import sys

# True when running under a Python 2 interpreter (major version digit check).
PY2 = int(sys.version[0]) == 2

if not PY2:
    # Python 3: text and bytes are distinct; the py2-only names alias to
    # their closest py3 equivalents.
    text_type = str
    binary_type = bytes
    string_types = (str,)
    unicode = str
    basestring = (str, bytes)
else:
    # Python 2: re-export the builtins so callers can import them from here.
    text_type = unicode  # noqa
    binary_type = str
    string_types = (str, unicode)  # noqa
    unicode = unicode  # noqa
    basestring = basestring  # noqa
#ifndef DragData_h
#define DragData_h

#include "core/CoreExport.h"
#include "core/page/DragActions.h"
#include "platform/geometry/IntPoint.h"
#include "platform/heap/Handle.h"
#include "wtf/Forward.h"
#include "wtf/HashMap.h"
#include "wtf/Vector.h"

namespace blink {

class DataObject;
class DocumentFragment;
class LocalFrame;
class Range;

// Bit flags describing the application context of a drag; values are powers
// of two so they can be OR-ed together. (Flag names are redacted in this
// copy — NOTE(review): confirm against the upstream Blink sources.)
enum <API key> {
    DragApplicationNone = 0,
    <API key> = 1,
    <API key> = 2,
    <API key> = 4,
    <API key> = 8
};

// Read-only view over the data being dragged into a frame. Wraps the
// platform DataObject plus the drag's client/global positions, the allowed
// DragOperation, and application flags. STACK_ALLOCATED: instances live only
// on the stack and must not be heap-allocated or stored.
class CORE_EXPORT DragData {
    STACK_ALLOCATED();
public:
    // Controls whether dragged filenames may be converted to URLs by the
    // containsURL/asURL queries below.
    enum <API key> { <API key>, ConvertFilenames };

    // clientPosition is taken to be the position of the drag event within the target window, with (0,0) at the top left
    DragData(DataObject*, const IntPoint& clientPosition, const IntPoint& globalPosition, DragOperation, <API key> = DragApplicationNone);

    // Trivial accessors for the values captured at construction.
    const IntPoint& clientPosition() const { return m_clientPosition; }
    const IntPoint& globalPosition() const { return m_globalPosition; }
    <API key> flags() const { return m_applicationFlags; }
    DataObject* platformData() const { return m_platformDragData; }
    DragOperation <API key>() const { return <API key>; }

    // Content queries; implementations live in the corresponding .cpp.
    bool containsURL(<API key> filenamePolicy = ConvertFilenames) const;
    bool containsPlainText() const;
    bool <API key>() const;
    String asURL(<API key> filenamePolicy = ConvertFilenames, String* title = nullptr) const;
    String asPlainText() const;
    void asFilePaths(Vector<String>&) const;
    unsigned numberOfFiles() const;
    DocumentFragment* asFragment(LocalFrame*) const;
    bool canSmartReplace() const;
    bool containsFiles() const;
    int modifiers() const;

    // Filesystem API id for dropped files, if any.
    String droppedFileSystemId() const;

private:
    IntPoint m_clientPosition;
    IntPoint m_globalPosition;
    Member<DataObject> m_platformDragData; // traced by Oilpan via Member<>
    DragOperation <API key>;
    <API key> m_applicationFlags;
};

} // namespace blink

#endif // !DragData_h
/*
 * @description Expression is always true via if (true_expression)
 *
 * NOTE(review): this file is a static-analysis test fixture (Juliet/CWE
 * style). The "bad" function's always-true comparison is the deliberate
 * artifact under test and must NOT be "fixed"; tools are expected to flag it.
 * */

#include "std_testcase.h"

#ifndef OMITBAD
/* Bad variant: the condition 30 == (31 - 1) is a compile-time constant
 * truth, so the branch always executes. */
void <API key>()
{
    int intRand1 = 30;
    int intRand2 = 31;
    /* FLAW: This expression is always true */
    if (intRand1 == (intRand2 - 1))
    {
        printLine("Always prints");
    }
}
#endif /* OMITBAD */

#ifndef OMITGOOD
/* Good variant: compares two independent rand() values, so the branch is
 * not statically decidable. */
static void good1()
{
    /* INCIDENTAL: CWE 338 - Use of Cryptographically Weak PRNG */
    int intRand1 = rand();
    int intRand2 = rand();
    /* FIX: Possibly evaluate to false */
    if(intRand1 != intRand2)
    {
        printLine("Sometimes prints");
    }
}

/* Public entry point for the good variant, matching the corpus convention
 * of wrapping each goodN() helper. */
void <API key>()
{
    good1();
}
#endif /* OMITGOOD */

/* Below is the main(). It is only used when building this testcase on
 * its own for testing or for building a binary to use in testing binary
 * analysis tools. It is not used when compiling all the testcases as one
 * application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    <API key>();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    <API key>();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
// ASM: a very small and fast Java bytecode manipulation framework
// modification, are permitted provided that the following conditions
// are met:
// documentation and/or other materials provided with the distribution.
// contributors may be used to endorse or promote products derived from
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
// NOTE(review): the BSD license header above appears truncated in this copy;
// compare against the upstream ASM sources before redistribution.
package nginx.clojure.asm.tree;

import java.util.List;
import java.util.Map;
import nginx.clojure.asm.Label;
import nginx.clojure.asm.MethodVisitor;
import nginx.clojure.asm.Opcodes;

/**
 * A node that represents a TABLESWITCH instruction.
 *
 * @author Eric Bruneton
 */
public class TableSwitchInsnNode extends AbstractInsnNode {

  /** The minimum key value. */
  public int min;

  /** The maximum key value. */
  public int max;

  /** Beginning of the default handler block. */
  public LabelNode dflt;

  /** Beginnings of the handler blocks. This list is a list of {@link LabelNode} objects. */
  public List<LabelNode> labels;

  /**
   * Constructs a new {@link TableSwitchInsnNode}.
   *
   * @param min the minimum key value.
   * @param max the maximum key value.
   * @param dflt beginning of the default handler block.
   * @param labels beginnings of the handler blocks. {@code labels[i]} is the beginning of the
   *     handler block for the {@code min + i} key.
   */
  public TableSwitchInsnNode(
      final int min, final int max, final LabelNode dflt, final LabelNode... labels) {
    super(Opcodes.TABLESWITCH);
    this.min = min;
    this.max = max;
    this.dflt = dflt;
    // Copy the varargs array into a mutable list owned by this node.
    this.labels = Util.asArrayList(labels);
  }

  @Override
  public int getType() {
    return TABLESWITCH_INSN;
  }

  @Override
  public void accept(final MethodVisitor methodVisitor) {
    // Resolve each LabelNode to its concrete Label before visiting.
    Label[] labelsArray = new Label[this.labels.size()];
    for (int i = 0, n = labelsArray.length; i < n; ++i) {
      labelsArray[i] = this.labels.get(i).getLabel();
    }
    // (Method name redacted in this copy; it forwards the instruction to the
    // visitor and then emits any attached annotations.)
    methodVisitor.<API key>(min, max, dflt.getLabel(), labelsArray);
    acceptAnnotations(methodVisitor);
  }

  @Override
  public AbstractInsnNode clone(final Map<LabelNode, LabelNode> clonedLabels) {
    // Remap default and handler labels through the clone table so the copy
    // refers to the cloned method's labels, then copy annotations.
    return new TableSwitchInsnNode(min, max, clone(dflt, clonedLabels), clone(labels, clonedLabels))
        .cloneAnnotations(this);
  }
}
// modification, are permitted provided that the following conditions are
// met:
// the distribution.
// * Neither the name of John Haddon nor the names of
// any other contributors to this software may be used to endorse or
// promote products derived from this software without specific prior
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// NOTE(review): the license header above appears truncated in this copy.

#include "Gaffer/StringAlgo.h"
#include "Gaffer/StringPlug.h"

#include "GafferScene/AttributeProcessor.h"

using namespace IECore;
using namespace Gaffer;
using namespace GafferScene;

<API key>( AttributeProcessor );

size_t AttributeProcessor::g_firstPlugIndex = 0;

// Constructor: adds the "names"/"invertNames" plugs that select which
// attributes are processed, and wires direct pass-throughs for the scene
// data this node never modifies.
AttributeProcessor::AttributeProcessor( const std::string &name )
	:	<API key>( name )
{
	<API key>( g_firstPlugIndex );
	addChild( new StringPlug( "names" ) );
	addChild( new BoolPlug( "invertNames" ) );

	// Fast pass-throughs for things we don't modify
	outPlug()->objectPlug()->setInput( inPlug()->objectPlug() );
	outPlug()->transformPlug()->setInput( inPlug()->transformPlug() );
	outPlug()->boundPlug()->setInput( inPlug()->boundPlug() );
}

AttributeProcessor::~AttributeProcessor()
{
}

// Plug accessors — children are looked up by index relative to
// g_firstPlugIndex, matching the order they were added in the constructor.
Gaffer::StringPlug *AttributeProcessor::namesPlug()
{
	return getChild<StringPlug>( g_firstPlugIndex );
}

const Gaffer::StringPlug *AttributeProcessor::namesPlug() const
{
	return getChild<StringPlug>( g_firstPlugIndex );
}

Gaffer::BoolPlug *AttributeProcessor::invertNamesPlug()
{
	return getChild<BoolPlug>( g_firstPlugIndex + 1 );
}

const Gaffer::BoolPlug *AttributeProcessor::invertNamesPlug() const
{
	return getChild<BoolPlug>( g_firstPlugIndex + 1 );
}

// Declares that the output attributes depend on both selection plugs, in
// addition to whatever the base class declares.
void AttributeProcessor::affects( const Gaffer::Plug *input, <API key> &outputs ) const
{
	<API key>::affects( input, outputs );

	if( input == namesPlug() || input == invertNamesPlug() )
	{
		outputs.push_back( outPlug()->attributesPlug() );
	}
}

bool AttributeProcessor::processesAttributes() const
{
	bool invert = invertNamesPlug()->getValue();
	if( invert )
	{
		// we don't know if we're modifying the attributes till we find out what
		// names they have.
		return true;
	}
	else
	{
		// if there are no names, then we know we're not modifying the attributes.
		std::string names = namesPlug()->getValue();
		return names.size();
	}
}

// Hash contribution: the processed attributes vary with the name filter and
// its inversion flag.
void AttributeProcessor::<API key>( const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h ) const
{
	namesPlug()->hash( h );
	invertNamesPlug()->hash( h );
}

// Applies processAttribute() to every attribute whose name matches the
// filter (XORed with invert), dropping attributes for which it returns null.
IECore::<API key> AttributeProcessor::<API key>( const ScenePath &path, const Gaffer::Context *context, IECore::<API key> inputAttributes ) const
{
	if( inputAttributes->members().empty() )
	{
		return inputAttributes;
	}

	const std::string names = namesPlug()->getValue();
	const bool invert = invertNamesPlug()->getValue();

	CompoundObjectPtr result = new CompoundObject;
	for( CompoundObject::ObjectMap::const_iterator it = inputAttributes->members().begin(), eIt = inputAttributes->members().end(); it != eIt; ++it )
	{
		ConstObjectPtr attribute = it->second;
		// `!= invert` implements the invertNames semantics: process matches
		// normally, or process non-matches when inverted.
		if( matchMultiple( it->first, names ) != invert )
		{
			attribute = processAttribute( path, context, it->first, attribute.get() );
		}
		// A null result from processAttribute() deletes the attribute.
		if( attribute )
		{
			result->members().insert(
				CompoundObject::ObjectMap::value_type(
					it->first,
					// cast is ok - result is const immediately on
					// returning from this function, and attribute will
					// therefore not be modified.
					boost::const_pointer_cast<Object>( attribute )
				)
			);
		}
	}

	return result;
}
/** * @file <API key>.h * Provides checks if mission is feasible given the navigation capabilities * * @author Lorenz Meier <lm@inf.ethz.ch> * @author Thomas Gubler <thomasgubler@student.ethz.ch> * @author Sander Smeets <sander@droneslab.com> * @author Nuno Marques <nuno.marques@dronesolutions.io> */ #pragma once #include <dataman/dataman.h> #include <uORB/topics/mission.h> class Geofence; class Navigator; class <API key> { private: Navigator *_navigator{nullptr}; /* Checks for all airframes */ bool checkGeofence(const mission_s &mission, float home_alt, bool home_valid); bool <API key>(const mission_s &mission, float home_alt, bool home_alt_valid, bool throw_error); bool <API key>(const mission_s &mission); bool <API key>(const mission_s &mission, float max_distance); bool <API key>(const mission_s &mission, float max_distance); /* Checks specific to fixedwing airframes */ bool checkFixedwing(const mission_s &mission, float home_alt, bool land_start_req); bool checkTakeoff(const mission_s &mission, float home_alt); bool <API key>(const mission_s &mission, bool land_start_req); /* Checks specific to rotarywing airframes */ bool checkRotarywing(const mission_s &mission, float home_alt); public: <API key>(Navigator *navigator) : _navigator(navigator) {} ~<API key>() = default; <API key>(const <API key> &) = delete; <API key> &operator=(const <API key> &) = delete; /* * Returns true if mission is feasible and false otherwise */ bool <API key>(const mission_s &mission, float <API key>, float <API key>, bool land_start_req); };
#ifndef <API key>
#define <API key>

#include <string>
#include "base/basictypes.h"
#include "base/files/file_path.h"
#include "content/public/common/appcache_info.h"

namespace net {
class URLRequest;
}

namespace content {

// Defines constants, types, and abstract classes used in the main
// process and in child processes.

// AppCache lifecycle events delivered to hosts; the last entry aliases the
// final enumerator to provide a MAX-style bound. (Enumerator names are
// redacted in this copy.)
enum AppCacheEventID {
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key> = <API key>
};

// Temporarily renumber them in wierd way, to help remove LOG_TIP from WebKit
enum AppCacheLogLevel {
  APPCACHE_LOG_DEBUG = 4,
  APPCACHE_LOG_INFO = 1,
  <API key> = 2,
  APPCACHE_LOG_ERROR = 3,
};

enum <API key> {
  <API key>,
  <API key>,
  <API key>
};

// Reasons an appcache operation failed; last entry is a MAX-style alias.
enum AppCacheErrorReason {
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key>,
  <API key> = <API key>
};

// Type to hold information about a single appcache resource.
struct CONTENT_EXPORT <API key> {
  <API key>();
  ~<API key>();

  GURL url;
  int64 size;
  bool is_master;
  bool is_manifest;
  bool is_intercept;
  bool is_fallback;
  bool is_foreign;
  bool is_explicit;
  int64 response_id;
};

// Details accompanying an appcache error event.
struct CONTENT_EXPORT <API key> {
  <API key>();
  <API key>(std::string message,
            AppCacheErrorReason reason,
            GURL url,
            int status,
            bool is_cross_origin);
  ~<API key>();

  std::string message;
  AppCacheErrorReason reason;
  GURL url;
  int status;
  bool is_cross_origin;
};

typedef std::vector<<API key>> <API key>;

// A manifest namespace entry (intercept/fallback/etc.) mapping a namespace
// URL to a target URL, optionally treated as a pattern.
struct CONTENT_EXPORT AppCacheNamespace {
  AppCacheNamespace();  // Type is <API key> by default.
  AppCacheNamespace(<API key> type, const GURL& url, const GURL& target,
                    bool is_pattern);
  AppCacheNamespace(<API key> type, const GURL& url, const GURL& target,
                    bool is_pattern, bool is_executable);
  ~AppCacheNamespace();

  // Returns whether |url| falls within this namespace.
  bool IsMatch(const GURL& url) const;

  <API key> type;
  GURL namespace_url;
  GURL target_url;
  bool is_pattern;
  bool is_executable;
};

typedef std::vector<AppCacheNamespace> <API key>;

// Interface used by backend (browser-process) to talk to frontend (renderer).
// Pure-virtual interface implemented in the renderer; the browser process
// calls these to push cache-selection, status, event, progress, error,
// blocked-content and log notifications to one or more hosts.
class CONTENT_EXPORT AppCacheFrontend {
 public:
  virtual void OnCacheSelected(
      int host_id, const AppCacheInfo& info) = 0;
  virtual void OnStatusChanged(const std::vector<int>& host_ids,
                               AppCacheStatus status) = 0;
  virtual void OnEventRaised(const std::vector<int>& host_ids,
                             AppCacheEventID event_id) = 0;
  // Progress notification during a cache update: |num_complete| of
  // |num_total| resources fetched so far.
  virtual void <API key>(const std::vector<int>& host_ids,
                         const GURL& url,
                         int num_total, int num_complete) = 0;
  virtual void OnErrorEventRaised(
      const std::vector<int>& host_ids,
      const <API key>& details) = 0;
  virtual void OnContentBlocked(int host_id,
                                const GURL& manifest_url) = 0;
  virtual void OnLogMessage(int host_id, AppCacheLogLevel log_level,
                            const std::string& message) = 0;

  virtual ~AppCacheFrontend() {}
};

// Interface used by frontend (renderer) to talk to backend (browser-process).
// Pure-virtual interface implemented in the browser process; the renderer
// calls these to register hosts, select/mark caches, and drive updates.
class CONTENT_EXPORT AppCacheBackend {
 public:
  virtual void RegisterHost(int host_id) = 0;
  virtual void UnregisterHost(int host_id) = 0;
  virtual void SetSpawningHostId(int host_id, int spawning_host_id) = 0;
  virtual void SelectCache(int host_id,
                           const GURL& document_url,
                           const int64 <API key>,
                           const GURL& manifest_url) = 0;
  virtual void <API key>(
      int host_id, int parent_process_id, int parent_host_id) = 0;
  virtual void <API key>(
      int host_id, int64 appcache_id) = 0;
  virtual void MarkAsForeignEntry(int host_id, const GURL& document_url,
                                  int64 <API key>) = 0;
  virtual AppCacheStatus GetStatus(int host_id) = 0;
  virtual bool StartUpdate(int host_id) = 0;
  virtual bool SwapCache(int host_id) = 0;
  virtual void GetResourceList(
      int host_id, std::vector<<API key>>* resource_infos) = 0;

 protected:
  // Not deletable through this interface; lifetime is owned elsewhere.
  virtual ~AppCacheBackend() {}
};

// Useful string constants.
CONTENT_EXPORT extern const char kHttpGETMethod[];
CONTENT_EXPORT extern const char kHttpHEADMethod[];

// base::CommandLine flag to turn this experimental feature on.
// Command-line switch name (see comment above) plus free-function helpers
// for deciding whether a URL/method/request is eligible for appcache
// handling, and the on-disk directory name constant. (Identifiers are
// redacted in this copy — NOTE(review): confirm against upstream Chromium.)
CONTENT_EXPORT extern const char <API key>[];

CONTENT_EXPORT bool <API key>(const GURL& url);
CONTENT_EXPORT bool <API key>(
    const std::string& method);
CONTENT_EXPORT bool <API key>(
    const net::URLRequest* request);

CONTENT_EXPORT extern const base::FilePath::CharType <API key>[];

}  // namespace

#endif  // <API key>
#include "seccomon.h" #include "cert.h" #include "keyhi.h" #include "ssl.h" #include "sslimpl.h" #include "sslproto.h" #include "nspr.h" #include "private/pprio.h" #ifndef NO_PKCS11_BYPASS #include "blapi.h" #endif #include "nss.h" #define SET_ERROR_CODE /* reminder */ static const sslSocketOps ssl_default_ops = { /* No SSL. */ ssl_DefConnect, NULL, ssl_DefBind, ssl_DefListen, ssl_DefShutdown, ssl_DefClose, ssl_DefRecv, ssl_DefSend, ssl_DefRead, ssl_DefWrite, ssl_DefGetpeername, ssl_DefGetsockname }; static const sslSocketOps ssl_secure_ops = { /* SSL. */ ssl_SecureConnect, NULL, ssl_DefBind, ssl_DefListen, ssl_SecureShutdown, ssl_SecureClose, ssl_SecureRecv, ssl_SecureSend, ssl_SecureRead, ssl_SecureWrite, ssl_DefGetpeername, ssl_DefGetsockname }; /* ** default settings for socket enables */ static sslOptions ssl_defaults = { { siBuffer, NULL, 0 }, /* nextProtoNego */ PR_TRUE, /* useSecurity */ PR_FALSE, /* useSocks */ PR_FALSE, /* requestCertificate */ 2, /* requireCertificate */ PR_FALSE, /* handshakeAsClient */ PR_FALSE, /* handshakeAsServer */ PR_FALSE, /* enableSSL2 */ /* now defaults to off in NSS 3.13 */ PR_FALSE, /* unusedBit9 */ PR_FALSE, /* unusedBit10 */ PR_FALSE, /* noCache */ PR_FALSE, /* fdx */ PR_FALSE, /* v2CompatibleHello */ /* now defaults to off in NSS 3.13 */ PR_TRUE, /* detectRollBack */ PR_FALSE, /* noStepDown */ PR_FALSE, /* bypassPKCS11 */ PR_FALSE, /* noLocks */ PR_FALSE, /* <API key> */ PR_FALSE, /* enableDeflate */ 2, /* enableRenegotiation (default: requires extension) */ PR_FALSE, /* <API key> */ PR_FALSE, /* enableFalseStart */ PR_TRUE, /* cbcRandomIV */ PR_FALSE, /* enableOCSPStapling */ PR_TRUE, /* enableNPN */ PR_FALSE /* enableALPN */ }; /* * default range of enabled SSL/TLS protocols */ static SSLVersionRange <API key> = { <API key>, <API key> }; static SSLVersionRange <API key> = { <API key>, <API key> }; #define VERSIONS_DEFAULTS(variant) \ (variant == ssl_variant_stream ? 
&<API key> : \ &<API key>) <API key> ssl_sid_lookup; <API key> ssl_sid_cache; <API key> ssl_sid_uncache; static PRBool ssl_inited = PR_FALSE; static PRDescIdentity ssl_layer_id; PRBool locksEverDisabled; /* implicitly PR_FALSE */ PRBool ssl_force_locks; /* implicitly PR_FALSE */ int ssl_lock_readers = 1; /* default true. */ char ssl_debug; char ssl_trace; FILE * ssl_trace_iob; FILE * ssl_keylog_iob; char lockStatus[] = "Locks are ENABLED. "; #define LOCKSTATUS_OFFSET 10 /* offset of ENABLED */ /* <API key> and <API key> are not implemented. */ static const PRUint16 srtpCiphers[] = { <API key>, <API key>, 0 }; /* forward declarations. */ static sslSocket *ssl_NewSocket(PRBool makeLocks, SSLProtocolVariant variant); static SECStatus ssl_MakeLocks(sslSocket *ss); static void <API key>(void); static PRStatus ssl_PushIOLayer(sslSocket *ns, PRFileDesc *stack, PRDescIdentity id); /* ** Lookup a socket structure from a file descriptor. ** Only functions called through the PRIOMethods table should use this. ** Other app-callable functions should use ssl_FindSocket. */ static sslSocket * ssl_GetPrivate(PRFileDesc *fd) { sslSocket *ss; PORT_Assert(fd != NULL); PORT_Assert(fd->methods->file_type == PR_DESC_LAYERED); PORT_Assert(fd->identity == ssl_layer_id); if (fd->methods->file_type != PR_DESC_LAYERED || fd->identity != ssl_layer_id) { PORT_SetError(<API key>); return NULL; } ss = (sslSocket *)fd->secret; /* Set ss->fd lazily. We can't rely on the value of ss->fd set by * ssl_PushIOLayer because another PR_PushIOLayer call will switch the * contents of the PRFileDesc pointed by ss->fd and the new layer. * See bug 807250. */ ss->fd = fd; return ss; } /* This function tries to find the SSL layer in the stack. * It searches for the first SSL layer at or below the argument fd, * and failing that, it searches for the nearest SSL layer above the * argument fd. It returns the private sslSocket from the found layer. 
*/ sslSocket * ssl_FindSocket(PRFileDesc *fd) { PRFileDesc *layer; sslSocket *ss; PORT_Assert(fd != NULL); PORT_Assert(ssl_layer_id != 0); layer = <API key>(fd, ssl_layer_id); if (layer == NULL) { PORT_SetError(<API key>); return NULL; } ss = (sslSocket *)layer->secret; /* Set ss->fd lazily. We can't rely on the value of ss->fd set by * ssl_PushIOLayer because another PR_PushIOLayer call will switch the * contents of the PRFileDesc pointed by ss->fd and the new layer. * See bug 807250. */ ss->fd = layer; return ss; } static sslSocket * ssl_DupSocket(sslSocket *os) { sslSocket *ss; SECStatus rv; ss = ssl_NewSocket((PRBool)(!os->opt.noLocks), os->protocolVariant); if (ss) { ss->opt = os->opt; ss->opt.useSocks = PR_FALSE; ss->vrange = os->vrange; ss->peerID = !os->peerID ? NULL : PORT_Strdup(os->peerID); ss->url = !os->url ? NULL : PORT_Strdup(os->url); ss->ops = os->ops; ss->rTimeout = os->rTimeout; ss->wTimeout = os->wTimeout; ss->cTimeout = os->cTimeout; ss->dbHandle = os->dbHandle; /* copy ssl2&3 policy & prefs, even if it's not selected (yet) */ ss->allowedByPolicy = os->allowedByPolicy; ss-><API key>= os-><API key>; ss->chosenPreference = os->chosenPreference; PORT_Memcpy(ss->cipherSuites, os->cipherSuites, sizeof os->cipherSuites); PORT_Memcpy(ss->ssl3.dtlsSRTPCiphers, os->ssl3.dtlsSRTPCiphers, sizeof(PRUint16) * os->ssl3.dtlsSRTPCipherCount); ss->ssl3.dtlsSRTPCipherCount = os->ssl3.dtlsSRTPCipherCount; if (os->cipherSpecs) { ss->cipherSpecs = (unsigned char*)PORT_Alloc(os->sizeCipherSpecs); if (ss->cipherSpecs) PORT_Memcpy(ss->cipherSpecs, os->cipherSpecs, os->sizeCipherSpecs); ss->sizeCipherSpecs = os->sizeCipherSpecs; ss->preferredCipher = os->preferredCipher; } else { ss->cipherSpecs = NULL; /* produced lazily */ ss->sizeCipherSpecs = 0; ss->preferredCipher = NULL; } if (ss->opt.useSecurity) { /* This int should be SSLKEAType, but CC on Irix complains, * during the for loop. 
*/ int i; sslServerCerts * oc = os->serverCerts; sslServerCerts * sc = ss->serverCerts; for (i=kt_null; i < kt_kea_size; i++, oc++, sc++) { if (oc->serverCert && oc->serverCertChain) { sc->serverCert = CERT_DupCertificate(oc->serverCert); sc->serverCertChain = CERT_DupCertList(oc->serverCertChain); if (!sc->serverCertChain) goto loser; } else { sc->serverCert = NULL; sc->serverCertChain = NULL; } sc->serverKeyPair = oc->serverKeyPair ? ssl3_GetKeyPairRef(oc->serverKeyPair) : NULL; if (oc->serverKeyPair && !sc->serverKeyPair) goto loser; sc->serverKeyBits = oc->serverKeyBits; ss->certStatusArray[i] = !os->certStatusArray[i] ? NULL : SECITEM_DupArray(NULL, os->certStatusArray[i]); } ss->stepDownKeyPair = !os->stepDownKeyPair ? NULL : ssl3_GetKeyPairRef(os->stepDownKeyPair); ss-><API key> = !os-><API key> ? NULL : ssl3_GetKeyPairRef(os-><API key>); /* * XXX the preceding CERT_ and SECKEY_ functions can fail and return NULL. * XXX We should detect this, and not just march on with NULL pointers. */ ss->authCertificate = os->authCertificate; ss->authCertificateArg = os->authCertificateArg; ss->getClientAuthData = os->getClientAuthData; ss-><API key> = os-><API key>; ss->sniSocketConfig = os->sniSocketConfig; ss->sniSocketConfigArg = os->sniSocketConfigArg; ss->handleBadCert = os->handleBadCert; ss->badCertArg = os->badCertArg; ss->handshakeCallback = os->handshakeCallback; ss-><API key> = os-><API key>; ss-><API key> = os-><API key>; ss-><API key> = os-><API key>; ss->pkcs11PinArg = os->pkcs11PinArg; /* Create security data */ rv = <API key>(ss, os); if (rv != SECSuccess) { goto loser; } } } return ss; loser: ssl_FreeSocket(ss); return NULL; } static void ssl_DestroyLocks(sslSocket *ss) { /* Destroy locks. 
*/ if (ss->firstHandshakeLock) { PZ_DestroyMonitor(ss->firstHandshakeLock); ss->firstHandshakeLock = NULL; } if (ss->ssl3HandshakeLock) { PZ_DestroyMonitor(ss->ssl3HandshakeLock); ss->ssl3HandshakeLock = NULL; } if (ss->specLock) { NSSRWLock_Destroy(ss->specLock); ss->specLock = NULL; } if (ss->recvLock) { PZ_DestroyLock(ss->recvLock); ss->recvLock = NULL; } if (ss->sendLock) { PZ_DestroyLock(ss->sendLock); ss->sendLock = NULL; } if (ss->xmitBufLock) { PZ_DestroyMonitor(ss->xmitBufLock); ss->xmitBufLock = NULL; } if (ss->recvBufLock) { PZ_DestroyMonitor(ss->recvBufLock); ss->recvBufLock = NULL; } } /* Caller holds any relevant locks */ static void <API key>(sslSocket *ss) { /* "i" should be of type SSLKEAType, but CC on IRIX complains during * the for loop. */ int i; /* Free up socket */ <API key>(&ss->sec); <API key>(ss); PORT_Free(ss->saveBuf.buf); PORT_Free(ss->pendingBuf.buf); ssl_DestroyGather(&ss->gs); if (ss->peerID != NULL) PORT_Free(ss->peerID); if (ss->url != NULL) PORT_Free((void *)ss->url); /* CONST */ if (ss->cipherSpecs) { PORT_Free(ss->cipherSpecs); ss->cipherSpecs = NULL; ss->sizeCipherSpecs = 0; } /* Clean up server configuration */ for (i=kt_null; i < kt_kea_size; i++) { sslServerCerts * sc = ss->serverCerts + i; if (sc->serverCert != NULL) <API key>(sc->serverCert); if (sc->serverCertChain != NULL) <API key>(sc->serverCertChain); if (sc->serverKeyPair != NULL) ssl3_FreeKeyPair(sc->serverKeyPair); if (ss->certStatusArray[i] != NULL) { SECITEM_FreeArray(ss->certStatusArray[i], PR_TRUE); ss->certStatusArray[i] = NULL; } } if (ss->stepDownKeyPair) { ssl3_FreeKeyPair(ss->stepDownKeyPair); ss->stepDownKeyPair = NULL; } if (ss-><API key>) { ssl3_FreeKeyPair(ss-><API key>); ss-><API key> = NULL; } SECITEM_FreeItem(&ss->opt.nextProtoNego, PR_FALSE); PORT_Assert(!ss->xtnData.sniNameArr); if (ss->xtnData.sniNameArr) { PORT_Free(ss->xtnData.sniNameArr); ss->xtnData.sniNameArr = NULL; } } /* * free an sslSocket struct, and all the stuff that hangs off of it 
*/ void ssl_FreeSocket(sslSocket *ss) { /* Get every lock you can imagine! ** Caller already holds these: ** SSL_LOCK_READER(ss); ** SSL_LOCK_WRITER(ss); */ <API key>(ss); ssl_GetRecvBufLock(ss); <API key>(ss); ssl_GetXmitBufLock(ss); <API key>(ss); <API key>(ss); /* Release all the locks acquired above. */ SSL_UNLOCK_READER(ss); SSL_UNLOCK_WRITER(ss); <API key>(ss); <API key>(ss); <API key>(ss); <API key>(ss); <API key>(ss); ssl_DestroyLocks(ss); #ifdef DEBUG PORT_Memset(ss, 0x1f, sizeof *ss); #endif PORT_Free(ss); return; } SECStatus <API key>(sslSocket *ss, PRBool enabled) { PRFileDesc * osfd = ss->fd->lower; SECStatus rv = SECFailure; PRSocketOptionData opt; opt.option = PR_SockOpt_NoDelay; opt.value.no_delay = (PRBool)!enabled; if (osfd->methods->setsocketoption) { rv = (SECStatus) osfd->methods->setsocketoption(osfd, &opt); } else { PR_SetError(<API key>, 0); } return rv; } static void ssl_ChooseOps(sslSocket *ss) { ss->ops = ss->opt.useSecurity ? &ssl_secure_ops : &ssl_default_ops; } /* Called from SSL_Enable (immediately below) */ static SECStatus PrepareSocket(sslSocket *ss) { SECStatus rv = SECSuccess; ssl_ChooseOps(ss); return rv; } SECStatus SSL_Enable(PRFileDesc *fd, int which, PRBool on) { return SSL_OptionSet(fd, which, on); } #ifndef NO_PKCS11_BYPASS static const PRCallOnceType pristineCallOnce; static PRCallOnceType setupBypassOnce; static SECStatus SSL_BypassShutdown(void* appData, void* nssData) { /* unload freeBL shared library from memory */ BL_Unload(); setupBypassOnce = pristineCallOnce; return SECSuccess; } static PRStatus <API key>(void) { SECStatus rv = <API key>(SSL_BypassShutdown, NULL); PORT_Assert(SECSuccess == rv); return SECSuccess == rv ? 
PR_SUCCESS : PR_FAILURE; } #endif static PRStatus SSL_BypassSetup(void) { #ifdef NO_PKCS11_BYPASS /* Guarantee binary compatibility */ return PR_SUCCESS; #else return PR_CallOnce(&setupBypassOnce, &<API key>); #endif } /* Implements the semantics for SSL_OptionSet(SSL_ENABLE_TLS, on) described in * ssl.h in the section "SSL version range setting API". */ static void ssl_EnableTLS(SSLVersionRange *vrange, PRBool on) { if (<API key>(vrange)) { if (on) { vrange->min = <API key>; vrange->max = <API key>; } /* else don't change anything */ return; } if (on) { /* Expand the range of enabled version to include TLS 1.0 */ vrange->min = PR_MIN(vrange->min, <API key>); vrange->max = PR_MAX(vrange->max, <API key>); } else { /* Disable all TLS versions, leaving only SSL 3.0 if it was enabled */ if (vrange->min == <API key>) { vrange->max = <API key>; } else { /* Only TLS was enabled, so now no versions are. */ vrange->min = <API key>; vrange->max = <API key>; } } } /* Implements the semantics for SSL_OptionSet(SSL_ENABLE_SSL3, on) described in * ssl.h in the section "SSL version range setting API". */ static void ssl_EnableSSL3(SSLVersionRange *vrange, PRBool on) { if (<API key>(vrange)) { if (on) { vrange->min = <API key>; vrange->max = <API key>; } /* else don't change anything */ return; } if (on) { /* Expand the range of enabled versions to include SSL 3.0. We know * SSL 3.0 or some version of TLS is already enabled at this point, so * we don't need to change vrange->max. */ vrange->min = <API key>; } else { /* Disable SSL 3.0, leaving TLS unaffected. */ if (vrange->max > <API key>) { vrange->min = PR_MAX(vrange->min, <API key>); } else { /* Only SSL 3.0 was enabled, so now no versions are. 
*/
        vrange->min = <API key>;
        vrange->max = <API key>;
        }
    }
}

/* Set one boolean option on a single SSL socket.
** Takes the first-handshake and ssl3-handshake monitors (unless this
** socket runs lock-free) while ss->opt is mutated, and releases them at
** the bottom based on the lock state sampled on entry.
*/
SECStatus
SSL_OptionSet(PRFileDesc *fd, PRInt32 which, PRBool on)
{
    sslSocket *ss = ssl_FindSocket(fd);
    SECStatus rv = SECSuccess;
    PRBool holdingLocks;

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in Enable", SSL_GETPID(), fd));
        return SECFailure;
    }

    /* Sample lock state now: the SSL_NO_LOCKS case below may change
    ** ss->opt.noLocks, and the release at the bottom must match what
    ** was actually acquired here. */
    holdingLocks = (!ss->opt.noLocks);
    <API key>(ss);
    <API key>(ss);

    switch (which) {
      case SSL_SOCKS:
        /* SOCKS support is gone; turning it on is an error. */
        ss->opt.useSocks = PR_FALSE;
        rv = PrepareSocket(ss);
        if (on) {
            PORT_SetError(<API key>);
            rv = SECFailure;
        }
        break;

      case SSL_SECURITY:
        ss->opt.useSecurity = on;
        rv = PrepareSocket(ss);
        break;

      case <API key>:
        ss->opt.requestCertificate = on;
        break;

      case <API key>:
        ss->opt.requireCertificate = on;
        break;

      case <API key>:
        /* Client and server handshake modes are mutually exclusive. */
        if ( ss->opt.handshakeAsServer && on ) {
            PORT_SetError(<API key>);
            rv = SECFailure;
            break;
        }
        ss->opt.handshakeAsClient = on;
        break;

      case <API key>:
        if ( ss->opt.handshakeAsClient && on ) {
            PORT_SetError(<API key>);
            rv = SECFailure;
            break;
        }
        ss->opt.handshakeAsServer = on;
        break;

      case SSL_ENABLE_TLS:
        if (IS_DTLS(ss)) {
            if (on) {
                PORT_SetError(<API key>);
                rv = SECFailure; /* not allowed */
            }
            break;
        }
        ssl_EnableTLS(&ss->vrange, on);
        /* Version range changed: drop the cached cipher-suite list so it
        ** is recomputed for the new range. */
        ss->preferredCipher = NULL;
        if (ss->cipherSpecs) {
            PORT_Free(ss->cipherSpecs);
            ss->cipherSpecs = NULL;
            ss->sizeCipherSpecs = 0;
        }
        break;

      case SSL_ENABLE_SSL3:
        if (IS_DTLS(ss)) {
            if (on) {
                PORT_SetError(<API key>);
                rv = SECFailure; /* not allowed */
            }
            break;
        }
        ssl_EnableSSL3(&ss->vrange, on);
        ss->preferredCipher = NULL;
        if (ss->cipherSpecs) {
            PORT_Free(ss->cipherSpecs);
            ss->cipherSpecs = NULL;
            ss->sizeCipherSpecs = 0;
        }
        break;

      case SSL_ENABLE_SSL2:
        if (IS_DTLS(ss)) {
            if (on) {
                PORT_SetError(<API key>);
                rv = SECFailure; /* not allowed */
            }
            break;
        }
        ss->opt.enableSSL2 = on;
        if (on) {
            /* SSL2 requires the v2-compatible client hello. */
            ss->opt.v2CompatibleHello = on;
        }
        ss->preferredCipher = NULL;
        if (ss->cipherSpecs) {
            PORT_Free(ss->cipherSpecs);
            ss->cipherSpecs = NULL;
            ss->sizeCipherSpecs = 0;
        }
        break;

      case SSL_NO_CACHE:
        ss->opt.noCache = on;
        break;

      case SSL_ENABLE_FDX:
        /* Full-duplex operation needs the per-direction locks. */
        if (on && ss->opt.noLocks) {
            PORT_SetError(<API key>);
            rv = SECFailure;
        }
        ss->opt.fdx = on;
        break;

      case <API key>:
        if (IS_DTLS(ss)) {
            if (on) {
                PORT_SetError(<API key>);
                rv = SECFailure; /* not allowed */
            }
            break;
        }
        ss->opt.v2CompatibleHello = on;
        if (!on) {
            /* Without the v2-compatible hello, SSL2 itself is unusable. */
            ss->opt.enableSSL2 = on;
        }
        break;

      case <API key>:
        ss->opt.detectRollBack = on;
        break;

      case SSL_NO_STEP_DOWN:
        ss->opt.noStepDown = on;
        if (on)
            <API key>(fd);
        break;

      case SSL_BYPASS_PKCS11:
        /* The bypass setting cannot change once the handshake has begun. */
        if (ss->handshakeBegun) {
            PORT_SetError(<API key>);
            rv = SECFailure;
        } else {
            if (PR_FALSE != on) {
                if (PR_SUCCESS == SSL_BypassSetup() ) {
#ifdef NO_PKCS11_BYPASS
                    ss->opt.bypassPKCS11 = PR_FALSE;
#else
                    ss->opt.bypassPKCS11 = on;
#endif
                } else {
                    rv = SECFailure;
                }
            } else {
                ss->opt.bypassPKCS11 = PR_FALSE;
            }
        }
        break;

      case SSL_NO_LOCKS:
        if (on && ss->opt.fdx) {
            PORT_SetError(<API key>);
            rv = SECFailure;
        }
        if (on && ssl_force_locks)
            on = PR_FALSE;      /* silent override */
        ss->opt.noLocks = on;
        if (on) {
            locksEverDisabled = PR_TRUE;
            strcpy(lockStatus + LOCKSTATUS_OFFSET, "DISABLED.");
        } else if (!holdingLocks) {
            /* Re-enabling locks on a lock-free socket: create them now;
            ** on failure, fall back to lock-free operation. */
            rv = ssl_MakeLocks(ss);
            if (rv != SECSuccess) {
                ss->opt.noLocks = PR_TRUE;
            }
        }
        break;

      case <API key>:
        ss->opt.<API key> = on;
        break;

      case SSL_ENABLE_DEFLATE:
        ss->opt.enableDeflate = on;
        break;

      case <API key>:
        ss->opt.enableRenegotiation = on;
        break;

      case <API key>:
        ss->opt.<API key> = on;
        break;

      case <API key>:
        ss->opt.enableFalseStart = on;
        break;

      case SSL_CBC_RANDOM_IV:
        ss->opt.cbcRandomIV = on;
        break;

      case <API key>:
        ss->opt.enableOCSPStapling = on;
        break;

      case SSL_ENABLE_NPN:
        ss->opt.enableNPN = on;
        break;

      case SSL_ENABLE_ALPN:
        ss->opt.enableALPN = on;
        break;

      default:
        PORT_SetError(<API key>);
        rv = SECFailure;
    }

    /* We can't use the macros for releasing the locks here,
     * because ss->opt.noLocks might have changed just above.
     * We must release these locks (monitors) here, if we acquired them above,
     * regardless of the current value of ss->opt.noLocks.
     */
    if (holdingLocks) {
        PZ_ExitMonitor((ss)->ssl3HandshakeLock);
        PZ_ExitMonitor((ss)->firstHandshakeLock);
    }

    return rv;
}

/* Read back one boolean option of a single SSL socket into *pOn.
** Holds the handshake monitors while reading ss->opt / ss->vrange.
*/
SECStatus
SSL_OptionGet(PRFileDesc *fd, PRInt32 which, PRBool *pOn)
{
    sslSocket *ss = ssl_FindSocket(fd);
    SECStatus rv = SECSuccess;
    PRBool on = PR_FALSE;

    if (!pOn) {
        PORT_SetError(<API key>);
        return SECFailure;
    }
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in Enable", SSL_GETPID(), fd));
        *pOn = PR_FALSE;
        return SECFailure;
    }

    <API key>(ss);
    <API key>(ss);

    switch (which) {
      case SSL_SOCKS:
        on = PR_FALSE;
        break;
      case SSL_SECURITY:
        on = ss->opt.useSecurity;
        break;
      case <API key>:
        on = ss->opt.requestCertificate;
        break;
      case <API key>:
        on = ss->opt.requireCertificate;
        break;
      case <API key>:
        on = ss->opt.handshakeAsClient;
        break;
      case <API key>:
        on = ss->opt.handshakeAsServer;
        break;
      case SSL_ENABLE_TLS:
        /* TLS is "enabled" when the range reaches at least TLS. */
        on = ss->vrange.max >= <API key>;
        break;
      case SSL_ENABLE_SSL3:
        /* SSL3 is "enabled" when the range starts at SSL3. */
        on = ss->vrange.min == <API key>;
        break;
      case SSL_ENABLE_SSL2:
        on = ss->opt.enableSSL2;
        break;
      case SSL_NO_CACHE:
        on = ss->opt.noCache;
        break;
      case SSL_ENABLE_FDX:
        on = ss->opt.fdx;
        break;
      case <API key>:
        on = ss->opt.v2CompatibleHello;
        break;
      case <API key>:
        on = ss->opt.detectRollBack;
        break;
      case SSL_NO_STEP_DOWN:
        on = ss->opt.noStepDown;
        break;
      case SSL_BYPASS_PKCS11:
        on = ss->opt.bypassPKCS11;
        break;
      case SSL_NO_LOCKS:
        on = ss->opt.noLocks;
        break;
      case <API key>:
        on = ss->opt.<API key>;
        break;
      case SSL_ENABLE_DEFLATE:
        on = ss->opt.enableDeflate;
        break;
      case <API key>:
        on = ss->opt.enableRenegotiation;
        break;
      case <API key>:
        on = ss->opt.<API key>;
        break;
      case <API key>:
        on = ss->opt.enableFalseStart;
        break;
      case SSL_CBC_RANDOM_IV:
        on = ss->opt.cbcRandomIV;
        break;
      case <API key>:
        on = ss->opt.enableOCSPStapling;
        break;
      case SSL_ENABLE_NPN:
        on = ss->opt.enableNPN;
        break;
      case SSL_ENABLE_ALPN:
        on = ss->opt.enableALPN;
        break;
      default:
        PORT_SetError(<API key>);
        rv = SECFailure;
    }

    <API key>(ss);
    <API key>(ss);

    *pOn = on;
    return rv;
}

/* Read back one process-wide default option value into *pOn.
** Reads ssl_defaults / the default version ranges; no lock is taken
** (see the XXX comment below).
*/
SECStatus
<API key>(PRInt32 which, PRBool *pOn)
{
    SECStatus rv = SECSuccess;
    PRBool on = PR_FALSE;

    if (!pOn) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    <API key>();

    switch (which) {
      case SSL_SOCKS:
        on = PR_FALSE;
        break;
      case SSL_SECURITY:
        on = ssl_defaults.useSecurity;
        break;
      case <API key>:
        on = ssl_defaults.requestCertificate;
        break;
      case <API key>:
        on = ssl_defaults.requireCertificate;
        break;
      case <API key>:
        on = ssl_defaults.handshakeAsClient;
        break;
      case <API key>:
        on = ssl_defaults.handshakeAsServer;
        break;
      case SSL_ENABLE_TLS:
        on = <API key>.max >= <API key>;
        break;
      case SSL_ENABLE_SSL3:
        on = <API key>.min == <API key>;
        break;
      case SSL_ENABLE_SSL2:
        on = ssl_defaults.enableSSL2;
        break;
      case SSL_NO_CACHE:
        on = ssl_defaults.noCache;
        break;
      case SSL_ENABLE_FDX:
        on = ssl_defaults.fdx;
        break;
      case <API key>:
        on = ssl_defaults.v2CompatibleHello;
        break;
      case <API key>:
        on = ssl_defaults.detectRollBack;
        break;
      case SSL_NO_STEP_DOWN:
        on = ssl_defaults.noStepDown;
        break;
      case SSL_BYPASS_PKCS11:
        on = ssl_defaults.bypassPKCS11;
        break;
      case SSL_NO_LOCKS:
        on = ssl_defaults.noLocks;
        break;
      case <API key>:
        on = ssl_defaults.<API key>;
        break;
      case SSL_ENABLE_DEFLATE:
        on = ssl_defaults.enableDeflate;
        break;
      case <API key>:
        on = ssl_defaults.enableRenegotiation;
        break;
      case <API key>:
        on = ssl_defaults.<API key>;
        break;
      case <API key>:
        on = ssl_defaults.enableFalseStart;
        break;
      case SSL_CBC_RANDOM_IV:
        on = ssl_defaults.cbcRandomIV;
        break;
      case <API key>:
        on = ssl_defaults.enableOCSPStapling;
        break;
      case SSL_ENABLE_NPN:
        on = ssl_defaults.enableNPN;
        break;
      case SSL_ENABLE_ALPN:
        on = ssl_defaults.enableALPN;
        break;
      default:
        PORT_SetError(<API key>);
        rv = SECFailure;
    }

    *pOn = on;
    return rv;
}

/* XXX Use Global Lock to protect this stuff.
*/
/* Deprecated public wrapper; forwards to the PRInt32 variant below. */
SECStatus
SSL_EnableDefault(int which, PRBool on)
{
    return <API key>(which, on);
}

/* Set the process-wide default value for one option; sockets created
** afterwards inherit these defaults.  Unlike SSL_OptionSet, no lock is
** held here (see the XXX comment above).
*/
SECStatus
<API key>(PRInt32 which, PRBool on)
{
    SECStatus status = ssl_Init();

    if (status != SECSuccess) {
        return status;
    }

    <API key>();

    switch (which) {
      case SSL_SOCKS:
        /* SOCKS support is gone; turning it on is an error. */
        ssl_defaults.useSocks = PR_FALSE;
        if (on) {
            PORT_SetError(<API key>);
            return SECFailure;
        }
        break;

      case SSL_SECURITY:
        ssl_defaults.useSecurity = on;
        break;

      case <API key>:
        ssl_defaults.requestCertificate = on;
        break;

      case <API key>:
        ssl_defaults.requireCertificate = on;
        break;

      case <API key>:
        /* Client and server handshake modes are mutually exclusive. */
        if ( ssl_defaults.handshakeAsServer && on ) {
            PORT_SetError(<API key>);
            return SECFailure;
        }
        ssl_defaults.handshakeAsClient = on;
        break;

      case <API key>:
        if ( ssl_defaults.handshakeAsClient && on ) {
            PORT_SetError(<API key>);
            return SECFailure;
        }
        ssl_defaults.handshakeAsServer = on;
        break;

      case SSL_ENABLE_TLS:
        ssl_EnableTLS(&<API key>, on);
        break;

      case SSL_ENABLE_SSL3:
        ssl_EnableSSL3(&<API key>, on);
        break;

      case SSL_ENABLE_SSL2:
        ssl_defaults.enableSSL2 = on;
        if (on) {
            /* SSL2 requires the v2-compatible client hello. */
            ssl_defaults.v2CompatibleHello = on;
        }
        break;

      case SSL_NO_CACHE:
        ssl_defaults.noCache = on;
        break;

      case SSL_ENABLE_FDX:
        /* Full-duplex operation needs the per-direction locks. */
        if (on && ssl_defaults.noLocks) {
            PORT_SetError(<API key>);
            return SECFailure;
        }
        ssl_defaults.fdx = on;
        break;

      case <API key>:
        ssl_defaults.v2CompatibleHello = on;
        if (!on) {
            /* Without the v2-compatible hello, SSL2 itself is unusable. */
            ssl_defaults.enableSSL2 = on;
        }
        break;

      case <API key>:
        ssl_defaults.detectRollBack = on;
        break;

      case SSL_NO_STEP_DOWN:
        ssl_defaults.noStepDown = on;
        if (on)
            <API key>();
        break;

      case SSL_BYPASS_PKCS11:
        if (PR_FALSE != on) {
            if (PR_SUCCESS == SSL_BypassSetup()) {
#ifdef NO_PKCS11_BYPASS
                ssl_defaults.bypassPKCS11 = PR_FALSE;
#else
                ssl_defaults.bypassPKCS11 = on;
#endif
            } else {
                return SECFailure;
            }
        } else {
            ssl_defaults.bypassPKCS11 = PR_FALSE;
        }
        break;

      case SSL_NO_LOCKS:
        if (on && ssl_defaults.fdx) {
            PORT_SetError(<API key>);
            return SECFailure;
        }
        if (on && ssl_force_locks)
            on = PR_FALSE;      /* silent override */
        ssl_defaults.noLocks = on;
        if (on) {
            locksEverDisabled = PR_TRUE;
            strcpy(lockStatus + LOCKSTATUS_OFFSET, "DISABLED.");
        }
        break;

      case <API key>:
        ssl_defaults.<API key> = on;
        break;

      case SSL_ENABLE_DEFLATE:
        ssl_defaults.enableDeflate = on;
        break;

      case <API key>:
        ssl_defaults.enableRenegotiation = on;
        break;

      case <API key>:
        ssl_defaults.<API key> = on;
        break;

      case <API key>:
        ssl_defaults.enableFalseStart = on;
        break;

      case SSL_CBC_RANDOM_IV:
        ssl_defaults.cbcRandomIV = on;
        break;

      case <API key>:
        ssl_defaults.enableOCSPStapling = on;
        break;

      case SSL_ENABLE_NPN:
        ssl_defaults.enableNPN = on;
        break;

      case SSL_ENABLE_ALPN:
        ssl_defaults.enableALPN = on;
        break;

      default:
        PORT_SetError(<API key>);
        return SECFailure;
    }
    return SECSuccess;
}

/* function tells us if the cipher suite is one that we no longer support. */
static PRBool
<API key>(PRInt32 suite)
{
    switch (suite) {
      case <API key>:
      case <API key>:
      case <API key>:
        return PR_TRUE;
      default:
        return PR_FALSE;
    }
}

/* Part of the public NSS API.
 * Since this is a global (not per-socket) setting, we cannot use the
 * HandshakeLock to protect this. Probably want a global lock.
*/
/* Deprecated public wrapper around SSL_CipherPolicySet.
** Maps the two old FIPS cipher-suite numbers onto their modern
** equivalents before delegating.
*/
SECStatus
SSL_SetPolicy(long which, int policy)
{
    if ((which & 0xfffe) == <API key>) {
        /* one of the two old FIPS ciphers */
        if (which == <API key>)
            which = <API key>;
        else if (which == <API key>)
            which = <API key>;
    }
    if (<API key>(which))
        return SECSuccess;
    return SSL_CipherPolicySet(which, policy);
}

/* Set the export/domestic policy for one cipher suite.
** Removed suites are silently accepted; SSL2 and SSL3/TLS suites are
** dispatched to their respective implementations.
*/
SECStatus
SSL_CipherPolicySet(PRInt32 which, PRInt32 policy)
{
    SECStatus rv = ssl_Init();

    if (rv != SECSuccess) {
        return rv;
    }

    if (<API key>(which)) {
        rv = SECSuccess;
    } else if (SSL_IS_SSL2_CIPHER(which)) {
        rv = ssl2_SetPolicy(which, policy);
    } else {
        rv = ssl3_SetPolicy((ssl3CipherSuite)which, policy);
    }
    return rv;
}

/* Read back the policy of one cipher suite into *oPolicy.
** Removed suites always report SSL_NOT_ALLOWED.
*/
SECStatus
SSL_CipherPolicyGet(PRInt32 which, PRInt32 *oPolicy)
{
    SECStatus rv;

    if (!oPolicy) {
        PORT_SetError(<API key>);
        return SECFailure;
    }
    if (<API key>(which)) {
        *oPolicy = SSL_NOT_ALLOWED;
        rv = SECSuccess;
    } else if (SSL_IS_SSL2_CIPHER(which)) {
        rv = ssl2_GetPolicy(which, oPolicy);
    } else {
        rv = ssl3_GetPolicy((ssl3CipherSuite)which, oPolicy);
    }
    return rv;
}

/* Part of the public NSS API.
 * Since this is a global (not per-socket) setting, we cannot use the
 * HandshakeLock to protect this. Probably want a global lock.
 * These changes have no effect on any sslSockets already created.
 */
/* Deprecated public wrapper; maps the two old FIPS suite numbers and
** forwards to the default-preference setter. */
SECStatus
SSL_EnableCipher(long which, PRBool enabled)
{
    if ((which & 0xfffe) == <API key>) {
        /* one of the two old FIPS ciphers */
        if (which == <API key>)
            which = <API key>;
        else if (which == <API key>)
            which = <API key>;
    }
    if (<API key>(which))
        return SECSuccess;
    return <API key>(which, enabled);
}

/* Set the process-wide default enable/disable preference for one cipher
** suite.  Fails when step-down is disabled and the suite would require
** step-down keys.
*/
SECStatus
<API key>(PRInt32 which, PRBool enabled)
{
    SECStatus rv = ssl_Init();

    if (rv != SECSuccess) {
        return rv;
    }

    if (<API key>(which))
        return SECSuccess;
    if (enabled && ssl_defaults.noStepDown && <API key>(which)) {
        PORT_SetError(<API key>);
        return SECFailure;
    }
    if (SSL_IS_SSL2_CIPHER(which)) {
        rv = <API key>(which, enabled);
    } else {
        rv = <API key>((ssl3CipherSuite)which, enabled);
    }
    return rv;
}

/* Read back the process-wide default preference for one cipher suite.
** Removed suites always report disabled.
*/
SECStatus
<API key>(PRInt32 which, PRBool *enabled)
{
    SECStatus rv;

    if (!enabled) {
        PORT_SetError(<API key>);
        return SECFailure;
    }
    if (<API key>(which)) {
        *enabled = PR_FALSE;
        rv = SECSuccess;
    } else if (SSL_IS_SSL2_CIPHER(which)) {
        rv = <API key>(which, enabled);
    } else {
        rv = <API key>((ssl3CipherSuite)which, enabled);
    }
    return rv;
}

/* Per-socket variant of the default-preference setter above. */
SECStatus
SSL_CipherPrefSet(PRFileDesc *fd, PRInt32 which, PRBool enabled)
{
    SECStatus rv;
    sslSocket *ss = ssl_FindSocket(fd);

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in CipherPrefSet", SSL_GETPID(), fd));
        return SECFailure;
    }
    if (<API key>(which))
        return SECSuccess;
    if (enabled && ss->opt.noStepDown && <API key>(which)) {
        PORT_SetError(<API key>);
        return SECFailure;
    }
    if (SSL_IS_SSL2_CIPHER(which)) {
        rv = ssl2_CipherPrefSet(ss, which, enabled);
    } else {
        rv = ssl3_CipherPrefSet(ss, (ssl3CipherSuite)which, enabled);
    }
    return rv;
}

/* Per-socket variant of the default-preference getter above. */
SECStatus
SSL_CipherPrefGet(PRFileDesc *fd, PRInt32 which, PRBool *enabled)
{
    SECStatus rv;
    sslSocket *ss = ssl_FindSocket(fd);

    if (!enabled) {
        PORT_SetError(<API key>);
        return SECFailure;
    }
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in CipherPrefGet", SSL_GETPID(), fd));
        *enabled = PR_FALSE;
        return SECFailure;
    }
    if (<API key>(which)) {
        *enabled = PR_FALSE;
        rv = SECSuccess;
    } else if (SSL_IS_SSL2_CIPHER(which)) {
        rv = ssl2_CipherPrefGet(ss, which, enabled);
    } else {
        rv = ssl3_CipherPrefGet(ss, (ssl3CipherSuite)which, enabled);
    }
    return rv;
}

/* Allow every suite in the implemented-cipher list; stops at the first
** failure and returns that status.
*/
SECStatus
<API key>(void)
{
    SECStatus status = SECSuccess;
    const PRUint16 *cipher;

    for (cipher = <API key>; *cipher != 0; ++cipher) {
        status = SSL_SetPolicy(*cipher, SSL_ALLOWED);
        if (status != SECSuccess)
            break;
    }
    return status;
}

/* Historical export policy: now identical to the domestic policy. */
SECStatus
NSS_SetExportPolicy(void)
{
    return <API key>();
}

/* Historical France policy: now identical to the domestic policy. */
SECStatus
NSS_SetFrancePolicy(void)
{
    return <API key>();
}

/* LOCKS ??? XXX */

/* Wrap fd in the SSL I/O layer, cloning configuration from the model
** socket (or defaults when model is NULL).  Returns fd on success.
*/
static PRFileDesc *
ssl_ImportFD(PRFileDesc *model, PRFileDesc *fd, SSLProtocolVariant variant)
{
    sslSocket * ns = NULL;
    PRStatus rv;
    PRNetAddr addr;
    SECStatus status = ssl_Init();

    if (status != SECSuccess) {
        return NULL;
    }

    if (model == NULL) {
        /* Just create a default socket if we're given NULL for the model */
        ns = ssl_NewSocket((PRBool)(!ssl_defaults.noLocks), variant);
    } else {
        sslSocket * ss = ssl_FindSocket(model);
        /* The model must exist and match the requested variant. */
        if (ss == NULL || ss->protocolVariant != variant) {
            SSL_DBG(("%d: SSL[%d]: bad model socket in ssl_ImportFD",
                     SSL_GETPID(), model));
            return NULL;
        }
        ns = ssl_DupSocket(ss);
    }
    if (ns == NULL)
        return NULL;

    rv = ssl_PushIOLayer(ns, fd, PR_TOP_IO_LAYER);
    if (rv != PR_SUCCESS) {
        ssl_FreeSocket(ns);
        SET_ERROR_CODE
        return NULL;
    }
#if defined(DEBUG) || defined(FORCE_PR_ASSERT)
    {
        sslSocket * ss = ssl_FindSocket(fd);
        PORT_Assert(ss == ns);
    }
#endif
    ns->TCPconnected = (PR_SUCCESS == ssl_DefGetpeername(ns, &addr));
    return fd;
}

/* Public stream (TLS) import. */
PRFileDesc *
SSL_ImportFD(PRFileDesc *model, PRFileDesc *fd)
{
    return ssl_ImportFD(model, fd, ssl_variant_stream);
}

/* Public datagram (DTLS) import. */
PRFileDesc *
DTLS_ImportFD(PRFileDesc *model, PRFileDesc *fd)
{
    return ssl_ImportFD(model, fd, <API key>);
}

/* Register the application's next-protocol (NPN) selection callback. */
SECStatus
<API key>(PRFileDesc *fd, <API key> callback, void *arg)
{
    sslSocket *ss = ssl_FindSocket(fd);

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in <API key>", SSL_GETPID(), fd));
        return SECFailure;
    }

    <API key>(ss);
    ss->nextProtoCallback = callback;
    ss->nextProtoArg = arg;
    <API key>(ss);

    return SECSuccess;
}

/* <API key> is set as an NPN callback for the case when
 * <API key> is used.
 */
static SECStatus
<API key>(void *arg, PRFileDesc *fd,
          const unsigned char *protos, unsigned int protos_len,
          unsigned char *protoOut, unsigned int *protoOutLen,
          unsigned int protoMaxLen)
{
    unsigned int i, j;
    const unsigned char *result;
    sslSocket *ss = ssl_FindSocket(fd);

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in <API key>", SSL_GETPID(), fd));
        return SECFailure;
    }

    if (protos_len == 0) {
        /* The server supports the extension, but doesn't have any protocols
         * configured. In this case we request our favoured protocol. */
        goto pick_first;
    }

    /* For each protocol in server preference, see if we support it.
    ** Both lists are length-prefixed: entry = 1-byte len + bytes. */
    for (i = 0; i < protos_len; ) {
        for (j = 0; j < ss->opt.nextProtoNego.len; ) {
            if (protos[i] == ss->opt.nextProtoNego.data[j] &&
                PORT_Memcmp(&protos[i+1], &ss->opt.nextProtoNego.data[j+1],
                            protos[i]) == 0) {
                /* We found a match. */
                ss->ssl3.nextProtoState = <API key>;
                result = &protos[i];
                goto found;
            }
            j += 1 + (unsigned int)ss->opt.nextProtoNego.data[j];
        }
        i += 1 + (unsigned int)protos[i];
    }

pick_first:
    ss->ssl3.nextProtoState = <API key>;
    result = ss->opt.nextProtoNego.data;

found:
    if (protoMaxLen < result[0]) {
        PORT_SetError(<API key>);
        return SECFailure;
    }
    memcpy(protoOut, result + 1, result[0]);
    *protoOutLen = result[0];
    return SECSuccess;
}

/* Store the application's NPN protocol list (length-prefixed wire format)
** on the socket and install the default negotiation callback above.
*/
SECStatus
<API key>(PRFileDesc *fd, const unsigned char *data, unsigned int length)
{
    sslSocket *ss;
    SECStatus rv;
    SECItem dataItem = { siBuffer, (unsigned char *) data, length };

    ss = ssl_FindSocket(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in <API key>", SSL_GETPID(), fd));
        return SECFailure;
    }

    if (<API key>(data, length) != SECSuccess)
        return SECFailure;

    <API key>(ss);
    SECITEM_FreeItem(&ss->opt.nextProtoNego, PR_FALSE);
    rv = SECITEM_CopyItem(NULL, &ss->opt.nextProtoNego, &dataItem);
    <API key>(ss);

    if (rv != SECSuccess)
        return rv;

    return <API key>(fd, <API key>, NULL);
}

SECStatus
SSL_GetNextProto(PRFileDesc
                 *fd, SSLNextProtoState *state, unsigned char *buf,
                 unsigned int *bufLen, unsigned int bufLenMax)
{
    /* Report the negotiated next protocol (NPN/ALPN) and its state. */
    sslSocket *ss = ssl_FindSocket(fd);

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in SSL_GetNextProto",
                 SSL_GETPID(), fd));
        return SECFailure;
    }

    if (!state || !buf || !bufLen) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    *state = ss->ssl3.nextProtoState;

    if (ss->ssl3.nextProtoState != <API key> &&
        ss->ssl3.nextProto.data) {
        if (ss->ssl3.nextProto.len > bufLenMax) {
            PORT_SetError(<API key>);
            return SECFailure;
        }
        PORT_Memcpy(buf, ss->ssl3.nextProto.data, ss->ssl3.nextProto.len);
        *bufLen = ss->ssl3.nextProto.len;
    } else {
        *bufLen = 0;
    }

    return SECSuccess;
}

/* Configure the set of DTLS-SRTP profiles offered on this socket.
** Only profiles present in the implemented srtpCiphers list are kept;
** unknown profiles are logged and skipped.  DTLS sockets only.
*/
SECStatus
SSL_SetSRTPCiphers(PRFileDesc *fd,
                   const PRUint16 *ciphers,
                   unsigned int numCiphers)
{
    sslSocket *ss;
    unsigned int i;

    ss = ssl_FindSocket(fd);
    if (!ss || !IS_DTLS(ss)) {
        SSL_DBG(("%d: SSL[%d]: bad socket in SSL_SetSRTPCiphers",
                 SSL_GETPID(), fd));
        PORT_SetError(<API key>);
        return SECFailure;
    }

    if (numCiphers > <API key>) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    ss->ssl3.dtlsSRTPCipherCount = 0;
    for (i = 0; i < numCiphers; i++) {
        /* Keep only ciphers found in the implemented list. */
        const PRUint16 *srtpCipher = srtpCiphers;
        while (*srtpCipher) {
            if (ciphers[i] == *srtpCipher)
                break;
            srtpCipher++;
        }
        if (*srtpCipher) {
            ss->ssl3.dtlsSRTPCiphers[ss->ssl3.dtlsSRTPCipherCount++] =
                ciphers[i];
        } else {
            SSL_DBG(("%d: SSL[%d]: invalid or unimplemented SRTP cipher "
                     "suite specified: 0x%04hx",
                     SSL_GETPID(), fd, ciphers[i]));
        }
    }

    if (ss->ssl3.dtlsSRTPCipherCount == 0) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    return SECSuccess;
}

/* Report the DTLS-SRTP profile negotiated during the handshake. */
SECStatus
SSL_GetSRTPCipher(PRFileDesc *fd, PRUint16 *cipher)
{
    sslSocket * ss;

    ss = ssl_FindSocket(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in SSL_GetSRTPCipher",
                 SSL_GETPID(), fd));
        PORT_SetError(<API key>);
        return SECFailure;
    }

    if (!ss->ssl3.dtlsSRTPCipherSuite) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    *cipher = ss->ssl3.dtlsSRTPCipherSuite;
    return SECSuccess;
}

/* Copy the configuration of model onto an already-imported SSL fd:
** options, version range, cipher suites, SRTP profiles, server certs,
** key pairs, trust anchors, and callbacks.  Returns fd on success,
** NULL on failure (error code set).
*/
PRFileDesc *
SSL_ReconfigFD(PRFileDesc *model, PRFileDesc *fd)
{
    sslSocket * sm = NULL, *ss = NULL;
    int i;
    sslServerCerts * mc = NULL;
    sslServerCerts * sc = NULL;

    if (model == NULL) {
        PR_SetError(<API key>, 0);
        return NULL;
    }
    sm = ssl_FindSocket(model);
    if (sm == NULL) {
        SSL_DBG(("%d: SSL[%d]: bad model socket in ssl_ReconfigFD",
                 SSL_GETPID(), model));
        return NULL;
    }
    ss = ssl_FindSocket(fd);
    PORT_Assert(ss);
    if (ss == NULL) {
        PORT_SetError(<API key>);
        return NULL;
    }

    ss->opt = sm->opt;
    ss->vrange = sm->vrange;
    PORT_Memcpy(ss->cipherSuites, sm->cipherSuites, sizeof sm->cipherSuites);
    PORT_Memcpy(ss->ssl3.dtlsSRTPCiphers, sm->ssl3.dtlsSRTPCiphers,
                sizeof(PRUint16) * sm->ssl3.dtlsSRTPCipherCount);
    ss->ssl3.dtlsSRTPCipherCount = sm->ssl3.dtlsSRTPCipherCount;

    if (!ss->opt.useSecurity) {
        PORT_SetError(<API key>);
        return NULL;
    }
    /* This int should be SSLKEAType, but CC on Irix complains,
     * during the for loop.
     */
    for (i=kt_null; i < kt_kea_size; i++) {
        mc = &(sm->serverCerts[i]);
        sc = &(ss->serverCerts[i]);
        if (mc->serverCert && mc->serverCertChain) {
            if (sc->serverCert) {
                <API key>(sc->serverCert);
            }
            sc->serverCert = CERT_DupCertificate(mc->serverCert);
            if (sc->serverCertChain) {
                <API key>(sc->serverCertChain);
            }
            sc->serverCertChain = CERT_DupCertList(mc->serverCertChain);
            if (!sc->serverCertChain)
                goto loser;
            if (sm->certStatusArray[i]) {
                if (ss->certStatusArray[i]) {
                    SECITEM_FreeArray(ss->certStatusArray[i], PR_TRUE);
                    ss->certStatusArray[i] = NULL;
                }
                ss->certStatusArray[i] =
                    SECITEM_DupArray(NULL, sm->certStatusArray[i]);
                if (!ss->certStatusArray[i])
                    goto loser;
            }
        }
        if (mc->serverKeyPair) {
            if (sc->serverKeyPair) {
                ssl3_FreeKeyPair(sc->serverKeyPair);
            }
            sc->serverKeyPair = ssl3_GetKeyPairRef(mc->serverKeyPair);
            sc->serverKeyBits = mc->serverKeyBits;
        }
    }
    if (sm->stepDownKeyPair) {
        if (ss->stepDownKeyPair) {
            ssl3_FreeKeyPair(ss->stepDownKeyPair);
        }
        ss->stepDownKeyPair = ssl3_GetKeyPairRef(sm->stepDownKeyPair);
    }
    if (sm-><API key>) {
        if (ss-><API key>) {
            ssl3_FreeKeyPair(ss-><API key>);
        }
        ss-><API key> = ssl3_GetKeyPairRef(sm-><API key>);
    }
    /* copy trust anchor names */
    if (sm->ssl3.ca_list) {
        if (ss->ssl3.ca_list) {
            CERT_FreeDistNames(ss->ssl3.ca_list);
        }
        ss->ssl3.ca_list = CERT_DupDistNames(sm->ssl3.ca_list);
        if (!ss->ssl3.ca_list) {
            goto loser;
        }
    }

    /* Copy callbacks/args only when set on the model, so existing
    ** settings on fd are preserved where the model has none. */
    if (sm->authCertificate)
        ss->authCertificate = sm->authCertificate;
    if (sm->authCertificateArg)
        ss->authCertificateArg = sm->authCertificateArg;
    if (sm->getClientAuthData)
        ss->getClientAuthData = sm->getClientAuthData;
    if (sm-><API key>)
        ss-><API key> = sm-><API key>;
    if (sm->sniSocketConfig)
        ss->sniSocketConfig = sm->sniSocketConfig;
    if (sm->sniSocketConfigArg)
        ss->sniSocketConfigArg = sm->sniSocketConfigArg;
    if (sm->handleBadCert)
        ss->handleBadCert = sm->handleBadCert;
    if (sm->badCertArg)
        ss->badCertArg = sm->badCertArg;
    if (sm->handshakeCallback)
        ss->handshakeCallback = sm->handshakeCallback;
    if (sm-><API key>)
        ss-><API key> = sm-><API key>;
    if (sm->pkcs11PinArg)
        ss->pkcs11PinArg = sm->pkcs11PinArg;
    return fd;
loser:
    return NULL;
}

/* Report whether a single protocol version is supported for the given
** protocol variant (stream vs. datagram).
*/
PRBool
<API key>(SSLProtocolVariant protocolVariant, SSL3ProtocolVersion version)
{
    switch (protocolVariant) {
      case ssl_variant_stream:
        return (version >= <API key> && version <= <API key>);
      case <API key>:
        return (version >= <API key> && version <= <API key>);
      default:
        /* Can't get here */
        PORT_Assert(PR_FALSE);
        return PR_FALSE;
    }
}

/* Returns PR_TRUE if the given version range is valid and
** fully supported; otherwise, returns PR_FALSE.
*/
static PRBool
<API key>(SSLProtocolVariant protocolVariant, const SSLVersionRange *vrange)
{
    /* Non-null, ordered, and both endpoints supported for this variant. */
    return vrange
        && vrange->min <= vrange->max
        && <API key>(protocolVariant, vrange->min)
        && <API key>(protocolVariant, vrange->max);
}

/* Fill *vrange with the full range this library supports for the
** given protocol variant.
*/
SECStatus
<API key>(SSLProtocolVariant protocolVariant, SSLVersionRange *vrange)
{
    if (!vrange) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    switch (protocolVariant) {
      case ssl_variant_stream:
        vrange->min = <API key>;
        vrange->max = <API key>;
        break;
      case <API key>:
        vrange->min = <API key>;
        vrange->max = <API key>;
        break;
      default:
        PORT_SetError(<API key>);
        return SECFailure;
    }

    return SECSuccess;
}

/* Fill *vrange with the current process-wide default range for the
** given protocol variant.
*/
SECStatus
<API key>(SSLProtocolVariant protocolVariant, SSLVersionRange *vrange)
{
    if ((protocolVariant != ssl_variant_stream &&
         protocolVariant != <API key>) || !vrange) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    *vrange = *VERSIONS_DEFAULTS(protocolVariant);

    return SECSuccess;
}

/* Set the process-wide default range for the given protocol variant,
** after validating it.
*/
SECStatus
<API key>(SSLProtocolVariant protocolVariant, const SSLVersionRange *vrange)
{
    if (!<API key>(protocolVariant, vrange)) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    *VERSIONS_DEFAULTS(protocolVariant) = *vrange;

    return SECSuccess;
}

/* Read this socket's enabled version range into *vrange, under the
** handshake monitors.
*/
SECStatus
SSL_VersionRangeGet(PRFileDesc *fd, SSLVersionRange *vrange)
{
    sslSocket *ss = ssl_FindSocket(fd);

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in <API key>",
                 SSL_GETPID(), fd));
        return SECFailure;
    }

    if (!vrange) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    <API key>(ss);
    <API key>(ss);

    *vrange = ss->vrange;

    <API key>(ss);
    <API key>(ss);

    return SECSuccess;
}

/* Set this socket's enabled version range (validated against the
** socket's protocol variant), under the handshake monitors.
*/
SECStatus
SSL_VersionRangeSet(PRFileDesc *fd, const SSLVersionRange *vrange)
{
    sslSocket *ss = ssl_FindSocket(fd);

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in <API key>",
                 SSL_GETPID(), fd));
        return SECFailure;
    }

    if (!<API key>(ss->protocolVariant, vrange)) {
        PORT_SetError(<API key>);
        return SECFailure;
    }

    <API key>(ss);
    <API key>(ss);

    ss->vrange = *vrange;

    <API key>(ss);
    <API key>(ss);

    return SECSuccess;
}

/* Return the peer's stapled OCSP responses from the current session,
** or NULL (error code set) when there is no session.
*/
const SECItemArray *
<API key>(PRFileDesc *fd)
{
    sslSocket *ss = ssl_FindSocket(fd);

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in <API key>",
                 SSL_GETPID(), fd));
        return NULL;
    }

    if (!ss->sec.ci.sid) {
        PORT_SetError(<API key>);
        return NULL;
    }

    return &ss->sec.ci.sid->peerCertStatus;
}

/* The following functions are the TOP LEVEL SSL functions.
** They all get called through the NSPRIOMethods table below.
*/

/* accept() for the SSL layer: accepts the lower-layer connection, dups
** the listening socket's SSL state onto the new fd, and arms the
** appropriate first-handshake function.
*/
static PRFileDesc * PR_CALLBACK
ssl_Accept(PRFileDesc *fd, PRNetAddr *sockaddr, PRIntervalTime timeout)
{
    sslSocket *ss;
    sslSocket *ns = NULL;
    PRFileDesc *newfd = NULL;
    PRFileDesc *osfd;
    PRStatus status;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in accept", SSL_GETPID(), fd));
        return NULL;
    }

    /* IF this is a listen socket, there shouldn't be any I/O going on */
    SSL_LOCK_READER(ss);
    SSL_LOCK_WRITER(ss);
    <API key>(ss);
    <API key>(ss);
    ss->cTimeout = timeout;

    osfd = ss->fd->lower;

    /* First accept connection */
    newfd = osfd->methods->accept(osfd, sockaddr, timeout);
    if (newfd == NULL) {
        SSL_DBG(("%d: SSL[%d]: accept failed, errno=%d",
                 SSL_GETPID(), ss->fd, PORT_GetError()));
    } else {
        /* Create ssl module */
        ns = ssl_DupSocket(ss);
    }

    <API key>(ss);
    <API key>(ss);
    SSL_UNLOCK_WRITER(ss);
    SSL_UNLOCK_READER(ss);
    /* ss isn't used below here. */

    if (ns == NULL)
        goto loser;

    /* push ssl module onto the new socket */
    status = ssl_PushIOLayer(ns, newfd, PR_TOP_IO_LAYER);
    if (status != PR_SUCCESS)
        goto loser;

    /* Now start server connection handshake with client.
    ** Don't need locks here because nobody else has a reference to ns yet.
*/
    if ( ns->opt.useSecurity ) {
        if ( ns->opt.handshakeAsClient ) {
            ns->handshake = <API key>;
            /* NOTE(review): this assigns ss->handshaking rather than
            ** ns->handshaking, even though ns is the newly accepted socket
            ** and the comment above says "ss isn't used below here" —
            ** looks like a slip; confirm against upstream before changing. */
            ss->handshaking = <API key>;
        } else {
            ns->handshake = <API key>;
            ss->handshaking = <API key>;
        }
    }
    ns->TCPconnected = 1;
    return newfd;

loser:
    if (ns != NULL)
        ssl_FreeSocket(ns);
    if (newfd != NULL)
        PR_Close(newfd);
    return NULL;
}

/* connect() for the SSL layer: delegates to the socket's ops table
** while holding both direction locks.
*/
static PRStatus PR_CALLBACK
ssl_Connect(PRFileDesc *fd, const PRNetAddr *sockaddr, PRIntervalTime timeout)
{
    sslSocket *ss;
    PRStatus rv;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in connect", SSL_GETPID(), fd));
        return PR_FAILURE;
    }

    /* IF this is a listen socket, there shouldn't be any I/O going on */
    SSL_LOCK_READER(ss);
    SSL_LOCK_WRITER(ss);

    ss->cTimeout = timeout;
    rv = (PRStatus)(*ss->ops->connect)(ss, sockaddr);

    SSL_UNLOCK_WRITER(ss);
    SSL_UNLOCK_READER(ss);

    return rv;
}

/* bind() for the SSL layer. */
static PRStatus PR_CALLBACK
ssl_Bind(PRFileDesc *fd, const PRNetAddr *addr)
{
    sslSocket * ss = ssl_GetPrivate(fd);
    PRStatus rv;

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in bind", SSL_GETPID(), fd));
        return PR_FAILURE;
    }
    SSL_LOCK_READER(ss);
    SSL_LOCK_WRITER(ss);

    rv = (PRStatus)(*ss->ops->bind)(ss, addr);

    SSL_UNLOCK_WRITER(ss);
    SSL_UNLOCK_READER(ss);
    return rv;
}

/* listen() for the SSL layer. */
static PRStatus PR_CALLBACK
ssl_Listen(PRFileDesc *fd, PRIntn backlog)
{
    sslSocket * ss = ssl_GetPrivate(fd);
    PRStatus rv;

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in listen", SSL_GETPID(), fd));
        return PR_FAILURE;
    }
    SSL_LOCK_READER(ss);
    SSL_LOCK_WRITER(ss);

    rv = (PRStatus)(*ss->ops->listen)(ss, backlog);

    SSL_UNLOCK_WRITER(ss);
    SSL_UNLOCK_READER(ss);
    return rv;
}

/* shutdown() for the SSL layer: only the direction lock(s) implied by
** `how` are taken and released.
*/
static PRStatus PR_CALLBACK
ssl_Shutdown(PRFileDesc *fd, PRIntn how)
{
    sslSocket * ss = ssl_GetPrivate(fd);
    PRStatus rv;

    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in shutdown", SSL_GETPID(), fd));
        return PR_FAILURE;
    }
    if (how == PR_SHUTDOWN_RCV || how == PR_SHUTDOWN_BOTH) {
        SSL_LOCK_READER(ss);
    }
    if (how == PR_SHUTDOWN_SEND || how == PR_SHUTDOWN_BOTH) {
        SSL_LOCK_WRITER(ss);
    }

    rv = (PRStatus)(*ss->ops->shutdown)(ss, how);

    if (how == PR_SHUTDOWN_SEND || how == PR_SHUTDOWN_BOTH) {
        SSL_UNLOCK_WRITER(ss);
    }
    if (how == PR_SHUTDOWN_RCV || how == PR_SHUTDOWN_BOTH) {
        SSL_UNLOCK_READER(ss);
    }
    return rv;
}

/* close() for the SSL layer. */
static PRStatus PR_CALLBACK
ssl_Close(PRFileDesc *fd)
{
    sslSocket *ss;
    PRStatus rv;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in close", SSL_GETPID(), fd));
        return PR_FAILURE;
    }

    /* There must not be any I/O going on */
    SSL_LOCK_READER(ss);
    SSL_LOCK_WRITER(ss);

    /* By the time this function returns,
    ** ss is an invalid pointer, and the locks to which it points have
    ** been unlocked and freed.  So, this is the ONE PLACE in all of SSL
    ** where the LOCK calls and the corresponding UNLOCK calls are not in
    ** the same function scope.  The unlock calls are in ssl_FreeSocket().
    */
    rv = (PRStatus)(*ss->ops->close)(ss);

    return rv;
}

/* recv() for the SSL layer.  The write timeout is mirrored unless the
** socket is in full-duplex mode.
*/
static int PR_CALLBACK
ssl_Recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags,
         PRIntervalTime timeout)
{
    sslSocket *ss;
    int rv;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in recv", SSL_GETPID(), fd));
        return SECFailure;
    }
    SSL_LOCK_READER(ss);
    ss->rTimeout = timeout;
    if (!ss->opt.fdx)
        ss->wTimeout = timeout;
    rv = (*ss->ops->recv)(ss, (unsigned char*)buf, len, flags);
    SSL_UNLOCK_READER(ss);
    return rv;
}

/* send() for the SSL layer. */
static int PR_CALLBACK
ssl_Send(PRFileDesc *fd, const void *buf, PRInt32 len, PRIntn flags,
         PRIntervalTime timeout)
{
    sslSocket *ss;
    int rv;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in send", SSL_GETPID(), fd));
        return SECFailure;
    }
    SSL_LOCK_WRITER(ss);
    ss->wTimeout = timeout;
    if (!ss->opt.fdx)
        ss->rTimeout = timeout;
    rv = (*ss->ops->send)(ss, (const unsigned char*)buf, len, flags);
    SSL_UNLOCK_WRITER(ss);
    return rv;
}

/* read() for the SSL layer (recv with the default timeout). */
static int PR_CALLBACK
ssl_Read(PRFileDesc *fd, void *buf, PRInt32 len)
{
    sslSocket *ss;
    int rv;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in read", SSL_GETPID(), fd));
        return SECFailure;
    }
    SSL_LOCK_READER(ss);
    ss->rTimeout = <API key>;
    if (!ss->opt.fdx)
        ss->wTimeout = <API key>;
    rv = (*ss->ops->read)(ss, (unsigned char*)buf, len);
    SSL_UNLOCK_READER(ss);
    return rv;
}

/* write() for the SSL layer (send with the default timeout). */
static int PR_CALLBACK
ssl_Write(PRFileDesc *fd, const void *buf, PRInt32 len)
{
    sslSocket *ss;
    int rv;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in write", SSL_GETPID(), fd));
        return SECFailure;
    }
    SSL_LOCK_WRITER(ss);
    ss->wTimeout = <API key>;
    if (!ss->opt.fdx)
        ss->rTimeout = <API key>;
    rv = (*ss->ops->write)(ss, (const unsigned char*)buf, len);
    SSL_UNLOCK_WRITER(ss);
    return rv;
}

/* getpeername() for the SSL layer. */
static PRStatus PR_CALLBACK
ssl_GetPeerName(PRFileDesc *fd, PRNetAddr *addr)
{
    sslSocket *ss;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in getpeername", SSL_GETPID(), fd));
        return PR_FAILURE;
    }
    return (PRStatus)(*ss->ops->getpeername)(ss, addr);
}

/* Cache the peer's address and port on ss->sec.ci by querying the
** lower layer; supports IPv4 (mapped) and IPv6 peers.
*/
SECStatus
ssl_GetPeerInfo(sslSocket *ss)
{
    PRFileDesc *      osfd;
    int               rv;
    PRNetAddr         sin;

    osfd = ss->fd->lower;

    PORT_Memset(&sin, 0, sizeof(sin));
    rv = osfd->methods->getpeername(osfd, &sin);
    if (rv < 0) {
        return SECFailure;
    }
    ss->TCPconnected = 1;
    if (sin.inet.family == PR_AF_INET) {
        <API key>(sin.inet.ip, &ss->sec.ci.peer);
        ss->sec.ci.port = sin.inet.port;
    } else if (sin.ipv6.family == PR_AF_INET6) {
        ss->sec.ci.peer = sin.ipv6.ip;
        ss->sec.ci.port = sin.ipv6.port;
    } else {
        PORT_SetError(<API key>);
        return SECFailure;
    }
    return SECSuccess;
}

/* getsockname() for the SSL layer. */
static PRStatus PR_CALLBACK
ssl_GetSockName(PRFileDesc *fd, PRNetAddr *name)
{
    sslSocket *ss;

    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in getsockname", SSL_GETPID(), fd));
        return PR_FAILURE;
    }
    return (PRStatus)(*ss->ops->getsockname)(ss, name);
}

/* Install the stapled OCSP responses this server should send for the
** certificate of key-exchange type `kea`.  NULL clears the entry.
*/
SECStatus
<API key>(PRFileDesc *fd, const SECItemArray *responses,
          SSLKEAType kea)
{
    sslSocket *ss;

    ss = ssl_FindSocket(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in <API key>",
                 SSL_GETPID(), fd));
        return SECFailure;
    }

    if ( kea <= 0 || kea >= kt_kea_size) {
        SSL_DBG(("%d: SSL[%d]: invalid key in <API key>",
                 SSL_GETPID(), fd));
        return SECFailure;
    }

    if
       (ss->certStatusArray[kea]) {
        /* Replace any previously installed responses for this slot. */
        SECITEM_FreeArray(ss->certStatusArray[kea], PR_TRUE);
        ss->certStatusArray[kea] = NULL;
    }
    if (responses) {
        ss->certStatusArray[kea] = SECITEM_DupArray(NULL, responses);
    }
    return (ss->certStatusArray[kea] || !responses) ? SECSuccess : SECFailure;
}

/* Set (or clear, with NULL) the peer-ID string used to key this
** socket's session-cache entries.
*/
SECStatus
SSL_SetSockPeerID(PRFileDesc *fd, const char *peerID)
{
    sslSocket *ss;

    ss = ssl_FindSocket(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in SSL_SetSockPeerID",
                 SSL_GETPID(), fd));
        return SECFailure;
    }

    if (ss->peerID) {
        PORT_Free(ss->peerID);
        ss->peerID = NULL;
    }
    if (peerID)
        ss->peerID = PORT_Strdup(peerID);
    return (ss->peerID || !peerID) ? SECSuccess : SECFailure;
}

#define PR_POLL_RW (PR_POLL_WRITE | PR_POLL_READ)

/* poll() for the SSL layer.  During the first handshake the requested
** read/write flags are remapped to whatever the handshake actually
** needs next, then the (possibly adjusted) flags are forwarded to the
** lower layer.
*/
static PRInt16 PR_CALLBACK
ssl_Poll(PRFileDesc *fd, PRInt16 how_flags, PRInt16 *p_out_flags)
{
    sslSocket *ss;
    PRInt16 new_flags = how_flags;	/* should select on these flags. */
    PRNetAddr addr;

    *p_out_flags = 0;
    ss = ssl_GetPrivate(fd);
    if (!ss) {
        SSL_DBG(("%d: SSL[%d]: bad socket in SSL_Poll",
                 SSL_GETPID(), fd));
        return 0;	/* don't poll on this socket */
    }

    if (ss->opt.useSecurity &&
        ss->handshaking != <API key> &&
        !ss->firstHsDone &&
        (how_flags & PR_POLL_RW)) {
        if (!ss->TCPconnected) {
            ss->TCPconnected = (PR_SUCCESS == ssl_DefGetpeername(ss, &addr));
        }
        /* If it's not connected, then presumably the application is polling
        ** on read or write appropriately, so don't change it.
        */
        if (ss->TCPconnected) {
            if (!ss->handshakeBegun) {
                /* If the handshake has not begun, poll on read or write
                ** based on the local application's role in the handshake,
                ** not based on what the application requested.
                */
                new_flags &= ~PR_POLL_RW;
                if (ss->handshaking == <API key>) {
                    new_flags |= PR_POLL_WRITE;
                } else { /* handshaking as server */
                    new_flags |= PR_POLL_READ;
                }
            } else
            /* First handshake is in progress */
            if (ss->lastWriteBlocked) {
                if (new_flags & PR_POLL_READ) {
                    /* The caller is waiting for data to be received,
                    ** but the initial handshake is blocked on write, or the
                    ** client's first handshake record has not been written.
                    ** The code should select on write, not read.
                    */
                    new_flags ^=  PR_POLL_READ;	 /* don't select on read.  */
                    new_flags |=  PR_POLL_WRITE; /* do    select on write. */
                }
            } else if (new_flags & PR_POLL_WRITE) {
                /* The caller is trying to write, but the handshake is
                ** blocked waiting for data to read, and the first
                ** handshake has been sent.  So do NOT poll on write
                ** unless we did false start.
                */
                if (!(ss->version >= <API key> &&
                      ss->ssl3.hs.canFalseStart)) {
                    new_flags ^= PR_POLL_WRITE;	/* don't select on write. */
                }
                new_flags |= PR_POLL_READ;	/* do    select on read. */
            }
        }
    } else if ((new_flags & PR_POLL_READ) && (SSL_DataPending(fd) > 0)) {
        *p_out_flags = PR_POLL_READ;	/* it's ready already. */
        return new_flags;
    } else if ((ss->lastWriteBlocked) && (how_flags & PR_POLL_READ) &&
               (ss->pendingBuf.len != 0)) { /* write data waiting to be sent */
        new_flags |= PR_POLL_WRITE;	/* also select on write. */
    }

    if (ss->version >= <API key> && ss->ssl3.hs.restartTarget != NULL) {
        /* Read and write will block until the asynchronous callback completes
         * (e.g. until SSL_<API key> is called), so don't tell
         * the caller to poll the socket unless there is pending write data.
         */
        if (ss->lastWriteBlocked && ss->pendingBuf.len != 0) {
            /* Ignore any newly-received data on the socket, but do wait for
             * the socket to become writable again. Here, it is OK for an error
             * to be detected, because our logic for sending pending write data
             * will allow us to report the error to the caller without the risk
             * of the application spinning.
             */
            new_flags &= (PR_POLL_WRITE | PR_POLL_EXCEPT);
        } else {
            /* Unfortunately, clearing new_flags will make it impossible for
             * the application to detect errors that it would otherwise be
             * able to detect with PR_POLL_EXCEPT, until the asynchronous
             * callback completes. However, we must clear all the flags to
             * prevent the application from spinning (alternating between
             * calling PR_Poll that would return PR_POLL_EXCEPT, and send/recv
             * which won't actually report the I/O error while we are waiting
             * for the asynchronous callback to complete).
             */
            new_flags = 0;
        }
    }

    if (new_flags && (fd->lower->methods->poll != NULL)) {
        PRInt16    lower_out_flags = 0;
        PRInt16    lower_new_flags;

        lower_new_flags = fd->lower->methods->poll(fd->lower, new_flags,
                                                   &lower_out_flags);
        if ((lower_new_flags & lower_out_flags) &&
            (how_flags != new_flags)) {
            /* The handshake remapped the flags: translate the lower
            ** layer's readiness back into the caller's terms. */
            PRInt16 out_flags = lower_out_flags & ~PR_POLL_RW;
            if (lower_out_flags & PR_POLL_READ)
                out_flags |= PR_POLL_WRITE;
            if (lower_out_flags & PR_POLL_WRITE)
                out_flags |= PR_POLL_READ;
            *p_out_flags = out_flags;
            new_flags = how_flags;
        } else {
            *p_out_flags = lower_out_flags;
            new_flags = lower_new_flags;
        }
    }

    return new_flags;
}

/* transmitfile() for the SSL layer: repackage as a sendfile call on
** the same (SSL) descriptor so the file body is encrypted too.
*/
static PRInt32 PR_CALLBACK
ssl_TransmitFile(PRFileDesc *sd, PRFileDesc *fd,
                 const void *headers, PRInt32 hlen,
                 PRTransmitFileFlags flags, PRIntervalTime timeout)
{
    PRSendFileData sfd;

    sfd.fd = fd;
    sfd.file_offset = 0;
    sfd.file_nbytes = 0;
    sfd.header = headers;
    sfd.hlen = hlen;
    sfd.trailer = NULL;
    sfd.tlen = 0;

    return sd->methods->sendfile(sd, &sfd, flags, timeout);
}

/* Report whether fd is a blocking socket; treats option-query failure
** as non-blocking.
*/
PRBool
ssl_FdIsBlocking(PRFileDesc *fd)
{
    PRSocketOptionData opt;
    PRStatus status;

    opt.option             = <API key>;
    opt.value.non_blocking = PR_FALSE;
    status = PR_GetSocketOption(fd, &opt);
    if (status != PR_SUCCESS)
        return PR_FALSE;
    return (PRBool)!opt.value.non_blocking;
}

/* Convenience wrapper for the sslSocket form. */
PRBool
<API key>(sslSocket *ss)
{
    return ssl_FdIsBlocking(ss->fd);
}

/* Tunables for ssl_WriteV's gather strategy (see below). */
PRInt32  sslFirstBufSize = 8 * 1024;
PRInt32  sslCopyLimit    = 1024;

static PRInt32 PR_CALLBACK
ssl_WriteV(PRFileDesc *fd, const PRIOVec *iov,
PRInt32 vectors, PRIntervalTime timeout) { PRInt32 i; PRInt32 bufLen; PRInt32 left; PRInt32 rv; PRInt32 sent = 0; const PRInt32 first_len = sslFirstBufSize; const PRInt32 limit = sslCopyLimit; PRBool blocking; PRIOVec myIov = { 0, 0 }; char buf[MAX_FRAGMENT_LENGTH]; if (vectors < 0) { PORT_SetError(<API key>); return -1; } if (vectors > <API key>) { PORT_SetError(<API key>); return -1; } for (i = 0; i < vectors; i++) { if (iov[i].iov_len < 0) { PORT_SetError(<API key>); return -1; } } blocking = ssl_FdIsBlocking(fd); #define K16 sizeof(buf) #define KILL_VECTORS while (vectors && !iov->iov_len) { ++iov; --vectors; } #define GET_VECTOR do { myIov = *iov++; --vectors; KILL_VECTORS } while (0) #define HANDLE_ERR(rv, len) \ if (rv != len) { \ if (rv < 0) { \ if (!blocking \ && (PR_GetError() == <API key>) \ && (sent > 0)) { \ return sent; \ } else { \ return -1; \ } \ } \ /* Only a nonblocking socket can have partial sends */ \ PR_ASSERT(!blocking); \ return sent + rv; \ } #define SEND(bfr, len) \ do { \ rv = ssl_Send(fd, bfr, len, 0, timeout); \ HANDLE_ERR(rv, len) \ sent += len; \ } while (0) /* Make sure the first write is at least 8 KB, if possible. 
*/ KILL_VECTORS if (!vectors) return ssl_Send(fd, 0, 0, 0, timeout); GET_VECTOR; if (!vectors) { return ssl_Send(fd, myIov.iov_base, myIov.iov_len, 0, timeout); } if (myIov.iov_len < first_len) { PORT_Memcpy(buf, myIov.iov_base, myIov.iov_len); bufLen = myIov.iov_len; left = first_len - bufLen; while (vectors && left) { int toCopy; GET_VECTOR; toCopy = PR_MIN(left, myIov.iov_len); PORT_Memcpy(buf + bufLen, myIov.iov_base, toCopy); bufLen += toCopy; left -= toCopy; myIov.iov_base += toCopy; myIov.iov_len -= toCopy; } SEND( buf, bufLen ); } while (vectors || myIov.iov_len) { PRInt32 addLen; if (!myIov.iov_len) { GET_VECTOR; } while (myIov.iov_len >= K16) { SEND(myIov.iov_base, K16); myIov.iov_base += K16; myIov.iov_len -= K16; } if (!myIov.iov_len) continue; if (!vectors || myIov.iov_len > limit) { addLen = 0; } else if ((addLen = iov->iov_len % K16) + myIov.iov_len <= limit) { /* Addlen is already computed. */; } else if (vectors > 1 && iov[1].iov_len % K16 + addLen + myIov.iov_len <= 2 * limit) { addLen = limit - myIov.iov_len; } else addLen = 0; if (!addLen) { SEND( myIov.iov_base, myIov.iov_len ); myIov.iov_len = 0; continue; } PORT_Memcpy(buf, myIov.iov_base, myIov.iov_len); bufLen = myIov.iov_len; do { GET_VECTOR; PORT_Memcpy(buf + bufLen, myIov.iov_base, addLen); myIov.iov_base += addLen; myIov.iov_len -= addLen; bufLen += addLen; left = PR_MIN( limit, K16 - bufLen); if (!vectors /* no more left */ || myIov.iov_len > 0 /* we didn't use that one all up */ || bufLen >= K16 /* it's full. */ ) { addLen = 0; } else if ((addLen = iov->iov_len % K16) <= left) { /* Addlen is already computed. */; } else if (vectors > 1 && iov[1].iov_len % K16 + addLen <= left + limit) { addLen = left; } else addLen = 0; } while (addLen); SEND( buf, bufLen ); } return sent; } /* * These functions aren't implemented. 
*/ static PRInt32 PR_CALLBACK ssl_Available(PRFileDesc *fd) { PORT_Assert(0); PR_SetError(<API key>, 0); return SECFailure; } static PRInt64 PR_CALLBACK ssl_Available64(PRFileDesc *fd) { PRInt64 res; PORT_Assert(0); PR_SetError(<API key>, 0); LL_I2L(res, -1L); return res; } static PRStatus PR_CALLBACK ssl_FSync(PRFileDesc *fd) { PORT_Assert(0); PR_SetError(<API key>, 0); return PR_FAILURE; } static PRInt32 PR_CALLBACK ssl_Seek(PRFileDesc *fd, PRInt32 offset, PRSeekWhence how) { PORT_Assert(0); PR_SetError(<API key>, 0); return SECFailure; } static PRInt64 PR_CALLBACK ssl_Seek64(PRFileDesc *fd, PRInt64 offset, PRSeekWhence how) { PRInt64 res; PORT_Assert(0); PR_SetError(<API key>, 0); LL_I2L(res, -1L); return res; } static PRStatus PR_CALLBACK ssl_FileInfo(PRFileDesc *fd, PRFileInfo *info) { PORT_Assert(0); PR_SetError(<API key>, 0); return PR_FAILURE; } static PRStatus PR_CALLBACK ssl_FileInfo64(PRFileDesc *fd, PRFileInfo64 *info) { PORT_Assert(0); PR_SetError(<API key>, 0); return PR_FAILURE; } static PRInt32 PR_CALLBACK ssl_RecvFrom(PRFileDesc *fd, void *buf, PRInt32 amount, PRIntn flags, PRNetAddr *addr, PRIntervalTime timeout) { PORT_Assert(0); PR_SetError(<API key>, 0); return SECFailure; } static PRInt32 PR_CALLBACK ssl_SendTo(PRFileDesc *fd, const void *buf, PRInt32 amount, PRIntn flags, const PRNetAddr *addr, PRIntervalTime timeout) { PORT_Assert(0); PR_SetError(<API key>, 0); return SECFailure; } static const PRIOMethods ssl_methods = { PR_DESC_LAYERED, ssl_Close, /* close */ ssl_Read, /* read */ ssl_Write, /* write */ ssl_Available, /* available */ ssl_Available64, /* available64 */ ssl_FSync, /* fsync */ ssl_Seek, /* seek */ ssl_Seek64, /* seek64 */ ssl_FileInfo, /* fileInfo */ ssl_FileInfo64, /* fileInfo64 */ ssl_WriteV, /* writev */ ssl_Connect, /* connect */ ssl_Accept, /* accept */ ssl_Bind, /* bind */ ssl_Listen, /* listen */ ssl_Shutdown, /* shutdown */ ssl_Recv, /* recv */ ssl_Send, /* send */ ssl_RecvFrom, /* recvfrom */ ssl_SendTo, /* sendto */ 
ssl_Poll, /* poll */ <API key>, /* acceptread */ ssl_TransmitFile, /* transmitfile */ ssl_GetSockName, /* getsockname */ ssl_GetPeerName, /* getpeername */ NULL, /* getsockopt OBSOLETE */ NULL, /* setsockopt OBSOLETE */ NULL, /* getsocketoption */ NULL, /* setsocketoption */ PR_EmulateSendFile, /* Send a (partial) file with header/trailer*/ NULL, /* reserved for future use */ NULL, /* reserved for future use */ NULL, /* reserved for future use */ NULL, /* reserved for future use */ NULL /* reserved for future use */ }; static PRIOMethods combined_methods; static void ssl_SetupIOMethods(void) { PRIOMethods *new_methods = &combined_methods; const PRIOMethods *nspr_methods = <API key>(); const PRIOMethods *my_methods = &ssl_methods; *new_methods = *nspr_methods; new_methods->file_type = my_methods->file_type; new_methods->close = my_methods->close; new_methods->read = my_methods->read; new_methods->write = my_methods->write; new_methods->available = my_methods->available; new_methods->available64 = my_methods->available64; new_methods->fsync = my_methods->fsync; new_methods->seek = my_methods->seek; new_methods->seek64 = my_methods->seek64; new_methods->fileInfo = my_methods->fileInfo; new_methods->fileInfo64 = my_methods->fileInfo64; new_methods->writev = my_methods->writev; new_methods->connect = my_methods->connect; new_methods->accept = my_methods->accept; new_methods->bind = my_methods->bind; new_methods->listen = my_methods->listen; new_methods->shutdown = my_methods->shutdown; new_methods->recv = my_methods->recv; new_methods->send = my_methods->send; new_methods->recvfrom = my_methods->recvfrom; new_methods->sendto = my_methods->sendto; new_methods->poll = my_methods->poll; new_methods->acceptread = my_methods->acceptread; new_methods->transmitfile = my_methods->transmitfile; new_methods->getsockname = my_methods->getsockname; new_methods->getpeername = my_methods->getpeername; /* new_methods->getsocketoption = my_methods->getsocketoption; */ /* 
new_methods->setsocketoption = my_methods->setsocketoption; */ new_methods->sendfile = my_methods->sendfile; } static PRCallOnceType initIoLayerOnce; static PRStatus ssl_InitIOLayer(void) { ssl_layer_id = <API key>("SSL"); ssl_SetupIOMethods(); ssl_inited = PR_TRUE; return PR_SUCCESS; } static PRStatus ssl_PushIOLayer(sslSocket *ns, PRFileDesc *stack, PRDescIdentity id) { PRFileDesc *layer = NULL; PRStatus status; if (!ssl_inited) { status = PR_CallOnce(&initIoLayerOnce, &ssl_InitIOLayer); if (status != PR_SUCCESS) goto loser; } if (ns == NULL) goto loser; layer = <API key>(ssl_layer_id, &combined_methods); if (layer == NULL) goto loser; layer->secret = (PRFilePrivate *)ns; /* Here, "stack" points to the PRFileDesc on the top of the stack. ** "layer" points to a new FD that is to be inserted into the stack. ** If layer is being pushed onto the top of the stack, then ** PR_PushIOLayer switches the contents of stack and layer, and then ** puts stack on top of layer, so that after it is done, the top of ** stack is the same "stack" as it was before, and layer is now the ** FD for the former top of stack. ** After this call, stack always points to the top PRFD on the stack. ** If this function fails, the contents of stack and layer are as ** they were before the call. */ status = PR_PushIOLayer(stack, id, layer); if (status != PR_SUCCESS) goto loser; ns->fd = (id == PR_TOP_IO_LAYER) ? stack : layer; return PR_SUCCESS; loser: if (layer) { layer->dtor(layer); /* free layer */ } return PR_FAILURE; } /* if this fails, caller must destroy socket. 
*/ static SECStatus ssl_MakeLocks(sslSocket *ss) { ss->firstHandshakeLock = PZ_NewMonitor(nssILockSSL); if (!ss->firstHandshakeLock) goto loser; ss->ssl3HandshakeLock = PZ_NewMonitor(nssILockSSL); if (!ss->ssl3HandshakeLock) goto loser; ss->specLock = NSSRWLock_New(SSL_LOCK_RANK_SPEC, NULL); if (!ss->specLock) goto loser; ss->recvBufLock = PZ_NewMonitor(nssILockSSL); if (!ss->recvBufLock) goto loser; ss->xmitBufLock = PZ_NewMonitor(nssILockSSL); if (!ss->xmitBufLock) goto loser; ss->writerThread = NULL; if (ssl_lock_readers) { ss->recvLock = PZ_NewLock(nssILockSSL); if (!ss->recvLock) goto loser; ss->sendLock = PZ_NewLock(nssILockSSL); if (!ss->sendLock) goto loser; } return SECSuccess; loser: ssl_DestroyLocks(ss); return SECFailure; } #if defined(XP_UNIX) || defined(XP_WIN32) || defined(XP_BEOS) #define NSS_HAVE_GETENV 1 #endif #define LOWER(x) (x | 0x20) /* cheap ToLower function ignores LOCALE */ static void <API key>(void) { #if defined( NSS_HAVE_GETENV ) static int firsttime = 1; if (firsttime) { char * ev; firsttime = 0; #ifdef DEBUG ev = getenv("SSLDEBUGFILE"); if (ev && ev[0]) { ssl_trace_iob = fopen(ev, "w"); } if (!ssl_trace_iob) { ssl_trace_iob = stderr; } #ifdef TRACE ev = getenv("SSLTRACE"); if (ev && ev[0]) { ssl_trace = atoi(ev); SSL_TRACE(("SSL: tracing set to %d", ssl_trace)); } #endif /* TRACE */ ev = getenv("SSLDEBUG"); if (ev && ev[0]) { ssl_debug = atoi(ev); SSL_TRACE(("SSL: debugging set to %d", ssl_debug)); } #endif /* DEBUG */ ev = getenv("SSLKEYLOGFILE"); if (ev && ev[0]) { ssl_keylog_iob = fopen(ev, "a"); if (!ssl_keylog_iob) { SSL_TRACE(("SSL: failed to open key log file")); } else { if (ftell(ssl_keylog_iob) == 0) { fputs("# SSL/TLS secrets log file, generated by NSS\n", ssl_keylog_iob); } SSL_TRACE(("SSL: logging SSL/TLS secrets to %s", ev)); } } #ifndef NO_PKCS11_BYPASS ev = getenv("SSLBYPASS"); if (ev && ev[0]) { ssl_defaults.bypassPKCS11 = (ev[0] == '1'); SSL_TRACE(("SSL: bypass default set to %d", \ ssl_defaults.bypassPKCS11)); } 
#endif /* NO_PKCS11_BYPASS */ ev = getenv("SSLFORCELOCKS"); if (ev && ev[0] == '1') { ssl_force_locks = PR_TRUE; ssl_defaults.noLocks = 0; strcpy(lockStatus + LOCKSTATUS_OFFSET, "FORCED. "); SSL_TRACE(("SSL: force_locks set to %d", ssl_force_locks)); } ev = getenv("<API key>"); if (ev) { if (ev[0] == '1' || LOWER(ev[0]) == 'u') ssl_defaults.enableRenegotiation = <API key>; else if (ev[0] == '0' || LOWER(ev[0]) == 'n') ssl_defaults.enableRenegotiation = <API key>; else if (ev[0] == '2' || LOWER(ev[0]) == 'r') ssl_defaults.enableRenegotiation = <API key>; else if (ev[0] == '3' || LOWER(ev[0]) == 't') ssl_defaults.enableRenegotiation = <API key>; SSL_TRACE(("SSL: enableRenegotiation set to %d", ssl_defaults.enableRenegotiation)); } ev = getenv("<API key>"); if (ev && ev[0] == '1') { ssl_defaults.<API key> = PR_TRUE; SSL_TRACE(("SSL: <API key> set to %d", PR_TRUE)); } ev = getenv("<API key>"); if (ev && ev[0] == '0') { ssl_defaults.cbcRandomIV = PR_FALSE; SSL_TRACE(("SSL: cbcRandomIV set to 0")); } } #endif /* NSS_HAVE_GETENV */ } /* ** Create a newsocket structure for a file descriptor. */ static sslSocket * ssl_NewSocket(PRBool makeLocks, SSLProtocolVariant protocolVariant) { sslSocket *ss; <API key>(); if (ssl_force_locks) makeLocks = PR_TRUE; /* Make a new socket and get it ready */ ss = (sslSocket*) PORT_ZAlloc(sizeof(sslSocket)); if (ss) { /* This should be of type SSLKEAType, but CC on IRIX * complains during the for loop. 
*/ int i; SECStatus status; ss->opt = ssl_defaults; ss->opt.useSocks = PR_FALSE; ss->opt.noLocks = !makeLocks; ss->vrange = *VERSIONS_DEFAULTS(protocolVariant); ss->protocolVariant = protocolVariant; ss->peerID = NULL; ss->rTimeout = <API key>; ss->wTimeout = <API key>; ss->cTimeout = <API key>; ss->cipherSpecs = NULL; ss->sizeCipherSpecs = 0; /* produced lazily */ ss->preferredCipher = NULL; ss->url = NULL; for (i=kt_null; i < kt_kea_size; i++) { sslServerCerts * sc = ss->serverCerts + i; sc->serverCert = NULL; sc->serverCertChain = NULL; sc->serverKeyPair = NULL; sc->serverKeyBits = 0; ss->certStatusArray[i] = NULL; } ss->stepDownKeyPair = NULL; ss->dbHandle = <API key>(); /* Provide default implementation of hooks */ ss->authCertificate = SSL_AuthCertificate; ss->authCertificateArg = (void *)ss->dbHandle; ss->sniSocketConfig = NULL; ss->sniSocketConfigArg = NULL; ss->getClientAuthData = NULL; ss->handleBadCert = NULL; ss->badCertArg = NULL; ss->pkcs11PinArg = NULL; ss-><API key> = NULL; ssl_ChooseOps(ss); <API key>(ss); <API key>(ss); PR_INIT_CLIST(&ss->ssl3.hs.lastMessageFlight); if (makeLocks) { status = ssl_MakeLocks(ss); if (status != SECSuccess) goto loser; } status = <API key>(ss); if (status != SECSuccess) goto loser; status = ssl_InitGather(&ss->gs); if (status != SECSuccess) { loser: <API key>(ss); ssl_DestroyLocks(ss); PORT_Free(ss); ss = NULL; } } return ss; }
# -*- coding: utf-8 -*- """URLs for all views.""" from django.urls import path from djmaidez.contact import views urlpatterns = [ path('form/', views.form, name='form'), path('populate/', views.populate, name='populate'), path('save/', views.save, name='save'), path('test/', views.test, name='test'), ]
package edu.utah.sci.cyclist.core.presenter;

import javafx.collections.FXCollections;
import javafx.collections.ObservableList;

import edu.utah.sci.cyclist.core.event.notification.CyclistNotification;
import edu.utah.sci.cyclist.core.event.notification.<API key>;
import edu.utah.sci.cyclist.core.event.notification.<API key>;
import edu.utah.sci.cyclist.core.event.notification.<API key>;
import edu.utah.sci.cyclist.core.event.notification.EventBus;
import edu.utah.sci.cyclist.core.model.Field;
import edu.utah.sci.cyclist.core.model.Schema;
import edu.utah.sci.cyclist.core.model.Table;
import edu.utah.sci.cyclist.core.ui.panels.SchemaPanel;

/**
 * Presenter that keeps a {@link SchemaPanel} in sync with the schema of the
 * table that currently has data-source focus.
 *
 * <p>NOTE(review): several identifiers in this copy of the file have been
 * redacted; the hedged comments below describe only what the visible code
 * demonstrates.</p>
 */
public class SchemaPresenter extends PresenterBase {
	// Panel to display the schema fields in; set via setPanel().
	private SchemaPanel _fieldsPanel;
	// Schema of the most recently focused table.
	private Schema _schema;
	// Observable copy of the schema's fields, handed to the panel.
	private ObservableList<Field> _fields;

	/**
	 * Creates the presenter and registers its notification handlers on the bus.
	 *
	 * @param bus event bus used for notification registration (via superclass)
	 */
	public SchemaPresenter(EventBus bus) {
		super(bus);
		<API key>();
	}

	/**
	 * Sets the panel this presenter populates.  Must be called before a
	 * DATASOURCE_FOCUS notification arrives, otherwise the handler below will
	 * dereference a null panel.
	 *
	 * @param fields the schema panel to populate
	 */
	public void setPanel(SchemaPanel fields){
		_fieldsPanel = fields;
	}

	// Registers a handler for DATASOURCE_FOCUS: on each focus change it
	// rebuilds the observable field list from the focused table's schema and
	// pushes it to the panel.
	private void <API key>() {
		<API key>(<API key>.DATASOURCE_FOCUS, new <API key>() {
			@Override
			public void handle(CyclistNotification notification) {
				<API key> tableNotification = (<API key>) notification;
				Table table = tableNotification.getTable();
				_schema = table.getSchema();
				// Fresh list each time; the panel receives the new instance
				// rather than mutations of a shared one.
				_fields = FXCollections.observableArrayList();
				for (int f=0; f < _schema.size(); f++) {
					Field field = _schema.getField(f);
					_fields.add(field);
				}
				_fieldsPanel.setFields(_fields);
			}
		});
	}
}
"""Here is defined the IndexArray class.""" from bisect import bisect_left, bisect_right from .node import NotLoggedMixin from .carray import CArray from .earray import EArray from . import indexesextension # Declarations for inheriting class CacheArray(indexesextension.CacheArray, NotLoggedMixin, EArray): """Container for keeping index caches of 1st and 2nd level.""" # Class identifier. _c_classid = 'CACHEARRAY' class LastRowArray(indexesextension.LastRowArray, NotLoggedMixin, CArray): """Container for keeping sorted and indices values of last row of an index.""" # Class identifier. _c_classid = 'LASTROWARRAY' class IndexArray(indexesextension.IndexArray, NotLoggedMixin, EArray): # Class identifier. _c_classid = 'INDEXARRAY' @property def chunksize(self): """The chunksize for this object.""" return self.chunkshape[1] @property def slicesize(self): """The slicesize for this object.""" return self.shape[1] def __init__(self, parentnode, name, atom=None, title="", filters=None, byteorder=None): """Create an IndexArray instance.""" self._v_pathname = parentnode._g_join(name) if atom is not None: # The shape and chunkshape needs to be fixed here if name == "sorted": reduction = parentnode.reduction shape = (0, parentnode.slicesize // reduction) chunkshape = (1, parentnode.chunksize // reduction) else: shape = (0, parentnode.slicesize) chunkshape = (1, parentnode.chunksize) else: # The shape and chunkshape will be read from disk later on shape = None chunkshape = None super().__init__( parentnode, name, atom, shape, title, filters, chunkshape=chunkshape, byteorder=byteorder) # This version of searchBin uses both ranges (1st level) and # bounds (2nd level) caches. It uses a cache for boundary rows, # but not for 'sorted' rows (this is only supported for the # 'optimized' types). 
def _search_bin(self, nrow, item): item1, item2 = item result1 = -1 result2 = -1 hi = self.shape[1] ranges = self._v_parent.rvcache boundscache = self.boundscache # First, look at the beginning of the slice begin = ranges[nrow, 0] # Look for items at the beginning of sorted slices if item1 <= begin: result1 = 0 if item2 < begin: result2 = 0 if result1 >= 0 and result2 >= 0: return (result1, result2) # Then, look for items at the end of the sorted slice end = ranges[nrow, 1] if result1 < 0: if item1 > end: result1 = hi if result2 < 0: if item2 >= end: result2 = hi if result1 >= 0 and result2 >= 0: return (result1, result2) # Finally, do a lookup for item1 and item2 if they were not found # Lookup in the middle of slice for item1 chunksize = self.chunksize # Number of elements/chunksize nchunk = -1 # Try to get the bounds row from the LRU cache nslot = boundscache.getslot(nrow) if nslot >= 0: # Cache hit. Use the row kept there. bounds = boundscache.getitem(nslot) else: # No luck with cached data. Read the row and put it in the cache. 
bounds = self._v_parent.bounds[nrow] size = bounds.size * bounds.itemsize boundscache.setitem(nrow, bounds, size) if result1 < 0: # Search the appropriate chunk in bounds cache nchunk = bisect_left(bounds, item1) chunk = self._read_sorted_slice(nrow, chunksize * nchunk, chunksize * (nchunk + 1)) result1 = indexesextension._bisect_left(chunk, item1, chunksize) result1 += chunksize * nchunk # Lookup in the middle of slice for item2 if result2 < 0: # Search the appropriate chunk in bounds cache nchunk2 = bisect_right(bounds, item2) if nchunk2 != nchunk: chunk = self._read_sorted_slice(nrow, chunksize * nchunk2, chunksize * (nchunk2 + 1)) result2 = indexesextension._bisect_right(chunk, item2, chunksize) result2 += chunksize * nchunk2 return (result1, result2) def __str__(self): """A compact representation of this class""" return f"IndexArray(path={self._v_pathname})" def __repr__(self): """A verbose representation of this class.""" return f"""{self} atom = {self.atom!r} shape = {self.shape} nrows = {self.nrows} chunksize = {self.chunksize} slicesize = {self.slicesize} byteorder = {self.byteorder!r}"""
"use strict"; var ExampleImage = require('../helpers/ExampleImage'); var <API key> = require('../helpers/<API key>'); var FixedDataTable = require('fixed-data-table'); var React = require('react'); var createReactClass = require('create-react-class'); var Column = FixedDataTable.Column; var PropTypes = require('prop-types'); var Table = FixedDataTable.Table; var ROWS = 1000000; function renderImage(/*string*/ cellData) { return <ExampleImage src={cellData} />; } function renderLink(/*string*/ cellData) { return <a href="#">{cellData}</a>; } function renderDate(/*object*/ cellData) { return <span>{cellData.toLocaleString()}</span>; } var ObjectDataExample = createReactClass({ getInitialState() { return { dataList: new <API key>(ROWS) } }, _rowGetter(index){ return this.state.dataList.getObjectAt(index); }, render() { return ( <Table rowHeight={50} headerHeight={50} rowGetter={this._rowGetter} rowsCount={this.state.dataList.getSize()} width={1000} height={500} {...this.props}> <Column cellRenderer={renderImage} dataKey="avatar" fixed={true} label="" width={50} /> <Column dataKey="firstName" fixed={true} label="First Name" width={100} /> <Column dataKey="lastName" fixed={true} label="Last Name" width={100} /> <Column dataKey="city" label="City" width={100} /> <Column label="Street" width={200} dataKey="street" /> <Column label="Zip Code" width={200} dataKey="zipCode" /> <Column cellRenderer={renderLink} label="Email" width={200} dataKey="email" /> <Column cellRenderer={renderDate} label="DOB" width={200} dataKey="date" /> </Table> ); }, }); module.exports = ObjectDataExample;
/* TEMPLATE GENERATED TESTCASE FILE
Filename: <API key>.cpp
Label Definition File: CWE401_Memory_Leak.c.label.xml
Template File: <API key>.tmpl.cpp
*/
/*
 * @description
 * CWE: 401 Memory Leak
 * BadSource: calloc Allocate data using calloc()
 * GoodSource: Allocate data on the stack
 * Sinks:
 *    GoodSink: call free() on data
 *    BadSink : no deallocation of data
 * Flow Variant: 83 Data flow: data passed to class constructor and destructor by declaring the class object on the stack
 *
 * NOTE(review): this is deliberately-constructed CWE benchmark code; the
 * "flaws" below are intentional and must not be "fixed".
 */
#ifndef OMITGOOD

#include "std_testcase.h"
#include "<API key>.h"

namespace <API key>
{

<API key>::<API key>(twoIntsStruct * dataCopy)
{
    data = dataCopy;
    /* FIX: Use memory allocated on the stack with ALLOCA */
    /* Note: the heap pointer passed in as dataCopy is intentionally
     * discarded here; the stack allocation below replaces it so the
     * destructor has nothing to free. */
    data = (twoIntsStruct *)ALLOCA(100*sizeof(twoIntsStruct));
    /* Initialize and make use of data */
    data[0].intOne = 0;
    data[0].intTwo = 0;
    printStructLine(&data[0]);
}

<API key>::~<API key>()
{
    /* POTENTIAL FLAW: No deallocation */
    ; /* empty statement needed for some flow variants */
}

}

#endif /* OMITGOOD */
'use strict';

const React = require('react');
const {
  StyleSheet,
  View,
  Text,
  TouchableHighlight,
  Alert,
} = require('react-native');

const {examples: SharedAlertExamples} = require('./AlertExample');

import type {<API key>} from '../../types/RNTesterTypes';

type Props = $ReadOnly<{||}>;
type State = {|promptValue: ?string|};

// Demonstrates Alert.prompt() variations (title/callback, custom buttons,
// keyboard types, default values).  The most recently submitted prompt
// value is JSON-stringified and echoed above the buttons.
class PromptOptions extends React.Component<Props, State> {
  customButtons: Array<Object>;

  constructor(props) {
    super(props);

    /* $FlowFixMe[cannot-write] this seems to be a Flow bug, `saveResponse` is
     * defined below */
    // $FlowFixMe[method-unbinding] added when improving typing for this parameters
    this.saveResponse = this.saveResponse.bind(this);

    // Buttons reused by the "custom buttons" examples; OK records the value,
    // Cancel uses the system cancel style.
    this.customButtons = [
      {
        text: 'Custom OK',
        onPress: this.saveResponse,
      },
      {
        text: 'Custom Cancel',
        style: 'cancel',
      },
    ];

    this.state = {
      promptValue: undefined,
    };
  }

  render() {
    return (
      <View>
        <Text style={styles.promptValue}>
          <Text style={styles.promptValueLabel}>Prompt value:</Text>{' '}
          {this.state.promptValue}
        </Text>

        <TouchableHighlight
          style={styles.wrapper}
          // $FlowFixMe[method-unbinding] added when improving typing for this parameters
          // $FlowFixMe[incompatible-call]
          onPress={() => Alert.prompt('Type a value', null, this.saveResponse)}>
          <View style={styles.button}>
            <Text>prompt with title & callback</Text>
          </View>
        </TouchableHighlight>

        <TouchableHighlight
          style={styles.wrapper}
          onPress={() =>
            Alert.prompt('Type a value', null, this.customButtons)
          }>
          <View style={styles.button}>
            <Text>prompt with title & custom buttons</Text>
          </View>
        </TouchableHighlight>

        <TouchableHighlight
          style={styles.wrapper}
          onPress={() =>
            Alert.prompt(
              'Type a phone number',
              null,
              null,
              'plain-text',
              undefined,
              'phone-pad',
            )
          }>
          <View style={styles.button}>
            <Text>prompt with title & custom keyboard</Text>
          </View>
        </TouchableHighlight>

        <TouchableHighlight
          style={styles.wrapper}
          onPress={() =>
            Alert.prompt(
              'Type a value',
              null,
              // $FlowFixMe[method-unbinding] added when improving typing for this parameters
              // $FlowFixMe[incompatible-call]
              this.saveResponse,
              undefined,
              'Default value',
            )
          }>
          <View style={styles.button}>
            <Text>prompt with title, callback & default value</Text>
          </View>
        </TouchableHighlight>

        <TouchableHighlight
          style={styles.wrapper}
          onPress={() =>
            Alert.prompt(
              'Type a value',
              null,
              this.customButtons,
              'login-password',
              'admin@site.com',
            )
          }>
          <View style={styles.button}>
            <Text>
              prompt with title, custom buttons, login/password & default value
            </Text>
          </View>
        </TouchableHighlight>
      </View>
    );
  }

  // Callback for prompt submission: stores the (stringified) value so
  // render() can display it.
  saveResponse(promptValue) {
    this.setState({promptValue: JSON.stringify(promptValue)});
  }
}

const styles = StyleSheet.create({
  wrapper: {
    borderRadius: 5,
    marginBottom: 5,
  },
  button: {
    backgroundColor: '#eeeeee',
    padding: 10,
  },
  promptValue: {
    marginBottom: 10,
  },
  promptValueLabel: {
    fontWeight: 'bold',
  },
});

exports.framework = 'React';
exports.title = 'Alerts';
exports.description = 'iOS alerts and action sheets';
exports.documentationURL = 'https://reactnative.dev/docs/alert';
exports.examples = ([
  SharedAlertExamples,
  {
    title: 'Prompt Options',
    render(): React.Element<any> {
      return <PromptOptions />;
    },
  },
  {
    title: 'Prompt Types',
    render(): React.Node {
      return (
        <View>
          <TouchableHighlight
            style={styles.wrapper}
            onPress={() => Alert.prompt('Plain Text Entry')}>
            <View style={styles.button}>
              <Text>plain-text</Text>
            </View>
          </TouchableHighlight>
          <TouchableHighlight
            style={styles.wrapper}
            onPress={() =>
              Alert.prompt('Secure Text', null, null, 'secure-text')
            }>
            <View style={styles.button}>
              <Text>secure-text</Text>
            </View>
          </TouchableHighlight>
          <TouchableHighlight
            style={styles.wrapper}
            onPress={() =>
              Alert.prompt('Login & Password', null, null, 'login-password')
            }>
            <View style={styles.button}>
              <Text>login-password</Text>
            </View>
          </TouchableHighlight>
        </View>
      );
    },
  },
]: Array<<API key>>);
#ifndef <API key> #define <API key> #include <string> #include <vector> #include "base/basictypes.h" #include "base/compiler_specific.h" #include "base/memory/ref_counted.h" #include "base/memory/scoped_ptr.h" #include "base/synchronization/lock.h" #include "media/cdm/aes_decryptor.h" #include "media/cdm/ppapi/external_clear_key/<API key>.h" // Enable this to use the fake decoder for testing. // TODO(tomfinegan): Move fake audio decoder into a separate class. #if 0 #define <API key> #endif namespace media { class FileIOTestRunner; class CdmVideoDecoder; class DecoderBuffer; class <API key>; // Clear key implementation of the cdm::<API key> interface. class ClearKeyCdm : public <API key> { public: ClearKeyCdm(Host* host, const std::string& key_system); ~ClearKeyCdm() override; // <API key> implementation. void Initialize(bool <API key>, bool <API key>) override; void <API key>(uint32 promise_id, cdm::SessionType session_type, cdm::InitDataType init_data_type, const uint8* init_data, uint32 init_data_size) override; void LoadSession(uint32 promise_id, cdm::SessionType session_type, const char* session_id, uint32_t session_id_length) override; void UpdateSession(uint32 promise_id, const char* session_id, uint32_t session_id_length, const uint8* response, uint32 response_size) override; void CloseSession(uint32 promise_id, const char* session_id, uint32_t session_id_length) override; void RemoveSession(uint32 promise_id, const char* session_id, uint32_t session_id_length) override; void <API key>(uint32 promise_id, const uint8_t* <API key>, uint32_t <API key>) override; void TimerExpired(void* context) override; cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer, cdm::DecryptedBlock* decrypted_block) override; cdm::Status <API key>( const cdm::AudioDecoderConfig& <API key>) override; cdm::Status <API key>( const cdm::VideoDecoderConfig& <API key>) override; void DeinitializeDecoder(cdm::StreamType decoder_type) override; void ResetDecoder(cdm::StreamType 
decoder_type) override; cdm::Status <API key>(const cdm::InputBuffer& encrypted_buffer, cdm::VideoFrame* video_frame) override; cdm::Status <API key>(const cdm::InputBuffer& encrypted_buffer, cdm::AudioFrames* audio_frames) override; void Destroy() override; void <API key>( const cdm::<API key>& response) override; void <API key>(cdm::QueryResult result, uint32_t link_mask, uint32_t <API key>) override; private: // Emulates a session stored for |<API key>|. This // is necessary since aes_decryptor.cc does not support storing sessions. void LoadLoadableSession(); void <API key>(); // <API key> callbacks. void OnSessionMessage(const std::string& session_id, MediaKeys::MessageType message_type, const std::vector<uint8>& message, const GURL& <API key>); void OnSessionKeysChange(const std::string& session_id, bool <API key>, CdmKeysInfo keys_info); void OnSessionClosed(const std::string& session_id); // Handle the success/failure of a promise. These methods are responsible for // calling |host_| to resolve or reject the promise. void OnSessionCreated(uint32 promise_id, const std::string& session_id); void OnSessionLoaded(uint32 promise_id, const std::string& session_id); void OnPromiseResolved(uint32 promise_id); void OnPromiseFailed(uint32 promise_id, MediaKeys::Exception exception_code, uint32 system_code, const std::string& error_message); // Prepares next renewal message and sets a timer for it. void ScheduleNextRenewal(); // Decrypts the |encrypted_buffer| and puts the result in |decrypted_buffer|. // Returns cdm::kSuccess if decryption succeeded. The decrypted result is // put in |decrypted_buffer|. If |encrypted_buffer| is empty, the // |decrypted_buffer| is set to an empty (EOS) buffer. // Returns cdm::kNoKey if no decryption key was available. In this case // |decrypted_buffer| should be ignored by the caller. // Returns cdm::kDecryptError if any decryption error occurred. In this case // |decrypted_buffer| should be ignored by the caller. 
cdm::Status <API key>( const cdm::InputBuffer& encrypted_buffer, scoped_refptr<DecoderBuffer>* decrypted_buffer); #if defined(<API key>) int64 <API key>() const; // Generates fake video frames with |<API key>|. // Returns the number of samples generated in the |audio_frames|. int <API key>(int64 <API key>, cdm::AudioFrames* audio_frames) const; // Generates fake video frames given |input_timestamp|. // Returns cdm::kSuccess if any audio frame is successfully generated. cdm::Status <API key>(int64 <API key>, cdm::AudioFrames* audio_frames); #endif // <API key> void StartFileIOTest(); // Callback for CDM File IO test. void <API key>(bool success); // Keep track of the last session created. void SetSessionId(const std::string& session_id); AesDecryptor decryptor_; ClearKeyCdmHost* host_; const std::string key_system_; std::string last_session_id_; std::string <API key>; // In order to simulate LoadSession(), CreateSession() and then // UpdateSession() will be called to create a session with known keys. // |<API key>| is used to keep track of the // session_id allocated by aes_decryptor, as the session_id will be returned // as |kLoadableSessionId|. Future requests for this simulated session // need to use |<API key>| for all calls // to aes_decryptor. // |<API key>| is used to keep track of the // original LoadSession() promise, as it is not resolved until the // UpdateSession() call succeeds. // |<API key>| is used to keep // track of whether a keyschange event has been received for the loadable // session in case it happens before the emulated session is fully created. // |<API key>| is used to keep track of the list // of keys provided as a result of calling UpdateSession() if it happens, // since they can't be forwarded on until the LoadSession() promise is // resolved. // TODO(xhwang): Extract testing code from main implementation. 
// TODO(jrummell): Once the order of events is fixed, // |<API key>| should be // removed (the event should have either happened or never happened). // |<API key>| may also go away if the event is std::string <API key>; uint32_t <API key>; bool <API key>; CdmKeysInfo <API key>; // Timer delay in milliseconds for the next host_->SetTimer() call. int64 timer_delay_ms_; // Indicates whether a renewal timer has been set to prevent multiple timers // from running. bool renewal_timer_set_; #if defined(<API key>) int channel_count_; int bits_per_channel_; int samples_per_second_; int64 <API key>; int <API key>; #endif // <API key> #if defined(<API key>) scoped_ptr<<API key>> audio_decoder_; #endif // <API key> scoped_ptr<CdmVideoDecoder> video_decoder_; scoped_ptr<FileIOTestRunner> <API key>; <API key>(ClearKeyCdm); }; } // namespace media #endif // <API key>
#region Using directives

#endregion

namespace Platform.Network.ExtensibleServer.CommandServer
{
    /// <summary>
    /// Abstraction over a connection that exchanges whole blocks of text with
    /// a remote peer (block framing is defined by the implementation).
    /// </summary>
    public interface ITextConnection
    {
        /// <summary>Flushes any buffered output to the underlying transport.</summary>
        void Flush();

        /// <summary>Reads the next text block from the peer.</summary>
        /// <returns>The text block that was read.</returns>
        string ReadTextBlock();

        /// <summary>Writes a single text block to the peer.</summary>
        /// <param name="block">The block to write.</param>
        void WriteTextBlock(string block);

        /// <summary>Formats the arguments and writes the result as one text block.</summary>
        /// <param name="format">A composite format string.</param>
        /// <param name="args">Arguments substituted into <paramref name="format"/>.</param>
        void WriteTextBlock(string format, params object[] args);
    }
}
<?php

declare(strict_types=1);

namespace Joindin\Api\Exception;

use Teapot\StatusCode\Http;

/**
 * Thrown when an operation requires an authenticated user but the caller
 * is not logged in. Carries HTTP 401 (Unauthorized) as its exception code.
 */
final class <API key> extends \RuntimeException
{
    private const MESSAGE = 'You must be logged in to perform this operation.';

    /**
     * Named constructor.
     *
     * @param string|null $message Optional custom message; the default
     *                             message is used when null is given.
     */
    public static function <API key>(?string $message = null): self
    {
        // Parameter is explicitly nullable: the implicit form
        // "string $message = null" is deprecated as of PHP 8.4.
        return new static($message ?? self::MESSAGE, Http::UNAUTHORIZED);
    }
}
<?php

namespace ApplicationTest\Integration\Controller;

use Application\Controller;
use ApplicationTest\Integration\Util\Bootstrap;
use User\Mapper\User;
use Zend\Http;
use Zend\Paginator;
use Zend\Test\PHPUnit\Controller\<API key>;
use ZfModule\Mapper;

/**
 * Integration tests for the front-page controller: dispatches real routes
 * through the MVC stack while swapping the mappers for mocks so no
 * database access happens.
 */
class IndexControllerTest extends <API key>
{
    protected function setUp()
    {
        parent::setUp();

        // Boot the full application so dispatch() exercises real routing.
        $this-><API key>(Bootstrap::getConfig());
    }

    public function <API key>()
    {
        // The index action is expected to request page 1 of the newest
        // modules and the grand total of modules.
        $moduleMapper = $this->getMockBuilder(Mapper\Module::class)->getMock();

        $moduleMapper
            ->expects($this->once())
            ->method('pagination')
            ->with(
                $this->equalTo(1),
                $this->equalTo(Controller\IndexController::MODULES_PER_PAGE),
                $this->equalTo(null),
                $this->equalTo('created_at'),
                $this->equalTo('DESC')
            )
            // NOTE(review): Paginator\Adapter\Null — "Null" is a reserved
            // word from PHP 7 on; verify the Zend version in use still
            // ships this class name.
            ->willReturn(new Paginator\Paginator(new Paginator\Adapter\Null()))
        ;

        $moduleMapper
            ->expects($this->once())
            ->method('getTotal')
            ->willReturn(0)
        ;

        // The sidebar lists the 16 most recently joined users.
        $userMapper = $this->getMockBuilder(User::class)->getMock();

        $userMapper
            ->expects($this->once())
            ->method('findAll')
            ->with(
                $this->equalTo(16),
                $this->equalTo('created_at'),
                $this->equalTo('DESC')
            )
            ->willReturn([])
        ;

        // Override the real services with the mocks before dispatching.
        $this-><API key>()
            ->setAllowOverride(true)
            ->setService(
                Mapper\Module::class,
                $moduleMapper
            )
            ->setService(
                'zfcuser_user_mapper',
                $userMapper
            )
        ;

        $this->dispatch('/');

        $this-><API key>(Controller\IndexController::class);
        $this->assertActionName('index');
        $this-><API key>(Http\Response::STATUS_CODE_200);
    }

    public function <API key>()
    {
        // The feed action uses the same pagination query but renders a feed,
        // so no user mapper is involved.
        $moduleMapper = $this->getMockBuilder(Mapper\Module::class)->getMock();

        $moduleMapper
            ->expects($this->once())
            ->method('pagination')
            ->with(
                $this->equalTo(1),
                $this->equalTo(Controller\IndexController::MODULES_PER_PAGE),
                $this->equalTo(null),
                $this->equalTo('created_at'),
                $this->equalTo('DESC')
            )
            ->willReturn([])
        ;

        $this-><API key>()
            ->setAllowOverride(true)
            ->setService(
                Mapper\Module::class,
                $moduleMapper
            )
        ;

        $this->dispatch('/feed');

        $this-><API key>(Controller\IndexController::class);
        $this->assertActionName('feed');
        $this-><API key>(Http\Response::STATUS_CODE_200);
    }
}
<?php

/**
 * "Fix" report: instead of printing violations, runs the fixer over each
 * processed file and writes the fixed content back to disk (or to STDOUT
 * when the input came from STDIN).
 */
class <API key> implements <API key>
{

    /**
     * Generate a partial report for a single processed file.
     *
     * Function should return TRUE if it printed or stored data about the file
     * and FALSE if it ignored the file. Returning TRUE indicates that the file and
     * its data should be counted in the grand totals.
     *
     * @param array                $report      Prepared report data.
     * @param <API key> $phpcsFile   The file being reported on.
     * @param boolean              $showSources Show sources?
     * @param int                  $width       Maximum allowed line width.
     *
     * @return boolean
     */
    public function generateFileReport(
        $report,
        <API key> $phpcsFile,
        $showSources=false,
        $width=80
    ) {
        $cliValues = $phpcsFile->phpcs->cli-><API key>();

        // Run the fixer; TRUE means at least one violation was auto-fixed.
        $changed = $phpcsFile->fixer->fixFile();

        if (empty($cliValues['files']) === true) {
            // Replacing STDIN, so output current file to STDOUT
            // even if nothing was fixed. Exit here because we
            // can't process any more than 1 file in this setup.
            echo $phpcsFile->fixer->getContents();
            ob_end_flush();
            exit(1);
        }

        if ($changed === true) {
            // Write the fixed content either next to the original (with a
            // suffix) or over the original when no suffix is configured.
            $newFilename = $report['filename'].$cliValues['phpcbf-suffix'];
            $newContent  = $phpcsFile->fixer->getContents();
            file_put_contents($newFilename, $newContent);

            echo 'Fixed '.$report['fixable'].' sniff violations in '.$report['filename'].PHP_EOL;
            if ($newFilename === $report['filename']) {
                echo "\t=> file was overwritten".PHP_EOL;
            } else {
                echo "\t=> fixed file written to ".basename($newFilename).PHP_EOL;
            }
        }

        return $changed;

    }//end generateFileReport()


    /**
     * Prints all errors and warnings for each file processed.
     *
     * @param string  $cachedData    Any partial report data that was returned from
     *                               generateFileReport during the run.
     * @param int     $totalFiles    Total number of files processed during the run.
     * @param int     $totalErrors   Total number of errors found during the run.
     * @param int     $totalWarnings Total number of warnings found during the run.
     * @param int     $totalFixable  Total number of problems that can be fixed.
     * @param boolean $showSources   Show sources?
     * @param int     $width         Maximum allowed line width.
     * @param boolean $toScreen      Is the report being printed to screen?
     *
     * @return void
     */
    public function generate(
        $cachedData,
        $totalFiles,
        $totalErrors,
        $totalWarnings,
        $totalFixable,
        $showSources=false,
        $width=80,
        $toScreen=true
    ) {
        // Replay the per-file messages, then a summary line.
        // NOTE(review): $totalFiles counts all processed files, not only the
        // fixed ones — confirm the summary wording is intended.
        echo $cachedData;
        echo "Fixed $totalFiles files".PHP_EOL;

    }//end generate()


}//end class
// End-to-end test for CliCall: spins up a real in-process gRPC echo server,
// performs the same RPC once through a generated stub and once through the
// generic CliCall interface, and verifies both produce identical results.

#include "test/core/util/test_config.h"
#include "test/cpp/util/cli_call.h"
#include "test/cpp/util/echo.grpc.pb.h"
#include <grpc++/channel_arguments.h>
#include <grpc++/channel_interface.h>
#include <grpc++/client_context.h>
#include <grpc++/create_channel.h>
#include <grpc++/credentials.h>
#include <grpc++/<API key>.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc++/server_credentials.h>
#include <grpc++/status.h>
#include "test/core/util/port.h"
#include <gtest/gtest.h>

#include <grpc/grpc.h>

using grpc::cpp::test::util::EchoRequest;
using grpc::cpp::test::util::EchoResponse;

namespace grpc {
namespace testing {

// Echo service that mirrors back the request message, reflects all client
// metadata as initial metadata, and adds one fixed trailing metadata entry
// so the test can verify metadata round-trips.
class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service {
 public:
  Status Echo(ServerContext* context, const EchoRequest* request,
              EchoResponse* response) GRPC_OVERRIDE {
    if (!context->client_metadata().empty()) {
      // Copy every client metadata pair back as initial (header) metadata.
      for (std::multimap<grpc::string, grpc::string>::const_iterator iter =
               context->client_metadata().begin();
           iter != context->client_metadata().end(); ++iter) {
        context->AddInitialMetadata(iter->first, iter->second);
      }
    }
    context->AddTrailingMetadata("trailing_key", "trailing_value");
    response->set_message(request->message());
    return Status::OK;
  }
};

class CliCallTest : public ::testing::Test {
 protected:
  CliCallTest() : thread_pool_(2) {}

  // Starts an insecure server on a freshly picked unused port.
  void SetUp() GRPC_OVERRIDE {
    int port = <API key>();
    server_address_ << "localhost:" << port;
    // Setup server
    ServerBuilder builder;
    builder.AddListeningPort(server_address_.str(), <API key>());
    builder.RegisterService(&service_);
    builder.SetThreadPool(&thread_pool_);
    server_ = builder.BuildAndStart();
  }

  void TearDown() GRPC_OVERRIDE { server_->Shutdown(); }

  // (Re)creates the channel and generated stub pointing at the test server.
  void ResetStub() {
    channel_ = CreateChannel(server_address_.str(), InsecureCredentials(),
                             ChannelArguments());
    // NOTE(review): std::move on the rvalue returned by NewStub() is
    // redundant (pessimizing-move); plain assignment would move anyway.
    stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel_));
  }

  std::shared_ptr<ChannelInterface> channel_;
  std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub_;
  std::unique_ptr<Server> server_;
  std::ostringstream server_address_;
  TestServiceImpl service_;
  FixedSizeThreadPool thread_pool_;
};

// Send a rpc with a normal stub and then a CliCall. Verify they match.
TEST_F(CliCallTest, SimpleRpc) {
  ResetStub();
  // Normal stub.
  EchoRequest request;
  EchoResponse response;
  request.set_message("Hello");

  ClientContext context;
  context.AddMetadata("key1", "val1");
  Status s = stub_->Echo(&context, request, &response);
  EXPECT_EQ(response.message(), request.message());
  EXPECT_TRUE(s.ok());

  // Same RPC through the generic CliCall path: serialize the request to
  // bytes, invoke by method name, and compare the raw response bytes and
  // both metadata maps against the stub call above.
  const grpc::string kMethod("/grpc.cpp.test.util.TestService/Echo");
  grpc::string request_bin, response_bin, <API key>;
  EXPECT_TRUE(request.SerializeToString(&request_bin));
  EXPECT_TRUE(response.SerializeToString(&<API key>));
  std::multimap<grpc::string, grpc::string> client_metadata, <API key>,
      <API key>;
  client_metadata.insert(std::pair<grpc::string, grpc::string>("key1", "val1"));
  Status s2 = CliCall::Call(channel_, kMethod, request_bin, &response_bin,
                            client_metadata, &<API key>, &<API key>);
  EXPECT_TRUE(s2.ok());
  EXPECT_EQ(<API key>, response_bin);
  EXPECT_EQ(context.<API key>(), <API key>);
  EXPECT_EQ(context.<API key>(), <API key>);
}

}  // namespace testing
}  // namespace grpc

int main(int argc, char** argv) {
  grpc_test_init(argc, argv);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
require File.expand_path(File.join(File.dirname(__FILE__), 'helper')) # reasons.rb # test decision reasons def explain solver solver.each_to_install { |s| puts "Install } solver.each_to_remove { |s| puts "Remove } solver.each_decision do |d| puts "Decision: #{d.solvable}: #{d.op_s} (#{d.ruleinfo})" end end class ReasonsTest < Test::Unit::TestCase def setup @pool = Satsolver::Pool.new @pool.arch = "i686" @repo = @pool.create_repo( 'test' ) end def <API key> solv1 = @repo.create_solvable( 'A', '1.0-0' ) solv2 = @repo.create_solvable( 'B', '1.0-0' ) rel = @pool.create_relation( "A", Satsolver::REL_EQ, "1.0-0" ) solv2.requires << rel puts "\n---\nB-1.0-0 requires A = 1.0-0" request = @pool.create_request request.install( solv2 ) @pool.prepare solver = @pool.create_solver( ) solver.solve( request ) explain solver end def <API key> solv1 = @repo.create_solvable( 'A', '1.0-0' ) solv2 = @repo.create_solvable( 'B', '1.0-0' ) rel = @pool.create_relation( "a", Satsolver::REL_EQ, "42" ) solv1.provides << rel solv2.requires << rel puts "\n---\nB-1.0-0 requires a = 42, provided by A-1.0-0" request = @pool.create_request request.install( solv2 ) @pool.prepare solver = @pool.create_solver( ) solver.solve( request ) explain solver end def <API key> solv1 = @repo.create_solvable( 'A', '1.0-0' ) solv2 = @repo.create_solvable( 'B', '1.0-0' ) solv3 = @repo.create_solvable( 'C', '1.0-0' ) rel = @pool.create_relation( "a", Satsolver::REL_EQ, "42" ) solv1.provides << rel solv2.requires << rel solv3.provides << rel puts "\n---\nB-1.0-0 requires a = 42, provided by A-1.0-0 and C-1.0-0" request = @pool.create_request request.install( solv2 ) @pool.prepare solver = @pool.create_solver( ) solver.solve( request ) explain solver end def test_install_bash solvpath = Pathname( File.dirname( __FILE__ ) ) + Pathname( "../../testdata" ) + "os11-beta5-i386.solv" repo = @pool.add_solv( solvpath ) repo.name = "beta5" puts "\n---\nInstalling bash" request = @pool.create_request request.install( "bash" ) 
@pool.prepare solver = @pool.create_solver( ) # solver.<API key> = true solver.solve( request ) explain solver end def test_conflicts installed = @pool.create_repo( 'installed' ) solv1 = installed.create_solvable( 'A', '1.0-0' ) solv2 = @repo.create_solvable( 'B', '1.0-0' ) @pool.installed = installed rel = @pool.create_relation( "a", Satsolver::REL_EQ, "42" ) assert rel solv1.provides << rel solv2.conflicts << rel puts "\n---\nB-1.0-0 conflicts a = 42, provided by installed A-1.0-0" request = @pool.create_request request.install( solv2 ) @pool.prepare solver = @pool.create_solver( ) solver.solve( request ) explain solver end def test_obsoletes installed = @pool.create_repo( 'installed' ) solv1 = installed.create_solvable( 'A', '1.0-0' ) solv2 = @repo.create_solvable( 'B', '1.0-0' ) @pool.installed = installed rel = @pool.create_relation( "A" ) solv2.obsoletes << rel puts "\n---\n#{solv2} obsoletes #{rel}, provided by installed #{solv1}" request = @pool.create_request request.install( solv2 ) @pool.prepare solver = @pool.create_solver( ) solver.solve( request ) explain solver end def <API key> solv1 = @repo.create_solvable( 'A', '1.0-0' ) solv2 = @repo.create_solvable( 'B', '1.0-0' ) rel = @pool.create_relation( "a", Satsolver::REL_EQ, "42" ) solv1.provides << rel solv2.requires << rel puts "\n---\nB-1.0-0 requires a = 42, provided by A-1.0-0. Removal of A should remove B" assert solv2.requires.size == 1 @pool.installed = @repo request = @pool.create_request request.remove( solv1 ) @pool.prepare solver = @pool.create_solver( ) solver.allow_uninstall = true solver.solve( request ) explain solver end end
-- German (de) localization strings for the BankerShutUp addon.
-- Each mkstr call registers one translated string under its SI constant;
-- the string values themselves are user-facing and must stay as-is.
local mkstr = ZO_CreateStringId
local SI = BankerShutUp.SI

-- Crafting
mkstr(SI.SHUTUP_BLACKSMITH, "Halt die Klappe Gendry")
mkstr(SI.SHUTUP_ALCH, "Halt die Klappe Heisenberg")
mkstr(SI.SHUTUP_CLOTH, "Halt die Klappe Rüstung")
mkstr(SI.SHUTUP_WOODY, "Halt die Klappe Waldig")
mkstr(SI.SHUTUP_COOK, "Don't kiss the Köchin")
mkstr(SI.SHUTUP_ENCHANTER, "Halt die Klappe Melisandre")

-- Banker
mkstr(SI.SHUTUP_BANKER, "Halt die Klappe Lannister")

-- Infos (settings-panel labels for the toggles above)
mkstr(SI.<API key>, "Stumm Schmied")
mkstr(SI.SHUTUP_ALCH_INFO, "Stumm Alchimist")
mkstr(SI.SHUTUP_CLOTH_INFO, "Stumm Leichte Rüstung")
mkstr(SI.SHUTUP_WOODY_INFO, "Stumm Holzbearbeitung")
mkstr(SI.SHUTUP_COOK_INFO, "Stumm Kochen")
mkstr(SI.<API key>, "Stumm Zauberhaft")

-- Banker infos
mkstr(SI.SHUTUP_BANKER_INFO, "Stumm Bankers")

-- Bug reporting / donation strings
mkstr(SI.BUGFOUND, "Einen Fehler gefunden??!")
mkstr(SI.MISSINGSTUFF, "Dies ermöglicht Protokolle für Dinge, die ich vermisst! \ n Bitte melden Sie, was in den Protokollen die @ awesomebilly gedruckt")
mkstr(SI.ONLYGOOD, "Die einzige gute Spenden sind\n ")
mkstr(SI.DONATIONS, "Gold Spenden ;) ")
mkstr(SI.BADASS, "@Awesomebilly (NA)")

-- Various App
mkstr(SI.APPNAME, "Luminary:  ")
mkstr(SI.BETA, " Dieses Addon ist in BETA. \n\n")
mkstr(SI.BUGSQUASHER, " \n\nWenn Sie einen Fehler finden, aktivieren Sie bitte die Protokollierung über  ")
mkstr(SI.REPORTTHATSHITYO, "\n\n\n Bitte melden Sie alle Bugs finden.\n  Translations by @wookiefrag and Google Translate")

-- Volume control (mute percentage sliders)
mkstr(SI.HOWMUCHTOMUTEVOLUME, "Prozentsatz niedriger Lautstärke")
mkstr(SI.<API key>, "Von 0 bis 100 ist. Standard ist 0%")
mkstr(SI.<API key>, "Prozentsatz für Lautstärke-und Rückflug")
mkstr(SI.<API key>, "Von 0 bis 100 ist. Standard ist 80%")
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE OverloadedStrings #-}

-- | Property and unit tests for foundation's UTF-8 'String' and
-- 'AsciiString' types: byte-level round-tripping, streaming decode,
-- modified-UTF-8 handling and case mapping.
module Test.Foundation.String
    ( testStringRefs
    ) where

-- import Control.Monad (replicateM)

import Foundation
import Foundation.Check
import Foundation.String
import Foundation.Primitive (AsciiString)

import Test.Data.List
import Test.Checks.Property.Collection
--import Test.Foundation.Encoding

-- | Top-level test tree: generic collection properties for both string
-- types plus the UTF-8 specific cases below.
testStringRefs :: Test
testStringRefs = Group "String"
    [ Group "UTF8" $
         [ <API key> "String" (Proxy :: Proxy String) arbitrary ]
      <> testStringCases
{-
      <> [ testGroup "Encoding Sample0" (testEncodings sample0)
         , testGroup "Encoding Sample1" (testEncodings sample1)
         , testGroup "Encoding Sample2" (testEncodings sample2)
         ]
-}
    , Group "ASCII" $
         [ <API key> "AsciiString" (Proxy :: Proxy AsciiString) arbitrary ]
      -- <> <API key>
    ]

-- | UTF-8 specific test cases: encode/decode round trips (including a
-- chunked streaming decode), modified-UTF-8 strings with embedded NULs,
-- and Unicode case-mapping (including multi-char expansions like ß -> SS).
testStringCases :: [Test]
testStringCases =
    [ Group "Validation"
        [ Property "fromBytes . toBytes == valid" $ \l ->
            let s = fromList l
             in (fromBytes UTF8 $ toBytes UTF8 s) === (s, Nothing, mempty)
        -- Decode the same bytes in randomly sized chunks; no errors may be
        -- reported, nothing may be left over, and the concatenation of the
        -- decoded pieces must equal the original string.
        , Property "Streaming" $ \(l, randomInts) ->
            let wholeS  = fromList l
                wholeBA = toBytes UTF8 wholeS
                reconstruct (prevBa, errs, acc) ba =
                    let ba' = prevBa `mappend` ba
                        (s, merr, nextBa) = fromBytes UTF8 ba'
                     in (nextBa, merr : errs, s : acc)
                (remainingBa, allErrs, chunkS) = foldl' reconstruct (mempty, [], []) $ chunks randomInts wholeBA
             in (catMaybes allErrs === []) `propertyAnd` (remainingBa === mempty) `propertyAnd` (mconcat (reverse chunkS) === wholeS)
        ]
    , Group "ModifiedUTF8"
        [ <API key> "The foundation Serie" "" ""
        , <API key> "has null bytes" "let's\0 do \0 it" "let's\0 do \0 it"
        , <API key> "Vincent's special" "abc\0, \0, " "abc\0, \0, "
        , <API key> "Long string" "this is only a simple string but quite longer than the 64 bytes used in the modified UTF8 parser" "this is only a simple string but quite longer than the 64 bytes used in the modified UTF8 parser"
        ]
    , Group "CaseMapping"
        [ Property "upper . upper == upper" $ \l ->
            let s = fromList l
             in upper (upper s) === upper s
        , CheckPlan "a should capitalize to A" $ validate "a" $ upper "a" == "A"
        , CheckPlan "b should capitalize to B" $ validate "b" $ upper "b" == "B"
        , CheckPlan "B should not capitalize" $ validate "B" $ upper "B" == "B"
        , CheckPlan "é should capitalize to É" $ validate "é" $ upper "é" == "É"
        , CheckPlan "ß should capitalize to SS" $ validate "ß" $ upper "ß" == "SS"
        , CheckPlan " should capitalize to FFL" $ validate "" $ upper "" == "<API key>"
        , CheckPlan "0a should capitalize to 0A" $ validate "0a" $ upper "\0a" == "\0A"
        , CheckPlan "0a should capitalize to 0A" $ validate "0a" $ upper "a\0a" == "A\0A"
        , CheckPlan "0a should capitalize to 0A" $ validate "0a" $ upper "\0\0" == "\0\0"
        , CheckPlan "00 should not capitalize" $ validate "00" $ upper "00" == "00"
        ]
{-
    , testGroup "replace"
        [ testCase "indices '' 'bb' should raise an error" $ do
            res <- try (evaluate $ indices "" "bb")
            case res of
                (Left (_ :: SomeException)) -> return ()
                Right _ -> fail "Expecting an error to be thrown, but it did not."
        , testCase "indices 'aa' 'bb' == []" $ do
            indices "aa" "bb" @?= []
        , testCase "indices 'aa' '<API key>' is correct" $ do
            indices "aa" "<API key>" @?= [Offset 0,Offset 13,Offset 15]
        , testCase "indices 'aa' 'aaccaadd' is correct" $ do
            indices "aa" "aaccaadd" @?= [Offset 0,Offset 4]
        , testCase "replace '' 'bb' 'foo' raises an error" $ do
            (res :: Either SomeException String) <- try (evaluate $ replace "" "bb" "foo")
            assertBool "Expecting an error to be thrown, but it did not."
                (isLeft res)
        , testCase "replace 'aa' 'bb' '' == ''" $ do
            replace "aa" "bb" "" @?= ""
        , testCase "replace 'aa' '' 'aabbcc' == 'aabbcc'" $ do
            replace "aa" "" "aabbcc" @?= "bbcc"
        , testCase "replace 'aa' 'bb' 'aa' == 'bb'" $ do
            replace "aa" "bb" "aa" @?= "bb"
        , testCase "replace 'aa' 'bb' 'aabb' == 'bbbb'" $ do
            replace "aa" "bb" "aabb" @?= "bbbb"
        , testCase "replace 'aa' 'bb' 'aaccaadd' == 'bbccbbdd'" $ do
            replace "aa" "bb" "aaccaadd" @?= "bbccbbdd"
        , testCase "replace 'aa' 'LongLong' 'aaccaadd' == '<API key>'" $ do
            replace "aa" "LongLong" "aaccaadd" @?= "<API key>"
        , testCase "replace 'aa' 'bb' '<API key>' == '<API key>'" $ do
            replace "aa" "bb" "<API key>" @?= "<API key>"
        , testCase "replace 'å' 'ä' 'ååññ' == 'ääññ'" $ do
            replace "å" "ä" "ååññ" @?= "ääññ"
        ]
    , testGroup "Cases"
        [ testGroup "Invalid-UTF8"
            [ testCase "ff" $ expectFromBytesErr UTF8 ("", Just InvalidHeader, 0) (fromList [0xff])
            , testCase "80" $ expectFromBytesErr UTF8 ("", Just InvalidHeader, 0) (fromList [0x80])
            , testCase "E2 82 0C" $ expectFromBytesErr UTF8 ("", Just InvalidContinuation, 0) (fromList [0xE2,0x82,0x0c])
            , testCase "30 31 E2 82 0C" $ expectFromBytesErr UTF8 ("01", Just InvalidContinuation, 2) (fromList [0x30,0x31,0xE2,0x82,0x0c])
            ]
        ]
    , testGroup "Lines"
        [ testCase "Hello<LF>Foundation" $ (breakLine "Hello\nFoundation" @?= Right ("Hello", "Foundation"))
        , testCase "Hello<CRLF>Foundation" $ (breakLine "Hello\r\nFoundation" @?= Right ("Hello", "Foundation"))
        , testCase "Hello<LF>Foundation" $ (breakLine (drop 5 "Hello\nFoundation\nSomething") @?= Right ("", "Foundation\nSomething"))
        , testCase "Hello<CR>" $ (breakLine "Hello\r" @?= Left True)
        , testCase "CR" $ (breakLine "\r" @?= Left True)
        , testCase "LF" $ (breakLine "\n" @?= Right ("", ""))
        , testCase "empty" $ (breakLine "" @?= Left False)
        ]
-}
    ]

{-
<API key> :: [Test]
<API key> =
    [ Group "Validation-ASCII7"
        [ Property "fromBytes . toBytes == valid" $ \l ->
            let s = fromList . fromLStringASCII $ l
             in (fromBytes ASCII7 $ toBytes ASCII7 s) === (s, Nothing, mempty)
        , Property "Streaming" $ \(l, randomInts) ->
            let wholeS = fromList . fromLStringASCII $ l
                wholeBA = toBytes ASCII7 wholeS
                reconstruct (prevBa, errs, acc) ba =
                    let ba' = prevBa `mappend` ba
                        (s, merr, nextBa) = fromBytes ASCII7 ba'
                     in (nextBa, merr : errs, s : acc)
                (remainingBa, allErrs, chunkS) = foldl' reconstruct (mempty, [], []) $ chunks randomInts wholeBA
             in (catMaybes allErrs === []) .&&. (remainingBa === mempty) .&&. (mconcat (reverse chunkS) === wholeS)
        ]
    , Group "Cases"
        [ Group "Invalid-ASCII7"
            [ testCase "ff" $ expectFromBytesErr ASCII7 ("", Just BuildingFailure, 0) (fromList [0xff])
            ]
        ]
    ]

expectFromBytesErr :: Encoding -> ([Char], Maybe ValidationFailure, CountOf Word8) -> UArray Word8 -> IO ()
expectFromBytesErr enc (expectedString,expectedErr,positionErr) ba = do
    let x = fromBytes enc ba
        (s', merr, ba') = x
    assertEqual "error" expectedErr merr
    assertEqual "remaining" (drop positionErr ba) ba'
    assertEqual "string" expectedString (toList s')
-}

-- | Assert that converting the given 'String' to a list of 'Char's yields
-- exactly the expected characters.
<API key> :: String -> [Char] -> String -> Test
<API key> name chars str = Property name $ chars === toList str

-- | Split a collection into chunks whose sizes are drawn from the given
-- random list; cycles through the sizes (falling back to [1..]) until the
-- input is exhausted.
chunks :: Sequential c => RandomList -> c -> [c]
chunks (RandomList randomInts) = loop (randomInts <> [1..])
  where
    loop rx c
        | null c    = []
        | otherwise = case rx of
            r:rs -> let (c1,c2) = splitAt (CountOf r) c in c1 : loop rs c2
            []   -> loop randomInts c
// <API key>.h

#import "<API key>.h"

// Empty subclass: declares no API of its own and inherits all behavior
// from its superclass. Presumably exists to provide a distinct class
// identity (e.g. for appearance proxies, categories, or runtime lookup)
// — confirm against callers.
@interface <API key> : <API key>

@end
<h1>Event Pages</h1> <p> Event pages are very similar to <a href="background_pages.html">background pages</a>, with one important difference: event pages are loaded only when they are needed. When the event page is not actively doing something, it is unloaded, freeing memory and other system resources. </p> {{?is_apps}} <p> Chrome Apps always use event pages instead of background pages. It is not possible for a Chrome App to have a persistent background page. </p> {{/is_apps}} <p> Event pages are available in the stable channel as of Chrome 22, and the performance advantages are significant, especially on low-power devices. Please prefer them to persistent background pages whenever possible for new development and begin <a href="#transition">migrating existing background pages</a> to this new model. </p> <h2 id="manifest">Manifest</h2> <p> Register your event page in the <a href="manifest.html">extension manifest</a>: </p> {{^is_apps}} <pre data-filename="manifest.json"> { "name": "My extension", <b>"background": { "scripts": ["eventPage.js"], "persistent": false }</b>, } </pre> <p> Notice that without the "persistent" key, you have a regular background page. Persistence is what differentiates an event page from a background page. </p> {{/is_apps}} {{?is_apps}} <pre data-filename="manifest.json"> { "name": "My app", "app": { <b>"background": { "scripts": ["eventPage.js"] }</b> } } </pre> {{/is_apps}} <h2 id="lifetime">Lifetime</h2> <p> The event page is loaded when it is "needed", and unloaded when it goes idle again. Here are some examples of things that will cause the event page to load: </p> <ul> <li>The app or extension is first installed or is updated to a new version (in order to <a href="#registration">register for events</a>). <li>The event page was listening for an event, and the event is dispatched. 
<li>A content script or other extension <a href="messaging.html">sends a message.</a> <li>Another view in the extension (for example, a popup) calls <code>$(ref:runtime.getBackgroundPage)</code>. </ul> <p> Once it has been loaded, the event page will stay running as long as it is active (for example, calling an extension API or issuing a network request). Additionally, the event page will not unload until all visible views (for example, popup windows) are closed and all message ports are closed. Note that opening a view does not cause the event page to load, but only prevents it from closing once loaded. </p> <p> Make sure your event page closes as soon as the event that opened it is processed. You can observe the lifetime of your event page by opening Chrome's task manager. You can see when your event page loads and unloads by observing when an entry for your extension appears in the list of processes. </p> <p> Once the event page has been idle a short time (a few seconds), the <code>$(ref:runtime.onSuspend)</code> event is dispatched. The event page has a few more seconds to handle this event before it is forcibly unloaded. If during this time an event occurs which would normally cause the event page to be loaded, the suspend is canceled and the <code>$(ref:runtime.onSuspendCanceled)</code> event is dispatched. </p> <h2 id="registration">Event registration</h2> <p> Chrome keeps track of events that an app or extension has added listeners for. When it dispatches such an event, the event page is loaded. Conversely, if the app or extension removes all of its listeners for an event by calling <code>removeListener</code>, Chrome will no longer load its event page for that event. </p> <p> Because the listeners themselves only exist in the context of the event page, you must use <code>addListener</code> each time the event page loads; only doing so at <code>$(ref:runtime.onInstalled)</code> by itself is insufficient. 
</p>

<p>
For an example of event registration in action, you can view the
<a href="http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/examples/extensions/gmail/">Google Mail Checker</a> extension.
</p>

<h2 id="transition">Convert background page to event page</h2>

<p>
Follow this checklist to convert your extension's (persistent) background
page to an event page.

<ol>
  <li>Add <code>"persistent": false</code> to your manifest as shown above.

  <li>If your extension uses <code>window.setTimeout()</code> or
  <code>window.setInterval()</code>, switch to using the
  <a href="alarms.html">alarms API</a> instead. DOM-based timers won't be
  honored if the event page shuts down.

  <li>Similarly, other asynchronous HTML5 APIs like notifications and
  geolocation will not complete if the event page shuts down. Instead, use
  equivalent extension APIs, like
  <a href="notifications.html">notifications</a>.

  <li>If your extension uses <code>$(ref:extension.getBackgroundPage)</code>,
  switch to <code>$(ref:runtime.getBackgroundPage)</code> instead. The newer
  method is asynchronous so that it can start the event page if necessary
  before returning it.
</ol>
</p>

<h2 id="best-practices">Best practices when using event pages</h2>

<p>
Keep these tips in mind when using event pages to avoid common subtle
pitfalls.

<ol>
  <li>Register to receive any events your extension is interested in
  each time the event page is loaded. The event page will be loaded once
  for each new version of your extension. After that it will only be
  loaded to deliver events you have registered for. This generally means that
  your event listeners should be added at the top level scope of the event
  page, otherwise they may not be available when the event page reloads.

  <li>If you need to do some initialization when your extension is
  installed or upgraded, listen to the
  <code>$(ref:runtime.onInstalled)</code> event.
This is a good place to register for <a href="<API key>.html"><API key></a> rules, <a href="contextMenus.html">contextMenu</a> entries, and other such one-time initialization. <li>If you need to keep runtime state in memory throughout a browser session, use the <a href="storage.html">storage API</a> or IndexedDB. Since the event page does not stay loaded for long, you can no longer rely on global variables for runtime state. <li>Use <a href="events.html#filtered">event filters</a> to restrict your event notifications to the cases you care about. For example, if you listen to the <code>$(ref:tabs.onUpdated)</code> event, try using the <code>$(ref:webNavigation.onCompleted)</code> event with filters instead (the tabs API does not support filters). That way, your event page will only be loaded for events that interest you. <li>Listen to the <code>$(ref:runtime.onSuspend)</code> event if you need to do last second cleanup before your event page is shut down. However, we recommend persisting periodically instead. That way if your extension crashes without receiving <code>onSuspend</code>, no data will typically be lost. <li>If you're using <a href="messaging.html">message passing</a>, be sure to close unused message ports. The event page will not shut down until all message ports are closed. <li>If you're using the <a href="contextMenus.html">context menus</a> API, pass a string <code>id</code> parameter to <code>$(ref:contextMenus.create)</code>, and use the <code>$(ref:contextMenus.onClicked)</code> callback instead of an <code>onclick</code> parameter to <code>$(ref:contextMenus.create)</code>. <li>Remember to test that your event page works properly when it is unloaded and then reloaded, which only happens after several seconds of inactivity. 
Common mistakes include doing unnecessary work at page load time (when it should only be done when the extension is installed); setting an alarm at page load time (which resets any previous alarm); or not adding event listeners at page load time. </ol> </p>
# 'make depend' uses makedepend to automatically generate dependencies
# (dependencies are added to end of Makefile)
# 'make'        build executable file 'mycc'
# 'make clean'  removes all .o and executable files

SRCDIR=.
OBJDIR=.

# define custom target name
TARGNAME=lcmmidi-plane2

# define the C source files
SRCS = lcmmidi_to_plane2.c

# define the C object files
# This uses Suffix Replacement within a macro:
OBJS = $(SRCS:.c=.o)

OBJ = $(patsubst %,$(OBJDIR)/%,$(OBJS))
SRC = $(patsubst %,$(SRCDIR)/%,$(SRCS))

LCMDIR=../../LCM/
LCMLIB=../../LCM/lib/libtypes.a

# define the C compiler to use
#CC = gcc

# define any compile-time flags
CFLAGS=`pkg-config --cflags lcm` #-Wall -ftree-vectorize -mfloat-abi=softfp -fomit-frame-pointer -funroll-loops -fno-math-errno -ffinite-math-only -fno-signed-zeros -ffast-math
LDFLAGS=`pkg-config --libs lcm glib-2.0 gthread-2.0`

# define any directories containing header files other than /usr/include
# Fix: was "-I/$(LCMDIR)", which expands to the absolute path "/../../LCM/"
# (i.e. "/LCM") — the relative LCM directory was never on the include path.
INCLUDES = -I$(LCMDIR)

# define library paths in addition to /usr/lib

# define any libraries to link into executable:
LIBS = -lm -lrt

#define the main target
MAIN = $(TARGNAME)

# The following part of the makefile is generic; it can be used to
# build any executable just by changing the definitions above and by
# deleting dependencies appended to the file from 'make depend'

.PHONY: depend clean

all: $(MAIN)

$(MAIN): $(OBJ) $(LCMLIB)
	$(CC) $(CFLAGS) $(INCLUDES) -o $(MAIN) $(OBJ) $(LCMLIB) $(LDFLAGS) $(LIBS) #$(MATLIB) $(TLIB)

# this is a suffix replacement rule for building .o's from .c's
$(OBJDIR)/%.o: $(SRCDIR)/%.c
	$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@

clean:
	$(RM) *~ $(MAIN)
	cd $(OBJDIR); $(RM) *.o *~
# Shared example group, run once per Tempfile method name (cmd) that is
# expected to delete the underlying file (e.g. #unlink / #delete).
shared :tempfile_unlink do |cmd|
  describe "Tempfile##{cmd}" do
    before(:each) do
      @tempfile = Tempfile.new("specs")
    end

    after(:each) do
      @tempfile.close
    end

    # ruby_bug guard: only exercised on Ruby versions where the underlying
    # MRI defect (present through 1.8.6) is fixed.
    ruby_bug "", "1.8.6" do
      it "unlinks self" do
        # The method should delegate to File.unlink with its own path.
        File.should_receive(:unlink).with(@tempfile.path)
        @tempfile.send(cmd)
      end
    end
  end
end
{-# LANGUAGE OverloadedStrings #-}

module Web.Server
    ( runServer
    , formalizerApp -- Exposed for testing.
    ) where

import Network.Wai.Middleware.RequestLogger
import Network.Wai.Middleware.Static
import Web.Actions as Action
import Web.Spock.Safe
import Web.Types

-- | Start the Spock server described by the given configuration.
runServer :: AppConfig -> IO ()
runServer conf = runSpock (cPort conf) (spock spockCfg formalizerApp)
  where
    spockCfg = defaultSpockCfg Nothing PCNoDatabase appState
    appState = AppState (cPath conf) (cSMTP conf)

-- | Directory that holds static assets such as .js and .css files.
staticPath :: String
staticPath = "web/static"

-- | Request logging plus static-file serving (dot-segments disallowed).
appMiddleware :: FormalizeApp ()
appMiddleware =
    middleware logStdout >>
    middleware (staticPolicy (noDots >-> addBase staticPath))

-- | HTTP routes of the application, with a catch-all 404 handler.
appRoutes :: FormalizeApp ()
appRoutes = do
    get root Action.home
    post "/submit" Action.submit
    hookAny GET Action.notFound

-- | The complete application: middleware installed before the routes.
formalizerApp :: FormalizeApp ()
formalizerApp = do
    appMiddleware
    appRoutes
// Generated by CoffeeScript 2.6.1 // # `nikita.ssh.keygen` // Generates keys for use by SSH protocol version 2. // ## Example // Force the generation of a key compatible with SSH2. For example in OSX Mojave, // the default export format is RFC4716. // const {$status} = await nikita.tools.ssh.keygen({ // bits: 2048, // comment: 'my@email.com', // target: './id_rsa', // key_format: 'PEM' // console.info(`Key was generated: ${$status}`) // ## Schema definitions var definitions, handler; definitions = { config: { type: 'object', properties: { 'bits': { type: 'number', default: 4096, description: `Specifies the number of bits in the key to create.` }, 'comment': { type: 'string', description: `Comment such as a name or email.` }, 'key_format': { type: 'string', description: `Specify a key format. The supported key formats are: \`RFC4716\` (RFC 4716/SSH2 public or private key), \`PKCS8\` (PEM PKCS8 public key) or \`PEM\` (PEM public key).` }, 'passphrase': { type: 'string', default: '', description: `Key passphrase, empty string for no passphrase.` }, 'target': { type: 'string', description: `Path of the generated private key.` }, 'type': { type: 'string', default: 'rsa', description: `Type of key to create.` } }, required: ['target'] } }; // ## Handler handler = async function({ config, tools: {path} }) { var ref; if (config.key_format && ((ref = config.key_format) !== 'RFC4716' && ref !== 'PKCS8' && ref !== 'PEM')) { throw Error(`Invalid Option: key_format must be one of RFC4716, PKCS8 or PEM, got ${JSON.stringify(config.key_format)}`); } await this.fs.mkdir({ target: `${path.dirname(config.target)}` }); return (await this.execute({ $unless_exists: `${config.target}`, command: [ 'ssh-keygen', "-q", // Silence `-t ${config.type}`, `-b ${config.bits}`, config.key_format ? `-m ${config.key_format}` : void 0, config.comment ? 
`-C '${config.comment.replace('\'', '\\\'')}'` : void 0, `-N '${config.passphrase.replace('\'', '\\\'')}'`, `-f ${config.target}` ].join(' ') })); }; // ## Exports module.exports = { handler: handler, metadata: { definitions: definitions } };
<?php /** * EN-Revision: 16.Jul.2013 */ return array( '' => array('plural_forms' => 'nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);'), // Zend\I18n\Validator\Alnum "Invalid type given. String, integer or float expected" => "Неправильний тип даних. Значення має бути рядком, цілим числом або числом з плаваючою комою", "The input contains characters which are non alphabetic and no digits" => "Значення містить символи, які не є літерами або цифрами", "The input is an empty string" => "Значення є порожнім рядком", // Zend\I18n\Validator\Alpha "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", "The input contains non alphabetic characters" => "Значення містить символи, які не є літерами", "The input is an empty string" => "Значення є порожнім рядком", // Zend\I18n\Validator\DateTime "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", "The input does not appear to be a valid datetime" => "Значення є некоректною датою", // Zend\I18n\Validator\Float "Invalid type given. String, integer or float expected" => "Неправильний тип даних. Значення має бути рядком, цілим числом або числом з плаваючою комою", "The input does not appear to be a float" => "Значення не є числом з плаваючою комою", // Zend\I18n\Validator\Int "Invalid type given. String or integer expected" => "Неправильний тип даних. Значення має бути рядком або цілим числом", "The input does not appear to be an integer" => "Значення не є цілим числом", // Zend\I18n\Validator\PhoneNumber "The input does not match a phone number format" => "Значення не відповідає формату телефонного номера", "The country provided is currently unsupported" => "Обрана країна наразі не підтримується", "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", // Zend\I18n\Validator\PostCode "Invalid type given. String or integer expected" => "Неправильний тип даних. 
Значення має бути рядком або цілим числом", "The input does not appear to be a postal code" => "Значення не є поштовим індексом", "An exception has been raised while validating the input" => "Під час валідації значення згенеровано виняток", // Zend\Validator\Barcode "The input failed checksum validation" => "Значення не пройшло перевірку контрольної суми", "The input contains invalid characters" => "Значення містить неприпустимі символи", "The input should have a length of %length% characters" => "Значення має мати довжину в %length% символів", "Invalid type given. String expected" => "Неправильний тип даних, значення має бути рядком", // Zend\Validator\Between "The input is not between '%min%' and '%max%', inclusively" => "Значення лежить за межами діапазону '%min%' - '%max%' (включно)", "The input is not strictly between '%min%' and '%max%'" => "Значення лежить за межами діапазону '%min%' - '%max%' (виключно)", // Zend\Validator\Callback "The input is not valid" => "Значення є неправильним", "An exception has been raised within the callback" => "В зворотньому виклику згенеровано виняток", // Zend\Validator\CreditCard "The input seems to contain an invalid checksum" => "Значення має неправильну контрольну суму", "The input must contain only digits" => "Значення має містити тільки цифри", "Invalid type given. String expected" => "Неправильний тип даних. 
Значення має бути рядком", "The input contains an invalid amount of digits" => "Значення містить неприпустиму кількість цифр", "The input is not from an allowed institute" => "Значення не належить до дозволених платіжних систем", "The input seems to be an invalid credit card number" => "Значення не є правильним номером банківської картки", "An exception has been raised while validating the input" => "Під час валідації значення згенеровано виняток", // Zend\Validator\Csrf "The form submitted did not originate from the expected site" => "Надіслана форма не походить з очікуваного сайту", // Zend\Validator\Date "Invalid type given. String, integer, array or DateTime expected" => "Неправильний тип даних. Значення має бути рядком, цілим числом, масивом або об'єктом Zend_Date", "The input does not appear to be a valid date" => "Значення не є коректною датою", "The input does not fit the date format '%format%'" => "Значення не відповідає формату дати '%format%'", // Zend\Validator\DateStep "The input is not a valid step" => "Значення не є коректним кроком", // Zend\Validator\Db\AbstractDb "No record matching the input was found" => "Не знайдено записів, що відповідають значенню", "A record matching the input was found" => "Знайдено запис, що відповідає значенню", // Zend\Validator\Digits "The input must contain only digits" => "Значення має містити тільки цифри", "The input is an empty string" => "Значення є порожнім рядком", "Invalid type given. String, integer or float expected" => "Неправильний тип даних. Значення має бути рядком, цілим числом або числом з плаваючою комою", // Zend\Validator\EmailAddress "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", "The input is not a valid email address. Use the basic format local-part@hostname" => "Значення не є допустимою адресою електронної пошти. 
Використовуйте стандартний формат ім'я@домен", "'%hostname%' is not a valid hostname for the email address" => "'%hostname%' не є допустимим ім'ям хосту для адреси '%value%'", "'%hostname%' does not appear to have any valid MX or A records for the email address" => "'%hostname%' не має коректного MX- або A-запису про адресу електронної пошти", "'%hostname%' is not in a routable network segment. The email address should not be resolved from public network." => "'%hostname%' не є маршрутизованим сегментом мережі. Адреса електронної пошти не має бути отримана з публічної мережі.", "'%localPart%' can not be matched against dot-atom format" => "'%localPart%' не відповідає формату dot-atom", "'%localPart%' can not be matched against quoted-string format" => "'%localPart%' не відповідає формату quoted-string", "'%localPart%' is not a valid local part for the email address" => "'%localPart%' не є допустимим ім'ям для адреси електронної пошти", "The input exceeds the allowed length" => "Значення перевищує дозволену довжину", // Zend\Validator\Explode "Invalid type given" => "Неправильний тип даних", // Zend\Validator\File\Count "Too many files, maximum '%max%' are allowed but '%count%' are given" => "Занадто багато файлів, дозволено максимум '%max%', а отримано - '%count%'", "Too few files, minimum '%min%' are expected but '%count%' are given" => "Занадто мало файлів, дозволено мінімум '%min%', а отримано - '%count%'", // Zend\Validator\File\Crc32 "File does not match the given crc32 hashes" => "Файл не відповідає заданому crc32-хешу", "A crc32 hash could not be evaluated for the given file" => "Неможливо обчислити crc32-хеш для даного файлу", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\ExcludeExtension "File has an incorrect extension" => "Файл має недопустиме розширення", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\Exists "File does 
not exist" => "Файл не існує", // Zend\Validator\File\Extension "File has an incorrect extension" => "Файл має неправильне розширення", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\FilesSize "All files in sum should have a maximum size of '%max%' but '%size%' were detected" => "Загальний розмір файлів не повинен перевищувати '%max%', виявлено '%size%'", "All files in sum should have a minimum size of '%min%' but '%size%' were detected" => "Загальний розмір файлів не повинен бути меншим за '%min%', виявлено '%size%'", "One or more files can not be read" => "Неможливо прочитати один чи декілька файлів", // Zend\Validator\File\Hash "File does not match the given hashes" => "Файл не відповідає вказаному хешу", "A hash could not be evaluated for the given file" => "Неможливо обчислити хеш для вказаного файлу", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\ImageSize "Maximum allowed width for image should be '%maxwidth%' but '%width%' detected" => "Максимальна допустима ширина для зображення складає '%maxwidth%', виявлено '%width%'", "Minimum expected width for image should be '%minwidth%' but '%width%' detected" => "Мінімальна очікувана ширина для зображення складає '%minwidth%', виявлено '%width%'", "Maximum allowed height for image should be '%maxheight%' but '%height%' detected" => "Максимальна допустима висота для зображення складає '%maxheight%', виявлено '%height%'", "Minimum expected height for image should be '%minheight%' but '%height%' detected" => "Мінімальна очікувана висота для зображення складає '%minheight%', виявлено '%height%'", "The size of image could not be detected" => "Неможливо визначити розмір зображення", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\IsCompressed "File is not compressed, '%type%' detected" => "Файл не є стиснутим, виявлено тип 
'%type%'", "The mimetype could not be detected from the file" => "Неможливо визначити MIME-тип із файлу", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\IsImage "File is no image, '%type%' detected" => "Файл не є зображенням, виявлено тип '%type%'", "The mimetype could not be detected from the file" => "Неможливо визначити MIME-тип із файлу", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\Md5 "File does not match the given md5 hashes" => "Файл не відповідає вказаному md5-хешу", "An md5 hash could not be evaluated for the given file" => "Неможливо обчислити md5-хеш для вказаного файлу", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\MimeType "File has an incorrect mimetype of '%type%'" => "Файл має неправильний MIME-тип '%type%'", "The mimetype could not be detected from the file" => "Неможливо визначити MIME-тип із файлу", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\NotExists "File exists" => "Файл вже існує", // Zend\Validator\File\Sha1 "File does not match the given sha1 hashes" => "Файл не відповідає sha1-хешу", "A sha1 hash could not be evaluated for the given file" => "Неможливо обчислити sha1-хеш для вказаного файлу", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\Size "Maximum allowed size for file is '%max%' but '%size%' detected" => "Максимальний дозволений розмір файлу складає '%max%', виявлено '%size%'", "Minimum expected size for file is '%min%' but '%size%' detected" => "Мінімальний очікуваний розмір файлу складає '%min%', виявлено '%size%'", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\File\Upload "File '%value%' exceeds the defined ini size" => "Розмір 
файлу '%value%' перевищує дозволений, вказаний в php.ini", "File '%value%' exceeds the defined form size" => "Розмір файлу '%value%' перевищує дозволений, вказаний у формі", "File '%value%' was only partially uploaded" => "Файл '%value%' було завантажено тільки частково", "File '%value%' was not uploaded" => "Файл '%value%' не було завантажено", "No temporary directory was found for file '%value%'" => "Не знайдено тимчасову теку для файлу '%value%'", "File '%value%' can't be written" => "Файл '%value%' не може бути записаний", "A PHP extension returned an error while uploading the file '%value%'" => "PHP-розширення повернуло помилку під час завантаження файлу '%value%'", "File '%value%' was illegally uploaded. This could be a possible attack" => "Файл '%value%' завантажено протиправно. Можлива атака", "File '%value%' was not found" => "Файл '%value%' не знайдено", "Unknown error while uploading file '%value%'" => "Під час завантаження файлу '%value%' виникла невідома помилка", // Zend\Validator\File\UploadFile "File exceeds the defined ini size" => "Розмір файлу перевищує дозволений, вказаний в php.ini", "File exceeds the defined form size" => "Розмір файлу перевищує дозволений, вказаний у формі", "File was only partially uploaded" => "Файл було завантажено тільки частково", "File was not uploaded" => "Файл не було завантажено", "No temporary directory was found for file" => "Не знайдено тимчасову теку для файлу", "File can't be written" => "Файл не може бути записаний", "A PHP extension returned an error while uploading the file" => "PHP-розширення повернуло помилку під час завантаження файлу", "File was illegally uploaded. This could be a possible attack" => "Файл завантажено протиправно. 
Можлива атака", "File was not found" => "Файл не знайдено", "Unknown error while uploading file" => "Під час завантаження файлу виникла невідома помилка", // Zend\Validator\File\WordCount "Too many words, maximum '%max%' are allowed but '%count%' were counted" => "Занадто багато слів: дозволено максимум '%max%', виявлено '%count%'", "Too few words, minimum '%min%' are expected but '%count%' were counted" => "Занадто мало слів: дозволено мінімум '%min%', виявлено '%count%'", "File is not readable or does not exist" => "Файл не вдається прочитати або він не існує", // Zend\Validator\GreaterThan "The input is not greater than '%min%'" => "Значення не є більшим за '%min%'", "The input is not greater or equal than '%min%'" => "Значення не дорівнює і не є більшим за '%min%'", // Zend\Validator\Hex "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", "The input contains non-hexadecimal characters" => "Значення містить не тільки шістнадцяткові символи", // Zend\Validator\Hostname "The input appears to be a DNS hostname but the given punycode notation cannot be decoded" => "Значення є DNS-ім’ям хосту, але вказане значення не може бути перетворене в припустимий для DNS набір символів", "Invalid type given. String expected" => "Неправильний тип даних. 
Значення має бути рядком", "The input appears to be a DNS hostname but contains a dash in an invalid position" => "Значення є DNS-ім’ям хосту, але знак '-' знаходиться в неправильному місці", "The input does not match the expected structure for a DNS hostname" => "Значення не відповідає очікуваній структурі для DNS-імені хосту", "The input appears to be a DNS hostname but cannot match against hostname schema for TLD '%tld%'" => "Значення є DNS-ім’ям хосту, але воно не відповідає шаблону для доменних імен верхнього рівня '%tld%'", "The input does not appear to be a valid local network name" => "Значення не є коректним ім'ям локальної мережі", "The input does not appear to be a valid URI hostname" => "Значення не є коректним URI-ім'ям хосту", "The input appears to be an IP address, but IP addresses are not allowed" => "Значення є IP-адресою, але IP-адреси не дозволені", "The input appears to be a local network name but local network names are not allowed" => "Значення є ім’ям локальної мережі, але імена локальних мереж не дозволені", "The input appears to be a DNS hostname but cannot extract TLD part" => "Значення є DNS-ім’ям хосту, але не вдається визначити домен верхнього рівня", "The input appears to be a DNS hostname but cannot match TLD against known list" => "Значення є DNS-ім’ям хосту, але його не вдається співставити із значенням зі списку відомих доменів верхнього рівня", // Zend\Validator\Iban "Unknown country within the IBAN" => "Невідома IBAN-країна", "Countries outside the Single Euro Payments Area (SEPA) are not supported" => "Країни поза межами Єдиної Зони Платежів у Євро (SEPA) не підтримуються", "The input has a false IBAN format" => "Значення має неправильний IBAN-формат", "The input has failed the IBAN check" => "Значення не пройшло IBAN-перевірку", // Zend\Validator\Identical "The two given tokens do not match" => "Два вказаних значення не співпадають", "No token was provided to match against" => "Не вказано значення для перевірки на 
ідентичність", // Zend\Validator\InArray "The input was not found in the haystack" => "Значення не знайдено в списку допустимих значень", // Zend\Validator\Ip "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", "The input does not appear to be a valid IP address" => "Значення не є коректною IP-адресою", // Zend\Validator\IsInstanceOf "The input is not an instance of '%className%'" => "Значення не є екземпляром '%className%'", // Zend\Validator\Isbn "Invalid type given. String or integer expected" => "Неправильний тип даних. Значення має бути рядком або цілим числом", "The input is not a valid ISBN number" => "Значення не є коректним номером ISBN", // Zend\Validator\LessThan "The input is not less than '%max%'" => "Значення не є меншим за '%max%'", "The input is not less or equal than '%max%'" => "Значення не дорівнює і не є меншим за '%max%'", // Zend\Validator\NotEmpty "Value is required and can't be empty" => "Значення обов'язкове і не може бути порожнім", "Invalid type given. String, integer, float, boolean or array expected" => "Неправильний тип даних. Значення має бути рядком, цілим числом, числом з плаваючою комою або масивом", // Zend\Validator\Regex "Invalid type given. String, integer or float expected" => "Неправильний тип даних. Значення має бути рядком, цілим числом або числом з плаваючою комою", "The input does not match against pattern '%pattern%'" => "Значення не відповідає шаблону '%pattern%'", "There was an internal error while using the pattern '%pattern%'" => "Під час використання шаблону '%pattern%' трапилася внутрішня помилка", // Zend\Validator\Sitemap\Changefreq "The input is not a valid sitemap changefreq" => "Значенння не є коректним для sitemap changefreq", "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", // Zend\Validator\Sitemap\Lastmod "The input is not a valid sitemap lastmod" => "Значення не є коректним для sitemap lastmod", "Invalid type given. 
String expected" => "Неправильний тип даних. Значення має бути рядком", // Zend\Validator\Sitemap\Loc "The input is not a valid sitemap location" => "Значення не є коректним для sitemap location", "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", // Zend\Validator\Sitemap\Priority "The input is not a valid sitemap priority" => "Значення не є коректним для sitemap priority", "Invalid type given. Numeric string, integer or float expected" => "Неправильний тип даних. Значення має бути числовим рядком, цілим числом або числом з плаваючою комою", // Zend\Validator\Step "Invalid value given. Scalar expected" => "Значення є некоректним. Очікується скалярна величина", "The input is not a valid step" => "Значення не є коректним кроком", // Zend\Validator\StringLength "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", "The input is less than %min% characters long" => "Значення має довжину, меншу за %min% символів", "The input is more than %max% characters long" => "Значення має довжину, більшу за %max% символів", // Zend\Validator\Uri "Invalid type given. String expected" => "Неправильний тип даних. Значення має бути рядком", "The input does not appear to be a valid Uri" => "Значення не є коректним Uri", );
/* TEMPLATE GENERATED TESTCASE FILE
Filename: <API key>.c
Label Definition File: <API key>.label.xml
Template File: sources-sink-54b.tmpl.c */
/*
 * @description
 * CWE: 121 Stack Based Buffer Overflow
 * BadSource: Initialize data as a large string
 * GoodSource: Initialize data as a small string
 * Sink: snprintf
 * BadSink : Copy data to string using snprintf
 * Flow Variant: 54 Data flow: data passed as an argument from one function through three others to a fifth; all five functions are in different source files
 *
 * */

/* NOTE(review): the function identifiers in this file appear to have been
 * redacted/sanitized; the bodies below are pure pass-through links in the
 * 54a..54e call chain of this Juliet flow variant — each forwards `data`
 * unchanged to the next file's function (declared extern here). */

#include "std_testcase.h"

#include <wchar.h>

#ifdef _WIN32
#define SNPRINTF _snprintf
#else
#define SNPRINTF snprintf
#endif

/* all the sinks are the same, we just want to know where the hit originated if a tool flags one */

#ifndef OMITBAD

/* bad function declaration */
void <API key>(char * data);

/* Forwards the (bad-source) data one step down the call chain. */
void <API key>(char * data)
{
    <API key>(data);
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* good function declaration */
void <API key>(char * data);

/* goodG2B uses the GoodSource with the BadSink */
void <API key>(char * data)
{
    <API key>(data);
}

#endif /* OMITGOOD */
// Use of this source code is governed by a BSD-style // This file contains the implementation of the 'gomovepkg' command // whose main function is in golang.org/x/tools/cmd/gomovepkg. package rename // TODO(matloob): // - think about what happens if the package is moving across version control systems. // - think about windows, which uses "\" as its directory separator. // - dot imports are not supported. Make sure it's clearly documented. import ( "bytes" "fmt" "go/ast" "go/build" "log" "os" "os/exec" "path" "path/filepath" "regexp" "runtime" "strconv" "strings" "sync" "text/template" "golang.org/x/tools/go/buildutil" "golang.org/x/tools/go/loader" "golang.org/x/tools/refactor/importgraph" ) // Move, given a package path and a destination package path, will try // to move the given package to the new path. The Move function will // first check for any conflicts preventing the move, such as a // package already existing at the destination package path. If the // move can proceed, it builds an import graph to find all imports of // the packages whose paths need to be renamed. This includes uses of // the subpackages of the package to be moved as those packages will // also need to be moved. It then renames all imports to point to the // new paths, and then moves the packages to their new paths. func Move(ctxt *build.Context, from, to, moveTmpl string) error { srcDir, err := srcDir(ctxt, from) if err != nil { return err } // This should be the only place in the program that constructs // file paths. // TODO(matloob): test on Microsoft Windows. fromDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(from)) toDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(to)) toParent := filepath.Dir(toDir) if !buildutil.IsDir(ctxt, toParent) { return fmt.Errorf("parent directory does not exist for path %s", toDir) } // Build the import graph and figure out which packages to update. 
fwd, rev, errors := importgraph.Build(ctxt) if len(errors) > 0 { fmt.Fprintf(os.Stderr, "While scanning Go workspace:\n") for path, err := range errors { fmt.Fprintf(os.Stderr, "Package %q: %s.\n", path, err) } return fmt.Errorf("failed to construct import graph") } // statements need updating. affectedPackages := map[string]bool{from: true} destinations := map[string]string{} // maps old dir to new dir for pkg := range subpackages(ctxt, srcDir, from) { for r := range rev[pkg] { affectedPackages[r] = true } destinations[pkg] = strings.Replace(pkg, // Ensure directories have a trailing "/". filepath.Join(from, ""), filepath.Join(to, ""), 1) } // Load all the affected packages. iprog, err := loadProgram(ctxt, affectedPackages) if err != nil { return err } // Prepare the move command, if one was supplied. var cmd string if moveTmpl != "" { if cmd, err = moveCmd(moveTmpl, fromDir, toDir); err != nil { return err } } m := mover{ ctxt: ctxt, fwd: fwd, rev: rev, iprog: iprog, from: from, to: to, fromDir: fromDir, toDir: toDir, affectedPackages: affectedPackages, destinations: destinations, cmd: cmd, } if err := m.checkValid(); err != nil { return err } m.move() return nil } // srcDir returns the absolute path of the srcdir containing pkg. func srcDir(ctxt *build.Context, pkg string) (string, error) { for _, srcDir := range ctxt.SrcDirs() { path := buildutil.JoinPath(ctxt, srcDir, pkg) if buildutil.IsDir(ctxt, path) { return srcDir, nil } } return "", fmt.Errorf("src dir not found for package: %s", pkg) } // subpackages returns the set of packages in the given srcDir whose // import paths start with dir. func subpackages(ctxt *build.Context, srcDir string, dir string) map[string]bool { var mu sync.Mutex subs := map[string]bool{dir: true} // Find all packages under srcDir whose import paths start with dir. 
buildutil.ForEachPackage(ctxt, func(pkg string, err error) { if err != nil { log.Fatalf("unexpected error in ForEackPackage: %v", err) } if !strings.HasPrefix(pkg, path.Join(dir, "")) { return } p, err := ctxt.Import(pkg, "", build.FindOnly) if err != nil { log.Fatalf("unexpected: package %s can not be located by build context: %s", pkg, err) } if p.SrcRoot == "" { log.Fatalf("unexpected: could not determine srcDir for package %s: %s", pkg, err) } if p.SrcRoot != srcDir { return } mu.Lock() subs[pkg] = true mu.Unlock() }) return subs } type mover struct { // iprog contains all packages whose contents need to be updated // with new package names or import paths. iprog *loader.Program ctxt *build.Context // fwd and rev are the forward and reverse import graphs fwd, rev importgraph.Graph // from and to are the source and destination import // paths. fromDir and toDir are the source and destination // absolute paths that package source files will be moved between. from, to, fromDir, toDir string // affectedPackages is the set of all packages whose contents need // to be updated to reflect new package names or import paths. affectedPackages map[string]bool // destinations maps each subpackage to be moved to its // destination path. destinations map[string]string // cmd, if not empty, will be executed to move fromDir to toDir. 
cmd string } func (m *mover) checkValid() error { const prefix = "invalid move destination" match, err := regexp.MatchString("^[_\\pL][_\\pL\\p{Nd}]*$", path.Base(m.to)) if err != nil { panic("regexp.MatchString failed") } if !match { return fmt.Errorf("%s: %s; gomvpkg does not support move destinations "+ "whose base names are not valid go identifiers", prefix, m.to) } if buildutil.FileExists(m.ctxt, m.toDir) { return fmt.Errorf("%s: %s conflicts with file %s", prefix, m.to, m.toDir) } if buildutil.IsDir(m.ctxt, m.toDir) { return fmt.Errorf("%s: %s conflicts with directory %s", prefix, m.to, m.toDir) } for _, toSubPkg := range m.destinations { if _, err := m.ctxt.Import(toSubPkg, "", build.FindOnly); err == nil { return fmt.Errorf("%s: %s; package or subpackage %s already exists", prefix, m.to, toSubPkg) } } return nil } // moveCmd produces the version control move command used to move fromDir to toDir by // executing the given template. func moveCmd(moveTmpl, fromDir, toDir string) (string, error) { tmpl, err := template.New("movecmd").Parse(moveTmpl) if err != nil { return "", err } var buf bytes.Buffer err = tmpl.Execute(&buf, struct { Src string Dst string }{fromDir, toDir}) return buf.String(), err } func (m *mover) move() error { filesToUpdate := make(map[*ast.File]bool) // Change the moved package's "package" declaration to its new base name. pkg, ok := m.iprog.Imported[m.from] if !ok { log.Fatalf("unexpected: package %s is not in import map", m.from) } newName := filepath.Base(m.to) for _, f := range pkg.Files { f.Name.Name = newName // change package decl filesToUpdate[f] = true } // Update imports of that package to use the new import name. // itself will. for p := range m.rev[m.from] { _, err := importName( m.iprog, m.iprog.Imported[p], m.from, path.Base(m.from), newName) if err != nil { return err } } // For each affected package, rewrite all imports of the package to // use the new import path. 
	// Rewrite import specs in every affected package (the moved package
	// itself was already handled above).
	for ap := range m.affectedPackages {
		if ap == m.from {
			continue
		}
		info, ok := m.iprog.Imported[ap]
		if !ok {
			log.Fatalf("unexpected: package %s is not in import map", ap)
		}
		for _, f := range info.Files {
			for _, imp := range f.Imports {
				importPath, _ := strconv.Unquote(imp.Path.Value)
				if newPath, ok := m.destinations[importPath]; ok {
					imp.Path.Value = strconv.Quote(newPath)
					// Effective local name before the rewrite: an explicit
					// alias if present, else the old path's base.
					oldName := path.Base(importPath)
					if imp.Name != nil {
						oldName = imp.Name.Name
					}
					newName := path.Base(newPath)
					// Keep references compiling: if the implicit name would
					// change, pin the old name as an explicit alias; if an
					// alias merely repeats the new base name, drop it.
					if imp.Name == nil && oldName != newName {
						imp.Name = ast.NewIdent(oldName)
					} else if imp.Name == nil || imp.Name.Name == newName {
						imp.Name = nil
					}
					filesToUpdate[f] = true
				}
			}
		}
	}
	// Write every modified file back to disk.
	for f := range filesToUpdate {
		tokenFile := m.iprog.Fset.File(f.Pos())
		rewriteFile(m.iprog.Fset, f, tokenFile.Name())
	}

	// Move the directories.
	// If either the fromDir or toDir are contained under version control it is
	// the user's responsibility to provide a custom move command that updates
	// version control to reflect the move.
	// TODO(matloob): If the parent directory of toDir does not exist, create it.
	// For now, it's required that it does exist.
	if m.cmd != "" {
		// TODO(matloob): Verify that the windows and plan9 cases are correct.
		var cmd *exec.Cmd
		switch runtime.GOOS {
		case "windows":
			cmd = exec.Command("cmd", "/c", m.cmd)
		case "plan9":
			cmd = exec.Command("rc", "-c", m.cmd)
		default:
			cmd = exec.Command("sh", "-c", m.cmd)
		}
		cmd.Stderr = os.Stderr
		cmd.Stdout = os.Stdout
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("version control system's move command failed: %v", err)
		}
		return nil
	}

	return moveDirectory(m.fromDir, m.toDir)
}

// moveDirectory is a variable so tests can stub out the filesystem move.
var moveDirectory = func(from, to string) error {
	return os.Rename(from, to)
}
<?xml version="1.0" encoding="ascii"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "DTD/xhtml1-transitional.dtd"> <html xmlns="http: <head> <title>PyQt4.QtGui.QPainter.RenderHints</title> <link rel="stylesheet" href="epydoc.css" type="text/css" /> <script type="text/javascript" src="epydoc.js"></script> </head> <body bgcolor="white" text="black" link="blue" vlink="#204080" alink="#204080"> <table class="navbar" border="0" width="100%" cellpadding="0" bgcolor="#a0c0ff" cellspacing="0"> <tr valign="middle"> <!-- Home link --> <th>&nbsp;&nbsp;&nbsp;<a href="<API key>.html">Home</a>&nbsp;&nbsp;&nbsp;</th> <!-- Tree link --> <th>&nbsp;&nbsp;&nbsp;<a href="module-tree.html">Trees</a>&nbsp;&nbsp;&nbsp;</th> <!-- Index link --> <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th> <!-- Help link --> <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th> <th class="navbar" width="100%"></th> </tr> </table> <table width="100%" cellpadding="0" cellspacing="0"> <tr valign="top"> <td width="100%"> <span class="breadcrumbs"> PyQt4 :: QtGui :: QPainter :: RenderHints :: Class&nbsp;RenderHints </span> </td> <td> <table cellpadding="0" cellspacing="0"> <!-- hide/show private --> <tr><td align="right"><span class="options">[<a href="javascript:void(0);" class="privatelink" onclick="toggle_private();">hide&nbsp;private</a>]</span></td></tr> <tr><td align="right"><span class="options" >[<a href="frames.html" target="_top">frames</a >]&nbsp;|&nbsp;<a href="PyQt4.QtGui.QPainter.RenderHints-class.html" target="_top">no&nbsp;frames</a>]</span></td></tr> </table> </td> </tr> </table> <h1 class="epydoc">Class RenderHints</h1><p class="nomargin-top"></p> <pre class="base-tree"> object | sip.simplewrapper | <strong class="uidshort">QPainter.RenderHints</strong> </pre> <hr /> <p>QPainter.RenderHints(QPainter.RenderHints) QPainter.RenderHints(int) QPainter.RenderHints()</p> <a name="<API key>"></a> <table class="summary" 
border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Instance Methods</span></td> <td align="right" valign="top" ><span class="options">[<a href="#<API key>" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__and__"></a><span class="summary-sig-name">__and__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x&amp;y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__eq__"></a><span class="summary-sig-name">__eq__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x==y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__ge__"></a><span class="summary-sig-name">__ge__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x&gt;=y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span 
class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__gt__"></a><span class="summary-sig-name">__gt__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x&gt;y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__iand__"></a><span class="summary-sig-name">__iand__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x&amp;=y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__int__"></a><span class="summary-sig-name">__int__</span>(<span class="summary-sig-arg">x</span>)</span><br /> int(x)</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__invert__"></a><span class="summary-sig-name">__invert__</span>(<span class="summary-sig-arg">x</span>)</span><br /> ~x</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__ior__"></a><span 
class="summary-sig-name">__ior__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x|=y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__ixor__"></a><span class="summary-sig-name">__ixor__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x^=y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__le__"></a><span class="summary-sig-name">__le__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x&lt;=y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__lt__"></a><span class="summary-sig-name">__lt__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x&lt;y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__ne__"></a><span class="summary-sig-name">__ne__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br 
/> x!=y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__nonzero__"></a><span class="summary-sig-name">__nonzero__</span>(<span class="summary-sig-arg">x</span>)</span><br /> x != 0</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__or__"></a><span class="summary-sig-name">__or__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x|y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__rand__"></a><span class="summary-sig-name">__rand__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> y&amp;x</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__ror__"></a><span class="summary-sig-name">__ror__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> y|x</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span 
class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__rxor__"></a><span class="summary-sig-name">__rxor__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> y^x</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__xor__"></a><span class="summary-sig-name">__xor__</span>(<span class="summary-sig-arg">x</span>, <span class="summary-sig-arg">y</span>)</span><br /> x^y</td> <td align="right" valign="top"> </td> </tr> </table> </td> </tr> <tr> <td colspan="2" class="summary"> <p class="<API key>"><b>Inherited from <code>sip.simplewrapper</code></b>: <code>__init__</code>, <code>__new__</code> </p> <p class="<API key>"><b>Inherited from <code>object</code></b>: <code>__delattr__</code>, <code>__format__</code>, <code>__getattribute__</code>, <code>__hash__</code>, <code>__reduce__</code>, <code>__reduce_ex__</code>, <code>__repr__</code>, <code>__setattr__</code>, <code>__sizeof__</code>, <code>__str__</code>, <code>__subclasshook__</code> </p> </td> </tr> </table> <a name="section-Properties"></a> <table class="summary" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Properties</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-Properties" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr> <td colspan="2" class="summary"> <p 
class="<API key>"><b>Inherited from <code>object</code></b>: <code>__class__</code> </p> </td> </tr> </table>
<table class="navbar" border="0" width="100%" cellpadding="0" bgcolor="#a0c0ff" cellspacing="0">
  <tr valign="middle">
    <!-- Home link -->
    <th>&nbsp;&nbsp;&nbsp;<a href="<API key>.html">Home</a>&nbsp;&nbsp;&nbsp;</th>
    <!-- Tree link -->
    <th>&nbsp;&nbsp;&nbsp;<a href="module-tree.html">Trees</a>&nbsp;&nbsp;&nbsp;</th>
    <!-- Index link -->
    <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th>
    <!-- Help link -->
    <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th>
    <th class="navbar" width="100%"></th>
  </tr>
</table>
<table border="0" cellpadding="0" cellspacing="0" width="100%">
  <tr>
    <td align="left" class="footer">
      Generated by Epydoc 3.0.1 on Tue Jun 14 13:29:16 2016
    </td>
    <td align="right" class="footer">
      <a target="mainFrame" href="http://epydoc.sourceforge.net">http://epydoc.sourceforge.net</a>
    </td>
  </tr>
</table>

<script type="text/javascript">
  <!--
  // Private objects are initially displayed (because if
  // javascript is turned off then we want them to be
  // visible); but by default, we want to hide them.  So hide
  // them unless we have a cookie that says to show them.
  checkCookie();
  // -->
</script>
</body>
</html>
-- Negative test: create a virtual class (vclass) whose column uses the
-- misspelled type name 'numberic' together with AUTO_INCREMENT.
-- The [er] tag below marks the statement as an EXPECTED-ERROR case, so the
-- typo in the type name is deliberate -- do not "correct" it to NUMERIC.
--[er]create a vclass with condition 'numberic auto_increment not null'
create vclass vxoo ( id numberic auto_increment , title varchar(100));
-- Clean up in case the create unexpectedly succeeded.
drop vclass vxoo;
package edu.wpi.first.wpilibj.networktables;

import edu.wpi.first.wpilibj.tables.*;

import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

/**
 * Raw JNI bindings for the "ntcore" NetworkTables native library.
 *
 * <p>Every method is a thin static wrapper over native code.  The static
 * initializer below ensures the native library is loaded exactly once
 * before any of the native methods can be invoked.
 */
public class NetworkTablesJNI {
  // Set once the loader block has run; class initialization is serialized
  // by the JVM, so a plain boolean is sufficient here.
  static boolean libraryLoaded = false;
  // Temp file holding the library when it had to be extracted from the
  // jar; retained so it can be scheduled for deletion on JVM exit.
  static File jniLibrary = null;
  static {
    if (!libraryLoaded) {
      try {
        // First attempt: load from the normal java.library.path.
        System.loadLibrary("ntcore");
      } catch (<API key> e) {
        // Fallback: extract a platform-specific copy bundled inside the
        // jar as a resource (e.g. /Windows/<arch>/ntcore.dll) and load it.
        try {
          String osname = System.getProperty("os.name");
          String resname;
          if (osname.startsWith("Windows"))
            resname = "/Windows/" + System.getProperty("os.arch") + "/";
          else
            resname = "/" + osname + "/" + System.getProperty("os.arch") + "/";
          System.out.println("platform: " + resname);
          if (osname.startsWith("Windows"))
            resname += "ntcore.dll";
          else if (osname.startsWith("Mac"))
            resname += "libntcore.dylib";
          else
            resname += "libntcore.so";
          InputStream is = NetworkTablesJNI.class.getResourceAsStream(resname);
          if (is != null) {
            // create temporary file
            if (System.getProperty("os.name").startsWith("Windows"))
              jniLibrary = File.createTempFile("NetworkTablesJNI", ".dll");
            else if (System.getProperty("os.name").startsWith("Mac"))
              jniLibrary = File.createTempFile("libNetworkTablesJNI", ".dylib");
            else
              jniLibrary = File.createTempFile("libNetworkTablesJNI", ".so");
            // flag for delete on exit
            jniLibrary.deleteOnExit();
            // Copy the resource out to the temp file, then load that copy.
            OutputStream os = new FileOutputStream(jniLibrary);
            byte[] buffer = new byte[1024];
            int readBytes;
            try {
              while ((readBytes = is.read(buffer)) != -1) {
                os.write(buffer, 0, readBytes);
              }
            } finally {
              os.close();
              is.close();
            }
            System.load(jniLibrary.getAbsolutePath());
          } else {
            // No bundled copy for this platform: retry the system loader so
            // the caller sees the original, more descriptive failure.
            System.loadLibrary("ntcore");
          }
        } catch (IOException ex) {
          // Extraction failed; nothing useful can run without the native lib.
          ex.printStackTrace();
          System.exit(1);
        }
      }
      libraryLoaded = true;
    }
  }

  // --- Entry existence / type queries ---
  public static native boolean containsKey(String key);
  public static native int getType(String key);

  // --- Typed put operations (boolean result reported by native code) ---
  public static native boolean putBoolean(String key, boolean value);
  public static native boolean putDouble(String key, double value);
  public static native boolean putString(String key, String value);
  public static native boolean putRaw(String key, byte[] value);
  public static native boolean putRaw(String key, ByteBuffer value, int len);
  public static native boolean putBooleanArray(String key, boolean[] value);
  public static native boolean putDoubleArray(String key, double[] value);
  public static native boolean putStringArray(String key, String[] value);

  // --- "Force" put variants (void; presumably overwrite regardless of
  //     existing type -- confirm against ntcore documentation) ---
  public static native void forcePutBoolean(String key, boolean value);
  public static native void forcePutDouble(String key, double value);
  public static native void forcePutString(String key, String value);
  public static native void forcePutRaw(String key, byte[] value);
  public static native void forcePutRaw(String key, ByteBuffer value, int len);
  public static native void <API key>(String key, boolean[] value);
  public static native void forcePutDoubleArray(String key, double[] value);
  public static native void forcePutStringArray(String key, String[] value);

  // --- Getters that throw (checked exception type redacted above) ---
  public static native Object getValue(String key) throws <API key>;
  public static native boolean getBoolean(String key) throws <API key>;
  public static native double getDouble(String key) throws <API key>;
  public static native String getString(String key) throws <API key>;
  public static native byte[] getRaw(String key) throws <API key>;
  public static native boolean[] getBooleanArray(String key) throws <API key>;
  public static native double[] getDoubleArray(String key) throws <API key>;
  public static native String[] getStringArray(String key) throws <API key>;

  // --- Getters that fall back to a caller-supplied default value ---
  public static native Object getValue(String key, Object defaultValue);
  public static native boolean getBoolean(String key, boolean defaultValue);
  public static native double getDouble(String key, double defaultValue);
  public static native String getString(String key, String defaultValue);
  public static native byte[] getRaw(String key, byte[] defaultValue);
  public static native boolean[] getBooleanArray(String key, boolean[] defaultValue);
  public static native double[] getDoubleArray(String key, double[] defaultValue);
  public static native String[] getStringArray(String key, String[] defaultValue);

  // --- setDefault* family (by name, likely set-if-absent -- verify) ---
  public static native boolean setDefaultBoolean(String key, boolean defaultValue);
  public static native boolean setDefaultDouble(String key, double defaultValue);
  public static native boolean setDefaultString(String key, String defaultValue);
  public static native boolean setDefaultRaw(String key, byte[] defaultValue);
  public static native boolean <API key>(String key, boolean[] defaultValue);
  public static native boolean <API key>(String key, double[] defaultValue);
  public static native boolean <API key>(String key, String[] defaultValue);

  // --- Entry flags and deletion ---
  public static native void setEntryFlags(String key, int flags);
  public static native int getEntryFlags(String key);

  public static native void deleteEntry(String key);
  public static native void deleteAllEntries();

  public static native EntryInfo[] getEntries(String prefix, int types);

  public static native void flush();

  // --- Entry change notifications: listener ids are returned by the
  //     add call and passed back to the remove call ---
  @FunctionalInterface
  public interface <API key> {
    void apply(int uid, String key, Object value, int flags);
  }
  public static native int addEntryListener(String prefix, <API key> listener, int flags);
  public static native void removeEntryListener(int entryListenerUid);

  // --- Connection notifications ---
  @FunctionalInterface
  public interface <API key> {
    void apply(int uid, boolean connected, ConnectionInfo conn);
  }
  public static native int <API key>(<API key> listener, boolean immediateNotify);
  public static native void <API key>(int connListenerUid);

  // --- Remote procedure calls (create/poll variants disabled upstream) ---
  // public static native void createRpc(String key, byte[] def, IRpc rpc);
  // public static native void createRpc(String key, ByteBuffer def, int def_len, IRpc rpc);
  public static native byte[] getRpc(String key) throws <API key>;
  public static native byte[] getRpc(String key, byte[] defaultValue);
  public static native int callRpc(String key, byte[] params);
  public static native int callRpc(String key, ByteBuffer params, int params_len);
  // public static native byte[] <API key>(int callUid);
  // public static native byte[] <API key>(int callUid) throws <API key>;

  // --- Client/server lifecycle and configuration ---
  public static native void setNetworkIdentity(String name);
  public static native void startServer(String persistFilename, String listenAddress, int port);
  public static native void stopServer();
  public static native void startClient();
  public static native void startClient(String serverName, int port);
  public static native void startClient(String[] serverNames, int[] ports);
  public static native void stopClient();
  public static native void setServer(String serverName, int port);
  public static native void setServer(String[] serverNames, int[] ports);
  public static native void startDSClient(int port);
  public static native void stopDSClient();
  public static native void setUpdateRate(double interval);

  public static native ConnectionInfo[] getConnections();

  // --- Persistence of table contents to/from a file ---
  public static native void savePersistent(String filename) throws PersistentException;
  public static native String[] loadPersistent(String filename) throws PersistentException;  // returns warnings

  // Timestamp from the native library (time units defined by ntcore).
  public static native long now();

  // --- Routing of native-side log messages into Java ---
  @FunctionalInterface
  public interface LoggerFunction {
    void apply(int level, String file, int line, String msg);
  }
  public static native void setLogger(LoggerFunction func, int minLevel);
}
/*! @file ilu_dpivotL.c * \brief Performs numerical pivoting * * <pre> * -- SuperLU routine (version 4.0) -- * Lawrence Berkeley National Laboratory * June 30, 2009 * </pre> */ #include <math.h> #include <stdlib.h> #include "slu_ddefs.h" #ifndef SGN #define SGN(x) ((x)>=0?1:-1) #endif int ilu_dpivotL( const int jcol, const double u, /* in - diagonal pivoting threshold */ int *usepr, /* re-use the pivot sequence given by * perm_r/iperm_r */ int *perm_r, /* may be modified */ int diagind, /* diagonal of Pc*A*Pc' */ int *swap, /* in/out record the row permutation */ int *iswap, /* in/out inverse of swap, it is the same as perm_r after the factorization */ int *marker, int *pivrow, /* in/out, as an input if *usepr!=0 */ double fill_tol, /* in - fill tolerance of current column * used for a singular column */ milu_t milu, double drop_sum, /* in - computed in ilu_dcopy_to_ucol() (MILU only) */ GlobalLU_t *Glu, /* modified - global LU data structures */ SuperLUStat_t *stat /* output */ ) { int n; /* number of columns */ int fsupc; /* first column in the supernode */ int nsupc; /* no of columns in the supernode */ int nsupr; /* no of rows in the supernode */ int lptr; /* points to the starting subscript of the supernode */ register int pivptr; int old_pivptr, diag, ptr0; register double pivmax, rtemp; double thresh; double temp; double *lu_sup_ptr; double *lu_col_ptr; int *lsub_ptr; register int isub, icol, k, itemp; int *lsub, *xlsub; double *lusup; int *xlusup; flops_t *ops = stat->ops; int info; /* Initialize pointers */ n = Glu->n; lsub = Glu->lsub; xlsub = Glu->xlsub; lusup = (double *) Glu->lusup; xlusup = Glu->xlusup; fsupc = (Glu->xsup)[(Glu->supno)[jcol]]; nsupc = jcol - fsupc; /* excluding jcol; nsupc >= 0 */ lptr = xlsub[fsupc]; nsupr = xlsub[fsupc+1] - lptr; lu_sup_ptr = &lusup[xlusup[fsupc]]; /* start of the current supernode */ lu_col_ptr = &lusup[xlusup[jcol]]; /* start of jcol in the supernode */ lsub_ptr = &lsub[lptr]; /* start of row indices of the 
supernode */ /* Determine the largest abs numerical value for partial pivoting; Also search for user-specified pivot, and diagonal element. */ pivmax = -1.0; pivptr = nsupc; diag = EMPTY; old_pivptr = nsupc; ptr0 = EMPTY; for (isub = nsupc; isub < nsupr; ++isub) { if (marker[lsub_ptr[isub]] > jcol) continue; /* do not overlap with a later relaxed supernode */ switch (milu) { case SMILU_1: rtemp = fabs(lu_col_ptr[isub] + drop_sum); break; case SMILU_2: case SMILU_3: /* In this case, drop_sum contains the sum of the abs. value */ rtemp = fabs(lu_col_ptr[isub]); break; case SILU: default: rtemp = fabs(lu_col_ptr[isub]); break; } if (rtemp > pivmax) { pivmax = rtemp; pivptr = isub; } if (*usepr && lsub_ptr[isub] == *pivrow) old_pivptr = isub; if (lsub_ptr[isub] == diagind) diag = isub; if (ptr0 == EMPTY) ptr0 = isub; } if (milu == SMILU_2 || milu == SMILU_3) pivmax += drop_sum; /* Test for singularity */ if (pivmax < 0.0) { fprintf(stderr, "[0]: jcol=%d, SINGULAR!!!\n", jcol); fflush(stderr); exit(1); } if ( pivmax == 0.0 ) { if (diag != EMPTY) *pivrow = lsub_ptr[pivptr = diag]; else if (ptr0 != EMPTY) *pivrow = lsub_ptr[pivptr = ptr0]; else { /* look for the first row which does not belong to any later supernodes */ for (icol = jcol; icol < n; icol++) if (marker[swap[icol]] <= jcol) break; if (icol >= n) { fprintf(stderr, "[1]: jcol=%d, SINGULAR!!!\n", jcol); fflush(stderr); exit(1); } *pivrow = swap[icol]; /* pick up the pivot row */ for (isub = nsupc; isub < nsupr; ++isub) if ( lsub_ptr[isub] == *pivrow ) { pivptr = isub; break; } } pivmax = fill_tol; lu_col_ptr[pivptr] = pivmax; *usepr = 0; #ifdef DEBUG printf("[0] ZERO PIVOT: FILL (%d, %d).\n", *pivrow, jcol); fflush(stdout); #endif info =jcol + 1; } /* if (*pivrow == 0.0) */ else { thresh = u * pivmax; /* Choose appropriate pivotal element by our policy. 
*/ if ( *usepr ) { switch (milu) { case SMILU_1: rtemp = fabs(lu_col_ptr[old_pivptr] + drop_sum); break; case SMILU_2: case SMILU_3: rtemp = fabs(lu_col_ptr[old_pivptr]) + drop_sum; break; case SILU: default: rtemp = fabs(lu_col_ptr[old_pivptr]); break; } if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = old_pivptr; else *usepr = 0; } if ( *usepr == 0 ) { /* Use diagonal pivot? */ if ( diag >= 0 ) { /* diagonal exists */ switch (milu) { case SMILU_1: rtemp = fabs(lu_col_ptr[diag] + drop_sum); break; case SMILU_2: case SMILU_3: rtemp = fabs(lu_col_ptr[diag]) + drop_sum; break; case SILU: default: rtemp = fabs(lu_col_ptr[diag]); break; } if ( rtemp != 0.0 && rtemp >= thresh ) pivptr = diag; } *pivrow = lsub_ptr[pivptr]; } info = 0; /* Reset the diagonal */ switch (milu) { case SMILU_1: lu_col_ptr[pivptr] += drop_sum; break; case SMILU_2: case SMILU_3: lu_col_ptr[pivptr] += SGN(lu_col_ptr[pivptr]) * drop_sum; break; case SILU: default: break; } } /* else */ /* Record pivot row */ perm_r[*pivrow] = jcol; if (jcol < n - 1) { register int t1, t2, t; t1 = iswap[*pivrow]; t2 = jcol; if (t1 != t2) { t = swap[t1]; swap[t1] = swap[t2]; swap[t2] = t; t1 = swap[t1]; t2 = t; t = iswap[t1]; iswap[t1] = iswap[t2]; iswap[t2] = t; } } /* if (jcol < n - 1) */ /* Interchange row subscripts */ if ( pivptr != nsupc ) { itemp = lsub_ptr[pivptr]; lsub_ptr[pivptr] = lsub_ptr[nsupc]; lsub_ptr[nsupc] = itemp; /* Interchange numerical values as well, for the whole snode, such * that L is indexed the same way as A. */ for (icol = 0; icol <= nsupc; icol++) { itemp = pivptr + icol * nsupr; temp = lu_sup_ptr[itemp]; lu_sup_ptr[itemp] = lu_sup_ptr[nsupc + icol*nsupr]; lu_sup_ptr[nsupc + icol*nsupr] = temp; } } /* cdiv operation */ ops[FACT] += nsupr - nsupc; temp = 1.0 / lu_col_ptr[nsupc]; for (k = nsupc+1; k < nsupr; k++) lu_col_ptr[k] *= temp; return info; }
"""
Tests for the gaussian centroid functions (centroid_1dg, centroid_2dg,
_gaussian1d_moments).
"""

import itertools
from contextlib import nullcontext

from astropy.modeling.models import Gaussian1D, Gaussian2D
from astropy.utils.exceptions import AstropyUserWarning
import numpy as np
from numpy.testing import assert_allclose
import pytest

from ..gaussian import centroid_1dg, centroid_2dg, _gaussian1d_moments
from ...utils._optional_deps import HAS_SCIPY  # noqa

# Reference centroid and Gaussian-shape parameter grid for the tests below.
XCEN = 25.7
YCEN = 26.2
XSTDS = [3.2, 4.0]
YSTDS = [5.7, 4.1]
THETAS = np.array([30., 45.]) * np.pi / 180.

# Small 3x3 "plus"-shaped test array (unused in the visible tests below;
# presumably shared with other tests in this module -- confirm).
DATA = np.zeros((3, 3))
DATA[0:2, 1] = 1.
DATA[1, 0:2] = 1.
DATA[1, 1] = 2.


# NOTE: the fitting routines in astropy use scipy.optimize
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(('x_std', 'y_std', 'theta'),
                         list(itertools.product(XSTDS, YSTDS, THETAS)))
def test_centroids(x_std, y_std, theta):
    """
    Check that centroid_1dg and centroid_2dg recover (XCEN, YCEN) on a
    synthetic 2D Gaussian, with and without an error array, and with a
    masked outlier pixel.
    """
    model = Gaussian2D(2.4, XCEN, YCEN, x_stddev=x_std, y_stddev=y_std,
                       theta=theta)
    y, x = np.mgrid[0:50, 0:47]
    data = model(x, y)

    xc, yc = centroid_1dg(data)
    assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
    xc, yc = centroid_2dg(data)
    assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)

    # test with errors
    error = np.sqrt(data)
    xc, yc = centroid_1dg(data, error=error)
    assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
    xc, yc = centroid_2dg(data, error=error)
    assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)

    # test with mask: inject a huge outlier and mask it out, so the
    # recovered centroid should be unaffected
    mask = np.zeros(data.shape, dtype=bool)
    data[10, 10] = 1.e5
    mask[10, 10] = True
    xc, yc = centroid_1dg(data, mask=mask)
    assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)
    xc, yc = centroid_2dg(data, mask=mask)
    assert_allclose((xc, yc), (XCEN, YCEN), rtol=0, atol=1.e-3)


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('use_mask', [True, False])
def <API key>(use_mask):
    """
    A row of NaNs must either be masked explicitly (no warning expected)
    or trigger a non-finite-values AstropyUserWarning; the centroid is
    recovered correctly in both cases.
    """
    xc_ref = 24.7
    yc_ref = 25.2
    model = Gaussian2D(2.4, xc_ref, yc_ref, x_stddev=5.0, y_stddev=5.0)
    y, x = np.mgrid[0:50, 0:50]
    data = model(x, y)
    data[20, :] = np.nan
    if use_mask:
        mask = np.zeros(data.shape, dtype=bool)
        mask[20, :] = True
        nwarn = 0
        ctx = nullcontext()
    else:
        mask = None
        nwarn = 1
        ctx = pytest.warns(AstropyUserWarning,
                           match='Input data contains non-finite values')

    with ctx as warnlist:
        xc, yc = centroid_1dg(data, mask=mask)
        assert_allclose([xc, yc], [xc_ref, yc_ref], rtol=0, atol=1.e-3)
    if nwarn == 1:
        assert len(warnlist) == nwarn

    with ctx as warnlist:
        xc, yc = centroid_2dg(data, mask=mask)
        assert_allclose([xc, yc], [xc_ref, yc_ref], rtol=0, atol=1.e-3)
    if nwarn == 1:
        assert len(warnlist) == nwarn


@pytest.mark.skipif('not HAS_SCIPY')
def <API key>():
    """A mask whose shape differs from the data must raise ValueError."""
    data = np.zeros((4, 4))
    mask = np.zeros((2, 2), dtype=bool)
    with pytest.raises(ValueError):
        centroid_1dg(data, mask=mask)
    with pytest.raises(ValueError):
        centroid_2dg(data, mask=mask)
    with pytest.raises(ValueError):
        _gaussian1d_moments(data, mask=mask)


@pytest.mark.skipif('not HAS_SCIPY')
def <API key>():
    """An error array whose shape differs from the data must raise ValueError."""
    error = np.zeros((2, 2), dtype=bool)
    with pytest.raises(ValueError):
        centroid_1dg(np.zeros((4, 4)), error=error)
    with pytest.raises(ValueError):
        centroid_2dg(np.zeros((4, 4)), error=error)


@pytest.mark.skipif('not HAS_SCIPY')
def <API key>():
    """centroid_2dg must raise ValueError on constant (unfittable) data."""
    data = np.ones((2, 2))
    with pytest.raises(ValueError):
        centroid_2dg(data)


def <API key>():
    """
    _gaussian1d_moments should recover (amplitude, mean, stddev) from a 1D
    Gaussian, ignore a masked outlier, and warn once on masked NaN data.
    """
    x = np.arange(100)
    desired = (75, 50, 5)
    g = Gaussian1D(*desired)
    data = g(x)
    result = _gaussian1d_moments(data)
    assert_allclose(result, desired, rtol=0, atol=1.e-6)

    # Masked outlier: moments must be unchanged.
    data[0] = 1.e5
    mask = np.zeros(data.shape).astype(bool)
    mask[0] = True
    result = _gaussian1d_moments(data, mask=mask)
    assert_allclose(result, desired, rtol=0, atol=1.e-6)

    # Masked NaN: still correct, but exactly one warning is emitted.
    data[0] = np.nan
    mask = np.zeros(data.shape).astype(bool)
    mask[0] = True
    with pytest.warns(AstropyUserWarning) as warnlist:
        result = _gaussian1d_moments(data, mask=mask)
    assert_allclose(result, desired, rtol=0, atol=1.e-6)
    assert len(warnlist) == 1
package org.hisp.dhis.dxf2.events.event; import org.hisp.dhis.scheduling.JobConfiguration; import org.hisp.dhis.security.<API key>; import org.hisp.dhis.dxf2.common.ImportOptions; import java.util.List; /** * @author Morten Olav Hansen <mortenoh@gmail.com> */ public class ImportEventsTask extends <API key> { private final List<Event> events; private final EventService eventService; private final ImportOptions importOptions; private final JobConfiguration id; public ImportEventsTask( List<Event> events, EventService eventService, ImportOptions importOptions, JobConfiguration id ) { super(); this.events = events; this.eventService = eventService; this.importOptions = importOptions; this.id = id; } @Override public void call() { eventService.addEvents( events, importOptions, id ); } }
<?php
/* vim: set expandtab softtabstop=4 tabstop=4 shiftwidth=4: */
// +----------------------------------------------------------------------+
// | PHP Version 4                                                        |
// | Authors: Chuck Hagenbuch <chuck@horde.org>                           |
// |          Jon Parise <jon@php.net>                                    |
// |          Damian Alejandro Fernandez Sosa <damlists@cnba.uba.ar>      |
// +----------------------------------------------------------------------+

require_once 'PEAR.php';
require_once 'Net/Socket.php';

/**
 * Provides an implementation of the SMTP protocol using PEAR's
 * Net_Socket:: class.
 *
 * @package Net_SMTP
 * @author  Chuck Hagenbuch <chuck@horde.org>
 * @author  Jon Parise <jon@php.net>
 * @author  Damian Alejandro Fernandez Sosa <damlists@cnba.uba.ar>
 *
 * @example basic.php A basic implementation of the Net_SMTP package.
 */
class Net_SMTP
{
    /**
     * The server to connect to.
     * @var string
     * @access public
     */
    var $host = 'localhost';

    /**
     * The port to connect to.
     * @var int
     * @access public
     */
    var $port = 25;

    /**
     * The value to give when sending EHLO or HELO.
     * @var string
     * @access public
     */
    var $localhost = 'localhost';

    /**
     * List of supported authentication methods, in preferential order.
     * @var array
     * @access public
     */
    var $auth_methods = array('DIGEST-MD5', 'CRAM-MD5', 'LOGIN', 'PLAIN');

    /**
     * Should debugging output be enabled?
     * @var boolean
     * @access private
     */
    var $_debug = false;

    /**
     * The socket resource being used to connect to the SMTP server.
     * @var resource
     * @access private
     */
    var $_socket = null;

    /**
     * The most recent server response code.
     * @var int
     * @access private
     */
    var $_code = -1;

    /**
     * The most recent server response arguments.
     * @var array
     * @access private
     */
    var $_arguments = array();

    /**
     * Stores detected features of the SMTP server.
     * @var array
     * @access private
     */
    var $_esmtp = array();

    /**
     * Instantiates a new Net_SMTP object, overriding any defaults
     * with parameters that are passed in.
     *
     * @param string The server to connect to.
     * @param int    The port to connect to.
     * @param string The value to give when sending EHLO or HELO.
     *
     * @access public
     * @since  1.0
     */
    function Net_SMTP($host = null, $port = null, $localhost = null)
    {
        if (isset($host)) $this->host = $host;
        if (isset($port)) $this->port = $port;
        if (isset($localhost)) $this->localhost = $localhost;

        $this->_socket = new Net_Socket();

        /*
         * Include the Auth_SASL package.  If the package is not available,
         * we disable the authentication methods that depend upon it.
         */
        if ((@include_once 'Auth/SASL.php') === false) {
            $pos = array_search('DIGEST-MD5', $this->auth_methods);
            unset($this->auth_methods[$pos]);
            $pos = array_search('CRAM-MD5', $this->auth_methods);
            unset($this->auth_methods[$pos]);
        }
    }

    /**
     * Set the value of the debugging flag.
     *
     * @param boolean $debug New value for the debugging flag.
     *
     * @access public
     * @since  1.1.0
     */
    function setDebug($debug)
    {
        $this->_debug = $debug;
    }

    /**
     * Send the given string of data to the server.
     *
     * @param string $data The string of data to send.
     *
     * @return mixed True on success or a PEAR_Error object on failure.
     *
     * @access private
     * @since  1.1.0
     */
    function _send($data)
    {
        if ($this->_debug) {
            echo "DEBUG: Send: $data\n";
        }

        if (PEAR::isError($error = $this->_socket->write($data))) {
            return new PEAR_Error('Failed to write to socket: ' .
                                  $error->getMessage());
        }

        return true;
    }

    /**
     * Send a command to the server with an optional string of arguments.
     * A carriage return / linefeed (CRLF) sequence will be appended to each
     * command string before it is sent to the SMTP server.
     *
     * @param string $command The SMTP command to send to the server.
     * @param string $args    A string of optional arguments to append
     *                        to the command.
     *
     * @return mixed The result of the _send() call.
     *
     * @access private
     * @since  1.1.0
     */
    function _put($command, $args = '')
    {
        if (!empty($args)) {
            return $this->_send($command . ' ' . $args . "\r\n");
        }

        return $this->_send($command . "\r\n");
    }

    /**
     * Read a reply from the SMTP server.  The reply consists of a response
     * code and a response message.
     *
     * @param mixed $valid The set of valid response codes.  These
     *                     may be specified as an array of integer
     *                     values or as a single integer value.
     *
     * @return mixed True if the server returned a valid response code or
     *               a PEAR_Error object if an error condition is reached.
     *
     * @access private
     * @since  1.1.0
     *
     * @see getResponse
     */
    function _parseResponse($valid)
    {
        $this->_code = -1;
        $this->_arguments = array();

        while ($line = $this->_socket->readLine()) {
            if ($this->_debug) {
                echo "DEBUG: Recv: $line\n";
            }

            /* If we receive an empty line, the connection has been closed. */
            if (empty($line)) {
                $this->disconnect();
                return new PEAR_Error("Connection was unexpectedly closed");
            }

            /* Read the code and store the rest in the arguments array. */
            $code = substr($line, 0, 3);
            $this->_arguments[] = trim(substr($line, 4));

            /* Check the syntax of the response code. */
            if (is_numeric($code)) {
                $this->_code = (int)$code;
            } else {
                $this->_code = -1;
                break;
            }

            /* If this is not a multiline response, we're done. */
            if (substr($line, 3, 1) != '-') {
                break;
            }
        }

        /* Compare the server's response code with the valid code. */
        if (is_int($valid) && ($this->_code === $valid)) {
            return true;
        }

        /* If we were given an array of valid response codes, check each one. */
        if (is_array($valid)) {
            foreach ($valid as $valid_code) {
                if ($this->_code === $valid_code) {
                    return true;
                }
            }
        }

        return new PEAR_Error("Invalid response code received from server");
    }

    /**
     * Return a 2-tuple containing the last response from the SMTP server.
     *
     * @return array A two-element array: the first element contains the
     *               response code as an integer and the second element
     *               contains the response's arguments as a string.
     *
     * @access public
     * @since  1.1.0
     */
    function getResponse()
    {
        return array($this->_code, join("\n", $this->_arguments));
    }

    /**
     * Attempt to connect to the SMTP server.
     *
     * @param int  $timeout    The timeout value (in seconds) for the
     *                         socket connection.
     * @param bool $persistent Should a persistent socket connection
     *                         be used?
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function connect($timeout = null, $persistent = false)
    {
        $result = $this->_socket->connect($this->host, $this->port,
                                          $persistent, $timeout);
        if (PEAR::isError($result)) {
            return new PEAR_Error('Failed to connect socket: ' .
                                  $result->getMessage());
        }

        if (PEAR::isError($error = $this->_parseResponse(220))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_negotiate())) {
            return $error;
        }

        return true;
    }

    /**
     * Attempt to disconnect from the SMTP server.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function disconnect()
    {
        if (PEAR::isError($error = $this->_put('QUIT'))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(221))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_socket->disconnect())) {
            return new PEAR_Error('Failed to disconnect socket: ' .
                                  $error->getMessage());
        }

        return true;
    }

    /**
     * Attempt to send the EHLO command and obtain a list of ESMTP
     * extensions available, and failing that just send HELO.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     *
     * @access private
     * @since  1.1.0
     */
    function _negotiate()
    {
        if (PEAR::isError($error = $this->_put('EHLO', $this->localhost))) {
            return $error;
        }

        if (PEAR::isError($this->_parseResponse(250))) {
            /* If we receive a 503 response, we're already authenticated. */
            if ($this->_code === 503) {
                return true;
            }

            /* If the EHLO failed, try the simpler HELO command. */
            if (PEAR::isError($error = $this->_put('HELO', $this->localhost))) {
                return $error;
            }
            if (PEAR::isError($this->_parseResponse(250))) {
                return new PEAR_Error('HELO was not accepted: ', $this->_code);
            }

            return true;
        }

        foreach ($this->_arguments as $argument) {
            $verb = strtok($argument, ' ');
            $arguments = substr($argument, strlen($verb) + 1,
                                strlen($argument) - strlen($verb) - 1);
            $this->_esmtp[$verb] = $arguments;
        }

        return true;
    }

    /**
     * Returns the name of the best authentication method that the server
     * has advertised.
     *
     * @return mixed Returns a string containing the name of the best
     *               supported authentication method or a PEAR_Error object
     *               if a failure condition is encountered.
     * @access private
     * @since  1.1.0
     */
    function _getBestAuthMethod()
    {
        $available_methods = explode(' ', $this->_esmtp['AUTH']);

        foreach ($this->auth_methods as $method) {
            if (in_array($method, $available_methods)) {
                return $method;
            }
        }

        return new PEAR_Error('No supported authentication methods');
    }

    /**
     * Attempt to do SMTP authentication.
     *
     * @param string The userid to authenticate as.
     * @param string The password to authenticate with.
     * @param string The requested authentication method.  If none is
     *               specified, the best supported method will be used.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function auth($uid, $pwd , $method = '')
    {
        if (empty($this->_esmtp['AUTH'])) {
            /* FIX: corrected wording of the error message ("does no"). */
            return new PEAR_Error('SMTP server does not support authentication');
        }

        /*
         * If no method has been specified, get the name of the best supported
         * method advertised by the SMTP server.
         */
        if (empty($method)) {
            if (PEAR::isError($method = $this->_getBestAuthMethod())) {
                /* Return the PEAR_Error object from _getBestAuthMethod(). */
                return $method;
            }
        } else {
            $method = strtoupper($method);
            if (!in_array($method, $this->auth_methods)) {
                return new PEAR_Error("$method is not a supported authentication method");
            }
        }

        switch ($method) {
            case 'DIGEST-MD5':
                $result = $this->_authDigest_MD5($uid, $pwd);
                break;
            case 'CRAM-MD5':
                $result = $this->_authCRAM_MD5($uid, $pwd);
                break;
            case 'LOGIN':
                $result = $this->_authLogin($uid, $pwd);
                break;
            case 'PLAIN':
                $result = $this->_authPlain($uid, $pwd);
                break;
            default:
                $result = new PEAR_Error("$method is not a supported authentication method");
                break;
        }

        /* If an error was encountered, return the PEAR_Error object. */
        if (PEAR::isError($result)) {
            return $result;
        }

        /* RFC-2554 requires us to re-negotiate ESMTP after an AUTH. */
        if (PEAR::isError($error = $this->_negotiate())) {
            return $error;
        }

        return true;
    }

    /**
     * Authenticates the user using the DIGEST-MD5 method.
     *
     * @param string The userid to authenticate as.
     * @param string The password to authenticate with.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access private
     * @since  1.1.0
     */
    function _authDigest_MD5($uid, $pwd)
    {
        if (PEAR::isError($error = $this->_put('AUTH', 'DIGEST-MD5'))) {
            return $error;
        }
        /* 334: Continue authentication request */
        if (PEAR::isError($error = $this->_parseResponse(334))) {
            /* 503: Error: already authenticated */
            if ($this->_code === 503) {
                return true;
            }
            return $error;
        }

        $challenge = base64_decode($this->_arguments[0]);
        $digest = &Auth_SASL::factory('digestmd5');
        $auth_str = base64_encode($digest->getResponse($uid, $pwd, $challenge,
                                                       $this->host, "smtp"));

        if (PEAR::isError($error = $this->_put($auth_str))) {
            return $error;
        }
        /* 334: Continue authentication request */
        if (PEAR::isError($error = $this->_parseResponse(334))) {
            return $error;
        }

        /*
         * We don't use the protocol's third step because SMTP doesn't allow
         * subsequent authentication, so we just silently ignore it.
         */
        if (PEAR::isError($error = $this->_put(' '))) {
            return $error;
        }
        /* 235: Authentication successful */
        if (PEAR::isError($error = $this->_parseResponse(235))) {
            return $error;
        }

        /* FIX: previously fell through without a return value; the
         * documented contract is to return true on success. */
        return true;
    }

    /**
     * Authenticates the user using the CRAM-MD5 method.
     *
     * @param string The userid to authenticate as.
     * @param string The password to authenticate with.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access private
     * @since  1.1.0
     */
    function _authCRAM_MD5($uid, $pwd)
    {
        if (PEAR::isError($error = $this->_put('AUTH', 'CRAM-MD5'))) {
            return $error;
        }
        /* 334: Continue authentication request */
        if (PEAR::isError($error = $this->_parseResponse(334))) {
            /* 503: Error: already authenticated */
            if ($this->_code === 503) {
                return true;
            }
            return $error;
        }

        $challenge = base64_decode($this->_arguments[0]);
        $cram = &Auth_SASL::factory('crammd5');
        $auth_str = base64_encode($cram->getResponse($uid, $pwd, $challenge));

        if (PEAR::isError($error = $this->_put($auth_str))) {
            return $error;
        }

        /* 235: Authentication successful */
        if (PEAR::isError($error = $this->_parseResponse(235))) {
            return $error;
        }

        /* FIX: previously fell through without a return value; the
         * documented contract is to return true on success. */
        return true;
    }

    /**
     * Authenticates the user using the LOGIN method.
     *
     * @param string The userid to authenticate as.
     * @param string The password to authenticate with.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access private
     * @since  1.1.0
     */
    function _authLogin($uid, $pwd)
    {
        if (PEAR::isError($error = $this->_put('AUTH', 'LOGIN'))) {
            return $error;
        }
        /* 334: Continue authentication request */
        if (PEAR::isError($error = $this->_parseResponse(334))) {
            /* 503: Error: already authenticated */
            if ($this->_code === 503) {
                return true;
            }
            return $error;
        }

        if (PEAR::isError($error = $this->_put(base64_encode($uid)))) {
            return $error;
        }
        /* 334: Continue authentication request */
        if (PEAR::isError($error = $this->_parseResponse(334))) {
            return $error;
        }

        if (PEAR::isError($error = $this->_put(base64_encode($pwd)))) {
            return $error;
        }

        /* 235: Authentication successful */
        if (PEAR::isError($error = $this->_parseResponse(235))) {
            return $error;
        }

        return true;
    }

    /**
     * Authenticates the user using the PLAIN method.
     *
     * @param string The userid to authenticate as.
     * @param string The password to authenticate with.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access private
     * @since  1.1.0
     */
    function _authPlain($uid, $pwd)
    {
        if (PEAR::isError($error = $this->_put('AUTH', 'PLAIN'))) {
            return $error;
        }
        /* 334: Continue authentication request */
        if (PEAR::isError($error = $this->_parseResponse(334))) {
            /* 503: Error: already authenticated */
            if ($this->_code === 503) {
                return true;
            }
            return $error;
        }

        $auth_str = base64_encode(chr(0) . $uid . chr(0) . $pwd);

        if (PEAR::isError($error = $this->_put($auth_str))) {
            return $error;
        }

        /* 235: Authentication successful */
        if (PEAR::isError($error = $this->_parseResponse(235))) {
            return $error;
        }

        return true;
    }

    /**
     * Send the HELO command.
     *
     * @param string The domain name to say we are.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function helo($domain)
    {
        if (PEAR::isError($error = $this->_put('HELO', $domain))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Send the MAIL FROM: command.
     *
     * @param string The sender (reverse path) to set.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function mailFrom($sender)
    {
        if (PEAR::isError($error = $this->_put('MAIL', "FROM:<$sender>"))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Send the RCPT TO: command.
     *
     * @param string The recipient (forward path) to add.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function rcptTo($recipient)
    {
        if (PEAR::isError($error = $this->_put('RCPT', "TO:<$recipient>"))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(array(250, 251)))) {
            return $error;
        }

        return true;
    }

    /**
     * Quote the data so that it meets SMTP standards.
     *
     * This is provided as a separate public function to facilitate easier
     * overloading for the cases where it is desirable to customize the
     * quoting behavior.
     *
     * @param string The message text to quote.  The string must be passed
     *               by reference, and the text will be modified in place.
     *
     * @access public
     * @since  1.2
     */
    function quotedata(&$data)
    {
        /*
         * Change Unix (\n) and Mac (\r) linefeeds into Internet-standard CRLF
         * (\r\n) linefeeds.
         */
        $data = preg_replace("/([^\r]{1})\n/", "\\1\r\n", $data);
        $data = preg_replace("/\n\n/", "\n\r\n", $data);

        /*
         * Because a single leading period (.) signifies an end to the data,
         * legitimate leading periods need to be "doubled" (e.g. '..').
         */
        $data = preg_replace("/\n\./", "\n..", $data);
    }

    /**
     * Send the DATA command.
     *
     * @param string The message body to send.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function data($data)
    {
        /*
         * RFC 1870, section 3, subsection 3 states "a value of zero indicates
         * that no fixed maximum message size is in force".  Furthermore, it
         * says that if "the parameter is omitted no information is conveyed
         * about the server's fixed maximum message size".
         */
        if (isset($this->_esmtp['SIZE']) && ($this->_esmtp['SIZE'] > 0)) {
            if (strlen($data) >= $this->_esmtp['SIZE']) {
                $this->disconnect();
                /* FIX: corrected spelling of "exceeds" in the message. */
                return new PEAR_Error('Message size exceeds the server limit');
            }
        }

        /* Quote the data based on the SMTP standards. */
        $this->quotedata($data);

        if (PEAR::isError($error = $this->_put('DATA'))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(354))) {
            return $error;
        }

        if (PEAR::isError($this->_send($data . "\r\n.\r\n"))) {
            return new PEAR_Error('write to socket failed');
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Send the SEND FROM: command.
     *
     * @param string The reverse path to send.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.2.6
     */
    function sendFrom($path)
    {
        if (PEAR::isError($error = $this->_put('SEND', "FROM:<$path>"))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Backwards-compatibility wrapper for sendFrom().
     *
     * @param string The reverse path to send.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     *
     * @access public
     * @since      1.0
     * @deprecated 1.2.6
     */
    function send_from($path)
    {
        /* FIX: must be an instance-method call; the bare sendFrom($path)
         * previously resolved to an undefined global function. */
        return $this->sendFrom($path);
    }

    /**
     * Send the SOML FROM: command.
     *
     * @param string The reverse path to send.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.2.6
     */
    function somlFrom($path)
    {
        if (PEAR::isError($error = $this->_put('SOML', "FROM:<$path>"))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Backwards-compatibility wrapper for somlFrom().
     *
     * @param string The reverse path to send.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     *
     * @access public
     * @since      1.0
     * @deprecated 1.2.6
     */
    function soml_from($path)
    {
        /* FIX: instance-method call (see send_from()). */
        return $this->somlFrom($path);
    }

    /**
     * Send the SAML FROM: command.
     *
     * @param string The reverse path to send.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.2.6
     */
    function samlFrom($path)
    {
        if (PEAR::isError($error = $this->_put('SAML', "FROM:<$path>"))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Backwards-compatibility wrapper for samlFrom().
     *
     * @param string The reverse path to send.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     *
     * @access public
     * @since      1.0
     * @deprecated 1.2.6
     */
    function saml_from($path)
    {
        /* FIX: instance-method call (see send_from()). */
        return $this->samlFrom($path);
    }

    /**
     * Send the RSET command.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function rset()
    {
        if (PEAR::isError($error = $this->_put('RSET'))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Send the VRFY command.
     *
     * @param string The string to verify
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function vrfy($string)
    {
        /* Note: 251 is also a valid response code */
        if (PEAR::isError($error = $this->_put('VRFY', $string))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Send the NOOP command.
     *
     * @return mixed Returns a PEAR_Error with an error message on any
     *               kind of failure, or true on success.
     * @access public
     * @since  1.0
     */
    function noop()
    {
        if (PEAR::isError($error = $this->_put('NOOP'))) {
            return $error;
        }
        if (PEAR::isError($error = $this->_parseResponse(250))) {
            return $error;
        }

        return true;
    }

    /**
     * Backwards-compatibility method.  identifySender()'s functionality is
     * now handled internally.
     *
     * @return boolean This method always return true.
     *
     * @access public
     * @since  1.0
     */
    function identifySender()
    {
        return true;
    }
}

?>
using Orchard.ContentManagement.Metadata.Settings; using Orchard.ContentManagement.MetaData; using Orchard.Data.Migration; namespace Orchard.Widgets { public class Migrations : DataMigration { <API key> <API key>; public Migrations(<API key> <API key>) { <API key> = <API key>; } public int Create() { <API key>.AlterPartDefinition("WidgetsListPart", builder => builder .Attachable() .WithDescription("Provides a way to add widgets per content items.") ); return 1; } } }
import os def get_package_data(): paths = [os.path.join('data', '*.json')] return {'astroquery.sdss': paths}
<!DOCTYPE HTML PUBLIC "- <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_77) on Sun May 08 11:38:48 PDT 2016 --> <title>FactorsResponse (saRestApi-sdk 1.0.0 API)</title> <meta name="date" content="2016-05-08"> <link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style"> <script type="text/javascript" src="../../../../../script.js"></script> </head> <body> <script type="text/javascript"><! try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="FactorsResponse (saRestApi-sdk 1.0.0 API)"; } } catch(err) { } var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; var activeTableTab = "activeTableTab"; </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <div class="topNav"><a name="navbar.top"> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../../org/secureauth/sarestapi/data/Response/DFPValidateResponse.html" title="class in org.secureauth.sarestapi.data.Response"><span class="typeNameLink">Prev&nbsp;Class</span></a></li> <li><a href="../../../../../org/secureauth/sarestapi/data/Response/<API key>.html" title="class in org.secureauth.sarestapi.data.Response"><span 
class="typeNameLink">Next&nbsp;Class</span></a></li> </ul> <ul class="navList"> <li><a href="../../../../../index.html?org/secureauth/sarestapi/data/Response/FactorsResponse.html" target="_top">Frames</a></li> <li><a href="FactorsResponse.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="<API key>"> <li><a href="../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><! allClassesLink = document.getElementById("<API key>"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } </script> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li>Nested&nbsp;|&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.detail">Method</a></li> </ul> </div> <a name="skip.navbar.top"> </a></div> <div class="header"> <div class="subTitle">org.secureauth.sarestapi.data.Response</div> <h2 title="Class FactorsResponse" class="title">Class FactorsResponse</h2> </div> <div class="contentContainer"> <ul class="inheritance"> <li>java.lang.Object</li> <li> <ul class="inheritance"> <li><a href="../../../../../org/secureauth/sarestapi/data/Response/BaseResponse.html" title="class in org.secureauth.sarestapi.data.Response">org.secureauth.sarestapi.data.Response.BaseResponse</a></li> <li> <ul class="inheritance"> <li>org.secureauth.sarestapi.data.Response.FactorsResponse</li> </ul> </li> </ul> </li> </ul> <div class="description"> <ul class="blockList"> <li class="blockList"> <hr> <br> <pre>public class <span class="typeNameLabel">FactorsResponse</span> extends <a href="../../../../../org/secureauth/sarestapi/data/Response/BaseResponse.html" title="class in 
org.secureauth.sarestapi.data.Response">BaseResponse</a></pre> </li> </ul> </div> <div class="summary"> <ul class="blockList"> <li class="blockList"> <ul class="blockList"> <li class="blockList"><a name="constructor.summary"> </a> <h3>Constructor Summary</h3> <table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation"> <caption><span>Constructors</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colOne" scope="col">Constructor and Description</th> </tr> <tr class="altColor"> <td class="colOne"><code><span class="memberNameLink"><a href="../../../../../org/secureauth/sarestapi/data/Response/FactorsResponse.html#FactorsResponse--">FactorsResponse</a></span>()</code>&nbsp;</td> </tr> </table> </li> </ul> <ul class="blockList"> <li class="blockList"><a name="method.summary"> </a> <h3>Method Summary</h3> <table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation"> <caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd">&nbsp;</span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd">&nbsp;</span></span><span id="t4" class="tableTab"><span><a href="javascript:show(8);">Concrete Methods</a></span><span class="tabEnd">&nbsp;</span></span></caption> <tr> <th class="colFirst" scope="col">Modifier and Type</th> <th class="colLast" scope="col">Method and Description</th> </tr> <tr id="i0" class="altColor"> <td class="colFirst"><code>java.util.ArrayList&lt;<a href="../../../../../org/secureauth/sarestapi/data/Factors.html" title="class in org.secureauth.sarestapi.data">Factors</a>&gt;</code></td> <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/secureauth/sarestapi/data/Response/FactorsResponse.html#getFactors--">getFactors</a></span>()</code>&nbsp;</td> 
</tr> <tr id="i1" class="rowColor"> <td class="colFirst"><code>java.lang.String</code></td> <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/secureauth/sarestapi/data/Response/FactorsResponse.html#getUser_id--">getUser_id</a></span>()</code>&nbsp;</td> </tr> <tr id="i2" class="altColor"> <td class="colFirst"><code>void</code></td> <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/secureauth/sarestapi/data/Response/FactorsResponse.html#setFactors-java.util.ArrayList-">setFactors</a></span>(java.util.ArrayList&lt;<a href="../../../../../org/secureauth/sarestapi/data/Factors.html" title="class in org.secureauth.sarestapi.data">Factors</a>&gt;&nbsp;factors)</code>&nbsp;</td> </tr> <tr id="i3" class="rowColor"> <td class="colFirst"><code>void</code></td> <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/secureauth/sarestapi/data/Response/FactorsResponse.html#setUser_id-java.lang.String-">setUser_id</a></span>(java.lang.String&nbsp;user_id)</code>&nbsp;</td> </tr> <tr id="i4" class="altColor"> <td class="colFirst"><code>java.lang.String</code></td> <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/secureauth/sarestapi/data/Response/FactorsResponse.html#toString--">toString</a></span>()</code>&nbsp;</td> </tr> </table> <ul class="blockList"> <li class="blockList"><a name="methods.inherited.from.class.org.secureauth.sarestapi.data.Response.BaseResponse"> </a> <h3>Methods inherited from class&nbsp;org.secureauth.sarestapi.data.Response.<a href="../../../../../org/secureauth/sarestapi/data/Response/BaseResponse.html" title="class in org.secureauth.sarestapi.data.Response">BaseResponse</a></h3> <code><a href="../../../../../org/secureauth/sarestapi/data/Response/BaseResponse.html#getMessage--">getMessage</a>, <a href="../../../../../org/secureauth/sarestapi/data/Response/BaseResponse.html#getStatus--">getStatus</a>, <a 
href="../../../../../org/secureauth/sarestapi/data/Response/BaseResponse.html#setMessage-java.lang.String-">setMessage</a>, <a href="../../../../../org/secureauth/sarestapi/data/Response/BaseResponse.html#setStatus-java.lang.String-">setStatus</a></code></li> </ul> <ul class="blockList"> <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object"> </a> <h3>Methods inherited from class&nbsp;java.lang.Object</h3> <code>clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait</code></li> </ul> </li> </ul> </li> </ul> </div> <div class="details"> <ul class="blockList"> <li class="blockList"> <ul class="blockList"> <li class="blockList"><a name="constructor.detail"> </a> <h3>Constructor Detail</h3> <a name="FactorsResponse </a> <ul class="blockListLast"> <li class="blockList"> <h4>FactorsResponse</h4> <pre>public&nbsp;FactorsResponse()</pre> </li> </ul> </li> </ul> <ul class="blockList"> <li class="blockList"><a name="method.detail"> </a> <h3>Method Detail</h3> <a name="getUser_id </a> <ul class="blockList"> <li class="blockList"> <h4>getUser_id</h4> <pre>public&nbsp;java.lang.String&nbsp;getUser_id()</pre> </li> </ul> <a name="setUser_id-java.lang.String-"> </a> <ul class="blockList"> <li class="blockList"> <h4>setUser_id</h4> <pre>public&nbsp;void&nbsp;setUser_id(java.lang.String&nbsp;user_id)</pre> </li> </ul> <a name="getFactors </a> <ul class="blockList"> <li class="blockList"> <h4>getFactors</h4> <pre>public&nbsp;java.util.ArrayList&lt;<a href="../../../../../org/secureauth/sarestapi/data/Factors.html" title="class in org.secureauth.sarestapi.data">Factors</a>&gt;&nbsp;getFactors()</pre> </li> </ul> <a name="setFactors-java.util.ArrayList-"> </a> <ul class="blockList"> <li class="blockList"> <h4>setFactors</h4> <pre>public&nbsp;void&nbsp;setFactors(java.util.ArrayList&lt;<a href="../../../../../org/secureauth/sarestapi/data/Factors.html" title="class in org.secureauth.sarestapi.data">Factors</a>&gt;&nbsp;factors)</pre> 
</li> </ul> <a name="toString </a> <ul class="blockListLast"> <li class="blockList"> <h4>toString</h4> <pre>public&nbsp;java.lang.String&nbsp;toString()</pre> <dl> <dt><span class="<API key>">Overrides:</span></dt> <dd><code>toString</code>&nbsp;in class&nbsp;<code>java.lang.Object</code></dd> </dl> </li> </ul> </li> </ul> </li> </ul> </div> </div> <div class="bottomNav"><a name="navbar.bottom"> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../index-all.html">Index</a></li> <li><a href="../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../../org/secureauth/sarestapi/data/Response/DFPValidateResponse.html" title="class in org.secureauth.sarestapi.data.Response"><span class="typeNameLink">Prev&nbsp;Class</span></a></li> <li><a href="../../../../../org/secureauth/sarestapi/data/Response/<API key>.html" title="class in org.secureauth.sarestapi.data.Response"><span class="typeNameLink">Next&nbsp;Class</span></a></li> </ul> <ul class="navList"> <li><a href="../../../../../index.html?org/secureauth/sarestapi/data/Response/FactorsResponse.html" target="_top">Frames</a></li> <li><a href="FactorsResponse.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="<API key>"> <li><a href="../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><! 
allClassesLink = document.getElementById("<API key>"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } </script> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li>Nested&nbsp;|&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.detail">Method</a></li> </ul> </div> <a name="skip.navbar.bottom"> </a></div> </body> </html>
// Code generated by "bitstringer -type=BranchSampleType"; DO NOT EDIT

package perffile

import "strconv"

// String renders a BranchSampleType bit mask as a "|"-separated list of flag
// names. A zero value renders as "0". Any bits that remain after masking off
// the 17 known flags (0x1ffff) are appended as a hex remainder, so unknown
// future flags are still visible in the output.
func (i BranchSampleType) String() string {
	// Zero has no flag names; handle it up front.
	if i == 0 {
		return "0"
	}
	s := ""
	if i&BranchSampleAbortTX != 0 {
		s += "AbortTX|"
	}
	if i&BranchSampleAny != 0 {
		s += "Any|"
	}
	if i&BranchSampleAnyCall != 0 {
		s += "AnyCall|"
	}
	if i&<API key> != 0 {
		s += "AnyReturn|"
	}
	if i&BranchSampleCall != 0 {
		s += "Call|"
	}
	if i&<API key> != 0 {
		s += "CallStack|"
	}
	if i&BranchSampleCond != 0 {
		s += "Cond|"
	}
	if i&BranchSampleHV != 0 {
		s += "HV|"
	}
	if i&BranchSampleInTX != 0 {
		s += "InTX|"
	}
	if i&BranchSampleIndCall != 0 {
		s += "IndCall|"
	}
	if i&BranchSampleIndJump != 0 {
		s += "IndJump|"
	}
	if i&BranchSampleKernel != 0 {
		s += "Kernel|"
	}
	if i&<API key> != 0 {
		s += "NoCycles|"
	}
	if i&BranchSampleNoFlags != 0 {
		s += "NoFlags|"
	}
	if i&BranchSampleNoTX != 0 {
		s += "NoTX|"
	}
	if i&<API key> != 0 {
		s += "TypeSave|"
	}
	if i&BranchSampleUser != 0 {
		s += "User|"
	}
	// Clear the 17 known flag bits (131071 == 0x1ffff).
	i &^= 131071
	if i == 0 {
		// Only known flags were set; drop the trailing "|".
		return s[:len(s)-1]
	}
	// Unknown bits remain: append them as a hex suffix.
	return s + "0x" + strconv.FormatUint(uint64(i), 16)
}
#include "gin/converter.h"

#include "v8/include/v8.h"

using v8::ArrayBuffer;
using v8::Boolean;
using v8::External;
using v8::Function;
using v8::Handle;
using v8::Integer;
using v8::Isolate;
using v8::Number;
using v8::Object;
using v8::String;
using v8::Value;

namespace gin {

// Converter specializations between C++ values and V8 handles.
// Each ToV8 wraps a C++ value in a V8 handle; each FromV8 returns false
// (leaving *out untouched for most types) when the V8 value has the wrong
// type, and true on success.
// NOTE(review): these call the pre-Maybe V8 APIs (BooleanValue(),
// Int32Value(), ... without a context argument) — matches an older V8 than
// current trunk; confirm against the v8.h this tree pins.

Handle<Value> Converter<bool>::ToV8(Isolate* isolate, bool val) {
  return Boolean::New(isolate, val).As<Value>();
}

// Always succeeds: any JS value coerces to a boolean.
bool Converter<bool>::FromV8(Isolate* isolate, Handle<Value> val, bool* out) {
  *out = val->BooleanValue();
  return true;
}

Handle<Value> Converter<int32_t>::ToV8(Isolate* isolate, int32_t val) {
  return Integer::New(isolate, val).As<Value>();
}

bool Converter<int32_t>::FromV8(Isolate* isolate,
                                Handle<Value> val,
                                int32_t* out) {
  if (!val->IsNumber())
    return false;
  *out = val->Int32Value();
  return true;
}

Handle<Value> Converter<uint32_t>::ToV8(Isolate* isolate, uint32_t val) {
  return Integer::NewFromUnsigned(isolate, val).As<Value>();
}

bool Converter<uint32_t>::FromV8(Isolate* isolate,
                                 Handle<Value> val,
                                 uint32_t* out) {
  if (!val->IsNumber())
    return false;
  *out = val->Uint32Value();
  return true;
}

// 64-bit integers are transported as JS doubles, so values beyond 2^53
// lose precision in both directions.
Handle<Value> Converter<int64_t>::ToV8(Isolate* isolate, int64_t val) {
  return Number::New(isolate, static_cast<double>(val)).As<Value>();
}

bool Converter<int64_t>::FromV8(Isolate* isolate,
                                Handle<Value> val,
                                int64_t* out) {
  if (!val->IsNumber())
    return false;
  // Even though IntegerValue returns int64_t, JavaScript cannot represent
  // the full precision of int64_t, which means some rounding might occur.
  *out = val->IntegerValue();
  return true;
}

Handle<Value> Converter<uint64_t>::ToV8(Isolate* isolate, uint64_t val) {
  return Number::New(isolate, static_cast<double>(val)).As<Value>();
}

bool Converter<uint64_t>::FromV8(Isolate* isolate,
                                 Handle<Value> val,
                                 uint64_t* out) {
  if (!val->IsNumber())
    return false;
  *out = static_cast<uint64_t>(val->IntegerValue());
  return true;
}

Handle<Value> Converter<float>::ToV8(Isolate* isolate, float val) {
  return Number::New(isolate, val).As<Value>();
}

bool Converter<float>::FromV8(Isolate* isolate, Handle<Value> val, float* out) {
  if (!val->IsNumber())
    return false;
  *out = static_cast<float>(val->NumberValue());
  return true;
}

Handle<Value> Converter<double>::ToV8(Isolate* isolate, double val) {
  return Number::New(isolate, val).As<Value>();
}

bool Converter<double>::FromV8(Isolate* isolate,
                               Handle<Value> val,
                               double* out) {
  if (!val->IsNumber())
    return false;
  *out = val->NumberValue();
  return true;
}

// StringPiece is a non-owning view, so only ToV8 exists (FromV8 would
// dangle — there is no owned storage to point into).
Handle<Value> Converter<base::StringPiece>::ToV8(
    Isolate* isolate, const base::StringPiece& val) {
  return String::NewFromUtf8(isolate, val.data(), String::kNormalString,
                             static_cast<uint32_t>(val.length()));
}

Handle<Value> Converter<std::string>::ToV8(Isolate* isolate,
                                           const std::string& val) {
  return Converter<base::StringPiece>::ToV8(isolate, val);
}

bool Converter<std::string>::FromV8(Isolate* isolate,
                                    Handle<Value> val,
                                    std::string* out) {
  if (!val->IsString())
    return false;
  Handle<String> str = Handle<String>::Cast(val);
  // Size the output to the UTF-8 byte length, then write directly into it.
  int length = str->Utf8Length();
  out->resize(length);
  str->WriteUtf8(&(*out)[0], length, NULL, String::NO_NULL_TERMINATION);
  return true;
}

bool Converter<Handle<Function> >::FromV8(Isolate* isolate,
                                          Handle<Value> val,
                                          Handle<Function>* out) {
  if (!val->IsFunction())
    return false;
  *out = Handle<Function>::Cast(val);
  return true;
}

Handle<Value> Converter<Handle<Object> >::ToV8(Isolate* isolate,
                                               Handle<Object> val) {
  return val.As<Value>();
}

bool Converter<Handle<Object> >::FromV8(Isolate* isolate,
                                        Handle<Value> val,
                                        Handle<Object>* out) {
  if (!val->IsObject())
    return false;
  *out = Handle<Object>::Cast(val);
  return true;
}

Handle<Value> Converter<Handle<ArrayBuffer> >::ToV8(Isolate* isolate,
                                                    Handle<ArrayBuffer> val) {
  return val.As<Value>();
}

bool Converter<Handle<ArrayBuffer> >::FromV8(Isolate* isolate,
                                             Handle<Value> val,
                                             Handle<ArrayBuffer>* out) {
  if (!val->IsArrayBuffer())
    return false;
  *out = Handle<ArrayBuffer>::Cast(val);
  return true;
}

Handle<Value> Converter<Handle<External> >::ToV8(Isolate* isolate,
                                                 Handle<External> val) {
  return val.As<Value>();
}

bool Converter<Handle<External> >::FromV8(Isolate* isolate,
                                          v8::Handle<Value> val,
                                          Handle<External>* out) {
  if (!val->IsExternal())
    return false;
  *out = Handle<External>::Cast(val);
  return true;
}

// Identity conversion: a generic Value needs no checking.
Handle<Value> Converter<Handle<Value> >::ToV8(Isolate* isolate,
                                              Handle<Value> val) {
  return val;
}

bool Converter<Handle<Value> >::FromV8(Isolate* isolate,
                                       Handle<Value> val,
                                       Handle<Value>* out) {
  *out = val;
  return true;
}

// Creates an internalized (deduplicated) V8 string, suitable for property
// names that are looked up repeatedly.
v8::Handle<v8::String> StringToSymbol(v8::Isolate* isolate,
                                      const base::StringPiece& val) {
  return String::NewFromUtf8(isolate, val.data(), String::kInternalizedString,
                             static_cast<uint32_t>(val.length()));
}

// Best-effort conversion for logging/debugging: empty or non-string values
// yield an empty std::string rather than an error.
std::string V8ToString(v8::Handle<v8::Value> value) {
  if (value.IsEmpty())
    return std::string();
  std::string result;
  if (!ConvertFromV8(NULL, value, &result))
    return std::string();
  return result;
}

}  // namespace gin
package gov.hhs.fha.nhinc.docretrieve.aspect;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import gov.hhs.fha.nhinc.common.nhinccommon.AssertionType;
import gov.hhs.fha.nhinc.event.<API key>;
import gov.hhs.fha.nhinc.event.EventDescription;
import gov.hhs.fha.nhinc.event.builder.<API key>;
import gov.hhs.fha.nhinc.properties.<API key>;
import gov.hhs.fha.nhinc.properties.PropertyAccessor;
import ihe.iti.xds_b._2007.<API key>;

import javax.xml.ws.WebServiceContext;

import org.junit.Before;
import org.junit.Test;
import static org.mockito.Matchers.anyString;

import org.springframework.util.CollectionUtils;

/**
 * Unit test for the document-retrieve event description builder.
 *
 * Verifies that the builder produces an EventDescription (a) with nothing
 * set, (b) from a request without an assertion, and (c) from a request plus
 * an assertion, in which case the HCID/NPI come from the mocked assertion
 * extractor.
 */
public class <API key> extends <API key> {

    // Builder under test; property access is stubbed out so no files are read.
    private <API key> builder;
    private <API key> request;
    private AssertionType assertion;
    // Mocked extractor that supplies "hcid"/"npi" for the assertion.
    private <API key> assertionExtractor;

    @Before
    public void before() throws <API key> {
        final PropertyAccessor mockProperties = mock(PropertyAccessor.class);
        // Override the accessor factory so the builder never touches real
        // property files during the test.
        builder = new <API key>(){
            @Override
            protected PropertyAccessor getPropertyAccessor(String fileName){
                return mockProperties;
            }
        };
        request = new <API key>();
        assertion = new AssertionType();
        assertionExtractor = mock(<API key>.class);
        when(mockProperties.getProperty(anyString())).thenReturn(null);
        when(assertionExtractor.getInitiatingHCID(assertion)).thenReturn("hcid");
        when(assertionExtractor.getNPI(assertion)).thenReturn("npi");
    }

    /** Building with no arguments at all must still yield a description. */
    @Test
    public void emptyBuild() {
        EventDescription eventDescription = getEventDescription(builder);
        assertNotNull(eventDescription);
    }

    /** Without an assertion argument, NPI and initiating HCID stay unset. */
    @Test
    public void noAssertion() {
        builder.setArguments(request);
        EventDescription eventDescription = assertBasicBuild();
        assertNull(eventDescription.getNPI());
        assertNull(eventDescription.getInitiatingHCID());
    }

    /** With an assertion, HCID/NPI are taken from the assertion extractor. */
    @Test
    public void withAssertion() {
        builder.<API key>(assertionExtractor);
        Object[] arguments = { request, assertion };
        builder.setArguments(arguments);
        EventDescription eventDescription = assertBasicBuild();
        assertEquals("hcid", eventDescription.getInitiatingHCID());
        assertEquals("npi", eventDescription.getNPI());
    }

    /**
     * Shared checks: the build succeeds and every collection-valued field
     * (statuses, responding HCIDs, payload types/sizes, error codes) plus
     * the timestamp is empty/unset.
     */
    private EventDescription assertBasicBuild() {
        EventDescription eventDescription = getEventDescription(builder);
        assertNull(eventDescription.getTimeStamp());
        assertTrue(CollectionUtils.isEmpty(eventDescription.getStatuses()));
        assertTrue(CollectionUtils.isEmpty(eventDescription.getRespondingHCIDs()));
        assertTrue(CollectionUtils.isEmpty(eventDescription.getPayloadTypes()));
        assertTrue(CollectionUtils.isEmpty(eventDescription.getPayloadSizes()));
        assertTrue(CollectionUtils.isEmpty(eventDescription.getErrorCodes()));
        return eventDescription;
    }
}
<?php

use yii\helpers\Html;
use yii\grid\GridView;

/* @var $this yii\web\View */
/* @var $searchModel backend\models\DialogsSearch */
/* @var $dataProvider yii\data\ActiveDataProvider */

// Index view for the Dialogs CRUD: page title, "create" button and a
// filterable grid of dialog records.
$this->title = Yii::t('app', 'Dialogs');
$this->params['breadcrumbs'][] = $this->title;
?>
<div class="dialogs-index">

    <h1><?= Html::encode($this->title) ?></h1>
    <?php // echo $this->render('_search', ['model' => $searchModel]); ?>

    <p>
        <?= Html::a(Yii::t('app', 'Create Dialogs'), ['create'], ['class' => 'btn btn-success']) ?>
    </p>

    <?= GridView::widget([
        'dataProvider' => $dataProvider,
        'filterModel' => $searchModel,
        'columns' => [
            ['class' => 'yii\grid\SerialColumn'],

            'id',
            'id_creator',
            'users:ntext',
            'title',
            'gols',
            // 'rank',
            // 'status',
            // 'dcreated',
            // 'dmodified',

            ['class' => 'yii\grid\ActionColumn'],
        ],
    ]); ?>

</div>
// <auto-generated>
//     This code was generated by a tool.
//
//     Changes to this file may cause incorrect behavior and will be lost if
//     the code is regenerated.
// </auto-generated>

namespace Cuyahoga.Modules.LanguageSwitcher {

    // NOTE(review): the XML doc comments below had lost their "///" markers,
    // which left bare <summary> tags as invalid C# tokens; restored here.

    /// <summary>
    /// LanguageSwitcher class.
    /// </summary>
    /// <remarks>
    /// Auto-generated class.
    /// </remarks>
    public partial class LanguageSwitcher {

        /// <summary>
        /// pnlLinks control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify move field declaration from designer file to code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.Panel pnlLinks;

        /// <summary>
        /// plhLanguageLinks control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify move field declaration from designer file to code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.PlaceHolder plhLanguageLinks;

        /// <summary>
        /// pnlDropDown control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify move field declaration from designer file to code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.Panel pnlDropDown;

        /// <summary>
        /// ddlLanguage control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify move field declaration from designer file to code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.DropDownList ddlLanguage;

        /// <summary>
        /// imbGo control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify move field declaration from designer file to code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.ImageButton imbGo;
    }
}
<?php declare(strict_types=1);

namespace PHPUnit\Util\Xml;

use ArrayIterator;
use Countable;
use DOMNode;
use DOMNodeList;
use IteratorAggregate;

/**
 * Holds a stable copy of the nodes from a DOMNodeList.
 *
 * A live DOMNodeList changes as the underlying document is mutated; this
 * snapshot keeps the node references collected at construction time, so it
 * can be counted and iterated safely afterwards.
 *
 * @internal This class is not covered by the backward compatibility promise for PHPUnit
 */
final class SnapshotNodeList implements Countable, IteratorAggregate
{
    /**
     * @var DOMNode[]
     */
    private array $nodes = [];

    /**
     * Collects every node of the given list into a new snapshot.
     */
    public static function fromNodeList(DOMNodeList $list): self
    {
        $collected = new self;

        foreach ($list as $domNode) {
            $collected->nodes[] = $domNode;
        }

        return $collected;
    }

    /**
     * Number of nodes captured in the snapshot.
     */
    public function count(): int
    {
        return \count($this->nodes);
    }

    /**
     * Iterator over the captured nodes, in document order.
     */
    public function getIterator(): ArrayIterator
    {
        return new ArrayIterator($this->nodes);
    }
}
<?php

/**
 * ActiveRecord model for the 'g_route' table.
 *
 * Only the table mapping and the static model accessor are defined here;
 * column attributes are provided dynamically by CActiveRecord from the
 * table schema.
 */
class Route extends CActiveRecord
{
	/**
	 * Returns the static model of the specified AR class.
	 * @return CActiveRecord the static model class
	 */
	public static function model($className=__CLASS__)
	{
		return parent::model($className);
	}

	/**
	 * @return string the associated database table name
	 */
	public function tableName()
	{
		return 'g_route';
	}
}
/* * repo_rpmdb * * convert rpm db to repo * */ #include <sys/types.h> #include <sys/stat.h> #include <limits.h> #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <assert.h> #include <stdint.h> #include <errno.h> #include <rpm/rpmio.h> #include <rpm/rpmpgp.h> #ifndef RPM5 #include <rpm/header.h> #endif #include <rpm/rpmdb.h> #ifndef DB_CREATE # if defined(SUSE) || defined(HAVE_RPM_DB_H) # include <rpm/db.h> # else # include <db.h> # endif #endif #include "pool.h" #include "repo.h" #include "hash.h" #include "util.h" #include "queue.h" #include "chksum.h" #include "repo_rpmdb.h" #include "repo_solv.h" #ifdef ENABLE_COMPLEX_DEPS #include "<API key>.h" #endif /* 3: added triggers */ /* 4: fixed triggers */ /* 5: fixed checksum copying */ #define <API key> 5 #define TAG_NAME 1000 #define TAG_VERSION 1001 #define TAG_RELEASE 1002 #define TAG_EPOCH 1003 #define TAG_SUMMARY 1004 #define TAG_DESCRIPTION 1005 #define TAG_BUILDTIME 1006 #define TAG_BUILDHOST 1007 #define TAG_INSTALLTIME 1008 #define TAG_SIZE 1009 #define TAG_DISTRIBUTION 1010 #define TAG_VENDOR 1011 #define TAG_LICENSE 1014 #define TAG_PACKAGER 1015 #define TAG_GROUP 1016 #define TAG_URL 1020 #define TAG_ARCH 1022 #define TAG_FILESIZES 1028 #define TAG_FILEMODES 1030 #define TAG_FILEMD5S 1035 #define TAG_FILELINKTOS 1036 #define TAG_FILEFLAGS 1037 #define TAG_SOURCERPM 1044 #define TAG_PROVIDENAME 1047 #define TAG_REQUIREFLAGS 1048 #define TAG_REQUIRENAME 1049 #define TAG_REQUIREVERSION 1050 #define TAG_NOSOURCE 1051 #define TAG_NOPATCH 1052 #define TAG_CONFLICTFLAGS 1053 #define TAG_CONFLICTNAME 1054 #define TAG_CONFLICTVERSION 1055 #define TAG_TRIGGERNAME 1066 #define TAG_TRIGGERVERSION 1067 #define TAG_TRIGGERFLAGS 1068 #define TAG_CHANGELOGTIME 1080 #define TAG_CHANGELOGNAME 1081 #define TAG_CHANGELOGTEXT 1082 #define TAG_OBSOLETENAME 1090 #define TAG_FILEDEVICES 1095 #define TAG_FILEINODES 1096 #define TAG_SOURCEPACKAGE 1106 #define 
TAG_PROVIDEFLAGS 1112 #define TAG_PROVIDEVERSION 1113 #define TAG_OBSOLETEFLAGS 1114 #define TAG_OBSOLETEVERSION 1115 #define TAG_DIRINDEXES 1116 #define TAG_BASENAMES 1117 #define TAG_DIRNAMES 1118 #define TAG_PAYLOADFORMAT 1124 #define TAG_PATCHESNAME 1133 #define TAG_FILECOLORS 1140 #define TAG_OLDSUGGESTSNAME 1156 #define <API key> 1157 #define <API key> 1158 #define TAG_OLDENHANCESNAME 1159 #define <API key> 1160 #define <API key> 1161 /* rpm5 tags */ #define TAG_DISTEPOCH 1218 /* rpm4 tags */ #define TAG_LONGFILESIZES 5008 #define TAG_LONGSIZE 5009 #define TAG_RECOMMENDNAME 5046 #define <API key> 5047 #define TAG_RECOMMENDFLAGS 5048 #define TAG_SUGGESTNAME 5049 #define TAG_SUGGESTVERSION 5050 #define TAG_SUGGESTFLAGS 5051 #define TAG_SUPPLEMENTNAME 5052 #define <API key> 5053 #define TAG_SUPPLEMENTFLAGS 5054 #define TAG_ENHANCENAME 5055 #define TAG_ENHANCEVERSION 5056 #define TAG_ENHANCEFLAGS 5057 /* signature tags */ #define TAG_SIGBASE 256 #define TAG_SIGMD5 (TAG_SIGBASE + 5) #define TAG_SHA1HEADER (TAG_SIGBASE + 13) #define SIGTAG_SIZE 1000 #define SIGTAG_PGP 1002 /* RSA signature */ #define SIGTAG_MD5 1004 /* header+payload md5 checksum */ #define SIGTAG_GPG 1005 /* DSA signature */ #define DEP_LESS (1 << 1) #define DEP_GREATER (1 << 2) #define DEP_EQUAL (1 << 3) #define DEP_STRONG (1 << 27) #define DEP_PRE_IN ((1 << 6) | (1 << 9) | (1 << 10)) #define DEP_PRE_UN ((1 << 6) | (1 << 11) | (1 << 12)) #define FILEFLAG_GHOST (1 << 6) #ifdef RPM5 # define RPM_INDEX_SIZE 4 /* just the rpmdbid */ #else # define RPM_INDEX_SIZE 8 /* rpmdbid + array index */ #endif typedef struct rpmhead { int cnt; int dcnt; unsigned char *dp; int forcebinary; /* sigh, see rh#478907 */ unsigned char data[1]; } RpmHead; static inline unsigned char * headfindtag(RpmHead *h, int tag) { unsigned int i; unsigned char *d, taga[4]; d = h->dp - 16; taga[0] = tag >> 24; taga[1] = tag >> 16; taga[2] = tag >> 8; taga[3] = tag; for (i = 0; i < h->cnt; i++, d -= 16) if (d[3] == taga[3] && d[2] == 
taga[2] && d[1] == taga[1] && d[0] == taga[0]) return d; return 0; } static int headexists(RpmHead *h, int tag) { return headfindtag(h, tag) ? 1 : 0; } static unsigned int * headint32array(RpmHead *h, int tag, int *cnt) { unsigned int i, o, *r; unsigned char *d = headfindtag(h, tag); if (!d || d[4] != 0 || d[5] != 0 || d[6] != 0 || d[7] != 4) return 0; o = d[8] << 24 | d[9] << 16 | d[10] << 8 | d[11]; i = d[12] << 24 | d[13] << 16 | d[14] << 8 | d[15]; if (o + 4 * i > h->dcnt) return 0; d = h->dp + o; r = solv_calloc(i ? i : 1, sizeof(unsigned int)); if (cnt) *cnt = i; for (o = 0; o < i; o++, d += 4) r[o] = d[0] << 24 | d[1] << 16 | d[2] << 8 | d[3]; return r; } /* returns the first entry of an integer array */ static unsigned int headint32(RpmHead *h, int tag) { unsigned int i, o; unsigned char *d = headfindtag(h, tag); if (!d || d[4] != 0 || d[5] != 0 || d[6] != 0 || d[7] != 4) return 0; o = d[8] << 24 | d[9] << 16 | d[10] << 8 | d[11]; i = d[12] << 24 | d[13] << 16 | d[14] << 8 | d[15]; if (i == 0 || o + 4 * i > h->dcnt) return 0; d = h->dp + o; return d[0] << 24 | d[1] << 16 | d[2] << 8 | d[3]; } static unsigned long long * headint64array(RpmHead *h, int tag, int *cnt) { unsigned int i, o; unsigned long long *r; unsigned char *d = headfindtag(h, tag); if (!d || d[4] != 0 || d[5] != 0 || d[6] != 0 || d[7] != 5) return 0; o = d[8] << 24 | d[9] << 16 | d[10] << 8 | d[11]; i = d[12] << 24 | d[13] << 16 | d[14] << 8 | d[15]; if (o + 8 * i > h->dcnt) return 0; d = h->dp + o; r = solv_calloc(i ? 
i : 1, sizeof(unsigned long long)); if (cnt) *cnt = i; for (o = 0; o < i; o++, d += 8) { unsigned int x = d[0] << 24 | d[1] << 16 | d[2] << 8 | d[3]; r[o] = (unsigned long long)x << 32 | (d[4] << 24 | d[5] << 16 | d[6] << 8 | d[7]); } return r; } /* returns the first entry of an 64bit integer array */ static unsigned long long headint64(RpmHead *h, int tag) { unsigned int i, o; unsigned char *d = headfindtag(h, tag); if (!d || d[4] != 0 || d[5] != 0 || d[6] != 0 || d[7] != 5) return 0; o = d[8] << 24 | d[9] << 16 | d[10] << 8 | d[11]; i = d[12] << 24 | d[13] << 16 | d[14] << 8 | d[15]; if (i == 0 || o + 8 * i > h->dcnt) return 0; d = h->dp + o; i = d[0] << 24 | d[1] << 16 | d[2] << 8 | d[3]; return (unsigned long long)i << 32 | (d[4] << 24 | d[5] << 16 | d[6] << 8 | d[7]); } static unsigned int * headint16array(RpmHead *h, int tag, int *cnt) { unsigned int i, o, *r; unsigned char *d = headfindtag(h, tag); if (!d || d[4] != 0 || d[5] != 0 || d[6] != 0 || d[7] != 3) return 0; o = d[8] << 24 | d[9] << 16 | d[10] << 8 | d[11]; i = d[12] << 24 | d[13] << 16 | d[14] << 8 | d[15]; if (o + 4 * i > h->dcnt) return 0; d = h->dp + o; r = solv_calloc(i ? i : 1, sizeof(unsigned int)); if (cnt) *cnt = i; for (o = 0; o < i; o++, d += 2) r[o] = d[0] << 8 | d[1]; return r; } static char * headstring(RpmHead *h, int tag) { unsigned int o; unsigned char *d = headfindtag(h, tag); /* 6: STRING, 9: I18NSTRING */ if (!d || d[4] != 0 || d[5] != 0 || d[6] != 0 || (d[7] != 6 && d[7] != 9)) return 0; o = d[8] << 24 | d[9] << 16 | d[10] << 8 | d[11]; if (o >= h->dcnt) return 0; return (char *)h->dp + o; } static char ** headstringarray(RpmHead *h, int tag, int *cnt) { unsigned int i, o; unsigned char *d = headfindtag(h, tag); char **r; if (!d || d[4] != 0 || d[5] != 0 || d[6] != 0 || d[7] != 8) return 0; o = d[8] << 24 | d[9] << 16 | d[10] << 8 | d[11]; i = d[12] << 24 | d[13] << 16 | d[14] << 8 | d[15]; r = solv_calloc(i ? 
i : 1, sizeof(char *)); if (cnt) *cnt = i; d = h->dp + o; for (o = 0; o < i; o++) { r[o] = (char *)d; if (o + 1 < i) d += strlen((char *)d) + 1; if (d >= h->dp + h->dcnt) { solv_free(r); return 0; } } return r; } static unsigned char * headbinary(RpmHead *h, int tag, unsigned int *sizep) { unsigned int i, o; unsigned char *d = headfindtag(h, tag); if (!d || d[4] != 0 || d[5] != 0 || d[6] != 0 || d[7] != 7) return 0; o = d[8] << 24 | d[9] << 16 | d[10] << 8 | d[11]; i = d[12] << 24 | d[13] << 16 | d[14] << 8 | d[15]; if (o > h->dcnt || o + i < o || o + i > h->dcnt) return 0; if (sizep) *sizep = i; return h->dp + o; } static char *headtoevr(RpmHead *h) { unsigned int epoch; char *version, *v; char *release; char *evr; char *distepoch; version = headstring(h, TAG_VERSION); release = headstring(h, TAG_RELEASE); epoch = headint32(h, TAG_EPOCH); if (!version || !release) { fprintf(stderr, "headtoevr: bad rpm header\n"); return 0; } for (v = version; *v >= '0' && *v <= '9'; v++) ; if (epoch || (v != version && *v == ':')) { char epochbuf[11]; /* 32bit decimal will fit in */ sprintf(epochbuf, "%u", epoch); evr = solv_malloc(strlen(epochbuf) + 1 + strlen(version) + 1 + strlen(release) + 1); sprintf(evr, "%s:%s-%s", epochbuf, version, release); } else { evr = solv_malloc(strlen(version) + 1 + strlen(release) + 1); sprintf(evr, "%s-%s", version, release); } distepoch = headstring(h, TAG_DISTEPOCH); if (distepoch && *distepoch) { int l = strlen(evr); evr = solv_realloc(evr, l + strlen(distepoch) + 2); evr[l++] = ':'; strcpy(evr + l, distepoch); } return evr; } static void setutf8string(Repodata *repodata, Id handle, Id tag, const char *str) { if (str[solv_validutf8(str)]) { char *ustr = solv_latin1toutf8(str); /* not utf8, assume latin1 */ repodata_set_str(repodata, handle, tag, ustr); solv_free(ustr); } else repodata_set_str(repodata, handle, tag, str); } /* * strong: 0: ignore strongness * 1: filter to strong * 2: filter to weak */ static unsigned int makedeps(Pool *pool, 
Repo *repo, RpmHead *rpmhead, int tagn, int tagv, int tagf, int flags) { char **n, **v; unsigned int *f; int i, cc, nc, vc, fc; int haspre, premask; unsigned int olddeps; Id *ida; int strong = 0; n = headstringarray(rpmhead, tagn, &nc); if (!n) { switch (tagn) { case TAG_SUGGESTNAME: tagn = TAG_OLDSUGGESTSNAME; tagv = <API key>; tagf = <API key>; strong = -1; break; case TAG_ENHANCENAME: tagn = TAG_OLDENHANCESNAME; tagv = <API key>; tagf = <API key>; strong = -1; break; case TAG_RECOMMENDNAME: tagn = TAG_OLDSUGGESTSNAME; tagv = <API key>; tagf = <API key>; strong = 1; break; case TAG_SUPPLEMENTNAME: tagn = TAG_OLDENHANCESNAME; tagv = <API key>; tagf = <API key>; strong = 1; break; default: return 0; } n = headstringarray(rpmhead, tagn, &nc); } if (!n || !nc) return 0; vc = fc = 0; v = headstringarray(rpmhead, tagv, &vc); f = headint32array(rpmhead, tagf, &fc); if (!v || !f || nc != vc || nc != fc) { char *pkgname = rpm_query(rpmhead, 0); pool_error(pool, 0, "bad dependency entries for %s: %d %d %d", pkgname ? pkgname : "<NULL>", nc, vc, fc); solv_free(pkgname); solv_free(n); solv_free(v); solv_free(f); return 0; } cc = nc; haspre = 0; /* add no prereq marker */ premask = tagn == TAG_REQUIRENAME ? DEP_PRE_IN | DEP_PRE_UN : 0; if ((flags & <API key>) || strong) { /* we do filtering */ cc = 0; for (i = 0; i < nc; i++) { if (strong && (f[i] & DEP_STRONG) != (strong < 0 ? 
0 : DEP_STRONG)) continue; if ((flags & <API key>) != 0) if (!strncmp(n[i], "rpmlib(", 7)) continue; if ((f[i] & premask) != 0) haspre = 1; cc++; } } else if (premask) { /* no filtering, just look for the first prereq */ for (i = 0; i < nc; i++) if ((f[i] & premask) != 0) { haspre = 1; break; } } if (cc == 0) { solv_free(n); solv_free(v); solv_free(f); return 0; } cc += haspre; /* add slot for the prereq marker */ olddeps = repo_reserve_ids(repo, 0, cc); ida = repo->idarraydata + olddeps; for (i = 0; ; i++) { Id id; if (i == nc) { if (haspre != 1) break; haspre = 2; /* pass two: prereqs */ i = 0; *ida++ = <API key>; } if (strong && (f[i] & DEP_STRONG) != (strong < 0 ? 0 : DEP_STRONG)) continue; if (haspre) { if (haspre == 1 && (f[i] & premask) != 0) continue; if (haspre == 2 && (f[i] & premask) == 0) continue; } if ((flags & <API key>) != 0) if (!strncmp(n[i], "rpmlib(", 7)) continue; #ifdef ENABLE_COMPLEX_DEPS if ((f[i] & (DEP_LESS|DEP_EQUAL|DEP_GREATER)) == 0 && n[i][0] == '(') { id = <API key>(pool, n[i]); if (id) *ida++ = id; else cc continue; } #endif id = pool_str2id(pool, n[i], 1); if (f[i] & (DEP_LESS|DEP_GREATER|DEP_EQUAL)) { Id evr; int fl = 0; if ((f[i] & DEP_LESS) != 0) fl |= REL_LT; if ((f[i] & DEP_EQUAL) != 0) fl |= REL_EQ; if ((f[i] & DEP_GREATER) != 0) fl |= REL_GT; if (v[i][0] == '0' && v[i][1] == ':' && v[i][2]) evr = pool_str2id(pool, v[i] + 2, 1); else evr = pool_str2id(pool, v[i], 1); id = pool_rel2id(pool, id, evr, fl, 1); } *ida++ = id; } *ida++ = 0; repo->idarraysize += cc + 1; solv_free(n); solv_free(v); solv_free(f); return olddeps; } static void adddudata(Repodata *data, Id handle, RpmHead *rpmhead, char **dn, unsigned int *di, int fc, int dc) { Id did; int i, fszc; unsigned int *fkb, *fn, *fsz, *fm, *fino; unsigned long long *fsz64; unsigned int inotest[256], inotestok; if (!fc) return; if ((fsz64 = headint64array(rpmhead, TAG_LONGFILESIZES, &fszc)) != 0) { /* convert to kbyte */ fsz = solv_malloc2(fszc, sizeof(*fsz)); for (i = 0; i < 
fszc; i++) fsz[i] = fsz64[i] ? fsz64[i] / 1024 + 1 : 0; solv_free(fsz64); } else if ((fsz = headint32array(rpmhead, TAG_FILESIZES, &fszc)) != 0) { /* convert to kbyte */ for (i = 0; i < fszc; i++) if (fsz[i]) fsz[i] = fsz[i] / 1024 + 1; } else return; if (fc != fszc) { solv_free(fsz); return; } /* stupid rpm records sizes of directories, so we have to check the mode */ fm = headint16array(rpmhead, TAG_FILEMODES, &fszc); if (!fm || fc != fszc) { solv_free(fsz); solv_free(fm); return; } fino = headint32array(rpmhead, TAG_FILEINODES, &fszc); if (!fino || fc != fszc) { solv_free(fsz); solv_free(fm); solv_free(fino); return; } /* kill hardlinked entries */ inotestok = 0; if (fc < sizeof(inotest)) { /* quick test just hashing the inode numbers */ memset(inotest, 0, sizeof(inotest)); for (i = 0; i < fc; i++) { int off, bit; if (fsz[i] == 0 || !S_ISREG(fm[i])) continue; /* does not matter */ off = (fino[i] >> 5) & (sizeof(inotest)/sizeof(*inotest) - 1); bit = 1 << (fino[i] & 31); if ((inotest[off] & bit) != 0) break; inotest[off] |= bit; } if (i == fc) inotestok = 1; /* no conflict found */ } if (!inotestok) { /* hardlinked files are possible, check ino/dev pairs */ unsigned int *fdev = headint32array(rpmhead, TAG_FILEDEVICES, &fszc); unsigned int *fx, j; unsigned int mask, hash, hh; if (!fdev || fc != fszc) { solv_free(fsz); solv_free(fm); solv_free(fdev); solv_free(fino); return; } mask = fc; while ((mask & (mask - 1)) != 0) mask = mask & (mask - 1); mask <<= 2; if (mask > sizeof(inotest)/sizeof(*inotest)) fx = solv_calloc(mask, sizeof(unsigned int)); else { fx = inotest; memset(fx, 0, mask * sizeof(unsigned int)); } mask for (i = 0; i < fc; i++) { if (fsz[i] == 0 || !S_ISREG(fm[i])) continue; hash = (fino[i] + fdev[i] * 31) & mask; hh = 7; while ((j = fx[hash]) != 0) { if (fino[j - 1] == fino[i] && fdev[j - 1] == fdev[i]) { fsz[i] = 0; /* kill entry */ break; } hash = (hash + hh++) & mask; } if (!j) fx[hash] = i + 1; } if (fx != inotest) solv_free(fx); solv_free(fdev); 
} solv_free(fino); /* sum up inode count and kbytes for each directory */ fn = solv_calloc(dc, sizeof(unsigned int)); fkb = solv_calloc(dc, sizeof(unsigned int)); for (i = 0; i < fc; i++) { if (di[i] >= dc) continue; /* corrupt entry */ fn[di[i]]++; if (fsz[i] == 0 || !S_ISREG(fm[i])) continue; fkb[di[i]] += fsz[i]; } solv_free(fsz); solv_free(fm); /* commit */ for (i = 0; i < dc; i++) { if (!fn[i]) continue; if (!*dn[i]) { Solvable *s = data->repo->pool->solvables + handle; if (s->arch == ARCH_SRC || s->arch == ARCH_NOSRC) did = repodata_str2dir(data, "/usr/src", 1); else continue; /* work around rpm bug */ } else did = repodata_str2dir(data, dn[i], 1); <API key>(data, handle, SOLVABLE_DISKUSAGE, did, fkb[i], fn[i]); } solv_free(fn); solv_free(fkb); } static int is_filtered(const char *dir) { if (!dir) return 1; /* the dirs always have a trailing / in rpm */ if (strstr(dir, "bin/")) return 0; if (!strncmp(dir, "/etc/", 5)) return 0; if (!strcmp(dir, "/usr/lib/")) return 2; return 1; } static void addfilelist(Repodata *data, Id handle, RpmHead *rpmhead, int flags) { char **bn; char **dn; unsigned int *di; int bnc, dnc, dic; int i; Id lastdid = 0; unsigned int lastdii = -1; int lastfiltered = 0; if (!data) return; bn = headstringarray(rpmhead, TAG_BASENAMES, &bnc); if (!bn) return; dn = headstringarray(rpmhead, TAG_DIRNAMES, &dnc); if (!dn) { solv_free(bn); return; } di = headint32array(rpmhead, TAG_DIRINDEXES, &dic); if (!di) { solv_free(bn); solv_free(dn); return; } if (bnc != dic) { pool_error(data->repo->pool, 0, "bad filelist"); return; } adddudata(data, handle, rpmhead, dn, di, bnc, dnc); for (i = 0; i < bnc; i++) { Id did; char *b = bn[i]; if (di[i] == lastdii) did = lastdid; else { if (di[i] >= dnc) continue; /* corrupt entry */ lastdii = di[i]; if ((flags & <API key>) != 0) { lastfiltered = is_filtered(dn[di[i]]); if (lastfiltered == 1) continue; } did = repodata_str2dir(data, dn[lastdii], 1); if (!did) did = repodata_str2dir(data, "/", 1); lastdid = did; } 
if (b && *b == '/') /* work around rpm bug */ b++; if (lastfiltered) { if (lastfiltered != 2 || strcmp(b, "sendmail")) continue; } repodata_add_dirstr(data, handle, SOLVABLE_FILELIST, did, b); } solv_free(bn); solv_free(dn); solv_free(di); } static void addchangelog(Repodata *data, Id handle, RpmHead *rpmhead) { char **cn; char **cx; unsigned int *ct; int i, cnc, cxc, ctc; Queue hq; ct = headint32array(rpmhead, TAG_CHANGELOGTIME, &ctc); cx = headstringarray(rpmhead, TAG_CHANGELOGTEXT, &cxc); cn = headstringarray(rpmhead, TAG_CHANGELOGNAME, &cnc); if (!ct || !cx || !cn || !ctc || ctc != cxc || ctc != cnc) { solv_free(ct); solv_free(cx); solv_free(cn); return; } queue_init(&hq); for (i = 0; i < ctc; i++) { Id h = repodata_new_handle(data); if (ct[i]) repodata_set_num(data, h, <API key>, ct[i]); if (cn[i]) setutf8string(data, h, <API key>, cn[i]); if (cx[i]) setutf8string(data, h, <API key>, cx[i]); queue_push(&hq, h); } for (i = 0; i < hq.count; i++) <API key>(data, handle, SOLVABLE_CHANGELOG, hq.elements[i]); queue_free(&hq); solv_free(ct); solv_free(cx); solv_free(cn); } static void <API key>(Repodata *data, Id handle, char *str) { char *aut, *p; for (aut = str; (aut = strchr(aut, '\n')) != 0; aut++) if (!strncmp(aut, "\nAuthors:\n break; if (aut) { /* oh my, found SUSE special author section */ int l = aut - str; str = solv_strdup(str); aut = str + l; str[l] = 0; while (l > 0 && str[l - 1] == '\n') str[--l] = 0; if (l) setutf8string(data, handle, <API key>, str); p = aut + 19; aut = str; /* copy over */ while (*p == ' ' || *p == '\n') p++; while (*p) { if (*p == '\n') { *aut++ = *p++; while (*p == ' ') p++; continue; } *aut++ = *p++; } while (aut != str && aut[-1] == '\n') aut *aut = 0; if (*str) setutf8string(data, handle, SOLVABLE_AUTHORS, str); free(str); } else if (*str) setutf8string(data, handle, <API key>, str); } static int rpm2solv(Pool *pool, Repo *repo, Repodata *data, Solvable *s, RpmHead *rpmhead, int flags) { char *name; char *evr; char *sourcerpm; 
name = headstring(rpmhead, TAG_NAME); if (!name) { pool_error(pool, 0, "package has no name"); return 0; } if (!strcmp(name, "gpg-pubkey")) return 0; s->name = pool_str2id(pool, name, 1); sourcerpm = headstring(rpmhead, TAG_SOURCERPM); if (sourcerpm || (rpmhead->forcebinary && !headexists(rpmhead, TAG_SOURCEPACKAGE))) s->arch = pool_str2id(pool, headstring(rpmhead, TAG_ARCH), 1); else { if (headexists(rpmhead, TAG_NOSOURCE) || headexists(rpmhead, TAG_NOPATCH)) s->arch = ARCH_NOSRC; else s->arch = ARCH_SRC; } if (!s->arch) s->arch = ARCH_NOARCH; evr = headtoevr(rpmhead); s->evr = pool_str2id(pool, evr, 1); s->vendor = pool_str2id(pool, headstring(rpmhead, TAG_VENDOR), 1); s->provides = makedeps(pool, repo, rpmhead, TAG_PROVIDENAME, TAG_PROVIDEVERSION, TAG_PROVIDEFLAGS, 0); if (s->arch != ARCH_SRC && s->arch != ARCH_NOSRC) s->provides = repo_addid_dep(repo, s->provides, pool_rel2id(pool, s->name, s->evr, REL_EQ, 1), 0); s->requires = makedeps(pool, repo, rpmhead, TAG_REQUIRENAME, TAG_REQUIREVERSION, TAG_REQUIREFLAGS, flags); s->conflicts = makedeps(pool, repo, rpmhead, TAG_CONFLICTNAME, TAG_CONFLICTVERSION, TAG_CONFLICTFLAGS, 0); s->obsoletes = makedeps(pool, repo, rpmhead, TAG_OBSOLETENAME, TAG_OBSOLETEVERSION, TAG_OBSOLETEFLAGS, 0); s->recommends = makedeps(pool, repo, rpmhead, TAG_RECOMMENDNAME, <API key>, TAG_RECOMMENDFLAGS, 0); s->suggests = makedeps(pool, repo, rpmhead, TAG_SUGGESTNAME, TAG_SUGGESTVERSION, TAG_SUGGESTFLAGS, 0); s->supplements = makedeps(pool, repo, rpmhead, TAG_SUPPLEMENTNAME, <API key>, TAG_SUPPLEMENTFLAGS, 0); s->enhances = makedeps(pool, repo, rpmhead, TAG_ENHANCENAME, TAG_ENHANCEVERSION, TAG_ENHANCEFLAGS, 0); s->supplements = <API key>(repo, s->provides, s->supplements, 0); s->conflicts = repo_fix_conflicts(repo, s->conflicts); if (data) { Id handle; char *str; unsigned int u32; unsigned long long u64; handle = s - pool->solvables; str = headstring(rpmhead, TAG_SUMMARY); if (str) setutf8string(data, handle, SOLVABLE_SUMMARY, str); str = 
headstring(rpmhead, TAG_DESCRIPTION); if (str) <API key>(data, handle, str); str = headstring(rpmhead, TAG_GROUP); if (str) <API key>(data, handle, SOLVABLE_GROUP, str); str = headstring(rpmhead, TAG_LICENSE); if (str) <API key>(data, handle, SOLVABLE_LICENSE, str); str = headstring(rpmhead, TAG_URL); if (str) repodata_set_str(data, handle, SOLVABLE_URL, str); str = headstring(rpmhead, TAG_DISTRIBUTION); if (str) <API key>(data, handle, <API key>, str); str = headstring(rpmhead, TAG_PACKAGER); if (str) <API key>(data, handle, SOLVABLE_PACKAGER, str); if ((flags & RPM_ADD_WITH_PKGID) != 0) { unsigned char *chksum; unsigned int chksumsize; chksum = headbinary(rpmhead, TAG_SIGMD5, &chksumsize); if (chksum && chksumsize == 16) <API key>(data, handle, SOLVABLE_PKGID, REPOKEY_TYPE_MD5, chksum); } if ((flags & RPM_ADD_WITH_HDRID) != 0) { str = headstring(rpmhead, TAG_SHA1HEADER); if (str && strlen(str) == 40) <API key>(data, handle, SOLVABLE_HDRID, REPOKEY_TYPE_SHA1, str); else if (str && strlen(str) == 64) <API key>(data, handle, SOLVABLE_HDRID, REPOKEY_TYPE_SHA256, str); } u32 = headint32(rpmhead, TAG_BUILDTIME); if (u32) repodata_set_num(data, handle, SOLVABLE_BUILDTIME, u32); u32 = headint32(rpmhead, TAG_INSTALLTIME); if (u32) repodata_set_num(data, handle, <API key>, u32); u64 = headint64(rpmhead, TAG_LONGSIZE); if (u64) repodata_set_num(data, handle, <API key>, u64); else { u32 = headint32(rpmhead, TAG_SIZE); if (u32) repodata_set_num(data, handle, <API key>, u32); } if (sourcerpm) <API key>(data, handle, sourcerpm); if ((flags & RPM_ADD_TRIGGERS) != 0) { Id id, lastid; unsigned int ida = makedeps(pool, repo, rpmhead, TAG_TRIGGERNAME, TAG_TRIGGERVERSION, TAG_TRIGGERFLAGS, 0); lastid = 0; for (; (id = repo->idarraydata[ida]) != 0; ida++) { /* we currently do not support rel ids in incore data, so * strip off versioning information */ while (ISRELDEP(id)) { Reldep *rd = GETRELDEP(pool, id); id = rd->name; } if (id == lastid) continue; <API key>(data, handle, 
SOLVABLE_TRIGGERS, id); lastid = id; } } if ((flags & RPM_ADD_NO_FILELIST) == 0) addfilelist(data, handle, rpmhead, flags); if ((flags & <API key>) != 0) addchangelog(data, handle, rpmhead); } solv_free(evr); return 1; } /* Rpm Database stuff */ struct rpmdbstate { Pool *pool; char *rootdir; RpmHead *rpmhead; /* header storage space */ int rpmheadsize; int dbopened; DB_ENV *dbenv; /* database environment */ DB *db; /* packages database */ int byteswapped; /* endianess of packages database */ int is_ostree; /* read-only db that lives in /usr/share/rpm */ }; struct rpmdbentry { Id rpmdbid; Id nameoff; }; #define ENTRIES_BLOCK 255 #define NAMEDATA_BLOCK 1023 static inline Id db2rpmdbid(unsigned char *db, int byteswapped) { #ifdef RPM5 return db[0] << 24 | db[1] << 16 | db[2] << 8 | db[3]; #else # if defined(WORDS_BIGENDIAN) if (!byteswapped) # else if (byteswapped) # endif return db[0] << 24 | db[1] << 16 | db[2] << 8 | db[3]; else return db[3] << 24 | db[2] << 16 | db[1] << 8 | db[0]; #endif } static inline void rpmdbid2db(unsigned char *db, Id id, int byteswapped) { #ifdef RPM5 db[0] = id >> 24, db[1] = id >> 16, db[2] = id >> 8, db[3] = id; #else # if defined(WORDS_BIGENDIAN) if (!byteswapped) # else if (byteswapped) # endif db[0] = id >> 24, db[1] = id >> 16, db[2] = id >> 8, db[3] = id; else db[3] = id >> 24, db[2] = id >> 16, db[1] = id >> 8, db[0] = id; #endif } #ifdef FEDORA int serialize_dbenv_ops(struct rpmdbstate *state) { char lpath[PATH_MAX]; mode_t oldmask; int fd; struct flock fl; snprintf(lpath, PATH_MAX, "%s/var/lib/rpm/.dbenv.lock", state->rootdir ? state->rootdir : ""); oldmask = umask(022); fd = open(lpath, (O_RDWR|O_CREAT), 0644); umask(oldmask); if (fd < 0) return -1; memset(&fl, 0, sizeof(fl)); fl.l_type = F_WRLCK; fl.l_whence = SEEK_SET; for (;;) { if (fcntl(fd, F_SETLKW, &fl) != -1) return fd; if (errno != EINTR) break; } close(fd); return -1; } #endif /* should look in /usr/lib/rpm/macros instead, but we want speed... 
*/ static int opendbenv(struct rpmdbstate *state) { const char *rootdir = state->rootdir; char dbpath[PATH_MAX]; DB_ENV *dbenv = 0; int r; if (db_env_create(&dbenv, 0)) return pool_error(state->pool, 0, "db_env_create: %s", strerror(errno)); #if defined(FEDORA) && (DB_VERSION_MAJOR >= 5 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 5)) dbenv->set_thread_count(dbenv, 8); #endif snprintf(dbpath, PATH_MAX, "%s/var/lib/rpm", rootdir ? rootdir : ""); if (access(dbpath, W_OK) == -1) { snprintf(dbpath, PATH_MAX, "%s/usr/share/rpm/Packages", rootdir ? rootdir : ""); if (access(dbpath, R_OK) == 0) state->is_ostree = 1; snprintf(dbpath, PATH_MAX, "%s%s", rootdir ? rootdir : "", state->is_ostree ? "/usr/share/rpm" : "/var/lib/rpm"); r = dbenv->open(dbenv, dbpath, DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL, 0); } else { #ifdef FEDORA int serialize_fd = serialize_dbenv_ops(state); r = dbenv->open(dbenv, dbpath, DB_CREATE|DB_INIT_CDB|DB_INIT_MPOOL, 0644); if (serialize_fd >= 0) close(serialize_fd); #else r = dbenv->open(dbenv, dbpath, DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL, 0); #endif } if (r) { pool_error(state->pool, 0, "dbenv->open: %s", strerror(errno)); dbenv->close(dbenv, 0); return 0; } state->dbenv = dbenv; return 1; } static void closedbenv(struct rpmdbstate *state) { #ifdef FEDORA uint32_t eflags = 0; #endif if (!state->dbenv) return; #ifdef FEDORA (void)state->dbenv->get_open_flags(state->dbenv, &eflags); if (!(eflags & DB_PRIVATE)) { int serialize_fd = serialize_dbenv_ops(state); state->dbenv->close(state->dbenv, 0); if (serialize_fd >= 0) close(serialize_fd); } else state->dbenv->close(state->dbenv, 0); #else state->dbenv->close(state->dbenv, 0); #endif state->dbenv = 0; } static int openpkgdb(struct rpmdbstate *state) { if (state->dbopened) return state->dbopened > 0 ? 
1 : 0; state->dbopened = -1; if (!state->dbenv && !opendbenv(state)) return 0; if (db_create(&state->db, state->dbenv, 0)) { pool_error(state->pool, 0, "db_create: %s", strerror(errno)); state->db = 0; closedbenv(state); return 0; } if (state->db->open(state->db, 0, "Packages", 0, DB_UNKNOWN, DB_RDONLY, 0664)) { pool_error(state->pool, 0, "db->open Packages: %s", strerror(errno)); state->db->close(state->db, 0); state->db = 0; closedbenv(state); return 0; } if (state->db->get_byteswapped(state->db, &state->byteswapped)) { pool_error(state->pool, 0, "db->get_byteswapped: %s", strerror(errno)); state->db->close(state->db, 0); state->db = 0; closedbenv(state); return 0; } state->dbopened = 1; return 1; } /* get the rpmdbids of all installed packages from the Name index database. * This is much faster then querying the big Packages database */ static struct rpmdbentry * <API key>(struct rpmdbstate *state, const char *index, const char *match, int *nentriesp, char **namedatap) { DB_ENV *dbenv = 0; DB *db = 0; DBC *dbc = 0; int byteswapped; DBT dbkey; DBT dbdata; unsigned char *dp; int dl; Id nameoff; char *namedata = 0; int namedatal = 0; struct rpmdbentry *entries = 0; int nentries = 0; *nentriesp = 0; if (namedatap) *namedatap = 0; if (!state->dbenv && !opendbenv(state)) return 0; dbenv = state->dbenv; if (db_create(&db, dbenv, 0)) { pool_error(state->pool, 0, "db_create: %s", strerror(errno)); return 0; } if (db->open(db, 0, index, 0, DB_UNKNOWN, DB_RDONLY, 0664)) { pool_error(state->pool, 0, "db->open %s: %s", index, strerror(errno)); db->close(db, 0); return 0; } if (db->get_byteswapped(db, &byteswapped)) { pool_error(state->pool, 0, "db->get_byteswapped: %s", strerror(errno)); db->close(db, 0); return 0; } if (db->cursor(db, NULL, &dbc, 0)) { pool_error(state->pool, 0, "db->cursor: %s", strerror(errno)); db->close(db, 0); return 0; } memset(&dbkey, 0, sizeof(dbkey)); memset(&dbdata, 0, sizeof(dbdata)); if (match) { dbkey.data = (void *)match; dbkey.size = 
strlen(match); } while (dbc->c_get(dbc, &dbkey, &dbdata, match ? DB_SET : DB_NEXT) == 0) { if (!match && dbkey.size == 10 && !memcmp(dbkey.data, "gpg-pubkey", 10)) continue; dl = dbdata.size; dp = dbdata.data; nameoff = namedatal; if (namedatap) { namedata = solv_extend(namedata, namedatal, dbkey.size + 1, 1, NAMEDATA_BLOCK); memcpy(namedata + namedatal, dbkey.data, dbkey.size); namedata[namedatal + dbkey.size] = 0; namedatal += dbkey.size + 1; } while(dl >= RPM_INDEX_SIZE) { entries = solv_extend(entries, nentries, 1, sizeof(*entries), ENTRIES_BLOCK); entries[nentries].rpmdbid = db2rpmdbid(dp, byteswapped); entries[nentries].nameoff = nameoff; nentries++; dp += RPM_INDEX_SIZE; dl -= RPM_INDEX_SIZE; } if (match) break; } dbc->c_close(dbc); db->close(db, 0); /* make sure that enteries is != 0 if there was no error */ if (!entries) entries = solv_extend(entries, 1, 1, sizeof(*entries), ENTRIES_BLOCK); *nentriesp = nentries; if (namedatap) *namedatap = namedata; return entries; } /* retrive header by rpmdbid */ static int getrpmdbid(struct rpmdbstate *state, Id rpmdbid) { unsigned char buf[16]; DBT dbkey; DBT dbdata; RpmHead *rpmhead; if (!rpmdbid) { pool_error(state->pool, 0, "illegal rpmdbid"); return -1; } if (state->dbopened != 1 && !openpkgdb(state)) return -1; rpmdbid2db(buf, rpmdbid, state->byteswapped); memset(&dbkey, 0, sizeof(dbkey)); memset(&dbdata, 0, sizeof(dbdata)); dbkey.data = buf; dbkey.size = 4; dbdata.data = 0; dbdata.size = 0; if (state->db->get(state->db, NULL, &dbkey, &dbdata, 0)) return 0; if (dbdata.size < 8) { pool_error(state->pool, 0, "corrupt rpm database (size)"); return -1; } if (dbdata.size > state->rpmheadsize) { state->rpmheadsize = dbdata.size + 128; state->rpmhead = solv_realloc(state->rpmhead, sizeof(*rpmhead) + state->rpmheadsize); } rpmhead = state->rpmhead; memcpy(buf, dbdata.data, 8); rpmhead->forcebinary = 1; rpmhead->cnt = buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]; rpmhead->dcnt = buf[4] << 24 | buf[5] << 16 | buf[6] 
<< 8 | buf[7]; if (8 + rpmhead->cnt * 16 + rpmhead->dcnt > dbdata.size) { pool_error(state->pool, 0, "corrupt rpm database (data size)"); return -1; } memcpy(rpmhead->data, (unsigned char *)dbdata.data + 8, rpmhead->cnt * 16 + rpmhead->dcnt); rpmhead->dp = rpmhead->data + rpmhead->cnt * 16; return 1; } /* retrive header by berkeleydb cursor */ static Id getrpmcursor(struct rpmdbstate *state, DBC *dbc) { unsigned char buf[16]; DBT dbkey; DBT dbdata; RpmHead *rpmhead; Id dbid; memset(&dbkey, 0, sizeof(dbkey)); memset(&dbdata, 0, sizeof(dbdata)); while (dbc->c_get(dbc, &dbkey, &dbdata, DB_NEXT) == 0) { if (dbkey.size != 4) return pool_error(state->pool, -1, "corrupt Packages database (key size)"); dbid = db2rpmdbid(dbkey.data, state->byteswapped); if (dbid == 0) /* the join key */ continue; if (dbdata.size < 8) return pool_error(state->pool, -1, "corrupt rpm database (size %u)\n", dbdata.size); if (dbdata.size > state->rpmheadsize) { state->rpmheadsize = dbdata.size + 128; state->rpmhead = solv_realloc(state->rpmhead, sizeof(*state->rpmhead) + state->rpmheadsize); } rpmhead = state->rpmhead; memcpy(buf, dbdata.data, 8); rpmhead->forcebinary = 1; rpmhead->cnt = buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]; rpmhead->dcnt = buf[4] << 24 | buf[5] << 16 | buf[6] << 8 | buf[7]; if (8 + rpmhead->cnt * 16 + rpmhead->dcnt > dbdata.size) return pool_error(state->pool, -1, "corrupt rpm database (data size)\n"); memcpy(rpmhead->data, (unsigned char *)dbdata.data + 8, rpmhead->cnt * 16 + rpmhead->dcnt); rpmhead->dp = rpmhead->data + rpmhead->cnt * 16; return dbid; } return 0; } static void freestate(struct rpmdbstate *state) { /* close down */ if (!state) return; if (state->db) state->db->close(state->db, 0); if (state->dbenv) closedbenv(state); if (state->rootdir) solv_free(state->rootdir); solv_free(state->rpmhead); } void * rpm_state_create(Pool *pool, const char *rootdir) { struct rpmdbstate *state; state = solv_calloc(1, sizeof(*state)); state->pool = pool; if (rootdir) 
state->rootdir = solv_strdup(rootdir); return state; } void * rpm_state_free(void *state) { freestate(state); return solv_free(state); } static int count_headers(struct rpmdbstate *state) { Pool *pool = state->pool; char dbpath[PATH_MAX]; struct stat statbuf; DB *db = 0; DBC *dbc = 0; int count = 0; DBT dbkey; DBT dbdata; snprintf(dbpath, PATH_MAX, "%s%s/Name", state->rootdir ? state->rootdir : "", state->is_ostree ? "/usr/share/rpm" : "/var/lib/rpm"); if (stat(dbpath, &statbuf)) return 0; memset(&dbkey, 0, sizeof(dbkey)); memset(&dbdata, 0, sizeof(dbdata)); if (db_create(&db, state->dbenv, 0)) { pool_error(pool, 0, "db_create: %s", strerror(errno)); return 0; } if (db->open(db, 0, "Name", 0, DB_UNKNOWN, DB_RDONLY, 0664)) { pool_error(pool, 0, "db->open Name: %s", strerror(errno)); db->close(db, 0); return 0; } if (db->cursor(db, NULL, &dbc, 0)) { db->close(db, 0); pool_error(pool, 0, "db->cursor: %s", strerror(errno)); return 0; } while (dbc->c_get(dbc, &dbkey, &dbdata, DB_NEXT) == 0) count += dbdata.size / RPM_INDEX_SIZE; dbc->c_close(dbc); db->close(db, 0); return count; } static Offset copydeps(Pool *pool, Repo *repo, Offset fromoff, Repo *fromrepo) { int cc; Id *ida, *from; Offset ido; if (!fromoff) return 0; from = fromrepo->idarraydata + fromoff; for (ida = from, cc = 0; *ida; ida++, cc++) ; if (cc == 0) return 0; ido = repo_reserve_ids(repo, 0, cc); ida = repo->idarraydata + ido; memcpy(ida, from, (cc + 1) * sizeof(Id)); repo->idarraysize += cc + 1; return ido; } #define <API key> 512 static Id copydir_complex(Pool *pool, Repodata *data, Repodata *fromdata, Id did, Id *cache); static inline Id copydir(Pool *pool, Repodata *data, Repodata *fromdata, Id did, Id *cache) { if (cache && cache[did & 255] == did) return cache[(did & 255) + 256]; return copydir_complex(pool, data, fromdata, did, cache); } static Id copydir_complex(Pool *pool, Repodata *data, Repodata *fromdata, Id did, Id *cache) { Id parent = dirpool_parent(&fromdata->dirpool, did); Id compid = 
dirpool_compid(&fromdata->dirpool, did); if (parent) parent = copydir(pool, data, fromdata, parent, cache); if (data->localpool || fromdata->localpool) compid = <API key>(data, fromdata, compid, 1); compid = dirpool_add_dir(&data->dirpool, parent, compid, 1); if (cache) { cache[did & 255] = did; cache[(did & 255) + 256] = compid; } return compid; } struct <API key> { Repodata *data; Id handle; Id subhandle; Id *dircache; }; static int solvable_copy_cb(void *vcbdata, Solvable *r, Repodata *fromdata, Repokey *key, KeyValue *kv) { struct <API key> *cbdata = vcbdata; Id id, keyname; Repodata *data = cbdata->data; Id handle = cbdata->handle; Pool *pool = data->repo->pool; keyname = key->name; switch(key->type) { case REPOKEY_TYPE_ID: case <API key>: case <API key>: /* used for triggers */ id = kv->id; if (data->localpool || fromdata->localpool) id = <API key>(data, fromdata, id, 1); if (key->type == REPOKEY_TYPE_ID) repodata_set_id(data, handle, keyname, id); else if (key->type == <API key>) <API key>(data, handle, keyname, id); else <API key>(data, handle, keyname, id); break; case REPOKEY_TYPE_STR: repodata_set_str(data, handle, keyname, kv->str); break; case REPOKEY_TYPE_VOID: repodata_set_void(data, handle, keyname); break; case REPOKEY_TYPE_NUM: repodata_set_num(data, handle, keyname, SOLV_KV_NUM64(kv)); break; case <API key>: <API key>(data, handle, keyname, kv->num); break; case <API key>: id = kv->id; id = copydir(pool, data, fromdata, id, cbdata->dircache); <API key>(data, handle, keyname, id, kv->num, kv->num2); break; case <API key>: id = kv->id; id = copydir(pool, data, fromdata, id, cbdata->dircache); repodata_add_dirstr(data, handle, keyname, id, kv->str); break; case <API key>: if (kv->eof == 2) { assert(cbdata->subhandle); cbdata->handle = cbdata->subhandle; cbdata->subhandle = 0; break; } if (!kv->entry) { assert(!cbdata->subhandle); cbdata->subhandle = cbdata->handle; } cbdata->handle = repodata_new_handle(data); <API key>(data, cbdata->subhandle, 
keyname, cbdata->handle); break; default: if (solv_chksum_len(key->type)) { <API key>(data, handle, keyname, key->type, (const unsigned char *)kv->str); break; } break; } return 0; } static void solvable_copy(Solvable *s, Solvable *r, Repodata *data, Id *dircache) { int p, i; Repo *repo = s->repo; Pool *pool = repo->pool; Repo *fromrepo = r->repo; struct <API key> cbdata; /* copy solvable data */ s->name = r->name; s->evr = r->evr; s->arch = r->arch; s->vendor = r->vendor; s->provides = copydeps(pool, repo, r->provides, fromrepo); s->requires = copydeps(pool, repo, r->requires, fromrepo); s->conflicts = copydeps(pool, repo, r->conflicts, fromrepo); s->obsoletes = copydeps(pool, repo, r->obsoletes, fromrepo); s->recommends = copydeps(pool, repo, r->recommends, fromrepo); s->suggests = copydeps(pool, repo, r->suggests, fromrepo); s->supplements = copydeps(pool, repo, r->supplements, fromrepo); s->enhances = copydeps(pool, repo, r->enhances, fromrepo); /* copy all attributes */ if (!data) return; cbdata.data = data; cbdata.handle = s - pool->solvables; cbdata.subhandle = 0; cbdata.dircache = dircache; p = r - fromrepo->pool->solvables; #if 0 repo_search(fromrepo, p, 0, 0, <API key> | SEARCH_SUB | <API key>, solvable_copy_cb, &cbdata); #else FOR_REPODATAS(fromrepo, i, data) { if (p >= data->start && p < data->end) repodata_search(data, p, 0, SEARCH_SUB | <API key>, solvable_copy_cb, &cbdata); cbdata.dircache = 0; /* only for first repodata */ } #endif } /* used to sort entries by package name that got returned in some database order */ static int rpmids_sort_cmp(const void *va, const void *vb, void *dp) { struct rpmdbentry const *a = va, *b = vb; char *namedata = dp; int r; r = strcmp(namedata + a->nameoff, namedata + b->nameoff); if (r) return r; return a->rpmdbid - b->rpmdbid; } static int pkgids_sort_cmp(const void *va, const void *vb, void *dp) { Repo *repo = dp; Pool *pool = repo->pool; Solvable *a = pool->solvables + *(Id *)va; Solvable *b = pool->solvables + 
*(Id *)vb; Id *rpmdbid; if (a->name != b->name) return strcmp(pool_id2str(pool, a->name), pool_id2str(pool, b->name)); rpmdbid = repo->rpmdbid; return rpmdbid[(a - pool->solvables) - repo->start] - rpmdbid[(b - pool->solvables) - repo->start]; } static void swap_solvables(Repo *repo, Repodata *data, Id pa, Id pb) { Pool *pool = repo->pool; Solvable tmp; tmp = pool->solvables[pa]; pool->solvables[pa] = pool->solvables[pb]; pool->solvables[pb] = tmp; if (repo->rpmdbid) { Id tmpid = repo->rpmdbid[pa - repo->start]; repo->rpmdbid[pa - repo->start] = repo->rpmdbid[pb - repo->start]; repo->rpmdbid[pb - repo->start] = tmpid; } /* only works if nothing is already internalized! */ if (data) repodata_swap_attrs(data, pa, pb); } static void mkrpmdbcookie(struct stat *st, unsigned char *cookie, int flags) { int f = 0; memset(cookie, 0, 32); cookie[3] = <API key>; memcpy(cookie + 16, &st->st_ino, sizeof(st->st_ino)); memcpy(cookie + 24, &st->st_dev, sizeof(st->st_dev)); if ((flags & RPM_ADD_WITH_PKGID) != 0) f |= 1; if ((flags & RPM_ADD_WITH_HDRID) != 0) f |= 2; if ((flags & <API key>) != 0) f |= 4; if ((flags & RPM_ADD_NO_FILELIST) == 0) f |= 8; if ((flags & <API key>) != 0) cookie[1] = 1; cookie[0] = f; } /* * read rpm db as repo * */ int repo_add_rpmdb(Repo *repo, Repo *ref, int flags) { Pool *pool = repo->pool; char dbpath[PATH_MAX]; struct stat packagesstat; unsigned char newcookie[32]; const unsigned char *oldcookie = 0; Id oldcookietype = 0; Repodata *data; int count = 0, done = 0; struct rpmdbstate state; int i; Solvable *s; unsigned int now; now = solv_timems(0); memset(&state, 0, sizeof(state)); state.pool = pool; if (flags & REPO_USE_ROOTDIR) state.rootdir = solv_strdup(pool_get_rootdir(pool)); data = repo_add_repodata(repo, flags); if (ref && !(ref->nsolvables && ref->rpmdbid && ref->pool == repo->pool)) { if ((flags & RPMDB_EMPTY_REFREPO) != 0) repo_empty(ref, 1); ref = 0; } if (!opendbenv(&state)) { solv_free(state.rootdir); return -1; } /* XXX: should get ro lock 
of Packages database! */ snprintf(dbpath, PATH_MAX, "%s%s/Packages", state.rootdir ? state.rootdir : "", state.is_ostree ? "/usr/share/rpm" : "/var/lib/rpm"); if (stat(dbpath, &packagesstat)) { pool_error(pool, -1, "%s: %s", dbpath, strerror(errno)); freestate(&state); return -1; } mkrpmdbcookie(&packagesstat, newcookie, flags); <API key>(data, SOLVID_META, <API key>, REPOKEY_TYPE_SHA256, newcookie); if (ref) oldcookie = <API key>(ref, SOLVID_META, <API key>, &oldcookietype); if (!ref || !oldcookie || oldcookietype != REPOKEY_TYPE_SHA256 || memcmp(oldcookie, newcookie, 32) != 0) { int solvstart = 0, solvend = 0; Id dbid; DBC *dbc = 0; if (ref && (flags & RPMDB_EMPTY_REFREPO) != 0) repo_empty(ref, 1); /* get it out of the way */ if ((flags & <API key>) != 0) count = count_headers(&state); if (!openpkgdb(&state)) { freestate(&state); return -1; } if (state.db->cursor(state.db, NULL, &dbc, 0)) { freestate(&state); return pool_error(pool, -1, "db->cursor failed"); } i = 0; s = 0; while ((dbid = getrpmcursor(&state, dbc)) != 0) { if (dbid == -1) { dbc->c_close(dbc); freestate(&state); return -1; } if (!s) { s = pool_id2solvable(pool, repo_add_solvable(repo)); if (!solvstart) solvstart = s - pool->solvables; solvend = s - pool->solvables + 1; } if (!repo->rpmdbid) repo->rpmdbid = <API key>(repo, sizeof(Id)); repo->rpmdbid[(s - pool->solvables) - repo->start] = dbid; if (rpm2solv(pool, repo, data, s, state.rpmhead, flags | RPM_ADD_TRIGGERS)) { i++; s = 0; } else { /* We can reuse this solvable, but make sure it's still associated with this repo. */ memset(s, 0, sizeof(*s)); s->repo = repo; } if ((flags & <API key>) != 0) { if (done < count) done++; if (done < count && (done - 1) * 100 / count != done * 100 / count) pool_debug(pool, SOLV_ERROR, "%%%% %d\n", done * 100 / count); } } dbc->c_close(dbc); if (s) { /* oops, could not reuse. 
free it instead */ repo_free_solvable(repo, s - pool->solvables, 1); solvend s = 0; } /* now sort all solvables in the new solvstart..solvend block */ if (solvend - solvstart > 1) { Id *pkgids = solv_malloc2(solvend - solvstart, sizeof(Id)); for (i = solvstart; i < solvend; i++) pkgids[i - solvstart] = i; solv_sort(pkgids, solvend - solvstart, sizeof(Id), pkgids_sort_cmp, repo); /* adapt order */ for (i = solvstart; i < solvend; i++) { int j = pkgids[i - solvstart]; while (j < i) j = pkgids[i - solvstart] = pkgids[j - solvstart]; if (j != i) swap_solvables(repo, data, i, j); } solv_free(pkgids); } } else { Id dircache[<API key>]; /* see copydir */ struct rpmdbentry *entries = 0, *rp; int nentries = 0; char *namedata = 0; unsigned int refmask, h; Id id, *refhash; int res; memset(dircache, 0, sizeof(dircache)); /* get ids of installed rpms */ entries = <API key>(&state, "Name", 0, &nentries, &namedata); if (!entries) { freestate(&state); return -1; } /* sort by name */ if (nentries > 1) solv_sort(entries, nentries, sizeof(*entries), rpmids_sort_cmp, namedata); /* create hash from dbid to ref */ refmask = mkmask(ref->nsolvables); refhash = solv_calloc(refmask + 1, sizeof(Id)); for (i = 0; i < ref->end - ref->start; i++) { if (!ref->rpmdbid[i]) continue; h = ref->rpmdbid[i] & refmask; while (refhash[h]) h = (h + 317) & refmask; refhash[h] = i + 1; /* make it non-zero */ } /* count the misses, they will cost us time */ if ((flags & <API key>) != 0) { for (i = 0, rp = entries; i < nentries; i++, rp++) { if (refhash) { Id dbid = rp->rpmdbid; h = dbid & refmask; while ((id = refhash[h])) { if (ref->rpmdbid[id - 1] == dbid) break; h = (h + 317) & refmask; } if (id) continue; } count++; } } if (ref && (flags & RPMDB_EMPTY_REFREPO) != 0) s = pool_id2solvable(pool, <API key>(repo, nentries, ref)); else s = pool_id2solvable(pool, <API key>(repo, nentries)); if (!repo->rpmdbid) repo->rpmdbid = <API key>(repo, sizeof(Id)); for (i = 0, rp = entries; i < nentries; i++, rp++, s++) { 
Id dbid = rp->rpmdbid; repo->rpmdbid[(s - pool->solvables) - repo->start] = rp->rpmdbid; if (refhash) { h = dbid & refmask; while ((id = refhash[h])) { if (ref->rpmdbid[id - 1] == dbid) break; h = (h + 317) & refmask; } if (id) { Solvable *r = ref->pool->solvables + ref->start + (id - 1); if (r->repo == ref) { solvable_copy(s, r, data, dircache); continue; } } } res = getrpmdbid(&state, dbid); if (res <= 0) { if (!res) pool_error(pool, -1, "inconsistent rpm database, key %d not found. run 'rpm --rebuilddb' to fix.", dbid); freestate(&state); solv_free(entries); solv_free(namedata); solv_free(refhash); return -1; } rpm2solv(pool, repo, data, s, state.rpmhead, flags | RPM_ADD_TRIGGERS); if ((flags & <API key>) != 0) { if (done < count) done++; if (done < count && (done - 1) * 100 / count != done * 100 / count) pool_debug(pool, SOLV_ERROR, "%%%% %d\n", done * 100 / count); } } solv_free(entries); solv_free(namedata); solv_free(refhash); if (ref && (flags & RPMDB_EMPTY_REFREPO) != 0) repo_empty(ref, 1); } freestate(&state); if (!(flags & REPO_NO_INTERNALIZE)) <API key>(data); if ((flags & <API key>) != 0) pool_debug(pool, SOLV_ERROR, "%%%% 100\n"); POOL_DEBUG(SOLV_DEBUG_STATS, "repo_add_rpmdb took %d ms\n", solv_timems(now)); POOL_DEBUG(SOLV_DEBUG_STATS, "repo size: %d solvables\n", repo->nsolvables); POOL_DEBUG(SOLV_DEBUG_STATS, "repo memory used: %d K incore, %d K idarray\n", repodata_memused(data)/1024, repo->idarraysize / (int)(1024/sizeof(Id))); return 0; } int <API key>(Repo *repo, FILE *fp, int flags) { int res; Repo *ref = 0; if (!fp) return repo_add_rpmdb(repo, 0, flags); ref = repo_create(repo->pool, "add_rpmdb_reffp"); if (repo_add_solv(ref, fp, 0) != 0) { repo_free(ref, 1); ref = 0; } if (ref && ref->start == ref->end) { repo_free(ref, 1); ref = 0; } if (ref) repo_disable_paging(ref); res = repo_add_rpmdb(repo, ref, flags | RPMDB_EMPTY_REFREPO); if (ref) repo_free(ref, 1); return res; } static inline unsigned int getu32(const unsigned char *dp) { return 
dp[0] << 24 | dp[1] << 16 | dp[2] << 8 | dp[3]; } Id repo_add_rpm(Repo *repo, const char *rpm, int flags) { unsigned int sigdsize, sigcnt, l; Pool *pool = repo->pool; Solvable *s; RpmHead *rpmhead = 0; int rpmheadsize = 0; char *payloadformat; FILE *fp; unsigned char lead[4096]; int headerstart, headerend; struct stat stb; Repodata *data; unsigned char pkgid[16]; unsigned char leadsigid[16]; unsigned char hdrid[32]; int pkgidtype, leadsigidtype, hdridtype; Id chksumtype = 0; Chksum *chksumh = 0; Chksum *leadsigchksumh = 0; int forcebinary = 0; data = repo_add_repodata(repo, flags); if ((flags & <API key>) != 0) chksumtype = REPOKEY_TYPE_SHA256; else if ((flags & <API key>) != 0) chksumtype = REPOKEY_TYPE_SHA1; if ((fp = fopen(flags & REPO_USE_ROOTDIR ? <API key>(pool, rpm) : rpm, "r")) == 0) { pool_error(pool, -1, "%s: %s", rpm, strerror(errno)); return 0; } if (fstat(fileno(fp), &stb)) { pool_error(pool, -1, "fstat: %s", strerror(errno)); fclose(fp); return 0; } if (chksumtype) chksumh = solv_chksum_create(chksumtype); if ((flags & <API key>) != 0) leadsigchksumh = solv_chksum_create(REPOKEY_TYPE_MD5); if (fread(lead, 96 + 16, 1, fp) != 1 || getu32(lead) != 0xedabeedb) { pool_error(pool, -1, "%s: not a rpm", rpm); fclose(fp); return 0; } forcebinary = lead[6] != 0 || lead[7] != 1; if (chksumh) solv_chksum_add(chksumh, lead, 96 + 16); if (leadsigchksumh) solv_chksum_add(leadsigchksumh, lead, 96 + 16); if (lead[78] != 0 || lead[79] != 5) { pool_error(pool, -1, "%s: not a rpm v5 header", rpm); fclose(fp); return 0; } if (getu32(lead + 96) != 0x8eade801) { pool_error(pool, -1, "%s: bad signature header", rpm); fclose(fp); return 0; } sigcnt = getu32(lead + 96 + 8); sigdsize = getu32(lead + 96 + 12); if (sigcnt >= 0x100000 || sigdsize >= 0x100000) { pool_error(pool, -1, "%s: bad signature header", rpm); fclose(fp); return 0; } sigdsize += sigcnt * 16; sigdsize = (sigdsize + 7) & ~7; headerstart = 96 + 16 + sigdsize; pkgidtype = leadsigidtype = hdridtype = 0; if ((flags 
& (RPM_ADD_WITH_PKGID | RPM_ADD_WITH_HDRID)) != 0) { /* extract pkgid or hdrid from the signature header */ if (sigdsize > rpmheadsize) { rpmheadsize = sigdsize + 128; rpmhead = solv_realloc(rpmhead, sizeof(*rpmhead) + rpmheadsize); } if (fread(rpmhead->data, sigdsize, 1, fp) != 1) { pool_error(pool, -1, "%s: unexpected EOF", rpm); fclose(fp); return 0; } if (chksumh) solv_chksum_add(chksumh, rpmhead->data, sigdsize); if (leadsigchksumh) solv_chksum_add(leadsigchksumh, rpmhead->data, sigdsize); rpmhead->forcebinary = 0; rpmhead->cnt = sigcnt; rpmhead->dcnt = sigdsize - sigcnt * 16; rpmhead->dp = rpmhead->data + rpmhead->cnt * 16; if ((flags & RPM_ADD_WITH_PKGID) != 0) { unsigned char *chksum; unsigned int chksumsize; chksum = headbinary(rpmhead, SIGTAG_MD5, &chksumsize); if (chksum && chksumsize == 16) { pkgidtype = REPOKEY_TYPE_MD5; memcpy(pkgid, chksum, 16); } } if ((flags & RPM_ADD_WITH_HDRID) != 0) { const char *str = headstring(rpmhead, TAG_SHA1HEADER); if (str && strlen(str) == 40) { if (solv_hex2bin(&str, hdrid, 20) == 20) hdridtype = REPOKEY_TYPE_SHA1; } else if (str && strlen(str) == 64) { if (solv_hex2bin(&str, hdrid, 32) == 32) hdridtype = REPOKEY_TYPE_SHA256; } } } else { /* just skip the signature header */ while (sigdsize) { l = sigdsize > 4096 ? 
4096 : sigdsize; if (fread(lead, l, 1, fp) != 1) { pool_error(pool, -1, "%s: unexpected EOF", rpm); fclose(fp); return 0; } if (chksumh) solv_chksum_add(chksumh, lead, l); if (leadsigchksumh) solv_chksum_add(leadsigchksumh, lead, l); sigdsize -= l; } } if (leadsigchksumh) { leadsigchksumh = solv_chksum_free(leadsigchksumh, leadsigid); leadsigidtype = REPOKEY_TYPE_MD5; } if (fread(lead, 16, 1, fp) != 1) { pool_error(pool, -1, "%s: unexpected EOF", rpm); fclose(fp); return 0; } if (chksumh) solv_chksum_add(chksumh, lead, 16); if (getu32(lead) != 0x8eade801) { pool_error(pool, -1, "%s: bad header", rpm); fclose(fp); return 0; } sigcnt = getu32(lead + 8); sigdsize = getu32(lead + 12); if (sigcnt >= 0x100000 || sigdsize >= 0x2000000) { pool_error(pool, -1, "%s: bad header", rpm); fclose(fp); return 0; } l = sigdsize + sigcnt * 16; headerend = headerstart + 16 + l; if (l > rpmheadsize) { rpmheadsize = l + 128; rpmhead = solv_realloc(rpmhead, sizeof(*rpmhead) + rpmheadsize); } if (fread(rpmhead->data, l, 1, fp) != 1) { pool_error(pool, -1, "%s: unexpected EOF", rpm); fclose(fp); return 0; } if (chksumh) solv_chksum_add(chksumh, rpmhead->data, l); rpmhead->forcebinary = forcebinary; rpmhead->cnt = sigcnt; rpmhead->dcnt = sigdsize; rpmhead->dp = rpmhead->data + rpmhead->cnt * 16; if (headexists(rpmhead, TAG_PATCHESNAME)) { /* this is a patch rpm, ignore */ pool_error(pool, -1, "%s: is patch rpm", rpm); fclose(fp); solv_chksum_free(chksumh, 0); solv_free(rpmhead); return 0; } payloadformat = headstring(rpmhead, TAG_PAYLOADFORMAT); if (payloadformat && !strcmp(payloadformat, "drpm")) { /* this is a delta rpm */ pool_error(pool, -1, "%s: is delta rpm", rpm); fclose(fp); solv_chksum_free(chksumh, 0); solv_free(rpmhead); return 0; } if (chksumh) while ((l = fread(lead, 1, sizeof(lead), fp)) > 0) solv_chksum_add(chksumh, lead, l); fclose(fp); s = pool_id2solvable(pool, repo_add_solvable(repo)); if (!rpm2solv(pool, repo, data, s, rpmhead, flags & ~(RPM_ADD_WITH_HDRID | 
RPM_ADD_WITH_PKGID))) { repo_free_solvable(repo, s - pool->solvables, 1); solv_chksum_free(chksumh, 0); solv_free(rpmhead); return 0; } if (!(flags & REPO_NO_LOCATION)) <API key>(data, s - pool->solvables, 0, 0, rpm); if (S_ISREG(stb.st_mode)) repodata_set_num(data, s - pool->solvables, <API key>, (unsigned long long)stb.st_size); repodata_set_num(data, s - pool->solvables, SOLVABLE_HEADEREND, headerend); if (pkgidtype) <API key>(data, s - pool->solvables, SOLVABLE_PKGID, pkgidtype, pkgid); if (hdridtype) <API key>(data, s - pool->solvables, SOLVABLE_HDRID, hdridtype, hdrid); if (leadsigidtype) <API key>(data, s - pool->solvables, SOLVABLE_LEADSIGID, leadsigidtype, leadsigid); if (chksumh) { <API key>(data, s - pool->solvables, SOLVABLE_CHECKSUM, chksumtype, solv_chksum_get(chksumh, 0)); chksumh = solv_chksum_free(chksumh, 0); } solv_free(rpmhead); if (!(flags & REPO_NO_INTERNALIZE)) <API key>(data); return s - pool->solvables; } Id repo_add_rpm_handle(Repo *repo, void *rpmhandle, int flags) { Pool *pool = repo->pool; Repodata *data; RpmHead *rpmhead = rpmhandle; Solvable *s; char *payloadformat; data = repo_add_repodata(repo, flags); if (headexists(rpmhead, TAG_PATCHESNAME)) { pool_error(pool, -1, "is a patch rpm"); return 0; } payloadformat = headstring(rpmhead, TAG_PAYLOADFORMAT); if (payloadformat && !strcmp(payloadformat, "drpm")) { /* this is a delta rpm */ pool_error(pool, -1, "is a delta rpm"); return 0; } s = pool_id2solvable(pool, repo_add_solvable(repo)); if (!rpm2solv(pool, repo, data, s, rpmhead, flags)) { repo_free_solvable(repo, s - pool->solvables, 1); return 0; } if (!(flags & REPO_NO_INTERNALIZE)) <API key>(data); return s - pool->solvables; } static inline void linkhash(const char *lt, char *hash) { unsigned int r = 0; const unsigned char *str = (const unsigned char *)lt; int l, c; l = strlen(lt); while ((c = *str++) != 0) r += (r << 3) + c; sprintf(hash, "%08x%08x%08x%08x", r, l, 0, 0); } void <API key>(void *rpmhandle, int flags, void 
(*cb)(void *, const char *, struct filelistinfo *), void *cbdata) { RpmHead *rpmhead = rpmhandle; char **bn; char **dn; char **md = 0; char **lt = 0; unsigned int *di, diidx; unsigned int *co = 0; unsigned int *ff = 0; unsigned int lastdir; int lastdirl; unsigned int *fm; int cnt, dcnt, cnt2; int i, l1, l; char *space = 0; int spacen = 0; char md5[33]; struct filelistinfo info; dn = headstringarray(rpmhead, TAG_DIRNAMES, &dcnt); if (!dn) return; if ((flags & <API key>) != 0) { for (i = 0; i < dcnt; i++) (*cb)(cbdata, dn[i], 0); solv_free(dn); return; } bn = headstringarray(rpmhead, TAG_BASENAMES, &cnt); if (!bn) { solv_free(dn); return; } di = headint32array(rpmhead, TAG_DIRINDEXES, &cnt2); if (!di || cnt != cnt2) { solv_free(di); solv_free(bn); solv_free(dn); return; } fm = headint16array(rpmhead, TAG_FILEMODES, &cnt2); if (!fm || cnt != cnt2) { solv_free(fm); solv_free(di); solv_free(bn); solv_free(dn); return; } if ((flags & <API key>) != 0) { md = headstringarray(rpmhead, TAG_FILEMD5S, &cnt2); if (!md || cnt != cnt2) { solv_free(md); solv_free(fm); solv_free(di); solv_free(bn); solv_free(dn); return; } } if ((flags & <API key>) != 0) { co = headint32array(rpmhead, TAG_FILECOLORS, &cnt2); if (!co || cnt != cnt2) { solv_free(co); solv_free(md); solv_free(fm); solv_free(di); solv_free(bn); solv_free(dn); return; } } if ((flags & <API key>) != 0) { ff = headint32array(rpmhead, TAG_FILEFLAGS, &cnt2); if (!ff || cnt != cnt2) { solv_free(ff); solv_free(co); solv_free(md); solv_free(fm); solv_free(di); solv_free(bn); solv_free(dn); return; } } lastdir = dcnt; lastdirl = 0; memset(&info, 0, sizeof(info)); for (i = 0; i < cnt; i++) { if (ff && (ff[i] & FILEFLAG_GHOST) != 0) continue; diidx = di[i]; if (diidx >= dcnt) continue; l1 = lastdir == diidx ? 
lastdirl : strlen(dn[diidx]); l = l1 + strlen(bn[i]) + 1; if (l > spacen) { spacen = l + 16; space = solv_realloc(space, spacen); } if (lastdir != diidx) { strcpy(space, dn[diidx]); lastdir = diidx; lastdirl = l1; } strcpy(space + l1, bn[i]); info.diridx = diidx; info.dirlen = l1; if (fm) info.mode = fm[i]; if (md) { info.digest = md[i]; if (fm && S_ISLNK(fm[i])) { info.digest = 0; if (!lt) { lt = headstringarray(rpmhead, TAG_FILELINKTOS, &cnt2); if (cnt != cnt2) lt = solv_free(lt); } if (lt) { linkhash(lt[i], md5); info.digest = md5; } } if (!info.digest) { sprintf(md5, "%08x%08x%08x%08x", (fm[i] >> 12) & 65535, 0, 0, 0); info.digest = md5; } } if (co) info.color = co[i]; (*cb)(cbdata, space, &info); } solv_free(space); solv_free(lt); solv_free(md); solv_free(fm); solv_free(di); solv_free(bn); solv_free(dn); solv_free(co); solv_free(ff); } char * rpm_query(void *rpmhandle, Id what) { const char *name, *arch, *sourcerpm; char *evr, *r; int l; RpmHead *rpmhead = rpmhandle; r = 0; switch (what) { case 0: name = headstring(rpmhead, TAG_NAME); if (!name) name = ""; sourcerpm = headstring(rpmhead, TAG_SOURCERPM); if (sourcerpm || (rpmhead->forcebinary && !headexists(rpmhead, TAG_SOURCEPACKAGE))) arch = headstring(rpmhead, TAG_ARCH); else { if (headexists(rpmhead, TAG_NOSOURCE) || headexists(rpmhead, TAG_NOPATCH)) arch = "nosrc"; else arch = "src"; } if (!arch) arch = "noarch"; evr = headtoevr(rpmhead); l = strlen(name) + 1 + strlen(evr ? evr : "") + 1 + strlen(arch) + 1; r = solv_malloc(l); sprintf(r, "%s-%s.%s", name, evr ? 
evr : "", arch); solv_free(evr); break; case SOLVABLE_NAME: name = headstring(rpmhead, TAG_NAME); r = solv_strdup(name); break; case SOLVABLE_SUMMARY: name = headstring(rpmhead, TAG_SUMMARY); r = solv_strdup(name); break; case <API key>: name = headstring(rpmhead, TAG_DESCRIPTION); r = solv_strdup(name); break; case SOLVABLE_EVR: r = headtoevr(rpmhead); break; } return r; } unsigned long long rpm_query_num(void *rpmhandle, Id what, unsigned long long notfound) { RpmHead *rpmhead = rpmhandle; unsigned int u32; switch (what) { case <API key>: u32 = headint32(rpmhead, TAG_INSTALLTIME); return u32 ? u32 : notfound; } return notfound; } int <API key>(void *rpmstate, const char *index, const char *match, Queue *rpmdbidq) { struct rpmdbentry *entries; int nentries, i; entries = <API key>(rpmstate, index ? index : "Name", match, &nentries, 0); if (rpmdbidq) { queue_empty(rpmdbidq); for (i = 0; i < nentries; i++) queue_push(rpmdbidq, entries[i].rpmdbid); } solv_free(entries); return nentries; } void * rpm_byrpmdbid(void *rpmstate, Id rpmdbid) { struct rpmdbstate *state = rpmstate; int r; r = getrpmdbid(state, rpmdbid); if (!r) pool_error(state->pool, 0, "header #%d not in database", rpmdbid); return r <= 0 ? 
0 : state->rpmhead; } /* tail of rpm_byrpmdbid(): NULL on lookup failure, else the cached header */

/*
 * Read a rpm package from an already-opened file and return an opaque
 * header handle (the state's reusable RpmHead buffer).
 *
 * rpmstate: rpmdb state created elsewhere; owns the reusable header buffer.
 * fp:       open stream positioned at the start of the package.
 * name:     package name, used only for error messages.
 *
 * Returns the header handle, or 0 after logging via pool_error().
 * NOTE(review): the returned pointer aliases state->rpmhead, so it is
 * only valid until the next call that refills the state buffer.
 */
void *
rpm_byfp(void *rpmstate, FILE *fp, const char *name)
{
  struct rpmdbstate *state = rpmstate;
  /* int headerstart, headerend; */
  RpmHead *rpmhead;
  unsigned int sigdsize, sigcnt, l;
  unsigned char lead[4096];
  int forcebinary = 0;

  /* read the 96-byte lead plus the 16-byte signature-header intro in one go;
     0xedabeedb is the magic the "not a rpm" check keys off */
  if (fread(lead, 96 + 16, 1, fp) != 1 || getu32(lead) != 0xedabeedb)
    {
      pool_error(state->pool, 0, "%s: not a rpm", name);
      return 0;
    }
  /* lead bytes 6/7: package type; anything but (0,1) is treated as binary */
  forcebinary = lead[6] != 0 || lead[7] != 1;
  /* lead bytes 78/79 carry the signature type; only (0,5) is accepted */
  if (lead[78] != 0 || lead[79] != 5)
    {
      pool_error(state->pool, 0, "%s: not a V5 header", name);
      return 0;
    }
  /* the signature header must start with the header magic 0x8eade801 */
  if (getu32(lead + 96) != 0x8eade801)
    {
      pool_error(state->pool, 0, "%s: bad signature header", name);
      return 0;
    }
  sigcnt = getu32(lead + 96 + 8);       /* number of index entries */
  sigdsize = getu32(lead + 96 + 12);    /* size of the data store */
  /* sanity limits to reject corrupt/hostile counts before allocating */
  if (sigcnt >= 0x100000 || sigdsize >= 0x100000)
    {
      pool_error(state->pool, 0, "%s: bad signature header", name);
      return 0;
    }
  sigdsize += sigcnt * 16;              /* total bytes: index entries + data */
  sigdsize = (sigdsize + 7) & ~7;       /* signature region is 8-byte aligned */
  /* headerstart = 96 + 16 + sigdsize; */
  /* skip over the signature region in lead-sized chunks */
  while (sigdsize)
    {
      l = sigdsize > 4096 ? 4096 : sigdsize;
      if (fread(lead, l, 1, fp) != 1)
	{
	  pool_error(state->pool, 0, "%s: unexpected EOF", name);
	  return 0;
	}
      sigdsize -= l;
    }
  /* now read the intro of the real header and validate its magic */
  if (fread(lead, 16, 1, fp) != 1)
    {
      pool_error(state->pool, 0, "%s: unexpected EOF", name);
      return 0;
    }
  if (getu32(lead) != 0x8eade801)
    {
      pool_error(state->pool, 0, "%s: bad header", name);
      return 0;
    }
  sigcnt = getu32(lead + 8);
  sigdsize = getu32(lead + 12);
  /* the main header is allowed to be bigger than the signature header */
  if (sigcnt >= 0x100000 || sigdsize >= 0x2000000)
    {
      pool_error(state->pool, 0, "%s: bad header", name);
      return 0;
    }
  l = sigdsize + sigcnt * 16;
  /* headerend = headerstart + 16 + l; */
  /* grow the state's reusable header buffer if needed (slack of 128 bytes
     avoids reallocating for every slightly-larger package) */
  if (l > state->rpmheadsize)
    {
      state->rpmheadsize = l + 128;
      state->rpmhead = solv_realloc(state->rpmhead, sizeof(*state->rpmhead) + state->rpmheadsize);
    }
  rpmhead = state->rpmhead;
  if (fread(rpmhead->data, l, 1, fp) != 1)
    {
      pool_error(state->pool, 0, "%s: unexpected EOF", name);
      return 0;
    }
  rpmhead->forcebinary = forcebinary;
  rpmhead->cnt = sigcnt;
  rpmhead->dcnt = sigdsize;
  /* data store begins right after the 16-byte index entries */
  rpmhead->dp = rpmhead->data + rpmhead->cnt * 16;
  return rpmhead;
}

#ifdef <API key>
/*
 * Convert an in-memory rpmlib Header object into the state's RpmHead
 * buffer and return it as an opaque handle.
 * Uses headerUnload() (one-argument form, or two-argument under RPM5)
 * to serialize the header, then copies the index/data region.
 * Returns 0 if the header cannot be unloaded.
 */
void *
rpm_byrpmh(void *rpmstate, Header h)
{
  struct rpmdbstate *state = rpmstate;
  const unsigned char *uh;
  unsigned int sigdsize, sigcnt, l;
  RpmHead *rpmhead;

#ifndef RPM5
  uh = headerUnload(h);
#else
  uh = headerUnload(h, NULL);
#endif
  if (!uh)
    return 0;
  sigcnt = getu32(uh);          /* entry count is the first word */
  sigdsize = getu32(uh + 4);    /* data-store size is the second word */
  l = sigdsize + sigcnt * 16;
  if (l > state->rpmheadsize)
    {
      state->rpmheadsize = l + 128;
      state->rpmhead = solv_realloc(state->rpmhead, sizeof(*state->rpmhead) + state->rpmheadsize);
    }
  rpmhead = state->rpmhead;
  /* skip the 8-byte intro (count + size) that we already parsed */
  memcpy(rpmhead->data, uh + 8, l - 8);
  free((void *)uh);
  rpmhead->forcebinary = 0;
  rpmhead->cnt = sigcnt;
  rpmhead->dcnt = sigdsize;
  rpmhead->dp = rpmhead->data + rpmhead->cnt * 16;
  return rpmhead;
}
#endif
<!doctype html> <html> <head> <title><API key> - setup</title> <script src="../../bower_components/webcomponentsjs/webcomponents-lite.js"></script> <script src="../../bower_components/<API key>/browser.js"></script> <script src="../../bower_components/iron-test-helpers/test-helpers.js"></script> <script src="../../bower_components/test-fixture/test-fixture-mocha.js"></script> <link rel="import" href="../../bower_components/test-fixture/test-fixture.html"> <link rel="import" href="../../elements/<API key>/<API key>.html"> </head> <body> <test-fixture id="empty"> <template> <<API key> verbose="log"> </<API key>> </template> </test-fixture> <test-fixture id="remote-program"> <template> <<API key> id="remote-id" verbose="log"> </<API key>> </template> </test-fixture> <script> suite('<API key> - add to profile', function() { var server; setup(function() { server = sinon.fakeServer.create(); server.autoRespond = true; server.autoRespondAfter = 500; server.respondWith( 'PUT', /\/responds_with_new.*/, [ 200, { 'Content-Type': 'application/json' }, '{"status": "ok", "success": true, "result" : { "id": "new-remote-id", "modifiedAt" : 50, "author" : "user-a" }}' ] ); server.respondWith( 'GET', /\/<API key>.*/, [ 200, { 'Content-Type': 'application/json' }, '{"status": "ok", "success": true, "result" : { "id": "remote-id", "modifiedAt" : 50, "author" : "user-b" }}' ] ); window.localStorage.setItem('qb_user', JSON.stringify({ id: 'user-a', programs : [] })); }); teardown(function() { server.restore(); window.localStorage.removeItem('qb_user'); }); test('success - creates a new program, add to profile, sync', function(done) { var data = fixture('empty'); data.api = '/responds_with_new' data.addEventListener('setupError', function(event){ assert.fail('','', 'setupError fired with "'+event.detail+'"'); done() }) data.addEventListener('setupStart', function(event){ // Setup Started data.addEventListener('setupSuccess', function(event){ // Setup Succeded assert.equal(event.detail, 
'new-program'); var oldId = data.id; data.addEventListener('addToProfileSuccess', function(event){ // Added to profile assert.equal(event.detail, ''); data.addEventListener('syncSuccess', function(event){ // Synced assert.equal(event.detail, 'created-on-server'); assert.notEqual(oldId, data.id); assert.include(data.user.programs, data.id, 'user.programs do not contains new id'); assert.notInclude(data.user.programs, oldId, 'user.programs contains old id'); var localUser = JSON.parse(window.localStorage.getItem('qb_user')); assert.include(localUser.programs, data.id, 'user in localStorage do not contains new id'); assert.notInclude(localUser.programs, oldId, 'user in localStorage contains old id'); done() }) }) data.addToProfile().catch(function(){}) }) }); }); test('success - loads a program not in profile, add to profile, sync', function(done) { var data = fixture('remote-program'); data.api = '/<API key>' data.addEventListener('setupError', function(event){ assert.fail('','', 'setupError fired with "'+event.detail+'"'); done() }) data.addEventListener('setupStart', function(event){ // Setup Started data.addEventListener('setupSuccess', function(event){ // Setup Succeded assert.equal(event.detail, 'not-in-profile'); var oldId = data.id; data.addEventListener('addToProfileSuccess', function(event){ // Added to profile assert.equal(event.detail, ''); data.api = '/responds_with_new' data.addEventListener('syncSuccess', function(event){ // Synced assert.equal(event.detail, 'created-on-server'); assert.notEqual(oldId, data.id); assert.include(data.user.programs, data.id, 'user.programs do not contains new id'); assert.notInclude(data.user.programs, oldId, 'user.programs contains old id'); var localUser = JSON.parse(window.localStorage.getItem('qb_user')); assert.include(localUser.programs, data.id, 'user in localStorage do not contains new id'); assert.notInclude(localUser.programs, oldId, 'user in localStorage contains old id'); done() }) }) 
data.addToProfile().catch(function(){}) }) }); }); test('error - no user', function(done) { var data = fixture('empty'); data.api = '/responds_with_new' data.addEventListener('setupError', function(event){ assert.fail('','', 'setupError fired with "'+event.detail+'"'); done() }) data.addEventListener('setupStart', function(event){ // Setup Started data.addEventListener('setupSuccess', function(event){ // Setup Succeded assert.equal(event.detail, 'new-program'); data.user = null; data.addEventListener('addToProfileError', function(event){ // Added to profile assert.equal(event.detail, 'no-user'); done(); }) data.addToProfile().catch(function(){}) }) }); }); test('error - busy - trying to add to profile during setup', function(done) { var data = fixture('remote-program'); data.api = '/<API key>' data.addEventListener('setupError', function(event){ assert.fail('','', 'setupError fired with "'+event.detail+'"'); done() }) data.addEventListener('addToProfileError', function(event){ // Added to profile assert.equal(event.detail, 'busy'); done(); }) setTimeout(function () { data.addToProfile().catch(function(){}) }, 100); }); }); </script> </body> </html>
'use strict';
// AngularJS locale module for locale id "te-in" / "te_IN" (Telugu script
// strings, see the \u0Cxx escapes below).  Registers the $locale value with
// date/time names and patterns plus number/currency formatting rules.
// NOTE(review): this looks like generated CLDR locale data — edit upstream
// rather than by hand if possible.
angular.module("ngLocale", [], ["$provide", function ($provide) {
  var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
  $provide.value("$locale", {
    "DATETIME_FORMATS": {
      "AMPMS": [
        "[AM]",
        "[PM]"
      ],
      // Full weekday names, Sunday first.
      "DAY": [
        "\u0c06\u0c26\u0c3f\u0c35\u0c3e\u0c30\u0c02",
        "\u0c38\u0c4b\u0c2e\u0c35\u0c3e\u0c30\u0c02",
        "\u0c2e\u0c02\u0c17\u0c33\u0c35\u0c3e\u0c30\u0c02",
        "\u0c2c\u0c41\u0c27\u0c35\u0c3e\u0c30\u0c02",
        "\u0c17\u0c41\u0c30\u0c41\u0c35\u0c3e\u0c30\u0c02",
        "\u0c36\u0c41\u0c15\u0c4d\u0c30\u0c35\u0c3e\u0c30\u0c02",
        "\u0c36\u0c28\u0c3f\u0c35\u0c3e\u0c30\u0c02"
      ],
      "ERANAMES": [
        "\u0c15\u0c4d\u0c30\u0c40\u0c38\u0c4d\u0c24\u0c41 \u0c2a\u0c42\u0c30\u0c4d\u0c35\u0c02",
        "\u0c15\u0c4d\u0c30\u0c40\u0c38\u0c4d\u0c24\u0c41 \u0c36\u0c15\u0c02"
      ],
      "ERAS": [
        "\u0c15\u0c4d\u0c30\u0c40\u0c2a\u0c42",
        "\u0c15\u0c4d\u0c30\u0c40\u0c36"
      ],
      "FIRSTDAYOFWEEK": 6,
      // Full month names, January first.
      "MONTH": [
        "\u0c1c\u0c28\u0c35\u0c30\u0c3f",
        "\u0c2b\u0c3f\u0c2c\u0c4d\u0c30\u0c35\u0c30\u0c3f",
        "\u0c2e\u0c3e\u0c30\u0c4d\u0c1a\u0c3f",
        "\u0c0f\u0c2a\u0c4d\u0c30\u0c3f\u0c32\u0c4d",
        "\u0c2e\u0c47",
        "\u0c1c\u0c42\u0c28\u0c4d",
        "\u0c1c\u0c41\u0c32\u0c48",
        "\u0c06\u0c17\u0c38\u0c4d\u0c1f\u0c41",
        "\u0c38\u0c46\u0c2a\u0c4d\u0c1f\u0c46\u0c02\u0c2c\u0c30\u0c4d",
        "\u0c05\u0c15\u0c4d\u0c1f\u0c4b\u0c2c\u0c30\u0c4d",
        "\u0c28\u0c35\u0c02\u0c2c\u0c30\u0c4d",
        "\u0c21\u0c3f\u0c38\u0c46\u0c02\u0c2c\u0c30\u0c4d"
      ],
      // Abbreviated weekday names.
      "SHORTDAY": [
        "\u0c06\u0c26\u0c3f",
        "\u0c38\u0c4b\u0c2e",
        "\u0c2e\u0c02\u0c17\u0c33",
        "\u0c2c\u0c41\u0c27",
        "\u0c17\u0c41\u0c30\u0c41",
        "\u0c36\u0c41\u0c15\u0c4d\u0c30",
        "\u0c36\u0c28\u0c3f"
      ],
      // Abbreviated month names.
      "SHORTMONTH": [
        "\u0c1c\u0c28",
        "\u0c2b\u0c3f\u0c2c\u0c4d\u0c30",
        "\u0c2e\u0c3e\u0c30\u0c4d\u0c1a\u0c3f",
        "\u0c0f\u0c2a\u0c4d\u0c30\u0c3f",
        "\u0c2e\u0c47",
        "\u0c1c\u0c42\u0c28\u0c4d",
        "\u0c1c\u0c41\u0c32\u0c48",
        "\u0c06\u0c17",
        "\u0c38\u0c46\u0c2a\u0c4d\u0c1f\u0c46\u0c02",
        "\u0c05\u0c15\u0c4d\u0c1f\u0c4b",
        "\u0c28\u0c35\u0c02",
        "\u0c21\u0c3f\u0c38\u0c46\u0c02"
      ],
      // Standalone month names (same values as MONTH in this locale).
      "STANDALONEMONTH": [
        "\u0c1c\u0c28\u0c35\u0c30\u0c3f",
        "\u0c2b\u0c3f\u0c2c\u0c4d\u0c30\u0c35\u0c30\u0c3f",
        "\u0c2e\u0c3e\u0c30\u0c4d\u0c1a\u0c3f",
        "\u0c0f\u0c2a\u0c4d\u0c30\u0c3f\u0c32\u0c4d",
        "\u0c2e\u0c47",
        "\u0c1c\u0c42\u0c28\u0c4d",
        "\u0c1c\u0c41\u0c32\u0c48",
        "\u0c06\u0c17\u0c38\u0c4d\u0c1f\u0c41",
        "\u0c38\u0c46\u0c2a\u0c4d\u0c1f\u0c46\u0c02\u0c2c\u0c30\u0c4d",
        "\u0c05\u0c15\u0c4d\u0c1f\u0c4b\u0c2c\u0c30\u0c4d",
        "\u0c28\u0c35\u0c02\u0c2c\u0c30\u0c4d",
        "\u0c21\u0c3f\u0c38\u0c46\u0c02\u0c2c\u0c30\u0c4d"
      ],
      "WEEKENDRANGE": [
        6,
        6
      ],
      // Date/time pattern strings consumed by Angular's date filter.
      "fullDate": "d, MMMM y, EEEE",
      "longDate": "d MMMM, y",
      "medium": "d MMM, y h:mm:ss a",
      "mediumDate": "d MMM, y",
      "mediumTime": "h:mm:ss a",
      "short": "dd-MM-yy h:mm a",
      "shortDate": "dd-MM-yy",
      "shortTime": "h:mm a"
    },
    "NUMBER_FORMATS": {
      "CURRENCY_SYM": "\u20b9",
      "DECIMAL_SEP": ".",
      "GROUP_SEP": ",",
      // PATTERNS[0] is the plain decimal pattern, PATTERNS[1] the currency
      // pattern; gSize 2 / lgSize 3 gives the lakh/crore grouping (1,23,456).
      "PATTERNS": [
        {
          "gSize": 2,
          "lgSize": 3,
          "maxFrac": 3,
          "minFrac": 0,
          "minInt": 1,
          "negPre": "-",
          "negSuf": "",
          "posPre": "",
          "posSuf": ""
        },
        {
          "gSize": 2,
          "lgSize": 3,
          "maxFrac": 2,
          "minFrac": 2,
          "minInt": 1,
          "negPre": "-\u00a4",
          "negSuf": "",
          "posPre": "\u00a4",
          "posSuf": ""
        }
      ]
    },
    "id": "te-in",
    "localeID": "te_IN",
    // Plural rule: exactly 1 -> "one", everything else -> "other".
    "pluralCat": function (n, opt_precision) {
      if (n == 1) {
        return PLURAL_CATEGORY.ONE;
      }
      return PLURAL_CATEGORY.OTHER;
    }
  });
}]);
#include "gpu_rasterizer.h" #include <string> #include <utility> #include "flutter/common/threads.h" #include "flutter/glue/trace_event.h" #include "flutter/shell/common/platform_view.h" #include "flutter/shell/common/shell.h" #include "flutter/shell/common/picture_serializer.h" #include "mojo/public/cpp/system/data_pipe.h" #include "third_party/skia/include/core/SkCanvas.h" #include "third_party/skia/include/core/SkPicture.h" namespace shell { GPURasterizer::GPURasterizer() : platform_view_(nullptr), weak_factory_(this) { auto weak_ptr = weak_factory_.GetWeakPtr(); blink::Threads::Gpu()->PostTask( [weak_ptr]() { Shell::Shared().AddRasterizer(weak_ptr); }); } GPURasterizer::~GPURasterizer() { weak_factory_.InvalidateWeakPtrs(); Shell::Shared().PurgeRasterizers(); } std::unique_ptr<Rasterizer> Rasterizer::Create() { return std::unique_ptr<GPURasterizer>(new GPURasterizer()); } ftl::WeakPtr<Rasterizer> GPURasterizer::<API key>() { return weak_factory_.GetWeakPtr(); } bool GPURasterizer::Setup(PlatformView* platform_view) { if (platform_view == nullptr) { return false; } if (!platform_view->ContextMakeCurrent()) { return false; } auto gpu_canvas = GPUCanvas::<API key>(*platform_view); if (gpu_canvas == nullptr) { return false; } if (!gpu_canvas->Setup()) { return false; } gpu_canvas_ = std::move(gpu_canvas); platform_view_ = platform_view; return true; } void GPURasterizer::Setup(PlatformView* platform_view, ftl::Closure continuation, ftl::<API key>* <API key>) { auto setup_result = Setup(platform_view); FTL_CHECK(setup_result) << "Must be able to setup the GPU canvas."; continuation(); <API key>->Signal(); } void GPURasterizer::Clear(SkColor color) { if (gpu_canvas_ == nullptr) { return; } SkCanvas* canvas = gpu_canvas_->AcquireCanvas(platform_view_->GetSize()); if (canvas == nullptr) { return; } canvas->clear(color); canvas->flush(); platform_view_->SwapBuffers(); } void GPURasterizer::Teardown( ftl::<API key>* <API key>) { platform_view_ = nullptr; 
last_layer_tree_.reset(); compositor_context_.<API key>(); <API key>->Signal(); } flow::LayerTree* GPURasterizer::GetLastLayerTree() { return last_layer_tree_.get(); } void GPURasterizer::Draw( ftl::RefPtr<flutter::Pipeline<flow::LayerTree>> pipeline) { TRACE_EVENT0("flutter", "GPURasterizer::Draw"); if (!platform_view_) return; flutter::Pipeline<flow::LayerTree>::Consumer consumer = std::bind(&GPURasterizer::DoDraw, this, std::placeholders::_1); // Consume as many pipeline items as possible. But yield the event loop // between successive tries. switch (pipeline->Consume(consumer)) { case flutter::<API key>::MoreAvailable: { auto weak_this = weak_factory_.GetWeakPtr(); blink::Threads::Gpu()->PostTask([weak_this, pipeline]() { if (weak_this) { weak_this->Draw(pipeline); } }); break; } default: break; } } void GPURasterizer::DoDraw(std::unique_ptr<flow::LayerTree> layer_tree) { if (!layer_tree || !gpu_canvas_) { return; } // There is no way for the compositor to know how long the layer tree // construction took. Fortunately, the layer tree does. Grab that time // for instrumentation. 
compositor_context_.engine_time().SetLapTime(layer_tree->construction_time()); SkISize size = layer_tree->frame_size(); if (platform_view_->GetSize() != size) { platform_view_->Resize(size); } if (!platform_view_->ContextMakeCurrent() || !layer_tree->root_layer()) { return; } { SkCanvas* canvas = gpu_canvas_->AcquireCanvas(layer_tree->frame_size()); flow::CompositorContext::ScopedFrame frame = compositor_context_.AcquireFrame(gpu_canvas_->GetContext(), *canvas); canvas->clear(SK_ColorBLACK); layer_tree->Raster(frame); { TRACE_EVENT0("flutter", "SkCanvas::Flush"); canvas->flush(); } platform_view_->SwapBuffers(); } // Trace to a file if necessary static const double kOneFrameDuration = 1e3 / 60.0; bool <API key> = false; uint32_t thresholdInterval = layer_tree-><API key>(); if (thresholdInterval != 0 && compositor_context_.frame_time().LastLap().ToMillisecondsF() > thresholdInterval * kOneFrameDuration) { // While rendering the last frame, if we exceeded the tracing threshold // specified in the layer tree, we force a trace to disk. <API key> = true; } const auto& tracingController = Shell::Shared().tracing_controller(); if (<API key> || tracingController.<API key>()) { std::string path = tracingController.<API key>(); LOG(INFO) << "Frame threshold exceeded. Capturing SKP to " << path; SkPictureRecorder recorder; recorder.beginRecording(SkRect::MakeWH(size.width(), size.height())); { auto frame = compositor_context_.AcquireFrame( nullptr, *recorder.getRecordingCanvas(), false); layer_tree->Raster(frame, true); } sk_sp<SkPicture> picture = recorder.<API key>(); SerializePicture(path, picture.get()); } last_layer_tree_ = std::move(layer_tree); } } // namespace shell
#include "content/browser/service_worker/<API key>.h" #include <string> #include <vector> #include "base/profiler/scoped_tracker.h" #include "base/trace_event/trace_event.h" #include "content/browser/service_worker/<API key>.h" #include "content/browser/service_worker/<API key>.h" #include "content/browser/service_worker/<API key>.h" #include "net/base/io_buffer.h" #include "net/base/net_errors.h" #include "net/http/<API key>.h" #include "net/http/<API key>.h" #include "net/http/http_util.h" #include "net/url_request/url_request.h" #include "net/url_request/url_request_status.h" namespace content { <API key>::<API key>( net::URLRequest* request, net::NetworkDelegate* network_delegate, base::WeakPtr<<API key>> context, const scoped_refptr<<API key>>& version, int64 response_id) : net::URLRequestJob(request, network_delegate), context_(context), version_(version), response_id_(response_id), has_been_killed_(false), weak_factory_(this) { } <API key>::~<API key>() { } void <API key>::Start() { <API key>("ServiceWorker", "<API key>::ReadInfo", this, "URL", request_->url().spec()); if (!context_) { NotifyStartError(net::URLRequestStatus( net::URLRequestStatus::FAILED, net::ERR_FAILED)); return; } // Create a response reader and start reading the headers, // we'll continue when thats done. reader_ = context_->storage()-><API key>(response_id_); <API key> = new <API key>; reader_->ReadInfo( <API key>.get(), base::Bind(&<API key>::OnReadInfoComplete, weak_factory_.GetWeakPtr())); SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0)); } void <API key>::Kill() { if (has_been_killed_) return; weak_factory_.InvalidateWeakPtrs(); has_been_killed_ = true; reader_.reset(); context_.reset(); <API key> = NULL; http_info_.reset(); <API key>.reset(); net::URLRequestJob::Kill(); } net::LoadState <API key>::GetLoadState() const { // TODO(pkasting): Remove ScopedTracker below once crbug.com/455952 is // fixed. 
tracked_objects::ScopedTracker tracking_profile( <API key>( "455952 <API key>::GetLoadState")); if (reader_.get() && reader_->IsReadPending()) return net::<API key>; return net::LOAD_STATE_IDLE; } bool <API key>::GetCharset(std::string* charset) { if (!http_info()) return false; return http_info()->headers->GetCharset(charset); } bool <API key>::GetMimeType(std::string* mime_type) const { if (!http_info()) return false; return http_info()->headers->GetMimeType(mime_type); } void <API key>::GetResponseInfo( net::HttpResponseInfo* info) { if (!http_info()) return; *info = *http_info(); } int <API key>::GetResponseCode() const { if (!http_info()) return -1; return http_info()->headers->response_code(); } void <API key>::<API key>( const net::HttpRequestHeaders& headers) { std::string value; std::vector<net::HttpByteRange> ranges; if (!headers.GetHeader(net::HttpRequestHeaders::kRange, &value) || !net::HttpUtil::ParseRangeHeader(value, &ranges)) { return; } // If multiple ranges are requested, we play dumb and // return the entire response with 200 OK. if (ranges.size() == 1U) range_requested_ = ranges[0]; } bool <API key>::ReadRawData( net::IOBuffer* buf, int buf_size, int *bytes_read) { // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed. 
tracked_objects::ScopedTracker tracking_profile( <API key>( "423948 <API key>::ReadRawData")); DCHECK_NE(buf_size, 0); DCHECK(bytes_read); DCHECK(!reader_->IsReadPending()); <API key>("ServiceWorker", "<API key>::ReadRawData", this, "URL", request_->url().spec()); reader_->ReadData( buf, buf_size, base::Bind(&<API key>::OnReadComplete, weak_factory_.GetWeakPtr())); SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0)); return false; } const net::HttpResponseInfo* <API key>::http_info() const { if (!http_info_) return NULL; if (<API key>) return <API key>.get(); return http_info_.get(); } void <API key>::OnReadInfoComplete(int result) { scoped_refptr<<API key>> protect(this); if (!<API key>->http_info) { DCHECK_LT(result, 0); <API key>::<API key>( <API key>::READ_HEADERS_ERROR); NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result)); return; } DCHECK_GE(result, 0); SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status http_info_.reset(<API key>->http_info.release()); if (is_range_request()) SetupRangeResponse(<API key>->response_data_size); <API key> = NULL; if (request_->url() == version_->script_url()) version_-><API key>(*http_info_); <API key>("ServiceWorker", "<API key>::ReadInfo", this, "Result", result); <API key>(); } void <API key>::SetupRangeResponse(int resource_size) { DCHECK(is_range_request() && http_info_.get() && reader_.get()); if (resource_size < 0 || !range_requested_.ComputeBounds(resource_size)) { range_requested_ = net::HttpByteRange(); return; } DCHECK(range_requested_.IsValid()); int offset = static_cast<int>(range_requested_.first_byte_position()); int length = static_cast<int>(range_requested_.last_byte_position() - range_requested_.first_byte_position() + 1); // Tell the reader about the range to read. reader_->SetReadRange(offset, length); // Make a copy of the full response headers and fix them up // for the range we'll be returning. 
<API key>.reset(new net::HttpResponseInfo(*http_info_)); net::HttpResponseHeaders* headers = <API key>->headers.get(); headers->UpdateWithNewRange( range_requested_, resource_size, true /* replace status line */); } void <API key>::OnReadComplete(int result) { <API key>::ReadResponseResult check_result; if (result == 0) { check_result = <API key>::READ_OK; NotifyDone(net::URLRequestStatus()); } else if (result < 0) { check_result = <API key>::READ_DATA_ERROR; NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result)); } else { check_result = <API key>::READ_OK; SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status } <API key>::<API key>(check_result); NotifyReadComplete(result); <API key>("ServiceWorker", "<API key>::ReadRawData", this, "Result", result); } } // namespace content
#!/usr/bin/env python
"""Packaging script for pymarkdown (evaluate code in markdown)."""

import os
from setuptools import setup


def _long_description():
    # Use README.rst as the long description when it exists, else ''.
    if os.path.exists('README.rst'):
        return open('README.rst').read()
    return ''


setup(
    name='pymarkdown',
    version='0.1.4',
    description='Evaluate code in markdown',
    url='http://github.com/mrocklin/pymarkdown',
    author='Matthew Rocklin',
    author_email='mrocklin@gmail.com',
    license='BSD',
    keywords='markdown documentation',
    packages=['pymarkdown'],
    install_requires=['toolz'],
    long_description=_long_description(),
    zip_safe=False,
    scripts=[os.path.join('bin', 'pymarkdown')],
)
/* TEMPLATE GENERATED TESTCASE FILE
Filename: <API key>.c
Label Definition File: CWE401_Memory_Leak.c.label.xml
Template File: sources-sinks-66b.tmpl.c
*/
/*
 * @description
 * CWE: 401 Memory Leak
 * BadSource: malloc Allocate data using malloc()
 * GoodSource: Allocate data on the stack
 * Sinks:
 *    GoodSink: call free() on data
 *    BadSink : no deallocation of data
 * Flow Variant: 66 Data flow: data passed in an array from one function to another in different source files
 *
 * NOTE(review): this is a generated Juliet/SARD test fixture; the "flaws"
 * below are intentional and must NOT be fixed — static analyzers are
 * expected to flag them.
 * */

#include "std_testcase.h"

#include <wchar.h>

#ifndef OMITBAD

/* Bad sink: receives heap-allocated data in dataArray[2] and deliberately
   never frees it (the CWE-401 leak under test). */
void <API key>(twoIntsStruct * dataArray[])
{
    /* copy data out of dataArray */
    twoIntsStruct * data = dataArray[2];
    /* POTENTIAL FLAW: No deallocation */
    ; /* empty statement needed for some flow variants */
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
/* Same non-freeing sink as above, but paired (in the "a" file) with a
   stack-allocated source, so skipping free() here is correct. */
void <API key>(twoIntsStruct * dataArray[])
{
    twoIntsStruct * data = dataArray[2];
    /* POTENTIAL FLAW: No deallocation */
    ; /* empty statement needed for some flow variants */
}

/* goodB2G uses the BadSource with the GoodSink */
/* Good sink: releases the heap allocation made by the bad source. */
void <API key>(twoIntsStruct * dataArray[])
{
    twoIntsStruct * data = dataArray[2];
    /* FIX: Deallocate memory */
    free(data);
}

#endif /* OMITGOOD */
package org.rti.webgenome.util;

import java.util.Properties;
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.IOException;

import org.apache.log4j.Logger;

import org.rti.webgenome.core.<API key>;

/**
 * Contains general utility methods used by multiple classes.
 * Centralizes property lookup: JVM system properties first, then
 * application properties loaded from an external file (pointed to by a
 * system property) layered over a classpath-internal defaults file.
 */
public final class SystemUtils {

    /** Logger. */
    private static final Logger LOGGER = Logger.getLogger(SystemUtils.class);

    // Constants

    /**
     * Properties applying to application. These are read in
     * from file given by SYS_PROPS_FILE.
     * Lazily initialized; cached for the lifetime of the JVM.
     */
    private static Properties <API key> = null;

    /** The System property that will indicate the location of an externalized property file,
     * if one exists. */
    private static final String <API key> = "webGenome.configFile" ;

    /** Classpath-relative application properties file. */
    private static final String INTERNAL_PROPS_FILE = "webgenome.properties";

    /**
     * Constructor. This is declared private
     * to protect against instantiation of
     * instances of this class.
     */
    private SystemUtils() {

    }

    // Utility methods

    /**
     * Get application properties.
     * Loads them on first use and caches the result.
     * @return System properties
     */
    public static Properties <API key>() {
        if (<API key> == null) {
            <API key> = loadProperties();
        }
        return <API key>;
    }

    /**
     * Get an application property. Property retrieval is done by checking
     * the System properties first, and if no property is found, then checking the
     * application properties.
     *
     * @param key Name of property
     * @return property (either System (first) or "Application"); null if not found in either
     */
    public static String <API key>(
            final String key) {
        String propertySetting = System.getProperty ( key ) ;
        if ( propertySetting == null ) {
            Properties props = <API key>() ;
            propertySetting = (String) props.get ( key ) ;
        }
        return propertySetting ;
    }

    /**
     * Get long integer format application property defined by
     * the given key.
     * NOTE(review): throws NumberFormatException if the property is
     * missing or not numeric — callers should ensure the key exists.
     * @param key Property name.
     * @return Long integer format application property.
     */
    public static long <API key>(final String key) {
        return Long.parseLong(<API key>(key));
    }

    /**
     * Get a system property and determine whether it is set
     * to either 'yes' or 'true'
     * (case insensitive).
     * @param key Property name
     * @return true, if the property exists and
     * is set to yes or true, false otherwise.
     */
    public static boolean <API key>(
            final String key) {
        String propertySetting = <API key>(key);
        return propertySetting != null
            && ("true".equalsIgnoreCase(propertySetting)
                    || "yes".equalsIgnoreCase(propertySetting));
    }

    /**
     * Handle loading properties from both external and internal properties
     * file. External property settings will override internal property settings.
     * If no external and no internal properties exist, then an Exception will be thrown.
     *
     * @return Properties - the full set of internal and external properties.
     *
     */
    public static Properties loadProperties() {
        Properties allProperties = new Properties();

        // Load internal properties (classpath defaults)
        Properties <API key> = loadProperties ( INTERNAL_PROPS_FILE ) ;

        // Load external properties if they have been specified;
        // internal properties act as defaults underneath them.
        String <API key> = System.getProperty( <API key> ) ;
        if ( <API key> != null ) {
            LOGGER.info( "Loading from external Properties file '" + <API key> + "'" ) ;
            allProperties = <API key> ( <API key>, <API key> ) ;
        }
        else {
            // No external file: fall back to internal properties only.
            allProperties = <API key> ;
            String internalPropsInfo = "" ;
            if ( <API key>.size() > 0 )
                internalPropsInfo = " Using " + <API key>.size()
                    + " internal properties from '" + INTERNAL_PROPS_FILE + "'" ;
            LOGGER.warn( "No External Properties File specified." + internalPropsInfo ) ;
        }

        if ( allProperties.size() == 0 ) {
            throw new <API key>( "No External or Internal Properties were loaded." ) ;
        }
        else
            LOGGER.info( allProperties.size() + " Properties loaded" ) ;

        return allProperties ;
    }

    /**
     * Load properties from a file.
     * Failures are logged and swallowed — an empty (or defaults-only)
     * Properties object is returned on error.
     * @param fname File name
     * @param defaultProperties - a list of separate properies which can optionally be
     * specified as the defaults, in case a property doesn't exist
     * @see java.util.Properties
     * @return Properties
     */
    public static Properties <API key>(
            final String fname, final Properties defaultProperties ) {
        Properties props = null ;
        if ( defaultProperties != null )
            props = new Properties ( defaultProperties ) ; // provide any default, if specified
        else
            props = new Properties();
        try {
            // NOTE(review): FileInputStream throws rather than returning null,
            // so the else-branch below is unreachable; also the stream is
            // never closed — candidates for cleanup once identifiers are known.
            FileInputStream in = new FileInputStream ( fname ) ;
            if ( in != null )
                props.load(in);
            else
                LOGGER.warn ( "Error creating InputStream. Unable to load properties from file '"
                        + fname + "'." ) ;
        }
        catch (IOException e) {
            LOGGER.warn ( "Unable to load properties file '" + fname + "'." ) ;
        }
        return props;
    }

    /**
     * Load properties from a file.
     * Resolves the file via the current thread's context class loader,
     * i.e. the name is classpath-relative.
     * @param fname File name
     * @see java.util.Properties
     * @return Properties (empty on failure; errors are logged, not thrown)
     */
    public static Properties loadProperties( final String fname ) {
        Properties props = new Properties();
        try {
            InputStream in = Thread.currentThread().
                <API key>().getResourceAsStream(fname);
            if ( in != null )
                props.load(in);
            else
                LOGGER.warn ( "Error creating InputStream. Unable to load properties file '"
                        + fname + "'." ) ;
        }
        catch (IOException e) {
            LOGGER.warn ( "Unable to load properties file '" + fname + "'." ) ;
        }
        return props;
    }

    /**
     * Find a property. Look first in system properties, then in application
     * properties (i.e. webgenome.properties file).
     * @param propName Property name
     * @return Property
     * @deprecated
     */
    public static String findProperty(final String propName) {
        String prop = System.getProperty(propName);
        if (prop == null) {
            prop = SystemUtils.<API key>(propName);
        }
        return prop;
    }
}
""" Serialization ``django.core.serializers`` provides interfaces to converting Django ``QuerySet`` objects to and from "flat" data (i.e. strings). """ from decimal import Decimal from django.db import models class <API key>(models.Manager): def get_by_natural_key(self, kind, name): return self.get(kind=kind, name=name) class CategoryMetaData(models.Model): kind = models.CharField(max_length=10) name = models.CharField(max_length=10) value = models.CharField(max_length=10) objects = <API key>() class Meta: unique_together = (('kind', 'name'),) def __str__(self): return '[%s:%s]=%s' % (self.kind, self.name, self.value) def natural_key(self): return (self.kind, self.name) class Category(models.Model): name = models.CharField(max_length=20) meta_data = models.ForeignKey(CategoryMetaData, models.SET_NULL, null=True, default=None) class Meta: ordering = ('name',) def __str__(self): return self.name class Author(models.Model): name = models.CharField(max_length=20) class Meta: ordering = ('name',) def __str__(self): return self.name class Article(models.Model): author = models.ForeignKey(Author, models.CASCADE) headline = models.CharField(max_length=50) pub_date = models.DateTimeField() categories = models.ManyToManyField(Category) meta_data = models.ManyToManyField(CategoryMetaData) class Meta: ordering = ('pub_date',) def __str__(self): return self.headline class AuthorProfile(models.Model): author = models.OneToOneField(Author, models.CASCADE, primary_key=True) date_of_birth = models.DateField() def __str__(self): return "Profile of %s" % self.author class Actor(models.Model): name = models.CharField(max_length=20, primary_key=True) class Meta: ordering = ('name',) def __str__(self): return self.name class Movie(models.Model): actor = models.ForeignKey(Actor, models.CASCADE) title = models.CharField(max_length=50) price = models.DecimalField(max_digits=6, decimal_places=2, default=Decimal('0.00')) class Meta: ordering = ('title',) def __str__(self): return self.title 
class Score(models.Model):
    # Float field — exercises float serialization/deserialization.
    score = models.FloatField()


class Team:
    # Plain (non-model) value object stored through the custom TeamField
    # below.
    def __init__(self, title):
        self.title = title

    def __str__(self):
        # Deliberately unusable: presumably forces serializers to go through
        # TeamField.value_to_string()/to_string() instead of str() —
        # TODO confirm against the tests that use this model.
        raise NotImplementedError("Not so simple")

    def to_string(self):
        return "%s" % self.title


class TeamField(models.CharField):
    # Custom field mapping Team objects to/from their title string.

    def __init__(self):
        # Fixed max_length; removed again in deconstruct() so migrations
        # don't record it as an explicit argument.
        super(TeamField, self).__init__(max_length=100)

    def get_db_prep_save(self, value, connection):
        # Persist only the title.
        return str(value.title)

    def to_python(self, value):
        # Idempotent: accepts either a Team or a raw title string.
        if isinstance(value, Team):
            return value
        return Team(value)

    def from_db_value(self, value, expression, connection, context):
        # NOTE(review): the `context` argument is the pre-Django-2.0
        # signature — confirm against the Django version this targets.
        return Team(value)

    def value_to_string(self, obj):
        # Serializers call this; uses Team.to_string() since str(Team) raises.
        return self.value_from_object(obj).to_string()

    def deconstruct(self):
        # Drop max_length because __init__ takes no arguments.
        name, path, args, kwargs = super(TeamField, self).deconstruct()
        del kwargs['max_length']
        return name, path, args, kwargs


class Player(models.Model):
    name = models.CharField(max_length=50)
    rank = models.IntegerField()
    team = TeamField()

    def __str__(self):
        return '%s (%d) playing for %s' % (self.name, self.rank, self.team.to_string())


class BaseModel(models.Model):
    parent_data = models.IntegerField()


class ProxyBaseModel(BaseModel):
    # Proxy (no table of its own) — serialization should target BaseModel.
    class Meta:
        proxy = True


class ProxyProxyBaseModel(ProxyBaseModel):
    # Second-level proxy: proxy of a proxy.
    class Meta:
        proxy = True


class ComplexModel(models.Model):
    field1 = models.CharField(max_length=10)
    field2 = models.CharField(max_length=10)
    field3 = models.CharField(max_length=10)
package net.sourceforge.pmd.lang.dfa;

import java.util.ArrayList;
import java.util.BitSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

import net.sourceforge.pmd.lang.ast.Node;

/**
 * Base implementation of a node in a data-flow graph.
 *
 * @author raik
 *         <p/>
 *         Each data flow contains a set of DataFlowNodes.
 */
public abstract class <API key> implements DataFlowNode {

    /** AST node this flow node represents; null when the List-only constructor was used. */
    protected Node node;

    /** Lazily-populated lookup from NodeType constant to display name; see stringFromType(). */
    protected Map<Integer, String> typeMap = new HashMap<Integer, String>();

    /** Predecessors in the flow graph. */
    protected List<DataFlowNode> parents = new ArrayList<DataFlowNode>();

    /** Successors in the flow graph. */
    protected List<DataFlowNode> children = new ArrayList<DataFlowNode>();

    /** Bit i is set when this node has NodeType i; a node may carry several types. */
    protected BitSet type = new BitSet();

    /** Variable accesses recorded on this node. */
    protected List<VariableAccess> variableAccess = new ArrayList<VariableAccess>();

    /** The whole flow this node belongs to; the constructor appends this node to it. */
    protected List<DataFlowNode> dataFlow;

    /** Source line, taken from the AST node's begin line when one is bound. */
    protected int line;

    /**
     * Creates a node, links it as child of the current last node of
     * {@code dataFlow} (if any), and appends it to the flow.
     */
    public <API key>(List<DataFlowNode> dataFlow) {
        this.dataFlow = dataFlow;
        if (!this.dataFlow.isEmpty()) {
            DataFlowNode parent = this.dataFlow.get(this.dataFlow.size() - 1);
            parent.addPathToChild(this);
        }
        this.dataFlow.add(this);
    }

    /**
     * Same as the single-argument constructor, but also binds the AST node
     * (registering this flow node on it) and records its begin line.
     */
    public <API key>(List<DataFlowNode> dataFlow, Node node) {
        this(dataFlow);
        this.node = node;
        node.setDataFlowNode(this);
        this.line = node.getBeginLine();
    }

    /**
     * Adds an edge from this node to {@code child} plus the reverse parent
     * link — unless the edge already exists. Note the {@code ||}: a
     * self-edge is added even when already present.
     */
    public void addPathToChild(DataFlowNode child) {
        DataFlowNode thisChild = child;
        // TODO - throw an exception if already contained in children list?
        if (!this.children.contains(thisChild) || this.equals(thisChild)) {
            this.children.add(thisChild);
            thisChild.getParents().add(this);
        }
    }

    /**
     * Removes the edge to {@code child} in both directions.
     *
     * @return true if {@code child} was in the children list
     */
    public boolean removePathToChild(DataFlowNode child) {
        DataFlowNode thisChild = child;
        thisChild.getParents().remove(this);
        return this.children.remove(thisChild);
    }

    /**
     * Redirects every incoming edge of this node to {@code destination},
     * detaching this node from all of its parents.
     */
    public void <API key>(DataFlowNode destination) {
        while (!parents.isEmpty()) {
            DataFlowNode parent = parents.get(0);
            parent.removePathToChild(this);
            parent.addPathToChild(destination);
        }
    }

    public int getLine() {
        return this.line;
    }

    /** Marks this node with the given NodeType bit. */
    public void setType(int type) {
        this.type.set(type);
    }

    /** @return true when the given NodeType bit is set; false on lookup failure */
    public boolean isType(int intype) {
        try {
            return type.get(intype);
        } catch (<API key> e) {
            e.printStackTrace();
        }
        return false;
    }

    public Node getNode() {
        return this.node;
    }

    public List<DataFlowNode> getChildren() {
        return this.children;
    }

    public List<DataFlowNode> getParents() {
        return this.parents;
    }

    public List<DataFlowNode> getFlow() {
        return this.dataFlow;
    }

    /** Position of this node within its flow list. */
    public int getIndex() {
        return this.dataFlow.indexOf(this);
    }

    /** Replaces the access list when empty, otherwise appends to it. */
    public void setVariableAccess(List<VariableAccess> variableAccess) {
        if (this.variableAccess.isEmpty()) {
            this.variableAccess = variableAccess;
        } else {
            this.variableAccess.addAll(variableAccess);
        }
    }

    public List<VariableAccess> getVariableAccess() {
        return this.variableAccess;
    }

    /**
     * Debug representation: line number, the names of all set NodeTypes,
     * the simple class name of the AST node and its image (if any).
     */
    @Override
    public String toString() {
        String res = "DataFlowNode: line " + this.getLine() + ", ";
        String tmp = type.toString();
        // BitSet.toString() renders like "{1, 4, 7}"; strip braces and
        // spaces so only the comma-separated bit indices remain.
        String newTmp = "";
        for (char c : tmp.toCharArray()) {
            if (c != '{' && c != '}' && c != ' ') {
                newTmp += c;
            }
        }
        for (StringTokenizer st = new StringTokenizer(newTmp, ","); st.hasMoreTokens();) {
            int newTmpInt = Integer.parseInt(st.nextToken());
            res += "(" + stringFromType(newTmpInt) + ")";
        }
        res += ", " + this.node.getClass().getName().substring(node.getClass().getName().lastIndexOf('.') + 1);
        res += node.getImage() == null ? "" : "(" + this.node.getImage() + ")";
        return res;
    }

    /**
     * Maps a NodeType constant to its display name, filling the lookup
     * table on first use.
     *
     * @throws RuntimeException when {@code intype} is not a known NodeType
     */
    private String stringFromType(int intype) {
        if (typeMap.isEmpty()) {
            typeMap.put(NodeType.IF_EXPR, "IF_EXPR");
            typeMap.put(NodeType.IF_LAST_STATEMENT, "IF_LAST_STATEMENT");
            typeMap.put(NodeType.<API key>, "<API key>");
            typeMap.put(NodeType.ELSE_LAST_STATEMENT, "ELSE_LAST_STATEMENT");
            typeMap.put(NodeType.<API key>, "<API key>");
            typeMap.put(NodeType.WHILE_EXPR, "WHILE_EXPR");
            typeMap.put(NodeType.SWITCH_START, "SWITCH_START");
            typeMap.put(NodeType.CASE_LAST_STATEMENT, "CASE_LAST_STATEMENT");
            typeMap.put(NodeType.<API key>, "<API key>");
            typeMap.put(NodeType.SWITCH_END, "SWITCH_END");
            typeMap.put(NodeType.FOR_INIT, "FOR_INIT");
            typeMap.put(NodeType.FOR_EXPR, "FOR_EXPR");
            typeMap.put(NodeType.FOR_UPDATE, "FOR_UPDATE");
            typeMap.put(NodeType.<API key>, "<API key>");
            typeMap.put(NodeType.FOR_END, "FOR_END");
            typeMap.put(NodeType.<API key>, "<API key>");
            typeMap.put(NodeType.DO_EXPR, "DO_EXPR");
            typeMap.put(NodeType.RETURN_STATEMENT, "RETURN_STATEMENT");
            typeMap.put(NodeType.BREAK_STATEMENT, "BREAK_STATEMENT");
            typeMap.put(NodeType.CONTINUE_STATEMENT, "CONTINUE_STATEMENT");
            typeMap.put(NodeType.LABEL_STATEMENT, "LABEL_STATEMENT");
            typeMap.put(NodeType.<API key>, "LABEL_END");
            typeMap.put(NodeType.THROW_STATEMENT, "THROW_STATEMENT");
        }
        if (!typeMap.containsKey(intype)) {
            throw new RuntimeException("Couldn't find type id " + intype);
        }
        return typeMap.get(intype);
    }
}
package gov.nih.nci.ess.ae.service.aeadvancedquery.client; import java.io.InputStream; import java.rmi.RemoteException; import javax.xml.namespace.QName; import org.apache.axis.EngineConfiguration; import org.apache.axis.client.AxisClient; import org.apache.axis.client.Stub; import org.apache.axis.configuration.FileProvider; import org.apache.axis.message.addressing.<API key>; import org.apache.axis.types.URI.<API key>; import org.oasis.wsrf.properties.<API key>; import org.globus.gsi.GlobusCredential; import gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key>; import gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.service.<API key>; import gov.nih.nci.ess.ae.service.aeadvancedquery.common.AEAdvancedQueryI; import gov.nih.nci.cagrid.introduce.security.client.<API key>; public class <API key> extends <API key> implements AEAdvancedQueryI { public <API key>(String url) throws <API key>, RemoteException { this(url,null); } public <API key>(String url, GlobusCredential proxy) throws <API key>, RemoteException { super(url,proxy); } public <API key>(<API key> epr) throws <API key>, RemoteException { this(epr,null); } public <API key>(<API key> epr, GlobusCredential proxy) throws <API key>, RemoteException { super(epr,proxy); } public static void usage(){ System.out.println(<API key>.class.getName() + " -url <service url>"); } public static void main(String [] args){ System.out.println("Running the Grid Service Client"); try{ if(!(args.length < 2)){ if(args[0].equals("-url")){ <API key> client = new <API key>(args[1]); // place client calls here if you want to use this main as a // test.... 
} else { usage(); System.exit(1); } } else { usage(); System.exit(1); } } catch (Exception e) { e.printStackTrace(); System.exit(1); } } public ess.caaers.nci.nih.gov.DSET_AdverseEvent findAdverseEvents(ess.caaers.nci.nih.gov.AdverseEventQuery adverseEventQuery,ess.caaers.nci.nih.gov.LimitOffset limitOffset) throws RemoteException, gov.nih.nci.ess.ae.service.management.stubs.types.<API key> { synchronized(portTypeMutex){ <API key>((Stub)portType,"findAdverseEvents"); gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key> params = new gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key>(); gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key> <API key> = new gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key>(); <API key>.<API key>(adverseEventQuery); params.<API key>(<API key>); gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key> <API key> = new gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key>(); <API key>.setLimitOffset(limitOffset); params.setLimitOffset(<API key>); gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key> boxedResult = portType.findAdverseEvents(params); return boxedResult.<API key>(); } } public ess.caaers.nci.nih.gov.DSET_AuditTrail <API key>(ess.caaers.nci.nih.gov.Id <API key>,ess.caaers.nci.nih.gov.TsDateTime minDate) throws RemoteException, gov.nih.nci.ess.ae.service.management.stubs.types.<API key> { synchronized(portTypeMutex){ <API key>((Stub)portType,"<API key>"); gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key> params = new gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key>(); gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key> <API key> = new gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key>(); <API key>.setId(<API key>); params.<API key>(<API key>); gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key> minDateContainer = new gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key>(); minDateContainer.setTsDateTime(minDate); 
params.setMinDate(minDateContainer); gov.nih.nci.ess.ae.service.aeadvancedquery.stubs.<API key> boxedResult = portType.<API key>(params); return boxedResult.getDSET_AuditTrail(); } } }
/* TEMPLATE GENERATED TESTCASE FILE
Filename: <API key>.c
Label Definition File: <API key>.label.xml
Template File: sources-sinks-54c.tmpl.c */
/*
 * @description
 * CWE: 319 Cleartext Transmission of Sensitive Information
 * BadSource: listen_socket Read the password using a listen socket (server side)
 * GoodSource: Use a hardcoded password (one that was not sent over the network)
 * Sinks:
 *    GoodSink: Decrypt the password before using it in an authentication API call to show that it was transferred as ciphertext
 *    BadSink : Use the password directly from the source in an authentication API call to show that it was transferred as plaintext
 * Flow Variant: 54 Data flow: data passed as an argument from one function through three others to a fifth; all five functions are in different source files
 *
 * */

#include "std_testcase.h"

#include <winsock2.h>
#include <windows.h>
#include <direct.h>
#pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
#define TCP_PORT 27015
#define LISTEN_BACKLOG 5

#pragma comment(lib, "advapi32.lib")

#define HASH_INPUT "ABCDEFG123456" /* INCIDENTAL: Hardcoded crypto */

#ifndef OMITBAD

/* bad function declaration */
/* Declares the next link ("d" file) in the 5-file call chain; this file's
 * function simply forwards the password unchanged (names redacted). */
void <API key>(char * password);

void <API key>(char * password)
{
    /* Pass-through only — the source/sink logic lives in the other files
     * of this flow variant. */
    <API key>(password);
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
/* Pass-through link for the GoodSource-to-BadSink variant. */
void <API key>(char * password);

void <API key>(char * password)
{
    <API key>(password);
}

/* goodB2G uses the BadSource with the GoodSink */
/* Pass-through link for the BadSource-to-GoodSink variant. */
void <API key>(char * password);

void <API key>(char * password)
{
    <API key>(password);
}

#endif /* OMITGOOD */
#ifndef <API key>
#define <API key>

#include "base/callback.h"
#include "content/common/content_export.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/bindings/receiver.h"
#include "third_party/blink/public/mojom/worker/<API key>.mojom.h"
#include "url/origin.h"

namespace content {

class SharedWorkerHost;

// <API key> passes content settings to its renderer
// counterpart blink::<API key>.
//
// SharedWorkerHost owns this class, so the lifetime of this class is strongly
// associated to it.
class CONTENT_EXPORT <API key>
    : public blink::mojom::<API key> {
 public:
  // Binds |receiver| for the worker at |script_url|; |owner| is the owning
  // SharedWorkerHost (not owned here — see the class comment).
  <API key>(
      const GURL& script_url,
      SharedWorkerHost* owner,
      mojo::PendingReceiver<blink::mojom::<API key>>
          receiver);

  // Non-copyable: holds a mojo::Receiver and a raw back-pointer to its owner.
  <API key>(
      const <API key>&) = delete;
  <API key>& operator=(
      const <API key>&) = delete;

  ~<API key>() override;

  // blink::mojom::<API key> implementation.
  // Each method answers a storage-permission query from the renderer and
  // runs |callback| with the result (policy lives in the .cc, not visible
  // here).
  void AllowIndexedDB(<API key> callback) override;
  void AllowCacheStorage(<API key> callback) override;
  void AllowWebLocks(<API key> callback) override;
  void <API key>(
      <API key> callback) override;

 private:
  // Origin derived from the script URL passed to the constructor —
  // TODO confirm derivation in the .cc.
  const url::Origin origin_;
  // Back-pointer to the owning host; outlives this object per the class
  // comment above.
  SharedWorkerHost* owner_;
  mojo::Receiver<blink::mojom::<API key>>
      receiver_;
};

}  // namespace content

#endif  // <API key>
using System; using System.Collections; using System.Collections.Generic; using System.Threading; namespace OpenMetaverse { public sealed class WrappedObject<T> : IDisposable where T : class { private T _instance; internal readonly ObjectPoolSegment<T> _owningSegment; internal readonly ObjectPoolBase<T> _owningObjectPool; private bool _disposed = false; internal WrappedObject(ObjectPoolBase<T> owningPool, ObjectPoolSegment<T> ownerSegment, T activeInstance) { _owningObjectPool = owningPool; _owningSegment = ownerSegment; _instance = activeInstance; } ~WrappedObject() { #if !PocketPC // If the AppDomain is being unloaded, or the CLR is // shutting down, just exit gracefully if (Environment.HasShutdownStarted) return; #endif // Object Resurrection in Action! GC.<API key>(this); // Return this instance back to the owning queue _owningObjectPool.CheckIn(_owningSegment, _instance); } <summary> Returns an instance of the class that has been checked out of the Object Pool. </summary> public T Instance { get { if (_disposed) throw new <API key>("WrappedObject"); return _instance; } } <summary> Checks the instance back into the object pool </summary> public void Dispose() { if (_disposed) return; _disposed = true; _owningObjectPool.CheckIn(_owningSegment, _instance); GC.SuppressFinalize(this); } } public abstract class ObjectPoolBase<T> : IDisposable where T : class { private int _itemsPerSegment = 32; private int <API key> = 1; // A segment won't be eligible for cleanup unless it's at least this old... private TimeSpan <API key> = new TimeSpan(0, 5, 0); // ever increasing segment counter private int _activeSegment = 0; private bool _gc = true; private volatile bool _disposed = false; private Dictionary<int, ObjectPoolSegment<T>> _segments = new Dictionary<int, ObjectPoolSegment<T>>(); private object _syncRoot = new object(); private object _timerLock = new object(); // create a timer that starts in 5 minutes, and gets called every 5 minutes. 
System.Threading.Timer _timer; int _cleanupFrequency; <summary> Creates a new instance of the ObjectPoolBase class. Initialize MUST be called after using this constructor. </summary> protected ObjectPoolBase() { } <summary> Creates a new instance of the ObjectPool Base class. </summary> <param name="itemsPerSegment">The object pool is composed of segments, which are allocated whenever the size of the pool is exceeded. The number of items in a segment should be large enough that allocating a new segmeng is a rare thing. For example, on a server that will have 10k people logged in at once, the receive buffer object pool should have segment sizes of at least 1000 byte arrays per segment. </param> <param name="minimumSegmentCount">The minimun number of segments that may exist.</param> <param name="gcOnPoolGrowth">Perform a full GC.Collect whenever a segment is allocated, and then again after allocation to compact the heap.</param> <param name="cleanupFrequenceMS">The frequency which segments are checked to see if they're eligible for cleanup.</param> protected ObjectPoolBase(int itemsPerSegment, int minimumSegmentCount, bool gcOnPoolGrowth, int cleanupFrequenceMS) { Initialize(itemsPerSegment, minimumSegmentCount, gcOnPoolGrowth, cleanupFrequenceMS); } protected void Initialize(int itemsPerSegment, int minimumSegmentCount, bool gcOnPoolGrowth, int cleanupFrequenceMS) { _itemsPerSegment = itemsPerSegment; <API key> = minimumSegmentCount; _gc = gcOnPoolGrowth; // force garbage collection to make sure these new long lived objects // cause as little fragmentation as possible if (_gc) System.GC.Collect(); lock (_syncRoot) { while (_segments.Count < this.MinimumSegmentCount) { ObjectPoolSegment<T> segment = CreateSegment(false); _segments.Add(segment.SegmentNumber, segment); } } // This forces a compact, to make sure our objects fill in any holes in the heap. 
if (_gc) { System.GC.Collect(); } _timer = new Timer(<API key>, null, cleanupFrequenceMS, cleanupFrequenceMS); } <summary> Forces the segment cleanup algorithm to be run. This method is intended primarly for use from the Unit Test libraries. </summary> internal void ForceCleanup() { <API key>(null); } private void <API key>(object state) { if (_disposed) return; if (Monitor.TryEnter(_timerLock) == false) return; try { lock (_syncRoot) { // If we're below, or at, or minimum segment count threshold, // there's no point in going any further. if (_segments.Count <= <API key>) return; for (int i = _activeSegment; i > 0; i { ObjectPoolSegment<T> segment; if (_segments.TryGetValue(i, out segment) == true) { // For the "old" segments that were allocated at startup, this will // always be false, as their expiration dates are set at infinity. if (segment.CanBeCleanedUp()) { _segments.Remove(i); segment.Dispose(); } } } } } finally { Monitor.Exit(_timerLock); } } <summary> Responsible for allocate 1 instance of an object that will be stored in a segment. </summary> <returns>An instance of whatever objec the pool is pooling.</returns> protected abstract T GetObjectInstance(); private ObjectPoolSegment<T> CreateSegment(bool <API key>) { if (_disposed) throw new <API key>("ObjectPoolBase"); if (<API key>) Logger.Log("Creating new object pool segment", Helpers.LogLevel.Info); // This method is called inside a lock, so no interlocked stuff required. int segmentToAdd = _activeSegment; _activeSegment++; Queue<T> buffers = new Queue<T>(); for (int i = 1; i <= this._itemsPerSegment; i++) { T obj = GetObjectInstance(); buffers.Enqueue(obj); } // certain segments we don't want to ever be cleaned up (the initial segments) DateTime cleanupTime = (<API key>) ? DateTime.Now.Add(this.<API key>) : DateTime.MaxValue; ObjectPoolSegment<T> segment = new ObjectPoolSegment<T>(segmentToAdd, buffers, cleanupTime); return segment; } <summary> Checks in an instance of T owned by the object pool. 
This method is only intended to be called by the <c>WrappedObject</c> class. </summary> <param name="owningSegment">The segment from which the instance is checked out.</param> <param name="instance">The instance of <c>T</c> to check back into the segment.</param> internal void CheckIn(ObjectPoolSegment<T> owningSegment, T instance) { lock (_syncRoot) { owningSegment.CheckInObject(instance); } } <summary> Checks an instance of <c>T</c> from the pool. If the pool is not sufficient to allow the checkout, a new segment is created. </summary> <returns>A <c>WrappedObject</c> around the instance of <c>T</c>. To check the instance back into the segment, be sureto dispose the WrappedObject when finished. </returns> public WrappedObject<T> CheckOut() { if (_disposed) throw new <API key>("ObjectPoolBase"); // It's key that this CheckOut always, always, uses a pooled object // from the oldest available segment. This will help keep the "newer" // segments from being used - which in turn, makes them eligible // for deletion. lock (_syncRoot) { ObjectPoolSegment<T> targetSegment = null; // find the oldest segment that has items available for checkout for (int i = 0; i < _activeSegment; i++) { ObjectPoolSegment<T> segment; if (_segments.TryGetValue(i, out segment) == true) { if (segment.AvailableItems > 0) { targetSegment = segment; break; } } } if (targetSegment == null) { // We couldn't find a sigment that had any available space in it, // so it's time to create a new segment. // Before creating the segment, do a GC to make sure the heap // is compacted. if (_gc) GC.Collect(); targetSegment = CreateSegment(true); if (_gc) GC.Collect(); _segments.Add(targetSegment.SegmentNumber, targetSegment); } WrappedObject<T> obj = new WrappedObject<T>(this, targetSegment, targetSegment.CheckOutObject()); return obj; } } <summary> The total number of segments created. Intended to be used by the Unit Tests. 
</summary> public int TotalSegments { get { if (_disposed) throw new <API key>("ObjectPoolBase"); lock (_syncRoot) { return _segments.Count; } } } <summary> The number of items that are in a segment. Items in a segment are all allocated at the same time, and are hopefully close to each other in the managed heap. </summary> public int ItemsPerSegment { get { if (_disposed) throw new <API key>("ObjectPoolBase"); return _itemsPerSegment; } } <summary> The minimum number of segments. When segments are reclaimed, this number of segments will always be left alone. These segments are allocated at startup. </summary> public int MinimumSegmentCount { get { if (_disposed) throw new <API key>("ObjectPoolBase"); return <API key>; } } <summary> The age a segment must be before it's eligible for cleanup. This is used to prevent thrash, and typical values are in the 5 minute range. </summary> public TimeSpan <API key> { get { if (_disposed) throw new <API key>("ObjectPoolBase"); return <API key>; } set { if (_disposed) throw new <API key>("ObjectPoolBase"); <API key> = value; } } <summary> The frequence which the cleanup thread runs. This is typically expected to be in the 5 minute range. 
</summary> public int <API key> { get { if (_disposed) throw new <API key>("ObjectPoolBase"); return _cleanupFrequency; } set { if (_disposed) throw new <API key>("ObjectPoolBase"); Interlocked.Exchange(ref _cleanupFrequency, value); _timer.Change(_cleanupFrequency, _cleanupFrequency); } } #region IDisposable Members public void Dispose() { if (_disposed) return; Dispose(true); GC.SuppressFinalize(this); } protected virtual void Dispose(bool disposing) { if (disposing) { lock (_syncRoot) { if (_disposed) return; _timer.Dispose(); _disposed = true; foreach (KeyValuePair<int, ObjectPoolSegment<T>> kvp in _segments) { try { kvp.Value.Dispose(); } catch (Exception) { } } _segments.Clear(); } } } #endregion } internal class ObjectPoolSegment<T> : IDisposable where T : class { private Queue<T> _liveInstances = new Queue<T>(); private int _segmentNumber; private int _originalCount; private bool _isDisposed = false; private DateTime <API key>; public int SegmentNumber { get { return _segmentNumber; } } public int AvailableItems { get { return _liveInstances.Count; } } public DateTime <API key> { get { return <API key>; } } public ObjectPoolSegment(int segmentNumber, Queue<T> liveInstances, DateTime <API key>) { _segmentNumber = segmentNumber; _liveInstances = liveInstances; _originalCount = liveInstances.Count; <API key> = <API key>; } public bool CanBeCleanedUp() { if (_isDisposed == true) throw new <API key>("ObjectPoolSegment"); return ((_originalCount == _liveInstances.Count) && (DateTime.Now > <API key>)); } public void Dispose() { if (_isDisposed) return; _isDisposed = true; bool shouldDispose = (typeof(T) is IDisposable); while (_liveInstances.Count != 0) { T instance = _liveInstances.Dequeue(); if (shouldDispose) { try { (instance as IDisposable).Dispose(); } catch (Exception) { } } } } internal void CheckInObject(T o) { if (_isDisposed == true) throw new <API key>("ObjectPoolSegment"); _liveInstances.Enqueue(o); } internal T CheckOutObject() { if (_isDisposed == 
true) throw new <API key>("ObjectPoolSegment"); if (0 == _liveInstances.Count) throw new <API key>("No Objects Available for Checkout"); T o = _liveInstances.Dequeue(); return o; } } }
.matte-Resizer { display: inline-block; } .<API key> { position: absolute; border: 1px solid black; } .<API key> { position: absolute; width: 7px; height: 7px; border: 1px solid black; -moz-border-radius: 4px; border-radius: 4px; background: white; } .<API key>:hover { width: 9px; height: 9px; -moz-border-radius: 5px; border-radius: 5px; } .<API key>.n { top: -4px; left: 50%; margin-left: -4px; cursor: n-resize; } .<API key>.n:hover { top: -5px; margin-left: -5px; } .<API key>.ne { top: -4px; right: -4px; cursor: ne-resize; } .<API key>.ne:hover { top: -5px; right: -5px; } .<API key>.e { right: -4px; top: 50%; margin-top: -4px; cursor: e-resize; } .<API key>.e:hover { right: -5px; margin-top: -5px; } .<API key>.se { bottom: -4px; right: -4px; cursor: se-resize; } .<API key>.se:hover { bottom: -5px; right: -5px; } .<API key>.s { bottom: -4px; left: 50%; margin-left: -4px; cursor: s-resize; } .<API key>.s:hover { bottom: -5px; margin-left: -5px; } .<API key>.sw { bottom: -4px; left: -4px; cursor: sw-resize; } .<API key>.sw:hover { bottom: -5px; left: -5px; } .<API key>.w { left: -4px; top: 50%; margin-top: -4px; cursor: w-resize; } .<API key>.w:hover { left: -5px; margin-top: -5px; } .<API key>.nw { top: -4px; left: -4px; cursor: nw-resize; } .<API key>.nw:hover { top: -5px; left: -5px; } .matte-Resizer-image { position: absolute; display: none; top: 0; left: 0; width: 100%; height: 100%; opacity: 0.5; } .<API key> Resizer-image { display: block; } .<API key>::selection { background: rgba(0,0,0,0); }
var path = require( 'path' ); exports.input = path.resolve( __dirname, 'src' ); exports.output = path.resolve( __dirname, 'dist' ); // var moduleEntries = 'html,htm,phtml,tpl,vm,js'; // var pageEntries = 'html,htm,phtml,tpl,vm'; exports.getProcessors = function () { var jsProcessor = new JsCompressor(); return [jsProcessor]; }; exports.exclude = [ 'test' ]; exports.injectProcessor = function ( processors ) { for ( var key in processors ) { global[ key ] = processors[ key ]; } };
# Powered by the Bokeh Development Team. # Boilerplate from __future__ import absolute_import, division, print_function, unicode_literals import logging log = logging.getLogger(__name__) # Imports # Standard library imports import json from warnings import warn from uuid import uuid4 # External imports # Bokeh imports from .state import curstate from ..util.serialization import make_id # Globals and constants HTML_MIME_TYPE = 'text/html' JS_MIME_TYPE = 'application/javascript' LOAD_MIME_TYPE = 'application/vnd.bokehjs_load.v0+json' EXEC_MIME_TYPE = 'application/vnd.bokehjs_exec.v0+json' # General API class CommsHandle(object): _json = {} def __init__(self, comms, cell_doc): self._cellno = None try: from IPython import get_ipython ip = get_ipython() hm = ip.history_manager p_prompt = list(hm.get_tail(1, include_latest=True))[0][1] self._cellno = p_prompt except Exception as e: log.debug("Could not get Notebook cell number, reason: %s", e) self._comms = comms self._doc = cell_doc # Our internal copy of the doc is in perpetual "hold". Events from the # originating doc will be triggered and collected it it. Events are # processed/cleared when push_notebook is called for this comms handle self._doc.hold() def _repr_html_(self): if self._cellno is not None: return "<p><code>&lt;Bokeh Notebook handle for <strong>In[%s]</strong>&gt;</code></p>" % str(self._cellno) else: return "<p><code>&lt;Bokeh Notebook handle&gt;</code></p>" @property def comms(self): return self._comms @property def doc(self): return self._doc # Adding this method makes curdoc dispatch to this Comms to handle # and Document model changed events. 
If we find that the event is # for a model in our internal copy of the docs, then trigger the # internal doc with the event so that it is collected (until a # call to push_notebook processes and clear colleted events) def <API key>(self, event): if event.model._id in self.doc._all_models: self.doc._trigger_on_change(event) def <API key>(notebook_type, load, show_doc, show_app, overwrite=False): ''' Install a new notebook display hook. Bokeh comes with support for Jupyter notebooks built-in. However, there are other kinds of notebooks in use by different communities. This function provides a mechanism for other projects to instruct Bokeh how to display content in other notebooks. This function is primarily of use to developers wishing to integrate Bokeh with new notebook types. Args: notebook_type (str) : A name for the notebook type, e.e. ``'Jupyter'`` or ``'Zeppelin'`` If the name has previously been installed, a ``RuntimeError`` will be raised, unless ``overwrite=True`` load (callable) : A function for loading BokehJS in a notebook type. The function will be called with the following arguments: .. code-block:: python load( resources, # A Resources object for how to load BokehJS verbose, # Whether to display verbose loading banner hide_banner, # Whether to hide the output banner entirely load_timeout # Time after which to report a load fail error ) show_doc (callable) : A function for displaying Bokeh standalone documents in the notebook type. This function will be called with the following arguments: .. code-block:: python show_doc( obj, # the Bokeh object to display state, # current bokeh.io "state" notebook_handle # whether a notebook handle was requested ) If the notebook platform is capable of supporting in-place updates to plots then this function may return an opaque notebook handle that can be used for that purpose. The handle will be returned by ``show()``, and can be used by as appropriate to update plots, etc. 
by additional functions in the library that installed the hooks. show_app (callable) : A function for displaying Bokeh applications in the notebook type. This function will be called with the following arguments: .. code-block:: python show_app( app, # the Bokeh Application to display state, # current bokeh.io "state" notebook_url # URL to the current active notebook page ) overwrite (bool, optional) : Whether to allow an existing hook to be overwritten by a new definition (default: False) Returns: None Raises: RuntimeError If ``notebook_type`` is already installed and ``overwrite=False`` ''' if notebook_type in _HOOKS and not overwrite: raise RuntimeError("hook for notebook type %r already exists" % notebook_type) _HOOKS[notebook_type] = dict(load=load, doc=show_doc, app=show_app) def push_notebook(document=None, state=None, handle=None): ''' Update Bokeh plots in a Jupyter notebook output cells with new data or property values. When working the the notebook, the ``show`` function can be passed the argument ``notebook_handle=True``, which will cause it to return a handle object that can be used to update the Bokeh output later. When ``push_notebook`` is called, any property updates (e.g. plot titles or data source values, etc.) since the last call to ``push_notebook`` or the original ``show`` call are applied to the Bokeh output in the previously rendered Jupyter output cell. Several example notebooks can be found in the GitHub repository in the :bokeh-tree:`examples/howto/notebook_comms` directory. Args: document (Document, optional) : A :class:`~bokeh.document.Document` to push from. If None, uses ``curdoc()``. (default: None) state (State, optional) : A :class:`State` object. If None, then the current default state (set by ``output_file``, etc.) is used. (default: None) Returns: None Examples: Typical usage is typically similar to this: .. 
code-block:: python from bokeh.plotting import figure from bokeh.io import output_notebook, push_notebook, show output_notebook() plot = figure() plot.circle([1,2,3], [4,6,5]) handle = show(plot, notebook_handle=True) # Update the plot title in the earlier cell plot.title.text = "New Title" push_notebook(handle=handle) ''' from ..protocol import Protocol if state is None: state = curstate() if not document: document = state.document if not document: warn("No document to push") return if handle is None: handle = state.last_comms_handle if not handle: warn("Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()") return events = list(handle.doc._held_events) # This is to avoid having an exception raised for attempting to create a # PATCH-DOC with no events. In the notebook, we just want to silenty # ignore calls to push_notebook when there are no new events if len(events) == 0: return handle.doc._held_events = [] msg = Protocol("1.0").create("PATCH-DOC", events) handle.comms.send(msg.header_json) handle.comms.send(msg.metadata_json) handle.comms.send(msg.content_json) for header, payload in msg.buffers: handle.comms.send(json.dumps(header)) handle.comms.send(buffers=[payload]) def run_notebook_hook(notebook_type, action, *args, **kw): ''' Run an installed notebook hook with supplied arguments. Args: noteboook_type (str) : Name of an existing installed notebook hook actions (str) : Name of the hook action to execute, ``'doc'`` or ``'app'`` All other arguments and keyword arguments are passed to the hook action exactly as supplied. 
Returns: Result of the hook action, as-is Raises: RunetimeError If the hook or specific action is not installed ''' if notebook_type not in _HOOKS: raise RuntimeError("no display hook installed for notebook type %r" % notebook_type) if _HOOKS[notebook_type][action] is None: raise RuntimeError("notebook hook for %r did not install %r action" % notebook_type, action) return _HOOKS[notebook_type][action](*args, **kw) # Dev API def destroy_server(server_id): ''' Given a UUID id of a div removed or replaced in the Jupyter notebook, destroy the corresponding server sessions and stop it. ''' server = curstate().uuid_to_server.get(server_id, None) if server is None: log.debug("No server instance found for uuid: %r" % server_id) return try: for session in server.get_sessions(): session.destroy() server.stop() del curstate().uuid_to_server[server_id] except Exception as e: log.debug("Could not destroy server for id %r: %s" % (server_id, e)) def get_comms(target_name): ''' Create a Jupyter comms object for a specific target, that can be used to update Bokeh documents in the Jupyter notebook. Args: target_name (str) : the target name the Comms object should connect to Returns Jupyter Comms ''' # NOTE: must defer all IPython imports inside functions from ipykernel.comm import Comm return Comm(target_name=target_name, data={}) def <API key>(): <API key>('jupyter', load_notebook, show_doc, show_app) def load_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000): ''' Prepare the IPython notebook for displaying Bokeh plots. Args: resources (Resource, optional) : how and where to load BokehJS from (default: CDN) verbose (bool, optional) : whether to report detailed settings (default: False) hide_banner (bool, optional): whether to hide the Bokeh banner (default: False) load_timeout (int, optional) : Timeout in milliseconds when plots assume load timed out (default: 5000) .. 
warning:: Clearing the output cell containing the published BokehJS resources HTML code may cause Bokeh CSS styling to be removed. Returns: None ''' global _NOTEBOOK_LOADED from .. import __version__ from ..core.templates import NOTEBOOK_LOAD from ..util.serialization import make_id from ..resources import CDN from ..util.compiler import bundle_all_models if resources is None: resources = CDN if not hide_banner: if resources.mode == 'inline': js_info = 'inline' css_info = 'inline' else: js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files warnings = ["Warning: " + msg['text'] for msg in resources.messages if msg['type'] == 'warn'] if _NOTEBOOK_LOADED and verbose: warnings.append('Warning: BokehJS previously loaded') element_id = make_id() html = NOTEBOOK_LOAD.render( element_id = element_id, verbose = verbose, js_info = js_info, css_info = css_info, bokeh_version = __version__, warnings = warnings, ) else: element_id = None _NOTEBOOK_LOADED = resources custom_models_js = bundle_all_models() nb_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=True) jl_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=False) if not hide_banner: <API key>({'text/html': html}) <API key>({ JS_MIME_TYPE : nb_js, LOAD_MIME_TYPE : jl_js }) def <API key>(*args, **kw): # This import MUST be deferred or it will introduce a hard dependency on IPython from IPython.display import <API key> return <API key>(*args, **kw) def show_app(app, state, notebook_url, port=0): ''' Embed a Bokeh serer application in a Jupyter Notebook output cell. Args: app (Application or callable) : A Bokeh Application to embed inline in a Jupyter notebook. state (State) : ** Unused ** notebook_url (str or callable) : The URL of the notebook server that is running the embedded app. 
If ``notebook_url`` is a string, the value string is parsed to construct the origin and full server URLs. If notebook_url is a callable, it must accept one parameter, which will be the server port, or None. If passed a port, the callable must generate the server URL, otherwise if passed None, it must generate the origin URL for the server. port (int) : A port for the embedded server will listen on. By default the port is 0, which results in the server listening on a random dynamic port. Returns: None ''' logging.basicConfig() from tornado.ioloop import IOLoop from ..server.server import Server loop = IOLoop.current() if callable(notebook_url): origin = notebook_url(None) else: origin = _origin_url(notebook_url) server = Server({"/": app}, io_loop=loop, port=port, <API key>=[origin]) server_id = uuid4().hex curstate().uuid_to_server[server_id] = server server.start() if callable(notebook_url): url = notebook_url(server.port) else: url = _server_url(notebook_url, server.port) logging.debug("Server URL is %s" % url) logging.debug("Origin URL is %s" % origin) from ..embed import server_document script = server_document(url, resources=None) <API key>({ HTML_MIME_TYPE: script, EXEC_MIME_TYPE: "" }, metadata={ EXEC_MIME_TYPE: {"server_id": server_id} }) def show_doc(obj, state, notebook_handle): from ..embed.notebook import notebook_content comms_target = make_id() if notebook_handle else None (script, div, cell_doc) = notebook_content(obj, comms_target) <API key>({HTML_MIME_TYPE: div}) <API key>({JS_MIME_TYPE: script, EXEC_MIME_TYPE: ""}, metadata={EXEC_MIME_TYPE: {"id": obj._id}}) # Comms handling relies on the fact that the cell_doc returned by # notebook copy has models with the same IDs as the original curdoc # they were copied from if comms_target: handle = CommsHandle(get_comms(comms_target), cell_doc) state.document.<API key>(handle) state.last_comms_handle = handle return handle # Private API _HOOKS = {} _NOTEBOOK_LOADED = None def _loading_js(resources, 
element_id, custom_models_js, load_timeout=5000, register_mime=True): from ..core.templates import AUTOLOAD_NB_JS return AUTOLOAD_NB_JS.render( elementid = element_id, js_urls = resources.js_files, css_urls = resources.css_files, js_raw = resources.js_raw + [custom_models_js], css_raw = resources.css_raw_str, force = True, timeout = load_timeout, register_mime = register_mime ) def _origin_url(url): if url.startswith("http"): url = url.split(" return url def _server_url(url, port): if url.startswith("http"): return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/") else: return 'http://%s:%d%s' % (url.split(':')[0], port, "/") # Code
#Stage 2 Update (Python 3) import datetime from django.test import TestCase from django.core.exceptions import <API key> from dynamic_scraper.utils.scheduler import Scheduler class SchedulerTest(TestCase): def <API key>(self): conf_dict_str = '\ "MIN_TIME" ---- 15,\n\ "MAX_TIME": 10080,\n\ "<API key>": 10,\n\ "<API key>": 20,\n\ "<API key>": 1.3,\n' self.assertRaises(<API key>, Scheduler, conf_dict_str) def <API key>(self): conf_dict_str = '\ "MIN_TIME": 15,\n\ "<API key>": 10,\n\ "<API key>": 20,\n\ "<API key>": 1.3,\n' self.assertRaises(<API key>, Scheduler, conf_dict_str) def <API key>(self): conf_dict_str = '\ "MIN_TIME": 15,\n\ "MAX_TIME": 10080,\n\ "<API key>": 10,\n\ "<API key>": 20,\n\ "<API key>": 1.3,\n' sched = Scheduler(conf_dict_str) # Successful action, not-initialized next action factor result = sched.<API key>(True, None, 0) self.assertEqual(result, (datetime.timedelta(minutes=115), 7.692, 0)) # Successful action result = sched.<API key>(True, 13, 9) self.assertEqual(result, (datetime.timedelta(minutes=150), 10, 0)) # Successful action, new time delta under min time result = sched.<API key>(True, 1, 9) self.assertEqual(result, (datetime.timedelta(minutes=15), 0.769, 0)) # Successful action, not-initialized next action factor result = sched.<API key>(False, None, 0) self.assertEqual(result, (datetime.timedelta(minutes=150), 10, 1)) # Unsuccessful action, no new action factor result = sched.<API key>(False, 10, 18) self.assertEqual(result, (datetime.timedelta(minutes=150), 10, 19)) # Unsuccessful action, new action factor result = sched.<API key>(False, 10, 19) self.assertEqual(result, (datetime.timedelta(minutes=195), 13, 0))
<?php

// Protocol Buffers - Google's data interchange format
// modification, are permitted provided that the following conditions are
// met:
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/**
 * RepeatedField and RepeatedFieldIter are used by generated protocol message
 * classes to manipulate repeated fields.
 */

namespace Google\Protobuf\Internal;

use Google\Protobuf\Internal\GPBType;
use Google\Protobuf\Internal\GPBUtil;

/**
 * RepeatedFieldIter is used to iterate RepeatedField. It is also needed for
 * the foreach syntax.
 */
class RepeatedFieldIter implements \Iterator
{

    /**
     * @ignore
     */
    private $position;
    /**
     * @ignore
     */
    private $container;

    /**
     * Create iterator instance for RepeatedField.
     *
     * @param RepeatedField $container The RepeatedField instance for which
     * this iterator is created.
     * @ignore
     */
    public function __construct($container)
    {
        $this->position = 0;
        $this->container = $container;
    }

    /**
     * Reset the status of the iterator
     *
     * @return void
     */
    public function rewind()
    {
        $this->position = 0;
    }

    /**
     * Return the element at the current position.
     *
     * @return object The element at the current position.
     */
    public function current()
    {
        return $this->container[$this->position];
    }

    /**
     * Return the current position.
     *
     * @return integer The current position.
     */
    public function key()
    {
        return $this->position;
    }

    /**
     * Move to the next position.
     *
     * @return void
     */
    public function next()
    {
        ++$this->position;
    }

    /**
     * Check whether there are more elements to iterate.
     *
     * @return bool True if there are more elements to iterate.
     */
    public function valid()
    {
        return isset($this->container[$this->position]);
    }
}

/**
 * RepeatedField is used by generated protocol message classes to manipulate
 * repeated fields. It can be used like native PHP array.
 */
class RepeatedField implements \ArrayAccess, \IteratorAggregate, \Countable
{

    /**
     * @ignore
     */
    private $container;
    /**
     * @ignore
     */
    private $type;
    /**
     * @ignore
     */
    private $klass;

    /**
     * Constructs an instance of RepeatedField.
     *
     * @param long $type Type of the stored element.
     * @param string $klass Message/Enum class name (message/enum fields only).
     * @ignore
     */
    public function __construct($type, $klass = null)
    {
        $this->container = [];
        $this->type = $type;
        $this->klass = $klass;
    }

    /**
     * @ignore
     */
    public function getType()
    {
        return $this->type;
    }

    /**
     * @ignore
     */
    public function getClass()
    {
        return $this->klass;
    }

    /**
     * Return the element at the given index.
     *
     * This will also be called for: $ele = $arr[0]
     *
     * @param long $offset The index of the element to be fetched.
     * @return object The stored element at given index.
     * @throws ErrorException Invalid type for index.
     * @throws ErrorException Non-existing index.
     */
    public function offsetGet($offset)
    {
        return $this->container[$offset];
    }

    /**
     * Assign the element at the given index.
     *
     * This will also be called for: $arr []= $ele and $arr[0] = ele
     *
     * @param long $offset The index of the element to be assigned.
     * @param object $value The element to be assigned.
     * @return void
     * @throws ErrorException Invalid type for index.
     * @throws ErrorException Non-existing index.
     * @throws ErrorException Incorrect type of the element.
     */
    public function offsetSet($offset, $value)
    {
        // Validate the value against the declared element type before storing.
        switch ($this->type) {
            case GPBType::INT32:
                GPBUtil::checkInt32($value);
                break;
            case GPBType::UINT32:
                GPBUtil::checkUint32($value);
                break;
            case GPBType::INT64:
                GPBUtil::checkInt64($value);
                break;
            case GPBType::UINT64:
                GPBUtil::checkUint64($value);
                break;
            case GPBType::FLOAT:
                GPBUtil::checkFloat($value);
                break;
            case GPBType::DOUBLE:
                GPBUtil::checkDouble($value);
                break;
            case GPBType::BOOL:
                GPBUtil::checkBool($value);
                break;
            case GPBType::STRING:
                GPBUtil::checkString($value, true);
                break;
            case GPBType::MESSAGE:
                GPBUtil::checkMessage($value, $this->klass);
                break;
            default:
                break;
        }
        if (is_null($offset)) {
            // Append ($arr[] = $value).
            $this->container[] = $value;
        } else {
            // Only existing indices may be overwritten; no holes allowed.
            $count = count($this->container);
            if (!is_numeric($offset) || $offset < 0 || $offset >= $count) {
                trigger_error(
                    "Cannot modify element at the given index",
                    E_USER_ERROR);
                return;
            }
            $this->container[$offset] = $value;
        }
    }

    /**
     * Remove the element at the given index.
     *
     * This will also be called for: unset($arr[$idx])
     *
     * @param long $offset The index of the element to be removed.
     * @return void
     * @throws ErrorException Invalid type for index.
     * @throws ErrorException The element to be removed is not at the end of the
     * RepeatedField.
     */
    public function offsetUnset($offset)
    {
        // Only the last element may be removed, keeping the array contiguous.
        $count = count($this->container);
        if (!is_numeric($offset) || $count === 0 || $offset !== $count - 1) {
            trigger_error(
                "Cannot remove element at the given index",
                E_USER_ERROR);
            return;
        }
        array_pop($this->container);
    }

    /**
     * Check the existence of the element at the given index.
     *
     * This will also be called for: isset($arr[$idx])
     *
     * @param long $offset The index of the element to be checked.
     * @return bool True if the element at the given offset exists.
     * @throws ErrorException Invalid type for index.
     */
    public function offsetExists($offset)
    {
        return isset($this->container[$offset]);
    }

    /**
     * @ignore
     */
    public function getIterator()
    {
        return new RepeatedFieldIter($this->container);
    }

    /**
     * Return the number of stored elements.
     *
     * This will also be called for: count($arr)
     *
     * @return integer The number of stored elements.
     */
    public function count()
    {
        return count($this->container);
    }
}
// modification, are permitted provided that the following conditions are
// met:
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/core/lib/transport/mdstr_hash_table.h"

#include <stdbool.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/transport/metadata.h"

// Ref-counted hash table keyed by grpc_mdstr*, using open addressing with
// quadratic probing.  The table is populated once at creation time and is
// read-only afterwards.  A NULL key marks an empty slot.
struct <API key> {
  gpr_refcount refs;   // shared ownership count
  size_t num_entries;  // number of populated entries
  size_t size;         // number of slots (2 * num_entries; see create below)
  <API key>* entries;  // slot array of length `size`
};

// Helper function for insert and get operations that performs quadratic
// probing.  Returns the slot index of `key`, or (when `find_empty` is true)
// the first empty slot on the probe sequence; returns table->size when
// nothing suitable is found.
static size_t <API key>(
    const <API key>* table, const grpc_mdstr* key,
    bool find_empty) {
  for (size_t i = 0; i < table->size; ++i) {
    const size_t idx = (key->hash + i * i) % table->size;
    if (table->entries[idx].key == NULL) return find_empty ? idx : table->size;
    if (table->entries[idx].key == key) return idx;
  }
  return table->size;  // Not found.
}

// Insert (key, value) into the table; takes a ref on key and copies value
// via the supplied vtable.  Only called during table construction.
static void <API key>(
    <API key>* table, grpc_mdstr* key, void* value,
    const <API key>* vtable) {
  GPR_ASSERT(value != NULL);
  const size_t idx = <API key>(table, key, true /* find_empty */);
  GPR_ASSERT(idx != table->size);  // Table should never be full.
  <API key>* entry = &table->entries[idx];
  entry->key = GRPC_MDSTR_REF(key);
  entry->value = vtable->copy_value(value);
  entry->vtable = vtable;
}

// Create a table containing copies of the supplied entries, with an initial
// refcount of 1.
<API key>* <API key>(
    size_t num_entries, <API key>* entries) {
  <API key>* table = gpr_malloc(sizeof(*table));
  memset(table, 0, sizeof(*table));
  gpr_ref_init(&table->refs, 1);
  table->num_entries = num_entries;
  // Quadratic probing gets best performance when the table is no more
  // than half full.
  table->size = num_entries * 2;
  const size_t entry_size = sizeof(<API key>) * table->size;
  table->entries = gpr_malloc(entry_size);
  memset(table->entries, 0, entry_size);
  for (size_t i = 0; i < num_entries; ++i) {
    <API key>* entry = &entries[i];
    <API key>(table, entry->key, entry->value, entry->vtable);
  }
  return table;
}

// Take an additional reference; NULL-safe.
<API key>* <API key>(<API key>* table) {
  if (table != NULL) gpr_ref(&table->refs);
  return table;
}

// Drop a reference; frees keys, values, and the table itself on the last
// unref.  Returns 1 when the table was destroyed, 0 otherwise.  NULL-safe.
int <API key>(<API key>* table) {
  if (table != NULL && gpr_unref(&table->refs)) {
    for (size_t i = 0; i < table->size; ++i) {
      <API key>* entry = &table->entries[i];
      if (entry->key != NULL) {
        GRPC_MDSTR_UNREF(entry->key);
        entry->vtable->destroy_value(entry->value);
      }
    }
    gpr_free(table->entries);
    gpr_free(table);
    return 1;
  }
  return 0;
}

// Number of populated entries (not the slot count).
size_t <API key>(const <API key>* table) {
  return table->num_entries;
}

// Look up `key`; returns the stored value or NULL when absent.
void* <API key>(const <API key>* table,
                const grpc_mdstr* key) {
  const size_t idx = <API key>(table, key, false /* find_empty */);
  if (idx == table->size) return NULL;  // Not found.
  return table->entries[idx].value;
}

// Total order over tables: first by entry count, then per-entry by key hash,
// vtable pointer, and finally the vtable's value comparison.
// NOTE(review): this walks entries[0..num_entries) by index, but entries are
// stored at probed slots spread across all `size` slots, so slots in that
// prefix can be empty (key == NULL) -- confirm intended behavior against
// callers/upstream before relying on this comparison.
int <API key>(const <API key>* table1,
              const <API key>* table2) {
  // Compare by num_entries.
  if (table1->num_entries < table2->num_entries) return -1;
  if (table1->num_entries > table2->num_entries) return 1;
  for (size_t i = 0; i < table1->num_entries; ++i) {
    <API key>* e1 = &table1->entries[i];
    <API key>* e2 = &table2->entries[i];
    // Compare keys by hash value.
    if (e1->key->hash < e2->key->hash) return -1;
    if (e1->key->hash > e2->key->hash) return 1;
    // Compare by vtable (pointer equality).
    if (e1->vtable < e2->vtable) return -1;
    if (e1->vtable > e2->vtable) return 1;
    // Compare values via vtable.
    const int value_result = e1->vtable->compare_value(e1->value, e2->value);
    if (value_result != 0) return value_result;
  }
  return 0;
}

// Invoke `func` once for every populated entry, in slot order.
void <API key>(
    const <API key>* table,
    void (*func)(const <API key>* entry, void* user_data), void* user_data) {
  for (size_t i = 0; i < table->size; ++i) {
    if (table->entries[i].key != NULL) {
      func(&table->entries[i], user_data);
    }
  }
}
#include "chrome/browser/ui/views/tab_contents/<API key>.h"

#include "chrome/browser/ui/view_ids.h"
#include "chrome/browser/ui/views/tab_contents/<API key>.h"
#include "chrome/browser/ui/views/tab_contents/<API key>.h"
#include "content/public/browser/web_contents.h"
#include "ui/aura/window.h"
#include "ui/base/accessibility/<API key>.h"
#include "ui/views/focus/focus_manager.h"
#include "ui/views/focus/<API key>.h"
#include "ui/views/views_delegate.h"

using content::RenderViewHost;
using content::WebContents;

////////////////////////////////////////////////////////////////////////////////
// <API key>, public:

<API key>::<API key>(
    <API key>* container)
    : container_(container) {
  set_id(<API key>);
}

<API key>::~<API key>() {
}

////////////////////////////////////////////////////////////////////////////////
// <API key>, <API key> overrides:

void <API key>::AttachContents(WebContents* contents) {
  // We need to register the tab contents window with the BrowserContainer so
  // that the BrowserContainer is the focused view when the focus is on the
  // TabContents window (for the TabContents case).
  set_focus_view(this);
  Attach(contents->GetNativeView());
}

void <API key>::DetachContents(WebContents* contents) {
  Detach();
}

// Fast-resize delegates straight to the NativeViewHost flag.
void <API key>::SetFastResize(bool fast_resize) {
  set_fast_resize(fast_resize);
}

bool <API key>::GetFastResize() const {
  return fast_resize();
}

bool <API key>::<API key>() const {
  return <API key>();
}

void <API key>::<API key>(
    RenderViewHost* old_host,
    RenderViewHost* new_host) {
  // If we are focused, we need to pass the focus to the new RenderViewHost.
  if (GetFocusManager()->GetFocusedView() == this)
    OnFocus();
}

views::View* <API key>::GetView() {
  return this;
}

void <API key>::WebContentsFocused(WebContents* contents) {
  views::FocusManager* focus_manager = GetFocusManager();
  if (!focus_manager) {
    NOTREACHED();
    return;
  }
  focus_manager->SetFocusedView(this);
}

////////////////////////////////////////////////////////////////////////////////
// <API key>, views::View overrides:

bool <API key>::<API key>(
    const views::KeyEvent& e) {
  // Don't look-up accelerators or tab-traversal if we are showing a non-crashed
  // TabContents.
  // We'll first give the page a chance to process the key events. If it does
  // not process them, they'll be returned to us and we'll treat them as
  // accelerators then.
  return container_->web_contents() &&
         !container_->web_contents()->IsCrashed();
}

bool <API key>::IsFocusable() const {
  // We need to be focusable when our contents is not a view hierarchy, as
  // clicking on the contents needs to focus us.
  return container_->web_contents() != NULL;
}

void <API key>::OnFocus() {
  if (container_->web_contents())
    container_->web_contents()->Focus();
}

void <API key>::RequestFocus() {
  // This is a hack to circumvent the fact that the OnFocus() method is not
  // invoked when RequestFocus() is called on an already focused view.
  // The <API key> is the view focused when the TabContents has
  // focus. When switching from one tab that has focus to another tab
  // that should also have focus, RequestFocus() is invoked on the
  // <API key>. In order to make sure OnFocus() is invoked we need
  // to clear the focus beforehand.
  if (GetFocusManager()) {
    // Disable notifications. Clear focus will assign the focus to the main
    // browser window. Because this change of focus was not user requested,
    // don't send it to listeners.
    views::<API key> <API key>;
    GetFocusManager()->ClearFocus();
  }
  View::RequestFocus();
}

void <API key>::<API key>(
    bool reverse) {
  container_->web_contents()-><API key>(reverse);
}

void <API key>::GetAccessibleState(
    ui::AccessibleViewState* state) {
  state->role = ui::AccessibilityTypes::ROLE_GROUPING;
}

gfx::<API key> <API key>::<API key>() {
  // TODO(beng):
  NOTIMPLEMENTED();
  return View::<API key>();
}

////////////////////////////////////////////////////////////////////////////////
// <API key>, public:

// static
<API key>* <API key>::<API key>(
    <API key>* container) {
  return new <API key>(container);
}
"""Write back all data it receives."""
import sys

# Echo stdin to stdout one byte at a time, flushing after every byte so the
# other end of the pipe sees each byte immediately.
while True:
    chunk = sys.stdin.read(1)
    if not chunk:
        break
    sys.stdout.write(chunk)
    sys.stdout.flush()
# Signal completion on stderr once stdin reaches EOF.
sys.stderr.write("byebye")
sys.stderr.flush()
#!/usr/bin/env python
# Python 2 test module (uses the `print` statement and Dingus mocks).
# Tests for stompy.simple.Client: the stomp connection is replaced with a
# Dingus, so each test only verifies that the expected call was forwarded
# with the expected arguments.
from dingus import Dingus, DingusTestCase, DontCare
import nose.tools as nose_tools
import sys
import socket

from stompy import simple
from stompy.simple import Client, TransactionError


class <API key>(DingusTestCase(Client, exclude=['TransactionError', 'Empty'])):

    def setup(self):
        super(<API key>, self).setup()
        self.client = Client()

    def should_connect(self):
        self.client.connect()
        assert self.client.stomp.calls('connect')

    def should_disconnect(self):
        self.client.disconnect()
        assert self.client.stomp.calls('disconnect')

    def should_subscribe(self):
        self.client.subscribe('/queue/nose_test')
        print self.client.stomp.calls
        assert self.client.stomp.calls('subscribe',
                                       {'ack': 'auto',
                                        'destination': '/queue/nose_test'})

    def should_unsubscribe(self):
        self.client.unsubscribe('/queue/nose_test')
        assert self.client.stomp.calls('unsubscribe',
                                       {'destination': '/queue/nose_test'})

    def <API key>(self):
        # Beginning a transaction forwards the generated transaction id.
        self.client.begin('bah')
        assert self.client.stomp.calls('begin',
                                       {"transaction": self.client.<API key>})

    def <API key>(self):
        # A second begin while one is in progress must raise.
        self.client.<API key> = "meh"
        nose_tools.assert_raises(TransactionError, self.client.begin, 'bah')

    def <API key>(self):
        self.client.<API key> = 'meh'
        self.client.commit('bah')
        assert self.client.stomp.calls('commit', {'transaction': 'meh'})

    def <API key>(self):
        # Commit without an active transaction must raise.
        nose_tools.assert_raises(TransactionError, self.client.commit, 'bah')

    def <API key>(self):
        self.client.<API key> = 'meh'
        self.client.abort()
        assert self.client.stomp.calls('abort', {'transaction': 'meh'})

    def <API key>(self):
        # Abort without an active transaction must raise.
        nose_tools.assert_raises(TransactionError, self.client.abort)

    def should_ack_message(self):
        self.client.ack("fake_frame")
        assert self.client.stomp.calls('ack', "fake_frame")

    def should_make_conf(self):
        conf = self.client._make_conf(None, destination='/queue/nose_test',
                                      ack='auto')
        assert isinstance(conf, type({}))

    def <API key>(self):
        # _make_conf also accepts an existing dict as its base.
        self.client.<API key> = 'meh'
        conf = self.client._make_conf({}, destination='/queue/nose_test',
                                      ack='auto')
        assert isinstance(conf, type({}))

    def <API key>(self):
        self.client.put('bah', '/queue/nose_test')
        conf = self.client._make_conf(None, body='bah',
                                      destination='/queue/nose_test',
                                      persistent='true')
        assert self.client.stomp.calls('send', conf)

    def should_get_message(self):
        self.client.get()
        assert self.client.stomp.calls('receive_frame', nonblocking=False,
                                       callback=None)

    def <API key>(self):
        self.client.get_nowait()
        assert self.client.stomp.calls('receive_frame', nonblocking=True,
                                       callback=None)

    def <API key>(self):
        # A None frame from receive_frame surfaces as the Empty exception.
        self.client.stomp.receive_frame.return_value = None
        nose_tools.assert_raises(self.client.Empty, self.client.get,
                                 block=False)
<?php

// Yii2 form partial for creating/updating a StageRepairPart record.
// Rendered from the matching create/update views; $model is the record
// being edited.

use yii\helpers\Html;
use yii\widgets\ActiveForm;
use app\commands\MainFunctions;

/* @var $this yii\web\View */
/* @var $model common\models\StageRepairPart */
/* @var $form yii\widgets\ActiveForm */
?>

<div class="<API key>">

    <?php $form = ActiveForm::begin(); ?>

    <?= $form->field($model, 'repairPartUuid')->textInput(['maxlength' => true]) ?>

    <?= $form->field($model, 'stagePatternUuid')->textInput(['maxlength' => true]) ?>

    <div class="form-group">
        <?php // Button label and style depend on whether the record is new. ?>
        <?= Html::submitButton($model->isNewRecord ? Yii::t('app', 'Создать') : Yii::t('app', 'Обновить'),
            ['class' => $model->isNewRecord ? 'btn btn-success' : 'btn btn-primary']) ?>
    </div>

    <?php ActiveForm::end(); ?>

</div>
from __future__ import unicode_literals

import copy

from dateutil.relativedelta import relativedelta
import six

from dash.utils import get_month_range

from django import forms
from django.forms.forms import <API key>
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _

from . import fields as filter_fields
from . import utils


class FilterForm(forms.Form):
    # Base form for org-scoped filters; pops the `org` kwarg before
    # delegating to the standard Form constructor.

    def __init__(self, *args, **kwargs):
        self.org = kwargs.pop('org')
        super(FilterForm, self).__init__(*args, **kwargs)

        # Create a shallow copy of the data to ensure that it is
        # mutable. Some filters need the ability to overwrite the
        # data that was passed in.
        if self.data is not None:
            self.data = copy.copy(self.data)


class Filter(six.with_metaclass(<API key>, object)):
    # The metaclass is what does the work to set up fields
    # that are declared as attributes of the class.
    pass


class DateRangeFilter(Filter):
    # Adds a date-range selector: either a named window (e.g. "Last 30
    # days") or a custom start/end pair.

    DATE_WINDOW_CHOICES = (
        ('', ''),
        ('month', _("Current month")),
        ('30-days', _("Last 30 days")),
        ('60-days', _("Last 60 days")),
        ('90-days', _("Last 90 days")),
        ('6-months', _("Last 6 months")),
        ('12-months', _("Last 12 months")),
        ('custom', _("Custom range...")),
    )

    date_range = forms.ChoiceField(
        label=_("Date range"),
        choices=DATE_WINDOW_CHOICES)
    start_date = filter_fields.FilterDateField(
        label=_("Start date"),
        required=False)
    end_date = filter_fields.FilterDateField(
        label=_("End date"),
        required=False)

    def clean(self):
        self.cleaned_data = super(DateRangeFilter, self).clean()
        window = self.cleaned_data.get('date_range')

        if window == 'custom':
            # Only apply additional checks if data did not have errors.
            if 'start_date' not in self.errors and 'end_date' not in self.errors:
                start_date = self.cleaned_data.get('start_date')
                end_date = self.cleaned_data.get('end_date')

                # Require at least one date filter.
                if not start_date and not end_date:
                    self.add_error(
                        forms.ALL_FIELDS,
                        _("Please choose a start date or an end date."))

                # Ensure date filter order makes sense.
                elif (start_date and end_date) and start_date > end_date:
                    self.add_error(
                        'end_date',
                        _("End date must be after start date."))

                # Set default values for start date and end date.
                else:
                    self.cleaned_data.setdefault('start_date', None)
                    self.cleaned_data.setdefault('end_date', None)
                    self.data.setdefault('start_date', None)
                    self.data.setdefault('end_date', None)

        else:
            # Throw out user-submitted dates.
            self.cleaned_data.pop('start_date', None)
            self.cleaned_data.pop('end_date', None)
            self.data.pop('start_date', None)
            self.data.pop('end_date', None)
            self._errors.pop('start_date', None)
            self._errors.pop('end_date', None)

            # Calculate the correct date window.
            if window:
                if window == 'month':
                    # get_month_range() returns a tuple with datetimes
                    # representing midnight of the first day of the current
                    # month, and midnight of the first day of the following
                    # month.
                    start_date, end_date = get_month_range()
                    # Show the user the last day of the month,
                    # e.g., show June 1 to June 30 rather than June 1 to July 1.
                    end_date = end_date - relativedelta(days=1)
                else:
                    number, unit = window.split('-')  # e.g., 6-months
                    end_date = utils.midnight(timezone.now())
                    start_date = end_date - relativedelta(**{unit: int(number)})
                self.cleaned_data['start_date'] = start_date
                self.cleaned_data['end_date'] = end_date
                self.data['start_date'] = start_date
                self.data['end_date'] = end_date

        # Pad the end_date by one day so that results for all times during
        # the end_date are accounted for in the query.
        end_date = self.cleaned_data.get('end_date')
        if end_date is not None:
            self.cleaned_data['end_date'] = end_date + relativedelta(days=1)

        return self.cleaned_data


class DataFieldFilter(Filter):
    # Dynamically adds one free-text field per visible org data field, named
    # "contact_<key>", which can then be used to filter contacts.

    def __init__(self, *args, **kwargs):
        super(DataFieldFilter, self).__init__(*args, **kwargs)
        self.contact_fields = []
        for data_field in self.org.datafield_set.visible():
            field_name = 'contact_{}'.format(data_field.key)
            self.contact_fields.append((field_name, data_field))
            self.fields[field_name] = forms.CharField(
                label='Contact: {}'.format(data_field.display_name),
                required=False)

    def filter_contacts(self, queryset=None):
        """Filter queryset to match all contact field search input."""
        contacts = queryset if queryset is not None else self.org.contacts.all()
        for name, data_field in self.contact_fields:
            value = self.cleaned_data.get(name)
            if value:
                contacts = contacts.filter(
                    contactfield__field=data_field,
                    <API key>=value)
        return contacts
#ifndef __JUNZIP_H
#define __JUNZIP_H

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

#include <stdint.h>
// NOTE(review): off_t (used in JZFile below) is not provided by <stdint.h>;
// on POSIX it comes from <sys/types.h> -- confirm that including translation
// units supply it.

// If you don't have stdint.h, the following two lines should work for most 32/64 bit systems
// typedef unsigned int uint32_t;
// typedef unsigned short uint16_t;

typedef struct JZFile JZFile;

// In-memory view of a ZIP archive plus a read cursor.
struct JZFile {
    unsigned char *start;  // start of the ZIP data in memory
    off_t length;          // total length of the data, in bytes
    long position;         // current read offset from `start`
    int numEntries;        // entry count (presumably filled in by
                           // jzReadEndRecord -- confirm in junzip.c)
    uint32_t <API key>;    // value read from the end record (name redacted)
};

// Cursor helpers over the in-memory buffer.
#define zf_tell(ZF) ((ZF)->position)
#define zf_available(ZF) ((ZF)->length - (ZF)->position)
#define zf_current(ZF) ((ZF)->start + (ZF)->position)

#define <API key> 30

// Parsed per-entry header fields (local/central record subset).
typedef struct {
    uint16_t compressionMethod;
    uint32_t crc32;
    uint32_t compressedSize;
    uint32_t uncompressedSize;
    long fileNameStart;
    uint16_t fileNameLength;
    uint16_t extraFieldLength; // unsupported
    uint32_t offset;
} JZFileHeader;

// Callback prototype for central and local file record reading functions
typedef int (*JZRecordCallback)(JZFile *zip, int index, JZFileHeader *header);

// Read ZIP file end record. Will move within file.
int jzReadEndRecord(JZFile *zip);

// Read ZIP file global directory. Will move within file.
// Callback is called for each record, until callback returns zero
int <API key>(JZFile *zip, JZRecordCallback callback);

// Seek to the start of the actual data of the given entry.
int jzSeekData(JZFile *zip, JZFileHeader *header);

// Read data from file stream, described by header, to preallocated buffer
// Return value is zlib coded, e.g. Z_OK, or error code
int jzReadData(JZFile *zip, JZFileHeader *header, void *buffer);

#ifdef __cplusplus
};
#endif /* __cplusplus */

#endif
/* TEMPLATE GENERATED TESTCASE FILE
Filename: <API key>.cpp
Label Definition File: <API key>.pointer.label.xml
Template File: sources-sink-67b.tmpl.cpp
*/
/*
 * @description
 * CWE: 590 Free Memory Not on Heap
 * BadSource: placement_new Data buffer is declared on the stack
 * GoodSource: Allocate memory on the heap
 * Sinks:
 *    BadSink : Print then free data
 * Flow Variant: 67 Data flow: data passed in a struct from one function to another in different source files
 *
 * NOTE: This is a deliberately flawed Juliet/SARD test case. The "POTENTIAL
 * FLAW" sites below are the point of the file and must NOT be fixed. The
 * matching sources (which allocate `structFirst` either on the stack for the
 * bad case or on the heap for the good case) live in the companion 67a file.
 */

#include "std_testcase.h"

#include <wchar.h>

namespace <API key>
{

/* Carrier struct used to pass the pointer across translation units. */
typedef struct _structType
{
    int * structFirst;
} structType;

#ifndef OMITBAD

/* Bad sink: frees a pointer that the 67a bad source aimed at stack memory. */
void badSink(structType myStruct)
{
    int * data = myStruct.structFirst;
    printIntLine(*data);
    /* POTENTIAL FLAW: Possibly deallocating memory allocated on the stack */
    delete data;
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
/* Here the 67a good source allocates on the heap, so the delete is valid. */
void goodG2BSink(structType myStruct)
{
    int * data = myStruct.structFirst;
    printIntLine(*data);
    /* POTENTIAL FLAW: Possibly deallocating memory allocated on the stack */
    delete data;
}

#endif /* OMITGOOD */

} /* close namespace */
{% extends "speakers/base.html" %}

{% load timezone_filters %}
{% load thumbnail %}

{# Speaker profile page: optional photo thumbnail, biography, and the #}
{# speaker's scheduled presentations (when any sessions are passed in). #}

{% block body_class %}full schedule{% endblock %}

{% block content %}
    <h1>Speaker Profile</h1>

    <h2>{{ speaker.name }}</h2>

    {% if speaker.photo %}
        {# Render a 128px square thumbnail of the uploaded photo. #}
        {% thumbnail speaker.photo "128" as speakerphoto %}
            <div class="photo"><img src="{{ speakerphoto.url }}" width="128" height="128" alt="{{ speaker.name }}" /></div>
        {% endthumbnail %}
    {% endif %}

    <div class="bio">{{ speaker.biography|safe }}</div>

    {% if sessions %}
        <h3>Presentations</h3>
        <dl class="sessions">
            {% for session in sessions %}
                {# Times are converted to the viewer's timezone before display. #}
                <dt>{{ session.slot.start|localtime:timezone|date:"F jS" }}
                    {{ session.slot.start|localtime:timezone|date:"P" }} &ndash;
                    {{ session.slot.end|localtime:timezone|date:"P" }}</dt>
                <dd><a href="{% url <API key> session.id %}">{{ session.title }}</a></dd>
            {% endfor %}
        </dl>
    {% endif %}
{% endblock %}
package org.javacc.parser;

import java.io.*;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.javacc.utils.JavaFileGenerator;

import static org.javacc.parser.JavaCCGlobals.*;

/**
 * Generate lexer.
 *
 * Emits the C++ token manager for a JavaCC grammar. The class inherits all
 * of its working state (maxOrdinal, lexStateName, toSkip/toMore/toToken bit
 * vectors, etc.) from LexGen; every Dump* method below writes generated C++
 * text through genCode/genCodeLine into the main/include/statics output
 * buffers selected by switchToMainFile()/switchToIncludeFile()/
 * switchToStaticsFile().
 */
public class LexGenCPP extends LexGen //CodeGenerator implements <API key>
{
  /**
   * Emits the head of the generated token manager: standard #includes, the
   * optional user namespace, the class declaration (optionally extending a
   * user-supplied superclass), the verbatim TOKEN_MGR_DECLS block, and the
   * debug-stream member. Also warns if COMMON_TOKEN_ACTION is set but the
   * user never defined CommonTokenAction in TOKEN_MGR_DECLS.
   */
  @Override
  void PrintClassHead()
  {
    int i, j;

    List tn = new ArrayList(toolNames);
    tn.add(toolName);

    switchToStaticsFile();

    //standard includes
    switchToIncludeFile();
    genCodeLine("#include \"JavaCC.h\"");
    genCodeLine("#include \"CharStream.h\"");
    genCodeLine("#include \"Token.h\"");
    genCodeLine("#include \"ErrorHandler.h\"");
    genCodeLine("#include \"TokenManager.h\"");
    genCodeLine("#include \"" + cu_name + "Constants.h\"");

    if (Options.stringValue(Options.USEROPTION__CPP_<API key>).length() > 0) {
      genCodeLine("#include \"" + Options.stringValue(Options.USEROPTION__CPP_<API key>) + "\"\n");
    }

    if (Options.stringValue(Options.<API key>).length() > 0) {
      genCodeLine("namespace " + Options.stringValue("NAMESPACE_OPEN"));
    }

    // forward declaration of the parser class so the token manager can hold
    // a pointer to it
    genCodeLine("class " + cu_name + ";");

    int l = 0, kind;
    i = 1;
    /* namespace? for (;;) { if (<API key>.size() <= l) break; kind = ((Token)<API key>.get(l)).kind; if(kind == PACKAGE || kind == IMPORT) { for (; i < <API key>.size(); i++) { kind = ((Token)<API key>.get(i)).kind; if (kind == CLASS) { cline = ((Token)(<API key>.get(l))).beginLine; ccol = ((Token)(<API key>.get(l))).beginColumn; for (j = l; j < i; j++) { printToken((Token)(<API key>.get(j))); } if (kind == SEMICOLON) printToken((Token)(<API key>.get(j))); genCodeLine(""); break; } } l = ++i; } else break; }*/

    genCodeLine("");
    genCodeLine("/** Token Manager. */");
    String superClass = Options.stringValue(Options.USEROPTION__<API key>);
    genClassStart(null, tokMgrClassName, new String[]{}, new String[]{"public TokenManager" + (superClass == null ? "" : ", public " + superClass) });

    if (token_mgr_decls != null && token_mgr_decls.size() > 0)
    {
      // Copy the user's TOKEN_MGR_DECLS verbatim, while scanning its tokens
      // for an identifier named CommonTokenAction.
      Token t = (Token)token_mgr_decls.get(0);
      boolean <API key> = false;
      boolean <API key> = Options.<API key>();

      printTokenSetup((Token)token_mgr_decls.get(0)); ccol = 1;

      for (j = 0; j < token_mgr_decls.size(); j++)
      {
        t = (Token)token_mgr_decls.get(j);
        if (t.kind == IDENTIFIER &&
            <API key> &&
            !<API key>)
          <API key> = t.image.equals("CommonTokenAction");

        printToken(t);
      }

      genCodeLine("");
      if (<API key> && !<API key>)
        JavaCCErrors.warning("You have the COMMON_TOKEN_ACTION option set. " +
            "But it appears you have not defined the method :\n"+
            "      " + staticString + "void CommonTokenAction(Token *t)\n" +
            "in your TOKEN_MGR_DECLS. The generated token manager will not compile.");
    }
    else if (Options.<API key>())
      JavaCCErrors.warning("You have the COMMON_TOKEN_ACTION option set. " +
          "But you have not defined the method :\n"+
          "      " + staticString + "void CommonTokenAction(Token *t)\n" +
          "in your TOKEN_MGR_DECLS. The generated token manager will not compile.");

    genCodeLine("");
    genCodeLine("  /** Debug output. */");
    genCodeLine("  FILE *debugStream;");
    genCodeLine("  /** Set debug output. */");

    <API key>("void ", tokMgrClassName, "setDebugStream(FILE *ds)");
    genCodeLine("{ debugStream = ds; }");

    switchToIncludeFile();
    if(Options.get<API key>() && !Options.getStatic()){
      genCodeLine("");
      genCodeLine("  /** The parser. */");
      genCodeLine("  public: " + cu_name + " parser = NULL;");
    }
    switchToMainFile();
  }

  /** Emits the kind/state debug helpers from the shared CPP template. */
  void DumpDebugMethods() throws IOException
  {
    writeTemplate("/templates/cpp/DumpDebugMethods.template",
        "maxOrdinal", maxOrdinal,
        "stateSetSize", stateSetSize);
  }

  /**
   * First pass over all token productions: discovers every lexical state,
   * computes maxOrdinal, and (re)allocates the per-ordinal and per-state
   * tables (kinds, toSkip/toSpecial/toMore/toToken bit vectors, actions,
   * newLexState, etc.) that the rest of the generator fills in.
   */
  static void BuildLexStatesTable()
  {
    Iterator it = rexprlist.iterator();
    TokenProduction tp;
    int i;

    String[] tmpLexStateName = new String[lexstate_I2S.size()];
    while (it.hasNext())
    {
      tp = (TokenProduction)it.next();
      List respecs = tp.respecs;
      List tps;

      // register this production under every lexical state it belongs to
      for (i = 0; i < tp.lexStates.length; i++)
      {
        if ((tps = (List)allTpsForState.get(tp.lexStates[i])) == null)
        {
          tmpLexStateName[maxLexStates++] = tp.lexStates[i];
          allTpsForState.put(tp.lexStates[i], tps = new ArrayList());
        }

        tps.add(tp);
      }

      if (respecs == null || respecs.size() == 0)
        continue;

      // track the largest regular-expression ordinal seen so far
      RegularExpression re;
      for (i = 0; i < respecs.size(); i++)
        if (maxOrdinal <= (re = ((RegExprSpec)respecs.get(i)).rexp).ordinal)
          maxOrdinal = re.ordinal + 1;
    }

    kinds = new int[maxOrdinal];
    toSkip = new long[maxOrdinal / 64 + 1];
    toSpecial = new long[maxOrdinal / 64 + 1];
    toMore = new long[maxOrdinal / 64 + 1];
    toToken = new long[maxOrdinal / 64 + 1];
    toToken[0] = 1L;  // ordinal 0 is the EOF token
    actions = new Action[maxOrdinal];
    actions[0] = actForEof;
    hasTokenActions = actForEof != null;
    initStates = new Hashtable();
    canMatchAnyChar = new int[maxLexStates];
    canLoop = new boolean[maxLexStates];
    stateHasActions = new boolean[maxLexStates];
    lexStateName = new String[maxLexStates];
    singlesToSkip = new NfaState[maxLexStates];
    System.arraycopy(tmpLexStateName, 0, lexStateName, 0, maxLexStates);

    for (i = 0; i < maxLexStates; i++)
      canMatchAnyChar[i] = -1;

    hasNfa = new boolean[maxLexStates];
    mixed = new boolean[maxLexStates];
    maxLongsReqd = new int[maxLexStates];
    initMatch = new int[maxLexStates];
    newLexState = new String[maxOrdinal];
    newLexState[0] = nextStateForEof;
    hasEmptyMatch = false;
    lexStates = new int[maxOrdinal];
    ignoreCase = new boolean[maxOrdinal];
    rexprs = new RegularExpression[maxOrdinal];
    RStringLiteral.allImages = new String[maxOrdinal];
    canReachOnMore = new boolean[maxLexStates];
  }

  /** Returns the index of the named lexical state in lexStateName. */
  static int GetIndex(String name)
  {
    for (int i = 0; i < lexStateName.length; i++)
      if (lexStateName[i] != null && lexStateName[i].equals(name))
        return i;

    throw new Error(); // Should never come here
  }

  /** Records a single character to skip (with its kind) in the current lexical state. */
  public static void AddCharToSkip(char c, int kind)
  {
    singlesToSkip[lexStateIndex].AddChar(c);
    singlesToSkip[lexStateIndex].kind = kind;
  }

  /**
   * Main entry point: drives the whole token-manager generation. For each
   * lexical state it builds DFAs for string literals and an NFA for the
   * rest, classifies every ordinal as TOKEN/SKIP/MORE/SPECIAL, then emits
   * all tables, getNextToken(), the lexical-action methods, and finally
   * saves the generated C++ file.
   */
  public void start() throws IOException
  {
    if (!Options.<API key>() || Options.getUserTokenManager() || JavaCCErrors.get_error_count() > 0)
      return;

    keepLineCol = Options.getKeepLineColumn();
    List choices = new ArrayList();
    Enumeration e;
    TokenProduction tp;
    int i, j;

    staticString = (Options.getStatic() ? "static " : "");
    tokMgrClassName = cu_name + "TokenManager";

    PrintClassHead();
    BuildLexStatesTable();

    e = allTpsForState.keys();

    boolean ignoring = false;

    while (e.hasMoreElements())
    {
      // process one lexical state per iteration
      NfaState.ReInit();
      RStringLiteral.ReInit();

      String key = (String)e.nextElement();

      lexStateIndex = GetIndex(key);
      lexStateSuffix = "_" + lexStateIndex;
      List allTps = (List)allTpsForState.get(key);
      initStates.put(key, initialState = new NfaState());
      ignoring = false;

      singlesToSkip[lexStateIndex] = new NfaState();
      singlesToSkip[lexStateIndex].dummy = true;

      if (key.equals("DEFAULT"))
        defaultLexState = lexStateIndex;

      for (i = 0; i < allTps.size(); i++)
      {
        tp = (TokenProduction)allTps.get(i);
        int kind = tp.kind;
        boolean ignore = tp.ignoreCase;
        List rexps = tp.respecs;

        if (i == 0)
          ignoring = ignore;

        for (j = 0; j < rexps.size(); j++)
        {
          RegExprSpec respec = (RegExprSpec)rexps.get(j);

          curRE = respec.rexp;

          rexprs[curKind = curRE.ordinal] = curRE;
          lexStates[curRE.ordinal] = lexStateIndex;
          ignoreCase[curRE.ordinal] = ignore;

          if (curRE.private_rexp)
          {
            // private regexps get no kind of their own
            kinds[curRE.ordinal] = -1;
            continue;
          }

          if (curRE instanceof RStringLiteral &&
              !((RStringLiteral)curRE).image.equals(""))
          {
            // string literals go through the DFA path
            ((RStringLiteral)curRE).GenerateDfa(this, curRE.ordinal);
            if (i != 0 && !mixed[lexStateIndex] && ignoring != ignore)
              mixed[lexStateIndex] = true;
          }
          else if (curRE.CanMatchAnyChar())
          {
            if (canMatchAnyChar[lexStateIndex] == -1 ||
                canMatchAnyChar[lexStateIndex] > curRE.ordinal)
              canMatchAnyChar[lexStateIndex] = curRE.ordinal;
          }
          else
          {
            // everything else contributes to the state's NFA
            Nfa temp;

            if (curRE instanceof RChoice)
              choices.add(curRE);

            temp = curRE.GenerateNfa(ignore);
            temp.end.isFinal = true;
            temp.end.kind = curRE.ordinal;
            initialState.AddMove(temp.start);
          }

          if (kinds.length < curRE.ordinal)
          {
            int[] tmp = new int[curRE.ordinal + 1];

            System.arraycopy(kinds, 0, tmp, 0, kinds.length);
            kinds = tmp;
          }
          //System.out.println("   ordina : " + curRE.ordinal);

          kinds[curRE.ordinal] = kind;

          if (respec.nextState != null &&
              !respec.nextState.equals(lexStateName[lexStateIndex]))
            newLexState[curRE.ordinal] = respec.nextState;

          if (respec.act != null && respec.act.getActionTokens() != null &&
              respec.act.getActionTokens().size() > 0)
            actions[curRE.ordinal] = respec.act;

          // classify the ordinal into the per-category bit vectors
          switch(kind)
          {
          case TokenProduction.SPECIAL :
            hasSkipActions |= (actions[curRE.ordinal] != null) ||
            (newLexState[curRE.ordinal] != null);
            hasSpecial = true;
            toSpecial[curRE.ordinal / 64] |= 1L << (curRE.ordinal % 64);
            toSkip[curRE.ordinal / 64] |= 1L << (curRE.ordinal % 64);
            break;
          case TokenProduction.SKIP :
            hasSkipActions |= (actions[curRE.ordinal] != null);
            hasSkip = true;
            toSkip[curRE.ordinal / 64] |= 1L << (curRE.ordinal % 64);
            break;
          case TokenProduction.MORE :
            hasMoreActions |= (actions[curRE.ordinal] != null);
            hasMore = true;
            toMore[curRE.ordinal / 64] |= 1L << (curRE.ordinal % 64);

            if (newLexState[curRE.ordinal] != null)
              canReachOnMore[GetIndex(newLexState[curRE.ordinal])] = true;
            else
              canReachOnMore[lexStateIndex] = true;

            break;
          case TokenProduction.TOKEN :
            hasTokenActions |= (actions[curRE.ordinal] != null);
            toToken[curRE.ordinal / 64] |= 1L << (curRE.ordinal % 64);
            break;
          }
        }
      }

      // Generate a static block for initializing the nfa transitions
      NfaState.ComputeClosures();

      for (i = 0; i < initialState.epsilonMoves.size(); i++)
        ((NfaState)initialState.epsilonMoves.elementAt(i)).GenerateCode();

      if (hasNfa[lexStateIndex] = (NfaState.generatedStates != 0))
      {
        initialState.GenerateCode();
        initialState.GenerateInitMoves(this);
      }

      // the initial state itself may accept (empty-string match)
      if (initialState.kind != Integer.MAX_VALUE && initialState.kind != 0)
      {
        if ((toSkip[initialState.kind / 64] & (1L << initialState.kind)) != 0L ||
            (toSpecial[initialState.kind / 64] & (1L << initialState.kind)) != 0L)
          hasSkipActions = true;
        else if ((toMore[initialState.kind / 64] & (1L << initialState.kind)) != 0L)
          hasMoreActions = true;
        else
          hasTokenActions = true;

        if (initMatch[lexStateIndex] == 0 ||
            initMatch[lexStateIndex] > initialState.kind)
        {
          initMatch[lexStateIndex] = initialState.kind;
          hasEmptyMatch = true;
        }
      }
      else if (initMatch[lexStateIndex] == 0)
        initMatch[lexStateIndex] = Integer.MAX_VALUE;

      RStringLiteral.FillSubString();

      if (hasNfa[lexStateIndex] && !mixed[lexStateIndex])
        RStringLiteral.<API key>(this, initialState);

      RStringLiteral.DumpDfaCode(this);

      if (hasNfa[lexStateIndex])
        NfaState.DumpMoveNfa(this);

      if (stateSetSize < NfaState.generatedStates)
        stateSetSize = NfaState.generatedStates;
    }

    for (i = 0; i < choices.size(); i++)
      ((RChoice)choices.get(i)).CheckUnmatchability();

    NfaState.DumpStateSets(this);
    <API key>();
    NfaState.<API key>(this);
    RStringLiteral.<API key>(this);
    DumpFillToken();
    DumpGetNextToken();

    if (Options.<API key>())
    {
      NfaState.DumpStatesForKind(this);
      DumpDebugMethods();
    }

    if (hasLoop)
    {
      // per-state bookkeeping for the empty-string infinite-loop guard
      switchToStaticsFile();
      genCodeLine("static int  jjemptyLineNo[" + maxLexStates + "];");
      genCodeLine("static int  jjemptyColNo[" + maxLexStates + "];");
      genCodeLine("static bool jjbeenHere[" + maxLexStates + "];");
      switchToMainFile();
    }

    if (hasSkipActions)
      DumpSkipActions();
    if (hasMoreActions)
      DumpMoreActions();
    if (hasTokenActions)
      DumpTokenActions();

    NfaState.PrintBoilerPlateCPP(this);

    String charStreamName;
    if (Options.getUserCharStream())
      charStreamName = "CharStream";
    else
    {
      if (Options.<API key>())
        charStreamName = "JavaCharStream";
      else
        charStreamName = "SimpleCharStream";
    }

    writeTemplate("/templates/cpp/<API key>.template",
        "charStreamName", "CharStream",
        "parserClassName", cu_name,
        "defaultLexState", "defaultLexState",
        "lexStateNameLength", lexStateName.length);

    <API key>(); // in the include file close the class signature
    <API key>(); // static vars actually inst

    switchToIncludeFile(); // remaining variables
    writeTemplate("/templates/cpp/DumpVarDeclarations.template",
        "charStreamName", "CharStream",
        "lexStateNameLength", lexStateName.length);
    genCodeLine( "};");

    switchToStaticsFile();
    // TODO :: CBA -- Require Unification of output language specific processing into a single Enum class
    String fileName = Options.getOutputDirectory() + File.separator +
        tokMgrClassName +
        getFileExtension(Options.getOutputLanguage());
    saveOutput(fileName);
  }

  /**
   * Emits the remaining public declarations of the token manager into the
   * include file: the JAVACC_CHARSTREAM fallback, the parser back-pointer,
   * constructor/destructor, ReInit, SwitchTo and the kind-listing helpers.
   */
  private void <API key>()
  {
    switchToIncludeFile();
    genCodeLine("#ifndef JAVACC_CHARSTREAM");
    genCodeLine("#define JAVACC_CHARSTREAM CharStream");
    genCodeLine("#endif");
    genCodeLine("  private: " + cu_name + "*parser;");
    genCodeLine("  private: void ReInitRounds();");
    genCodeLine("  public: " + tokMgrClassName + "(JAVACC_CHARSTREAM *stream, int lexState = " + defaultLexState + ", " + cu_name + " *parserArg = NULL);");
    genCodeLine("  public: virtual ~" + tokMgrClassName + "();");
    genCodeLine("  void ReInit(JAVACC_CHARSTREAM *stream, int lexState = " + defaultLexState + ", " + cu_name + " *parserArg = NULL);");
    genCodeLine("  void SwitchTo(int lexState);");
    genCodeLine("  const <API key> jjKindsForBitVector(int i, " + Options.getLongType() + " vec);");
    genCodeLine("  const <API key> <API key>(int lexState, int vec[], int start, int end);");
  }

  /**
   * Emits the static data tables into the statics file: the lexical-state
   * name array, jjnewLexState (state transition per ordinal) and the
   * jjtoToken/jjtoSkip/jjtoSpecial classification bit vectors.
   */
  private void <API key>() throws IOException
  {
    int i;

    switchToStaticsFile(); // remaining variables
    genCodeLine("");
    genCodeLine("/** Lexer state names. */");
    <API key>("lexStateNames", lexStateName);

    if (maxLexStates > 1)
    {
      genCodeLine("");
      genCodeLine("/** Lex State array. */");
      genCode("static const int jjnewLexState[] = {");

      for (i = 0; i < maxOrdinal; i++)
      {
        if (i % 25 == 0)
          genCode("\n   ");

        if (newLexState[i] == null)
          genCode("-1, ");
        else
          genCode(GetIndex(newLexState[i]) + ", ");
      }
      genCodeLine("\n};");
    }

    if (hasSkip || hasMore || hasSpecial)
    {
      // Bit vector for TOKEN
      genCode("static const " + Options.getLongType() + " jjtoToken[] = {");
      for (i = 0; i < maxOrdinal / 64 + 1; i++)
      {
        if (i % 4 == 0)
          genCode("\n   ");
        genCode("0x" + Long.toHexString(toToken[i]) + "L, ");
      }
      genCodeLine("\n};");
    }

    if (hasSkip || hasSpecial)
    {
      // Bit vector for SKIP
      genCode("static const " + Options.getLongType() + " jjtoSkip[] = {");
      for (i = 0; i < maxOrdinal / 64 + 1; i++)
      {
        if (i % 4 == 0)
          genCode("\n   ");
        genCode("0x" + Long.toHexString(toSkip[i]) + "L, ");
      }
      genCodeLine("\n};");
    }

    if (hasSpecial)
    {
      // Bit vector for SPECIAL
      genCode("static const " + Options.getLongType() + " jjtoSpecial[] = {");
      for (i = 0; i < maxOrdinal / 64 + 1; i++)
      {
        if (i % 4 == 0)
          genCode("\n  ");
        genCode("0x" + Long.toHexString(toSpecial[i]) + "L, ");
      }
      genCodeLine("\n};");
    }

    /*if (hasMore) // Not needed as we just use else
    {
      // Bit vector for MORE
      genCode("static const " + Options.getLongType() + " jjtoMore[] = {");
      for (i = 0; i < maxOrdinal / 64 + 1; i++)
      {
        if (i % 4 == 0)
          genCode("\n   ");
        genCode("0x" + Long.toHexString(toMore[i]) + "L, ");
      }
      genCodeLine("\n};");
    }*/
  }

  /**
   * Emits jjFillToken(): builds a Token object from the current match
   * (image, kind, and -- when KEEP_LINE_COLUMN is on -- begin/end line and
   * column), going through the user token factory when one is configured.
   */
  void DumpFillToken()
  {
    final double tokenVersion = JavaFiles.getVersion("Token.java");
    final boolean hasBinaryNewToken = tokenVersion > 4.09;

    <API key>("Token *", tokMgrClassName, "jjFillToken()");
    genCodeLine("{");
    genCodeLine("   Token *t;");
    genCodeLine("   JAVACC_STRING_TYPE curTokenImage;");
    if (keepLineCol)
    {
      genCodeLine("   int beginLine;");
      genCodeLine("   int endLine;");
      genCodeLine("   int beginColumn;");
      genCodeLine("   int endColumn;");
    }

    if (hasEmptyMatch)
    {
      // empty matches have no image in the stream; use the accumulated image
      genCodeLine("   if (jjmatchedPos < 0)");
      genCodeLine("   {");
      genCodeLine("       curTokenImage = image.c_str();");

      if (keepLineCol)
      {
        genCodeLine("   if (input_stream->getTrackLineColumn()) {");
        genCodeLine("      beginLine = endLine = input_stream->getEndLine();");
        genCodeLine("      beginColumn = endColumn = input_stream->getEndColumn();");
        genCodeLine("   }");
      }

      genCodeLine("   }");
      genCodeLine("   else");
      genCodeLine("   {");
      genCodeLine("      JAVACC_STRING_TYPE im = jjstrLiteralImages[jjmatchedKind];");
      genCodeLine("      curTokenImage = (im.length() == 0) ? input_stream->GetImage() : im;");

      if (keepLineCol)
      {
        genCodeLine("   if (input_stream->getTrackLineColumn()) {");
        genCodeLine("      beginLine = input_stream->getBeginLine();");
        genCodeLine("      beginColumn = input_stream->getBeginColumn();");
        genCodeLine("      endLine = input_stream->getEndLine();");
        genCodeLine("      endColumn = input_stream->getEndColumn();");
        genCodeLine("   }");
      }

      genCodeLine("   }");
    }
    else
    {
      genCodeLine("   JAVACC_STRING_TYPE im = jjstrLiteralImages[jjmatchedKind];");
      genCodeLine("   curTokenImage = (im.length() == 0) ? input_stream->GetImage() : im;");
      if (keepLineCol)
      {
        genCodeLine("   if (input_stream->getTrackLineColumn()) {");
        genCodeLine("     beginLine = input_stream->getBeginLine();");
        genCodeLine("     beginColumn = input_stream->getBeginColumn();");
        genCodeLine("     endLine = input_stream->getEndLine();");
        genCodeLine("     endColumn = input_stream->getEndColumn();");
        genCodeLine("   }");
      }
    }

    if (Options.getTokenFactory().length() > 0) {
      genCodeLine("   t = " + getClassQualifier(Options.getTokenFactory()) + "newToken(jjmatchedKind, curTokenImage);");
    } else if (hasBinaryNewToken)
    {
      genCodeLine("   t = " + getClassQualifier("Token") + "newToken(jjmatchedKind, curTokenImage);");
    }
    else
    {
      genCodeLine("   t = " + getClassQualifier("Token") + "newToken(jjmatchedKind);");
      genCodeLine("   t->kind = jjmatchedKind;");
      genCodeLine("   t->image = curTokenImage;");
    }
    genCodeLine("   t->specialToken = NULL;");
    genCodeLine("   t->next = NULL;");

    if (keepLineCol) {
      genCodeLine("");
      genCodeLine("   if (input_stream->getTrackLineColumn()) {");
      genCodeLine("   t->beginLine = beginLine;");
      genCodeLine("   t->endLine = endLine;");
      genCodeLine("   t->beginColumn = beginColumn;");
      genCodeLine("   t->endColumn = endColumn;");
      genCodeLine("   }");
    }

    genCodeLine("");
    genCodeLine("   return t;");
    genCodeLine("}");
  }

  /**
   * Emits getNextToken(): the generated main scanning loop. Per lexical
   * state it first fast-skips single characters, runs the DFA/NFA matcher,
   * then dispatches the match as TOKEN / SKIP / SPECIAL / MORE (with state
   * switches and lexical actions), and finally emits the lexical-error path.
   */
  void DumpGetNextToken()
  {
    int i;

    switchToIncludeFile();
    genCodeLine("");
    genCodeLine("public: int curLexState;");
    genCodeLine("public: int jjnewStateCnt;");
    genCodeLine("public: int jjround;");
    genCodeLine("public: int jjmatchedPos;");
    genCodeLine("public: int jjmatchedKind;");
    genCodeLine("");
    switchToMainFile();
    genCodeLine("const int defaultLexState = " + defaultLexState + ";");
    genCodeLine("/** Get the next Token. */");
    <API key>("Token *", tokMgrClassName, "getNextToken()");
    genCodeLine("{");
    if (hasSpecial) {
      genCodeLine("  Token *specialToken = NULL;");
    }
    genCodeLine("  Token *matchedToken;");
    genCodeLine("  int curPos = 0;");
    genCodeLine("");
    genCodeLine("  for (;;)");
    genCodeLine("  {");
    genCodeLine("   EOFLoop: ");
    //genCodeLine("   {");
    //genCodeLine("      curChar = input_stream->BeginToken();");
    //genCodeLine("   }");
    genCodeLine("   if (input_stream->endOfInput())");
    genCodeLine("   {");
    //genCodeLine("     input_stream->backup(1);");

    if (Options.<API key>())
      genCodeLine("      fprintf(debugStream, \"Returning the <EOF> token.\\n\");");

    genCodeLine("      jjmatchedKind = 0;");
    genCodeLine("      jjmatchedPos = -1;");
    genCodeLine("      matchedToken = jjFillToken();");

    if (hasSpecial)
      genCodeLine("      matchedToken->specialToken = specialToken;");

    if (nextStateForEof != null || actForEof != null)
      genCodeLine("      TokenLexicalActions(matchedToken);");

    if (Options.<API key>())
      genCodeLine("      CommonTokenAction(matchedToken);");

    genCodeLine("      return matchedToken;");
    genCodeLine("   }");
    genCodeLine("   curChar = input_stream->BeginToken();");

    if (hasMoreActions || hasSkipActions || hasTokenActions)
    {
      genCodeLine("   image = jjimage;");
      genCodeLine("   image.clear();");
      genCodeLine("   jjimageLen = 0;");
    }

    genCodeLine("");

    String prefix = "";
    if (hasMore)
    {
      genCodeLine("   for (;;)");
      genCodeLine("   {");
      prefix = "  ";
    }

    String endSwitch = "";
    String caseStr = "";
    // this also sets up the start state of the nfa
    if (maxLexStates > 1)
    {
      genCodeLine(prefix + "   switch(curLexState)");
      genCodeLine(prefix + "   {");
      endSwitch = prefix + "   }";
      caseStr = prefix + "     case ";
      prefix += "    ";
    }

    prefix += "   ";
    for(i = 0; i < maxLexStates; i++)
    {
      if (maxLexStates > 1)
        genCodeLine(caseStr + i + ":");

      if (singlesToSkip[i].HasTransitions())
      {
        // added the backup(0) to make JIT happy
        genCodeLine(prefix + "{ input_stream->backup(0);");
        if (singlesToSkip[i].asciiMoves[0] != 0L &&
            singlesToSkip[i].asciiMoves[1] != 0L)
        {
          genCodeLine(prefix + "   while ((curChar < 64" + " && (0x" +
              Long.toHexString(singlesToSkip[i].asciiMoves[0]) +
              "L & (1L << curChar)) != 0L) || \n" +
              prefix + "          (curChar >> 6) == 1" +
              " && (0x" +
              Long.toHexString(singlesToSkip[i].asciiMoves[1]) +
              "L & (1L << (curChar & 077))) != 0L)");
        }
        else if (singlesToSkip[i].asciiMoves[1] == 0L)
        {
          genCodeLine(prefix + "   while (curChar <= " +
              (int)MaxChar(singlesToSkip[i].asciiMoves[0]) + " && (0x" +
              Long.toHexString(singlesToSkip[i].asciiMoves[0]) +
              "L & (1L << curChar)) != 0L)");
        }
        else if (singlesToSkip[i].asciiMoves[0] == 0L)
        {
          genCodeLine(prefix + "   while (curChar > 63 && curChar <= " +
              ((int)MaxChar(singlesToSkip[i].asciiMoves[1]) + 64) +
              " && (0x" +
              Long.toHexString(singlesToSkip[i].asciiMoves[1]) +
              "L & (1L << (curChar & 077))) != 0L)");
        }

        genCodeLine(prefix + "{");
        if (Options.<API key>())
        {
          if (maxLexStates > 1) {
            genCodeLine("      fprintf(debugStream, \"<%s>\" , addUnicodeEscapes(lexStateNames[curLexState]).c_str());");
          }

          genCodeLine("      fprintf(debugStream, \"Skipping character : %c(%d)\\n\", curChar, (int)curChar);");
        }

        genCodeLine(prefix + "if (input_stream->endOfInput()) { goto EOFLoop; }");
        genCodeLine(prefix + "curChar = input_stream->BeginToken();");
        genCodeLine(prefix + "}");
        genCodeLine(prefix + "}");
      }

      if (initMatch[i] != Integer.MAX_VALUE && initMatch[i] != 0)
      {
        // this state can match the empty string
        if (Options.<API key>())
          genCodeLine("      fprintf(debugStream, \"   Matched the empty string as %s token.\\n\", addUnicodeEscapes(tokenImage[" + initMatch[i] + "]).c_str());");

        genCodeLine(prefix + "jjmatchedKind = " + initMatch[i] + ";");
        genCodeLine(prefix + "jjmatchedPos = -1;");
        genCodeLine(prefix + "curPos = 0;");
      }
      else
      {
        genCodeLine(prefix + "jjmatchedKind = 0x" + Integer.toHexString(Integer.MAX_VALUE) + ";");
        genCodeLine(prefix + "jjmatchedPos = 0;");
      }

      if (Options.<API key>())
      {
        genCodeLine("   fprintf(debugStream, " +
            "\"<%s>Current character : %c(%d) at line %d column %d\\n\","+
            "addUnicodeEscapes(lexStateNames[curLexState]).c_str(), curChar, (int)curChar, " +
            "input_stream->getEndLine(), input_stream->getEndColumn());");
      }

      genCodeLine(prefix + "curPos = <API key>" + i + "();");

      if (canMatchAnyChar[i] != -1)
      {
        if (initMatch[i] != Integer.MAX_VALUE && initMatch[i] != 0)
          genCodeLine(prefix + "if (jjmatchedPos < 0 || (jjmatchedPos == 0 && jjmatchedKind > " + canMatchAnyChar[i] + "))");
        else
          genCodeLine(prefix + "if (jjmatchedPos == 0 && jjmatchedKind > " + canMatchAnyChar[i] + ")");

        genCodeLine(prefix + "{");

        if (Options.<API key>())
        {
          genCodeLine("           fprintf(debugStream, \"   Current character matched as a %s token.\\n\", addUnicodeEscapes(tokenImage[" + canMatchAnyChar[i] + "]).c_str());");
        }

        genCodeLine(prefix + "   jjmatchedKind = " + canMatchAnyChar[i] + ";");

        if (initMatch[i] != Integer.MAX_VALUE && initMatch[i] != 0)
          genCodeLine(prefix + "   jjmatchedPos = 0;");

        genCodeLine(prefix + "}");
      }

      if (maxLexStates > 1)
        genCodeLine(prefix + "break;");
    }

    if (maxLexStates > 1)
      genCodeLine(endSwitch);
    else if (maxLexStates == 0)
      genCodeLine("       jjmatchedKind = 0x" + Integer.toHexString(Integer.MAX_VALUE) + ";");

    if (maxLexStates > 1)
      prefix = "  ";
    else
      prefix = "";

    if (maxLexStates > 0)
    {
      genCodeLine(prefix + "   if (jjmatchedKind != 0x" + Integer.toHexString(Integer.MAX_VALUE) + ")");
      genCodeLine(prefix + "   {");
      genCodeLine(prefix + "      if (jjmatchedPos + 1 < curPos)");

      if (Options.<API key>())
      {
        genCodeLine(prefix + "      {");
        genCodeLine(prefix + "         fprintf(debugStream, " + "\"   Putting back %d characters into the input stream.\\n\", (curPos - jjmatchedPos - 1));");
      }

      genCodeLine(prefix + "         input_stream->backup(curPos - jjmatchedPos - 1);");

      if (Options.<API key>())
      {
        genCodeLine(prefix + "      }");
      }

      if (Options.<API key>())
      {
        genCodeLine("    fprintf(debugStream, " +
            "\"****** FOUND A %d(%s) MATCH (%s) ******\\n\", jjmatchedKind, addUnicodeEscapes(tokenImage[jjmatchedKind]).c_str(), addUnicodeEscapes(input_stream->GetSuffix(jjmatchedPos + 1)).c_str());");
      }

      if (hasSkip || hasMore || hasSpecial)
      {
        genCodeLine(prefix + "      if ((jjtoToken[jjmatchedKind >> 6] & " + "(1L << (jjmatchedKind & 077))) != 0L)");
        genCodeLine(prefix + "      {");
      }

      genCodeLine(prefix + "         matchedToken = jjFillToken();");

      if (hasSpecial)
        genCodeLine(prefix + "         matchedToken->specialToken = specialToken;");

      if (hasTokenActions)
        genCodeLine(prefix + "         TokenLexicalActions(matchedToken);");

      if (maxLexStates > 1)
      {
        genCodeLine("       if (jjnewLexState[jjmatchedKind] != -1)");
        genCodeLine(prefix + "       curLexState = jjnewLexState[jjmatchedKind];");
      }

      if (Options.<API key>())
        genCodeLine(prefix + "         CommonTokenAction(matchedToken);");

      genCodeLine(prefix + "         return matchedToken;");

      if (hasSkip || hasMore || hasSpecial)
      {
        genCodeLine(prefix + "      }");

        if (hasSkip || hasSpecial)
        {
          if (hasMore)
          {
            genCodeLine(prefix + "      else if ((jjtoSkip[jjmatchedKind >> 6] & " + "(1L << (jjmatchedKind & 077))) != 0L)");
          }
          else
            genCodeLine(prefix + "      else");

          genCodeLine(prefix + "      {");

          if (hasSpecial)
          {
            genCodeLine(prefix + "         if ((jjtoSpecial[jjmatchedKind >> 6] & " + "(1L << (jjmatchedKind & 077))) != 0L)");
            genCodeLine(prefix + "         {");
            genCodeLine(prefix + "            matchedToken = jjFillToken();");
            genCodeLine(prefix + "            if (specialToken == NULL)");
            genCodeLine(prefix + "               specialToken = matchedToken;");
            genCodeLine(prefix + "            else");
            genCodeLine(prefix + "            {");
            genCodeLine(prefix + "               matchedToken->specialToken = specialToken;");
            genCodeLine(prefix + "               specialToken = (specialToken->next = matchedToken);");
            genCodeLine(prefix + "            }");

            if (hasSkipActions)
              genCodeLine(prefix + "            SkipLexicalActions(matchedToken);");

            genCodeLine(prefix + "         }");

            if (hasSkipActions)
            {
              genCodeLine(prefix + "         else");
              genCodeLine(prefix + "            SkipLexicalActions(NULL);");
            }
          }
          else if (hasSkipActions)
            genCodeLine(prefix + "         SkipLexicalActions(NULL);");

          if (maxLexStates > 1)
          {
            genCodeLine("         if (jjnewLexState[jjmatchedKind] != -1)");
            genCodeLine(prefix + "         curLexState = jjnewLexState[jjmatchedKind];");
          }

          genCodeLine(prefix + "         goto EOFLoop;");
          genCodeLine(prefix + "      }");
        }

        if (hasMore)
        {
          if (hasMoreActions)
            genCodeLine(prefix + "      MoreLexicalActions();");
          else if (hasSkipActions || hasTokenActions)
            genCodeLine(prefix + "      jjimageLen += jjmatchedPos + 1;");

          if (maxLexStates > 1)
          {
            genCodeLine("      if (jjnewLexState[jjmatchedKind] != -1)");
            genCodeLine(prefix + "      curLexState = jjnewLexState[jjmatchedKind];");
          }
          genCodeLine(prefix + "      curPos = 0;");
          genCodeLine(prefix + "      jjmatchedKind = 0x" + Integer.toHexString(Integer.MAX_VALUE) + ";");

          genCodeLine(prefix + "   if (!input_stream->endOfInput()) {");
          genCodeLine(prefix + "         curChar = input_stream->readChar();");

          if (Options.<API key>())
          {
            genCodeLine("   fprintf(debugStream, " +
                "\"<%s>Current character : %c(%d) at line %d column %d\\n\","+
                "addUnicodeEscapes(lexStateNames[curLexState]).c_str(), curChar, (int)curChar, " +
                "input_stream->getEndLine(), input_stream->getEndColumn());");
          }
          genCodeLine(prefix + "   continue;");
          genCodeLine(prefix + " }");
        }
      }

      genCodeLine(prefix + "   }");
      // no match at all: report a lexical error with position context
      genCodeLine(prefix + "   int error_line = input_stream->getEndLine();");
      genCodeLine(prefix + "   int error_column = input_stream->getEndColumn();");
      genCodeLine(prefix + "   JAVACC_STRING_TYPE error_after;");
      genCodeLine(prefix + "   bool EOFSeen = false;");
      genCodeLine(prefix + "   if (input_stream->endOfInput()) {");
      genCodeLine(prefix + "      EOFSeen = true;");
      genCodeLine(prefix + "      error_after = curPos <= 1 ? EMPTY : input_stream->GetImage();");
      genCodeLine(prefix + "      if (curChar == '\\n' || curChar == '\\r') {");
      genCodeLine(prefix + "         error_line++;");
      genCodeLine(prefix + "         error_column = 0;");
      genCodeLine(prefix + "      }");
      genCodeLine(prefix + "      else");
      genCodeLine(prefix + "         error_column++;");
      genCodeLine(prefix + "   }");
      genCodeLine(prefix + "   if (!EOFSeen) {");
      genCodeLine(prefix + "      error_after = curPos <= 1 ? EMPTY : input_stream->GetImage();");
      genCodeLine(prefix + "   }");
      genCodeLine(prefix + "   errorHandler->lexicalError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, this);");
    }

    if (hasMore)
      genCodeLine(prefix + " }");

    genCodeLine("  }");
    genCodeLine("}");
    genCodeLine("");
  }

  /**
   * Emits SkipLexicalActions(): a switch over jjmatchedKind executing the
   * user action attached to each SKIP ordinal, including the guard that
   * bails out of repeated empty-string matches at the same position.
   */
  public void DumpSkipActions()
  {
    Action act;

    <API key>("void ", tokMgrClassName, "SkipLexicalActions(Token *matchedToken)");
    genCodeLine("{");
    genCodeLine("   switch(jjmatchedKind)");
    genCodeLine("   {");

    Outer:
      for (int i = 0; i < maxOrdinal; i++)
      {
        if ((toSkip[i / 64] & (1L << (i % 64))) == 0L)
          continue;

        for (;;)
        {
          if (((act = (Action)actions[i]) == null ||
              act.getActionTokens() == null ||
              act.getActionTokens().size() == 0) && !canLoop[lexStates[i]])
            continue Outer;

          genCodeLine("      case " + i + " : {");

          if (initMatch[lexStates[i]] == i && canLoop[lexStates[i]])
          {
            // empty-string infinite-loop guard
            genCodeLine("         if (jjmatchedPos == -1)");
            genCodeLine("         {");
            genCodeLine("            if (jjbeenHere[" + lexStates[i] + "] &&");
            genCodeLine("                jjemptyLineNo[" + lexStates[i] + "] == input_stream->getBeginLine() &&");
            genCodeLine("                jjemptyColNo[" + lexStates[i] + "] == input_stream->getBeginColumn())");
            genCodeLine("               errorHandler->lexicalError(JAVACC_STRING_TYPE(\"(\"Error: Bailing out of infinite loop caused by repeated empty string matches \" + \"at line \" + input_stream->getBeginLine() + \", \" + \"column \" + input_stream->getBeginColumn() + \".\")), this);");
            genCodeLine("            jjemptyLineNo[" + lexStates[i] + "] = input_stream->getBeginLine();");
            genCodeLine("            jjemptyColNo[" + lexStates[i] + "] = input_stream->getBeginColumn();");
            genCodeLine("            jjbeenHere[" + lexStates[i] + "] = true;");
            genCodeLine("         }");
          }

          if ((act = (Action)actions[i]) == null ||
              act.getActionTokens().size() == 0)
            break;

          genCode(         "         image.append");
          if (RStringLiteral.allImages[i] != null) {
            genCodeLine("(jjstrLiteralImages[" + i + "]);");
            genCodeLine("        lengthOfMatch = jjstrLiteralImages[" + i + "].length();");
          } else {
            genCodeLine("(input_stream->GetSuffix(jjimageLen + (lengthOfMatch = jjmatchedPos + 1)));");
          }

          printTokenSetup((Token)act.getActionTokens().get(0));
          ccol = 1;

          for (int j = 0; j < act.getActionTokens().size(); j++)
            printToken((Token)act.getActionTokens().get(j));
          genCodeLine("");

          break;
        }

        genCodeLine("         break;");
        genCodeLine("       }");
      }

    genCodeLine("      default :");
    genCodeLine("         break;");
    genCodeLine("   }");
    genCodeLine("}");
  }

  /**
   * Emits MoreLexicalActions(): a switch over jjmatchedKind executing the
   * user action attached to each MORE ordinal; accumulates the partial
   * image and resets jjimageLen for the continued match.
   */
  public void DumpMoreActions()
  {
    Action act;

    <API key>("void ", tokMgrClassName, "MoreLexicalActions()");
    genCodeLine("{");
    genCodeLine("   jjimageLen += (lengthOfMatch = jjmatchedPos + 1);");
    genCodeLine("   switch(jjmatchedKind)");
    genCodeLine("   {");

    Outer:
      for (int i = 0; i < maxOrdinal; i++)
      {
        if ((toMore[i / 64] & (1L << (i % 64))) == 0L)
          continue;

        for (;;)
        {
          if (((act = (Action)actions[i]) == null ||
              act.getActionTokens() == null ||
              act.getActionTokens().size() == 0) && !canLoop[lexStates[i]])
            continue Outer;

          genCodeLine("      case " + i + " : {");

          if (initMatch[lexStates[i]] == i && canLoop[lexStates[i]])
          {
            // empty-string infinite-loop guard
            genCodeLine("         if (jjmatchedPos == -1)");
            genCodeLine("         {");
            genCodeLine("            if (jjbeenHere[" + lexStates[i] + "] &&");
            genCodeLine("                jjemptyLineNo[" + lexStates[i] + "] == input_stream->getBeginLine() &&");
            genCodeLine("                jjemptyColNo[" + lexStates[i] + "] == input_stream->getBeginColumn())");
            genCodeLine("               errorHandler->lexicalError(JAVACC_STRING_TYPE(\"(\"Error: Bailing out of infinite loop caused by repeated empty string matches \" + \"at line \" + input_stream->getBeginLine() + \", \" + \"column \" + input_stream->getBeginColumn() + \".\")), this);");
            genCodeLine("            jjemptyLineNo[" + lexStates[i] + "] = input_stream->getBeginLine();");
            genCodeLine("            jjemptyColNo[" + lexStates[i] + "] = input_stream->getBeginColumn();");
            genCodeLine("            jjbeenHere[" + lexStates[i] + "] = true;");
            genCodeLine("         }");
          }

          if ((act = (Action)actions[i]) == null ||
              act.getActionTokens().size() == 0)
          {
            break;
          }

          genCode(         "         image.append");

          if (RStringLiteral.allImages[i] != null)
            genCodeLine("(jjstrLiteralImages[" + i + "]);");
          else
            genCodeLine("(input_stream->GetSuffix(jjimageLen));");

          genCodeLine("         jjimageLen = 0;");
          printTokenSetup((Token)act.getActionTokens().get(0));
          ccol = 1;

          for (int j = 0; j < act.getActionTokens().size(); j++)
            printToken((Token)act.getActionTokens().get(j));
          genCodeLine("");

          break;
        }

        genCodeLine("         break;");
        genCodeLine("       }");
      }

    genCodeLine("      default :");
    genCodeLine("         break;");

    genCodeLine("   }");
    genCodeLine("}");
  }

  /**
   * Emits TokenLexicalActions(): a switch over jjmatchedKind executing the
   * user action attached to each TOKEN ordinal (EOF clears the image, all
   * others append the matched text before the user code runs).
   */
  public void DumpTokenActions()
  {
    Action act;
    int i;

    <API key>("void ", tokMgrClassName, "TokenLexicalActions(Token *matchedToken)");
    genCodeLine("{");
    genCodeLine("   switch(jjmatchedKind)");
    genCodeLine("   {");

    Outer:
      for (i = 0; i < maxOrdinal; i++)
      {
        if ((toToken[i / 64] & (1L << (i % 64))) == 0L)
          continue;

        for (;;)
        {
          if (((act = (Action)actions[i]) == null ||
              act.getActionTokens() == null ||
              act.getActionTokens().size() == 0) && !canLoop[lexStates[i]])
            continue Outer;

          genCodeLine("      case " + i + " : {");

          if (initMatch[lexStates[i]] == i && canLoop[lexStates[i]])
          {
            // empty-string infinite-loop guard
            genCodeLine("         if (jjmatchedPos == -1)");
            genCodeLine("         {");
            genCodeLine("            if (jjbeenHere[" + lexStates[i] + "] &&");
            genCodeLine("                jjemptyLineNo[" + lexStates[i] + "] == input_stream->getBeginLine() &&");
            genCodeLine("                jjemptyColNo[" + lexStates[i] + "] == input_stream->getBeginColumn())");
            genCodeLine("               errorHandler->lexicalError(JAVACC_STRING_TYPE(\"Error: Bailing out of infinite loop caused by repeated empty string matches " + "at line \" + input_stream->getBeginLine() + \", " + "column \" + input_stream->getBeginColumn() + \".\"), this);");
            genCodeLine("            jjemptyLineNo[" + lexStates[i] + "] = input_stream->getBeginLine();");
            genCodeLine("            jjemptyColNo[" + lexStates[i] + "] = input_stream->getBeginColumn();");
            genCodeLine("            jjbeenHere[" + lexStates[i] + "] = true;");
            genCodeLine("         }");
          }

          if ((act = (Action)actions[i]) == null ||
              act.getActionTokens().size() == 0)
            break;

          if (i == 0)
          {
            genCodeLine("      image.setLength(0);"); // For EOF no image is there
          }
          else
          {
            genCode(         "        image.append");

            if (RStringLiteral.allImages[i] != null) {
              genCodeLine("(jjstrLiteralImages[" + i + "]);");
              genCodeLine("        lengthOfMatch = jjstrLiteralImages[" + i + "].length();");
            } else {
              genCodeLine("(input_stream->GetSuffix(jjimageLen + (lengthOfMatch = jjmatchedPos + 1)));");
            }
          }

          printTokenSetup((Token)act.getActionTokens().get(0));
          ccol = 1;

          for (int j = 0; j < act.getActionTokens().size(); j++)
            printToken((Token)act.getActionTokens().get(j));
          genCodeLine("");

          break;
        }

        genCodeLine("         break;");
        genCodeLine("      }");
      }

    genCodeLine("      default :");
    genCodeLine("         break;");
    genCodeLine("   }");
    genCodeLine("}");
  }
}
#include "world.h"

#include "platformer/resources/animation.h"
#include "platformer/resources/background.h"
#include "platformer/resources/camera.h"
#include "platformer/resources/collisions.h"
#include "platformer/resources/control.h"
#include "platformer/script/script.h"

#include "util/debug.h"
#include "util/file-system.h"
#include "util/exceptions/load_exception.h"
#include "util/exceptions/exception.h"
#include "util/token.h"
#include "util/tokenreader.h"

using namespace std;
using namespace Platformer;

/* Builds a world from a (world ...) token. The token may either inline the
 * whole definition or hold a single argument that is a relative path to a
 * file containing it. Throws LoadException if the token is not a world. */
World::World(const Token * token):
resolutionX(0),
resolutionY(0),
dimensionsX(0),
dimensionsY(0),
gravityX(0),
gravityY(0),
acceleration(0),
fillColor(Graphics::makeColor(0,0,0)),
quitRequest(false),
paused(false){
    if ( *token != "world" ){
        throw LoadException(__FILE__, __LINE__, "Not world.");
    }
    //! Setup script engine
    scriptEngine = Scriptable::getInstance(this);
    if (token->numTokens() == 1){
        // Single argument: treat it as a relative path to the world file.
        std::string temp;
        token->view() >> temp;
        load(Storage::instance().find(Filesystem::RelativePath(temp)));
    } else {
        load(token);
    }
}

/* Loads a world definition from disk by tokenizing the file and delegating
 * to load(const Token *). Token errors are rethrown as LoadException. */
void World::load(const Filesystem::AbsolutePath & filename){
    // Load up tokenizer
    try{
        Global::debug(1,"World") << "Loading world " << filename.path() << endl;
        TokenReader tr;
        Token * token = tr.readTokenFromFile(filename.path());
        load(token);
    } catch (const TokenException & e){
        throw LoadException(__FILE__, __LINE__, e, "Error loading World");
    }
}

/* Parses each attribute of the (world ...) token, populating the name,
 * resolution/dimensions, physics, cameras, animations, back/foregrounds,
 * collision map and scripted objects. Unknown attributes only log. */
void World::load(const Token * token){
    TokenView view = token->view();
    while (view.hasMore()){
        try{
            const Token * tok;
            view >> tok;
            if (*tok == "name"){
                // get the name
                tok->view() >> name;
                Global::debug(0, "Platformer") << "Loading: " << name << endl;
            } else if (*tok == "resolution"){
                // Get the resolution of the world
                tok->view() >> resolutionX >> resolutionY;
            } else if (*tok == "dimensions"){
                // Get the dimensions of the world
                tok->view() >> dimensionsX >> dimensionsY;
            } else if (*tok == "players"){
                // Handle player info eventually
            } else if (*tok == "mechanics"){
                // Physics parameters: gravity vector and acceleration scalar.
                TokenView mechView = tok->view();
                while (mechView.hasMore()){
                    const Token * mechTok;
                    mechView >> mechTok;
                    if (*mechTok == "gravity"){
                        // get the gravity
                        mechTok->view() >> gravityX >> gravityY;
                    } else if (*mechTok == "acceleration"){
                        // Get the acceleration
                        mechTok->view() >> acceleration;
                    } else {
                        Global::debug( 3 ) << "Unhandled mechanics attribute: "<<endl;
                    }
                }
            } else if (*tok == "fill-color"){
                // Clear color used before each camera renders its window.
                int r, g, b;
                tok->view() >> r >> g >> b;
                fillColor = Graphics::makeColor(r,g,b);
            } else if (*tok == "camera"){
                // Handle camera info
                Util::ReferenceCount<Camera> camera = Util::ReferenceCount<Camera>(new Camera(resolutionX, resolutionY, dimensionsX, dimensionsY, tok));
                // currentObject == -1 means "not following any object yet".
                CameraInfo info = { -1 , camera };
                cameras[camera->getId()] = info;
            } else if (*tok == "animation"){
                Util::ReferenceCount<Animation> animation(new Animation(tok));
                animations[animation->getId()] = animation;
            } else if (*tok == "background"){
                // Backgrounds can reference animations loaded so far.
                Util::ReferenceCount<Background> background(new Background(tok, animations));
                backgrounds.push_back(background);
            } else if (*tok == "foreground"){
                Util::ReferenceCount<Background> foreground(new Background(tok, animations));
                foregrounds.push_back(foreground);
            } else if (*tok == "collision-map"){
                collisionMap = Util::ReferenceCount<Collisions::Map>(new Collisions::Map(tok));
            } else if (*tok == "script-import-path"){
                std::string path;
                tok->view() >> path;
                scriptEngine->addImportPath(path);
            } else if (*tok == "script"){
                // Run a (module, function) pair through the script engine.
                std::string module, function;
                TokenView scriptView = tok->view();
                while (scriptView.hasMore()){
                    const Token * scriptTok;
                    scriptView >> scriptTok;
                    if (*scriptTok == "id"){
                        // get the name?
                    } else if (*scriptTok == "module"){
                        // Get the module
                        scriptTok->view() >> module;
                    } else if (*scriptTok == "function"){
                        // Get the function
                        scriptTok->view() >> function;
                    } else {
                        Global::debug( 3 ) << "Unhandled script attribute: "<<endl;
                    }
                }
                if (!module.empty() && !function.empty()){
                    scriptEngine->runScript(module, function);
                }
            } else if (*tok == "object-script"){
                // object-script tokens are only templates; they are consumed
                // on demand by the "object" branch below.
                // No need look below
                //scriptEngine->importObject(tok);
            } else if (*tok == "object"){
                // Instantiate an object: find the (object-script ...) template
                // whose id matches this object's script name, copy it, inject
                // the position, and hand the copy to the script engine.
                int x=0, y=0;
                std::string scriptName;
                tok->match("_/position", x, y);
                tok->match("_/script", scriptName);
                Global::debug(2) << "Found object with script name: " << scriptName << std::endl;
                std::vector<const Token *> tokens = token->findTokens("_/object-script");
                for (std::vector<const Token *>::iterator i = tokens.begin(); i != tokens.end(); i++){
                    const Token * scriptToken = *i;
                    std::string check;
                    scriptToken->match("_/id", check);
                    if (scriptName == check){
                        Util::ReferenceCount<Token> copy = Util::ReferenceCount<Token>(scriptToken->copy());
                        Token * position = new Token("position", false);
                        *position << "position" << x << y;
                        copy->addToken(position);
                        Global::debug(2) << "Copied token: " << copy->toString() << std::endl;
                        scriptEngine->importObject(copy.raw());
                        break;
                    }
                }
            } else {
                Global::debug( 3 ) << "Unhandled World attribute: "<<endl;
                if (Global::getDebug() >= 3){
                    token->print(" ");
                }
            }
        } catch ( const TokenException & ex ) {
            throw LoadException(__FILE__, __LINE__, ex, "World parse error");
        } catch ( const LoadException & ex ) {
            throw ex;
        }
    }
}

World::~World(){
}

/* Advances the world one tick. While paused, cameras/animations/backgrounds/
 * objects/foregrounds are frozen; controls and the script engine always run.
 * Throws Exception::Quit once quitRequest has been set. */
void World::act(){
    if (!paused){
        for (std::map< int, CameraInfo >::iterator c = cameras.begin(); c != cameras.end(); ++c){
            Util::ReferenceCount<Camera> camera = c->second.camera;
            camera->act();
        }

        for (std::map< std::string, Util::ReferenceCount<Animation> >::iterator i = animations.begin(); i != animations.end(); ++i){
            Util::ReferenceCount<Animation> animation = i->second;
            if (animation != NULL){
                animation->act();
            }
        }

        // Backgrounds
        for (std::vector< Util::ReferenceCount<Background> >::iterator i = backgrounds.begin(); i != backgrounds.end(); ++i){
            Util::ReferenceCount<Background> background = *i;
            if (background != NULL){
                background->act();
            }
        }

        // Objects
        for (std::deque< Util::ReferenceCount<Object> >::iterator i = objects.begin(); i != objects.end(); ++i){
            Util::ReferenceCount<Object> object = *i;
            // make gravity affect the object and append acceleration
            // (seed the velocity with gravity when the object is at rest on
            // that axis, otherwise keep accelerating in gravity's direction)
            if (gravityX != 0 && object->getVelocityX() == 0){
                object->setVelocityX(gravityX);
            } else if (gravityX != 0){
                if (gravityX > 0){
                    object->addVelocity(acceleration, 0);
                } else if (gravityX < 0){
                    object->addVelocity(acceleration * -1, 0);
                }
            }
            if (gravityY != 0 && object->getVelocityY() == 0){
                object->setVelocityY(gravityY);
            } else if (gravityY != 0){
                if (gravityY > 0){
                    object->addVelocity(0, acceleration);
                } else if (gravityY < 0){
                    object->addVelocity(0, acceleration * -1);
                }
            }
            // Objects only step when there is a collision map to test against.
            if (collisionMap != NULL){
                object->act(collisionMap, objects);
            }
        }

        // foregrounds
        for (std::vector< Util::ReferenceCount<Background> >::iterator i = foregrounds.begin(); i != foregrounds.end(); ++i){
            Util::ReferenceCount<Background> foreground = *i;
            foreground->act();
        }
    }

    // Controls
    for (std::map< int, Util::ReferenceCount<Control> >::iterator i = controls.begin(); i != controls.end(); i++){
        Util::ReferenceCount<Control> control = i->second;
        control->act();
    }

    scriptEngine->act(paused);

    if (quitRequest){
        throw Exception::Quit(__FILE__, __LINE__);
    }
}

/* Renders the world once per camera: fill color, backgrounds, objects,
 * (debug) collision map, foregrounds and scripted items are drawn into the
 * camera's window, which is then composited onto bmp. */
void World::draw(const Graphics::Bitmap & bmp){
    // Go through all cameras
    for (std::map< int, CameraInfo >::iterator c = cameras.begin(); c != cameras.end(); ++c){
        Util::ReferenceCount<Camera> camera = c->second.camera;
        // Fill to color
        camera->getWindow().fill(fillColor);
        // Backgrounds
        for (std::vector< Util::ReferenceCount<Background> >::iterator i = backgrounds.begin(); i != backgrounds.end(); ++i){
            Util::ReferenceCount<Background> background = *i;
            if (background != NULL){
                background->draw(*camera);
            }
        }
        // Render objects to camera
        for (std::deque< Util::ReferenceCount<Object> >::iterator i = objects.begin(); i != objects.end(); ++i){
            Util::ReferenceCount<Object> object = *i;
            object->draw(*camera);
        }
        // Render collision maps (NOTE Debugging only remove later)
        if (collisionMap != NULL){
            collisionMap->render(*camera);
        }
        // foregrounds
        for (std::vector< Util::ReferenceCount<Background> >::iterator i = foregrounds.begin(); i != foregrounds.end(); ++i){
            Util::ReferenceCount<Background> foreground = *i;
            foreground->draw(*camera);
        }
        // Render scriptable items to camera
        scriptEngine->render(*camera);
        // Render camera to bmp
        camera->draw(bmp);
    }
}

/* Moves camera `id` to an absolute position; unknown ids are ignored. */
void World::setCamera(int id, double x, double y){
    std::map<int, CameraInfo>::iterator found = cameras.find(id);
    if (found != cameras.end()){
        found->second.camera->set(x,y);
    }
}

/* Moves camera `id` by a relative offset; unknown ids are ignored. */
void World::moveCamera(int id, double x, double y){
    std::map<int, CameraInfo>::iterator found = cameras.find(id);
    if (found != cameras.end()){
        found->second.camera->move(x,y);
    }
}

/* Cycles camera `id` to follow the next object in the deque; after the last
 * object it wraps back to "not following" (currentObject = -1). */
void World::followNextObject(int id){
    std::map<int, CameraInfo>::iterator found = cameras.find(id);
    if (found != cameras.end()){
        if (found->second.currentObject >= -1 && found->second.currentObject < (int)objects.size()-1){
            found->second.currentObject++;
            found->second.camera->stopFollowing();
            Util::ReferenceCount<Object> object = objects[(unsigned int)found->second.currentObject];
            found->second.camera->followObject(object);
        } else {
            found->second.currentObject = -1;
            found->second.camera->stopFollowing();
        }
    }
}

/* Makes camera `cameraId` follow the object whose getID() == objectId.
 * Does nothing if either the object or the camera is not found. */
void World::followObject(int cameraId, int objectId){
    for (unsigned int i = 0; i < objects.size(); i++){
        Util::ReferenceCount<Object> object = objects[i];
        if (object->getID() == objectId){
            std::map<int, CameraInfo>::iterator found = cameras.find(cameraId);
            if (found != cameras.end()){
                found->second.camera->stopFollowing();
                found->second.currentObject = i;
                found->second.camera->followObject(object);
            }
            break;
        }
    }
}

/* Returns the camera with the given id, or a NULL reference if absent. */
Util::ReferenceCount<Camera> World::getCamera(int id){
    std::map<int, CameraInfo>::iterator found = cameras.find(id);
    if (found != cameras.end()){
        return found->second.camera;
    }
    return Util::ReferenceCount<Camera>(NULL);
}

/* New objects are pushed to the FRONT of the deque. */
void World::addObject(Util::ReferenceCount<Object> object){
    objects.push_front(object);
}

/* Linear search by object id; returns a NULL reference if absent. */
Util::ReferenceCount<Object> World::getObject(int id){
    for (std::deque< Util::ReferenceCount<Object> >::iterator i = objects.begin(); i != objects.end(); i++){
        Util::ReferenceCount<Object> object = *i;
        if (object->getID() == id){
            return object;
        }
    }
    return Util::ReferenceCount<Object>(NULL);
}

/* Registers a control, keyed by its own id. */
void World::addControl(Util::ReferenceCount<Control> control){
    controls.insert(std::pair<int, Util::ReferenceCount<Control> >(control->getID(), control));
}

/* Returns the control with the given id, or a NULL reference if absent. */
Util::ReferenceCount<Control> World::getControl(int id){
    std::map<int, Util::ReferenceCount<Control> >::iterator found = controls.find(id);
    if (found != controls.end()){
        return found->second;
    }
    return Util::ReferenceCount<Control>(NULL);
}

/* Forwards a (module, function) invocation to the script engine. */
void World::invokeScript(const std::string & module, const std::string & func){
    scriptEngine->runScript(module, func);
}
#ifndef InterpolationType_h
#define InterpolationType_h

#include "core/animation/InterpolationValue.h"
#include "core/animation/Keyframe.h"
#include "core/animation/<API key>.h"
#include "core/animation/<API key>.h"
#include "core/animation/PropertyHandle.h"
#include "core/animation/<API key>.h"
#include "platform/heap/Handle.h"
#include "wtf/Allocator.h"

namespace blink {

class <API key>;

// Subclasses of InterpolationType implement the logic for a specific value type of a specific PropertyHandle to:
// - Convert <API key> values to (Pairwise)?InterpolationValues: <API key>() and maybeConvertSingle()
// - Convert the target Element's property value to an InterpolationValue: <API key>()
// - Apply an InterpolationValue to a target Element's property: apply().
class InterpolationType {
    USING_FAST_MALLOC(InterpolationType);
    <API key>(InterpolationType);

public:
    // Instances are compared by pointer identity (see operator== below), i.e.
    // they act as singletons; this destructor is not expected to ever run.
    virtual ~InterpolationType() { ASSERT_NOT_REACHED(); }

    // The property this type knows how to interpolate.
    PropertyHandle getProperty() const { return m_property; }

    // ConversionCheckers are returned from calls to <API key>() and maybeConvertSingle() to enable the caller to check
    // whether the result is still valid given changes in the <API key> and underlying InterpolationValue.
    class ConversionChecker {
        USING_FAST_MALLOC(ConversionChecker);
        <API key>(ConversionChecker);
    public:
        virtual ~ConversionChecker() { }
        void setType(const InterpolationType& type) { m_type = &type; }
        const InterpolationType& type() const { return *m_type; }
        // Returns whether the previously converted value is still usable for
        // the given environment / underlying value.
        virtual bool isValid(const <API key>&, const InterpolationValue& underlying) const = 0;
    protected:
        ConversionChecker()
            : m_type(nullptr)
        { }
        const InterpolationType* m_type;
    };
    using ConversionCheckers = Vector<OwnPtr<ConversionChecker>>;

    // Default pairwise conversion: convert each endpoint keyframe separately
    // and merge them; bails out with nullptr as soon as either endpoint
    // fails to convert.
    virtual <API key> <API key>(const <API key>& startKeyframe, const <API key>& endKeyframe, const <API key>& environment, const InterpolationValue& underlying, ConversionCheckers& conversionCheckers) const
    {
        InterpolationValue start = maybeConvertSingle(startKeyframe, environment, underlying, conversionCheckers);
        if (!start)
            return nullptr;
        InterpolationValue end = maybeConvertSingle(endKeyframe, environment, underlying, conversionCheckers);
        if (!end)
            return nullptr;
        return <API key>(std::move(start), std::move(end));
    }

    // Converts a single keyframe; returns a null value on failure.
    virtual InterpolationValue maybeConvertSingle(const <API key>&, const <API key>&, const InterpolationValue& underlying, ConversionCheckers&) const = 0;

    virtual InterpolationValue <API key>(const <API key>&) const = 0;

    // Blends `value` into the underlying value via scale-and-add. Both sides
    // must carry only an interpolable part (the ASSERTs enforce this).
    virtual void composite(<API key>& <API key>, double underlyingFraction, const InterpolationValue& value, double <API key>) const
    {
        ASSERT(!<API key>.value().<API key>);
        ASSERT(!value.<API key>);
        <API key>.mutableValue().interpolableValue->scaleAndAdd(underlyingFraction, *value.interpolableValue);
    }

    virtual void apply(const InterpolableValue&, const <API key>*, <API key>&) const = 0;

    // Implement reference equality checking via pointer equality checking as these are singletons.
    bool operator==(const InterpolationType& other) const { return this == &other; }
    bool operator!=(const InterpolationType& other) const { return this != &other; }

protected:
    InterpolationType(PropertyHandle property)
        : m_property(property)
    { }

    // Merges two converted endpoints into a pairwise value; drops any
    // non-interpolable parts (the ASSERTs require there are none).
    virtual <API key> <API key>(InterpolationValue&& start, InterpolationValue&& end) const
    {
        ASSERT(!start.<API key>);
        ASSERT(!end.<API key>);
        return <API key>(
            start.interpolableValue.release(),
            end.interpolableValue.release(),
            nullptr);
    }

    const PropertyHandle m_property;
};

} // namespace blink

#endif // InterpolationType_h
package info.tregmine.commands;

import info.tregmine.Tregmine;
import info.tregmine.api.GenericPlayer;
import info.tregmine.database.DAOException;
import info.tregmine.database.IContext;
import info.tregmine.database.IPlayerDAO;

import static org.bukkit.ChatColor.AQUA;
import static org.bukkit.ChatColor.RED;

/**
 * Admin command /channelview [on|off|status]: toggles or reports the
 * CHANNEL_VIEW flag on the invoking player and persists flag changes.
 */
public class ChannelViewCommand extends AbstractCommand
{
    public ChannelViewCommand(Tregmine tregmine)
    {
        // BUG FIX: the command was registered as "invlog" (a copy-paste from
        // the inventory-log command) even though every message below refers
        // to /channelview. Register it under its real name.
        super(tregmine, "channelview", Tregmine.<API key>.ADMIN_REQUIRED);
    }

    /**
     * Handles the sub-commands. With no argument the current state is
     * reported; "on"/"off" flip the flag and persist it; "status" reports;
     * anything else prints usage.
     *
     * @return always true (the command was recognized and handled)
     */
    @Override
    public boolean handlePlayer(GenericPlayer player, String[] args)
    {
        // No argument: report the current state; nothing to persist.
        if (args.length < 1) {
            player.sendMessage("Your ChannelView is set to "
                    + (player.hasFlag(GenericPlayer.Flags.CHANNEL_VIEW) ? "on" : "off") + ".");
            return true;
        }

        String state = args[0];
        boolean changed = false;
        if ("on".equalsIgnoreCase(state)) {
            player.setFlag(GenericPlayer.Flags.CHANNEL_VIEW);
            player.sendMessage(AQUA + "Channel View display is now turned on for you.");
            changed = true;
        }
        else if ("off".equalsIgnoreCase(state)) {
            player.removeFlag(GenericPlayer.Flags.CHANNEL_VIEW);
            player.sendMessage(AQUA + "Channel View display is now turned off for you.");
            changed = true;
        }
        else if ("status".equalsIgnoreCase(state)) {
            player.sendMessage("Your Channel View display is set to "
                    + (player.hasFlag(GenericPlayer.Flags.CHANNEL_VIEW) ? "on" : "off") + ".");
        }
        else {
            player.sendMessage(RED
                    + "The commands are /channelview on, /channelview off and /channelview status.");
        }

        // Persist only when the flag actually changed; read-only sub-commands
        // previously triggered a needless database write.
        if (changed) {
            try (IContext ctx = tregmine.createContext()) {
                IPlayerDAO playerDAO = ctx.getPlayerDAO();
                playerDAO.updatePlayer(player);
            }
            catch (DAOException e) {
                throw new RuntimeException(e);
            }
        }
        return true;
    }
}
#include "config.h"
#include "modules/app_banner/<API key>.h"

#include "bindings/core/v8/ScriptPromise.h"
#include "core/dom/DOMException.h"
#include "core/dom/ExceptionCode.h"
#include "modules/app_banner/<API key>.h"

namespace blink {

// Default constructor: leaves m_platform empty.
<API key>::<API key>()
{
}

// Constructs the event with an explicit platform string. The (false, true)
// arguments are forwarded to the Event base class -- presumably
// canBubble=false, cancelable=true; confirm against Event's constructor.
<API key>::<API key>(const AtomicString& name, const String& platform)
    : Event(name, false, true)
    , m_platform(platform)
{
}

// Constructs the event from a generated event-init dictionary, copying only
// its platform member.
<API key>::<API key>(const AtomicString& name, const <API key>& init)
    : Event(name, false, true)
    , m_platform(init.platform())
{
}

<API key>::~<API key>()
{
}

String <API key>::platform() const
{
    return m_platform;
}

// Always returns a promise rejected with NotSupportedError.
// NOTE(review): the rejection message talks about Service Worker
// subscriptions -- it looks copy-pasted from the push-messaging code; confirm
// the intended wording for this event.
ScriptPromise <API key>::userChoice(ScriptState* scriptState) const
{
    return ScriptPromise::<API key>(scriptState, DOMException::create(NotSupportedError, "Subscription failed - no active Service Worker"));
}

const AtomicString& <API key>::interfaceName() const
{
    return EventNames::<API key>;
}

} // namespace blink
from .settings import <API key>
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.settings import MEDIA_DEV_MODE
from mediagenerator.utils import load_backend, find_file, read_text_file
import os
import time


class Filter(object):
    # Base class for media filters. A filter consumes the output of zero or
    # more input filters (built lazily from the `input` config entry) and
    # produces transformed content per "variation".
    # NOTE: Python 2 code (uses `basestring` below).
    takes_input = True

    def __init__(self, **kwargs):
        # Default backend used for plain file inputs (see get_item()).
        self.file_filter = FileFilter
        self.config(kwargs, filetype=None, filter=None, bundle=None, _from_default=None)

        # We assume that if this is e.g. a 'js' backend then all input must
        # also be 'js'. Subclasses must override this if they expect a special
        # input file type. Also, subclasses have to check if their file type
        # is supported.
        self.input_filetype = self.filetype

        if self.takes_input:
            self.config(kwargs, input=())
            # Normalize a single input entry into a one-element tuple.
            if not isinstance(self.input, (tuple, list)):
                self.input = (self.input,)

        # Lazily-built list of Filter instances, see get_input_filters().
        self._input_filters = None
        assert not kwargs, 'Unknown parameters: %s' % ', '.join(kwargs.keys())

    @classmethod
    def from_default(cls, name):
        # Default config when this class is picked automatically for `name`.
        return {'input': name}

    def <API key>(self, ext):
        # Guard against re-applying the default filter for the extension we
        # were ourselves created from (prevents infinite recursion in
        # get_item()).
        return ext != self._from_default

    def get_variations(self):
        """
        Returns all possible variations that get generated by this filter.

        The result must be a dict whose values are tuples.
        """
        return {}

    def get_output(self, variation):
        """
        Yields content for each output item for the given variation.
        """
        raise NotImplementedError()

    def get_dev_output(self, name, variation):
        """
        Returns content for the given file name and variation in development mode.
        """
        # Names are of the form "<input-filter-index>/<child-name>".
        index, child = name.split('/', 1)
        index = int(index)
        filter = self.get_input_filters()[index]
        return filter.get_dev_output(child, variation)

    def <API key>(self, variation):
        """
        Yields file names for the given variation in development mode.
        """
        # By default we simply return our input filters' file names
        for index, filter in enumerate(self.get_input_filters()):
            for name, hash in filter.<API key>(variation):
                yield '%d/%s' % (index, name), hash

    def get_input(self, variation):
        """Yields contents for each input item."""
        for filter in self.get_input_filters():
            for input in filter.get_output(variation):
                yield input

    def get_input_filters(self):
        """Returns a Filter instance for each input item."""
        if not self.takes_input:
            raise ValueError("The %s media filter doesn't take any input" % self.__class__.__name__)
        # Cached after the first call.
        if self._input_filters is not None:
            return self._input_filters
        self._input_filters = []
        for input in self.input:
            if isinstance(input, dict):
                # Explicit filter configuration.
                filter = self.get_filter(input)
            else:
                # Plain file name: pick a backend by extension.
                filter = self.get_item(input)
            self._input_filters.append(filter)
        return self._input_filters

    def get_filter(self, config):
        # Instantiate a filter from an explicit config dict.
        backend_class = load_backend(config.get('filter'))
        return backend_class(filetype=self.input_filetype, bundle=self.bundle, **config)

    def get_item(self, name):
        # Pick backend(s) for a plain file name based on its extension; a
        # tuple entry in the defaults map builds a FilterPipe of backends.
        ext = os.path.splitext(name)[1].lstrip('.')
        backend_classes = []
        if ext in <API key> and self.<API key>(ext):
            ext_class = <API key>[ext]
            if isinstance(ext_class, basestring):
                backend_classes.append(load_backend(<API key>[ext]))
            elif isinstance(ext_class, tuple):
                backend_classes.append(FilterPipe)
                for pipe_entry in ext_class:
                    backend_classes.append(load_backend(pipe_entry))
        else:
            backend_classes.append(self.file_filter)
        backends = []
        for backend_class in backend_classes:
            config = backend_class.from_default(name)
            config.setdefault('filter', '%s.%s' % (backend_class.__module__, backend_class.__name__))
            config.setdefault('filetype', self.input_filetype)
            config['bundle'] = self.bundle
            # This is added to make really sure we don't instantiate the same
            # filter in an endless loop. Normally, the child class should
            # take care of this in <API key>().
            config.setdefault('_from_default', ext)
            backends.append(backend_class(**config))
        # First backend is the entry point; the rest are chained into it.
        backend = backends.pop(0)
        for pipe_entry in backends:
            backend.grow_pipe(pipe_entry)
        return backend

    def <API key>(self):
        """Utility function to get variations including input variations"""
        variations = self.get_variations()
        if not self.takes_input:
            return variations
        for filter in self.get_input_filters():
            subvariations = filter.<API key>()
            # Merge, refusing silently-conflicting definitions.
            for k, v in subvariations.items():
                if k in variations and v != variations[k]:
                    raise ValueError('Conflicting variations for "%s": %r != %r' % (
                        k, v, variations[k]))
            variations.update(subvariations)
        return variations

    def config(self, init, **defaults):
        # Pop each known key from `init` onto self, falling back to defaults.
        for key in defaults:
            setattr(self, key, init.pop(key, defaults[key]))


class FileFilter(Filter):
    """A filter that just returns the given file."""
    takes_input = False

    def __init__(self, **kwargs):
        self.config(kwargs, name=None)
        # Cache of (mtime, content-hash) from the last hashing pass.
        self.mtime = self.hash = None
        super(FileFilter, self).__init__(**kwargs)

    @classmethod
    def from_default(cls, name):
        return {'name': name}

    def get_output(self, variation):
        yield self.get_dev_output(self.name, variation)

    def get_dev_output(self, name, variation):
        self.name = name
        return read_text_file(self._get_path())

    def get_last_modified(self):
        # Returns the mtime, or a falsy value when the file cannot be found.
        path = find_file(self.name)
        return path and os.path.getmtime(path)

    def <API key>(self, variation):
        mtime = self.get_last_modified()
        # In dev mode, where a lot of requests
        # we can reduce proc time of filters
        # making hash = mtime of source file
        # instead of sha1(filtered_content)
        if MEDIA_DEV_MODE:
            hash = str(mtime)
        elif mtime != self.mtime:
            output = self.get_dev_output(self.name, variation)
            hash = sha1(smart_str(output)).hexdigest()
        else:
            # Unchanged since last time: reuse the cached hash.
            hash = self.hash
        yield self.name, hash

    def _get_path(self):
        path = find_file(self.name)
        assert path, """File name "%s" doesn't exist.""" % self.name
        return path


class RawFileFilter(FileFilter):
    # Like FileFilter, but reads from an explicit absolute `path` instead of
    # resolving `name` through find_file().
    takes_input = False

    def __init__(self, **kwargs):
        self.config(kwargs, path=None)
        super(RawFileFilter, self).__init__(**kwargs)

    def get_dev_output(self, name, variation):
        assert name == self.name, (
            '''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
            % (name, self.name))
        return read_text_file(self.path)

    def <API key>(self, variation):
        mtime = os.path.getmtime(self.path)
        if mtime != self.mtime:
            output = self.get_dev_output(self.name, variation)
            hash = sha1(smart_str(output)).hexdigest()
        else:
            hash = self.hash
        yield self.name, hash


class FilterPipe(FileFilter):
    # Reads a file like FileFilter, then pushes its content through a chain
    # of additional filters (added via grow_pipe()) in order.
    def __init__(self, **kwargs):
        super(FilterPipe, self).__init__(**kwargs)
        self.pipe = []

    def grow_pipe(self, pipe_entry):
        # Appends one more processing stage to the pipe.
        self.pipe.append(pipe_entry)

    def get_dev_output(self, name, variation):
        output = super(FilterPipe, self).get_dev_output(name, variation)
        for filter in self.pipe:
            # Each pipe stage transforms the previous stage's content.
            output = filter.get_dev_output(name, variation, content=output)
        return output

    def get_last_modified(self):
        # The pipe is as fresh as its newest stage; if any stage has no
        # mtime, fall back to "now" so the content is always regenerated.
        lmod = 0
        for entry in self.pipe:
            entry_lm = entry.get_last_modified()
            if not entry_lm:
                return time.time()
            if entry_lm > lmod:
                lmod = entry_lm
        return lmod
<?php
namespace plugins\locale;

use Yii;
use yii\base\Behavior;
use yii\web\Application;

/**
 * Class LocaleBehavior
 *
 * Picks the application language once per web request: a previously stored
 * locale cookie wins, otherwise the locale is resolved from the logged-in
 * user's profile or from the browser's preferred languages.
 *
 * @package common\behaviors
 */
class LocaleBehavior extends Behavior
{
    /**
     * @var string name of the cookie that caches the chosen locale
     */
    public $cookieName = '_locale';

    /**
     * @var bool
     */
    public $<API key> = true;

    /**
     * @return array
     */
    public function events()
    {
        return [
            Application::<API key> => 'beforeRequest'
        ];
    }

    /**
     * Resolve application language by checking user cookies, preferred
     * language and profile settings.
     */
    public function beforeRequest()
    {
        if (!Yii::$app instanceof Application) {
            return true;
        }

        $cookies = Yii::$app->getRequest()->getCookies();
        $mustRecompute = Yii::$app->session->hasFlash('forceUpdateLocale')
            || !$cookies->has($this->cookieName);

        Yii::$app->language = $mustRecompute
            ? $this->resolveLocale()
            : Yii::$app->getRequest()->getCookies()->getValue($this->cookieName);
    }

    /**
     * Determines the locale to use when no cached cookie value applies:
     * profile locale first, then the browser's preferred language, then the
     * configured application default.
     *
     * @return string
     */
    public function resolveLocale()
    {
        if (!Yii::$app->user->isGuest && Yii::$app->user->identity->profile->locale) {
            return Yii::$app->user->getIdentity()->profile->locale;
        }
        if ($this-><API key>) {
            return Yii::$app->request-><API key>($this->getAvailableLocales());
        }
        return Yii::$app->language;
    }

    /**
     * @return array locale codes the application can serve
     */
    protected function getAvailableLocales()
    {
        return array_keys(Plugin::$language);
    }
}
package com.ericsson.research.trap.utils;

/**
 * Default {@link StringUtil} backend that splits strings on a single
 * separator character.
 */
public class StringUtilImpl extends StringUtil
{
    StringUtilImpl() {}

    /**
     * Splits {@code s} on every occurrence of the literal character
     * {@code c}.
     *
     * @param s the string to split
     * @param c the separator character, treated literally
     * @return the segments of {@code s} between occurrences of {@code c}
     */
    public String[] doSplit(String s, char c)
    {
        // BUG FIX: String.split() interprets its argument as a regular
        // expression, so a separator such as '.', '|', '*' or '+' was
        // previously treated as a regex metacharacter and produced wrong
        // results (e.g. splitting "a.b" on '.' yielded an empty array).
        // Quote the separator so it always matches literally.
        return s.split(java.util.regex.Pattern.quote(Character.toString(c)));
    }
}
package vtctld

import (
	"flag"
	"sync"
	"time"

	log "github.com/golang/glog"
	"github.com/youtube/vitess/go/vt/tabletserver/tabletconn"
	"github.com/youtube/vitess/go/vt/topo"
	"golang.org/x/net/context"

	querypb "github.com/youtube/vitess/go/vt/proto/query"
	topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)

// This file maintains a tablet health cache. It establishes streaming
// connections with tablets, and updates its internal state with the
// result.

var (
	<API key> = flag.Duration("<API key>", 5*time.Minute, "close streaming tablet health connection if there are no requests for this long")
)

// tabletHealth tracks the health-stream state for a single tablet.
// All mutable fields are guarded by mu; done/ready are closed exactly once.
type tabletHealth struct {
	mu sync.Mutex
	// result stores the most recent response.
	result *querypb.<API key>
	// accessed stores the time of the most recent access.
	accessed time.Time
	// err stores the result of the stream attempt.
	err error
	// done is closed when the stream attempt ends.
	done chan struct{}
	// ready is closed when there is at least one result to read.
	ready chan struct{}
}

// newTabletHealth returns a tabletHealth whose access time starts now,
// with both signalling channels still open.
func newTabletHealth() *tabletHealth {
	return &tabletHealth{
		accessed: time.Now(),
		ready:    make(chan struct{}),
		done:     make(chan struct{}),
	}
}

// lastResult blocks until the first streamed result arrives (ready), the
// stream ends (done) or the caller's context expires, then returns the most
// recent result/error and refreshes the access timestamp.
func (th *tabletHealth) lastResult(ctx context.Context) (*querypb.<API key>, error) {
	// Wait until at least the first result comes in, or the stream ends.
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-th.ready:
	case <-th.done:
	}

	th.mu.Lock()
	defer th.mu.Unlock()
	th.accessed = time.Now()
	return th.result, th.err
}

// lastAccessed returns the time of the most recent lastResult call (or
// creation), under the lock.
func (th *tabletHealth) lastAccessed() time.Time {
	th.mu.Lock()
	defer th.mu.Unlock()
	return th.accessed
}

// stream dials the tablet and consumes its health stream, storing each
// response. It keeps reading until the connection fails, the context is
// canceled, or the entry has not been accessed for the configured keep-alive
// duration. The deferred closure publishes the final error and closes done.
func (th *tabletHealth) stream(ctx context.Context, ts topo.Server, tabletAlias *topodatapb.TabletAlias) (err error) {
	defer func() {
		th.mu.Lock()
		th.err = err
		th.mu.Unlock()
		close(th.done)
	}()

	ti, err := ts.GetTablet(ctx, tabletAlias)
	if err != nil {
		return err
	}
	conn, err := tabletconn.GetDialer()(ti.Tablet, 30*time.Second)
	if err != nil {
		return err
	}
	defer conn.Close(ctx)

	stream, err := conn.StreamHealth(ctx)
	if err != nil {
		return err
	}

	first := true
	for time.Since(th.lastAccessed()) < *<API key> {
		// Non-blocking cancellation check before each (blocking) Recv.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		result, err := stream.Recv()
		if err != nil {
			return err
		}

		th.mu.Lock()
		th.result = result
		th.mu.Unlock()

		if first {
			// We got the first result, so we're ready to be accessed.
			close(th.ready)
			first = false
		}
	}
	return nil
}

// tabletHealthCache maps tablet aliases to their streaming health state.
// tabletMap is guarded by mu; entries remove themselves when their stream
// ends (see Get / delete).
type tabletHealthCache struct {
	ts        topo.Server
	mu        sync.Mutex
	tabletMap map[topodatapb.TabletAlias]*tabletHealth
}

// Constructor for an empty cache over the given topology server.
func <API key>(ts topo.Server) *tabletHealthCache {
	return &tabletHealthCache{
		ts:        ts,
		tabletMap: make(map[topodatapb.TabletAlias]*tabletHealth),
	}
}

// Get returns the latest health response for the tablet, lazily starting a
// background streaming goroutine on first use. The goroutine removes the
// cache entry when its stream terminates, so a later Get re-establishes it.
func (thc *tabletHealthCache) Get(ctx context.Context, tabletAlias *topodatapb.TabletAlias) (*querypb.<API key>, error) {
	thc.mu.Lock()
	th, ok := thc.tabletMap[*tabletAlias]
	if !ok {
		// No existing stream, so start one.
		th = newTabletHealth()
		thc.tabletMap[*tabletAlias] = th
		go func() {
			log.Infof("starting health stream for tablet %v", tabletAlias)
			err := th.stream(context.Background(), thc.ts, tabletAlias)
			log.Infof("tablet %v health stream ended, error: %v", tabletAlias, err)
			thc.delete(tabletAlias)
		}()
	}
	thc.mu.Unlock()

	return th.lastResult(ctx)
}

// delete removes the cache entry for the tablet, if present.
func (thc *tabletHealthCache) delete(tabletAlias *topodatapb.TabletAlias) {
	thc.mu.Lock()
	delete(thc.tabletMap, *tabletAlias)
	thc.mu.Unlock()
}