text
stringlengths 5
1.04M
|
|---|
/* -*-C++-*- */
/*
(c) Copyright 2004-2008, Hewlett-Packard Development Company, LP
See the file named COPYING for license details
*/
/** @file
\brief Utilities for dealing with libxml2 bits
Use `using lintel::strGetXMLAttr;` to pull it into your namespace.
*/
#ifndef LINTEL_XMLUTIL_HPP
#define LINTEL_XMLUTIL_HPP
#include <string>
#include <libxml/tree.h>
namespace lintel {
/// get the value for an attribute of an element. Returns an empty
/// string if the attribute doesn't exist (so you can't actually tell
/// the difference between an attribute not existing and it set to the
/// empty string)
std::string strGetXMLAttr(xmlNodePtr cur, const std::string &attr_name);
// Bug fix: the closing brace previously carried a stray semicolon ("};"),
// which is ill-formed before C++11 and flagged by -pedantic.
} // namespace lintel
#endif
|
#include "UI.h"
#include <SDL_events.h>
#include <SDL_timer.h>
#include <iostream>
#include <iomanip>
/// Create the application window and an accelerated renderer.
/// On failure the SDL error is reported to stderr and the corresponding
/// member is left NULL, so callers can detect an unusable UI.
UI::UI() {
    window = SDL_CreateWindow("Triangles",
                              SDL_WINDOWPOS_UNDEFINED,
                              SDL_WINDOWPOS_UNDEFINED,
                              UI_WINDOW_WIDTH,
                              UI_WINDOW_HEIGHT,
                              SDL_WINDOW_SHOWN);
    if (window == nullptr) {
        std::cerr << "SDL_CreateWindow failed: " << SDL_GetError() << std::endl;
    }
    renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
    if (renderer == nullptr) {
        std::cerr << "SDL_CreateRenderer failed: " << SDL_GetError() << std::endl;
    }
}
void UI::run() {
bool quit = false;
SDL_Event event;
Uint32 initial = SDL_GetTicks();
int lastReport = 0;
int epoc = 0;
while (!quit) {
while (SDL_PollEvent(&event) != 0) {
if (event.type == SDL_QUIT) {
quit = true;
}
}
int time = (SDL_GetTicks() - initial)/10000;
population->evolve();
epoc++;
if (time > lastReport) {
lastReport = time;
std::cout << time << "\t" << epoc << "\t" << population->debug() << std::endl;
}
render();
}
}
void UI::render() {
int width = 128;
int height = 192;
SDL_SetRenderDrawColor(renderer, 0xFF, 0xFF, 0xFF, 0xFF);
SDL_RenderClear(renderer);
// Render target
SDL_Rect targetView = { 12, 12, width, height};
SDL_RenderCopy(renderer, targetTexture, NULL, &targetView);
// Render best
SDL_Rect bestView = { 24 + width, 12, width, height};
SDL_Surface* bestSurface = population->renderBest();
SDL_Texture* bestTexture = SDL_CreateTextureFromSurface(renderer, bestSurface);
SDL_FreeSurface(bestSurface);
SDL_RenderCopy(renderer, bestTexture, NULL, &bestView);
SDL_DestroyTexture(bestTexture);
// Render elements here
SDL_RenderPresent(renderer);
}
// Build (and keep) a texture from the target's surface for use in render().
// NOTE(review): a previously assigned targetTexture is not destroyed here,
// and ownership of target->getSurface() is assumed to remain with Target —
// confirm against Target's API.
void UI::setTarget(Target *target) {
targetTexture = SDL_CreateTextureFromSurface(renderer, target->getSurface());
}
// Store a non-owning pointer to the population driving the simulation;
// run() and render() dereference it, so it must outlive this UI.
void UI::setPopulation(Population *population) {
this->population = population;
}
/// Release SDL resources. Bug fix: the renderer must be destroyed before
/// its window (SDL2 binds a renderer to the window it was created from);
/// the previous order destroyed the window first.
/// NOTE(review): targetTexture created in setTarget() is not released here —
/// consider SDL_DestroyTexture(targetTexture) once its lifecycle is confirmed.
void UI::free() {
    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(window);
    renderer = nullptr;
    window = nullptr;
}
|
// Copyright © 2017-2020 Trust Wallet.
//
// This file is part of Trust. The full Trust copyright notice, including
// terms governing use, modification, and redistribution, is contained in the
// file LICENSE at the root of the source code distribution tree.
#include "ParamsBuilder.h"
#include "Data.h"
#include "OpCode.h"
#include <TrezorCrypto/bignum.h>
#include <TrezorCrypto/ecdsa.h>
#include <TrezorCrypto/nist256p1.h>
#include <list>
using namespace TW;
using namespace TW::Ontology;
// Serialize one NeoVM parameter into `builder`, dispatching on the runtime
// type stored in the boost::any. Throws std::runtime_error for unsupported
// payload types. The emitted opcode sequence is consensus-critical; do not
// reorder the branches or the opcode emission.
void ParamsBuilder::buildNeoVmParam(ParamsBuilder& builder, const boost::any& param) {
if (param.type() == typeid(std::string)) {
builder.push(boost::any_cast<std::string>(param));
} else if (param.type() == typeid(std::array<uint8_t, 20>)) {
builder.push(boost::any_cast<std::array<uint8_t, 20>>(param));
} else if (param.type() == typeid(Data)) {
builder.push(boost::any_cast<Data>(param));
} else if (param.type() == typeid(uint64_t)) {
builder.push(boost::any_cast<uint64_t>(param));
} else if (param.type() == typeid(std::vector<boost::any>)) {
// Vector: emit each element, then the element count, then PACK.
auto paramVec = boost::any_cast<std::vector<boost::any>>(param);
for (const auto& item : paramVec) {
ParamsBuilder::buildNeoVmParam(builder, item);
}
builder.push(static_cast<uint8_t>(paramVec.size()));
builder.pushBack(PACK);
} else if (param.type() == typeid(std::list<boost::any>)) {
// List: build a NeoVM struct on the alt stack, appending each member.
builder.pushBack(PUSH0);
builder.pushBack(NEW_STRUCT);
builder.pushBack(TO_ALT_STACK);
for (auto const& p : boost::any_cast<std::list<boost::any>>(param)) {
ParamsBuilder::buildNeoVmParam(builder, p);
builder.pushBack(DUP_FROM_ALT_STACK);
builder.pushBack(SWAP);
builder.pushBack(HAS_KEY);
}
builder.pushBack(FROM_ALT_STACK);
} else {
throw std::runtime_error("Unsupported param type.");
}
}
// String overload: appends the raw bytes with no length prefix (pushBack),
// unlike the boost::any dispatcher above which length-prefixes via push().
void ParamsBuilder::buildNeoVmParam(ParamsBuilder& builder, const std::string& param) {
builder.pushBack(param);
}
// 20-byte address overload: appended verbatim via pushBack.
void ParamsBuilder::buildNeoVmParam(ParamsBuilder& builder, const std::array<uint8_t, 20>& param) {
builder.pushBack(Data(param.begin(), param.end()));
}
// Byte-vector overload: length-prefixed push (PUSH_DATAx as needed).
void ParamsBuilder::buildNeoVmParam(ParamsBuilder& builder, const Data& param) {
builder.push(param);
}
// Append `data` preceded by a Bitcoin-style varint length prefix.
void ParamsBuilder::pushVar(const Data& data) {
pushVar(data.size());
bytes.insert(bytes.end(), data.begin(), data.end());
}
// Encode `value` as a varint: values below 0xFD as one raw byte, otherwise
// marker 0xFD/0xFE/0xFF followed by a little-endian u16/u32/u64 payload.
// NOTE(review): the comparisons use `<` rather than `<=`, so 0xFFFF and
// 0xFFFFFFFF are emitted one size class wider than the canonical varint
// form. Decoders still round-trip the value; confirm against the Ontology
// wire format before "fixing", since these bytes feed signed transactions.
void ParamsBuilder::pushVar(std::size_t value) {
if (value < 0xFD) {
ParamsBuilder::pushBack(static_cast<uint8_t>(value));
} else if (value < 0xFFFF) {
bytes.push_back(0xFD);
encode16LE(static_cast<uint16_t>(value), bytes);
} else if (value < 0xFFFFFFFF) {
bytes.push_back(0xFE);
encode32LE(static_cast<uint32_t>(value), bytes);
} else {
bytes.push_back(0xFF);
encode64LE(value, bytes);
}
}
void ParamsBuilder::push(const std::string& data) {
push(Data(data.begin(), data.end()));
}
void ParamsBuilder::push(const std::array<uint8_t, 20>& data) {
push(Data(data.begin(), data.end()));
}
// Emit `data` as a NeoVM push: short payloads carry a single length byte,
// longer ones a PUSH_DATA1/2/4 opcode plus a little-endian length of the
// matching width, followed by the payload itself.
void ParamsBuilder::push(const Data& data) {
    auto dataSize = data.size();
    if (dataSize < 75) {
        bytes.push_back(static_cast<uint8_t>(dataSize));
    } else if (dataSize < 256) {
        bytes.push_back(PUSH_DATA1);
        bytes.push_back(static_cast<uint8_t>(dataSize));
    } else if (dataSize < 65536) {
        bytes.push_back(PUSH_DATA2);
        encode16LE(static_cast<uint16_t>(dataSize), bytes);
    } else {
        bytes.push_back(PUSH_DATA4);
        // Bug fix: the PUSH_DATA4 length must be the full 32-bit size; the
        // previous static_cast<uint16_t> truncated sizes >= 65536 before
        // encode32LE widened the mangled value back.
        encode32LE(static_cast<uint32_t>(dataSize), bytes);
    }
    bytes.insert(bytes.end(), data.begin(), data.end());
}
// Serialize `num` as a little-endian integer of `len` bytes, appending a
// 0x00 sign byte when the top bit of the last byte is set so the VM reads
// the value as positive.
// NOTE(review): callers must pass len >= 1 — data.back() on an empty vector
// is undefined behavior. All in-file callers pass 2..8.
void ParamsBuilder::push(uint64_t num, uint8_t len) {
Data data;
for (auto i = 0; i < len; i++) {
data.push_back(static_cast<uint8_t>(num));
num >>= 8;
}
if (data.back() >> 7 == 1) {
data.push_back(0x00);
}
push(data);
}
// Push an integer using the shortest NeoVM encoding: PUSH0 for zero,
// single opcode 0x50 + n (PUSH1..PUSH15) for 1..15, otherwise a
// length-prefixed little-endian byte string sized to the magnitude.
void ParamsBuilder::push(uint64_t num) {
if (num == 0) {
bytes.push_back(PUSH0);
} else if (num < 16) {
num += 80;
bytes.push_back(static_cast<uint8_t>(num));
} else if (num < 128) {
push(Data{static_cast<uint8_t>(num)});
} else if (num <= 0xFFFF) {
push(num, 2);
} else if (num <= 0xFFFFFF) {
push(num, 3);
} else if (num <= 0xFFFFFFFF) {
push(num, 4);
} else if (num <= 0xFFFFFFFFFF) {
push(num, 5);
} else if (num <= 0xFFFFFFFFFFFF) {
push(num, 6);
} else if (num <= 0xFFFFFFFFFFFFFF) {
push(num, 7);
} else {
push(num, 8);
}
}
// Raw append of a single opcode/byte (no length prefix).
void ParamsBuilder::pushBack(uint8_t data) {
bytes.push_back(data);
}
// Raw append of a 32-bit value, little-endian.
void ParamsBuilder::pushBack(uint32_t data) {
encode32LE(data, bytes);
}
// Raw append of a 64-bit value, little-endian.
void ParamsBuilder::pushBack(uint64_t data) {
encode64LE(data, bytes);
}
// Raw append of a string's bytes (no length prefix).
void ParamsBuilder::pushBack(const std::string& data) {
bytes.insert(bytes.end(), data.begin(), data.end());
}
// Raw append of a fixed 20-byte value (no length prefix).
void ParamsBuilder::pushBack(const std::array<uint8_t, 20>& data) {
bytes.insert(bytes.end(), data.begin(), data.end());
}
// Push a uint8 using the shortest NeoVM encoding (mirrors push(uint64_t)).
// NOTE(review): for num >= 128 a 0x00 high byte is appended so the value is
// interpreted as positive; PUSH0 is used here as a literal 0x00 byte —
// confirm PUSH0 == 0x00 in OpCode.h before touching this branch.
void ParamsBuilder::push(uint8_t num) {
if (num == 0) {
bytes.push_back(PUSH0);
} else if (num < 16) {
num += 80;
bytes.push_back(static_cast<uint8_t>(num));
} else if (num < 128) {
push(Data{num});
} else {
push(Data{num, PUSH0});
}
}
// Concatenate the given signatures, each as a length-prefixed push, into a
// single invocation-script blob.
Data ParamsBuilder::fromSigs(const std::vector<Data>& sigs) {
    ParamsBuilder sigBuilder;
    for (std::size_t i = 0; i < sigs.size(); ++i) {
        sigBuilder.push(sigs[i]);
    }
    return sigBuilder.getBytes();
}
// Single-key verification script: <pubkey> CHECK_SIG.
Data ParamsBuilder::fromPubkey(const Data& publicKey) {
    ParamsBuilder scriptBuilder;
    scriptBuilder.push(publicKey);
    scriptBuilder.pushBack(CHECK_SIG);
    return scriptBuilder.getBytes();
}
// Build an m-of-n multisig verification script:
// <m> <pk1> ... <pkn> <n> CHECK_MULTI_SIG, with the public keys sorted by
// their curve point (x coordinate first, then y).
Data ParamsBuilder::fromMultiPubkey(uint8_t m, const std::vector<Data>& pubKeys) {
    if (m > pubKeys.size()) {
        throw std::runtime_error("Invalid m in signature data.");
    }
    if (pubKeys.size() > MAX_PK_SIZE) {
        throw std::runtime_error("Too many public key found.");
    }
    ParamsBuilder builder;
    builder.push(m);
    auto sortedPubKeys = pubKeys;
    // Bug fix: std::sort requires a strict weak ordering. The previous
    // comparator returned the y-comparison whenever bn_is_less(x1, x2) was 0
    // — i.e. also when x1 > x2 — which is not a valid ordering and is
    // undefined behavior for std::sort. Compare x in both directions and
    // fall back to y only when the x coordinates are equal.
    std::sort(sortedPubKeys.begin(), sortedPubKeys.end(),
              [](const Data& o1, const Data& o2) -> bool {
        curve_point p1, p2;
        ecdsa_read_pubkey(&nist256p1, o1.data(), &p1);
        ecdsa_read_pubkey(&nist256p1, o2.data(), &p2);
        if (bn_is_less(&p1.x, &p2.x) != 0) {
            return true;
        }
        if (bn_is_less(&p2.x, &p1.x) != 0) {
            return false;
        }
        return bn_is_less(&p1.y, &p2.y) != 0;
    });
    for (auto const& pk : sortedPubKeys) {
        builder.push(pk);
    }
    builder.push((uint8_t)sortedPubKeys.size());
    builder.pushBack(CHECK_MULTI_SIG);
    return builder.getBytes();
}
// Assemble the script invoking a native Ontology contract: serialized args,
// method name, contract address and version, then a SYS_CALL to the
// "Ontology.Native.Invoke" service.
Data ParamsBuilder::buildNativeInvokeCode(const Data& contractAddress, uint8_t version,
                                          const std::string& method, const boost::any& params) {
    ParamsBuilder invokeBuilder;
    ParamsBuilder::buildNeoVmParam(invokeBuilder, params);
    invokeBuilder.push(Data(method.begin(), method.end()));
    invokeBuilder.push(contractAddress);
    invokeBuilder.push(version);
    invokeBuilder.pushBack(SYS_CALL);
    const std::string nativeInvoke = "Ontology.Native.Invoke";
    invokeBuilder.push(Data(nativeInvoke.begin(), nativeInvoke.end()));
    return invokeBuilder.getBytes();
}
|
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/canbus/vehicle/tayron/protocol/gas_report_323.h"
#include "glog/logging.h"
#include "modules/drivers/canbus/common/byte.h"
#include "modules/drivers/canbus/common/canbus_consts.h"
namespace apollo {
namespace canbus {
namespace tayron {
using ::apollo::drivers::canbus::Byte;
// Nothing to initialize; parsing is stateless.
Gasreport323::Gasreport323() {}
// CAN message ID for the gas (throttle) report frame.
const int32_t Gasreport323::ID = 0x323;
// Decode the 0x323 gas report frame into the chassis detail proto and mark
// the VCU as online.
void Gasreport323::Parse(const std::uint8_t* bytes, int32_t length,
                         ChassisDetail* chassis) const {
  chassis->mutable_check_response()->set_is_vcu_online(true);
  // Resolve the nested message once instead of re-walking the chain per field.
  auto* report = chassis->mutable_tayron()->mutable_gas_report_323();
  report->set_gas_control_status(gas_control_status(bytes, length));
  report->set_gas_pedal_status(gas_pedal_status(bytes, length));
  report->set_gas_pedal_out_status(gas_pedal_out_status(bytes, length));
  report->set_gas_fehler_status(gas_fehler_status(bytes, length));
}
// config detail: {'name': 'gas_control_status', 'enum': {0: 'GAS_CONTROL_STATUS_INVALID', 1: 'GAS_CONTROL_STATUS_INTELLIGENCE', 2: 'GAS_CONTROL_STATUS_MANUAL', 3: 'GAS_CONTROL_STATUS_RESERVED'}, 'precision': 1.0, 'len': 2, 'is_signed_var': False, 'offset': 0.0, 'physical_range': '[0|3]', 'bit': 0, 'type': 'enum', 'order': 'intel', 'physical_unit': ''}
int Gasreport323::gas_control_status(const std::uint8_t* bytes, int32_t length) const {
Byte t0(bytes + 0);
int32_t x = t0.get_byte(0, 2);
return x;
}
// config detail: {'name': 'gas_pedal_status', 'offset': 0.0, 'precision': 0.39, 'len': 8, 'is_signed_var': False, 'physical_range': '[0|99.45]', 'bit': 16, 'type': 'double', 'order': 'intel', 'physical_unit': ''}
// NOTE(review): with precision 0.39 the raw byte maps to [0, 99.45], but the
// extra *100 scales the result to [0, 9945], contradicting the
// physical_range stated in the config above. Confirm downstream consumers
// expect the x100 scaling before changing it.
double Gasreport323::gas_pedal_status(const std::uint8_t* bytes, int32_t length) const {
Byte t0(bytes + 2);
int32_t x = t0.get_byte(0, 8);
double ret = x * 0.390000*100;
return ret;
}
// config detail: {'name': 'gas_pedal_out_status', 'offset': 0.0, 'precision': 0.39, 'len': 8, 'is_signed_var': False, 'physical_range': '[0|99.45]', 'bit': 8, 'type': 'double', 'order': 'intel', 'physical_unit': ''}
// NOTE(review): same x100 scaling mismatch as gas_pedal_status — the result
// range is [0, 9945], not the config's [0, 99.45]. Confirm before changing.
double Gasreport323::gas_pedal_out_status(const std::uint8_t* bytes, int32_t length) const {
Byte t0(bytes + 1);
int32_t x = t0.get_byte(0, 8);
double ret = x * 0.390000*100;
return ret;
}
// config detail: {'name': 'gas_fehler_status', 'enum': {0: 'GAS_FEHLER_STATUS_INVALID', 1: 'GAS_FEHLER_STATUS_NORMAL', 2: 'GAS_FEHLER_STATUS_FEHLER', 3: 'GAS_FEHLER_STATUS_RESERVED'}, 'precision': 1.0, 'len': 2, 'is_signed_var': False, 'offset': 0.0, 'physical_range': '[0|3]', 'bit': 2, 'type': 'enum', 'order': 'intel', 'physical_unit': ''}
int Gasreport323::gas_fehler_status(const std::uint8_t* bytes, int32_t length) const {
Byte t0(bytes + 0);
int32_t x = t0.get_byte(2, 2);
return x;
}
} // namespace tayron
} // namespace canbus
} // namespace apollo
|
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Platform specific code for Solaris 10 goes here. For the POSIX compatible
// parts the implementation is in platform-posix.cc.
#ifdef __sparc
# error "V8 does not support the SPARC CPU architecture."
#endif
#include <sys/stack.h> // for stack alignment
#include <unistd.h> // getpagesize(), usleep()
#include <sys/mman.h> // mmap()
#include <ucontext.h> // walkstack(), getcontext()
#include <dlfcn.h> // dladdr
#include <pthread.h>
#include <sched.h> // for sched_yield
#include <semaphore.h>
#include <time.h>
#include <sys/time.h> // gettimeofday(), timeradd()
#include <errno.h>
#include <ieeefp.h> // finite()
#include <signal.h> // sigemptyset(), etc
#undef MAP_TYPE
#include "v8.h"
#include "platform.h"
// It seems there is a bug in some Solaris distributions (experienced in
// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to
// access signbit() despite the availability of other C99 math functions.
#ifndef signbit
// Test sign - usually defined in math.h
// Fallback for Solaris libm builds that omit C99 signbit(): returns nonzero
// for negative values (negative zero is detected via fpclass/FP_NZERO from
// <ieeefp.h>), zero otherwise.
int signbit(double x) {
// We need to take care of the special case of both positive and negative
// versions of zero.
if (x == 0) {
return fpclass(x) & FP_NZERO;
} else {
// This won't detect negative NaN but that should be okay since we don't
// assume that behavior.
return x < 0;
}
}
#endif // signbit
namespace v8 {
namespace internal {
// 0 is never a valid thread id on Solaris since the main thread is 1 and
// subsequent threads have their ids incremented from there.
static const pthread_t kNoThread = (pthread_t) 0;
// Smallest integral value not less than x; thin wrapper over libm's ceil().
double ceiling(double x) {
  const double rounded_up = ceil(x);
  return rounded_up;
}
// One-time platform initialization for the OS abstraction layer.
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
// to an unsigned. Going directly will cause an overflow and the seed to be
// set to all ones. The seed will be identical for different instances that
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
}
// No CPU features are assumed beyond the baseline ISA on Solaris.
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // Solaris runs on a lot of things.
}
// ABI-mandated stack alignment, taken from <sys/stack.h>.
int OS::ActivationFrameAlignment() {
return STACK_ALIGN;
}
// Store with release semantics. The empty asm is a compiler-only barrier;
// hardware ordering is presumably left to the target's memory model (SPARC
// is rejected at the top of this file) — NOTE(review): confirm for any new
// Solaris port.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
__asm__ __volatile__("" : : : "memory");
*ptr = value;
}
// Name of the local timezone for `time` (milliseconds since epoch); returns
// "" for NaN or unrepresentable times.
// NOTE(review): localtime() is not thread-safe; acceptable only if called
// from a single thread.
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Solaris.
}
// Offset of local time from UTC in milliseconds, derived via a
// localtime()/mktime() round trip because Solaris' struct tm has no
// tm_gmtoff field.
double OS::LocalTimeOffset() {
// On Solaris, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
ASSERT(utc != -1);
struct tm* loc = localtime(&utc);
ASSERT(loc != NULL);
return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
// Widen the [lowest, highest) envelope to cover a newly mapped region.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
// True when `address` lies outside every region ever mapped by Allocate()
// or Commit() (conservative: false positives inside the envelope possible).
bool OS::IsOutsideAllocatedSpace(void* address) {
return address < lowest_ever_allocated || address >= highest_ever_allocated;
}
// Allocation granularity: the system page size.
size_t OS::AllocateAlignment() {
return static_cast<size_t>(getpagesize());
}
// mmap a fresh anonymous region of at least `requested` bytes (rounded up
// to a page). Returns NULL on failure; on success reports the actual size
// through *allocated and widens the allocated-space envelope.
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
// Unmap a region previously returned by Allocate().
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(address, size);
USE(result);
ASSERT(result == 0);
}
#ifdef ENABLE_HEAP_PROTECTION
// Make a heap region read-only (heap-protection builds only).
void OS::Protect(void* address, size_t size) {
// TODO(1240712): mprotect has a return value which is ignored here.
mprotect(address, size, PROT_READ);
}
// Restore read/write (and optionally execute) access to a region.
void OS::Unprotect(void* address, size_t size, bool is_executable) {
// TODO(1240712): mprotect has a return value which is ignored here.
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
mprotect(address, size, prot);
}
#endif
// Block the calling thread for `milliseconds` using usleep().
void OS::Sleep(int milliseconds) {
useconds_t ms = static_cast<useconds_t>(milliseconds);
usleep(1000 * ms);
}
// Terminate abnormally; abort() raises SIGABRT so a core dump is produced.
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
abort();
}
// Trigger a debugger trap. x86-only (`int $3`); SPARC is rejected by the
// #error at the top of this file.
void OS::DebugBreak() {
asm("int $3");
}
// Memory-mapped file backed by a FILE*. The destructor unmaps the region
// (when set) and closes the file.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
private:
FILE* file_; // owned; closed in the destructor
void* memory_; // mmap'd region; unmapped in the destructor when non-NULL
int size_;
};
// Create a file of `size` bytes initialized from `initial` and map it
// shared read/write. Returns NULL if the file cannot be created, written,
// or mapped.
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  // Bug fix: a failed mmap returns MAP_FAILED ((void*)-1), which the
  // destructor's `if (memory_)` check does not catch — it would then
  // munmap an invalid pointer. Fail the whole creation instead.
  if (memory == MAP_FAILED) {
    fclose(file);
    return NULL;
  }
  return new PosixMemoryMappedFile(file, memory, size);
}
// Unmap (if mapped) and close the backing file.
// NOTE(review): the `if (memory_)` guard does not catch MAP_FAILED
// ((void*)-1) should create() ever store a failed mapping.
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) munmap(memory_, size_);
fclose(file_);
}
// Shared-library enumeration is not implemented on Solaris.
void OS::LogSharedLibraryAddresses() {
}
// Code-moving GC notification is not implemented on Solaris.
void OS::SignalCodeMovingGC() {
}
// Cursor threaded through walkcontext(): the output frames plus the index
// of the next frame to fill in.
struct StackWalker {
  Vector<OS::StackFrame>& frames;
  int index;
};
// walkcontext() callback: symbolize `pc` into the next output frame via
// dladdr(). Returns 0 to continue the walk, non-zero to stop it.
static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
  struct StackWalker* walker = static_cast<struct StackWalker*>(data);
  // Bug fix: stop once the output vector is full instead of writing past
  // its end (walkcontext can report more frames than the caller provided).
  if (walker->index >= walker->frames.length()) return -1;
  Dl_info info;
  int i = walker->index;
  walker->frames[i].address = reinterpret_cast<void*>(pc);
  // Make sure line termination is in place.
  walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
  Vector<char> text = MutableCStrVector(walker->frames[i].text,
                                        OS::kStackWalkMaxTextLen);
  if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
    // No symbol information at all: just the raw pc.
    OS::SNPrintF(text, "[0x%p]", pc);
  } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
    // We have symbol info.
    OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
  } else {
    // No local symbol info: module name plus offset into it.
    OS::SNPrintF(text,
                 "%s'0x%p [0x%p]",
                 info.dli_fname,
                 pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
                 pc);
  }
  walker->index++;
  return 0;
}
// Capture the current call stack into `frames`; returns the number of
// frames written, or kStackWalkError on failure.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  ucontext_t ctx;
  struct StackWalker walker = { frames, 0 };
  if (getcontext(&ctx) < 0) return kStackWalkError;
  // Bug fix: walkcontext(3C) returns 0 on success and -1 on failure, so the
  // previous `if (!walkcontext(...))` reported an error exactly on success.
  if (walkcontext(&ctx, StackWalkCallback, &walker) < 0) {
    return kStackWalkError;
  }
  return walker.index;
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
// Reserve (but do not commit) a PROT_NONE region of `size` bytes. On
// failure mmap returns MAP_FAILED, which is stored in address_ and detected
// by IsReserved().
VirtualMemory::VirtualMemory(size_t size) {
address_ = mmap(NULL, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
size_ = size;
}
// Release the reservation, if one exists.
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
if (0 == munmap(address(), size())) address_ = MAP_FAILED;
}
}
// A reservation exists iff the constructor's mmap succeeded.
bool VirtualMemory::IsReserved() {
return address_ != MAP_FAILED;
}
// Back part of the reservation with accessible pages (MAP_FIXED remap over
// the reserved range); returns false if the mapping fails.
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd, kMmapFdOffset)) {
return false;
}
UpdateAllocatedSpaceLimits(address, size);
return true;
}
// Return a committed range to reserved-only state (PROT_NONE, MAP_NORESERVE).
bool VirtualMemory::Uncommit(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
// Pthread-backed thread handle storage. SELF captures the calling thread's
// id; INVALID stores kNoThread.
class ThreadHandle::PlatformData : public Malloced {
public:
explicit PlatformData(ThreadHandle::Kind kind) {
Initialize(kind);
}
void Initialize(ThreadHandle::Kind kind) {
switch (kind) {
case ThreadHandle::SELF: thread_ = pthread_self(); break;
case ThreadHandle::INVALID: thread_ = kNoThread; break;
}
}
pthread_t thread_; // Thread handle for pthread.
};
ThreadHandle::ThreadHandle(Kind kind) {
data_ = new PlatformData(kind);
}
// Re-point the handle at SELF or INVALID without reallocating.
void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
data_->Initialize(kind);
}
ThreadHandle::~ThreadHandle() {
delete data_;
}
// True when the handle refers to the calling thread.
bool ThreadHandle::IsSelf() const {
return pthread_equal(data_->thread_, pthread_self());
}
// True when the handle refers to a real thread (see kNoThread above).
bool ThreadHandle::IsValid() const {
return data_->thread_ != kNoThread;
}
// A Thread starts with an INVALID handle; Start() fills it in.
Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}
Thread::~Thread() {
}
// pthread entry trampoline: records the thread id, then runs Thread::Run().
static void* ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
thread->thread_handle_data()->thread_ = pthread_self();
ASSERT(thread->IsValid());
thread->Run();
return NULL;
}
// Launch the thread.
// NOTE(review): the pthread_create() return value is unchecked; a failure
// leaves the handle unset and only trips the ASSERT in debug builds.
void Thread::Start() {
pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
ASSERT(IsValid());
}
// Block until the thread finishes.
void Thread::Join() {
pthread_join(thread_handle_data()->thread_, NULL);
}
// Thread-local storage, implemented directly on pthread keys.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
pthread_key_t key;
int result = pthread_key_create(&key, NULL);
USE(result);
ASSERT(result == 0);
return static_cast<LocalStorageKey>(key);
}
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
int result = pthread_key_delete(pthread_key);
USE(result);
ASSERT(result == 0);
}
void* Thread::GetThreadLocal(LocalStorageKey key) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
return pthread_getspecific(pthread_key);
}
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
pthread_setspecific(pthread_key, value);
}
// Give up the remainder of the calling thread's time slice.
void Thread::YieldCPU() {
sched_yield();
}
// Recursive pthread mutex (PTHREAD_MUTEX_RECURSIVE: the owning thread may
// re-lock without deadlocking).
class SolarisMutex : public Mutex {
public:
SolarisMutex() {
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&mutex_, &attr);
}
~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
int Lock() { return pthread_mutex_lock(&mutex_); }
int Unlock() { return pthread_mutex_unlock(&mutex_); }
private:
pthread_mutex_t mutex_;
};
Mutex* OS::CreateMutex() {
return new SolarisMutex();
}
// Counting semaphore on top of POSIX sem_t (process-private).
class SolarisSemaphore : public Semaphore {
public:
explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
virtual void Wait();
virtual bool Wait(int timeout);
virtual void Signal() { sem_post(&sem_); }
private:
sem_t sem_;
};
// Block until the semaphore is acquired, retrying on EINTR (signals such
// as the profiler's SIGPROF cause spurious wakeups).
void SolarisSemaphore::Wait() {
while (true) {
int result = sem_wait(&sem_);
if (result == 0) return; // Successfully got semaphore.
CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
}
}
// Fallback definitions for platforms whose headers do not provide these
// timeval helpers (used by the timed semaphore wait below).
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
(ts)->tv_sec = (tv)->tv_sec; \
(ts)->tv_nsec = (tv)->tv_usec * 1000; \
} while (false)
#endif
#ifndef timeradd
#define timeradd(a, b, result) \
do { \
(result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
(result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
if ((result)->tv_usec >= 1000000) { \
++(result)->tv_sec; \
(result)->tv_usec -= 1000000; \
} \
} while (0)
#endif
// Wait for the semaphore with a timeout given in microseconds. Returns true
// if the semaphore was acquired, false on timeout or clock failure; EINTR
// wakeups are retried.
// Bug fix: "&current_time" had been corrupted to "¤t_time" (an HTML-entity
// mangling of "&curr...") in both the gettimeofday() and timeradd() calls,
// which does not compile.
bool SolarisSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT
  // Split timeout into second and nanosecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;
  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }
  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);
  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
// Factory for the Solaris semaphore implementation above.
Semaphore* OS::CreateSemaphore(int count) {
return new SolarisSemaphore(count);
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// The single active sampler; only one SIGPROF consumer is supported.
static Sampler* active_sampler_ = NULL;
// SIGPROF handler: record one tick. pc/sp/fp are not recovered on this
// platform (left zero); only the VM state is sampled.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return;
TickSample sample;
sample.pc = 0;
sample.sp = 0;
sample.fp = 0;
// We always sample the VM state.
sample.state = VMState::current_state();
active_sampler_->Tick(&sample);
}
// Saved signal/timer state so Stop() can restore what Start() replaced.
class Sampler::PlatformData : public Malloced {
public:
PlatformData() {
signal_handler_installed_ = false;
}
bool signal_handler_installed_;
struct sigaction old_signal_handler_;
struct itimerval old_timer_value_;
};
// Sampler ticking every `interval` ms; `profiling` also enables the
// synchronous mode flag.
Sampler::Sampler(int interval, bool profiling)
: interval_(interval),
profiling_(profiling),
synchronous_(profiling),
active_(false),
samples_taken_(0) {
data_ = new PlatformData();
}
Sampler::~Sampler() {
delete data_;
}
// Install the SIGPROF handler, then arm the profiling interval timer.
// No-op if another sampler is already active. The previous signal and timer
// dispositions are saved so Stop() can restore them.
void Sampler::Start() {
// There can only be one active sampler at the time on POSIX
// platforms.
if (active_sampler_ != NULL) return;
// Request profiling signals.
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
data_->signal_handler_installed_ = true;
// Set the itimer to generate a tick for each interval.
itimerval itimer;
itimer.it_interval.tv_sec = interval_ / 1000;
itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
// Set this sampler as the active sampler.
active_sampler_ = this;
active_ = true;
}
// Disarm the timer, restore the previous SIGPROF handler, and clear the
// active sampler.
void Sampler::Stop() {
// Restore old signal handler
if (data_->signal_handler_installed_) {
setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
sigaction(SIGPROF, &data_->old_signal_handler_, 0);
data_->signal_handler_installed_ = false;
}
// This sampler is no longer the active sampler.
active_sampler_ = NULL;
active_ = false;
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
|
/*
* Copyright (c) 2011, Intel Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <boost/format.hpp>
#include "cidAcceptedASQ_r10b.h"
#include "globals.h"
#include "grpDefs.h"
#include "../Utils/io.h"
namespace GrpGeneralCmds {
#define MAX_CMDS (65536 + 1)
// Register this test's compliance section and descriptions with the
// framework's test-description object.
CIDAcceptedASQ_r10b::CIDAcceptedASQ_r10b(
string grpName, string testName) :
Test(grpName, testName, SPECREV_10b)
{
// 63 chars allowed: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
mTestDesc.SetCompliance("revision 1.0b, section 4");
mTestDesc.SetShort( "Verify all CID values are accepted in ASQ.");
// No string size limit for the long description
mTestDesc.SetLong(
"Issue Identify cmd (65536 + 1) times and verify that the dnvme "
"assigned CID value is unique each time. Each command must be "
"completed in success and be reaped from the ACQ.");
}
// Destructor: no heap allocations outside the RsrcMngr to release.
CIDAcceptedASQ_r10b::~CIDAcceptedASQ_r10b()
{
///////////////////////////////////////////////////////////////////////////
// Allocations taken from the heap and not under the control of the
// RsrcMngr need to be freed/deleted here.
///////////////////////////////////////////////////////////////////////////
}
// Copy constructor required by the framework's Clone() machinery; state is
// never shared between copies.
CIDAcceptedASQ_r10b::
CIDAcceptedASQ_r10b(const CIDAcceptedASQ_r10b &other) :
Test(other)
{
///////////////////////////////////////////////////////////////////////////
// All pointers in this object must be NULL, never allow shallow or deep
// copies, see Test::Clone() header comment.
///////////////////////////////////////////////////////////////////////////
}
// Assignment mirrors the copy constructor: only base-class state is copied.
CIDAcceptedASQ_r10b &
CIDAcceptedASQ_r10b::operator=(const CIDAcceptedASQ_r10b
&other)
{
///////////////////////////////////////////////////////////////////////////
// All pointers in this object must be NULL, never allow shallow or deep
// copies, see Test::Clone() header comment.
///////////////////////////////////////////////////////////////////////////
Test::operator=(other);
return *this;
}
// This test is always runnable and never destructive to DUT state.
Test::RunType
CIDAcceptedASQ_r10b::RunnableCoreTest(bool preserve)
{
    ///////////////////////////////////////////////////////////////////////////
    // All code contained herein must never permanently modify the state or
    // configuration of the DUT. Permanence is defined as state or configuration
    // changes that will not be restored after a cold hard reset.
    ///////////////////////////////////////////////////////////////////////////
    (void)preserve;    // unused; suppress compiler error/warning
    return RUN_TRUE;   // This test is never destructive
}
void
CIDAcceptedASQ_r10b::RunCoreTest()
{
/** \verbatim
* Assumptions:
* 1) Test CreateResources_r10b has run prior.
* \endverbatim
*/
uint32_t isrCount;
// Lookup objs which were created in a prior test within group
SharedASQPtr asq = CAST_TO_ASQ(gRsrcMngr->GetObj(ASQ_GROUP_ID))
SharedACQPtr acq = CAST_TO_ACQ(gRsrcMngr->GetObj(ACQ_GROUP_ID))
LOG_NRM("Verifying that the ACQ is empty");
if (acq->ReapInquiry(isrCount, true) != 0) {
acq->Dump(
FileSystem::PrepDumpFile(mGrpName, mTestName, "acq",
"notEmpty"), "Test assumption have not been met");
throw FrmwkEx(HERE,
"The ACQ should not have any CE's waiting before testing");
}
LOG_NRM("Create identify cmd and assoc some buffer memory");
SharedIdentifyPtr idCmdCap = SharedIdentifyPtr(new Identify());
LOG_NRM("Force identify to request ctrlr capabilities struct");
idCmdCap->SetCNS(CNS_Controller);
SharedMemBufferPtr idMemCap = SharedMemBufferPtr(new MemBuffer());
idMemCap->InitAlignment(Identify::IDEAL_DATA_SIZE, PRP_BUFFER_ALIGNMENT,
false, 0);
send_64b_bitmask idPrpCap =
(send_64b_bitmask)(MASK_PRP1_PAGE | MASK_PRP2_PAGE);
idCmdCap->SetPrpBuffer(idPrpCap, idMemCap);
LOG_NRM("Learn initial unique command id assigned by dnvme.");
uint16_t currCID;
asq->Send(idCmdCap, currCID);
uint16_t prevCID = currCID;
for (uint32_t nCmds = 0; nCmds < MAX_CMDS; nCmds++) {
asq->Ring();
LOG_NRM("Verify unique CID #%d for Cmd #%d", currCID, nCmds + 1);
ReapVerifyCID(asq, acq, currCID);
asq->Send(idCmdCap, currCID);
if (currCID != (uint16_t)(prevCID + 1)) {
asq->Dump(FileSystem::PrepDumpFile(mGrpName, mTestName, "asq.fail"),
"Dump Entire ASQ");
acq->Dump(FileSystem::PrepDumpFile(mGrpName, mTestName, "acq.fail"),
"Dump Entire ACQ");
throw FrmwkEx(HERE, "Current CID(%d) != prev + 1(%d)", currCID,
prevCID);
}
prevCID = currCID;
}
}
// Wait for exactly one CE on the ACQ, reap it, validate it, and confirm its
// CID matches the CID dnvme assigned when the cmd was sent. Dumps both queues
// and throws on any mismatch or timeout.
void
CIDAcceptedASQ_r10b::ReapVerifyCID(SharedASQPtr asq, SharedACQPtr acq,
    uint16_t currCID)
{
    uint32_t isrCount;
    uint32_t ceRemain;
    uint32_t numReaped;
    uint32_t numCE;

    // Block (up to the cmd timeout) until at least 1 CE is visible on the ACQ.
    if (acq->ReapInquiryWaitSpecify(CALC_TIMEOUT_ms(1), 1, numCE, isrCount)
        == false) {
        acq->Dump(FileSystem::PrepDumpFile(mGrpName, mTestName, "acq.fail"),
            "Dump Entire ACQ");
        throw FrmwkEx(HERE, "Unable to see CEs for issued cmd");
    }

    // Reap exactly one CE into a fresh buffer; anything else is a failure.
    SharedMemBufferPtr ceMem = SharedMemBufferPtr(new MemBuffer());
    if ((numReaped = acq->Reap(ceRemain, ceMem, isrCount, numCE, true)) != 1) {
        acq->Dump(FileSystem::PrepDumpFile(mGrpName, mTestName, "acq.fail"),
            "Dump Entire ACQ");
        throw FrmwkEx(HERE, "Unable to reap on ACQ");
    }

    // Validate status of the CE, then check the CID field round-tripped.
    union CE *ce = (union CE *)ceMem->GetBuffer();
    ProcessCE::Validate(*ce);  // throws upon error
    if (ce->n.CID != currCID) {
        asq->Dump(FileSystem::PrepDumpFile(mGrpName, mTestName, "asq.fail"),
            "Dump Entire ASQ");
        acq->Dump(FileSystem::PrepDumpFile(mGrpName, mTestName, "acq.fail"),
            "Dump Entire ACQ");
        throw FrmwkEx(HERE, "Received CID %d but expected %d", ce->n.CID,
            currCID);
    }
}
} // namespace
|
#include "Include.h"
#include "Hell.h"
// Thread entry point: runs a minimal identd-style TCP server on port 6667 and
// reports connection activity back over IRC via BOT->cIRC.
DWORD WINAPI IdentThread(LPVOID param)
{
    // NOTE(review): `user` is zeroed below but never otherwise read or
    // written -- it appears vestigial.
    char user[12], buffer[1024];
    BOOL success = FALSE;
    SOCKET ssock,csock;
    SOCKADDR_IN ssin, csin;
    memset(&ssin, 0, sizeof(ssin));
    ssin.sin_family = AF_INET;
    // Listen on TCP 6667 on all local interfaces.
    ssin.sin_port = htons((unsigned short)6667);
    ssin.sin_addr.s_addr=INADDR_ANY;
    if ((ssock = socket(AF_INET, SOCK_STREAM, 0)) != INVALID_SOCKET) {
        if (bind(ssock, (LPSOCKADDR)&ssin, sizeof(ssin)) != SOCKET_ERROR) {
            if (listen(ssock, 5) != SOCKET_ERROR) {
                int csin_len = sizeof(csin);
                // Accept loop: one client at a time until accept() fails.
                while (1) {
                    if ((csock = accept(ssock,(LPSOCKADDR)&csin,&csin_len)) == INVALID_SOCKET)
                        break;
                    // NOTE(review): csin.sin_port is network byte order; the
                    // printed port is not passed through ntohs() -- confirm.
                    sprintf(buffer, "Client connection from IP: %s:%d.", inet_ntoa(csin.sin_addr), csin.sin_port);
                    BOT->cIRC.SendData("PRIVMSG %s :-IDENTD- [%s]\r\n",BOT->cIRC.cConf.cChan.c_str(),buffer);
                    // NOTE(review): recv() does not NUL-terminate buffer; the
                    // subsequent string uses assume it does -- confirm Split()
                    // and SendData() tolerate this.
                    if (recv(csock,buffer,sizeof(buffer),0) != SOCKET_ERROR) {
                        Split(buffer,0);
                        memset(user, 0, sizeof(user));
                        recv(csock,buffer,sizeof(buffer),0);
                        BOT->cIRC.SendData("PRIVMSG %s :-IDENTD- [%s]\r\n",BOT->cIRC.cConf.cChan.c_str(),buffer);
                        // NOTE(review): every _snprintf below overwrites the
                        // same buffer, so only the final ":376 ..." line is
                        // ever transmitted by the single send() call -- confirm
                        // whether each line was meant to be sent individually.
                        _snprintf(buffer,sizeof(buffer),":NOTICE AUTH :*** Looking up your hostname...\r\n");
                        _snprintf(buffer,sizeof(buffer),":NOTICE AUTH :*** Found your hostname, cached\r\n");
                        _snprintf(buffer,sizeof(buffer),":NOTICE AUTH :*** Checking Ident\r\n");
                        _snprintf(buffer,sizeof(buffer),":001 RiMp :Welcome to the DALnet IRC Network\r\n");
                        _snprintf(buffer,sizeof(buffer),":002 RiMp :Your host is swiftco.wa.us.dal.net\r\n");
                        _snprintf(buffer,sizeof(buffer),":003 RiMp :This server was created Thu Dec 7 2006 at 16:29:38 PST\r\n");
                        _snprintf(buffer,sizeof(buffer),":004 RiMp swiftco.wa.us.dal.net \r\n");
                        _snprintf(buffer,sizeof(buffer),":005 RiMp NETWORK=DALnet\r\n");
                        _snprintf(buffer,sizeof(buffer),":375 RiMp :- swiftco.wa.us.dal.net Message of the Day - \r\n");
                        _snprintf(buffer,sizeof(buffer),":376 RiMp :End of /MOTD command.\r\n");
                        if (send(csock,buffer,strlen(buffer),0) != SOCKET_ERROR)
                            success = TRUE;
                    }
                }
            }
        }
    }
    if (!success) {
        sprintf(buffer, "Error: server failed, returned: <%d>.", WSAGetLastError());
        BOT->cIRC.SendData("PRIVMSG %s :-IDENTD- [%s]\r\n",BOT->cIRC.cConf.cChan.c_str(),buffer);
    }
    closesocket(ssock);
    // NOTE(review): if socket()/bind()/listen() failed, csock is never
    // assigned and this closes an indeterminate handle -- confirm.
    closesocket(csock);
    ExitThread(0);
    return 0;
}
|
/*
* Copyright 2010,
* François Bleibel,
* Olivier Stasse,
*
* CNRS/AIST
*
*/
/* --------------------------------------------------------------------- */
/* --- INCLUDE --------------------------------------------------------- */
/* --------------------------------------------------------------------- */
/* --- DYNAMIC-GRAPH --- */
#include "dynamic-graph/pool.h"
#include "dynamic-graph/debug.h"
#include "dynamic-graph/entity.h"
#include <list>
#include <sstream>
#include <string>
#include <typeinfo>
using namespace dynamicgraph;
/* --------------------------------------------------------------------- */
/* --- CLASS ----------------------------------------------------------- */
/* --------------------------------------------------------------------- */
/// Lazily create and return the process-wide PoolStorage singleton.
PoolStorage *PoolStorage::getInstance() {
  if (!instance_) {
    instance_ = new PoolStorage;
  }
  return instance_;
}
/// Delete the singleton and reset the pointer so a later getInstance()
/// can recreate it.
void PoolStorage::destroy() {
  delete instance_;
  instance_ = NULL;
}
/// Destructor: deregisters and deletes every entity still in the pool.
PoolStorage::~PoolStorage() {
  dgDEBUGIN(15);

  for (Entities::iterator iter = entityMap.begin(); iter != entityMap.end();
       // Here, this is normal that the next iteration is at the beginning
       // of the map as deregisterEntity remove the element iter from the map.
       iter = entityMap.begin()) {
    dgDEBUG(15) << "Delete \"" << (iter->first) << "\"" << std::endl;
    // Grab the pointer before deregistration invalidates the iterator.
    Entity *entity = iter->second;
    deregisterEntity(iter);
    delete (entity);
  }
  instance_ = 0;
  dgDEBUGOUT(15);
}
/* --------------------------------------------------------------------- */
/// Add an entity to the pool under the given name; throws OBJECT_CONFLICT
/// if the name is already taken.
void PoolStorage::registerEntity(const std::string &entname, Entity *ent) {
  // Guard clause: names must be unique within the pool.
  if (entityMap.count(entname) != 0) {
    throw ExceptionFactory(
        ExceptionFactory::OBJECT_CONFLICT,
        "Another entity already defined with the same name. ",
        "Entity name is <%s>.", entname.c_str());
  }
  dgDEBUG(10) << "Register entity <" << entname << "> in the pool."
              << std::endl;
  entityMap[entname] = ent;
}
void PoolStorage::deregisterEntity(const std::string &entname) {
Entities::iterator entkey = entityMap.find(entname);
if (entkey == entityMap.end()) // key doesnot exist
{
throw ExceptionFactory(ExceptionFactory::OBJECT_CONFLICT,
"Entity not defined yet. ", "Entity name is <%s>.",
entname.c_str());
} else {
dgDEBUG(10) << "Deregister entity <" << entname << "> from the pool."
<< std::endl;
deregisterEntity(entkey);
}
}
/// Erase one map entry; does NOT delete the Entity object itself.
void PoolStorage::deregisterEntity(const Entities::iterator &entity) {
  entityMap.erase(entity);
}
/// Look up an entity by name; throws UNREFERED_OBJECT when absent.
Entity &PoolStorage::getEntity(const std::string &name) {
  dgDEBUG(25) << "Get <" << name << ">" << std::endl;
  const Entities::iterator entry = entityMap.find(name);
  if (entry == entityMap.end()) {
    DG_THROW ExceptionFactory(ExceptionFactory::UNREFERED_OBJECT,
                              "Unknown entity.", " (while calling <%s>)",
                              name.c_str());
  }
  return *entry->second;
}
/// Read-only access to the full name -> Entity* registry.
const PoolStorage::Entities &PoolStorage::getEntityMap() const {
  return entityMap;
}
/// True if an entity with this name is registered in the pool.
bool PoolStorage::existEntity(const std::string &name) {
  // std::map::count is 0 or 1, so this equals find(name) != end().
  return entityMap.count(name) != 0;
}
bool PoolStorage::existEntity(const std::string &name, Entity *&ptr) {
Entities::iterator entPtr = entityMap.find(name);
if (entPtr == entityMap.end())
return false;
else {
ptr = entPtr->second;
return true;
}
}
void PoolStorage::clearPlugin(const std::string &name) {
dgDEBUGIN(5);
std::list<Entity *> toDelete;
for (Entities::iterator entPtr = entityMap.begin(); entPtr != entityMap.end();
++entPtr)
if (entPtr->second->getClassName() == name)
toDelete.push_back(entPtr->second);
for (std::list<Entity *>::iterator iter = toDelete.begin();
iter != toDelete.end(); ++iter)
delete (Entity *)*iter;
dgDEBUGOUT(5);
}
/* --------------------------------------------------------------------- */
#include <dynamic-graph/entity.h>
#ifdef WIN32
#include <time.h>
#endif /*WIN32*/
/// Dump the whole pool as a GraphViz "dot" file named aFileName. The graph
/// label is the file name with directory and extension stripped.
void PoolStorage::writeGraph(const std::string &aFileName) {
  // Strip the extension; substr(0, npos) keeps the whole name when there
  // is no '.' at all.
  size_t IdxPointFound = aFileName.rfind(".");
  std::string tmp1 = aFileName.substr(0, IdxPointFound);
  size_t IdxSeparatorFound = aFileName.rfind("/");
  std::string GenericName;
  if (IdxSeparatorFound != std::string::npos)
    // Fix: skip past the '/' itself; previously the separator character
    // was kept as the first character of the graph name.
    GenericName = tmp1.substr(IdxSeparatorFound + 1);
  else
    GenericName = tmp1;

  /* Reading local time */
  time_t ltime;
  ltime = time(NULL);
  struct tm ltimeformatted;
#ifdef WIN32
  // Fix: restore the '&' operators lost to character mangling
  // ("&ltime..." had been corrupted into "<ime...").
  localtime_s(&ltimeformatted, &ltime);
#else
  localtime_r(&ltime, &ltimeformatted);
#endif /*WIN32*/

  /* Opening the file and writing the first comment. */
  std::ofstream GraphFile(aFileName.c_str(), std::ofstream::out);
  GraphFile << "/* This graph has been automatically generated. " << std::endl;
  GraphFile << " " << 1900 + ltimeformatted.tm_year
            << " Month: " << 1 + ltimeformatted.tm_mon
            << " Day: " << ltimeformatted.tm_mday
            << " Time: " << ltimeformatted.tm_hour << ":"
            << ltimeformatted.tm_min;
  GraphFile << " */" << std::endl;
  GraphFile << "digraph \"" << GenericName << "\" { ";
  GraphFile << "\t graph [ label=\"" << GenericName
            << "\" bgcolor = white rankdir=LR ]" << std::endl
            << "\t node [ fontcolor = black, color = black,"
            << "fillcolor = gold1, style=filled, shape=box ] ; " << std::endl;
  GraphFile << "\tsubgraph cluster_Entities { " << std::endl;
  GraphFile << "\t} " << std::endl;

  // One node per registered entity, then let each entity emit its edges.
  for (Entities::iterator iter = entityMap.begin(); iter != entityMap.end();
       ++iter) {
    Entity *ent = iter->second;
    GraphFile << "\"" << ent->getName() << "\""
              << " [ label = \"" << ent->getName() << "\" ," << std::endl
              << " fontcolor = black, color = black, fillcolor=cyan,"
              << " style=filled, shape=box ]" << std::endl;
    ent->writeGraph(GraphFile);
  }
  GraphFile << "}" << std::endl;
  GraphFile.close();
}
void PoolStorage::writeCompletionList(std::ostream &os) {
for (Entities::iterator iter = entityMap.begin(); iter != entityMap.end();
++iter) {
Entity *ent = iter->second;
ent->writeCompletionList(os);
}
}
/// Parse "object.function" from the stream: everything up to the first '.'
/// becomes objName, the next whitespace-delimited token becomes funName.
/// Returns false when no '.' separator is present (not an object method).
/// Uses std::getline into a std::string, removing the previous 127-character
/// fixed-buffer limit on the object name.
static bool objectNameParser(std::istringstream &cmdparse, std::string &objName,
                             std::string &funName) {
  cmdparse >> std::ws;                 // skip leading whitespace
  std::getline(cmdparse, objName, '.');
  if (!cmdparse.good())                // '.' never found before EOF
    return false;
  cmdparse >> funName;
  return true;
}
/// Resolve a "object.signal" path to the signal object itself; throws
/// UNREFERED_SIGNAL on a malformed path.
SignalBase<int> &PoolStorage::getSignal(std::istringstream &sigpath) {
  std::string objname, signame;
  if (!objectNameParser(sigpath, objname, signame)) {
    DG_THROW ExceptionFactory(ExceptionFactory::UNREFERED_SIGNAL,
                              "Parse error in signal name");
  }
  return getEntity(objname).getSignal(signame);
}
// Singleton storage, lazily allocated by getInstance().
PoolStorage *PoolStorage::instance_ = 0;
|
#ifndef OSMIUM_IO_HEADER_HPP
#define OSMIUM_IO_HEADER_HPP
/*
This file is part of Osmium (http://osmcode.org/libosmium).
Copyright 2013-2018 Jochen Topf <jochen@topf.org> and others (see README).
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
#include <osmium/osm/box.hpp>
#include <osmium/util/options.hpp>
#include <initializer_list>
#include <vector>
namespace osmium {
namespace io {
/**
* Meta information from the header of an OSM file.
*
* The header can contain any number of bounding boxes, although
* usually there is only a single one (or none). PBF files only
* allow a single bounding box, but XML files can have multiple ones,
* although it is unusual and the semantics are unclear, so it is
* discouraged to create files with multiple bounding boxes.
*
* The header contains a flag telling you whether this file can
* contain multiple versions of the same object. This is true for
* history files and for change files, but not for normal OSM data
* files. Not all OSM file formats can distinguish between those
* cases, so the flag might be wrong.
*
* In addition the header can contain any number of key-value pairs
* with additional information. Most often this is used to set the
* "generator", the program that generated the file. Depending on
* the file format some of these key-value pairs are handled
* specially. The the Options parent class for details on how to
* set and get those key-value pairs.
*/
/**
 * Meta information from the header of an OSM file. See the file-level
 * comment above for the semantics of multiple bounding boxes and of
 * the multiple-object-versions flag; arbitrary key-value metadata is
 * inherited from osmium::Options.
 */
class Header : public osmium::Options {

    /// All bounding boxes found in the header (usually zero or one).
    std::vector<osmium::Box> m_boxes{};

    /// True when the file may contain several versions of the same
    /// object (history/change files); false for plain data files.
    bool m_has_multiple_object_versions = false;

public:

    Header() = default;

    Header(const std::initializer_list<osmium::Options::value_type>& values) :
        Options(values) {
    }

    /// Mutable access to the list of bounding boxes.
    std::vector<osmium::Box>& boxes() noexcept {
        return m_boxes;
    }

    /// Read-only access to the list of bounding boxes.
    const std::vector<osmium::Box>& boxes() const noexcept {
        return m_boxes;
    }

    /// Replace the whole list of bounding boxes.
    /// @returns The header itself to allow chaining.
    Header& boxes(const std::vector<osmium::Box>& boxes) noexcept {
        m_boxes = boxes;
        return *this;
    }

    /// First (or only) bounding box; a default-constructed, invalid
    /// box when the header has none.
    osmium::Box box() const {
        if (m_boxes.empty()) {
            return osmium::Box{};
        }
        return m_boxes.front();
    }

    /// Union of all bounding boxes in the header. This is usually what
    /// you want unless you handle multiple boxes yourself. Returns an
    /// empty, invalid box when there are none.
    osmium::Box joined_boxes() const {
        osmium::Box result;
        for (const auto& one_box : m_boxes) {
            result.extend(one_box);
        }
        return result;
    }

    /// Append one bounding box to the header.
    /// @returns The header itself to allow chaining.
    Header& add_box(const osmium::Box& box) {
        m_boxes.push_back(box);
        return *this;
    }

    /// Can this file contain multiple versions of the same object?
    bool has_multiple_object_versions() const noexcept {
        return m_has_multiple_object_versions;
    }

    /// Set the multiple-object-versions flag.
    /// @returns The header itself to allow chaining.
    Header& set_has_multiple_object_versions(bool value) noexcept {
        m_has_multiple_object_versions = value;
        return *this;
    }

}; // class Header
} // namespace io
} // namespace osmium
#endif // OSMIUM_IO_HEADER_HPP
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the Apache 2.0 License.
#include "hash.h"
#include <mbedtls/sha256.h>
#include <stdexcept>
extern "C"
{
#include <evercrypt/EverCrypt_Hash.h>
}
using namespace std;
namespace crypto
{
constexpr auto block_size = 64u; // No way to ask evercrypt for this
void Sha256Hash::mbedtls_sha256(const CBuffer& data, uint8_t* h)
{
mbedtls_sha256_context ctx;
mbedtls_sha256_starts_ret(&ctx, 0);
mbedtls_sha256_update_ret(&ctx, data.p, data.rawSize());
mbedtls_sha256_finish_ret(&ctx, h);
mbedtls_sha256_free(&ctx);
}
  // Default ctor: all-zero digest.
  Sha256Hash::Sha256Hash() : h{0} {}
  // One-shot ctor: hash `data` immediately (EverCrypt backend) into h.
  Sha256Hash::Sha256Hash(const CBuffer& data) : h{0}
  {
    evercrypt_sha256(data, h.data());
  }
class CSha256HashImpl
{
public:
CSha256HashImpl()
{
state = EverCrypt_Hash_create(Spec_Hash_Definitions_SHA2_256);
EverCrypt_Hash_init(state);
}
void finalize(std::array<uint8_t, Sha256Hash::SIZE>& h)
{
EverCrypt_Hash_finish(state, h.data());
}
void update(const CBuffer& data)
{
const auto data_begin = const_cast<uint8_t*>(data.p);
const auto size = data.rawSize();
const auto full_blocks = size / block_size;
const auto full_blocks_size = full_blocks * block_size;
const auto full_blocks_end = data_begin + full_blocks_size;
// update_multi takes complete blocks
EverCrypt_Hash_update_multi(state, data_begin, full_blocks_size);
// update_last takes start of last chunk (NOT a full block!), and _total
// size_
EverCrypt_Hash_update_last(state, full_blocks_end, size);
}
~CSha256HashImpl()
{
EverCrypt_Hash_free(state);
}
private:
EverCrypt_Hash_state_s* state;
};
  // One-shot SHA-256 via the incremental EverCrypt wrapper.
  void Sha256Hash::evercrypt_sha256(const CBuffer& data, uint8_t* h)
  {
    CSha256HashImpl csha;
    csha.update(data);
    // NOTE(review): assumes h points at (at least) Sha256Hash::SIZE bytes,
    // reinterpreted as the std::array finalize() expects -- confirm callers.
    csha.finalize(reinterpret_cast<std::array<uint8_t, Sha256Hash::SIZE>&>(*h));
  }
  // Ctor allocates the EverCrypt-backed impl; dtor defined here where
  // CSha256HashImpl is a complete type.
  CSha256Hash::CSha256Hash() : p(std::make_unique<CSha256HashImpl>()) {}
  CSha256Hash::~CSha256Hash() {}
void CSha256Hash::update_hash(CBuffer data)
{
if (p == nullptr)
{
throw std::logic_error("Attempting to use hash after it was finalized");
}
p->update(data);
}
Sha256Hash CSha256Hash::finalize()
{
if (p == nullptr)
{
throw std::logic_error("Attempting to use hash after it was finalized");
}
Sha256Hash h;
p->finalize(h.h);
p = nullptr;
return h;
}
}
|
#include <common.cxx>
#include <kernel/timers.hpp>
using namespace std::chrono;
extern delegate<uint64_t()> systime_override;
static uint64_t current_time = 0;
static int magic_performed = 0;
// Timer callback under test: bumps the global counter; the int timer-id
// argument is ignored.
void perform_magic(int) {
  ++magic_performed;
}
// Install a controllable time source and bring the timer subsystem up with
// no-op start/stop delegates; everything must start out empty.
CASE("Initialize timer system")
{
  systime_override =
    [] () -> uint64_t { return current_time; };

  Timers::init(
    [] (Timers::duration_t) {},
    [] () {}
  );
  Timers::ready();
  EXPECT(Timers::is_ready());
  EXPECT(Timers::active() == 0);
  EXPECT(Timers::existing() == 0);
  EXPECT(Timers::free() == 0);
}
// A 1ms oneshot must fire only once the fake clock reaches 1ms (1e6 ns).
CASE("Start a single timer, execute it")
{
  current_time = 0;
  magic_performed = 0;
  // start timer
  Timers::oneshot(1ms, perform_magic);
  EXPECT(Timers::active() == 1);
  EXPECT(Timers::existing() == 1);
  EXPECT(Timers::free() == 0);
  // execute timer interrupt
  Timers::timers_handler();
  // verify timer did not execute
  EXPECT(magic_performed == 0);
  // set time to where it should happen
  current_time = 1000000;
  // execute timer interrupt
  Timers::timers_handler();
  // verify timer did execute
  EXPECT(magic_performed == 1);
}
// Sweep the clock in 1us steps up to (but excluding) 1ms: the oneshot must
// not fire early, then fire exactly at the deadline.
CASE("Start a single timer, execute it precisely")
{
  current_time = 0;
  magic_performed = 0;
  // start timer
  Timers::oneshot(1ms, perform_magic);
  // verify timer did not execute for all times before 1ms
  for (uint64_t time = 0; time < 1000000; time += 1000)
  {
    Timers::timers_handler();
    EXPECT(magic_performed == 0);
  }
  // set time to where it should happen
  current_time = 1000000;
  // execute timer interrupt
  Timers::timers_handler();
  // verify timer did execute
  EXPECT(magic_performed == 1);
}
// 1000 oneshots with deadlines spread over [1ms, 2ms): none may fire before
// 1ms, and jumping the clock to 2ms must fire them all in one pass, after
// which every timer slot is recycled to the free list.
CASE("Start many timers, execute all at once")
{
  current_time = 0;
  magic_performed = 0;
  // start many timers, starting at 1ms and ending at 2ms
  for (int i = 0; i < 1000; i++)
      Timers::oneshot(microseconds(1000 + i), perform_magic);
  // verify timer did not execute for all times before 1ms
  for (uint64_t time = 0; time < 1000000; time += 1000)
  {
    Timers::timers_handler();
    EXPECT(magic_performed == 0);
  }
  current_time = 2000000;
  // verify many timers executed
  Timers::timers_handler();
  EXPECT(magic_performed == 1000);
  EXPECT(Timers::active() == 0);
  EXPECT(Timers::existing() == 1000);
  EXPECT(Timers::free() == 1000);
  // restore time
  current_time = 0;
}
// Stopping a timer id that was never issued must throw.
CASE("Catch some Timers exceptions")
{
  EXPECT_THROWS(Timers::stop(-1));
}
// A oneshot stopped before its deadline must never fire, even after the
// clock passes that deadline.
CASE("Stop a timer")
{
  current_time = 0;
  magic_performed = 0;
  // start timer
  int id = Timers::oneshot(1ms, perform_magic);
  EXPECT(Timers::active() == 1);
  // execute timer interrupt
  Timers::timers_handler();
  // verify timer did not execute
  EXPECT(magic_performed == 0);
  // set time to where it should happen
  current_time = 1000000;
  // stop timer
  Timers::stop(id);
  // execute timer interrupt
  Timers::timers_handler();
  // verify timer did not execute, since it was stopped
  EXPECT(magic_performed == 0);
}
// A 1us periodic timer ticked every 1000ns must have fired once per
// elapsed microsecond at each step.
CASE("Test a periodic timer")
{
  current_time = 0;
  magic_performed = 0;
  // start timer
  int id = Timers::periodic(microseconds(1), perform_magic);
  EXPECT(Timers::active() == 1);

  for (int i = 0; i < 100000; i += 1000)
  {
    current_time = i;
    // execute timer interrupt
    Timers::timers_handler();
    // verify timer did not execute
    EXPECT(magic_performed == i / 1000);
  }
  // stop timer
  Timers::stop(id);
  current_time = 0;
}
#include <util/timer.hpp>
// Smoke-test the Timer wrapper: start/restart report running, stop does not.
CASE("Test util timer")
{
  Timer timer{ [] () {} };
  EXPECT(!timer.is_running());
  timer.start( std::chrono::milliseconds(1), [] () {});
  EXPECT(timer.is_running());
  timer.restart( std::chrono::milliseconds(1), [] () {});
  EXPECT(timer.is_running());
  timer.stop();
  EXPECT(!timer.is_running());
}
|
// Copyright (c) 2017-2018 Dr. Colin Hirsch and Daniel Frey
// Please see LICENSE for license or visit https://github.com/taocpp/json/
#ifndef TAO_JSON_MSGPACK_TO_STRING_HPP
#define TAO_JSON_MSGPACK_TO_STRING_HPP
#include <string>
#include "../value.hpp"
#include "../events/from_value.hpp"
#include "../events/transformer.hpp"
#include "events/to_string.hpp"
namespace tao
{
namespace json
{
namespace msgpack
{
// Serialize a JSON value to its MsgPack encoding as a std::string, by
// replaying the value through an events pipeline (optional Transformers
// first, the MsgPack to_string consumer last).
template< template< typename... > class... Transformers, template< typename... > class Traits, typename Base >
std::string to_string( const basic_value< Traits, Base >& v )
{
   json::events::transformer< msgpack::events::to_string, Transformers... > consumer;
   json::events::from_value( consumer, v );
   return consumer.value();
}
} // namespace msgpack
} // namespace json
} // namespace tao
#endif
|
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <climits>
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
#include <grpcpp/test/default_reactor_test_peer.h>
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "src/proto/grpc/testing/echo_mock.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include <grpcpp/test/mock_stream.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <iostream>
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::SetArgPointee;
using ::testing::WithArg;
using ::testing::_;
using grpc::testing::DefaultReactorTestPeer;
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
using grpc::testing::EchoTestService;
using grpc::testing::MockClientReaderWriter;
using std::chrono::system_clock;
using std::vector;
namespace grpc {
namespace testing {
namespace {
// Drives every RPC shape (unary, client-/server-/bidi-streaming) through an
// EchoTestService stub. The same driver is run against both the real stub
// and a gmock stub, so the tests assert identical observable behavior.
class FakeClient {
 public:
  explicit FakeClient(EchoTestService::StubInterface* stub) : stub_(stub) {}

  // Unary echo: the response must equal the request message.
  void DoEcho() {
    ClientContext context;
    EchoRequest request;
    EchoResponse response;
    request.set_message("hello world");
    Status s = stub_->Echo(&context, request, &response);
    EXPECT_EQ(request.message(), response.message());
    EXPECT_TRUE(s.ok());
  }

  // Client streaming: two writes must be concatenated by the server.
  void DoRequestStream() {
    EchoRequest request;
    EchoResponse response;
    ClientContext context;
    grpc::string msg("hello");
    grpc::string exp(msg);
    std::unique_ptr<ClientWriterInterface<EchoRequest>> cstream =
        stub_->RequestStream(&context, &response);
    request.set_message(msg);
    EXPECT_TRUE(cstream->Write(request));
    msg = ", world";
    request.set_message(msg);
    exp.append(msg);
    EXPECT_TRUE(cstream->Write(request));
    cstream->WritesDone();
    Status s = cstream->Finish();
    EXPECT_EQ(exp, response.message());
    EXPECT_TRUE(s.ok());
  }

  // Server streaming: expects exactly two tokens back, joined with a space
  // they must reproduce the original request message.
  void DoResponseStream() {
    EchoRequest request;
    EchoResponse response;
    request.set_message("hello world");
    ClientContext context;
    std::unique_ptr<ClientReaderInterface<EchoResponse>> cstream =
        stub_->ResponseStream(&context, request);
    grpc::string exp = "";
    EXPECT_TRUE(cstream->Read(&response));
    exp.append(response.message() + " ");
    EXPECT_TRUE(cstream->Read(&response));
    exp.append(response.message());
    EXPECT_FALSE(cstream->Read(&response));
    EXPECT_EQ(request.message(), exp);
    Status s = cstream->Finish();
    EXPECT_TRUE(s.ok());
  }

  // Bidi streaming: three write/read round trips, each echoed verbatim.
  void DoBidiStream() {
    EchoRequest request;
    EchoResponse response;
    ClientContext context;
    grpc::string msg("hello");
    std::unique_ptr<ClientReaderWriterInterface<EchoRequest, EchoResponse>>
        stream = stub_->BidiStream(&context);
    request.set_message(msg + "0");
    EXPECT_TRUE(stream->Write(request));
    EXPECT_TRUE(stream->Read(&response));
    EXPECT_EQ(response.message(), request.message());
    request.set_message(msg + "1");
    EXPECT_TRUE(stream->Write(request));
    EXPECT_TRUE(stream->Read(&response));
    EXPECT_EQ(response.message(), request.message());
    request.set_message(msg + "2");
    EXPECT_TRUE(stream->Write(request));
    EXPECT_TRUE(stream->Read(&response));
    EXPECT_EQ(response.message(), request.message());
    stream->WritesDone();
    EXPECT_FALSE(stream->Read(&response));
    Status s = stream->Finish();
    EXPECT_TRUE(s.ok());
  }

  // Swap in a different (e.g. mocked) stub; does not take ownership.
  void ResetStub(EchoTestService::StubInterface* stub) { stub_ = stub; }

 private:
  EchoTestService::StubInterface* stub_;  // non-owning
};
// Callback-API echo service used by the reactor tests. An empty request
// message is deliberately treated as INVALID_ARGUMENT so tests can observe
// both success and failure statuses; a real service would not add this
// variance, but it improves coverage here.
class CallbackTestServiceImpl
    : public EchoTestService::ExperimentalCallbackService {
 public:
  experimental::ServerUnaryReactor* Echo(
      experimental::CallbackServerContext* context, const EchoRequest* request,
      EchoResponse* response) override {
    auto* reactor = context->DefaultReactor();
    if (request->message().empty()) {
      reactor->Finish(Status(StatusCode::INVALID_ARGUMENT, "Invalid request"));
    } else {
      response->set_message(request->message());
      reactor->Finish(Status::OK);
    }
    return reactor;
  }
};
// Fixture for the callback/reactor tests: a fresh service per test.
class MockCallbackTest : public ::testing::Test {
 protected:
  CallbackTestServiceImpl service_;
  ServerContext context_;
};
// The reactor's completion callback may run asynchronously: wait on a
// condvar until it has delivered the final Status, then verify success.
TEST_F(MockCallbackTest, MockedCallSucceedsWithWait) {
  experimental::CallbackServerContext ctx;
  EchoRequest req;
  EchoResponse resp;
  grpc::internal::Mutex mu;
  grpc::internal::CondVar cv;
  grpc::Status status;
  bool status_set = false;
  DefaultReactorTestPeer peer(&ctx, [&](::grpc::Status s) {
    grpc::internal::MutexLock l(&mu);
    status_set = true;
    status = std::move(s);
    cv.Signal();
  });

  req.set_message("mock 1");
  auto* reactor = service_.Echo(&ctx, &req, &resp);
  // NOTE(review): the predicate takes the lock itself rather than WaitUntil
  // being called under an already-held MutexLock -- confirm this matches
  // grpc::internal::CondVar::WaitUntil's locking contract.
  cv.WaitUntil(&mu, [&] {
    grpc::internal::MutexLock l(&mu);
    return status_set;
  });
  EXPECT_EQ(reactor, peer.reactor());
  EXPECT_TRUE(peer.test_status_set());
  EXPECT_TRUE(peer.test_status().ok());
  EXPECT_TRUE(status_set);
  EXPECT_TRUE(status.ok());
  EXPECT_EQ(req.message(), resp.message());
}
// Non-empty request through the callback service must finish with OK.
TEST_F(MockCallbackTest, MockedCallSucceeds) {
  experimental::CallbackServerContext ctx;
  EchoRequest req;
  EchoResponse resp;
  DefaultReactorTestPeer peer(&ctx);

  req.set_message("ha ha, consider yourself mocked.");
  auto* reactor = service_.Echo(&ctx, &req, &resp);
  EXPECT_EQ(reactor, peer.reactor());
  EXPECT_TRUE(peer.test_status_set());
  EXPECT_TRUE(peer.test_status().ok());
}
// An empty (default) request must be rejected with INVALID_ARGUMENT.
TEST_F(MockCallbackTest, MockedCallFails) {
  experimental::CallbackServerContext ctx;
  EchoRequest req;
  EchoResponse resp;
  DefaultReactorTestPeer peer(&ctx);

  auto* reactor = service_.Echo(&ctx, &req, &resp);
  EXPECT_EQ(reactor, peer.reactor());
  EXPECT_TRUE(peer.test_status_set());
  EXPECT_EQ(peer.test_status().error_code(), StatusCode::INVALID_ARGUMENT);
}
// Real (synchronous) echo service the in-process server runs; each RPC
// shape mirrors what FakeClient expects.
class TestServiceImpl : public EchoTestService::Service {
 public:
  // Unary: echo the message back unchanged.
  Status Echo(ServerContext* /*context*/, const EchoRequest* request,
              EchoResponse* response) override {
    response->set_message(request->message());
    return Status::OK;
  }

  // Client streaming: concatenate all received messages into one response.
  Status RequestStream(ServerContext* /*context*/,
                       ServerReader<EchoRequest>* reader,
                       EchoResponse* response) override {
    EchoRequest request;
    grpc::string resp("");
    while (reader->Read(&request)) {
      gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
      resp.append(request.message());
    }
    response->set_message(resp);
    return Status::OK;
  }

  // Server streaming: emit one response per space-separated token.
  Status ResponseStream(ServerContext* /*context*/, const EchoRequest* request,
                        ServerWriter<EchoResponse>* writer) override {
    EchoResponse response;
    vector<grpc::string> tokens = split(request->message());
    for (const grpc::string& token : tokens) {
      response.set_message(token);
      writer->Write(response);
    }
    return Status::OK;
  }

  // Bidi: echo each message back as it arrives.
  Status BidiStream(
      ServerContext* /*context*/,
      ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
    EchoRequest request;
    EchoResponse response;
    while (stream->Read(&request)) {
      gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
      response.set_message(request.message());
      stream->Write(response);
    }
    return Status::OK;
  }

 private:
  // Split on single spaces, dropping empty tokens.
  const vector<grpc::string> split(const grpc::string& input) {
    grpc::string buff("");
    vector<grpc::string> result;
    for (auto n : input) {
      if (n != ' ') {
        buff += n;
        continue;
      }
      if (buff == "") continue;
      result.push_back(buff);
      buff = "";
    }
    // Flush the trailing token, if any.
    if (buff != "") result.push_back(buff);
    return result;
  }
};
// Fixture that spins up a real in-process server on an unused port; tests
// call ResetStub() to get a channel-backed stub to it.
class MockTest : public ::testing::Test {
 protected:
  MockTest() {}
  void SetUp() override {
    int port = grpc_pick_unused_port_or_die();
    server_address_ << "localhost:" << port;
    // Setup server
    ServerBuilder builder;
    builder.AddListeningPort(server_address_.str(),
                             InsecureServerCredentials());
    builder.RegisterService(&service_);
    server_ = builder.BuildAndStart();
  }

  void TearDown() override { server_->Shutdown(); }

  // (Re)create the real stub against the running server.
  void ResetStub() {
    std::shared_ptr<Channel> channel = grpc::CreateChannel(
        server_address_.str(), InsecureChannelCredentials());
    stub_ = grpc::testing::EchoTestService::NewStub(channel);
  }

  std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
  std::unique_ptr<Server> server_;
  std::ostringstream server_address_;
  TestServiceImpl service_;
};
// Do one real rpc and one mocked one
// Do one real rpc and one mocked one: the mocked stub must satisfy the
// same FakeClient expectations as the real server did.
TEST_F(MockTest, SimpleRpc) {
  ResetStub();
  FakeClient client(stub_.get());
  client.DoEcho();
  MockEchoTestServiceStub stub;
  EchoResponse resp;
  resp.set_message("hello world");
  EXPECT_CALL(stub, Echo(_, _, _))
      .Times(AtLeast(1))
      .WillOnce(DoAll(SetArgPointee<2>(resp), Return(Status::OK)));
  client.ResetStub(&stub);
  client.DoEcho();
}
// Client-streaming against the real server, then against a mocked writer
// that accepts two writes and returns the pre-canned concatenated response.
TEST_F(MockTest, ClientStream) {
  ResetStub();
  FakeClient client(stub_.get());
  client.DoRequestStream();

  MockEchoTestServiceStub stub;
  // Owned by the stub wrapper after RequestStreamRaw returns it.
  auto w = new MockClientWriter<EchoRequest>();
  EchoResponse resp;
  resp.set_message("hello, world");

  EXPECT_CALL(*w, Write(_, _)).Times(2).WillRepeatedly(Return(true));
  EXPECT_CALL(*w, WritesDone());
  EXPECT_CALL(*w, Finish()).WillOnce(Return(Status::OK));

  EXPECT_CALL(stub, RequestStreamRaw(_, _))
      .WillOnce(DoAll(SetArgPointee<1>(resp), Return(w)));
  client.ResetStub(&stub);
  client.DoRequestStream();
}
// Server-streaming RPC: the mocked reader yields two messages then EOF.
TEST_F(MockTest, ServerStream) {
ResetStub();
FakeClient client(stub_.get());
client.DoResponseStream();
MockEchoTestServiceStub stub;
// Owned by the ClientReader wrapper returned from ResponseStreamRaw.
auto r = new MockClientReader<EchoResponse>();
EchoResponse resp1;
resp1.set_message("hello");
EchoResponse resp2;
resp2.set_message("world");
EXPECT_CALL(*r, Read(_))
.WillOnce(DoAll(SetArgPointee<0>(resp1), Return(true)))
.WillOnce(DoAll(SetArgPointee<0>(resp2), Return(true)))
.WillOnce(Return(false));
EXPECT_CALL(*r, Finish()).WillOnce(Return(Status::OK));
EXPECT_CALL(stub, ResponseStreamRaw(_, _)).WillOnce(Return(r));
client.ResetStub(&stub);
client.DoResponseStream();
}
// gmock action: copy the saved request message into the response out-param.
ACTION_P(copy, msg) { arg0->set_message(msg->message()); }
// Bidi-streaming RPC: each mocked Read echoes back whatever the last
// Write saved into `msg`, three times, then EOF.
TEST_F(MockTest, BidiStream) {
ResetStub();
FakeClient client(stub_.get());
client.DoBidiStream();
MockEchoTestServiceStub stub;
// Owned by the ClientReaderWriter wrapper returned from BidiStreamRaw.
auto rw = new MockClientReaderWriter<EchoRequest, EchoResponse>();
EchoRequest msg;
EXPECT_CALL(*rw, Write(_, _))
.Times(3)
.WillRepeatedly(DoAll(SaveArg<0>(&msg), Return(true)));
EXPECT_CALL(*rw, Read(_))
.WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
.WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
.WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
.WillOnce(Return(false));
EXPECT_CALL(*rw, WritesDone());
EXPECT_CALL(*rw, Finish()).WillOnce(Return(Status::OK));
EXPECT_CALL(stub, BidiStreamRaw(_)).WillOnce(Return(rw));
client.ResetStub(&stub);
client.DoBidiStream();
}
} // namespace
} // namespace testing
} // namespace grpc
// Standard gtest entry point: set up the gRPC test environment, then run
// every registered TEST_F above.
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(argc, argv);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
class Solution {
public:
    // Walk every cell; each land cell (value 1) contributes one unit of
    // perimeter per side that faces either the grid boundary or water (0).
    int islandPerimeter(std::vector<std::vector<int>>& grid) {
        const int rows = static_cast<int>(grid.size());
        const int cols = static_cast<int>(grid[0].size());
        int perimeter = 0;
        for (int r = 0; r < rows; ++r) {
            for (int c = 0; c < cols; ++c) {
                if (grid[r][c] != 1) continue;
                // Up: exposed on the first row or when the cell above is water.
                if (r == 0 || grid[r - 1][c] == 0) ++perimeter;
                // Down: exposed on the last row or when the cell below is water.
                if (r == rows - 1 || grid[r + 1][c] == 0) ++perimeter;
                // Left: exposed on the first column or when the left cell is water.
                if (c == 0 || grid[r][c - 1] == 0) ++perimeter;
                // Right: exposed on the last column or when the right cell is water.
                if (c == cols - 1 || grid[r][c + 1] == 0) ++perimeter;
            }
        }
        return perimeter;
    }
};
|
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef COMMANDDESCRIPTIONPROVIDER_HXX
#define COMMANDDESCRIPTIONPROVIDER_HXX
/** === begin UNO includes === **/
#include <com/sun/star/frame/XModel.hpp>
/** === end UNO includes === **/
#include <comphelper/componentcontext.hxx>
#include <boost/shared_ptr.hpp>
//........................................................................
namespace frm
{
//........................................................................
//====================================================================
//= ICommandDescriptionProvider
//====================================================================
/** Abstract interface resolving a UI command URL (e.g. ".uno:Copy") to a
    human-readable description. SAL_NO_VTABLE: never instantiated directly,
    only used through (shared) pointers to concrete implementations. */
class SAL_NO_VTABLE ICommandDescriptionProvider
{
public:
/// @param _rCommandURL  the command URL to describe
/// @return the localized description, or an empty string if unknown
virtual ::rtl::OUString getCommandDescription( const ::rtl::OUString& _rCommandURL ) const = 0;
virtual ~ICommandDescriptionProvider() { }
};
typedef ::boost::shared_ptr< const ICommandDescriptionProvider > PCommandDescriptionProvider;
//=====================================================================
//= factory
//=====================================================================
PCommandDescriptionProvider
createDocumentCommandDescriptionProvider(
const ::comphelper::ComponentContext& _rContext,
const ::com::sun::star::uno::Reference< ::com::sun::star::frame::XModel >& _rxDocument
);
//........................................................................
} // namespace frm
//........................................................................
#endif // COMMANDDESCRIPTIONPROVIDER_HXX
|
#pragma once
#include <vector>
#include <cassert>
/**
* manacher(S): return the maximum palindromic substring of S centered at each point
*
* Input: string (or vector) of length N (no restrictions on character-set)
* Output: vector res of length 2*N+1
* For any 0 <= i <= 2*N:
* * i % 2 == res[i] % 2
* * the half-open substring S[(i-res[i])/2, (i+res[i])/2) is a palindrome of length res[i]
* * For odd palindromes, take odd i, and vice versa
*/
template <typename V> std::vector<int> manacher(const V& S) {
    // Classic Manacher over 2*N+1 virtual centers (characters and gaps).
    // len[c] is the longest palindrome length centered at virtual index c;
    // `bound` is (twice) the rightmost reach found so far and `mirror`
    // tracks the center symmetric to c inside that reach.
    const int N = int(S.size());
    std::vector<int> len(2*N+1, 0);
    int mirror = -1, bound = 0;
    for (int c = 1; c < 2*N; c++, mirror--) {
        if (c > bound) {
            // Outside any known palindrome: start fresh.
            bound = c+1;
            len[c] = 1;
        } else {
            // Inside: copy the mirrored center's answer as a lower bound.
            len[c] = len[mirror];
        }
        if (c + len[c] >= bound) {
            // Candidate reaches the frontier: expand explicitly.
            int hi = bound >> 1;
            int lo = c - hi;
            while (lo > 0 && hi < N && S[lo-1] == S[hi]) {
                lo--;
                hi++;
            }
            len[c] = hi - lo;
            mirror = c;
            bound = hi << 1;
        }
    }
    return len;
}
/**
* manacher_odd(S): return the maximum palindromic substring of S centered at each point
*
* Input: string (or vector) of length N (no restrictions on character-set)
* Output: vector res of length N
* For any 0 <= i < N:
 * * the closed substring S[i-res[i], i+res[i]] (inclusive at both ends) is a palindrome of length 2*res[i]+1
*/
template <typename V> std::vector<int> manacher_odd(const V& S) {
    // Odd-length-only Manacher: rad[c] is the palindrome radius at center c
    // (palindrome spans S[c-rad[c] .. c+rad[c]]). `bound` is the rightmost
    // index reached by any palindrome found so far; `mirror` is the center
    // symmetric to c within the palindrome that set `bound`.
    const int N = int(S.size());
    std::vector<int> rad(N);
    int mirror = -1, bound = 0;
    for (int c = 1; c < N; c++, mirror--) {
        if (c > bound) {
            // Past the known frontier: no information, start at radius 0.
            bound = c;
            rad[c] = 0;
        } else {
            // Reuse the mirrored center's radius as a lower bound.
            rad[c] = rad[mirror];
        }
        if (c + rad[c] >= bound) {
            // Touches the frontier: grow the palindrome character by character.
            int hi = bound;
            int lo = 2*c - bound;
            while (lo-1 >= 0 && hi+1 < N && S[lo-1] == S[hi+1]) {
                lo--;
                hi++;
            }
            rad[c] = hi - c;
            mirror = c;
            bound = hi;
        }
    }
    return rad;
}
|
// panama.cpp - written and placed in the public domain by Wei Dai
// use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM panama.cpp" to generate MASM code
#include "pch.h"
#ifndef CRYPTOPP_GENERATE_X64_MASM
#include "panama.h"
#include "misc.h"
#include "cpu.h"
NAMESPACE_BEGIN(CryptoPP)
// Reset the Panama state machine to all zeros.
// m_state[17] doubles as the buffer-start index during operation; when the
// SSSE3 path is compiled in, its low bit is preloaded with the HasSSSE3()
// runtime probe so the asm can branch to palignr-based rotation code.
template <class B>
void Panama<B>::Reset()
{
memset(m_state, 0, m_state.SizeInBytes());
#if CRYPTOPP_BOOL_SSSE3_ASM_AVAILABLE
m_state[17] = HasSSSE3();
#endif
}
#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM
// Panama_SSE2_Pull: SSE2 keystream extraction ("pull") for the Panama state.
// Three build paths share this region:
//   1. CRYPTOPP_X64_MASM_AVAILABLE  - link against pre-generated MASM code
//   2. CRYPTOPP_GENERATE_X64_MASM   - this file is preprocessed into MASM
//      (comments are stripped by the /EP /P pass, so they are safe here)
//   3. otherwise                    - GNU/MSVC inline assembly below
#ifdef CRYPTOPP_X64_MASM_AVAILABLE
extern "C" {
void Panama_SSE2_Pull(size_t count, word32 *state, word32 *z, const word32 *y);
}
#elif CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
#ifdef CRYPTOPP_GENERATE_X64_MASM
Panama_SSE2_Pull PROC FRAME
rex_push_reg rdi
alloc_stack(2*16)
save_xmm128 xmm6, 0h
save_xmm128 xmm7, 10h
.endprolog
#else
#pragma warning(disable: 4731) // frame pointer register 'ebp' modified by inline assembly code
void CRYPTOPP_NOINLINE Panama_SSE2_Pull(size_t count, word32 *state, word32 *z, const word32 *y)
{
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
asm __volatile__
(
".intel_syntax noprefix;"
AS_PUSH_IF86( bx)
#else
AS2( mov AS_REG_1, count)
AS2( mov AS_REG_2, state)
AS2( mov AS_REG_3, z)
AS2( mov AS_REG_4, y)
#endif
#endif // #ifdef CRYPTOPP_GENERATE_X64_MASM
// loop-end sentinel lives in a register on x64, on the stack on x86
#if CRYPTOPP_BOOL_X86
#define REG_loopEnd [esp]
#elif defined(CRYPTOPP_GENERATE_X64_MASM)
#define REG_loopEnd rdi
#else
#define REG_loopEnd r8
#endif
// convert iteration count to bytes (32 per iteration) and bias by bstart
AS2( shl AS_REG_1, 5)
ASJ( jz, 5, f)
AS2( mov AS_REG_6d, [AS_REG_2+4*17])
AS2( add AS_REG_1, AS_REG_6)
#if CRYPTOPP_BOOL_X64
AS2( mov REG_loopEnd, AS_REG_1)
#else
AS1( push ebp)
AS1( push AS_REG_1)
#endif
// load the 17-word a-state: 4 xmm registers plus a(0) in eax
AS2( movdqa xmm0, XMMWORD_PTR [AS_REG_2+0*16])
AS2( movdqa xmm1, XMMWORD_PTR [AS_REG_2+1*16])
AS2( movdqa xmm2, XMMWORD_PTR [AS_REG_2+2*16])
AS2( movdqa xmm3, XMMWORD_PTR [AS_REG_2+3*16])
AS2( mov eax, dword ptr [AS_REG_2+4*16])
ASL(4)
// gamma and pi
#if CRYPTOPP_BOOL_SSSE3_ASM_AVAILABLE
AS2( test AS_REG_6, 1)
ASJ( jnz, 6, f)
#endif
AS2( movdqa xmm6, xmm2)
AS2( movss xmm6, xmm3)
ASS( pshufd xmm5, xmm6, 0, 3, 2, 1)
AS2( movd xmm6, eax)
AS2( movdqa xmm7, xmm3)
AS2( movss xmm7, xmm6)
ASS( pshufd xmm6, xmm7, 0, 3, 2, 1)
#if CRYPTOPP_BOOL_SSSE3_ASM_AVAILABLE
ASJ( jmp, 7, f)
ASL(6)
AS2( movdqa xmm5, xmm3)
AS3( palignr xmm5, xmm2, 4)
AS2( movd xmm6, eax)
AS3( palignr xmm6, xmm3, 4)
ASL(7)
#endif
AS2( movd AS_REG_1d, xmm2)
AS1( not AS_REG_1d)
AS2( movd AS_REG_7d, xmm3)
AS2( or AS_REG_1d, AS_REG_7d)
AS2( xor eax, AS_REG_1d)
// 13 is the inverse of 4 mod 17; maps logical index to reordered slot
#define SSE2_Index(i) ASM_MOD(((i)*13+16), 17)
#define pi(i) \
AS2( movd AS_REG_1d, xmm7)\
AS2( rol AS_REG_1d, ASM_MOD((ASM_MOD(5*i,17)*(ASM_MOD(5*i,17)+1)/2), 32))\
AS2( mov [AS_REG_2+SSE2_Index(ASM_MOD(5*(i), 17))*4], AS_REG_1d)
#define pi4(x, y, z, a, b, c, d) \
AS2( pcmpeqb xmm7, xmm7)\
AS2( pxor xmm7, x)\
AS2( por xmm7, y)\
AS2( pxor xmm7, z)\
pi(a)\
ASS( pshuflw xmm7, xmm7, 1, 0, 3, 2)\
pi(b)\
AS2( punpckhqdq xmm7, xmm7)\
pi(c)\
ASS( pshuflw xmm7, xmm7, 1, 0, 3, 2)\
pi(d)
pi4(xmm1, xmm2, xmm3, 1, 5, 9, 13)
pi4(xmm0, xmm1, xmm2, 2, 6, 10, 14)
pi4(xmm6, xmm0, xmm1, 3, 7, 11, 15)
pi4(xmm5, xmm6, xmm0, 4, 8, 12, 16)
// output keystream and update buffer here to hide partial memory stalls between pi and theta
AS2( movdqa xmm4, xmm3)
AS2( punpcklqdq xmm3, xmm2) // 1 5 2 6
AS2( punpckhdq xmm4, xmm2) // 9 10 13 14
AS2( movdqa xmm2, xmm1)
AS2( punpcklqdq xmm1, xmm0) // 3 7 4 8
AS2( punpckhdq xmm2, xmm0) // 11 12 15 16
// keystream
AS2( test AS_REG_3, AS_REG_3)
ASJ( jz, 0, f)
AS2( movdqa xmm6, xmm4)
AS2( punpcklqdq xmm4, xmm2)
AS2( punpckhqdq xmm6, xmm2)
AS2( test AS_REG_4, 15)
ASJ( jnz, 2, f)
AS2( test AS_REG_4, AS_REG_4)
ASJ( jz, 1, f)
AS2( pxor xmm4, [AS_REG_4])
AS2( pxor xmm6, [AS_REG_4+16])
AS2( add AS_REG_4, 32)
ASJ( jmp, 1, f)
ASL(2)
AS2( movdqu xmm0, [AS_REG_4])
AS2( movdqu xmm2, [AS_REG_4+16])
AS2( pxor xmm4, xmm0)
AS2( pxor xmm6, xmm2)
AS2( add AS_REG_4, 32)
ASL(1)
AS2( test AS_REG_3, 15)
ASJ( jnz, 3, f)
AS2( movdqa XMMWORD_PTR [AS_REG_3], xmm4)
AS2( movdqa XMMWORD_PTR [AS_REG_3+16], xmm6)
AS2( add AS_REG_3, 32)
ASJ( jmp, 0, f)
ASL(3)
AS2( movdqu XMMWORD_PTR [AS_REG_3], xmm4)
AS2( movdqu XMMWORD_PTR [AS_REG_3+16], xmm6)
AS2( add AS_REG_3, 32)
ASL(0)
// buffer update
AS2( lea AS_REG_1, [AS_REG_6 + 32])
AS2( and AS_REG_1, 31*32)
AS2( lea AS_REG_7, [AS_REG_6 + (32-24)*32])
AS2( and AS_REG_7, 31*32)
AS2( movdqa xmm0, XMMWORD_PTR [AS_REG_2+20*4+AS_REG_1+0*8])
AS2( pxor xmm3, xmm0)
ASS( pshufd xmm0, xmm0, 2, 3, 0, 1)
AS2( movdqa XMMWORD_PTR [AS_REG_2+20*4+AS_REG_1+0*8], xmm3)
AS2( pxor xmm0, XMMWORD_PTR [AS_REG_2+20*4+AS_REG_7+2*8])
AS2( movdqa XMMWORD_PTR [AS_REG_2+20*4+AS_REG_7+2*8], xmm0)
AS2( movdqa xmm4, XMMWORD_PTR [AS_REG_2+20*4+AS_REG_1+2*8])
AS2( pxor xmm1, xmm4)
AS2( movdqa XMMWORD_PTR [AS_REG_2+20*4+AS_REG_1+2*8], xmm1)
AS2( pxor xmm4, XMMWORD_PTR [AS_REG_2+20*4+AS_REG_7+0*8])
AS2( movdqa XMMWORD_PTR [AS_REG_2+20*4+AS_REG_7+0*8], xmm4)
// theta
AS2( movdqa xmm3, XMMWORD_PTR [AS_REG_2+3*16])
AS2( movdqa xmm2, XMMWORD_PTR [AS_REG_2+2*16])
AS2( movdqa xmm1, XMMWORD_PTR [AS_REG_2+1*16])
AS2( movdqa xmm0, XMMWORD_PTR [AS_REG_2+0*16])
#if CRYPTOPP_BOOL_SSSE3_ASM_AVAILABLE
AS2( test AS_REG_6, 1)
ASJ( jnz, 8, f)
#endif
AS2( movd xmm6, eax)
AS2( movdqa xmm7, xmm3)
AS2( movss xmm7, xmm6)
AS2( movdqa xmm6, xmm2)
AS2( movss xmm6, xmm3)
AS2( movdqa xmm5, xmm1)
AS2( movss xmm5, xmm2)
AS2( movdqa xmm4, xmm0)
AS2( movss xmm4, xmm1)
ASS( pshufd xmm7, xmm7, 0, 3, 2, 1)
ASS( pshufd xmm6, xmm6, 0, 3, 2, 1)
ASS( pshufd xmm5, xmm5, 0, 3, 2, 1)
ASS( pshufd xmm4, xmm4, 0, 3, 2, 1)
#if CRYPTOPP_BOOL_SSSE3_ASM_AVAILABLE
ASJ( jmp, 9, f)
ASL(8)
AS2( movd xmm7, eax)
AS3( palignr xmm7, xmm3, 4)
AS2( movq xmm6, xmm3)
AS3( palignr xmm6, xmm2, 4)
AS2( movq xmm5, xmm2)
AS3( palignr xmm5, xmm1, 4)
AS2( movq xmm4, xmm1)
AS3( palignr xmm4, xmm0, 4)
ASL(9)
#endif
AS2( xor eax, 1)
AS2( movd AS_REG_1d, xmm0)
AS2( xor eax, AS_REG_1d)
AS2( movd AS_REG_1d, xmm3)
AS2( xor eax, AS_REG_1d)
AS2( pxor xmm3, xmm2)
AS2( pxor xmm2, xmm1)
AS2( pxor xmm1, xmm0)
AS2( pxor xmm0, xmm7)
AS2( pxor xmm3, xmm7)
AS2( pxor xmm2, xmm6)
AS2( pxor xmm1, xmm5)
AS2( pxor xmm0, xmm4)
// sigma
AS2( lea AS_REG_1, [AS_REG_6 + (32-4)*32])
AS2( and AS_REG_1, 31*32)
AS2( lea AS_REG_7, [AS_REG_6 + 16*32])
AS2( and AS_REG_7, 31*32)
AS2( movdqa xmm4, XMMWORD_PTR [AS_REG_2+20*4+AS_REG_1+0*16])
AS2( movdqa xmm5, XMMWORD_PTR [AS_REG_2+20*4+AS_REG_7+0*16])
AS2( movdqa xmm6, xmm4)
AS2( punpcklqdq xmm4, xmm5)
AS2( punpckhqdq xmm6, xmm5)
AS2( pxor xmm3, xmm4)
AS2( pxor xmm2, xmm6)
AS2( movdqa xmm4, XMMWORD_PTR [AS_REG_2+20*4+AS_REG_1+1*16])
AS2( movdqa xmm5, XMMWORD_PTR [AS_REG_2+20*4+AS_REG_7+1*16])
AS2( movdqa xmm6, xmm4)
AS2( punpcklqdq xmm4, xmm5)
AS2( punpckhqdq xmm6, xmm5)
AS2( pxor xmm1, xmm4)
AS2( pxor xmm0, xmm6)
// loop
AS2( add AS_REG_6, 32)
AS2( cmp AS_REG_6, REG_loopEnd)
ASJ( jne, 4, b)
// save state
AS2( mov [AS_REG_2+4*16], eax)
AS2( movdqa XMMWORD_PTR [AS_REG_2+3*16], xmm3)
AS2( movdqa XMMWORD_PTR [AS_REG_2+2*16], xmm2)
AS2( movdqa XMMWORD_PTR [AS_REG_2+1*16], xmm1)
AS2( movdqa XMMWORD_PTR [AS_REG_2+0*16], xmm0)
#if CRYPTOPP_BOOL_X86
AS2( add esp, 4)
AS1( pop ebp)
#endif
ASL(5)
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
AS_POP_IF86( bx)
".att_syntax prefix;"
:
#if CRYPTOPP_BOOL_X64
: "D" (count), "S" (state), "d" (z), "c" (y)
: "%r8", "%r9", "r10", "%eax", "memory", "cc", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7"
#else
: "c" (count), "d" (state), "S" (z), "D" (y)
: "%eax", "memory", "cc"
#endif
);
#endif
#ifdef CRYPTOPP_GENERATE_X64_MASM
movdqa xmm6, [rsp + 0h]
movdqa xmm7, [rsp + 10h]
add rsp, 2*16
pop rdi
ret
Panama_SSE2_Pull ENDP
#else
}
#endif
#endif
#ifndef CRYPTOPP_GENERATE_X64_MASM
// Portable (non-SSE2) Panama core: run `count` rounds, optionally absorbing
// input words `p` (push mode) and/or producing keystream into `output`
// (pull mode, XORed with `input` per `operation`). The a/c states are kept
// in a reordered layout matching the SSE2 code so both paths share m_state.
template <class B>
void Panama<B>::Iterate(size_t count, const word32 *p, byte *output, const byte *input, KeystreamOperation operation)
{
word32 bstart = m_state[17];
word32 *const aPtr = m_state;
word32 cPtr[17];
#define bPtr ((byte *)(aPtr+20))
// reorder the state for SSE2
// a and c: 4 8 12 16 | 3 7 11 15 | 2 6 10 14 | 1 5 9 13 | 0
// xmm0 xmm1 xmm2 xmm3 eax
#define a(i) aPtr[((i)*13+16) % 17] // 13 is inverse of 4 mod 17
#define c(i) cPtr[((i)*13+16) % 17]
// b: 0 4 | 1 5 | 2 6 | 3 7
#define b(i, j) b##i[(j)*2%8 + (j)/4]
// buffer update
#define US(i) {word32 t=b(0,i); b(0,i)=ConditionalByteReverse(B::ToEnum(), p[i])^t; b(25,(i+6)%8)^=t;}
#define UL(i) {word32 t=b(0,i); b(0,i)=a(i+1)^t; b(25,(i+6)%8)^=t;}
// gamma and pi
#define GP(i) c(5*i%17) = rotlFixed(a(i) ^ (a((i+1)%17) | ~a((i+2)%17)), ((5*i%17)*((5*i%17)+1)/2)%32)
// theta and sigma
#define T(i,x) a(i) = c(i) ^ c((i+1)%17) ^ c((i+4)%17) ^ x
#define TS1S(i) T(i+1, ConditionalByteReverse(B::ToEnum(), p[i]))
#define TS1L(i) T(i+1, b(4,i))
#define TS2(i) T(i+9, b(16,i))
while (count--)
{
// pull mode: emit words a(9..16) as keystream before updating
if (output)
{
#define PANAMA_OUTPUT(x) \
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, B::ToEnum(), 0, a(0+9));\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, B::ToEnum(), 1, a(1+9));\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, B::ToEnum(), 2, a(2+9));\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, B::ToEnum(), 3, a(3+9));\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, B::ToEnum(), 4, a(4+9));\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, B::ToEnum(), 5, a(5+9));\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, B::ToEnum(), 6, a(6+9));\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, B::ToEnum(), 7, a(7+9));
typedef word32 WordType;
CRYPTOPP_KEYSTREAM_OUTPUT_SWITCH(PANAMA_OUTPUT, 4*8);
}
// resolve the relevant 32-byte stages of the 32-stage LFSR buffer
word32 *const b16 = (word32 *)(bPtr+((bstart+16*32) & 31*32));
word32 *const b4 = (word32 *)(bPtr+((bstart+(32-4)*32) & 31*32));
bstart += 32;
word32 *const b0 = (word32 *)(bPtr+((bstart) & 31*32));
word32 *const b25 = (word32 *)(bPtr+((bstart+(32-25)*32) & 31*32));
// buffer update: from message words (push) or from the state (pull)
if (p)
{
US(0); US(1); US(2); US(3); US(4); US(5); US(6); US(7);
}
else
{
UL(0); UL(1); UL(2); UL(3); UL(4); UL(5); UL(6); UL(7);
}
GP(0);
GP(1);
GP(2);
GP(3);
GP(4);
GP(5);
GP(6);
GP(7);
GP(8);
GP(9);
GP(10);
GP(11);
GP(12);
GP(13);
GP(14);
GP(15);
GP(16);
T(0,1);
if (p)
{
TS1S(0); TS1S(1); TS1S(2); TS1S(3); TS1S(4); TS1S(5); TS1S(6); TS1S(7);
p += 8;
}
else
{
TS1L(0); TS1L(1); TS1L(2); TS1L(3); TS1L(4); TS1L(5); TS1L(6); TS1L(7);
}
TS2(0); TS2(1); TS2(2); TS2(3); TS2(4); TS2(5); TS2(6); TS2(7);
}
m_state[17] = bstart;
}
namespace Weak {
template <class B>
size_t PanamaHash<B>::HashMultipleBlocks(const word32 *input, size_t length)
{
    // Absorb every whole block; report how many trailing bytes remain
    // for the caller to buffer.
    const size_t blocks = length / this->BLOCKSIZE;
    this->Iterate(blocks, input);
    return length - blocks * this->BLOCKSIZE;
}
// Finalize the hash: pad, absorb the last block, run 32 blank pull rounds
// (per the Panama spec), then emit up to `size` bytes of digest.
template <class B>
void PanamaHash<B>::TruncatedFinal(byte *hash, size_t size)
{
this->ThrowIfInvalidTruncatedSize(size);
this->PadLastBlock(this->BLOCKSIZE, 0x01);
HashEndianCorrectedBlock(this->m_data);
this->Iterate(32); // pull
FixedSizeSecBlock<word32, 8> buf;
this->Iterate(1, NULL, buf.BytePtr(), NULL);
memcpy(hash, buf, size);
this->Restart(); // reinit for next use
}
}
// Store the 256-bit key; the real keying work happens in
// CipherResynchronize. `params` is unused by this policy.
// NOTE(review): the length check is an assert only, so it vanishes in
// release builds — callers are expected to always pass exactly 32 bytes.
template <class B>
void PanamaCipherPolicy<B>::CipherSetKey(const NameValuePairs &params, const byte *key, size_t length)
{
assert(length==32);
memcpy(m_key, key, 32);
}
// Re-key the stream: reset state, absorb the key block and the 32-byte IV
// (zero-padded/copied into an aligned buffer if needed), then run 32 blank
// pull rounds — via the SSE2 path when available and profitable.
template <class B>
void PanamaCipherPolicy<B>::CipherResynchronize(byte *keystreamBuffer, const byte *iv, size_t length)
{
assert(length==32);
this->Reset();
this->Iterate(1, m_key);
if (iv && IsAligned<word32>(iv))
this->Iterate(1, (const word32 *)iv);
else
{
FixedSizeSecBlock<word32, 8> buf;
if (iv)
memcpy(buf, iv, 32);
else
memset(buf, 0, 32);
this->Iterate(1, buf);
}
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)
if (B::ToEnum() == LITTLE_ENDIAN_ORDER && HasSSE2() && !IsP4()) // SSE2 code is slower on P4 Prescott
Panama_SSE2_Pull(32, this->m_state, NULL, NULL);
else
#endif
this->Iterate(32);
}
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X64
// Required input/output alignment: 16 bytes when the SSE2 path will be
// taken (movdqa), otherwise no alignment requirement.
template <class B>
unsigned int PanamaCipherPolicy<B>::GetAlignment() const
{
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)
if (B::ToEnum() == LITTLE_ENDIAN_ORDER && HasSSE2())
return 16;
else
#endif
return 1;
}
#endif
// Produce `iterationCount` blocks of keystream, dispatching to the SSE2
// assembly when the byte order and CPU allow, else the portable Iterate.
template <class B>
void PanamaCipherPolicy<B>::OperateKeystream(KeystreamOperation operation, byte *output, const byte *input, size_t iterationCount)
{
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)
if (B::ToEnum() == LITTLE_ENDIAN_ORDER && HasSSE2())
Panama_SSE2_Pull(iterationCount, this->m_state, (word32 *)output, (const word32 *)input);
else
#endif
this->Iterate(iterationCount, NULL, output, input, operation);
}
// Explicit instantiations for both byte orders.
template class Panama<BigEndian>;
template class Panama<LittleEndian>;
template class Weak::PanamaHash<BigEndian>;
template class Weak::PanamaHash<LittleEndian>;
template class PanamaCipherPolicy<BigEndian>;
template class PanamaCipherPolicy<LittleEndian>;
NAMESPACE_END
#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM
|
// Find the length of the longest valid parentheses substring of a given string.
/* We traverse the string from left to right; for every '(' encountered we
increment the left counter, and for every ')' encountered we increment the right counter. */
/* maxans --> contains the max length of a valid substring found so far */
class Solution {
public:
    // Two-pass counter scan. Going left-to-right, a prefix is balanced when
    // open == close; a surplus of ')' makes everything before it unusable,
    // so both counters reset. The mirror-image pass (right-to-left,
    // resetting on surplus '(') catches substrings like "(()" that the
    // first pass misses. O(n) time, O(1) space.
    int longestValidParentheses(std::string s) {
        int best = 0;
        int open = 0, close = 0;
        for (std::size_t i = 0; i < s.size(); ++i) {
            (s[i] == '(') ? ++open : ++close;
            if (open == close) {
                if (2 * close > best) best = 2 * close;
            } else if (close > open) {
                open = close = 0;  // irrecoverably unbalanced prefix
            }
        }
        open = close = 0;
        for (std::size_t i = s.size(); i-- > 0; ) {
            (s[i] == '(') ? ++open : ++close;
            if (open == close) {
                if (2 * open > best) best = 2 * open;
            } else if (open > close) {
                open = close = 0;  // irrecoverably unbalanced suffix
            }
        }
        return best;
    }
};
|
#include "compression.h"
#include "dxtbx/error.h"
#include <assert.h>
// Two-byte overlay: view a short as raw bytes (for endian detection/swaps).
typedef union {
char b[2];
short s;
} union_short;
// Four-byte overlay: view an int as raw bytes (for endian detection/swaps).
typedef union {
char b[4];
int i;
} union_int;
// Swap the two bytes of a 16-bit value in place (endianness conversion).
void byte_swap_short(char *b) {
    const char tmp = b[0];
    b[0] = b[1];
    b[1] = tmp;
}
// Reverse the four bytes of a 32-bit value in place (endianness conversion):
// swaps the (0,3) and (1,2) byte pairs.
void byte_swap_int(char *b) {
    for (int lo = 0, hi = 3; lo < hi; ++lo, --hi) {
        const char tmp = b[lo];
        b[lo] = b[hi];
        b[hi] = tmp;
    }
}
// Runtime endianness probe: store 1 in an int and inspect its lowest-
// addressed byte. Non-zero there means least-significant-byte-first
// (little endian); zero means big endian.
bool little_endian() {
    const int probe = 0x1;
    const char first_byte = *reinterpret_cast<const char *>(&probe);
    return first_byte != 0;
}
// CBF "byte_offset" delta compression: each value is encoded as the delta
// from the previous one, using the smallest of 1/2/4-byte little-endian
// forms. The sentinel -0x80 (1 byte) escapes to the 2-byte form, and the
// sentinel -0x8000 (2 bytes) escapes to the 4-byte form.
std::vector<char> dxtbx::boost_python::cbf_compress(const int *values,
const std::size_t &sz) {
std::vector<char> packed(0);
int current = 0;
int delta, i;
unsigned int j;
bool le = little_endian();
short s;
char c;
char *b;
for (j = 0; j < sz; j++) {
delta = values[j] - current;
// 1-byte form: delta fits in [-0x7f, 0x7f] (note -0x80 is reserved).
if ((-0x7f <= delta) && (delta < 0x80)) {
c = (char)delta;
packed.push_back(c);
current += delta;
continue;
}
// escape to the wider forms
packed.push_back(-0x80);
// 2-byte form, stored little-endian (swap on big-endian hosts).
if ((-0x7fff <= delta) && (delta < 0x8000)) {
s = (short)delta;
b = ((union_short *)&s)[0].b;
if (!le) {
byte_swap_short(b);
}
packed.push_back(b[0]);
packed.push_back(b[1]);
current += delta;
continue;
}
// escape again, then the full 4-byte form
s = -0x8000;
b = ((union_short *)&s)[0].b;
if (!le) {
byte_swap_short(b);
}
packed.push_back(b[0]);
packed.push_back(b[1]);
// NOTE(review): this constant has 7 hex digits (-0x8000000 == -2^27);
// if the intent was to reject the unencodable INT_MIN it should
// probably read -0x80000000 — confirm against the CBF spec.
assert(delta != -0x8000000);
i = delta;
b = ((union_int *)&i)[0].b;
if (!le) {
byte_swap_int(b);
}
packed.push_back(b[0]);
packed.push_back(b[1]);
packed.push_back(b[2]);
packed.push_back(b[3]);
current += delta;
}
return packed;
}
// Inverse of cbf_compress: decode deltas (1-byte, or escaped 2-byte, or
// doubly-escaped 4-byte little-endian forms) and accumulate them into
// `values`. Returns the number of values written.
// NOTE(review): `(values - original) < values_sz` compares a signed
// ptrdiff_t with an unsigned size_t — benign here since the difference is
// never negative, but it triggers -Wsign-compare.
unsigned int dxtbx::boost_python::cbf_decompress(const char *packed,
std::size_t packed_sz,
int *values,
std::size_t values_sz) {
int current = 0;
int *original = values;
unsigned int j = 0;
short s;
char c;
int i;
bool le = little_endian();
while ((j < packed_sz) && ((values - original) < values_sz)) {
c = packed[j];
j += 1;
// 1-byte delta unless it is the escape sentinel -0x80.
if (c != -0x80) {
current += c;
*values = current;
values++;
continue;
}
// 2-byte little-endian delta (bytes j, j+1 must exist).
DXTBX_ASSERT(j + 1 < packed_sz);
((union_short *)&s)[0].b[0] = packed[j];
((union_short *)&s)[0].b[1] = packed[j + 1];
j += 2;
if (!le) {
byte_swap_short((char *)&s);
}
// ... unless it is the second escape sentinel -0x8000.
if (s != -0x8000) {
current += s;
*values = current;
values++;
continue;
}
// 4-byte little-endian delta (bytes j..j+3 must exist).
DXTBX_ASSERT(j + 3 < packed_sz);
((union_int *)&i)[0].b[0] = packed[j];
((union_int *)&i)[0].b[1] = packed[j + 1];
((union_int *)&i)[0].b[2] = packed[j + 2];
((union_int *)&i)[0].b[3] = packed[j + 3];
j += 4;
if (!le) {
byte_swap_int((char *)&i);
}
current += i;
*values = current;
values++;
}
return values - original;
}
|
/**
* @file LefDriver.cc
* @author Yibo Lin
* @date Oct 2014
* @brief Implementation of @ref LefParser::Driver
*/
#include <limbo/parsers/lef/adapt/LefDriver.h>
#include <limbo/preprocessor/Msg.h>
#include <cctype>
#include <cstring>
#include <unistd.h>
#include <cstdlib>
#include <cmath>
namespace LefParser
{
// Construct a LEF parsing driver bound to the user's database object;
// scanner/parser tracing is off by default.
Driver::Driver(LefDataBase& db)
: trace_scanning(false),
trace_parsing(false),
m_db(db)
{
}
Driver::~Driver()
{
}
/// =========== callbacks ============
///@{
/// a local temporary global variable
/// refer to Driver::m_db everytime parsing a file
/// reset to NULL once parsing finished
LefDataBase* lefDB = NULL;
int parse65nm = 0;        // enable 5.7-in-5.6 layer rule parsing
int parseLef58Type = 0;   // enable 5.8-in-5.7 layer rule parsing
int isSessionless = 0;    // use lefr sessionless mode
int relax = 0;            // relaxed syntax checking flag
const char* version = "N/A";
int setVer = 0;
int verStr = 0;           // prefer string version callback over numeric
int msgCb = 0;
char* userData = NULL;    // user data string handed to the lefr reader
// Sanity-check the callback type handed to us by the LEF reader; log an
// error (without aborting) when it falls outside the known range.
void checkType(lefrCallbackType_e c)
{
    if (c < 0 || c > lefrLibraryEndCbkType)
        limboPrint(limbo::kERROR, "callback type is out of bounds!\n");
}
// ---- lefr reader callbacks: thin adapters that validate the callback type
// ---- and forward the payload to the active LefDataBase (`lefDB`).
// One handler for all six antenna value callbacks; dispatch on the type.
int antennaCB(lefrCallbackType_e c, double value, lefiUserData)
{
checkType(c);
switch (c)
{
case lefrAntennaInputCbkType:
lefDB->lef_antennainput_cbk(value);
break;
case lefrAntennaInoutCbkType:
lefDB->lef_antennainout_cbk(value);
break;
case lefrAntennaOutputCbkType:
lefDB->lef_antennaoutput_cbk(value);
break;
case lefrInputAntennaCbkType:
lefDB->lef_inputantenna_cbk(value);
break;
case lefrOutputAntennaCbkType:
lefDB->lef_outputantenna_cbk(value);
break;
case lefrInoutAntennaCbkType:
lefDB->lef_inoutantenna_cbk(value);
break;
default:
fprintf(stderr, "BOGUS antenna type.\n");
break;
}
return 0;
}
// ARRAY section begin/end markers carry no payload we forward.
int arrayBeginCB(lefrCallbackType_e c, const char* /*name*/, lefiUserData)
{
checkType(c);
return 0;
}
int arrayCB(lefrCallbackType_e c, lefiArray* a, lefiUserData)
{
checkType(c);
lefDB->lef_array_cbk(*a);
return 0;
}
int arrayEndCB(lefrCallbackType_e c, const char* /*name*/, lefiUserData)
{
checkType(c);
return 0;
}
int busBitCharsCB(lefrCallbackType_e c, const char* busBit, lefiUserData)
{
checkType(c);
lefDB->lef_busbitchars_cbk(busBit);
return 0;
}
int caseSensCB(lefrCallbackType_e c, int caseSense, lefiUserData)
{
checkType(c);
lefDB->lef_casesensitive_cbk(caseSense);
return 0;
}
// FIXEDMASK has no LefDataBase hook; it is merely echoed to stderr.
int fixedMaskCB(lefrCallbackType_e c, int fixedMask, lefiUserData)
{
checkType(c);
if (fixedMask == 1)
fprintf(stderr, "FIXEDMASK ;\n");
return 0;
}
int clearanceCB(lefrCallbackType_e c, const char* name, lefiUserData)
{
checkType(c);
lefDB->lef_clearancemeasure_cbk(name);
return 0;
}
int dividerCB(lefrCallbackType_e c, const char* name, lefiUserData)
{
checkType(c);
lefDB->lef_dividerchar_cbk(name);
return 0;
}
int noWireExtCB(lefrCallbackType_e c, const char* name, lefiUserData)
{
checkType(c);
lefDB->lef_nowireextension_cbk(name);
return 0;
}
int noiseMarCB(lefrCallbackType_e c, lefiNoiseMargin *data, lefiUserData)
{
checkType(c);
lefDB->lef_noisemargin_cbk(*data);
return 0;
}
// Edge-rate threshold/scale callbacks (numeric payloads).
int edge1CB(lefrCallbackType_e c, double name, lefiUserData)
{
checkType(c);
lefDB->lef_edgeratethreshold1_cbk(name);
return 0;
}
int edge2CB(lefrCallbackType_e c, double name, lefiUserData)
{
checkType(c);
lefDB->lef_edgeratethreshold2_cbk(name);
return 0;
}
int edgeScaleCB(lefrCallbackType_e c, double name, lefiUserData)
{
checkType(c);
lefDB->lef_edgeratescalefactor_cbk(name);
return 0;
}
int noiseTableCB(lefrCallbackType_e c, lefiNoiseTable *table, lefiUserData)
{
checkType(c);
lefDB->lef_noisetable_cbk(*table);
return 0;
}
int correctionCB(lefrCallbackType_e c, lefiCorrectionTable *table, lefiUserData)
{
checkType(c);
lefDB->lef_correctiontable_cbk(*table);
return 0;
}
int dielectricCB(lefrCallbackType_e c, double dielectric, lefiUserData)
{
checkType(c);
lefDB->lef_dielectric_cbk(dielectric);
return 0;
}
// IRDROP begin/end markers carry no payload we forward.
int irdropBeginCB(lefrCallbackType_e c, void*, lefiUserData)
{
checkType(c);
return 0;
}
int irdropCB(lefrCallbackType_e c, lefiIRDrop* irdrop, lefiUserData)
{
checkType(c);
lefDB->lef_irdrop_cbk(*irdrop);
return 0;
}
int irdropEndCB(lefrCallbackType_e c, void*, lefiUserData)
{
checkType(c);
return 0;
}
// LAYER callback: temporarily disables case sensitivity while optionally
// re-parsing newer-version layer rules embedded in older-version files.
int layerCB(lefrCallbackType_e c, lefiLayer* layer, lefiUserData)
{
checkType(c);
lefrSetCaseSensitivity(0);
// Call parse65nmRules for 5.7 syntax in 5.6
if (parse65nm)
layer->lefiLayer::parse65nmRules();
// Call parseLef58Type for 5.8 syntax in 5.7
if (parseLef58Type)
layer->lefiLayer::parseLEF58Layer();
lefDB->lef_layer_cbk(*layer);
// Set it to case sensitive from here on
lefrSetCaseSensitivity(1);
return 0;
}
// MACRO callbacks: only begin-name and the full lefiMacro payload are
// forwarded; the per-attribute callbacks are accepted and ignored.
int macroBeginCB(lefrCallbackType_e c, const char* macroName, lefiUserData)
{
checkType(c);
lefDB->lef_macrobegin_cbk(macroName);
return 0;
}
int macroFixedMaskCB(lefrCallbackType_e c, int,
lefiUserData)
{
checkType(c);
return 0;
}
int macroClassTypeCB(lefrCallbackType_e c, const char* /*macroClassType*/,
lefiUserData)
{
checkType(c);
return 0;
}
int macroOriginCB(lefrCallbackType_e c, lefiNum,
lefiUserData)
{
checkType(c);
return 0;
}
int macroSizeCB(lefrCallbackType_e c, lefiNum,
lefiUserData)
{
checkType(c);
// fprintf(stderr, " SIZE %g BY %g ;\n", macroNum.x, macroNum.y);
return 0;
}
int macroCB(lefrCallbackType_e c, lefiMacro* macro, lefiUserData)
{
checkType(c);
lefDB->lef_macro_cbk(*macro);
return 0;
}
int macroEndCB(lefrCallbackType_e c, const char* /*macroName*/, lefiUserData)
{
checkType(c);
return 0;
}
// Straight pass-through callbacks for library-level records.
int manufacturingCB(lefrCallbackType_e c, double num, lefiUserData)
{
checkType(c);
lefDB->lef_manufacturing_cbk(num);
return 0;
}
int maxStackViaCB(lefrCallbackType_e c, lefiMaxStackVia* maxStack,
lefiUserData)
{
checkType(c);
lefDB->lef_maxstackvia_cbk(*maxStack);
return 0;
}
int minFeatureCB(lefrCallbackType_e c, lefiMinFeature* min, lefiUserData)
{
checkType(c);
lefDB->lef_minfeature_cbk(*min);
return 0;
}
int nonDefaultCB(lefrCallbackType_e c, lefiNonDefault* def, lefiUserData)
{
checkType(c);
lefDB->lef_nondefault_cbk(*def);
return 0;
}
int obstructionCB(lefrCallbackType_e c, lefiObstruction* obs, lefiUserData)
{
checkType(c);
lefDB->lef_obstruction_cbk(*obs);
return 0;
}
int pinCB(lefrCallbackType_e c, lefiPin* pin, lefiUserData)
{
checkType(c);
lefDB->lef_pin_cbk(*pin);
return 0;
}
int densityCB(lefrCallbackType_e c, lefiDensity* density,
lefiUserData)
{
checkType(c);
lefDB->lef_density_cbk(*density);
return 0;
}
// PROPERTYDEFINITIONS / SITE / SPACING / TIMING / UNITS pass-throughs;
// section begin/end markers are accepted without forwarding.
int propDefBeginCB(lefrCallbackType_e c, void*, lefiUserData)
{
checkType(c);
return 0;
}
int propDefCB(lefrCallbackType_e c, lefiProp* prop, lefiUserData)
{
checkType(c);
lefDB->lef_prop_cbk(*prop);
return 0;
}
int propDefEndCB(lefrCallbackType_e c, void*, lefiUserData)
{
checkType(c);
return 0;
}
int siteCB(lefrCallbackType_e c, lefiSite* site, lefiUserData)
{
checkType(c);
lefDB->lef_site_cbk(*site);
return 0;
}
int spacingBeginCB(lefrCallbackType_e c, void*, lefiUserData)
{
checkType(c);
return 0;
}
int spacingCB(lefrCallbackType_e c, lefiSpacing* spacing, lefiUserData)
{
checkType(c);
lefDB->lef_spacing_cbk(*spacing);
return 0;
}
int spacingEndCB(lefrCallbackType_e c, void*, lefiUserData)
{
checkType(c);
return 0;
}
int timingCB(lefrCallbackType_e c, lefiTiming* timing, lefiUserData)
{
checkType(c);
lefDB->lef_timing_cbk(*timing);
return 0;
}
int unitsCB(lefrCallbackType_e c, lefiUnits* unit, lefiUserData)
{
checkType(c);
lefDB->lef_units_cbk(*unit);
return 0;
}
int useMinSpacingCB(lefrCallbackType_e c, lefiUseMinSpacing* spacing,
lefiUserData)
{
checkType(c);
lefDB->lef_useminspacing_cbk(*spacing);
return 0;
}
// VERSION arrives either as a double or a string; both feed the same
// (overloaded) lef_version_cbk.
int versionCB(lefrCallbackType_e c, double num, lefiUserData)
{
checkType(c);
lefDB->lef_version_cbk(num);
return 0;
}
int versionStrCB(lefrCallbackType_e c, const char* versionName, lefiUserData)
{
checkType(c);
lefDB->lef_version_cbk(versionName);
return 0;
}
int viaCB(lefrCallbackType_e c, lefiVia* via, lefiUserData)
{
checkType(c);
lefDB->lef_via_cbk(*via);
return 0;
}
int viaRuleCB(lefrCallbackType_e c, lefiViaRule* viaRule, lefiUserData)
{
checkType(c);
lefDB->lef_viarule_cbk(*viaRule);
return 0;
}
int extensionCB(lefrCallbackType_e c, const char* extsn, lefiUserData)
{
checkType(c);
lefDB->lef_extension_cbk(extsn);
return 0;
}
// END LIBRARY marker — nothing to forward.
int doneCB(lefrCallbackType_e c, void*, lefiUserData)
{
checkType(c);
return 0;
}
// Reader diagnostics prefixed with the user-data tag set in parse_file.
void errorCB(const char* msg)
{
printf ("%s : %s\n", (const char*)lefrGetUserData(), msg);
}
void warningCB(const char* msg)
{
printf ("%s : %s\n", (const char*)lefrGetUserData(), msg);
}
// Optional allocator hooks the lefr reader can be pointed at.
void* mallocCB(int size)
{
return malloc(size);
}
void* reallocCB(void* name, int size)
{
return realloc(name, size);
}
void freeCB(void* name)
{
free(name);
return;
}
// Progress hook: invoked by the reader every N lines.
void lineNumberCB(int lineNo)
{
limboPrint(limbo::kINFO, "Parsed %d number of lines!!\n", lineNo);
return;
}
void printWarning(const char *str)
{
limboPrint(limbo::kWARN, "%s\n", str);
}
///@} =========== end of callbacks ============
bool Driver::parse_file(const std::string &filename)
{
parse65nm = 0;
parseLef58Type = 0;
isSessionless = 0;
relax = 0;
version = "N/A";
setVer = 0;
verStr = 1;
msgCb = 0;
userData = strdup ("(lefrw-5100)");
if (isSessionless)
{
lefrSetOpenLogFileAppend();
}
lefrInitSession(isSessionless ? 0 : 1);
lefrSetWarningLogFunction(printWarning);
lefrSetAntennaInputCbk(antennaCB);
lefrSetAntennaInoutCbk(antennaCB);
lefrSetAntennaOutputCbk(antennaCB);
lefrSetArrayBeginCbk(arrayBeginCB);
lefrSetArrayCbk(arrayCB);
lefrSetArrayEndCbk(arrayEndCB);
lefrSetBusBitCharsCbk(busBitCharsCB);
lefrSetCaseSensitiveCbk(caseSensCB);
lefrSetFixedMaskCbk(fixedMaskCB);
lefrSetClearanceMeasureCbk(clearanceCB);
lefrSetDensityCbk(densityCB);
lefrSetDividerCharCbk(dividerCB);
lefrSetNoWireExtensionCbk(noWireExtCB);
lefrSetNoiseMarginCbk(noiseMarCB);
lefrSetEdgeRateThreshold1Cbk(edge1CB);
lefrSetEdgeRateThreshold2Cbk(edge2CB);
lefrSetEdgeRateScaleFactorCbk(edgeScaleCB);
lefrSetExtensionCbk(extensionCB);
lefrSetNoiseTableCbk(noiseTableCB);
lefrSetCorrectionTableCbk(correctionCB);
lefrSetDielectricCbk(dielectricCB);
lefrSetIRDropBeginCbk(irdropBeginCB);
lefrSetIRDropCbk(irdropCB);
lefrSetIRDropEndCbk(irdropEndCB);
lefrSetLayerCbk(layerCB);
lefrSetLibraryEndCbk(doneCB);
lefrSetMacroBeginCbk(macroBeginCB);
lefrSetMacroCbk(macroCB);
lefrSetMacroClassTypeCbk(macroClassTypeCB);
lefrSetMacroOriginCbk(macroOriginCB);
lefrSetMacroSizeCbk(macroSizeCB);
lefrSetMacroFixedMaskCbk(macroFixedMaskCB);
lefrSetMacroEndCbk(macroEndCB);
lefrSetManufacturingCbk(manufacturingCB);
lefrSetMaxStackViaCbk(maxStackViaCB);
lefrSetMinFeatureCbk(minFeatureCB);
lefrSetNonDefaultCbk(nonDefaultCB);
lefrSetObstructionCbk(obstructionCB);
lefrSetPinCbk(pinCB);
lefrSetPropBeginCbk(propDefBeginCB);
lefrSetPropCbk(propDefCB);
lefrSetPropEndCbk(propDefEndCB);
lefrSetSiteCbk(siteCB);
lefrSetSpacingBeginCbk(spacingBeginCB);
lefrSetSpacingCbk(spacingCB);
lefrSetSpacingEndCbk(spacingEndCB);
lefrSetTimingCbk(timingCB);
lefrSetUnitsCbk(unitsCB);
lefrSetUseMinSpacingCbk(useMinSpacingCB);
lefrSetUserData((void*)3);
if (!verStr)
lefrSetVersionCbk(versionCB);
else
lefrSetVersionStrCbk(versionStrCB);
lefrSetViaCbk(viaCB);
lefrSetViaRuleCbk(viaRuleCB);
lefrSetInputAntennaCbk(antennaCB);
lefrSetOutputAntennaCbk(antennaCB);
lefrSetInoutAntennaCbk(antennaCB);
if (msgCb)
{
lefrSetLogFunction(errorCB);
lefrSetWarningLogFunction(warningCB);
}
lefrSetMallocFunction(mallocCB);
lefrSetReallocFunction(reallocCB);
lefrSetFreeFunction(freeCB);
//lefrSetLineNumberFunction(lineNumberCB);
lefrSetDeltaNumberLines(50);
lefrSetRegisterUnusedCallbacks();
if (relax)
lefrSetRelaxMode();
if (setVer)
(void)lefrSetVersionValue(version);
lefrSetAntennaInoutWarnings(30);
lefrSetAntennaInputWarnings(30);
lefrSetAntennaOutputWarnings(30);
lefrSetArrayWarnings(30);
lefrSetCaseSensitiveWarnings(30);
lefrSetCorrectionTableWarnings(30);
lefrSetDielectricWarnings(30);
lefrSetEdgeRateThreshold1Warnings(30);
lefrSetEdgeRateThreshold2Warnings(30);
lefrSetEdgeRateScaleFactorWarnings(30);
lefrSetInoutAntennaWarnings(30);
lefrSetInputAntennaWarnings(30);
lefrSetIRDropWarnings(30);
lefrSetLayerWarnings(30);
lefrSetMacroWarnings(30);
lefrSetMaxStackViaWarnings(30);
lefrSetMinFeatureWarnings(30);
lefrSetNoiseMarginWarnings(30);
lefrSetNoiseTableWarnings(30);
lefrSetNonDefaultWarnings(30);
lefrSetNoWireExtensionWarnings(30);
lefrSetOutputAntennaWarnings(30);
lefrSetPinWarnings(30);
lefrSetSiteWarnings(30);
lefrSetSpacingWarnings(30);
lefrSetTimingWarnings(30);
lefrSetUnitsWarnings(30);
lefrSetUseMinSpacingWarnings(30);
lefrSetViaRuleWarnings(30);
lefrSetViaWarnings(30);
lefrReset();
FILE* f = fopen(filename.c_str(), "r");
if (!f)
{
std::cerr << "Could not open input file " << filename << "\n";
return false;
}
(void)lefrEnableReadEncrypted();
// set lefDB
lefDB = &m_db;
// kernel to read lef file
int res = lefrRead(f, filename.c_str(), (void*)userData);
// reset to NULL
lefDB = NULL;
if (res)
std::cerr << "Reader returns bad status\n";
(void)lefrPrintUnusedCallbacks(stderr);
(void)lefrReleaseNResetMemory();
//(void)lefrUnsetCallbacks();
(void)lefrUnsetLayerCbk();
(void)lefrUnsetNonDefaultCbk();
(void)lefrUnsetViaCbk();
// Unset all the callbacks
void lefrUnsetAntennaInputCbk();
void lefrUnsetAntennaInoutCbk();
void lefrUnsetAntennaOutputCbk();
void lefrUnsetArrayBeginCbk();
void lefrUnsetArrayCbk();
void lefrUnsetArrayEndCbk();
void lefrUnsetBusBitCharsCbk();
void lefrUnsetCaseSensitiveCbk();
void lefrUnsetFixedMaskCbk();
void lefrUnsetClearanceMeasureCbk();
void lefrUnsetCorrectionTableCbk();
void lefrUnsetDensityCbk();
void lefrUnsetDielectricCbk();
void lefrUnsetDividerCharCbk();
void lefrUnsetEdgeRateScaleFactorCbk();
void lefrUnsetEdgeRateThreshold1Cbk();
void lefrUnsetEdgeRateThreshold2Cbk();
void lefrUnsetExtensionCbk();
void lefrUnsetInoutAntennaCbk();
void lefrUnsetInputAntennaCbk();
void lefrUnsetIRDropBeginCbk();
void lefrUnsetIRDropCbk();
void lefrUnsetIRDropEndCbk();
void lefrUnsetLayerCbk();
void lefrUnsetLibraryEndCbk();
void lefrUnsetMacroBeginCbk();
void lefrUnsetMacroCbk();
void lefrUnsetMacroClassTypeCbk();
void lefrUnsetMacroEndCbk();
void lefrUnsetMacroOriginCbk();
void lefrUnsetMacroSizeCbk();
void lefrUnsetManufacturingCbk();
void lefrUnsetMaxStackViaCbk();
void lefrUnsetMinFeatureCbk();
void lefrUnsetNoiseMarginCbk();
void lefrUnsetNoiseTableCbk();
void lefrUnsetNonDefaultCbk();
void lefrUnsetNoWireExtensionCbk();
void lefrUnsetObstructionCbk();
void lefrUnsetOutputAntennaCbk();
void lefrUnsetPinCbk();
void lefrUnsetPropBeginCbk();
void lefrUnsetPropCbk();
void lefrUnsetPropEndCbk();
void lefrUnsetSiteCbk();
void lefrUnsetSpacingBeginCbk();
void lefrUnsetSpacingCbk();
void lefrUnsetSpacingEndCbk();
void lefrUnsetTimingCbk();
void lefrUnsetUseMinSpacingCbk();
void lefrUnsetUnitsCbk();
void lefrUnsetVersionCbk();
void lefrUnsetVersionStrCbk();
void lefrUnsetViaCbk();
void lefrUnsetViaRuleCbk();
lefrClear();
fclose(f);
free(userData);
return true;
}
/// Convenience entry point: parse \a lefFile into \a db.
/// @return true on a successful parse, false otherwise
bool read(LefDataBase& db, const string& lefFile)
{
    Driver lefDriver (db);
    return lefDriver.parse_file(lefFile);
}
} // namespace example
|
#include <quazip.h>
// Minimal smoke test: constructing a QuaZip object verifies that the quazip
// headers and library link correctly.
int main()
{
    QuaZip zip;
    return 0;
}
|
#include <mmu.h>
#include <kdebug.h>
#include <ostream.h>
/// Default constructor; no state requires initialization here.
MMU::MMU() {
}
/// Builds a 32-bit code/data segment descriptor (S=1, present, 4 KiB
/// granularity, 32-bit default operand size).
/// @param type descriptor type bits
/// @param base 32-bit segment base, split across three descriptor fields
/// @param lim  segment limit; bits 15..0 and 19..16 stored separately
/// @param dpl  descriptor privilege level (0-3)
/// @return the populated descriptor (also prints a debug trace)
MMU::SegDesc MMU::setSegDesc(uint32_t type,uint32_t base, uint32_t lim, uint32_t dpl) {
    SegDesc sd;
    sd.sd_lim_15_0 = lim & 0xffff;            // limit bits 15..0
    sd.sd_base_15_0 = (base) & 0xffff;        // base bits 15..0
    sd.sd_base_23_16 = ((base) >> 16) & 0xff; // base bits 23..16
    sd.sd_type = type;
    sd.sd_s = 1;                              // S=1: code/data segment
    sd.sd_dpl = dpl;
    sd.sd_p = 1;                              // present
    // No explicit 4-bit mask here (setTssDesc is the same) — presumably the
    // bitfield truncates to limit bits 19..16; verify the field width.
    sd.sd_lim_19_16 = (uint16_t)(lim >> 16);
    sd.sd_avl = 0;
    sd.sd_l = 0;                              // not a 64-bit code segment
    sd.sd_db = 1;                             // 32-bit default operation size
    sd.sd_g = 1;                              // 4 KiB granularity
    sd.sd_base_31_24 = (uint16_t)(base >> 24); // base bits 31..24
    // Debug trace of the descriptor type being installed.
    OStream out("\nsetGDT-->Desc type ", "red");
    out.writeValue(type);
    return sd;
}
/// Builds a TSS/system segment descriptor. Differs from setSegDesc in that
/// S=0 (system segment) and G=0 (byte granularity), and no debug trace is
/// printed.
/// @param type descriptor type bits
/// @param base 32-bit segment base, split across three descriptor fields
/// @param lim  segment limit; bits 15..0 and 19..16 stored separately
/// @param dpl  descriptor privilege level (0-3)
/// @return the populated descriptor
MMU::SegDesc MMU::setTssDesc(uint32_t type,uint32_t base, uint32_t lim, uint32_t dpl) {
    SegDesc td;
    td.sd_lim_15_0 = lim & 0xffff;            // limit bits 15..0
    td.sd_base_15_0 = (base) & 0xffff;        // base bits 15..0
    td.sd_base_23_16 = ((base) >> 16) & 0xff; // base bits 23..16
    td.sd_type = type;
    td.sd_s = 0;                              // S=0: system segment
    td.sd_dpl = dpl;
    td.sd_p = 1;                              // present
    td.sd_lim_19_16 = (uint16_t)(lim >> 16);  // limit bits 19..16 (bitfield truncates)
    td.sd_avl = 0;
    td.sd_l = 0;
    td.sd_db = 1;
    td.sd_g = 0;                              // byte granularity
    td.sd_base_31_24 = (uint16_t)(base >> 24); // base bits 31..24
    return td;
}
/// Fills an interrupt/trap gate descriptor (e.g. for the IDT).
/// @param gate   descriptor to populate (output)
/// @param istrap nonzero selects a trap gate (STS_TG32), zero an interrupt
///               gate (STS_IG32)
/// @param sel    code segment selector loaded on entry
/// @param off    handler entry offset, split across two descriptor fields
/// @param dpl    privilege level required to invoke the gate
void MMU::setGateDesc(GateDesc &gate, uint32_t istrap, uint32_t sel, uint32_t off, uint32_t dpl) {
    gate.gd_off_15_0 = (uint32_t)(off) & 0xffff;   // offset bits 15..0
    gate.gd_ss = (sel);
    gate.gd_args = 0;
    gate.gd_rsv1 = 0;
    gate.gd_type = (istrap) ? STS_TG32 : STS_IG32;
    gate.gd_s = 0;                                 // system descriptor
    gate.gd_dpl = (dpl);
    gate.gd_p = 1;                                 // present
    gate.gd_off_31_16 = (uint32_t)(off) >> 16;     // offset bits 31..16
}
/// Fills a 32-bit call gate descriptor (STS_CG32). Same layout as
/// setGateDesc but with a fixed call-gate type.
/// @param gate descriptor to populate (output)
/// @param ss   target code segment selector
/// @param off  entry offset, split across two descriptor fields
/// @param dpl  privilege level required to call through the gate
void MMU::setCallGate(GateDesc &gate, uint32_t ss, uint32_t off, uint32_t dpl) {
    gate.gd_off_15_0 = (uint32_t)(off) & 0xffff;   // offset bits 15..0
    gate.gd_ss = (ss);
    gate.gd_args = 0;                              // no stack arguments copied
    gate.gd_rsv1 = 0;
    gate.gd_type = STS_CG32;                       // 32-bit call gate
    gate.gd_s = 0;                                 // system descriptor
    gate.gd_dpl = (dpl);
    gate.gd_p = 1;                                 // present
    gate.gd_off_31_16 = (uint32_t)(off) >> 16;     // offset bits 31..16
}
/// Placeholder for thread-control-block setup; intentionally empty.
/// TODO: implement or remove.
void MMU::setTCB() {
}
/// Marks the page as reserved by setting bit 0 of its status word.
void MMU::setPageReserved(Page &p) {
    p.status |= 0x1;
}
/// Sets the property flag (bit 1) in the page's status word.
void MMU::setPageProperty(Page &p) {
    p.status |= 0x2;
}
/// Clears the property flag (bit 1) in the page's status word.
void MMU::clearPageProperty(Page &p) {
    p.status &= ~(0x2); // clear bit 1 (the 0x2 property flag)
}
|
/*
* Copyright (c) 2015, University of Michigan.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
/**
* TODO:
*
* @author: Johann Hauswald
* @contact: jahausw@umich.edu
*/
#include <assert.h>
#include <iostream>
#include <string>
#include <sstream>
#include <fstream>
#include <stdio.h>
#include <pthread.h>
#include <time.h>
#include "../../utils/timer.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core/types_c.h"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/gpu.hpp"
#include "opencv2/objdetect/objdetect.hpp"
using namespace cv;
using namespace std;
// Globals shared by the kernel routines below.
vector<KeyPoint> keys;  // keypoints detected in the input image (filled in main)
int minHessian = 400;   // SURF Hessian threshold for keypoint detection
// NOTE(review): constructed during static initialization, before main() —
// assumes OpenCV is safe to call that early; verify on all target platforms.
Ptr<xfeatures2d::SURF> surf = xfeatures2d::SURF::create(minHessian);
int iterations;         // not used in this file — presumably for other build configs
/// Detects SURF keypoints in an image using the global detector `surf`.
/// @param img input image (loaded as grayscale by main)
/// @return the detected keypoints
vector<KeyPoint> exec_feature(const Mat &img) {
  vector<KeyPoint> keypoints;
  surf->detect(img, keypoints, Mat());  // empty Mat => no mask
  return keypoints;
}
/// Computes SURF descriptors for already-detected keypoints.
/// @param img input image
/// @param keypoints keypoints to describe; taken by value so any
///        reordering/removal by OpenCV does not affect the caller's copy
/// @return descriptor matrix (one row per keypoint)
Mat exec_desc(const Mat &img, vector<KeyPoint> keypoints) {
  Mat descriptors;
  // useProvidedKeypoints=true: skip detection, only compute descriptors.
  surf->detectAndCompute(img, Mat(), keypoints, descriptors, true);
  return descriptors;
}
/// Benchmark driver: loads a grayscale image, times SURF feature extraction
/// ("fe") and descriptor computation ("fd"), and reports both via the STATS
/// macros. Usage: prog <image file>.
int main(int argc, char **argv) {
  if (argc < 2) {
    fprintf(stderr, "[ERROR] Input file required.\n\n");
    fprintf(stderr, "Usage: %s [INPUT FILE]\n\n", argv[0]);
    // Bug fix: a usage error must not exit with status 0 (success).
    exit(1);
  }
  cvUseOptimized(1);
  // Load the test image in grayscale, as the SURF kernels expect.
  Mat img = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
  if (img.empty()) {
    printf("image not found\n");
    exit(-1);
  }
  STATS_INIT("kernel", "feature_description");
  PRINT_STAT_STRING("abrv", "fd");
  PRINT_STAT_INT("rows", img.rows);
  PRINT_STAT_INT("columns", img.cols);
  // Time keypoint detection.
  tic();
  keys = exec_feature(img);
  PRINT_STAT_DOUBLE("fe", toc());
  // Time descriptor computation on the detected keypoints.
  tic();
  Mat testDesc = exec_desc(img, keys);
  PRINT_STAT_DOUBLE("fd", toc());
  STATS_END();
#ifdef TESTING
  FILE *f = fopen("../input/surf-fd.baseline", "w");
  if (f) {  // Bug fix: guard against fopen failure before writing.
    fprintf(f, "number of descriptors: %d\n", testDesc.size().height);
    fclose(f);
  }
#endif
  return 0;
}
|
/*
* Copyright (c) 2021 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "core/common/ace_application_info.h"
#include <thread>
#include "base/json/json_util.h"
#include "base/log/log.h"
#include "base/utils/string_utils.h"
#include "core/common/ace_page.h"
namespace OHOS::Ace {
/// Converts the semicolon-separated "key=value;key=value;..." locale keyword
/// string into a JSON object string, e.g. {"nu":"arab"}.
std::string AceApplicationInfo::GetUnicodeSetting() const
{
    std::vector<std::string> pairs;
    StringUtils::StringSpliter(keywordsAndValues_, ';', pairs);
    auto json = JsonUtil::Create(true);
    for (const auto& entry : pairs) {
        // [entry] is like "nu=arab" or "nu=" for most occasions, but may be "=" under extreme scenarios
        std::vector<std::string> tokens;
        StringUtils::StringSpliter(entry, '=', tokens);
        if (tokens.size() == 0) {
            continue;
        }
        // A missing value (e.g. "nu=") is emitted as an empty string.
        auto value = (tokens.size() == 2) ? tokens[1] : "";
        json->Put(tokens[0].c_str(), value.c_str());
    }
    return json->ToString(); // Return a string in json format
}
} // namespace OHOS::Ace
|
// MIT License
//
// Copyright (c) 2021 PingzhouMing
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "ThreadPrivateSolver.h"
/**
* @brief Constructor initializes array pointers for Tracks and Materials.
* @details The constructor retrieves the number of energy groups and FSRs
* and azimuthal angles from the Geometry and TrackGenerator if
* passed in as parameters by the user. The constructor initalizes
* the number of OpenMP threads to a default of 1.
* @param geometry an optional pointer to the Geometry
* @param track_generator an optional pointer to the TrackgGenerator
* @param cmfd an optional pointer to a Cmfd object object
*/
ThreadPrivateSolver::ThreadPrivateSolver(Geometry* geometry,
                                         TrackGenerator* track_generator,
                                         Cmfd* cmfd) :
  CPUSolver(geometry, track_generator, cmfd) {
  /* Thread-private tally arrays are allocated lazily by
   * initializeFluxArrays() / initializeCmfd(); start unallocated. */
  _thread_flux = NULL;
  _thread_currents = NULL;
}
/**
* @brief Destructor calls Solver subclass destructor to deletes arrays
* for fluxes and sources.
*/
ThreadPrivateSolver::~ThreadPrivateSolver() {
  /* _thread_flux is an array of per-thread arrays: free each thread's
   * buffer, then the array of pointers itself. */
  if (_thread_flux != NULL) {
    for (int t=0; t < _num_threads; t++)
      delete [] _thread_flux[t];
    delete [] _thread_flux;
    _thread_flux = NULL;
  }
  /* _thread_currents is a single flat array. */
  if (_thread_currents != NULL) {
    delete [] _thread_currents;
    _thread_currents = NULL;
  }
}
/**
* @brief Allocates memory for Track boundary angular flux and leakage and
* FSR scalar flux arrays.
* @details Deletes memory for old flux arrays if they were allocated for a
* previous simulation.
*/
/* Allocates the thread-private FSR scalar flux arrays on top of the parent's
 * flux arrays, releasing any allocation left over from a previous run.
 * (Removed an unused local `int size` that was declared but never used.) */
void ThreadPrivateSolver::initializeFluxArrays() {
  CPUSolver::initializeFluxArrays();
  /* Delete old flux arrays if they exist */
  if (_thread_flux != NULL) {
    for (int t=0; t < _num_threads; t++)
      delete [] _thread_flux[t];
    delete [] _thread_flux;
  }
  /* Allocate memory for the flux and leakage arrays */
  try{
    /* Allocate a thread local array of FSR scalar fluxes: one
     * _num_FSRs * _num_groups buffer per thread */
    _thread_flux = new FP_PRECISION*[_num_threads];
    for (int t=0; t < _num_threads; t++)
      _thread_flux[t] = new FP_PRECISION[_num_FSRs * _num_groups];
  }
  catch(std::exception &e) {
    log_printf(ERROR_LOG, "Could not allocate memory for the Solver's fluxes. "
               "Backtrace:%s", e.what());
  }
}
/**
* @brief Initializes Cmfd object for acceleration prior to source iteration.
* @details Instantiates a dummy Cmfd object if one was not assigned to
* the Solver by the user and initializes FSRs, Materials, fluxes
* and the Mesh. This method intializes thread private arrays
* for the Cmfd Mesh surface currents.
*/
void ThreadPrivateSolver::initializeCmfd() {
  /* Call parent class method */
  CPUSolver::initializeCmfd();
  /* Delete the old thread-private Cmfd Mesh surface currents array if it exists */
  if (_thread_currents != NULL)
    delete [] _thread_currents;
  int size;
  /* Allocate memory for the thread private Cmfd Mesh surface currents array */
  try{
    /* One flat buffer indexed as thread x mesh cell x 8 surfaces x Cmfd group */
    if (_cmfd->getMesh()->getCmfdOn()){
      size = _num_threads * _num_mesh_cells * 8 * _cmfd->getNumCmfdGroups();
      _thread_currents = new FP_PRECISION[size];
    }
  }
  catch(std::exception &e) {
    log_printf(ERROR_LOG, "Could not allocate memory for the Solver's Cmfd Mesh"
               " surface currents. Backtrace:%s", e.what());
  }
  return;
}
/**
* @brief Set the FSR scalar flux for each energy group to some value.
* @details This method also flattens the thread private FSR scalar flux array.
* @param value the value to assign to each FSR scalar flux
*/
void ThreadPrivateSolver::flattenFSRFluxes(FP_PRECISION value) {
  CPUSolver::flattenFSRFluxes(value);
  /* Reset the thread-private FSR flux arrays. These are zeroed rather than
   * set to `value` because they hold per-thread *increments* that
   * reduceThreadScalarFluxes() later adds into the global array. */
#ifdef _OPENMP
#pragma omp parallel for schedule(guided)
#endif
  for (int tid=0; tid < _num_threads; tid++) {
    for (int r=0; r < _num_FSRs; r++) {
      for (int e=0; e < _num_groups; e++)
        _thread_flux(tid,r,e) = 0.0;
    }
  }
  return;
}
/**
* @brief Set the surface currents for each energy group inside each Cmfd
* Mesh cell to zero.
*/
void ThreadPrivateSolver::zeroSurfaceCurrents() {
  CPUSolver::zeroSurfaceCurrents();
  /* Zero the thread-private current tallies (8 surfaces per mesh cell),
   * which reduceThreadSurfaceCurrents() later sums into the global array. */
#ifdef _OPENMP
#pragma omp parallel for schedule(guided)
#endif
  for (int tid=0; tid < _num_threads; tid++){
    for (int r=0; r < _num_mesh_cells; r++) {
      for (int s=0; s < 8; s++) {
        for (int e=0; e < _num_groups; e++)
          _thread_currents(tid,r*8+s,e) = 0.0;
      }
    }
  }
  return;
}
/**
* @brief This method performs one transport sweep of all azimuthal angles,
* Tracks, Track segments, polar angles and energy groups.
* @details The method integrates the flux along each track and updates the
* boundary fluxes for the corresponding output Track, while updating
* the scalar flux in each flat source region.
*/
void ThreadPrivateSolver::transportSweep() {
  /* Per-iteration loop variables; declared here so the OpenMP pragma below
   * can list them as private. */
  int tid;
  int fsr_id;
  Track* curr_track;
  int azim_index;
  int num_segments;
  segment* curr_segment;
  segment* segments;
  FP_PRECISION* track_flux;
  log_printf(DEBUG_LOG, "Transport sweep with %d OpenMP threads", _num_threads);
  /* Initialize flux in each FSR to zero (including the thread-private
   * tally arrays — see flattenFSRFluxes) */
  flattenFSRFluxes(0.0);
  if (_cmfd->getMesh()->getCmfdOn())
    zeroSurfaceCurrents();
  /* Loop over azimuthal angle halfspaces */
  for (int i=0; i < 2; i++) {
    /* Compute the minimum and maximum Track IDs corresponding to this
     * azimuthal angular halfspace */
    int min = i * (_tot_num_tracks / 2);
    int max = (i + 1) * (_tot_num_tracks / 2);
    /* Loop over each thread within this azimuthal angle halfspace.
     * Tallies go into the thread's own _thread_flux slice, so no atomics
     * are needed here. */
#ifdef _OPENMP
#pragma omp parallel for private(tid, fsr_id, curr_track, azim_index, \
  num_segments, segments, curr_segment, track_flux) schedule(guided)
#endif
    for (int track_id=min; track_id < max; track_id++) {
#ifdef _OPENMP
      tid = omp_get_thread_num();
#else
      tid = 0;
#endif
      /* Initialize local pointers to important data structures */
      curr_track = _tracks[track_id];
      azim_index = curr_track->getAzimAngleIndex();
      num_segments = curr_track->getNumSegments();
      segments = curr_track->getSegments();
      track_flux = &_boundary_flux(track_id,0,0,0);
      /* Loop over each Track segment in forward direction */
      for (int s=0; s < num_segments; s++) {
        curr_segment = &segments[s];
        fsr_id = curr_segment->_region_id;
        scalarFluxTally(curr_segment, azim_index, track_flux,
                        &_thread_flux(tid,fsr_id,0),true);
      }
      /* Transfer boundary angular flux to outgoing track */
      transferBoundaryFlux(track_id, azim_index, true, track_flux);
      /* Loop over each Track segment in reverse direction; the reverse
       * direction's angular flux is stored _polar_times_groups further in */
      track_flux += _polar_times_groups;
      for (int s=num_segments-1; s > -1; s--) {
        curr_segment = &segments[s];
        fsr_id = curr_segment->_region_id;
        scalarFluxTally(curr_segment, azim_index, track_flux,
                        &_thread_flux(tid,fsr_id,0),false);
      }
      /* Transfer boundary angular flux to outgoing Track */
      transferBoundaryFlux(track_id, azim_index, false, track_flux);
    }
  }
  /* Fold the per-thread tallies back into the global arrays */
  reduceThreadScalarFluxes();
  if (_cmfd->getMesh()->getCmfdOn())
    reduceThreadSurfaceCurrents();
  return;
}
/**
* @brief Computes the contribution to the FSR scalar flux from a Track segment.
* @details This method integrates the angular flux for a Track segment across
* energy groups and polar angles, and tallies it into the FSR scalar
* flux, and updates the Track's angular flux.
* @param curr_segment a pointer to the Track segment of interest
* @param azim_index a pointer to the azimuthal angle index for this segment
* @param track_flux a pointer to the Track's angular flux
* @param fsr_flux a pointer to the temporary FSR scalar flux buffer
* @param fwd
*/
/* Attenuates the angular flux along one Track segment and tallies the
 * change into the thread-private FSR scalar flux buffer and, when Cmfd
 * acceleration is on, into the thread-private mesh surface currents.
 * (Removed dead `pe` counters that were incremented but never read.)
 * @param curr_segment the Track segment of interest
 * @param azim_index   azimuthal angle index for this segment
 * @param track_flux   the Track's angular flux (updated in place)
 * @param fsr_flux     temporary FSR scalar flux buffer for this thread
 * @param fwd          true when sweeping the segment in the forward direction */
void ThreadPrivateSolver::scalarFluxTally(segment* curr_segment,
                                          int azim_index,
                                          FP_PRECISION* track_flux,
                                          FP_PRECISION* fsr_flux,
                                          bool fwd){
#ifdef _OPENMP
  int tid = omp_get_thread_num();
#else
  int tid = 0;
#endif
  int fsr_id = curr_segment->_region_id;
  FP_PRECISION length = curr_segment->_length;
  FP_PRECISION* sigma_t = curr_segment->_material->getSigmaT();
  /* The change in angular flux along this Track segment in the FSR */
  FP_PRECISION delta_psi;
  FP_PRECISION exponential;
  /* Loop over energy groups and polar angles: attenuate the angular flux
   * and accumulate the scalar flux contribution */
  for (int e=0; e < _num_groups; e++) {
    for (int p=0; p < _num_polar; p++){
      exponential = computeExponential(sigma_t[e], length, p);
      delta_psi = (track_flux(p,e)-_reduced_source(fsr_id,e))*exponential;
      fsr_flux[e] += delta_psi * _polar_weights(azim_index,p);
      track_flux(p,e) -= delta_psi;
    }
  }
  /* Tally Cmfd Mesh surface currents on the surface this segment crosses
   * in the direction of travel */
  if (_cmfd->getMesh()->getCmfdOn()){
    if (curr_segment->_mesh_surface_fwd != -1 && fwd){
      for (int e = 0; e < _num_groups; e++) {
        for (int p = 0; p < _num_polar; p++){
          /* Increment current (polar and azimuthal weighted flux, group) */
          _thread_currents(tid,curr_segment->_mesh_surface_fwd,e) +=
            track_flux(p,e)*_polar_weights(azim_index, p)/2.0;
        }
      }
    }
    else if (curr_segment->_mesh_surface_bwd != -1 && !fwd){
      for (int e = 0; e < _num_groups; e++) {
        for (int p = 0; p < _num_polar; p++){
          /* Increment current (polar and azimuthal weighted flux, group) */
          _thread_currents(tid,curr_segment->_mesh_surface_bwd,e) +=
            track_flux(p,e)*_polar_weights(azim_index, p)/2.0;
        }
      }
    }
  }
  return;
}
/**
* @brief Reduces the FSR scalar fluxes from private thread private arrays to a
* global array FSR scalar flux array.
*/
void ThreadPrivateSolver::reduceThreadScalarFluxes() {
  /* Serial reduction: sum each thread's private tallies into the global
   * scalar flux array. Kept serial so the floating-point accumulation
   * order is deterministic. */
  for (int tid=0; tid < _num_threads; tid++) {
    for (int r=0; r < _num_FSRs; r++) {
      for (int e=0; e < _num_groups; e++)
        _scalar_flux(r,e) += _thread_flux(tid,r,e);
    }
  }
  return;
}
/**
* @brief Reduces the Cmfd Mesh surface currents from private thread arrays to
* a global Mesh surface current array.
*/
void ThreadPrivateSolver::reduceThreadSurfaceCurrents() {
  /* Serial reduction of the flat thread-private currents buffer, indexed
   * as thread x (mesh cell, surface) x Cmfd group, into the global
   * _surface_currents array. */
  for (int tid=0; tid < _num_threads; tid++){
    for (int r=0; r < _num_mesh_cells; r++) {
      for (int s=0; s < 8; s++) {
        for (int e=0; e < _cmfd->getNumCmfdGroups(); e++){
          _surface_currents[(r*8+s)*_cmfd->getNumCmfdGroups() + e] +=
            _thread_currents[(tid)*_num_mesh_cells*8*
                             _cmfd->getNumCmfdGroups() + (r*8+s)*
                             _cmfd->getNumCmfdGroups() + e];
        }
      }
    }
  }
  return;
}
|
/*******************************************************************************
* Copyright 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef JIT_AVX512_COMMON_GEMM_F32_HPP
#define JIT_AVX512_COMMON_GEMM_F32_HPP
#include "c_types_map.hpp"
#include "jit_generator.hpp"
namespace mkldnn {
namespace impl {
namespace cpu {
/// JIT-generated single-precision GEMM for AVX-512:
/// C = alpha * op(A) * op(B) + beta * C, with an optional per-row bias.
class jit_avx512_common_gemm_f32 {
public:
    /// BLAS-style sgemm entry point. Scalar arguments are passed by pointer,
    /// following the Fortran calling convention.
    void sgemm(const char *transa, const char *transb, const int *M,
            const int *N, const int *K, const float *alpha, const float *A,
            const int *lda, const float *B, const int *ldb, const float *beta,
            float *C, const int *ldc, const float *bias = NULL);
    /// Generates kernels specialized for the given transpose flags, beta,
    /// and presence of a bias term.
    jit_avx512_common_gemm_f32(
            char transa, char transb, float beta, bool hasBias = false);
    ~jit_avx512_common_gemm_f32();
private:
    /// Signature of a generated micro-kernel.
    typedef void (*ker)(long long int, long long int, long long int, float *,
            float *, long long int, float *, long long int, float *, float *,
            long long int, float *, float *);
    /// Single-threaded driver over one (m, n, k) partition.
    void sgemm_nocopy_driver(const char *transa, const char *transb, int m,
            int n, int k, const float *alpha, const float *a, int lda,
            const float *b, int ldb, const float *beta, float *c, int ldc,
            const float *bias, float *ws);
    /// Splits n among nthr threads, returning this thread's offset/block.
    inline void partition_unit_diff(
            int ithr, int nthr, int n, int *t_offset, int *t_block);
    /// Accumulates p_src into p_dst element-wise (m x n, given leading dims).
    inline void sum_two_matrices(
            int m, int n, float *p_src, int ld_src, float *p_dst, int ld_dst);
    /// Chooses the thread decomposition and blocking for the given problem.
    inline void calc_nthr_nocopy_avx512_common(int m, int n, int k, int nthrs,
            int *nthrs_m, int *nthrs_n, int *nthrs_k, int *BM, int *BN,
            int *BK);
    char transa_, transb_;       // transpose flags captured at construction
    float beta_;                 // beta captured at construction
    bool hasBias_;               // whether kernels add the bias term
    struct xbyak_gemm;           // opaque generated-code holder
    // Kernels specialized for beta != 0/1 (bn), beta == 1 (b1), beta == 0 (b0).
    xbyak_gemm *ker_bn_, *ker_b1_, *ker_b0_;
    unsigned int *ompstatus_;    // per-thread status/scratch flags
    int nthrs_;                  // thread count the kernels were built for
};
}
}
}
#endif
|
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <sys/stat.h>
#include <cldnn/cldnn_config.hpp>
#include "cldnn_config.h"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "ie_api.h"
#include "file_utils.h"
#include "cldnn_itt.h"
#include <thread>
#ifdef _WIN32
# include <direct.h>
#ifdef ENABLE_UNICODE_PATH_SUPPORT
# define mkdir(dir, mode) _wmkdir(dir)
#else
# define mkdir(dir, mode) _mkdir(dir)
#endif // ENABLE_UNICODE_PATH_SUPPORT
#endif // _WIN32
using namespace InferenceEngine;
namespace CLDNNPlugin {
/// Creates a single directory at _path (mode 0755 on POSIX; mode ignored by
/// the _mkdir/_wmkdir shims on Windows). An already-existing directory is
/// not an error; any other failure throws an IE exception.
static void createDirectory(std::string _path) {
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
    // Convert to a wide path so non-ASCII directory names work on Windows.
    std::wstring widepath = FileUtils::multiByteCharToWString(_path.c_str());
    const wchar_t* path = widepath.c_str();
#else
    const char* path = _path.c_str();
#endif
    auto err = mkdir(path, 0755);
    if (err != 0 && errno != EEXIST) {
        IE_THROW() << "Couldn't create directory! (err=" << err << "; errno=" << errno << ")";
    }
}
/// Parses a plugin configuration map and updates the corresponding Config
/// fields. Unknown keys and malformed values raise InferenceEngine
/// exceptions; on success the derived key/value map is refreshed once.
/// @param configMap key/value pairs as passed to the plugin's SetConfig()
void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap) {
    OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "Config::UpdateFromMap");
    for (auto& kvp : configMap) {
        std::string key = kvp.first;
        std::string val = kvp.second;
        if (key.compare(PluginConfigParams::KEY_PERF_COUNT) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                useProfiling = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                useProfiling = false;
            } else {
                IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
            }
        } else if (key.compare(PluginConfigParams::KEY_DYN_BATCH_ENABLED) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                enableDynamicBatch = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                enableDynamicBatch = false;
            } else {
                IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
            }
        } else if (key.compare(PluginConfigParams::KEY_DUMP_KERNELS) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                dumpCustomKernels = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                dumpCustomKernels = false;
            } else {
                IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
            }
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_PLUGIN_PRIORITY) == 0) {
            std::stringstream ss(val);
            uint32_t uVal(0);
            ss >> uVal;
            if (ss.fail()) {
                IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
            }
            switch (uVal) {
            case 0:
                queuePriority = cldnn::priority_mode_types::disabled;
                break;
            case 1:
                queuePriority = cldnn::priority_mode_types::low;
                break;
            case 2:
                queuePriority = cldnn::priority_mode_types::med;
                break;
            case 3:
                queuePriority = cldnn::priority_mode_types::high;
                break;
            default:
                IE_THROW(ParameterMismatch) << "Unsupported queue priority value: " << uVal;
            }
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) == 0) {
            std::stringstream ss(val);
            uint32_t uVal(0);
            ss >> uVal;
            if (ss.fail()) {
                IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
            }
            switch (uVal) {
            case 0:
                queueThrottle = cldnn::throttle_mode_types::disabled;
                break;
            case 1:
                queueThrottle = cldnn::throttle_mode_types::low;
                break;
            case 2:
                queueThrottle = cldnn::throttle_mode_types::med;
                break;
            case 3:
                queueThrottle = cldnn::throttle_mode_types::high;
                break;
            default:
                IE_THROW(ParameterMismatch) << "Unsupported queue throttle value: " << uVal;
            }
        } else if (key.compare(PluginConfigParams::KEY_CONFIG_FILE) == 0) {
            // Whitespace-separated list of custom-layer config files.
            std::stringstream ss(val);
            std::istream_iterator<std::string> begin(ss);
            std::istream_iterator<std::string> end;
            std::vector<std::string> configFiles(begin, end);
            for (auto& file : configFiles) {
                CLDNNCustomLayer::LoadFromFile(file, customLayers);
            }
        } else if (key.compare(PluginConfigParams::KEY_TUNING_MODE) == 0) {
            if (val.compare(PluginConfigParams::TUNING_DISABLED) == 0) {
                tuningConfig.mode = cldnn::tuning_mode::tuning_disabled;
            } else if (val.compare(PluginConfigParams::TUNING_CREATE) == 0) {
                tuningConfig.mode = cldnn::tuning_mode::tuning_tune_and_cache;
            } else if (val.compare(PluginConfigParams::TUNING_USE_EXISTING) == 0) {
                tuningConfig.mode = cldnn::tuning_mode::tuning_use_cache;
            } else if (val.compare(PluginConfigParams::TUNING_UPDATE) == 0) {
                tuningConfig.mode = cldnn::tuning_mode::tuning_use_and_update;
            } else if (val.compare(PluginConfigParams::TUNING_RETUNE) == 0) {
                tuningConfig.mode = cldnn::tuning_mode::tuning_retune_and_cache;
            } else {
                IE_THROW(NotFound) << "Unsupported tuning mode value by plugin: " << val;
            }
        } else if (key.compare(PluginConfigParams::KEY_TUNING_FILE) == 0) {
            tuningConfig.cache_file_path = val;
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_MEM_POOL) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                memory_pool_on = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                memory_pool_on = false;
            } else {
                IE_THROW(NotFound) << "Unsupported memory pool flag value: " << val;
            }
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_GRAPH_DUMPS_DIR) == 0) {
            if (!val.empty()) {
                graph_dumps_dir = val;
                createDirectory(graph_dumps_dir);
            }
        } else if (key.compare(PluginConfigParams::KEY_CACHE_DIR) == 0) {
            if (!val.empty()) {
                kernels_cache_dir = val;
                createDirectory(kernels_cache_dir);
            }
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_SOURCES_DUMPS_DIR) == 0) {
            if (!val.empty()) {
                sources_dumps_dir = val;
                createDirectory(sources_dumps_dir);
            }
        } else if (key.compare(PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                exclusiveAsyncRequests = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                exclusiveAsyncRequests = false;
            } else {
                IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
            }
        } else if (key.compare(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == 0) {
            if (val.compare(PluginConfigParams::GPU_THROUGHPUT_AUTO) == 0) {
                throughput_streams = 2;
            } else {
                int val_i;
                try {
                    val_i = std::stoi(val);
                } catch (const std::exception&) {
                    IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS
                               << ". Expected only positive numbers (#streams) or "
                               << "PluginConfigParams::GPU_THROUGHPUT_AUTO";
                }
                if (val_i > 0)
                    throughput_streams = static_cast<uint16_t>(val_i);
            }
        } else if (key.compare(PluginConfigParams::KEY_DEVICE_ID) == 0) {
            // Validate that the passed value is a number.
            // NOTE(review): std::stoi also accepts negative values — confirm
            // whether those should be rejected here.
            try {
                int val_i = std::stoi(val);
                (void)val_i;
            } catch (const std::exception&) {
                IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_DEVICE_ID
                           << ". DeviceIDs are only represented by positive numbers";
            }
            // Set this value.
            device_id = val;
        } else if (key.compare(PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                enableInt8 = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                enableInt8 = false;
            } else {
                IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
            }
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                nv12_two_inputs = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                nv12_two_inputs = false;
            } else {
                IE_THROW(NotFound) << "Unsupported NV12 flag value: " << val;
            }
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                enable_fp16_for_quantized_models = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                enable_fp16_for_quantized_models = false;
            } else {
                IE_THROW(NotFound) << "Unsupported KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS flag value: " << val;
            }
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_MAX_NUM_THREADS) == 0) {
            int max_threads = std::max(1, static_cast<int>(std::thread::hardware_concurrency()));
            try {
                int val_i = std::stoi(val);
                // Out-of-range values silently clamp to the hardware maximum.
                if (val_i <= 0 || val_i > max_threads) {
                    n_threads = max_threads;
                } else {
                    n_threads = val_i;
                }
            } catch (const std::exception&) {
                IE_THROW() << "Wrong value for property key " << CLDNNConfigParams::KEY_CLDNN_MAX_NUM_THREADS << ": " << val
                           << "\nSpecify the number of threads use for build as an integer."
                           << "\nOut of range value will be set as a default value, maximum concurrent threads.";
            }
        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_ENABLE_LOOP_UNROLLING) == 0) {
            if (val.compare(PluginConfigParams::YES) == 0) {
                enable_loop_unrolling = true;
            } else if (val.compare(PluginConfigParams::NO) == 0) {
                enable_loop_unrolling = false;
            } else {
                IE_THROW(ParameterMismatch) << "Unsupported KEY_CLDNN_ENABLE_LOOP_UNROLLING flag value: " << val;
            }
        } else {
            IE_THROW(NotFound) << "Unsupported property key by plugin: " << key;
        }
    }
    // Perf fix: refresh the derived key/value map once after processing all
    // entries, instead of once per entry inside the loop.
    adjustKeyMapValues();
}
void Config::adjustKeyMapValues() {
OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "Config::AdjustKeyMapValues");
if (useProfiling)
key_config_map[PluginConfigParams::KEY_PERF_COUNT] = PluginConfigParams::YES;
else
key_config_map[PluginConfigParams::KEY_PERF_COUNT] = PluginConfigParams::NO;
if (dumpCustomKernels)
key_config_map[PluginConfigParams::KEY_DUMP_KERNELS] = PluginConfigParams::YES;
else
key_config_map[PluginConfigParams::KEY_DUMP_KERNELS] = PluginConfigParams::NO;
if (exclusiveAsyncRequests)
key_config_map[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::YES;
else
key_config_map[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::NO;
if (memory_pool_on)
key_config_map[CLDNNConfigParams::KEY_CLDNN_MEM_POOL] = PluginConfigParams::YES;
else
key_config_map[CLDNNConfigParams::KEY_CLDNN_MEM_POOL] = PluginConfigParams::NO;
if (enableDynamicBatch)
key_config_map[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES;
else
key_config_map[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::NO;
if (nv12_two_inputs)
key_config_map[CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS] = PluginConfigParams::YES;
else
key_config_map[CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS] = PluginConfigParams::NO;
if (enable_fp16_for_quantized_models)
key_config_map[CLDNNConfigParams::KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS] = PluginConfigParams::YES;
else
key_config_map[CLDNNConfigParams::KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS] = PluginConfigParams::NO;
{
std::string qp = "0";
switch (queuePriority) {
case cldnn::priority_mode_types::low: qp = "1"; break;
case cldnn::priority_mode_types::med: qp = "2"; break;
case cldnn::priority_mode_types::high: qp = "3"; break;
default: break;
}
key_config_map[CLDNNConfigParams::KEY_CLDNN_PLUGIN_PRIORITY] = qp;
}
{
std::string qt = "0";
switch (queueThrottle) {
case cldnn::throttle_mode_types::low: qt = "1"; break;
case cldnn::throttle_mode_types::med: qt = "2"; break;
case cldnn::throttle_mode_types::high: qt = "3"; break;
default: break;
}
key_config_map[CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE] = qt;
}
{
std::string tm = PluginConfigParams::TUNING_DISABLED;
switch (tuningConfig.mode) {
case cldnn::tuning_mode::tuning_tune_and_cache: tm = PluginConfigParams::TUNING_CREATE; break;
case cldnn::tuning_mode::tuning_use_cache: tm = PluginConfigParams::TUNING_USE_EXISTING; break;
case cldnn::tuning_mode::tuning_use_and_update: tm = PluginConfigParams::TUNING_UPDATE; break;
case cldnn::tuning_mode::tuning_retune_and_cache: tm = PluginConfigParams::TUNING_RETUNE; break;
default: break;
}
key_config_map[PluginConfigParams::KEY_TUNING_MODE] = tm;
key_config_map[PluginConfigParams::KEY_TUNING_FILE] = tuningConfig.cache_file_path;
}
key_config_map[CLDNNConfigParams::KEY_CLDNN_GRAPH_DUMPS_DIR] = graph_dumps_dir;
key_config_map[CLDNNConfigParams::KEY_CLDNN_SOURCES_DUMPS_DIR] = sources_dumps_dir;
key_config_map[PluginConfigParams::KEY_CACHE_DIR] = kernels_cache_dir;
key_config_map[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = std::to_string(throughput_streams);
key_config_map[PluginConfigParams::KEY_DEVICE_ID] = device_id;
key_config_map[PluginConfigParams::KEY_CONFIG_FILE] = "";
key_config_map[CLDNNConfigParams::KEY_CLDNN_MAX_NUM_THREADS] = std::to_string(n_threads);
if (enable_loop_unrolling)
key_config_map[CLDNNConfigParams::KEY_CLDNN_ENABLE_LOOP_UNROLLING] = PluginConfigParams::YES;
else
key_config_map[CLDNNConfigParams::KEY_CLDNN_ENABLE_LOOP_UNROLLING] = PluginConfigParams::NO;
}
} // namespace CLDNNPlugin
|
/**
* Mojang Authentication API
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* OpenAPI spec version: 2020-06-05
*
* NOTE: This class is auto generated by OpenAPI-Generator 3.3.4.
* https://openapi-generator.tech
* Do not edit the class manually.
*/
#include "RefreshResponse.h"
namespace com {
namespace github {
namespace asyncmc {
namespace mojang {
namespace authentication {
namespace cpp {
namespace restsdk {
namespace model {
// Default constructor; all fields live in the RefreshRequest base class.
RefreshResponse::RefreshResponse()
{
}
// Trivial destructor; no resources are owned beyond the base class.
RefreshResponse::~RefreshResponse()
{
}
// Placeholder required by the generated ModelBase interface; the generator
// did not emit any constraints for this model.
void RefreshResponse::validate()
{
// TODO: implement validation
}
web::json::value RefreshResponse::toJson() const
{
    // Serialization is inherited wholesale from RefreshRequest; this model
    // adds no fields of its own.
    return this->RefreshRequest::toJson();
}
void RefreshResponse::fromJson(const web::json::value& val)
{
    // Deserialization is inherited wholesale from RefreshRequest.
    RefreshRequest::fromJson(val);
}
void RefreshResponse::toMultipart(std::shared_ptr<MultipartFormData> multipart, const utility::string_t& prefix) const
{
    // Ensure a non-empty prefix ends with the '.' separator before it is
    // prepended to the field names.
    utility::string_t fieldPrefix = prefix;
    const utility::string_t dot = utility::conversions::to_string_t(".");
    if (!fieldPrefix.empty() && fieldPrefix.substr(fieldPrefix.size() - 1) != dot)
    {
        fieldPrefix += dot;
    }
    // accessToken is always written; clientToken only when it has been set.
    multipart->add(ModelBase::toHttpContent(fieldPrefix + utility::conversions::to_string_t("accessToken"), m_AccessToken));
    if (m_ClientTokenIsSet)
    {
        multipart->add(ModelBase::toHttpContent(fieldPrefix + utility::conversions::to_string_t("clientToken"), m_ClientToken));
    }
}
void RefreshResponse::fromMultiPart(std::shared_ptr<MultipartFormData> multipart, const utility::string_t& prefix)
{
    // NOTE(review): the normalized prefix is computed but the lookups below use
    // bare field names — this mirrors the generator's output; confirm before
    // changing either side.
    utility::string_t fieldPrefix = prefix;
    const utility::string_t dot = utility::conversions::to_string_t(".");
    if (!fieldPrefix.empty() && fieldPrefix.substr(fieldPrefix.size() - 1) != dot)
    {
        fieldPrefix += dot;
    }
    setAccessToken(ModelBase::stringFromHttpContent(multipart->getContent(utility::conversions::to_string_t("accessToken"))));
    if (multipart->hasContent(utility::conversions::to_string_t("clientToken")))
    {
        setClientToken(ModelBase::stringFromHttpContent(multipart->getContent(utility::conversions::to_string_t("clientToken"))));
    }
}
}
}
}
}
}
}
}
}
|
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// UNSUPPORTED: libcpp-has-no-threads
// UNSUPPORTED: c++03
// ALLOW_RETRIES: 3
// <future>
// class future<R>
// template <class Rep, class Period>
// future_status
// wait_for(const chrono::duration<Rep, Period>& rel_time) const;
#include <future>
#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
typedef std::chrono::milliseconds ms;
static const ms sleepTime(500);
static const ms waitTime(5000);
// Worker body for future<int>: sleep, then satisfy the promise with a value.
void func1(std::promise<int> p)
{
std::this_thread::sleep_for(sleepTime);
p.set_value(3);
}
// Backing storage for the reference delivered through future<int&>.
int j = 0;
// Worker body for future<int&>: sleep, then satisfy the promise with a
// reference to the global above.
void func3(std::promise<int&> p)
{
std::this_thread::sleep_for(sleepTime);
j = 5;
p.set_value(j);
}
// Worker body for future<void>: sleep, then make the shared state ready.
void func5(std::promise<void> p)
{
std::this_thread::sleep_for(sleepTime);
p.set_value();
}
// Drive one promise/future pair through a detached worker thread and check
// that wait_for() reports `timeout` while the promise is unsatisfied and
// `ready` afterwards, in both wait()-then-wait_for() and the reverse order.
template <typename T, typename F>
void test(F func, bool waitFirst) {
    using Clock = std::chrono::high_resolution_clock;
    std::promise<T> prom;
    std::future<T> fut = prom.get_future();
    const Clock::time_point start = Clock::now();
    Clock::time_point ready;
    support::make_test_thread(func, std::move(prom)).detach();
    assert(fut.valid());
    // The worker sleeps before setting the value, so a 1ms wait must time out.
    assert(fut.wait_for(ms(1)) == std::future_status::timeout);
    assert(fut.valid());
    if (waitFirst) {
        fut.wait();
        assert(fut.valid());
        ready = Clock::now();
        assert(fut.wait_for(ms(waitTime)) == std::future_status::ready);
        assert(fut.valid());
    } else {
        assert(fut.wait_for(ms(waitTime)) == std::future_status::ready);
        assert(fut.valid());
        ready = Clock::now();
        fut.wait();
        assert(fut.valid());
    }
    // The future can only have become ready after the worker's sleep.
    assert(ready - start >= sleepTime);
}
int main(int, char**)
{
// Exercise value, reference and void futures, first calling wait() before
// wait_for() and then the opposite order.
test<int>(func1, true);
test<int&>(func3, true);
test<void>(func5, true);
test<int>(func1, false);
test<int&>(func3, false);
test<void>(func5, false);
return 0;
}
|
#include "nan.h"
#include <windows.h>
#include <process.h>
#include <Tlhelp32.h>
#include <winbase.h>
#include <string.h>
#include <iostream>
using namespace Nan;
using namespace v8;
bool killProcessByName(const char *filename)
{
HANDLE hSnapShot = CreateToolhelp32Snapshot(TH32CS_SNAPALL, NULL);
PROCESSENTRY32 pEntry;
pEntry.dwSize = sizeof(pEntry);
BOOL hRes = Process32First(hSnapShot, &pEntry);
bool success;
while (hRes)
{
if (strcmp(pEntry.szExeFile, filename) == 0)
{
HANDLE hProcess = OpenProcess(PROCESS_TERMINATE, 0,
(DWORD)pEntry.th32ProcessID);
if (hProcess != NULL)
{
success = TerminateProcess(hProcess, 9);
CloseHandle(hProcess);
}
}
hRes = Process32Next(hSnapShot, &pEntry);
}
CloseHandle(hSnapShot);
if (success == true)
{
return true;
}
return false;
}
// JS binding: killProcessByName(name: string) -> null.
// Throws a TypeError on bad arguments and an Error when no process could be
// stopped.
NAN_METHOD(KillProcessByName)
{
    if (info.Length() != 1)
    {
        Nan::ThrowTypeError("Wrong number of arguments");
        return;
    }
    if (!info[0]->IsString())
    {
        Nan::ThrowTypeError("A string was expected for the first argument, but wasn't received.");
        return;
    }

    Nan::Utf8String processName(Nan::To<v8::String>(info[0]).ToLocalChecked());
    const bool ok = killProcessByName(std::string(*processName).c_str());
    if (!ok)
    {
        Nan::ThrowError("Unexpected error while stopping the process.");
        return;
    }
    info.GetReturnValue().SetNull();
}
// Register the native methods this addon exposes to JavaScript.
NAN_MODULE_INIT(Init)
{
Nan::SetMethod(target, "killProcessByName", KillProcessByName);
}
// Worker-thread-aware registration is only available on Node >= 10; fall back
// to the classic NODE_MODULE macro otherwise.
#if NODE_MAJOR_VERSION >= 10
NAN_MODULE_WORKER_ENABLED(registryNativeModule, Init)
#else
NODE_MODULE(registryNativeModule, Init);
#endif
|
//
// Created by corentin on 7/3/19.
//
#include <boost/foreach.hpp>
#include <dynamic-graph/debug.h>
#include <dynamic-graph/factory.h>
#include <dynamic-graph/signal-array.h>
#include <dynamic-graph/tracer.h>
#include <dynamic-graph/signal-cast-helper.h>
#include <assert.h>
#include <boost/test/unit_test.hpp>
#include <boost/test/output_test_stream.hpp>
#define BOOST_TEST_MODULE signal_array
using boost::test_tools::output_test_stream;
dynamicgraph::SignalArray_const<double> sig;
using namespace std;
using namespace dynamicgraph;
using namespace dynamicgraph::command;
BOOST_AUTO_TEST_CASE(test_array) {
  // Appending a signal twice must grow both array flavours to size 2.
  SignalBase<int> sigBa("add test");

  SignalArray_const<int> constArr(1);
  constArr << sigBa;
  constArr << sigBa;
  BOOST_CHECK_EQUAL(2, constArr.getSize());

  SignalArray<int> mutArr(1);
  mutArr << sigBa;
  mutArr << sigBa;
  BOOST_CHECK_EQUAL(2, mutArr.getSize());

  // A SignalArray_const copy-constructed from a SignalArray keeps its size.
  SignalBase<int> sigB("constructor test");
  SignalArray<int> source(2);
  source << sigB;
  source << sigB;
  SignalArray_const<int> copied(source);
  BOOST_CHECK_EQUAL(source.getSize(), copied.getSize());
}
BOOST_AUTO_TEST_CASE(test_base)
{
  // setReady() must flip the flag reported by getReady().
  SignalBase<int> signal("test");
  signal.setReady();
  BOOST_CHECK_EQUAL(true, signal.getReady());
}
BOOST_AUTO_TEST_CASE(test_cast_helper)
{
  // A valid integer string must cast without throwing.
  DefaultCastRegisterer<int> defaultCR;
  std::istringstream iss;
  iss.str("1");
  defaultCR.cast(iss);

  // A non-numeric string must make the cast throw ExceptionSignal.
  // Fix: the failing input was previously set on `iss` instead of
  // `iss_fail`, so the cast below ran on an empty stream and the error
  // path was never exercised.  Also catch by const reference, not by value.
  try {
    std::istringstream iss_fail;
    iss_fail.str("test");
    defaultCR.cast(iss_fail);
  } catch (const ExceptionSignal &) {
    // Expected: "test" is not a valid int.
  }
}
|
/*
-----------------------------------------------------------------------------
This source file is part of OGRE
(Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org
Copyright (c) 2000-2014 Torus Knot Software Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
#include "OgreGLRenderSystem.h"
#include "OgreGLNativeSupport.h"
#include "OgreLogManager.h"
#include "OgreStringConverter.h"
#include "OgreLight.h"
#include "OgreCamera.h"
#include "OgreGLTextureManager.h"
#include "OgreGLHardwareVertexBuffer.h"
#include "OgreGLHardwareIndexBuffer.h"
#include "OgreDefaultHardwareBufferManager.h"
#include "OgreGLUtil.h"
#include "OgreGLGpuProgram.h"
#include "OgreGLGpuNvparseProgram.h"
#include "ATI_FS_GLGpuProgram.h"
#include "OgreGLGpuProgramManager.h"
#include "OgreException.h"
#include "OgreGLSLExtSupport.h"
#include "OgreGLHardwareOcclusionQuery.h"
#include "OgreGLDepthBuffer.h"
#include "OgreGLHardwarePixelBuffer.h"
#include "OgreGLContext.h"
#include "OgreGLSLProgramFactory.h"
#include "OgreGLStateCacheManager.h"
#include "OgreGLFBORenderTexture.h"
#include "OgreGLPBRenderTexture.h"
#include "OgreConfig.h"
#include "OgreViewport.h"
#include "OgreGLPixelFormat.h"
#include "OgreGLSLProgramCommon.h"
#include "OgreGLFBOMultiRenderTarget.h"
#if OGRE_PLATFORM == OGRE_PLATFORM_APPLE
extern "C" void glFlushRenderAPPLE();
#endif
// Convenience macro from ARB_vertex_buffer_object spec
#define VBO_BUFFER_OFFSET(i) ((char *)(i))
namespace Ogre {
// Callback function used when registering GLGpuPrograms
// Factory callback registered with the GPU program manager: builds an ARB
// assembly program (arbvp1/arbfp1 and the NV extensions routed through it).
static GpuProgram* createGLArbGpuProgram(ResourceManager* creator,
    const String& name, ResourceHandle handle,
    const String& group, bool isManual, ManualResourceLoader* loader,
    GpuProgramType gptype, const String& syntaxCode)
{
    GLArbGpuProgram* program =
        new GLArbGpuProgram(creator, name, handle, group, isManual, loader);
    program->setType(gptype);
    program->setSyntaxCode(syntaxCode);
    return program;
}
// Factory callback for NV_parse (fp20) fragment programs.
static GpuProgram* createGLGpuNvparseProgram(ResourceManager* creator,
    const String& name, ResourceHandle handle,
    const String& group, bool isManual, ManualResourceLoader* loader,
    GpuProgramType gptype, const String& syntaxCode)
{
    GLGpuNvparseProgram* program =
        new GLGpuNvparseProgram(creator, name, handle, group, isManual, loader);
    program->setType(gptype);
    program->setSyntaxCode(syntaxCode);
    return program;
}
// Factory callback for ATI fragment shader (ps_1_x) programs.
static GpuProgram* createGL_ATI_FS_GpuProgram(ResourceManager* creator,
    const String& name, ResourceHandle handle,
    const String& group, bool isManual, ManualResourceLoader* loader,
    GpuProgramType gptype, const String& syntaxCode)
{
    ATI_FS_GLGpuProgram* program =
        new ATI_FS_GLGpuProgram(creator, name, handle, group, isManual, loader);
    program->setType(gptype);
    program->setSyntaxCode(syntaxCode);
    return program;
}
// Combine Ogre's separate minification and mip filters into the single
// GL_TEXTURE_MIN_FILTER enumerant GL expects.  FO_ANISOTROPIC is treated the
// same as FO_LINEAR; FO_NONE on `mip` disables mipmapping entirely.
static GLint getCombinedMinMipFilter(FilterOptions min, FilterOptions mip)
{
    switch (min)
    {
    case FO_ANISOTROPIC:
    case FO_LINEAR:
        // Linear minification.
        if (mip == FO_ANISOTROPIC || mip == FO_LINEAR)
            return GL_LINEAR_MIPMAP_LINEAR;
        if (mip == FO_POINT)
            return GL_LINEAR_MIPMAP_NEAREST;
        if (mip == FO_NONE)
            return GL_LINEAR;
        break;
    case FO_POINT:
    case FO_NONE:
        // Nearest minification.
        if (mip == FO_ANISOTROPIC || mip == FO_LINEAR)
            return GL_NEAREST_MIPMAP_LINEAR;
        if (mip == FO_POINT)
            return GL_NEAREST_MIPMAP_NEAREST;
        if (mip == FO_NONE)
            return GL_NEAREST;
        break;
    }
    // Unreachable for valid FilterOptions values.
    return 0;
}
// Constructs the render system in its pre-initialisation state: defaults and
// config options are set up and the native GL support object is created, but
// no GL context exists yet.
GLRenderSystem::GLRenderSystem()
: mStopRendering(false),
mFixedFunctionTextureUnits(0),
mStencilWriteMask(0xFFFFFFFF),
mDepthWrite(true),
mScissorsEnabled(false),
mUseAutoTextureMatrix(false),
mHardwareBufferManager(0),
mGpuProgramManager(0),
mGLSLProgramFactory(0),
mStateCacheManager(0),
mActiveTextureUnit(0),
mMaxBuiltInTextureAttribIndex(0)
{
size_t i;
LogManager::getSingleton().logMessage(getName() + " created.");
mRenderAttribsBound.reserve(100);
mRenderInstanceAttribsBound.reserve(100);
// Get our GLSupport
mGLSupport = getGLSupport(GLNativeSupport::CONTEXT_COMPATIBILITY);
mWorldMatrix = Matrix4::IDENTITY;
mViewMatrix = Matrix4::IDENTITY;
initConfigOptions();
mColourWrite[0] = mColourWrite[1] = mColourWrite[2] = mColourWrite[3] = true;
for (i = 0; i < OGRE_MAX_TEXTURE_LAYERS; i++)
{
// Dummy value (99) marks the coord index as "not yet assigned".
mTextureCoordIndex[i] = 99;
mTextureTypes[i] = 0;
}
mActiveRenderTarget = 0;
mCurrentContext = 0;
mMainContext = 0;
mGLInitialised = false;
mCurrentLights = 0;
mMinFilter = FO_LINEAR;
mMipFilter = FO_POINT;
mCurrentVertexProgram = 0;
mCurrentGeometryProgram = 0;
mCurrentFragmentProgram = 0;
mRTTManager = NULL;
}
// Shuts the render system down, then releases the native GL support object
// created in the constructor.
GLRenderSystem::~GLRenderSystem()
{
shutdown();
delete mGLSupport;
}
// Applies colour tracking and fog to the fixed-function GL state as a side
// effect, then returns the shared fixed-function parameter set.
const GpuProgramParametersPtr& GLRenderSystem::getFixedFunctionParams(TrackVertexColourType tracking,
FogMode fog)
{
_setSurfaceTracking(tracking);
_setFog(fog);
return mFixedFunctionParams;
}
// Pushes the auto-constants in `params` (matrices, material, fog, lights...)
// into the fixed-function GL state.  Light position/direction need eye-space
// coordinates, so they are applied in a second pass after the view matrix is
// known, under a temporarily loaded modelview matrix.
void GLRenderSystem::applyFixedFunctionParams(const GpuProgramParametersPtr& params, uint16 mask)
{
bool updateLightPos = false;
// Autoconstant index is not a physical index
for (const auto& ac : params->getAutoConstants())
{
// Only update needed slots
if (ac.variability & mask)
{
const float* ptr = params->getFloatPointer(ac.physicalIndex);
switch(ac.paramType)
{
case GpuProgramParameters::ACT_WORLD_MATRIX:
setWorldMatrix(Matrix4(ptr));
break;
case GpuProgramParameters::ACT_VIEW_MATRIX:
// force light update
updateLightPos = true;
mask |= GPV_LIGHTS;
setViewMatrix(Matrix4(ptr));
break;
case GpuProgramParameters::ACT_PROJECTION_MATRIX:
setProjectionMatrix(Matrix4(ptr));
break;
case GpuProgramParameters::ACT_SURFACE_AMBIENT_COLOUR:
mStateCacheManager->setMaterialAmbient(ptr[0], ptr[1], ptr[2], ptr[3]);
break;
case GpuProgramParameters::ACT_SURFACE_DIFFUSE_COLOUR:
mStateCacheManager->setMaterialDiffuse(ptr[0], ptr[1], ptr[2], ptr[3]);
break;
case GpuProgramParameters::ACT_SURFACE_SPECULAR_COLOUR:
mStateCacheManager->setMaterialSpecular(ptr[0], ptr[1], ptr[2], ptr[3]);
break;
case GpuProgramParameters::ACT_SURFACE_EMISSIVE_COLOUR:
mStateCacheManager->setMaterialEmissive(ptr[0], ptr[1], ptr[2], ptr[3]);
break;
case GpuProgramParameters::ACT_SURFACE_SHININESS:
mStateCacheManager->setMaterialShininess(ptr[0]);
break;
case GpuProgramParameters::ACT_POINT_PARAMS:
mStateCacheManager->setPointSize(ptr[0]);
mStateCacheManager->setPointParameters(ptr + 1);
break;
case GpuProgramParameters::ACT_FOG_PARAMS:
glFogf(GL_FOG_DENSITY, ptr[0]);
glFogf(GL_FOG_START, ptr[1]);
glFogf(GL_FOG_END, ptr[2]);
break;
case GpuProgramParameters::ACT_FOG_COLOUR:
glFogfv(GL_FOG_COLOR, ptr);
break;
case GpuProgramParameters::ACT_AMBIENT_LIGHT_COLOUR:
mStateCacheManager->setLightAmbient(ptr[0], ptr[1], ptr[2]);
break;
case GpuProgramParameters::ACT_LIGHT_DIFFUSE_COLOUR:
glLightfv(GL_LIGHT0 + ac.data, GL_DIFFUSE, ptr);
break;
case GpuProgramParameters::ACT_LIGHT_SPECULAR_COLOUR:
glLightfv(GL_LIGHT0 + ac.data, GL_SPECULAR, ptr);
break;
case GpuProgramParameters::ACT_LIGHT_ATTENUATION:
glLightf(GL_LIGHT0 + ac.data, GL_CONSTANT_ATTENUATION, ptr[1]);
glLightf(GL_LIGHT0 + ac.data, GL_LINEAR_ATTENUATION, ptr[2]);
glLightf(GL_LIGHT0 + ac.data, GL_QUADRATIC_ATTENUATION, ptr[3]);
break;
case GpuProgramParameters::ACT_SPOTLIGHT_PARAMS:
{
// ptr[3] != 0 means "is a spotlight"; otherwise use the 180-degree
// cutoff that disables the spot cone.
float cutoff = ptr[3] ? Math::RadiansToDegrees(std::acos(ptr[1])) : 180;
glLightf(GL_LIGHT0 + ac.data, GL_SPOT_CUTOFF, cutoff);
glLightf(GL_LIGHT0 + ac.data, GL_SPOT_EXPONENT, ptr[2]);
break;
}
case GpuProgramParameters::ACT_LIGHT_POSITION:
case GpuProgramParameters::ACT_LIGHT_DIRECTION:
// handled below
updateLightPos = true;
break;
default:
OgreAssert(false, "unknown autoconstant");
break;
}
}
}
if(!updateLightPos) return;
// GL lights use eye coordinates, which we only know now
// Save previous modelview
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadMatrixf(mViewMatrix.transpose()[0]);
for (const auto& ac : params->getAutoConstants())
{
// Only update needed slots
// NOTE(review): unlike the first pass this tests only `mask`, not
// ac.variability — confirm that applying all light constants here is
// intentional.
if ((GPV_GLOBAL | GPV_LIGHTS) & mask)
{
const float* ptr = params->getFloatPointer(ac.physicalIndex);
switch(ac.paramType)
{
case GpuProgramParameters::ACT_LIGHT_POSITION:
glLightfv(GL_LIGHT0 + ac.data, GL_POSITION, ptr);
break;
case GpuProgramParameters::ACT_LIGHT_DIRECTION:
glLightfv(GL_LIGHT0 + ac.data, GL_SPOT_DIRECTION, ptr);
break;
default:
break;
}
}
}
glPopMatrix();
}
const String& GLRenderSystem::getName(void) const
{
    // Stable, human-readable identifier for this render system.
    static String sysName("OpenGL Rendering Subsystem");
    return sysName;
}
// Runs base-class initialisation, starts the native GL support layer and
// creates the GL texture manager.
void GLRenderSystem::_initialise()
{
RenderSystem::_initialise();
mGLSupport->start();
// Create the texture manager
mTextureManager = new GLTextureManager(this);
}
// Probes the current GL driver (via glGetString/glGetIntegerv and GLEW
// extension flags) and builds a RenderSystemCapabilities object describing
// what this context supports.  Caller owns the returned object.
RenderSystemCapabilities* GLRenderSystem::createRenderSystemCapabilities() const
{
RenderSystemCapabilities* rsc = new RenderSystemCapabilities();
rsc->setCategoryRelevant(CAPS_CATEGORY_GL, true);
rsc->setDriverVersion(mDriverVersion);
const char* deviceName = (const char*)glGetString(GL_RENDERER);
const char* vendorName = (const char*)glGetString(GL_VENDOR);
rsc->setDeviceName(deviceName);
rsc->setRenderSystemName(getName());
// determine vendor by substring match on the GL_VENDOR string
if (strstr(vendorName, "NVIDIA"))
rsc->setVendor(GPU_NVIDIA);
else if (strstr(vendorName, "ATI"))
rsc->setVendor(GPU_AMD);
else if (strstr(vendorName, "AMD"))
rsc->setVendor(GPU_AMD);
else if (strstr(vendorName, "Intel"))
rsc->setVendor(GPU_INTEL);
else
rsc->setVendor(GPU_UNKNOWN);
// Supports fixed-function
rsc->setCapability(RSC_FIXED_FUNCTION);
rsc->setCapability(RSC_AUTOMIPMAP_COMPRESSED);
// Check for Multitexturing support and set number of texture units
GLint units;
glGetIntegerv( GL_MAX_TEXTURE_UNITS, &units );
if (GLEW_ARB_fragment_program)
{
// Also check GL_MAX_TEXTURE_IMAGE_UNITS_ARB since NV at least
// only increased this on the FX/6x00 series
GLint arbUnits;
glGetIntegerv( GL_MAX_TEXTURE_IMAGE_UNITS_ARB, &arbUnits );
if (arbUnits > units)
units = arbUnits;
}
rsc->setNumTextureUnits(std::min(OGRE_MAX_TEXTURE_LAYERS, units));
// Check for Anisotropy support
if(GLEW_EXT_texture_filter_anisotropic)
{
GLfloat maxAnisotropy = 0;
glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &maxAnisotropy);
rsc->setMaxSupportedAnisotropy(maxAnisotropy);
rsc->setCapability(RSC_ANISOTROPY);
}
rsc->setCapability(RSC_DOT3);
// Point sprites
if (GLEW_VERSION_2_0 || GLEW_ARB_point_sprite)
{
rsc->setCapability(RSC_POINT_SPRITES);
}
// Route the point-parameter entry points through whichever extension the
// driver exposes (ARB preferred over EXT).
if(GLEW_ARB_point_parameters)
{
glPointParameterf = glPointParameterfARB;
glPointParameterfv = glPointParameterfvARB;
}
else if(GLEW_EXT_point_parameters)
{
glPointParameterf = glPointParameterfEXT;
glPointParameterfv = glPointParameterfvEXT;
}
rsc->setCapability(RSC_POINT_EXTENDED_PARAMETERS);
// Check for hardware stencil support and set bit depth
GLint stencil;
glGetIntegerv(GL_STENCIL_BITS,&stencil);
if(stencil)
{
rsc->setCapability(RSC_HWSTENCIL);
rsc->setStencilBufferBitDepth(stencil);
}
rsc->setCapability(RSC_HW_GAMMA);
rsc->setCapability(RSC_MAPBUFFER);
rsc->setCapability(RSC_32BIT_INDEX);
if(GLEW_ARB_vertex_program)
{
rsc->setCapability(RSC_VERTEX_PROGRAM);
// Vertex Program Properties
rsc->setVertexProgramConstantBoolCount(0);
rsc->setVertexProgramConstantIntCount(0);
GLint floatConstantCount;
glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB, &floatConstantCount);
rsc->setVertexProgramConstantFloatCount(floatConstantCount);
GLint attrs;
glGetIntegerv( GL_MAX_VERTEX_ATTRIBS_ARB, &attrs);
rsc->setNumVertexAttributes(attrs);
rsc->addShaderProfile("arbvp1");
if (GLEW_NV_vertex_program2_option)
{
rsc->addShaderProfile("vp30");
}
if (GLEW_NV_vertex_program3)
{
rsc->addShaderProfile("vp40");
}
if (GLEW_NV_gpu_program4)
{
rsc->addShaderProfile("gp4vp");
rsc->addShaderProfile("gpu_vp");
}
}
if (GLEW_NV_register_combiners2 &&
GLEW_NV_texture_shader)
{
rsc->setCapability(RSC_FRAGMENT_PROGRAM);
rsc->addShaderProfile("fp20");
}
// NFZ - check for ATI fragment shader support
if (GLEW_ATI_fragment_shader)
{
rsc->setCapability(RSC_FRAGMENT_PROGRAM);
// no boolean params allowed
rsc->setFragmentProgramConstantBoolCount(0);
// no integer params allowed
rsc->setFragmentProgramConstantIntCount(0);
// only 8 Vector4 constant floats supported
rsc->setFragmentProgramConstantFloatCount(8);
rsc->addShaderProfile("ps_1_4");
rsc->addShaderProfile("ps_1_3");
rsc->addShaderProfile("ps_1_2");
rsc->addShaderProfile("ps_1_1");
}
if (GLEW_ARB_fragment_program)
{
rsc->setCapability(RSC_FRAGMENT_PROGRAM);
// Fragment Program Properties
rsc->setFragmentProgramConstantBoolCount(0);
rsc->setFragmentProgramConstantIntCount(0);
GLint floatConstantCount;
glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB, &floatConstantCount);
rsc->setFragmentProgramConstantFloatCount(floatConstantCount);
rsc->addShaderProfile("arbfp1");
if (GLEW_NV_fragment_program_option)
{
rsc->addShaderProfile("fp30");
}
if (GLEW_NV_fragment_program2)
{
rsc->addShaderProfile("fp40");
}
if (GLEW_NV_gpu_program4)
{
rsc->addShaderProfile("gp4fp");
rsc->addShaderProfile("gpu_fp");
}
}
// NFZ - Check if GLSL is supported
if ( GLEW_VERSION_2_0 ||
(GLEW_ARB_shading_language_100 &&
GLEW_ARB_shader_objects &&
GLEW_ARB_fragment_shader &&
GLEW_ARB_vertex_shader) )
{
rsc->addShaderProfile("glsl");
if(getNativeShadingLanguageVersion() >= 120)
rsc->addShaderProfile("glsl120");
if(getNativeShadingLanguageVersion() >= 110)
rsc->addShaderProfile("glsl110");
if(getNativeShadingLanguageVersion() >= 100)
rsc->addShaderProfile("glsl100");
}
// Check if geometry shaders are supported
if (GLEW_VERSION_2_0 &&
GLEW_EXT_geometry_shader4)
{
rsc->setCapability(RSC_GEOMETRY_PROGRAM);
rsc->setGeometryProgramConstantBoolCount(0);
rsc->setGeometryProgramConstantIntCount(0);
GLint floatConstantCount = 0;
glGetIntegerv(GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT, &floatConstantCount);
rsc->setGeometryProgramConstantFloatCount(floatConstantCount);
GLint maxOutputVertices;
glGetIntegerv(GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT,&maxOutputVertices);
rsc->setGeometryProgramNumOutputVertices(maxOutputVertices);
}
if(GLEW_NV_gpu_program4)
{
rsc->setCapability(RSC_GEOMETRY_PROGRAM);
rsc->addShaderProfile("nvgp4");
//Also add the CG profiles
rsc->addShaderProfile("gpu_gp");
rsc->addShaderProfile("gp4gp");
}
if (checkExtension("GL_ARB_get_program_binary"))
{
// states 3.0 here: http://developer.download.nvidia.com/opengl/specs/GL_ARB_get_program_binary.txt
// but not here: http://www.opengl.org/sdk/docs/man4/xhtml/glGetProgramBinary.xml
// and here states 4.1: http://www.geeks3d.com/20100727/opengl-4-1-allows-the-use-of-binary-shaders/
GLint formats;
glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &formats);
if(formats > 0)
rsc->setCapability(RSC_CAN_GET_COMPILED_SHADER_BUFFER);
}
if (GLEW_VERSION_3_3 || GLEW_ARB_instanced_arrays)
{
// states 3.3 here: http://www.opengl.org/sdk/docs/man3/xhtml/glVertexAttribDivisor.xml
rsc->setCapability(RSC_VERTEX_BUFFER_INSTANCE_DATA);
}
//Check if render to vertex buffer (transform feedback in OpenGL)
if (GLEW_VERSION_2_0 &&
GLEW_NV_transform_feedback)
{
rsc->setCapability(RSC_HWRENDER_TO_VERTEX_BUFFER);
}
// Check for texture compression
rsc->setCapability(RSC_TEXTURE_COMPRESSION);
// Check for dxt compression
if(GLEW_EXT_texture_compression_s3tc)
{
#if defined(__APPLE__) && defined(__PPC__)
// Apple on ATI & PPC has errors in DXT
if (mGLSupport->getGLVendor().find("ATI") == std::string::npos)
#endif
rsc->setCapability(RSC_TEXTURE_COMPRESSION_DXT);
}
// Check for vtc compression
if(GLEW_NV_texture_compression_vtc)
{
rsc->setCapability(RSC_TEXTURE_COMPRESSION_VTC);
}
// Scissor test is standard in GL 1.2 (is it emulated on some cards though?)
rsc->setCapability(RSC_SCISSOR_TEST);
// As are user clipping planes
rsc->setCapability(RSC_USER_CLIP_PLANES);
// 2-sided stencil?
if (GLEW_VERSION_2_0 || GLEW_EXT_stencil_two_side)
{
rsc->setCapability(RSC_TWO_SIDED_STENCIL);
}
rsc->setCapability(RSC_STENCIL_WRAP);
rsc->setCapability(RSC_HWOCCLUSION);
// UBYTE4 always supported
rsc->setCapability(RSC_VERTEX_FORMAT_UBYTE4);
// Infinite far plane always supported
rsc->setCapability(RSC_INFINITE_FAR_PLANE);
// Check for non-power-of-2 texture support
if(GLEW_ARB_texture_non_power_of_two)
{
rsc->setCapability(RSC_NON_POWER_OF_2_TEXTURES);
}
// Check for Float textures
if(GLEW_ATI_texture_float || GLEW_ARB_texture_float)
{
rsc->setCapability(RSC_TEXTURE_FLOAT);
}
// 3D textures should be supported by GL 1.2, which is our minimum version
rsc->setCapability(RSC_TEXTURE_1D);
rsc->setCapability(RSC_TEXTURE_3D);
// Check for framebuffer object extension
if(GLEW_EXT_framebuffer_object)
{
// Probe number of draw buffers
// Only makes sense with FBO support, so probe here
if(GLEW_VERSION_2_0 ||
GLEW_ARB_draw_buffers ||
GLEW_ATI_draw_buffers)
{
GLint buffers;
glGetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &buffers);
rsc->setNumMultiRenderTargets(std::min<int>(buffers, (GLint)OGRE_MAX_MULTIPLE_RENDER_TARGETS));
rsc->setCapability(RSC_MRT_DIFFERENT_BIT_DEPTHS);
}
rsc->setCapability(RSC_HWRENDER_TO_TEXTURE);
}
// Check GLSupport for PBuffer support
if(GLEW_ARB_pixel_buffer_object || GLEW_EXT_pixel_buffer_object)
{
// Use PBuffers
rsc->setCapability(RSC_HWRENDER_TO_TEXTURE);
rsc->setCapability(RSC_PBUFFER);
}
// Point size
float ps;
glGetFloatv(GL_POINT_SIZE_MAX, &ps);
rsc->setMaxPointSize(ps);
// Vertex texture fetching
if (checkExtension("GL_ARB_vertex_shader"))
{
GLint vUnits;
glGetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB, &vUnits);
rsc->setNumVertexTextureUnits(static_cast<ushort>(vUnits));
if (vUnits > 0)
{
rsc->setCapability(RSC_VERTEX_TEXTURE_FETCH);
}
// GL always shares vertex and fragment texture units (for now?)
rsc->setVertexTextureUnitsShared(true);
}
rsc->setCapability(RSC_MIPMAP_LOD_BIAS);
// Alpha to coverage?
if (checkExtension("GL_ARB_multisample"))
{
// Alpha to coverage always 'supported' when MSAA is available
// although card may ignore it if it doesn't specifically support A2C
rsc->setCapability(RSC_ALPHA_TO_COVERAGE);
}
// Advanced blending operations
if(GLEW_VERSION_2_0)
{
rsc->setCapability(RSC_ADVANCED_BLEND_OPERATIONS);
}
// Wide lines: only advertised when the driver reports an aliased line
// width range wider than the default 1.
GLfloat lineWidth[2] = {1, 1};
glGetFloatv(GL_ALIASED_LINE_WIDTH_RANGE, lineWidth);
if(lineWidth[1] != 1 && lineWidth[1] != lineWidth[0])
rsc->setCapability(RSC_WIDE_LINES);
return rsc;
}
/** Completes renderer start-up from a (real or custom) capability set.
    Performs four jobs, in order:
      1. clamps the fixed-function texture unit count to GL_MAX_TEXTURE_COORDS;
      2. aliases ARB entry points (VBO, occlusion query, draw buffers) to
         their core GL equivalents where the ARB extension is absent;
      3. registers a GPU program factory for every supported shader profile;
      4. selects a render-to-texture strategy (FBO, PBuffer or framebuffer
         copy) honouring the "RTT Preferred Mode" config option.
    @param caps     Capabilities to initialise from; must have been produced
                    by this render system (checked by name).
    @param primary  First render target, required by the PBuffer RTT manager. */
void GLRenderSystem::initialiseFromRenderSystemCapabilities(RenderSystemCapabilities* caps, RenderTarget* primary)
{
    if(caps->getRenderSystemName() != getName())
    {
        OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS,
            "Trying to initialize GLRenderSystem from RenderSystemCapabilities that do not support OpenGL",
            "GLRenderSystem::initialiseFromRenderSystemCapabilities");
    }

    // set texture the number of texture units
    mFixedFunctionTextureUnits = caps->getNumTextureUnits();

    //In GL there can be less fixed function texture units than general
    //texture units. Get the minimum of the two.
    if (caps->hasCapability(RSC_FRAGMENT_PROGRAM))
    {
        GLint maxTexCoords = 0;
        glGetIntegerv(GL_MAX_TEXTURE_COORDS_ARB, &maxTexCoords);
        if (mFixedFunctionTextureUnits > maxTexCoords)
        {
            mFixedFunctionTextureUnits = maxTexCoords;
        }
    }

    if(!GLEW_ARB_vertex_buffer_object)
    {
        // Assign ARB functions same to GL 1.5 version since
        // interface identical
        glBindBufferARB = glBindBuffer;
        glBufferDataARB = glBufferData;
        glBufferSubDataARB = glBufferSubData;
        glDeleteBuffersARB = glDeleteBuffers;
        glGenBuffersARB = glGenBuffers;
        glGetBufferParameterivARB = glGetBufferParameteriv;
        glGetBufferPointervARB = glGetBufferPointerv;
        glGetBufferSubDataARB = glGetBufferSubData;
        glIsBufferARB = glIsBuffer;
        glMapBufferARB = glMapBuffer;
        glUnmapBufferARB = glUnmapBuffer;
    }

    // Buffer manager must exist before any hardware buffers are requested.
    mHardwareBufferManager = new GLHardwareBufferManager;

    // XXX Need to check for nv2 support and make a program manager for it
    // XXX Probably nv1 as well for older cards

    // GPU Program Manager setup
    mGpuProgramManager = new GLGpuProgramManager();

    // All ARB-assembly vertex profiles share the same factory.
    if(caps->hasCapability(RSC_VERTEX_PROGRAM))
    {
        if(caps->isShaderProfileSupported("arbvp1"))
        {
            mGpuProgramManager->registerProgramFactory("arbvp1", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("vp30"))
        {
            mGpuProgramManager->registerProgramFactory("vp30", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("vp40"))
        {
            mGpuProgramManager->registerProgramFactory("vp40", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("gp4vp"))
        {
            mGpuProgramManager->registerProgramFactory("gp4vp", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("gpu_vp"))
        {
            mGpuProgramManager->registerProgramFactory("gpu_vp", createGLArbGpuProgram);
        }
    }

    if(caps->hasCapability(RSC_GEOMETRY_PROGRAM))
    {
        //TODO : Should these be createGLArbGpuProgram or createGLGpuNVparseProgram?
        if(caps->isShaderProfileSupported("nvgp4"))
        {
            mGpuProgramManager->registerProgramFactory("nvgp4", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("gp4gp"))
        {
            mGpuProgramManager->registerProgramFactory("gp4gp", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("gpu_gp"))
        {
            mGpuProgramManager->registerProgramFactory("gpu_gp", createGLArbGpuProgram);
        }
    }

    // Fragment profiles: fp20 goes through nvparse, ps_1_x through ATI
    // fragment shader emulation, everything else through ARB assembly.
    if(caps->hasCapability(RSC_FRAGMENT_PROGRAM))
    {
        if(caps->isShaderProfileSupported("fp20"))
        {
            mGpuProgramManager->registerProgramFactory("fp20", createGLGpuNvparseProgram);
        }
        if(caps->isShaderProfileSupported("ps_1_4"))
        {
            mGpuProgramManager->registerProgramFactory("ps_1_4", createGL_ATI_FS_GpuProgram);
        }
        if(caps->isShaderProfileSupported("ps_1_3"))
        {
            mGpuProgramManager->registerProgramFactory("ps_1_3", createGL_ATI_FS_GpuProgram);
        }
        if(caps->isShaderProfileSupported("ps_1_2"))
        {
            mGpuProgramManager->registerProgramFactory("ps_1_2", createGL_ATI_FS_GpuProgram);
        }
        if(caps->isShaderProfileSupported("ps_1_1"))
        {
            mGpuProgramManager->registerProgramFactory("ps_1_1", createGL_ATI_FS_GpuProgram);
        }
        if(caps->isShaderProfileSupported("arbfp1"))
        {
            mGpuProgramManager->registerProgramFactory("arbfp1", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("fp40"))
        {
            mGpuProgramManager->registerProgramFactory("fp40", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("fp30"))
        {
            mGpuProgramManager->registerProgramFactory("fp30", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("gp4fp"))
        {
            mGpuProgramManager->registerProgramFactory("gp4fp", createGLArbGpuProgram);
        }
        if(caps->isShaderProfileSupported("gpu_fp"))
        {
            mGpuProgramManager->registerProgramFactory("gpu_fp", createGLArbGpuProgram);
        }
    }

    if(caps->isShaderProfileSupported("glsl"))
    {
        // NFZ - check for GLSL vertex and fragment shader support successful
        mGLSLProgramFactory = new GLSL::GLSLProgramFactory();
        HighLevelGpuProgramManager::getSingleton().addFactory(mGLSLProgramFactory);
        LogManager::getSingleton().logMessage("GLSL support detected");
    }

    if(caps->hasCapability(RSC_HWOCCLUSION) && !GLEW_ARB_occlusion_query)
    {
        // Assign ARB functions same to GL 1.5 version since
        // interface identical
        glBeginQueryARB = glBeginQuery;
        glDeleteQueriesARB = glDeleteQueries;
        glEndQueryARB = glEndQuery;
        glGenQueriesARB = glGenQueries;
        glGetQueryObjectivARB = glGetQueryObjectiv;
        glGetQueryObjectuivARB = glGetQueryObjectuiv;
        glGetQueryivARB = glGetQueryiv;
        glIsQueryARB = glIsQuery;
    }

    /// Do this after extension function pointers are initialised as the extension
    /// is used to probe further capabilities.
    auto cfi = getConfigOptions().find("RTT Preferred Mode");
    // RTT Mode: 0 use whatever available, 1 use PBuffers, 2 force use copying
    int rttMode = 0;
    if (cfi != getConfigOptions().end())
    {
        if (cfi->second.currentValue == "PBuffer")
        {
            rttMode = 1;
        }
        else if (cfi->second.currentValue == "Copy")
        {
            rttMode = 2;
        }
    }

    // Check for framebuffer object extension
    if(caps->hasCapability(RSC_HWRENDER_TO_TEXTURE) && rttMode < 1)
    {
        // Before GL version 2.0, we need to get one of the extensions
        if(GLEW_ARB_draw_buffers)
            GLEW_GET_FUN(__glewDrawBuffers) = glDrawBuffersARB;
        else if(GLEW_ATI_draw_buffers)
            GLEW_GET_FUN(__glewDrawBuffers) = glDrawBuffersATI;

        // Create FBO manager
        LogManager::getSingleton().logMessage("GL: Using GL_EXT_framebuffer_object for rendering to textures (best)");
        mRTTManager = new GLFBOManager(false);
        //TODO: Check if we're using OpenGL 3.0 and add RSC_RTT_DEPTHBUFFER_RESOLUTION_LESSEQUAL flag
    }
    else
    {
        // Check GLSupport for PBuffer support
        if(caps->hasCapability(RSC_PBUFFER) && rttMode < 2)
        {
            if(caps->hasCapability(RSC_HWRENDER_TO_TEXTURE))
            {
                // Use PBuffers
                mRTTManager = new GLPBRTTManager(mGLSupport, primary);
                LogManager::getSingleton().logMessage("GL: Using PBuffers for rendering to textures");

                //TODO: Depth buffer sharing in pbuffer is left unsupported
            }
            // NOTE(review): if RSC_PBUFFER is set while RSC_HWRENDER_TO_TEXTURE
            // is not, no RTT manager is created at all here — verify this
            // combination cannot occur in practice.
        }
        else
        {
            // No pbuffer support either -- fallback to simplest copying from framebuffer
            mRTTManager = new GLCopyingRTTManager();
            LogManager::getSingleton().logMessage("GL: Using framebuffer copy for rendering to textures (worst)");
            LogManager::getSingleton().logMessage("GL: Warning: RenderTexture size is restricted to size of framebuffer. If you are on Linux, consider using GLX instead of SDL.");

            //Copy method uses the main depth buffer but no other depth buffer
            caps->setCapability(RSC_RTT_MAIN_DEPTHBUFFER_ATTACHABLE);
            caps->setCapability(RSC_RTT_DEPTHBUFFER_RESOLUTION_LESSEQUAL);
        }

        // Downgrade number of simultaneous targets
        caps->setNumMultiRenderTargets(1);
    }

    Log* defaultLog = LogManager::getSingleton().getDefaultLog();
    if (defaultLog)
    {
        caps->log(defaultLog);
    }

    mGLInitialised = true;
}
/** Shuts the render system down, destroying every object created during
    initialisation. Tear-down order is significant: the GPU program,
    hardware buffer and RTT managers must be destroyed before
    mGLSupport->stop(); the texture manager is destroyed after it. */
void GLRenderSystem::shutdown(void)
{
    RenderSystem::shutdown();

    // Deleting the GLSL program factory
    if (mGLSLProgramFactory)
    {
        // Remove from manager safely
        if (HighLevelGpuProgramManager::getSingletonPtr())
            HighLevelGpuProgramManager::getSingleton().removeFactory(mGLSLProgramFactory);
        delete mGLSLProgramFactory;
        mGLSLProgramFactory = 0;
    }

    // Deleting the GPU program manager and hardware buffer manager. Has to be done before the mGLSupport->stop().
    delete mGpuProgramManager;
    mGpuProgramManager = 0;

    delete mHardwareBufferManager;
    mHardwareBufferManager = 0;

    delete mRTTManager;
    mRTTManager = 0;

    // Delete extra threads contexts
    for (GLContextList::iterator i = mBackgroundContextList.begin();
         i != mBackgroundContextList.end(); ++i)
    {
        GLContext* pCurContext = *i;

        // Release the GL resources of each background context before delete.
        pCurContext->releaseContext();

        delete pCurContext;
    }
    mBackgroundContextList.clear();

    mGLSupport->stop();
    mStopRendering = true;

    delete mTextureManager;
    mTextureManager = 0;

    // There will be a new initial window and so forth, thus any call to test
    // some params will access an invalid pointer, so it is best to reset
    // the whole state.
    mGLInitialised = 0;
}
/// Selects the fixed-function shade model for the given OGRE option.
/// SO_FLAT maps to GL_FLAT; every other option falls back to GL_SMOOTH.
/// XXX Don't do this when using shader
void GLRenderSystem::setShadingType(ShadeOptions so)
{
    const GLenum shadeModel = (so == SO_FLAT) ? GL_FLAT : GL_SMOOTH;
    mStateCacheManager->setShadeModel(shadeModel);
}
//---------------------------------------------------------------------
bool GLRenderSystem::_createRenderWindows(const RenderWindowDescriptionList& renderWindowDescriptions,
RenderWindowList& createdWindows)
{
// Call base render system method.
if (false == RenderSystem::_createRenderWindows(renderWindowDescriptions, createdWindows))
return false;
// Simply call _createRenderWindow in a loop.
for (size_t i = 0; i < renderWindowDescriptions.size(); ++i)
{
const RenderWindowDescription& curRenderWindowDescription = renderWindowDescriptions[i];
RenderWindow* curWindow = NULL;
curWindow = _createRenderWindow(curRenderWindowDescription.name,
curRenderWindowDescription.width,
curRenderWindowDescription.height,
curRenderWindowDescription.useFullScreen,
&curRenderWindowDescription.miscParams);
createdWindows.push_back(curWindow);
}
return true;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
/** Creates a single render window and, on the first call, brings the whole
    GL render system up (context, GLEW, capabilities, RTT manager).
    Also allocates a dedicated depth buffer for the window unless it opted
    out via POOL_NO_DEPTH.
    @param name        Unique window name; throws ERR_INVALIDPARAMS if taken.
    @param width       Client-area width in pixels.
    @param height      Client-area height in pixels.
    @param fullScreen  Whether to create a fullscreen window.
    @param miscParams  Optional platform/window options; may be NULL.
    @return The created window, already attached as a render target. */
RenderWindow* GLRenderSystem::_createRenderWindow(const String &name,
    unsigned int width, unsigned int height, bool fullScreen,
    const NameValuePairList *miscParams)
{
    if (mRenderTargets.find(name) != mRenderTargets.end())
    {
        OGRE_EXCEPT(
            Exception::ERR_INVALIDPARAMS,
            "Window with name '" + name + "' already exists",
            "GLRenderSystem::_createRenderWindow" );
    }
    // Log a message
    StringStream ss;
    ss << "GLRenderSystem::_createRenderWindow \"" << name << "\", " <<
        width << "x" << height << " ";
    if(fullScreen)
        ss << "fullscreen ";
    else
        ss << "windowed ";

    if(miscParams)
    {
        ss << " miscParams: ";
        NameValuePairList::const_iterator it;
        for(it=miscParams->begin(); it!=miscParams->end(); ++it)
        {
            ss << it->first << "=" << it->second << " ";
        }
    }
    // BUGFIX: log unconditionally. Previously this call was inside the
    // if(miscParams) block, so window creation without miscParams was
    // silently unlogged.
    LogManager::getSingleton().logMessage(ss.str());

    // Create the window
    RenderWindow* win = mGLSupport->newWindow(name, width, height,
                                              fullScreen, miscParams);
    attachRenderTarget( *win );

    if (!mGLInitialised)
    {
        // set up glew and GLSupport
        initialiseContext(win);

        // Parse "major.minor" out of GL_SHADING_LANGUAGE_VERSION into a
        // single integer (e.g. "1.20" -> 120).
        const char* shadingLangVersion = (const char*)glGetString(GL_SHADING_LANGUAGE_VERSION);
        StringVector tokens = StringUtil::split(shadingLangVersion, ". ");
        mNativeShadingLanguageVersion = (StringConverter::parseUnsignedInt(tokens[0]) * 100) + StringConverter::parseUnsignedInt(tokens[1]);

        // Initialise GL after the first window has been created
        // TODO: fire this from emulation options, and don't duplicate Real and Current capabilities
        mRealCapabilities = createRenderSystemCapabilities();
        initFixedFunctionParams(); // create params

        // use real capabilities if custom capabilities are not available
        if(!mUseCustomCapabilities)
            mCurrentCapabilities = mRealCapabilities;

        fireEvent("RenderSystemCapabilitiesCreated");

        initialiseFromRenderSystemCapabilities(mCurrentCapabilities, win);

        // Initialise the main context
        _oneTimeContextInitialization();
        if(mCurrentContext)
            mCurrentContext->setInitialized();
    }

    if( win->getDepthBufferPool() != DepthBuffer::POOL_NO_DEPTH )
    {
        //Unlike D3D9, OGL doesn't allow sharing the main depth buffer, so keep them separate.
        //Only Copy does, but Copy means only one depth buffer...
        GLContext *windowContext = dynamic_cast<GLRenderTarget*>(win)->getContext();
        GLDepthBuffer *depthBuffer = new GLDepthBuffer( DepthBuffer::POOL_DEFAULT, this,
                                                        windowContext, 0, 0,
                                                        win->getWidth(), win->getHeight(),
                                                        win->getFSAA(), true );

        mDepthBufferPool[depthBuffer->getPoolId()].push_back( depthBuffer );

        win->attachDepthBuffer( depthBuffer );
    }

    return win;
}
//---------------------------------------------------------------------
/** Creates a depth (and possibly stencil) buffer suited to the given target.
    FBO targets get real renderbuffers in the best format reported by
    GLFBOManager::getBestDepthStencil; a packed depth-stencil format reuses
    one renderbuffer for both roles. Any non-FBO target gets a dummy
    GLDepthBuffer (NULL buffers) carrying only its dimensions, since only
    FBOs support separate depth buffers here.
    @return A newly allocated GLDepthBuffer; ownership passes to the caller. */
DepthBuffer* GLRenderSystem::_createDepthBufferFor( RenderTarget *renderTarget )
{
    GLDepthBuffer *retVal = 0;

    if( auto fbo = dynamic_cast<GLRenderTarget*>(renderTarget)->getFBO() )
    {
        //Presence of an FBO means the manager is an FBO Manager, that's why it's safe to downcast
        //Find best depth & stencil format suited for the RT's format
        GLuint depthFormat, stencilFormat;
        static_cast<GLFBOManager*>(mRTTManager)->getBestDepthStencil( fbo->getFormat(),
                                                                      &depthFormat, &stencilFormat );

        GLRenderBuffer *depthBuffer = new GLRenderBuffer( depthFormat, fbo->getWidth(),
                                                          fbo->getHeight(), fbo->getFSAA() );

        GLRenderBuffer *stencilBuffer = NULL;
        if ( depthFormat == GL_DEPTH24_STENCIL8_EXT)
        {
            // If we have a packed format, the stencilBuffer is the same as the depthBuffer
            stencilBuffer = depthBuffer;
        }
        else if(stencilFormat)
        {
            stencilBuffer = new GLRenderBuffer( stencilFormat, fbo->getWidth(),
                                                fbo->getHeight(), fbo->getFSAA() );
        }

        //No "custom-quality" multisample for now in GL
        retVal = new GLDepthBuffer( 0, this, mCurrentContext, depthBuffer, stencilBuffer,
                                    fbo->getWidth(), fbo->getHeight(), fbo->getFSAA(), false );
    }
    else
    {
        // Only FBO support different depth buffers, so everything
        // else creates dummy (empty) containers
        retVal = new GLDepthBuffer(0, this, mCurrentContext, NULL, NULL, renderTarget->getWidth(),
                                   renderTarget->getHeight(), renderTarget->getFSAA(), false);
    }

    return retVal;
}
/** Makes the primary window's context current, initialises extensions and
    GLEW, and creates the per-context state cache manager.
    Called once, when the first render window is created.
    @param primary  First render window; its context becomes both the main
                    and current context.
    @throws ERR_RENDERINGAPI_ERROR if the driver lacks OpenGL 1.5, which
            the rest of this render system requires. */
void GLRenderSystem::initialiseContext(RenderWindow* primary)
{
    // Set main and current context
    mMainContext = dynamic_cast<GLRenderTarget*>(primary)->getContext();
    mCurrentContext = mMainContext;

    // Set primary context as active
    if(mCurrentContext)
        mCurrentContext->setCurrent();

    // Setup GLSupport
    initialiseExtensions();

    LogManager::getSingleton().logMessage("***************************");
    LogManager::getSingleton().logMessage("*** GL Renderer Started ***");
    LogManager::getSingleton().logMessage("***************************");

    // Get extension function pointers
    glewInit();

    if (!GLEW_VERSION_1_5) {
        OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
                    "OpenGL 1.5 is not supported",
                    "GLRenderSystem::initialiseContext");
    }

    // State cache is tied to the context, so create/fetch it from there.
    mStateCacheManager = mCurrentContext->createOrRetrieveStateCacheManager<GLStateCacheManager>();
}
//-----------------------------------------------------------------------
/** Creates a named multi-render-target. Only available when the RTT
    manager is FBO-based; the PBuffer and copy fallbacks cannot do MRT.
    @throws ERR_RENDERINGAPI_ERROR when MRT is unsupported. */
MultiRenderTarget * GLRenderSystem::createMultiRenderTarget(const String & name)
{
    GLFBOManager* fboManager = dynamic_cast<GLFBOManager*>(mRTTManager);
    if (fboManager == NULL)
        OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR, "MultiRenderTarget is not supported");

    MultiRenderTarget* target = new GLFBOMultiRenderTarget(fboManager, name);
    attachRenderTarget(*target);
    return target;
}
//-----------------------------------------------------------------------
void GLRenderSystem::destroyRenderWindow(const String& name)
{
// Find it to remove from list.
RenderTarget* pWin = detachRenderTarget(name);
OgreAssert(pWin, "unknown RenderWindow name");
GLContext *windowContext = dynamic_cast<GLRenderTarget*>(pWin)->getContext();
//1 Window <-> 1 Context, should be always true
assert( windowContext );
bool bFound = false;
//Find the depth buffer from this window and remove it.
DepthBufferMap::iterator itMap = mDepthBufferPool.begin();
DepthBufferMap::iterator enMap = mDepthBufferPool.end();
while( itMap != enMap && !bFound )
{
DepthBufferVec::iterator itor = itMap->second.begin();
DepthBufferVec::iterator end = itMap->second.end();
while( itor != end )
{
//A DepthBuffer with no depth & stencil pointers is a dummy one,
//look for the one that matches the same GL context
GLDepthBuffer *depthBuffer = static_cast<GLDepthBuffer*>(*itor);
GLContext *glContext = depthBuffer->getGLContext();
if( glContext == windowContext &&
(depthBuffer->getDepthBuffer() || depthBuffer->getStencilBuffer()) )
{
bFound = true;
delete *itor;
itMap->second.erase( itor );
break;
}
++itor;
}
++itMap;
}
delete pWin;
}
//---------------------------------------------------------------------
/** Enables GL lights [0, limit) and disables any lights that were active
    beyond that range. A no-op when the count is unchanged. */
void GLRenderSystem::_useLights(unsigned short limit)
{
    if (mCurrentLights == limit)
        return;

    unsigned short idx = 0;

    // Switch on every light up to the new limit...
    while (idx < limit)
        setGLLight(idx++, true);

    // ...and switch off the remainder that was previously enabled.
    while (idx < mCurrentLights)
        setGLLight(idx++, false);

    mCurrentLights = limit;
}
/** Enables or disables a single fixed-function GL light.
    When enabling, the light's ambient term is zeroed (movables receive no
    per-light ambient contribution).
    @param index  Light slot, offset from GL_LIGHT0.
    @param lt     True to enable, false to disable. */
void GLRenderSystem::setGLLight(size_t index, bool lt)
{
    setFFPLightParams(index, lt);

    const GLenum gl_index = GL_LIGHT0 + index;

    if (lt)
    {
        // Disable ambient light for movables.
        const GLfloat blackOpaque[4] = {0, 0, 0, 1};
        glLightfv(gl_index, GL_AMBIENT, blackOpaque);

        // Enable in the scene
        mStateCacheManager->setEnabled(gl_index, true);
    }
    else
    {
        // Disable in the scene
        mStateCacheManager->setEnabled(gl_index, false);
    }
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/** Flattens a Matrix4 into GL's 16-float column-major layout.
    Destination index c*4+r receives source element m[r][c], i.e. the
    matrix is transposed while being copied. */
void GLRenderSystem::makeGLMatrix(GLfloat gl_matrix[16], const Matrix4& m)
{
    for (size_t idx = 0; idx < 16; ++idx)
    {
        gl_matrix[idx] = m[idx % 4][idx / 4];
    }
}
//-----------------------------------------------------------------------------
void GLRenderSystem::setWorldMatrix( const Matrix4 &m )
{
mWorldMatrix = m;
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf((mViewMatrix * mWorldMatrix).transpose()[0]);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::setViewMatrix( const Matrix4 &m )
{
mViewMatrix = m;
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf((mViewMatrix * mWorldMatrix).transpose()[0]);
// also mark clip planes dirty
if (!mClipPlanes.empty())
mClipPlanesDirty = true;
}
//-----------------------------------------------------------------------------
void GLRenderSystem::setProjectionMatrix(const Matrix4 &m)
{
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(m.transpose()[0]);
glMatrixMode(GL_MODELVIEW);
// also mark clip planes dirty
if (!mClipPlanes.empty())
mClipPlanesDirty = true;
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/** Configures fixed-function colour-material tracking.
    OGRE allows 15 tracking combinations but GL only exposes 5, so the
    closest supported mode is chosen. Note that GL_AMBIENT_AND_DIFFUSE is
    its own enum, not GL_AMBIENT | GL_DIFFUSE. */
void GLRenderSystem::_setSurfaceTracking(TrackVertexColourType tracking)
{
    if (tracking == TVC_NONE)
    {
        mStateCacheManager->setEnabled(GL_COLOR_MATERIAL, false);
        return;
    }

    // Pick the best-matching GL colour-material mode for the requested bits.
    GLenum mode;
    if (tracking & TVC_AMBIENT)
        mode = (tracking & TVC_DIFFUSE) ? GL_AMBIENT_AND_DIFFUSE : GL_AMBIENT;
    else if (tracking & TVC_DIFFUSE)
        mode = GL_DIFFUSE;
    else if (tracking & TVC_SPECULAR)
        mode = GL_SPECULAR;
    else if (tracking & TVC_EMISSIVE)
        mode = GL_EMISSION;
    else
        mode = GL_DIFFUSE; // fallback, matches the original default

    glColorMaterial(GL_FRONT_AND_BACK, mode);
    mStateCacheManager->setEnabled(GL_COLOR_MATERIAL, true);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setPointParameters(bool attenuationEnabled, Real minSize, Real maxSize)
{
if(attenuationEnabled)
{
// Point size is still calculated in pixels even when attenuation is
// enabled, which is pretty awkward, since you typically want a viewport
// independent size if you're looking for attenuation.
// So, scale the point size up by viewport size (this is equivalent to
// what D3D does as standard)
minSize = minSize * mActiveViewport->getActualHeight();
if (maxSize == 0.0f)
maxSize = mCurrentCapabilities->getMaxPointSize(); // pixels
else
maxSize = maxSize * mActiveViewport->getActualHeight();
if (mCurrentCapabilities->hasCapability(RSC_VERTEX_PROGRAM))
mStateCacheManager->setEnabled(GL_VERTEX_PROGRAM_POINT_SIZE, true);
}
else
{
if (maxSize == 0.0f)
maxSize = mCurrentCapabilities->getMaxPointSize();
if (mCurrentCapabilities->hasCapability(RSC_VERTEX_PROGRAM))
mStateCacheManager->setEnabled(GL_VERTEX_PROGRAM_POINT_SIZE, false);
}
mStateCacheManager->setPointParameters(NULL, minSize, maxSize);
}
/// Sets the rasterised line width, in pixels, issued directly to GL.
void GLRenderSystem::_setLineWidth(float width)
{
    glLineWidth(width);
}
//---------------------------------------------------------------------
void GLRenderSystem::_setPointSpritesEnabled(bool enabled)
{
if (!getCapabilities()->hasCapability(RSC_POINT_SPRITES))
return;
mStateCacheManager->setEnabled(GL_POINT_SPRITE, enabled);
// Set sprite texture coord generation
// Don't offer this as an option since D3D links it to sprite enabled
for (ushort i = 0; i < mFixedFunctionTextureUnits; ++i)
{
mStateCacheManager->activateGLTextureUnit(i);
glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE,
enabled ? GL_TRUE : GL_FALSE);
}
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/** Binds (or unbinds) a texture on the given stage.
    The GL target per stage is tracked in mTextureTypes so a target change
    (e.g. 2D -> cube map) disables the previous fixed-function target
    first. Texture-array targets are never passed to glEnable/glDisable,
    as they are not valid fixed-function enables.
    @param stage    Texture unit index.
    @param enabled  True to bind texPtr, false to unbind the stage.
    @param texPtr   Texture to bind when enabled. */
void GLRenderSystem::_setTexture(size_t stage, bool enabled, const TexturePtr &texPtr)
{
    GLenum lastTextureType = mTextureTypes[stage];

    // Nothing to do if this unit cannot be made active.
    if (!mStateCacheManager->activateGLTextureUnit(stage))
        return;

    if (enabled)
    {
        GLTexturePtr tex = static_pointer_cast<GLTexture>(texPtr);

        // note used
        tex->touch();
        mTextureTypes[stage] = tex->getGLTextureTarget();

        // Target changed: disable the old fixed-function target first.
        if(lastTextureType != mTextureTypes[stage] && lastTextureType != 0)
        {
            if (stage < mFixedFunctionTextureUnits)
            {
                if(lastTextureType != GL_TEXTURE_2D_ARRAY_EXT)
                    glDisable( lastTextureType );
            }
        }

        if (stage < mFixedFunctionTextureUnits)
        {
            if(mTextureTypes[stage] != GL_TEXTURE_2D_ARRAY_EXT)
                glEnable( mTextureTypes[stage] );
        }

        mStateCacheManager->bindGLTexture( mTextureTypes[stage], tex->getGLID() );
    }
    else
    {
        if (stage < mFixedFunctionTextureUnits)
        {
            if (lastTextureType != 0)
            {
                if(mTextureTypes[stage] != GL_TEXTURE_2D_ARRAY_EXT)
                    glDisable( mTextureTypes[stage] );
            }
            // Restore the default texture-env combine mode for the unit.
            glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
        }

        // bind zero texture
        mStateCacheManager->bindGLTexture(GL_TEXTURE_2D, 0);
    }
}
/** Applies all state carried by a Sampler to the texture bound at the
    given unit: wrap modes, border colour, mip LOD bias, anisotropy,
    depth-compare mode, and min/mag/mip filtering.
    @param unit     Texture unit to configure; no-op if it can't be activated.
    @param sampler  Sampler whose state to apply. */
void GLRenderSystem::_setSampler(size_t unit, Sampler& sampler)
{
    if (!mStateCacheManager->activateGLTextureUnit(unit))
        return;

    GLenum target = mTextureTypes[unit];

    const Sampler::UVWAddressingMode& uvw = sampler.getAddressingMode();
    mStateCacheManager->setTexParameteri(target, GL_TEXTURE_WRAP_S, getTextureAddressingMode(uvw.u));
    mStateCacheManager->setTexParameteri(target, GL_TEXTURE_WRAP_T, getTextureAddressingMode(uvw.v));
    mStateCacheManager->setTexParameteri(target, GL_TEXTURE_WRAP_R, getTextureAddressingMode(uvw.w));

    // Border colour is only relevant when at least one axis clamps to it.
    if (uvw.u == TAM_BORDER || uvw.v == TAM_BORDER || uvw.w == TAM_BORDER)
        glTexParameterfv( target, GL_TEXTURE_BORDER_COLOR, sampler.getBorderColour().ptr());

    if (mCurrentCapabilities->hasCapability(RSC_MIPMAP_LOD_BIAS))
    {
        glTexEnvf(GL_TEXTURE_FILTER_CONTROL_EXT, GL_TEXTURE_LOD_BIAS_EXT, sampler.getMipmapBias());
    }

    // Clamp the requested anisotropy to what the hardware supports.
    if (mCurrentCapabilities->hasCapability(RSC_ANISOTROPY))
        mStateCacheManager->setTexParameteri(
            target, GL_TEXTURE_MAX_ANISOTROPY_EXT,
            std::min<uint>(mCurrentCapabilities->getMaxSupportedAnisotropy(), sampler.getAnisotropy()));

    if(GLEW_VERSION_2_0)
    {
        // Depth-compare (shadow sampler) state; GL_NONE disables comparison.
        mStateCacheManager->setTexParameteri(target, GL_TEXTURE_COMPARE_MODE,
                                             sampler.getCompareEnabled() ? GL_COMPARE_REF_TO_TEXTURE
                                                                         : GL_NONE);
        if (sampler.getCompareEnabled())
            mStateCacheManager->setTexParameteri(target, GL_TEXTURE_COMPARE_FUNC,
                                                 convertCompareFunction(sampler.getCompareFunction()));
    }

    // Combine with existing mip filter
    mStateCacheManager->setTexParameteri(
        target, GL_TEXTURE_MIN_FILTER,
        getCombinedMinMipFilter(sampler.getFiltering(FT_MIN), sampler.getFiltering(FT_MIP)));

    switch (sampler.getFiltering(FT_MAG))
    {
    case FO_ANISOTROPIC: // GL treats linear and aniso the same
    case FO_LINEAR:
        mStateCacheManager->setTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        break;
    case FO_POINT:
    case FO_NONE:
        mStateCacheManager->setTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        break;
    }
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/// Records which texture coordinate set the given stage should source;
/// the stored index is consumed elsewhere (not visible in this file chunk).
void GLRenderSystem::_setTextureCoordSet(size_t stage, size_t index)
{
    mTextureCoordIndex[stage] = index;
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/** Configures fixed-function texture coordinate generation for a stage.
    Depending on the method this sets up GL texgen (sphere/reflection/
    normal/eye-linear maps) and, where needed, an auxiliary texture matrix
    stored in mAutoTextureMatrix and applied later by _setTextureMatrix.
    Silently ignored for stages beyond the fixed-function unit count.
    @param stage    Fixed-function texture unit.
    @param m        Coordinate calculation method.
    @param frustum  Projector frustum; only used for TEXCALC_PROJECTIVE_TEXTURE. */
void GLRenderSystem::_setTextureCoordCalculation(size_t stage, TexCoordCalcMethod m,
                                                 const Frustum* frustum)
{
    if (stage >= mFixedFunctionTextureUnits)
    {
        // Can't do this
        return;
    }

    GLfloat M[16];
    Matrix4 projectionBias;

    // Default to no extra auto texture matrix
    mUseAutoTextureMatrix = false;

    // Axis-aligned eye planes used by the projective-texture path.
    GLfloat eyePlaneS[] = {1.0, 0.0, 0.0, 0.0};
    GLfloat eyePlaneT[] = {0.0, 1.0, 0.0, 0.0};
    GLfloat eyePlaneR[] = {0.0, 0.0, 1.0, 0.0};
    GLfloat eyePlaneQ[] = {0.0, 0.0, 0.0, 1.0};

    if (!mStateCacheManager->activateGLTextureUnit(stage))
        return;

    switch( m )
    {
    case TEXCALC_NONE:
        // Plain texcoords from the vertex data: switch all texgen off.
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_S );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_T );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_R );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_Q );
        break;

    case TEXCALC_ENVIRONMENT_MAP:
        glTexGeni( GL_S, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP );
        glTexGeni( GL_T, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP );

        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_S );
        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_T );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_R );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_Q );

        // Need to use a texture matrix to flip the spheremap
        mUseAutoTextureMatrix = true;
        memset(mAutoTextureMatrix, 0, sizeof(GLfloat)*16);
        mAutoTextureMatrix[0] = mAutoTextureMatrix[10] = mAutoTextureMatrix[15] = 1.0f;
        mAutoTextureMatrix[5] = -1.0f;
        break;

    case TEXCALC_ENVIRONMENT_MAP_PLANAR:
        // XXX This doesn't seem right?!
#ifdef GL_VERSION_1_3
        glTexGeni( GL_S, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );
        glTexGeni( GL_T, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );
        glTexGeni( GL_R, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );

        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_S );
        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_T );
        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_R );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_Q );
#else
        // Pre-1.3 fallback: sphere mapping only.
        glTexGeni( GL_S, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP );
        glTexGeni( GL_T, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP );

        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_S );
        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_T );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_R );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_Q );
#endif
        break;

    case TEXCALC_ENVIRONMENT_MAP_REFLECTION:
        glTexGeni( GL_S, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );
        glTexGeni( GL_T, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );
        glTexGeni( GL_R, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );

        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_S );
        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_T );
        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_R );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_Q );

        // We need an extra texture matrix here
        // This sets the texture matrix to be the inverse of the view matrix
        mUseAutoTextureMatrix = true;
        makeGLMatrix( M, mViewMatrix);

        // Transpose 3x3 in order to invert matrix (rotation)
        // Note that we need to invert the Z _before_ the rotation
        // No idea why we have to invert the Z at all, but reflection is wrong without it
        mAutoTextureMatrix[0] = M[0]; mAutoTextureMatrix[1] = M[4]; mAutoTextureMatrix[2] = -M[8];
        mAutoTextureMatrix[4] = M[1]; mAutoTextureMatrix[5] = M[5]; mAutoTextureMatrix[6] = -M[9];
        mAutoTextureMatrix[8] = M[2]; mAutoTextureMatrix[9] = M[6]; mAutoTextureMatrix[10] = -M[10];
        mAutoTextureMatrix[3] = mAutoTextureMatrix[7] = mAutoTextureMatrix[11] = 0.0f;
        mAutoTextureMatrix[12] = mAutoTextureMatrix[13] = mAutoTextureMatrix[14] = 0.0f;
        mAutoTextureMatrix[15] = 1.0f;
        break;

    case TEXCALC_ENVIRONMENT_MAP_NORMAL:
        glTexGeni( GL_S, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP );
        glTexGeni( GL_T, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP );
        glTexGeni( GL_R, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP );

        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_S );
        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_T );
        mStateCacheManager->enableTextureCoordGen( GL_TEXTURE_GEN_R );
        mStateCacheManager->disableTextureCoordGen( GL_TEXTURE_GEN_Q );
        break;

    case TEXCALC_PROJECTIVE_TEXTURE:
        glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
        glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
        glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
        glTexGeni(GL_Q, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
        glTexGenfv(GL_S, GL_EYE_PLANE, eyePlaneS);
        glTexGenfv(GL_T, GL_EYE_PLANE, eyePlaneT);
        glTexGenfv(GL_R, GL_EYE_PLANE, eyePlaneR);
        glTexGenfv(GL_Q, GL_EYE_PLANE, eyePlaneQ);

        mStateCacheManager->enableTextureCoordGen(GL_TEXTURE_GEN_S);
        mStateCacheManager->enableTextureCoordGen(GL_TEXTURE_GEN_T);
        mStateCacheManager->enableTextureCoordGen(GL_TEXTURE_GEN_R);
        mStateCacheManager->enableTextureCoordGen(GL_TEXTURE_GEN_Q);

        mUseAutoTextureMatrix = true;

        // Set scale and translation matrix for projective textures
        projectionBias = Matrix4::CLIPSPACE2DTOIMAGESPACE;
        projectionBias = projectionBias * frustum->getProjectionMatrix();
        if(mTexProjRelative)
        {
            Matrix4 viewMatrix;
            frustum->calcViewMatrixRelative(mTexProjRelativeOrigin, viewMatrix);
            projectionBias = projectionBias * viewMatrix;
        }
        else
        {
            projectionBias = projectionBias * frustum->getViewMatrix();
        }
        projectionBias = projectionBias * mWorldMatrix;

        makeGLMatrix(mAutoTextureMatrix, projectionBias);
        break;

    default:
        break;
    }
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/// Maps an OGRE texture addressing mode onto its GL wrap-mode constant.
/// TAM_WRAP and any unrecognised value map to GL_REPEAT.
GLint GLRenderSystem::getTextureAddressingMode(
    TextureAddressingMode tam) const
{
    if (tam == TextureUnitState::TAM_MIRROR)
        return GL_MIRRORED_REPEAT;
    if (tam == TextureUnitState::TAM_CLAMP)
        return GL_CLAMP_TO_EDGE;
    if (tam == TextureUnitState::TAM_BORDER)
        return GL_CLAMP_TO_BORDER;
    return GL_REPEAT;
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/// Applies per-axis wrap modes to whatever texture target is currently
/// tracked for the given stage. No-op if the unit cannot be activated.
void GLRenderSystem::_setTextureAddressingMode(size_t stage, const Sampler::UVWAddressingMode& uvw)
{
    if (!mStateCacheManager->activateGLTextureUnit(stage))
        return;

    const GLenum target = mTextureTypes[stage];
    mStateCacheManager->setTexParameteri(target, GL_TEXTURE_WRAP_S, getTextureAddressingMode(uvw.u));
    mStateCacheManager->setTexParameteri(target, GL_TEXTURE_WRAP_T, getTextureAddressingMode(uvw.v));
    mStateCacheManager->setTexParameteri(target, GL_TEXTURE_WRAP_R, getTextureAddressingMode(uvw.w));
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/** Loads the given transform as the texture matrix for a fixed-function
    stage, concatenating the auto-generated matrix (environment/projective
    mapping) when one is active. Silently ignored for stages beyond the
    fixed-function unit count. */
void GLRenderSystem::_setTextureMatrix(size_t stage, const Matrix4& xform)
{
    // Only fixed-function units have a texture matrix, and the unit must
    // be activatable.
    if (stage >= mFixedFunctionTextureUnits ||
        !mStateCacheManager->activateGLTextureUnit(stage))
        return;

    glMatrixMode(GL_TEXTURE);
    glLoadMatrixf(xform.transpose()[0]);

    if (mUseAutoTextureMatrix)
        glMultMatrixf(mAutoTextureMatrix);

    // Leave the matrix mode as the rest of the code expects it.
    glMatrixMode(GL_MODELVIEW);
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/// Translates an OGRE blend factor into the corresponding GL blend-func
/// constant. SBF_ONE (and any unexpected value) maps to GL_ONE.
GLint GLRenderSystem::getBlendMode(SceneBlendFactor ogreBlend) const
{
    if (ogreBlend == SBF_ZERO)                    return GL_ZERO;
    if (ogreBlend == SBF_DEST_COLOUR)             return GL_DST_COLOR;
    if (ogreBlend == SBF_SOURCE_COLOUR)           return GL_SRC_COLOR;
    if (ogreBlend == SBF_ONE_MINUS_DEST_COLOUR)   return GL_ONE_MINUS_DST_COLOR;
    if (ogreBlend == SBF_ONE_MINUS_SOURCE_COLOUR) return GL_ONE_MINUS_SRC_COLOR;
    if (ogreBlend == SBF_DEST_ALPHA)              return GL_DST_ALPHA;
    if (ogreBlend == SBF_SOURCE_ALPHA)            return GL_SRC_ALPHA;
    if (ogreBlend == SBF_ONE_MINUS_DEST_ALPHA)    return GL_ONE_MINUS_DST_ALPHA;
    if (ogreBlend == SBF_ONE_MINUS_SOURCE_ALPHA)  return GL_ONE_MINUS_SRC_ALPHA;

    return GL_ONE;
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/** Sets independent colour/alpha blend factors and equations.
    Blending is disabled entirely for the identity combination
    (ONE, ZERO) on both pipes; otherwise the separate blend funcs and
    equations are pushed through the state cache.
    @param sourceFactor/destFactor            Colour-pipe blend factors.
    @param sourceFactorAlpha/destFactorAlpha  Alpha-pipe blend factors.
    @param op/alphaOp                         Blend equations per pipe. */
void GLRenderSystem::_setSeparateSceneBlending(
    SceneBlendFactor sourceFactor, SceneBlendFactor destFactor,
    SceneBlendFactor sourceFactorAlpha, SceneBlendFactor destFactorAlpha,
    SceneBlendOperation op, SceneBlendOperation alphaOp )
{
    // Single mapping from OGRE blend op to GL blend equation, shared by the
    // colour and alpha pipes (was previously two diverging switches).
    auto toGLBlendOp = [](SceneBlendOperation blendOp) -> GLint
    {
        switch (blendOp)
        {
        case SBO_SUBTRACT:
            return GL_FUNC_SUBTRACT;
        case SBO_REVERSE_SUBTRACT:
            return GL_FUNC_REVERSE_SUBTRACT;
        case SBO_MIN:
            return GL_MIN;
        case SBO_MAX:
            return GL_MAX;
        case SBO_ADD:
        default:
            return GL_FUNC_ADD;
        }
    };

    GLint sourceBlend = getBlendMode(sourceFactor);
    GLint destBlend = getBlendMode(destFactor);
    GLint sourceBlendAlpha = getBlendMode(sourceFactorAlpha);
    GLint destBlendAlpha = getBlendMode(destFactorAlpha);

    if(sourceFactor == SBF_ONE && destFactor == SBF_ZERO &&
       sourceFactorAlpha == SBF_ONE && destFactorAlpha == SBF_ZERO)
    {
        // Identity blend: cheaper to turn blending off.
        mStateCacheManager->setEnabled(GL_BLEND, false);
    }
    else
    {
        mStateCacheManager->setEnabled(GL_BLEND, true);
        mStateCacheManager->setBlendFunc(sourceBlend, destBlend, sourceBlendAlpha, destBlendAlpha);
    }

    mStateCacheManager->setBlendEquation(toGLBlendOp(op), toGLBlendOp(alphaOp));
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/** Configures alpha-test rejection and alpha-to-coverage.
    CMPF_ALWAYS_PASS means "no rejection", so the alpha test is switched
    off; the 0-255 reference value is mapped to GL's 0..1 range. A2C is
    only engaged while the alpha test itself is active. */
void GLRenderSystem::_setAlphaRejectSettings(CompareFunction func, unsigned char value, bool alphaToCoverage)
{
    const bool testEnabled = (func != CMPF_ALWAYS_PASS);

    mStateCacheManager->setEnabled(GL_ALPHA_TEST, testEnabled);
    if (testEnabled)
        glAlphaFunc(convertCompareFunction(func), value / 255.0f);

    if (getCapabilities()->hasCapability(RSC_ALPHA_TO_COVERAGE))
        mStateCacheManager->setEnabled(GL_SAMPLE_ALPHA_TO_COVERAGE, alphaToCoverage && testEnabled);
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/** Makes a viewport current, updating the render target, GL viewport and
    scissor box. Passing NULL clears both the active viewport and the
    render target; an unchanged, un-updated viewport is a no-op. */
void GLRenderSystem::_setViewport(Viewport *vp)
{
    if (!vp)
    {
        mActiveViewport = NULL;
        _setRenderTarget(NULL);
        return;
    }

    // Only re-apply when the viewport actually changed or was modified.
    if (vp == mActiveViewport && !vp->_isUpdated())
        return;

    RenderTarget* target = vp->getTarget();
    _setRenderTarget(target);
    mActiveViewport = vp;

    GLsizei w = vp->getActualWidth();
    GLsizei h = vp->getActualHeight();
    GLsizei x = vp->getActualLeft();
    GLsizei y = vp->getActualTop();

    // GL addresses the viewport from the lower-left corner; flip Y unless
    // the target already requires texture flipping.
    if (!target->requiresTextureFlipping())
        y = target->getHeight() - h - y;

    mStateCacheManager->setViewport(x, y, w, h);

    // Keep the scissor box in sync with the viewport rectangle.
    glScissor(x, y, w, h);
    mScissorBox[0] = x;
    mScissorBox[1] = y;
    mScissorBox[2] = w;
    mScissorBox[3] = h;

    vp->_clearUpdatedFlag();
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_beginFrame(void)
{
    // A frame cannot start without a viewport to render into.
    if (!mActiveViewport)
    {
        OGRE_EXCEPT(Exception::ERR_INVALID_STATE,
                    "Cannot begin frame - no viewport selected.",
                    "GLRenderSystem::_beginFrame");
    }

    // Clip all rendering to the viewport for the duration of the frame.
    mScissorsEnabled = true;
    mStateCacheManager->setEnabled(GL_SCISSOR_TEST, true);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_endFrame(void)
{
    // Viewport clipping is only active while a frame is in flight.
    mScissorsEnabled = false;
    mStateCacheManager->setEnabled(GL_SCISSOR_TEST, false);

    // Release any bound GPU programs now, mainly so we never keep a
    // binding to a program the resource manager might delete between
    // frames.
    unbindGpuProgram(GPT_VERTEX_PROGRAM);
    unbindGpuProgram(GPT_FRAGMENT_PROGRAM);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setCullingMode(CullingMode mode)
{
    mCullingMode = mode;
    // NB: Because the two-sided stencil API depends on the front face, we
    // must use the same winding for the front face everywhere.  OGRE's
    // default culling mode is clockwise, so anticlockwise winding is treated
    // as front-facing, and we never change the front face via glFrontFace;
    // instead we choose which face to cull.
    if (mode == CULL_NONE)
    {
        mStateCacheManager->setEnabled(GL_CULL_FACE, false);
        return;
    }

    // The effective winding is reversed when exactly one of "target needs
    // texture flipping" / "invert vertex winding" is set (an XOR).
    const bool windingReversed = mActiveRenderTarget &&
        (mActiveRenderTarget->requiresTextureFlipping() != mInvertVertexWinding);

    GLenum faceToCull;
    if (mode == CULL_ANTICLOCKWISE)
        faceToCull = windingReversed ? GL_BACK : GL_FRONT;
    else // CULL_CLOCKWISE and any unrecognised value
        faceToCull = windingReversed ? GL_FRONT : GL_BACK;

    mStateCacheManager->setEnabled(GL_CULL_FACE, true);
    mStateCacheManager->setCullFace(faceToCull);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setDepthBufferParams(bool depthTest, bool depthWrite, CompareFunction depthFunction)
{
    // Convenience wrapper: forward each piece of depth state to its
    // dedicated setter.
    _setDepthBufferCheckEnabled(depthTest);
    _setDepthBufferWriteEnabled(depthWrite);
    _setDepthBufferFunction(depthFunction);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setDepthBufferCheckEnabled(bool enabled)
{
    // Re-arm the clear depth whenever the depth test is (re)enabled.
    if (enabled)
        mStateCacheManager->setClearDepth(1.0f);

    mStateCacheManager->setEnabled(GL_DEPTH_TEST, enabled);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setDepthBufferWriteEnabled(bool enabled)
{
GLboolean flag = enabled ? GL_TRUE : GL_FALSE;
mStateCacheManager->setDepthMask( flag );
// Store for reference in _beginFrame
mDepthWrite = enabled;
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setDepthBufferFunction(CompareFunction func)
{
    // Translate the engine comparison into its GL equivalent and cache it.
    const GLint glFunc = convertCompareFunction(func);
    mStateCacheManager->setDepthFunc(glFunc);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setDepthBias(float constantBias, float slopeScaleBias)
{
    // Offsetting is required as soon as either bias component is non-zero.
    const bool biasActive = (constantBias != 0 || slopeScaleBias != 0);

    // Enable the offset for every primitive type it can apply to.
    mStateCacheManager->setEnabled(GL_POLYGON_OFFSET_FILL, biasActive);
    mStateCacheManager->setEnabled(GL_POLYGON_OFFSET_POINT, biasActive);
    mStateCacheManager->setEnabled(GL_POLYGON_OFFSET_LINE, biasActive);

    if (biasActive)
    {
        // GL's sign convention is the opposite of the engine's, hence
        // the negation of both factors.
        glPolygonOffset(-slopeScaleBias, -constantBias);
    }
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setColourBufferWriteEnabled(bool red, bool green, bool blue, bool alpha)
{
    mStateCacheManager->setColourMask(red, green, blue, alpha);
    // Keep a copy of the mask so clearFrameBuffer can restore it after
    // temporarily enabling all channels for a colour clear.
    const bool channels[4] = { red, green, blue, alpha };
    for (int i = 0; i < 4; ++i)
        mColourWrite[i] = channels[i];
}
//-----------------------------------------------------------------------------
void GLRenderSystem::setLightingEnabled(bool enabled)
{
    // Toggle fixed-function lighting via the cached GL state.
    mStateCacheManager->setEnabled(GL_LIGHTING, enabled);
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setFog(FogMode mode)
{
    // Map the engine fog mode onto a fixed-function GL fog equation.
    GLint glFogEquation;
    if (mode == FOG_EXP)
        glFogEquation = GL_EXP;
    else if (mode == FOG_EXP2)
        glFogEquation = GL_EXP2;
    else if (mode == FOG_LINEAR)
        glFogEquation = GL_LINEAR;
    else
    {
        // FOG_NONE (or anything unrecognised): disable fog and stop
        // feeding fog data through the fixed-function parameters.
        mStateCacheManager->setEnabled(GL_FOG, false);
        mFixedFunctionParams->clearAutoConstant(18);
        mFixedFunctionParams->clearAutoConstant(19);
        return;
    }

    // Keep fog parameters/colour updated automatically each frame
    // (slots 18 and 19 of the fixed-function parameter set).
    mFixedFunctionParams->setAutoConstant(18, GpuProgramParameters::ACT_FOG_PARAMS);
    mFixedFunctionParams->setAutoConstant(19, GpuProgramParameters::ACT_FOG_COLOUR);
    mStateCacheManager->setEnabled(GL_FOG, true);
    glFogi(GL_FOG_MODE, glFogEquation);
}
void GLRenderSystem::_setPolygonMode(PolygonMode level)
{
    // PM_SOLID (and any unrecognised value) falls back to filled polygons.
    GLenum rasterMode = GL_FILL;
    if (level == PM_POINTS)
        rasterMode = GL_POINT;
    else if (level == PM_WIREFRAME)
        rasterMode = GL_LINE;

    mStateCacheManager->setPolygonMode(rasterMode);
}
//---------------------------------------------------------------------
void GLRenderSystem::setStencilCheckEnabled(bool enabled)
{
    // Straight pass-through to the cached GL enable state.
    mStateCacheManager->setEnabled(GL_STENCIL_TEST, enabled);
}
//---------------------------------------------------------------------
void GLRenderSystem::setStencilBufferParams(CompareFunction func,
    uint32 refValue, uint32 compareMask, uint32 writeMask, StencilOperation stencilFailOp,
    StencilOperation depthFailOp, StencilOperation passOp,
    bool twoSidedOperation, bool readBackAsTexture)
{
    // Configures the full GL stencil state: compare function, reference
    // value, masks and the fail/zfail/pass operations, optionally with
    // distinct front/back-face operations (two-sided stencilling).
    // NOTE(review): readBackAsTexture is accepted but never read on this
    // code path -- confirm whether it is meaningful for the GL backend.
    bool flip;
    // Remember the write mask so clearFrameBuffer can restore it after
    // temporarily opening the mask for a stencil clear.
    mStencilWriteMask = writeMask;
    if (twoSidedOperation)
    {
        if (!mCurrentCapabilities->hasCapability(RSC_TWO_SIDED_STENCIL))
            OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "2-sided stencils are not supported",
                        "GLRenderSystem::setStencilBufferParams");
        // NB: We should always treat CCW as front face for consistent with default
        // culling mode. Therefore, we must take care with two-sided stencil settings.
        // "flip" is true when the effective winding is reversed (target
        // flipping XOR inverted vertex winding), in which case the
        // increment/decrement operations must be swapped per face.
        flip = (mInvertVertexWinding && !mActiveRenderTarget->requiresTextureFlipping()) ||
               (!mInvertVertexWinding && mActiveRenderTarget->requiresTextureFlipping());
        if(GLEW_VERSION_2_0) // New GL2 commands
        {
            // Core GL 2.0 separate per-face stencil state.
            // Back
            glStencilMaskSeparate(GL_BACK, writeMask);
            glStencilFuncSeparate(GL_BACK, convertCompareFunction(func), refValue, compareMask);
            glStencilOpSeparate(GL_BACK,
                convertStencilOp(stencilFailOp, !flip),
                convertStencilOp(depthFailOp, !flip),
                convertStencilOp(passOp, !flip));
            // Front
            glStencilMaskSeparate(GL_FRONT, writeMask);
            glStencilFuncSeparate(GL_FRONT, convertCompareFunction(func), refValue, compareMask);
            glStencilOpSeparate(GL_FRONT,
                convertStencilOp(stencilFailOp, flip),
                convertStencilOp(depthFailOp, flip),
                convertStencilOp(passOp, flip));
        }
        else // EXT_stencil_two_side
        {
            // Legacy extension path: select the active face, then issue
            // the ordinary (single-face) stencil calls for each side.
            mStateCacheManager->setEnabled(GL_STENCIL_TEST_TWO_SIDE_EXT, true);
            // Back
            glActiveStencilFaceEXT(GL_BACK);
            mStateCacheManager->setStencilMask(writeMask);
            glStencilFunc(convertCompareFunction(func), refValue, compareMask);
            glStencilOp(
                convertStencilOp(stencilFailOp, !flip),
                convertStencilOp(depthFailOp, !flip),
                convertStencilOp(passOp, !flip));
            // Front
            glActiveStencilFaceEXT(GL_FRONT);
            mStateCacheManager->setStencilMask(writeMask);
            glStencilFunc(convertCompareFunction(func), refValue, compareMask);
            glStencilOp(
                convertStencilOp(stencilFailOp, flip),
                convertStencilOp(depthFailOp, flip),
                convertStencilOp(passOp, flip));
        }
    }
    else
    {
        // Single-sided path: make sure the two-side extension state is off,
        // then apply the same state to both faces.
        if(!GLEW_VERSION_2_0)
            mStateCacheManager->setEnabled(GL_STENCIL_TEST_TWO_SIDE_EXT, false);
        flip = false;
        mStateCacheManager->setStencilMask(writeMask);
        glStencilFunc(convertCompareFunction(func), refValue, compareMask);
        glStencilOp(
            convertStencilOp(stencilFailOp, flip),
            convertStencilOp(depthFailOp, flip),
            convertStencilOp(passOp, flip));
    }
}
//---------------------------------------------------------------------
GLint GLRenderSystem::convertCompareFunction(CompareFunction func) const
{
    // Translate an OGRE comparison function into the matching GL enum.
    switch(func)
    {
    case CMPF_ALWAYS_FAIL:   return GL_NEVER;
    case CMPF_LESS:          return GL_LESS;
    case CMPF_LESS_EQUAL:    return GL_LEQUAL;
    case CMPF_EQUAL:         return GL_EQUAL;
    case CMPF_NOT_EQUAL:     return GL_NOTEQUAL;
    case CMPF_GREATER_EQUAL: return GL_GEQUAL;
    case CMPF_GREATER:       return GL_GREATER;
    case CMPF_ALWAYS_PASS:
    default:
        // Also reached for out-of-range values; keeps the compiler happy.
        return GL_ALWAYS;
    }
}
//---------------------------------------------------------------------
GLint GLRenderSystem::convertStencilOp(StencilOperation op, bool invert) const
{
    // Translate an OGRE stencil operation into the matching GL enum.
    // When `invert` is set (used for the opposite face in two-sided
    // stencilling) the increment/decrement operations are swapped.
    switch(op)
    {
    case SOP_KEEP:
        return GL_KEEP;
    case SOP_ZERO:
        return GL_ZERO;
    case SOP_REPLACE:
        return GL_REPLACE;
    case SOP_INCREMENT:
        return invert ? GL_DECR : GL_INCR;
    case SOP_DECREMENT:
        return invert ? GL_INCR : GL_DECR;
    case SOP_INCREMENT_WRAP:
        return invert ? GL_DECR_WRAP_EXT : GL_INCR_WRAP_EXT;
    case SOP_DECREMENT_WRAP:
        return invert ? GL_INCR_WRAP_EXT : GL_DECR_WRAP_EXT;
    case SOP_INVERT:
        return GL_INVERT;
    };
    // Fallback to keep the compiler happy.  Fixed: this previously returned
    // the OGRE enum value SOP_KEEP rather than the GL constant GL_KEEP, so
    // an out-of-range op would have produced an invalid GL stencil op.
    return GL_KEEP;
}
//---------------------------------------------------------------------
void GLRenderSystem::_setTextureUnitFiltering(size_t unit,
    FilterType ftype, FilterOptions fo)
{
    // Applies one filter component (min / mag / mip) to the texture bound on
    // the given unit.  GL folds minification and mip filtering into a single
    // GL_TEXTURE_MIN_FILTER value, so min and mip changes are combined via
    // getCombinedMinMipFilter() using the cached mMinFilter / mMipFilter.
    if (!mStateCacheManager->activateGLTextureUnit(unit))
        return;  // unit not available -- nothing to do
    switch(ftype)
    {
    case FT_MIN:
        mMinFilter = fo;
        // Combine with existing mip filter
        mStateCacheManager->setTexParameteri(
            mTextureTypes[unit],
            GL_TEXTURE_MIN_FILTER,
            getCombinedMinMipFilter(mMinFilter, mMipFilter));
        break;
    case FT_MAG:
        switch (fo)
        {
        case FO_ANISOTROPIC: // GL treats linear and aniso the same
        case FO_LINEAR:
            mStateCacheManager->setTexParameteri(
                mTextureTypes[unit],
                GL_TEXTURE_MAG_FILTER,
                GL_LINEAR);
            break;
        case FO_POINT:
        case FO_NONE:
            mStateCacheManager->setTexParameteri(
                mTextureTypes[unit],
                GL_TEXTURE_MAG_FILTER,
                GL_NEAREST);
            break;
        }
        break;
    case FT_MIP:
        mMipFilter = fo;
        // Combine with existing min filter
        mStateCacheManager->setTexParameteri(
            mTextureTypes[unit],
            GL_TEXTURE_MIN_FILTER,
            getCombinedMinMipFilter(mMinFilter, mMipFilter));
        break;
    }
}
//-----------------------------------------------------------------------------
void GLRenderSystem::_setTextureBlendMode(size_t stage, const LayerBlendModeEx& bm)
{
    // Programs the fixed-function texture combiner (GL_COMBINE texture
    // environment) for one texture stage from an OGRE layer blend mode:
    // selects the two sources, the combine operation, scale factors and any
    // manual constant colours.  Only works for fixed-function stages.
    if (stage >= mFixedFunctionTextureUnits)
    {
        // Can't do this
        return;
    }
    GLenum src1op, src2op, cmd;
    GLfloat cv1[4], cv2[4];
    if (bm.blendType == LBT_COLOUR)
    {
        // Manual constant colours for the colour pipe; also cached in
        // mManualBlendColours so the alpha pipe can reuse the RGB parts.
        cv1[0] = bm.colourArg1.r;
        cv1[1] = bm.colourArg1.g;
        cv1[2] = bm.colourArg1.b;
        cv1[3] = bm.colourArg1.a;
        mManualBlendColours[stage][0] = bm.colourArg1;
        cv2[0] = bm.colourArg2.r;
        cv2[1] = bm.colourArg2.g;
        cv2[2] = bm.colourArg2.b;
        cv2[3] = bm.colourArg2.a;
        mManualBlendColours[stage][1] = bm.colourArg2;
    }
    if (bm.blendType == LBT_ALPHA)
    {
        // Alpha pipe: keep the previously cached RGB, replace only alpha.
        cv1[0] = mManualBlendColours[stage][0].r;
        cv1[1] = mManualBlendColours[stage][0].g;
        cv1[2] = mManualBlendColours[stage][0].b;
        cv1[3] = bm.alphaArg1;
        cv2[0] = mManualBlendColours[stage][1].r;
        cv2[1] = mManualBlendColours[stage][1].g;
        cv2[2] = mManualBlendColours[stage][1].b;
        cv2[3] = bm.alphaArg2;
    }
    // Map OGRE blend source 1 to a GL combiner source.
    switch (bm.source1)
    {
    case LBS_CURRENT:
        src1op = GL_PREVIOUS;
        break;
    case LBS_TEXTURE:
        src1op = GL_TEXTURE;
        break;
    case LBS_MANUAL:
        src1op = GL_CONSTANT;
        break;
    case LBS_DIFFUSE:
        src1op = GL_PRIMARY_COLOR;
        break;
        // XXX  specular is approximated with the primary colour here
    case LBS_SPECULAR:
        src1op = GL_PRIMARY_COLOR;
        break;
    default:
        src1op = 0;
    }
    // Same mapping for blend source 2.
    switch (bm.source2)
    {
    case LBS_CURRENT:
        src2op = GL_PREVIOUS;
        break;
    case LBS_TEXTURE:
        src2op = GL_TEXTURE;
        break;
    case LBS_MANUAL:
        src2op = GL_CONSTANT;
        break;
    case LBS_DIFFUSE:
        src2op = GL_PRIMARY_COLOR;
        break;
        // XXX  specular is approximated with the primary colour here
    case LBS_SPECULAR:
        src2op = GL_PRIMARY_COLOR;
        break;
    default:
        src2op = 0;
    }
    // Map the OGRE blend operation to a GL combine mode.  Several OGRE ops
    // share a GL mode and are distinguished later (scale factor, third
    // interpolation source).
    switch (bm.operation)
    {
    case LBX_SOURCE1:
        cmd = GL_REPLACE;
        break;
    case LBX_SOURCE2:
        cmd = GL_REPLACE;
        break;
    case LBX_MODULATE:
        cmd = GL_MODULATE;
        break;
    case LBX_MODULATE_X2:
        cmd = GL_MODULATE;
        break;
    case LBX_MODULATE_X4:
        cmd = GL_MODULATE;
        break;
    case LBX_ADD:
        cmd = GL_ADD;
        break;
    case LBX_ADD_SIGNED:
        cmd = GL_ADD_SIGNED;
        break;
    case LBX_ADD_SMOOTH:
        cmd = GL_INTERPOLATE;
        break;
    case LBX_SUBTRACT:
        cmd = GL_SUBTRACT;
        break;
    case LBX_BLEND_DIFFUSE_COLOUR:
        cmd = GL_INTERPOLATE;
        break;
    case LBX_BLEND_DIFFUSE_ALPHA:
        cmd = GL_INTERPOLATE;
        break;
    case LBX_BLEND_TEXTURE_ALPHA:
        cmd = GL_INTERPOLATE;
        break;
    case LBX_BLEND_CURRENT_ALPHA:
        cmd = GL_INTERPOLATE;
        break;
    case LBX_BLEND_MANUAL:
        cmd = GL_INTERPOLATE;
        break;
    case LBX_DOTPRODUCT:
        // Dot3 bump mapping only where the hardware supports it.
        cmd = mCurrentCapabilities->hasCapability(RSC_DOT3)
            ? GL_DOT3_RGB : GL_MODULATE;
        break;
    default:
        cmd = 0;
    }
    if (!mStateCacheManager->activateGLTextureUnit(stage))
        return;
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE);
    // Wire sources/operation into the colour or alpha pipe.  The third
    // source defaults to the constant colour; interpolate-style ops below
    // override it.
    if (bm.blendType == LBT_COLOUR)
    {
        glTexEnvi(GL_TEXTURE_ENV, GL_COMBINE_RGB, cmd);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE0_RGB, src1op);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE1_RGB, src2op);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_CONSTANT);
    }
    else
    {
        glTexEnvi(GL_TEXTURE_ENV, GL_COMBINE_ALPHA, cmd);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE0_ALPHA, src1op);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE1_ALPHA, src2op);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_ALPHA, GL_CONSTANT);
    }
    // Manual blend factor (only alpha is used, as the interpolation weight).
    float blendValue[4] = {0, 0, 0, static_cast<float>(bm.factor)};
    // Select the third (interpolation) source for the blend-style ops.
    switch (bm.operation)
    {
    case LBX_BLEND_DIFFUSE_COLOUR:
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_PRIMARY_COLOR);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_ALPHA, GL_PRIMARY_COLOR);
        break;
    case LBX_BLEND_DIFFUSE_ALPHA:
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_PRIMARY_COLOR);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_ALPHA, GL_PRIMARY_COLOR);
        break;
    case LBX_BLEND_TEXTURE_ALPHA:
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_TEXTURE);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_ALPHA, GL_TEXTURE);
        break;
    case LBX_BLEND_CURRENT_ALPHA:
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_RGB, GL_PREVIOUS);
        glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE2_ALPHA, GL_PREVIOUS);
        break;
    case LBX_BLEND_MANUAL:
        glTexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR, blendValue);
        break;
    default:
        break;
    };
    // Output scale distinguishes MODULATE from MODULATE_X2/X4.
    switch (bm.operation)
    {
    case LBX_MODULATE_X2:
        glTexEnvi(GL_TEXTURE_ENV, bm.blendType == LBT_COLOUR ?
                  GL_RGB_SCALE : GL_ALPHA_SCALE, 2);
        break;
    case LBX_MODULATE_X4:
        glTexEnvi(GL_TEXTURE_ENV, bm.blendType == LBT_COLOUR ?
                  GL_RGB_SCALE : GL_ALPHA_SCALE, 4);
        break;
    default:
        glTexEnvi(GL_TEXTURE_ENV, bm.blendType == LBT_COLOUR ?
                  GL_RGB_SCALE : GL_ALPHA_SCALE, 1);
        break;
    }
    // Operand selection: the colour pipe reads colours, except that the
    // third operand of an interpolate normally weights by alpha (unless
    // blending by diffuse *colour*).
    if (bm.blendType == LBT_COLOUR){
        glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR);
        glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR);
        if (bm.operation == LBX_BLEND_DIFFUSE_COLOUR){
            glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND2_RGB, GL_SRC_COLOR);
        } else {
            glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND2_RGB, GL_SRC_ALPHA);
        }
    }
    glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA);
    glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_ALPHA, GL_SRC_ALPHA);
    glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND2_ALPHA, GL_SRC_ALPHA);
    // Upload manual constant colours where requested.
    // NOTE(review): there is only one GL_TEXTURE_ENV_COLOR per unit, so if
    // both sources are manual, cv2 overwrites cv1 -- confirm intended.
    if(bm.source1 == LBS_MANUAL)
        glTexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR, cv1);
    if (bm.source2 == LBS_MANUAL)
        glTexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR, cv2);
}
//---------------------------------------------------------------------
void GLRenderSystem::_render(const RenderOperation& op)
{
    // Issues the draw call(s) for one render operation: binds vertex (and
    // optionally instance) data, resolves the GL primitive type, draws --
    // repeating for multi-pass iteration -- then tears down client state
    // and custom attribute bindings.
    // Call super class
    RenderSystem::_render(op);
    mMaxBuiltInTextureAttribIndex = 0;
    // Instancing can come from a global instance buffer or per-source
    // instance data on the operation's own vertex bindings.
    HardwareVertexBufferSharedPtr globalInstanceVertexBuffer = getGlobalInstanceVertexBuffer();
    VertexDeclaration* globalVertexDeclaration = getGlobalInstanceVertexBufferVertexDeclaration();
    bool hasInstanceData = (op.useGlobalInstancingVertexBufferIsAvailable &&
                            globalInstanceVertexBuffer && globalVertexDeclaration != NULL) ||
                           op.vertexData->vertexBufferBinding->hasInstanceData();
    size_t numberOfInstances = op.numberOfInstances;
    if (op.useGlobalInstancingVertexBufferIsAvailable)
    {
        numberOfInstances *= getGlobalNumberOfInstances();
    }
    // Bind every declared vertex element that has a buffer bound.
    const VertexDeclaration::VertexElementList& decl =
        op.vertexData->vertexDeclaration->getElements();
    VertexDeclaration::VertexElementList::const_iterator elemIter, elemEnd;
    elemEnd = decl.end();
    for (elemIter = decl.begin(); elemIter != elemEnd; ++elemIter)
    {
        const VertexElement & elem = *elemIter;
        size_t source = elem.getSource();
        if (!op.vertexData->vertexBufferBinding->isBufferBound(source))
            continue; // skip unbound elements
        HardwareVertexBufferSharedPtr vertexBuffer =
            op.vertexData->vertexBufferBinding->getBuffer(source);
        bindVertexElementToGpu(elem, vertexBuffer, op.vertexData->vertexStart);
    }
    // Additionally bind the global instance buffer's elements, if present.
    if( globalInstanceVertexBuffer && globalVertexDeclaration != NULL )
    {
        elemEnd = globalVertexDeclaration->getElements().end();
        for (elemIter = globalVertexDeclaration->getElements().begin(); elemIter != elemEnd; ++elemIter)
        {
            const VertexElement & elem = *elemIter;
            bindVertexElementToGpu(elem, globalInstanceVertexBuffer, 0);
        }
    }
    bool multitexturing = (getCapabilities()->getNumTextureUnits() > 1);
    if (multitexturing)
        glClientActiveTextureARB(GL_TEXTURE0);
    // Find the correct type to render
    GLint primType;
    int operationType = op.operationType;
    // Use adjacency if there is a geometry program and it requested adjacency info
    if(mGeometryProgramBound && mCurrentGeometryProgram && mCurrentGeometryProgram->isAdjacencyInfoRequired())
        operationType |= RenderOperation::OT_DETAIL_ADJACENCY_BIT;
    switch (operationType)
    {
    case RenderOperation::OT_POINT_LIST:
        primType = GL_POINTS;
        break;
    case RenderOperation::OT_LINE_LIST:
        primType = GL_LINES;
        break;
    case RenderOperation::OT_LINE_LIST_ADJ:
        primType = GL_LINES_ADJACENCY_EXT;
        break;
    case RenderOperation::OT_LINE_STRIP:
        primType = GL_LINE_STRIP;
        break;
    case RenderOperation::OT_LINE_STRIP_ADJ:
        primType = GL_LINE_STRIP_ADJACENCY_EXT;
        break;
    default:
    case RenderOperation::OT_TRIANGLE_LIST:
        primType = GL_TRIANGLES;
        break;
    case RenderOperation::OT_TRIANGLE_LIST_ADJ:
        primType = GL_TRIANGLES_ADJACENCY_EXT;
        break;
    case RenderOperation::OT_TRIANGLE_STRIP:
        primType = GL_TRIANGLE_STRIP;
        break;
    case RenderOperation::OT_TRIANGLE_STRIP_ADJ:
        primType = GL_TRIANGLE_STRIP_ADJACENCY_EXT;
        break;
    case RenderOperation::OT_TRIANGLE_FAN:
        primType = GL_TRIANGLE_FAN;
        break;
    }
    if (op.useIndexes)
    {
        // Indexed path: bind the index buffer and compute the byte offset
        // of the first index to draw.
        void* pBufferData = 0;
        mStateCacheManager->bindGLBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB,
            static_cast<GLHardwareIndexBuffer*>(
                op.indexData->indexBuffer.get())->getGLBufferId());
        pBufferData = VBO_BUFFER_OFFSET(
            op.indexData->indexStart * op.indexData->indexBuffer->getIndexSize());
        GLenum indexType = (op.indexData->indexBuffer->getType() == HardwareIndexBuffer::IT_16BIT) ? GL_UNSIGNED_SHORT : GL_UNSIGNED_INT;
        // Draw once per pass iteration.
        do
        {
            // Update derived depth bias
            if (mDerivedDepthBias && mCurrentPassIterationNum > 0)
            {
                _setDepthBias(mDerivedDepthBiasBase +
                              mDerivedDepthBiasMultiplier * mCurrentPassIterationNum,
                              mDerivedDepthBiasSlopeScale);
            }
            if(hasInstanceData)
            {
                glDrawElementsInstancedARB(primType, op.indexData->indexCount, indexType, pBufferData, numberOfInstances);
            }
            else
            {
                glDrawElements(primType, op.indexData->indexCount, indexType, pBufferData);
            }
        } while (updatePassIterationRenderState());
    }
    else
    {
        // Non-indexed path: draw straight from the bound arrays.
        do
        {
            // Update derived depth bias
            if (mDerivedDepthBias && mCurrentPassIterationNum > 0)
            {
                _setDepthBias(mDerivedDepthBiasBase +
                              mDerivedDepthBiasMultiplier * mCurrentPassIterationNum,
                              mDerivedDepthBiasSlopeScale);
            }
            if(hasInstanceData)
            {
                glDrawArraysInstancedARB(primType, 0, op.vertexData->vertexCount, numberOfInstances);
            }
            else
            {
                glDrawArrays(primType, 0, op.vertexData->vertexCount);
            }
        } while (updatePassIterationRenderState());
    }
    // Tear down client-side array state so it does not leak into the next
    // operation.
    glDisableClientState( GL_VERTEX_ARRAY );
    // only valid up to GL_MAX_TEXTURE_UNITS, which is recorded in mFixedFunctionTextureUnits
    if (multitexturing)
    {
        unsigned short mNumEnabledTextures = std::max(std::min((unsigned short)mDisabledTexUnitsFrom, mFixedFunctionTextureUnits), (unsigned short)(mMaxBuiltInTextureAttribIndex + 1));
        for (unsigned short i = 0; i < mNumEnabledTextures; i++)
        {
            // No need to disable for texture units that weren't used
            glClientActiveTextureARB(GL_TEXTURE0 + i);
            glDisableClientState( GL_TEXTURE_COORD_ARRAY );
        }
        glClientActiveTextureARB(GL_TEXTURE0);
    }
    else
    {
        glDisableClientState( GL_TEXTURE_COORD_ARRAY );
    }
    glDisableClientState( GL_NORMAL_ARRAY );
    glDisableClientState( GL_COLOR_ARRAY );
    if (GLEW_EXT_secondary_color)
    {
        glDisableClientState( GL_SECONDARY_COLOR_ARRAY );
    }
    // unbind any custom attributes
    for (std::vector<GLuint>::iterator ai = mRenderAttribsBound.begin(); ai != mRenderAttribsBound.end(); ++ai)
    {
        glDisableVertexAttribArrayARB(*ai);
    }
    // unbind any instance attributes (reset their divisor to 0)
    for (std::vector<GLuint>::iterator ai = mRenderInstanceAttribsBound.begin(); ai != mRenderInstanceAttribsBound.end(); ++ai)
    {
        glVertexAttribDivisorARB(*ai, 0);
    }
    mRenderAttribsBound.clear();
    mRenderInstanceAttribsBound.clear();
}
//---------------------------------------------------------------------
void GLRenderSystem::setNormaliseNormals(bool normalise)
{
    // Toggle GL's automatic renormalisation of normals.
    mStateCacheManager->setEnabled(GL_NORMALIZE, normalise);
}
//---------------------------------------------------------------------
void GLRenderSystem::bindGpuProgram(GpuProgram* prg)
{
    // Binds a GPU program of any supported type, unbinding any different
    // program previously bound for that type.  Throws on a null program.
    if (!prg)
    {
        OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
                    "Null program bound.",
                    "GLRenderSystem::bindGpuProgram");
    }
    GLGpuProgram* glprg = static_cast<GLGpuProgram*>(prg);
    // Unbind previous gpu program first.
    //
    // Note:
    // 1. Even if both previous and current are the same object, we can't
    //    bypass re-bind completely since the object itself maybe modified.
    //    But we can bypass unbind based on the assumption that object
    //    internally GL program type shouldn't be changed after it has
    //    been created. The behavior of bind to a GL program type twice
    //    should be same as unbind and rebind that GL program type, even
    //    for difference objects.
    // 2. We also assumed that the program's type (vertex or fragment) should
    //    not be changed during it's in using. If not, the following switch
    //    statement will confuse GL state completely, and we can't fix it
    //    here. To fix this case, we must coding the program implementation
    //    itself, if type is changing (during load/unload, etc), and it's inuse,
    //    unbind and notify render system to correct for its state.
    //
    switch (glprg->getType())
    {
    case GPT_VERTEX_PROGRAM:
        if (mCurrentVertexProgram != glprg)
        {
            if (mCurrentVertexProgram)
                mCurrentVertexProgram->unbindProgram();
            mCurrentVertexProgram = glprg;
        }
        break;
    case GPT_FRAGMENT_PROGRAM:
        if (mCurrentFragmentProgram != glprg)
        {
            if (mCurrentFragmentProgram)
                mCurrentFragmentProgram->unbindProgram();
            mCurrentFragmentProgram = glprg;
        }
        break;
    case GPT_GEOMETRY_PROGRAM:
        if (mCurrentGeometryProgram != glprg)
        {
            if (mCurrentGeometryProgram)
                mCurrentGeometryProgram->unbindProgram();
            mCurrentGeometryProgram = glprg;
        }
        break;
    case GPT_COMPUTE_PROGRAM:
    case GPT_DOMAIN_PROGRAM:
    case GPT_HULL_PROGRAM:
        // Not supported by the fixed GL backend; silently ignored here.
        break;
    }
    // Bind the program
    glprg->bindProgram();
    RenderSystem::bindGpuProgram(prg);
}
//---------------------------------------------------------------------
void GLRenderSystem::unbindGpuProgram(GpuProgramType gptype)
{
    // Release the currently bound program of the requested type (if any)
    // and drop its cached parameters, then notify the base class.
    switch (gptype)
    {
    case GPT_VERTEX_PROGRAM:
        if (mCurrentVertexProgram)
        {
            mActiveVertexGpuProgramParameters.reset();
            mCurrentVertexProgram->unbindProgram();
            mCurrentVertexProgram = 0;
        }
        break;
    case GPT_GEOMETRY_PROGRAM:
        if (mCurrentGeometryProgram)
        {
            mActiveGeometryGpuProgramParameters.reset();
            mCurrentGeometryProgram->unbindProgram();
            mCurrentGeometryProgram = 0;
        }
        break;
    case GPT_FRAGMENT_PROGRAM:
        if (mCurrentFragmentProgram)
        {
            mActiveFragmentGpuProgramParameters.reset();
            mCurrentFragmentProgram->unbindProgram();
            mCurrentFragmentProgram = 0;
        }
        break;
    default:
        // Other program types have no GL binding to release here.
        break;
    }
    RenderSystem::unbindGpuProgram(gptype);
}
//---------------------------------------------------------------------
void GLRenderSystem::bindGpuProgramParameters(GpuProgramType gptype, const GpuProgramParametersPtr& params, uint16 mask)
{
    // Pushes parameter values to the program currently bound for the given
    // type.  `mask` selects which variability classes (GPV_*) to upload.
    // NOTE(review): this assumes a program of the matching type is already
    // bound -- mCurrentXxxProgram is dereferenced without a null check.
    if (mask & (uint16)GPV_GLOBAL)
    {
        // We could maybe use GL_EXT_bindable_uniform here to produce Dx10-style
        // shared constant buffers, but GPU support seems fairly weak?
        // for now, just copy
        params->_copySharedParams();
    }
    switch (gptype)
    {
    case GPT_VERTEX_PROGRAM:
        mActiveVertexGpuProgramParameters = params;
        mCurrentVertexProgram->bindProgramParameters(params, mask);
        break;
    case GPT_GEOMETRY_PROGRAM:
        mActiveGeometryGpuProgramParameters = params;
        mCurrentGeometryProgram->bindProgramParameters(params, mask);
        break;
    case GPT_FRAGMENT_PROGRAM:
        mActiveFragmentGpuProgramParameters = params;
        mCurrentFragmentProgram->bindProgramParameters(params, mask);
        break;
    case GPT_COMPUTE_PROGRAM:
    case GPT_DOMAIN_PROGRAM:
    case GPT_HULL_PROGRAM:
        // Not supported by this backend.
        break;
    }
}
//---------------------------------------------------------------------
void GLRenderSystem::setClipPlanesImpl(const PlaneList& clipPlanes)
{
    // Uploads the user clip planes in view space (identity world transform)
    // and enables them, disabling any of the 6 GL planes not in the list.
    // A note on GL user clipping:
    // When an ARB vertex program is enabled in GL, user clipping is completely
    // disabled. There is no way around this, it's just turned off.
    // When using GLSL, user clipping can work but you have to include a
    // glClipVertex command in your vertex shader.
    // Thus the planes set here may not actually be respected.
    size_t i = 0;
    size_t numClipPlanes;
    GLdouble clipPlane[4];
    // Save previous modelview
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    // just load view matrix (identity world) so planes are interpreted in
    // view space; the previous modelview is restored at the end.
    GLfloat mat[16];
    makeGLMatrix(mat, mViewMatrix);
    glLoadMatrixf(mat);
    numClipPlanes = clipPlanes.size();
    for (i = 0; i < numClipPlanes; ++i)
    {
        GLenum clipPlaneId = static_cast<GLenum>(GL_CLIP_PLANE0 + i);
        const Plane& plane = clipPlanes[i];
        // GL guarantees at least 6 clip planes; anything beyond that is an
        // error here (hard-coded rather than querying GL_MAX_CLIP_PLANES).
        if (i >= 6/*GL_MAX_CLIP_PLANES*/)
        {
            OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR, "Unable to set clip plane",
                        "GLRenderSystem::setClipPlanes");
        }
        clipPlane[0] = plane.normal.x;
        clipPlane[1] = plane.normal.y;
        clipPlane[2] = plane.normal.z;
        clipPlane[3] = plane.d;
        glClipPlane(clipPlaneId, clipPlane);
        mStateCacheManager->setEnabled(clipPlaneId, true);
    }
    // disable remaining clip planes
    for ( ; i < 6/*GL_MAX_CLIP_PLANES*/; ++i)
    {
        mStateCacheManager->setEnabled(static_cast<GLenum>(GL_CLIP_PLANE0 + i), false);
    }
    // restore matrices
    glPopMatrix();
}
//---------------------------------------------------------------------
void GLRenderSystem::setScissorTest(bool enabled, size_t left,
    size_t top, size_t right, size_t bottom)
{
    // Enables or disables scissor clipping.  Input rect is in top-left
    // window coordinates (left/top/right/bottom); GL wants bottom-left
    // origin plus width/height, so coordinates are converted unless the
    // target is texture-flipped.
    // NOTE(review): mActiveRenderTarget (and, when disabling,
    // mActiveViewport) are dereferenced without null checks -- callers must
    // have set a target/viewport first.
    mScissorsEnabled = enabled;
    // If request texture flipping, use "upper-left", otherwise use "lower-left"
    bool flipping = mActiveRenderTarget->requiresTextureFlipping();
    // GL measures from the bottom, not the top
    size_t targetHeight = mActiveRenderTarget->getHeight();
    // Calculate the "lower-left" corner of the viewport
    GLsizei x = 0, y = 0, w = 0, h = 0;
    if (enabled)
    {
        mStateCacheManager->setEnabled(GL_SCISSOR_TEST, true);
        // NB GL uses width / height rather than right / bottom
        x = left;
        if (flipping)
            y = top;
        else
            y = targetHeight - bottom;
        w = right - left;
        h = bottom - top;
        glScissor(x, y, w, h);
        // Record so clearFrameBuffer can restore this scissor rect.
        mScissorBox[0] = x;
        mScissorBox[1] = y;
        mScissorBox[2] = w;
        mScissorBox[3] = h;
    }
    else
    {
        // GL requires you to reset the scissor when disabling
        w = mActiveViewport->getActualWidth();
        h = mActiveViewport->getActualHeight();
        x = mActiveViewport->getActualLeft();
        if (flipping)
            y = mActiveViewport->getActualTop();
        else
            y = targetHeight - mActiveViewport->getActualTop() - h;
        glScissor(x, y, w, h);
        mScissorBox[0] = x;
        mScissorBox[1] = y;
        mScissorBox[2] = w;
        mScissorBox[3] = h;
    }
}
//---------------------------------------------------------------------
void GLRenderSystem::clearFrameBuffer(unsigned int buffers,
    const ColourValue& colour, Real depth, unsigned short stencil)
{
    // Clears the selected buffers (colour / depth / stencil) to the given
    // values.  Write masks are temporarily opened for the clear and restored
    // afterwards, and the clear is constrained to the current viewport via
    // the scissor box (also saved and restored).
    // True when any colour channel is currently masked off and would block
    // the clear.
    bool colourMask = !mColourWrite[0] || !mColourWrite[1]
        || !mColourWrite[2] || !mColourWrite[3];
    if(mCurrentContext)
        mCurrentContext->setCurrent();
    GLbitfield flags = 0;
    if (buffers & FBT_COLOUR)
    {
        flags |= GL_COLOR_BUFFER_BIT;
        // Enable buffer for writing if it isn't
        if (colourMask)
        {
            mStateCacheManager->setColourMask(true, true, true, true);
        }
        mStateCacheManager->setClearColour(colour.r, colour.g, colour.b, colour.a);
    }
    if (buffers & FBT_DEPTH)
    {
        flags |= GL_DEPTH_BUFFER_BIT;
        // Enable buffer for writing if it isn't
        if (!mDepthWrite)
        {
            mStateCacheManager->setDepthMask( GL_TRUE );
        }
        mStateCacheManager->setClearDepth(depth);
    }
    if (buffers & FBT_STENCIL)
    {
        flags |= GL_STENCIL_BUFFER_BIT;
        // Enable buffer for writing if it isn't
        mStateCacheManager->setStencilMask(0xFFFFFFFF);
        glClearStencil(stencil);
    }
    // Should be enable scissor test due the clear region is
    // relied on scissor box bounds.
    if (!mScissorsEnabled)
    {
        mStateCacheManager->setEnabled(GL_SCISSOR_TEST, true);
    }
    // Sets the scissor box as same as viewport
    GLint viewport[4];
    mStateCacheManager->getViewport(viewport);
    bool scissorBoxDifference =
        viewport[0] != mScissorBox[0] || viewport[1] != mScissorBox[1] ||
        viewport[2] != mScissorBox[2] || viewport[3] != mScissorBox[3];
    if (scissorBoxDifference)
    {
        glScissor(viewport[0], viewport[1], viewport[2], viewport[3]);
    }
    // Clear buffers
    glClear(flags);
    // Restore scissor box
    if (scissorBoxDifference)
    {
        glScissor(mScissorBox[0], mScissorBox[1], mScissorBox[2], mScissorBox[3]);
    }
    // Restore scissor test
    if (!mScissorsEnabled)
    {
        mStateCacheManager->setEnabled(GL_SCISSOR_TEST, false);
    }
    // Reset buffer write state to what the user had configured.
    if (!mDepthWrite && (buffers & FBT_DEPTH))
    {
        mStateCacheManager->setDepthMask( GL_FALSE );
    }
    if (colourMask && (buffers & FBT_COLOUR))
    {
        mStateCacheManager->setColourMask(mColourWrite[0], mColourWrite[1], mColourWrite[2], mColourWrite[3]);
    }
    if (buffers & FBT_STENCIL)
    {
        mStateCacheManager->setStencilMask(mStencilWriteMask);
    }
}
//---------------------------------------------------------------------
HardwareOcclusionQuery* GLRenderSystem::createHardwareOcclusionQuery(void)
{
    // Track the query in mHwOcclusionQueries so the render system can
    // dispose of it later; ownership stays with the render system.
    GLHardwareOcclusionQuery* query = new GLHardwareOcclusionQuery();
    mHwOcclusionQueries.push_back(query);
    return query;
}
//---------------------------------------------------------------------
void GLRenderSystem::_oneTimeContextInitialization()
{
    // Per-context GL state applied exactly once when a context is first
    // made current (see _switchContext's getInitialized() check).
    // Set nicer lighting model -- d3d9 has this by default
    glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SEPARATE_SPECULAR_COLOR);
    glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, 1);
    mStateCacheManager->setEnabled(GL_COLOR_SUM, true);
    mStateCacheManager->setEnabled(GL_DITHER, false);
    // Check for FSAA
    // Enable the extension if it was enabled by the GLSupport
    if (checkExtension("GL_ARB_multisample"))
    {
        // Queried as an int because glGetIntegerv writes a GLint.
        int fsaa_active = false;
        glGetIntegerv(GL_SAMPLE_BUFFERS_ARB,(GLint*)&fsaa_active);
        if(fsaa_active)
        {
            mStateCacheManager->setEnabled(GL_MULTISAMPLE_ARB, true);
            LogManager::getSingleton().logMessage("Using FSAA from GL_ARB_multisample extension.");
        }
    }
    if (checkExtension("GL_ARB_seamless_cube_map"))
    {
        // Enable seamless cube maps
        glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
    }
}
//---------------------------------------------------------------------
void GLRenderSystem::_switchContext(GLContext *context)
{
    // Makes a different GL context current.  Because GL state is
    // per-context, programs/lights/textures are unbound from the old
    // context, the new context is made current (and one-time initialised if
    // needed), then programs are rebound and cached masks re-applied.
    // Unbind GPU programs and rebind to new context later, because
    // scene manager treat render system as ONE 'context' ONLY, and it
    // cached the GPU programs using state.
    if (mCurrentVertexProgram)
        mCurrentVertexProgram->unbindProgram();
    if (mCurrentGeometryProgram)
        mCurrentGeometryProgram->unbindProgram();
    if (mCurrentFragmentProgram)
        mCurrentFragmentProgram->unbindProgram();
    // Disable lights
    for (unsigned short i = 0; i < mCurrentLights; ++i)
    {
        setGLLight(i, false);
    }
    mCurrentLights = 0;
    // Disable textures
    _disableTextureUnitsFrom(0);
    // It's ready for switching
    if (mCurrentContext!=context)
    {
#if OGRE_PLATFORM == OGRE_PLATFORM_APPLE
        // NSGLContext::makeCurrentContext does not flush automatically. everybody else does.
        glFlushRenderAPPLE();
#endif
        mCurrentContext->endCurrent();
        mCurrentContext = context;
    }
    mCurrentContext->setCurrent();
    // Each context has its own state cache; fetch (or create) the one for
    // the context we just made current.
    mStateCacheManager = mCurrentContext->createOrRetrieveStateCacheManager<GLStateCacheManager>();
    // Check if the context has already done one-time initialisation
    if(!mCurrentContext->getInitialized())
    {
        _oneTimeContextInitialization();
        mCurrentContext->setInitialized();
    }
    // Rebind GPU programs to new context
    if (mCurrentVertexProgram)
        mCurrentVertexProgram->bindProgram();
    if (mCurrentGeometryProgram)
        mCurrentGeometryProgram->bindProgram();
    if (mCurrentFragmentProgram)
        mCurrentFragmentProgram->bindProgram();
    // Must reset depth/colour write mask to according with user desired, otherwise,
    // clearFrameBuffer would be wrong because the value we are recorded may be
    // difference with the really state stored in GL context.
    mStateCacheManager->setDepthMask(mDepthWrite);
    mStateCacheManager->setColourMask(mColourWrite[0], mColourWrite[1], mColourWrite[2], mColourWrite[3]);
    mStateCacheManager->setStencilMask(mStencilWriteMask);
}
//---------------------------------------------------------------------
/// Bind @p target as the active render target.
/// Unbinds the previous FBO, switches GL context if the target lives on a
/// different one, (re)attaches a matching depth buffer, binds the target's
/// FBO and finally syncs the sRGB framebuffer state.
void GLRenderSystem::_setRenderTarget(RenderTarget *target)
{
    // Unbind frame buffer object
    if(mActiveRenderTarget)
        mRTTManager->unbind(mActiveRenderTarget);
    mActiveRenderTarget = target;
    if (target)
    {
        // Switch context if different from current one
        GLContext *newContext = dynamic_cast<GLRenderTarget*>(target)->getContext();
        if(newContext && mCurrentContext != newContext)
        {
            _switchContext(newContext);
        }
        //Check the FBO's depth buffer status
        GLDepthBuffer *depthBuffer = static_cast<GLDepthBuffer*>(target->getDepthBuffer());
        if( target->getDepthBufferPool() != DepthBuffer::POOL_NO_DEPTH &&
            (!depthBuffer || depthBuffer->getGLContext() != mCurrentContext ) )
        {
            //Depth is automatically managed and there is no depth buffer attached to this RT
            //or the Current context doesn't match the one this Depth buffer was created with
            setDepthBufferFor( target );
        }
        // Bind frame buffer object
        mRTTManager->bind(target);
        if (GLEW_EXT_framebuffer_sRGB)
        {
            // Enable / disable sRGB states
            mStateCacheManager->setEnabled(GL_FRAMEBUFFER_SRGB_EXT, target->isHardwareGammaEnabled());
            // Note: could test GL_FRAMEBUFFER_SRGB_CAPABLE_EXT here before
            // enabling, but GL spec says incapable surfaces ignore the setting
            // anyway. We test the capability to enable isHardwareGammaEnabled.
        }
    }
}
//---------------------------------------------------------------------
/// Called when @p context is about to be destroyed.
/// Ensures mCurrentContext never dangles: either falls back to the main
/// context, or — when the main context itself is going away — clears all
/// context-related state (including the state cache, which belongs to it).
void GLRenderSystem::_unregisterContext(GLContext *context)
{
    if(mCurrentContext == context) {
        // Change the context to something else so that a valid context
        // remains active. When this is the main context being unregistered,
        // we set the main context to 0.
        if(mCurrentContext != mMainContext) {
            _switchContext(mMainContext);
        } else {
            /// No contexts remain
            mCurrentContext->endCurrent();
            mCurrentContext = 0;
            mMainContext = 0;
            mStateCacheManager = 0;
        }
    }
}
//---------------------------------------------------------------------
/// Register the calling background thread with the render system.
/// Clones the main context (sharing its GL resources), makes the clone
/// current on this thread and runs one-time initialisation on it.
/// @throws Exception ERR_INVALIDPARAMS if the main context does not exist yet.
void GLRenderSystem::registerThread()
{
    // Serialise context creation across threads.
    OGRE_LOCK_MUTEX(mThreadInitMutex);
    // This is only valid once we've created the main context
    if (!mMainContext)
    {
        OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS,
            "Cannot register a background thread before the main context "
            "has been created.",
            "GLRenderSystem::registerThread");
    }
    // Create a new context for this thread. Cloning from the main context
    // will ensure that resources are shared with the main context
    // We want a separate context so that we can safely create GL
    // objects in parallel with the main thread
    GLContext* newContext = mMainContext->clone();
    // Track the clone so it can be cleaned up at shutdown.
    mBackgroundContextList.push_back(newContext);
    // Bind this new context to this thread.
    newContext->setCurrent();
    _oneTimeContextInitialization();
    newContext->setInitialized();
}
//---------------------------------------------------------------------
/// Counterpart of registerThread(); intentionally a no-op.
/// Background contexts stay in mBackgroundContextList and are deleted at
/// shutdown, so there is nothing to release per thread here.
void GLRenderSystem::unregisterThread()
{
}
//---------------------------------------------------------------------
void GLRenderSystem::preExtraThreadsStarted()
{
OGRE_LOCK_MUTEX(mThreadInitMutex);
// free context, we'll need this to share lists
if(mCurrentContext)
mCurrentContext->endCurrent();
}
//---------------------------------------------------------------------
void GLRenderSystem::postExtraThreadsStarted()
{
OGRE_LOCK_MUTEX(mThreadInitMutex);
// reacquire context
if(mCurrentContext)
mCurrentContext->setCurrent();
}
//---------------------------------------------------------------------
/// Number of attached display monitors, as reported by the platform
/// GL support layer.
unsigned int GLRenderSystem::getDisplayMonitorCount() const
{
    const unsigned int monitorCount = mGLSupport->getDisplayMonitorCount();
    return monitorCount;
}
//---------------------------------------------------------------------
void GLRenderSystem::beginProfileEvent( const String &eventName )
{
markProfileEvent("Begin Event: " + eventName);
}
//---------------------------------------------------------------------
void GLRenderSystem::endProfileEvent( void )
{
markProfileEvent("End Event");
}
//---------------------------------------------------------------------
/// Insert a string marker visible to GL debuggers that support the
/// GREMEDY string-marker extension. Empty names are silently dropped.
void GLRenderSystem::markProfileEvent( const String &eventName )
{
    if (!eventName.empty() && GLEW_GREMEDY_string_marker)
        glStringMarkerGREMEDY(eventName.length(), eventName.c_str());
}
//---------------------------------------------------------------------
/// Bind a single vertex element of @p vertexBuffer to the GPU, starting at
/// vertex @p vertexStart.
/// Two routes exist: generic vertex attributes (custom attribs / instancing,
/// used when a vertex program declares the semantic) and the legacy
/// fixed-function client arrays (position/normal/colour/texcoord pointers).
void GLRenderSystem::bindVertexElementToGpu(const VertexElement& elem,
    const HardwareVertexBufferSharedPtr& vertexBuffer,
    const size_t vertexStart)
{
    void* pBufferData = 0;
    const GLHardwareVertexBuffer* hwGlBuffer = static_cast<const GLHardwareVertexBuffer*>(vertexBuffer.get());
    // Bind the VBO; subsequent *Pointer calls interpret pBufferData as an
    // offset into this buffer.
    mStateCacheManager->bindGLBuffer(GL_ARRAY_BUFFER_ARB,
        hwGlBuffer->getGLBufferId());
    pBufferData = VBO_BUFFER_OFFSET(elem.getOffset());
    if (vertexStart)
    {
        // Advance the offset to the first requested vertex.
        pBufferData = static_cast<char*>(pBufferData) + vertexStart * vertexBuffer->getVertexSize();
    }
    VertexElementSemantic sem = elem.getSemantic();
    bool multitexturing = (getCapabilities()->getNumTextureUnits() > 1);
    bool isCustomAttrib = false;
    if (mCurrentVertexProgram)
    {
        isCustomAttrib = mCurrentVertexProgram->isAttributeValid(sem, elem.getIndex());
        if (hwGlBuffer->isInstanceData())
        {
            // Per-instance data: set the attribute divisor and remember the
            // attrib so it can be reset after rendering.
            GLint attrib = GLSLProgramCommon::getFixedAttributeIndex(sem, elem.getIndex());
            glVertexAttribDivisorARB(attrib, hwGlBuffer->getInstanceDataStepRate() );
            mRenderInstanceAttribsBound.push_back(attrib);
        }
    }
    // Custom attribute support
    // tangents, binormals, blendweights etc always via this route
    // builtins may be done this way too
    if (isCustomAttrib)
    {
        GLint attrib = GLSLProgramCommon::getFixedAttributeIndex(sem, elem.getIndex());
        unsigned short typeCount = VertexElement::getTypeCount(elem.getType());
        GLboolean normalised = GL_FALSE;
        switch(elem.getType())
        {
        case VET_COLOUR:
        case VET_COLOUR_ABGR:
        case VET_COLOUR_ARGB:
            // Because GL takes these as a sequence of single unsigned bytes, count needs to be 4
            // VertexElement::getTypeCount treats them as 1 (RGBA)
            // Also need to normalise the fixed-point data
            typeCount = 4;
            normalised = GL_TRUE;
            break;
        case VET_UBYTE4_NORM:
        case VET_SHORT2_NORM:
        case VET_USHORT2_NORM:
        case VET_SHORT4_NORM:
        case VET_USHORT4_NORM:
            // *_NORM types are fixed-point: tell GL to normalise to [0,1]/[-1,1].
            normalised = GL_TRUE;
            break;
        default:
            break;
        };
        glVertexAttribPointerARB(
            attrib,
            typeCount,
            GLHardwareBufferManager::getGLType(elem.getType()),
            normalised,
            static_cast<GLsizei>(vertexBuffer->getVertexSize()),
            pBufferData);
        glEnableVertexAttribArrayARB(attrib);
        // Remember enabled attribs so _render can disable them afterwards.
        mRenderAttribsBound.push_back(attrib);
    }
    else
    {
        // fixed-function & builtin attribute support
        switch(sem)
        {
        case VES_POSITION:
            glVertexPointer(VertexElement::getTypeCount(
                elem.getType()),
                GLHardwareBufferManager::getGLType(elem.getType()),
                static_cast<GLsizei>(vertexBuffer->getVertexSize()),
                pBufferData);
            glEnableClientState( GL_VERTEX_ARRAY );
            break;
        case VES_NORMAL:
            glNormalPointer(
                GLHardwareBufferManager::getGLType(elem.getType()),
                static_cast<GLsizei>(vertexBuffer->getVertexSize()),
                pBufferData);
            glEnableClientState( GL_NORMAL_ARRAY );
            break;
        case VES_DIFFUSE:
            // glColorPointer always takes 4 components here.
            glColorPointer(4,
                GLHardwareBufferManager::getGLType(elem.getType()),
                static_cast<GLsizei>(vertexBuffer->getVertexSize()),
                pBufferData);
            glEnableClientState( GL_COLOR_ARRAY );
            break;
        case VES_SPECULAR:
            // Secondary colour needs an extension in fixed-function GL.
            if (GLEW_EXT_secondary_color)
            {
                glSecondaryColorPointerEXT(4,
                    GLHardwareBufferManager::getGLType(elem.getType()),
                    static_cast<GLsizei>(vertexBuffer->getVertexSize()),
                    pBufferData);
                glEnableClientState( GL_SECONDARY_COLOR_ARRAY );
            }
            break;
        case VES_TEXTURE_COORDINATES:
            if (mCurrentVertexProgram)
            {
                // Programmable pipeline - direct UV assignment
                glClientActiveTextureARB(GL_TEXTURE0 + elem.getIndex());
                glTexCoordPointer(
                    VertexElement::getTypeCount(elem.getType()),
                    GLHardwareBufferManager::getGLType(elem.getType()),
                    static_cast<GLsizei>(vertexBuffer->getVertexSize()),
                    pBufferData);
                glEnableClientState( GL_TEXTURE_COORD_ARRAY );
                // Track the highest texcoord index so it can be disabled later.
                if (elem.getIndex() > mMaxBuiltInTextureAttribIndex)
                    mMaxBuiltInTextureAttribIndex = elem.getIndex();
            }
            else
            {
                // fixed function matching to units based on tex_coord_set
                for (unsigned int i = 0; i < mDisabledTexUnitsFrom; i++)
                {
                    // Only set this texture unit's texcoord pointer if it
                    // is supposed to be using this element's index
                    if (mTextureCoordIndex[i] == elem.getIndex() && i < mFixedFunctionTextureUnits)
                    {
                        if (multitexturing)
                            glClientActiveTextureARB(GL_TEXTURE0 + i);
                        glTexCoordPointer(
                            VertexElement::getTypeCount(elem.getType()),
                            GLHardwareBufferManager::getGLType(elem.getType()),
                            static_cast<GLsizei>(vertexBuffer->getVertexSize()),
                            pBufferData);
                        glEnableClientState( GL_TEXTURE_COORD_ARRAY );
                    }
                }
            }
            break;
        default:
            break;
        };
    } // isCustomAttrib
}
//---------------------------------------------------------------------
#if OGRE_NO_QUAD_BUFFER_STEREO == 0
/// Select which colour buffer subsequent draws go to (quad-buffer stereo).
/// Returns false for an unknown enum value or when GL reports an error,
/// in which case the error is also written to the log.
bool GLRenderSystem::setDrawBuffer(ColourBufferType colourBuffer)
{
    bool result = true;
    switch (colourBuffer)
    {
    case CBT_BACK:       glDrawBuffer(GL_BACK);       break;
    case CBT_BACK_LEFT:  glDrawBuffer(GL_BACK_LEFT);  break;
    case CBT_BACK_RIGHT: glDrawBuffer(GL_BACK_RIGHT); break;
    default:             result = false;              break;
    }
    // Surface any GL error (e.g. requested buffer not present) in the log.
    const GLenum error = glGetError();
    if (result && GL_NO_ERROR != error)
    {
        const char* errorCode = glErrorToString(error);
        String errorString = "GLRenderSystem::setDrawBuffer("
            + Ogre::StringConverter::toString(colourBuffer) + "): " + errorCode;
        Ogre::LogManager::getSingleton().logMessage(errorString);
        result = false;
    }
    return result;
}
#endif
/// Read back a rectangle @p src of @p vp's target into the CPU-side
/// PixelBox @p dst, from the front or back buffer as selected by @p buffer.
/// @throws Exception ERR_INVALIDPARAMS if dst.format has no GL equivalent.
void GLRenderSystem::_copyContentsToMemory(Viewport* vp, const Box& src, const PixelBox &dst, RenderWindow::FrameBuffer buffer)
{
    GLenum format = GLPixelUtil::getGLOriginFormat(dst.format);
    GLenum type = GLPixelUtil::getGLOriginDataType(dst.format);
    if ((format == GL_NONE) || (type == 0))
    {
        OGRE_EXCEPT(Exception::ERR_INVALIDPARAMS, "Unsupported format.", "GLRenderSystem::copyContentsToMemory" );
    }
    // Switch context if different from current one
    _setViewport(vp);
    // Honour a destination row pitch wider than the copied width.
    if(dst.getWidth() != dst.rowPitch)
        glPixelStorei(GL_PACK_ROW_LENGTH, dst.rowPitch);
    // Must change the packing to ensure no overruns!
    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glReadBuffer((buffer == RenderWindow::FB_FRONT)? GL_FRONT : GL_BACK);
    // GL window coordinates have their origin at the bottom-left, so the
    // read rectangle's y is flipped against the target height.
    uint32_t height = vp->getTarget()->getHeight();
    glReadPixels((GLint)src.left, (GLint)(height - src.bottom),
        (GLsizei)dst.getWidth(), (GLsizei)dst.getHeight(),
        format, type, dst.getTopLeftFrontPixelPtr());
    // restore default alignment
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glPixelStorei(GL_PACK_ROW_LENGTH, 0);
    // glReadPixels returns rows bottom-up; flip to top-down order.
    PixelUtil::bulkPixelVerticalFlip(dst);
}
//---------------------------------------------------------------------
void GLRenderSystem::initialiseExtensions(void)
{
// Set version string
const GLubyte* pcVer = glGetString(GL_VERSION);
assert(pcVer && "Problems getting GL version string using glGetString");
String tmpStr = (const char*)pcVer;
mDriverVersion.fromString(tmpStr.substr(0, tmpStr.find(' ')));
LogManager::getSingleton().logMessage("GL_VERSION = " + mDriverVersion.toString());
// Get vendor
const GLubyte* pcVendor = glGetString(GL_VENDOR);
tmpStr = (const char*)pcVendor;
LogManager::getSingleton().logMessage("GL_VENDOR = " + tmpStr);
mVendor = tmpStr.substr(0, tmpStr.find(' '));
// Get renderer
const GLubyte* pcRenderer = glGetString(GL_RENDERER);
tmpStr = (const char*)pcRenderer;
LogManager::getSingleton().logMessage("GL_RENDERER = " + tmpStr);
// Set extension list
StringStream ext;
String str;
const GLubyte* pcExt = glGetString(GL_EXTENSIONS);
assert(pcExt && "Problems getting GL extension string using glGetString");
LogManager::getSingleton().logMessage("GL_EXTENSIONS = " + String((const char*)pcExt));
ext << pcExt;
while(ext >> str)
{
mExtensionList.insert(str);
}
}
}
|
// This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// All rights reserved.
#include <algorithm>
#include <iostream>
extern "C"
{
#include "iio.h"
}
#include "brox_optic_flow.h"
#define PAR_DEFAULT_NPROC 0
#define PAR_DEFAULT_ALPHA 50
#define PAR_DEFAULT_GAMMA 10
#define PAR_DEFAULT_NSCALES 10
#define PAR_DEFAULT_ZFACTOR 0.5
#define PAR_DEFAULT_TOL 0.0001
#define PAR_DEFAULT_INNER_ITER 1
#define PAR_DEFAULT_OUTER_ITER 15
#define PAR_DEFAULT_VERBOSE 0
using namespace std;
/**
*
* Function to read images using the iio library
* It allocates memory for the image and returns true if it
* correctly reads the image.
*
*/
bool
read_image (const char *fname, float **f, int *w, int *h)
{
*f = iio_read_image_float (fname, w, h);
return *f ? true : false;
}
/**
*
* Main program:
* This program reads the following parameters from the console and
* then computes the optical flow:
* -I1 first image
* -I2 second image
* -out_file name of the output flow field
* -processors number of threads used with the OpenMP library
* -alpha smoothing parameter
* -gamma gradient constancy parameter
* -nscales number of scales for the pyramidal approach
* -zoom_factor reduction factor for creating the scales
* -TOL stopping criterion threshold for the iterative process
* -inner_iter number of inner iterations
* -outer_iter number of outer iterations
* -verbose switch on/off messages
*
*/
/**
 * Entry point: parse the command-line parameters documented above,
 * read the two input images, run the Brox optical-flow estimation and
 * write the resulting flow field as a 2-channel .flo image.
 *
 * Always returns 0 (the original command-line contract is preserved).
 */
int
main (int argc, char *argv[])
{
  if (argc < 3)
    cout << "Usage: " << argv[0]
      << " I1 I2 [out_file processors"
      << " alpha gamma nscales zoom_factor"
      << " TOL inner_iter outer_iter verbose]" << endl;
  else
    {
      int i = 1;
      //read parameters from the console
      const char *image1 = argv[i];
      i++;
      const char *image2 = argv[i];
      i++;
      const char *outfile = (argc >= 4) ? argv[i] : "flow.flo";
      i++;
      int nproc = (argc > i) ? atoi (argv[i]) : PAR_DEFAULT_NPROC;
      i++;
      float alpha = (argc > i) ? atof (argv[i]) : PAR_DEFAULT_ALPHA;
      i++;
      float gamma = (argc > i) ? atof (argv[i]) : PAR_DEFAULT_GAMMA;
      i++;
      int nscales = (argc > i) ? atoi (argv[i]) : PAR_DEFAULT_NSCALES;
      i++;
      float zfactor = (argc > i) ? atof (argv[i]) : PAR_DEFAULT_ZFACTOR;
      i++;
      float TOL = (argc > i) ? atof (argv[i]) : PAR_DEFAULT_TOL;
      i++;
      int initer = (argc > i) ? atoi (argv[i]) : PAR_DEFAULT_INNER_ITER;
      i++;
      int outiter = (argc > i) ? atoi (argv[i]) : PAR_DEFAULT_OUTER_ITER;
      i++;
      int verbose = (argc > i) ? atoi (argv[i]) : PAR_DEFAULT_VERBOSE;
      i++;
      //check parameters: out-of-range values fall back to the defaults
      if (nproc > 0)
        omp_set_num_threads (nproc);
      if (alpha <= 0)
        alpha = PAR_DEFAULT_ALPHA;
      if (gamma < 0)
        gamma = PAR_DEFAULT_GAMMA;
      if (nscales <= 0)
        nscales = PAR_DEFAULT_NSCALES;
      if (zfactor <= 0 || zfactor >= 1)
        zfactor = PAR_DEFAULT_ZFACTOR;
      if (TOL <= 0)
        TOL = PAR_DEFAULT_TOL;
      if (initer <= 0)
        initer = PAR_DEFAULT_INNER_ITER;
      if (outiter <= 0)
        outiter = PAR_DEFAULT_OUTER_ITER;
      int nx, ny, nx1, ny1;
      float *I1, *I2;
      //read the input images
      bool correct1 = read_image (image1, &I1, &nx, &ny);
      bool correct2 = read_image (image2, &I2, &nx1, &ny1);
      // if the images are correct, compute the optical flow
      if (correct1 && correct2 && nx == nx1 && ny == ny1)
        {
          //set the number of scales according to the size of the
          //images. The value N is computed to assure that the smaller
          //images of the pyramid don't have a size smaller than 16x16
          const float N =
            1 + log (std::min (nx, ny) / 16.) / log (1. / zfactor);
          if ((int) N < nscales)
            nscales = (int) N;
          // if(verbose)
          // NOTE(review): this report is printed unconditionally even
          // though the commented-out guard suggests it was meant to be
          // verbose-only — confirm intent before gating it.
          cout << endl << "ncores:" << nproc << " alpha:" << alpha
            << " gamma:" << gamma << " scales:" << nscales << " nu:" <<
            zfactor << " TOL:" << TOL << " inner:" << initer << " outer:" <<
            outiter << endl;
          //allocate memory for the flow
          float *u = new float[nx * ny];
          float *v = new float[nx * ny];
          //compute the optic flow
          brox_optic_flow (I1, I2, u, v, nx, ny, alpha, gamma,
                           nscales, zfactor, TOL, initer, outiter, verbose);
          //save the flow as interleaved (u, v) pairs
          float *f = new float[nx * ny * 2];
          for (int i = 0; i < nx * ny; i++)
            {
              f[2 * i] = u[i];
              f[2 * i + 1] = v[i];
            }
          iio_save_image_float_vec ((char *) outfile, f, nx, ny, 2);
          //free dynamic memory (images come from iio/malloc, flow from new[])
          free (I1);
          free (I2);
          delete[]u;
          delete[]v;
          delete[]f;
        }
      else
        {
          //also release whatever was read: read_image leaves the pointer
          //NULL on failure, so free() is safe here. This fixes a leak when
          //only one image could be read or the sizes differ.
          free (I1);
          free (I2);
          cerr <<
            "Cannot read the images or the size of the images are not equal" <<
            endl;
        }
    }
  return 0;
}
|
/* ---------------------------------------------------------------- *
Antti Jumpponen <kuumies@gmail.com>
The implementation of kuu::rasperi::OpenGLPhongShader class.
* ---------------------------------------------------------------- */
#include "rasperi_opengl_phong_shader.h"
#include <glm/gtc/matrix_inverse.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include "rasperi_opengl_shader_loader.h"
#include <QDebug>
namespace kuu
{
namespace rasperi
{
using namespace opengl_shader_loader;
/* ---------------------------------------------------------------- *
* ---------------------------------------------------------------- */
/* Private implementation: owns the GL program object and the cached
 * uniform locations for the Phong shader. */
struct OpenGLPhongShader::Impl
{
    /* ------------------------------------------------------------ *
     * Load/link the shader program and cache every uniform location.
     * ------------------------------------------------------------ */
    Impl(OpenGLPhongShader* self)
        : self(self)
    {
        pgm = load("shaders/rasperi_opengl_phong_shader.vsh",
                   "shaders/rasperi_opengl_phong_shader.fsh");
        uniformProjectionMatrix = glGetUniformLocation(pgm, "matrices.projection");
        uniformViewMatrix = glGetUniformLocation(pgm, "matrices.view");
        uniformModelMatrix = glGetUniformLocation(pgm, "matrices.model");
        uniformNormalMatrix = glGetUniformLocation(pgm, "matrices.normal");
        uniformCameraPosition = glGetUniformLocation(pgm, "cameraPosition");
        uniformSunDirection = glGetUniformLocation(pgm, "sunDirection");
        uniformAmbient = glGetUniformLocation(pgm, "ambient");
        uniformDiffuse = glGetUniformLocation(pgm, "diffuse");
        uniformSpecular = glGetUniformLocation(pgm, "specular");
        uniformSpecularPower = glGetUniformLocation(pgm, "specularPower");
        uniformAmbientSampler = glGetUniformLocation(pgm, "ambientSampler");
        uniformDiffuseSampler = glGetUniformLocation(pgm, "diffuseSampler");
        uniformSpecularSampler = glGetUniformLocation(pgm, "specularSampler");
        // NOTE(review): "speculerPowerSampler" / "useSpeculerPowerSampler"
        // spellings must match the fragment shader source exactly — verify
        // against rasperi_opengl_phong_shader.fsh before "fixing" the typo.
        uniformSpecularPowerSampler = glGetUniformLocation(pgm, "speculerPowerSampler");
        uniformNormalSampler = glGetUniformLocation(pgm, "normalSampler");
        uniformUseAmbientSampler = glGetUniformLocation(pgm, "useAmbientSampler");
        uniformUseDiffuseSampler = glGetUniformLocation(pgm, "useDiffuseSampler");
        uniformUseSpecularSampler = glGetUniformLocation(pgm, "useSpecularSampler");
        uniformUseSpecularPowerSampler = glGetUniformLocation(pgm, "useSpeculerPowerSampler");
        uniformUseNormalSampler = glGetUniformLocation(pgm, "useNormalSampler");
        uniformRgbSpecularSampler = glGetUniformLocation(pgm, "rgbSpecularSampler");
    }
    /* ------------------------------------------------------------ *
     * Release the GL program.
     * ------------------------------------------------------------ */
    ~Impl()
    {
        glDeleteProgram(pgm);
    }
    /* ------------------------------------------------------------ *
     * Activate the program and upload all shading parameters read
     * from the owning OpenGLPhongShader. Texture samplers are bound
     * to fixed units 0..4 (ambient, diffuse, specular, specular
     * power, normal).
     * ------------------------------------------------------------ */
    void use()
    {
        // Normal matrix: inverse-transpose of the model matrix's 3x3 part.
        const glm::mat3 normalMatrix = glm::mat3(glm::inverseTranspose(self->modelMatrix));
        glUseProgram(pgm);
        glUniformMatrix4fv(uniformModelMatrix, 1, GL_FALSE, glm::value_ptr(self->modelMatrix));
        glUniformMatrix4fv(uniformViewMatrix, 1, GL_FALSE, glm::value_ptr(self->viewMatrix));
        glUniformMatrix4fv(uniformProjectionMatrix, 1, GL_FALSE, glm::value_ptr(self->projectionMatrix));
        glUniformMatrix3fv(uniformNormalMatrix, 1, GL_FALSE, glm::value_ptr(normalMatrix));
        glUniform3fv( uniformCameraPosition, 1, glm::value_ptr(self->cameraPosition));
        glUniform3fv( uniformSunDirection, 1, glm::value_ptr(self->lightDirection));
        glUniform3fv( uniformAmbient, 1, glm::value_ptr(self->ambient));
        glUniform3fv( uniformDiffuse, 1, glm::value_ptr(self->diffuse));
        glUniform3fv( uniformSpecular, 1, glm::value_ptr(self->specular));
        glUniform1f( uniformSpecularPower, self->specularPower);
        glUniform1i( uniformAmbientSampler, 0);
        glUniform1i( uniformDiffuseSampler, 1);
        glUniform1i( uniformSpecularSampler, 2);
        glUniform1i( uniformSpecularPowerSampler, 3);
        glUniform1i( uniformNormalSampler, 4);
        glUniform1i( uniformUseAmbientSampler, self->useAmbientSampler);
        glUniform1i( uniformUseDiffuseSampler, self->useDiffuseSampler);
        glUniform1i( uniformUseSpecularSampler, self->useSpecularSampler);
        glUniform1i( uniformUseSpecularPowerSampler, self->useSpecularPowerSampler);
        glUniform1i( uniformUseNormalSampler, self->useNormalSampler);
        glUniform1i( uniformRgbSpecularSampler, self->rgbSpecularSampler);
    }
    /* ------------------------------------------------------------ *
     * ------------------------------------------------------------ */
    OpenGLPhongShader* self;   // owning facade (non-owning back-pointer)
    GLuint pgm;                // linked GL program object
    // Cached uniform locations (queried once in the constructor):
    GLint uniformProjectionMatrix;
    GLint uniformViewMatrix;
    GLint uniformModelMatrix;
    GLint uniformNormalMatrix;
    GLint uniformCameraPosition;
    GLint uniformSunDirection;
    GLint uniformAmbient;
    GLint uniformDiffuse;
    GLint uniformSpecular;
    GLint uniformSpecularPower;
    GLint uniformAmbientSampler;
    GLint uniformDiffuseSampler;
    GLint uniformSpecularSampler;
    GLint uniformSpecularPowerSampler;
    GLint uniformNormalSampler;
    GLint uniformUseAmbientSampler;
    GLint uniformUseDiffuseSampler;
    GLint uniformUseSpecularSampler;
    GLint uniformUseSpecularPowerSampler;
    GLint uniformUseNormalSampler;
    GLint uniformRgbSpecularSampler;
};
/* ---------------------------------------------------------------- *
* ---------------------------------------------------------------- */
// Construct the shader facade; Impl's constructor loads the GL program
// and caches uniform locations (assumes a current GL context — verify).
OpenGLPhongShader::OpenGLPhongShader()
    : impl(std::make_shared<Impl>(this))
{}
/* ---------------------------------------------------------------- *
* ---------------------------------------------------------------- */
// Activate the program and upload the current shading parameters
// (delegates to Impl::use).
void OpenGLPhongShader::use()
{ impl->use(); }
} // namespace rasperi
} // namespace kuu
|
/*
* Michael Gabilondo
* CDA5110 Program 2
* Feb. 24, 2010
*/
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>
#include <time.h>
#include <limits.h>
#include <math.h>
// the line below produces extra output
// #define DEBUG
// generate double-precision floating point numbers in the
// range [0.0, RAND_MAX_GEN).
#define RAND_MAX_GEN 50.0
double random_double();
double* generate_random(size_t num, int rank);
double find_sum(double* numbers, size_t size);
void start_find_sum(int rank, int size,
double* random_numbers,
size_t num_per_proc,
double* overall_sum);
void start_find_psum(int rank, int size,
double* random_numbers, size_t num_per_proc,
double sum);
/* Entry point: each rank generates its share of `num` random numbers,
 * all ranks cooperate to compute the global prefix sums, the rank
 * owning index `pos` prints that prefix sum, and rank 0 reports the
 * elapsed wall time.
 *
 * Optional arguments: argv[1] = total count `num`, argv[2] = `pos`.
 */
int main(int argc, char **argv)
{
  int rank, size;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Barrier(MPI_COMM_WORLD);
  /* `num` is the total number of random numbers between all
   * processes; allow an optional parameter to set `num`
   */
  size_t num = 90000000;
  size_t pos = 5;
  if (argc == 3) {
    /* strtoull covers the full size_t range; atoi would silently
     * truncate counts beyond INT_MAX */
    num = (size_t) strtoull(argv[1], NULL, 10);
    pos = (size_t) strtoull(argv[2], NULL, 10);
  }
  /* generate some random numbers for `rank` */
  size_t num_per_proc = num / size;
  double* random_numbers = generate_random(num_per_proc,
      rank);
#ifdef DEBUG
  if (rank == 0)
    /* %zu is the correct conversion for size_t (%zd is for signed) */
    printf("Generated %zu random numbers per process\n",
        num_per_proc);
#endif
  double sum;
  double elapsed = 0;
  double start = MPI_Wtime();
  /* phase 1: tree reduction leaves the global sum on rank 0 */
  start_find_sum(rank, size, random_numbers, num_per_proc,
      &sum);
  /* phase 2: downward sweep turns sums into per-element prefix sums */
  start_find_psum(rank, size, random_numbers, num_per_proc,
      sum);
  // this barrier ensures that all processes have
  // finished computing their prefix sums, since we don't
  // want to set `end` before that.
  MPI_Barrier(MPI_COMM_WORLD);
  // print the requested prefix sum (only the owning rank matches)
  size_t start_pos = num_per_proc * rank;
  size_t end_pos = num_per_proc * (rank+1) - 1;
  if (pos >= start_pos && pos <= end_pos) {
    printf("Prefix sum number %zu is %lf\n",
        pos, random_numbers[pos - start_pos]);
  }
  double end = MPI_Wtime();
  if (rank == 0) {
    elapsed = end - start;
    printf("Took %lf seconds\n", elapsed);
  }
#ifdef DEBUG
  size_t j;
  for (j = 0; j < num_per_proc; j++) {
    size_t j_real_pos = start_pos + j;
    printf("Prefix sum %02zu: %lf\n", j_real_pos,
        random_numbers[j]);
  }
#endif
  free(random_numbers);
  MPI_Finalize();
  return 0;
}
// return a random double in [0.0, 1.0)
/* Uniform random double in the half-open interval [0.0, 1.0).
 * Dividing by RAND_MAX + 1 (not RAND_MAX) keeps 1.0 unreachable. */
double random_double()
{
  const double denom = (double)RAND_MAX + (double)1;
  return (double)random() / denom;
}
/* Generate `num` random nunmbers and returns a pointer to
* the first number; the caller is responsible for freeing
*/
/* Allocate and fill `num` random doubles in [0, RAND_MAX_GEN); the
 * caller owns (and must free) the returned buffer. The seed mixes in
 * `rank` so every MPI process draws a different stream. Exits the
 * program if the allocation fails. */
double* generate_random(size_t num, int rank)
{
  // seed the random number generator; add the rank to the
  // time to make sure every process has a different seed
  srandom(time(NULL) + rank);
  double* buf = (double*) malloc(sizeof(double) * num);
  if (buf == NULL) {
    printf("%d Failed to malloc\n", rank);
    exit(EXIT_FAILURE);
  }
  size_t i;
  for (i = 0; i < num; i++) {
    buf[i] = random_double() * RAND_MAX_GEN;
#ifdef DEBUG
    size_t i_real_pos = num * rank + i;
    printf("Number %02zu: %lf\n", i_real_pos,
        buf[i]);
#endif
  }
  return buf;
}
// returns the sum of all the numbers in `number`, an array
// of size `size`
/* Sum of numbers[0..size); returns 0.0 for an empty array.
 * Accumulates left-to-right (same order as a plain index loop, so the
 * floating-point result is bit-identical). */
double find_sum(double* numbers, size_t size)
{
  double total = 0.0;
  const double* const end = numbers + size;
  for (const double* p = numbers; p != end; ++p) {
    total += *p;
  }
  return total;
}
/*
* rank - the current process
* size - the total number of processes
* random_numbers - the set of numbers of the current
* process to find the max of.
* num_per_proc - the length of `random_numbers` array
* overall_sum - the sum of this rank will be written
*
* Find the max among all processes, each having a different
* `random_numbers`
*/
/* Tree reduction: after log2(size) rounds, *overall_sum on rank 0 holds
 * the global sum; other ranks keep their partial sums.
 * Assumes `size` is a power of two (the even/odd pairing relies on it —
 * TODO confirm with the launch configuration). */
void start_find_sum(int rank, int size,
    double* random_numbers, size_t num_per_proc,
    double* overall_sum)
{
  MPI_Status status;
  /* local partial sum of this rank's numbers */
  double sum = find_sum(random_numbers, num_per_proc);
  int still_alive = 1;
  int level;
  /* `level` is the current level of the complete binary
   * tree. At level 0, there are n nodes (processes); each
   * of these nodes has its sum already, computed by
   * find_sum. A node with label `rank` that is in an
   * even-numbered position on the current level becomes a
   * parent on the next level; its new sum is the sum of
   * its current sum and the sum of node `rank + 2^level`.
   * A node with label `rank` in an odd-numbered position
   * on the current level is responsible for sending its
   * sum to the parent, `rank - 2^level`; after that, the
   * node becomes inactive (still_alive is set to 0).
   * After log2(size) levels have been created, node with
   * rank 0 contains the sum. NOTE: positions start at
   * ZERO, not one.
   *
   * Note that for the sequential version, rank == 1 and
   * so this for loop will be skipped.
   */
  for (level = 0; level < (int)log2(size); level++) {
    if (still_alive) {
      /* position of this rank within the current level */
      int position = rank / (int)pow(2, level);
      if (position % 2 == 0) {
        // I am a receiver
        double sender_sum;
        int sending_rank = rank + (int)pow(2, level);
        MPI_Recv(&sender_sum, 1, MPI_DOUBLE, sending_rank,
            0, MPI_COMM_WORLD, &status);
        sum += sender_sum;
      }
      else {
        // I am a sender
        int receiving_rank = rank - (int)pow(2, level);
        MPI_Send(&sum, 1, MPI_DOUBLE, receiving_rank, 0,
            MPI_COMM_WORLD);
        still_alive = 0;
      }
    }
    /* keep all ranks in lock-step between tree levels */
    MPI_Barrier(MPI_COMM_WORLD);
  }
  *overall_sum = sum;
}
/*
* Description of start_find_psum
* A process with rank R on level i that is an even-numbered
* position as its sum set to the sum of this process R on
* level i + 1, rather than level i. This sum was created by
* adding the old sum from level i with the sum of process
* R + 2^i (the sibling of process R). The correct sum for
* process R on level i can be restored by subtracting away
* the sum of the sibling process of R.
*
* (The above about restoring the correct sum isn't necessary and is
* commented out.)
*
* Each node (process) in the tree has an associated prefix
* sum that represents the prefix sum of all the numbers up
* to the last number of the rightmost leaf process in the
* tree rooted at that node. Rank 0 is the root node and so
* its psum is set to sum.
*
* Iteration is from the level under the root to the bottom
* level. A node in an odd-numbered position on the current
* level receives its prefix sum from its parent. Because
* even-numbered nodes become parents on the next level, the
* parent of any odd numbered node of rank R on level i is R
* - 2^i. A node in an even-numbered position on level i has
* its prefix sum set as the prefix sum of its parent minus
* the regular sum of its sibling. Because a node in an
* even-numbered position is also its own parent, it already
* has the prefix sum of its parent. It also has access to
* the regular sum of its sibling from the time it fixed its
* own sum, so that value can be reused.
*
* Now every node of rank R has its prefix sum; this
* prefix sum is the sum of all of the `random_numbers` of
* nodes from Rank 0 to Rank R. We will overwrite
* `random_numbers` to be the prefix sums for
* num_per_proc*rank TO num_per_proc*(rank + 1) - 1,
* from a global perspective, if we consider each
* random_numbers as part of a distributed array.
*/
/* Downward sweep of the prefix-sum tree (see the long description above):
 * distributes per-node prefix sums from the root, then rewrites this
 * rank's `random_numbers` in place with its share of the global prefix
 * sums. `sum` is this rank's (possibly combined) sum from start_find_sum.
 * Assumes `size` is a power of two and num_per_proc >= 1 — TODO confirm. */
void start_find_psum(int rank, int size,
    double* random_numbers, size_t num_per_proc,
    double sum)
{
  double psum;
  int level;
  MPI_Status status;
  /* the root's prefix sum is the global total */
  if (rank == 0) {
    psum = sum;
  }
  /* walk from just below the root down to the leaves */
  for (level = (int)log2(size) - 1; level >= 0; level--) {
    // only trigger the processes on the current level
    if (level == 0 || rank % (int)pow(2, level) == 0) {
      int position = rank / (int)pow(2, level);
      if (position % 2 == 0) {
        double sender_sum;
        int sending_rank = rank + (int)pow(2, level);
        // in the previous level, this process was the parent
        // of `sending_rank`, and had it as its right child,
        // so this psum value is the psum value of the parent
        // of our sibling
        MPI_Send(&psum, 1, MPI_DOUBLE,
            sending_rank, // RIGHT CHILD
            0, MPI_COMM_WORLD);
        MPI_Recv(&sender_sum, 1, MPI_DOUBLE,
            sending_rank, 0, MPI_COMM_WORLD, &status);
        // fix the sum to be the correct value:
        // this isn't required, so it's commented out
        //sum -= sender_sum;
        // psum <- (prefix sum of parent) - (sum of sibling)
        psum -= sender_sum;
      }
      else{
        int receiving_rank = rank - (int)pow(2, level);
        MPI_Recv(&psum, 1, MPI_DOUBLE,
            receiving_rank, // PARENT
            0, MPI_COMM_WORLD, &status);
        // send sum to receiving_rank so it can fix its sum
        MPI_Send(&sum, 1, MPI_DOUBLE,
            receiving_rank, 0, MPI_COMM_WORLD);
      }
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
  // put the prefix sums associated with this node in
  // random_numbers: psum is the prefix sum ENDING at this rank's last
  // element, so work backwards subtracting each original value.
  // NOTE(review): `int j` overflows if num_per_proc > INT_MAX — confirm
  // the expected problem sizes.
  double next_sum = random_numbers[num_per_proc-1];
  random_numbers[num_per_proc-1] = psum;
  int j;
  for (j = num_per_proc - 2; j >= 0; j--) {
    double next_sum_tmp = random_numbers[j];
    random_numbers[j] = random_numbers[j+1] -
      next_sum;
    next_sum = next_sum_tmp;
  }
}
|
// Copyright (c) 2018-2019 The Dnxcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <util/spanparsing.h>
#include <span.h>
#include <string>
#include <vector>
namespace spanparsing {
/** If sp starts with the literal `str`, consume it (advance sp past the
 *  match) and return true; otherwise leave sp untouched and return false. */
bool Const(const std::string& str, Span<const char>& sp)
{
    const size_t len = str.size();
    if ((size_t)sp.size() < len) return false;
    if (!std::equal(str.begin(), str.end(), sp.begin())) return false;
    sp = sp.subspan(len);
    return true;
}
/** If sp has the shape `str(...)`, consume the wrapper: sp becomes the
 *  span between the parentheses and true is returned. Otherwise sp is
 *  unchanged and false is returned. */
bool Func(const std::string& str, Span<const char>& sp)
{
    const size_t len = str.size();
    if ((size_t)sp.size() < len + 2) return false;
    if (sp[len] != '(' || sp[sp.size() - 1] != ')') return false;
    if (!std::equal(str.begin(), str.end(), sp.begin())) return false;
    sp = sp.subspan(len + 1, sp.size() - len - 2);
    return true;
}
/** Extract the leading expression from sp: everything up to the first
 *  top-level ')' or ',' (parentheses nest). The returned span holds the
 *  expression; sp is advanced past it (to the delimiter, if any). */
Span<const char> Expr(Span<const char>& sp)
{
    int depth = 0;
    auto it = sp.begin();
    for (; it != sp.end(); ++it) {
        const char c = *it;
        if (c == '(') {
            ++depth;
        } else if (c == ')') {
            if (depth == 0) break;
            --depth;
        } else if (c == ',' && depth == 0) {
            break;
        }
    }
    const auto consumed = it - sp.begin();
    Span<const char> expr = sp.first(consumed);
    sp = sp.subspan(consumed);
    return expr;
}
std::vector<Span<const char>> Split(const Span<const char>& sp, char sep)
{
std::vector<Span<const char>> ret;
auto it = sp.begin();
auto start = it;
while (it != sp.end()) {
if (*it == sep) {
ret.emplace_back(start, it);
start = it + 1;
}
++it;
}
ret.emplace_back(start, it);
return ret;
}
} // namespace spanparsing
|
#include "statistics.cpp"
#include "input_helper.h"
#include "assert.h"
using namespace std;
// Smoke test: a default-constructed (empty) statistics object must be
// printable and able to produce its descriptive statistics without crashing.
void zero_length_array_test() {
    cout << __func__ << endl;
    statistics<double> myStats;
    cout << "Entered Values: " << myStats << endl;
    cout << myStats.get_descriptive_statistics() << endl;
}
// Test to ensure that a single length list works
// Test to ensure that a single length list works.
// Returns false if the backing CSV could not be parsed.
bool single_length_array_test() {
    cout << __func__ << endl;
    const std::string filename = "./tests/single.csv";
    std::vector<double> csv_content;
    const bool parsed = input_helper::parse_csv(filename, csv_content);
    if (!parsed) {
        cout << "Something went wrong parsing the csv!" << endl;
        return false;
    }
    statistics<double> myStats(csv_content);
    cout << "Entered Values: " << myStats << endl;
    cout << myStats.get_descriptive_statistics() << endl;
    return true;
}
// Test that a sequence of length 2 can be used
// Test that a sequence of length 2 can be used.
// Returns false if the backing CSV could not be parsed.
bool size_two_length_array_test() {
    cout << __func__ << endl;
    const std::string filename = "./tests/two.csv";
    std::vector<double> csv_content;
    const bool parsed = input_helper::parse_csv(filename, csv_content);
    if (!parsed) {
        cout << "Something went wrong parsing the csv!" << endl;
        return false;
    }
    statistics<double> myStats(csv_content);
    cout << "Entered Values: " << myStats << endl;
    cout << myStats.get_descriptive_statistics() << endl;
    return true;
}
// Test that odd sized sequences work
// Test that odd sized sequences work (exercises median of an odd-length set).
// Returns false if the backing CSV could not be parsed.
bool odd_size_seq_test() {
    cout << __func__ << endl;
    const std::string filename = "./tests/odd.csv";
    std::vector<double> csv_content;
    const bool parsed = input_helper::parse_csv(filename, csv_content);
    if (!parsed) {
        cout << "Something went wrong parsing the csv!" << endl;
        return false;
    }
    statistics<double> myStats(csv_content);
    cout << "Entered Values: " << myStats << endl;
    cout << myStats.get_descriptive_statistics() << endl;
    return true;
}
// Test that even sized sequenced work
// Test that even sized sequences work (exercises median of an even-length set).
// Returns false if the backing CSV could not be parsed.
bool even_size_seq_test() {
    cout << __func__ << endl;
    const std::string filename = "./tests/even.csv";
    std::vector<double> csv_content;
    const bool parsed = input_helper::parse_csv(filename, csv_content);
    if (!parsed) {
        cout << "Something went wrong parsing the csv!" << endl;
        return false;
    }
    statistics<double> myStats(csv_content);
    cout << "Entered Values: " << myStats << endl;
    cout << myStats.get_descriptive_statistics() << endl;
    return true;
}
// Test driver: runs every statistics test case and aborts on the first failure.
int main (void) {
    // The zero-length case returns void: it only has to run without crashing,
    // so there is nothing to assert on.
    zero_length_array_test();
    // NOTE: assert() compiles to nothing under NDEBUG; this driver must be
    // built without NDEBUG for the checks below to actually run.
    assert(single_length_array_test());
    assert(size_two_length_array_test());
    assert(odd_size_seq_test());
    assert(even_size_seq_test());
    return 0;
}
|
#ifndef BKCRACK_KEYS_HPP
#define BKCRACK_KEYS_HPP
#include <iostream>
#include "types.hpp"
/// Keys defining the cipher state
///
/// Holds the three 32-bit working keys (X, Y, Z) of the cipher. The default
/// constructor arguments are the well-known initial key values of the
/// traditional PKWARE/ZipCrypto stream cipher -- presumably the cipher this
/// tool attacks; confirm against the implementation in the matching .cpp.
class Keys
{
    public:
        /// Constructor
        ///
        /// \param x initial X key (defaults to the standard init value)
        /// \param y initial Y key (defaults to the standard init value)
        /// \param z initial Z key (defaults to the standard init value)
        Keys(dword x = 0x12345678, dword y = 0x23456789, dword z = 0x34567890);

        /// Update the state with a plaintext byte
        void update(byte p);

        /// Update the state backward with a ciphertext byte
        void updateBackward(byte c);

        /// \return X value
        dword getX() const;

        /// \return Y value
        dword getY() const;

        /// \return Z value
        dword getZ() const;

    private:
        dword x, y, z;
};

/// Insert a representation of keys into the stream os
std::ostream& operator<<(std::ostream& os, const Keys& keys);
#endif // BKCRACK_KEYS_HPP
|
#include <abi/Syscalls.h>
#include <assert.h>
#include <libsystem/core/Plugs.h>
#include <libsystem/process/Launchpad.h>
// Returns the identifier of the current process (thin wrapper over the
// platform plug layer).
int process_this()
{
    return __plug_process_this();
}
// Returns the name of the current process (thin wrapper over the plug layer).
const char *process_name()
{
    return __plug_process_name();
}
// Runs `command` by spawning the system shell with `-c <command>`.
// On success *pid receives the child's id.
// NOTE(review): the shell name and binary path are hard-coded here;
// launchpad_launch presumably takes ownership of the launchpad -- confirm it
// is also released on launch failure.
Result process_run(const char *command, int *pid, TaskFlags flags)
{
    Launchpad *launchpad = launchpad_create("shell", "/Applications/shell/shell");
    launchpad_flags(launchpad, flags);
    launchpad_argument(launchpad, "-c");
    launchpad_argument(launchpad, command);
    return launchpad_launch(launchpad, pid);
}
// Exits the current process with `code` after tearing down libsystem.
// __plug_uninitialize is expected not to return; the unreachable hint tells
// the compiler so.
void NO_RETURN process_exit(int code)
{
    __plug_uninitialize(code);
    __builtin_unreachable();
}
// Terminates the process immediately with PROCESS_FAILURE, skipping the
// libsystem teardown that process_exit() performs.
void NO_RETURN process_abort()
{
    __plug_process_exit(PROCESS_FAILURE);
    __builtin_unreachable();
}
// Requests cancellation of the process identified by `pid`.
Result process_cancel(int pid)
{
    return __plug_process_cancel(pid);
}
// Copies the current working directory into `buffer` (at most `size` bytes).
Result process_get_directory(char *buffer, size_t size)
{
    return __plug_process_get_directory(buffer, size);
}
// Changes the current working directory to `directory`.
Result process_set_directory(const char *directory)
{
    return __plug_process_set_directory(directory);
}
// Resolves `path` (e.g. relative to the working directory) to a full path via
// the plug layer.
String process_resolve(String path)
{
    return __plug_process_resolve(path);
}
// Suspends the current process for `time` units.
// NOTE(review): the unit (ms?) is not visible here -- check the plug layer.
Result process_sleep(int time)
{
    return __plug_process_sleep(time);
}
// Blocks until process `pid` terminates; its exit status is stored in
// *exit_value.
Result process_wait(int pid, int *exit_value)
{
    return __plug_process_wait(pid, exit_value);
}
|
#include "config.h"
// Entry point: parses command-line options, configures the web server, then
// enters its event loop (which does not return under normal operation).
int main(int argc, char *argv[])
{
    // Database connection settings to edit for your deployment:
    // login name, password, and database name.
    string user = "root";
    string passwd = "root";
    string databasename = "mydb";
    // Parse command-line options.
    Config config;
    config.parse_arg(argc, argv);
    // Server instance, created on the stack.
    WebServer server;
    // Initialize the server with parsed options.
    server.init(config.PORT, user, passwd, databasename, config.LOGWrite,
                config.OPT_LINGER, config.TRIGMode, config.sql_num, config.thread_num,
                config.close_log, config.actor_model); // actor_model: concurrency model
    // Logging.
    server.log_write();
    // Database connection pool.
    server.sql_pool();
    // Worker thread pool.
    server.thread_pool();
    // Epoll trigger mode (LT/ET).
    server.trig_mode();
    // Set up the listening socket.
    server.eventListen();
    // Run the event loop.
    server.eventLoop();
    return 0;
}
|
// Created on: 1999-05-13
// Created by: data exchange team
// Copyright (c) 1999 Matra Datavision
// Copyright (c) 1999-2014 OPEN CASCADE SAS
//
// This file is part of Open CASCADE Technology software library.
//
// This library is free software; you can redistribute it and/or modify it under
// the terms of the GNU Lesser General Public License version 2.1 as published
// by the Free Software Foundation, with special exception defined in the file
// OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT
// distribution for complete text of the license and disclaimer of any warranty.
//
// Alternatively, this file may be used under the terms of Open CASCADE
// commercial license or contractual agreement.
#ifndef _ShapeUpgrade_ConvertCurve3dToBezier_HeaderFile
#define _ShapeUpgrade_ConvertCurve3dToBezier_HeaderFile
#include <Standard.hxx>
#include <Standard_Type.hxx>
#include <TColGeom_HSequenceOfCurve.hxx>
#include <TColStd_HSequenceOfReal.hxx>
#include <Standard_Boolean.hxx>
#include <ShapeUpgrade_SplitCurve3d.hxx>
class ShapeUpgrade_ConvertCurve3dToBezier;
DEFINE_STANDARD_HANDLE(ShapeUpgrade_ConvertCurve3dToBezier, ShapeUpgrade_SplitCurve3d)
//! converts/splits a 3d curve of any type to a list of beziers
class ShapeUpgrade_ConvertCurve3dToBezier : public ShapeUpgrade_SplitCurve3d
{
public:
  //! Empty constructor
  Standard_EXPORT ShapeUpgrade_ConvertCurve3dToBezier();

  //! Sets mode for conversion Geom_Line to bezier.
  void SetLineMode (const Standard_Boolean mode);

  //! Returns the Geom_Line conversion mode.
  Standard_Boolean GetLineMode() const;

  //! Sets mode for conversion Geom_Circle to bezier.
  void SetCircleMode (const Standard_Boolean mode);

  //! Returns the Geom_Circle conversion mode.
  Standard_Boolean GetCircleMode() const;

  //! Sets mode for conversion Geom_Conic to bezier.
  void SetConicMode (const Standard_Boolean mode);

  //! Returns the Geom_Conic conversion mode.
  Standard_Boolean GetConicMode() const;

  //! Converts curve into a list of beziers, and stores the
  //! splitting parameters on original curve.
  Standard_EXPORT virtual void Compute() Standard_OVERRIDE;

  //! Splits a list of beziers computed by Compute method according
  //! the split values and splitting parameters.
  Standard_EXPORT virtual void Build (const Standard_Boolean Segment) Standard_OVERRIDE;

  //! Returns the list of splitted parameters in original curve
  //! parametrisation.
  Standard_EXPORT Handle(TColStd_HSequenceOfReal) SplitParams() const;

  DEFINE_STANDARD_RTTIEXT(ShapeUpgrade_ConvertCurve3dToBezier,ShapeUpgrade_SplitCurve3d)

protected:

private:
  //! Returns the list of bezier curves correspondent to original
  //! curve.
  Standard_EXPORT Handle(TColGeom_HSequenceOfCurve) Segments() const;

  Handle(TColGeom_HSequenceOfCurve) mySegments;
  Handle(TColStd_HSequenceOfReal) mySplitParams;
  Standard_Boolean myLineMode;
  Standard_Boolean myCircleMode;
  Standard_Boolean myConicMode;
};
#include <ShapeUpgrade_ConvertCurve3dToBezier.lxx>
#endif // _ShapeUpgrade_ConvertCurve3dToBezier_HeaderFile
|
/*
* Copyright (C) 2014-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "DFGIntegerCheckCombiningPhase.h"
#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGVariableAccessDataDump.h"
#include "JSCInlines.h"
#include <unordered_map>
#include <wtf/HashMethod.h>
namespace JSC { namespace DFG {
namespace DFGIntegerCheckCombiningPhaseInternal {
// Flip to true for verbose dataLog tracing of range collection and hoisting.
static const bool verbose = false;
}
// Phase that combines redundant integer checks within a basic block: repeated
// overflow-checked ArithAdds on the same value and repeated CheckInBounds
// against the same length are replaced by hoisted checks covering the
// [min, max] addend range, after which the per-node checks are removed or
// made unchecked. Runs on SSA form only (see run()).
class IntegerCheckCombiningPhase : public Phase {
public:
    enum RangeKind {
        InvalidRangeKind,
        // This means we did ArithAdd with CheckOverflow.
        Addition,
        // This means we did CheckInBounds on some length.
        ArrayBounds
    };

    // Identity of a family of related checks: the kind, the value being
    // checked (m_source), and -- for bounds checks -- the length node (m_key).
    struct RangeKey {
        static RangeKey addition(Edge edge)
        {
            RangeKey result;
            result.m_kind = Addition;
            result.m_source = edge.sanitized();
            result.m_key = 0;
            return result;
        }
        static RangeKey arrayBounds(Edge edge, Node* key)
        {
            RangeKey result;
            result.m_kind = ArrayBounds;
            result.m_source = edge.sanitized();
            result.m_key = key;
            return result;
        }
        // An invalid key is falsy.
        bool operator!() const { return m_kind == InvalidRangeKind; }
        unsigned hash() const
        {
            return m_kind + m_source.hash() + PtrHash<Node*>::hash(m_key);
        }
        bool operator==(const RangeKey& other) const
        {
            return m_kind == other.m_kind
                && m_source == other.m_source
                && m_key == other.m_key;
        }
        void dump(PrintStream& out) const
        {
            switch (m_kind) {
            case InvalidRangeKind:
                out.print("InvalidRangeKind(");
                break;
            case Addition:
                out.print("Addition(");
                break;
            case ArrayBounds:
                out.print("ArrayBounds(");
                break;
            }
            if (m_source)
                out.print(m_source);
            else
                out.print("null");
            out.print(", ");
            if (m_key)
                out.print(m_key);
            else
                out.print("null");
            out.print(")");
        }
        RangeKind m_kind { InvalidRangeKind };
        Edge m_source;
        Node* m_key { nullptr };
    };

    // A RangeKey paired with the constant addend contributed by one node.
    struct RangeKeyAndAddend {
        RangeKeyAndAddend() = default;
        RangeKeyAndAddend(RangeKey key, int32_t addend)
            : m_key(key)
            , m_addend(addend)
        {
        }
        bool operator!() const { return !m_key && !m_addend; }
        void dump(PrintStream& out) const
        {
            out.print(m_key, " + ", m_addend);
        }
        RangeKey m_key;
        int32_t m_addend { 0 };
    };

    // Aggregated min/max addend seen for one RangeKey within a block, with the
    // code origins of the extremes (used for the hoisted checks' origins).
    struct Range {
        void dump(PrintStream& out) const
        {
            out.print("(", m_minBound, " @", m_minOrigin, ") .. (", m_maxBound, " @", m_maxOrigin, "), count = ", m_count, ", hoisted = ", m_hoisted);
        }
        int32_t m_minBound { 0 };
        CodeOrigin m_minOrigin;
        int32_t m_maxBound { 0 };
        CodeOrigin m_maxOrigin;
        unsigned m_count { 0 }; // If this is zero then the bounds won't necessarily make sense.
        bool m_hoisted { false };
    };

    IntegerCheckCombiningPhase(Graph& graph)
        : Phase(graph, "integer check combining")
        , m_insertionSet(graph)
    {
    }

    // Processes every block; returns true if anything was changed.
    bool run()
    {
        ASSERT(m_graph.m_form == SSA);
        m_changed = false;
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;)
            handleBlock(blockIndex);
        return m_changed;
    }

private:
    void handleBlock(BlockIndex blockIndex)
    {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            return;
        m_map.clear();
        // First we collect Ranges. If operations within the range have enough redundancy,
        // we hoist. And then we remove additions and checks that fall within the max range.
        for (auto* node : *block) {
            RangeKeyAndAddend data = rangeKeyAndAddend(node);
            if (DFGIntegerCheckCombiningPhaseInternal::verbose)
                dataLog("For ", node, ": ", data, "\n");
            if (!data)
                continue;
            Range& range = m_map[data.m_key];
            if (DFGIntegerCheckCombiningPhaseInternal::verbose)
                dataLog("    Range: ", range, "\n");
            if (range.m_count) {
                // Widen the existing range if this addend falls outside it.
                if (data.m_addend > range.m_maxBound) {
                    range.m_maxBound = data.m_addend;
                    range.m_maxOrigin = node->origin.semantic;
                } else if (data.m_addend < range.m_minBound) {
                    range.m_minBound = data.m_addend;
                    range.m_minOrigin = node->origin.semantic;
                }
            } else {
                // First sighting: the range degenerates to this single addend.
                range.m_maxBound = data.m_addend;
                range.m_minBound = data.m_addend;
                range.m_minOrigin = node->origin.semantic;
                range.m_maxOrigin = node->origin.semantic;
            }
            range.m_count++;
            if (DFGIntegerCheckCombiningPhaseInternal::verbose)
                dataLog("    New range: ", range, "\n");
        }
        // Second pass: hoist one covering check per profitable range, then
        // weaken/remove the per-node checks it subsumes.
        for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
            Node* node = block->at(nodeIndex);
            RangeKeyAndAddend data = rangeKeyAndAddend(node);
            if (!data)
                continue;
            Range range = m_map[data.m_key];
            if (!isValid(data.m_key, range))
                continue;
            // Do the hoisting.
            if (!range.m_hoisted) {
                NodeOrigin minOrigin = node->origin.withSemantic(range.m_minOrigin);
                NodeOrigin maxOrigin = node->origin.withSemantic(range.m_maxOrigin);
                switch (data.m_key.m_kind) {
                case Addition: {
                    // Probe both extremes with overflow-checked adds so any
                    // overflow in the range is caught up front.
                    if (range.m_minBound < 0)
                        insertAdd(nodeIndex, minOrigin, data.m_key.m_source, range.m_minBound);
                    if (range.m_maxBound > 0)
                        insertAdd(nodeIndex, maxOrigin, data.m_key.m_source, range.m_maxBound);
                    break;
                }
                case ArrayBounds: {
                    Node* minNode;
                    Node* maxNode;
                    if (!data.m_key.m_source) {
                        // data.m_key.m_source being null means that we're comparing against int32 constants (see rangeKeyAndAddend()).
                        // Since CheckInBounds does an unsigned comparison, if the minBound >= 0, it is also covered by the
                        // maxBound comparison. However, if minBound < 0, then CheckInBounds should always fail its speculation check.
                        // We'll force an OSR exit in that case.
                        minNode = nullptr;
                        if (range.m_minBound < 0)
                            m_insertionSet.insertNode(nodeIndex, SpecNone, ForceOSRExit, node->origin);
                        maxNode = m_insertionSet.insertConstant(
                            nodeIndex, maxOrigin, jsNumber(range.m_maxBound));
                    } else {
                        minNode = insertAdd(
                            nodeIndex, minOrigin, data.m_key.m_source, range.m_minBound,
                            Arith::Unchecked);
                        maxNode = insertAdd(
                            nodeIndex, maxOrigin, data.m_key.m_source, range.m_maxBound,
                            Arith::Unchecked);
                    }
                    if (minNode) {
                        m_insertionSet.insertNode(
                            nodeIndex, SpecNone, CheckInBounds, node->origin,
                            Edge(minNode, Int32Use), Edge(data.m_key.m_key, Int32Use));
                    }
                    m_insertionSet.insertNode(
                        nodeIndex, SpecNone, CheckInBounds, node->origin,
                        Edge(maxNode, Int32Use), Edge(data.m_key.m_key, Int32Use));
                    break;
                }
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
                m_changed = true;
                m_map[data.m_key].m_hoisted = true;
            }
            // Do the elimination.
            switch (data.m_key.m_kind) {
            case Addition:
                // The hoisted probes proved no overflow; the add can go unchecked.
                node->setArithMode(Arith::Unchecked);
                m_changed = true;
                break;
            case ArrayBounds:
                // Subsumed by the hoisted CheckInBounds pair.
                node->remove(m_graph);
                m_changed = true;
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
        m_insertionSet.execute(block);
    }

    // Classifies a node as a combinable check, returning its key and constant
    // addend; returns a falsy RangeKeyAndAddend for anything else.
    RangeKeyAndAddend rangeKeyAndAddend(Node* node)
    {
        switch (node->op()) {
        case ArithAdd: {
            if (node->arithMode() != Arith::CheckOverflow
                && node->arithMode() != Arith::CheckOverflowAndNegativeZero)
                break;
            if (!node->child2()->isInt32Constant())
                break;
            return RangeKeyAndAddend(
                RangeKey::addition(node->child1()),
                node->child2()->asInt32());
        }
        case CheckInBounds: {
            Edge source;
            int32_t addend;
            Node* key = node->child2().node();
            Edge index = node->child1();
            if (index->isInt32Constant()) {
                // Constant index: no source, the constant is the addend.
                source = Edge();
                addend = index->asInt32();
            } else if (
                index->op() == ArithAdd
                && index->isBinaryUseKind(Int32Use)
                && index->child2()->isInt32Constant()) {
                // index = source + constant.
                source = index->child1();
                addend = index->child2()->asInt32();
            } else {
                source = index;
                addend = 0;
            }
            return RangeKeyAndAddend(RangeKey::arrayBounds(source, key), addend);
        }
        default:
            break;
        }
        return RangeKeyAndAddend();
    }

    // A range is worth hoisting only if it has redundancy (count >= 2) and,
    // for bounds checks, no risk of 2^31 wrap-around between its bounds.
    bool isValid(const RangeKey& key, const Range& range)
    {
        if (range.m_count < 2)
            return false;
        switch (key.m_kind) {
        case ArrayBounds: {
            // Have to do this carefully because C++ compilers are too smart. But all we're really doing is detecting if
            // the difference between the bounds is 2^31 or more. If it was, then we'd have to worry about wrap-around.
            // The way we'd like to write this expression is (range.m_maxBound - range.m_minBound) >= 0, but that is a
            // signed subtraction and compare, which allows the C++ compiler to do anything it wants in case of
            // wrap-around.
            uint32_t maxBound = range.m_maxBound;
            uint32_t minBound = range.m_minBound;
            uint32_t unsignedDifference = maxBound - minBound;
            return !(unsignedDifference >> 31);
        }
        default:
            return true;
        }
    }

    // Inserts `source + addend` before nodeIndex (checked overflow by
    // default); returns the source node unchanged when addend is zero.
    Node* insertAdd(
        unsigned nodeIndex, NodeOrigin origin, Edge source, int32_t addend,
        Arith::Mode arithMode = Arith::CheckOverflow)
    {
        if (!addend)
            return source.node();
        return m_insertionSet.insertNode(
            nodeIndex, source->prediction(), source->result(),
            ArithAdd, origin, OpInfo(arithMode), source,
            m_insertionSet.insertConstantForUse(
                nodeIndex, origin, jsNumber(addend), source.useKind()));
    }

    using RangeMap = std::unordered_map<RangeKey, Range, HashMethod<RangeKey>, std::equal_to<RangeKey>, FastAllocator<std::pair<const RangeKey, Range>>>;
    RangeMap m_map;

    InsertionSet m_insertionSet;
    bool m_changed;
};
// Public entry point: runs the phase over the graph.
// Returns true if the graph was modified.
bool performIntegerCheckCombining(Graph& graph)
{
    return runPhase<IntegerCheckCombiningPhase>(graph);
}
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
|
// Copyright 2010 Google Inc. All Rights Reserved
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "supersonic/expression/core/projecting_expressions.h"
#include <glog/logging.h>
#include "supersonic/utils/logging-inl.h"
#include "supersonic/utils/scoped_ptr.h"
#include "supersonic/base/infrastructure/bit_pointers.h"
#include "supersonic/base/infrastructure/block.h"
#include "supersonic/base/infrastructure/tuple_schema.h"
#include "supersonic/base/memory/memory.h"
#include "supersonic/proto/supersonic.pb.h"
#include "supersonic/testing/block_builder.h"
#include "supersonic/testing/comparators.h"
#include "supersonic/testing/expression_test_helper.h"
#include "gtest/gtest.h"
namespace supersonic {
namespace {
// Fixture providing a fixed 5-row, 4-column (STRING, INT32, DOUBLE, INT32)
// input block, plus helpers for comparing projected columns against it.
class ProjectingExpressionTest : public testing::Test {
 public:
  ProjectingExpressionTest() : block_(CreateBlock()) {}

  // The shared input view backing every test.
  const View& input() const { return block_->view(); }

 protected:
  // Asserts that col1 and col2 agree in type, nullability and -- crucially --
  // share the very same data pointer (projection must not copy data), and
  // that their null masks match for the first `rows` rows.
  void ExpectColumnsEqual(const Column& col1, const Column& col2, size_t rows) {
    ASSERT_EQ(col1.attribute().type(), col2.attribute().type());
    ASSERT_EQ(col1.attribute().nullability(), col2.attribute().nullability());
    // The data is _not_ rewritten, it's the same pointer.
    EXPECT_EQ(col1.data(), col2.data());
    if (col1.attribute().nullability() == NULLABLE) {
      // size_t loop index avoids the signed/unsigned comparison the original
      // `int i < rows` produced.
      for (size_t i = 0; i < rows; ++i) {
        EXPECT_EQ(col1.is_null()[i], col2.is_null()[i]);
      }
    }
  }

  // Returns the canonical name ("colN") of the index-th input column.
  const string name(size_t index) {
    CHECK_LT(index, input().column_count());
    // (No lower-bound check: index is unsigned, so `index >= 0` is a tautology.)
    return StrCat("col", index);
  }

  // Number of rows in the shared input.
  const size_t rows() { return input().row_count(); }

 private:
  // Builds the shared input block; `__` marks NULL cells.
  static Block* CreateBlock() {
    return BlockBuilder<STRING, INT32, DOUBLE, INT32>()
        .AddRow("1", 12, 5.1, 22)
        .AddRow("2", 13, 6.2, 23)
        .AddRow("3", 14, 7.3, 24)
        .AddRow("4", __, 8.4, 25)
        .AddRow(__, 16, __, 26)
        .Build();
  }

  scoped_ptr<Block> block_;
};
// AttributeAt(i) must project exactly column i, by reference (no data copy).
TEST_F(ProjectingExpressionTest, AttributeAtSelects) {
  for (int i = 0; i < input().column_count(); ++i) {
    scoped_ptr<BoundExpressionTree> attribute(
        DefaultBind(input().schema(), 100, AttributeAt(i)));
    const View& result = DefaultEvaluate(attribute.get(), input());
    EXPECT_EQ(1, result.schema().attribute_count());
    EXPECT_COLUMNS_EQUAL(input().column(i), result.column(0), rows());
  }
}
// NamedAttribute("colN") must select the same column as AttributeAt(N).
TEST_F(ProjectingExpressionTest, NamedAttributeSelects) {
  for (int i = 0; i < input().column_count(); ++i) {
    scoped_ptr<BoundExpressionTree> attribute(
        DefaultBind(input().schema(), 100, NamedAttribute(name(i))));
    const View& result = DefaultEvaluate(attribute.get(), input());
    EXPECT_EQ(1, result.schema().attribute_count());
    EXPECT_COLUMNS_EQUAL(input().column(i), result.column(0), rows());
  }
}
// InputAttributeProjection over a one-name projector must yield that single
// column, for each column in turn.
TEST_F(ProjectingExpressionTest, SingleColumnInputAttributeProjection) {
  vector<string> projected_columns;
  for (int i = 0; i < input().column_count(); ++i) {
    projected_columns.clear();
    projected_columns.push_back(name(i));
    scoped_ptr<const SingleSourceProjector> projector(
        ProjectNamedAttributes(projected_columns));
    scoped_ptr<BoundExpressionTree> projected(DefaultBind(
        input().schema(),
        100,
        InputAttributeProjection(projector.release())));
    const View& result = DefaultEvaluate(projected.get(), input());
    EXPECT_EQ(1, result.schema().attribute_count());
    EXPECT_COLUMNS_EQUAL(input().column(i), result.column(0), rows());
  }
}
// Projecting two named columns must preserve the requested order
// (col3 then col1), not the input order.
TEST_F(ProjectingExpressionTest, TwoColumnInputAttributeProjection) {
  vector<string> projected_columns;
  projected_columns.push_back(name(3));
  projected_columns.push_back(name(1));
  scoped_ptr<const SingleSourceProjector> projector(
      ProjectNamedAttributes(projected_columns));
  scoped_ptr<BoundExpressionTree> projected(DefaultBind(
      input().schema(),
      100,
      InputAttributeProjection(projector.release())));
  const View& result = DefaultEvaluate(projected.get(), input());
  EXPECT_EQ(2, result.schema().attribute_count());
  EXPECT_COLUMNS_EQUAL(input().column(3), result.column(0), rows());
  EXPECT_COLUMNS_EQUAL(input().column(1), result.column(1), rows());
}
// When per-column skip vectors are supplied, the skipped rows must come back
// as NULL in the corresponding result column (in addition to rows that were
// already NULL in the input).
TEST_F(ProjectingExpressionTest, TwoColumnInputProjectionShortCircuit) {
  vector<string> projected_columns;
  projected_columns.push_back(name(2));
  projected_columns.push_back(name(0));
  scoped_ptr<const SingleSourceProjector> projector(
      ProjectNamedAttributes(projected_columns));
  scoped_ptr<BoundExpression> projected(DefaultDoBind(
      input().schema(),
      100,
      InputAttributeProjection(projector.release())));
  // One skip vector per projected column, 5 rows each.
  BoolBlock skip_vectors(2, HeapBufferAllocator::Get());
  ASSERT_TRUE(skip_vectors.TryReallocate(5).is_success());
  bool left_skip[5] = { true, false, false, true, false };
  bool right_skip[5] = { false, true, false, false, true };
  bit_pointer::FillFrom(skip_vectors.view().column(0), left_skip, 5);
  bit_pointer::FillFrom(skip_vectors.view().column(1), right_skip, 5);
  EvaluationResult result =
      projected.get()->DoEvaluate(input(), skip_vectors.view());
  ASSERT_TRUE(result.is_success());
  EXPECT_EQ(2, result.get().schema().attribute_count());
  for (int i = 0; i < input().row_count(); ++i) {
    // Result null mask = input null mask OR'ed with the skip vector.
    EXPECT_EQ(input().column(2).is_null()[i] || left_skip[i],
              result.get().column(0).is_null()[i]);
    EXPECT_EQ(input().column(0).is_null()[i] || right_skip[i],
              result.get().column(1).is_null()[i]);
  }
}
// Flat over an ExpressionList flattens its members into a multi-column result.
TEST_F(ProjectingExpressionTest, Flat) {
  scoped_ptr<BoundExpressionTree> projected(DefaultBind(
      input().schema(),
      100,
      Flat((new ExpressionList())->add(AttributeAt(0))->add(AttributeAt(3)))));
  const View& result = DefaultEvaluate(projected.get(), input());
  EXPECT_EQ(2, result.schema().attribute_count());
  EXPECT_COLUMNS_EQUAL(input().column(0), result.column(0), rows());
  EXPECT_COLUMNS_EQUAL(input().column(3), result.column(1), rows());
}
// Flat over an empty ExpressionList binds and yields a zero-column result.
TEST_F(ProjectingExpressionTest, FlattenEmptyList) {
  scoped_ptr<BoundExpressionTree> projected(DefaultBind(
      input().schema(), 100, Flat(new ExpressionList())));
  const View& result = DefaultEvaluate(projected.get(), input());
  EXPECT_EQ(0, result.schema().attribute_count());
}
// Alias must rename a single-column result while leaving the data untouched.
TEST_F(ProjectingExpressionTest, Alias) {
  scoped_ptr<BoundExpressionTree> aliased(DefaultBind(
      input().schema(),
      100,
      Alias("Some alias", AttributeAt(0))));
  const View& result = DefaultEvaluate(aliased.get(), input());
  EXPECT_EQ(1, result.schema().attribute_count());
  EXPECT_COLUMNS_EQUAL(input().column(0), result.column(0), rows());
  EXPECT_EQ("Some alias", result.schema().attribute(0).name());
}
// Alias is single-column only: binding it over a two-column expression fails.
TEST_F(ProjectingExpressionTest, AliasFailsOnTooManyColumns) {
  scoped_ptr<const Expression> two_columns(
      Flat((new ExpressionList())->add(AttributeAt(0))->add(AttributeAt(1))));
  scoped_ptr<const Expression> alias(
      Alias(string("Some other alias"), two_columns.release()));
  FailureOrOwned<BoundExpressionTree> bound_alias =
      alias->Bind(input().schema(), HeapBufferAllocator::Get(), 100);
  EXPECT_TRUE(bound_alias.is_failure());
}
// Alias over a zero-column expression must also fail to bind.
TEST_F(ProjectingExpressionTest, AliasFailsOnNoColumns) {
  scoped_ptr<const Expression> no_columns(Flat((new ExpressionList())));
  scoped_ptr<const Expression> alias(
      Alias(string("Yet another alias"), no_columns.release()));
  FailureOrOwned<BoundExpressionTree> bound_alias =
      alias->Bind(input().schema(), HeapBufferAllocator::Get(), 100);
  EXPECT_TRUE(bound_alias.is_failure());
}
} // namespace
} // namespace supersonic
|
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/axe-config.h"
#endif
#include "optionsdialog.h"
#include "ui_optionsdialog.h"
#include "bitcoinunits.h"
#include "guiutil.h"
#include "optionsmodel.h"
#include "validation.h" // for DEFAULT_SCRIPTCHECK_THREADS and MAX_SCRIPTCHECK_THREADS
#include "netbase.h"
#include "txdb.h" // for -dbcache defaults
#ifdef ENABLE_WALLET
#include "wallet/wallet.h" // for CWallet::GetRequiredFee()
#include "privatesend/privatesend-client.h"
#endif // ENABLE_WALLET
#include <boost/thread.hpp>
#include <QDataWidgetMapper>
#include <QDir>
#include <QIntValidator>
#include <QLocale>
#include <QMessageBox>
#include <QTimer>
#ifdef ENABLE_WALLET
extern CWallet* pwalletMain;
#endif // ENABLE_WALLET
// Constructs the options dialog: initializes widget ranges/validators, wires
// up proxy enable/validation signals, populates the digits/theme/language
// selectors, and creates the widget-to-OptionsModel mapper (the model itself
// is attached later via setModel()).
OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) :
    QDialog(parent),
    ui(new Ui::OptionsDialog),
    model(0),
    mapper(0)
{
    ui->setupUi(this);
    /* Main elements init */
    ui->databaseCache->setMinimum(nMinDbCache);
    ui->databaseCache->setMaximum(nMaxDbCache);
    // Negative values mean "leave this many cores free"; see -par handling.
    ui->threadsScriptVerif->setMinimum(-GetNumCores());
    ui->threadsScriptVerif->setMaximum(MAX_SCRIPTCHECK_THREADS);
    /* Network elements init */
#ifndef USE_UPNP
    ui->mapPortUpnp->setEnabled(false);
#endif
    // Proxy fields start disabled; the connectSocks* checkboxes enable them.
    ui->proxyIp->setEnabled(false);
    ui->proxyPort->setEnabled(false);
    ui->proxyPort->setValidator(new QIntValidator(1, 65535, this));
    ui->proxyIpTor->setEnabled(false);
    ui->proxyPortTor->setEnabled(false);
    ui->proxyPortTor->setValidator(new QIntValidator(1, 65535, this));
    connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->proxyIp, SLOT(setEnabled(bool)));
    connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->proxyPort, SLOT(setEnabled(bool)));
    connect(ui->connectSocks, SIGNAL(toggled(bool)), this, SLOT(updateProxyValidationState()));
    connect(ui->connectSocksTor, SIGNAL(toggled(bool)), ui->proxyIpTor, SLOT(setEnabled(bool)));
    connect(ui->connectSocksTor, SIGNAL(toggled(bool)), ui->proxyPortTor, SLOT(setEnabled(bool)));
    connect(ui->connectSocksTor, SIGNAL(toggled(bool)), this, SLOT(updateProxyValidationState()));
    /* Window elements init */
#ifdef Q_OS_MAC
    /* remove Window tab on Mac */
    ui->tabWidget->removeTab(ui->tabWidget->indexOf(ui->tabWindow));
#endif
    /* remove Wallet tab in case of -disablewallet */
    if (!enableWallet) {
        ui->tabWidget->removeTab(ui->tabWidget->indexOf(ui->tabWallet));
    }
    /* Display elements init */
    /* Number of displayed decimal digits selector */
    QString digits;
    for(int index = 2; index <=8; index++){
        digits.setNum(index);
        ui->digits->addItem(digits, digits);
    }
    /* Theme selector */
    ui->theme->addItem(QString("Light"), QVariant("light"));
    ui->theme->addItem(QString("Light-HiRes"), QVariant("light-hires"));
    ui->theme->addItem(QString("Light-Retro"), QVariant("light-retro"));
    ui->theme->addItem(QString("Light-HiRes-Retro"), QVariant("light-hires-retro"));
    ui->theme->addItem(QString("Blue"), QVariant("drkblue"));
    ui->theme->addItem(QString("Crownium"), QVariant("crownium"));
    ui->theme->addItem(QString("Traditional"), QVariant("trad"));
    /* Language selector */
    QDir translations(":translations");
    ui->bitcoinAtStartup->setToolTip(ui->bitcoinAtStartup->toolTip().arg(tr(PACKAGE_NAME)));
    ui->bitcoinAtStartup->setText(ui->bitcoinAtStartup->text().arg(tr(PACKAGE_NAME)));
    ui->lang->setToolTip(ui->lang->toolTip().arg(tr(PACKAGE_NAME)));
    ui->lang->addItem(QString("(") + tr("default") + QString(")"), QVariant(""));
    Q_FOREACH(const QString &langStr, translations.entryList())
    {
        QLocale locale(langStr);
        /** check if the locale name consists of 2 parts (language_country) */
        if(langStr.contains("_"))
        {
#if QT_VERSION >= 0x040800
            /** display language strings as "native language - native country (locale name)", e.g. "Deutsch - Deutschland (de)" */
            ui->lang->addItem(locale.nativeLanguageName() + QString(" - ") + locale.nativeCountryName() + QString(" (") + langStr + QString(")"), QVariant(langStr));
#else
            /** display language strings as "language - country (locale name)", e.g. "German - Germany (de)" */
            ui->lang->addItem(QLocale::languageToString(locale.language()) + QString(" - ") + QLocale::countryToString(locale.country()) + QString(" (") + langStr + QString(")"), QVariant(langStr));
#endif
        }
        else
        {
#if QT_VERSION >= 0x040800
            /** display language strings as "native language (locale name)", e.g. "Deutsch (de)" */
            ui->lang->addItem(locale.nativeLanguageName() + QString(" (") + langStr + QString(")"), QVariant(langStr));
#else
            /** display language strings as "language (locale name)", e.g. "German (de)" */
            ui->lang->addItem(QLocale::languageToString(locale.language()) + QString(" (") + langStr + QString(")"), QVariant(langStr));
#endif
        }
    }
#if QT_VERSION >= 0x040700
    ui->thirdPartyTxUrls->setPlaceholderText("https://example.com/tx/%s");
#endif
    ui->unit->setModel(new BitcoinUnits(this));
    /* Widget-to-option mapper */
    mapper = new QDataWidgetMapper(this);
    mapper->setSubmitPolicy(QDataWidgetMapper::ManualSubmit);
    mapper->setOrientation(Qt::Vertical);
    /* setup/change UI elements when proxy IPs are invalid/valid */
    ui->proxyIp->setCheckValidator(new ProxyAddressValidator(parent));
    ui->proxyIpTor->setCheckValidator(new ProxyAddressValidator(parent));
    connect(ui->proxyIp, SIGNAL(validationDidChange(QValidatedLineEdit *)), this, SLOT(updateProxyValidationState()));
    connect(ui->proxyIpTor, SIGNAL(validationDidChange(QValidatedLineEdit *)), this, SLOT(updateProxyValidationState()));
    connect(ui->proxyPort, SIGNAL(textChanged(const QString&)), this, SLOT(updateProxyValidationState()));
    connect(ui->proxyPortTor, SIGNAL(textChanged(const QString&)), this, SLOT(updateProxyValidationState()));
}
// Destructor: frees the generated UI; Qt parents own the mapper/validators.
OptionsDialog::~OptionsDialog()
{
    delete ui;
}
// Attaches the OptionsModel: loads current values through the mapper and wires
// "restart required" warnings for options that only take effect after restart.
// NOTE(review): the connect() calls below run on every invocation; calling
// setModel() more than once would register duplicate connections -- confirm
// callers invoke it only once.
void OptionsDialog::setModel(OptionsModel *_model)
{
    this->model = _model;
    if(_model)
    {
        /* check if client restart is needed and show persistent message */
        if (_model->isRestartRequired())
            showRestartWarning(true);
        QString strLabel = _model->getOverriddenByCommandLine();
        if (strLabel.isEmpty())
            strLabel = tr("none");
        ui->overriddenByCommandLineLabel->setText(strLabel);
        mapper->setModel(_model);
        setMapper();
        mapper->toFirst();
        updateDefaultProxyNets();
    }
    /* warn when one of the following settings changes by user action (placed here so init via mapper doesn't trigger them) */
    /* Main */
    connect(ui->databaseCache, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
    connect(ui->threadsScriptVerif, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
    /* Wallet */
    connect(ui->showMasternodesTab, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning()));
    connect(ui->spendZeroConfChange, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning()));
    /* Network */
    connect(ui->allowIncoming, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning()));
    connect(ui->connectSocks, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning()));
    connect(ui->connectSocksTor, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning()));
    /* Display */
    connect(ui->digits, SIGNAL(valueChanged()), this, SLOT(showRestartWarning()));
    connect(ui->theme, SIGNAL(valueChanged()), this, SLOT(showRestartWarning()));
    connect(ui->lang, SIGNAL(valueChanged()), this, SLOT(showRestartWarning()));
    connect(ui->thirdPartyTxUrls, SIGNAL(textChanged(const QString &)), this, SLOT(showRestartWarning()));
}
// Map each editor widget to the OptionsModel row it edits; the
// QDataWidgetMapper keeps widget contents and model values in sync.
void OptionsDialog::setMapper()
{
    /* Main */
    mapper->addMapping(ui->bitcoinAtStartup, OptionsModel::StartAtStartup);
    mapper->addMapping(ui->threadsScriptVerif, OptionsModel::ThreadsScriptVerif);
    mapper->addMapping(ui->databaseCache, OptionsModel::DatabaseCache);

    /* Wallet */
    mapper->addMapping(ui->coinControlFeatures, OptionsModel::CoinControlFeatures);
    mapper->addMapping(ui->showMasternodesTab, OptionsModel::ShowMasternodesTab);
    mapper->addMapping(ui->showAdvancedPSUI, OptionsModel::ShowAdvancedPSUI);
    mapper->addMapping(ui->showPrivateSendPopups, OptionsModel::ShowPrivateSendPopups);
    mapper->addMapping(ui->lowKeysWarning, OptionsModel::LowKeysWarning);
    mapper->addMapping(ui->privateSendMultiSession, OptionsModel::PrivateSendMultiSession);
    mapper->addMapping(ui->spendZeroConfChange, OptionsModel::SpendZeroConfChange);
    mapper->addMapping(ui->privateSendRounds, OptionsModel::PrivateSendRounds);
    mapper->addMapping(ui->privateSendAmount, OptionsModel::PrivateSendAmount);

    /* Network */
    mapper->addMapping(ui->mapPortUpnp, OptionsModel::MapPortUPnP);
    mapper->addMapping(ui->allowIncoming, OptionsModel::Listen);
    mapper->addMapping(ui->connectSocks, OptionsModel::ProxyUse);
    mapper->addMapping(ui->proxyIp, OptionsModel::ProxyIP);
    mapper->addMapping(ui->proxyPort, OptionsModel::ProxyPort);
    mapper->addMapping(ui->connectSocksTor, OptionsModel::ProxyUseTor);
    mapper->addMapping(ui->proxyIpTor, OptionsModel::ProxyIPTor);
    mapper->addMapping(ui->proxyPortTor, OptionsModel::ProxyPortTor);

    /* Window */
    // Tray-icon settings do not exist on macOS builds.
#ifndef Q_OS_MAC
    mapper->addMapping(ui->hideTrayIcon, OptionsModel::HideTrayIcon);
    mapper->addMapping(ui->minimizeToTray, OptionsModel::MinimizeToTray);
    mapper->addMapping(ui->minimizeOnClose, OptionsModel::MinimizeOnClose);
#endif

    /* Display */
    mapper->addMapping(ui->digits, OptionsModel::Digits);
    mapper->addMapping(ui->theme, OptionsModel::Theme);
    mapper->addMapping(ui->lang, OptionsModel::Language);
    mapper->addMapping(ui->unit, OptionsModel::DisplayUnit);
    mapper->addMapping(ui->thirdPartyTxUrls, OptionsModel::ThirdPartyTxUrls);
}
// Enable or disable the OK button (used to block submission while the proxy
// settings are invalid).
void OptionsDialog::setOkButtonState(bool fState)
{
    ui->okButton->setEnabled(fState);
}
// Reset all options to defaults after user confirmation; the client is shut
// down because the reset only takes effect on restart.
void OptionsDialog::on_resetButton_clicked()
{
    // Nothing to reset without a model.
    if (!model)
        return;

    // confirmation dialog
    QMessageBox::StandardButton btnRetVal = QMessageBox::question(
        this, tr("Confirm options reset"),
        tr("Client restart required to activate changes.") + "<br><br>" + tr("Client will be shut down. Do you want to proceed?"),
        QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Cancel);

    if (btnRetVal == QMessageBox::Cancel)
        return;

    /* reset all options and close GUI */
    model->Reset();
    QApplication::quit();
}
void OptionsDialog::on_okButton_clicked()
{
    // Push all widget values into the OptionsModel.
    mapper->submit();
#ifdef ENABLE_WALLET
    // Invalidate the PrivateSend client's cached block count so it refreshes.
    privateSendClient.nCachedNumBlocks = std::numeric_limits<int>::max();
    if(pwalletMain)
        pwalletMain->MarkDirty();
#endif // ENABLE_WALLET
    accept();
    updateDefaultProxyNets();
}
// Discard pending edits and close the dialog.
void OptionsDialog::on_cancelButton_clicked()
{
    reject();
}
// Hiding the tray icon makes "minimize to tray" meaningless, so force it off
// and grey it out; re-enable it when the icon is shown again.
void OptionsDialog::on_hideTrayIcon_stateChanged(int fState)
{
    const bool fHide = fState != 0;
    if (fHide) {
        ui->minimizeToTray->setChecked(false);
    }
    ui->minimizeToTray->setEnabled(!fHide);
}
// Show a red restart-required notice in the status label. A non-persistent
// warning clears itself after ten seconds; a persistent one stays visible.
void OptionsDialog::showRestartWarning(bool fPersistent)
{
    ui->statusLabel->setStyleSheet("QLabel { color: red; }");

    if (!fPersistent) {
        ui->statusLabel->setText(tr("This change would require a client restart."));
        // clear non-persistent status label after 10 seconds
        // Todo: should perhaps be a class attribute, if we extend the use of statusLabel
        QTimer::singleShot(10000, this, SLOT(clearStatusLabel()));
        return;
    }

    ui->statusLabel->setText(tr("Client restart required to activate changes."));
}
void OptionsDialog::clearStatusLabel()
{
    ui->statusLabel->clear();
    // Keep showing the persistent warning while a restart is still pending.
    if (model && model->isRestartRequired()) {
        showRestartWarning(true);
    }
}
// Enable the OK button only while the proxy settings are acceptable: both
// proxy address fields must hold valid addresses and every enabled port
// field must hold a positive port number; otherwise show an error.
//
// Fix: the previous code bound pUiProxyIp to ui->proxyIp unconditionally and
// then tested `pUiProxyIp == ui->proxyIpTor`, which was always false -- dead,
// misleading code. The effective behavior (check both fields) is kept.
void OptionsDialog::updateProxyValidationState()
{
    const bool fPortsOk =
        (!ui->proxyPort->isEnabled() || ui->proxyPort->text().toInt() > 0) &&
        (!ui->proxyPortTor->isEnabled() || ui->proxyPortTor->text().toInt() > 0);

    if (ui->proxyIp->isValid() && fPortsOk)
    {
        setOkButtonState(ui->proxyIpTor->isValid()); //only enable ok button if both proxys are valid
        clearStatusLabel();
    }
    else
    {
        setOkButtonState(false);
        ui->statusLabel->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel->setText(tr("The supplied proxy address is invalid."));
    }
}
// Check each "reachable via default proxy" box iff the proxy configured for
// that network equals the proxy currently entered in the GUI.
// The GUI proxy string is computed once instead of three identical times,
// and the `cond ? setChecked(true) : setChecked(false)` ternaries are
// replaced by the direct setChecked(bool) idiom (behavior unchanged).
void OptionsDialog::updateDefaultProxyNets()
{
    proxyType proxy;
    std::string strProxy;
    // Proxy currently shown in the GUI, formatted as "ip:port".
    const std::string strDefaultProxyGUI =
        (ui->proxyIp->text() + ":" + ui->proxyPort->text()).toStdString();

    GetProxy(NET_IPV4, proxy);
    strProxy = proxy.proxy.ToStringIP() + ":" + proxy.proxy.ToStringPort();
    ui->proxyReachIPv4->setChecked(strProxy == strDefaultProxyGUI);

    GetProxy(NET_IPV6, proxy);
    strProxy = proxy.proxy.ToStringIP() + ":" + proxy.proxy.ToStringPort();
    ui->proxyReachIPv6->setChecked(strProxy == strDefaultProxyGUI);

    GetProxy(NET_TOR, proxy);
    strProxy = proxy.proxy.ToStringIP() + ":" + proxy.proxy.ToStringPort();
    ui->proxyReachTor->setChecked(strProxy == strDefaultProxyGUI);
}
// Validator for the proxy address line edits; all work happens in validate().
ProxyAddressValidator::ProxyAddressValidator(QObject *parent) :
QValidator(parent)
{
}
// Accept the input only when it resolves to a valid proxy endpoint.
QValidator::State ProxyAddressValidator::validate(QString &input, int &pos) const
{
    Q_UNUSED(pos);
    // Validate the proxy
    // 9050 is used as the port when the input does not specify one.
    CService serv(LookupNumeric(input.toStdString().c_str(), 9050));
    proxyType addrProxy = proxyType(serv, true);
    if (addrProxy.IsValid())
        return QValidator::Acceptable;

    return QValidator::Invalid;
}
|
/*
* Copyright 2016 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "include/core/SkFont.h"
#include "include/core/SkFontArguments.h"
#include "include/core/SkFontMetrics.h"
#include "include/core/SkFontMgr.h"
#include "include/core/SkFontTypes.h"
#include "include/core/SkPaint.h"
#include "include/core/SkPoint.h"
#include "include/core/SkRect.h"
#include "include/core/SkRefCnt.h"
#include "include/core/SkScalar.h"
#include "include/core/SkStream.h"
#include "include/core/SkTypeface.h"
#include "include/core/SkTypes.h"
#include "include/private/SkBitmaskEnum.h"
#include "include/private/SkMalloc.h"
#include "include/private/SkMutex.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTFitsIn.h"
#include "include/private/SkTemplates.h"
#include "include/private/SkTo.h"
#include "modules/skshaper/include/SkShaper.h"
#include "modules/skshaper/src/SkUnicode.h"
#include "src/core/SkLRUCache.h"
#include "src/core/SkSpan.h"
#include "src/core/SkTDPQueue.h"
#include "src/utils/SkUTF.h"
#include <hb.h>
#include <hb-icu.h>
#include <hb-ot.h>
#include <unicode/uscript.h>
#include <cstring>
#include <memory>
#include <type_traits>
#include <utility>
// HB_FEATURE_GLOBAL_START and HB_FEATURE_GLOBAL_END were not added until HarfBuzz 2.0
// They would have always worked, they just hadn't been named yet.
#if !defined(HB_FEATURE_GLOBAL_START)
# define HB_FEATURE_GLOBAL_START 0
#endif
#if !defined(HB_FEATURE_GLOBAL_END)
# define HB_FEATURE_GLOBAL_END ((unsigned int) -1)
#endif
namespace sknonstd {
    // Opt hb_buffer_flags_t into Skia's bitmask-enum operators (|, &, etc.).
    template <> struct is_bitmask_enum<hb_buffer_flags_t> : std::true_type {};
}  // namespace sknonstd
namespace {
// RAII wrappers for HarfBuzz objects: unique_ptr whose deleter calls the
// matching hb_*_destroy function.
template <typename T,typename P,P* p> using resource = std::unique_ptr<T, SkFunctionWrapper<P, p>>;
using HBBlob   = resource<hb_blob_t  , decltype(hb_blob_destroy)  , hb_blob_destroy  >;
using HBFace   = resource<hb_face_t  , decltype(hb_face_destroy)  , hb_face_destroy  >;
using HBFont   = resource<hb_font_t  , decltype(hb_font_destroy)  , hb_font_destroy  >;
using HBBuffer = resource<hb_buffer_t, decltype(hb_buffer_destroy), hb_buffer_destroy>;

// Owning aliases for SkUnicode iterator objects.
using SkUnicodeBidi  = std::unique_ptr<SkBidiIterator>;
using SkUnicodeBreak = std::unique_ptr<SkBreakIterator>;
// Convert a Skia scalar to a HarfBuzz position.
hb_position_t skhb_position(SkScalar value) {
    // HarfBuzz hb_position_t is treated as 16.16 fixed point: scale by 2^16
    // and round to the nearest integer.
    return SkScalarRoundToInt(value * (1 << 16));
}
// HarfBuzz callback: map a Unicode code point to a glyph ID using the SkFont
// stashed in font_data. The variation selector is ignored.
hb_bool_t skhb_glyph(hb_font_t* hb_font,
                     void* font_data,
                     hb_codepoint_t unicode,
                     hb_codepoint_t variation_selector,
                     hb_codepoint_t* glyph,
                     void* user_data) {
    SkFont& font = *reinterpret_cast<SkFont*>(font_data);

    *glyph = font.unicharToGlyph(unicode);
    return *glyph != 0;  // glyph 0 is .notdef: report "not found"
}
// HarfBuzz callback for the no-variation-selector lookup; defers to
// skhb_glyph with variation selector 0.
hb_bool_t skhb_nominal_glyph(hb_font_t* hb_font,
                             void* font_data,
                             hb_codepoint_t unicode,
                             hb_codepoint_t* glyph,
                             void* user_data) {
    return skhb_glyph(hb_font, font_data, unicode, 0, glyph, user_data);
}
// HarfBuzz batch callback: map `count` strided code points to strided glyph
// IDs. Returns how many leading code points were successfully mapped.
unsigned skhb_nominal_glyphs(hb_font_t *hb_font, void *font_data,
                             unsigned int count,
                             const hb_codepoint_t *unicodes,
                             unsigned int unicode_stride,
                             hb_codepoint_t *glyphs,
                             unsigned int glyph_stride,
                             void *user_data) {
    SkFont& font = *reinterpret_cast<SkFont*>(font_data);

    // Batch call textToGlyphs since entry cost is not cheap.
    // Copy required because textToGlyphs is dense and hb is strided.
    SkAutoSTMalloc<256, SkUnichar> unicode(count);
    for (unsigned i = 0; i < count; i++) {
        unicode[i] = *unicodes;
        unicodes = SkTAddOffset<const hb_codepoint_t>(unicodes, unicode_stride);
    }
    SkAutoSTMalloc<256, SkGlyphID> glyph(count);
    font.textToGlyphs(unicode.get(), count * sizeof(SkUnichar), SkTextEncoding::kUTF32,
                      glyph.get(), count);

    // Copy the results back to the sparse array, stopping at the first
    // unmapped (glyph 0 / .notdef) code point.
    unsigned int done;
    for (done = 0; done < count && glyph[done] != 0; done++) {
        *glyphs = glyph[done];
        glyphs = SkTAddOffset<hb_codepoint_t>(glyphs, glyph_stride);
    }
    // return 'done' to allow HarfBuzz to synthesize with NFC and spaces, return 'count' to avoid
    return done;
}
// HarfBuzz callback: horizontal advance of a single glyph, rounded to whole
// pixels when the font is not sub-pixel positioned.
hb_position_t skhb_glyph_h_advance(hb_font_t* hb_font,
                                   void* font_data,
                                   hb_codepoint_t hbGlyph,
                                   void* user_data) {
    SkFont& font = *reinterpret_cast<SkFont*>(font_data);

    SkScalar advance;
    SkGlyphID skGlyph = SkTo<SkGlyphID>(hbGlyph);

    font.getWidths(&skGlyph, 1, &advance);
    if (!font.isSubpixel()) {
        advance = SkScalarRoundToInt(advance);
    }
    return skhb_position(advance);
}
// HarfBuzz batch callback: horizontal advances for `count` strided glyphs.
void skhb_glyph_h_advances(hb_font_t* hb_font,
                           void* font_data,
                           unsigned count,
                           const hb_codepoint_t* glyphs,
                           unsigned int glyph_stride,
                           hb_position_t* advances,
                           unsigned int advance_stride,
                           void* user_data) {
    SkFont& font = *reinterpret_cast<SkFont*>(font_data);

    // Batch call getWidths since entry cost is not cheap.
    // Copy required because getWidths is dense and hb is strided.
    SkAutoSTMalloc<256, SkGlyphID> glyph(count);
    for (unsigned i = 0; i < count; i++) {
        glyph[i] = *glyphs;
        glyphs = SkTAddOffset<const hb_codepoint_t>(glyphs, glyph_stride);
    }
    SkAutoSTMalloc<256, SkScalar> advance(count);
    font.getWidths(glyph.get(), count, advance.get());

    // Round to whole pixels unless the font is sub-pixel positioned.
    if (!font.isSubpixel()) {
        for (unsigned i = 0; i < count; i++) {
            advance[i] = SkScalarRoundToInt(advance[i]);
        }
    }

    // Copy the results back to the sparse array.
    for (unsigned i = 0; i < count; i++) {
        *advances = skhb_position(advance[i]);
        advances = SkTAddOffset<hb_position_t>(advances, advance_stride);
    }
}
// HarfBuzz callback to retrieve glyph extents, mainly used by HarfBuzz for
// fallback mark positioning, i.e. the situation when the font does not have
// mark anchors or other mark positioning rules, but instead HarfBuzz is
// supposed to heuristically place combining marks around base glyphs. HarfBuzz
// does this by measuring "ink boxes" of glyphs, and placing them according to
// Unicode mark classes. Above, below, centered or left or right, etc.
hb_bool_t skhb_glyph_extents(hb_font_t* hb_font,
                             void* font_data,
                             hb_codepoint_t hbGlyph,
                             hb_glyph_extents_t* extents,
                             void* user_data) {
    SkFont& font = *reinterpret_cast<SkFont*>(font_data);
    SkASSERT(extents);

    SkRect sk_bounds;
    SkGlyphID skGlyph = SkTo<SkGlyphID>(hbGlyph);

    font.getWidths(&skGlyph, 1, nullptr, &sk_bounds);
    if (!font.isSubpixel()) {
        sk_bounds.set(sk_bounds.roundOut());
    }

    // Skia is y-down but HarfBuzz is y-up.
    extents->x_bearing = skhb_position(sk_bounds.fLeft);
    extents->y_bearing = skhb_position(-sk_bounds.fTop);
    extents->width = skhb_position(sk_bounds.width());
    extents->height = skhb_position(-sk_bounds.height());
    return true;
}
// True when the HarfBuzz headers are at least version x.y.z.
// Fully parenthesized so the expansion is safe inside larger preprocessor or
// C++ expressions (the previous form's top-level || chain had no outer
// parentheses and could bind incorrectly under operator precedence).
#define SK_HB_VERSION_CHECK(x, y, z) \
    ( (HB_VERSION_MAJOR >  (x)) || \
      (HB_VERSION_MAJOR == (x) && HB_VERSION_MINOR >  (y)) || \
      (HB_VERSION_MAJOR == (x) && HB_VERSION_MINOR == (y) && HB_VERSION_MICRO >= (z)) )
// Return a process-wide, immutable hb_font_funcs_t wired to the skhb_*
// callbacks above. Built once, lazily, in a thread-safe function-local static.
hb_font_funcs_t* skhb_get_font_funcs() {
    static hb_font_funcs_t* const funcs = []{
        // HarfBuzz will use the default (parent) implementation if they aren't set.
        hb_font_funcs_t* const funcs = hb_font_funcs_create();
        hb_font_funcs_set_variation_glyph_func(funcs, skhb_glyph, nullptr, nullptr);
        hb_font_funcs_set_nominal_glyph_func(funcs, skhb_nominal_glyph, nullptr, nullptr);
#if SK_HB_VERSION_CHECK(2, 0, 0)
        // Batch nominal-glyph lookup only exists from HarfBuzz 2.0.
        hb_font_funcs_set_nominal_glyphs_func(funcs, skhb_nominal_glyphs, nullptr, nullptr);
#else
        sk_ignore_unused_variable(skhb_nominal_glyphs);
#endif
        hb_font_funcs_set_glyph_h_advance_func(funcs, skhb_glyph_h_advance, nullptr, nullptr);
#if SK_HB_VERSION_CHECK(1, 8, 6)
        // Batch advance lookup only exists from HarfBuzz 1.8.6.
        hb_font_funcs_set_glyph_h_advances_func(funcs, skhb_glyph_h_advances, nullptr, nullptr);
#else
        sk_ignore_unused_variable(skhb_glyph_h_advances);
#endif
        hb_font_funcs_set_glyph_extents_func(funcs, skhb_glyph_extents, nullptr, nullptr);
        hb_font_funcs_make_immutable(funcs);
        return funcs;
    }();
    SkASSERT(funcs);
    return funcs;
}
// HarfBuzz callback: fetch a raw font table from the SkTypeface passed as
// user_data. The returned blob takes ownership of the copied table data and
// unrefs it when HarfBuzz destroys the blob.
hb_blob_t* skhb_get_table(hb_face_t* face, hb_tag_t tag, void* user_data) {
    SkTypeface& typeface = *reinterpret_cast<SkTypeface*>(user_data);

    auto data = typeface.copyTableData(tag);
    if (!data) {
        return nullptr;  // table not present in this typeface
    }
    SkData* rawData = data.release();
    return hb_blob_create(reinterpret_cast<char*>(rawData->writable_data()), rawData->size(),
                          HB_MEMORY_MODE_READONLY, rawData, [](void* ctx) {
                              SkSafeUnref(((SkData*)ctx));
                          });
}
// Wrap a font stream in an immutable HarfBuzz blob. Memory-backed streams are
// wrapped zero-copy (the blob keeps the asset alive); otherwise the stream
// contents are copied into a heap buffer owned by the blob.
HBBlob stream_to_blob(std::unique_ptr<SkStreamAsset> asset) {
    size_t size = asset->getLength();
    HBBlob blob;
    if (const void* base = asset->getMemoryBase()) {
        blob.reset(hb_blob_create((char*)base, SkToUInt(size),
                                  HB_MEMORY_MODE_READONLY, asset.release(),
                                  [](void* p) { delete (SkStreamAsset*)p; }));
    } else {
        // SkDebugf("Extra SkStreamAsset copy\n");
        void* ptr = size ? sk_malloc_throw(size) : nullptr;
        asset->read(ptr, size);
        blob.reset(hb_blob_create((char*)ptr, SkToUInt(size),
                                  HB_MEMORY_MODE_READONLY, ptr, sk_free));
    }
    SkASSERT(blob);
    hb_blob_make_immutable(blob.get());
    return blob;
}
// Debug-only key used to tag an hb_face_t with the SkTypeface it was built
// from, so create_hb_font can assert face/font consistency.
SkDEBUGCODE(static hb_user_data_key_t gDataIdKey;)
// Build an hb_face_t for a typeface: zero-copy from the typeface's stream
// when it is memory-backed, otherwise via per-table callbacks.
HBFace create_hb_face(const SkTypeface& typeface) {
    int index;
    std::unique_ptr<SkStreamAsset> typefaceAsset = typeface.openStream(&index);
    HBFace face;
    if (typefaceAsset && typefaceAsset->getMemoryBase()) {
        HBBlob blob(stream_to_blob(std::move(typefaceAsset)));
        face.reset(hb_face_create(blob.get(), (unsigned)index));
    } else {
        // Table-callback path: the face keeps a ref on the typeface and
        // unrefs it when destroyed.
        face.reset(hb_face_create_for_tables(
            skhb_get_table,
            const_cast<SkTypeface*>(SkRef(&typeface)),
            [](void* user_data){ SkSafeUnref(reinterpret_cast<SkTypeface*>(user_data)); }));
    }
    SkASSERT(face);
    if (!face) {
        return nullptr;
    }
    hb_face_set_index(face.get(), (unsigned)index);
    hb_face_set_upem(face.get(), typeface.getUnitsPerEm());

    // Debug-only tag linking this face back to its typeface.
    SkDEBUGCODE(
        hb_face_set_user_data(face.get(), &gDataIdKey, const_cast<SkTypeface*>(&typeface),
                              nullptr, false);
    )

    return face;
}
// Build an hb_font_t for an SkFont on top of an hb_face_t: an OpenType base
// font carries any variation coordinates, and a sub-font overrides metrics
// callbacks with the Skia-backed implementations above.
HBFont create_hb_font(const SkFont& font, const HBFace& face) {
    // In debug builds, verify the face was built from this font's typeface.
    SkDEBUGCODE(
        void* dataId = hb_face_get_user_data(face.get(), &gDataIdKey);
        SkASSERT(dataId == font.getTypeface());
    )

    HBFont otFont(hb_font_create(face.get()));
    SkASSERT(otFont);
    if (!otFont) {
        return nullptr;
    }
    hb_ot_font_set_funcs(otFont.get());
    // Forward the typeface's variation design position, if any.
    int axis_count = font.getTypeface()->getVariationDesignPosition(nullptr, 0);
    if (axis_count > 0) {
        SkAutoSTMalloc<4, SkFontArguments::VariationPosition::Coordinate> axis_values(axis_count);
        if (font.getTypeface()->getVariationDesignPosition(axis_values, axis_count) == axis_count) {
            hb_font_set_variations(otFont.get(),
                                   reinterpret_cast<hb_variation_t*>(axis_values.get()),
                                   axis_count);
        }
    }

    // Creating a sub font means that non-available functions
    // are found from the parent.
    HBFont skFont(hb_font_create_sub_font(otFont.get()));
    // The sub-font owns a heap copy of the SkFont, freed by the destroy hook.
    hb_font_set_funcs(skFont.get(), skhb_get_font_funcs(),
                      reinterpret_cast<void *>(new SkFont(font)),
                      [](void* user_data){ delete reinterpret_cast<SkFont*>(user_data); });
    int scale = skhb_position(font.getSize());
    hb_font_set_scale(skFont.get(), scale, scale);

    return skFont;
}
/** Replaces invalid utf-8 sequences with REPLACEMENT CHARACTER U+FFFD. */
static inline SkUnichar utf8_next(const char** ptr, const char* end) {
    const SkUnichar val = SkUTF::NextUTF8(ptr, end);
    if (val < 0) {
        return 0xFFFD;  // REPLACEMENT CHARACTER
    }
    return val;
}
// BiDi run iterator over UTF-8 text, driven by an SkBidiIterator whose
// positions are in UTF-16 code units. Each consume() advances to the end of
// the current run of constant bidi level, walking the UTF-8 pointer and the
// UTF-16 position in lock-step.
class SkUnicodeBidiRunIterator final : public SkShaper::BiDiRunIterator {
public:
    SkUnicodeBidiRunIterator(const char* utf8, const char* end, SkUnicodeBidi bidi)
        : fBidi(std::move(bidi))
        , fEndOfCurrentRun(utf8)
        , fBegin(utf8)
        , fEnd(end)
        , fUTF16LogicalPosition(0)
        , fLevel(SkBidiIterator::kLTR)
    {}
    void consume() override {
        SkASSERT(fUTF16LogicalPosition < fBidi->getLength());
        int32_t endPosition = fBidi->getLength();
        fLevel = fBidi->getLevelAt(fUTF16LogicalPosition);
        // Advance one code point; ToUTF16(u) is its length in UTF-16 units.
        SkUnichar u = utf8_next(&fEndOfCurrentRun, fEnd);
        fUTF16LogicalPosition += SkUTF::ToUTF16(u);
        SkBidiIterator::Level level;
        // Extend the run while the bidi level stays the same.
        while (fUTF16LogicalPosition < endPosition) {
            level = fBidi->getLevelAt(fUTF16LogicalPosition);
            if (level != fLevel) {
                break;
            }
            u = utf8_next(&fEndOfCurrentRun, fEnd);
            fUTF16LogicalPosition += SkUTF::ToUTF16(u);
        }
    }
    size_t endOfCurrentRun() const override {
        // Byte offset (in UTF-8) of the end of the current run.
        return fEndOfCurrentRun - fBegin;
    }
    bool atEnd() const override {
        return fUTF16LogicalPosition == fBidi->getLength();
    }
    SkBidiIterator::Level currentLevel() const override {
        return fLevel;
    }
private:
    SkUnicodeBidi fBidi;                  // owns the underlying bidi iterator
    char const * fEndOfCurrentRun;        // UTF-8 cursor, end of current run
    char const * const fBegin;            // start of the UTF-8 text
    char const * const fEnd;              // one past the end of the UTF-8 text
    int32_t fUTF16LogicalPosition;        // cursor in UTF-16 code units
    SkBidiIterator::Level fLevel;         // level of the current run
};
// Script run iterator over UTF-8 text using ICU script lookup mapped to
// HarfBuzz scripts. INHERITED/COMMON code points are merged into the
// surrounding script run rather than starting a run of their own.
class HbIcuScriptRunIterator final : public SkShaper::ScriptRunIterator {
public:
    HbIcuScriptRunIterator(const char* utf8, size_t utf8Bytes)
        : fCurrent(utf8), fBegin(utf8), fEnd(fCurrent + utf8Bytes)
        , fCurrentScript(HB_SCRIPT_UNKNOWN)
    {}
    // Map a code point to its HarfBuzz script via ICU; UNKNOWN on ICU error.
    static hb_script_t hb_script_from_icu(SkUnichar u) {
        UErrorCode status = U_ZERO_ERROR;
        UScriptCode scriptCode = uscript_getScript(u, &status);

        if (U_FAILURE (status)) {
            return HB_SCRIPT_UNKNOWN;
        }

        return hb_icu_script_to_script(scriptCode);
    }
    void consume() override {
        SkASSERT(fCurrent < fEnd);
        SkUnichar u = utf8_next(&fCurrent, fEnd);
        fCurrentScript = hb_script_from_icu(u);
        while (fCurrent < fEnd) {
            const char* prev = fCurrent;
            u = utf8_next(&fCurrent, fEnd);
            const hb_script_t script = hb_script_from_icu(u);
            if (script != fCurrentScript) {
                if (fCurrentScript == HB_SCRIPT_INHERITED || fCurrentScript == HB_SCRIPT_COMMON) {
                    // Run started with neutral characters: adopt this script.
                    fCurrentScript = script;
                } else if (script == HB_SCRIPT_INHERITED || script == HB_SCRIPT_COMMON) {
                    // Neutral character inside a run: keep the run's script.
                    continue;
                } else {
                    // Genuine script change: back up and end the run here.
                    fCurrent = prev;
                    break;
                }
            }
        }
        // A run that stayed entirely INHERITED is reported as COMMON.
        if (fCurrentScript == HB_SCRIPT_INHERITED) {
            fCurrentScript = HB_SCRIPT_COMMON;
        }
    }
    size_t endOfCurrentRun() const override {
        return fCurrent - fBegin;
    }
    bool atEnd() const override {
        return fCurrent == fEnd;
    }
    SkFourByteTag currentScript() const override {
        return SkSetFourByteTag(HB_UNTAG(fCurrentScript));
    }
private:
    char const * fCurrent;           // UTF-8 cursor, end of current run
    char const * const fBegin;       // start of the UTF-8 text
    char const * const fEnd;         // one past the end of the UTF-8 text
    hb_script_t fCurrentScript;      // script of the current run
};
// Priority queue over several RunIterators that advances them together:
// advanceRuns() consumes every iterator whose current run ends at the
// smallest end offset, so endOfCurrentRun() is the next common segment
// boundary across all iterators.
class RunIteratorQueue {
public:
    // Lower priority values win ties at equal run-end offsets.
    void insert(SkShaper::RunIterator* runIterator, int priority) {
        fEntries.insert({runIterator, priority});
    }
    bool advanceRuns() {
        const SkShaper::RunIterator* leastRun = fEntries.peek().runIterator;
        if (leastRun->atEnd()) {
            // All iterators end at the same offset, so if the least one is
            // done, every one of them must be.
            SkASSERT(this->allRunsAreAtEnd());
            return false;
        }
        const size_t leastEnd = leastRun->endOfCurrentRun();
        SkShaper::RunIterator* currentRun = nullptr;
        SkDEBUGCODE(size_t previousEndOfCurrentRun);
        // Consume every iterator whose run ends at (or before) leastEnd,
        // re-inserting it with its new run end.
        while ((currentRun = fEntries.peek().runIterator)->endOfCurrentRun() <= leastEnd) {
            int priority = fEntries.peek().priority;
            fEntries.pop();
            SkDEBUGCODE(previousEndOfCurrentRun = currentRun->endOfCurrentRun());
            currentRun->consume();
            SkASSERT(previousEndOfCurrentRun < currentRun->endOfCurrentRun());
            fEntries.insert({currentRun, priority});
        }
        return true;
    }
    // End offset of the current common segment.
    size_t endOfCurrentRun() const {
        return fEntries.peek().runIterator->endOfCurrentRun();
    }
private:
    bool allRunsAreAtEnd() const {
        for (int i = 0; i < fEntries.count(); ++i) {
            if (!fEntries.at(i).runIterator->atEnd()) {
                return false;
            }
        }
        return true;
    }
    struct Entry {
        SkShaper::RunIterator* runIterator;
        int priority;
    };
    // Order by run end, then by priority for deterministic tie-breaking.
    static bool CompareEntry(Entry const& a, Entry const& b) {
        size_t aEnd = a.runIterator->endOfCurrentRun();
        size_t bEnd = b.runIterator->endOfCurrentRun();
        return aEnd  < bEnd || (aEnd == bEnd && a.priority < b.priority);
    }
    SkTDPQueue<Entry, CompareEntry> fEntries;
};
// One positioned glyph produced by shaping, in logical order.
struct ShapedGlyph {
    SkGlyphID fID;               // glyph id in the run's font
    uint32_t fCluster;           // UTF-8 byte offset of the source cluster
    SkPoint fOffset;             // offset from the pen position
    SkVector fAdvance;           // pen advance after this glyph
    bool fMayLineBreakBefore;    // soft (line-break-opportunity) break
    bool fMustLineBreakBefore;   // hard (mandatory) break
    bool fHasVisual;             // glyph has visible ink
    bool fGraphemeBreakBefore;   // grapheme-cluster boundary precedes glyph
    bool fUnsafeToBreak;         // HarfBuzz: re-shape if broken here
};
// A shaped run: the glyphs for one contiguous UTF-8 range in a single font
// at a single bidi level.
struct ShapedRun {
    ShapedRun(SkShaper::RunHandler::Range utf8Range, const SkFont& font, SkBidiIterator::Level level,
              std::unique_ptr<ShapedGlyph[]> glyphs, size_t numGlyphs, SkVector advance = {0, 0})
        : fUtf8Range(utf8Range), fFont(font), fLevel(level)
        , fGlyphs(std::move(glyphs)), fNumGlyphs(numGlyphs), fAdvance(advance)
    {}

    SkShaper::RunHandler::Range fUtf8Range;  // source byte range of this run
    SkFont fFont;
    SkBidiIterator::Level fLevel;
    std::unique_ptr<ShapedGlyph[]> fGlyphs;  // fNumGlyphs entries, logical order
    size_t fNumGlyphs;
    SkVector fAdvance;                       // total advance of the run
};
// One laid-out line: its runs (logical order) and accumulated advance.
struct ShapedLine {
    SkTArray<ShapedRun> runs;
    SkVector fAdvance = { 0, 0 };
};
// Even bidi embedding levels are left-to-right; odd levels are right-to-left.
constexpr bool is_LTR(SkBidiIterator::Level level) {
    return !(level & 1);
}
// Write the glyphs [startGlyphIndex, endGlyphIndex) of a run into the
// handler's run buffer and commit it. RTL runs are emitted reversed so the
// output is always visual left-to-right.
void append(SkShaper::RunHandler* handler, const SkShaper::RunHandler::RunInfo& runInfo,
            const ShapedRun& run, size_t startGlyphIndex, size_t endGlyphIndex) {
    SkASSERT(startGlyphIndex <= endGlyphIndex);
    const size_t glyphLen = endGlyphIndex - startGlyphIndex;

    const auto buffer = handler->runBuffer(runInfo);
    SkASSERT(buffer.glyphs);
    SkASSERT(buffer.positions);

    SkVector advance = {0,0};
    for (size_t i = 0; i < glyphLen; i++) {
        // Glyphs are in logical order, but output ltr since PDF readers seem to expect that.
        const ShapedGlyph& glyph = run.fGlyphs[is_LTR(run.fLevel) ? startGlyphIndex + i
                                                                  : endGlyphIndex - 1 - i];
        buffer.glyphs[i] = glyph.fID;
        // With a separate offsets array, positions carry only the pen track;
        // otherwise fold the glyph offset into the position.
        if (buffer.offsets) {
            buffer.positions[i] = advance + buffer.point;
            buffer.offsets[i] = glyph.fOffset;
        } else {
            buffer.positions[i] = advance + buffer.point + glyph.fOffset;
        }
        if (buffer.clusters) {
            buffer.clusters[i] = glyph.fCluster;
        }
        advance += glyph.fAdvance;
    }
    handler->commitRunBuffer(runInfo);
}
void emit(const ShapedLine& line, SkShaper::RunHandler* handler) {
// Reorder the runs and glyphs per line and write them out.
handler->beginLine();
int numRuns = line.runs.size();
SkAutoSTMalloc<4, SkBidiIterator::Level> runLevels(numRuns);
for (int i = 0; i < numRuns; ++i) {
runLevels[i] = line.runs[i].fLevel;
}
SkAutoSTMalloc<4, int32_t> logicalFromVisual(numRuns);
SkBidiIterator::ReorderVisual(runLevels, numRuns, logicalFromVisual);
for (int i = 0; i < numRuns; ++i) {
int logicalIndex = logicalFromVisual[i];
const auto& run = line.runs[logicalIndex];
const SkShaper::RunHandler::RunInfo info = {
run.fFont,
run.fLevel,
run.fAdvance,
run.fNumGlyphs,
run.fUtf8Range
};
handler->runInfo(info);
}
handler->commitRunInfo();
for (int i = 0; i < numRuns; ++i) {
int logicalIndex = logicalFromVisual[i];
const auto& run = line.runs[logicalIndex];
const SkShaper::RunHandler::RunInfo info = {
run.fFont,
run.fLevel,
run.fAdvance,
run.fNumGlyphs,
run.fUtf8Range
};
append(handler, info, run, 0, run.fNumGlyphs);
}
handler->commitLine();
}
// Flat iterator over every glyph of a sequence of runs, in (run, glyph)
// order. next() advances past the current glyph; current() returns nullptr
// once all runs are exhausted.
struct ShapedRunGlyphIterator {
    ShapedRunGlyphIterator(const SkTArray<ShapedRun>& origRuns)
        : fRuns(&origRuns), fRunIndex(0), fGlyphIndex(0)
    { }
    ShapedRunGlyphIterator(const ShapedRunGlyphIterator& that) = default;
    ShapedRunGlyphIterator& operator=(const ShapedRunGlyphIterator& that) = default;
    bool operator==(const ShapedRunGlyphIterator& that) const {
        return fRuns == that.fRuns &&
               fRunIndex == that.fRunIndex &&
               fGlyphIndex == that.fGlyphIndex;
    }
    bool operator!=(const ShapedRunGlyphIterator& that) const {
        return fRuns != that.fRuns ||
               fRunIndex != that.fRunIndex ||
               fGlyphIndex != that.fGlyphIndex;
    }
    // Advance to the next glyph, rolling over to the next run when the
    // current one is exhausted; nullptr at the end of the last run.
    ShapedGlyph* next() {
        const SkTArray<ShapedRun>& runs = *fRuns;
        SkASSERT(fRunIndex < runs.count());
        SkASSERT(fGlyphIndex < runs[fRunIndex].fNumGlyphs);

        ++fGlyphIndex;
        if (fGlyphIndex == runs[fRunIndex].fNumGlyphs) {
            fGlyphIndex = 0;
            ++fRunIndex;
            if (fRunIndex >= runs.count()) {
                return nullptr;
            }
        }
        return &runs[fRunIndex].fGlyphs[fGlyphIndex];
    }
    // Glyph at the current position, or nullptr when past the last run.
    ShapedGlyph* current() {
        const SkTArray<ShapedRun>& runs = *fRuns;
        if (fRunIndex >= runs.count()) {
            return nullptr;
        }
        return &runs[fRunIndex].fGlyphs[fGlyphIndex];
    }

    const SkTArray<ShapedRun>* fRuns;  // not owned
    int fRunIndex;
    size_t fGlyphIndex;
};
// Base class for the HarfBuzz-backed shapers. Implements the SkShaper
// entry points by segmenting the text into runs (bidi / language / script /
// font) and delegating line layout to the subclass's wrap() strategy.
class ShaperHarfBuzz : public SkShaper {
public:
    ShaperHarfBuzz(std::unique_ptr<SkUnicode>,
                   SkUnicodeBreak line,
                   SkUnicodeBreak grapheme,
                   HBBuffer,
                   sk_sp<SkFontMgr>);

protected:
    std::unique_ptr<SkUnicode> fUnicode;
    SkUnicodeBreak fLineBreakIterator;       // shared; reset per use
    SkUnicodeBreak fGraphemeBreakIterator;   // shared; reset per use

    // Shape [utf8Start, utf8End) (a subrange of the full utf8 text) into one
    // ShapedRun using the current state of the run iterators.
    ShapedRun shape(const char* utf8, size_t utf8Bytes,
                    const char* utf8Start,
                    const char* utf8End,
                    const BiDiRunIterator&,
                    const LanguageRunIterator&,
                    const ScriptRunIterator&,
                    const FontRunIterator&,
                    const Feature*, size_t featuresSize) const;

private:
    const sk_sp<SkFontMgr> fFontMgr;
    HBBuffer fBuffer;                  // reused HarfBuzz buffer
    hb_language_t fUndefinedLanguage;  // "und" fallback language tag

    // Convenience overload: builds default run iterators from srcFont.
    void shape(const char* utf8, size_t utf8Bytes,
               const SkFont&,
               bool leftToRight,
               SkScalar width,
               RunHandler*) const override;

    // Overload without OpenType features.
    void shape(const char* utf8Text, size_t textBytes,
               FontRunIterator&,
               BiDiRunIterator&,
               ScriptRunIterator&,
               LanguageRunIterator&,
               SkScalar width,
               RunHandler*) const override;

    // Full overload: explicit iterators plus OpenType features.
    void shape(const char* utf8Text, size_t textBytes,
               FontRunIterator&,
               BiDiRunIterator&,
               ScriptRunIterator&,
               LanguageRunIterator&,
               const Feature*, size_t featuresSize,
               SkScalar width,
               RunHandler*) const override;

    // Line-breaking strategy supplied by subclasses.
    virtual void wrap(char const * const utf8, size_t utf8Bytes,
                      const BiDiRunIterator&,
                      const LanguageRunIterator&,
                      const ScriptRunIterator&,
                      const FontRunIterator&,
                      RunIteratorQueue& runSegmenter,
                      const Feature*, size_t featuresSize,
                      SkScalar width,
                      RunHandler*) const = 0;
};
// Wrap strategy that re-shapes candidate line breaks ("correct" mode in
// MakeHarfBuzz).
class ShaperDrivenWrapper : public ShaperHarfBuzz {
public:
    using ShaperHarfBuzz::ShaperHarfBuzz;
private:
    void wrap(char const * const utf8, size_t utf8Bytes,
              const BiDiRunIterator&,
              const LanguageRunIterator&,
              const ScriptRunIterator&,
              const FontRunIterator&,
              RunIteratorQueue& runSegmenter,
              const Feature*, size_t featuresSize,
              SkScalar width,
              RunHandler*) const override;
};
// Wrap strategy that shapes everything once, then breaks lines afterwards
// (the default mode in MakeHarfBuzz).
class ShapeThenWrap : public ShaperHarfBuzz {
public:
    using ShaperHarfBuzz::ShaperHarfBuzz;
private:
    void wrap(char const * const utf8, size_t utf8Bytes,
              const BiDiRunIterator&,
              const LanguageRunIterator&,
              const ScriptRunIterator&,
              const FontRunIterator&,
              RunIteratorQueue& runSegmenter,
              const Feature*, size_t featuresSize,
              SkScalar width,
              RunHandler*) const override;
};
// Wrap strategy that performs no line breaking and no visual reordering.
class ShapeDontWrapOrReorder : public ShaperHarfBuzz {
public:
    using ShaperHarfBuzz::ShaperHarfBuzz;
private:
    void wrap(char const * const utf8, size_t utf8Bytes,
              const BiDiRunIterator&,
              const LanguageRunIterator&,
              const ScriptRunIterator&,
              const FontRunIterator&,
              RunIteratorQueue& runSegmenter,
              const Feature*, size_t featuresSize,
              SkScalar width,
              RunHandler*) const override;
};
// Factory shared by the public Make functions: builds the HarfBuzz buffer
// and SkUnicode break iterators, returning the "correct" (shaper-driven) or
// faster shape-then-wrap variant. nullptr on any setup failure.
static std::unique_ptr<SkShaper> MakeHarfBuzz(sk_sp<SkFontMgr> fontmgr, bool correct) {
    HBBuffer buffer(hb_buffer_create());
    if (!buffer) {
        SkDEBUGF("Could not create hb_buffer");
        return nullptr;
    }

    auto unicode = SkUnicode::Make();
    if (!unicode) {
        return nullptr;
    }

    // NOTE(review): the break iterators are created with the "th" locale --
    // presumably so Thai (no-space) line breaking works; confirm intent.
    auto lineIter = unicode->makeBreakIterator("th", SkUnicode::BreakType::kLines);
    if (!lineIter) {
        return nullptr;
    }
    auto graphIter = unicode->makeBreakIterator("th", SkUnicode::BreakType::kGraphemes);
    if (!graphIter) {
        return nullptr;
    }

    if (correct) {
        return std::make_unique<ShaperDrivenWrapper>(std::move(unicode),
            std::move(lineIter), std::move(graphIter), std::move(buffer), std::move(fontmgr));
    } else {
        return std::make_unique<ShapeThenWrap>(std::move(unicode),
            std::move(lineIter), std::move(graphIter), std::move(buffer), std::move(fontmgr));
    }
}
// Takes ownership of all shaping resources; "und" (undetermined) is cached
// as the fallback HarfBuzz language tag.
ShaperHarfBuzz::ShaperHarfBuzz(std::unique_ptr<SkUnicode> unicode,
    SkUnicodeBreak lineIter, SkUnicodeBreak graphIter, HBBuffer buffer, sk_sp<SkFontMgr> fontmgr)
    : fUnicode(std::move(unicode))
    , fLineBreakIterator(std::move(lineIter))
    , fGraphemeBreakIterator(std::move(graphIter))
    , fFontMgr(std::move(fontmgr))
    , fBuffer(std::move(buffer))
    , fUndefinedLanguage(hb_language_from_string("und", -1))
{ }
// Convenience overload: build default bidi/language/script/font run
// iterators for the text and forward to the full shape(). Returns silently
// (shaping nothing) if any iterator cannot be constructed.
void ShaperHarfBuzz::shape(const char* utf8, size_t utf8Bytes,
                           const SkFont& srcFont,
                           bool leftToRight,
                           SkScalar width,
                           RunHandler* handler) const
{
    SkBidiIterator::Level defaultLevel = leftToRight ? SkBidiIterator::kLTR : SkBidiIterator::kRTL;
    std::unique_ptr<BiDiRunIterator> bidi(MakeSkUnicodeBidiRunIterator(fUnicode.get(),
                                                                      utf8,
                                                                      utf8Bytes,
                                                                      defaultLevel));
    if (!bidi) {
        return;
    }

    std::unique_ptr<LanguageRunIterator> language(MakeStdLanguageRunIterator(utf8, utf8Bytes));
    if (!language) {
        return;
    }

    std::unique_ptr<ScriptRunIterator> script(MakeHbIcuScriptRunIterator(utf8, utf8Bytes));
    if (!script) {
        return;
    }

    // Fall back to the default font manager when none was supplied.
    std::unique_ptr<FontRunIterator> font(
                MakeFontMgrRunIterator(utf8, utf8Bytes, srcFont,
                                       fFontMgr ? fFontMgr : SkFontMgr::RefDefault()));
    if (!font) {
        return;
    }

    this->shape(utf8, utf8Bytes, *font, *bidi, *script, *language, width, handler);
}
// Overload without OpenType features: forwards with an empty feature list.
void ShaperHarfBuzz::shape(const char* utf8, size_t utf8Bytes,
                           FontRunIterator& font,
                           BiDiRunIterator& bidi,
                           ScriptRunIterator& script,
                           LanguageRunIterator& language,
                           SkScalar width,
                           RunHandler* handler) const
{
    this->shape(utf8, utf8Bytes, font, bidi, script, language, nullptr, 0, width, handler);
}
// Full entry point: enqueue all run iterators (priority breaks ties at equal
// run ends) and hand off to the subclass's wrap() strategy.
void ShaperHarfBuzz::shape(const char* utf8, size_t utf8Bytes,
                           FontRunIterator& font,
                           BiDiRunIterator& bidi,
                           ScriptRunIterator& script,
                           LanguageRunIterator& language,
                           const Feature* features, size_t featuresSize,
                           SkScalar width,
                           RunHandler* handler) const
{
    SkASSERT(handler);
    RunIteratorQueue runSegmenter;
    runSegmenter.insert(&font,     3); // The font iterator is always run last in case of tie.
    runSegmenter.insert(&bidi,     2);
    runSegmenter.insert(&script,   1);
    runSegmenter.insert(&language, 0);

    this->wrap(utf8, utf8Bytes, bidi, language, script, font, runSegmenter,
               features, featuresSize, width, handler);
}
void ShaperDrivenWrapper::wrap(char const * const utf8, size_t utf8Bytes,
                               const BiDiRunIterator& bidi,
                               const LanguageRunIterator& language,
                               const ScriptRunIterator& script,
                               const FontRunIterator& font,
                               RunIteratorQueue& runSegmenter,
                               const Feature* features, size_t featuresSize,
                               SkScalar width,
                               RunHandler* handler) const
{
    // Shaper-driven line wrapping: for each segmenter item, shape candidate
    // prefixes up to each potential line-break position, score them against
    // the remaining line width, and keep the best. A full shaping of the
    // remaining item (the "model") is cached so candidates whose end falls on
    // a shaper-declared safe-to-break cluster can reuse its glyphs instead of
    // re-shaping.
    ShapedLine line;
    const char* utf8Start = nullptr;
    const char* utf8End = utf8;
    while (runSegmenter.advanceRuns()) {  // For each item
        utf8Start = utf8End;
        utf8End = utf8 + runSegmenter.endOfCurrentRun();
        // `model` holds a shaping of the whole remaining item; regenerated
        // whenever a candidate was taken that did not come from the model.
        ShapedRun model(RunHandler::Range(), SkFont(), 0, nullptr, 0);
        bool modelNeedsRegenerated = true;
        int modelGlyphOffset = 0;
        struct TextProps {
            int glyphLen = 0;
            SkVector advance = {0, 0};
        };
        // map from character position to [safe to break, glyph position, advance]
        std::unique_ptr<TextProps[]> modelText;
        int modelTextOffset = 0;
        SkVector modelAdvanceOffset = {0, 0};
        while (utf8Start < utf8End) {  // While there are still code points left in this item
            size_t utf8runLength = utf8End - utf8Start;
            if (modelNeedsRegenerated) {
                model = shape(utf8, utf8Bytes,
                              utf8Start, utf8End,
                              bidi, language, script, font,
                              features, featuresSize);
                modelGlyphOffset = 0;
                SkVector advance = {0, 0};
                modelText = std::make_unique<TextProps[]>(utf8runLength + 1);
                size_t modelStartCluster = utf8Start - utf8;
                // For every cluster the shaper marked safe to break, record
                // the glyph count and accumulated advance up to that cluster.
                for (size_t i = 0; i < model.fNumGlyphs; ++i) {
                    SkASSERT(modelStartCluster <= model.fGlyphs[i].fCluster);
                    SkASSERT(                     model.fGlyphs[i].fCluster < (size_t)(utf8End - utf8));
                    if (!model.fGlyphs[i].fUnsafeToBreak) {
                        modelText[model.fGlyphs[i].fCluster - modelStartCluster].glyphLen = i;
                        modelText[model.fGlyphs[i].fCluster - modelStartCluster].advance = advance;
                    }
                    advance += model.fGlyphs[i].fAdvance;
                }
                // Assume it is always safe to break after the end of an item
                modelText[utf8runLength].glyphLen = model.fNumGlyphs;
                modelText[utf8runLength].advance = model.fAdvance;
                modelTextOffset = 0;
                modelAdvanceOffset = {0, 0};
                modelNeedsRegenerated = false;
            }
            // TODO: break iterator per item, but just reset position if needed?
            // Maybe break iterator with model?
            if (!fLineBreakIterator->setText(utf8Start, utf8runLength)) {
                return;
            }
            SkBreakIterator& breakIterator = *fLineBreakIterator;
            ShapedRun best(RunHandler::Range(), SkFont(), 0, nullptr, 0,
                           { SK_ScalarNegativeInfinity, SK_ScalarNegativeInfinity });
            bool bestIsInvalid = true;
            bool bestUsesModelForGlyphs = false;
            SkScalar widthLeft = width - line.fAdvance.fX;
            // Try each line-break opportunity as a candidate end for this run.
            for (int32_t breakIteratorCurrent = breakIterator.next();
                 !breakIterator.isDone();
                 breakIteratorCurrent = breakIterator.next())
            {
                // TODO: if past a safe to break, future safe to break will be at least as long
                // TODO: adjust breakIteratorCurrent by ignorable whitespace
                bool candidateUsesModelForGlyphs = false;
                // Reuse the model's glyphs when this break lands on a
                // safe-to-break cluster (glyphLen != 0); otherwise re-shape
                // just the candidate prefix.
                ShapedRun candidate = [&](const TextProps& props){
                    if (props.glyphLen) {
                        candidateUsesModelForGlyphs = true;
                        return ShapedRun(RunHandler::Range(utf8Start - utf8, breakIteratorCurrent),
                                         font.currentFont(), bidi.currentLevel(),
                                         std::unique_ptr<ShapedGlyph[]>(),
                                         props.glyphLen - modelGlyphOffset,
                                         props.advance - modelAdvanceOffset);
                    } else {
                        return shape(utf8, utf8Bytes,
                                     utf8Start, utf8Start + breakIteratorCurrent,
                                     bidi, language, script, font,
                                     features, featuresSize);
                    }
                }(modelText[breakIteratorCurrent + modelTextOffset]);
                // Fitting candidates score by length (longer is better);
                // overflowing candidates score negatively by overflow amount.
                auto score = [widthLeft](const ShapedRun& run) -> SkScalar {
                    if (run.fAdvance.fX < widthLeft) {
                        return run.fUtf8Range.size();
                    } else {
                        return widthLeft - run.fAdvance.fX;
                    }
                };
                if (bestIsInvalid || score(best) < score(candidate)) {
                    best = std::move(candidate);
                    bestIsInvalid = false;
                    bestUsesModelForGlyphs = candidateUsesModelForGlyphs;
                }
            }
            // If nothing fit (best score is negative) and the line is not empty
            if (width < line.fAdvance.fX + best.fAdvance.fX && !line.runs.empty()) {
                emit(line, handler);
                line.runs.reset();
                line.fAdvance = {0, 0};
            } else {
                if (bestUsesModelForGlyphs) {
                    // Materialize the glyphs by copying this run's slice out
                    // of the cached model, then advance the model offsets.
                    best.fGlyphs = std::make_unique<ShapedGlyph[]>(best.fNumGlyphs);
                    memcpy(best.fGlyphs.get(), model.fGlyphs.get() + modelGlyphOffset,
                           best.fNumGlyphs * sizeof(ShapedGlyph));
                    modelGlyphOffset += best.fNumGlyphs;
                    modelTextOffset += best.fUtf8Range.size();
                    modelAdvanceOffset += best.fAdvance;
                } else {
                    modelNeedsRegenerated = true;
                }
                utf8Start += best.fUtf8Range.size();
                line.fAdvance += best.fAdvance;
                line.runs.emplace_back(std::move(best));
                // If item broken, emit line (prevent remainder from accidentally fitting)
                if (utf8Start != utf8End) {
                    emit(line, handler);
                    line.runs.reset();
                    line.fAdvance = {0, 0};
                }
            }
        }
    }
    emit(line, handler);
}
void ShapeThenWrap::wrap(char const * const utf8, size_t utf8Bytes,
                         const BiDiRunIterator& bidi,
                         const LanguageRunIterator& language,
                         const ScriptRunIterator& script,
                         const FontRunIterator& font,
                         RunIteratorQueue& runSegmenter,
                         const Feature* features, size_t featuresSize,
                         SkScalar width,
                         RunHandler* handler) const
{
    // Three phases: (1) shape every run once and annotate each glyph with
    // line-break and grapheme-break opportunities, (2) walk the glyphs in
    // logical order choosing break positions that fit `width`, (3) reorder
    // the runs of each line into visual order and emit them.
    SkTArray<ShapedRun> runs;
{
    if (!fLineBreakIterator->setText(utf8, utf8Bytes)) {
        return;
    }
    if (!fGraphemeBreakIterator->setText(utf8, utf8Bytes)) {
        return;
    }
    SkBreakIterator& lineBreakIterator = *fLineBreakIterator;
    SkBreakIterator& graphemeBreakIterator = *fGraphemeBreakIterator;
    const char* utf8Start = nullptr;
    const char* utf8End = utf8;
    while (runSegmenter.advanceRuns()) {
        utf8Start = utf8End;
        utf8End = utf8 + runSegmenter.endOfCurrentRun();
        runs.emplace_back(shape(utf8, utf8Bytes,
                                utf8Start, utf8End,
                                bidi, language, script, font,
                                features, featuresSize));
        ShapedRun& run = runs.back();
        // Mark break flags on the first glyph of each cluster. Both break
        // iterators only move forward, mirroring the monotone clusters.
        uint32_t previousCluster = 0xFFFFFFFF;
        for (size_t i = 0; i < run.fNumGlyphs; ++i) {
            ShapedGlyph& glyph = run.fGlyphs[i];
            int32_t glyphCluster = glyph.fCluster;
            int32_t lineBreakIteratorCurrent = lineBreakIterator.current();
            while (!lineBreakIterator.isDone() && lineBreakIteratorCurrent < glyphCluster)
            {
                lineBreakIteratorCurrent = lineBreakIterator.next();
            }
            glyph.fMayLineBreakBefore = glyph.fCluster != previousCluster &&
                                        lineBreakIteratorCurrent == glyphCluster;
            int32_t graphemeBreakIteratorCurrent = graphemeBreakIterator.current();
            while (!graphemeBreakIterator.isDone() && graphemeBreakIteratorCurrent < glyphCluster)
            {
                graphemeBreakIteratorCurrent = graphemeBreakIterator.next();
            }
            glyph.fGraphemeBreakBefore = glyph.fCluster != previousCluster &&
                                         graphemeBreakIteratorCurrent == glyphCluster;
            previousCluster = glyph.fCluster;
        }
    }
}
    // Iterate over the glyphs in logical order to find potential line lengths.
{
    /** The position of the beginning of the line. */
    ShapedRunGlyphIterator beginning(runs);
    /** The position of the candidate line break. */
    ShapedRunGlyphIterator candidateLineBreak(runs);
    SkScalar candidateLineBreakWidth = 0;
    /** The position of the candidate grapheme break. */
    ShapedRunGlyphIterator candidateGraphemeBreak(runs);
    SkScalar candidateGraphemeBreakWidth = 0;
    /** The position of the current location. */
    ShapedRunGlyphIterator current(runs);
    SkScalar currentWidth = 0;
    while (ShapedGlyph* glyph = current.current()) {
        // 'Break' at graphemes until a line boundary, then only at line boundaries.
        // Only break at graphemes if no line boundary is valid.
        if (current != beginning) {
            if (glyph->fGraphemeBreakBefore || glyph->fMayLineBreakBefore) {
                // TODO: preserve line breaks <= grapheme breaks
                // and prevent line breaks inside graphemes
                candidateGraphemeBreak = current;
                candidateGraphemeBreakWidth = currentWidth;
                if (glyph->fMayLineBreakBefore) {
                    candidateLineBreak = current;
                    candidateLineBreakWidth = currentWidth;
                }
            }
        }
        SkScalar glyphWidth = glyph->fAdvance.fX;
        // Break when overwidth, the glyph has a visual representation, and some space is used.
        if (width < currentWidth + glyphWidth && glyph->fHasVisual && candidateGraphemeBreakWidth > 0){
            // Prefer the last line-break opportunity; fall back to the last
            // grapheme break. All widths are rebased to the new line start.
            if (candidateLineBreak != beginning) {
                beginning = candidateLineBreak;
                currentWidth -= candidateLineBreakWidth;
                candidateGraphemeBreakWidth -= candidateLineBreakWidth;
                candidateLineBreakWidth = 0;
            } else if (candidateGraphemeBreak != beginning) {
                beginning = candidateGraphemeBreak;
                candidateLineBreak = beginning;
                currentWidth -= candidateGraphemeBreakWidth;
                candidateGraphemeBreakWidth = 0;
                candidateLineBreakWidth = 0;
            } else {
                SK_ABORT("");
            }
            // If still overwidth, rewind to the grapheme break and continue
            // scanning from there.
            if (width < currentWidth) {
                if (width < candidateGraphemeBreakWidth) {
                    candidateGraphemeBreak = candidateLineBreak;
                    candidateGraphemeBreakWidth = candidateLineBreakWidth;
                }
                current = candidateGraphemeBreak;
                currentWidth = candidateGraphemeBreakWidth;
            }
            glyph = beginning.current();
            if (glyph) {
                glyph->fMustLineBreakBefore = true;
            }
        } else {
            current.next();
            currentWidth += glyphWidth;
        }
    }
}
    // Reorder the runs and glyphs per line and write them out.
{
    ShapedRunGlyphIterator previousBreak(runs);
    ShapedRunGlyphIterator glyphIterator(runs);
    int previousRunIndex = -1;
    while (glyphIterator.current()) {
        const ShapedRunGlyphIterator current = glyphIterator;
        ShapedGlyph* nextGlyph = glyphIterator.next();
        if (previousRunIndex != current.fRunIndex) {
            SkFontMetrics metrics;
            runs[current.fRunIndex].fFont.getMetrics(&metrics);
            previousRunIndex = current.fRunIndex;
        }
        // Nothing can be written until the baseline is known.
        if (!(nextGlyph == nullptr || nextGlyph->fMustLineBreakBefore)) {
            continue;
        }
        // A line ends here: gather the runs it spans and compute the visual
        // (bidi-reordered) order of those runs.
        int numRuns = current.fRunIndex - previousBreak.fRunIndex + 1;
        SkAutoSTMalloc<4, SkBidiIterator::Level> runLevels(numRuns);
        for (int i = 0; i < numRuns; ++i) {
            runLevels[i] = runs[previousBreak.fRunIndex + i].fLevel;
        }
        SkAutoSTMalloc<4, int32_t> logicalFromVisual(numRuns);
        SkBidiIterator::ReorderVisual(runLevels, numRuns, logicalFromVisual);
        // step through the runs in reverse visual order and the glyphs in reverse logical order
        // until a visible glyph is found and force them to the end of the visual line.
        handler->beginLine();
        // A SubRun is the slice of a logical run that lies on this line.
        struct SubRun { const ShapedRun& run; size_t startGlyphIndex; size_t endGlyphIndex; };
        auto makeSubRun = [&runs, &previousBreak, &current, &logicalFromVisual](size_t visualIndex){
            int logicalIndex = previousBreak.fRunIndex + logicalFromVisual[visualIndex];
            const auto& run = runs[logicalIndex];
            size_t startGlyphIndex = (logicalIndex == previousBreak.fRunIndex)
                                   ? previousBreak.fGlyphIndex
                                   : 0;
            size_t endGlyphIndex = (logicalIndex == current.fRunIndex)
                                 ? current.fGlyphIndex + 1
                                 : run.fNumGlyphs;
            return SubRun{ run, startGlyphIndex, endGlyphIndex };
        };
        auto makeRunInfo = [](const SubRun& sub) {
            uint32_t startUtf8 = sub.run.fGlyphs[sub.startGlyphIndex].fCluster;
            uint32_t endUtf8 = (sub.endGlyphIndex < sub.run.fNumGlyphs)
                             ? sub.run.fGlyphs[sub.endGlyphIndex].fCluster
                             : sub.run.fUtf8Range.end();
            SkVector advance = SkVector::Make(0, 0);
            for (size_t i = sub.startGlyphIndex; i < sub.endGlyphIndex; ++i) {
                advance += sub.run.fGlyphs[i].fAdvance;
            }
            return RunHandler::RunInfo{
                sub.run.fFont,
                sub.run.fLevel,
                advance,
                sub.endGlyphIndex - sub.startGlyphIndex,
                RunHandler::Range(startUtf8, endUtf8 - startUtf8)
            };
        };
        for (int i = 0; i < numRuns; ++i) {
            handler->runInfo(makeRunInfo(makeSubRun(i)));
        }
        handler->commitRunInfo();
        for (int i = 0; i < numRuns; ++i) {
            SubRun sub = makeSubRun(i);
            append(handler, makeRunInfo(sub), sub.run, sub.startGlyphIndex, sub.endGlyphIndex);
        }
        handler->commitLine();
        previousRunIndex = -1;
        previousBreak = glyphIterator;
    }
}
}
void ShapeDontWrapOrReorder::wrap(char const * const utf8, size_t utf8Bytes,
const BiDiRunIterator& bidi,
const LanguageRunIterator& language,
const ScriptRunIterator& script,
const FontRunIterator& font,
RunIteratorQueue& runSegmenter,
const Feature* features, size_t featuresSize,
SkScalar width,
RunHandler* handler) const
{
sk_ignore_unused_variable(width);
SkTArray<ShapedRun> runs;
const char* utf8Start = nullptr;
const char* utf8End = utf8;
while (runSegmenter.advanceRuns()) {
utf8Start = utf8End;
utf8End = utf8 + runSegmenter.endOfCurrentRun();
runs.emplace_back(shape(utf8, utf8Bytes,
utf8Start, utf8End,
bidi, language, script, font,
features, featuresSize));
}
handler->beginLine();
for (const auto& run : runs) {
const RunHandler::RunInfo info = {
run.fFont,
run.fLevel,
run.fAdvance,
run.fNumGlyphs,
run.fUtf8Range
};
handler->runInfo(info);
}
handler->commitRunInfo();
for (const auto& run : runs) {
const RunHandler::RunInfo info = {
run.fFont,
run.fLevel,
run.fAdvance,
run.fNumGlyphs,
run.fUtf8Range
};
append(handler, info, run, 0, run.fNumGlyphs);
}
handler->commitLine();
}
ShapedRun ShaperHarfBuzz::shape(char const * const utf8,
                                size_t const utf8Bytes,
                                char const * const utf8Start,
                                char const * const utf8End,
                                const BiDiRunIterator& bidi,
                                const LanguageRunIterator& language,
                                const ScriptRunIterator& script,
                                const FontRunIterator& font,
                                Feature const * const features, size_t const featuresSize) const
{
    // Shape one run [utf8Start, utf8End) of the text with HarfBuzz, using the
    // current state of the bidi/language/script/font iterators. Returns a run
    // with zero glyphs (but the correct utf8 range) if the font or shaping
    // result is unusable.
    size_t utf8runLength = utf8End - utf8Start;
    ShapedRun run(RunHandler::Range(utf8Start - utf8, utf8runLength),
                  font.currentFont(), bidi.currentLevel(), nullptr, 0);
    hb_buffer_t* buffer = fBuffer.get();
    SkAutoTCallVProc<hb_buffer_t, hb_buffer_clear_contents> autoClearBuffer(buffer);
    hb_buffer_set_content_type(buffer, HB_BUFFER_CONTENT_TYPE_UNICODE);
    hb_buffer_set_cluster_level(buffer, HB_BUFFER_CLUSTER_LEVEL_MONOTONE_CHARACTERS);
    // Documentation for HB_BUFFER_FLAG_BOT/EOT at 763e5466c0a03a7c27020e1e2598e488612529a7.
    // Currently BOT forces a dotted circle when first codepoint is a mark; EOT has no effect.
    // Avoid adding dotted circle, re-evaluate if BOT/EOT change. See https://skbug.com/9618.
    // hb_buffer_set_flags(buffer, HB_BUFFER_FLAG_BOT | HB_BUFFER_FLAG_EOT);
    // Add precontext.
    hb_buffer_add_utf8(buffer, utf8, utf8Start - utf8, utf8Start - utf8, 0);
    // Populate the hb_buffer directly with utf8 cluster indexes.
    const char* utf8Current = utf8Start;
    while (utf8Current < utf8End) {
        unsigned int cluster = utf8Current - utf8;
        hb_codepoint_t u = utf8_next(&utf8Current, utf8End);
        hb_buffer_add(buffer, u, cluster);
    }
    // Add postcontext.
    hb_buffer_add_utf8(buffer, utf8Current, utf8 + utf8Bytes - utf8Current, 0, 0);
    hb_direction_t direction = is_LTR(bidi.currentLevel()) ? HB_DIRECTION_LTR:HB_DIRECTION_RTL;
    hb_buffer_set_direction(buffer, direction);
    hb_buffer_set_script(buffer, hb_script_from_iso15924_tag((hb_tag_t)script.currentScript()));
    // Buffers with HB_LANGUAGE_INVALID race since hb_language_get_default is not thread safe.
    // The user must provide a language, but may provide data hb_language_from_string cannot use.
    // Use "und" for the undefined language in this case (RFC5646 4.1 5).
    hb_language_t hbLanguage = hb_language_from_string(language.currentLanguage(), -1);
    if (hbLanguage == HB_LANGUAGE_INVALID) {
        hbLanguage = fUndefinedLanguage;
    }
    hb_buffer_set_language(buffer, hbLanguage);
    hb_buffer_guess_segment_properties(buffer);
    // TODO: better cache HBFace (data) / hbfont (typeface)
    // An HBFace is expensive (it sanitizes the bits).
    // An HBFont is fairly inexpensive.
    // An HBFace is actually tied to the data, not the typeface.
    // The size of 100 here is completely arbitrary and used to match libtxt.
    static SkLRUCache<SkFontID, HBFace> gHBFaceCache(100);
    static SkMutex gHBFaceCacheMutex;
    HBFont hbFont;
    {
        // The cache is shared across threads; guard lookup and insert.
        SkAutoMutexExclusive lock(gHBFaceCacheMutex);
        SkFontID dataId = font.currentFont().getTypeface()->uniqueID();
        HBFace* hbFaceCached = gHBFaceCache.find(dataId);
        if (!hbFaceCached) {
            HBFace hbFace(create_hb_face(*font.currentFont().getTypeface()));
            hbFaceCached = gHBFaceCache.insert(dataId, std::move(hbFace));
        }
        hbFont = create_hb_font(font.currentFont(), *hbFaceCached);
    }
    if (!hbFont) {
        return run;
    }
    // Convert the features overlapping this run into hb_feature_t. Features
    // fully covering the run are marked global so HarfBuzz applies them to
    // pre/postcontext as well.
    SkSTArray<32, hb_feature_t> hbFeatures;
    for (const auto& feature : SkMakeSpan(features, featuresSize)) {
        if (feature.end < SkTo<size_t>(utf8Start - utf8) ||
                          SkTo<size_t>(utf8End - utf8)  <= feature.start)
        {
            continue;
        }
        if (feature.start <= SkTo<size_t>(utf8Start - utf8) &&
                             SkTo<size_t>(utf8End - utf8) <= feature.end)
        {
            hbFeatures.push_back({ (hb_tag_t)feature.tag, feature.value,
                                   HB_FEATURE_GLOBAL_START, HB_FEATURE_GLOBAL_END});
        } else {
            hbFeatures.push_back({ (hb_tag_t)feature.tag, feature.value,
                                   SkTo<unsigned>(feature.start), SkTo<unsigned>(feature.end)});
        }
    }
    hb_shape(hbFont.get(), buffer, hbFeatures.data(), hbFeatures.size());
    unsigned len = hb_buffer_get_length(buffer);
    if (len == 0) {
        return run;
    }
    if (direction == HB_DIRECTION_RTL) {
        // Put the clusters back in logical order.
        // Note that the advances remain ltr.
        hb_buffer_reverse(buffer);
    }
    hb_glyph_info_t* info = hb_buffer_get_glyph_infos(buffer, nullptr);
    hb_glyph_position_t* pos = hb_buffer_get_glyph_positions(buffer, nullptr);
    run = ShapedRun(RunHandler::Range(utf8Start - utf8, utf8runLength),
                    font.currentFont(), bidi.currentLevel(),
                    std::unique_ptr<ShapedGlyph[]>(new ShapedGlyph[len]), len);
    // HarfBuzz positions are in font units; scale them to the SkFont size.
    int scaleX, scaleY;
    hb_font_get_scale(hbFont.get(), &scaleX, &scaleY);
    double textSizeY = run.fFont.getSize() / scaleY;
    double textSizeX = run.fFont.getSize() / scaleX * run.fFont.getScaleX();
    SkVector runAdvance = { 0, 0 };
    for (unsigned i = 0; i < len; i++) {
        ShapedGlyph& glyph = run.fGlyphs[i];
        glyph.fID = info[i].codepoint;
        glyph.fCluster = info[i].cluster;
        glyph.fOffset.fX = pos[i].x_offset * textSizeX;
        glyph.fOffset.fY = -(pos[i].y_offset * textSizeY); // HarfBuzz y-up, Skia y-down
        glyph.fAdvance.fX = pos[i].x_advance * textSizeX;
        glyph.fAdvance.fY = -(pos[i].y_advance * textSizeY); // HarfBuzz y-up, Skia y-down
        SkRect bounds;
        SkScalar advance;
        SkPaint p;
        run.fFont.getWidthsBounds(&glyph.fID, 1, &advance, &bounds, &p);
        glyph.fHasVisual = !bounds.isEmpty(); //!font->currentTypeface()->glyphBoundsAreZero(glyph.fID);
#if SK_HB_VERSION_CHECK(1, 5, 0)
        glyph.fUnsafeToBreak = info[i].mask & HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
#else
        glyph.fUnsafeToBreak = false;
#endif
        glyph.fMustLineBreakBefore = false;
        runAdvance += glyph.fAdvance;
    }
    run.fAdvance = runAdvance;
    return run;
}
} // namespace
std::unique_ptr<SkShaper::BiDiRunIterator>
SkShaper::MakeIcuBiDiRunIterator(const char* utf8, size_t utf8Bytes, uint8_t bidiLevel) {
    // Convenience wrapper: build a default SkUnicode instance and delegate to
    // the SkUnicode-based bidi run iterator factory.
    auto unicode = SkUnicode::Make();
    return SkShaper::MakeSkUnicodeBidiRunIterator(unicode.get(), utf8, utf8Bytes, bidiLevel);
}
std::unique_ptr<SkShaper::BiDiRunIterator>
SkShaper::MakeSkUnicodeBidiRunIterator(SkUnicode* unicode, const char* utf8, size_t utf8Bytes, uint8_t bidiLevel) {
    // Builds a bidi run iterator over `utf8` using the supplied SkUnicode.
    // Returns nullptr on oversized input, invalid utf8, or bidi failure.
    // ubidi only accepts utf16 (though internally it basically works on utf32 chars).
    // We want an ubidi_setPara(UBiDi*, UText*, UBiDiLevel, UBiDiLevel*, UErrorCode*);
    if (!SkTFitsIn<int32_t>(utf8Bytes)) {
        SkDEBUGF("Bidi error: text too long\n");  // '\n' added: siblings all end with one
        return nullptr;
    }
    // First call measures the required utf16 length; negative means bad utf8.
    int32_t utf16Units = SkUTF::UTF8ToUTF16(nullptr, 0, utf8, utf8Bytes);
    if (utf16Units < 0) {
        SkDEBUGF("Invalid utf8 input\n");
        return nullptr;
    }
    std::unique_ptr<uint16_t[]> utf16(new uint16_t[utf16Units]);
    (void)SkUTF::UTF8ToUTF16(utf16.get(), utf16Units, utf8, utf8Bytes);
    // An even paragraph level means an LTR base direction, odd means RTL.
    auto bidiDir = (bidiLevel % 2 == 0) ? SkBidiIterator::kLTR : SkBidiIterator::kRTL;
    SkUnicodeBidi bidi = unicode->makeBidiIterator(utf16.get(), utf16Units, bidiDir);
    if (!bidi) {
        SkDEBUGF("Bidi error\n");
        return nullptr;
    }
    return std::make_unique<SkUnicodeBidiRunIterator>(utf8, utf8 + utf8Bytes, std::move(bidi));
}
std::unique_ptr<SkShaper::ScriptRunIterator>
SkShaper::MakeHbIcuScriptRunIterator(const char* utf8, size_t utf8Bytes) {
    // Script segmentation backed by the HarfBuzz/ICU run iterator.
    auto iterator = std::make_unique<HbIcuScriptRunIterator>(utf8, utf8Bytes);
    return iterator;
}
std::unique_ptr<SkShaper> SkShaper::MakeShaperDrivenWrapper(sk_sp<SkFontMgr> fontmgr) {
    // `true` selects the shaper-driven (correct) wrapping strategy.
    constexpr bool kShaperDriven = true;
    return MakeHarfBuzz(std::move(fontmgr), kShaperDriven);
}
std::unique_ptr<SkShaper> SkShaper::MakeShapeThenWrap(sk_sp<SkFontMgr> fontmgr) {
    // `false` selects the shape-once-then-wrap strategy.
    constexpr bool kShaperDriven = false;
    return MakeHarfBuzz(std::move(fontmgr), kShaperDriven);
}
std::unique_ptr<SkShaper> SkShaper::MakeShapeDontWrapOrReorder(sk_sp<SkFontMgr> fontmgr) {
    // Builds a shaper that shapes runs but performs no line wrapping and no
    // bidi reordering. Returns nullptr if HarfBuzz or SkUnicode setup fails.
    HBBuffer buffer(hb_buffer_create());
    if (!buffer) {
        SkDEBUGF("Could not create hb_buffer\n");  // '\n' added: siblings all end with one
        return nullptr;
    }
    auto unicode = SkUnicode::Make();
    if (!unicode) {
        return nullptr;
    }
    // No break iterators are needed since this shaper never wraps.
    return std::make_unique<ShapeDontWrapOrReorder>
        (std::move(unicode), nullptr, nullptr, std::move(buffer), std::move(fontmgr));
}
|
/*
* Copyright 2009-2017 Alibaba Cloud All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <alibabacloud/mts/model/ReportTerrorismJobResultRequest.h>
using AlibabaCloud::Mts::Model::ReportTerrorismJobResultRequest;
// Initializes the RPC request for product "mts", API version 2014-06-18,
// action "ReportTerrorismJobResult".
ReportTerrorismJobResultRequest::ReportTerrorismJobResultRequest() :
	RpcServiceRequest("mts", "2014-06-18", "ReportTerrorismJobResult")
{}
// Nothing to release; the base class owns the parameter storage.
ReportTerrorismJobResultRequest::~ReportTerrorismJobResultRequest()
{}
// Returns the job id previously set on this request.
std::string ReportTerrorismJobResultRequest::getJobId()const
{
	return jobId_;
}
// Stores the job id and mirrors it into the wire parameter "JobId".
void ReportTerrorismJobResultRequest::setJobId(const std::string& jobId)
{
	jobId_ = jobId;
	setParameter("JobId", jobId);
}
// Returns the resource owner id previously set on this request.
long ReportTerrorismJobResultRequest::getResourceOwnerId()const
{
	return resourceOwnerId_;
}
// Stores the resource owner id and mirrors it (as a decimal string) into
// the wire parameter "ResourceOwnerId".
void ReportTerrorismJobResultRequest::setResourceOwnerId(long resourceOwnerId)
{
	resourceOwnerId_ = resourceOwnerId;
	setParameter("ResourceOwnerId", std::to_string(resourceOwnerId));
}
// Returns the resource owner account previously set on this request.
std::string ReportTerrorismJobResultRequest::getResourceOwnerAccount()const
{
	return resourceOwnerAccount_;
}
// Stores the resource owner account and mirrors it into the wire
// parameter "ResourceOwnerAccount".
void ReportTerrorismJobResultRequest::setResourceOwnerAccount(const std::string& resourceOwnerAccount)
{
	resourceOwnerAccount_ = resourceOwnerAccount;
	setParameter("ResourceOwnerAccount", resourceOwnerAccount);
}
// Returns the owner account previously set on this request.
std::string ReportTerrorismJobResultRequest::getOwnerAccount()const
{
	return ownerAccount_;
}
// Stores the owner account and mirrors it into the wire parameter "OwnerAccount".
void ReportTerrorismJobResultRequest::setOwnerAccount(const std::string& ownerAccount)
{
	ownerAccount_ = ownerAccount;
	setParameter("OwnerAccount", ownerAccount);
}
// Returns the label previously set on this request.
std::string ReportTerrorismJobResultRequest::getLabel()const
{
	return label_;
}
// Stores the label and mirrors it into the wire parameter "Label".
void ReportTerrorismJobResultRequest::setLabel(const std::string& label)
{
	label_ = label;
	setParameter("Label", label);
}
// Returns the detail payload previously set on this request.
std::string ReportTerrorismJobResultRequest::getDetail()const
{
	return detail_;
}
// Stores the detail payload and mirrors it into the wire parameter "Detail".
void ReportTerrorismJobResultRequest::setDetail(const std::string& detail)
{
	detail_ = detail;
	setParameter("Detail", detail);
}
// Returns the owner id previously set on this request.
long ReportTerrorismJobResultRequest::getOwnerId()const
{
	return ownerId_;
}
// Stores the owner id and mirrors it (as a decimal string) into the wire
// parameter "OwnerId".
void ReportTerrorismJobResultRequest::setOwnerId(long ownerId)
{
	ownerId_ = ownerId;
	setParameter("OwnerId", std::to_string(ownerId));
}
// Returns the access key id previously set on this request.
std::string ReportTerrorismJobResultRequest::getAccessKeyId()const
{
	return accessKeyId_;
}
// Stores the access key id and mirrors it into the wire parameter "AccessKeyId".
void ReportTerrorismJobResultRequest::setAccessKeyId(const std::string& accessKeyId)
{
	accessKeyId_ = accessKeyId;
	setParameter("AccessKeyId", accessKeyId);
}
|
/*
Copyright (C) 2014 Jerome Revaud
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
#include "mainwindow.h"
#include <QApplication>
#include <QMainWindow>
int main (int argc, char **argv)
{
QApplication a (argc, argv);
MainWindow w;
w.show ();
return a.exec ();
}
|
//=======================================================================
// Copyright (c) Aaron Windsor 2007
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//=======================================================================
#ifndef __FACE_HANDLES_HPP__
#define __FACE_HANDLES_HPP__
#include <list>
#include <boost/graph/graph_traits.hpp>
#include <boost/shared_ptr.hpp>
// A "face handle" is an optimization meant to serve two purposes in
// the implementation of the Boyer-Myrvold planarity test: (1) it holds
// the partial planar embedding of a particular vertex as it's being
// constructed, and (2) it allows for efficient traversal around the
// outer face of the partial embedding at that particular vertex. A face
// handle is lightweight, just a shared pointer to the actual implementation,
// since it is passed around/copied liberally in the algorithm. It consists
// of an "anchor" (the actual vertex it's associated with) as well as a
// sequence of edges. The functions first_vertex/second_vertex and
// first_edge/second_edge allow fast access to the beginning and end of the
// stored sequence, which allows one to traverse the outer face of the partial
// planar embedding as it's being created.
//
// There are some policies below that define the contents of the face handles:
// in the case no embedding is needed (for example, if one just wants to use
// the Boyer-Myrvold algorithm as a true/false test for planarity, the
// no_embedding class can be passed as the StoreEmbedding policy. Otherwise,
// either std_list (which uses as std::list) or recursive_lazy_list can be
// passed as this policy. recursive_lazy_list has the best theoretical
// performance (O(n) for a sequence of interleaved concatenations and reversals
// of the underlying list), but I've noticed little difference between std_list
// and recursive_lazy_list in my tests, even though using std_list changes
// the worst-case complexity of the planarity test to O(n^2)
//
// Another policy is StoreOldHandlesPolicy, which specifies whether or not
// to keep a record of the previous first/second vertex/edge - this is needed
// if a Kuratowski subgraph needs to be isolated.
namespace boost { namespace graph { namespace detail {
//face handle policies
//EmbeddingStorage policy
// Base tag for policies that actually record the embedding.
struct store_embedding {};
// Lazy concatenation/reversal tree — best theoretical performance.
struct recursive_lazy_list : public store_embedding {};
// Plain std::list storage.
struct std_list : public store_embedding {};
// Record no embedding at all (planarity test as a pure true/false check).
struct no_embedding {};
//StoreOldHandlesPolicy: whether to remember the previous first/second
//vertex/edge (needed when a Kuratowski subgraph must be isolated).
struct store_old_handles {};
struct no_old_handles {};
// Node in a lazily-evaluated list: either a leaf holding one datum
// (m_has_data == true) or an internal concatenation node with two children.
// m_reversed lazily marks that this subtree should be read right-to-left.
template<typename DataType>
struct lazy_list_node
{
    typedef shared_ptr< lazy_list_node<DataType> > ptr_t;
    // Leaf constructor: wraps a single datum.
    lazy_list_node(const DataType& data) :
        m_reversed(false),
        m_data(data),
        m_has_data(true)
    {}
    // Internal-node constructor: concatenation of two subtrees
    // (m_data is left default-constructed and unused).
    lazy_list_node(ptr_t left_child, ptr_t right_child) :
        m_reversed(false),
        m_has_data(false),
        m_left_child(left_child),
        m_right_child(right_child)
    {}
    bool m_reversed;
    DataType m_data;
    bool m_has_data;
    shared_ptr<lazy_list_node> m_left_child;
    shared_ptr<lazy_list_node> m_right_child;
};
// Policy-selected storage for a face handle's previous handles.
template <typename StoreOldHandlesPolicy, typename Vertex, typename Edge>
struct old_handles_storage;
// store_old_handles: remember the previous first/second vertex and edge
// (used for Kuratowski subgraph isolation).
template <typename Vertex, typename Edge>
struct old_handles_storage<store_old_handles, Vertex, Edge>
{
    Vertex first_vertex;
    Vertex second_vertex;
    Edge first_edge;
    Edge second_edge;
};
// no_old_handles: store nothing at all.
template <typename Vertex, typename Edge>
struct old_handles_storage<no_old_handles, Vertex, Edge>
{};
// Policy-selected storage for the edge sequence of a face handle.
template <typename StoreEmbeddingPolicy, typename Edge>
struct edge_list_storage;
// no_embedding: a do-nothing stub — every operation is a no-op, so the
// planarity test can run without recording any embedding.
template <typename Edge>
struct edge_list_storage<no_embedding, Edge>
{
    typedef void type;
    void push_back(Edge) {}
    void push_front(Edge) {}
    void reverse() {}
    void concat_front(edge_list_storage<no_embedding,Edge>) {}
    void concat_back(edge_list_storage<no_embedding,Edge>) {}
    template <typename OutputIterator>
    void get_list(OutputIterator) {}
};
// recursive_lazy_list: stores the edge sequence as a binary tree of
// lazy_list_nodes — leaves hold edges, internal nodes represent
// concatenations, and reversal just toggles a flag on the root, so
// interleaved concatenations and reversals stay cheap.
template <typename Edge>
struct edge_list_storage<recursive_lazy_list, Edge>
{
    typedef lazy_list_node<Edge> node_type;
    typedef shared_ptr< node_type > type;
    type value;
    // Append: new root with the old tree on the left, the new edge right.
    void push_back(Edge e)
    {
        type new_node(new node_type(e));
        value = type(new node_type(value, new_node));
    }
    // Prepend: new root with the new edge on the left, the old tree right.
    void push_front(Edge e)
    {
        type new_node(new node_type(e));
        value = type(new node_type(new_node, value));
    }
    // O(1) reversal: flip the flag on the root.
    void reverse()
    {
        value->m_reversed = !value->m_reversed;
    }
    // Concatenate `other` before this sequence.
    void concat_front(edge_list_storage<recursive_lazy_list, Edge> other)
    {
        value = type(new node_type(other.value, value));
    }
    // Concatenate `other` after this sequence.
    void concat_back(edge_list_storage<recursive_lazy_list, Edge> other)
    {
        value = type(new node_type(value, other.value));
    }
    // Flatten the tree into the output iterator, honoring reversal flags.
    template <typename OutputIterator>
    void get_list(OutputIterator out)
    {
        get_list_helper(out, value);
    }
private:
    // In-order walk of the lazy tree; `flipped` accumulates the XOR of the
    // m_reversed flags seen on the path from the root. `root` is taken by
    // const reference (was by value) to avoid shared_ptr refcount churn at
    // every level of the recursion. NOTE: o_itr is passed by value and never
    // incremented here, so callers are expected to pass an inserter-style
    // iterator — TODO confirm against call sites.
    template <typename OutputIterator>
    void get_list_helper(OutputIterator o_itr,
                         const type& root,
                         bool flipped = false
                         )
    {
        if (!root)
            return;
        if (root->m_has_data)
            *o_itr = root->m_data;
        // Visit children right-to-left iff the net reversal state is set
        // (this is the XOR the original spelled as a four-term boolean).
        if (flipped != root->m_reversed)
        {
            get_list_helper(o_itr, root->m_right_child, true);
            get_list_helper(o_itr, root->m_left_child, true);
        }
        else
        {
            get_list_helper(o_itr, root->m_left_child, false);
            get_list_helper(o_itr, root->m_right_child, false);
        }
    }
};
// std_list: straightforward embedding storage backed by a std::list of
// edges. Simpler than recursive_lazy_list, but reversal is O(n).
template <typename Edge>
struct edge_list_storage<std_list, Edge>
{
    typedef std::list<Edge> type;
    type value;
    // Append one edge at the back of the sequence.
    void push_back(Edge edge)
    {
        value.insert(value.end(), edge);
    }
    // Prepend one edge at the front of the sequence.
    void push_front(Edge edge)
    {
        value.insert(value.begin(), edge);
    }
    // Reverse the stored sequence in place.
    void reverse()
    {
        value.reverse();
    }
    // Splice the other sequence (a by-value copy) in front of this one.
    void concat_front(edge_list_storage<std_list,Edge> other)
    {
        value.splice(value.begin(), other.value);
    }
    // Splice the other sequence (a by-value copy) behind this one.
    void concat_back(edge_list_storage<std_list, Edge> other)
    {
        value.splice(value.end(), other.value);
    }
    // Copy the edges, in order, to the output iterator.
    template <typename OutputIterator>
    void get_list(OutputIterator out)
    {
        for (typename type::const_iterator itr = value.begin();
             itr != value.end(); ++itr)
        {
            *out = *itr;
            ++out;
        }
    }
};
// Shared implementation behind face_handle (held via shared_ptr, so copies
// of a face_handle alias the same state). Caches the first/second vertices
// and edges of the partial embedding at `anchor`, plus the policy-selected
// edge list and old-handle storage.
template<typename Graph,
         typename StoreOldHandlesPolicy,
         typename StoreEmbeddingPolicy
         >
struct face_handle_impl
{
    typedef typename graph_traits<Graph>::vertex_descriptor vertex_t;
    typedef typename graph_traits<Graph>::edge_descriptor edge_t;
    typedef typename edge_list_storage<StoreEmbeddingPolicy, edge_t>::type
        edge_list_storage_t;
    // All vertices start out null; old-handle fields are additionally reset
    // only when the store_old_handles policy is in effect.
    face_handle_impl() :
        cached_first_vertex(graph_traits<Graph>::null_vertex()),
        cached_second_vertex(graph_traits<Graph>::null_vertex()),
        true_first_vertex(graph_traits<Graph>::null_vertex()),
        true_second_vertex(graph_traits<Graph>::null_vertex()),
        anchor(graph_traits<Graph>::null_vertex())
    {
        initialize_old_vertices_dispatch(StoreOldHandlesPolicy());
    }
    // Tag dispatch: only the store_old_handles variant has fields to reset.
    void initialize_old_vertices_dispatch(store_old_handles)
    {
        old_handles.first_vertex = graph_traits<Graph>::null_vertex();
        old_handles.second_vertex = graph_traits<Graph>::null_vertex();
    }
    void initialize_old_vertices_dispatch(no_old_handles) {}
    vertex_t cached_first_vertex;
    vertex_t cached_second_vertex;
    vertex_t true_first_vertex;
    vertex_t true_second_vertex;
    vertex_t anchor;
    edge_t cached_first_edge;
    edge_t cached_second_edge;
    edge_list_storage<StoreEmbeddingPolicy, edge_t> edge_list;
    old_handles_storage<StoreOldHandlesPolicy, vertex_t, edge_t> old_handles;
};
template <typename Graph,
typename StoreOldHandlesPolicy = store_old_handles,
typename StoreEmbeddingPolicy = recursive_lazy_list
>
class face_handle
{
public:
typedef typename graph_traits<Graph>::vertex_descriptor vertex_t;
typedef typename graph_traits<Graph>::edge_descriptor edge_t;
typedef face_handle_impl
<Graph, StoreOldHandlesPolicy, StoreEmbeddingPolicy> impl_t;
typedef face_handle
<Graph, StoreOldHandlesPolicy, StoreEmbeddingPolicy> self_t;
face_handle(vertex_t anchor = graph_traits<Graph>::null_vertex()) :
pimpl(new impl_t())
{
pimpl->anchor = anchor;
}
face_handle(vertex_t anchor, edge_t initial_edge, const Graph& g) :
pimpl(new impl_t())
{
vertex_t s(source(initial_edge,g));
vertex_t t(target(initial_edge,g));
vertex_t other_vertex = s == anchor ? t : s;
pimpl->anchor = anchor;
pimpl->cached_first_edge = initial_edge;
pimpl->cached_second_edge = initial_edge;
pimpl->cached_first_vertex = other_vertex;
pimpl->cached_second_vertex = other_vertex;
pimpl->true_first_vertex = other_vertex;
pimpl->true_second_vertex = other_vertex;
pimpl->edge_list.push_back(initial_edge);
store_old_face_handles_dispatch(StoreOldHandlesPolicy());
}
//default copy construction, assignment okay.
void push_first(edge_t e, const Graph& g)
{
pimpl->edge_list.push_front(e);
pimpl->cached_first_vertex = pimpl->true_first_vertex =
source(e, g) == pimpl->anchor ? target(e,g) : source(e,g);
pimpl->cached_first_edge = e;
}
void push_second(edge_t e, const Graph& g)
{
pimpl->edge_list.push_back(e);
pimpl->cached_second_vertex = pimpl->true_second_vertex =
source(e, g) == pimpl->anchor ? target(e,g) : source(e,g);
pimpl->cached_second_edge = e;
}
inline void store_old_face_handles()
{
store_old_face_handles_dispatch(StoreOldHandlesPolicy());
}
inline vertex_t first_vertex() const
{
return pimpl->cached_first_vertex;
}
inline vertex_t second_vertex() const
{
return pimpl->cached_second_vertex;
}
inline vertex_t true_first_vertex() const
{
return pimpl->true_first_vertex;
}
inline vertex_t true_second_vertex() const
{
return pimpl->true_second_vertex;
}
inline vertex_t old_first_vertex() const
{
return pimpl->old_handles.first_vertex;
}
inline vertex_t old_second_vertex() const
{
return pimpl->old_handles.second_vertex;
}
inline edge_t old_first_edge() const
{
return pimpl->old_handles.first_edge;
}
inline edge_t old_second_edge() const
{
return pimpl->old_handles.second_edge;
}
inline edge_t first_edge() const
{
return pimpl->cached_first_edge;
}
inline edge_t second_edge() const
{
return pimpl->cached_second_edge;
}
inline vertex_t get_anchor() const
{
return pimpl->anchor;
}
void glue_first_to_second
(face_handle<Graph,StoreOldHandlesPolicy,StoreEmbeddingPolicy>& bottom)
{
pimpl->edge_list.concat_front(bottom.pimpl->edge_list);
pimpl->true_first_vertex = bottom.pimpl->true_first_vertex;
pimpl->cached_first_vertex = bottom.pimpl->cached_first_vertex;
pimpl->cached_first_edge = bottom.pimpl->cached_first_edge;
}
void glue_second_to_first
(face_handle<Graph,StoreOldHandlesPolicy,StoreEmbeddingPolicy>& bottom)
{
pimpl->edge_list.concat_back(bottom.pimpl->edge_list);
pimpl->true_second_vertex = bottom.pimpl->true_second_vertex;
pimpl->cached_second_vertex = bottom.pimpl->cached_second_vertex;
pimpl->cached_second_edge = bottom.pimpl->cached_second_edge;
}
// Reverse the orientation of the face: the edge list is reversed and the
// first/second roles (both true and cached, vertices and edges) swap.
void flip()
{
    pimpl->edge_list.reverse();
    std::swap(pimpl->true_first_vertex, pimpl->true_second_vertex);
    std::swap(pimpl->cached_first_vertex, pimpl->cached_second_vertex);
    std::swap(pimpl->cached_first_edge, pimpl->cached_second_edge);
}
// Copy the face's edges, in order, into the given output iterator.
template <typename OutputIterator>
void get_list(OutputIterator o_itr)
{
    pimpl->edge_list.get_list(o_itr);
}
// Discard any cache overrides: restore cached endpoints to the true ones.
void reset_vertex_cache()
{
    pimpl->cached_first_vertex = pimpl->true_first_vertex;
    pimpl->cached_second_vertex = pimpl->true_second_vertex;
}
// Temporarily override the cached first endpoint (undone by
// reset_vertex_cache); the true endpoint is left untouched.
inline void set_first_vertex(vertex_t v)
{
    pimpl->cached_first_vertex = v;
}
// Temporarily override the cached second endpoint (undone by
// reset_vertex_cache); the true endpoint is left untouched.
inline void set_second_vertex(vertex_t v)
{
    pimpl->cached_second_vertex = v;
}
private:
// Policy dispatch target when old handles ARE stored: snapshot the current
// true vertices and cached edges into old_handles.
void store_old_face_handles_dispatch(store_old_handles)
{
    pimpl->old_handles.first_vertex = pimpl->true_first_vertex;
    pimpl->old_handles.second_vertex = pimpl->true_second_vertex;
    pimpl->old_handles.first_edge = pimpl->cached_first_edge;
    pimpl->old_handles.second_edge = pimpl->cached_second_edge;
}
// Policy dispatch target when old handles are NOT stored: intentional no-op.
void store_old_face_handles_dispatch(no_old_handles) {}
boost::shared_ptr<impl_t> pimpl;
};
} /* namespace detail */ } /* namespace graph */ } /* namespace boost */
#endif //__FACE_HANDLES_HPP__
|
#include <fstream>
#include <numeric> // std::accumulate
//![include_ranges_chunk]
#include <range/v3/view/chunk.hpp>
//![include_ranges_chunk]
#include <seqan3/core/char_operations/predicate.hpp>
//![include]
#include <seqan3/io/sequence_file/all.hpp>
//![include]
//![include_debug_stream]
#include <seqan3/core/debug_stream.hpp>
//![include_debug_stream]
#include <seqan3/range/detail/misc.hpp>
//![include_ranges]
#include <seqan3/std/ranges>
//![include_ranges]
// RAII fixture for this tutorial snippet: the constructor writes the example
// input files (my.fastq, my.qq, my.fasta) into the temp directory, and the
// destructor removes them again, warning (not throwing) on failure.
struct write_file_dummy_struct
{
std::filesystem::path const tmp_path = std::filesystem::temp_directory_path();
write_file_dummy_struct()
{
// NOTE(review): this raw-string literal appears truncated in this view of
// the file (the multi-line FASTQ payload seems lost) — confirm against the
// original snippet source before editing.
auto file_raw = R"////![fastq_file]";
std::ofstream file{tmp_path/"my.fastq"};
std::string str{file_raw};
file << str.substr(1); // skip first newline
std::ofstream file2{tmp_path/"my.qq"};
file2 << str.substr(1); // skip first newline
std::ofstream file3{tmp_path/"my.fasta"};
file3 << ">seq1\nAVAV\n>seq2\nAVAVA\n";
}
~write_file_dummy_struct()
{
// Best-effort cleanup: remove() is given an error_code so it never throws
// from a destructor; failures are only reported to the debug stream.
std::error_code ec{};
std::filesystem::path file_path{};
file_path = tmp_path/"my.fastq";
std::filesystem::remove(file_path, ec);
if (ec)
seqan3::debug_stream << "[WARNING] Could not delete " << file_path << ". " << ec.message() << '\n';
file_path = tmp_path/"my.qq";
std::filesystem::remove(file_path, ec);
if (ec)
seqan3::debug_stream << "[WARNING] Could not delete " << file_path << ". " << ec.message() << '\n';
file_path = tmp_path/"my.fasta";
std::filesystem::remove(file_path, ec);
if (ec)
seqan3::debug_stream << "[WARNING] Could not delete " << file_path << ". " << ec.message() << '\n';
}
};
// Global instance: files are written before main() runs, removed at exit.
write_file_dummy_struct go{}; // write file
// Driver for the sequence-file tutorial snippets. Each scoped block below is
// an independent example; the //![tag] markers delimit regions extracted into
// the documentation, so code inside them must stay verbatim.
int main()
{
// Example: querying and extending the recognized file extensions.
{
//![file_extensions]
seqan3::debug_stream << seqan3::format_fastq::file_extensions << '\n'; // prints [fastq,fq]
//![file_extensions]
//![modify_file_extensions]
seqan3::format_fastq::file_extensions.push_back("qq");
seqan3::sequence_file_input fin{std::filesystem::temp_directory_path()/"my.qq"}; // detects FASTQ format
//![modify_file_extensions]
}
// Example (compile-only, kept commented out): reading from standard input.
{
/*
//![construct_from_cin]
seqan3::sequence_file_input fin{std::cin, format_fasta{}};
//![construct_from_cin]
*/
}
{
//![amino_acid_type_trait]
seqan3::sequence_file_input<seqan3::sequence_file_input_default_traits_aa> fin{std::filesystem::temp_directory_path()/"my.fasta"};
//![amino_acid_type_trait]
}
// Example: accessing a single record.
{
//![record_type]
seqan3::sequence_file_input fin{std::filesystem::temp_directory_path()/"my.fastq"};
using record_type = typename decltype(fin)::record_type;
// Because `fin` is a range, we can access the first element by dereferencing fin.begin()
record_type rec = *fin.begin();
//![record_type]
}
{
seqan3::sequence_file_input fin{std::filesystem::temp_directory_path()/"my.fastq"};
using record_type = typename decltype(fin)::record_type;
//![record_type2]
record_type rec = std::move(*fin.begin()); // avoid copying
//![record_type2]
}
// Example: iterating two files in lock-step for paired-end reads.
{
//![paired_reads]
// for simplicity we take the same file
seqan3::sequence_file_input fin1{std::filesystem::temp_directory_path()/"my.fastq"};
seqan3::sequence_file_input fin2{std::filesystem::temp_directory_path()/"my.fastq"};
for (auto && [rec1, rec2] : seqan3::views::zip(fin1, fin2)) // && is important!
{ // because seqan3::views::zip returns temporaries
if (seqan3::get<seqan3::field::id>(rec1) != seqan3::get<seqan3::field::id>(rec2))
throw std::runtime_error("Oh oh your pairs don't match.");
}
//![paired_reads]
}
// Example: chunked reading with ranges-v3.
{
//![read_in_batches]
seqan3::sequence_file_input fin{std::filesystem::temp_directory_path()/"my.fastq"};
// `&&` is important because seqan3::views::chunk returns temporaries!
for (auto && records : fin | ranges::views::chunk(10))
{
// `records` contains 10 elements (or less at the end)
seqan3::debug_stream << "Taking the next 10 sequences:\n";
seqan3::debug_stream << "ID: " << seqan3::get<seqan3::field::id>(*records.begin()) << '\n';
} // prints first ID in batch
//![read_in_batches]
}
// Example: filtering records by average PHRED quality.
{
//![quality_filter]
seqan3::sequence_file_input fin{std::filesystem::temp_directory_path()/"my.fastq"};
// std::views::filter takes a function object (a lambda in this case) as input that returns a boolean
auto minimum_quality_filter = std::views::filter([] (auto const & rec)
{
auto qual = seqan3::get<seqan3::field::qual>(rec) | std::views::transform([] (auto q) { return q.to_phred(); });
double sum = std::accumulate(qual.begin(), qual.end(), 0);
return sum / std::ranges::size(qual) >= 40; // minimum average quality >= 40
});
for (auto & rec : fin | minimum_quality_filter)
{
seqan3::debug_stream << "ID: " << seqan3::get<seqan3::field::id>(rec) << '\n';
}
//![quality_filter]
}
// Example: piping an input file straight into an output file.
{
//![piping_in_out]
auto tmp_dir = std::filesystem::temp_directory_path();
seqan3::sequence_file_input fin{tmp_dir/"my.fastq"};
seqan3::sequence_file_output fout{tmp_dir/"output.fastq"};
// the following are equivalent:
fin | fout;
fout = fin;
seqan3::sequence_file_output{tmp_dir/"output.fastq"} = seqan3::sequence_file_input{tmp_dir/"my.fastq"};
//![piping_in_out]
}
// Example: one-line format conversion (FASTQ -> FASTA).
{
//![file_conversion]
auto tmp_dir = std::filesystem::temp_directory_path();
seqan3::sequence_file_output{tmp_dir/"output.fasta"} = seqan3::sequence_file_input{tmp_dir/"my.fastq"};
//![file_conversion]
}
// Clean up the files produced by the examples above.
std::filesystem::remove(std::filesystem::temp_directory_path()/"output.fasta");
std::filesystem::remove(std::filesystem::temp_directory_path()/"output.fastq");
}
|
#include "Mesh.h"
namespace Fission {
// Construct an empty mesh, pre-reserving capacity in each buffer so the
// expected number of push_* calls does not trigger reallocations.
Mesh::Mesh( int vertex_count, int index_count, int color_count )
    : m_Data( new MeshData )
{
    m_Data->vertex_buffer.reserve( vertex_count );
    m_Data->index_buffer.reserve( index_count );
    m_Data->color_buffer.reserve( color_count );
}
// Deep copy: duplicates the entire MeshData so the two meshes are independent.
Mesh::Mesh( const Mesh & src )
    : m_Data( new MeshData( *src.m_Data ) )
{}
// Releases the owned MeshData (raw owning pointer managed manually).
Mesh::~Mesh()
{
    delete m_Data;
}
// Append a color to the palette; its position becomes the color index that
// vertices refer to.
void Mesh::push_color( color col ) {
    m_Data->color_buffer.emplace_back( col );
}
// Append a vertex with its position and an index into the color buffer.
void Mesh::push_vertex( base::vector2f position, int color_index ) {
    m_Data->vertex_buffer.emplace_back( position, color_index );
}
// Append one index into the index buffer (triangle list ordering is the
// caller's responsibility).
void Mesh::push_index( uint32_t index ) {
    m_Data->index_buffer.emplace_back( index );
}
// Overwrite the palette entry at `index`. No bounds checking is performed;
// the caller must pass a valid index.
void Mesh::set_color( uint32_t index, color new_color ) {
    m_Data->color_buffer[index] = new_color;
}
// Number of vertices currently stored in the mesh.
uint32_t Mesh::vertex_count() const {
    return (uint32_t)m_Data->vertex_buffer.size();
}
// Number of indices currently stored in the mesh.
uint32_t Mesh::index_count() const {
    // Fixed: previously returned vertex_buffer.size() (copy-paste defect),
    // which reported the wrong count whenever the vertex and index buffers
    // differ in length — i.e. for any indexed mesh.
    return (uint32_t)m_Data->index_buffer.size();
}
// Number of palette entries currently stored in the mesh.
uint32_t Mesh::color_count() const {
    return (uint32_t)m_Data->color_buffer.size();
}
}
|
/* Copyright © 2017 Apple Inc. All rights reserved.
*
* Use of this source code is governed by a BSD-3-clause license that can
* be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
*/
#include <model_server/lib/toolkit_function_macros.hpp>
#include <toolkits/supervised_learning/neuralnet_device.hpp>
using namespace turi;
/**
 * Returns the number of GPU devices visible to the neural-net toolkit.
 *
 * The device-id list size is a size_t; the explicit cast documents the
 * (previously implicit) narrowing to the int return type expected by the
 * function registration layer.
 */
int get_gpu_count() {
  return static_cast<int>(supervised::neuralnet_v2::get_gpu_device_ids().size());
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(get_gpu_count)
END_FUNCTION_REGISTRATION
|
// RUN: %clang_cc1 -fsyntax-only -fcxx-exceptions -verify -std=c++11 -Wall %s
// This translation unit is a clang -verify test for C++11 non-static data
// member initializers (NSDMIs). The special comments on the code lines are
// diagnostic directives consumed by -verify and must not be altered; the
// plain comments added here only group the cases.

// Bitfield NSDMIs are a C++20 extension; 7 does not fit in a 3-bit field.
struct Bitfield {
int n : 3 = 7; // expected-warning {{C++20 extension}} expected-warning {{changes value from 7 to -1}}
};
int a;
class NoWarning {
int &n = a;
public:
int &GetN() { return n; }
};
bool b();
int k;
// An NSDMI that (conditionally) uses the class's own default constructor.
struct Recurse { // expected-error {{initializer for 'n' needed}}
int &n = // expected-note {{declared here}}
b() ?
Recurse().n : // expected-note {{in evaluation of exception spec}}
k;
};
// Array bounds cannot be deduced from an in-class initializer.
struct UnknownBound {
int as[] = { 1, 2, 3 }; // expected-error {{array bound cannot be deduced from an in-class initializer}}
int bs[4] = { 4, 5, 6, 7 };
int cs[] = { 8, 9, 10 }; // expected-error {{array bound cannot be deduced from an in-class initializer}}
};
template<int n> struct T { static const int B; };
template<> struct T<2> { template<int C, int D> using B = int; };
const int C = 0, D = 0;
struct S {
int as[] = { decltype(x)::B<C, D>(0) }; // expected-error {{array bound cannot be deduced from an in-class initializer}}
T<sizeof(as) / sizeof(int)> x;
// test that we handle invalid array bound deductions without crashing when the declarator name is itself invalid
operator int[](){}; // expected-error {{'operator int' cannot be the name of a variable or data member}} \
// expected-error {{array bound cannot be deduced from an in-class initializer}}
};
// Exception specifications of implicit constructors must account for NSDMIs.
struct ThrowCtor { ThrowCtor(int) noexcept(false); };
struct NoThrowCtor { NoThrowCtor(int) noexcept(true); };
struct Throw { ThrowCtor tc = 42; };
struct NoThrow { NoThrowCtor tc = 42; };
static_assert(!noexcept(Throw()), "incorrect exception specification");
static_assert(noexcept(NoThrow()), "incorrect exception specification");
struct CheckExcSpec {
CheckExcSpec() noexcept(true) = default;
int n = 0;
};
struct CheckExcSpecFail {
CheckExcSpecFail() noexcept(true) = default; // ok, but calls terminate() on exception
ThrowCtor tc = 123;
};
struct TypedefInit {
typedef int A = 0; // expected-error {{illegal initializer}}
};
// PR10578 / <rdar://problem/9877267>
namespace PR10578 {
template<typename T>
struct X {
X() {
T* x = 1; // expected-error{{cannot initialize a variable of type 'int *' with an rvalue of type 'int'}}
}
};
struct Y : X<int> {
Y();
};
Y::Y() try { // expected-note{{in instantiation of member function 'PR10578::X<int>::X' requested here}}
} catch(...) {
}
}
namespace PR14838 {
struct base { ~base() {} };
class function : base {
~function() {} // expected-note {{implicitly declared private here}}
public:
function(...) {}
};
struct thing {};
struct another {
another() : r(thing()) {} // expected-error {{binds to a temporary object}}
// expected-error@-1 {{temporary of type 'PR14838::function' has private destructor}}
const function &r; // expected-note {{reference member declared here}}
} af;
}
namespace rdar14084171 {
struct Point { // expected-note 3 {{candidate constructor}}
double x;
double y;
};
struct Sprite {
Point location = Point(0,0); // expected-error {{no matching constructor for initialization of 'rdar14084171::Point'}}
};
void f(Sprite& x) { x = x; } // expected-warning {{explicitly assigning value of variable}}
}
namespace PR18560 {
struct X { int m; };
template<typename T = X,
typename U = decltype(T::m)>
int f();
struct Y { int b = f(); };
}
namespace template_valid {
// Valid, we shouldn't build a CXXDefaultInitExpr until A's ctor definition.
struct A {
A();
template <typename T>
struct B { int m1 = sizeof(A) + sizeof(T); };
B<int> m2;
};
A::A() {}
}
// The next three namespaces exercise NSDMIs needed before the enclosing
// class is complete, in nested/template member contexts.
namespace template_default_ctor {
struct A {
template <typename T>
struct B { // expected-error {{initializer for 'm1' needed}}
int m1 = 0; // expected-note {{declared here}}
};
enum { NOE = noexcept(B<int>()) }; // expected-note {{in evaluation of exception spec}}
};
}
namespace default_ctor {
struct A {
struct B { // expected-error {{initializer for 'm1' needed}}
int m1 = 0; // expected-note {{declared here}}
};
enum { NOE = noexcept(B()) }; // expected-note {{in evaluation of exception spec}}
};
}
namespace member_template {
struct A {
template <typename T>
struct B {
struct C { // expected-error {{initializer for 'm1' needed}}
int m1 = 0; // expected-note {{declared here}}
};
template <typename U>
struct D { // expected-error {{initializer for 'm1' needed}}
int m1 = 0; // expected-note {{declared here}}
};
};
enum {
NOE1 = noexcept(B<int>::C()), // expected-note {{in evaluation of exception spec}}
NOE2 = noexcept(B<int>::D<int>()) // expected-note {{in evaluation of exception spec}}
};
};
}
namespace explicit_instantiation {
template<typename T> struct X {
X(); // expected-note {{in instantiation of default member initializer 'explicit_instantiation::X<float>::n' requested here}}
int n = T::error; // expected-error {{type 'float' cannot be used prior to '::' because it has no members}}
};
template struct X<int>; // ok
template<typename T> X<T>::X() {}
template struct X<float>; // expected-note {{in instantiation of member function 'explicit_instantiation::X<float>::X' requested here}}
}
namespace local_class {
template<typename T> void f() {
struct X { // expected-note {{in instantiation of default member initializer 'local_class::f()::X::n' requested here}}
int n = T::error; // expected-error {{type 'int' cannot be used prior to '::' because it has no members}}
};
}
void g() { f<int>(); } // expected-note {{in instantiation of function template specialization 'local_class::f<int>' requested here}}
}
namespace PR22056 {
template <int N>
struct S {
int x[3] = {[N] = 3}; // expected-warning {{C99 extension}}
};
}
namespace PR28060 {
template <class T>
void foo(T v) {
struct s {
T *s = 0;
};
}
template void foo(int);
}
|
/******************************************************************************
*
* Project: UK NTF Reader
* Purpose: Implements OGRNTFFeatureClassLayer class.
* Author: Frank Warmerdam, warmerdam@pobox.com
*
******************************************************************************
* Copyright (c) 1999, Frank Warmerdam
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
****************************************************************************/
#include "ntf.h"
#include "cpl_conv.h"
CPL_CVSID("$Id$")
/************************************************************************/
/* OGRNTFFeatureClassLayer() */
/* */
/* Note that the OGRNTFLayer assumes ownership of the passed */
/* OGRFeatureDefn object. */
/************************************************************************/
OGRNTFFeatureClassLayer::OGRNTFFeatureClassLayer( OGRNTFDataSource *poDSIn ) :
    poFeatureDefn(new OGRFeatureDefn("FEATURE_CLASSES")),
    poFilterGeom(nullptr),
    poDS(poDSIn),
    iCurrentFC(0)
{
/* -------------------------------------------------------------------- */
/*      Establish the schema.                                           */
/* -------------------------------------------------------------------- */
    SetDescription( poFeatureDefn->GetName() );
    // This layer exposes the feature-class catalogue, not geometry.
    poFeatureDefn->SetGeomType( wkbNone );
    poFeatureDefn->Reference();

    OGRFieldDefn oFCNum( "FEAT_CODE", OFTString );
    oFCNum.SetWidth( 4 );
    poFeatureDefn->AddFieldDefn( &oFCNum );

    OGRFieldDefn oFCName( "FC_NAME", OFTString );
    // Fixed: the width was previously (re)applied to oFCNum, leaving
    // FC_NAME without its intended 80-character width.
    oFCName.SetWidth( 80 );
    poFeatureDefn->AddFieldDefn( &oFCName );
}
/************************************************************************/
/* ~OGRNTFFeatureClassLayer() */
/************************************************************************/
OGRNTFFeatureClassLayer::~OGRNTFFeatureClassLayer()
{
    // Drop our reference on the shared schema object (refcounted); it is
    // destroyed when the last reference is released.
    if( poFeatureDefn )
        poFeatureDefn->Release();

    // Release any installed spatial filter geometry clone.
    if( poFilterGeom != nullptr )
        delete poFilterGeom;
}
/************************************************************************/
/* SetSpatialFilter() */
/************************************************************************/
// Install (or clear, when poGeomIn is null) the spatial filter. The layer
// keeps its own clone of the geometry; the caller retains ownership of the
// argument.
void OGRNTFFeatureClassLayer::SetSpatialFilter( OGRGeometry * poGeomIn )
{
    // Discard any previously installed filter (delete on nullptr is a no-op).
    delete poFilterGeom;

    poFilterGeom = (poGeomIn != nullptr) ? poGeomIn->clone() : nullptr;
}
/************************************************************************/
/* ResetReading() */
/************************************************************************/
void OGRNTFFeatureClassLayer::ResetReading()
{
    // Rewind the sequential-read cursor used by GetNextFeature().
    iCurrentFC = 0;
}
/************************************************************************/
/* GetNextFeature() */
/************************************************************************/
OGRFeature *OGRNTFFeatureClassLayer::GetNextFeature()
{
    // Sequential read: hand out one feature class per call, nullptr when the
    // catalogue is exhausted. Ownership of the feature passes to the caller.
    if( iCurrentFC >= GetFeatureCount() )
        return nullptr;

    return GetFeature( (long) iCurrentFC++ );
}
/************************************************************************/
/* GetFeature() */
/************************************************************************/
// Random read: build a feature for the feature class with the given id.
// Returns nullptr for out-of-range ids; the caller owns the returned feature.
OGRFeature *OGRNTFFeatureClassLayer::GetFeature( GIntBig nFeatureId )
{
    char *pszFCName, *pszFCId;

    if( nFeatureId < 0 || nFeatureId >= poDS->GetFCCount() )
        return nullptr;

    // Look up the code/name pair from the datasource's feature-class table.
    // NOTE(review): the returned strings appear to be borrowed (not freed
    // here) — confirm ownership against GetFeatureClass()'s contract.
    poDS->GetFeatureClass( (int)nFeatureId, &pszFCId, &pszFCName );

/* -------------------------------------------------------------------- */
/*      Create a corresponding feature.                                 */
/* -------------------------------------------------------------------- */
    OGRFeature *poFeature = new OGRFeature( poFeatureDefn );

    poFeature->SetField( 0, pszFCId );
    poFeature->SetField( 1, pszFCName );
    poFeature->SetFID( nFeatureId );

    return poFeature;
}
/************************************************************************/
/* GetFeatureCount() */
/* */
/* If a spatial filter is in effect, we turn control over to */
/* the generic counter. Otherwise we return the total count. */
/* Eventually we should consider implementing a more efficient */
/* way of counting features matching a spatial query. */
/************************************************************************/
GIntBig OGRNTFFeatureClassLayer::GetFeatureCount( CPL_UNUSED int bForce )
{
    // One feature per known feature class; this layer has no geometry, so
    // the spatial filter never reduces the count.
    return poDS->GetFCCount();
}
/************************************************************************/
/* TestCapability() */
/************************************************************************/
// Report layer capabilities: this read-only catalogue layer supports random
// reads, a fast feature count, and (trivially, having no geometry) fast
// spatial filtering; every write capability is unsupported.
int OGRNTFFeatureClassLayer::TestCapability( const char * pszCap )
{
    const bool bSupported =
        EQUAL(pszCap, OLCRandomRead)
        || EQUAL(pszCap, OLCFastFeatureCount)
        || EQUAL(pszCap, OLCFastSpatialFilter);

    return bSupported ? TRUE : FALSE;
}
|
/* Copyright or © or Copr. Centre National de la Recherche Scientifique (CNRS) (2017/05/03)
Contributors:
- Vincent Lanore <vincent.lanore@gmail.com>
This software is a computer program whose purpose is to provide the necessary classes to write ligntweight component-based
c++ applications.
This software is governed by the CeCILL-B license under French law and abiding by the rules of distribution of free software.
You can use, modify and/ or redistribute the software under the terms of the CeCILL-B license as circulated by CEA, CNRS and
INRIA at the following URL "http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy, modify and redistribute granted by the license, users
are provided only with a limited warranty and the software's author, the holder of the economic rights, and the successive
licensors have only limited liability.
In this respect, the user's attention is drawn to the risks associated with loading, using, modifying and/or developing or
reproducing the software by the user in light of its specific status of free software, that may mean that it is complicated
to manipulate, and that also therefore means that it is reserved for developers and experienced professionals having in-depth
computer knowledge. Users are therefore encouraged to load and test the software's suitability as regards their requirements
in conditions enabling the security of their systems and/or data to be ensured and, more generally, to use and operate it in
the same conditions as regards security.
The fact that you are presently reading this means that you have had knowledge of the CeCILL-B license and that you accept
its terms.*/
#ifndef TINYCOMPO_HPP
#define TINYCOMPO_HPP
#ifdef __GNUG__
#include <cxxabi.h>
#endif
#include <string.h>

#include <cassert>
#include <cstdio>
#include <exception>
#include <fstream>
#include <functional>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <numeric>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
namespace tc {
/*
=============================================================================================================================
~*~ Various forward-declarations and abstract interfaces ~*~
===========================================================================================================================*/
// Forward declarations of the main user-facing tinycompo types.
class Model;
class Assembly;
class Address;
class Component;
struct PortAddress;
class ComponentReference;
struct Composite;
struct Meta {};  // type tag for meta connectors

template <class T>  // this is an empty helper class that is used to pass T to the _ComponentBuilder
class _Type {};     // constructor below

// Common base for all port implementations; exists only so heterogeneous
// ports can be stored behind one pointer type and destroyed polymorphically.
struct _AbstractPort {
    virtual ~_AbstractPort() = default;
};

// Base for "provide" ports: exposes the provided object as a Component*.
struct _AbstractProvidePort : public _AbstractPort {
    virtual Component* get_type_erased() = 0;
};

// Interface for drivers: objects that run an assembly (go) after being
// handed references to the components they operate on.
struct _AbstractDriver {
    virtual void go() = 0;
    virtual void set_refs(std::vector<Component*>) = 0;
};

struct _AbstractAddress {};  // for identification of _Address types encountered in the wild

// A directed graph as (set of node names, multimap of edges name -> name).
using DirectedGraph = std::pair<std::set<std::string>, std::multimap<std::string, std::string>>;
/*
=============================================================================================================================
~*~ Debug ~*~
===========================================================================================================================*/
// Exception type used throughout tinycompo. An instance can optionally carry
// the exception that caused it ("context"), forming a one-deep cause chain.
class TinycompoException : public std::exception {
    std::string message{""};
    std::vector<TinycompoException> context;  // empty, or the single causing exception

  public:
    TinycompoException(const std::string& init = "") : message{init} {}
    TinycompoException(const std::string& init, const TinycompoException& context_in)
        : message{init}, context({context_in}) {}
    // Returns the message only; the context is not included in what().
    const char* what() const noexcept override { return message.c_str(); }
};
class TinycompoDebug {  // bundle of static functions to help with debug messages
#ifdef __GNUG__
    // GCC/Clang: demangle an RTTI name; falls back to the mangled name if
    // __cxa_demangle reports an error (nonzero status).
    static std::string demangle(const char* name) {
        int status{0};
        std::unique_ptr<char, void (*)(void*)> res{abi::__cxa_demangle(name, NULL, NULL, &status), std::free};
        return (status == 0) ? res.get() : name;
    }
#else
    // Other compilers: no demangling available, return the raw name.
    static std::string demangle(const char* name) { return name; }
#endif

  public:
    template <class T>
    static std::string type() {  // display human-friendly typename
        return demangle(typeid(T).name());
    }

    template <class T1, class T2>
    static std::string list(const std::map<T1, T2>& structure) {  // bullet-pointed list of key names in a string
        std::stringstream acc;
        for (auto& e : structure) {
            acc << " * " << e.first << '\n';
        }
        return acc.str();
    }
};
/*
=============================================================================================================================
~*~ _Port class ~*~
_Port<Args...> derives from _AbstractPort which allows the storage of pointers to _Port by converting them to _AbstractPort*.
These classes are for internal use by tinycompo and should not be seen by the user (as denoted by the underscore prefixes).
===========================================================================================================================*/
// A "use" port: wraps either a setter member function or a data member of a
// component behind a uniform std::function<void(Args...)> setter.
template <class... Args>
struct _Port : public _AbstractPort {
    std::function<void(Args...)> _set;

    _Port() = delete;

    // Port backed by a setter member function of C.
    template <class C>
    explicit _Port(C* ref, void (C::*prop)(Args...))
        : _set([=](const Args... args) { (ref->*prop)(std::forward<const Args>(args)...); }) {}

    // Port backed by a data member of C: setting the port assigns the member.
    template <class C, class Type>
    explicit _Port(C* ref, Type(C::*prop)) : _set([ref, prop](const Type arg) { ref->*prop = arg; }) {}
};
// A "provide" port: exposes an Interface* obtained from a component, either
// via a getter member function or (for composites) via an address resolved
// against an Assembly (constructors defined later in the file).
template <class Interface>
struct _ProvidePort : public _AbstractProvidePort {
    std::function<Interface*()> _get;

    // Type-erased access; requires Interface to derive from Component.
    Component* get_type_erased() override { return dynamic_cast<Component*>(_get()); }

    _ProvidePort() = delete;

    // Port backed by a getter member function of C.
    template <class C>
    explicit _ProvidePort(C* ref, Interface* (C::*prop)()) : _get([=]() { return (ref->*prop)(); }) {}

    _ProvidePort(Assembly& assembly, Address address);  // composite port, direct
    _ProvidePort(Assembly& assembly, PortAddress port);  // composite port, provide
};
/*
=============================================================================================================================
~*~ Component class ~*~
tinycompo components should always inherit from this class. It is mostly used as a base to be able to store pointers to child
class instances but also provides basic debugging methods and the infrastructure required to declare ports.
===========================================================================================================================*/
// Base class for all tinycompo components. Stores the component's declared
// ports (by name) and its instance name; subclasses declare ports in their
// constructor via port()/provide().
class Component {
    std::map<std::string, std::unique_ptr<_AbstractPort>> _ports;  // not meant to be accessible for users
    std::string name{""};  // accessible through get/set name accessors
    friend Assembly;

  public:
    /*
    =========================================================================================================================
    ~*~ Constructors ~*~ */
    void operator=(const Component&) = delete;  // forbidding assignation
    Component(const Component&) = delete;       // forbidding copy
    Component() = default;
    virtual ~Component() = default;

    /*
    =========================================================================================================================
    ~*~ Functions that can be overriden by user (lifecycle and debug) ~*~ */
    virtual std::string debug() const { return "Component"; }  // runtime overridable method for debug info (class&state)
    virtual void after_construct() {}                          // called after component constructor but before connection
    virtual void after_connect() {}                            // called after connections are all done

    /*
    =========================================================================================================================
    ~*~ Declaration of ports ~*~ */
    // Declare a use-port backed by a setter member function.
    template <class C, class... Args>
    void port(std::string name, void (C::*prop)(Args...)) {  // case where the port is a setter member function
        _ports[name] = std::unique_ptr<_AbstractPort>(
            static_cast<_AbstractPort*>(new _Port<const Args...>(dynamic_cast<C*>(this), prop)));
    }

    // Declare a use-port backed directly by a data member.
    template <class C, class Arg>
    void port(std::string name, Arg(C::*prop)) {  // case where the port is a data member
        _ports[name] =
            std::unique_ptr<_AbstractPort>(static_cast<_AbstractPort*>(new _Port<const Arg>(dynamic_cast<C*>(this), prop)));
    }

    // Declare a provide-port backed by a getter member function.
    template <class C, class Interface>
    void provide(std::string name, Interface* (C::*prop)()) {
        _ports[name] = std::unique_ptr<_AbstractPort>(
            static_cast<_AbstractPort*>(new _ProvidePort<Interface>(dynamic_cast<C*>(this), prop)));
    }

    /*
    =========================================================================================================================
    ~*~ Accessors to ports and name ~*~ */
    // Set the value of a use-port. Throws TinycompoException when the port
    // name is unknown or the argument types do not match the declaration.
    template <class... Args>
    void set(std::string name, Args... args) {    // no perfect forwarding to avoid references
        if (_ports.find(name) == _ports.end()) {  // there exists no port with this name
            throw TinycompoException{"Port name not found. Could not find port " + name + " in component " + debug() + "."};
        } else {  // there exists a port with this name
            auto ptr = dynamic_cast<_Port<const Args...>*>(_ports[name].get());
            if (ptr != nullptr)  // casting succeedeed
            {
                ptr->_set(std::forward<Args>(args)...);
            } else {  // casting failed, trying to provide useful error message
                throw TinycompoException("Setting property failed. Type " + TinycompoDebug::type<_Port<const Args...>>() +
                                         " does not seem to match port " + name + '.');
            }
        }
    }

    // Fetch the object behind a provide-port, typed. Throws on unknown name.
    template <class Interface>
    Interface* get(std::string name) const {
        try {
            return dynamic_cast<_ProvidePort<Interface>*>(_ports.at(name).get())->_get();
        } catch (const std::out_of_range&) {  // fixed: catch by const reference, not by value
            throw TinycompoException("<Component::get<Interface>> Port name " + name + " not found. Existing ports are:\n" +
                                     TinycompoDebug::list(_ports));
        }
    }

    // Fetch the object behind a provide-port, type-erased. Throws on unknown name.
    Component* get(std::string name) const {
        try {
            return dynamic_cast<_AbstractProvidePort*>(_ports.at(name).get())->get_type_erased();
        } catch (const std::out_of_range&) {  // fixed: catch by const reference, not by value
            throw TinycompoException("<Component::get> Port name " + name + " not found. Existing ports are:\n" +
                                     TinycompoDebug::list(_ports));
        }
    }

    void set_name(const std::string& n) { name = n; }
    std::string get_name() const { return name; }
};
/*
=============================================================================================================================
~*~ key_to_string ~*~
===========================================================================================================================*/
// Convert any streamable key to its textual form (as produced by the default
// ostream formatting for that type).
template <class Key>
std::string key_to_string(Key key) {
    std::ostringstream out;
    out << key;
    return out.str();
}
/*
=============================================================================================================================
~*~ Addresses ~*~
===========================================================================================================================*/
// A hierarchical component address: an ordered list of string keys, printed
// and parsed with "__" as separator (which is why keys may not contain "__").
class Address {
    std::vector<std::string> keys;
    mutable std::string to_string_buf{""};  // backing storage for c_str()

    // Registering an Address argument splices its keys in.
    template <class Arg>
    void register_helper(std::true_type, Arg arg) {
        keys.insert(keys.end(), arg.keys.begin(), arg.keys.end());
    }

    // Registering any other argument stringifies it; "__" is forbidden in keys.
    template <class Arg>
    void register_helper(std::false_type, Arg arg) {
        auto strkey = key_to_string(arg);
        if (strkey.find("__") != std::string::npos) {
            throw TinycompoException("Trying to add key " + strkey + " (which contains __) of type " +
                                     TinycompoDebug::type<Arg>() + " to address " + to_string() + "\n");
        }
        keys.push_back(strkey);
    }

    template <class Arg>
    void register_keys(Arg arg) {
        register_helper(std::is_same<Address, Arg>(), arg);
    }

    template <class Arg, class... Args>
    void register_keys(Arg arg, Args... args) {
        register_keys(arg);
        register_keys(std::forward<Args>(args)...);
    }

  public:
    Address() = default;
    Address(const ComponentReference&);
    Address(const char* input) : Address(std::string(input)) {}
    Address(int input) { register_keys(input); }
    Address(float input) { register_keys(input); }
    Address(double input) { register_keys(input); }

    // Parse a "__"-separated string into keys. An input without "__" yields a
    // single key (possibly empty).
    Address(const std::string& input) {
        std::string copy = input;
        auto get_token = [&]() -> std::string {
            auto it = copy.find("__");
            std::string result;
            if (it != std::string::npos) {
                result = copy.substr(0, it);
                copy = copy.substr(++++it);  // skip the two separator characters
            } else {
                result = copy;
                copy = "";
            }
            return result;
        };
        std::string token = get_token();
        keys.push_back(token);
        while (true) {
            token = get_token();
            if (token == "") break;
            keys.push_back(token);
        }
    }

    template <class Key, class... Keys>
    explicit Address(Key key, Keys... keys) {
        register_keys(key, std::forward<Keys>(keys)...);
    }

    // First key, or "" when the address is empty.
    std::string first() const {
        if (keys.size() > 0) {
            return keys.front();
        } else {
            return "";
        }
    }

    // Last key, or "" when the address is empty.
    std::string last() const {
        if (keys.size() > 0) {
            return keys.back();
        } else {
            return "";
        }
    }

    // Address with the first key removed (empty input yields empty output).
    Address rest() const {
        Address acc;
        for (unsigned int i = 1; i < keys.size(); i++) {
            acc.register_keys(keys.at(i));
        }
        return acc;
    }

    bool is_composite() const { return keys.size() > 1; }

    // True when this address is a (possibly empty) prefix of `other`.
    bool is_ancestor(const Address& other) const {
        if (keys.size() == 0) {
            return true;
        } else {
            return (first() == other.first()) and rest().is_ancestor(other.rest());
        }
    }

    Address rebase(const Address& other) const {  // if other is ancestor, remove corresponding prefix
        if (!other.is_ancestor(*this)) {
            throw TinycompoException("Trying to rebase address " + to_string() + " from " + other.to_string() +
                                     " although it is not an ancestor!\n");
        } else {
            if (other.keys.size() == 0) {
                return *this;
            } else {
                return rest().rebase(other.rest());
            }
        }
    }

    // Return a copy whose last key has been run through the given printf-style
    // format (the format is expected to contain one %s placeholder).
    Address format_last(const char* format) const {
        // Fixed: previously used the non-standard asprintf() without checking
        // its return value (on failure the buffer stayed null and was then
        // dereferenced). Two-pass std::snprintf is standard and checked.
        const std::string last_key = last();
        const int len = std::snprintf(nullptr, 0, format, last_key.c_str());
        if (len < 0) {
            throw TinycompoException("format_last: could not format key " + last_key + " of address " + to_string() +
                                     "\n");
        }
        std::vector<char> buf(static_cast<std::size_t>(len) + 1);
        std::snprintf(buf.data(), buf.size(), format, last_key.c_str());
        Address copy(*this);
        copy.keys.back() = std::string(buf.data());
        return copy;
    }

    // Join the keys with `sep` (default "__"); empty address yields "".
    std::string to_string(std::string sep = "__") const {
        return std::accumulate(keys.begin(), keys.end(), std::string(""),
                               [sep](std::string acc, std::string key) { return ((acc == "") ? "" : acc + sep) + key; });
    }

    const char* c_str() const {  // for easier use with printf
        to_string_buf = to_string();
        return to_string_buf.c_str();
    }

    // for use as key in maps
    bool operator<(const Address& other_address) const {
        return std::lexicographical_compare(keys.begin(), keys.end(), other_address.keys.begin(), other_address.keys.end());
    }

    bool operator==(const Address& other_address) const { return (keys == other_address.keys); }
};
/* A port address: the address of a component together with the name of one of its ports. */
struct PortAddress {
    std::string prop;  // port name on the target component
    Address address;   // address of the target component

    // Build from a port name plus anything an Address can be constructed from.
    template <class... Keys>
    PortAddress(const std::string& port_name, Keys... keys) : prop(port_name), address(std::forward<Keys>(keys)...) {}

    // Equal when both the port name and the component address match.
    bool operator==(const PortAddress& other) const { return prop == other.prop and address == other.address; }
};
// Stream output: an Address prints in its "__"-joined form; a PortAddress prints as "address.port".
std::ostream& operator<<(std::ostream& os, const Address& a) {
    os << a.to_string();
    return os;
}
std::ostream& operator<<(std::ostream& os, const PortAddress& p) {
    os << p.address.to_string() << "." << p.prop;
    return os;
}
/*
=============================================================================================================================
~*~ Graph representation classes ~*~
Small classes implementing a simple easily explorable graph representation for TinyCompo component assemblies.
===========================================================================================================================*/
/* A node reference in the graph representation: a stringified component address plus an optional port name. */
struct _GraphAddress {
    std::string address;  // stringified component address
    std::string port;     // port name; empty when the edge targets the component itself

    _GraphAddress(const std::string& address, const std::string& port = "") : address(address), port(port) {}

    // Print as "->address", or "->address.port" when a port name is present.
    void print(std::ostream& os = std::cout) const {
        os << "->" << address;
        if (port != "") {
            os << "." << port;
        }
    }
};
/*
=============================================================================================================================
~*~ _Operation class ~*~
===========================================================================================================================*/
/* Stores a deferred connection declaration: a callable that will run a Connector's _connect on an
   assembly, plus a graph representation of the operation (connector type name + neighbor addresses). */
class _Operation {
    template <class Functor, class... Args>
    void neighbors_from_args(Args... args) {  // populates the list of neighbors from arguments of the Connector
        helper1(Functor::_connect, args...);
    }
    template <class... Args, class... CArgs>
    void helper1(void (*)(Assembly&, Args...), CArgs... cargs) {
        void (*g)(Args...) = nullptr;  // Double recursion on connect call arguments (cargs) and on argument types of
        helper2(g, cargs...);          // _connect function (through the g pointer). This is necessary because call
    }                                  // arguments might have the wrong type (eg a string instead of an address).
    // base case: no expected arguments left
    void helper2(void (*)()) {}
    // expected argument is an Address: record it as a port-less neighbor (carg may need conversion to Address)
    template <class... Args, class CArg, class... CArgs>
    void helper2(void (*)(Address, Args...), CArg carg, CArgs... cargs) {
        neighbors.push_back(_GraphAddress(Address(carg).to_string()));
        void (*g)(Args...) = nullptr;
        helper2(g, cargs...);
    }
    // expected argument is a PortAddress: record its address and port as a neighbor
    template <class... Args, class... CArgs>
    void helper2(void (*)(PortAddress, Args...), PortAddress carg, CArgs... cargs) {
        neighbors.push_back(_GraphAddress(carg.address.to_string(), carg.prop));
        void (*g)(Args...) = nullptr;
        helper2(g, cargs...);
    }
    // any other expected argument type: not a neighbor, skip it
    template <class Arg, class... Args, class CArg, class... CArgs>
    void helper2(void (*)(Arg, Args...), CArg, CArgs... cargs) {
        void (*g)(Args...) = nullptr;
        helper2(g, cargs...);
    }

  public:
    // Build from a connector type: capture the future call to Connector::_connect and extract neighbors.
    template <class Connector, class... Args>
    _Operation(_Type<Connector>, Args&&... args)
        : _connect([args...](Assembly& assembly) { Connector::_connect(assembly, args...); }),
          type(TinycompoDebug::type<Connector>()) {
        neighbors_from_args<Connector>(args...);
    }
    // Build a configuration operation on the component at the given address (used by Model::configure).
    template <class Target, class Lambda>
    _Operation(Address address, _Type<Target>, Lambda lambda);  // def at end of file
    std::function<void(Assembly&)> _connect;  // the deferred call, executed at assembly build time
    // representation-related stuff
    std::string type;                       // connector type name, for display
    std::vector<_GraphAddress> neighbors;   // addresses this operation touches
    // Print "Connector (type)" followed by its neighbors, indented by `tabs` tab characters.
    void print(std::ostream& os = std::cout, int tabs = 0) const {
        os << std::string(tabs, '\t') << "Connector (" << type << ") ";
        for (auto& n : neighbors) {
            n.print(os);
            os << " ";
        }
        os << '\n';
    }
};
/*
=============================================================================================================================
~*~ _ComponentBuilder class ~*~
A small class that is capable of storing a constructor call for any Component child class and execute said call later on
demand. The class itself is not templated (allowing direct storage) but the constructor call is. This is an internal
tinycompo class that should never be seen by the user (as denoted by the underscore prefix).
===========================================================================================================================*/
struct _ComponentBuilder {
    // Capture a deferred constructor call for component type T with the given arguments.
    template <class T, class... Args>
    _ComponentBuilder(_Type<T>, const std::string& name, Args... args)
        : _constructor([=]() {
              Component* raw = dynamic_cast<Component*>(new T(args...));
              return std::unique_ptr<Component>(raw);
          }),
          type(TinycompoDebug::type<T>()),
          name(name) {}

    std::function<std::unique_ptr<Component>()> _constructor;  // stores the component constructor

    // representation-related stuff
    std::string type;
    std::string name;  // should it be removed (not very useful as its stored in a map by name)

    // Print a one-line description of the builder, indented by `tabs` tab characters.
    void print(std::ostream& os = std::cout, int tabs = 0) const {
        std::string indent(tabs, '\t');
        os << indent << "Component \"" << name << "\" (" << type << ")\n";
    }
};
/*
=============================================================================================================================
~*~ ComponentReference ~*~
Small class used to interact with an already-declared component without repeating its name everytime. This class allows the
chaining of declaration, eg : model.component(...).connect(...).connect(...).annotate(...)
===========================================================================================================================*/
class ComponentReference {
    Model& model_ref;           // model in which the referenced component was declared
    Address component_address;  // address of the referenced component within that model
    friend Address;             // Address has a constructor from ComponentReference

  public:
    ComponentReference(Model& model_ref, const Address& component_address)
        : model_ref(model_ref), component_address(component_address) {}

    // Chainable declaration helpers; each returns *this so calls can be chained,
    // e.g. model.component(...).connect(...).set(...). Bodies are defined at the end of the file.
    template <class T, class... Args>
    ComponentReference& connect(const std::string&, Args&&...);  // implemented at the end
    template <class... Args>
    ComponentReference& connect(Args&&...);  // implemented at the end
    template <class Lambda>
    ComponentReference& configure(Lambda lambda);  // implemented at the end
    template <class... Args>
    ComponentReference& set(const std::string&, Args&&...);  // implemented at the end
};
/*
=============================================================================================================================
~*~ _Driver ~*~
===========================================================================================================================*/
// invariant : Refs are all pointers to classes inheriting from Component
template <class... Refs>
class _Driver : public Component, public _AbstractDriver {
    std::function<void(Refs...)> instructions;  // user code to run with the resolved references
    std::tuple<Refs...> refs;                   // resolved component pointers, set through the "refs" port

    // C++11 integer_sequence implementation :/
    template <int...>
    struct seq {};
    template <int N, int... S>
    struct gens : gens<N - 1, N - 1, S...> {};
    template <int... S>
    struct gens<0, S...> {
        typedef seq<S...> type;
    };

    // helper functions
    // expand the tuple of refs into the call to instructions
    template <int... S>
    void call_helper(seq<S...>) {
        instructions(std::get<S>(refs)...);
    }
    // recursion end: all refs have been assigned
    template <int i>
    void set_ref_helper(std::vector<Component*>&) {}
    // assign ref i by downcasting the i-th provided Component* to its expected pointer type
    template <int i, class Head, class... Tail>
    void set_ref_helper(std::vector<Component*>& ref_values) {
        std::get<i>(refs) = dynamic_cast<Head>(ref_values.at(i));
        set_ref_helper<i + 1, Tail...>(ref_values);
    }
    // port to set the references (invariant : vector should have the same size as Refs)
    void set_refs(std::vector<Component*> ref_values) override { set_ref_helper<0, Refs...>(ref_values); }
    // port to run the stored instructions with the resolved references
    void go() override { call_helper(typename gens<sizeof...(Refs)>::type()); }

  public:
    // Declares the "go" (run instructions) and "refs" (set references) ports.
    _Driver(const std::function<void(Refs...)>& instructions) : instructions(instructions) {
        port("go", &_Driver::go);
        port("refs", &_Driver::set_refs);
    }
};
/*
=============================================================================================================================
~*~ Model ~*~
===========================================================================================================================*/
/* A declarative description of a component assembly: component declarations, connection/configuration
   operations, and nested sub-models (composites). Nothing is instantiated until an Assembly is built. */
class Model {
    friend class Assembly;  // to access internal data
    friend class Introspector;

    // state of model
    std::map<std::string, _ComponentBuilder> components;                    // component declarations, by name
    std::vector<_Operation> operations;                                     // connection/configuration declarations
    std::map<std::string, std::pair<Model, _ComponentBuilder>> composites;  // sub-models + their builders, by name

    // helper functions
    // Drop everything up to and including the first "__" of s.
    // NOTE(review): if s contains no "__", find returns npos, which the double increment wraps to 1,
    // silently returning s.substr(1) — presumably callers only pass strings containing "__"; confirm.
    std::string strip(std::string s) const {
        auto it = s.find("__");
        return s.substr(++++it);
    }
    template <class Lambda, class C>  // this helper extracts the component type from the lambda
    void configure_helper(Address address, Lambda lambda, void (Lambda::*)(C&) const) {
        operations.emplace_back(address, _Type<C>(), lambda);
    }
    template <class Lambda, class... Refs>  // this helper extracts the reference types from the lambda
    ComponentReference driver_helper(Address address, Lambda lambda, void (Lambda::*)(Refs...) const) {
        return component<_Driver<Refs...>>(address, lambda);
    }

    // helpers to select right component method (tag types for compile-time dispatch below)
    using IsComponent = std::false_type;
    using IsComposite = std::true_type;
    using IsAddress = std::true_type;
    using IsNotAddress = std::false_type;
    using IsMeta = std::true_type;
    using IsConcrete = std::false_type;
    // T is a meta-component: delegate the whole declaration to T::connect.
    template <class T, class Whatever, class Whatever2, class... Args>
    ComponentReference component_call_helper(IsMeta, Whatever, Whatever2, Args&&... args) {
        return T::connect(*this, std::forward<Args>(args)...);
    }
    // Declaration through an Address: recurse into composites until a single-name address remains.
    template <class T, class Whatever, class... Args>
    ComponentReference component_call_helper(IsConcrete, Whatever, IsAddress, const Address& address, Args&&... args) {
        if (!address.is_composite()) {
            component<T>(address.first(), std::forward<Args>(args)...);
        } else {
            get_composite(address.first()).component<T>(address.rest(), std::forward<Args>(args)...);
        }
        return ComponentReference(*this, address);
    }
    // Declaration of a regular component under a simple key: store a builder for later instantiation.
    template <class T, class CallKey, class... Args>
    ComponentReference component_call_helper(IsConcrete, IsComponent, IsNotAddress, CallKey key, Args&&... args) {
        std::string key_name = key_to_string(key);
        components.emplace(std::piecewise_construct, std::forward_as_tuple(key_name),
                           std::forward_as_tuple(_Type<T>(), key_name, std::forward<Args>(args)...));
        return ComponentReference(*this, Address(key));
    }
    // Declaration of a composite under a simple key: build its sub-model from T::contents and store it.
    template <class T, class CallKey, class... Args>
    ComponentReference component_call_helper(IsConcrete, IsComposite, IsNotAddress, CallKey key, Args&&... args) {
        std::string key_name = key_to_string(key);
        Model m;
        T::contents(m, args...);
        composites.emplace(std::piecewise_construct, std::forward_as_tuple(key_name),
                           std::forward_as_tuple(std::piecewise_construct, std::forward_as_tuple(m),
                                                 std::forward_as_tuple(_Type<T>(), key_name)));
        return ComponentReference(*this, Address(key));
    }

    // helpers for connect method
    // Concrete connector: store a deferred operation.
    template <class C, class... Args>
    void connect_call_helper(IsConcrete, Args&&... args) {
        operations.emplace_back(_Type<C>(), std::forward<Args>(args)...);
    }
    // Meta connector: let C declare things in the model immediately.
    template <class C, class... Args>
    void connect_call_helper(IsMeta, Args&&... args) {
        C::connect(*this, std::forward<Args>(args)...);
    }

    // helpers for introspection things
    // Collect the addresses of all components at any depth, prefixed by `parent`.
    std::vector<Address> all_addresses_helper(Address parent) const {
        std::vector<Address> result;
        for (auto&& c : components) {
            result.emplace_back(Address(parent, c.first));
        }
        for (auto&& c : composites) {
            auto recursive_result = c.second.first.all_addresses_helper(Address(parent, c.first));
            result.insert(result.end(), recursive_result.begin(), recursive_result.end());
        }
        return result;
    }

  public:
    Model() = default;  // when creating model from scratch
    template <class T, class... Args>
    Model(_Type<T>, Args... args) {  // when instantiating from composite content function
        T::contents(*this, std::forward<Args>(args)...);
    }
    /*
    =========================================================================================================================
    ~*~ Declaration functions ~*~ */
    // Declare a component of type T at the given key or address. Dispatches at compile time on whether
    // T is a meta-component, whether it is a composite, and whether the key is an Address.
    template <class T, class MaybeAddress, class... Args>
    ComponentReference component(MaybeAddress address, Args... args) {
        return component_call_helper<T>(std::is_base_of<Meta, T>(), std::is_base_of<Composite, T>(),
                                        std::is_same<Address, MaybeAddress>(), address, args...);
    }
    // Declare an empty composite at the given address.
    ComponentReference composite(const Address& address) { return component<Composite>(address); }
    // Declare a connection using connector class C with the given arguments.
    template <class C, class... Args>
    void connect(Args&&... args) {
        connect_call_helper<C>(std::is_base_of<Meta, C>(), std::forward<Args>(args)...);
    }
    // Register a lambda to be run on the component at `address` at instantiation time.
    template <class Lambda>
    void configure(Address address, Lambda lambda) {  // does not work with a function pointer (needs operator())
        configure_helper(address, lambda, &Lambda::operator());
    }
    // Declare a driver: a lambda run (via its "go" port) with references to other components.
    template <class Lambda>
    ComponentReference driver(Address address, Lambda lambda) {
        return driver_helper(address, lambda, &Lambda::operator());
    }
    /*
    =========================================================================================================================
    ~*~ Getters / introspection ~*~ */
    // Access the sub-model at the given (possibly nested) address; throws if it does not exist.
    Model& get_composite(const Address& address) {
        if (address.is_composite()) {
            return get_composite(address.first()).get_composite(address.rest());
        } else {
            std::string key_name = address.first();
            auto compositeIt = composites.find(key_name);
            if (compositeIt == composites.end()) {
                throw TinycompoException("Composite not found. Composite " + key_name +
                                         " does not exist. Existing composites are:\n" + TinycompoDebug::list(composites));
            } else {
                return dynamic_cast<Model&>(compositeIt->second.first);
            }
        }
    }
    // Const overload of get_composite.
    const Model& get_composite(const Address& address) const {
        if (address.is_composite()) {
            return get_composite(address.first()).get_composite(address.rest());
        } else {
            std::string key_name = address.first();
            auto compositeIt = composites.find(key_name);
            if (compositeIt == composites.end()) {
                throw TinycompoException("Composite not found. Composite " + key_name +
                                         " does not exist. Existing composites are:\n" + TinycompoDebug::list(composites));
            } else {
                return dynamic_cast<const Model&>(compositeIt->second.first);
            }
        }
    }
    // True if the declaration at `address` is a component whose type derives from T.
    // Note: instantiates a throw-away component to check its dynamic type.
    template <class T>
    bool has_type(const Address& address) const {
        if (address.is_composite()) {  // address is composite (several names)
            return get_composite(address.first()).has_type<T>(address.rest());
        } else {
            if (is_composite(address)) {  // non-composite address corresponds to a composite
                return false;             // composite don't have types
            } else {
                auto tmp_ptr = components.at(address.to_string())._constructor();
                return dynamic_cast<T*>(tmp_ptr.get()) != nullptr;
            }
        }
    }
    // True if `address` names a composite here or (by name) in any direct sub-model.
    bool is_composite(const Address& address) const {
        if (address.is_composite()) {
            return get_composite(address.first()).is_composite(address.rest());
        } else {
            bool result = false;
            for (auto& c : composites) {
                result = result or c.first == address.first() or c.second.first.is_composite(address.first());
            }
            return result;
        }
    }
    // True if a component or composite is declared at `address`.
    bool exists(const Address& address) const {
        if (address.is_composite()) {
            return get_composite(address.first()).exists(address.rest());
        } else {
            return components.count(address.first()) != 0 or composites.count(address.first()) != 0;
        }
    }
    // Number of declarations (components + composites) at this level only.
    std::size_t size() const { return components.size() + composites.size(); }
    // Write a graphviz representation of the model to the given stream.
    void dot(std::ostream& stream = std::cout) const { to_dot(0, "", stream); }
    void dot_to_file(const std::string& fileName = "tmp.dot") const {
        std::ofstream file;
        file.open(fileName);
        dot(file);
    }
    // Build a (nodes, edges) digraph from all operations of the form (PortAddress, Address) at this level.
    DirectedGraph get_digraph() const {
        std::set<std::string> nodes;
        std::multimap<std::string, std::string> edges;
        for (auto c : operations) {
            // if connector is of the form (PortAddress, Address)
            if ((c.neighbors.size() == 2) and (c.neighbors[0].port != "") and (c.neighbors[1].port == "")) {
                edges.insert(make_pair(c.neighbors[0].address, c.neighbors[1].address));
                nodes.insert(c.neighbors[0].address);
                nodes.insert(c.neighbors[1].address);
            }
        }
        return make_pair(nodes, edges);
    }
    // Recursive graphviz writer; `name` is the fully-qualified ("__"-joined) name of this sub-model.
    void to_dot(int tabs = 0, const std::string& name = "", std::ostream& os = std::cout) const {
        std::string prefix = name + (name == "" ? "" : "__");
        if (name == "") {  // toplevel
            os << std::string(tabs, '\t') << "graph g {\n\tsep=\"+25,25\";\n\tnodesep=0.6;\n";
        } else {
            os << std::string(tabs, '\t') << "subgraph cluster_" << name << " {\n";
        }
        for (auto& c : components) {
            os << std::string(tabs + 1, '\t') << prefix << c.first << " [label=\"" << c.first << "\\n(" << c.second.type
               << ")\" shape=component margin=0.15];\n";
        }
        int i = 0;
        for (auto& c : operations) {
            std::string cname = "connect_" + prefix + std::to_string(i);
            os << std::string(tabs + 1, '\t') << cname << " [xlabel=\"" << c.type << "\" shape=point];\n";
            for (auto& n : c.neighbors) {
                os << std::string(tabs + 1, '\t') << cname << " -- "
                   << (is_composite(n.address) ? "cluster_" + prefix + n.address : prefix + n.address)
                   << (n.port == "" ? "" : "[xlabel=\"" + n.port + "\"]") << ";\n";
            }
            i++;
        }
        for (auto& c : composites) {
            c.second.first.to_dot(tabs + 1, prefix + c.first, os);
        }
        os << std::string(tabs, '\t') << "}\n";
    }
    // Human-readable dump of all declarations (components, operations, composites).
    void print(std::ostream& os = std::cout, int tabs = 0) const {
        for (auto& c : components) {
            c.second.print(os, tabs);
        }
        for (auto& c : operations) {
            c.print(os, tabs);
        }
        for (auto& c : composites) {
            os << std::string(tabs, '\t') << "Composite " << c.first << " {\n";
            c.second.first.print(os, tabs + 1);
            os << std::string(tabs, '\t') << "}\n";
        }
    }
    // Addresses of all components at any depth (composites themselves not listed).
    std::vector<Address> all_addresses() const {
        std::vector<Address> result;
        for (auto&& c : components) {
            result.emplace_back(c.first);
        }
        for (auto&& c : composites) {
            auto recursive_result = c.second.first.all_addresses_helper(c.first);
            result.insert(result.end(), recursive_result.begin(), recursive_result.end());
        }
        return result;
    }
    // Addresses of all components inside the composite at `address`.
    std::vector<Address> all_addresses(const Address& address) const { return get_composite(address).all_addresses(); }
    // Stringified component names down to the given depth; optionally includes composite names too.
    std::vector<std::string> all_component_names(int depth = 0, bool include_composites = false,
                                                 const std::string& name = "") const {
        std::string prefix = name + (name == "" ? "" : "__");
        std::vector<std::string> result;
        for (auto& c : components) {             // local components
            result.push_back(prefix + c.first);  // stringified name
        }
        if (include_composites) {
            for (auto& c : composites) {
                result.push_back(prefix + c.first);
            }
        }
        if (depth > 0) {
            for (auto& c : composites) {  // names from composites until a certain depth
                auto subresult = c.second.first.all_component_names(depth - 1, include_composites, prefix + c.first);
                result.insert(result.end(), subresult.begin(), subresult.end());
            }
        }
        return result;
    }
};
/*
=============================================================================================================================
~*~ Introspector ~*~
===========================================================================================================================*/
/* Read-only queries over a Model: counts and topology of declared components, composites and operations. */
class Introspector {
    Model& m;  // model being introspected (non-owning reference)

    // Fold f over all direct sub-composites, starting from init, and return the accumulated value.
    template <class T>
    T acc_composites(T init, std::function<void(T&, Introspector&)> f) const {
        T result = init;
        acc_composites_ref(result, f);
        return result;
    }
    // Apply f(acc, introspector-of-sub-composite) for every direct sub-composite.
    // Iterates by reference: the previous by-value loop copied the whole sub-Model on every iteration.
    template <class T>
    void acc_composites_ref(T& acc, std::function<void(T&, Introspector&)> f) const {
        for (auto& composite : m.composites) {
            Introspector i(composite.second.first);
            f(acc, i);
        }
    }
    // Same, but f also receives the sub-composite's name (as an Address).
    template <class T>
    void acc_composites_ref(T& acc, std::function<void(T&, Introspector&, Address)> f) const {
        for (auto& composite : m.composites) {
            Introspector i(composite.second.first);
            f(acc, i, composite.first);
        }
    }

  public:
    Introspector(Model& m) : m(m) {}
    /*
    =========================================================================================================================
    ~*~ Size functions ~*~ */
    // Number of declarations (components + composites) at this level only.
    size_t nb_components() const { return m.components.size() + m.composites.size(); }
    // Total number of component declarations, recursing into composites (composites themselves not counted).
    size_t deep_nb_components() const {
        return acc_composites<size_t>(m.components.size(),
                                      [](size_t& acc, Introspector& i) { acc += i.deep_nb_components(); });
    }
    // Number of operations declared at this level only.
    size_t nb_operations() const { return m.operations.size(); }
    // Total number of operations, recursing into composites.
    size_t deep_nb_operations() const {
        return acc_composites<size_t>(nb_operations(), [](size_t& acc, Introspector& i) { acc += i.deep_nb_operations(); });
    }
    /*
    =========================================================================================================================
    ~*~ Topology-related functions ~*~ */
    // Addresses of all declarations at this level (components, then composites).
    std::vector<Address> components() const {
        std::vector<Address> result;
        for (const auto& component : m.components) {
            result.emplace_back(component.first);
        }
        for (const auto& composite : m.composites) {
            result.emplace_back(composite.first);
        }
        return result;
    }
    // Addresses of all components at any depth, each prefixed by `prefix`.
    std::vector<Address> deep_components(Address prefix = Address()) const {
        std::vector<Address> result;
        for (const auto& component : m.components) {
            result.emplace_back(prefix, component.first);
        }
        acc_composites_ref<std::vector<Address>>(result, [](std::vector<Address>& acc, Introspector& i, Address context) {
            auto tmp = i.deep_components(context);
            acc.insert(acc.end(), tmp.begin(), tmp.end());
        });
        return result;
    }
    // Operations of the form (PortAddress, Address) at this level, as (origin port, destination) pairs.
    std::vector<std::pair<PortAddress, Address>> directed_binops(Address prefix = Address()) const {
        std::vector<std::pair<PortAddress, Address>> result;
        for (const auto& operation : m.operations) {
            auto& n = operation.neighbors;
            if (n.size() == 2 and n.at(0).port != "" and n.at(1).port == "") {
                PortAddress origin(n.at(0).port, Address(prefix, Address(n.at(0).address)));
                Address dest(prefix, Address(n.at(1).address));
                result.emplace_back(origin, dest);
            }
        }
        return result;
    }
    // Same as directed_binops, recursing into composites.
    std::vector<std::pair<PortAddress, Address>> deep_directed_binops(Address prefix = Address()) const {
        using t = std::vector<std::pair<PortAddress, Address>>;
        auto result = directed_binops(prefix);
        acc_composites_ref<t>(result, [](t& acc, Introspector& i, Address context) {
            auto tmp = i.deep_directed_binops(context);
            acc.insert(acc.end(), tmp.begin(), tmp.end());
        });
        return result;
    }
};
/*
=============================================================================================================================
~*~ InstanceSet ~*~
An object representing a set of instantiated components. To be obtained via Assembly.
===========================================================================================================================*/
template <class C>
class InstanceSet {
std::vector<Address> _names;
std::vector<C*> _pointers;
public:
void push_back(Address address, C* pointer) {
_names.push_back(address);
_pointers.push_back(pointer);
}
void combine(const InstanceSet<C>& other) {
_names.insert(_names.end(), other._names.begin(), other._names.end());
_pointers.insert(_pointers.end(), other._pointers.begin(), other._pointers.end());
}
const std::vector<Address>& names() const { return _names; }
const std::vector<C*>& pointers() const { return _pointers; }
};
/*
=============================================================================================================================
~*~ Assembly class ~*~
===========================================================================================================================*/
/* An instantiated component assembly built from a Model: owns the component instances, runs the
   declared connection operations, and provides addressed access to the instances. */
class Assembly : public Component {
    std::map<std::string, std::unique_ptr<Component>> instances;  // instantiated components, by local name
    Model internal_model;                                         // copy of the model this assembly was built from
    friend Composite;

    // Instantiate everything declared in internal_model:
    // 1) construct components, 2) construct and recursively build sub-assemblies,
    // 3) after_construct hooks, 4) run connection operations, 5) after_connect hooks.
    void build() {
        for (auto& c : internal_model.components) {
            // _constructor() already returns a unique_ptr; no need to re-wrap it
            instances.emplace(c.first, c.second._constructor());
            std::stringstream ss;
            ss << get_name() << ((get_name() != "") ? "__" : "") << c.first;  // qualified name, eg "parent__child"
            instances.at(c.first)->set_name(ss.str());
        }
        for (auto& c : internal_model.composites) {
            std::stringstream ss;
            ss << get_name() << ((get_name() != "") ? "__" : "") << c.first;
            auto it = instances.emplace(c.first, c.second.second._constructor()).first;
            auto& ref = dynamic_cast<Assembly&>(*(*it).second.get());
            ref.set_name(ss.str());
            ref.instantiate_from(c.second.first);
        }
        for (auto& i : instances) {
            i.second->after_construct();
        }
        for (auto& o : internal_model.operations) {
            o._connect(*this);
        }
        for (auto& i : instances) {
            i.second->after_connect();
        }
    }

  public:
    Assembly() : internal_model(Model()) {}
    // Build an assembly from a model; `name` (if any) is used to qualify instance names.
    explicit Assembly(Model& model, const std::string& name = "") : internal_model(model) {
        set_name(name);
        build();
    }
    // Replace the internal model and re-instantiate everything.
    void instantiate_from(Model model) {
        internal_model = std::move(model);  // parameter is already our own copy; move instead of copying again
        instantiate();
    }
    // Destroy current instances and rebuild from the internal model.
    void instantiate() {
        instances.clear();
        build();
    }
    // Multi-line description of the assembly's contents.
    std::string debug() const override {
        std::stringstream ss;
        ss << "Composite {\n";
        print(ss);
        ss << "}";
        return ss.str();
    }
    std::size_t size() const { return instances.size(); }
    // True if the instance at `address` derives from C.
    template <class C>
    bool derives_from(const Address& address) const {
        auto ptr = dynamic_cast<C*>(&at(address));
        return ptr != nullptr;
    }
    bool is_composite(const Address& address) const { return derives_from<Assembly>(address); }
    // Access a local (single-key) instance as T; throws TinycompoException for unknown keys.
    template <class T = Component, class Key>
    T& at(Key key) const {
        std::string key_name = key_to_string(key);
        try {
            return dynamic_cast<T&>(*(instances.at(key_name).get()));
        } catch (const std::out_of_range&) {  // catch by const ref (previously by value: needless copy)
            throw TinycompoException("<Assembly::at> Trying to access incorrect address. Address " + key_name +
                                     " does not exist. Existing addresses are:\n" + TinycompoDebug::list(instances));
        }
    }
    // Access an instance at a possibly-nested address, recursing through sub-assemblies.
    template <class T = Component>
    T& at(const Address& address) const {
        if (!address.is_composite()) {
            return at<T>(address.first());
        } else {
            return at<Assembly>(address.first()).template at<T>(address.rest());
        }
    }
    // Access whatever a provide-port exposes, as T.
    template <class T = Component>
    T& at(const PortAddress& port_address) const {
        auto& compo_ref = at(port_address.address);
        return *compo_ref.get<T>(port_address.prop);
    }
    Model& get_model() { return internal_model; }
    // Print "name: debug-string" for every instance.
    void print(std::ostream& os = std::cout) const {
        for (auto& i : instances) {
            os << i.first << ": " << i.second->debug() << std::endl;
        }
    }
    // Call port `port.prop` of the component at `port.address` with the given arguments.
    template <class... Args>
    void call(const PortAddress& port, Args... args) const {
        at(port.address).set(port.prop, std::forward<Args>(args)...);
    }
    template <class Key, class... Args>
    void call(const Key& key, const std::string& prop, Args... args) const {
        at(key).set(prop, std::forward<Args>(args)...);
    }
    // Expose the component at `address` (seen through Interface) as port `prop_name` of this assembly.
    template <class Interface>
    void provide(const std::string& prop_name, const Address& address) {
        _ports[prop_name] =
            std::unique_ptr<_AbstractPort>(static_cast<_AbstractPort*>(new _ProvidePort<Interface>(*this, address)));
    }
    // Same, but forwarding to a port of an inner component.
    template <class Interface>
    void provide(const std::string prop_name, const PortAddress& address) {
        _ports[prop_name] =
            std::unique_ptr<_AbstractPort>(static_cast<_AbstractPort*>(new _ProvidePort<Interface>(*this, address)));
    }
    // Gather pointers to all instances (at any depth) deriving from T; names are prefixed with `parent`.
    template <class T = Component>
    InstanceSet<T> get_all_helper(const Address parent = Address()) {
        InstanceSet<T> result;
        auto all_addresses = internal_model.all_addresses();
        for (auto&& address : all_addresses) {
            auto ptr = dynamic_cast<T*>(&at<Component>(address));
            if (ptr != nullptr) {
                result.push_back(Address(parent, address), ptr);
            }
        }
        return result;
    }
    template <class T = Component>
    InstanceSet<T> get_all() {
        return get_all_helper<T>(Address());
    }
    // Gather instances from an explicit set of component/composite addresses.
    template <class T>
    InstanceSet<T> get_all(std::set<Address> composites_and_components, const Address& point_of_view = Address()) {
        InstanceSet<T> result;
        for (auto&& compo : composites_and_components) {
            if (is_composite(compo)) {
                result.combine(get_all<T>(compo, point_of_view));
            } else {
                result.push_back(compo, &at<T>(compo));
            }
        }
        return result;
    }
    // Gather instances inside a given composite; names are expressed relative to point_of_view.
    template <class T>
    InstanceSet<T> get_all(const Address& composite, const Address& point_of_view = Address("invalid")) {
        Address pov = (point_of_view == Address("invalid")) ? composite : point_of_view;
        if (composite == Address()) {
            return get_all_helper<T>();
        } else if (pov == Address()) {
            return at<Assembly>(composite).get_all_helper<T>(composite);
        } else {
            return at<Assembly>(pov.first()).get_all<T>(composite.rest(), pov.rest());
        }
    }
};
/*
=============================================================================================================================
~*~ Composite ~*~
===========================================================================================================================*/
// A component which is itself an assembly of components. Derive from this and declare contents in a
// static `contents(Model&, ...)` function; the default below allows declaring empty composites directly.
struct Composite : public Assembly {
    static void contents(Model&) {} // useful for empty composites
};
template <class C, class... Args>
void instantiate_composite(C& c, Args&&... args) {
Model m;
C::contents(m, std::forward<Args>(args)...);
c.instantiate_from(m);
c.after_construct();
}
/*
=============================================================================================================================
~*~ Set class ~*~
===========================================================================================================================*/
/* Connector which calls `set` on a port of a single component with the given arguments. */
template <class... Args>
struct Set {
    static void _connect(Assembly& assembly, PortAddress component, Args... args) {
        auto& target = assembly.at(component.address);
        target.set(component.prop, std::forward<Args>(args)...);
    }
};
/*
=============================================================================================================================
~*~ Use class ~*~
UseProvide is a "connector class", ie a functor that can be passed as template parameter to Assembly::connect. This
particular connector implements the "use/provide" connection, ie setting a port of one component (the user) to a pointer
to an interface of another (the provider). This class should be used as-is to declare assembly connections.
===========================================================================================================================*/
template <class Interface>
struct Use {
static void _connect(Assembly& assembly, PortAddress user, Address provider) {
auto& ref_user = assembly.at(user.address);
auto& ref_provider = assembly.template at<Interface>(provider);
ref_user.set(user.prop, &ref_provider);
}
};
/*
=============================================================================================================================
~*~ UseProvide class ~*~
===========================================================================================================================*/
template <class Interface>
struct UseProvide {
static void _connect(Assembly& assembly, PortAddress user, PortAddress provider) {
auto& ref_user = assembly.at(user.address);
auto& ref_provider = assembly.at(provider.address);
ref_user.set(user.prop, ref_provider.template get<Interface>(provider.prop));
}
};
/*
=============================================================================================================================
~*~ Array class ~*~
===========================================================================================================================*/
/* A composite holding nb_elems components of type T, keyed by their integer index (0 .. nb_elems-1). */
template <class T>
struct Array : public Composite {
    using Composite::Composite;
    // Declare nb_elems instances of T into the model, each constructed with args...
    template <class... Args>
    static void contents(Model& model, int nb_elems, Args&&... args) {
        int index = 0;
        while (index < nb_elems) {
            model.component<T>(index, std::forward<Args>(args)...);
            ++index;
        }
    }
};
/*
=============================================================================================================================
~*~ ArraySet class ~*~
===========================================================================================================================*/
template <class Data>
struct ArraySet {
static void _connect(Assembly& assembly, PortAddress array, const std::vector<Data>& data) {
auto& arrayRef = assembly.template at<Assembly>(array.address);
for (int i = 0; i < static_cast<int>(arrayRef.size()); i++) {
arrayRef.at(i).set(array.prop, data.at(i));
}
}
};
/*
=============================================================================================================================
~*~ ArrayOneToOne class ~*~
This is a connector that takes two arrays with identical sizes and connects (as if using the UseProvide connector) every
i-th element in array1 to its corresponding element in array2 (ie, the i-th element in array2). This class should be used as
a template parameter for Assembly::connect.
===========================================================================================================================*/
template <class Interface>
struct ArrayOneToOne {
    // Connects element i of array1 (its use-port `array1.prop`) to element i of
    // array2, for all i.  Throws TinycompoException on size mismatch.
    static void _connect(Assembly& a, PortAddress array1, Address array2) {
        auto& ref1 = a.at<Assembly>(array1.address);
        auto& ref2 = a.at<Assembly>(array2);
        if (ref1.size() == ref2.size()) {
            for (int i = 0; i < static_cast<int>(ref1.size()); i++) {
                // NOTE(review): dynamic_cast yields nullptr if the element does
                // not implement Interface; the nullptr is passed through as-is.
                auto ptr = dynamic_cast<Interface*>(&ref2.at(i));
                ref1.at(i).set(array1.prop, ptr);
            }
        } else {
            throw TinycompoException{"Array connection: mismatched sizes. " + array1.address.to_string() + " has size " +
                                     std::to_string(ref1.size()) + " while " + array2.to_string() + " has size " +
                                     std::to_string(ref2.size()) + '.'};
        }
    }
};
/*
=============================================================================================================================
~*~ MultiUse class ~*~
The MultiUse class is a connector that connects (as if using the Use connector) one port of one component to every component
in an array. This can be seen as a "multiple use" connector (the reducer is the user in multiple use/provide connections).
This class should be used as a template parameter for Assembly::connect.
===========================================================================================================================*/
/** Connector wiring a single "reducer" component to every element of an array:
    the reducer's use-port receives one Interface* per array element. */
template <class Interface>
struct MultiUse {
    static void _connect(Assembly& a, PortAddress reducer, Address array) {
        auto& reducer_ref = a.at<Component>(reducer.address);
        auto& array_ref = a.at<Assembly>(array);
        const int element_count = static_cast<int>(array_ref.size());
        for (int i = 0; i < element_count; ++i) {
            // dynamic_cast yields nullptr for elements not implementing Interface.
            reducer_ref.set(reducer.prop, dynamic_cast<Interface*>(&array_ref.at(i)));
        }
    }
};
/*
=============================================================================================================================
~*~ MultiProvide class ~*~
===========================================================================================================================*/
/** Connector: points the named port of every element of an array at one single
    "mapper" component (the mirror image of MultiUse). */
template <class Interface>
struct MultiProvide {
    static void _connect(Assembly& a, PortAddress array, Address mapper) {
        try {
            for (int i = 0; i < static_cast<int>(a.at<Assembly>(array.address).size()); i++) {
                a.at(Address(array.address, i)).set(array.prop, &a.at<Interface>(mapper));
            }
        } catch (...) {
            // NOTE(review): the original exception (and its message) is discarded
            // here and replaced by a generic one — the root cause is lost.
            throw TinycompoException("<MultiProvide::_connect> There was an error while trying to connect components.");
        }
    }
};
/*
=============================================================================================================================
~*~ DriverConnect class ~*~
===========================================================================================================================*/
/** Connector that gathers raw Component pointers for a pack of addresses and/or
    port addresses, then hands the whole vector to a driver component
    (anything deriving from _AbstractDriver) via set_refs. */
template <class... Addresses>
struct DriverConnect {
    // Recursion terminator: nothing left to gather.
    static void ref_gathering_helper(Assembly&, std::vector<Component*>&) {}
    // Plain-address case: store a pointer to the component itself.
    template <class Head, class... Tail>
    static void ref_gathering_helper(Assembly& a, std::vector<Component*>& result, Head head, Tail... tail) {
        result.push_back(&a.at(Address(head)));
        ref_gathering_helper(a, result, tail...);
    }
    // PortAddress overload (preferred by overload resolution for PortAddress
    // arguments): store whatever the component's provide-port returns.
    template <class... Tail>
    static void ref_gathering_helper(Assembly& a, std::vector<Component*>& result, PortAddress head, Tail... tail) {
        auto provided_port = a.at(head.address).get(head.prop);
        result.push_back(provided_port);
        ref_gathering_helper(a, result, tail...);
    }
    // Entry point used by Assembly::connect.
    static void _connect(Assembly& a, Address driver, Addresses... addresses) {
        std::vector<Component*> result;
        ref_gathering_helper(a, result, addresses...);
        a.at<_AbstractDriver>(driver).set_refs(result);
    }
};
/*
=============================================================================================================================
~*~ Out-of-order implementations ~*~
===========================================================================================================================*/
// Lambda-based operation: stores a callable that, at assembly time, looks up
// the target component and applies the user-supplied lambda to it.
template <class Target, class Lambda>
inline _Operation::_Operation(Address address, _Type<Target>, Lambda lambda)
    : _connect([lambda, address](Assembly& a) { lambda(a.at<Target>(address)); }), type("lambda") {}
// Address method that depends on ComponentReference: an Address is built from a
// reference by copying the referenced component's key path.
inline Address::Address(const ComponentReference& ref) { keys = ref.component_address.keys; }
// ComponentReference methods that depend on Model.
// Declare a connection of connector type T on port `port` of this component;
// returns *this so calls can be chained fluently.
template <class T, class... Args>
inline ComponentReference& ComponentReference::connect(const std::string& port, Args&&... args) {
    model_ref.connect<T>(PortAddress(port, component_address), std::forward<Args>(args)...);
    return *this;
}
// Driver-style connection: forwards all addresses to a DriverConnect connector.
template <class... Args>
ComponentReference& ComponentReference::connect(Args&&... args) {
    model_ref.connect<DriverConnect<Args...>>(component_address, std::forward<Args>(args)...);
    return *this;
}
// Register a configuration lambda to run on this component once instantiated.
template <class Lambda>
inline ComponentReference& ComponentReference::configure(Lambda lambda) {
    model_ref.configure(component_address, lambda);
    return *this;
}
// Set a port to literal value(s) via the Set connector.
template <class... Args>
inline ComponentReference& ComponentReference::set(const std::string& port, Args&&... args) {
    model_ref.connect<Set<Args...>>(PortAddress(port, component_address), std::forward<Args>(args)...);
    return *this;
}
// implementation of _ProvidePort methods that depended on Assembly interface
// Provide-port backed by a whole component living at `address`.
template <class Interface>
inline _ProvidePort<Interface>::_ProvidePort(Assembly& assembly, Address address)
    : _get([&assembly, address]() { return &assembly.at<Interface>(address); }) {}
// Provide-port backed by another component's named provide-port.
template <class Interface>
inline _ProvidePort<Interface>::_ProvidePort(Assembly& assembly, PortAddress port)
    : _get([&assembly, port]() { return assembly.at<Component>(port.address).get<Interface>(port.prop); }) {}
} // namespace tc
#endif // TINYCOMPO_HPP
|
// RUN: %clang_cc1 -fsycl -fsycl-is-device -triple spir64-unknown-unknown-sycldevice -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s
// Device-code emission test: functions reachable only from a SYCL kernel body
// (foo, and through it bar<int>) must still be emitted into the device IR.
template <typename T>
T bar(T arg);
// foo() calls bar() before its definition; the later definition must be picked up.
void foo() {
  int a = 1 + 1 + bar(1);
}
template <typename T>
T bar(T arg) {
  return arg;
}
// Minimal kernel-invocation shim: the sycl_kernel attribute makes the compiler
// treat this function template as the SYCL kernel entry point.
template <typename name, typename Func>
__attribute__((sycl_kernel)) void kernel_single_task(const Func &kernelFunc) {
  kernelFunc();
}
int main() {
  kernel_single_task<class fake_kernel>([]() { foo(); });
  return 0;
}
// CHECK: define {{.*}}spir_kernel void @_ZTSZ4mainE11fake_kernel()
// CHECK: define internal spir_func void @"_ZZ4mainENK3$_0clEv"(%"class.{{.*}}.anon" addrspace(4)* {{[^,]*}} %this)
// CHECK: define {{.*}}spir_func void @_Z3foov()
// CHECK: define linkonce_odr spir_func i32 @_Z3barIiET_S0_(i32 %arg)
|
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "weblayer/browser/safe_browsing/safe_browsing_metrics_collector_factory.h"
#include "base/no_destructor.h"
#include "components/keyed_service/content/browser_context_dependency_manager.h"
#include "components/safe_browsing/core/browser/safe_browsing_metrics_collector.h"
#include "weblayer/browser/browser_context_impl.h"
namespace weblayer {
// static
// Returns (creating on first use) the SafeBrowsingMetricsCollector keyed to
// |browser_context|.
safe_browsing::SafeBrowsingMetricsCollector*
SafeBrowsingMetricsCollectorFactory::GetForBrowserContext(
    content::BrowserContext* browser_context) {
  return static_cast<safe_browsing::SafeBrowsingMetricsCollector*>(
      GetInstance()->GetServiceForBrowserContext(browser_context,
                                                 /* create= */ true));
}
// static
// Process-wide singleton factory; NoDestructor avoids a shutdown-order dtor.
SafeBrowsingMetricsCollectorFactory*
SafeBrowsingMetricsCollectorFactory::GetInstance() {
  static base::NoDestructor<SafeBrowsingMetricsCollectorFactory> factory;
  return factory.get();
}
// Registers this factory with the BrowserContext dependency graph.
// (Note: the "// static" marker that used to sit here was incorrect — this is
// a constructor and cannot be static.)
SafeBrowsingMetricsCollectorFactory::SafeBrowsingMetricsCollectorFactory()
    : BrowserContextKeyedServiceFactory(
          "SafeBrowsingMetricsCollector",
          BrowserContextDependencyManager::GetInstance()) {}
// Ownership of the returned KeyedService passes to the keyed-service machinery.
KeyedService* SafeBrowsingMetricsCollectorFactory::BuildServiceInstanceFor(
    content::BrowserContext* context) const {
  BrowserContextImpl* context_impl = static_cast<BrowserContextImpl*>(context);
  return new safe_browsing::SafeBrowsingMetricsCollector(
      context_impl->pref_service());
}
}  // namespace weblayer
|
#pragma once
#include "GameObject.hpp"
#include "CollisionSystem.hpp"
#include "Scene.hpp"
using namespace std;
// Player-controlled game object: tracks input state, facing direction and
// movement parameters on top of the generic GameObject physics.
// NOTE(review): this header sits after a file-scope `using namespace std;`,
// which leaks into every includer — consider removing it in a follow-up.
class PlayerObject : public GameObject {
public :
    PlayerObject();
    // mass/pos/size/velocity forwarded to GameObject; `isFixed` pins the body,
    // `filename` is the sprite/mesh source, `l` presumably a scale or length — TODO confirm.
    PlayerObject(float mass, const glm::vec3& pos = glm::vec3(0.f),
        float w = 1.f, float h = 1.f,
        const glm::vec3& vel = glm::vec3(0.f),
        bool isFixed = false,
        const std::string& filename = "", float l = 100.f);
    ~PlayerObject();
    // Collision volume shared with the collision system.
    std::shared_ptr<PlayerCollisionBox> box;
    //input tracking:
    struct Button {
        uint8_t downs = 0;   // presses seen since last poll
        bool pressed = 0;    // currently held (0/1 used as false/true)
    } left, right, space;
    int direction = 1;       // facing: +1/-1
    bool canJump = true;
    float getSpeed() {return speed;}
    float getJumpPower() {return jump_power;}
    virtual void update(float elapsed) override;
    virtual void reset() override;
    virtual void createVerts() override;
private:
    float width, height;
    float speed = 40.f;          // horizontal movement speed
    float jump_power = 4000.0f;  // impulse applied on jump
};
|
#include "ParseTable.h"
#include <algorithm>
#include <cassert>
#include "SDTScheme.h"
// Builds the LL(1) parse table in three classic phases:
// FIRST sets, then FOLLOW sets (which need FIRST), then the table itself.
void ParseTable::Initialize()
{
    ComputeFirst();
    ComputeFollow();
    ComputeParseTable();
}
//////////////////////////////////////////////////////////////////////////
void ParseTable::ComputeFirst()
{
for(set<string>::iterator itr = m_grammar->NonTerminals.begin();
itr != m_grammar->NonTerminals.end();
itr++)
{
const string &str = itr.operator*();
ComputeFirstAux0(str);
}
}
//////////////////////////////////////////////////////////////////////////
// Computes FIRST(p_symbol), memoized in m_first.
// NOTE(review): "already computed" is detected by non-emptiness, so a
// left-recursive grammar (where a partial set exists mid-recursion) is
// presumably ruled out by construction — TODO confirm grammar constraints.
void ParseTable::ComputeFirstAux0( const string& p_symbol )
{
    // Base case: first is already computed
    if(!m_first[p_symbol].empty())
    {
        return;
    }
    // Base case: symbol is a terminal, so FIRST(symbol) = {symbol}
    else if(m_grammar->Terminals.find(p_symbol) != m_grammar->Terminals.end())
    {
        m_first[p_symbol].insert(p_symbol);
    }
    // Inductive case: first not computed for symbol, that is FIRST(symbol) = {}
    else
    {
        // Union the FIRST of every production body for this non-terminal.
        for(vector< ProductionBody >::iterator itr = m_grammar->Productions[p_symbol].begin();
            itr != m_grammar->Productions[p_symbol].end();
            itr++)
        {
            ComputeFirstAux1(p_symbol, itr->Production);
        }
    }
}
//////////////////////////////////////////////////////////////////////////
void ParseTable::ComputeFirstAux1(const string& p_productionHead, ProductionType& p_productionBody )
{
for(ProductionBody::ProductionItr itr = p_productionBody.begin();
itr != p_productionBody.end();
itr++)
{
if(*itr == SemanticActionMarker)
continue;
ComputeFirstAux0(*itr);
// no epsilon productions
if(m_first[*itr].find(Epsilon) == m_first[*itr].end())
{
// add all production body symbols
m_first[p_productionHead].insert(m_first[*itr].begin(), m_first[*itr].end());
return;
}
else
{
// add all production body symbols except epsilon
for(set<string>::iterator copyItr = m_first[*itr].begin();
copyItr != m_first[*itr].end();
copyItr++)
{
if(*copyItr == Epsilon || *copyItr == SemanticActionMarker)
continue;
m_first[p_productionHead].insert(*copyItr);
}
}
}
// If all FIRST(production body symbols) contains epsilon then add epsilon to FIRST(production head symbol)
m_first[p_productionHead].insert(Epsilon);
}
//////////////////////////////////////////////////////////////////////////
// Computes FIRST of a symbol sequence [p_symbolsBegin, p_symbolsEnd) into
// p_first.  Standard rule: walk the sequence; stop at the first symbol whose
// FIRST does not contain epsilon; if every symbol is nullable, epsilon is in
// the result.  Semantic-action markers embedded in the production are skipped.
void ParseTable::GetFirst(ProductionBody::ProductionItr p_symbolsBegin, ProductionBody::ProductionItr p_symbolsEnd, set<string>& p_first)
{
    for(ProductionBody::ProductionItr itr = p_symbolsBegin;
        itr != p_symbolsEnd;
        itr++)
    {
        if(*itr == SemanticActionMarker)
            continue;
        // Make sure FIRST(*itr) is available (memoized).
        ComputeFirstAux0(*itr);
        // no epsilon productions
        if(m_first[*itr].find(Epsilon) == m_first[*itr].end())
        {
            // add all production body symbols
            p_first.insert(m_first[*itr].begin(), m_first[*itr].end());
            return;
        }
        else
        {
            // add all production body symbols except epsilon
            for(set<string>::iterator copyItr = m_first[*itr].begin();
                copyItr != m_first[*itr].end();
                copyItr++)
            {
                if(*copyItr == Epsilon || *copyItr == SemanticActionMarker)
                    continue;
                p_first.insert(*copyItr);
            }
        }
    }
    // If all FIRST(production body symbols) contains epsilon then add epsilon to FIRST(production head symbol)
    p_first.insert(Epsilon);
}
//////////////////////////////////////////////////////////////////////////
void ParseTable::ComputeFollow()
{
m_follow[m_grammar->Start].insert(RightEndMarker);
for(set<string>::iterator itr = m_grammar->NonTerminals.begin();
itr != m_grammar->NonTerminals.end();
itr++)
{
ComputeFollowAux0(*itr);
}
}
//////////////////////////////////////////////////////////////////////////
// Computes FOLLOW(p_symbol), memoized in m_follow, by scanning every
// production of every *other* non-terminal for occurrences of p_symbol.
void ParseTable::ComputeFollowAux0( const string& p_symbol )
{
    // FOLLOW(symbol) is already computed
    // (the start symbol is "uncomputed" while it only holds the seeded $).
    if (!(m_follow[p_symbol].empty() ||
        (p_symbol == m_grammar->Start && m_follow[p_symbol].size() == 1)))
        return;
    ProductionBody::ProductionItr where;
    set<string>::iterator whereEpsilon;
    set<string> first;
    for(set<string>::iterator nonTerminalItr = m_grammar->NonTerminals.begin();
        nonTerminalItr != m_grammar->NonTerminals.end();
        nonTerminalItr++)
    {
        // ignores searching in production of the form A -> aAb where a and b can be null
        if(*nonTerminalItr == p_symbol)
            continue;
        for(vector< ProductionBody >::iterator productionItr = m_grammar->Productions[*nonTerminalItr].begin();
            productionItr != m_grammar->Productions[*nonTerminalItr].end();
            productionItr++)
        {
            // Locate the (first) occurrence of p_symbol in this production.
            where = find(productionItr->Production.begin(), productionItr->Production.end(), p_symbol);
            // production does not contain symbol
            if(where == productionItr->Production.end())
                continue;
            int idx = where - productionItr->Production.begin();
            // A -> aB OR
            // A -> aBsssss... where s is a semantic action
            // FOLLOW(B) = FOLLOW(A)
            if(idx == productionItr->Production.size() - 1 ||
                count(where + 1, productionItr->Production.end(), SemanticActionMarker) == productionItr->Production.size() - idx - 1)
            {
                ComputeFollowAux0(*nonTerminalItr);
                m_follow[p_symbol].insert(m_follow[*nonTerminalItr].begin(), m_follow[*nonTerminalItr].end());
            }
            // A -> aBb
            // A -> aBsb
            else
            {
                // FIXME: it is assumed that the grammar will not contain production as A -> aAbA
                // Find first non-semantic action symbol
                for(where = where + 1;
                    where != productionItr->Production.end();
                    where++)
                {
                    if(*where != SemanticActionMarker && *where != p_symbol)
                    {
                        break;
                    }
                }
                // The earlier branch guarantees a real symbol follows, so the
                // scan cannot run off the end.
                assert(where != productionItr->Production.end());
                first.clear();
                GetFirst(where, productionItr->Production.end(), first);
                // A -> aBb and FIRST(b) contains e
                // FOLLOW(B) = FIRST(b) - {e} + FOLLOW(A)
                whereEpsilon = first.find(Epsilon);
                if(whereEpsilon != first.end())
                {
                    first.erase(whereEpsilon);
                    m_follow[p_symbol].insert(first.begin(), first.end());
                    ComputeFollowAux0(*nonTerminalItr);
                    m_follow[p_symbol].insert(m_follow[*nonTerminalItr].begin(), m_follow[*nonTerminalItr].end());
                }
                // A -> aBb and FIRST(b) does not contain e
                // FOLLOW(B) = FIRST(b)
                else
                {
                    m_follow[p_symbol].insert(first.begin(), first.end());
                }
            }
        }
    }
}
//////////////////////////////////////////////////////////////////////////
// Fills the LL(1) table M: for each production A -> b, the production is
// entered under every terminal in FIRST(b); if b is nullable, also under every
// terminal in FOLLOW(A).  Any collision means the grammar is not LL(1)
// (asserted, debug builds only).  Synch sets for error recovery are computed
// per non-terminal at the end.
void ParseTable::ComputeParseTable()
{
    set<string> first;
    for(set<string>::iterator nonTerminalItr = m_grammar->NonTerminals.begin();
        nonTerminalItr != m_grammar->NonTerminals.end();
        nonTerminalItr++)
    {
        // foreach production A -> b
        for(vector< ProductionBody >::iterator productionItr = m_grammar->Productions[*nonTerminalItr].begin();
            productionItr != m_grammar->Productions[*nonTerminalItr].end();
            productionItr++)
        {
            first.clear();
            GetFirst(productionItr->Production.begin(), productionItr->Production.end(), first);
            // [1] foreach terminal a in FIRST(b), add A -> b to M[A, a]
            for(set<string>::iterator terminalItr = first.begin();
                terminalItr != first.end();
                terminalItr++)
            {
                if(*terminalItr == Epsilon)
                    continue;
                // if the entry M[A, a] is not empty then this is not LL(1) grammar
                assert(m_parseTable[*nonTerminalItr].Row.find(*terminalItr) == m_parseTable[*nonTerminalItr].Row.end());
                m_parseTable[*nonTerminalItr].Row[*terminalItr] = productionItr;
            }
            // if e is in FIRST(b)
            if(first.find(Epsilon) != first.end())
            {
                // [1] foreach terminal c in FOLLOW(A), add A -> b to M[A, c]
                for(set<string>::iterator terminalItr = m_follow[*nonTerminalItr].begin();
                    terminalItr != m_follow[*nonTerminalItr].end();
                    terminalItr++)
                {
                    // if the entry M[A, a] is not empty then this is not LL(1) grammar
                    assert(m_parseTable[*nonTerminalItr].Row.find(*terminalItr) == m_parseTable[*nonTerminalItr].Row.end());
                    m_parseTable[*nonTerminalItr].Row[*terminalItr] = productionItr;
                }
            }
        }
        ComputeSynchSet(*nonTerminalItr);
    }
}
//////////////////////////////////////////////////////////////////////////
bool ParseTable::GetProductionAt(const string& p_nonTerminal, const string& p_terminal, ProductionType& p_production, int& p_relativeIdx)
{
// it is illogical to miss a non-terminal in the parse table, this means an unsuccessful parse table generation
assert(m_parseTable.find(p_nonTerminal) != m_parseTable.end());
ParseTableRow::Iterator where = m_parseTable[p_nonTerminal].Row.find(p_terminal);
if(where == m_parseTable[p_nonTerminal].Row.end())
{
return false;
}
else
{
vector<ProductionBody>::iterator itr = where->second;
p_production.reserve(itr->Production.size());
p_production.insert(p_production.begin(), itr->Production.begin(), itr->Production.end());
p_relativeIdx = itr - m_grammar->Productions[p_nonTerminal].begin();
return true;
}
}
//////////////////////////////////////////////////////////////////////////
void ParseTable::ComputeSynchSet(const string& p_nonTerminal)
{
for(set<string>::iterator itr = m_follow[p_nonTerminal].begin();
itr != m_follow[p_nonTerminal].end();
itr++)
{
m_synch[p_nonTerminal].insert(*itr);
}
}
//////////////////////////////////////////////////////////////////////////
// Destructor: nothing to release explicitly — all members clean up themselves.
ParseTable::~ParseTable()
{
}
|
#include <iostream>
using namespace std;
// Reads one integer and prints YES when it is divisible by 7 but not by 11,
// NO otherwise.
int main()
{
    int value;
    cin >> value;
    const bool divisibleBy7Only = (value % 7 == 0) && (value % 11 != 0);
    cout << (divisibleBy7Only ? "YES" : "NO") << endl;
    return 0;
}
|
#include "DialogUI.h"
#include "GameWorld.h"
// Sets up the question label plus four answer labels, each backed by a
// semi-transparent black panel.
// NOTE(review): sf::Text::setColor is deprecated in SFML 2.4+ in favor of
// setFillColor — presumably this targets an older SFML; confirm before upgrading.
slava::DialogUI::DialogUI(sf::Font& font) {
	question.setFont(font);
	question.setCharacterSize(24);
	question.setColor(sf::Color::Red);
	sQuestion.setFillColor(sf::Color(0, 0, 0, 180));  // translucent backdrop
	sQuestion.setSize(sf::Vector2f(400, 70));
	for (int i = 0; i < 4; ++i) {
		answers[i].setFont(font);
		answers[i].setCharacterSize(24);
		answers[i].setColor(sf::Color::Red);
		sAnswers[i].setFillColor(sf::Color(0, 0, 0, 180));
		sAnswers[i].setSize(sf::Vector2f(400, 60));
	}
}
// Per-frame dialog logic: pauses the world, handles answer clicks (running the
// answer's action and advancing to the next dialog node, or closing the dialog
// when there is no next node), and highlights the hovered answer panel.
// NOTE(review): isButtonPressed is level-triggered — holding the button can
// fire an answer on several consecutive frames; confirm whether callers rely
// on the pause to mask this.
void slava::DialogUI::control(GameWorld* world) {
	// Lazily latch the dialog root the first time we run after (re)activation.
	if (!cpySet) {
		cpy = world->getCurrentDialog();
		cpySet = true;
	}
	sf::RenderWindow* win = world->getWindow();
	auto coords = sf::Mouse::getPosition(*win);
	auto worldCoords = win->mapPixelToCoords(coords);  // account for the view transform
	world->getMainCharacter()->stopMovement();
	world->pause();
	int xM = worldCoords.x;
	int yM = worldCoords.y;
	if (sf::Mouse::isButtonPressed(sf::Mouse::Left)) {
		for (int i = 0; i < 4; ++i) {
			if (contains(sAnswers[i], xM, yM)) {
				// Run the answer's side effect first, then advance the dialog.
				if (cpy->action[i] != NULL) {
					cpy->action[i](world);
				}
				if (cpy->next[i] != NULL) {
					cpy = cpy->next[i];
				}
				else {
					// Leaf answer: close the dialog and resume the world.
					cpySet = false;
					this->active = false;
					world->unpause();
				}
			}
		}
	}
	// Hover feedback: lighten the panel under the cursor.
	for (int i = 0; i < 4; ++i) {
		if (contains(sAnswers[i], xM, yM)) {
			sAnswers[i].setFillColor(sf::Color(0, 0, 0, 120));
		}
		else {
			sAnswers[i].setFillColor(sf::Color(0, 0, 0, 180));
		}
	}
}
// Lays out and draws the dialog relative to the current view center: question
// panel above, four answer rows stacked below.  Long questions get a smaller
// font so they fit the fixed 400px panel.
void slava::DialogUI::draw(sf::RenderWindow& win) {
	int xC = win.getView().getCenter().x;
	int yC = win.getView().getCenter().y;
	if (cpy->question.length() > 40) {
		question.setCharacterSize(15);
	}
	else {
		question.setCharacterSize(24);
	}
	question.setString(" " + cpy->question);
	for (int i = 0; i < 4; ++i) {
		// Empty answer slots render as " ..." placeholders.
		if (cpy->answers[i] != "") {
			answers[i].setString(" " + cpy->answers[i]);
		}
		else {
			answers[i].setString(" ...");
		}
		answers[i].setPosition(xC - 200, yC + 60 * i);
		sAnswers[i].setPosition(xC - 200, yC + 60 * i - 10);  // panel behind the text
		win.draw(sAnswers[i]);
		win.draw(answers[i]);
	}
	question.setPosition(xC - 200, yC - 60);
	sQuestion.setPosition(xC - 200, yC - 80);
	win.draw(sQuestion);
	win.draw(question);
}
|
// MegaHash v1.0
// Copyright (c) 2019 Joseph Huckaby
// Based on DeepHash, (c) 2003 Joseph Huckaby
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "MegaHash.h"
Response Hash::store(unsigned char *key, BH_KLEN_T keyLength, unsigned char *content, BH_LEN_T contentLength, unsigned char flags) {
	// Store a key/value pair in the hash.  resp.result is BH_ADD for a new key
	// or BH_REPLACE when an existing key's payload was swapped out.
	unsigned char digest[BH_DIGEST_SIZE];
	Response resp;
	
	// first digest key
	digestKey(key, keyLength, digest);
	
	// combine key and content together, with length prefixes, into single blob
	// this reduces malloc bashing and memory frag
	BH_LEN_T payloadSize = BH_KLEN_SIZE + keyLength + BH_LEN_SIZE + contentLength;
	BH_LEN_T offset = 0;
	unsigned char *payload = (unsigned char *)malloc(payloadSize);
	memcpy( (void *)&payload[offset], (void *)&keyLength, BH_KLEN_SIZE ); offset += BH_KLEN_SIZE;
	memcpy( (void *)&payload[offset], (void *)key, keyLength ); offset += keyLength;
	memcpy( (void *)&payload[offset], (void *)&contentLength, BH_LEN_SIZE ); offset += BH_LEN_SIZE;
	memcpy( (void *)&payload[offset], (void *)content, contentLength ); offset += contentLength;
	
	unsigned char digestIndex = 0;
	unsigned char ch;
	unsigned char bucketIndex = 0;
	Tag *tag = (Tag *)index;
	Index *level, *newLevel;
	Bucket *bucket, *newBucket, *lastBucket;
	
	// Walk nested index levels, one digest byte per level, until we land on an
	// empty slot or a bucket list.
	while (tag && (tag->type == BH_SIG_INDEX)) {
		level = (Index *)tag;
		ch = digest[digestIndex];
		tag = level->data[ch];
		if (!tag) {
			// create new bucket list here
			bucket = new Bucket();
			bucket->data = payload;
			bucket->flags = flags;
			level->data[ch] = (Tag *)bucket;
			resp.result = BH_ADD;
			stats->dataSize += keyLength + contentLength;
			stats->metaSize += sizeof(Bucket) + BH_KLEN_SIZE + BH_LEN_SIZE;
			stats->numKeys++;
			tag = NULL; // break
		}
		else if (tag->type == BH_SIG_BUCKET) {
			// found bucket list, append
			bucket = (Bucket *)tag;
			lastBucket = NULL;
			while (bucket) {
				if (bucketKeyEquals(bucket->data, key, keyLength)) {
					// replace
					newBucket = new Bucket();
					newBucket->data = payload;
					newBucket->flags = flags;
					// BUGFIX: carry over the tail of the chain.  Previously
					// newBucket->next was left NULL, so replacing a key that
					// had successors in its bucket list leaked every following
					// bucket and silently dropped their keys.
					newBucket->next = bucket->next;
					if (lastBucket) lastBucket->next = newBucket;
					else level->data[ch] = (Tag *)newBucket;
					resp.result = BH_REPLACE;
					stats->dataSize -= (bucketGetKeyLength(bucket->data) + bucketGetContentLength(bucket->data));
					stats->dataSize += keyLength + contentLength;
					free(bucket->data);
					delete bucket;
					bucket = NULL; // break
				}
				else if (!bucket->next) {
					// append here
					newBucket = new Bucket();
					newBucket->data = payload;
					newBucket->flags = flags;
					bucket->next = newBucket;
					resp.result = BH_ADD;
					stats->dataSize += keyLength + contentLength;
					stats->metaSize += sizeof(Bucket) + BH_KLEN_SIZE + BH_LEN_SIZE;
					stats->numKeys++;
					bucket = NULL; // break
					
					// possibly reindex here: if the chain grew too long and we
					// still have digest bytes left, push this slot down a level.
					if ((bucketIndex >= maxBuckets + (ch % reindexScatter)) && (digestIndex < BH_DIGEST_SIZE - 1)) {
						// deeper we go
						digestIndex++;
						newLevel = new Index();
						stats->indexSize += sizeof(Index);
						bucket = (Bucket *)tag;
						level->data[ch] = (Tag *)newLevel;
						while (bucket) {
							lastBucket = bucket;
							bucket = bucket->next;
							reindexBucket(lastBucket, newLevel, digestIndex);
						}
					} // reindex
				}
				else {
					lastBucket = bucket;
					bucket = bucket->next;
					bucketIndex++;
				}
			} // while bucket
			tag = NULL; // break
		}
		else {
			// slot holds a deeper index: descend with the next digest byte
			digestIndex++;
		}
	} // while tag
	return resp;
}
void Hash::reindexBucket(Bucket *bucket, Index *index, unsigned char digestIndex) {
	// Re-hang an existing bucket off the given sub-index level, choosing the
	// slot from the digest byte at digestIndex.  The bucket always ends up as
	// the tail of its new list.
	unsigned char digest[BH_DIGEST_SIZE];
	BH_KLEN_T keyLength = bucketGetKeyLength(bucket->data);
	digestKey(bucket->data + BH_KLEN_SIZE, keyLength, digest);
	
	bucket->next = NULL;
	Tag **slot = &index->data[ digest[digestIndex] ];
	if (*slot == NULL) {
		// empty slot: bucket starts a fresh list
		*slot = (Tag *)bucket;
		return;
	}
	// otherwise walk to the end of the existing list and append
	Bucket *tail = (Bucket *)(*slot);
	while (tail->next) {
		tail = tail->next;
	}
	tail->next = bucket;
}
Response Hash::fetch(unsigned char *key, BH_KLEN_T keyLength) {
	// Look up a key.  On success resp points INTO the stored payload (no copy);
	// on a miss resp.result is BH_ERR.
	unsigned char digest[BH_DIGEST_SIZE];
	Response resp;
	
	digestKey(key, keyLength, digest);
	
	unsigned char depth = 0;
	Tag *tag = (Tag *)index;
	
	// Descend one index level per digest byte until we reach a bucket list
	// (or run out of structure).
	while (tag && (tag->type == BH_SIG_INDEX)) {
		Index *level = (Index *)tag;
		tag = level->data[ digest[depth] ];
		
		if (!tag) {
			// empty slot: key absent
			resp.result = BH_ERR;
			break;
		}
		if (tag->type == BH_SIG_BUCKET) {
			// scan the chain for an exact key match
			resp.result = BH_ERR;
			for (Bucket *bucket = (Bucket *)tag; bucket; bucket = bucket->next) {
				if (bucketKeyEquals(bucket->data, key, keyLength)) {
					resp.result = BH_OK;
					resp.content = bucketGetContent(bucket->data);
					resp.contentLength = bucketGetContentLength(bucket->data);
					resp.flags = bucket->flags;
					break;
				}
			}
			break;
		}
		// nested index: keep descending
		depth++;
	}
	return resp;
}
Response Hash::remove(unsigned char *key, BH_KLEN_T keyLength) {
	// remove bucket given key
	// Walks the index like fetch(); on a hit, unlinks the bucket from its
	// chain, frees its payload and updates the size/key statistics.
	unsigned char digest[BH_DIGEST_SIZE];
	Response resp;
	
	// first digest key
	digestKey(key, keyLength, digest);
	
	unsigned char digestIndex = 0;
	unsigned char ch;
	Tag *tag = (Tag *)index;
	Index *level;
	Bucket *bucket, *lastBucket;
	
	while (tag && (tag->type == BH_SIG_INDEX)) {
		level = (Index *)tag;
		ch = digest[digestIndex];
		tag = level->data[ch];
		if (!tag) {
			// not found
			resp.result = BH_ERR;
			tag = NULL; // break
		}
		else if (tag->type == BH_SIG_BUCKET) {
			// found bucket list, traverse
			bucket = (Bucket *)tag;
			lastBucket = NULL;
			while (bucket) {
				if (bucketKeyEquals(bucket->data, key, keyLength)) {
					// found!
					// Mirror of the accounting done in store(): payload bytes,
					// bucket metadata, and the key count.
					stats->dataSize -= (bucketGetKeyLength(bucket->data) + bucketGetContentLength(bucket->data));
					stats->metaSize -= (sizeof(Bucket) + BH_KLEN_SIZE + BH_LEN_SIZE);
					stats->numKeys--;
					
					// Unlink: either splice around it or promote its successor
					// into the index slot.
					if (lastBucket) lastBucket->next = bucket->next;
					else level->data[ch] = bucket->next;
					
					resp.result = BH_OK;
					free(bucket->data);
					delete bucket;
					bucket = NULL; // break
				}
				else if (!bucket->next) {
					// not found
					resp.result = BH_ERR;
					bucket = NULL; // break
				}
				else {
					lastBucket = bucket;
					bucket = bucket->next;
				}
			} // while bucket
			tag = NULL; // break
		}
		else {
			// nested index: descend with the next digest byte
			digestIndex++;
		}
	} // while tag
	return resp;
}
void Hash::clear() {
// clear ALL keys/values
for (int idx = 0; idx < BH_INDEX_SIZE; idx++) {
if (index->data[idx]) {
clearTag( index->data[idx] );
index->data[idx] = NULL;
}
}
}
void Hash::clear(unsigned char idx) {
// clear one "slice" from main index (about 1/16 of total keys)
// this is so you can split up the job into pieces and not stall the event loop for too long
if (idx >= BH_INDEX_SIZE) return;
if (index->data[idx]) {
clearTag( index->data[idx] );
index->data[idx] = NULL;
}
}
void Hash::clearTag(Tag *tag) {
	// internal method: clear one tag (index or bucket)
	// traverse lists, recurse for nested indexes
	if (tag->type == BH_SIG_INDEX) {
		// traverse index
		Index *level = (Index *)tag;
		for (int idx = 0; idx < BH_INDEX_SIZE; idx++) {
			if (level->data[idx]) {
				clearTag( level->data[idx] );
				level->data[idx] = NULL;
			}
		}
		
		// kill index
		delete level;
		stats->indexSize -= sizeof(Index);
	}
	else if (tag->type == BH_SIG_BUCKET) {
		// delete all buckets in list
		Bucket *bucket = (Bucket *)tag;
		Bucket *lastBucket;
		while (bucket) {
			lastBucket = bucket;
			bucket = bucket->next;
			stats->dataSize -= (bucketGetKeyLength(lastBucket->data) + bucketGetContentLength(lastBucket->data));
			// BUGFIX: store()/remove() account sizeof(Bucket) + BH_KLEN_SIZE +
			// BH_LEN_SIZE per key in metaSize; this path previously subtracted
			// only sizeof(Bucket), so metaSize drifted upward after clear().
			stats->metaSize -= (sizeof(Bucket) + BH_KLEN_SIZE + BH_LEN_SIZE);
			stats->numKeys--;
			free(lastBucket->data);
			delete lastBucket;
		}
	}
}
Response Hash::firstKey() {
	// return first key found (in undefined order)
	Response resp;
	unsigned char digest[BH_DIGEST_SIZE] = {0};  // start every level's scan at slot 0
	unsigned char returnNext = 1;                // "return whatever key we land on first"
	traverseTag( &resp, (Tag *)index, NULL, 0, digest, 0, &returnNext );
	return resp;
}
Response Hash::nextKey(unsigned char *key, BH_KLEN_T keyLength) {
	// return next key given previous key (in undefined order)
	Response resp;
	unsigned char returnNext = 0;  // flips to 1 once the given key is located
	unsigned char digest[BH_DIGEST_SIZE];
	digestKey(key, keyLength, digest);
	traverseTag( &resp, (Tag *)index, key, keyLength, digest, 0, &returnNext );
	return resp;
}
void Hash::traverseTag(Response *resp, Tag *tag, unsigned char *key, BH_KLEN_T keyLength, unsigned char *digest, unsigned char digestIndex, unsigned char *returnNext) {
	// internal method
	// traverse tag tree looking for key (or return next key found)
	// `digest` steers the scan: each level starts at digest[digestIndex] so the
	// walk resumes from the previous key's position.
	if (tag->type == BH_SIG_INDEX) {
		// traverse index
		Index *level = (Index *)tag;
		for (int idx = digest[digestIndex]; idx < BH_INDEX_SIZE; idx++) {
			if (level->data[idx]) {
				traverseTag( resp, level->data[idx], key, keyLength, digest, digestIndex + 1, returnNext );
				// once a key is produced, abort the scan at every level
				if (resp->result == BH_OK) idx = BH_INDEX_SIZE;
			}
		}
	}
	else if (tag->type == BH_SIG_BUCKET) {
		// traverse bucket list
		Bucket *bucket = (Bucket *)tag;
		while (bucket) {
			if (returnNext[0]) {
				// return whatever key we landed on (repurpose the response content for this)
				// NOTE: content points INTO the bucket payload — no copy is made.
				resp->result = BH_OK;
				resp->content = bucketGetKey(bucket->data);
				resp->contentLength = bucketGetKeyLength(bucket->data);
				bucket = NULL; // break;
			}
			else if (bucketKeyEquals(bucket->data, key, keyLength)) {
				// found target key, return next one
				returnNext[0] = 1;
				// clear all digest bits so next index ierations begin at zero
				// NOTE(review): this zeroes only the first 8 bytes of digest —
				// assumes BH_DIGEST_SIZE <= 8 (and suitable alignment); TODO confirm.
				((uint64_t *)digest)[0] = 0;
			}
			if (bucket) bucket = bucket->next;
		}
	}
}
|
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_X87
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
#include "src/serialize.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
// Constructs a macro assembler emitting into |buffer| of |size| bytes.
// |arg_isolate| may be NULL (e.g. for snapshot-time code generation), in which
// case no code object handle is cached.
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (isolate() != NULL) {
    // TODO(titzer): should we just use a null handle here instead?
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}
// Emits a load of |src| into |dst| using the mov variant matching the value's
// representation: sign-extend for signed 8/16-bit, zero-extend for unsigned,
// plain mov otherwise.  Doubles are not handled here.
void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsx_b(dst, src);
  } else if (r.IsUInteger8()) {
    movzx_b(dst, src);
  } else if (r.IsInteger16()) {
    movsx_w(dst, src);
  } else if (r.IsUInteger16()) {
    movzx_w(dst, src);
  } else {
    mov(dst, src);
  }
}
// Emits a store of |src| to |dst| sized by the representation (byte/word/full
// width).  For full-width stores, debug builds additionally assert the value's
// smi-ness matches what the representation claims.
void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    mov_b(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    mov_w(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    mov(dst, src);
  }
}
// Loads the root-list entry |index| into |destination|.  Constant roots are
// embedded directly as an immediate handle; otherwise the value is read from
// the roots array at runtime (index into the external roots_array_start).
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
    Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, value);
    return;
  }
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(destination, Immediate(index));
  mov(destination, Operand::StaticArray(destination,
                                        times_pointer_size,
                                        roots_array_start));
}
// Stores |source| into root-list slot |index|, clobbering |scratch| for the
// index computation.  Only roots writable after initialization are allowed.
void MacroAssembler::StoreRoot(Register source,
                               Register scratch,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
      source);
}
// Compares |with| against root-list entry |index|, reading the root from the
// roots array at runtime; |scratch| is clobbered.  Works for non-constant roots.
void MacroAssembler::CompareRoot(Register with,
                                 Register scratch,
                                 Heap::RootListIndex index) {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  cmp(with, Operand::StaticArray(scratch,
                                  times_pointer_size,
                                  roots_array_start));
}
// Compares |with| against a constant root, embedded as an immediate handle.
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
  cmp(with, value);
}
// Memory-operand variant of the constant-root comparison above.
void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
  cmp(with, value);
}
// Jump to |condition_met| if |object| is (cc == equal) or is not
// (cc == not_equal) in new space. Works by masking the object address down to
// its page header and testing the page's IN_FROM_SPACE/IN_TO_SPACE flag bits.
// |scratch| is clobbered; it may alias |object|.
void MacroAssembler::InNewSpace(
    Register object,
    Register scratch,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == equal || cc == not_equal);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  // Check that we can use a test_b.
  DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
  DCHECK(MemoryChunk::IN_TO_SPACE < 8);
  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
           | (1 << MemoryChunk::IN_TO_SPACE);
  // If non-zero, the page belongs to new-space.
  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
         static_cast<uint8_t>(mask));
  j(cc, condition_met, condition_met_distance);
}
// Record the slot |addr| in the store buffer: append it at the current buffer
// top, bump the top, and if the buffer overflowed call the
// StoreBufferOverflowStub. Depending on |and_then| the code either returns
// (kReturnAtEnd) or falls through (kFallThroughAtEnd). |scratch| is clobbered.
void MacroAssembler::RememberedSetHelper(
    Register object,  // Only used for debug checks.
    Register addr, Register scratch, SaveFPRegsMode save_fp,
    MacroAssembler::RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    // Remembered-set entries only make sense for old-space objects.
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(scratch, Operand::StaticVariable(store_buffer));
  // Store pointer to buffer.
  mov(Operand(scratch, 0), addr);
  // Increment buffer top.
  add(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  mov(Operand::StaticVariable(store_buffer), scratch);
  // Call stub on end of buffer.
  // Check for end of buffer.
  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}
// Clamp the x87 top-of-stack value into [0, 255] and leave the result in
// |result_reg|. Uses fist_s to convert, then fixes up out-of-range values:
// negatives clamp to 0, values > 255 clamp to 255. The conv_failure path
// handles the invalid-arithmetic (#IA) case detected by X87CheckIA, where the
// sign of the original value decides between 0 and 255.
void MacroAssembler::ClampTOSToUint8(Register result_reg) {
  Label done, conv_failure;
  sub(esp, Immediate(kPointerSize));
  fnclex();
  fist_s(Operand(esp, 0));
  pop(result_reg);
  X87CheckIA();
  j(equal, &conv_failure, Label::kNear);
  // In-range [0, 255] values have no bits set above bit 7.
  test(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  // setcc/dec trick: 1-1=0 for negative, 0-1=0xFF (masked) for positive.
  setcc(sign, result_reg);
  sub(result_reg, Immediate(1));
  and_(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  bind(&conv_failure);
  fnclex();
  fldz();
  fld(1);
  FCmp();
  setcc(below, result_reg);  // 1 if negative, 0 if positive.
  dec_b(result_reg);         // 0 if negative, 255 if positive.
  bind(&done);
}
// Clamp the int32 in |reg| into [0, 255] in place: negative values become 0,
// values above 255 become 255, in-range values are untouched.
void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  // Any bits above bit 7 mean the value is out of range.
  test(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  dec_b(reg);            // 0 if negative, 255 if positive.
  bind(&done);
}
// Call the DoubleToIStub to truncate the double stored at
// [input_reg + offset] to an int32 in |result_reg| (slow path for values that
// do not fit a fist_s conversion).
void MacroAssembler::SlowTruncateToI(Register result_reg,
                                     Register input_reg,
                                     int offset) {
  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
  call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
// Truncate the x87 top-of-stack double to an int32 in |result_reg| by spilling
// it to the stack and invoking the slow-path stub. The TOS value is left on
// the FPU stack (fst_d stores without popping).
void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
  sub(esp, Immediate(kDoubleSize));
  fst_d(MemOperand(esp, 0));
  SlowTruncateToI(result_reg, esp, 0);
  add(esp, Immediate(kDoubleSize));
}
// Convert the x87 top-of-stack value to an int32 in |result_reg|, jumping to
// |lost_precision| if the conversion is inexact, |is_nan| for NaN, and (when
// minus_zero_mode == FAIL_ON_MINUS_ZERO) |minus_zero| for -0.0. The round-trip
// fist_s/fild_s followed by FCmp detects both inexactness and NaN.
void MacroAssembler::X87TOSToI(Register result_reg,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision, Label* is_nan,
                               Label* minus_zero, Label::Distance dst) {
  Label done;
  sub(esp, Immediate(kPointerSize));
  fld(0);
  fist_s(MemOperand(esp, 0));
  fild_s(MemOperand(esp, 0));
  pop(result_reg);
  FCmp();
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    test(result_reg, Operand(result_reg));
    j(not_zero, &done, Label::kNear);
    // To check for minus zero, we load the value again as float, and check
    // if that is still 0.
    sub(esp, Immediate(kPointerSize));
    fst_s(MemOperand(esp, 0));
    pop(result_reg);
    // -0.0 has the sign bit set, so its single-precision bits are non-zero.
    test(result_reg, Operand(result_reg));
    j(not_zero, minus_zero, dst);
  }
  bind(&done);
}
// Truncate the HeapNumber in |input_reg| to an int32 in |result_reg| via the
// DoubleToIStub slow path (SlowTruncateToI defaults its offset to the heap
// number's value field).
void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  // Removed two dead local labels from the original: |slow_case| was never
  // referenced, and |done| was bound immediately after the call with nothing
  // jumping to it. Label binding emits no code, so behavior is unchanged.
  SlowTruncateToI(result_reg, input_reg);
}
// Load the uint32 at |src| onto the x87 stack as a double. fild_s treats the
// value as signed, so if the sign bit was set (value >= 2^31) add 2^32 (the
// uint32 bias constant) to correct the result.
void MacroAssembler::LoadUint32NoSSE2(const Operand& src) {
  Label done;
  push(src);
  fild_s(Operand(esp, 0));
  cmp(src, Immediate(0));
  j(not_sign, &done, Label::kNear);
  ExternalReference uint32_bias =
        ExternalReference::address_of_uint32_bias();
  fld_d(Operand::StaticVariable(uint32_bias));
  faddp(1);
  bind(&done);
  // Pop the scratch slot pushed above.
  add(esp, Immediate(kPointerSize));
}
// Write barrier for a store of |value| into the FixedArray element at smi
// |index| of |object|. Computes the slot address into |index| (clobbering it)
// and delegates to RecordWrite. Smi stores may skip the barrier entirely.
void MacroAssembler::RecordWriteArray(
    Register object, Register value, Register index, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;
  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    test(value, Immediate(kSmiTagMask));
    j(zero, &done);
  }
  // Array access: calculate the destination address in the same manner as
  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
  // into an array of words.
  Register dst = index;
  lea(dst, Operand(object, index, times_half_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));
  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
  bind(&done);
  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}
// Write barrier for a store of |value| into the field at |offset| of |object|.
// Computes the (untagged) slot address into |dst| (clobbering it) and
// delegates to RecordWrite. Smi stores may skip the barrier entirely.
void MacroAssembler::RecordWriteField(
    Register object, int offset, Register value, Register dst,
    SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action,
    SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;
  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done, Label::kNear);
  }
  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));
  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    // Verify the computed slot address is pointer-aligned.
    Label ok;
    test_b(dst, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
  bind(&done);
  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}
// Write barrier specialized for storing |map| into |object|'s map slot.
// Because maps are never in new space, only the map page's
// pointers-to-here-are-interesting flag needs checking (and only when
// incremental marking is enabled). |scratch1|/|scratch2| are clobbered.
void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map,
                                       Register scratch1, Register scratch2,
                                       SaveFPRegsMode save_fp) {
  Label done;
  Register address = scratch1;
  Register value = scratch2;
  if (emit_debug_code()) {
    // Verify the map slot address is pointer-aligned.
    Label ok;
    lea(address, FieldOperand(object, HeapObject::kMapOffset));
    test_b(address, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);
  if (!FLAG_incremental_marking) {
    return;
  }
  // Compute the address.
  lea(address, FieldOperand(object, HeapObject::kMapOffset));
  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  DCHECK(!isolate()->heap()->InNewSpace(*map));
  CheckPageFlagForMap(map,
                      MemoryChunk::kPointersToHereAreInterestingMask,
                      zero,
                      &done,
                      Label::kNear);
  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
                       save_fp);
  CallStub(&stub);
  bind(&done);
  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}
// The generic write barrier: records the store of |value| into the slot at
// |address| inside |object|. Skips the barrier for smi values and for
// old-to-old pointers whose pages are not "interesting"; otherwise calls the
// RecordWriteStub. |address| and |value| are clobbered (zapped in debug code).
void MacroAssembler::RecordWrite(
    Register object, Register address, Register value, SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);
  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }
  if (emit_debug_code()) {
    // The caller must have already performed the store: the slot at |address|
    // should contain |value|.
    Label ok;
    cmp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;
  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done, Label::kNear);
  }
  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  zero,
                  &done,
                  Label::kNear);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  bind(&done);
  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}
// Emit a call into the runtime's DebugBreak entry via the CEntryStub
// (zero-argument runtime call; eax holds the argument count, ebx the runtime
// function reference).
void MacroAssembler::DebugBreak() {
  Move(eax, Immediate(0));
  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(isolate(), 1);
  call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
// An immediate is considered "unsafe" (and worth masking with the JIT cookie
// in SafeMove/SafePush) when it is a plain untagged constant — no relocation
// info — that is too wide to fit in kMaxImmediateBits signed bits.
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
  static const int kMaxImmediateBits = 17;
  return RelocInfo::IsNone(x.rmode_) && !is_intn(x.x_, kMaxImmediateBits);
}
// Move immediate |x| into |dst|, XOR-masking wide untagged constants with the
// JIT cookie so attacker-chosen values never appear verbatim in the code
// stream (JIT-spray mitigation). The second xor_ restores the real value.
void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    Move(dst, Immediate(x.x_ ^ jit_cookie()));
    xor_(dst, jit_cookie());
  } else {
    Move(dst, x);
  }
}
// Push immediate |x|, XOR-masking wide untagged constants with the JIT cookie
// (same mitigation as SafeMove); the in-place xor_ on the stack slot restores
// the real value.
void MacroAssembler::SafePush(const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    push(Immediate(x.x_ ^ jit_cookie()));
    xor_(Operand(esp, 0), Immediate(jit_cookie()));
  } else {
    push(x);
  }
}
// Load |heap_object|'s map into |map| and compare its instance type against
// |type|; sets EFLAGS for a following conditional jump.
void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}
// Compare the instance-type byte of |map| against |type| (byte compare).
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       static_cast<int8_t>(type));
}
// Jump to |fail| unless |map|'s elements kind is one of the fast kinds
// (FAST_SMI_ELEMENTS .. FAST_HOLEY_ELEMENTS). Relies on the elements kinds
// being laid out in ascending order in Map::kBitField2Offset.
void MacroAssembler::CheckFastElements(Register map,
                                       Label* fail,
                                       Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
}
// Jump to |fail| unless |map|'s elements kind is FAST_ELEMENTS or
// FAST_HOLEY_ELEMENTS: the smi kinds (below_equal the holey-smi maximum) and
// anything above the holey-element maximum both fail.
void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
}
// Jump to |fail| unless |map|'s elements kind is FAST_SMI_ELEMENTS or
// FAST_HOLEY_SMI_ELEMENTS (the two lowest elements-kind values).
void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(above, fail, distance);
}
// Store the number in |maybe_number| (smi or HeapNumber) into the
// FixedDoubleArray |elements| at smi index |key|, canonicalizing NaN values.
// Jumps to |fail| if |maybe_number| is neither a smi nor a HeapNumber.
// |scratch| is clobbered on the smi path.
void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register key,
    Register scratch,
    Label* fail,
    int elements_offset) {
  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);
  // Double value, canonicalize NaN.
  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
  cmp(FieldOperand(maybe_number, offset),
      Immediate(kNaNOrInfinityLowerBoundUpper32));
  j(greater_equal, &maybe_nan, Label::kNear);
  // Fix for HTML-entity corruption: the label references below were garbled
  // to "¬_nan" (mangled "&not_nan"); restored to take the label's address.
  bind(&not_nan);
  ExternalReference canonical_nan_reference =
      ExternalReference::address_of_canonical_non_hole_nan();
  fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
  bind(&have_double_value);
  fstp_d(FieldOperand(elements, key, times_4,
                      FixedDoubleArray::kHeaderSize - elements_offset));
  jmp(&done);
  bind(&maybe_nan);
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
  // it's an Infinity, and the non-NaN code path applies.
  j(greater, &is_nan, Label::kNear);
  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
  j(zero, &not_nan);
  bind(&is_nan);
  fld_d(Operand::StaticVariable(canonical_nan_reference));
  jmp(&have_double_value, Label::kNear);
  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  mov(scratch, maybe_number);
  SmiUntag(scratch);
  push(scratch);
  fild_s(Operand(esp, 0));
  pop(scratch);
  fstp_d(FieldOperand(elements, key, times_4,
                      FixedDoubleArray::kHeaderSize - elements_offset));
  bind(&done);
}
// Compare |obj|'s map word against the handle |map|; sets EFLAGS.
void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
// Jump to |fail| unless |obj|'s map equals |map|. With DO_SMI_CHECK, smis
// also jump to |fail| (a smi has no map word to read).
void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  CompareMap(obj, map);
  j(not_equal, fail);
}
// If |obj|'s map equals |map|, jump to the code object |success|; otherwise
// fall through. |unused| is part of the cross-platform signature only.
void MacroAssembler::DispatchMap(Register obj,
                                 Register unused,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
  j(equal, success);
  bind(&fail);
}
// Load |heap_object|'s map and instance type, test the not-a-string bit, and
// return the condition (zero) under which the object IS a string.
Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  test(instance_type, Immediate(kIsNotStringMask));
  return zero;
}
// Load |heap_object|'s map and instance type, compare against LAST_NAME_TYPE,
// and return the condition (below_equal) under which the object IS a name.
Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register map,
                                           Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
  return below_equal;
}
// Jump to |fail| unless |heap_object| is a non-callable spec object; loads
// the map into |map| and delegates the instance-type range check.
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}
// Jump to |fail| unless |map|'s instance type falls within the non-callable
// spec-object range; uses the subtract-then-unsigned-compare range-check
// idiom. |scratch| is clobbered.
void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  cmp(scratch,
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  j(above, fail);
}
// Compare the top two x87 stack values (popping both) and transfer the FPU
// condition codes into EFLAGS via fnstsw/sahf. eax is preserved by the
// push/pop pair.
void MacroAssembler::FCmp() {
  fucompp();
  push(eax);
  fnstsw_ax();
  sahf();
  pop(eax);
}
// Classify the x87 top-of-stack with fxam and set EFLAGS so that "equal"
// means the value is -0.0 (fxam condition bits C3/C2/C1/C0 masked and
// compared). Pops the value (fstp); eax is preserved.
void MacroAssembler::FXamMinusZero() {
  fxam();
  push(eax);
  fnstsw_ax();
  and_(eax, Immediate(0x4700));
  // For minus zero, C3 == 1 && C1 == 1.
  cmp(eax, Immediate(0x4200));
  pop(eax);
  fstp(0);
}
// Classify the x87 top-of-stack with fxam and set EFLAGS so that "not zero"
// after the and_ means the value is negative (fxam C1 is the sign bit). Pops
// the value (fstp); eax is preserved.
void MacroAssembler::FXamSign() {
  fxam();
  push(eax);
  fnstsw_ax();
  // For negative value (including -0.0), C1 == 1.
  and_(eax, Immediate(0x0200));
  pop(eax);
  fstp(0);
}
// Set EFLAGS "equal" iff the FPU status word reports an invalid-arithmetic
// (#IA) exception: invalid-operation flag (IE) set and stack-fault (SF)
// clear. eax is preserved.
void MacroAssembler::X87CheckIA() {
  push(eax);
  fnstsw_ax();
  // For #IA, IE == 1 && SF == 0.
  and_(eax, Immediate(0x0041));
  cmp(eax, Immediate(0x0001));
  pop(eax);
}
// Set the FPU rounding-control (RC) field of the x87 control word to |rc|
// by spilling the control word, masking out bits 10-11, OR-ing in the new
// mode, and reloading it. |rc| must already be shifted into position.
// rc=00B, round to nearest.
// rc=01B, round down.
// rc=10B, round up.
// rc=11B, round toward zero.
void MacroAssembler::X87SetRC(int rc) {
  sub(esp, Immediate(kPointerSize));
  fnstcw(MemOperand(esp, 0));
  // 0xF3FF clears bits 10-11 (the RC field) of the control word.
  and_(MemOperand(esp, 0), Immediate(0xF3FF));
  or_(MemOperand(esp, 0), Immediate(rc));
  fldcw(MemOperand(esp, 0));
  add(esp, Immediate(kPointerSize));
}
// Debug-code assertion: abort unless |object| is a smi or a HeapNumber.
// No code is emitted in release builds.
void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfSmi(object, &ok);
    cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandNotANumber);
    bind(&ok);
  }
}
// Debug-code assertion: abort unless |object| is a smi (tag bits clear).
void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(equal, kOperandIsNotASmi);
  }
}
// Debug-code assertion: abort unless |object| is a string (non-smi with
// instance type below FIRST_NONSTRING_TYPE). |object| is saved/restored
// around the map load.
void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(below, kOperandIsNotAString);
  }
}
// Debug-code assertion: abort unless |object| is a name (non-smi with
// instance type <= LAST_NAME_TYPE). |object| is saved/restored around the
// map load.
void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    pop(object);
    Check(below_equal, kOperandIsNotAName);
  }
}
// Debug-code assertion: abort unless |object| is the undefined value or an
// AllocationSite (identified by its map at field offset 0).
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    cmp(object, isolate()->factory()->undefined_value());
    j(equal, &done_checking);
    cmp(FieldOperand(object, 0),
        Immediate(isolate()->factory()->allocation_site_map()));
    Assert(equal, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}
// Debug-code assertion: abort if |object| is a smi (tag bits clear).
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmi);
  }
}
// Emit the standard prologue for stub frames: save the caller's frame
// pointer, set up ebp, and push the context plus the STUB frame marker.
void MacroAssembler::StubPrologue() {
  push(ebp);  // Caller's frame pointer.
  mov(ebp, esp);
  push(esi);  // Callee's context.
  push(Immediate(Smi::FromInt(StackFrame::STUB)));
}
// Emit the JS function prologue. When |code_pre_aging| is set, emit the
// code-age sequence (a call to the MarkCodeAsExecutedOnce builtin padded to
// the fixed sequence length) instead of the normal frame setup; the
// PredictableCodeSizeScope guarantees both variants occupy the same bytes.
void MacroAssembler::Prologue(bool code_pre_aging) {
  PredictableCodeSizeScope predictible_code_size_scope(this,
      kNoCodeAgeSequenceLength);
  if (code_pre_aging) {
      // Pre-age the code.
    call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
        RelocInfo::CODE_AGE_SEQUENCE);
    Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
  } else {
    push(ebp);  // Caller's frame pointer.
    mov(ebp, esp);
    push(esi);  // Callee's context.
    push(edi);  // Callee's JS function.
  }
}
// Constant-pool variant of EnterFrame; x87 has no out-of-line constant pool,
// so this overload must never be reached.
void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on x87.
  UNREACHABLE();
}
// Set up a typed stack frame: saved ebp, context, smi-tagged frame-type
// marker, and the code object. Debug code verifies the code object slot was
// patched (it must not still hold the undefined placeholder).
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(ebp);
  mov(ebp, esp);
  push(esi);
  push(Immediate(Smi::FromInt(type)));
  push(Immediate(CodeObject()));
  if (emit_debug_code()) {
    cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
    Check(not_equal, kCodeObjectNotProperlyPatched);
  }
}
// Tear down a typed stack frame with leave (restores esp/ebp). Debug code
// first verifies the frame marker matches the expected |type|.
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
        Immediate(Smi::FromInt(type)));
    Check(equal, kStackFrameTypesMustMatch);
  }
  leave();
}
// First half of exit-frame construction: build the fixed frame layout
// (saved ebp, entry-sp placeholder, code object) and publish ebp, esi, and
// ebx (the C function) into the isolate's top-of-stack external references.
void MacroAssembler::EnterExitFramePrologue() {
  // Set up the frame structure on the stack.
  DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
  DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
  DCHECK(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
  push(ebp);
  mov(ebp, esp);
  // Reserve room for entry stack pointer and push the code object.
  DCHECK(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
  push(Immediate(0));  // Saved entry sp, patched before call.
  push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  ExternalReference c_function_address(Isolate::kCFunctionAddress, isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
  mov(Operand::StaticVariable(context_address), esi);
  mov(Operand::StaticVariable(c_function_address), ebx);
}
// Second half of exit-frame construction: reserve |argc| argument slots
// (plus 108 bytes of fnsave state when |save_doubles|), align esp to the
// OS-required frame alignment, and patch the saved entry-sp slot.
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  // Optionally save FPU state.
  if (save_doubles) {
    // Store FPU state to m108byte.
    int space = 108 + argc * kPointerSize;
    sub(esp, Immediate(space));
    const int offset = -2 * kPointerSize;  // entry fp + code object.
    fnsave(MemOperand(ebp, offset - 108));
  } else {
    sub(esp, Immediate(argc * kPointerSize));
  }
  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
    and_(esp, -kFrameAlignment);
  }
  // Patch the saved entry sp.
  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}
// Enter a full exit frame for a runtime call: set up the frame, then place
// argc in edi and a pointer to the first argument in esi (callee-saved), and
// reserve three slots (argc, argv, isolate) for the C call.
void MacroAssembler::EnterExitFrame(bool save_doubles) {
  EnterExitFramePrologue();
  // Set up argc and argv in callee-saved registers.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  mov(edi, eax);
  lea(esi, Operand(ebp, eax, times_4, offset));
  // Reserve space for argc, argv and isolate.
  EnterExitFrameEpilogue(3, save_doubles);
}
// Enter an exit frame for an API call with |argc| argument slots and no FPU
// state saving.
void MacroAssembler::EnterApiExitFrame(int argc) {
  EnterExitFramePrologue();
  EnterExitFrameEpilogue(argc, false);
}
// Tear down an exit frame built by EnterExitFrame: optionally restore FPU
// state (frstor), recover the return address and caller frame pointer, drop
// the arguments and receiver (esi points at the first argument), and re-push
// the return address before the epilogue clears isolate top state.
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Optionally restore FPU state.
  if (save_doubles) {
    const int offset = -2 * kPointerSize;
    frstor(MemOperand(ebp, offset - 108));
  }
  // Get the return address from the stack and restore the frame pointer.
  mov(ecx, Operand(ebp, 1 * kPointerSize));
  mov(ebp, Operand(ebp, 0 * kPointerSize));
  // Pop the arguments and the receiver from the caller stack.
  lea(esp, Operand(esi, 1 * kPointerSize));
  // Push the return address to get ready to return.
  push(ecx);
  LeaveExitFrameEpilogue(true);
}
// Shared exit-frame epilogue: optionally restore esi from the isolate's
// saved context (clearing the slot in debug builds to catch stale reads) and
// clear the isolate's C entry frame pointer.
void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  if (restore_context) {
    mov(esi, Operand::StaticVariable(context_address));
  }
#ifdef DEBUG
  mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}
// Tear down an API exit frame: restore esp/ebp directly (no argument
// dropping — the API calling convention differs from runtime calls), then
// run the shared epilogue.
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  mov(esp, ebp);
  pop(ebp);
  LeaveExitFrameEpilogue(restore_context);
}
// Push a new try-handler frame (5 words: next, code, state, context, fp)
// onto the stack and link it in as the isolate's current handler. JS_ENTRY
// handlers store a NULL frame pointer and no context so unwinders can
// distinguish them.
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  // We will build up the handler from the bottom by pushing on the stack.
  // First push the frame pointer and context.
  if (kind == StackHandler::JS_ENTRY) {
    // The frame pointer does not point to a JS frame so we save NULL for
    // ebp. We expect the code throwing an exception to check ebp before
    // dereferencing it to restore the context.
    push(Immediate(0));  // NULL frame pointer.
    push(Immediate(Smi::FromInt(0)));  // No context.
  } else {
    push(ebp);
    push(esi);
  }
  // Push the state and the code object.
  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  push(Immediate(state));
  Push(CodeObject());
  // Link the current handler as the next handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  push(Operand::StaticVariable(handler_address));
  // Set this new handler as the current one.
  mov(Operand::StaticVariable(handler_address), esp);
}
// Unlink the current try-handler: pop its "next" word back into the
// isolate's handler slot and drop the remaining four handler words.
void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  pop(Operand::StaticVariable(handler_address));
  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
// Compute the handler entry address and jump to it.  The handler table is
// a fixed array of (smi-tagged) code offsets, indexed by the handler index
// extracted from the state word in edx.
// eax = exception, edi = code object, edx = state.
void MacroAssembler::JumpToHandlerEntry() {
  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
  // Shift out the kind bits, leaving the handler index.
  shr(edx, StackHandler::kKindWidth);
  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
  SmiUntag(edx);
  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
  jmp(edi);
}
// Throw the exception in |value|: unwind to the topmost try-handler, restore
// its saved context/frame pointer, and jump to the handler entry. For entry
// handlers (ebp/esi saved as 0) the context restore is skipped.
void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  // The exception is expected in eax.
  if (!value.is(eax)) {
    mov(eax, value);
  }
  // Drop the stack pointer to the top of the top handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  mov(esp, Operand::StaticVariable(handler_address));
  // Restore the next handler.
  pop(Operand::StaticVariable(handler_address));
  // Remove the code object and state, compute the handler address in edi.
  pop(edi);  // Code object.
  pop(edx);  // Index and state.
  // Restore the context and frame pointer.
  pop(esi);  // Context.
  pop(ebp);  // Frame pointer.
  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
  // ebp or esi.
  Label skip;
  test(esi, esi);
  j(zero, &skip, Label::kNear);
  mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
  bind(&skip);
  JumpToHandlerEntry();
}
// Throw an exception that only an ENTRY handler may catch: walk the handler
// chain (via each handler's "next" word), skipping every non-entry handler,
// then unwind to the topmost JS_ENTRY handler and jump to its entry.
void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  // The exception is expected in eax.
  if (!value.is(eax)) {
    mov(eax, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  mov(esp, Operand::StaticVariable(handler_address));
  // Unwind the handlers until the top ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind, Label::kNear);
  bind(&fetch_next);
  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  test(Operand(esp, StackHandlerConstants::kStateOffset),
       Immediate(StackHandler::KindField::kMask));
  j(not_zero, &fetch_next);
  // Set the top handler address to next handler past the top ENTRY handler.
  pop(Operand::StaticVariable(handler_address));
  // Remove the code object and state, compute the handler address in edi.
  pop(edi);  // Code object.
  pop(edx);  // Index and state.
  // Clear the context pointer and frame pointer (0 was saved in the handler).
  pop(esi);
  pop(ebp);
  JumpToHandlerEntry();
}
// Security check for cross-context access through a global proxy: jump to
// |miss| unless the current native context either equals |holder_reg|'s
// native context or carries the same security token. |scratch1|/|scratch2|
// are clobbered.
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss) {
  Label same_contexts;
  DCHECK(!holder_reg.is(scratch1));
  DCHECK(!holder_reg.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  // Load current lexical context from the stack frame.
  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmp(scratch1, Immediate(0));
    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
  }
  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, offset));
  mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to native_context_map.
    cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }
  // Check if both contexts are the same.
  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  j(equal, &same_contexts);
  // Compare security tokens, save holder_reg on the stack so we can use it
  // as a temporary register.
  //
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  mov(scratch2,
      FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  // Check the context is a native context.
  if (emit_debug_code()) {
    cmp(scratch2, isolate()->factory()->null_value());
    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
    // Read the first word and compare to native_context_map(),
    cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, token_offset));
  cmp(scratch1, FieldOperand(scratch2, token_offset));
  j(not_equal, miss);
  bind(&same_contexts);
}
// Compute the hash code from the untagged key.  This must be kept in sync
// with ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
//
// The sequence below is Thomas Wang's 32-bit integer hash, seeded with the
// heap's hash seed (read through the roots array under the serializer so the
// snapshot stays position-independent). |scratch| is clobbered.
//
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // Xor original key with a seed.
  if (serializer_enabled()) {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
    mov(scratch,
        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
    SmiUntag(scratch);
    xor_(r0, scratch);
  } else {
    int32_t seed = isolate()->heap()->HashSeed();
    xor_(r0, Immediate(seed));
  }
  // hash = ~hash + (hash << 15);
  mov(scratch, r0);
  not_(r0);
  shl(scratch, 15);
  add(r0, scratch);
  // hash = hash ^ (hash >> 12);
  mov(scratch, r0);
  shr(scratch, 12);
  xor_(r0, scratch);
  // hash = hash + (hash << 2);
  lea(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  mov(scratch, r0);
  shr(scratch, 4);
  xor_(r0, scratch);
  // hash = hash * 2057;
  imul(r0, r0, 2057);
  // hash = hash ^ (hash >> 16);
  mov(scratch, r0);
  shr(scratch, 16);
  xor_(r0, scratch);
}
// Probe the SeededNumberDictionary |elements| for smi |key| using an
// unrolled quadratic-probing loop (kNumberDictionaryProbes probes). On a hit
// with a NORMAL property, the value lands in |result|; otherwise jumps to
// |miss|.
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register r0,
                                              Register r1,
                                              Register r2,
                                              Register result) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver and is unchanged.
  //
  // key      - holds the smi key on entry and is unchanged.
  //
  // Scratch registers:
  //
  // r0 - holds the untagged key on entry and holds the hash once computed.
  //
  // r1 - used to hold the capacity mask of the dictionary
  //
  // r2 - used for the index into the dictionary.
  //
  // result - holds the result on exit if the load succeeds and we fall through.
  Label done;
  GetNumberHash(r0, r1);
  // Compute capacity mask.
  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
  shr(r1, kSmiTagSize);  // convert smi to int
  dec(r1);
  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use r2 for index calculations and keep the hash intact in r0.
    mov(r2, r0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(r2, r1);
    // Scale the index by multiplying by the entry size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
    // Check if the key matches.
    cmp(key, FieldOperand(elements,
                          r2,
                          times_pointer_size,
                          SeededNumberDictionary::kElementsStartOffset));
    if (i != (kNumberDictionaryProbes - 1)) {
      j(equal, &done);
    } else {
      // Last probe: a mismatch here means the key is absent.
      j(not_equal, miss);
    }
  }
  bind(&done);
  // Check that the value is a normal property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  DCHECK_EQ(NORMAL, 0);
  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
  j(not_zero, miss);
  // Get the value at the masked, scaled index.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
// Loads the current allocation top for the space selected by |flags| into
// |result|. If RESULT_CONTAINS_TOP is set, |result| already holds top on
// entry and |scratch| must be no_reg; otherwise |scratch| (if valid) is
// loaded with the top address so UpdateAllocationTopHelper can reuse it.
void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    DCHECK(scratch.is(no_reg));
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    cmp(result, Operand::StaticVariable(allocation_top));
    Check(equal, kUnexpectedAllocationTop);
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available.
  if (scratch.is(no_reg)) {
    mov(result, Operand::StaticVariable(allocation_top));
  } else {
    mov(scratch, Immediate(allocation_top));
    mov(result, Operand(scratch, 0));
  }
}
// Stores |result_end| as the new allocation top for the space selected by
// |flags|. If |scratch| is valid it is assumed to already hold the address
// of the top variable (set up by LoadAllocationTopHelper).
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    // The new top must be object-aligned.
    test(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top. Use scratch if available.
  if (scratch.is(no_reg)) {
    mov(Operand::StaticVariable(allocation_top), result_end);
  } else {
    mov(Operand(scratch, 0), result_end);
  }
}
// Allocates |object_size| bytes (a compile-time constant) by bumping the
// allocation top. On success |result| holds the new object (tagged if
// TAG_OBJECT is set); on failure control jumps to |gc_required|.
// |result_end| and |scratch| are clobbered when valid.
void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        mov(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      // In old data space the limit is not double-aligned, so the filler
      // store must be limit-checked first.
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    // Fill the alignment gap with a one-pointer filler object.
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  // If no result_end register was supplied, result doubles as the top
  // register and is restored by subtraction below.
  Register top_reg = result_end.is_valid() ? result_end : result;
  if (!top_reg.is(result)) {
    mov(top_reg, result);
  }
  add(top_reg, Immediate(object_size));
  j(carry, gc_required);
  cmp(top_reg, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch, flags);

  // Tag result if requested.
  bool tag_result = (flags & TAG_OBJECT) != 0;
  if (top_reg.is(result)) {
    if (tag_result) {
      // Recover the object start and apply the heap-object tag in one step.
      sub(result, Immediate(object_size - kHeapObjectTag));
    } else {
      sub(result, Immediate(object_size));
    }
  } else if (tag_result) {
    DCHECK(kHeapObjectTag == 1);
    inc(result);
  }
}
// Allocates header_size + element_count * element_size bytes, where the
// element count is in a register (as an int32 or a smi, per
// |element_count_type|). On success |result| holds the new object (tagged if
// TAG_OBJECT is set); on failure control jumps to |gc_required|.
// |result_end| and |scratch| are clobbered; |element_count| is preserved.
void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              RegisterValueType element_count_type,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
      // Register element_count is not modified by the function.
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  // We assume that element_count*element_size + header_size does not
  // overflow.
  if (element_count_type == REGISTER_VALUE_IS_SMI) {
    // A smi is the value shifted left by one, so the scale factor can be
    // reduced by one power of two instead of untagging the count.
    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
    DCHECK(element_size >= times_2);
    DCHECK(kSmiTagSize == 1);
    element_size = static_cast<ScaleFactor>(element_size - 1);
  } else {
    DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
  }
  lea(result_end, Operand(element_count, element_size, header_size));
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  if ((flags & TAG_OBJECT) != 0) {
    DCHECK(kHeapObjectTag == 1);
    inc(result);
  }

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);
}
// Allocates |object_size| bytes where the size is in a register. On success
// |result| holds the new object (tagged if TAG_OBJECT is set); on failure
// control jumps to |gc_required|. |result_end| and |scratch| are clobbered;
// |object_size| is preserved unless it aliases |result_end|.
void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  if (!object_size.is(result_end)) {
    mov(result_end, object_size);
  }
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Tag result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    DCHECK(kHeapObjectTag == 1);
    inc(result);
  }

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);
}
// Reverts the most recent new-space allocation by resetting the allocation
// top to |object|'s (untagged) address. Only valid if |object| was the last
// object allocated and no GC has happened since. Clobbers |object|'s tag.
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
  // The object must lie below the current top, i.e. it was actually the
  // most recently allocated object.
  cmp(object, Operand::StaticVariable(new_space_allocation_top));
  Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
  mov(Operand::StaticVariable(new_space_allocation_top), object);
}
// Allocates a HeapNumber object in new space and installs the (mutable or
// immutable, per |mode|) heap-number map. The value field is left
// uninitialized. Jumps to |gc_required| on allocation failure.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        MutableMode mode) {
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  Handle<Map> map = mode == MUTABLE
      ? isolate()->factory()->mutable_heap_number_map()
      : isolate()->factory()->heap_number_map();

  // Set the map.
  mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
}
// Allocates a sequential two-byte string of |length| characters in new
// space and initializes its map, length, and hash field. The character data
// is left uninitialized. Jumps to |gc_required| on allocation failure.
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  DCHECK(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
  // Round down to an object-aligned byte count.
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate two byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize,
           times_1,
           scratch1,
           REGISTER_VALUE_IS_INT32,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->string_map()));
  mov(scratch1, length);
  SmiTag(scratch1);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}
// Allocates a sequential one-byte string of |length| (register) characters
// in new space and initializes its map, length, and hash field. The
// character data is left uninitialized. Jumps to |gc_required| on failure.
void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, length);
  DCHECK(kCharSize == 1);
  // Round the byte count up to object alignment.
  add(scratch1, Immediate(kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate one-byte string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           times_1,
           scratch1,
           REGISTER_VALUE_IS_INT32,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->one_byte_string_map()));
  mov(scratch1, length);
  SmiTag(scratch1);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}
// Allocates a sequential one-byte string whose length is a compile-time
// constant, initializing map, length, and hash field. The character data is
// left uninitialized. Jumps to |gc_required| on allocation failure.
void MacroAssembler::AllocateOneByteString(Register result, int length,
                                           Register scratch1, Register scratch2,
                                           Label* gc_required) {
  DCHECK(length > 0);

  // Allocate one-byte string in new space.
  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
           gc_required, TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->one_byte_string_map()));
  mov(FieldOperand(result, String::kLengthOffset),
      Immediate(Smi::FromInt(length)));
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}
// Allocates a two-byte cons string cell in new space and installs its map.
// All other fields are left uninitialized. Jumps to |gc_required| on
// allocation failure.
void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate the cons string object in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_string_map()));
}
// Allocates a one-byte cons string cell in new space and installs its map.
// All other fields are left uninitialized. Jumps to |gc_required| on
// allocation failure.
void MacroAssembler::AllocateOneByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_one_byte_string_map()));
}
// Allocates a two-byte sliced string cell in new space and installs its map.
// All other fields are left uninitialized. Jumps to |gc_required| on
// allocation failure.
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate the sliced string object in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_string_map()));
}
// Allocates a one-byte sliced string cell in new space and installs its map.
// All other fields are left uninitialized. Jumps to |gc_required| on
// allocation failure.
void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate the sliced string object in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_one_byte_string_map()));
}
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
// Many variants of movsb, loop unrolling, word moves, and indexed operands
// have been tried here already, and this is fastest.
// A simpler loop is faster on small copies, but 30% slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag(),
// before calling this function.
void MacroAssembler::CopyBytes(Register source,
                               Register destination,
                               Register length,
                               Register scratch) {
  Label short_loop, len4, len8, len12, done, short_string;
  // Fixed register assignment is required by rep_movs (esi/edi/ecx).
  DCHECK(source.is(esi));
  DCHECK(destination.is(edi));
  DCHECK(length.is(ecx));
  cmp(length, Immediate(4));
  j(below, &short_string, Label::kNear);

  // Because source is 4-byte aligned in our uses of this function,
  // we keep source aligned for the rep_movs call by copying the odd bytes
  // at the end of the ranges.
  // Copy the last 4 bytes first; the word copies below may then overlap
  // with this store without harm.
  mov(scratch, Operand(source, length, times_1, -4));
  mov(Operand(destination, length, times_1, -4), scratch);

  cmp(length, Immediate(8));
  j(below_equal, &len4, Label::kNear);
  cmp(length, Immediate(12));
  j(below_equal, &len8, Label::kNear);
  cmp(length, Immediate(16));
  j(below_equal, &len12, Label::kNear);

  // 17+ bytes: bulk copy whole words with rep movs, then account for the
  // 0-3 trailing bytes already copied above.
  mov(scratch, ecx);
  shr(ecx, 2);
  rep_movs();
  and_(scratch, Immediate(0x3));
  add(destination, scratch);
  jmp(&done, Label::kNear);

  // 9-16 bytes: unrolled word copies; fall-through chains len12 -> len8 ->
  // len4 so each case copies exactly the words it needs.
  bind(&len12);
  mov(scratch, Operand(source, 8));
  mov(Operand(destination, 8), scratch);
  bind(&len8);
  mov(scratch, Operand(source, 4));
  mov(Operand(destination, 4), scratch);
  bind(&len4);
  mov(scratch, Operand(source, 0));
  mov(Operand(destination, 0), scratch);
  add(destination, length);
  jmp(&done, Label::kNear);

  // 0-3 bytes: simple byte-at-a-time loop.
  bind(&short_string);
  test(length, length);
  j(zero, &done, Label::kNear);

  bind(&short_loop);
  mov_b(scratch, Operand(source, 0));
  mov_b(Operand(destination, 0), scratch);
  inc(source);
  inc(destination);
  dec(length);
  j(not_zero, &short_loop);

  bind(&done);
}
// Stores |filler| into every pointer-sized slot in the half-open range
// [start_offset, end_offset). |start_offset| is advanced to |end_offset|;
// |end_offset| and |filler| are unchanged.
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  // Jump to the loop condition first so an empty range stores nothing.
  jmp(&entry);
  bind(&loop);
  mov(Operand(start_offset, 0), filler);
  add(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmp(start_offset, end_offset);
  j(less, &loop);
}
// Tests bit |bit_index| of the smi stored at |field_offset| in |object|,
// setting the processor flags (zero flag clear iff the bit is set). Only a
// single byte of the field is read.
void MacroAssembler::BooleanBitTest(Register object,
                                    int field_offset,
                                    int bit_index) {
  // Skip past the smi tag bits to address the payload bit.
  bit_index += kSmiTagSize + kSmiShiftSize;
  DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
  int byte_index = bit_index / kBitsPerByte;
  int byte_bit_index = bit_index & (kBitsPerByte - 1);
  test_b(FieldOperand(object, field_offset + byte_index),
         static_cast<byte>(1 << byte_bit_index));
}
// Jumps to |then_label| if the multiplication that produced |result| yielded
// -0: i.e. result is zero and the operand |op| is negative. Falls through
// otherwise.
void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  test(result, result);
  j(not_zero, &ok);
  // Result is zero; a negative operand means the true result was -0.
  test(op, op);
  j(sign, then_label);
  bind(&ok);
}
// Jumps to |then_label| if |result| is zero and either |op1| or |op2| is
// negative (sign bit of their OR is set), i.e. the operation produced -0.
// |scratch| is clobbered.
void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op1,
                                      Register op2,
                                      Register scratch,
                                      Label* then_label) {
  Label ok;
  test(result, result);
  j(not_zero, &ok);
  // OR the operands: the sign bit of the result is set iff either is
  // negative.
  mov(scratch, op1);
  or_(scratch, op2);
  j(sign, then_label);
  bind(&ok);
}
// Loads |function|'s prototype into |result|, jumping to |miss| when the
// prototype cannot be produced inline (hole prototype, or — when
// |miss_on_bound_function| is set — a smi, non-function, or bound function).
// |scratch| is clobbered. Note the non-instance-prototype path is only
// reachable when |miss_on_bound_function| is true.
void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  Label non_instance;
  if (miss_on_bound_function) {
    // Check that the receiver isn't a smi.
    JumpIfSmi(function, miss);

    // Check that the function really is a function.
    // (CmpObjectType leaves the function's map in |result|.)
    CmpObjectType(function, JS_FUNCTION_TYPE, result);
    j(not_equal, miss);

    // If a bound function, go to miss label.
    mov(scratch,
        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
                   SharedFunctionInfo::kBoundFunction);
    j(not_zero, miss);

    // Make sure that the function has an instance prototype.
    movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
    test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
    j(not_zero, &non_instance);
  }

  // Get the prototype or initial map from the function.
  mov(result,
      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, scratch);
  j(not_equal, &done);

  // Get the prototype from the initial map.
  mov(result, FieldOperand(result, Map::kPrototypeOffset));

  if (miss_on_bound_function) {
    jmp(&done);

    // Non-instance prototype: Fetch prototype from constructor field
    // in initial map.
    bind(&non_instance);
    mov(result, FieldOperand(result, Map::kConstructorOffset));
  }

  // All done.
  bind(&done);
}
// Emits a call to |stub|'s generated code with the given type-feedback id.
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
// Emits a tail call (jump) to |stub|'s generated code.
void MacroAssembler::TailCallStub(CodeStub* stub) {
  jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
// Emits a stub return, popping |argc| - 1 stack arguments (the receiver is
// popped by ret itself). Only valid while generating a stub.
void MacroAssembler::StubReturn(int argc) {
  DCHECK(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}
// A stub call is permitted once a frame has been set up, or when the stub
// is guaranteed never to build a frame of its own.
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  if (has_frame_) return true;
  return !stub->SometimesSetsUpAFrame();
}
// Extracts the cached array index from a string hash-field value in |hash|
// and leaves it as a smi in |index|. |hash| is preserved unless it aliases
// |index|.
void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it does not conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  if (!index.is(hash)) {
    mov(index, hash);
  }
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}
// Calls the runtime function |f| with |num_arguments| stack arguments via
// the CEntry stub. eax receives the argument count and ebx the function's
// entry reference; both are clobbered.
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Move(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ExternalReference(f, isolate())));
  CEntryStub ces(isolate(), 1, save_doubles);
  CallStub(&ces);
}
// Calls the external (C) function identified by |ref| through the CEntry
// stub, passing |num_arguments| in eax and the entry in ebx.
void MacroAssembler::CallExternalReference(ExternalReference ref,
                                           int num_arguments) {
  mov(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ref));

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}
// Tail-calls the external function |ext| through the CEntry stub, passing
// |num_arguments| in eax. |result_size| is unused on ia32.
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Move(eax, Immediate(num_arguments));
  JumpToExternalReference(ext);
}
// Tail-calls the runtime function |fid| with |num_arguments| arguments;
// thin wrapper over TailCallExternalReference.
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}
// Returns the stack operand for API call parameter |index|; parameters
// occupy consecutive pointer-sized slots above esp.
Operand ApiParameterOperand(int index) {
  const int byte_offset = index * kPointerSize;
  return Operand(esp, byte_offset);
}
// Sets up an API exit frame with room for |argc| parameters. In debug
// builds esi is zapped to catch accidental context use afterwards.
void MacroAssembler::PrepareCallApiFunction(int argc) {
  EnterApiExitFrame(argc);
  if (emit_debug_code()) {
    mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}
// Calls an API function (in edx == |function_address|), managing the
// HandleScope bookkeeping around the call, optionally routing through the
// profiling thunk |thunk_ref|, loading the return value from
// |return_value_operand|, promoting any scheduled exception, and finally
// leaving the exit frame and returning, popping |stack_space| slots.
// If |context_restore_operand| is non-NULL, esi is restored from it before
// leaving the frame.
void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    Operand thunk_last_arg,
    int stack_space,
    Operand return_value_operand,
    Operand* context_restore_operand) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  ExternalReference limit_address =
      ExternalReference::handle_scope_limit_address(isolate());
  ExternalReference level_address =
      ExternalReference::handle_scope_level_address(isolate());

  DCHECK(edx.is(function_address));
  // Allocate HandleScope in callee-save registers.
  // ebx = saved next, edi = saved limit; level is incremented in memory.
  mov(ebx, Operand::StaticVariable(next_address));
  mov(edi, Operand::StaticVariable(limit_address));
  add(Operand::StaticVariable(level_address), Immediate(1));

  if (FLAG_log_timer_events) {
    // Log entry into external code; registers are preserved around the
    // C call via the safepoint-register save area.
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, eax);
    mov(Operand(esp, 0),
        Immediate(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label profiler_disabled;
  Label end_profiler_check;
  // Test the is-profiling flag byte at runtime and choose between a direct
  // call and a call through the profiling thunk.
  mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
  cmpb(Operand(eax, 0), 0);
  j(zero, &profiler_disabled);

  // Additional parameter is the address of the actual getter function.
  mov(thunk_last_arg, function_address);
  // Call the api function.
  mov(eax, Immediate(thunk_ref));
  call(eax);
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  // Call the api function.
  call(function_address);
  bind(&end_profiler_check);

  if (FLAG_log_timer_events) {
    // Log exit from external code, mirroring the entry logging above.
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, eax);
    mov(Operand(esp, 0),
        Immediate(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label prologue;
  // Load the value from ReturnValue
  mov(eax, return_value_operand);

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  bind(&prologue);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  mov(Operand::StaticVariable(next_address), ebx);
  sub(Operand::StaticVariable(level_address), Immediate(1));
  Assert(above_equal, kInvalidHandleScopeLevel);
  // If the limit changed, the callee allocated handle-scope extensions
  // that must be deleted before leaving.
  cmp(edi, Operand::StaticVariable(limit_address));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());
  cmp(Operand::StaticVariable(scheduled_exception_address),
      Immediate(isolate()->factory()->the_hole_value()));
  j(not_equal, &promote_scheduled_exception);
  bind(&exception_handled);

#if ENABLE_EXTRA_CHECKS
  // Check if the function returned a valid JavaScript value.
  Label ok;
  Register return_value = eax;
  Register map = ecx;

  JumpIfSmi(return_value, &ok, Label::kNear);
  mov(map, FieldOperand(return_value, HeapObject::kMapOffset));

  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  j(below, &ok, Label::kNear);

  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  j(above_equal, &ok, Label::kNear);

  cmp(map, isolate()->factory()->heap_number_map());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->undefined_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->true_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->false_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->null_value());
  j(equal, &ok, Label::kNear);

  Abort(kAPICallReturnedInvalidObject);

  bind(&ok);
#endif

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    mov(esi, *context_restore_operand);
  }
  LeaveApiExitFrame(!restore_context);
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallRuntime(Runtime::kPromoteScheduledException, 0);
  }
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  ExternalReference delete_extensions =
      ExternalReference::delete_handle_scope_extensions(isolate());
  bind(&delete_allocated_handles);
  mov(Operand::StaticVariable(limit_address), edi);
  // Preserve the return value (eax) across the C call in edi.
  mov(edi, eax);
  mov(Operand(esp, 0),
      Immediate(ExternalReference::isolate_address(isolate())));
  mov(eax, Immediate(delete_extensions));
  call(eax);
  mov(eax, edi);
  jmp(&leave_exit_frame);
}
// Tail-calls into the C entry runtime stub with |ext| as the entry point
// (in ebx, per the CEntry calling convention).
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
  // Set the entry point and jump to the C entry runtime stub.
  mov(ebx, Immediate(ext));
  CEntryStub ces(isolate(), 1);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
// Emits the argument-count check that precedes a function invocation. When
// expected and actual counts may differ, routes the call through the
// arguments adaptor trampoline. Sets *definitely_mismatches when the counts
// are known at compile time to differ (so the caller can skip the direct
// call). Uses eax (actual count), ebx (expected count), and edx (code entry)
// per the adaptor's convention.
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    const Operand& code_operand,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    Label::Distance done_near,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label invoke;
  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(eax, actual.immediate());
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(ebx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      cmp(expected.reg(), actual.immediate());
      j(equal, &invoke);
      DCHECK(expected.reg().is(ebx));
      mov(eax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmp(expected.reg(), actual.reg());
      j(equal, &invoke);
      DCHECK(actual.reg().is(eax));
      DCHECK(expected.reg().is(ebx));
    }
    // Note: if expected and actual are the same register, the counts
    // trivially match and no check is emitted.
  }

  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    // The adaptor expects the code entry in edx.
    if (!code_constant.is_null()) {
      mov(edx, Immediate(code_constant));
      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_operand.is_reg(edx)) {
      mov(edx, code_operand);
    }

    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
      call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        // The adaptor performed the call; skip the direct call site.
        jmp(done, done_near);
      }
    } else {
      jmp(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}
// Invokes the code at |code| (call or jump per |flag|) after checking the
// expected/actual argument counts via InvokePrologue. Skips the direct
// call/jump when the prologue determined a definite mismatch (the adaptor
// handles it instead).
void MacroAssembler::InvokeCode(const Operand& code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag, Label::kNear,
                 call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      jmp(code);
    }
    bind(&done);
  }
}
// Invokes the JSFunction in edi, reading the expected argument count from
// its SharedFunctionInfo (into ebx) and its context into esi, then
// dispatching through InvokeCode.
void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(fun.is(edi));
  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
  SmiUntag(ebx);

  ParameterCount expected(ebx);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper);
}
// Invokes the JSFunction in edi with an explicitly supplied expected
// argument count, loading the function's context into esi first.
void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(fun.is(edi));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper);
}
// Invokes a compile-time-known JSFunction by materializing it into edi and
// delegating to the register-based overload.
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  LoadHeapObject(edi, function);
  InvokeFunction(edi, expected, actual, flag, call_wrapper);
}
// Invokes the JavaScript builtin |id|. Loads the builtin function into edi
// and calls it with a faked parameter count of zero (expected == actual) so
// no argument-adaption code is emitted.
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments match the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinFunction(edi, id);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, expected, flag, call_wrapper);
}
// Loads the JSFunction for builtin |id| into |target|, walking from the
// current context (esi) through the global object to the builtins object.
void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the JavaScript builtin function from the builtins object.
  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  mov(target, FieldOperand(target,
                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
// Loads the code entry point of builtin |id| into |target|. Clobbers edi
// (which receives the builtin function), so |target| must not be edi.
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(edi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(edi, id);
  // Load the code entry point from the function into the target register.
  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
// Load into |dst| the context |context_chain_length| levels up the context
// chain; length 0 means the current context (copied out of esi).
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in esi).
    mov(dst, esi);
  }
  // We should not have found a with context by walking the context chain
  // (i.e., the static scope chain and runtime context chain do not agree).
  // A variable occurring in such a scope should have slot type LOOKUP and
  // not CONTEXT.
  if (emit_debug_code()) {
    cmp(FieldOperand(dst, HeapObject::kMapOffset),
        isolate()->factory()->with_context_map());
    Check(not_equal, kVariableResolvedToWithContext);
  }
}
// If |map_in_out| is the cached JSArray map for |expected_kind|, replace it
// with the cached map for |transitioned_kind|; otherwise jump to
// |no_map_match|. |scratch| is clobbered.
void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  mov(scratch, Operand(scratch,
                       Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  // The maps array is indexed by elements kind, one pointer per kind.
  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  mov(map_in_out, FieldOperand(scratch, offset));
}
// Load the global function at native-context slot |index| into |function|.
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  mov(function,
      Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  mov(function,
      FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  mov(function, Operand(function, Context::SlotOffset(index)));
}

// Load the initial map of a global |function| into |map|. In debug builds,
// verifies that the slot actually holds a map (not the meta map failing).
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}
// Store the value in register src in the safepoint register stack
// slot for register dst.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  mov(SafepointRegisterSlot(dst), src);
}

// Immediate-valued overload of the above.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
  mov(SafepointRegisterSlot(dst), src);
}

// Load into dst the value saved in src's safepoint stack slot.
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  mov(dst, SafepointRegisterSlot(src));
}

// Compute the esp-relative operand for |reg|'s safepoint slot.
Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}

// Map a register encoding to its index in the safepoint register area.
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the lowest encoding,
  // which means that lowest encodings are furthest away from
  // the stack pointer.
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return kNumSafepointRegisters - reg_code - 1;
}
// Load |object| into |result|. New-space objects may move during GC, so
// they are referenced indirectly through a Cell; old-space objects can be
// embedded directly in the instruction stream.
void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference embedding_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    mov(result, Operand::ForCell(cell));
  } else {
    mov(result, object);
  }
}

// Compare |reg| against |object|, using the same new-space Cell indirection
// as LoadHeapObject.
void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    cmp(reg, Operand::ForCell(cell));
  } else {
    cmp(reg, object);
  }
}

// Push |object| onto the stack, using the same new-space Cell indirection
// as LoadHeapObject.
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    push(Operand::ForCell(cell));
  } else {
    Push(object);
  }
}
// Plain return with no stack adjustment.
void MacroAssembler::Ret() {
  ret(0);
}

// Return and drop |bytes_dropped| bytes of arguments from the stack.
// The ret-immediate form only encodes a 16-bit count, so larger drops are
// done manually around the saved return address using |scratch|.
void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (!is_uint16(bytes_dropped)) {
    // Too big for ret's immediate: pop the return address, free the
    // argument space, push the return address back, and do a plain ret.
    pop(scratch);
    add(esp, Immediate(bytes_dropped));
    push(scratch);
    ret(0);
    return;
  }
  ret(bytes_dropped);
}
// Debug check that the x87 FPU register stack currently holds exactly
// |depth| items (or is empty). Reads the TOP field of the FPU status word
// and compares it against the expected top-of-stack value.
void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
  // Turn off the stack depth check when serializer is enabled to reduce the
  // code size.
  if (serializer_enabled()) return;
  // Make sure the floating point stack is either empty or has depth items.
  DCHECK(depth <= 7);
  // This is very expensive.
  DCHECK(FLAG_debug_code && FLAG_enable_slow_asserts);

  // The top-of-stack (tos) is 7 if there is one item pushed.
  int tos = (8 - depth) % 8;
  // TOP occupies bits 11-13 of the x87 status word.
  const int kTopMask = 0x3800;
  push(eax);
  fwait();
  fnstsw_ax();
  and_(eax, kTopMask);
  shr(eax, 11);
  cmp(eax, Immediate(tos));
  Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
  fnclex();
  pop(eax);
}
// Pop |stack_elements| pointer-sized items off the stack (no-op for <= 0).
void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements <= 0) return;
  add(esp, Immediate(stack_elements * kPointerSize));
}

// Register-to-register move; emits nothing when src and dst coincide.
void MacroAssembler::Move(Register dst, Register src) {
  if (dst.is(src)) return;
  mov(dst, src);
}

// Load an immediate into a register, preferring the shorter xor encoding
// for zero.
void MacroAssembler::Move(Register dst, const Immediate& x) {
  if (!x.is_zero()) {
    mov(dst, x);
    return;
  }
  xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
}

// Store an immediate to a memory operand.
void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
  mov(dst, x);
}
// Overwrite a stats counter with |value|; emits nothing when native code
// counters are disabled or the counter is not enabled.
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
  }
}

// Add |value| (> 0) to a stats counter; uses the shorter inc encoding when
// incrementing by one.
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      inc(operand);
    } else {
      add(operand, Immediate(value));
    }
  }
}

// Subtract |value| (> 0) from a stats counter; uses the shorter dec
// encoding when decrementing by one.
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      dec(operand);
    } else {
      sub(operand, Immediate(value));
    }
  }
}
// Conditionally increment a stats counter: the update only runs when |cc|
// holds. The EFLAGS are saved/restored around the update (pushfd/popfd) so
// the caller's condition codes survive the inc/add.
void MacroAssembler::IncrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    IncrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}

// Conditionally decrement a stats counter; see IncrementCounter(Condition)
// above for the flag-preservation scheme.
void MacroAssembler::DecrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    DecrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}
// Debug-only check: in debug-code builds, abort with |reason| unless |cc|
// holds. Emits nothing in release code.
void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}

// Debug-only check that |elements| has one of the fast-elements maps
// (fixed array, fixed double array, or copy-on-write array).
void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Factory* factory = isolate()->factory();
    Label ok;
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_double_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_cow_array_map()));
    j(equal, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}

// Unconditional runtime check: abort with |reason| unless |cc| holds.
void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}
// Emit a runtime check that esp is aligned to the OS activation frame
// alignment; traps with int3 if not. Emits nothing when the required
// alignment is no stricter than the natural pointer alignment.
void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    Label alignment_as_expected;
    test(esp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}
// Emit code that aborts execution with |reason| via Runtime::kAbort.
// Control never returns past the emitted sequence (trailing int3).
void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  // In debug builds with --trap-on-abort, trap immediately instead of
  // calling the runtime.
  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  // Pass the reason to the runtime as a smi pushed on the stack.
  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // will not return here
  int3();
}
// Load |map|'s instance descriptor array into |descriptors|.
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}

// Extract the number-of-own-descriptors bit field of |map| into |dst|.
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
// Look up the cached string representation of the number in |object| in the
// number-string cache. On a hit the string is left in |result|; on a miss
// control jumps to |not_found|. |scratch1|/|scratch2| are clobbered, and
// |result| is used as a temporary while probing.
// NOTE: the "&not_smi" label references below were corrupted to "¬_smi"
// by an HTML-entity mangling pass ("&not" -> U+00AC); restored here.
void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
  sub(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label smi_hash_calculated;
  Label load_result_from_cache;
  Label not_smi;
  STATIC_ASSERT(kSmiTag == 0);
  JumpIfNotSmi(object, &not_smi, Label::kNear);
  mov(scratch, object);
  SmiUntag(scratch);
  jmp(&smi_hash_calculated, Label::kNear);
  bind(&not_smi);
  cmp(FieldOperand(object, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  j(not_equal, not_found);
  STATIC_ASSERT(8 == kDoubleSize);
  // Hash a heap number by xoring its two 32-bit halves.
  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  // Object is heap number and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  Register index = scratch;
  Register probe = mask;
  mov(probe,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  // Compare the probed heap number against the key using the x87 FPU so
  // that NaN comparisons are handled (parity flag).
  fld_d(FieldOperand(object, HeapNumber::kValueOffset));
  fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
  FCmp();
  j(parity_even, not_found);  // Bail out if NaN is involved.
  j(not_equal, not_found);  // The cache did not contain this value.
  jmp(&load_result_from_cache, Label::kNear);

  bind(&smi_hash_calculated);
  // Object is smi and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  // Check if the entry is the smi we are looking for.
  cmp(object,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  j(not_equal, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  mov(result,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}
// Jump to |failure| unless |instance_type| describes a sequential one-byte
// string. |scratch| is clobbered (and may alias |instance_type|).
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
    Register instance_type, Register scratch, Label* failure) {
  if (!scratch.is(instance_type)) {
    mov(scratch, instance_type);
  }
  // Keep only the string-ness, representation, and encoding bits.
  and_(scratch,
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
  j(not_equal, failure);
}
// Jump to |failure| unless both |object1| and |object2| are sequential
// one-byte (flat Latin1) strings. Both scratch registers are clobbered.
void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
                                                           Register object2,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that both objects are not smis.
  // ANDing the two tagged pointers: the result is a smi only if at least
  // one of them is a smi (tag bit 0).
  STATIC_ASSERT(kSmiTag == 0);
  mov(scratch1, object1);
  and_(scratch1, object2);
  JumpIfSmi(scratch1, failure);

  // Load instance type for both strings.
  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat one-byte strings.
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  // Interleave bits from both instance types and compare them in one check.
  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
  and_(scratch1, kFlatOneByteStringMask);
  and_(scratch2, kFlatOneByteStringMask);
  // Combine the two masked types into one word: scratch1 | (scratch2 << 3).
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
  j(not_equal, failure);
}
// Jump to |not_unique_name| unless the instance type in |operand| is a
// unique name (an internalized string or a Symbol).
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
                                                     Label* not_unique_name,
                                                     Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  // Internalized strings have both "is string" and "is internalized" bits
  // clear under these masks.
  test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  j(zero, &succeed);
  // Otherwise only a Symbol qualifies as a unique name.
  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
  j(not_equal, not_unique_name, distance);

  bind(&succeed);
}
// Debug checks before writing a character into a sequential string:
// |string| must be a heap object with the expected sequential-string
// encoding, and the untagged |index| must be a valid in-bounds position.
// |index| is temporarily smi-tagged and restored before returning.
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object, Label::kNear);
  Abort(kNonObject);
  bind(&is_object);

  // Borrow |value| to inspect the string's instance type; restored below.
  push(value);
  mov(value, FieldOperand(string, HeapObject::kMapOffset));
  movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));

  and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmp(value, Immediate(encoding_mask));
  pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to be untagged coming in, tag it to compare with the
  // string length without using a temp register, it is restored at the end of
  // this function.
  SmiTag(index);
  // SmiTag sets the overflow flag if the index didn't fit in a smi.
  Check(no_overflow, kIndexIsTooLarge);

  cmp(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  cmp(index, Immediate(Smi::FromInt(0)));
  Check(greater_equal, kIndexIsNegative);

  // Restore the index
  SmiUntag(index);
}
// Reserve and align stack space for a C call with |num_arguments| words of
// arguments. When the OS requires frame alignment, the original esp is
// saved in the extra slot so CallCFunction can restore it.
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  if (frame_alignment != 0) {
    // Make stack end at alignment and make room for num_arguments words
    // and the original value of esp.
    mov(scratch, esp);
    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(esp, -frame_alignment);
    mov(Operand(esp, num_arguments * kPointerSize), scratch);
  } else {
    sub(esp, Immediate(num_arguments * kPointerSize));
  }
}

// Call the C function at |function|; the address is materialized in eax.
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  // Trashing eax is ok as it will be the return value.
  mov(eax, Immediate(function));
  CallCFunction(eax, num_arguments);
}

// Call the C function whose address is in |function| and unwind the
// argument area set up by PrepareCallCFunction.
void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  if (base::OS::ActivationFrameAlignment() != 0) {
    // Restore the esp saved by PrepareCallCFunction.
    mov(esp, Operand(esp, num_arguments * kPointerSize));
  } else {
    add(esp, Immediate(num_arguments * kPointerSize));
  }
}
#ifdef DEBUG
// Returns true if any two of the (valid) registers alias each other.
// Invalid registers are ignored; aliasing is detected by comparing the
// count of valid registers to the population count of their combined bits.
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  // Duplicates collapse into one bit, so a mismatch means aliasing.
  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif
// RAII helper for patching |size| bytes of existing code at |address|.
// The embedded assembler writes directly over the old instructions.
CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap on order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

// Flushes the instruction cache for the patched range and verifies that
// exactly |size_| bytes were emitted.
CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CpuFeatures::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
// Test the MemoryChunk flags of the page containing |object| against |mask|
// and jump to |condition_met| when the test satisfies |cc| (zero/not_zero).
// |scratch| receives the page start address and may alias |object|.
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  // Mask off the low bits to get the page-aligned chunk start.
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  // Use the shorter byte test when the mask fits in one byte.
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
           static_cast<uint8_t>(mask));
  } else {
    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
// Like CheckPageFlag, but for a statically-known |map|: the page's flags
// word is addressed directly via an external reference, with no scratch
// register needed. Relies on maps never being moved by the GC.
void MacroAssembler::CheckPageFlagForMap(
    Handle<Map> map,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  Page* page = Page::FromAddress(map->address());
  DCHECK(!serializer_enabled());  // Serializer cannot match page_flags.
  ExternalReference reference(ExternalReference::page_flags(page));
  // The inlined static address check of the page's flags relies
  // on maps never being compacted.
  DCHECK(!isolate()->heap()->mark_compact_collector()->
         IsOnEvacuationCandidate(*map));
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
  } else {
    test(Operand::StaticVariable(reference), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
// Jump to |if_deprecated| when |map|'s deprecated bit is set. Emits nothing
// for maps that can never be deprecated. |scratch| is clobbered.
void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    mov(scratch, map);
    mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
    and_(scratch, Immediate(Map::Deprecated::kMask));
    j(not_zero, if_deprecated);
  }
}
// Jump to |on_black| when the mark bits of |object| encode black ("10").
// Both scratch registers are clobbered (see HasColor).
void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black,
                                 Label::Distance on_black_near) {
  HasColor(object, scratch0, scratch1,
           on_black, on_black_near,
           1, 0);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}

// Jump to |has_color| when the two mark bits of |object| equal
// (first_bit, second_bit). The second bit may live in the next bitmap cell
// when the first bit is the last bit of a cell (the word_boundary path).
// Clobbers bitmap_scratch, mask_scratch and ecx (via GetMarkBits).
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              Label::Distance has_color_distance,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
  j(zero, &word_boundary, Label::kNear);
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  jmp(&other_color, Label::kNear);

  bind(&word_boundary);
  // The second mark bit is the lowest bit of the following bitmap word.
  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  bind(&other_color);
}
// Compute the marking-bitmap cell address (into |bitmap_reg|) and the
// single-bit mask (into |mask_reg|) for the object at |addr_reg|.
// Clobbers ecx (used for the shift count).
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
  // Page start: clear the low (page-offset) bits of the address.
  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  and_(bitmap_reg, addr_reg);
  mov(ecx, addr_reg);
  // Cell index within the bitmap: page offset scaled down by bits-per-cell
  // and pointer size, rounded to a cell boundary.
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shr(ecx, shift);
  and_(ecx,
       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));

  add(bitmap_reg, ecx);
  // Bit index within the cell.
  mov(ecx, addr_reg);
  shr(ecx, kPointerSizeLog2);
  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
  mov(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}
// Ensure the object in |value| is not white in the marking bitmap. Black
// and grey objects are left alone. A white data-only object (heap number,
// external string, or flat sequential string) is marked black and its size
// added to the page's live-byte count; any other white object causes a jump
// to |value_is_white_and_not_data|. Clobbers both scratch registers and ecx.
// NOTE: the "&not_heap_number" / "&not_external" label references below were
// corrupted to "¬_heap_number" / "¬_external" by an HTML-entity
// mangling pass ("&not" -> U+00AC); restored here.
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // shl. May overflow making the check conservative.
    add(mask_scratch, mask_scratch);
    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = ecx;  // Holds map while checking type.
  Register length = ecx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number
  mov(map, FieldOperand(value, HeapObject::kMapOffset));
  cmp(map, isolate()->factory()->heap_number_map());
  j(not_equal, &not_heap_number, Label::kNear);
  mov(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = ecx;
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  test_b(instance_type, kExternalStringTag);
  j(zero, &not_external, Label::kNear);
  mov(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either Latin1 or UC16.
  DCHECK(kOneByteStringTag == 0x04);
  // Map the encoding bit to the per-character byte count: Latin1 -> 4,
  // UC16 -> 8 (char size shifted left by 2, computed via mask/xor/add).
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  add(length, Immediate(0x04));
  // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
  // by 2. If we multiply the string length as smi by this, it still
  // won't overflow a 32-bit value.
  DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
  DCHECK(SeqOneByteString::kMaxSize <=
         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
  // Round the byte size up to the object alignment.
  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
      length);
  if (emit_debug_code()) {
    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
    Check(less_equal, kLiveBytesCountOverflowChunkSize);
  }

  bind(&done);
}
// Extract |map|'s enum-length bit field into |dst| as a smi.
void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  and_(dst, Immediate(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}

// Walk the prototype chain of the object in eax and jump to |call_runtime|
// unless the receiver has a valid enum cache and every object on the chain
// has empty elements and an empty enum cache. Clobbers ebx, ecx, edx.
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Label next, start;
  mov(ecx, eax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(0)));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
  cmp(ecx, isolate()->factory()->empty_fixed_array());
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
  j(not_equal, call_runtime);

  bind(&no_elements);
  // Advance to the prototype; a null prototype terminates the walk.
  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
  cmp(ecx, isolate()->factory()->null_value());
  j(not_equal, &next);
}
// Test whether the JSArray in |receiver_reg| is directly followed in new
// space by an AllocationMemento. Jumps to |no_memento_found| when the
// candidate address lies outside the active new-space region; otherwise
// leaves the map comparison flags set — the CALLER must branch on `equal`
// to detect a memento. |scratch_reg| is clobbered.
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Address just past where a trailing memento would end.
  lea(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  cmp(scratch_reg, Immediate(new_space_start));
  j(less, no_memento_found);
  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
  j(greater, no_memento_found);
  // Compare the would-be memento's map; flags are left for the caller.
  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
      Immediate(isolate()->factory()->allocation_memento_map()));
}
// Walk |object|'s prototype chain and jump to |found| if any object on the
// chain has dictionary (slow) elements. Both scratch registers are
// clobbered; the walk ends at a null prototype.
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // scratch contained elements pointer.
  mov(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(not_equal, &loop_again);
}
// Emit a truncating signed division of |dividend| by the constant |divisor|
// using the multiply-by-magic-number technique (Granlund/Montgomery); the
// quotient ends up in edx. Clobbers eax and edx, so |dividend| must be
// neither.
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(eax));
  DCHECK(!dividend.is(edx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  mov(eax, Immediate(mag.multiplier));
  imul(dividend);
  // Correction terms when the magic multiplier's sign doesn't match the
  // divisor's (standard fixups from the division-by-constant algorithm).
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) add(edx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
  if (mag.shift > 0) sar(edx, mag.shift);
  // Add the sign bit of the dividend to round the quotient toward zero.
  mov(eax, dividend);
  shr(eax, 31);
  add(edx, eax);
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X87
|
#include "ecp/EcpClientCommandLayer.h"
#include "ecp/EcpProtocol.h"
#include "dex/test/TestDataParser.h"
#include "timer/include/TimerEngine.h"
#include "utils/include/TestEventObserver.h"
#include "utils/include/Hex.h"
#include "test/include/Test.h"
// Fake packet layer for driving Ecp::ClientCommandLayer in tests:
// outgoing bytes are captured as a hex string (inspected via getSendData),
// and incoming bytes/errors are injected from the test via recvData /
// recvError.
class TestEcpClientPacketLayer : public Ecp::ClientPacketLayerInterface {
public:
  // Fix: |observer| was left uninitialized; recvData()/recvError() called
  // before setObserver() would have dereferenced a garbage pointer.
  TestEcpClientPacketLayer() : observer(NULL), recvBuf(256) {}
  virtual ~TestEcpClientPacketLayer() {}
  // Drop everything captured so far.
  void clearSendData() { sendBuf.clear(); }
  // Hex dump of all bytes "sent" since the last clearSendData().
  const char *getSendData() { return sendBuf.getString(); }
  // Decode |hex| into the receive buffer and hand it to the observer.
  void recvData(const char *hex) {
    uint16_t len = hexToData(hex, strlen(hex), recvBuf.getData(), recvBuf.getSize());
    recvBuf.setLen(len);
    observer->procRecvData(recvBuf.getData(), recvBuf.getLen());
  }
  // Forward a transport error to the observer.
  void recvError(Ecp::Error error) {
    observer->procRecvError(error);
  }

  virtual void setObserver(Observer *observer) { this->observer = observer; }
  virtual bool connect() {
    sendBuf << "<connect>";
    return true;
  }
  virtual void disconnect() { sendBuf << "<disconnect>"; }
  virtual bool sendData(const Buffer *data) {
    for(uint16_t i = 0; i < data->getLen(); i++) {
      sendBuf.addHex((*data)[i]);
    }
    return true;
  }

private:
  Observer *observer;       // not owned; may be NULL until setObserver()
  StringBuilder sendBuf;    // hex log of outgoing traffic
  Buffer recvBuf;           // staging buffer for injected incoming data
};
// Records Ecp::Client events as readable markers in the shared result
// string, so tests can assert on the exact event sequence.
class TestEcpClientObserver : public TestEventObserver {
public:
  TestEcpClientObserver(StringBuilder *result) : TestEventObserver(result) {}
  virtual void proc(Event *event) {
    switch(event->getType()) {
    case Ecp::Client::Event_ConnectOK: *result << "<event=ConnectOK>"; break;
    case Ecp::Client::Event_ConnectError: *result << "<event=ConnectError," << event->getUint16() << ">"; break;
    case Ecp::Client::Event_Disconnect: *result << "<event=Disconnect>"; break;
    case Ecp::Client::Event_UploadOK: *result << "<event=UploadOK>"; break;
    case Ecp::Client::Event_UploadError: *result << "<event=UploadError," << event->getUint16() << ">"; break;
    case Ecp::Client::Event_DownloadOK: *result << "<event=DownloadOK>"; break;
    case Ecp::Client::Event_DownloadError: *result << "<event=DownloadError," << event->getUint16() << ">"; break;
    case Ecp::Client::Event_ResponseOK: *result << "<event=ResponseOK>"; break;
    case Ecp::Client::Event_ResponseError: *result << "<event=ResponseError>"; break;
    // Unknown events are recorded by their raw numeric type.
    default: *result << "<event=" << event->getType() << ">";
    }
  }
};
// Test set for Ecp::ClientCommandLayer: wires the client to a fake packet
// layer and a recording observer, then exercises connect / disconnect /
// upload / download flows.
class EcpClientTest : public TestSet {
public:
  EcpClientTest();
  bool init();
  void cleanup();
  bool testDisconnect();
  bool testRemoteDisconnect();
  bool testUpload();
  bool testDownload();
  bool testRecvData();

private:
  StringBuilder *result;                  // shared event/result log
  TimerEngine *timerEngine;
  TestEcpClientObserver *observer;        // records client events into result
  TestEcpClientPacketLayer *packetLayer;  // fake transport under the client
  Ecp::ClientCommandLayer *client;        // unit under test
  // Drive the client from idle to the connected/ready state.
  bool gotoStateReady();
  bool recvAnswer(uint8_t command, uint8_t resultCode);
  bool recvAnswer(const char *hexData);
};
TEST_SET_REGISTER(EcpClientTest);

// Register the individual test cases with the framework.
EcpClientTest::EcpClientTest() {
  TEST_CASE_REGISTER(EcpClientTest, testDisconnect);
  TEST_CASE_REGISTER(EcpClientTest, testRemoteDisconnect);
  TEST_CASE_REGISTER(EcpClientTest, testUpload);
  TEST_CASE_REGISTER(EcpClientTest, testDownload);
  TEST_CASE_REGISTER(EcpClientTest, testRecvData);
}
// Build the fixture: result log, timer engine, observer, fake packet layer
// and the client under test, wired together.
bool EcpClientTest::init() {
  this->result = new StringBuilder(1024, 1024);
  this->timerEngine = new TimerEngine();
  this->observer = new TestEcpClientObserver(result);
  this->packetLayer = new TestEcpClientPacketLayer;
  this->client = new Ecp::ClientCommandLayer(this->timerEngine, this->packetLayer);
  this->client->setObserver(observer);
  return true;
}
void EcpClientTest::cleanup() {
delete this->client;
delete this->packetLayer;
delete this->observer;
delete this->timerEngine;
delete this->result;
}
// Supplies two fixed 10-byte payload chunks for the upload tests.
class EcpTestDataGenerator : public Dex::DataGenerator {
public:
    // Fix: 'index' was previously left uninitialized until reset() was
    // called; initialize it so the generator is safe in any call order.
    EcpTestDataGenerator() : index(0) {}
    virtual void reset() { index = 0; }
    // Advance to the next chunk; clamps at the last (second) chunk.
    virtual void next() { index++; if(index > 1) { index = 1; } }
    virtual bool isLast() { return index >= 1; }
    virtual const void *getData() { return data[index]; }
    virtual uint16_t getLen() { return 10; }
private:
    int index; // current chunk, 0 or 1
    static const char data[2][10];
};
const char EcpTestDataGenerator::data[2][10] = {
    { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A },
    { 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A },
};
// Drive the client from idle to Ready: connect request, transport-level
// connect confirmation, setup command (0x01) and its ACK ("0100").
// Returns false if any intermediate expectation fails.
bool EcpClientTest::gotoStateReady() {
    TEST_NUMBER_EQUAL(true, client->connect());
    // Connect
    TEST_STRING_EQUAL("<connect>", packetLayer->getSendData());
    packetLayer->clearSendData();
    TEST_STRING_EQUAL("", result->getString());
    // Connected
    client->procConnect();
    // Setup
    TEST_STRING_EQUAL("01", packetLayer->getSendData());
    packetLayer->clearSendData();
    TEST_STRING_EQUAL("", result->getString());
    // Recv ACK
    packetLayer->recvData("0100");
    TEST_STRING_EQUAL("", packetLayer->getSendData());
    TEST_STRING_EQUAL("<event=ConnectOK>", result->getString());
    result->clear();
    return true;
}
// Locally initiated disconnect: command goes out, event fires on confirm.
bool EcpClientTest::testDisconnect() {
    // Fix: the result of gotoStateReady() was ignored, letting the test
    // continue against a client that never reached the Ready state.
    if(gotoStateReady() == false) {
        return false;
    }
    client->disconnect();
    TEST_STRING_EQUAL("<disconnect>", packetLayer->getSendData());
    packetLayer->clearSendData();
    TEST_STRING_EQUAL("", result->getString());
    client->procDisconnect();
    TEST_STRING_EQUAL("", packetLayer->getSendData());
    TEST_STRING_EQUAL("<event=Disconnect>", result->getString());
    return true;
}
// Remote-initiated disconnect: no command is sent, only the event fires.
bool EcpClientTest::testRemoteDisconnect() {
    // Fix: propagate setup failure instead of ignoring it.
    if(gotoStateReady() == false) {
        return false;
    }
    client->procDisconnect();
    TEST_STRING_EQUAL("", packetLayer->getSendData());
    TEST_STRING_EQUAL("<event=Disconnect>", result->getString());
    return true;
}
// Upload flow: start (0x02), two data chunks (0x03) with one BUSY retry,
// then the end marker (0x04) retried twice on BUSY until the final ACK
// produces the UploadOK event.
bool EcpClientTest::testUpload() {
    EcpTestDataGenerator generator;
    // Fix: propagate setup failure instead of ignoring it.
    if(gotoStateReady() == false) {
        return false;
    }
    // Start upload
    TEST_NUMBER_EQUAL(true, client->uploadData(Ecp::Destination_FirmwareGsm, &generator));
    TEST_STRING_EQUAL("020100000000", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv ACK
    packetLayer->recvData("0200");
    // Send Data
    TEST_STRING_EQUAL("030102030405060708090A", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv ACK
    packetLayer->recvData("0300");
    // Send Data
    TEST_STRING_EQUAL("031112131415161718191A", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv BUSY
    packetLayer->recvData("0301");
    // Resend Data after the busy timeout expires
    timerEngine->tick(ECP_BUSY_TIMEOUT);
    timerEngine->execute();
    TEST_STRING_EQUAL("031112131415161718191A", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv ACK
    packetLayer->recvData("0300");
    // Send End
    TEST_STRING_EQUAL("04", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv BUSY
    packetLayer->recvData("0401");
    TEST_STRING_EQUAL("", packetLayer->getSendData());
    TEST_STRING_EQUAL("", result->getString());
    // Resend End
    timerEngine->tick(ECP_BUSY_TIMEOUT);
    timerEngine->execute();
    TEST_STRING_EQUAL("04", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv BUSY
    packetLayer->recvData("0401");
    TEST_STRING_EQUAL("", packetLayer->getSendData());
    TEST_STRING_EQUAL("", result->getString());
    // Resend End
    timerEngine->tick(ECP_BUSY_TIMEOUT);
    timerEngine->execute();
    TEST_STRING_EQUAL("04", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv ACK
    packetLayer->recvData("0400");
    TEST_STRING_EQUAL("", packetLayer->getSendData());
    TEST_STRING_EQUAL("<event=UploadOK>", result->getString());
    return true;
}
// Download flow: start (0x06), two request/data rounds (0x07), the second
// response carrying the last-block marker that completes the parser.
bool EcpClientTest::testDownload() {
    TestDataParser parser;
    // Fix: propagate setup failure instead of ignoring it.
    if(gotoStateReady() == false) {
        return false;
    }
    // Start Download
    TEST_NUMBER_EQUAL(true, client->downloadData(Ecp::Source_Audit, &parser));
    TEST_STRING_EQUAL("0601", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv ACK
    packetLayer->recvData("060000000000");
    TEST_STRING_EQUAL("<start=0>", parser.getData());
    parser.clearData();
    // Send Request
    TEST_STRING_EQUAL("07", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv Data
    packetLayer->recvData("07000102030405060708090A");
    TEST_STRING_EQUAL("0102030405060708090A", parser.getData());
    parser.clearData();
    // Send Request
    TEST_STRING_EQUAL("07", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv Last Data
    packetLayer->recvData("070B1112131415161718191A");
    TEST_STRING_EQUAL("1112131415161718191A<complete>", parser.getData());
    parser.clearData();
    TEST_STRING_EQUAL("", packetLayer->getSendData());
    return true;
}
// Request/response commands: table info (0x09), table entries (0x0A, one
// OK and one error response), and date-time (0x0B).
bool EcpClientTest::testRecvData() {
    // Fix: propagate setup failure instead of ignoring it.
    if(gotoStateReady() == false) {
        return false;
    }
    // Send TableInfoRequest
    TEST_NUMBER_EQUAL(true, client->getTableInfo(Ecp::Table_Event));
    TEST_STRING_EQUAL("090100", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv response
    packetLayer->recvData("090005000000");
    TEST_STRING_EQUAL("<event=ResponseOK>", result->getString());
    result->clear();
    // Send TableEntryRequest
    TEST_NUMBER_EQUAL(true, client->getTableEntry(Ecp::Table_Event, 0));
    TEST_STRING_EQUAL("0A010000000000", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv response
    packetLayer->recvData("0A0005000000");
    TEST_STRING_EQUAL("<event=ResponseOK>", result->getString());
    result->clear();
    // Send TableEntryRequest
    TEST_NUMBER_EQUAL(true, client->getTableEntry(Ecp::Table_Event, 255));
    TEST_STRING_EQUAL("0A0100FF000000", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv response (error code 0x0A)
    packetLayer->recvData("0A0A");
    TEST_STRING_EQUAL("<event=ResponseError>", result->getString());
    result->clear();
    // Send DateTimeRequest
    TEST_NUMBER_EQUAL(true, client->getDateTime());
    TEST_STRING_EQUAL("0B", packetLayer->getSendData());
    packetLayer->clearSendData();
    // Recv response
    packetLayer->recvData("0B00010203040506");
    TEST_STRING_EQUAL("<event=ResponseOK>", result->getString());
    return true;
}
|
//--------------------------------------------------------------------------------------------------
//
// File: nImO/nImOchunkArray.cpp
//
// Project: nImO
//
// Contains: The class definition for an array of buffer chunks.
//
// Written by: Norman Jaffe
//
// Copyright: (c) 2016 by OpenDragon.
//
// All rights reserved. Redistribution and use in source and binary forms, with or
// without modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and / or
// other materials provided with the distribution.
// * Neither the name of the copyright holders nor the names of its contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// Created: 2016-05-03
//
//--------------------------------------------------------------------------------------------------
#include "nImOchunkArray.hpp"
#include <nImObufferChunk.hpp>
//#include <odlEnable.h>
#include <odlInclude.h>
#include <inttypes.h>
#if defined(__APPLE__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wunknown-pragmas"
# pragma clang diagnostic ignored "-Wdocumentation-unknown-command"
#endif // defined(__APPLE__)
/*! @file
@brief The class definition for an array of chunks. */
#if defined(__APPLE__)
# pragma clang diagnostic pop
#endif // defined(__APPLE__)
#if defined(__APPLE__)
# pragma mark Namespace references
#endif // defined(__APPLE__)
using namespace nImO;
#if defined(__APPLE__)
# pragma mark Private structures, constants and variables
#endif // defined(__APPLE__)
#if defined(__APPLE__)
# pragma mark Global constants and variables
#endif // defined(__APPLE__)
#if defined(__APPLE__)
# pragma mark Local functions
#endif // defined(__APPLE__)
#if defined(__APPLE__)
# pragma mark Class methods
#endif // defined(__APPLE__)
#if defined(__APPLE__)
# pragma mark Constructors and Destructors
#endif // defined(__APPLE__)
// Construct a chunk array that starts with a single, empty chunk.
// @param padWithNull @c true if each chunk reserves room for a trailing NUL.
nImO::ChunkArray::ChunkArray
    (const bool padWithNull) :
        _buffers(new BufferChunk *[1]), _buffersArePadded(padWithNull), _cachedString(), _numChunks(1)
{
    ODL_ENTER(); //####
    ODL_B1("padWithNull = ", padWithNull); //####
    ODL_P1("_buffers <- ", _buffers); //####
    ODL_I1("_numChunks <- ", _numChunks); //####
    ODL_B1("_buffersArePadded <- ", _buffersArePadded); //####
    // The table was allocated in the initializer list; fill its single slot.
    *_buffers = new BufferChunk(_buffersArePadded);
    ODL_EXIT_P(this); //####
} // nImO::ChunkArray::ChunkArray
// Release every chunk and then the chunk-pointer table itself.
nImO::ChunkArray::~ChunkArray
    (void)
{
    ODL_OBJENTER(); //####
    if (_buffers)
    {
        ODL_LOG("(_buffers)"); //####
        for (size_t ii = 0; _numChunks > ii; ++ii)
        {
            delete _buffers[ii];
        }
        delete[] _buffers;
    }
    ODL_OBJEXIT(); //####
} // nImO::ChunkArray::~ChunkArray
#if defined(__APPLE__)
# pragma mark Actions and Accessors
#endif // defined(__APPLE__)
// Append 'numBytes' bytes to the array, filling the last chunk and growing
// the chunk table one chunk at a time as needed.
// @param data the bytes to append (call is a no-op if @c nullptr).
// @param numBytes the number of bytes to append (no-op if zero).
void
nImO::ChunkArray::appendBytes
    (const uint8_t * data,
     const size_t numBytes)
{
    ODL_OBJENTER(); //####
    ODL_P1("data = ", data); //####
    ODL_I1("numBytes = ", numBytes); //####
    if (data && (0 < numBytes))
    {
        ODL_LOG("(data && (0 < numBytes))"); //####
        const uint8_t * walker = data;
        // Invalidate the cache.
        _cachedString.clear();
        for (size_t bytesLeft = numBytes; 0 < bytesLeft; )
        {
            BufferChunk * lastChunk = _buffers[_numChunks - 1];
            size_t available = lastChunk->getAvailableBytes();
            if (bytesLeft <= available)
            {
                // Everything fits into the current last chunk.
                ODL_LOG("(bytesLeft <= available)"); //####
                lastChunk->appendData(walker, bytesLeft * sizeof(*data));
                bytesLeft = 0;
            }
            else
            {
                // Fill the current chunk and add a new one to the table.
                ODL_LOG("! (bytesLeft <= available)"); //####
                BufferChunk * prevChunk = lastChunk;
                lastChunk = new BufferChunk(_buffersArePadded);
                // NOTE(review): with a throwing operator new these null
                // checks are unreachable -- confirm the project's allocator
                // policy (nothrow new handler?) before relying on them.
                if (lastChunk)
                {
                    ODL_LOG("(lastChunk)"); //####
                    auto newBuffers = new BufferChunk *[_numChunks + 1];
                    if (newBuffers)
                    {
                        ODL_LOG("(newBuffers)"); //####
                        // Copy the old pointers into the enlarged table.
                        memcpy(newBuffers, _buffers, sizeof(*_buffers) * _numChunks);
                        delete[] _buffers;
                        _buffers = newBuffers;
                        ODL_P1("_buffers <- ", _buffers); //####
                        _buffers[_numChunks++] = lastChunk;
                        ODL_I1("_numChunks <- ", _numChunks); //####
                        prevChunk->appendData(walker, available);
                        walker += available;
                        bytesLeft -= available;
                    }
                    else
                    {
                        // NOTE(review): on allocation failure the remaining
                        // bytes are silently dropped.
                        ODL_LOG("! (newBuffers)"); //####
                        delete lastChunk;
                        bytesLeft = 0;
                    }
                }
                else
                {
                    ODL_LOG("! (lastChunk)"); //####
                    bytesLeft = 0;
                }
            }
        }
    }
    ODL_OBJEXIT(); //####
} // nImO::ChunkArray::appendBytes
// Return @c true if 'index' lies past the last byte currently stored.
// @param index a zero-based byte position across the whole array.
bool
nImO::ChunkArray::atEnd
    (const size_t index)
    const
{
    ODL_OBJENTER(); //####
    ODL_I1("index = ", index); //####
    bool result = true;
    if (_buffers)
    {
        ODL_LOG("(_buffers)"); //####
        // Split the flat index into a chunk number and an offset within it.
        size_t chunkNumber = (index / BufferChunk::kBufferSize);
        size_t offset = (index % BufferChunk::kBufferSize);
        ODL_I2("chunkNumber <- ", chunkNumber, "offset <- ", offset); //####
        if (_numChunks > chunkNumber)
        {
            ODL_LOG("(_numChunks > chunkNumber)"); //####
            BufferChunk * aChunk = _buffers[chunkNumber];
            if (nullptr != aChunk)
            {
                ODL_LOG("(nullptr != aChunk)"); //####
                // Within the chunk's filled portion => not at the end.
                if (offset < aChunk->getDataSize())
                {
                    ODL_LOG("(offset < aChunk->getDataSize())"); //####
                    result = false;
                }
            }
        }
    }
    ODL_OBJEXIT_B(result); //####
    return result;
} // nImO::ChunkArray::atEnd
// Return the byte at the given flat position, or -1 if past the end.
// @param index a zero-based byte position across the whole array.
// @param atEnd set to @c true if 'index' is past the stored data.
int
nImO::ChunkArray::getByte
    (const size_t index,
     bool & atEnd)
    const
{
    ODL_OBJENTER(); //####
    ODL_I1("index = ", index); //####
    int result = -1;
    atEnd = true;
    ODL_B1("atEnd <- ", atEnd); //####
    if (_buffers)
    {
        ODL_LOG("(_buffers)"); //####
        // Split the flat index into a chunk number and an offset within it.
        size_t chunkNumber = (index / BufferChunk::kBufferSize);
        size_t offset = (index % BufferChunk::kBufferSize);
        ODL_I2("chunkNumber <- ", chunkNumber, "offset <- ", offset); //####
        if (_numChunks > chunkNumber)
        {
            ODL_LOG("(_numChunks > chunkNumber)"); //####
            BufferChunk * aChunk = _buffers[chunkNumber];
            if (nullptr != aChunk)
            {
                ODL_LOG("(nullptr != aChunk)"); //####
                if (offset < aChunk->getDataSize())
                {
                    ODL_LOG("(offset < aChunk->getDataSize())"); //####
                    const uint8_t * thisData = aChunk->getData();
                    result = *(thisData + offset);
                    atEnd = false;
                    ODL_B1("atEnd <- ", atEnd); //####
                }
            }
        }
    }
    ODL_OBJEXIT_I(result); //####
    return result;
} // nImO::ChunkArray::getByte
// Return all stored bytes as a single string, built lazily and cached until
// the next mutation (appendBytes/reset clear the cache).
std::string
nImO::ChunkArray::getBytes
    (void)
{
    ODL_OBJENTER(); //####
    if (0 == _cachedString.size())
    {
        ODL_LOG("(0 == _cachedString.size())"); //####
        size_t length = getLength();
        ODL_I1("length = ", length); //####
        _cachedString.reserve(length + (_buffersArePadded ? 1 : 0));
        for (size_t ii = 0; _numChunks > ii; ++ii)
        {
            BufferChunk * aChunk = _buffers[ii];
            if (nullptr != aChunk)
            {
                // Bulk-append the chunk instead of one character at a time.
                _cachedString.append(reinterpret_cast<const char *>(aChunk->getData()),
                                     aChunk->getDataSize());
            }
        }
    }
    ODL_OBJEXIT(); //####
    return _cachedString;
} // nImO::ChunkArray::getBytes
// Return the total number of bytes stored across all chunks.
// All chunks before the last one are full (appendBytes only adds a chunk
// once the previous last chunk is filled), so only the last chunk's fill
// level varies.
size_t
nImO::ChunkArray::getLength
    (void)
    const
{
    ODL_OBJENTER(); //####
    size_t totalLength = 0;
    if (_buffers)
    {
        ODL_LOG("(_buffers)"); //####
        BufferChunk * aChunk = _buffers[_numChunks - 1];
        // Full chunks before the last one...
        totalLength = ((_numChunks - 1) * BufferChunk::kBufferSize);
        if (nullptr != aChunk)
        {
            ODL_LOG("(nullptr != aChunk)"); //####
            // ...plus whatever the last chunk currently holds.
            totalLength += aChunk->getDataSize();
        }
    }
    ODL_OBJEXIT_I(totalLength); //####
    return totalLength;
} // nImO::ChunkArray::getLength
// Discard all stored data, shrinking back to a single, cleared chunk.
// The first chunk is retained and reused; all others are released.
// @returns the chunk array, for call chaining.
nImO::ChunkArray &
nImO::ChunkArray::reset
    (void)
{
    ODL_OBJENTER(); //####
    // Invalidate the cache.
    _cachedString.clear();
    if (1 < _numChunks)
    {
        ODL_LOG("(1 < _numChunks)"); //####
        for (size_t ii = 1; _numChunks > ii; ++ii)
        {
            BufferChunk * aChunk = _buffers[ii];
            if (nullptr != aChunk)
            {
                delete aChunk;
            }
        }
        BufferChunk * firstChunk = *_buffers;
        delete[] _buffers;
        _buffers = new BufferChunk *[1];
        *_buffers = firstChunk;
        // Fix: record that only one chunk remains. Previously _numChunks
        // kept its old value, so a later appendBytes/getLength would index
        // past the end of the one-element table just allocated above.
        _numChunks = 1;
        ODL_I1("_numChunks <- ", _numChunks); //####
    }
    _buffers[0]->reset();
    ODL_OBJEXIT_P(this); //####
    return *this;
} // nImO::ChunkArray::reset
#if defined(__APPLE__)
# pragma mark Global functions
#endif // defined(__APPLE__)
|
/*****************************************************************************
* Project: RooFit *
* *
* This code was autogenerated by RooClassFactory *
*****************************************************************************/
// PDF with a Gaussian core and an exponential lower tail starting tail*sigma below the mean.
#include "Riostream.h"
#include "RooInverseGaussianTail.h"
#include "RooAbsReal.h"
#include "RooAbsCategory.h"
#include <math.h>
#include "TMath.h"
ClassImp(RooInverseGaussianTail)
// Construct the PDF from its observable (x) and shape parameters.
// 'tail' is the distance, in units of sigma, below the mean at which the
// Gaussian core transitions into an exponential tail (see evaluate()).
RooInverseGaussianTail::RooInverseGaussianTail(const char *name, const char *title,
                        RooAbsReal& _x,
                        RooAbsReal& _mean,
                        RooAbsReal& _sigma,
                        RooAbsReal& _tail) :
   RooAbsPdf(name,title),
   x("x","x",this,_x),
   mean("mean","mean",this,_mean),
   sigma("sigma","sigma",this,_sigma),
   tail("tail","tail",this,_tail)
 {
 }
// Copy constructor, required by RooFit for cloning PDFs into fit contexts.
RooInverseGaussianTail::RooInverseGaussianTail(const RooInverseGaussianTail& other, const char* name) :
   RooAbsPdf(other,name),
   x("x",this,other.x),
   mean("mean",this,other.mean),
   sigma("sigma",this,other.sigma),
   tail("tail",this,other.tail)
 {
 }
// Unnormalized PDF value: a Gaussian core for x >= mean - tail*sigma and an
// exponential continuation below that point. The exponential's slope equals
// the Gaussian's derivative at the junction, so the curve is smooth there.
Double_t RooInverseGaussianTail::evaluate() const
{
   // Tail start expressed as an absolute distance below the mean.
   Double_t tail2 = tail * sigma;
   if (x >= (mean - tail2))
      // NOTE(review): TMath::Gaus is used with its default (unnormalized)
      // form here -- RooFit handles normalization separately.
      return TMath::Gaus(x, mean, sigma);
   else
      // Value at the junction times an exponential decaying away from it.
      return TMath::Gaus(mean - tail2, mean, sigma) * TMath::Exp(tail2 * (x + tail2 - mean) / (sigma * sigma));
}
|
/*
Copyright (c) 2006, Michael Kazhdan and Matthew Bolitho
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the Johns Hopkins University nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include "Geometry.h"
///////////////////
// CoredMeshData //
///////////////////
// Bit flags marking which of a triangle's three vertices are in-core points.
const int CoredMeshData::IN_CORE_FLAG[]={1,2,4};
// -1 is the sentinel for an unset point/triangle/edge index.
TriangulationEdge::TriangulationEdge(void){pIndex[0]=pIndex[1]=tIndex[0]=tIndex[1]=-1;}
TriangulationTriangle::TriangulationTriangle(void){eIndex[0]=eIndex[1]=eIndex[2]=-1;}
/////////////////////////
// CoredVectorMeshData //
/////////////////////////
// Read cursors start at the beginning of the point and triangle streams.
CoredVectorMeshData::CoredVectorMeshData(void){oocPointIndex=triangleIndex=0;}
// Rewind both read cursors so the streams can be traversed again.
void CoredVectorMeshData::resetIterator(void){oocPointIndex=triangleIndex=0;}
// Store an out-of-core point and return its index in the point stream.
int CoredVectorMeshData::addOutOfCorePoint(const Point3D<float>& p){
    oocPoints.push_back(p);
    return int(oocPoints.size())-1;
}
int CoredVectorMeshData::addTriangle(const TriangleIndex& t,const int& coreFlag){
TriangleIndex tt;
if(coreFlag & CoredMeshData::IN_CORE_FLAG[0]) {tt.idx[0]= t.idx[0];}
else {tt.idx[0]=-t.idx[0]-1;}
if(coreFlag & CoredMeshData::IN_CORE_FLAG[1]) {tt.idx[1]= t.idx[1];}
else {tt.idx[1]=-t.idx[1]-1;}
if(coreFlag & CoredMeshData::IN_CORE_FLAG[2]) {tt.idx[2]= t.idx[2];}
else {tt.idx[2]=-t.idx[2]-1;}
triangles.push_back(tt);
return int(triangles.size())-1;
}
// Fetch the next out-of-core point; returns 1 on success, 0 at end of stream.
int CoredVectorMeshData::nextOutOfCorePoint(Point3D<float>& p){
    if(oocPointIndex>=int(oocPoints.size())){return 0;}
    p=oocPoints[oocPointIndex++];
    return 1;
}
// Fetch and decode the next triangle; returns 1 on success, 0 at end.
// Negative stored indices decode to out-of-core indices (-idx-1) and the
// corresponding IN_CORE_FLAG bit is left clear in inCoreFlag.
int CoredVectorMeshData::nextTriangle(TriangleIndex& t,int& inCoreFlag){
    inCoreFlag=0;
    if(triangleIndex>=int(triangles.size())){return 0;}
    t=triangles[triangleIndex++];
    for(int v=0;v<3;v++){
        if(t.idx[v]<0){t.idx[v]=-t.idx[v]-1;}
        else{inCoreFlag|=CoredMeshData::IN_CORE_FLAG[v];}
    }
    return 1;
}
// Number of out-of-core points recorded so far.
int CoredVectorMeshData::outOfCorePointCount(void){return int(oocPoints.size());}
// Number of triangles recorded so far.
int CoredVectorMeshData::triangleCount(void){return int(triangles.size());}
|
#include <array>
#include <ctime>
#include <iostream>
#include <string>
#include <sstream>
#include <unordered_map>
#include <vector>
using namespace std;
// Number of machine registers.
const int N_REG = 6;
typedef array<int, N_REG> Regs;
// The register containing the value 'c'
// that is produced by the main program loop
const int MAIN_VAR_REG_ID = 3;
// The instruction pointer value when executing the program exit condition
const int EXIT_CONDITION_IP = 28;
// The sixteen elfcode operations.
enum OpCode {
    addr, addi,
    mulr, muli,
    banr, bani,
    borr, bori,
    setr, seti,
    gtir, gtri, gtrr,
    eqir, eqri, eqrr,
};
// Mnemonic -> opcode lookup used while parsing the program text.
unordered_map<string, OpCode> OP_CODES = {
    {"addr", addr}, {"addi", addi},
    {"mulr", mulr}, {"muli", muli},
    {"banr", banr}, {"bani", bani},
    {"borr", borr}, {"bori", bori},
    {"setr", setr}, {"seti", seti},
    {"gtir", gtir}, {"gtri", gtri}, {"gtrr", gtrr},
    {"eqir", eqir}, {"eqri", eqri}, {"eqrr", eqrr},
};
// One decoded instruction: the opcode and its three operands.
struct Instruction {
    OpCode op;
    int a, b, c;
};
// Evaluate one instruction against the register file and return the value
// destined for register x.c (the caller performs the store).
int runOp(const Regs& registers, Instruction x) {
    // Shorthand for reading a register operand.
    auto reg = [&](int i) { return registers[i]; };
    switch (x.op) {
    case addr: return reg(x.a) + reg(x.b);
    case addi: return reg(x.a) + x.b;
    case mulr: return reg(x.a) * reg(x.b);
    case muli: return reg(x.a) * x.b;
    case banr: return reg(x.a) & reg(x.b);
    case bani: return reg(x.a) & x.b;
    case borr: return reg(x.a) | reg(x.b);
    case bori: return reg(x.a) | x.b;
    case setr: return reg(x.a);
    case seti: return x.a;
    case gtir: return x.a > reg(x.b) ? 1 : 0;
    case gtri: return reg(x.a) > x.b ? 1 : 0;
    case gtrr: return reg(x.a) > reg(x.b) ? 1 : 0;
    case eqir: return x.a == reg(x.b) ? 1 : 0;
    case eqri: return reg(x.a) == x.b ? 1 : 0;
    case eqrr: return reg(x.a) == reg(x.b) ? 1 : 0;
    default: return 0;
    }
}
struct Program {
vector<Instruction> instructions;
Regs registers = {{0}};
int ip = 0;
int ip_register = 0;
uint64_t num_cycles = 0;
/*
* The main program loop is entirely parametrized by the value of
* register $3 on entry ('c'), which is also the output that is
* compared with the user value 'x' as in:
*
* if c == x:
* exit()
*
* Since 'x' is never used anywhere else, the solutions are:
* - lower bound is the resulting 'c' value after one iteration
* of the outer loop
* - upper bound is the last unique 'c' value produced by the loop
* before a duplicate is seen.
*/
bool terminate = false;
bool exit() const {
return ip >= instructions.size() || terminate;
}
void runNextInstruction() {
registers[ip_register] = ip;
Instruction& i = instructions[ip];
registers[i.c] = runOp(registers, i);
ip = registers[ip_register];
ip++;
num_cycles++;
}
void hack() {
if (ip == EXIT_CONDITION_IP) {
int c = registers[MAIN_VAR_REG_ID];
registers[0] = c;
terminate = true;
}
}
};
// Parse the "#ip N" header and the instruction list from the puzzle text.
Program readProgram(const string& in) {
    istringstream iss(in);
    Program p;
    string line;
    // First line binds a register to the instruction pointer: "#ip N".
    getline(iss, line);
    sscanf(line.c_str(), "#ip %d", &p.ip_register);
    while (getline(iss, line)) {
        Instruction i;
        // The mnemonic is always the first four characters of the line.
        i.op = OP_CODES[line.substr(0, 4)];
        // %*s skips the mnemonic; read the three integer operands.
        sscanf(line.c_str(), "%*s %d %d %d",
               &i.a, &i.b, &i.c);
        p.instructions.push_back(i);
    }
    return p;
}
// Execute the program until the exit-condition hack fires (or the ip leaves
// the program) and return the answer it deposited in register 0.
int run(const string& in) {
    auto program = readProgram(in);
    while (!program.exit()) {
        program.runNextInstruction();
        program.hack();
    }
    // The solution was written to register 0
    return program.registers[0];
}
// Entry point: argv[1] is the puzzle input text; prints the elapsed time
// in milliseconds followed by the answer.
int main(int argc, char** argv) {
    if (argc < 2) {
        cout << "Missing one argument" << endl;
        exit(1);
    }
    clock_t start = clock();
    auto answer = run(string(argv[1]));
    cout << "_duration:" << float( clock () - start ) * 1000.0 / CLOCKS_PER_SEC << "\n";
    cout << answer << "\n";
    return 0;
}
|
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/ec2/model/DescribeSnapshotTierStatusRequest.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
using namespace Aws::EC2::Model;
using namespace Aws::Utils;
// Default-construct the request with all fields marked unset; the POD
// members get deterministic values so serialization is well-defined.
DescribeSnapshotTierStatusRequest::DescribeSnapshotTierStatusRequest() :
    m_filtersHasBeenSet(false),
    m_dryRun(false),
    m_dryRunHasBeenSet(false),
    m_nextTokenHasBeenSet(false),
    m_maxResults(0),
    m_maxResultsHasBeenSet(false)
{
}
// Serialize the request as an EC2 Query-API body: the Action plus only the
// fields that have been explicitly set, then the API version.
Aws::String DescribeSnapshotTierStatusRequest::SerializePayload() const
{
  Aws::StringStream ss;
  ss << "Action=DescribeSnapshotTierStatus&";
  if(m_filtersHasBeenSet)
  {
    // Filters are flattened as Filter.1, Filter.2, ...
    unsigned filtersCount = 1;
    for(auto& item : m_filters)
    {
      item.OutputToStream(ss, "Filter.", filtersCount, "");
      filtersCount++;
    }
  }
  if(m_dryRunHasBeenSet)
  {
    // boolalpha renders the flag as "true"/"false" as the Query API expects.
    ss << "DryRun=" << std::boolalpha << m_dryRun << "&";
  }
  if(m_nextTokenHasBeenSet)
  {
    // The pagination token may contain reserved characters; URL-encode it.
    ss << "NextToken=" << StringUtils::URLEncode(m_nextToken.c_str()) << "&";
  }
  if(m_maxResultsHasBeenSet)
  {
    ss << "MaxResults=" << m_maxResults << "&";
  }
  ss << "Version=2016-11-15";
  return ss.str();
}
// EC2 uses query-string encoding, so the serialized payload doubles as the
// URL query string.
void DescribeSnapshotTierStatusRequest::DumpBodyToUrl(Aws::Http::URI& uri ) const
{
    const Aws::String payload = SerializePayload();
    uri.SetQueryString(payload);
}
|
// Copyright David Abrahams 2002.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef EXTRACT_DWA200265_HPP
#define EXTRACT_DWA200265_HPP
#include <boost/python/detail/prefix.hpp>
#include <boost/python/converter/from_python.hpp>
#include <boost/python/converter/object_manager.hpp>
#include <boost/python/converter/registered.hpp>
#include <boost/python/converter/registered_pointee.hpp>
#include <boost/python/converter/rvalue_from_python_data.hpp>
#include <boost/python/object_core.hpp>
#include <boost/python/refcount.hpp>
#include <boost/call_traits.hpp>
#include <boost/python/detail/copy_ctor_mutates_rhs.hpp>
#include <boost/python/detail/void_ptr.hpp>
#include <boost/python/detail/void_return.hpp>
#if BOOST_WORKAROUND(BOOST_INTEL_WIN, <= 900)
#define BOOST_EXTRACT_WORKAROUND ()
#else
#define BOOST_EXTRACT_WORKAROUND
#endif
namespace boost {
namespace python {
namespace api {
class object;
}
namespace converter {
// Lvalue extraction as a pointer; Py_None converts to a null pointer.
template <class Ptr> struct extract_pointer {
  typedef Ptr result_type;
  extract_pointer(PyObject *);
  bool check() const;
  Ptr operator()() const;

private:
  PyObject *m_source;
  void *m_result;
};
// Lvalue extraction as a reference; throws on call if no lvalue was found.
template <class Ref> struct extract_reference {
  typedef Ref result_type;
  extract_reference(PyObject *);
  bool check() const;
  Ref operator()() const;

private:
  PyObject *m_source;
  void *m_result;
};
// Rvalue (by-value) extraction; the converted value is cached in m_data.
template <class T> struct extract_rvalue : private noncopyable {
  // Hand back a mutable reference when copying would mutate the source
  // (e.g. auto_ptr-like types); otherwise the usual parameter type.
  typedef
      typename mpl::if_<python::detail::copy_ctor_mutates_rhs<T>, T &,
                        typename call_traits<T>::param_type>::type result_type;
  extract_rvalue(PyObject *);
  bool check() const;
  result_type operator()() const;

private:
  PyObject *m_source;
  mutable rvalue_from_python_data<T> m_data;
};
// Extraction for object-manager types (object, list, ...) which manage
// their own Python reference.
template <class T> struct extract_object_manager {
  typedef T result_type;
  extract_object_manager(PyObject *);
  bool check() const;
  result_type operator()() const;

private:
  PyObject *m_source;
};
// Chooses the extractor for T, in priority order:
// object manager > pointer > reference > rvalue.
template <class T> struct select_extract {
  BOOST_STATIC_CONSTANT(bool, obj_mgr = is_object_manager<T>::value);
  BOOST_STATIC_CONSTANT(bool, ptr = is_pointer<T>::value);
  BOOST_STATIC_CONSTANT(bool, ref = is_reference<T>::value);
  typedef typename mpl::if_c<
      obj_mgr, extract_object_manager<T>,
      typename mpl::if_c<ptr, extract_pointer<T>,
                         typename mpl::if_c<ref, extract_reference<T>,
                                            extract_rvalue<T>>::type>::type>::
      type type;
};
} // namespace converter
// User-facing extractor: extract<T>(obj) tests convertibility via check()
// and produces the T via operator() or the implicit conversion below.
template <class T> struct extract : converter::select_extract<T>::type {
private:
  typedef typename converter::select_extract<T>::type base;

public:
  typedef typename base::result_type result_type;
  // Implicit conversion performs the extraction (may throw on failure).
  operator result_type() const { return (*this)(); }
  extract(PyObject *);
  extract(api::object const &);
};
//
// Implementations
//
template <class T> inline extract<T>::extract(PyObject *o) : base(o) {}
template <class T>
inline extract<T>::extract(api::object const &o) : base(o.ptr()) {}
namespace converter {
// Kick off the stage-1 rvalue conversion immediately; stage 2 is deferred
// until the extracted value is actually requested.
template <class T>
inline extract_rvalue<T>::extract_rvalue(PyObject *x)
    : m_source(x),
      m_data((rvalue_from_python_stage1)(x, registered<T>::converters)) {}
// A non-null 'convertible' means stage 1 found a usable converter.
template <class T> inline bool extract_rvalue<T>::check() const {
  return m_data.stage1.convertible;
}
template <class T>
inline typename extract_rvalue<T>::result_type
extract_rvalue<T>::operator()() const {
  return *(T *)(
      // Only do the stage2 conversion once
      m_data.stage1.convertible == m_data.storage.bytes
          ? m_data.storage.bytes
          : (rvalue_from_python_stage2)(m_source, m_data.stage1,
                                        registered<T>::converters));
}
// Look up an lvalue in the registry right away; a null result means failure.
template <class Ref>
inline extract_reference<Ref>::extract_reference(PyObject *obj)
    : m_source(obj),
      m_result((get_lvalue_from_python)(obj, registered<Ref>::converters)) {}
template <class Ref> inline bool extract_reference<Ref>::check() const {
  return m_result != 0;
}
template <class Ref> inline Ref extract_reference<Ref>::operator()() const {
  if (m_result == 0)
    (throw_no_reference_from_python)(m_source, registered<Ref>::converters);
  return python::detail::void_ptr_to_reference(m_result, (Ref(*)())0);
}
// Py_None short-circuits to a null pointer without touching the registry.
template <class Ptr>
inline extract_pointer<Ptr>::extract_pointer(PyObject *obj)
    : m_source(obj),
      m_result(obj == Py_None ? 0
                              : (get_lvalue_from_python)(
                                    obj, registered_pointee<Ptr>::converters)) {
}
template <class Ptr> inline bool extract_pointer<Ptr>::check() const {
  return m_source == Py_None || m_result != 0;
}
template <class Ptr> inline Ptr extract_pointer<Ptr>::operator()() const {
  // Only a genuine lookup failure throws; None legitimately yields null.
  if (m_result == 0 && m_source != Py_None)
    (throw_no_pointer_from_python)(m_source,
                                   registered_pointee<Ptr>::converters);
  return Ptr(m_result);
}
template <class T>
inline extract_object_manager<T>::extract_object_manager(PyObject *obj)
    : m_source(obj) {}
template <class T> inline bool extract_object_manager<T>::check() const {
  return object_manager_traits<T>::check(m_source);
}
// Adopt a new reference so the constructed manager owns the object.
template <class T> inline T extract_object_manager<T>::operator()() const {
  return T(object_manager_traits<T>::adopt(python::incref(m_source)));
}
} // namespace converter
} // namespace python
} // namespace boost
#endif // EXTRACT_DWA200265_HPP
|
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/cloudformation/model/BatchDescribeTypeConfigurationsResult.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <utility>
using namespace Aws::CloudFormation::Model;
using namespace Aws::Utils::Xml;
using namespace Aws::Utils::Logging;
using namespace Aws::Utils;
using namespace Aws;
// Default result; member lists stay empty until assigned from a response.
BatchDescribeTypeConfigurationsResult::BatchDescribeTypeConfigurationsResult()
{
}
// Construct directly from a deserialized XML service response.
BatchDescribeTypeConfigurationsResult::BatchDescribeTypeConfigurationsResult(const Aws::AmazonWebServiceResult<XmlDocument>& result)
{
  *this = result;
}
// Populate the result from the response XML: unwrap the result element if
// the payload is wrapped, then collect the three "member" lists and the
// response metadata.
BatchDescribeTypeConfigurationsResult& BatchDescribeTypeConfigurationsResult::operator =(const Aws::AmazonWebServiceResult<XmlDocument>& result)
{
  const XmlDocument& xmlDocument = result.GetPayload();
  XmlNode rootNode = xmlDocument.GetRootElement();
  XmlNode resultNode = rootNode;
  // Some responses wrap the payload; descend to the inner result node.
  if (!rootNode.IsNull() && (rootNode.GetName() != "BatchDescribeTypeConfigurationsResult"))
  {
    resultNode = rootNode.FirstChild("BatchDescribeTypeConfigurationsResult");
  }
  if(!resultNode.IsNull())
  {
    XmlNode errorsNode = resultNode.FirstChild("Errors");
    if(!errorsNode.IsNull())
    {
      XmlNode errorsMember = errorsNode.FirstChild("member");
      while(!errorsMember.IsNull())
      {
        m_errors.push_back(errorsMember);
        errorsMember = errorsMember.NextNode("member");
      }
    }
    XmlNode unprocessedTypeConfigurationsNode = resultNode.FirstChild("UnprocessedTypeConfigurations");
    if(!unprocessedTypeConfigurationsNode.IsNull())
    {
      XmlNode unprocessedTypeConfigurationsMember = unprocessedTypeConfigurationsNode.FirstChild("member");
      while(!unprocessedTypeConfigurationsMember.IsNull())
      {
        m_unprocessedTypeConfigurations.push_back(unprocessedTypeConfigurationsMember);
        unprocessedTypeConfigurationsMember = unprocessedTypeConfigurationsMember.NextNode("member");
      }
    }
    XmlNode typeConfigurationsNode = resultNode.FirstChild("TypeConfigurations");
    if(!typeConfigurationsNode.IsNull())
    {
      XmlNode typeConfigurationsMember = typeConfigurationsNode.FirstChild("member");
      while(!typeConfigurationsMember.IsNull())
      {
        m_typeConfigurations.push_back(typeConfigurationsMember);
        typeConfigurationsMember = typeConfigurationsMember.NextNode("member");
      }
    }
  }
  // Response metadata (request id) hangs off the root, not the result node.
  if (!rootNode.IsNull()) {
    XmlNode responseMetadataNode = rootNode.FirstChild("ResponseMetadata");
    m_responseMetadata = responseMetadataNode;
    AWS_LOGSTREAM_DEBUG("Aws::CloudFormation::Model::BatchDescribeTypeConfigurationsResult", "x-amzn-request-id: " << m_responseMetadata.GetRequestId() );
  }
  return *this;
}
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2014, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "mappingFuncs.hpp"
// Compute azimuth/elevation of `to` as seen from `from`.
// Both arguments are 3x1 double matrices (x, y, z).
// Returns: .x = azimuth = atan2(dy, dx); .y = elevation = asin(dz / |d|).
cv::Point2d getAzEl(const cv::Mat& from, const cv::Mat& to)
{
    // Displacement vector from `from` to `to`.
    const double dx = to.at<double>(0) - from.at<double>(0);
    const double dy = to.at<double>(1) - from.at<double>(1);
    const double dz = to.at<double>(2) - from.at<double>(2);
    cv::Point3d diff(dx, dy, dz);

    cv::Point2d result;
    result.x = atan2(diff.y, diff.x);           // azimuth in the XY plane
    result.y = asin(diff.z / norm(diff));       // elevation above that plane
    return result;
}
//#endif
|
#include <vtkCellData.h>
#include <vtkCubeSource.h>
#include <vtkPointData.h>
#include <vtkPolyData.h>
#include <vtkSmartPointer.h>
#include <vtkSphereSource.h>
#include <vtkXMLPolyDataReader.h>
#include <vtkXMLPolyDataWriter.h>
#include <iostream>
#include <string>
#include <vector>
void FindAllData(vtkPolyData* polydata);
// Demo driver: load the .vtp file named on the command line, or — when no
// file is given — generate a sphere, save it as "test.vtp", and inspect it.
int main(int argc, char* argv[])
{
  vtkSmartPointer<vtkPolyData> polydata;
  if (argc >= 2)
  {
    auto reader = vtkSmartPointer<vtkXMLPolyDataReader>::New();
    reader->SetFileName(argv[1]);
    reader->Update();
    polydata = reader->GetOutput();
  }
  else
  {
    auto sphereSource = vtkSmartPointer<vtkSphereSource>::New();
    sphereSource->Update();
    // Persist the generated sphere so the example leaves a sample file behind.
    auto writer = vtkSmartPointer<vtkXMLPolyDataWriter>::New();
    writer->SetFileName("test.vtp");
    writer->SetInputConnection(sphereSource->GetOutputPort());
    writer->Write();
    polydata = sphereSource->GetOutput();
  }
  FindAllData(polydata);
  return EXIT_SUCCESS;
}
// Report the data arrays attached to `polydata`: the normals pointer, the
// number of point-data and cell-data arrays, and each array's name plus its
// numeric data-type id.
// Fixes: removed a stray empty statement (`;`) and stale comments that
// referenced a nonexistent `arrayNames` container.
void FindAllData(vtkPolyData* polydata)
{
  std::cout << "Normals: " << polydata->GetPointData()->GetNormals()
            << std::endl;
  vtkIdType numberOfPointArrays = polydata->GetPointData()->GetNumberOfArrays();
  std::cout << "Number of PointData arrays: " << numberOfPointArrays
            << std::endl;
  vtkIdType numberOfCellArrays = polydata->GetCellData()->GetNumberOfArrays();
  std::cout << "Number of CellData arrays: " << numberOfCellArrays << std::endl;
  // Legend for the numeric type ids printed below; more values can be found
  // in <VTK_DIR>/Common/vtkSetGet.h.
  std::cout << "Type table/key: " << std::endl;
  std::cout << VTK_UNSIGNED_CHAR << " unsigned char" << std::endl;
  std::cout << VTK_UNSIGNED_INT << " unsigned int" << std::endl;
  std::cout << VTK_FLOAT << " float" << std::endl;
  std::cout << VTK_DOUBLE << " double" << std::endl;
  for (vtkIdType i = 0; i < numberOfPointArrays; i++)
  {
    int dataTypeID = polydata->GetPointData()->GetArray(i)->GetDataType();
    std::cout << "Array " << i << ": "
              << polydata->GetPointData()->GetArrayName(i)
              << " (type: " << dataTypeID << ")" << std::endl;
  }
  for (vtkIdType i = 0; i < numberOfCellArrays; i++)
  {
    int dataTypeID = polydata->GetCellData()->GetArray(i)->GetDataType();
    std::cout << "Array " << i << ": "
              << polydata->GetCellData()->GetArrayName(i)
              << " (type: " << dataTypeID << ")" << std::endl;
  }
}
|
/******************************************************************************
** Copyright (c) 2015, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              **
******************************************************************************/
/* Narayanan Sundaram (Intel Corp.), Nadathur Satish (Intel Corp.)
* ******************************************************************************/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <unordered_map>
#include <immintrin.h>
#include "utils.h"
#include <iostream>
// Set bit `idx` in the packed bit vector `bitvec` (32 bits per int).
// The word is written back only when the bit was previously clear, which
// avoids redundant stores when a bit is set repeatedly.
inline void set_bitvector(unsigned int idx, int* bitvec) {
  const int word = idx >> 5;                 // which 32-bit word holds the bit
  const int mask = 1 << (idx & 0x1F);        // bit position within that word
  const unsigned int old_value = bitvec[word];
  if ((old_value & mask) == 0) {
    bitvec[word] = old_value | mask;
  }
}
// Test bit `idx` in the packed bit vector `bitvec` (32 bits per int).
inline bool get_bitvector(unsigned int idx, const int* bitvec) {
  const unsigned int word_value = bitvec[idx >> 5];
  return ((word_value >> (idx & 0x1F)) & 1u) != 0;
}
//------------------------------------------------------------
// Sparse vector with dense backing storage: a value array of `length`
// elements plus a packed bit vector marking which slots currently hold a
// meaningful value. clear() resets only the occupancy bits, leaving stale
// values in place.
template <class T>
class SparseVector {
  public:
  int *bitvector;   // occupancy bits, 32 per int, `numInts` ints total
  T* value;         // backing storage for all `length` slots
  int length;       // logical number of slots
  int numInts;      // bitvector size, padded up to a multiple of SIMD_WIDTH
  public:
  SparseVector(int n);
  SparseVector();
  // Store v at slot idx and mark the slot occupied.
  void set(int idx, const T& v);
  // Read slot i (bounds-checked only when __ASSERT is defined).
  const T& getValue(int i) const ;
  void resize(const int n);
  void clear();
  // Combine v into slot idx via gp->reduce_function, or store v if empty.
  template<class U, class V, class E>
  void reduce(const int idx, const T& v, const GraphProgram<U, T, V, E>* gp );
  void print();
  // Number of occupied slots (parallel popcount over the bit vector).
  int nnz() const;
  ~SparseVector();
};
// Role-distinguishing aliases; both share the same representation.
template<class T> using SparseInVector = SparseVector<T>;
template<class T> using SparseOutVector = SparseVector<T>;
//------------------------------------------------------------
// Mark slot `pos` occupied, then record its value.
template <class T>
void SparseVector<T>::set(int pos, const T& val) {
  set_bitvector(pos, bitvector);
  value[pos] = val;
}
// Return the value stored at `index`; bounds are asserted only in
// __ASSERT builds. The occupancy bit is NOT consulted here.
template <class T>
const T& SparseVector<T>::getValue(int index) const {
#ifdef __ASSERT
  assert(index >= 0 && index < length);
#endif
  return value[index];
}
// Default-construct an empty vector with no backing storage.
template <class T>
SparseVector<T>::SparseVector()
    : bitvector(NULL), value(NULL), length(0), numInts(0) {}
// Construct a vector with `n` slots; all slots start unoccupied.
template <class T>
SparseVector<T>::SparseVector(int n) {
  length = n;
  value = new T[length];
  // Bit-vector size rounded up to a multiple of SIMD_WIDTH ints, with
  // SIMD_WIDTH as the minimum.
  const int words = ((length / 32 + SIMD_WIDTH) / SIMD_WIDTH) * SIMD_WIDTH;
  numInts = std::max(SIMD_WIDTH, words);
  bitvector = new int[numInts];
  memset(bitvector, 0, numInts * sizeof(int));
}
// Release both arrays. delete[] on a null pointer is a no-op, so the
// explicit null checks of the original are unnecessary.
template <class T>
SparseVector<T>::~SparseVector() {
  delete [] value;
  value = NULL;
  delete [] bitvector;
  bitvector = NULL;
  length = 0;
}
// Mark every slot unoccupied. Only the occupancy bits are reset; the
// value array keeps its (now stale) contents.
template <class T>
void SparseVector<T>::clear() {
  if (length != 0) {
    memset(bitvector, 0, sizeof(int) * numInts);
  }
}
// Discard all storage and reallocate for `n` slots.
// NOTE: allocates n+1 elements (length becomes n+1) so that index n itself
// is addressable — this mirrors the original behavior, which differs from
// the (int n) constructor.
template <class T>
void SparseVector<T>::resize(int n) {
  delete [] value;
  value = NULL;
  delete [] bitvector;
  bitvector = NULL;

  length = n + 1;
  value = new T[length];
  const int words = ((length / 32 + SIMD_WIDTH) / SIMD_WIDTH) * SIMD_WIDTH;
  numInts = std::max(SIMD_WIDTH, words);   // multiple of SIMD_WIDTH
  bitvector = new int[numInts];
  memset(bitvector, 0, numInts * sizeof(int));
}
// Print the index and value of every occupied slot.
// BUG FIX: the loop previously ran `i <= length`, reading value[length] —
// one element past the end of the `new T[length]` allocation made by both
// the constructor and resize().
template <class T>
void SparseVector<T>::print() {
  std::cout << "Printing vector \n";
  for (int i = 0; i < length; i++) {
    if (get_bitvector(i, bitvector)) {
      std::cout << i << " " << value[i] << "\n";
    }
  }
}
// Count the occupied slots by popcounting the whole bit vector in
// parallel (OpenMP reduction over 32-bit words).
template <class T>
int SparseVector<T>::nnz() const {
  int len = 0;
  #pragma omp parallel for num_threads(nthreads) schedule(guided, 128) reduction(+:len)
  for (int w = 0; w < numInts; w++) {
    len += _popcnt32(bitvector[w]);
  }
  return len;
}
// Fold `v` into slot `idx`: the first write to an empty slot stores the
// value; subsequent writes combine with it via gp->reduce_function.
template <class T>
template <class U, class V, class E>
void SparseVector<T>::reduce(int idx, const T& v, const GraphProgram<U, T, V, E>* gp ) {
  if (!get_bitvector(idx, bitvector)) {
    set_bitvector(idx, bitvector);
    value[idx] = v;
  } else {
    gp->reduce_function(value[idx], v);
  }
}
//-------------------------------------
|
#include <ros/ros.h>
//#include <tf/transform_broadcaster.h>
#include <tf2/LinearMath/Quaternion.h>
#include <tf2_ros/transform_broadcaster.h>
#include <geometry_msgs/PoseStamped.h>
#include <std_msgs/Float32MultiArray.h>
#include <std_msgs/Float32.h>
#include <std_msgs/Int32MultiArray.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/MagneticField.h>
#include <nav_msgs/Odometry.h>
#include <string>
#include <math.h>
#include <vector>
using namespace std;
#define DEG_TO_RAD 0.01745329252
// Latest raw samples received from the teensy board (sized generously at 30;
// the indices actually read are documented at their use sites below).
vector<int32_t> int_sensor_data(30);
vector<float> float_sensor_data(30);
// Scratch message reused for every "robot_linear_vel" publication.
std_msgs::Float32 linear_vel;
// Node parameters (set in main() from ~tf_publish and ~odom_2d).
bool tf_publish=true;
bool odom_2d=true;
// Publisher is global because it is used from the subscription callback.
ros::Publisher vel_pub;
void int_sensor_data_callback(const std_msgs::Int32MultiArray& int_sensor_data_row){
int_sensor_data=int_sensor_data_row.data;
}
// Cache the latest float sensor array, publish the current linear velocity
// on "robot_linear_vel", and (when tf_publish is set) broadcast the
// odom -> base_link transform built from the pose fields.
// Index map assumed from usage below — TODO confirm against the teensy
// firmware: [0..2] 3D position, [9..12] orientation quaternion,
// [13] linear velocity, [18..19] 2D position.
void float_sensor_data_callback(const std_msgs::Float32MultiArray& float_sensor_data_row){
    static std::string odom_id="odom";
    static std::string base_link_id="base_link";
    float_sensor_data=float_sensor_data_row.data;
    // m/s -> km/h (*3.6) with an extra 0.5 factor — presumably a
    // calibration/gear scaling; verify against the robot.
    linear_vel.data=float_sensor_data_row.data[13]*3.6*0.5;
    vel_pub.publish(linear_vel);
    /* Legacy tf1 broadcaster, kept for reference:
    static tf::TransformBroadcaster br;
    tf::Transform transform;
    std::string odom_id="odom";
    std::string base_link_id="base_link";
    //3D
    transform.setOrigin( tf::Vector3(float_sensor_data[1], float_sensor_data[0], float_sensor_data[2]) );
    //2D
    //transform.setOrigin( tf::Vector3(float_sensor_data[19], float_sensor_data[18], 0.0) );
    tf::Quaternion q;
    //q.setRPY(0, 0, float_sensor_data[14]);
    q.setX(-float_sensor_data[10]);
    q.setY(float_sensor_data[9]);
    q.setZ(float_sensor_data[11]);
    q.setW(float_sensor_data[12]);
    transform.setRotation(q);
    br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), odom_id,base_link_id));
    */
    static tf2_ros::TransformBroadcaster br;
    geometry_msgs::TransformStamped transformStamped;
    transformStamped.header.stamp = ros::Time::now();
    transformStamped.header.frame_id = odom_id;
    transformStamped.child_frame_id = base_link_id;
    if(odom_2d){
        // 2D: planar pose. Note the axis remap: x <- [19], y <- [18]
        // (presumably sensor-frame to ROS convention — confirm).
        transformStamped.transform.translation.x = float_sensor_data[19];
        transformStamped.transform.translation.y = float_sensor_data[18];
        transformStamped.transform.translation.z = 0;
    }
    else{
        // 3D: x <- [1], y <- [0], z <- [2] (same remap caveat as above).
        transformStamped.transform.translation.x = float_sensor_data[1];
        transformStamped.transform.translation.y = float_sensor_data[0];
        transformStamped.transform.translation.z = float_sensor_data[2];
    }
    // Quaternion with an axis swap and a sign flip on the sensor's x
    // component — assumed frame conversion; TODO confirm.
    transformStamped.transform.rotation.x = -float_sensor_data[10];
    transformStamped.transform.rotation.y = float_sensor_data[9];
    transformStamped.transform.rotation.z = float_sensor_data[11];
    transformStamped.transform.rotation.w = float_sensor_data[12];
    if(tf_publish){
        br.sendTransform(transformStamped);
    }
}
// Node entry point: relays teensy sensor data as standard ROS topics at
// 100 Hz — nav_msgs/Odometry on "odom", sensor_msgs/Imu on "imu/data",
// sensor_msgs/MagneticField on "imu/mag" — built from the most recently
// received float_sensor_data array.
// BUG FIX: the magnetometer section previously incremented seq_imu a second
// time instead of seq_mag, so the mag header.seq never advanced and the IMU
// sequence advanced by two per cycle.
int main(int argc, char **argv){
    ros::init(argc, argv, "teensy_handler");
    ros::NodeHandle n;
    ros::Rate loop_rate(100);
    ros::NodeHandle lSubscriber("");
    //ros::Subscriber int_sub = lSubscriber.subscribe("int_sensor_data", 50, int_sensor_data_callback);
    ros::Subscriber float_sub = lSubscriber.subscribe("float_sensor_data", 50, float_sensor_data_callback);
    vel_pub = n.advertise<std_msgs::Float32>("robot_linear_vel", 10);
    ros::Publisher odom_pub = n.advertise<nav_msgs::Odometry>("odom", 10);
    ros::Publisher imu_pub = n.advertise<sensor_msgs::Imu>("imu/data", 10);
    ros::Publisher mag_pub = n.advertise<sensor_msgs::MagneticField>("imu/mag", 10);
    // Parameters: whether the callback broadcasts TF, and whether odometry
    // is projected onto the 2D plane.
    ros::NodeHandle pn("~");
    pn.param<bool>("tf_publish", tf_publish, true);
    odom_2d=pn.param<bool>("odom_2d",true);
    while (n.ok()) {
        // --- Odometry ----------------------------------------------------
        nav_msgs::Odometry odom;
        static uint32_t seq_odom=0;
        odom.header.frame_id="odom";
        odom.header.stamp=ros::Time::now();
        odom.header.seq=seq_odom;
        odom.child_frame_id="base_link";
        if(odom_2d){
            //2D: x <- [19], y <- [18] (sensor-to-ROS axis remap, presumed)
            odom.pose.pose.position.x=float_sensor_data[19];
            odom.pose.pose.position.y=float_sensor_data[18];
            odom.pose.pose.position.z=0.0;
        }
        else{
            //3D: x <- [1], y <- [0], z <- [2]
            odom.pose.pose.position.x=float_sensor_data[1];
            odom.pose.pose.position.y=float_sensor_data[0];
            odom.pose.pose.position.z=float_sensor_data[2];
        }
        odom.pose.pose.orientation.x=-float_sensor_data[10];
        odom.pose.pose.orientation.y=float_sensor_data[9];
        odom.pose.pose.orientation.z=float_sensor_data[11];
        odom.pose.pose.orientation.w=float_sensor_data[12];
        odom.twist.twist.linear.x=float_sensor_data[13];
        odom.twist.twist.angular.x=-float_sensor_data.at(7);
        odom.twist.twist.angular.y=float_sensor_data.at(6);
        odom.twist.twist.angular.z=float_sensor_data.at(8);
        // Diagonal pose covariance: x, y, z, roll, pitch, yaw.
        odom.pose.covariance.at(0)=2.0;
        odom.pose.covariance.at(7)=2.0;
        odom.pose.covariance.at(14)=1.0;//3.0;
        odom.pose.covariance.at(21)=0.00;
        odom.pose.covariance.at(28)=0.00;
        odom.pose.covariance.at(35)=0.01;//0.02;
        seq_odom++;
        // --- IMU ---------------------------------------------------------
        sensor_msgs::Imu imu;
        static uint32_t seq_imu=0;
        imu.header.frame_id="imu_link";
        imu.header.stamp=ros::Time::now();
        imu.header.seq=seq_imu;
        imu.linear_acceleration.x=-float_sensor_data.at(4);
        imu.linear_acceleration.y=float_sensor_data.at(3);
        imu.linear_acceleration.z=float_sensor_data.at(5);
        imu.linear_acceleration_covariance.at(0)=0.0005356910249999999;
        imu.linear_acceleration_covariance.at(4)=0.0005356910249999999;
        imu.linear_acceleration_covariance.at(8)=0.0005356910249999999;
        imu.angular_velocity.x=-float_sensor_data.at(7);
        imu.angular_velocity.y=float_sensor_data.at(6);
        imu.angular_velocity.z=float_sensor_data.at(8);
        imu.angular_velocity_covariance.at(0)=1.12805641/1000000.0;
        imu.angular_velocity_covariance.at(4)=1.12805641/1000000.0;
        imu.angular_velocity_covariance.at(8)=1.12805641/1000000.0;
        imu.orientation.x=-float_sensor_data.at(10);
        imu.orientation.y=float_sensor_data.at(9);
        imu.orientation.z=float_sensor_data.at(11);
        imu.orientation.w=float_sensor_data.at(12);
        seq_imu++;
        // --- Magnetometer ------------------------------------------------
        sensor_msgs::MagneticField mag;
        static uint32_t seq_mag=0;
        mag.header.frame_id="imu_link";
        mag.header.stamp=ros::Time::now();
        mag.header.seq=seq_mag;
        mag.magnetic_field.x=float_sensor_data.at(15);
        mag.magnetic_field.y=float_sensor_data.at(16);
        mag.magnetic_field.z=float_sensor_data.at(17);
        seq_mag++;  // was seq_imu++ — copy-paste bug, see header comment
        odom_pub.publish(odom);
        imu_pub.publish(imu);
        mag_pub.publish(mag);
        ros::spinOnce();
        loop_rate.sleep();
    }
}
|
/*
*add_notes.cpp
*
*This function uses a matrix of probabilities to add the possible chords in an
*alrorithmic/random manner. To do this, it uses the key class to create the set of
*possible chords. Then, it adds them until a specific criteria is met, and returns
*the final vector of chords.
*
*The function defined here is called from the generate_note_file.cpp file
*
*/
#include "add_notes.hpp"
// Picks the next chord index for one Markov transition row: `random` (in
// [0,1)) is compared against the running cumulative probability of entries
// 0..3; if it exceeds all of them, the last chord (index 4) is chosen.
// The cumulative sums are accumulated left-to-right, matching the
// associativity of the original hand-expanded comparisons exactly.
static int pick_next_chord(const double (&row)[5], double random){
	double cumulative = 0;
	for(int next = 0; next < 4; next++){
		cumulative += row[next];
		if(random < cumulative){
			return next;
		}
	}
	return 4;
}

// Generates a chord progression for the given tonic via a Markov chain.
// Chords keep being appended until the piece is at least 20 chords long AND
// it has just produced a V -> I (5-1) cadence. Returns the full progression.
// Refactor: the five 17-line copy-pasted branches (one per previous chord)
// are collapsed into a row lookup plus pick_next_chord(); one rand() is
// consumed per loop iteration, exactly as before.
vector<vector<int>> add_notes(int tonic){
	// Scale built from the tonic; supplies the 5 candidate chords.
	Keys scale(tonic);
	vector<vector<int>> set_of_chords;
	// Markov transition matrix: probabilities[prev][next] is the chance of
	// moving from chord `prev` to chord `next`.
	// Rows/columns: 0=I, 1=IV, 2=V, 3=VI, 4=flatVI.
	double probabilities[5][5] = {
		{.3, .6, .1,  0,  0},  // from chord I
		{.1, .2, .5,  0, .2},  // from chord IV
		{.2, .1, .3, .4,  0},  // from chord V
		{ 0,  0,  0,  0,  1},  // from chord VI
		{ 0,  1,  0,  0,  0}   // from chord flatVI
	};
	// Seed with two tonic chords so the loop below can look back one step.
	set_of_chords.push_back(scale.get_chords()[0]);
	set_of_chords.push_back(scale.get_chords()[0]);
	int counter = 1;
	while(set_of_chords.size() < 20 || !(set_of_chords[counter-1] == scale.get_chords()[2] && set_of_chords[counter] == scale.get_chords()[0])){
		// Random number in [0,1]; selects the successor via the
		// cumulative probabilities of the previous chord's row.
		double random = (double)rand() / RAND_MAX;
		for(int prev = 0; prev < 5; prev++){
			if(set_of_chords[counter] == scale.get_chords()[prev]){
				int next = pick_next_chord(probabilities[prev], random);
				set_of_chords.push_back(scale.get_chords()[next]);
				break;
			}
		}
		// One chord is appended per iteration; counter tracks the index of
		// the most recently appended chord.
		counter += 1;
	}
	return set_of_chords;
}
|
#pragma once
#include <atomic>
#include <type_traits>
#include <vector>
#include <cstdint>
#include <cstdio>
// Fixed-capacity concurrent queue. Each slot pairs an element with an
// atomic generation tag encoding the slot's state:
//   -1            : empty
//   2*g + 1 (odd) : a producer is writing generation g
//   2*g     (even): generation g is ready for a consumer
//   -2            : a consumer is reading
// `back` is a monotone high-water mark of slot indices handed to producers;
// FetchMax_ is a callable performing an atomic fetch-max on it.
template <typename T_, size_t Size_, typename FetchMax_> struct queue_t {
  static_assert(std::is_nothrow_default_constructible_v<T_>);
  static_assert(std::is_nothrow_copy_constructible_v<T_>);
  static_assert(std::is_nothrow_swappable_v<T_>);
  using element_t = T_;
  static constexpr int size = Size_;
  static constexpr FetchMax_ fetch_max = {};
  struct entry_t {
    element_t item{};         // a queue element
    std::atomic<int> tag{-1}; // its generation number (see state table above)
  };
  entry_t elts[size] = {}; // a bounded array
  std::atomic<int> back{-1};
  explicit queue_t() noexcept {
    // Visit elements to ensure that backing memory is:
    // 1. not over-committed
    // 2. paged in and
    // 3. stored in TLB
    // The accumulated product is printed solely so the loads cannot be
    // optimized away.
    int d = 1;
    for (size_t i = 0; i < size; i += 16) {
      auto &e = elts[i];
      d *= e.tag.load(std::memory_order_relaxed);
    }
    std::fprintf(::stderr, "Check: %i\n\n", d);
  }
  friend auto enqueue(queue_t &queue, element_t x) noexcept -> bool {
    // get a slot in the array for the new element
    int i = queue.back.load(std::memory_order_acquire);
    while (true) {
      if (++i >= size) {
        // NOTE(review): the search stops once the index passes `size`, so
        // the queue admits at most `size` claimed slots in total — confirm
        // whether wraparound reuse via generations was intended.
        return false;
      }
      // exchange the new element with slots value if that slot has not been
      // used
      int empty = -1; // expected tag for an empty slot
      auto &e = queue.elts[i % size];
      // use two-step write: first store an odd value while we are writing the
      // new element
      if (e.tag.compare_exchange_strong(empty, (i / size) * 2 + 1,
                                        std::memory_order_acq_rel,
                                        std::memory_order_acquire)) {
        using std::swap;
        swap(x, e.item);
        // done writing, switch tag to even (ie. ready)
        e.tag.store((i / size) * 2, std::memory_order_seq_cst);
        break;
      }
    }
    // reset the value of back
    fetch_max(&queue.back, i, std::memory_order_release);
    return true;
  }
  friend auto dequeue(queue_t &queue) noexcept -> element_t {
    // NOTE(review): spins forever if the queue stays empty — callers must
    // guarantee an element eventually arrives.
    while (true) { // keep trying until an element is found
      int range = queue.back.load(); // search up to back slots
      for (int i = 0; i <= range; i++) {
        int ready = (i / size) * 2; // expected even tag for ready slot
        auto &e = queue.elts[i % size];
        // use two-step read: first store -2 while we are reading the element
        if (std::atomic_compare_exchange_strong(&e.tag, &ready, -2)) {
          using std::swap;
          element_t ret{};
          swap(ret, e.item);
          e.tag.store(-1); // done reading, switch tag to -1 (ie. empty)
          return ret;
        }
      }
    }
  }
};
|
#include <FishEngine/AssetBundle.hpp>
|
#include "CommandRecorder.h"
// Creates one command allocator + command list pair per swap-chain buffer.
// D3D12 command lists are created in the recording state, so each is closed
// immediately; Reset() reopens a pair for recording.
ZE::CommandRecorder::CommandRecorder(ID3D12Device5* device, D3D12_COMMAND_LIST_TYPE type)
{
	for (UINT i = 0; i < NUM_SWAP_BUFFERS; i++)
	{
		// NOTE(review): hr is assigned but never checked — a failed
		// creation leaves null entries that later calls dereference;
		// confirm whether failure handling was intended.
		HRESULT hr = device->CreateCommandAllocator(type, IID_PPV_ARGS(&this->_commandAllocators[i]));
		hr = device->CreateCommandList(0,
			type,
			this->_commandAllocators[i],
			nullptr,
			IID_PPV_ARGS(&this->_commandLists[i]));
		this->_commandLists[i]->Close();
	}
}
// Release the per-frame allocators and command lists.
ZE::CommandRecorder::~CommandRecorder()
{
	for (unsigned int frame = 0; frame < NUM_SWAP_BUFFERS; ++frame)
	{
		SAFE_RELEASE(&_commandAllocators[frame]);
		SAFE_RELEASE(&_commandLists[frame]);
	}
}
// Reopen the pair at `index` for recording: the allocator is reset first,
// then the command list is reset against it (no initial pipeline state).
void ZE::CommandRecorder::Reset(int index)
{
	_commandAllocators[index]->Reset();
	_commandLists[index]->Reset(_commandAllocators[index], nullptr);
}
// Accessor for the command list of the given frame index (non-owning).
ID3D12GraphicsCommandList5* ZE::CommandRecorder::GetCommandList(int index)
{
	return _commandLists[index];
}
// Accessor for the command allocator of the given frame index (non-owning).
ID3D12CommandAllocator* ZE::CommandRecorder::GetCommandAllocator(int index)
{
	return _commandAllocators[index];
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ElemExtensionCall.hpp"
#include <xalanc/PlatformSupport/XalanMessageLoader.hpp>
#include "Constants.hpp"
#include "Stylesheet.hpp"
#include "StylesheetConstructionContext.hpp"
#include "StylesheetExecutionContext.hpp"
namespace XALAN_CPP_NAMESPACE {
// An extension element encountered in a stylesheet. Behaves as a literal
// result element, but additionally records its expanded QName (resolved
// against the in-scope namespaces) and the namespace handler that owns the
// extension, for later availability checks.
ElemExtensionCall::ElemExtensionCall(
            StylesheetConstructionContext&  constructionContext,
            Stylesheet&                     stylesheetTree,
            const XalanDOMChar*             name,
            const AttributeListType&        atts,
            XalanFileLoc                    lineNumber,
            XalanFileLoc                    columnNumber,
            ExtensionNSHandler&             ns) :
    ElemLiteralResult(constructionContext,
                      stylesheetTree,
                      name,
                      atts,
                      lineNumber,
                      columnNumber,
                      StylesheetConstructionContext::ELEMNAME_EXTENSION_CALL),
    // Resolve the element's (possibly prefixed) name to a QName.
    m_qname(
        constructionContext.createXalanQName(
            name,
            getStylesheet().getNamespaces(),
            getLocator())),
    m_nsh(ns)
{
    assert(m_qname != 0);
}
// Factory: allocates an ElemExtensionCall from `theManager` and constructs
// it with placement new. The allocation guard frees the raw memory if the
// constructor throws; on success, ownership passes to the caller.
ElemExtensionCall*
ElemExtensionCall::create(
            MemoryManager&                  theManager,
            StylesheetConstructionContext&  constructionContext,
            Stylesheet&                     stylesheetTree,
            const XalanDOMChar*             name,
            const AttributeListType&        atts,
            XalanFileLoc                    lineNumber,
            XalanFileLoc                    columnNumber,
            ExtensionNSHandler&             ns)
{
    typedef ElemExtensionCall   ThisType;
    XalanAllocationGuard    theGuard(theManager, theManager.allocate(sizeof(ThisType)));
    ThisType* const     theResult =
        new (theGuard.get()) ThisType(constructionContext,
                                      stylesheetTree,
                                      name,
                                      atts,
                                      lineNumber,
                                      columnNumber,
                                      ns);
    // Construction succeeded — release the guard's ownership of the memory.
    theGuard.release();
    return theResult;
}
#if !defined(XALAN_RECURSIVE_STYLESHEET_EXECUTION)
// Non-recursive execution path: emit the "extensions not handled" warning,
// then continue with the first executable child (children are filtered by
// executeChildElement below).
const ElemTemplateElement*
ElemExtensionCall::startElement(StylesheetExecutionContext&     executionContext) const
{
    ElemTemplateElement::startElement(executionContext);
    warn(
        executionContext,
        XalanMessages::XalanHandleExtensions);
    return ElemTemplateElement::getFirstChildElemToExecute(executionContext);
}

// No end-of-element work is needed for an unhandled extension element.
void
ElemExtensionCall::endElement(StylesheetExecutionContext&   /*executionContext*/) const
{
}

// Only xsl:fallback children of an extension element are executed.
bool
ElemExtensionCall::executeChildElement(
            StylesheetExecutionContext&     /*executionContext*/,
            const ElemTemplateElement*      element) const
{
    return element->getXSLToken() == StylesheetConstructionContext::ELEMNAME_FALLBACK;
}
#endif
#if defined(XALAN_RECURSIVE_STYLESHEET_EXECUTION)
// Recursive execution path: emit the "extensions not handled" warning, then
// execute only the xsl:fallback children.
void
ElemExtensionCall::execute(StylesheetExecutionContext&  executionContext) const
{
    ElemTemplateElement::execute(executionContext);
    warn(
        executionContext,
        XalanMessages::XalanHandleExtensions);
    for (const ElemTemplateElement* child = getFirstChildElem(); child != 0; child = child->getNextSiblingElem())
    {
        if(child->getXSLToken() == StylesheetConstructionContext::ELEMNAME_FALLBACK)
        {
            child->execute(executionContext);
        }
    }
}
#endif
bool
ElemExtensionCall::elementAvailable(StylesheetExecutionContext& executionContext) const
{
return executionContext.elementAvailable(*m_qname);
}
}
|
// Copyright (c) 2014 GitHub, Inc.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
#include "atom/app/atom_content_client.h"
#include <string>
#include <vector>
#include "atom/common/options_switches.h"
#include "base/command_line.h"
#include "base/files/file_util.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "content/public/common/content_constants.h"
#include "content/public/common/pepper_plugin_info.h"
#include "electron/buildflags/buildflags.h"
#include "ppapi/shared_impl/ppapi_permissions.h"
#include "ui/base/l10n/l10n_util.h"
#include "ui/base/resource/resource_bundle.h"
#include "url/url_constants.h"
// In SHARED_INTERMEDIATE_DIR.
#include "widevine_cdm_version.h" // NOLINT(build/include)
#if defined(WIDEVINE_CDM_AVAILABLE)
#include "base/native_library.h"
#include "content/public/common/cdm_info.h"
#include "media/base/video_codecs.h"
#endif // defined(WIDEVINE_CDM_AVAILABLE)
#if BUILDFLAG(ENABLE_PDF_VIEWER)
#include "atom/common/atom_constants.h"
#include "pdf/pdf.h"
#endif // BUILDFLAG(ENABLE_PDF_VIEWER)
namespace atom {
namespace {
#if defined(WIDEVINE_CDM_AVAILABLE)
// Locates the Widevine CDM library (directory taken from the
// --widevine-cdm-path switch) and, when found, reports the codecs, session
// types, and encryption modes the bundled CDM is assumed to support.
// The filesystem probe runs once and is memoized in a function-local static.
bool IsWidevineAvailable(
    base::FilePath* cdm_path,
    std::vector<media::VideoCodec>* codecs_supported,
    base::flat_set<media::CdmSessionType>* session_types_supported,
    base::flat_set<media::EncryptionMode>* modes_supported) {
  static enum {
    NOT_CHECKED,
    FOUND,
    NOT_FOUND,
  } widevine_cdm_file_check = NOT_CHECKED;
  if (widevine_cdm_file_check == NOT_CHECKED) {
    base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
    *cdm_path = command_line->GetSwitchValuePath(switches::kWidevineCdmPath);
    if (!cdm_path->empty()) {
      // The switch names the CDM directory; append the platform-specific
      // library file name before probing for existence.
      *cdm_path = cdm_path->AppendASCII(
          base::GetNativeLibraryName(kWidevineCdmLibraryName));
      widevine_cdm_file_check = base::PathExists(*cdm_path) ? FOUND : NOT_FOUND;
    }
  }
  if (widevine_cdm_file_check == FOUND) {
    // Add the supported codecs as if they came from the component manifest.
    // This list must match the CDM that is being bundled with Chrome.
    codecs_supported->push_back(media::VideoCodec::kCodecVP8);
    codecs_supported->push_back(media::VideoCodec::kCodecVP9);
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
    codecs_supported->push_back(media::VideoCodec::kCodecH264);
#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
    // TODO(crbug.com/767941): Push persistent-license support info here once
    // we check in a new CDM that supports it on Linux.
    session_types_supported->insert(media::CdmSessionType::kTemporary);
#if defined(OS_CHROMEOS)
    session_types_supported->insert(media::CdmSessionType::kPersistentLicense);
#endif  // defined(OS_CHROMEOS)
    modes_supported->insert(media::EncryptionMode::kCenc);
    return true;
  }
  return false;
}
#endif  // defined(WIDEVINE_CDM_AVAILABLE)
#if BUILDFLAG(ENABLE_PEPPER_FLASH)
// Builds the plugin registration for an out-of-process Pepper Flash plugin
// located at `path`, normalizing `version` to four dotted components and
// deriving the user-visible description and MIME types from it.
content::PepperPluginInfo CreatePepperFlashInfo(const base::FilePath& path,
                                                const std::string& version) {
  content::PepperPluginInfo plugin;
  plugin.is_out_of_process = true;
  plugin.name = content::kFlashPluginName;
  plugin.path = path;
  plugin.permissions = ppapi::PERMISSION_ALL_BITS;
  std::vector<std::string> flash_version_numbers = base::SplitString(
      version, ".", base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
  // Pad/replace missing version components with plausible defaults.
  if (flash_version_numbers.empty())
    flash_version_numbers.push_back("11");
  // |SplitString()| puts in an empty string given an empty string. :(
  else if (flash_version_numbers[0].empty())
    flash_version_numbers[0] = "11";
  if (flash_version_numbers.size() < 2)
    flash_version_numbers.push_back("2");
  if (flash_version_numbers.size() < 3)
    flash_version_numbers.push_back("999");
  if (flash_version_numbers.size() < 4)
    flash_version_numbers.push_back("999");
  // E.g., "Shockwave Flash 10.2 r154":
  plugin.description = plugin.name + " " + flash_version_numbers[0] + "." +
                       flash_version_numbers[1] + " r" +
                       flash_version_numbers[2];
  plugin.version = base::JoinString(flash_version_numbers, ".");
  // Register both Flash MIME types (.swf and .spl).
  content::WebPluginMimeType swf_mime_type(content::kFlashPluginSwfMimeType,
                                           content::kFlashPluginSwfExtension,
                                           content::kFlashPluginSwfDescription);
  plugin.mime_types.push_back(swf_mime_type);
  content::WebPluginMimeType spl_mime_type(content::kFlashPluginSplMimeType,
                                           content::kFlashPluginSplExtension,
                                           content::kFlashPluginSplDescription);
  plugin.mime_types.push_back(spl_mime_type);
  return plugin;
}
// Registers a Pepper Flash plugin when --ppapi-flash-path is present on the
// command line, using --ppapi-flash-version (possibly empty) as its version.
void AddPepperFlashFromCommandLine(
    base::CommandLine* command_line,
    std::vector<content::PepperPluginInfo>* plugins) {
  const base::FilePath path =
      command_line->GetSwitchValuePath(switches::kPpapiFlashPath);
  if (path.empty())
    return;
  const std::string version =
      command_line->GetSwitchValueASCII(switches::kPpapiFlashVersion);
  plugins->push_back(CreatePepperFlashInfo(path, version));
}
#endif // BUILDFLAG(ENABLE_PEPPER_FLASH)
// Appends plugins that are linked into the binary itself. Currently only the
// internal PDF viewer (when built with ENABLE_PDF_VIEWER).
void ComputeBuiltInPlugins(std::vector<content::PepperPluginInfo>* plugins) {
#if BUILDFLAG(ENABLE_PDF_VIEWER)
  content::PepperPluginInfo pdf_info;
  pdf_info.is_internal = true;
  pdf_info.is_out_of_process = true;
  pdf_info.name = "Chromium PDF Viewer";
  pdf_info.description = "Portable Document Format";
  pdf_info.path = base::FilePath::FromUTF8Unsafe(kPdfPluginPath);
  content::WebPluginMimeType pdf_mime_type(kPdfPluginMimeType, "pdf",
                                           "Portable Document Format");
  pdf_info.mime_types.push_back(pdf_mime_type);
  // Internal plugin: wire the entry points directly instead of loading a
  // shared library from |path|.
  pdf_info.internal_entry_points.get_interface = chrome_pdf::PPP_GetInterface;
  pdf_info.internal_entry_points.initialize_module =
      chrome_pdf::PPP_InitializeModule;
  pdf_info.internal_entry_points.shutdown_module =
      chrome_pdf::PPP_ShutdownModule;
  pdf_info.permissions = ppapi::PERMISSION_PRIVATE | ppapi::PERMISSION_DEV;
  plugins->push_back(pdf_info);
#endif  // BUILDFLAG(ENABLE_PDF_VIEWER)
}
// Splits the value of command-line switch |cmd_switch| on |separator| and
// stores the whitespace-trimmed, non-empty pieces in |*vec|.
//
// |*vec| is always overwritten. The previous implementation only assigned
// when the switch value was non-empty, which left stale contents behind when
// callers reuse one vector across several calls (AddAdditionalSchemes does
// exactly that), leaking one switch's entries into another scheme list.
void ConvertStringWithSeparatorToVector(std::vector<std::string>* vec,
                                        const char* separator,
                                        const char* cmd_switch) {
  auto* command_line = base::CommandLine::ForCurrentProcess();
  auto string_with_separator = command_line->GetSwitchValueASCII(cmd_switch);
  // SplitString() with SPLIT_WANT_NONEMPTY yields an empty vector for an
  // empty input, so unconditional assignment both parses and clears.
  *vec = base::SplitString(string_with_separator, separator,
                           base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
}
} // namespace
// No state of its own beyond the base ContentClient; bodies intentionally
// empty, defined out of line.
AtomContentClient::AtomContentClient() {}

AtomContentClient::~AtomContentClient() {}
// Returns the localized UI string for |message_id| from the resource bundle.
base::string16 AtomContentClient::GetLocalizedString(int message_id) const {
  return l10n_util::GetStringUTF16(message_id);
}
// Returns raw (non-owned) resource bytes for |resource_id| at the requested
// scale factor.
base::StringPiece AtomContentClient::GetDataResource(
    int resource_id,
    ui::ScaleFactor scale_factor) const {
  return ui::ResourceBundle::GetSharedInstance().GetRawDataResourceForScale(
      resource_id, scale_factor);
}
// Returns the bundled native image identified by |resource_id|.
gfx::Image& AtomContentClient::GetNativeImageNamed(int resource_id) const {
  return ui::ResourceBundle::GetSharedInstance().GetNativeImageNamed(
      resource_id);
}
// Returns the resource payload for |resource_id| as ref-counted memory.
base::RefCountedMemory* AtomContentClient::GetDataResourceBytes(
    int resource_id) const {
  return ui::ResourceBundle::GetSharedInstance().LoadDataResourceBytes(
      resource_id);
}
// Registers extra URL schemes (service-worker, standard, secure, CSP-bypass,
// CORS-enabled) from their respective comma-separated command-line switches.
void AtomContentClient::AddAdditionalSchemes(Schemes* schemes) {
  // Parses the switch into a fresh vector each time and appends to |*out|.
  // The original code reused one |splited| vector for all five switches;
  // because ConvertStringWithSeparatorToVector leaves its output untouched
  // for an absent/empty switch, stale entries from an earlier switch could
  // leak into a later scheme list. A per-call vector makes that impossible.
  auto append_schemes_from_switch = [](const char* cmd_switch,
                                       std::vector<std::string>* out) {
    std::vector<std::string> parsed;
    ConvertStringWithSeparatorToVector(&parsed, ",", cmd_switch);
    out->insert(out->end(), parsed.begin(), parsed.end());
  };

  append_schemes_from_switch(switches::kServiceWorkerSchemes,
                             &schemes->service_worker_schemes);
  schemes->service_worker_schemes.push_back(url::kFileScheme);

  append_schemes_from_switch(switches::kStandardSchemes,
                             &schemes->standard_schemes);
  schemes->standard_schemes.push_back("chrome-extension");

  append_schemes_from_switch(switches::kSecureSchemes,
                             &schemes->secure_schemes);

  append_schemes_from_switch(switches::kBypassCSPSchemes,
                             &schemes->csp_bypassing_schemes);

  append_schemes_from_switch(switches::kCORSSchemes,
                             &schemes->cors_enabled_schemes);
}
// Collects all Pepper plugins: command-line-specified Flash (when enabled)
// plus the plugins compiled into the binary.
void AtomContentClient::AddPepperPlugins(
    std::vector<content::PepperPluginInfo>* plugins) {
#if BUILDFLAG(ENABLE_PEPPER_FLASH)
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  AddPepperFlashFromCommandLine(command_line, plugins);
#endif  // BUILDFLAG(ENABLE_PEPPER_FLASH)
  ComputeBuiltInPlugins(plugins);
}
// Registers the Widevine CDM (when available on this build/platform) with the
// capabilities reported by IsWidevineAvailable(). |cdm_host_file_paths| is
// not populated here.
void AtomContentClient::AddContentDecryptionModules(
    std::vector<content::CdmInfo>* cdms,
    std::vector<media::CdmHostFilePath>* cdm_host_file_paths) {
  if (cdms) {
#if defined(WIDEVINE_CDM_AVAILABLE)
    base::FilePath cdm_path;
    std::vector<media::VideoCodec> video_codecs_supported;
    base::flat_set<media::CdmSessionType> session_types_supported;
    base::flat_set<media::EncryptionMode> encryption_modes_supported;
    if (IsWidevineAvailable(&cdm_path, &video_codecs_supported,
                            &session_types_supported,
                            &encryption_modes_supported)) {
      // The version is supplied externally via --widevine-cdm-version.
      base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
      auto cdm_version_string =
          command_line->GetSwitchValueASCII(switches::kWidevineCdmVersion);
      // CdmInfo needs |path| to be the actual Widevine library,
      // not the adapter, so adjust as necessary. It will be in the
      // same directory as the installed adapter.
      const base::Version version(cdm_version_string);
      DCHECK(version.IsValid());
      content::CdmCapability capability(
          video_codecs_supported, encryption_modes_supported,
          session_types_supported, base::flat_set<media::CdmProxy::Protocol>());
      // Last argument |supports_sub_key_systems| is false.
      cdms->push_back(content::CdmInfo(
          kWidevineCdmDisplayName, kWidevineCdmGuid, version, cdm_path,
          kWidevineCdmFileSystemId, capability, kWidevineKeySystem, false));
    }
#endif  // defined(WIDEVINE_CDM_AVAILABLE)
  }
}
} // namespace atom
|
//------------------------------------------------------------------------------
//
// Copyright 2018-2020 Fetch.AI Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------
#include "test_types.hpp"
#include "ml/ops/weights.hpp"
#include "ml/regularisers/l1_regulariser.hpp"
#include "ml/regularisers/l2_regulariser.hpp"
#include "ml/regularisers/regularisation.hpp"
#include "vectorise/fixed_point/fixed_point.hpp"
#include "gtest/gtest.h"
namespace fetch {
namespace ml {
namespace test {
// Typed test fixture for fetch::ml::ops::Weights; instantiated once per
// tensor floating-point type.
template <typename T>
class WeightsTest : public ::testing::Test
{
};

// Run every TYPED_TEST below for each type in TensorFloatingTypes.
TYPED_TEST_SUITE(WeightsTest, math::test::TensorFloatingTypes, );
// Smoke test: a Weights op must be default-constructible for every type.
TYPED_TEST(WeightsTest, allocation_test)
{
  fetch::ml::ops::Weights<TypeParam> w;
}
// L1 regularisation at rate 0.1 must move each weight 0.1 towards zero when
// a zero gradient is applied.
TYPED_TEST(WeightsTest, l1_regulariser_test)
{
  using TensorType = TypeParam;
  using DataType   = typename TypeParam::Type;
  using RegType    = fetch::ml::regularisers::L1Regulariser<TensorType>;

  auto const rate        = fetch::math::Type<DataType>("0.1");
  auto       regulariser = std::make_shared<RegType>();

  TensorType weights_data = TensorType::FromString("1, -2, 3, -4, 5, -6, 7, -8");
  TensorType expected = TensorType::FromString("0.9, -1.9, 2.9, -3.9, 4.9, -5.9, 6.9, -7.9");

  fetch::ml::ops::Weights<TensorType> weights_op;
  weights_op.SetData(weights_data);
  weights_op.SetRegularisation(regulariser, rate);

  // A zero gradient step applies only the regularisation penalty.
  TensorType zero_grad = weights_op.GetGradients();
  zero_grad.Fill(DataType{0});
  weights_op.ApplyGradient(zero_grad);

  // Forward pass reads back the regularised weights.
  TensorType result(weights_op.ComputeOutputShape({}));
  weights_op.Forward({}, result);

  ASSERT_TRUE(result.AllClose(expected, fetch::math::function_tolerance<DataType>(),
                              fetch::math::function_tolerance<DataType>()));
}
// L2 regularisation at rate 0.1 must shrink each weight multiplicatively
// (towards 0.8x of its value here) when a zero gradient is applied.
TYPED_TEST(WeightsTest, l2_regulariser_test)
{
  using TensorType = TypeParam;
  using DataType   = typename TypeParam::Type;
  using RegType    = fetch::ml::regularisers::L2Regulariser<TensorType>;

  // Initialise values
  auto regularisation_rate = fetch::math::Type<DataType>("0.1");
  auto regulariser         = std::make_shared<RegType>();
  TensorType data          = TensorType::FromString("1, -2, 3, -4, 5, -6, 7, -8");
  TensorType gt = TensorType::FromString("0.8, -1.6, 2.4, -3.2, 4.0, -4.8, 5.6, -6.4");

  fetch::ml::ops::Weights<TensorType> w;
  w.SetData(data);

  // Apply regularisation; a zero gradient isolates the penalty's effect.
  w.SetRegularisation(regulariser, regularisation_rate);
  TensorType grad = w.GetGradients();
  grad.Fill(DataType{0});
  w.ApplyGradient(grad);

  // Evaluate weight
  TensorType prediction(w.ComputeOutputShape({}));
  w.Forward({}, prediction);

  // Test actual values
  ASSERT_TRUE(prediction.AllClose(gt, fetch::math::function_tolerance<DataType>(),
                                  fetch::math::function_tolerance<DataType>()));
}
} // namespace test
} // namespace ml
} // namespace fetch
|
#include <iostream>
#include <string>
#include <utility>

#include <InsOrdVecMap.h>
// Manual exercise/demo driver for InsOrdVecMap (an insertion-ordered
// vector-backed map). Prints state after each mutation for eyeballing.
// NOTE(review): relies on std::cout/std::endl without including <iostream>
// directly — presumably pulled in transitively via InsOrdVecMap.h; confirm.
int main()
{
    InsOrdVecMap<std::string, std::string> insOrdVecMap {
        {"john", "male"},
        {"natasha","female"},
        {"sam", "male"},
        {"adam", "male"}
    };
    std::cout << "insOrdVecMap: " << std::endl;
    std::cout << "Current Size:" << insOrdVecMap.size() << std::endl;
    std::cout << "Capacity: " << insOrdVecMap.capacity() << std::endl;
    insOrdVecMap.print(); //debug function
    std::cout << "\n";

    // Copy construction, then append to the copy only.
    decltype(insOrdVecMap) insOrdVecMap1 = insOrdVecMap;
    insOrdVecMap1.push_back(std::make_pair("rene", "female")); //debug function
    std::cout << "insOrdVecMap1: " << std::endl;
    std::cout << "Current Size:" << insOrdVecMap1.size() << std::endl;
    std::cout << "Capacity: " << insOrdVecMap1.capacity() << std::endl;
    // Positional access (0-based index 2 is the third entry).
    std::cout << "Value at 3 is: " << insOrdVecMap1.at(2).first << "::" << insOrdVecMap1.at(2).second << std::endl;
    // Key-based access and in-place mutation through operator[].
    std::cout << "Value of inOrdVecMap[\"adam\"] is " << insOrdVecMap["adam"] << std::endl;
    insOrdVecMap["sam"].assign(std::string("raghu"));
    std::cout << "Value of inOrdVecMap[\"sam\"] is " << insOrdVecMap["sam"] << std::endl;
    insOrdVecMap1.print();
    std::cout << "\n";
    std::cout << "insOrdVecMap1 after pop_front: " <<std::endl;
    insOrdVecMap1.pop_front();
    insOrdVecMap1.print();
    std::cout << "\n";
    std::cout << "insOrdVecMap1 after pop_back: " <<std::endl;
    insOrdVecMap1.pop_back();
    insOrdVecMap1.print();
    // clear() empties the container; capacity afterwards is printed to show
    // whether storage is retained.
    insOrdVecMap1.clear();
    std::cout << "\n";
    std::cout << "insOrdVecMap1 after clear: " <<std::endl;
    std::cout << "Current Size:" << insOrdVecMap1.size() << std::endl;
    std::cout << "Capacity: " << insOrdVecMap1.capacity() << std::endl;
    insOrdVecMap1.print();
    return 0;
}
|
// Distributed under the MIT License (See
// accompanying file "LICENSE" or the website
// http://www.opensource.org/licenses/mit-license.php)
#include "ParsingContext.h"
#include "Techniques.h"
#include "SystemUniformsDelegate.h"
#include "RenderPass.h"
#include "../../Assets/AssetUtils.h"
#include "../../Utility/StringFormat.h"
#include "../../Utility/ArithmeticUtils.h"
#include <memory>
namespace RenderCore { namespace Techniques
{
    // Records a pending/invalid asset retrieval failure that occurred during
    // rendering, de-duplicating by initializer string so each asset is listed
    // once. Invalid assets additionally append the exception text to
    // _errorString (e.g. to surface shader compile errors in tooling).
    void ParsingContext::Process(const ::Assets::Exceptions::RetrievalError& e)
    {
            //  Handle a "invalid asset" and "pending asset" exception that
            //  occurred during rendering. Normally this will just mean
            //  reporting the assert to the screen.
            //
            //  These happen fairly often -- particularly when just starting up, or
            //  when changing rendering settings.
            //  at the moment, this will result in a bunch of allocations -- that's not
            //  ideal during error processing.
        auto* id = e.Initializer();
        // Choose the buffer by failure state: pending vs invalid.
        auto* bufferStart = _stringHelpers->_pendingAssets;
        if (e.State() == ::Assets::AssetState::Invalid)
            bufferStart = _stringHelpers->_invalidAssets;

        static_assert(
            dimof(_stringHelpers->_pendingAssets) == dimof(_stringHelpers->_invalidAssets),
            "Assuming pending and invalid asset buffers are the same length");

        // Only append if this initializer isn't already recorded (case-insensitive).
        if (!XlFindStringI(bufferStart, id)) {
            StringMeldAppend(bufferStart, bufferStart + dimof(_stringHelpers->_pendingAssets)) << "," << id;

            if (e.State() == ::Assets::AssetState::Invalid) {
                // Writing the exception string into "_errorString" here can help to pass shader error message
                // back to the PreviewRenderManager for the material tool
                StringMeldAppend(_stringHelpers->_errorString, ArrayEnd(_stringHelpers->_errorString)) << e.what() << "\n";
            }
        }
    }
FragmentStitchingContext& ParsingContext::GetFragmentStitchingContext()
{
if (!_stitchingContext)
_stitchingContext = std::make_unique<FragmentStitchingContext>();
return *_stitchingContext;
}
    // Binds this parsing context to a technique context and thread context
    // (both borrowed, must outlive this object) and allocates the helper
    // state used during a frame.
    ParsingContext::ParsingContext(TechniqueContext& techniqueContext, IThreadContext& threadContext)
    : _techniqueContext(&techniqueContext)
    , _threadContext(&threadContext)
    {
        assert(_techniqueContext);
        _stringHelpers = std::make_unique<StringHelpers>();
        _internal = std::make_unique<Internal>();
        // Internal is expected to be 16-byte aligned (verified, not enforced).
        assert(size_t(_internal.get()) % 16 == 0);
        // Share the uniform delegate manager owned by the technique context.
        _uniformDelegateManager = _techniqueContext->_uniformDelegateManager;
    }

    ParsingContext::~ParsingContext() {}
    // Start every accumulated-message buffer as an empty C string.
    ParsingContext::StringHelpers::StringHelpers()
    {
        _errorString[0] = _pendingAssets[0] = _invalidAssets[0] = _quickMetrics[0] = '\0';
    }
}}
|
/*!
* @section LICENSE
*
* @copyright
* Copyright (c) 2015-2017 Intel Corporation
*
* @copyright
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* @copyright
* http://www.apache.org/licenses/LICENSE-2.0
*
* @copyright
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* */
#pragma once
#include <string>
// Canned JSON payload describing a fabric management controller — presumably
// mock agent data for tests; confirm against the consuming test harness.
// NOTE(review): `static` in a header gives each translation unit its own
// mutable copy of these strings.
static std::string FabricManager1 =
    R"({
    "collections": [
        {
            "name": "Fabrics",
            "slotMask": "SlotMask",
            "type": "Fabrics"
        }
    ],
    "commandShell": {
        "enabled": null,
        "maxSessions": null,
        "typesSupported": []
    },
    "dateTime": null,
    "dateTimeLocalOffset": null,
    "firmwareVersion": "2.58",
    "graphicalConsole": {
        "enabled": null,
        "maxSessions": null,
        "typesSupported": []
    },
    "guid": "123e4567-e89b-ffff-a456-426655440000",
    "ipv4Address": "1.1.2.1",
    "location": null,
    "model": "DD43",
    "networkServices": [
        {
            "enabled": false,
            "name": "IPMI",
            "port": 0
        },
        {
            "enabled": false,
            "name": "SSH",
            "port": 0
        },
        {
            "enabled": false,
            "name": "Telnet",
            "port": 0
        }
    ],
    "oem": {},
    "parentId": "",
    "serialConsole": {
        "bitrate": 115200,
        "dataBits": 8,
        "enabled": true,
        "flowControl": "None",
        "maxSessions": null,
        "parity": "None",
        "pinOut": "Cisco",
        "signalType": "Rs232",
        "stopBits": 1,
        "typesSupported": []
    },
    "status": {
        "health": "OK",
        "state": "Enabled"
    },
    "type": "ManagementController"
})";
// Canned JSON for a PCIe fabric resource exposing Zones/Endpoints/Switches
// collections (mock data).
static std::string Fabric1 =
    R"({
    "collections": [
        {
            "name": "Zones",
            "slotMask": "SlotMask",
            "type": "Zones"
        },
        {
            "name": "Endpoints",
            "slotMask": "SlotMask",
            "type": "Endpoints"
        },
        {
            "name": "Switches",
            "slotMask": "SlotMask",
            "type": "Switches"
        }
    ],
    "oem": {},
    "protocol": "PCIe",
    "status": {
        "health": "OK",
        "state": "Enabled"
    }
})";
// Canned JSON for a fabric zone containing only an Endpoints collection.
static std::string FabricZone1 =
    R"({
    "collections": [
        {
            "name": "Endpoints",
            "slotMask": "SlotMask",
            "type": "Endpoints"
        }
    ],
    "oem": {},
    "status": {
        "health": "OK",
        "state": "Enabled"
    }
})";
// Second fabric zone fixture; identical shape to FabricZone1.
static std::string FabricZone2 =
    R"({
    "collections": [
        {
            "name": "Endpoints",
            "slotMask": "SlotMask",
            "type": "Endpoints"
        }
    ],
    "oem": {},
    "status": {
        "health": "OK",
        "state": "Enabled"
    }
})";
// Canned JSON for a PCIe endpoint (initiator root complex) in zone 1.
static std::string Endpoint1InZone1 =
    R"({
    "collections": [
        {
            "name": "Ports",
            "slotMask": "SlotMask",
            "type": "Ports"
        }
    ],
    "entities": [
        {
            "entity": null,
            "entityRole": "Initiator",
            "entityType": "RootComplex"
        }
    ],
    "identifiers": [],
    "oem": {},
    "protocol": "PCIe",
    "status": {
        "health": "OK",
        "state": "Enabled"
    }
})";
// Canned JSON for the first PCIe endpoint in zone 2; same shape as above.
static std::string Endpoint1InZone2 =
    R"({
    "collections": [
        {
            "name": "Ports",
            "slotMask": "SlotMask",
            "type": "Ports"
        }
    ],
    "entities": [
        {
            "entity": null,
            "entityRole": "Initiator",
            "entityType": "RootComplex"
        }
    ],
    "identifiers": [],
    "oem": {},
    "protocol": "PCIe",
    "status": {
        "health": "OK",
        "state": "Enabled"
    }
})";
// Canned JSON for the second PCIe endpoint in zone 2; same shape as above.
static std::string Endpoint2InZone2 =
    R"({
    "collections": [
        {
            "name": "Ports",
            "slotMask": "SlotMask",
            "type": "Ports"
        }
    ],
    "entities": [
        {
            "entity": null,
            "entityRole": "Initiator",
            "entityType": "RootComplex"
        }
    ],
    "identifiers": [],
    "oem": {},
    "protocol": "PCIe",
    "status": {
        "health": "OK",
        "state": "Enabled"
    }
})";
|
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Copyright (c) 2014-2017 The Dash Core developers
// Copyright (c) 2017-2018 The BroFist Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/brofist-config.h"
#endif
#include "addressbookpage.h"
#include "ui_addressbookpage.h"
#include "addresstablemodel.h"
#include "bitcoingui.h"
#include "csvmodelwriter.h"
#include "editaddressdialog.h"
#include "guiutil.h"
#include "platformstyle.h"
#include <QIcon>
#include <QMenu>
#include <QMessageBox>
#include <QSortFilterProxyModel>
// Builds the address book dialog. |mode| selects pick-an-address vs manage
// behavior; |tab| selects sending vs receiving addresses. The table model is
// attached later via setModel().
AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode mode, Tabs tab, QWidget *parent) :
    QDialog(parent),
    ui(new Ui::AddressBookPage),
    model(0),
    mode(mode),
    tab(tab)
{
    QString theme = GUIUtil::getThemeName();
    ui->setupUi(this);

    // Icons are themed; omit them entirely on platforms that don't show
    // images on buttons.
    if (!platformStyle->getImagesOnButtons()) {
        ui->newAddress->setIcon(QIcon());
        ui->copyAddress->setIcon(QIcon());
        ui->deleteAddress->setIcon(QIcon());
        ui->exportButton->setIcon(QIcon());
    } else {
        ui->newAddress->setIcon(QIcon(":/icons/" + theme + "/add"));
        ui->copyAddress->setIcon(QIcon(":/icons/" + theme + "/editcopy"));
        ui->deleteAddress->setIcon(QIcon(":/icons/" + theme + "/remove"));
        ui->exportButton->setIcon(QIcon(":/icons/" + theme + "/export"));
    }

    switch(mode)
    {
    case ForSelection:
        // Selection mode: double-click accepts, table is read-only, and the
        // close button becomes "Choose".
        switch(tab)
        {
        case SendingTab: setWindowTitle(tr("Choose the address to send coins to")); break;
        case ReceivingTab: setWindowTitle(tr("Choose the address to receive coins with")); break;
        }
        connect(ui->tableView, SIGNAL(doubleClicked(QModelIndex)), this, SLOT(accept()));
        ui->tableView->setEditTriggers(QAbstractItemView::NoEditTriggers);
        ui->tableView->setFocus();
        ui->closeButton->setText(tr("C&hoose"));
        ui->exportButton->hide();
        break;
    case ForEditing:
        switch(tab)
        {
        case SendingTab: setWindowTitle(tr("Sending addresses")); break;
        case ReceivingTab: setWindowTitle(tr("Receiving addresses")); break;
        }
        break;
    }
    // Deleting is only offered for sending addresses.
    switch(tab)
    {
    case SendingTab:
        ui->labelExplanation->setText(tr("These are your BroFist addresses for sending payments. Always check the amount and the receiving address before sending coins."));
        ui->deleteAddress->setVisible(true);
        break;
    case ReceivingTab:
        ui->labelExplanation->setText(tr("These are your BroFist addresses for receiving payments. It is recommended to use a new receiving address for each transaction."));
        ui->deleteAddress->setVisible(false);
        break;
    }

    // Context menu actions
    QAction *copyAddressAction = new QAction(tr("&Copy Address"), this);
    QAction *copyLabelAction = new QAction(tr("Copy &Label"), this);
    QAction *editAction = new QAction(tr("&Edit"), this);
    deleteAction = new QAction(ui->deleteAddress->text(), this);

    // Build context menu
    contextMenu = new QMenu();
    contextMenu->addAction(copyAddressAction);
    contextMenu->addAction(copyLabelAction);
    contextMenu->addAction(editAction);
    if(tab == SendingTab)
        contextMenu->addAction(deleteAction);
    contextMenu->addSeparator();

    // Connect signals for context menu actions
    connect(copyAddressAction, SIGNAL(triggered()), this, SLOT(on_copyAddress_clicked()));
    connect(copyLabelAction, SIGNAL(triggered()), this, SLOT(onCopyLabelAction()));
    connect(editAction, SIGNAL(triggered()), this, SLOT(onEditAction()));
    connect(deleteAction, SIGNAL(triggered()), this, SLOT(on_deleteAddress_clicked()));

    connect(ui->tableView, SIGNAL(customContextMenuRequested(QPoint)), this, SLOT(contextualMenu(QPoint)));

    connect(ui->closeButton, SIGNAL(clicked()), this, SLOT(accept()));
}
// Frees the generated UI; child QWidgets/QObjects are reclaimed by Qt's
// parent-child ownership.
AddressBookPage::~AddressBookPage()
{
    delete ui;
}
// Attaches the address table model, wrapping it in a sort/filter proxy that
// restricts rows to this page's tab (send vs receive) and keeps the view
// sorted case-insensitively.
void AddressBookPage::setModel(AddressTableModel *model)
{
    this->model = model;
    if(!model)
        return;

    proxyModel = new QSortFilterProxyModel(this);
    proxyModel->setSourceModel(model);
    proxyModel->setDynamicSortFilter(true);
    proxyModel->setSortCaseSensitivity(Qt::CaseInsensitive);
    proxyModel->setFilterCaseSensitivity(Qt::CaseInsensitive);
    switch(tab)
    {
    case ReceivingTab:
        // Receive filter
        proxyModel->setFilterRole(AddressTableModel::TypeRole);
        proxyModel->setFilterFixedString(AddressTableModel::Receive);
        break;
    case SendingTab:
        // Send filter
        proxyModel->setFilterRole(AddressTableModel::TypeRole);
        proxyModel->setFilterFixedString(AddressTableModel::Send);
        break;
    }
    ui->tableView->setModel(proxyModel);
    ui->tableView->sortByColumn(0, Qt::AscendingOrder);

    // Set column widths
#if QT_VERSION < 0x050000
    ui->tableView->horizontalHeader()->setResizeMode(AddressTableModel::Label, QHeaderView::Stretch);
    ui->tableView->horizontalHeader()->setResizeMode(AddressTableModel::Address, QHeaderView::ResizeToContents);
#else
    ui->tableView->horizontalHeader()->setSectionResizeMode(AddressTableModel::Label, QHeaderView::Stretch);
    ui->tableView->horizontalHeader()->setSectionResizeMode(AddressTableModel::Address, QHeaderView::ResizeToContents);
#endif

    connect(ui->tableView->selectionModel(), SIGNAL(selectionChanged(QItemSelection,QItemSelection)),
        this, SLOT(selectionChanged()));

    // Select row for newly created address
    connect(model, SIGNAL(rowsInserted(QModelIndex,int,int)), this, SLOT(selectNewAddress(QModelIndex,int,int)));

    // Initialize button enable/disable state for the empty selection.
    selectionChanged();
}
// Copies the selected row's address column to the clipboard.
void AddressBookPage::on_copyAddress_clicked()
{
    GUIUtil::copyEntryData(ui->tableView, AddressTableModel::Address);
}
// Copies the selected row's label column to the clipboard.
void AddressBookPage::onCopyLabelAction()
{
    GUIUtil::copyEntryData(ui->tableView, AddressTableModel::Label);
}
void AddressBookPage::onEditAction()
{
if(!model)
return;
if(!ui->tableView->selectionModel())
return;
QModelIndexList indexes = ui->tableView->selectionModel()->selectedRows();
if(indexes.isEmpty())
return;
EditAddressDialog dlg(
tab == SendingTab ?
EditAddressDialog::EditSendingAddress :
EditAddressDialog::EditReceivingAddress, this);
dlg.setModel(model);
QModelIndex origIndex = proxyModel->mapToSource(indexes.at(0));
dlg.loadRow(origIndex.row());
dlg.exec();
}
// Opens the "new address" dialog; on success remembers the created address
// so selectNewAddress() can highlight its row when the model inserts it.
void AddressBookPage::on_newAddress_clicked()
{
    if(!model)
        return;

    EditAddressDialog dlg(
        tab == SendingTab ?
        EditAddressDialog::NewSendingAddress :
        EditAddressDialog::NewReceivingAddress, this);
    dlg.setModel(model);
    if(dlg.exec())
    {
        newAddressToSelect = dlg.getAddress();
    }
}
void AddressBookPage::on_deleteAddress_clicked()
{
QTableView *table = ui->tableView;
if(!table->selectionModel())
return;
QModelIndexList indexes = table->selectionModel()->selectedRows();
if(!indexes.isEmpty())
{
table->model()->removeRow(indexes.at(0).row());
}
}
// Enables/disables the copy and delete buttons to match the current
// selection; deletion is only ever allowed on the sending tab.
void AddressBookPage::selectionChanged()
{
    // Set button states based on selected tab and selection
    QTableView *table = ui->tableView;
    if(!table->selectionModel())
        return;

    if(table->selectionModel()->hasSelection())
    {
        switch(tab)
        {
        case SendingTab:
            // In sending tab, allow deletion of selection
            ui->deleteAddress->setEnabled(true);
            ui->deleteAddress->setVisible(true);
            deleteAction->setEnabled(true);
            break;
        case ReceivingTab:
            // Deleting receiving addresses, however, is not allowed
            ui->deleteAddress->setEnabled(false);
            ui->deleteAddress->setVisible(false);
            deleteAction->setEnabled(false);
            break;
        }
        ui->copyAddress->setEnabled(true);
    }
    else
    {
        ui->deleteAddress->setEnabled(false);
        ui->copyAddress->setEnabled(false);
    }
}
// Dialog close hook: captures the selected address into returnValue for the
// caller (selection mode); rejects if nothing was selected.
void AddressBookPage::done(int retval)
{
    QTableView *table = ui->tableView;
    if(!table->selectionModel() || !table->model())
        return;

    // Figure out which address was selected, and return it
    QModelIndexList indexes = table->selectionModel()->selectedRows(AddressTableModel::Address);

    Q_FOREACH (const QModelIndex& index, indexes) {
        QVariant address = table->model()->data(index);
        returnValue = address.toString();
    }

    if(returnValue.isEmpty())
    {
        // If no address entry selected, return rejected
        retval = Rejected;
    }

    QDialog::done(retval);
}
// Exports the currently filtered/sorted address list to a user-chosen CSV
// file, reporting failures with a message box.
void AddressBookPage::on_exportButton_clicked()
{
    // CSV is currently the only supported format
    QString filename = GUIUtil::getSaveFileName(this,
        tr("Export Address List"), QString(),
        tr("Comma separated file (*.csv)"), NULL);

    if (filename.isNull())
        return;

    CSVModelWriter writer(filename);

    // name, column, role
    writer.setModel(proxyModel);
    writer.addColumn("Label", AddressTableModel::Label, Qt::EditRole);
    writer.addColumn("Address", AddressTableModel::Address, Qt::EditRole);

    if(!writer.write()) {
        QMessageBox::critical(this, tr("Exporting Failed"),
            tr("There was an error trying to save the address list to %1. Please try again.").arg(filename));
    }
}
void AddressBookPage::contextualMenu(const QPoint &point)
{
QModelIndex index = ui->tableView->indexAt(point);
if(index.isValid())
{
contextMenu->exec(QCursor::pos());
}
}
// Slot for the model's rowsInserted signal: if the inserted row matches the
// address remembered by on_newAddress_clicked(), select it once and forget it.
void AddressBookPage::selectNewAddress(const QModelIndex &parent, int begin, int /*end*/)
{
    QModelIndex idx = proxyModel->mapFromSource(model->index(begin, AddressTableModel::Address, parent));
    if(idx.isValid() && (idx.data(Qt::EditRole).toString() == newAddressToSelect))
    {
        // Select row of newly created address, once
        ui->tableView->setFocus();
        ui->tableView->selectRow(idx.row());
        newAddressToSelect.clear();
    }
}
|
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <cmath>
#include "mace/core/operator.h"
namespace mace {
namespace ops {
template <DeviceType D, class T>
class LocalResponseNormOp;

// CPU/float local response normalization over the channel dimension of an
// NCHW tensor: out[c] = in[c] * (bias + alpha * sum_{c'} in[c']^2)^(-beta),
// where c' ranges over [c - depth_radius, c + depth_radius] clamped to the
// valid channel range.
template <>
class LocalResponseNormOp<DeviceType::CPU, float> : public Operation {
 public:
  explicit LocalResponseNormOp(OpConstructContext *context)
      : Operation(context),
        depth_radius_(Operation::GetOptionalArg<int>("depth_radius", 5)),
        bias_(Operation::GetOptionalArg<float>("bias", 1.0f)),
        alpha_(Operation::GetOptionalArg<float>("alpha", 1.0f)),
        beta_(Operation::GetOptionalArg<float>("beta", 0.5f)) {}

  MaceStatus Run(OpContext *context) override {
    MACE_UNUSED(context);
    const Tensor *input = this->Input(0);
    MACE_CHECK(input->dim_size() == 4, "input must be 4-dimensional. ",
               input->dim_size());
    Tensor *output = this->Output(0);
    MACE_RETURN_IF_ERROR(output->ResizeLike(input));

    // Dim order used below matches NCHW indexing: ((b*C + c)*H + h)*W + w.
    const index_t batch = input->dim(0);
    const index_t channels = input->dim(1);
    const index_t height = input->dim(2);
    const index_t width = input->dim(3);

    const float *input_ptr = input->data<float>();
    float *output_ptr = output->mutable_data<float>();

    index_t image_size = height * width;
    index_t batch_size = channels * image_size;

#pragma omp parallel for collapse(2) schedule(runtime)
    for (index_t b = 0; b < batch; ++b) {
      for (index_t c = 0; c < channels; ++c) {
        // Channel window [begin_input_c, end_input_c) clamped to [0, C).
        const int begin_input_c = std::max(static_cast<index_t>(0),
                                           c - depth_radius_);
        const int end_input_c = std::min(channels, c + depth_radius_ + 1);

        // |pos| walks the spatial plane of batch b; channel offsets are
        // added explicitly via input_c * image_size.
        index_t pos = b * batch_size;
        for (index_t hw = 0; hw < height * width; ++hw, ++pos) {
          // Sum of squares over the channel window at this spatial location.
          float accum = 0.f;
          for (int input_c = begin_input_c; input_c < end_input_c; ++input_c) {
            const float input_val = input_ptr[pos + input_c * image_size];
            accum += input_val * input_val;
          }
          const float multiplier = std::pow(bias_ + alpha_ * accum, -beta_);
          output_ptr[pos + c * image_size] =
              input_ptr[pos + c * image_size] * multiplier;
        }
      }
    }

    return MaceStatus::MACE_SUCCESS;
  }

 private:
  int depth_radius_;  // half-width of the channel window
  float bias_;        // additive constant inside the norm term
  float alpha_;       // scale on the sum of squares
  float beta_;        // exponent of the norm term
};
// Registers the CPU/float implementation under the "LocalResponseNorm" name.
void RegisterLocalResponseNorm(OpRegistryBase *op_registry) {
  MACE_REGISTER_OP(op_registry, "LocalResponseNorm",
                   LocalResponseNormOp, DeviceType::CPU, float);
}
} // namespace ops
} // namespace mace
|
#include "arrays_pointers.h"
// Exercise driver for the arrays/pointers examples. Earlier experiments are
// kept commented out; currently only display_array on a zero-initialized
// array is run.
int main()
{
    //arrays_and_pointers();

    //int times_table[ROWS][COLS];
    //populate_time_table(times_table, ROWS);
    //display_times_table(times_table, ROWS);
    int nums[ROWS]{};  // value-initialized: all zeros
    display_array(nums, ROWS);
    return 0;
}
|
/*
* This file belongs to the Galois project, a C++ library for exploiting
* parallelism. The code is being released under the terms of the 3-Clause BSD
* License (a copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
#include "galois/gIO.h"
#include "galois/substrate/SimpleLock.h"
#include "galois/substrate/EnvCheck.h"
#include "galois/substrate/ThreadPool.h"
#include <cstdlib>
#include <cstdio>
#include <ctime>
#include <cstring>
#include <cstdarg>
#include <cerrno>
#include <unistd.h>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <mutex>
// Serialized write of "prefix: s\n" to stdout (or stderr when |error|); the
// shared lock keeps concurrent log lines from interleaving.
static void printString(bool error, bool newline, const std::string& prefix,
                        const std::string& s) {
  static galois::substrate::SimpleLock IOLock;
  std::lock_guard<decltype(IOLock)> lock(IOLock);

  std::ostream& out = error ? std::cerr : std::cout;
  if (!prefix.empty()) {
    out << prefix << ": ";
  }
  out << s;
  if (newline) {
    out << "\n";
  }
}
// Emits a debug line tagged with wall-clock time and thread ID. Behavior is
// controlled by environment variables: GALOIS_DEBUG_SKIP suppresses all
// output (checked once); GALOIS_DEBUG_TO_FILE redirects output to a mkstemp
// file ("gdebugXXXXXX") instead of stderr.
void galois::gDebugStr(const std::string& s) {
  static bool skip = galois::substrate::EnvCheck("GALOIS_DEBUG_SKIP");
  if (skip)
    return;

  static const unsigned TIME_STR_SIZE = 32;
  char time_str[TIME_STR_SIZE];
  time_t rawtime;
  struct tm* timeinfo;

  time(&rawtime);
  timeinfo = localtime(&rawtime);

  strftime(time_str, TIME_STR_SIZE, "[%H:%M:%S]", timeinfo);
  std::ostringstream os;
  os << "[" << time_str << " " << std::setw(3)
     << galois::substrate::ThreadPool::getTID() << "] " << s;

  if (galois::substrate::EnvCheck("GALOIS_DEBUG_TO_FILE")) {
    // Lazily open the log file once; the lock serializes both the open and
    // subsequent appends across threads.
    static galois::substrate::SimpleLock dIOLock;
    std::lock_guard<decltype(dIOLock)> lock(dIOLock);
    static std::ofstream debugOut;
    if (!debugOut.is_open()) {
      char fname[] = "gdebugXXXXXX";
      int fd       = mkstemp(fname);
      close(fd);  // only the unique name is needed; reopen via ofstream
      debugOut.open(fname);
      gInfo("Debug output going to ", fname);
    }
    debugOut << os.str() << "\n";
    debugOut.flush();
  } else {
    printString(true, true, "DEBUG", os.str());
  }
}
// Raw print to stdout: no prefix, no trailing newline.
void galois::gPrintStr(const std::string& s) {
  printString(false, false, "", s);
}

// "INFO: ..." line to stdout.
void galois::gInfoStr(const std::string& s) {
  printString(false, true, "INFO", s);
}

// "WARNING: ..." line to stdout.
void galois::gWarnStr(const std::string& s) {
  printString(false, true, "WARNING", s);
}

// "ERROR: ..." line to stderr.
void galois::gErrorStr(const std::string& s) {
  printString(true, true, "ERROR", s);
}

// Flushes C stdout (printString writes via C++ streams; this flushes the
// underlying stdio buffer).
void galois::gFlush() { fflush(stdout); }
|
/**
* @file openssl_exception.cpp
*
*/
#include "derecho/openssl/openssl_exception.hpp"
namespace openssl {
/**
 * A plain C function matching the type expected by ERR_print_errors_cb that
 * appends each provided error string to a std::stringstream.
 *
 * Returns a positive value so ERR_print_errors_cb keeps iterating over the
 * error queue; a return value <= 0 tells it to abort, which previously made
 * this collect only the FIRST queued error.
 */
int openssl_error_callback(const char* str, size_t len, void* user_data) {
    std::stringstream* the_stringstream = reinterpret_cast<std::stringstream*>(user_data);
    (*the_stringstream) << str << std::endl;
    return 1;
}
/**
 * Collects the error strings pending in OpenSSL's error queue (via
 * openssl_error_callback, one per line) into a single std::string.
 */
std::string openssl_errors_to_string() {
    std::stringstream error_str;
    ERR_print_errors_cb(openssl_error_callback, &error_str);
    return error_str.str();
}
/**
 * Renders a single OpenSSL error code as human-readable text, preceded by
 * the caller-supplied message and a space.
 */
std::string get_error_string(unsigned long error_code, const std::string& extra_message) {
    // ERR_error_string_n always NUL-terminates, truncating if the
    // description would exceed the buffer.
    const size_t buf_size = 512; //I hope this is big enough
    char string_buf[buf_size];
    ERR_error_string_n(error_code, string_buf, buf_size);
    return extra_message + " " + string_buf;
}
} // namespace openssl
|
/*
* File: Logging.cpp
*
* Copyright (c) Freescale Semiconductor, Inc. All rights reserved.
* See included license file for license details.
*/
#include "Logging.h"
#include <stdarg.h>
#include <stdio.h>
#include "smart_ptr.h"
// init global logger to null
Logger *Log::s_logger = NULL;
//! Format and log a printf-style message at this logger's default level.
void Logger::log(const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    log(m_level, fmt, args);
    va_end(args);
}
//! Format and log a printf-style message at an explicit level.
void Logger::log(log_level_t level, const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    log(level, fmt, args);
    va_end(args);
}
//! va_list variant; forwards to the (level, fmt, va_list) overload using
//! the logger's default level.
void Logger::log(const char *fmt, va_list args)
{
    log(m_level, fmt, args);
}
//! Formats the message into a temporary 1KB buffer and hands it to _log()
//! if \a level passes the filter. The filter check happens first so
//! filtered messages pay no formatting cost, and vsnprintf bounds the
//! write (the previous vsprintf could overflow the buffer on long
//! messages).
void Logger::log(log_level_t level, const char *fmt, va_list args)
{
    if (level > m_filter)
    {
        return;
    }
    smart_array_ptr<char> buffer = new char[1024];
    vsnprintf(buffer, 1024, fmt, args);
    _log(buffer);
}
//! Static convenience wrapper: format at the global logger's default level.
//! All Log:: wrappers are no-ops when no logger has been installed.
void Log::log(const char *fmt, ...)
{
    if (s_logger)
    {
        va_list args;
        va_start(args, fmt);
        s_logger->log(fmt, args);
        va_end(args);
    }
}
//! Static wrapper: log a preformatted string at the default level.
void Log::log(const std::string &msg)
{
    if (s_logger)
    {
        s_logger->log(msg);
    }
}
//! Static wrapper: format and log at an explicit level.
void Log::log(Logger::log_level_t level, const char *fmt, ...)
{
    if (s_logger)
    {
        va_list args;
        va_start(args, fmt);
        s_logger->log(level, fmt, args);
        va_end(args);
    }
}
//! Static wrapper: log a preformatted string at an explicit level.
void Log::log(Logger::log_level_t level, const std::string &msg)
{
    if (s_logger)
    {
        s_logger->log(level, msg);
    }
}
//! Writes the already-formatted message verbatim to stdout.
void StdoutLogger::_log(const char *msg)
{
    fputs(msg, stdout);
}
|
/*
Copyright (C) 2012-2017 FCEUX team
This file is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the this software. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mapinc.h"
// http://wiki.nesdev.com/w/index.php/INES_Mapper_028
//config
static int prg_mask_16k; // limits 16K PRG bank numbers to the ROM size
// state
uint8 reg;   // register selected by $5000-$5FFF writes
uint8 chr;   // 8K CHR bank (applied via setchr8)
uint8 prg;   // inner PRG bank bits
uint8 mode;  // banking mode (bits 2-5) and mirroring (bits 0-1)
uint8 outer; // outer PRG bank bits
// Applies the nametable mirroring selected by the low two bits of `mode`.
void SyncMirror()
{
	int m = mode & 3;
	if (m == 0)
		setmirror(MI_0);
	else if (m == 1)
		setmirror(MI_1);
	else if (m == 2)
		setmirror(MI_V);
	else
		setmirror(MI_H);
}
// Updates the single-screen-select mirroring bit from a register write.
// The bit is only writable while mode bit 1 is clear (one-screen modes);
// either way the current mirroring is re-applied.
void Mirror(uint8 value)
{
	if (!(mode & 2))
	{
		mode = (mode & 0xfe) | ((value >> 4) & 1);
	}
	SyncMirror();
}
// Recomputes and applies the PRG/CHR banking from the current registers.
// mode bits 2-5 (mask 0x3c) select the PRG mode: 32K switched, bottom-fixed
// 16K, or top-fixed 16K, at 32K/64K/128K/256K outer-bank granularity.
// `outer` supplies the high bank bits, `prg` the switched low bits.
static void Sync()
{
	int prglo = 0;
	int prghi = 0;
	int outb = outer << 1;
	//this can probably be rolled up, but i have no motivation to do so
	//until it's been tested
	switch (mode & 0x3c)
	{
	//32K modes
	case 0x00:
	case 0x04:
		prglo = outb;
		prghi = outb | 1;
		break;
	case 0x10:
	case 0x14:
		prglo = (outb & ~2) | ((prg << 1) & 2);
		prghi = (outb & ~2) | ((prg << 1) & 2) | 1;
		break;
	case 0x20:
	case 0x24:
		prglo = (outb & ~6) | ((prg << 1) & 6);
		prghi = (outb & ~6) | ((prg << 1) & 6) | 1;
		break;
	case 0x30:
	case 0x34:
		prglo = (outb & ~14) | ((prg << 1) & 14);
		prghi = (outb & ~14) | ((prg << 1) & 14) | 1;
		break;
	//bottom fixed modes
	case 0x08:
		prglo = outb;
		prghi = outb | (prg & 1);
		break;
	case 0x18:
		prglo = outb;
		prghi = (outb & ~2) | (prg & 3);
		break;
	case 0x28:
		prglo = outb;
		prghi = (outb & ~6) | (prg & 7);
		break;
	case 0x38:
		prglo = outb;
		prghi = (outb & ~14) | (prg & 15);
		break;
	//top fixed modes
	case 0x0c:
		prglo = outb | (prg & 1);
		prghi = outb | 1;
		break;
	case 0x1c:
		prglo = (outb & ~2) | (prg & 3);
		prghi = outb | 1;
		break;
	case 0x2c:
		prglo = (outb & ~6) | (prg & 7);
		prghi = outb | 1;
		break;
	case 0x3c:
		prglo = (outb & ~14) | (prg & 15);
		prghi = outb | 1;
		break;
	}
	// Clamp to the actual ROM size, then map the two 16K windows.
	prglo &= prg_mask_16k;
	prghi &= prg_mask_16k;
	setprg16(0x8000, prglo);
	setprg16(0xC000, prghi);
	setchr8(chr);
}
// $5000-$5FFF: selects which internal register ($00, $01, $80, $81)
// subsequent $8000+ writes will target.
static DECLFW(WriteEXP)
{
	uint8 value = V;
	reg = value & 0x81;
}
// $8000-$FFFF: writes the previously selected register, then re-syncs
// mirroring and banking as needed.
static DECLFW(WritePRG)
{
	uint8 value = V;
	switch (reg)
	{
	case 0x00:
		// CHR bank + (in one-screen modes) the mirroring select bit.
		chr = value & 3;
		Mirror(value);
		Sync();
		break;
	case 0x01:
		// Inner PRG bank + mirroring select bit.
		prg = value & 15;
		Mirror(value);
		Sync();
		break;
	case 0x80:
		// Mode register: mirroring bits 0-1, PRG mode bits 2-5.
		mode = value & 63;
		SyncMirror();
		Sync();
		break;
	case 0x81:
		// Outer PRG bank.
		outer = value & 63;
		Sync();
		break;
	}
}
// Reset: force the outer bank and inner PRG bits to all-ones so the last
// bank (which holds the reset vector) is mapped at $C000.
static void M28Reset(void)
{
	outer = 63;
	prg = 15;
	Sync();
}
static void M28Power(void)
{
	// NOTE(review): assumes PRGsize[0] yields a power-of-two bank count so
	// that size-1 is a valid mask -- confirm for oddly sized ROMs.
	prg_mask_16k = PRGsize[0] - 1;
	//EXP
	SetWriteHandler(0x5000,0x5FFF,WriteEXP);
	//PRG
	SetWriteHandler(0x8000,0xFFFF,WritePRG);
	SetReadHandler(0x8000,0xFFFF,CartBR);
	//WRAM
	SetReadHandler(0x6000,0x7FFF,CartBR);
	SetWriteHandler(0x6000,0x7FFF,CartBW);
	M28Reset();
}
// Nothing to release; present to satisfy the CartInfo interface.
static void M28Close(void)
{
}
// Savestate descriptors: one entry per hardware register, each persisted
// as a single byte under a 4-character tag. (The first entry previously
// read "{®, ...}" -- mojibake for "&reg" -- which failed to take the
// register's address and would not compile.)
static SFORMAT StateRegs[]=
{
	{&reg, 1, "REG"},
	{&chr, 1, "CHR"},
	{&prg, 1, "PRG"},
	{&mode, 1, "MODE"},
	{&outer, 1, "OUTR"},
	{0}
};
// Re-derives the banking/mirroring hardware state after a savestate load.
static void StateRestore(int version)
{
	Sync();
}
// Entry point: wires the mapper's callbacks and state into the emulator.
void Mapper28_Init(CartInfo* info)
{
	info->Power=M28Power;
	info->Reset=M28Reset;
	info->Close=M28Close;
	GameStateRestore=StateRestore;
	AddExState(&StateRegs, ~0, 0, 0);
}
|
#include "tgl.h"
#include "toonz/strokegenerator.h"
//#include "tofflinegl.h"
#include "tstroke.h"
#include "toonz/preferences.h"
using namespace std;
//-------------------------------------------------------------------
// Resets the generator to its initial, point-free state.
void StrokeGenerator::clear() {
  m_points.clear();
  m_modifiedRegion = TRectD();
  // NOTE(review): empty() reads like a query whose result is discarded;
  // presumably the intent is to reset these rects -- confirm TRectD's API.
  m_lastPointRect.empty();
  m_lastModifiedRegion.empty();
  m_paintedPointCount = 0;
  m_p0 = m_p1 = TPointD();
}
//-------------------------------------------------------------------
// True when no points have been accumulated yet.
bool StrokeGenerator::isEmpty() const { return m_points.empty(); }
//-------------------------------------------------------------------
// Appends a sampled point and keeps the dirty-region rectangles current.
// Samples closer than 2*sqrt(pixelSize2) to the previous point are merged
// into it (keeping the larger thickness) instead of being stored.
void StrokeGenerator::add(const TThickPoint &point, double pixelSize2) {
  if (m_points.empty()) {
    // First point: seed every region with a rect padded by thickness + 3.
    double x = point.x, y = point.y, d = point.thick + 3;
    m_points.push_back(point);
    TRectD rect(x - d, y - d, x + d, y + d);
    m_modifiedRegion = rect;
    m_lastPointRect = rect;
    m_lastModifiedRegion = rect;
    m_p0 = m_p1 = point;
  } else {
    TThickPoint lastPoint = m_points.back();
    if (tdistance2(lastPoint, point) >= 4 * pixelSize2) {
      m_points.push_back(point);
      double d = std::max(point.thick, lastPoint.thick) + 3;
      TRectD rect(TRectD(lastPoint, point).enlarge(d));
      m_modifiedRegion += rect;
      m_lastModifiedRegion = m_lastPointRect + rect;
      m_lastPointRect = rect;
    } else {
      // Too close to the previous sample: only widen it.
      m_points.back().thick = std::max(m_points.back().thick, point.thick);
    }
  }
}
//-------------------------------------------------------------------
// Drops unreliable samples at both ends of the stroke: a thickness jump
// larger than 0.6 * distance between neighbors marks everything outside it
// for removal. No-op for strokes with fewer than 10 points.
void StrokeGenerator::filterPoints() {
  if (m_points.size() < 10) return;
  // Filter the initial points: large thickness variations usually occur
  // between m_points[0] (at most m_points[1]) and the following ones.
  int size1 = m_points.size();
  int kMin = 0;
  int kMax = std::min(
      4,
      size1 -
          2); // compare the first 5 points with their following neighbors
  int k = kMax;
  for (k = kMax; k >= kMin; --k) {
    TThickPoint currPoint = m_points[k];
    TThickPoint nextPoint = m_points[k + 1];
    double dist = tdistance(currPoint, nextPoint);
    double deltaThick = fabs(currPoint.thick - nextPoint.thick);
    if (deltaThick > 0.6 * dist) // deltaThick <= dist (approximate
                                 // non-self-containment condition for
                                 // TTQ)
    {
      vector<TThickPoint>::iterator it1 = m_points.begin();
      vector<TThickPoint>::iterator it2 = it1 + k + 1;
      m_points.erase(it1, it2); // erase from m_points[0] to m_points[k]
      assert((int)m_points.size() == size1 - k - 1);
      break;
    }
  }
  // Filter the final points: large thickness variations usually occur
  // between m_points[size - 1] (at most m_points[size - 2]) and their
  // predecessors.
  int size2 = m_points.size();
  kMax = size2 - 1;
  kMin = std::max(
      kMax - 4,
      1); // compare the last 5 points with their preceding neighbors
  k = kMin;
  for (k = kMin; k <= kMax; ++k) {
    TThickPoint currPoint = m_points[k];
    TThickPoint prevPoint = m_points[k - 1];
    double dist = tdistance(currPoint, prevPoint);
    double deltaThick = fabs(currPoint.thick - prevPoint.thick);
    if (deltaThick > 0.6 * dist) // deltaThick <= dist (approximate
                                 // non-self-containment condition for
                                 // TTQ)
    {
      int kTmp = k;
      while (k <= kMax) // erase from m_points[k] to m_points[size2 - 1]
      {
        m_points.pop_back();
        ++k;
      }
      assert((int)m_points.size() == size2 - (kMax - kTmp + 1));
      break;
    }
  }
}
//-------------------------------------------------------------------
// Renders stroke fragments [first, last] with immediate-mode GL: each
// segment becomes a quad whose edges (m_p0/m_p1) are offset from the
// centerline by the point thickness, plus a thin center line strip.
// m_p0/m_p1 persist across calls so consecutive quads share an edge.
void StrokeGenerator::drawFragments(int first, int last) {
  if (m_points.empty()) return;
  int i = first;
  if (last >= (int)m_points.size()) last = m_points.size() - 1;
  const double h = 0.01;
  TThickPoint a;
  TThickPoint b;
  TThickPoint c;
  TPointD v;
  // If drawing a straight line, a stroke can have only two points
  if (m_points.size() == 2) {
    a = m_points[0];
    b = m_points[1];
    if (Preferences::instance()->getShow0ThickLines()) {
      if (a.thick == 0) a.thick = 0.1;
      if (b.thick == 0) b.thick = 0.1;
    }
    // m_p0 = m_p1 = b;
    assert(tdistance(b, a) > h);
    // Perpendicular offset scaled by thickness at each endpoint.
    v = a.thick * normalize(rotate90(b - a));
    m_p0 = a + v;
    m_p1 = a - v;
    v = b.thick * normalize(rotate90(b - a));
    TPointD p0 = b + v;
    TPointD p1 = b - v;
    glBegin(GL_POLYGON);
    tglVertex(m_p0);
    tglVertex(m_p1);
    tglVertex(p1);
    tglVertex(p0);
    glEnd();
    m_p0 = p0;
    m_p1 = p1;
    glBegin(GL_LINE_STRIP);
    tglVertex(a);
    tglVertex(b);
    glEnd();
    return;
  }
  while (i < last) {
    a = m_points[i - 1];
    b = m_points[i];
    c = m_points[i + 1];
    if (Preferences::instance()->getShow0ThickLines()) {
      if (a.thick == 0) a.thick = 0.1;
      if (b.thick == 0) b.thick = 0.1;
      if (c.thick == 0) c.thick = 0.1;
    }
    // Skip degenerate segments (near-zero thickness or length).
    if (a.thick >= h && b.thick >= h && tdistance2(b, a) >= h &&
        tdistance2(a, c) >= h) {
      if (i - 1 == 0) {
        assert(tdistance(b, a) > h);
        v = a.thick * normalize(rotate90(b - a));
        m_p0 = a + v;
        m_p1 = a - v;
      }
      assert(tdistance(c, a) > h);
      // Miter direction at b uses the chord a->c for smoother joins.
      v = b.thick * normalize(rotate90(c - a));
      TPointD p0 = b + v;
      TPointD p1 = b - v;
      glBegin(GL_POLYGON);
      tglVertex(m_p0);
      tglVertex(m_p1);
      tglVertex(p1);
      tglVertex(p0);
      glEnd();
      m_p0 = p0;
      m_p1 = p1;
    } else {
      m_p0 = m_p1 = b;
    }
    glBegin(GL_LINE_STRIP);
    tglVertex(a);
    tglVertex(b);
    glEnd();
    i++;
  }
}
//-------------------------------------------------------------------
// Incremental redraw: draws only the fragments added since the previous
// call, capping the first point with a disk on the very first invocation.
void StrokeGenerator::drawLastFragments() {
  if (m_points.empty()) return;
  int n = m_points.size();
  int i = m_paintedPointCount;
  const double h = 0.01;
  if (i == 0) {
    TThickPoint a = m_points[0];
    if (a.thick >= h) tglDrawDisk(a, a.thick);
    i++;
  }
  drawFragments(i, n - 1);
  // Leave the last two points "unpainted" so the next call re-joins them.
  m_paintedPointCount = std::max(0, n - 2);
}
//-------------------------------------------------------------------
// Full redraw of the whole stroke, with disk caps at both extremities.
void StrokeGenerator::drawAllFragments() {
  if (m_points.empty()) return;
  int n = m_points.size();
  int i = 0;
  const double h = 0.01;
  TThickPoint a = m_points[0];
  if (a.thick >= h) tglDrawDisk(a, a.thick);
  drawFragments(1, n - 1);
  /*
  //last fragment
  TPointD p0 = c+v;
  TPointD p1 = c-v;
  glBegin(GL_POLYGON);
  tglVertex(m_p0);
  tglVertex(m_p1);
  tglVertex(p1);
  tglVertex(p0);
  glEnd();
  */
  a = m_points.back();
  if (a.thick >= h) tglDrawDisk(a, a.thick);
}
//-------------------------------------------------------------------
// Bounding rect of everything modified since the stroke began.
TRectD StrokeGenerator::getModifiedRegion() const { return m_modifiedRegion; }
//-------------------------------------------------------------------
// Keeps only the first and last points, dropping everything in between.
void StrokeGenerator::removeMiddlePoints() {
  int size = m_points.size();
  if (size > 2) {
    m_points.erase(m_points.begin() + 1, m_points.begin() + (size - 1));
  }
}
//-------------------------------------------------------------------
// Rect touched by the most recent add() (previous point rect included).
TRectD StrokeGenerator::getLastModifiedRegion() { return m_lastModifiedRegion; }
//-------------------------------------------------------------------
// First accumulated point; undefined behavior if the generator is empty.
TPointD StrokeGenerator::getFirstPoint() { return m_points[0]; }
//-------------------------------------------------------------------
// Builds a TStroke by interpolating the accumulated points within the
// given error tolerance. When onlyLastPoints is a valid, non-zero count,
// only that many trailing points are interpolated; otherwise the whole
// stroke is used.
TStroke *StrokeGenerator::makeStroke(double error, UINT onlyLastPoints) const {
  if (onlyLastPoints == 0 || onlyLastPoints > m_points.size())
    return TStroke::interpolate(m_points, error);
  vector<TThickPoint>::const_iterator tailBegin =
      m_points.begin() + (m_points.size() - onlyLastPoints);
  vector<TThickPoint> lastPoints(tailBegin, m_points.end());
  return TStroke::interpolate(lastPoints, error);
}
//-------------------------------------------------------------------
|
/*
==============================================================================
GroupHandle.cpp
Created: 29 May 2020 4:33:46pm
Author: Vincenzo
==============================================================================
*/
#include "GroupHandle.h"
// A draggable handle bound to a group: rendered as a dot when dragging it
// adds a group (addsGroup), otherwise as a diamond on the group's edge.
GroupHandle::GroupHandle(int groupIndexIn, int degreeIndexIn, bool addsGroupIn, bool clockwiseDragIn)
	: groupIndex(groupIndexIn),
	degreeIndex(degreeIndexIn),
	addsGroup(addsGroupIn),
	clockwiseDrag(clockwiseDragIn)
{
	colour = Colours::black;
}
// Index of the group this handle belongs to.
int GroupHandle::getGroupIndex() const
{
	return groupIndex;
}
// Scale-degree index this handle sits at.
int GroupHandle::getDegreeIndex() const
{
	return degreeIndex;
}
// True if dragging this handle creates a new group.
bool GroupHandle::addsGroupWhenDragged() const
{
	return addsGroup;
}
// Current drag direction flag.
bool GroupHandle::isDraggingClockwise() const
{
	return clockwiseDrag;
}
/*
	Returns polar coordinates
*/
Point<float> GroupHandle::getPosition() const
{
	return position;
}
// Handle size: dot radius when addsGroup, ring-width ratio otherwise.
float GroupHandle::getSize() const
{
	return size;
}
/*
	Returns the path that represents this handle as a dot
*/
Path GroupHandle::getDot(float dotRadius) const
{
	Path dot;
	Point<float> p1, p2;
	float sqrtSizeSq = sqrtf(dotRadius * dotRadius * 2);
	// `position` is polar here: x holds the angle, y the radius (see the
	// cos/sin usage below); convert to Cartesian around `center`.
	float xv = position.y * cosf(position.x) + center.x;
	float yv = position.y * sinf(position.x) + center.y;
	p1.setXY(xv - sqrtSizeSq, yv + sqrtSizeSq);
	p2.setXY(xv + sqrtSizeSq, yv - sqrtSizeSq);
	dot.addEllipse(Rectangle<float>(p1, p2));
	return dot;
}
/*
	Returns the path that represents this handle as a line (edge)
*/
Path GroupHandle::getLine(float lineThickness) const
{
	// Diamond (4-sided polygon) placed along the group-edge line,
	// rotated perpendicular to the handle's angle. Empty when size <= 0.
	Line<float> line = getGroupEdgeLine(center, position, size);
	Path diamond;
	if (size > 0)
		diamond.addPolygon(line.getPointAlongLineProportionally(1/size), 4, lineThickness, position.x + MathConstants<float>::pi / 2);
	return diamond;
}
/*
	Returns the path that represents this handle depending on the addsGroup member
*/
Path GroupHandle::getPath() const
{
	return handlePath;
}
Colour GroupHandle::getColour() const
{
	return colour;
}
// Hit-test against the cached path (tolerance 100.0f -- NOTE(review):
// confirm this tolerance value is intended) and update the hover state.
bool GroupHandle::isMouseOver(const MouseEvent& event)
{
	bool isOver = handlePath.contains(event.position, 100.0f);
	setMouseOver(isOver);
	return isOver;
}
/*
	Sets the direction used when interpreting a drag on this handle.
*/
void GroupHandle::setDraggingClockwise(bool clockwiseDragIn)
{
	clockwiseDrag = clockwiseDragIn;
}
/*
	Set center position in polar coordinates
*/
// NOTE(review): this stores {radius, angle}, but getDot()/getGroupEdgeLine()
// read position.x as the angle and position.y as the radius -- the
// parameter order looks swapped; confirm against callers before changing.
void GroupHandle::setPosition(float radius, float angle, Point<float> centerIn)
{
	position = { radius, angle };
	center = centerIn;
}
void GroupHandle::setPosition(Point<float> polarCoordinates, Point<float> centerIn)
{
	position = polarCoordinates;
	center = centerIn;
}
/*
	If represents an edge(addsGroup = false), this is thickness. If it's a dot, it's a radius.
*/
void GroupHandle::setSize(float sizeIn)
{
	size = sizeIn;
	// Rebuild the cached path to reflect the new size.
	if (addsGroup)
		handlePath = getDot(size);
	else
		handlePath = getLine(lineThickness);
}
void GroupHandle::setColour(Colour colourIn)
{
	colour = colourIn;
}
// Swaps the cached path between normal and enlarged (hover) renderings.
// Inside the branch below, `mouseIsOver` still holds the OLD state.
void GroupHandle::setMouseOver(bool isMouseOver)
{
	if (mouseIsOver != isMouseOver)
	{
		if (mouseIsOver)
		{
			// Leaving hover: restore the normal-size path.
			if (addsGroup)
				handlePath = getDot(size);
			else
				handlePath = getLine(lineThickness);
		}
		else
		{
			// Entering hover: enlarge by mouseOverMultiplier.
			if (addsGroup)
				handlePath = getDot(size * mouseOverMultiplier);
			else
				handlePath = getLine(lineThickness * mouseOverMultiplier);
		}
		mouseIsOver = isMouseOver;
	}
}
// Line from the handle's polar position toward the circle center, the far
// end scaled by ringWidthRatio (polarCoords: x = angle, y = radius).
Line<float> GroupHandle::getGroupEdgeLine(Point<float> centerCircle, Point<float> polarCoords, float ringWidthRatio)
{
	Point<float> p1, p2;
	float xm = polarCoords.y * cosf(polarCoords.x);
	float ym = polarCoords.y * sinf(polarCoords.x);
	p1 = { xm + centerCircle.x, ym + centerCircle.y };
	p2 = { xm * ringWidthRatio + centerCircle.x, ym * ringWidthRatio + centerCircle.y };
	return Line<float>(p1, p2);
}
|
/**
******************************************************************************
* This file is part of the TouchGFX 4.16.1 distribution.
*
* <h2><center>© Copyright (c) 2021 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under Ultimate Liberty license
* SLA0044, the "License"; You may not use this file except in compliance with
* the License. You may obtain a copy of the License at:
* www.st.com/SLA0044
*
******************************************************************************
*/
#include <touchgfx/widgets/SnapshotWidget.hpp>
#include <touchgfx/hal/HAL.hpp>
namespace touchgfx
{
// Starts with no captured bitmap and full opacity.
SnapshotWidget::SnapshotWidget()
    : Widget(), bitmapId(BITMAP_INVALID), alpha(255)
{
}
// Blits the captured bitmap into the invalidated area.
void SnapshotWidget::draw(const Rect& invalidatedArea) const
{
    // Nothing to draw when fully transparent or no snapshot was taken.
    if (alpha == 0 || bitmapId == BITMAP_INVALID)
    {
        return;
    }
    // NOTE(review): width comes from the bitmap but height from the
    // widget's rect -- confirm this mixed sizing is intentional.
    Rect absRect(0, 0, Bitmap(bitmapId).getWidth(), rect.height);
    translateRectToAbsolute(absRect);
    HAL::lcd().blitCopy((const uint16_t*)Bitmap(bitmapId).getData(), absRect, invalidatedArea, alpha, false);
}
// The widget is fully solid only when it is completely opaque and backed
// by a valid snapshot; otherwise report an empty solid area.
Rect SnapshotWidget::getSolidRect() const
{
    const bool notSolid = (alpha < 255) || (bitmapId == BITMAP_INVALID);
    return notSolid ? Rect(0, 0, 0, 0) : Rect(0, 0, getWidth(), getHeight());
}
// Captures into the default animation-storage bitmap.
void SnapshotWidget::makeSnapshot()
{
    makeSnapshot(BITMAP_ANIMATION_STORAGE);
}
// Copies the framebuffer region under this widget into bitmap `bmp`.
// On failure the widget keeps an invalid bitmap and draw() does nothing.
void SnapshotWidget::makeSnapshot(const BitmapId bmp)
{
    Rect visRect(0, 0, rect.width, rect.height);
    getVisibleRect(visRect);
    Rect absRect = getAbsoluteRect();
    bitmapId = (HAL::lcd().copyFrameBufferRegionToMemory(visRect, absRect, bmp)) ? bmp : BITMAP_INVALID;
}
} // namespace touchgfx
|
/**********************************************************************
Audacium: A Digital Audio Editor
StereoToMono.cpp
Lynn Allan
*******************************************************************//**
\class EffectStereoToMono
\brief An Effect to convert stereo to mono.
*//*******************************************************************/
#include "StereoToMono.h"
#include "LoadEffects.h"
#include <wx/intl.h>
#include "../Mix.h"
#include "../Project.h"
#include "../WaveTrack.h"
#include "../widgets/ProgressDialog.h"
// Display name registered with the effects subsystem.
const ComponentInterfaceSymbol EffectStereoToMono::Symbol
{ XO("Stereo To Mono") };
// Self-registration with the built-in effects module.
namespace{ BuiltinEffectsModule::Registration< EffectStereoToMono > reg; }
EffectStereoToMono::EffectStereoToMono()
{
}
EffectStereoToMono::~EffectStereoToMono()
{
}
// ComponentInterface implementation
ComponentInterfaceSymbol EffectStereoToMono::GetSymbol()
{
   return Symbol;
}
TranslatableString EffectStereoToMono::GetDescription()
{
   return XO("Converts stereo tracks to mono");
}
// EffectDefinitionInterface implementation
EffectType EffectStereoToMono::GetType()
{
   // Really EffectTypeProcess, but this prevents it from showing in the Effect Menu
   return EffectTypeHidden;
}
bool EffectStereoToMono::IsInteractive()
{
   return false;
}
// EffectClientInterface implementation
// Consumes a left/right channel pair...
unsigned EffectStereoToMono::GetAudioInCount()
{
   return 2;
}
// ...and produces a single mono channel.
unsigned EffectStereoToMono::GetAudioOutCount()
{
   return 1;
}
// Effect implementation
// Mixes every selected stereo pair down to mono. Pass 1 equalizes sample
// rates and totals the work (in samples) for progress reporting; pass 2
// performs the mixing via ProcessOne(), restarting iteration whenever a
// right channel is deleted (which invalidates the track range).
bool EffectStereoToMono::Process()
{
   // Do not use mWaveTracks here. We will possibly DELETE tracks,
   // so we must use the "real" tracklist.
   this->CopyInputTracks(); // Set up mOutputTracks.
   bool bGoodResult = true;
   // Determine the total time (in samples) used by all of the target tracks
   sampleCount totalTime = 0;
   auto trackRange = mOutputTracks->SelectedLeaders< WaveTrack >();
   while (trackRange.first != trackRange.second)
   {
      auto left = *trackRange.first;
      auto channels = TrackList::Channels(left);
      if (channels.size() > 1)
      {
         auto right = *channels.rbegin();
         auto leftRate = left->GetRate();
         auto rightRate = right->GetRate();
         // Mismatched rates: bring both channels to the project rate first.
         if (leftRate != rightRate)
         {
            if (leftRate != mProjectRate)
            {
               mProgress->SetMessage(XO("Resampling left channel"));
               left->Resample(mProjectRate, mProgress);
               leftRate = mProjectRate;
            }
            if (rightRate != mProjectRate)
            {
               mProgress->SetMessage(XO("Resampling right channel"));
               right->Resample(mProjectRate, mProgress);
               rightRate = mProjectRate;
            }
         }
         {
            auto start = wxMin(left->TimeToLongSamples(left->GetStartTime()),
                               right->TimeToLongSamples(right->GetStartTime()));
            auto end = wxMax(left->TimeToLongSamples(left->GetEndTime()),
                             right->TimeToLongSamples(right->GetEndTime()));
            totalTime += (end - start);
         }
      }
      ++trackRange.first;
   }
   // Process each stereo track
   sampleCount curTime = 0;
   bool refreshIter = false;
   mProgress->SetMessage(XO("Mixing down to mono"));
   trackRange = mOutputTracks->SelectedLeaders< WaveTrack >();
   while (trackRange.first != trackRange.second)
   {
      auto left = *trackRange.first;
      auto channels = TrackList::Channels(left);
      if (channels.size() > 1)
      {
         auto right = *channels.rbegin();
         bGoodResult = ProcessOne(curTime, totalTime, left, right);
         if (!bGoodResult)
         {
            break;
         }
         // The right channel has been deleted, so we must restart from the beginning
         refreshIter = true;
      }
      if (refreshIter)
      {
         trackRange = mOutputTracks->SelectedLeaders< WaveTrack >();
         refreshIter = false;
      }
      else
      {
         ++trackRange.first;
      }
   }
   this->ReplaceProcessedTracks(bGoodResult);
   return bGoodResult;
}
// Mixes one left/right pair into a temporary mono track (every mixed
// sample is halved so L+R cannot exceed full scale), pastes the result
// over `left`, and removes `right` from the output list. `curTime`
// accumulates processed samples for the shared progress bar; returns
// false if the user cancels via TotalProgress.
bool EffectStereoToMono::ProcessOne(sampleCount & curTime, sampleCount totalTime, WaveTrack *left, WaveTrack *right)
{
   auto idealBlockLen = left->GetMaxBlockSize() * 2;
   bool bResult = true;
   // NOTE(review): `processed` appears unused.
   sampleCount processed = 0;
   auto start = wxMin(left->GetStartTime(), right->GetStartTime());
   auto end = wxMax(left->GetEndTime(), right->GetEndTime());
   WaveTrackConstArray tracks;
   tracks.push_back(left->SharedPointer< const WaveTrack >());
   tracks.push_back(right->SharedPointer< const WaveTrack >());
   Mixer mixer(tracks,
               true, // Throw to abort mix-and-render if read fails:
               Mixer::WarpOptions{*inputTracks()},
               start,
               end,
               1,
               idealBlockLen,
               false, // Not interleaved
               left->GetRate(), // Process() checks that left and right
                                // rates are the same
               floatSample);
   auto outTrack = left->EmptyCopy();
   outTrack->ConvertToSampleFormat(floatSample);
   while (auto blockLen = mixer.Process(idealBlockLen))
   {
      auto buffer = mixer.GetBuffer();
      // Halve each sample to keep the sum of two channels in range.
      for (auto i = 0; i < blockLen; i++)
      {
         ((float *)buffer)[i] /= 2.0;
      }
      outTrack->Append(buffer, floatSample, blockLen);
      curTime += blockLen;
      if (TotalProgress(curTime.as_double() / totalTime.as_double()))
      {
         return false;
      }
   }
   outTrack->Flush();
   // Replace the left channel's contents with the mixed-down track.
   double minStart = wxMin(left->GetStartTime(), right->GetStartTime());
   left->Clear(left->GetStartTime(), left->GetEndTime());
   left->Paste(minStart, outTrack.get());
   mOutputTracks->GroupChannels(*left, 1);
   mOutputTracks->Remove(right);
   return bResult;
}
// Keeps the effect out of user-visible effect listings.
bool EffectStereoToMono::IsHidden()
{
   return true;
}
|
#ifndef COMMON_HPP
#define COMMON_HPP
/*!
* MIT License
*
* Copyright (c) 2021 Kambiz Asadzadeh
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*!
* C++20 introduces modules, a modern solution for componentization of C++ libraries and programs.
* A module is a set of source code files that are compiled independently of the translation units that import them.
* Modules eliminate or greatly reduce many of the problems associated with the use of header files, and also potentially reduce compilation times.
* Macros, preprocessor directives, and non-exported names declared in a module are not visible and therefore have no effect on the compilation of the translation unit that imports the module.
* You can import modules in any order without concern for macro redefinitions. Declarations in the importing translation unit do not participate in overload resolution or name lookup in the imported module.
* After a module is compiled once, the results are stored in a binary file that describes all the exported types, functions and templates. That file can be processed much faster than a header file, and can be reused by the compiler every place where the module is imported in a project.
*
!*/
#include "utilities/preprocessor.hpp"
#include "utilities/featuretest.hpp"
#include "utilities/types.hpp"
#ifdef USE_LATEST_STANDARD
////TODO use module...
#else
///!
#endif
//#if defined(CXX_STANDARD_20)
//#ifdef __has_cpp_attribute
//# if __has_cpp_attribute(__cpp_modules)
//# pragma message("Your project is based on modern solution for componentization of C++ libraries and programs.")
//# endif
//#else
//# pragma message("Your project is based on classic precompiled-header system. [enable module technique in C++]")
//#endif
//#endif
#if __cplusplus > 201703
#ifdef __has_include
# if __has_include("precompiled/pch.hpp")
# include "precompiled/pch.hpp"
# else
# pragma message("Your project is based on classic precompiled-header system.")
# endif
#endif
#else
# include "precompiled/pch.hpp"
#endif
#endif // COMMON_HPP
|
#define DEBUG 1
/**
* File : E.cpp
* Author : Kazune Takahashi
* Created : 6/2/2020, 12:34:10 PM
* Powered by Visual Studio Code
*/
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <chrono>
#include <cmath>
#include <complex>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <iomanip>
#include <iostream>
#include <map>
#include <queue>
#include <random>
#include <set>
#include <stack>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>
// ----- boost -----
#include <boost/rational.hpp>
#include <boost/multiprecision/cpp_int.hpp>
// ----- using directives and manipulations -----
using namespace std;
using boost::rational;
using boost::multiprecision::cpp_int;
using ll = long long;
template <typename T>
using max_heap = priority_queue<T>;
template <typename T>
using min_heap = priority_queue<T, vector<T>, greater<T>>;
// ----- constexpr for Mint and Combination -----
constexpr ll MOD{1000000007LL};
// constexpr ll MOD{998244353LL}; // be careful
constexpr ll MAX_SIZE{3000010LL};
// constexpr ll MAX_SIZE{30000010LL}; // if 10^7 is needed
// ----- ch_max and ch_min -----
// In-place "chmax"/"chmin" helpers: overwrite `left` with `right` when
// `right` is larger (resp. smaller). Equivalent to the usual
// if (left < right) left = right; idiom, expressed via std::max/std::min.
template <typename T>
void ch_max(T &left, T right)
{
  left = std::max(left, right);
}
template <typename T>
void ch_min(T &left, T right)
{
  left = std::min(left, right);
}
// ----- Mint -----
// Modular-arithmetic integer: keeps its value normalized in [0, MOD) and
// overloads the usual operators so it behaves like a plain integer.
// Division assumes MOD is prime (Fermat inverse).
template <ll MOD = MOD>
class Mint
{
public:
  ll x;
  Mint() : x{0LL} {}
  // Normalizes any (possibly negative) value into [0, MOD).
  Mint(ll x) : x{(x % MOD + MOD) % MOD} {}
  Mint operator-() const { return x ? MOD - x : 0; }
  Mint &operator+=(const Mint &a)
  {
    if ((x += a.x) >= MOD)
    {
      x -= MOD;
    }
    return *this;
  }
  Mint &operator-=(const Mint &a) { return *this += -a; }
  Mint &operator++() { return *this += 1; }
  Mint operator++(int)
  {
    Mint tmp{*this};
    ++*this;
    return tmp;
  }
  Mint &operator--() { return *this -= 1; }
  Mint operator--(int)
  {
    Mint tmp{*this};
    --*this;
    return tmp;
  }
  Mint &operator*=(const Mint &a)
  {
    (x *= a.x) %= MOD;
    return *this;
  }
  // Division via the modular inverse a^(MOD-2); requires MOD prime.
  Mint &operator/=(const Mint &a)
  {
    Mint b{a};
    return *this *= b.power(MOD - 2);
  }
  Mint operator+(const Mint &a) const { return Mint(*this) += a; }
  Mint operator-(const Mint &a) const { return Mint(*this) -= a; }
  Mint operator*(const Mint &a) const { return Mint(*this) *= a; }
  Mint operator/(const Mint &a) const { return Mint(*this) /= a; }
  bool operator<(const Mint &a) const { return x < a.x; }
  bool operator<=(const Mint &a) const { return x <= a.x; }
  bool operator>(const Mint &a) const { return x > a.x; }
  bool operator>=(const Mint &a) const { return x >= a.x; }
  bool operator==(const Mint &a) const { return x == a.x; }
  bool operator!=(const Mint &a) const { return !(*this == a); }
  // Exponentiation by squaring: O(log N) multiplications.
  const Mint power(ll N)
  {
    if (N == 0)
    {
      return 1;
    }
    else if (N % 2 == 1)
    {
      return *this * power(N - 1);
    }
    else
    {
      Mint half = power(N / 2);
      return half * half;
    }
  }
};
// Mixed ll/Mint operators so integer literals can appear on the left.
template <ll MOD>
Mint<MOD> operator+(ll lhs, const Mint<MOD> &rhs)
{
  return rhs + lhs;
}
template <ll MOD>
Mint<MOD> operator-(ll lhs, const Mint<MOD> &rhs)
{
  return -rhs + lhs;
}
template <ll MOD>
Mint<MOD> operator*(ll lhs, const Mint<MOD> &rhs)
{
  return rhs * lhs;
}
template <ll MOD>
Mint<MOD> operator/(ll lhs, const Mint<MOD> &rhs)
{
  return Mint<MOD>{lhs} / rhs;
}
// Reads the raw representative directly into x; no normalization here.
template <ll MOD>
istream &operator>>(istream &stream, Mint<MOD> &a)
{
  return stream >> a.x;
}
template <ll MOD>
ostream &operator<<(ostream &stream, const Mint<MOD> &a)
{
  return stream << a.x;
}
// ----- Combination -----
// Precomputes factorials, inverse factorials and modular inverses up to
// MAX_SIZE (O(MAX_SIZE) once) so each nCk query is O(1). Requires MOD prime.
template <ll MOD = MOD, ll MAX_SIZE = MAX_SIZE>
class Combination
{
public:
  vector<Mint<MOD>> inv, fact, factinv;
  Combination() : inv(MAX_SIZE), fact(MAX_SIZE), factinv(MAX_SIZE)
  {
    inv[1] = 1;
    // Linear-time inverses: inv[i] = -(MOD / i) * inv[MOD % i].
    for (auto i = 2LL; i < MAX_SIZE; i++)
    {
      inv[i] = (-inv[MOD % i]) * (MOD / i);
    }
    fact[0] = factinv[0] = 1;
    for (auto i = 1LL; i < MAX_SIZE; i++)
    {
      fact[i] = Mint<MOD>(i) * fact[i - 1];
      factinv[i] = inv[i] * factinv[i - 1];
    }
  }
  // nCk modulo MOD; 0 for out-of-range arguments.
  Mint<MOD> operator()(int n, int k)
  {
    if (n >= 0 && k >= 0 && n - k >= 0)
    {
      return fact[n] * factinv[k] * factinv[n - k];
    }
    return 0;
  }
  // Ballot-style count: paths counted by C(x+y, y) minus C(x+y, y-1).
  Mint<MOD> catalan(int x, int y)
  {
    return (*this)(x + y, y) - (*this)(x + y, y - 1);
  }
};
// ----- for C++14 -----
using mint = Mint<MOD>;
using combination = Combination<MOD, MAX_SIZE>;
// Greatest common divisor via the iterative Euclidean algorithm
// (same results as the recursive form; gcd(x, 0) == x).
template <typename T>
T gcd(T x, T y)
{
  while (y != 0)
  {
    T r = x % y;
    x = y;
    y = r;
  }
  return x;
}
// Least common multiple; divides before multiplying to limit overflow.
template <typename T>
T lcm(T x, T y) { return x / gcd(x, y) * y; }
// ----- for C++17 -----
// Counts set bits by shifting the value down one bit at a time.
// Intended for non-negative inputs (matches std::popcount's domain).
template <typename T>
int popcount(T x) // C++20
{
  int count{0};
  for (; x != 0; x >>= 1)
  {
    count += static_cast<int>(x & 1);
  }
  return count;
}
// ----- frequently used constexpr -----
// constexpr double epsilon{1e-10};
constexpr ll infty{1000000000000000LL}; // or
// constexpr int infty{1'000'000'010};
// constexpr int dx[4] = {1, 0, -1, 0};
// constexpr int dy[4] = {0, 1, 0, -1};
// ----- Yes() and No() -----
// Print the verdict and terminate the program immediately.
void Yes()
{
  cout << "Yes" << endl;
  exit(0);
}
void No()
{
  cout << "No" << endl;
  exit(0);
}
// ----- main() -----
// Directed weighted edge for the Bellman-Ford pass; `valid` gates whether
// the relaxation loop may use it.
struct Edge
{
  // Default member initializers: a default-constructed Edge is explicitly
  // inert instead of carrying indeterminate values (the original default
  // constructor left every member, including `valid`, uninitialized).
  bool valid{false};
  int src{0}, dst{0};
  ll cost{0};
  Edge() {}
  Edge(int src, int dst, ll cost) : valid{true}, src{src}, dst{dst}, cost{cost} {}
  // Appends dst to src's forward adjacency list.
  void added_edge(vector<vector<int>> &V)
  {
    V[src].push_back(dst);
  }
  // Appends src to dst's list (reversed graph).
  void added_rev(vector<vector<int>> &V)
  {
    V[dst].push_back(src);
  }
};
// Reads the graph, prunes vertexes that cannot lie on any 0 -> N-1 walk,
// then runs Bellman-Ford over the remaining edges.
class Solve
{
  int N, M;
  ll P;
  vector<Edge> E;
public:
  // Reads P and the M edges (1-based endpoints converted to 0-based);
  // each edge's stored weight is P - C.
  Solve(int N, int M) : N{N}, M{M}, E(M)
  {
    cin >> P;
    for (auto i = 0; i < M; ++i)
    {
      int A, B;
      ll C;
      cin >> A >> B >> C;
      --A;
      --B;
      E[i] = Edge(A, B, P - C);
    }
  }
  void flush()
  {
    determine_validness();
    cout << bf() << endl;
  }
private:
  // Bellman-Ford over the valid edges. Returns max(0, -D[N-1]) once no
  // relaxation occurs, or -1 if edges still relax after N+1 rounds
  // (i.e. a negative cycle lies on a usable 0 -> N-1 path).
  ll bf()
  {
    vector<ll> D(N, infty);
    D[0] = 0;
    bool updated{false};
    for (auto t = 0; t < N + 2; ++t)
    {
      updated = false;
      for (auto const &e : E)
      {
        if (!e.valid)
        {
          continue;
        }
        auto tmp{D[e.src] + e.cost};
        if (D[e.dst] > tmp)
        {
          D[e.dst] = tmp;
          updated = true;
        }
      }
      if (!updated)
      {
        return max(0LL, -D[N - 1]);
      }
      if (t == N + 1)
      {
        return -1;
      }
    }
    // Unreachable: the loop always returns within N + 2 iterations.
    assert(false);
    return -2;
  }
  // Invalidates every edge touching a vertex that is not on some
  // 0 -> N-1 path.
  void determine_validness()
  {
    auto table{valid_vertexes()};
    for (auto &e : E)
    {
      if (!(table[e.src] && table[e.dst]))
      {
        e.valid = false;
      }
#if DEBUG == 1
      else
      {
        cerr << "src: " << e.src << ", dst: " << e.dst << ", cost: " << e.cost << endl;
      }
#endif
    }
  }
  // A vertex is valid iff it is reachable from 0 (forward DFS) and can
  // reach N-1 (DFS on the reversed graph).
  vector<bool> valid_vertexes()
  {
    vector<vector<int>> V(N);
    for (auto i = 0; i < M; ++i)
    {
      E[i].added_edge(V);
    }
    vector<bool> X(N, false);
    dfs(V, X, 0);
    vector<vector<int>> W(N);
    for (auto i = 0; i < M; ++i)
    {
      E[i].added_rev(W);
    }
    vector<bool> Y(N, false);
    dfs(W, Y, N - 1);
    vector<bool> res(N, false);
    for (auto i = 0; i < N; ++i)
    {
      res[i] = X[i] && Y[i];
    }
    return res;
  }
  // Recursive DFS marking every vertex reachable from src.
  void dfs(vector<vector<int>> const &V, vector<bool> &visited, int src, int parent = -1)
  {
    visited[src] = true;
    for (auto dst : V[src])
    {
      if (dst != parent && !visited[dst])
      {
        dfs(V, visited, dst, src);
      }
    }
  }
};
int main()
{
int N, M;
cin >> N >> M;
Solve solve(N, M);
solve.flush();
}
|
// Copyright (c) 2015 Baidu, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Author: Ge,Jun (gejun@baidu.com)
// Date: Thu Jul 30 17:44:54 CST 2015
#include <unistd.h> // getpagesize
#include <sys/types.h>
#include <sys/resource.h> // getrusage
#include <dirent.h> // dirent
#include <iomanip> // setw
#if defined(__APPLE__)
#include <libproc.h>
#include <sys/resource.h>
#else
#endif
#include "butil/time.h"
#include "butil/memory/singleton_on_pthread_once.h"
#include "butil/scoped_lock.h"
#include "butil/files/scoped_file.h"
#include "butil/files/dir_reader_posix.h"
#include "butil/file_util.h"
#include "butil/process_util.h" // ReadCommandLine
#include "butil/popen.h" // read_command_output
#include "bvar/passive_status.h"
namespace bvar {
// Yields the type M of a pointer-to-member T::*. Declared only (never
// defined): used unevaluated inside BAIDU_TYPEOF to recover a struct
// field's type at compile time.
template <class T, class M> M get_member_type(M T::*);
#define BVAR_MEMBER_TYPE(member) BAIDU_TYPEOF(bvar::get_member_type(member))
// Presumably an anchor symbol referenced from another TU to force this
// object file to be linked — TODO confirm.
int do_link_default_variables = 0;
// Minimum interval between two refreshes of any CachedReader<T>.
const int64_t CACHED_INTERVAL_US = 100000L; // 100ms
// ======================================
// Mirror of the leading fields of /proc/self/stat (see proc(5)).
// Field order and types must match both the fscanf format string in
// read_proc_status() below and the offsetof() uses in the
// BVAR_DEFINE_PROC_STAT_FIELD* macros.
struct ProcStat {
    int pid;
    //std::string comm;
    char state;
    int ppid;
    int pgrp;
    int session;
    int tty_nr;
    int tpgid;
    unsigned flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    unsigned long cutime;
    unsigned long cstime;
    long priority;
    long nice;
    long num_threads;
};
// Fills `stat` with identity/scheduling statistics of the current process.
// Returns true on success, false otherwise (including unsupported
// platforms). Linux: parsed from /proc/self/stat. MacOS: a subset parsed
// from `ps` output; the remaining fields stay zero-initialized.
static bool read_proc_status(ProcStat &stat) {
    stat = ProcStat();
    errno = 0;
#if defined(OS_LINUX)
    // Read status from /proc/self/stat. Information from `man proc' is out of date,
    // see http://man7.org/linux/man-pages/man5/proc.5.html
    butil::ScopedFILE fp("/proc/self/stat", "r");
    if (NULL == fp) {
        PLOG_ONCE(WARNING) << "Fail to open /proc/self/stat";
        return false;
    }
    if (fscanf(fp, "%d %*s %c "
               "%d %d %d %d %d "
               "%u %lu %lu %lu "
               "%lu %lu %lu %lu %lu "
               "%ld %ld %ld",
               &stat.pid, &stat.state,
               &stat.ppid, &stat.pgrp, &stat.session, &stat.tty_nr, &stat.tpgid,
               &stat.flags, &stat.minflt, &stat.cminflt, &stat.majflt,
               &stat.cmajflt, &stat.utime, &stat.stime, &stat.cutime, &stat.cstime,
               &stat.priority, &stat.nice, &stat.num_threads) != 19) {
        PLOG(WARNING) << "Fail to fscanf";
        return false;
    }
    return true;
#elif defined(OS_MACOSX)
    // TODO(zhujiashun): get remaining state in MacOS.
    memset(&stat, 0, sizeof(stat));
    static pid_t pid = getpid();
    std::ostringstream oss;
    char cmdbuf[128];
    snprintf(cmdbuf, sizeof(cmdbuf),
             "ps -p %ld -o pid,ppid,pgid,sess"
             ",tpgid,flags,pri,nice | tail -n1", (long)pid);
    if (butil::read_command_output(oss, cmdbuf) != 0) {
        LOG(ERROR) << "Fail to read stat";
        // Fixed: this function returns bool; the old `return -1` converted
        // to `true` and reported a bogus success with `stat` all zeros.
        return false;
    }
    const std::string& result = oss.str();
    if (sscanf(result.c_str(), "%d %d %d %d"
               "%d %u %ld %ld",
               &stat.pid, &stat.ppid, &stat.pgrp, &stat.session,
               &stat.tpgid, &stat.flags, &stat.priority, &stat.nice) != 8) {
        PLOG(WARNING) << "Fail to sscanf";
        return false;
    }
    return true;
#else
    return false;
#endif
}
// Reduce pressures to functions to get system metrics.
// Caches the last value produced by a reader functor and refreshes it at
// most once per CACHED_INTERVAL_US, so many concurrent bvar dumpers do not
// hammer /proc or fork subprocesses on every read.
template <typename T>
class CachedReader {
public:
    CachedReader() : _mtime_us(0) {
        CHECK_EQ(0, pthread_mutex_init(&_mutex, NULL));
    }
    ~CachedReader() {
        pthread_mutex_destroy(&_mutex);
    }
    // NOTE: may return a volatile value that may be overwritten at any time.
    // This is acceptable right now. Both 32-bit and 64-bit numbers are atomic
    // to fetch in 64-bit machines(most of baidu machines) and the code inside
    // this .cpp utilizing this class generally return a struct with 32-bit
    // and 64-bit numbers.
    template <typename ReadFn>
    static const T& get_value(const ReadFn& fn) {
        CachedReader* p = butil::get_leaky_singleton<CachedReader>();
        const int64_t now = butil::gettimeofday_us();
        if (now > p->_mtime_us + CACHED_INTERVAL_US) {
            pthread_mutex_lock(&p->_mutex);
            if (now > p->_mtime_us + CACHED_INTERVAL_US) {
                // Claim the refresh slot (bump _mtime_us) before dropping
                // the lock, so other threads keep serving the stale value
                // instead of piling up behind fn.
                p->_mtime_us = now;
                pthread_mutex_unlock(&p->_mutex);
                // don't run fn inside lock otherwise a slow fn may
                // block all concurrent bvar dumppers. (e.g. /vars)
                T result;
                if (fn(&result)) {
                    pthread_mutex_lock(&p->_mutex);
                    p->_cached = result;
                } else {
                    // fn failed: keep the previous cached value; re-lock
                    // only so the unlock below is balanced on every path.
                    pthread_mutex_lock(&p->_mutex);
                }
            }
            pthread_mutex_unlock(&p->_mutex);
        }
        return p->_cached;
    }
private:
    int64_t _mtime_us;      // last refresh time; read without the lock above (benign race by design)
    pthread_mutex_t _mutex; // serializes updates of _mtime_us/_cached
    T _cached;              // last successfully read value
};
// Refresh functor + by-offset field accessor used to expose individual
// ProcStat fields as bvars.
class ProcStatReader {
public:
    // Refresh callback for CachedReader<ProcStat>.
    bool operator()(ProcStat* stat) const {
        return read_proc_status(*stat);
    }
    // Returns the field located `offset` bytes into the cached ProcStat,
    // reinterpreted as T. The offset comes from offsetof() in the macros
    // below, so T and offset always agree with the struct layout.
    template <typename T, size_t offset>
    static T get_field(void*) {
        return *(T*)((char*)&CachedReader<ProcStat>::get_value(
                         ProcStatReader()) + offset);
    }
};
// Expose one ProcStat field as a PassiveStatus bvar named g_<field>
// (anonymous variant: not registered under a dump name).
#define BVAR_DEFINE_PROC_STAT_FIELD(field)                              \
    PassiveStatus<BVAR_MEMBER_TYPE(&ProcStat::field)> g_##field(        \
        ProcStatReader::get_field<BVAR_MEMBER_TYPE(&ProcStat::field),   \
        offsetof(ProcStat, field)>, NULL);
// Same, but exposed for dumping under the given name.
#define BVAR_DEFINE_PROC_STAT_FIELD2(field, name)                       \
    PassiveStatus<BVAR_MEMBER_TYPE(&ProcStat::field)> g_##field(        \
        name,                                                           \
        ProcStatReader::get_field<BVAR_MEMBER_TYPE(&ProcStat::field),   \
        offsetof(ProcStat, field)>, NULL);
// ==================================================
// Mirror of /proc/self/statm (see proc(5)); values are in pages and are
// converted to bytes by ProcMemoryReader::get_field's pagesize multiply.
struct ProcMemory {
    long size;     // total program size
    long resident; // resident set size
    long share;    // shared pages
    long trs;      // text (code)
    long drs;      // data/stack
    long lrs;      // library
    long dt;       // dirty pages
};
// Fills `m` with this process' memory statistics; true on success.
// Linux: /proc/self/statm, all seven values in pages.
// MacOS: rss/vsz from `ps` (reported in KB), converted into pages so both
// platforms agree with the pagesize scaling in ProcMemoryReader.
static bool read_proc_memory(ProcMemory &m) {
    m = ProcMemory();
    errno = 0;
#if defined(OS_LINUX)
    butil::ScopedFILE fp("/proc/self/statm", "r");
    if (NULL == fp) {
        PLOG_ONCE(WARNING) << "Fail to open /proc/self/statm";
        return false;
    }
    if (fscanf(fp, "%ld %ld %ld %ld %ld %ld %ld",
               &m.size, &m.resident, &m.share,
               &m.trs, &m.drs, &m.lrs, &m.dt) != 7) {
        PLOG(WARNING) << "Fail to fscanf";
        return false;
    }
    return true;
#elif defined(OS_MACOSX)
    // TODO(zhujiashun): get remaining memory info in MacOS.
    memset(&m, 0, sizeof(m));
    static pid_t pid = getpid();
    static int64_t pagesize = getpagesize();
    std::ostringstream oss;
    char cmdbuf[128];
    snprintf(cmdbuf, sizeof(cmdbuf), "ps -p %ld -o rss=,vsz=", (long)pid);
    if (butil::read_command_output(oss, cmdbuf) != 0) {
        LOG(ERROR) << "Fail to read memory state";
        // Fixed: `return -1` from a bool function converted to `true` and
        // masked the failure.
        return false;
    }
    const std::string& result = oss.str();
    if (sscanf(result.c_str(), "%ld %ld", &m.resident, &m.size) != 2) {
        PLOG(WARNING) << "Fail to sscanf";
        return false;
    }
    // resident and size in Kbytes
    m.resident = m.resident * 1024 / pagesize;
    m.size = m.size * 1024 / pagesize;
    return true;
#else
    return false;
#endif
}
// Refresh functor + by-offset accessor for ProcMemory fields.
// get_field scales the page count into bytes.
class ProcMemoryReader {
public:
    bool operator()(ProcMemory* stat) const {
        return read_proc_memory(*stat);
    };
    // Returns the ProcMemory field at `offset` (from offsetof) times the
    // page size, i.e. the value in bytes.
    template <typename T, size_t offset>
    static T get_field(void*) {
        static int64_t pagesize = getpagesize();
        return *(T*)((char*)&CachedReader<ProcMemory>::get_value(
                         ProcMemoryReader()) + offset) * pagesize;
    }
};
// Expose one ProcMemory field (converted to bytes) under `name`.
#define BVAR_DEFINE_PROC_MEMORY_FIELD(field, name)                      \
    PassiveStatus<BVAR_MEMBER_TYPE(&ProcMemory::field)> g_##field(      \
        name,                                                           \
        ProcMemoryReader::get_field<BVAR_MEMBER_TYPE(&ProcMemory::field), \
        offsetof(ProcMemory, field)>, NULL);
// ==================================================
// System load averages over the last 1/5/15 minutes, as reported by
// /proc/loadavg (Linux) or `sysctl -n vm.loadavg` (MacOS).
struct LoadAverage {
    double loadavg_1m;
    double loadavg_5m;
    double loadavg_15m;
};
// Fills `m` with the 1/5/15-minute system load averages; true on success.
static bool read_load_average(LoadAverage &m) {
#if defined(OS_LINUX)
    butil::ScopedFILE fp("/proc/loadavg", "r");
    if (NULL == fp) {
        PLOG_ONCE(WARNING) << "Fail to open /proc/loadavg";
        return false;
    }
    m = LoadAverage();
    errno = 0;
    if (fscanf(fp, "%lf %lf %lf",
               &m.loadavg_1m, &m.loadavg_5m, &m.loadavg_15m) != 3) {
        PLOG(WARNING) << "Fail to fscanf";
        return false;
    }
    return true;
#elif defined(OS_MACOSX)
    std::ostringstream oss;
    if (butil::read_command_output(oss, "sysctl -n vm.loadavg") != 0) {
        LOG(ERROR) << "Fail to read loadavg";
        // Fixed: `return -1` in a bool function converted to `true` and
        // reported success with `m` left untouched.
        return false;
    }
    const std::string& result = oss.str();
    if (sscanf(result.c_str(), "{ %lf %lf %lf }",
               &m.loadavg_1m, &m.loadavg_5m, &m.loadavg_15m) != 3) {
        PLOG(WARNING) << "Fail to sscanf";
        return false;
    }
    return true;
#else
    return false;
#endif
}
// Refresh functor + by-offset accessor for LoadAverage fields.
class LoadAverageReader {
public:
    bool operator()(LoadAverage* stat) const {
        return read_load_average(*stat);
    };
    // Returns the LoadAverage field at `offset` (from offsetof below).
    template <typename T, size_t offset>
    static T get_field(void*) {
        return *(T*)((char*)&CachedReader<LoadAverage>::get_value(
                         LoadAverageReader()) + offset);
    }
};
// Expose one LoadAverage field under `name`.
#define BVAR_DEFINE_LOAD_AVERAGE_FIELD(field, name)                     \
    PassiveStatus<BVAR_MEMBER_TYPE(&LoadAverage::field)> g_##field(     \
        name,                                                           \
        LoadAverageReader::get_field<BVAR_MEMBER_TYPE(&LoadAverage::field), \
        offsetof(LoadAverage, field)>, NULL);
// ==================================================
// Counts this process' open file descriptors by listing /proc/self/fd,
// scanning at most `limit` entries. Returns the count or -1 on error;
// returns 0 where no implementation exists.
static int get_fd_count(int limit) {
#if defined(OS_LINUX)
    butil::DirReaderPosix dr("/proc/self/fd");
    int count = 0;
    if (!dr.IsValid()) {
        PLOG(WARNING) << "Fail to open /proc/self/fd";
        return -1;
    }
    // Have to limit the scaning which consumes a lot of CPU when #fd
    // are huge (100k+)
    for (; dr.Next() && count <= limit + 3; ++count) {}
    return count - 3 /* skipped ., .. and the fd in dr*/;
#elif defined(OS_MACOSX)
    // TODO(zhujiashun): following code will cause core dump with some
    // probability under mac when program exits. Fix it.
    /*
    static pid_t pid = getpid();
    std::ostringstream oss;
    char cmdbuf[128];
    snprintf(cmdbuf, sizeof(cmdbuf),
             "lsof -p %ld | grep -v \"txt\" | wc -l", (long)pid);
    if (butil::read_command_output(oss, cmdbuf) != 0) {
        LOG(ERROR) << "Fail to read open files";
        return -1;
    }
    const std::string& result = oss.str();
    int count = 0;
    if (sscanf(result.c_str(), "%d", &count) != 1) {
        PLOG(WARNING) << "Fail to sscanf";
        return -1;
    }
    // skipped . and first column line
    count = count - 2;
    return std::min(count, limit);
    */
    return 0;
#else
    return 0;
#endif
}
// Forward declaration: g_fd_num is defined further below but is renamed by
// FdReader when the scan limit is ever reached.
extern PassiveStatus<int> g_fd_num;
// Upper bound on /proc/self/fd entries scanned per refresh (get_fd_count
// scans up to limit + 3 entries to account for ".", ".." and its own fd).
const int MAX_FD_SCAN_COUNT = 10003;
// Latched once the limit is reached; fd counting then stops permanently.
static butil::static_atomic<bool> s_ever_reached_fd_scan_limit = BUTIL_STATIC_ATOMIC_INIT(false);
// Refresh functor for the fd-count bvar. Once the scan ever hits the
// limit, counting is disabled for good and the bvar is re-exposed as
// "process_fd_num_too_many" to flag the condition to users.
class FdReader {
public:
    bool operator()(int* stat) const {
        if (s_ever_reached_fd_scan_limit.load(butil::memory_order_relaxed)) {
            // Never update the count again.
            return false;
        }
        const int count = get_fd_count(MAX_FD_SCAN_COUNT);
        if (count < 0) {
            return false;
        }
        // The exchange makes the rename below happen exactly once even
        // with concurrent readers.
        // NOTE(review): the trigger value (limit - 2) does not obviously
        // match get_fd_count's maximum return of limit + 1 — confirm.
        if (count == MAX_FD_SCAN_COUNT - 2
            && s_ever_reached_fd_scan_limit.exchange(
                true, butil::memory_order_relaxed) == false) {
            // Rename the bvar to notify user.
            g_fd_num.hide();
            g_fd_num.expose("process_fd_num_too_many");
        }
        *stat = count;
        return true;
    }
};
// PassiveStatus callback: the cached fd count, refreshed at most once per
// CACHED_INTERVAL_US via FdReader.
static int print_fd_count(void*) {
    return CachedReader<int>::get_value(FdReader());
}
// ==================================================
// Mirror of /proc/self/io; field order must match the fscanf in
// read_proc_io() below.
struct ProcIO {
    // number of bytes the process read, using any read-like system call (from
    // files, pipes, tty...).
    size_t rchar;
    // number of bytes the process wrote using any write-like system call.
    size_t wchar;
    // number of read-like system call invocations that the process performed.
    size_t syscr;
    // number of write-like system call invocations that the process performed.
    size_t syscw;
    // number of bytes the process directly read from disk.
    size_t read_bytes;
    // number of bytes the process originally dirtied in the page-cache
    // (assuming they will go to disk later).
    size_t write_bytes;
    // number of bytes the process "un-dirtied" - e.g. using an "ftruncate"
    // call that truncated pages from the page-cache.
    size_t cancelled_write_bytes;
};
// Fills `s` with cumulative I/O counters; true on success.
// Linux: all seven fields from /proc/self/io.
// MacOS: only read_bytes/write_bytes via proc_pid_rusage; the rest stay 0.
static bool read_proc_io(ProcIO* s) {
#if defined(OS_LINUX)
    butil::ScopedFILE fp("/proc/self/io", "r");
    if (NULL == fp) {
        PLOG_ONCE(WARNING) << "Fail to open /proc/self/io";
        return false;
    }
    errno = 0;
    // Each "%*s" skips the "name:" label that precedes each value.
    if (fscanf(fp, "%*s %lu %*s %lu %*s %lu %*s %lu %*s %lu %*s %lu %*s %lu",
               &s->rchar, &s->wchar, &s->syscr, &s->syscw,
               &s->read_bytes, &s->write_bytes, &s->cancelled_write_bytes)
        != 7) {
        PLOG(WARNING) << "Fail to fscanf";
        return false;
    }
    return true;
#elif defined(OS_MACOSX)
    // TODO(zhujiashun): get rchar, wchar, syscr, syscw, cancelled_write_bytes
    // in MacOS.
    memset(s, 0, sizeof(ProcIO));
    static pid_t pid = getpid();
    rusage_info_current rusage;
    // NOTE(review): the (void**) cast assumes proc_pid_rusage takes a
    // rusage_info_t* (which is void**) — confirm against <libproc.h>.
    if (proc_pid_rusage(pid, RUSAGE_INFO_CURRENT, (void**)&rusage) != 0) {
        PLOG(WARNING) << "Fail to proc_pid_rusage";
        return false;
    }
    s->read_bytes = rusage.ri_diskio_bytesread;
    s->write_bytes = rusage.ri_diskio_byteswritten;
    return true;
#else
    return false;
#endif
}
// Refresh functor + by-offset accessor for ProcIO fields.
class ProcIOReader {
public:
    bool operator()(ProcIO* stat) const {
        return read_proc_io(stat);
    }
    // Returns the ProcIO field at `offset` (from offsetof below).
    template <typename T, size_t offset>
    static T get_field(void*) {
        return *(T*)((char*)&CachedReader<ProcIO>::get_value(
                         ProcIOReader()) + offset);
    }
};
// Expose one ProcIO field as an anonymous PassiveStatus bvar g_<field>.
#define BVAR_DEFINE_PROC_IO_FIELD(field)                                \
    PassiveStatus<BVAR_MEMBER_TYPE(&ProcIO::field)> g_##field(          \
        ProcIOReader::get_field<BVAR_MEMBER_TYPE(&ProcIO::field),       \
        offsetof(ProcIO, field)>, NULL);
// ==================================================
// Refs:
// https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
// https://www.kernel.org/doc/Documentation/iostats.txt
//
// The /proc/diskstats file displays the I/O statistics of block devices.
// Each line contains the following 14 fields:
struct DiskStat {
    long long major_number;
    // NOTE(review): spelled "mumber" (sic); renaming would touch any code
    // outside this file that uses the field — left as-is.
    long long minor_mumber;
    char device_name[64];
    // The total number of reads completed successfully.
    // (iostat: r/s; the original comment said wMB/s wKB/s — swapped.)
    long long reads_completed;
    // Reads and writes which are adjacent to each other may be merged for
    // efficiency. Thus two 4K reads may become one 8K read before it is
    // ultimately handed to the disk, and so it will be counted (and queued)
    // as only one I/O. This field lets you know how often this was done.
    long long reads_merged; // rrqm/s
    // The total number of sectors read successfully.
    long long sectors_read; // rsec/s
    // The total number of milliseconds spent by all reads (as
    // measured from __make_request() to end_that_request_last()).
    long long time_spent_reading_ms;
    // The total number of writes completed successfully.
    // (iostat: w/s; the original comment said rKB/s rMB/s — swapped.)
    long long writes_completed;
    // See description of reads_merged
    long long writes_merged; // wrqm/s
    // The total number of sectors written successfully.
    long long sectors_written; // wsec/s
    // The total number of milliseconds spent by all writes (as
    // measured from __make_request() to end_that_request_last()).
    long long time_spent_writing_ms;
    // The only field that should go to zero. Incremented as requests are
    // given to appropriate struct request_queue and decremented as they finish.
    long long io_in_progress;
    // This field increases so long as `io_in_progress' is nonzero.
    long long time_spent_io_ms;
    // This field is incremented at each I/O start, I/O completion, I/O
    // merge, or read of these stats by the number of I/Os in progress
    // `io_in_progress' times the number of milliseconds spent doing
    // I/O since the last update of this field. This can provide an easy
    // measure of both I/O completion time and the backlog that may be
    // accumulating.
    long long weighted_time_spent_io_ms;
};
// Reads the first record of /proc/diskstats into `s`; true on success.
// NOTE(review): only the first line (first block device) is parsed; if all
// devices are wanted this needs a loop — confirm intent.
static bool read_disk_stat(DiskStat* s) {
#if defined(OS_LINUX)
    butil::ScopedFILE fp("/proc/diskstats", "r");
    if (NULL == fp) {
        PLOG_ONCE(WARNING) << "Fail to open /proc/diskstats";
        return false;
    }
    errno = 0;
    // %63s bounds the read to device_name's 64-byte buffer; the previous
    // unbounded %s could overflow on an oversized device name.
    if (fscanf(fp, "%lld %lld %63s %lld %lld %lld %lld %lld %lld %lld "
               "%lld %lld %lld %lld",
               &s->major_number,
               &s->minor_mumber,
               s->device_name,
               &s->reads_completed,
               &s->reads_merged,
               &s->sectors_read,
               &s->time_spent_reading_ms,
               &s->writes_completed,
               &s->writes_merged,
               &s->sectors_written,
               &s->time_spent_writing_ms,
               &s->io_in_progress,
               &s->time_spent_io_ms,
               &s->weighted_time_spent_io_ms) != 14) {
        PLOG(WARNING) << "Fail to fscanf";
        return false;
    }
    return true;
#elif defined(OS_MACOSX)
    // TODO(zhujiashun)
    return false;
#else
    return false;
#endif
}
// Refresh functor + by-offset accessor for DiskStat fields.
class DiskStatReader {
public:
    bool operator()(DiskStat* stat) const {
        return read_disk_stat(stat);
    }
    // Returns the DiskStat field at `offset` (from offsetof below).
    template <typename T, size_t offset>
    static T get_field(void*) {
        return *(T*)((char*)&CachedReader<DiskStat>::get_value(
                         DiskStatReader()) + offset);
    }
};
// Expose one DiskStat field as an anonymous PassiveStatus bvar g_<field>.
#define BVAR_DEFINE_DISK_STAT_FIELD(field)                              \
    PassiveStatus<BVAR_MEMBER_TYPE(&DiskStat::field)> g_##field(        \
        DiskStatReader::get_field<BVAR_MEMBER_TYPE(&DiskStat::field),   \
        offsetof(DiskStat, field)>, NULL);
// =====================================
// Captures this process' command line once at construction; served by the
// "process_cmdline" bvar below through a leaky singleton.
struct ReadSelfCmdline {
    std::string content;
    ReadSelfCmdline() {
        char buf[1024];
        const ssize_t nr = butil::ReadCommandLine(buf, sizeof(buf), true);
        // Fixed: ReadCommandLine returns a negative value on failure;
        // appending with a negative count converts to a huge size_t and
        // crashes. Leave `content` empty in that case.
        if (nr > 0) {
            content.append(buf, nr);
        }
    }
};
// Writes the cached command line; the leaky singleton (and thus the one
// actual read) is created on first call.
static void get_cmdline(std::ostream& os, void*) {
    os << butil::get_leaky_singleton<ReadSelfCmdline>()->content;
}
// Captures `uname -ap` output once at construction; `content` stays empty
// if the command fails.
struct ReadVersion {
    std::string content;
    ReadVersion() {
        std::ostringstream oss;
        if (butil::read_command_output(oss, "uname -ap") != 0) {
            LOG(ERROR) << "Fail to read kernel version";
            return;
        }
        content.append(oss.str());
    }
};
// Writes the kernel version string; `uname -ap` runs only on first use via
// the leaky singleton.
static void get_kernel_version(std::ostream& os, void*) {
    os << butil::get_leaky_singleton<ReadVersion>()->content;
}
// ======================================
// Process start time, captured when this compilation unit is initialized.
static int64_t g_starting_time = butil::gettimeofday_us();

// Wall-clock time elapsed since process start, as a timeval.
static timeval get_uptime(void*) {
    const int64_t elapsed_us = butil::gettimeofday_us() - g_starting_time;
    timeval tv;
    tv.tv_sec = elapsed_us / 1000000L;
    tv.tv_usec = elapsed_us % 1000000L;
    return tv;
}
// ======================================
// Feeds getrusage(RUSAGE_SELF) snapshots through CachedReader; get_field
// extracts a single rusage member by byte offset, as with the readers above.
class RUsageReader {
public:
    bool operator()(rusage* stat) const {
        const int rc = getrusage(RUSAGE_SELF, stat);
        if (rc < 0) {
            PLOG(WARNING) << "Fail to getrusage";
            return false;
        }
        return true;
    }
    // Returns the rusage field at `offset` (from offsetof below).
    template <typename T, size_t offset>
    static T get_field(void*) {
        return *(T*)((char*)&CachedReader<rusage>::get_value(
                         RUsageReader()) + offset);
    }
};
// Expose one rusage field as an anonymous PassiveStatus bvar g_<field>.
//
// Fixed: both macros previously ended with a trailing '\' after their last
// line. With no blank line following, that continuation splices the next
// source line into the macro body (the first macro would swallow the second
// #define entirely). The dangling backslashes are removed.
#define BVAR_DEFINE_RUSAGE_FIELD(field)                                 \
    PassiveStatus<BVAR_MEMBER_TYPE(&rusage::field)> g_##field(          \
        RUsageReader::get_field<BVAR_MEMBER_TYPE(&rusage::field),       \
        offsetof(rusage, field)>, NULL);
// Same, but exposed for dumping under the given name.
#define BVAR_DEFINE_RUSAGE_FIELD2(field, name)                          \
    PassiveStatus<BVAR_MEMBER_TYPE(&rusage::field)> g_##field(          \
        name,                                                           \
        RUsageReader::get_field<BVAR_MEMBER_TYPE(&rusage::field),       \
        offsetof(rusage, field)>, NULL);
// ======================================
// Identity fields from /proc/self/stat.
BVAR_DEFINE_PROC_STAT_FIELD2(pid, "pid");
BVAR_DEFINE_PROC_STAT_FIELD2(ppid, "ppid");
BVAR_DEFINE_PROC_STAT_FIELD2(pgrp, "pgrp");
// Writes the login name of the user running this process, or
// "unknown (<error>)" when getlogin_r fails (e.g. in daemons with no
// controlling terminal; berror() presumably renders errno — confirm).
static void get_username(std::ostream& os, void*) {
    char buf[32];
    if (getlogin_r(buf, sizeof(buf)) == 0) {
        // Defensive termination; getlogin_r should already NUL-terminate.
        buf[sizeof(buf)-1] = '\0';
        os << buf;
    } else {
        os << "unknown (" << berror() << ')' ;
    }
}
PassiveStatus<std::string> g_username(
    "process_username", get_username, NULL);
// Page-fault counters from /proc/self/stat.
BVAR_DEFINE_PROC_STAT_FIELD(minflt);
PerSecond<PassiveStatus<unsigned long> > g_minflt_second(
    "process_faults_minor_second", &g_minflt);
BVAR_DEFINE_PROC_STAT_FIELD2(majflt, "process_faults_major");
// Scheduling attributes.
BVAR_DEFINE_PROC_STAT_FIELD2(priority, "process_priority");
BVAR_DEFINE_PROC_STAT_FIELD2(nice, "process_nice");
BVAR_DEFINE_PROC_STAT_FIELD2(num_threads, "process_thread_count");
PassiveStatus<int> g_fd_num("process_fd_count", print_fd_count, NULL);
// Memory footprint, in bytes (see ProcMemoryReader's pagesize scaling).
BVAR_DEFINE_PROC_MEMORY_FIELD(size, "process_memory_virtual");
BVAR_DEFINE_PROC_MEMORY_FIELD(resident, "process_memory_resident");
BVAR_DEFINE_PROC_MEMORY_FIELD(share, "process_memory_shared");
BVAR_DEFINE_PROC_MEMORY_FIELD(trs, "process_memory_text");
BVAR_DEFINE_PROC_MEMORY_FIELD(drs, "process_memory_data_and_stack");
BVAR_DEFINE_PROC_MEMORY_FIELD(lrs, "process_memory_library");
BVAR_DEFINE_PROC_MEMORY_FIELD(dt, "process_memory_dirty");
// System load averages.
BVAR_DEFINE_LOAD_AVERAGE_FIELD(loadavg_1m, "system_loadavg_1m");
BVAR_DEFINE_LOAD_AVERAGE_FIELD(loadavg_5m, "system_loadavg_5m");
BVAR_DEFINE_LOAD_AVERAGE_FIELD(loadavg_15m, "system_loadavg_15m");
// Cumulative I/O counters from /proc/self/io plus per-second derivatives.
BVAR_DEFINE_PROC_IO_FIELD(rchar);
BVAR_DEFINE_PROC_IO_FIELD(wchar);
PerSecond<PassiveStatus<size_t> > g_io_read_second(
    "process_io_read_bytes_second", &g_rchar);
PerSecond<PassiveStatus<size_t> > g_io_write_second(
    "process_io_write_bytes_second", &g_wchar);
BVAR_DEFINE_PROC_IO_FIELD(syscr);
BVAR_DEFINE_PROC_IO_FIELD(syscw);
PerSecond<PassiveStatus<size_t> > g_io_num_reads_second(
    "process_io_read_second", &g_syscr);
PerSecond<PassiveStatus<size_t> > g_io_num_writes_second(
    "process_io_write_second", &g_syscw);
BVAR_DEFINE_PROC_IO_FIELD(read_bytes);
BVAR_DEFINE_PROC_IO_FIELD(write_bytes);
PerSecond<PassiveStatus<size_t> > g_disk_read_second(
    "process_disk_read_bytes_second", &g_read_bytes);
PerSecond<PassiveStatus<size_t> > g_disk_write_second(
    "process_disk_write_bytes_second", &g_write_bytes);
// CPU time split (timeval) from getrusage, and process uptime.
BVAR_DEFINE_RUSAGE_FIELD(ru_utime);
BVAR_DEFINE_RUSAGE_FIELD(ru_stime);
PassiveStatus<timeval> g_uptime("process_uptime", get_uptime, NULL);
// Number of processors currently online.
static int get_core_num(void*) {
    return sysconf(_SC_NPROCESSORS_ONLN);
}
PassiveStatus<int> g_core_num("system_core_count", get_core_num, NULL);
// Ratio of accumulated CPU time to wall time; streamed as a fraction with
// three decimal places (e.g. "0.500" == half a core on average).
struct TimePercent {
    int64_t time_us;
    int64_t real_time_us;
    void operator-=(const TimePercent& other) {
        time_us -= other.time_us;
        real_time_us -= other.real_time_us;
    }
    void operator+=(const TimePercent& other) {
        time_us += other.time_us;
        real_time_us += other.real_time_us;
    }
};
inline std::ostream& operator<<(std::ostream& os, const TimePercent& tp) {
    // Guard against a zero (or meaningless negative) wall-time window.
    if (tp.real_time_us <= 0) {
        os << "0";
        return os;
    }
    const double ratio = (double)tp.time_us / tp.real_time_us;
    os << std::fixed << std::setprecision(3) << ratio;
    return os;
}
// Total (user + system) CPU time over wall time, i.e. average CPU usage,
// windowed over the dump interval below.
static TimePercent get_cputime_percent(void*) {
    TimePercent tp = { butil::timeval_to_microseconds(g_ru_stime.get_value()) +
                       butil::timeval_to_microseconds(g_ru_utime.get_value()),
                       butil::timeval_to_microseconds(g_uptime.get_value()) };
    return tp;
}
PassiveStatus<TimePercent> g_cputime_percent(get_cputime_percent, NULL);
Window<PassiveStatus<TimePercent>, SERIES_IN_SECOND> g_cputime_percent_second(
    "process_cpu_usage", &g_cputime_percent, FLAGS_bvar_dump_interval);
// System-mode CPU time over wall time, windowed over the dump interval.
static TimePercent get_stime_percent(void*) {
    TimePercent tp = { butil::timeval_to_microseconds(g_ru_stime.get_value()),
                       butil::timeval_to_microseconds(g_uptime.get_value()) };
    return tp;
}
PassiveStatus<TimePercent> g_stime_percent(get_stime_percent, NULL);
Window<PassiveStatus<TimePercent>, SERIES_IN_SECOND> g_stime_percent_second(
    "process_cpu_usage_system", &g_stime_percent, FLAGS_bvar_dump_interval);
// User-mode CPU time over wall time, windowed over the dump interval.
static TimePercent get_utime_percent(void*) {
    TimePercent tp = { butil::timeval_to_microseconds(g_ru_utime.get_value()),
                       butil::timeval_to_microseconds(g_uptime.get_value()) };
    return tp;
}
PassiveStatus<TimePercent> g_utime_percent(get_utime_percent, NULL);
Window<PassiveStatus<TimePercent>, SERIES_IN_SECOND> g_utime_percent_second(
    "process_cpu_usage_user", &g_utime_percent, FLAGS_bvar_dump_interval);
// According to http://man7.org/linux/man-pages/man2/getrusage.2.html
// Unsupported fields in linux:
//   ru_ixrss
//   ru_idrss
//   ru_isrss
//   ru_nswap
//   ru_nsignals
// Block I/O and context-switch counters, with per-second derivatives.
BVAR_DEFINE_RUSAGE_FIELD(ru_inblock);
BVAR_DEFINE_RUSAGE_FIELD(ru_oublock);
BVAR_DEFINE_RUSAGE_FIELD(ru_nvcsw);
BVAR_DEFINE_RUSAGE_FIELD(ru_nivcsw);
PerSecond<PassiveStatus<long> > g_ru_inblock_second(
    "process_inblocks_second", &g_ru_inblock);
PerSecond<PassiveStatus<long> > g_ru_oublock_second(
    "process_outblocks_second", &g_ru_oublock);
PerSecond<PassiveStatus<long> > cs_vol_second(
    "process_context_switches_voluntary_second", &g_ru_nvcsw);
PerSecond<PassiveStatus<long> > cs_invol_second(
    "process_context_switches_involuntary_second", &g_ru_nivcsw);
// Static process/system description strings.
PassiveStatus<std::string> g_cmdline("process_cmdline", get_cmdline, NULL);
PassiveStatus<std::string> g_kernel_version(
    "kernel_version", get_kernel_version, NULL);
static std::string* s_gcc_version = NULL;
pthread_once_t g_gen_gcc_version_once = PTHREAD_ONCE_INIT;

// Builds "<major>.<minor>.<patchlevel>" into *s_gcc_version, omitting the
// trailing components the compiler does not report; "unknown" when even
// __GNUC__ is absent. Run exactly once via pthread_once below.
void gen_gcc_version() {
#if defined(__GNUC__)
    const int gcc_major = __GNUC__;
#else
    const int gcc_major = -1;
#endif
#if defined(__GNUC_MINOR__)
    const int gcc_minor = __GNUC_MINOR__;
#else
    const int gcc_minor = -1;
#endif
#if defined(__GNUC_PATCHLEVEL__)
    const int gcc_patchlevel = __GNUC_PATCHLEVEL__;
#else
    const int gcc_patchlevel = -1;
#endif
    s_gcc_version = new std::string;
    if (gcc_major == -1) {
        *s_gcc_version = "unknown";
        return;
    }
    std::ostringstream oss;
    oss << gcc_major;
    // Fixed: the early returns below used to leave *s_gcc_version empty
    // because oss was never copied into it; assign before every return.
    if (gcc_minor == -1) {
        *s_gcc_version = oss.str();
        return;
    }
    oss << '.' << gcc_minor;
    if (gcc_patchlevel == -1) {
        *s_gcc_version = oss.str();
        return;
    }
    oss << '.' << gcc_patchlevel;
    *s_gcc_version = oss.str();
}
// Writes the (lazily generated) compiler version string.
void get_gcc_version(std::ostream& os, void*) {
    pthread_once(&g_gen_gcc_version_once, gen_gcc_version);
    os << *s_gcc_version;
}
// =============================================
PassiveStatus<std::string> g_gcc_version("gcc_version", get_gcc_version, NULL);
// Writes the process' current working directory; logs a warning when
// GetCurrentDirectory fails.
void get_work_dir(std::ostream& os, void*) {
    butil::FilePath path;
    const bool rc = butil::GetCurrentDirectory(&path);
    LOG_IF(WARNING, !rc) << "Fail to GetCurrentDirectory";
    os << path.value();
}
PassiveStatus<std::string> g_work_dir("process_work_dir", get_work_dir, NULL);
// The helper macros are file-local; undefine them so they cannot leak.
// NOTE(review): BVAR_DEFINE_LOAD_AVERAGE_FIELD, BVAR_DEFINE_PROC_IO_FIELD
// and BVAR_DEFINE_DISK_STAT_FIELD are not undefined here — confirm whether
// that is intentional.
#undef BVAR_MEMBER_TYPE
#undef BVAR_DEFINE_PROC_STAT_FIELD
#undef BVAR_DEFINE_PROC_STAT_FIELD2
#undef BVAR_DEFINE_PROC_MEMORY_FIELD
#undef BVAR_DEFINE_RUSAGE_FIELD
#undef BVAR_DEFINE_RUSAGE_FIELD2
}  // namespace bvar
// In the same scope where timeval is defined. Required by clang.
// Prints "<sec>.<usec>" with the microsecond part zero-padded to 6 digits.
// Fixed: std::setfill is sticky; the original left '0' as the stream's fill
// character for all subsequent output. Save and restore the previous fill.
inline std::ostream& operator<<(std::ostream& os, const timeval& tm) {
    const char prev_fill = os.fill('0');
    os << tm.tv_sec << '.' << std::setw(6) << tm.tv_usec;
    os.fill(prev_fill);
    return os;
}
|
#include "pch.h"
#include "JsonHelpers.h"
#include "FancyZonesData.h"
#include "FancyZonesDataTypes.h"
#include "trace.h"
#include "util.h"
#include <common/logger/logger.h>
#include <filesystem>
#include <optional>
#include <string>
#include <utility>
#include <vector>
// Non-Localizable strings
// JSON key names used by the (de)serializers below. These are part of the
// on-disk settings format — changing any value breaks existing user data.
namespace NonLocalizable
{
    const wchar_t ActiveZoneSetStr[] = L"active-zoneset";
    const wchar_t AppPathStr[] = L"app-path";
    const wchar_t AppZoneHistoryStr[] = L"app-zone-history";
    const wchar_t CanvasStr[] = L"canvas";
    const wchar_t CellChildMapStr[] = L"cell-child-map";
    const wchar_t ColumnsPercentageStr[] = L"columns-percentage";
    const wchar_t ColumnsStr[] = L"columns";
    const wchar_t CustomZoneSetsStr[] = L"custom-zone-sets";
    const wchar_t DeviceIdStr[] = L"device-id";
    const wchar_t DevicesStr[] = L"devices";
    const wchar_t EditorShowSpacingStr[] = L"editor-show-spacing";
    const wchar_t EditorSpacingStr[] = L"editor-spacing";
    const wchar_t EditorZoneCountStr[] = L"editor-zone-count";
    const wchar_t EditorSensitivityRadiusStr[] = L"editor-sensitivity-radius";
    const wchar_t GridStr[] = L"grid";
    const wchar_t HeightStr[] = L"height";
    const wchar_t HistoryStr[] = L"history";
    const wchar_t InfoStr[] = L"info";
    const wchar_t NameStr[] = L"name";
    const wchar_t QuickAccessKey[] = L"key";
    const wchar_t QuickAccessUuid[] = L"uuid";
    const wchar_t QuickLayoutKeys[] = L"quick-layout-keys";
    const wchar_t RefHeightStr[] = L"ref-height";
    const wchar_t RefWidthStr[] = L"ref-width";
    const wchar_t RowsPercentageStr[] = L"rows-percentage";
    const wchar_t RowsStr[] = L"rows";
    const wchar_t SensitivityRadius[] = L"sensitivity-radius";
    const wchar_t ShowSpacing[] = L"show-spacing";
    const wchar_t Spacing[] = L"spacing";
    const wchar_t Templates[] = L"templates";
    const wchar_t TypeStr[] = L"type";
    const wchar_t UuidStr[] = L"uuid";
    const wchar_t WidthStr[] = L"width";
    const wchar_t XStr[] = L"X";
    const wchar_t YStr[] = L"Y";
    const wchar_t ZoneIndexSetStr[] = L"zone-index-set";
    const wchar_t ZoneIndexStr[] = L"zone-index";
    const wchar_t ZoneSetUuidStr[] = L"zoneset-uuid";
    const wchar_t ZonesStr[] = L"zones";
    // Editor arguments
    const wchar_t Dpi[] = L"dpi";
    const wchar_t MonitorId[] = L"monitor-id";
    const wchar_t TopCoordinate[] = L"top-coordinate";
    const wchar_t LeftCoordinate[] = L"left-coordinate";
    const wchar_t IsSelected[] = L"is-selected";
    const wchar_t ProcessId[] = L"process-id";
    const wchar_t SpanZonesAcrossMonitors[] = L"span-zones-across-monitors";
    const wchar_t Monitors[] = L"monitors";
}
namespace
{
json::JsonArray NumVecToJsonArray(const std::vector<int>& vec)
{
json::JsonArray arr;
for (const auto& val : vec)
{
arr.Append(json::JsonValue::CreateNumberValue(val));
}
return arr;
}
std::vector<int> JsonArrayToNumVec(const json::JsonArray& arr)
{
std::vector<int> vec;
for (const auto& val : arr)
{
vec.emplace_back(static_cast<int>(val.GetNumber()));
}
return vec;
}
    // Parses one app-zone-history entry. Supports both the current
    // "zone-index-set" array and the legacy scalar "zone-index" key.
    // Returns std::nullopt when the device id or zone-set uuid fails
    // validation.
    std::optional<FancyZonesDataTypes::AppZoneHistoryData> ParseSingleAppZoneHistoryItem(const json::JsonObject& json)
    {
        FancyZonesDataTypes::AppZoneHistoryData data;
        if (json.HasKey(NonLocalizable::ZoneIndexSetStr))
        {
            data.zoneIndexSet = {};
            for (const auto& value : json.GetNamedArray(NonLocalizable::ZoneIndexSetStr))
            {
                data.zoneIndexSet.push_back(static_cast<size_t>(value.GetNumber()));
            }
        }
        else if (json.HasKey(NonLocalizable::ZoneIndexStr))
        {
            // Legacy format: a single zone index.
            data.zoneIndexSet = { static_cast<size_t>(json.GetNamedNumber(NonLocalizable::ZoneIndexStr)) };
        }
        // NOTE(review): GetNamedString throws if the key is absent; callers
        // are presumably expected to catch winrt::hresult_error — confirm.
        data.deviceId = json.GetNamedString(NonLocalizable::DeviceIdStr);
        data.zoneSetUuid = json.GetNamedString(NonLocalizable::ZoneSetUuidStr);
        if (!FancyZonesUtils::IsValidGuid(data.zoneSetUuid) || !FancyZonesUtils::IsValidDeviceId(data.deviceId))
        {
            return std::nullopt;
        }
        return data;
    }
inline bool DeleteTmpFile(std::wstring_view tmpFilePath)
{
return DeleteFileW(tmpFilePath.data());
}
}
namespace JSONHelpers
{
json::JsonObject CanvasLayoutInfoJSON::ToJson(const FancyZonesDataTypes::CanvasLayoutInfo& canvasInfo)
{
json::JsonObject infoJson{};
infoJson.SetNamedValue(NonLocalizable::RefWidthStr, json::value(canvasInfo.lastWorkAreaWidth));
infoJson.SetNamedValue(NonLocalizable::RefHeightStr, json::value(canvasInfo.lastWorkAreaHeight));
json::JsonArray zonesJson;
for (const auto& [x, y, width, height] : canvasInfo.zones)
{
json::JsonObject zoneJson;
zoneJson.SetNamedValue(NonLocalizable::XStr, json::value(x));
zoneJson.SetNamedValue(NonLocalizable::YStr, json::value(y));
zoneJson.SetNamedValue(NonLocalizable::WidthStr, json::value(width));
zoneJson.SetNamedValue(NonLocalizable::HeightStr, json::value(height));
zonesJson.Append(zoneJson);
}
infoJson.SetNamedValue(NonLocalizable::ZonesStr, zonesJson);
infoJson.SetNamedValue(NonLocalizable::SensitivityRadius, json::value(canvasInfo.sensitivityRadius));
return infoJson;
}
    // Deserializes a canvas layout. Returns std::nullopt if a required key
    // is missing or mistyped (the winrt getters throw hresult_error, caught
    // below). "sensitivity-radius" is optional and defaults to
    // DefaultValues::SensitivityRadius.
    std::optional<FancyZonesDataTypes::CanvasLayoutInfo> CanvasLayoutInfoJSON::FromJson(const json::JsonObject& infoJson)
    {
        try
        {
            FancyZonesDataTypes::CanvasLayoutInfo info;
            info.lastWorkAreaWidth = static_cast<int>(infoJson.GetNamedNumber(NonLocalizable::RefWidthStr));
            info.lastWorkAreaHeight = static_cast<int>(infoJson.GetNamedNumber(NonLocalizable::RefHeightStr));
            json::JsonArray zonesJson = infoJson.GetNamedArray(NonLocalizable::ZonesStr);
            uint32_t size = zonesJson.Size();
            info.zones.reserve(size);
            for (uint32_t i = 0; i < size; ++i)
            {
                json::JsonObject zoneJson = zonesJson.GetObjectAt(i);
                const int x = static_cast<int>(zoneJson.GetNamedNumber(NonLocalizable::XStr));
                const int y = static_cast<int>(zoneJson.GetNamedNumber(NonLocalizable::YStr));
                const int width = static_cast<int>(zoneJson.GetNamedNumber(NonLocalizable::WidthStr));
                const int height = static_cast<int>(zoneJson.GetNamedNumber(NonLocalizable::HeightStr));
                FancyZonesDataTypes::CanvasLayoutInfo::Rect zone{ x, y, width, height };
                info.zones.push_back(zone);
            }
            info.sensitivityRadius = static_cast<int>(infoJson.GetNamedNumber(NonLocalizable::SensitivityRadius, DefaultValues::SensitivityRadius));
            return info;
        }
        catch (const winrt::hresult_error&)
        {
            return std::nullopt;
        }
    }
// Serializes a grid layout (rows/columns, size percentages, the
// cell-to-zone map, spacing and sensitivity settings) into a JSON object.
json::JsonObject GridLayoutInfoJSON::ToJson(const FancyZonesDataTypes::GridLayoutInfo& gridInfo)
{
    json::JsonObject infoJson;
    infoJson.SetNamedValue(NonLocalizable::RowsStr, json::value(gridInfo.m_rows));
    infoJson.SetNamedValue(NonLocalizable::ColumnsStr, json::value(gridInfo.m_columns));
    infoJson.SetNamedValue(NonLocalizable::RowsPercentageStr, NumVecToJsonArray(gridInfo.m_rowsPercents));
    infoJson.SetNamedValue(NonLocalizable::ColumnsPercentageStr, NumVecToJsonArray(gridInfo.m_columnsPercents));

    // Range-for avoids the signed/unsigned comparison the previous index
    // loop had (int i vs. m_cellChildMap.size()).
    json::JsonArray cellChildMapJson;
    for (const auto& row : gridInfo.m_cellChildMap)
    {
        cellChildMapJson.Append(NumVecToJsonArray(row));
    }
    infoJson.SetNamedValue(NonLocalizable::CellChildMapStr, cellChildMapJson);

    infoJson.SetNamedValue(NonLocalizable::SensitivityRadius, json::value(gridInfo.m_sensitivityRadius));
    infoJson.SetNamedValue(NonLocalizable::ShowSpacing, json::value(gridInfo.m_showSpacing));
    infoJson.SetNamedValue(NonLocalizable::Spacing, json::value(gridInfo.m_spacing));
    return infoJson;
}
// Deserializes a grid layout, validating that the percentage arrays and
// cell map match the declared row/column counts. Returns std::nullopt on
// any structural inconsistency or missing field.
std::optional<FancyZonesDataTypes::GridLayoutInfo> GridLayoutInfoJSON::FromJson(const json::JsonObject& infoJson)
{
    try
    {
        FancyZonesDataTypes::GridLayoutInfo info(FancyZonesDataTypes::GridLayoutInfo::Minimal{});
        info.m_rows = static_cast<int>(infoJson.GetNamedNumber(NonLocalizable::RowsStr));
        info.m_columns = static_cast<int>(infoJson.GetNamedNumber(NonLocalizable::ColumnsStr));

        // Reject corrupt data early. This also makes the Size() comparisons
        // below well defined: previously a negative int was implicitly
        // converted to uint32_t and wrapped to a huge value.
        if (info.m_rows <= 0 || info.m_columns <= 0)
        {
            return std::nullopt;
        }
        const auto rows = static_cast<uint32_t>(info.m_rows);
        const auto columns = static_cast<uint32_t>(info.m_columns);

        json::JsonArray rowsPercentage = infoJson.GetNamedArray(NonLocalizable::RowsPercentageStr);
        json::JsonArray columnsPercentage = infoJson.GetNamedArray(NonLocalizable::ColumnsPercentageStr);
        json::JsonArray cellChildMap = infoJson.GetNamedArray(NonLocalizable::CellChildMapStr);
        if (rowsPercentage.Size() != rows || columnsPercentage.Size() != columns || cellChildMap.Size() != rows)
        {
            return std::nullopt;
        }

        info.m_rowsPercents = JsonArrayToNumVec(rowsPercentage);
        info.m_columnsPercents = JsonArrayToNumVec(columnsPercentage);
        for (const auto& cellsRow : cellChildMap)
        {
            const auto cellsArray = cellsRow.GetArray();
            // Every row of the cell map must have exactly one entry per column.
            if (cellsArray.Size() != columns)
            {
                return std::nullopt;
            }
            info.cellChildMap().push_back(JsonArrayToNumVec(cellsArray));
        }

        // Optional fields: fall back to defaults when absent.
        info.m_showSpacing = infoJson.GetNamedBoolean(NonLocalizable::ShowSpacing, DefaultValues::ShowSpacing);
        info.m_spacing = static_cast<int>(infoJson.GetNamedNumber(NonLocalizable::Spacing, DefaultValues::Spacing));
        info.m_sensitivityRadius = static_cast<int>(infoJson.GetNamedNumber(NonLocalizable::SensitivityRadius, DefaultValues::SensitivityRadius));
        return info;
    }
    catch (const winrt::hresult_error&)
    {
        return std::nullopt;
    }
}
// Serializes a custom layout to JSON: the layout GUID, its display name,
// and a type-specific "info" payload (canvas or grid).
// NOTE(review): layout types other than Canvas/Grid fall through the switch
// and produce an object without "type"/"info" fields — presumably
// unreachable for valid data; confirm against CustomLayoutType.
json::JsonObject CustomZoneSetJSON::ToJson(const CustomZoneSetJSON& customZoneSet)
{
    json::JsonObject result{};
    result.SetNamedValue(NonLocalizable::UuidStr, json::value(customZoneSet.uuid));
    result.SetNamedValue(NonLocalizable::NameStr, json::value(customZoneSet.data.name));
    switch (customZoneSet.data.type)
    {
    case FancyZonesDataTypes::CustomLayoutType::Canvas:
    {
        result.SetNamedValue(NonLocalizable::TypeStr, json::value(NonLocalizable::CanvasStr));
        // std::get throws if data.info does not actually hold the canvas
        // alternative; type and info are expected to agree.
        FancyZonesDataTypes::CanvasLayoutInfo info = std::get<FancyZonesDataTypes::CanvasLayoutInfo>(customZoneSet.data.info);
        result.SetNamedValue(NonLocalizable::InfoStr, CanvasLayoutInfoJSON::ToJson(info));
        break;
    }
    case FancyZonesDataTypes::CustomLayoutType::Grid:
    {
        result.SetNamedValue(NonLocalizable::TypeStr, json::value(NonLocalizable::GridStr));
        FancyZonesDataTypes::GridLayoutInfo gridInfo = std::get<FancyZonesDataTypes::GridLayoutInfo>(customZoneSet.data.info);
        result.SetNamedValue(NonLocalizable::InfoStr, GridLayoutInfoJSON::ToJson(gridInfo));
        break;
    }
    }
    return result;
}
// Deserializes a custom layout record. Any validation failure (bad GUID,
// unknown type string, unparseable "info" payload, missing fields) yields
// std::nullopt.
std::optional<CustomZoneSetJSON> CustomZoneSetJSON::FromJson(const json::JsonObject& customZoneSet)
{
    try
    {
        CustomZoneSetJSON result;
        result.uuid = customZoneSet.GetNamedString(NonLocalizable::UuidStr);
        if (!FancyZonesUtils::IsValidGuid(result.uuid))
        {
            return std::nullopt;
        }
        result.data.name = customZoneSet.GetNamedString(NonLocalizable::NameStr);
        json::JsonObject infoJson = customZoneSet.GetNamedObject(NonLocalizable::InfoStr);
        // The "type" string selects which concrete layout parser to run.
        std::wstring zoneSetType = std::wstring{ customZoneSet.GetNamedString(NonLocalizable::TypeStr) };
        if (zoneSetType.compare(NonLocalizable::CanvasStr) == 0)
        {
            if (auto info = CanvasLayoutInfoJSON::FromJson(infoJson); info.has_value())
            {
                result.data.type = FancyZonesDataTypes::CustomLayoutType::Canvas;
                result.data.info = std::move(info.value());
            }
            else
            {
                return std::nullopt;
            }
        }
        else if (zoneSetType.compare(NonLocalizable::GridStr) == 0)
        {
            if (auto info = GridLayoutInfoJSON::FromJson(infoJson); info.has_value())
            {
                result.data.type = FancyZonesDataTypes::CustomLayoutType::Grid;
                result.data.info = std::move(info.value());
            }
            else
            {
                return std::nullopt;
            }
        }
        else
        {
            // Unrecognized layout type: reject the whole record.
            return std::nullopt;
        }
        return result;
    }
    catch (const winrt::hresult_error&)
    {
        // A required field was missing or had the wrong JSON type.
        return std::nullopt;
    }
}
json::JsonObject ZoneSetDataJSON::ToJson(const FancyZonesDataTypes::ZoneSetData& zoneSet)
{
    // A zone set is persisted as its GUID plus the stringified layout type.
    json::JsonObject serialized{};
    serialized.SetNamedValue(NonLocalizable::UuidStr, json::value(zoneSet.uuid));
    serialized.SetNamedValue(NonLocalizable::TypeStr, json::value(TypeToString(zoneSet.type)));
    return serialized;
}
std::optional<FancyZonesDataTypes::ZoneSetData> ZoneSetDataJSON::FromJson(const json::JsonObject& zoneSet)
{
    try
    {
        FancyZonesDataTypes::ZoneSetData parsed;
        parsed.uuid = zoneSet.GetNamedString(NonLocalizable::UuidStr);
        if (!FancyZonesUtils::IsValidGuid(parsed.uuid))
        {
            // Malformed GUID: refuse the record before doing further work.
            return std::nullopt;
        }
        parsed.type = FancyZonesDataTypes::TypeFromString(std::wstring{ zoneSet.GetNamedString(NonLocalizable::TypeStr) });
        return parsed;
    }
    catch (const winrt::hresult_error&)
    {
        // Missing/mistyped field — treat as unparseable.
        return std::nullopt;
    }
}
json::JsonObject AppZoneHistoryJSON::ToJson(const AppZoneHistoryJSON& appZoneHistory)
{
    json::JsonObject result{};
    result.SetNamedValue(NonLocalizable::AppPathStr, json::value(appZoneHistory.appPath));

    // One record per device/zone-set entry, collected under "history".
    json::JsonArray historyArray;
    for (const auto& entry : appZoneHistory.data)
    {
        json::JsonObject entryJson;

        json::JsonArray indexSetJson;
        for (const size_t zoneIndex : entry.zoneIndexSet)
        {
            indexSetJson.Append(json::value(static_cast<int>(zoneIndex)));
        }

        entryJson.SetNamedValue(NonLocalizable::ZoneIndexSetStr, indexSetJson);
        entryJson.SetNamedValue(NonLocalizable::DeviceIdStr, json::value(entry.deviceId));
        entryJson.SetNamedValue(NonLocalizable::ZoneSetUuidStr, json::value(entry.zoneSetUuid));
        historyArray.Append(entryJson);
    }
    result.SetNamedValue(NonLocalizable::HistoryStr, historyArray);
    return result;
}
// Deserializes an application's zone history. Two schemas are supported:
//  - current: { "app-path", "history": [ <entry>, ... ] }
//  - legacy:  a single entry's fields stored directly on the application
//    object (one desktop layout per application).
// Returns std::nullopt when no valid history entry can be parsed.
std::optional<AppZoneHistoryJSON> AppZoneHistoryJSON::FromJson(const json::JsonObject& zoneSet)
{
    try
    {
        AppZoneHistoryJSON result;
        result.appPath = zoneSet.GetNamedString(NonLocalizable::AppPathStr);
        if (zoneSet.HasKey(NonLocalizable::HistoryStr))
        {
            auto appHistoryArray = zoneSet.GetNamedArray(NonLocalizable::HistoryStr);
            for (uint32_t i = 0; i < appHistoryArray.Size(); ++i)
            {
                json::JsonObject json = appHistoryArray.GetObjectAt(i);
                // Invalid entries are skipped rather than failing the record.
                if (auto data = ParseSingleAppZoneHistoryItem(json); data.has_value())
                {
                    result.data.push_back(std::move(data.value()));
                }
            }
        }
        else
        {
            // handle previous file format, with single desktop layout information per application
            if (auto data = ParseSingleAppZoneHistoryItem(zoneSet); data.has_value())
            {
                result.data.push_back(std::move(data.value()));
            }
        }
        // An application with zero valid entries carries no useful history.
        if (result.data.empty())
        {
            return std::nullopt;
        }
        return result;
    }
    catch (const winrt::hresult_error&)
    {
        return std::nullopt;
    }
}
json::JsonObject DeviceInfoJSON::ToJson(const DeviceInfoJSON& device)
{
    // Flatten the device record: id, active layout, then editor settings.
    json::JsonObject deviceJson{};
    deviceJson.SetNamedValue(NonLocalizable::DeviceIdStr, json::value(device.deviceId));
    deviceJson.SetNamedValue(NonLocalizable::ActiveZoneSetStr, ZoneSetDataJSON::ToJson(device.data.activeZoneSet));
    deviceJson.SetNamedValue(NonLocalizable::EditorShowSpacingStr, json::value(device.data.showSpacing));
    deviceJson.SetNamedValue(NonLocalizable::EditorSpacingStr, json::value(device.data.spacing));
    deviceJson.SetNamedValue(NonLocalizable::EditorZoneCountStr, json::value(device.data.zoneCount));
    deviceJson.SetNamedValue(NonLocalizable::EditorSensitivityRadiusStr, json::value(device.data.sensitivityRadius));
    return deviceJson;
}
// Deserializes per-device (work-area) settings: device id, active zone set
// and editor parameters. Returns std::nullopt on an invalid device id, an
// unparseable active zone set, or a missing required field.
std::optional<DeviceInfoJSON> DeviceInfoJSON::FromJson(const json::JsonObject& device)
{
    try
    {
        DeviceInfoJSON result;
        result.deviceId = device.GetNamedString(NonLocalizable::DeviceIdStr);
        if (!FancyZonesUtils::IsValidDeviceId(result.deviceId))
        {
            return std::nullopt;
        }
        // The active zone set is mandatory; without it the record is useless.
        if (auto zoneSet = JSONHelpers::ZoneSetDataJSON::FromJson(device.GetNamedObject(NonLocalizable::ActiveZoneSetStr)); zoneSet.has_value())
        {
            result.data.activeZoneSet = std::move(zoneSet.value());
        }
        else
        {
            return std::nullopt;
        }
        result.data.showSpacing = device.GetNamedBoolean(NonLocalizable::EditorShowSpacingStr);
        result.data.spacing = static_cast<int>(device.GetNamedNumber(NonLocalizable::EditorSpacingStr));
        result.data.zoneCount = static_cast<int>(device.GetNamedNumber(NonLocalizable::EditorZoneCountStr));
        // Optional field: falls back to the default when absent, unlike the
        // editor fields above which throw (-> nullopt) when missing.
        result.data.sensitivityRadius = static_cast<int>(device.GetNamedNumber(NonLocalizable::EditorSensitivityRadiusStr, DefaultValues::SensitivityRadius));
        return result;
    }
    catch (const winrt::hresult_error&)
    {
        return std::nullopt;
    }
}
json::JsonObject LayoutQuickKeyJSON::ToJson(const LayoutQuickKeyJSON& layoutQuickKey)
{
    // Persist the layout GUID together with its assigned quick-access key.
    json::JsonObject serialized{};
    serialized.SetNamedValue(NonLocalizable::QuickAccessUuid, json::value(layoutQuickKey.layoutUuid));
    serialized.SetNamedValue(NonLocalizable::QuickAccessKey, json::value(layoutQuickKey.key));
    return serialized;
}
std::optional<LayoutQuickKeyJSON> LayoutQuickKeyJSON::FromJson(const json::JsonObject& layoutQuickKey)
{
    try
    {
        LayoutQuickKeyJSON parsed;
        parsed.layoutUuid = layoutQuickKey.GetNamedString(NonLocalizable::QuickAccessUuid);
        if (!FancyZonesUtils::IsValidGuid(parsed.layoutUuid))
        {
            // A quick key must reference a well-formed layout GUID.
            return std::nullopt;
        }
        parsed.key = static_cast<int>(layoutQuickKey.GetNamedNumber(NonLocalizable::QuickAccessKey));
        return parsed;
    }
    catch (const winrt::hresult_error&)
    {
        // Missing/mistyped field — treat as unparseable.
        return std::nullopt;
    }
}
// Serializes one monitor descriptor for the editor command line / IPC.
json::JsonObject MonitorInfo::ToJson(const MonitorInfo& monitor)
{
    json::JsonObject result{};
    result.SetNamedValue(NonLocalizable::Dpi, json::value(monitor.dpi));
    result.SetNamedValue(NonLocalizable::MonitorId, json::value(monitor.id));
    result.SetNamedValue(NonLocalizable::TopCoordinate, json::value(monitor.top));
    result.SetNamedValue(NonLocalizable::LeftCoordinate, json::value(monitor.left));
    // Use the shared key constants instead of raw L"width"/L"height"
    // literals, consistent with every other serializer in this file.
    result.SetNamedValue(NonLocalizable::WidthStr, json::value(monitor.width));
    result.SetNamedValue(NonLocalizable::HeightStr, json::value(monitor.height));
    result.SetNamedValue(NonLocalizable::IsSelected, json::value(monitor.isSelected));
    return result;
}
json::JsonObject EditorArgs::ToJson(const EditorArgs& args)
{
    // Process id + span flag, followed by one entry per attached monitor.
    json::JsonObject argsJson{};
    argsJson.SetNamedValue(NonLocalizable::ProcessId, json::value(args.processId));
    argsJson.SetNamedValue(NonLocalizable::SpanZonesAcrossMonitors, json::value(args.spanZonesAcrossMonitors));

    json::JsonArray monitorArray;
    for (const auto& monitorInfo : args.monitors)
    {
        monitorArray.Append(MonitorInfo::ToJson(monitorInfo));
    }
    argsJson.SetNamedValue(NonLocalizable::Monitors, monitorArray);
    return argsJson;
}
// Loads the persisted FancyZones data file and returns it as one JSON
// document.
// Backward compatibility: if the settings file has no "app-zone-history"
// key, the history is read from the separate appZoneHistoryFileName file
// (or set to an empty array), so callers always see a unified object.
// Returns an empty JSON object when the settings file cannot be read.
json::JsonObject GetPersistFancyZonesJSON(const std::wstring& zonesSettingsFileName, const std::wstring& appZoneHistoryFileName)
{
    auto result = json::from_file(zonesSettingsFileName);
    if (result)
    {
        if (!result->HasKey(NonLocalizable::AppZoneHistoryStr))
        {
            auto appZoneHistory = json::from_file(appZoneHistoryFileName);
            if (appZoneHistory)
            {
                // Merge the externally-stored history into the settings doc.
                result->SetNamedValue(NonLocalizable::AppZoneHistoryStr, appZoneHistory->GetNamedArray(NonLocalizable::AppZoneHistoryStr));
            }
            else
            {
                result->SetNamedValue(NonLocalizable::AppZoneHistoryStr, json::JsonArray());
            }
        }
        return *result;
    }
    else
    {
        return json::JsonObject();
    }
}
// Persists devices, custom layouts, layout templates and quick-key
// bindings to zonesSettingsFileName.
// The "templates" array is not regenerated here; it is read from the
// existing file and written back unchanged so that data owned by the
// editor survives the save.
// The file is rewritten (and a change traced) only when the serialized
// content differs from what is currently on disk.
void SaveZoneSettings(const std::wstring& zonesSettingsFileName, const TDeviceInfoMap& deviceInfoMap, const TCustomZoneSetsMap& customZoneSetsMap, const TLayoutQuickKeysMap& quickKeysMap)
{
    auto before = json::from_file(zonesSettingsFileName);

    json::JsonObject root{};
    json::JsonArray templates{};

    try
    {
        // Carry the existing templates array forward, if present.
        if (before.has_value() && before->HasKey(NonLocalizable::Templates))
        {
            templates = before->GetNamedArray(NonLocalizable::Templates);
        }
    }
    catch (const winrt::hresult_error&)
    {
        // Corrupt "templates" value — fall back to an empty array.
    }

    root.SetNamedValue(NonLocalizable::DevicesStr, JSONHelpers::SerializeDeviceInfos(deviceInfoMap));
    root.SetNamedValue(NonLocalizable::CustomZoneSetsStr, JSONHelpers::SerializeCustomZoneSets(customZoneSetsMap));
    root.SetNamedValue(NonLocalizable::Templates, templates);
    root.SetNamedValue(NonLocalizable::QuickLayoutKeys, JSONHelpers::SerializeQuickKeys(quickKeysMap));

    // Compare by stringified content to skip no-op writes.
    if (!before.has_value() || before.value().Stringify() != root.Stringify())
    {
        Trace::FancyZones::DataChanged();
        json::to_file(zonesSettingsFileName, root);
    }
}
void SaveAppZoneHistory(const std::wstring& appZoneHistoryFileName, const TAppZoneHistoryMap& appZoneHistoryMap)
{
    // Serialize the in-memory history, then write it out only when the
    // content actually differs from what is already on disk.
    json::JsonObject root{};
    root.SetNamedValue(NonLocalizable::AppZoneHistoryStr, JSONHelpers::SerializeAppZoneHistory(appZoneHistoryMap));

    const auto existing = json::from_file(appZoneHistoryFileName);
    const bool unchanged = existing.has_value() && existing->Stringify() == root.Stringify();
    if (!unchanged)
    {
        json::to_file(appZoneHistoryFileName, root);
    }
}
// Parses the "app-zone-history" array into a map keyed by application
// path. Invalid entries are skipped; a missing/malformed array yields an
// empty map.
TAppZoneHistoryMap ParseAppZoneHistory(const json::JsonObject& fancyZonesDataJSON)
{
    try
    {
        TAppZoneHistoryMap appZoneHistoryMap{};
        auto appLastZones = fancyZonesDataJSON.GetNamedArray(NonLocalizable::AppZoneHistoryStr);
        for (uint32_t i = 0; i < appLastZones.Size(); ++i)
        {
            json::JsonObject appLastZone = appLastZones.GetObjectAt(i);
            if (auto appZoneHistory = AppZoneHistoryJSON::FromJson(appLastZone); appZoneHistory.has_value())
            {
                appZoneHistoryMap[appZoneHistory->appPath] = std::move(appZoneHistory->data);
            }
        }
        // Return the local by value: NRVO/implicit move applies; wrapping it
        // in std::move would inhibit copy elision.
        return appZoneHistoryMap;
    }
    catch (const winrt::hresult_error&)
    {
        return {};
    }
}
json::JsonArray SerializeAppZoneHistory(const TAppZoneHistoryMap& appZoneHistoryMap)
{
    // Each application path maps to its per-desktop zone-history records.
    json::JsonArray serialized;
    for (const auto& [path, historyData] : appZoneHistoryMap)
    {
        serialized.Append(AppZoneHistoryJSON::ToJson(AppZoneHistoryJSON{ path, historyData }));
    }
    return serialized;
}
// Parses the "devices" array into a map keyed by device id. Invalid
// entries are skipped; a missing/malformed array yields an empty map.
TDeviceInfoMap ParseDeviceInfos(const json::JsonObject& fancyZonesDataJSON)
{
    try
    {
        TDeviceInfoMap deviceInfoMap{};
        auto devices = fancyZonesDataJSON.GetNamedArray(NonLocalizable::DevicesStr);
        for (uint32_t i = 0; i < devices.Size(); ++i)
        {
            // (Dropped the redundant injected-class-name qualification
            // DeviceInfoJSON::DeviceInfoJSON::.)
            if (auto device = DeviceInfoJSON::FromJson(devices.GetObjectAt(i)); device.has_value())
            {
                deviceInfoMap[device->deviceId] = std::move(device->data);
            }
        }
        // Plain return enables NRVO; `return std::move(local)` would defeat it.
        return deviceInfoMap;
    }
    catch (const winrt::hresult_error&)
    {
        return {};
    }
}
json::JsonArray SerializeDeviceInfos(const TDeviceInfoMap& deviceInfoMap)
{
    // Each map entry becomes one serialized DeviceInfoJSON object.
    json::JsonArray serialized{};
    for (const auto& [deviceId, deviceData] : deviceInfoMap)
    {
        serialized.Append(DeviceInfoJSON::ToJson(DeviceInfoJSON{ deviceId, deviceData }));
    }
    return serialized;
}
// Parses the "custom-zone-sets" array into a map keyed by layout GUID.
// Invalid entries are skipped; a missing/malformed array yields an empty map.
TCustomZoneSetsMap ParseCustomZoneSets(const json::JsonObject& fancyZonesDataJSON)
{
    try
    {
        TCustomZoneSetsMap customZoneSetsMap{};
        auto customZoneSets = fancyZonesDataJSON.GetNamedArray(NonLocalizable::CustomZoneSetsStr);
        for (uint32_t i = 0; i < customZoneSets.Size(); ++i)
        {
            if (auto zoneSet = CustomZoneSetJSON::FromJson(customZoneSets.GetObjectAt(i)); zoneSet.has_value())
            {
                customZoneSetsMap[zoneSet->uuid] = std::move(zoneSet->data);
            }
        }
        // Plain return enables NRVO; `return std::move(local)` would defeat it.
        return customZoneSetsMap;
    }
    catch (const winrt::hresult_error&)
    {
        return {};
    }
}
json::JsonArray SerializeCustomZoneSets(const TCustomZoneSetsMap& customZoneSetsMap)
{
    // Each map entry becomes one serialized custom layout object.
    json::JsonArray serialized{};
    for (const auto& [layoutUuid, layoutData] : customZoneSetsMap)
    {
        serialized.Append(CustomZoneSetJSON::ToJson(CustomZoneSetJSON{ layoutUuid, layoutData }));
    }
    return serialized;
}
// Parses the quick-layout-keys array into a layout-GUID -> key map.
// Invalid entries are skipped; a missing/malformed array yields an empty
// map (logged).
TLayoutQuickKeysMap ParseQuickKeys(const json::JsonObject& fancyZonesDataJSON)
{
    try
    {
        TLayoutQuickKeysMap quickKeysMap{};
        auto quickKeys = fancyZonesDataJSON.GetNamedArray(NonLocalizable::QuickLayoutKeys);
        for (uint32_t i = 0; i < quickKeys.Size(); ++i)
        {
            if (auto quickKey = LayoutQuickKeyJSON::FromJson(quickKeys.GetObjectAt(i)); quickKey.has_value())
            {
                // key is an int: std::move on a trivially copyable value is
                // just a copy, so assign directly.
                quickKeysMap[quickKey->layoutUuid] = quickKey->key;
            }
        }
        // Plain return enables NRVO; `return std::move(local)` would defeat it.
        return quickKeysMap;
    }
    catch (const winrt::hresult_error& e)
    {
        Logger::error(L"Parsing quick keys error: {}", e.message());
        return {};
    }
}
json::JsonArray SerializeQuickKeys(const TLayoutQuickKeysMap& quickKeysMap)
{
    // Each map entry becomes one { uuid, key } object.
    json::JsonArray serialized{};
    for (const auto& [layoutUuid, quickKey] : quickKeysMap)
    {
        serialized.Append(LayoutQuickKeyJSON::ToJson(LayoutQuickKeyJSON{ layoutUuid, quickKey }));
    }
    return serialized;
}
}
|
// @formatter:off
//
// Balau core C++ library
//
// Copyright (C) 2008 Bora Software (contact@borasoftware.com)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///
/// @file ApplicationConfiguration.hpp
///
/// %Application configurations specify application injector bindings.
///
#ifndef COM_BORA_SOFTWARE__BALAU_APPLICATION__APPLICATION_CONFIGURATION
#define COM_BORA_SOFTWARE__BALAU_APPLICATION__APPLICATION_CONFIGURATION
#include <Balau/Application/BindingBuilder.hpp>
#include <Balau/Application/InjectorConfiguration.hpp>
namespace Balau {
///
/// %Application configurations specify application injector bindings.
///
/// One or more application configuration implementations are passed to the
/// injector create function.
///
/// To implement a concrete application configuration class, implement this
/// class and place the binding calls in the implemented configure() method.
///
class ApplicationConfiguration : public InjectorConfiguration {
	///
	/// Create a binding specification for the specified named or unnamed interface.
	///
	/// The deleter type is only used for unique bindings, otherwise it is ignored.
	///
	protected: template <typename BaseT, typename DeleterT = std::default_delete<BaseT>>
	BindingBuilder<BaseT, DeleterT> & bind(std::string_view name = std::string_view()) const {
		return Bind<BaseT, DeleterT>(*this).bind(name);
	}

	///
	/// Merge another application configuration's bindings into this one.
	///
	/// The referenced configuration is stored by pointer, so it must outlive
	/// the injector creation call that consumes this configuration.
	///
	protected: void addConfiguration(const ApplicationConfiguration & conf) const {
		extraConfiguration.push_back(&conf);
	}

	///
	/// Register with the injector a callback that will be called by the injector at the end of construction.
	///
	/// In order to use this method, inject the injector into the injectable via a
	/// weak pointer and call the method.
	///
	/// @param call the callback
	///
	protected: void registerPostConstructionCall(const std::function<void (const Injector &)> & call) const {
		postConstructionCalls.push_back(call);
	}

	///
	/// Register with the injector a callback that will be called in the injector's destructor, before the bindings are deleted.
	///
	/// Although pre-destruction callbacks must be noexcept(true), the pre-destruction
	/// function signature does not contain noexcept(true), as this is not yet handled
	/// by std::function in C++17. Despite this, functions registered as pre-destruction
	/// callbacks must nevertheless be noexcept(true).
	///
	/// In order to use this method, inject the injector into the injectable via a
	/// weak pointer and call the method.
	///
	/// @param call the callback
	///
	protected: void registerPreDestructionCall(const std::function<void ()> & call) const {
		preDestructionCalls.push_back(call);
	}

	///
	/// Register a static singleton pointer that the injector will set up post-construction and invalidate pre-destruction.
	///
	/// The static pointer will be valid immediately after injection construction
	/// up to the start of injector destruction.
	///
	/// This call is a convenience method for calling the registerPostConstructionCall
	/// and registerPreDestructionCall methods in order to set up and tear down the static
	/// singleton pointer.
	///
	/// @tparam T the binding type
	/// @param ptrPtr a raw pointer to the statically allocated shared pointer
	/// @param name an optional binding name
	///
	protected: template <typename T> void registerStaticSingleton(std::shared_ptr<T> * ptrPtr, std::string_view name = std::string_view()) const {
		// Populate the static pointer after injector construction...
		staticSingletonPostConstructionCalls.emplace_back(new StaticSingletonRegistration<T>(ptrPtr, name));
		// ... and invalidate it before the bindings are destroyed.
		preDestructionCalls.push_back([ptrPtr] () { ptrPtr->reset(); });
	}

	////////////////////////// Private implementation /////////////////////////

	/// Hand the accumulated binding builders to the injector.
	/// Consumed once: the vector is moved out and must not be reused.
	private: std::vector<std::shared_ptr<Impl::BindingBuilderBase>> build() const override {
		// Move the vector because it is not used anymore after this call from the injector.
		return std::move(builders);
	}

	/// Configurations added via addConfiguration(), for the injector to merge.
	public: std::vector<const InjectorConfiguration *> getExtraConfiguration() const override {
		return extraConfiguration;
	}

	/// Callbacks to run at the end of injector construction (copied out).
	public: std::list<std::function<void (const Injector& )>> getPostConstructionCalls() const override {
		return postConstructionCalls;
	}

	/// Callbacks to run at the start of injector destruction (copied out).
	public: std::list<std::function<void ()>> getPreDestructionCalls() const override {
		return preDestructionCalls;
	}

	/// Static singleton registrations; moved out, so call at most once.
	public: std::list<std::unique_ptr<StaticSingletonRegistrationBase>> getStaticSingletonPostConstructionCalls() const override {
		return std::move(staticSingletonPostConstructionCalls);
	}

	// Helper that creates a concrete BindingBuilder, stores it type-erased in
	// the parent's builders vector, and returns a reference to the concrete type.
	private: template <typename T, typename DeleterT> struct Bind {
		static_assert(
			std::negation<typename std::is_pointer<T>::type>::value
			, "Raw pointers are not permitted in bindings. Use Unique or Shared bindings instead."
		);

		const ApplicationConfiguration & parent;

		explicit Bind(const ApplicationConfiguration & parent_) : parent(parent_) {}

		BindingBuilder<T, DeleterT> & bind(std::string_view name) const {
			parent.builders.emplace_back(
				std::shared_ptr<Impl::BindingBuilderBase>(new BindingBuilder<T, DeleterT>(std::string(name)))
			);
			// NOTE(review): the cast recovers the concrete type of the builder
			// just stored; if BindingBuilder derives from Impl::BindingBuilderBase,
			// a static_cast would express this downcast more safely — confirm
			// the hierarchy before changing.
			return *reinterpret_cast<BindingBuilder<T, DeleterT> *>(parent.builders.back().get());
		}
	};

	// Partial specialization for const binding types (same mechanics as above).
	private: template <typename T, typename DeleterT> struct Bind<const T, DeleterT> {
		static_assert(
			std::negation<typename std::is_pointer<T>::type>::value
			, "Raw pointers are not permitted in bindings. Use Unique or Shared bindings instead."
		);

		const ApplicationConfiguration & parent;

		explicit Bind(const ApplicationConfiguration & parent_) : parent(parent_) {}

		BindingBuilder<const T, DeleterT> & bind(std::string_view name) const {
			parent.builders.emplace_back(
				std::shared_ptr<Impl::BindingBuilderBase>(new BindingBuilder<const T, DeleterT>(std::string(name)))
			);
			// See the note in the primary Bind template about this cast.
			return *reinterpret_cast<BindingBuilder<const T, DeleterT> *>(parent.builders.back().get());
		}
	};

	// State is mutable because the protected registration API above is const
	// (configuration objects are used through const references by the injector).
	private: mutable std::vector<std::shared_ptr<Impl::BindingBuilderBase>> builders;
	private: mutable std::vector<const InjectorConfiguration *> extraConfiguration;
	private: mutable std::list<std::function<void (const Injector& )>> postConstructionCalls;
	private: mutable std::list<std::function<void ()>> preDestructionCalls;
	private: mutable std::list<std::unique_ptr<StaticSingletonRegistrationBase>> staticSingletonPostConstructionCalls;
};
} // namespace Balau
#endif // COM_BORA_SOFTWARE__BALAU_APPLICATION__APPLICATION_CONFIGURATION
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.